From 8ac58f593816a69b6a6ed19ced3e6bfd84f41c08 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 13 Dec 2025 23:11:28 +0100 Subject: [PATCH 001/191] Temp --- .../example_optimization_modes.py | 40 ++- flixopt/clustering.py | 263 ++++++++++++------ flixopt/flow_system.py | 6 +- flixopt/optimization.py | 20 +- flixopt/transform_accessor.py | 256 ++++++++++++++--- tests/deprecated/test_integration.py | 12 +- 6 files changed, 447 insertions(+), 150 deletions(-) diff --git a/examples/03_Optimization_modes/example_optimization_modes.py b/examples/03_Optimization_modes/example_optimization_modes.py index 1f9968357..3c6d7ee26 100644 --- a/examples/03_Optimization_modes/example_optimization_modes.py +++ b/examples/03_Optimization_modes/example_optimization_modes.py @@ -33,15 +33,10 @@ def get_solutions(optimizations: list, variable: str) -> xr.Dataset: # Segmented Properties segment_length, overlap_length = 96, 1 - # Aggregated Properties - clustering_parameters = fx.ClusteringParameters( - hours_per_period=6, - nr_of_periods=4, - fix_storage_flows=False, - aggregate_data_and_fix_non_binary_vars=True, - percentage_of_period_freedom=0, - penalty_of_period_freedom=0, - ) + # Clustering Properties + n_clusters = 4 + cluster_duration = '6h' + include_storage = False keep_extreme_periods = True imbalance_penalty = 1e5 # or set to None if not needed @@ -195,12 +190,27 @@ def get_solutions(optimizations: list, variable: str) -> xr.Dataset: optimizations.append(optimization) if aggregated: - if keep_extreme_periods: - clustering_parameters.time_series_for_high_peaks = [TS_heat_demand] - clustering_parameters.time_series_for_low_peaks = [TS_electricity_demand, TS_heat_demand] - optimization = fx.ClusteredOptimization('Aggregated', flow_system.copy(), clustering_parameters) - optimization.do_modeling() - optimization.solve(fx.solvers.HighsSolver(0.01 / 100, 60)) + # Use the new transform.cluster() API + time_series_for_high_peaks = [TS_heat_demand] if keep_extreme_periods else None + time_series_for_low_peaks = [TS_electricity_demand, TS_heat_demand] if keep_extreme_periods else None + + clustered_fs = flow_system.copy().transform.cluster( + n_clusters=n_clusters, + cluster_duration=cluster_duration, + include_storage=include_storage, + time_series_for_high_peaks=time_series_for_high_peaks, + time_series_for_low_peaks=time_series_for_low_peaks, + ) + clustered_fs.optimize(fx.solvers.HighsSolver(0.01 / 100, 60)) + + # Wrap in a simple object for compatibility with comparison code + class ClusteredResult: + def __init__(self, name, fs): + self.name = name + self.flow_system = fs + self.durations = {'total': 0} # Placeholder + + optimization = ClusteredResult('Clustered', clustered_fs) optimizations.append(optimization) # --- Plotting for comparison --- diff --git a/flixopt/clustering.py b/flixopt/clustering.py index d392167a1..b3f295711 100644 --- a/flixopt/clustering.py +++ b/flixopt/clustering.py @@ -270,91 +270,162 @@ def get_equation_indices(self, skip_first_index_of_period: bool = True) -> tuple return np.array(idx_var1), np.array(idx_var2) +def _parse_cluster_duration(duration: str | float) -> float: + """Convert cluster duration to hours. + + Args: + duration: Either a pandas-style duration string ('1D', '24h', '6h') + or a numeric value in hours. + + Returns: + Duration in hours. 
+ + Examples: + >>> _parse_cluster_duration('1D') + 24.0 + >>> _parse_cluster_duration('6h') + 6.0 + >>> _parse_cluster_duration(24) + 24.0 + """ + import pandas as pd + + if isinstance(duration, (int, float)): + return float(duration) + + # Parse pandas-style duration strings + td = pd.Timedelta(duration) + return td.total_seconds() / 3600 + + class ClusteringParameters: + """Parameters for time series clustering. + + This class configures how time series data is clustered into representative + segments using the tsam (time series aggregation module) package. + + Note: + The term "cluster" here refers to clustering time segments (e.g., typical days), + not to be confused with the FlowSystem's "period" dimension (e.g., years). + + Args: + n_clusters: Number of clusters to create (e.g., 8 typical days). + cluster_duration: Duration of each cluster segment. Can be a pandas-style + string ('1D', '24h', '6h') or a numeric value in hours. + aggregate_data: If True, aggregate time series data and fix all time-dependent + variables. If False, only fix binary variables. Default is True. + include_storage: Whether to include storage flows in clustering constraints. + If other flows are fixed, fixing storage flows is usually not required. + Default is True. + flexibility_percent: Maximum percentage (0-100) of binary values that can + deviate from the clustered pattern. Default is 0 (no flexibility). + flexibility_penalty: Penalty added to objective for each deviation. + Only applies when flexibility_percent > 0. Default is 0. + time_series_for_high_peaks: List of TimeSeriesData to force inclusion of + segments with high values. + time_series_for_low_peaks: List of TimeSeriesData to force inclusion of + segments with low values. + + Examples: + Basic usage (8 typical days): + + >>> clustered_fs = flow_system.transform.cluster( + ... n_clusters=8, + ... cluster_duration='1D', + ... ) + + With all options: + + >>> clustered_fs = flow_system.transform.cluster( + ... n_clusters=8, + ... cluster_duration=24, # 24 hours = 1 day + ... aggregate_data=True, + ... include_storage=True, + ... flexibility_percent=5, + ... flexibility_penalty=100, + ... time_series_for_high_peaks=[heat_demand_ts], + ... ) + """ + def __init__( self, - hours_per_period: float, - nr_of_periods: int, - fix_storage_flows: bool, - aggregate_data_and_fix_non_binary_vars: bool, - percentage_of_period_freedom: float = 0, - penalty_of_period_freedom: float = 0, + n_clusters: int, + cluster_duration: str | float, + aggregate_data: bool = True, + include_storage: bool = True, + flexibility_percent: float = 0, + flexibility_penalty: float = 0, time_series_for_high_peaks: list[TimeSeriesData] | None = None, time_series_for_low_peaks: list[TimeSeriesData] | None = None, ): - """ - Initializes clustering parameters for time series data - - Args: - hours_per_period: Duration of each period in hours. - nr_of_periods: Number of typical periods to use in the aggregation. - fix_storage_flows: Whether to aggregate storage flows (load/unload); if other flows - are fixed, fixing storage flows is usually not required. - aggregate_data_and_fix_non_binary_vars: Whether to aggregate all time series data, which allows to fix all time series variables (like flow_rate), - or only fix binary variables. If False non time_series data is changed!! If True, the mathematical Problem - is simplified even further. 
- percentage_of_period_freedom: Specifies the maximum percentage (0–100) of binary values within each period - that can deviate as "free variables", chosen by the solver (default is 0). - This allows binary variables to be 'partly equated' between aggregated periods. - penalty_of_period_freedom: The penalty associated with each "free variable"; defaults to 0. Added to Penalty - time_series_for_high_peaks: List of TimeSeriesData to use for explicitly selecting periods with high values. - time_series_for_low_peaks: List of TimeSeriesData to use for explicitly selecting periods with low values. - """ - self.hours_per_period = hours_per_period - self.nr_of_periods = nr_of_periods - self.fix_storage_flows = fix_storage_flows - self.aggregate_data_and_fix_non_binary_vars = aggregate_data_and_fix_non_binary_vars - self.percentage_of_period_freedom = percentage_of_period_freedom - self.penalty_of_period_freedom = penalty_of_period_freedom + self.n_clusters = n_clusters + self.cluster_duration_hours = _parse_cluster_duration(cluster_duration) + self.aggregate_data = aggregate_data + self.include_storage = include_storage + self.flexibility_percent = flexibility_percent + self.flexibility_penalty = flexibility_penalty self.time_series_for_high_peaks: list[TimeSeriesData] = time_series_for_high_peaks or [] self.time_series_for_low_peaks: list[TimeSeriesData] = time_series_for_low_peaks or [] @property - def use_extreme_periods(self): - return self.time_series_for_high_peaks or self.time_series_for_low_peaks + def use_extreme_periods(self) -> bool: + """Whether extreme segment selection is enabled.""" + return bool(self.time_series_for_high_peaks or self.time_series_for_low_peaks) @property def labels_for_high_peaks(self) -> list[str]: + """Names of time series used for high peak selection.""" return [ts.name for ts in self.time_series_for_high_peaks] @property def labels_for_low_peaks(self) -> list[str]: + """Names of time series used for low peak selection.""" return [ts.name for ts in self.time_series_for_low_peaks] - @property - def use_low_peaks(self) -> bool: - return bool(self.time_series_for_low_peaks) - class ClusteringModel(Submodel): - """The ClusteringModel holds equations and variables related to the Clustering of a FlowSystem. - It creates Equations that equates indices of variables, and introduces penalties related to binary variables, that - escape the equation to their related binaries in other periods""" + """Model that adds clustering constraints to equate variables across clustered time segments. + + Creates equations that equate variable values at corresponding time indices within the same cluster, + and optionally allows binary variables to deviate with a penalty. + """ def __init__( self, model: FlowSystemModel, clustering_parameters: ClusteringParameters, flow_system: FlowSystem, - clustering_data: Clustering, + clustering_data: Clustering | dict[tuple, Clustering], components_to_clusterize: list[Component] | None, ): """ - Modeling-Element for "index-equating"-equations + Args: + model: The FlowSystemModel to add constraints to. + clustering_parameters: Parameters controlling clustering behavior. + flow_system: The FlowSystem being optimized. + clustering_data: Either a single Clustering object (simple case) or a dict + mapping (period_label, scenario_label) tuples to Clustering objects + (multi-dimensional case). + components_to_clusterize: Components to apply clustering to. If None, all components. 
""" super().__init__(model, label_of_element='Clustering', label_of_model='Clustering') self.flow_system = flow_system self.clustering_parameters = clustering_parameters - self.clustering_data = clustering_data self.components_to_clusterize = components_to_clusterize + # Handle both single and multi-dimensional clustering + if isinstance(clustering_data, dict): + self.clustering_data_dict = clustering_data + self.is_multi_dimensional = True + else: + self.clustering_data_dict = {(None, None): clustering_data} + self.is_multi_dimensional = False + def do_modeling(self): if not self.components_to_clusterize: - components = self.flow_system.components.values() + components = list(self.flow_system.components.values()) else: - components = [component for component in self.components_to_clusterize] - - indices = self.clustering_data.get_equation_indices(skip_first_index_of_period=True) + components = list(self.components_to_clusterize) time_variables: set[str] = { name for name in self._model.variables if 'time' in self._model.variables[name].dims @@ -363,69 +434,101 @@ def do_modeling(self): binary_time_variables: set[str] = time_variables & binary_variables for component in components: - if isinstance(component, Storage) and not self.clustering_parameters.fix_storage_flows: - continue # Fix Nothing in The Storage + if isinstance(component, Storage) and not self.clustering_parameters.include_storage: + continue # Skip storage if not included all_variables_of_component = set(component.submodel.variables) - if self.clustering_parameters.aggregate_data_and_fix_non_binary_vars: + if self.clustering_parameters.aggregate_data: relevant_variables = component.submodel.variables[all_variables_of_component & time_variables] else: relevant_variables = component.submodel.variables[all_variables_of_component & binary_time_variables] + for variable in relevant_variables: - self._equate_indices(component.submodel.variables[variable], indices) + self._equate_indices_multi_dimensional(component.submodel.variables[variable]) - penalty = self.clustering_parameters.penalty_of_period_freedom - if (self.clustering_parameters.percentage_of_period_freedom > 0) and penalty != 0: + # Add penalty for flexibility deviations + penalty = self.clustering_parameters.flexibility_penalty + if self.clustering_parameters.flexibility_percent > 0 and penalty != 0: from .effects import PENALTY_EFFECT_LABEL for variable_name in self.variables_direct: variable = self.variables_direct[variable_name] - # Sum correction variables over all dimensions to get periodic penalty contribution self._model.effects.add_share_to_effects( - name='Aggregation', + name='Clustering', expressions={PENALTY_EFFECT_LABEL: (variable * penalty).sum('time')}, target='periodic', ) - def _equate_indices(self, variable: linopy.Variable, indices: tuple[np.ndarray, np.ndarray]) -> None: - assert len(indices[0]) == len(indices[1]), 'The length of the indices must match!!' 
+ def _equate_indices_multi_dimensional(self, variable: linopy.Variable) -> None: + """Equate indices across clustered segments, handling multi-dimensional cases.""" + var_dims = set(variable.dims) + has_period = 'period' in var_dims + has_scenario = 'scenario' in var_dims + + for (period_label, scenario_label), clustering in self.clustering_data_dict.items(): + indices = clustering.get_equation_indices(skip_first_index_of_period=True) + + if len(indices[0]) == 0: + continue # No constraints needed for this cluster + + # Build selector for this period/scenario combination + selector = {} + if has_period and period_label is not None: + selector['period'] = period_label + if has_scenario and scenario_label is not None: + selector['scenario'] = scenario_label + + # Select variable slice for this dimension combination + if selector: + var_slice = variable.sel(**selector) + else: + var_slice = variable + + # Create constraint name with dimension info + dim_suffix = '' + if period_label is not None: + dim_suffix += f'_p{period_label}' + if scenario_label is not None: + dim_suffix += f'_s{scenario_label}' + + # Equate indices within this slice + self._equate_indices(var_slice, indices, dim_suffix, variable.name) + + def _equate_indices( + self, + variable: linopy.Variable, + indices: tuple[np.ndarray, np.ndarray], + dim_suffix: str = '', + original_var_name: str | None = None, + ) -> None: + """Add constraints to equate variable values at corresponding cluster indices.""" + assert len(indices[0]) == len(indices[1]), 'The length of the indices must match!' length = len(indices[0]) + var_name = original_var_name or variable.name - # Gleichung: - # eq1: x(p1,t) - x(p3,t) = 0 # wobei p1 und p3 im gleichen Cluster sind und t = 0..N_p + # Main constraint: x(cluster_a, t) - x(cluster_b, t) = 0 con = self.add_constraints( variable.isel(time=indices[0]) - variable.isel(time=indices[1]) == 0, - short_name=f'equate_indices|{variable.name}', + short_name=f'equate_indices{dim_suffix}|{var_name}', ) - # Korrektur: (bisher nur für Binärvariablen:) - if ( - variable.name in self._model.variables.binaries - and self.clustering_parameters.percentage_of_period_freedom > 0 - ): + # Add correction variables for binary flexibility + if var_name in self._model.variables.binaries and self.clustering_parameters.flexibility_percent > 0: sel = variable.isel(time=indices[0]) coords = {d: sel.indexes[d] for d in sel.dims} - var_k1 = self.add_variables(binary=True, coords=coords, short_name=f'correction1|{variable.name}') - - var_k0 = self.add_variables(binary=True, coords=coords, short_name=f'correction0|{variable.name}') + var_k1 = self.add_variables(binary=True, coords=coords, short_name=f'correction1{dim_suffix}|{var_name}') + var_k0 = self.add_variables(binary=True, coords=coords, short_name=f'correction0{dim_suffix}|{var_name}') - # equation extends ... - # --> On(p3) can be 0/1 independent of On(p1,t)! 
- # eq1: On(p1,t) - On(p3,t) + K1(p3,t) - K0(p3,t) = 0 - # --> correction On(p3) can be: - # On(p1,t) = 1 -> On(p3) can be 0 -> K0=1 (,K1=0) - # On(p1,t) = 0 -> On(p3) can be 1 -> K1=1 (,K0=1) + # Extend equation to allow deviation: On(a,t) - On(b,t) + K1 - K0 = 0 con.lhs += 1 * var_k1 - 1 * var_k0 - # interlock var_k1 and var_K2: - # eq: var_k0(t)+var_k1(t) <= 1 - self.add_constraints(var_k0 + var_k1 <= 1, short_name=f'lock_k0_and_k1|{variable.name}') + # Interlock K0 and K1: can't both be 1 + self.add_constraints(var_k0 + var_k1 <= 1, short_name=f'lock_k0_and_k1{dim_suffix}|{var_name}') - # Begrenzung der Korrektur-Anzahl: - # eq: sum(K) <= n_Corr_max - limit = int(np.floor(self.clustering_parameters.percentage_of_period_freedom / 100 * length)) + # Limit total corrections + limit = int(np.floor(self.clustering_parameters.flexibility_percent / 100 * length)) self.add_constraints( var_k0.sum(dim='time') + var_k1.sum(dim='time') <= limit, - short_name=f'limit_corrections|{variable.name}', + short_name=f'limit_corrections{dim_suffix}|{var_name}', ) diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index c6c21bb2a..c83b2bdc2 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -1282,11 +1282,15 @@ def _add_clustering_constraints(self) -> None: from .clustering import ClusteringModel info = self._clustering_info + + # Handle both simple (single Clustering) and multi-dimensional (dict) cases + clustering_data = info.get('clustering_results') or info.get('clustering') + clustering_model = ClusteringModel( model=self.model, clustering_parameters=info['parameters'], flow_system=self, - clustering_data=info['clustering'], + clustering_data=clustering_data, components_to_clusterize=info['components_to_clusterize'], ) clustering_model.do_modeling() diff --git a/flixopt/optimization.py b/flixopt/optimization.py index 48a9f5e19..32bdd7410 100644 --- a/flixopt/optimization.py +++ b/flixopt/optimization.py @@ -393,15 +393,13 @@ def __init__( ): warnings.warn( f'ClusteredOptimization is deprecated and will be removed in v{DEPRECATION_REMOVAL_VERSION}. ' - 'Use FlowSystem.transform.cluster(params) followed by FlowSystem.optimize(solver) instead. ' - 'Example: clustered_fs = flow_system.transform.cluster(params); clustered_fs.optimize(solver)', + 'Use FlowSystem.transform.cluster() followed by FlowSystem.optimize(solver) instead. ' + 'Example: clustered_fs = flow_system.transform.cluster(n_clusters=8, cluster_duration="1D"); ' + 'clustered_fs.optimize(solver)', DeprecationWarning, stacklevel=2, ) - if flow_system.scenarios is not None: - raise ValueError('Clustering is not supported for scenarios yet. Please use Optimization instead.') - if flow_system.periods is not None: - raise ValueError('Clustering is not supported for periods yet. Please use Optimization instead.') + # Note: Multi-period and multi-scenario are now supported via the new transform.cluster() API # Skip parent deprecation warning by calling common init directly _initialize_optimization_common( self, @@ -443,10 +441,10 @@ def _perform_clustering(self): raise ValueError( f'Clustering failed due to inconsistent time step sizes:delta_t varies from {dt_min} to {dt_max} hours.' 
) - ratio = self.clustering_parameters.hours_per_period / dt_max + ratio = self.clustering_parameters.cluster_duration_hours / dt_max if not np.isclose(ratio, round(ratio), atol=1e-9): raise ValueError( - f'The selected {self.clustering_parameters.hours_per_period=} does not match the time ' + f'The selected cluster_duration={self.clustering_parameters.cluster_duration_hours}h does not match the time ' f'step size of {dt_max} hours. It must be an integer multiple of {dt_max} hours.' ) @@ -461,8 +459,8 @@ def _perform_clustering(self): self.clustering = Clustering( original_data=temporaly_changing_ds.to_dataframe(), hours_per_time_step=float(dt_min), - hours_per_period=self.clustering_parameters.hours_per_period, - nr_of_periods=self.clustering_parameters.nr_of_periods, + hours_per_period=self.clustering_parameters.cluster_duration_hours, + nr_of_periods=self.clustering_parameters.n_clusters, weights=self.calculate_clustering_weights(temporaly_changing_ds), time_series_for_high_peaks=self.clustering_parameters.labels_for_high_peaks, time_series_for_low_peaks=self.clustering_parameters.labels_for_low_peaks, @@ -471,7 +469,7 @@ def _perform_clustering(self): self.clustering.cluster() result = self.clustering.plot(show=CONFIG.Plotting.default_show) result.to_html(self.folder / 'clustering.html') - if self.clustering_parameters.aggregate_data_and_fix_non_binary_vars: + if self.clustering_parameters.aggregate_data: ds = self.flow_system.to_dataset() for name, series in self.clustering.aggregated_data.items(): da = ( diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index eaec1a3b6..5e4164bd8 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -31,9 +31,9 @@ class TransformAccessor: with modified structure or data, accessible via `flow_system.transform`. Examples: - Clustered optimization: + Clustered optimization (8 typical days): - >>> clustered_fs = flow_system.transform.cluster(params) + >>> clustered_fs = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D') >>> clustered_fs.optimize(solver) >>> print(clustered_fs.solution) @@ -54,7 +54,14 @@ def __init__(self, flow_system: FlowSystem) -> None: def cluster( self, - parameters: ClusteringParameters, + n_clusters: int, + cluster_duration: str | float, + aggregate_data: bool = True, + include_storage: bool = True, + flexibility_percent: float = 0, + flexibility_penalty: float = 0, + time_series_for_high_peaks: list | None = None, + time_series_for_low_peaks: list | None = None, components_to_clusterize: list | None = None, ) -> FlowSystem: """ @@ -62,16 +69,33 @@ def cluster( This method creates a new FlowSystem that can be optimized with clustered time series data. The clustering reduces computational - complexity by identifying representative time periods. + complexity by identifying representative time segments (e.g., typical days). + + For FlowSystems with multiple periods or scenarios, clustering is performed + independently for each period/scenario combination. The returned FlowSystem: - Has the same timesteps as the original (clustering works via constraints, not reduction) - - Has aggregated time series data (if `aggregate_data_and_fix_non_binary_vars=True`) - - Will have clustering constraints added during `build_model()` + - Has aggregated time series data (if ``aggregate_data=True``) + - Will have clustering constraints added during ``build_model()`` Args: - parameters: Clustering parameters specifying period duration, - number of periods, and aggregation settings. 
+ n_clusters: Number of clusters (typical segments) to create. + E.g., 8 for 8 typical days from a year of data. + cluster_duration: Duration of each cluster segment. Can be a pandas-style + string ('1D', '24h', '6h') or a numeric value in hours. + aggregate_data: If True (default), aggregate time series data and fix + all time-dependent variables. If False, only fix binary variables. + include_storage: Whether to include storage flows in clustering constraints. + Default is True. + flexibility_percent: Maximum percentage (0-100) of binary values that can + deviate from the clustered pattern. Default is 0 (no flexibility). + flexibility_penalty: Penalty added to objective for each deviation. + Only applies when flexibility_percent > 0. Default is 0. + time_series_for_high_peaks: List of TimeSeriesData to force inclusion of + segments with high values. + time_series_for_low_peaks: List of TimeSeriesData to force inclusion of + segments with low values. components_to_clusterize: List of components to apply clustering to. If None, all components are clustered. @@ -80,29 +104,64 @@ def cluster( Raises: ValueError: If timestep sizes are inconsistent. - ValueError: If hours_per_period is not a multiple of timestep size. + ValueError: If cluster_duration is not a multiple of timestep size. Examples: - Basic clustered optimization: - - >>> from flixopt import ClusteringParameters - >>> params = ClusteringParameters( - ... hours_per_period=24, - ... nr_of_periods=8, - ... fix_storage_flows=True, - ... aggregate_data_and_fix_non_binary_vars=True, + Basic clustered optimization (8 typical days): + + >>> clustered_fs = flow_system.transform.cluster( + ... n_clusters=8, + ... cluster_duration='1D', ... ) - >>> clustered_fs = flow_system.transform.cluster(params) >>> clustered_fs.optimize(solver) - >>> print(clustered_fs.solution) - With model modifications: + With extreme period selection: + + >>> clustered_fs = flow_system.transform.cluster( + ... n_clusters=8, + ... cluster_duration='1D', + ... time_series_for_high_peaks=[heat_demand_ts], + ... ) - >>> clustered_fs = flow_system.transform.cluster(params) - >>> clustered_fs.build_model() - >>> clustered_fs.model.add_constraints(...) - >>> clustered_fs.solve(solver) + Multi-period FlowSystem (each year clustered independently): + + >>> multi_year_fs = fx.FlowSystem(timesteps, periods=pd.Index([2025, 2026, 2027])) + >>> clustered_fs = multi_year_fs.transform.cluster( + ... n_clusters=8, + ... cluster_duration='1D', + ... 
) """ + from .clustering import ClusteringParameters + + # Create ClusteringParameters from keyword arguments + params = ClusteringParameters( + n_clusters=n_clusters, + cluster_duration=cluster_duration, + aggregate_data=aggregate_data, + include_storage=include_storage, + flexibility_percent=flexibility_percent, + flexibility_penalty=flexibility_penalty, + time_series_for_high_peaks=time_series_for_high_peaks, + time_series_for_low_peaks=time_series_for_low_peaks, + ) + + # Check for multi-period/scenario dimensions + has_periods = self._fs.periods is not None + has_scenarios = self._fs.scenarios is not None + + if not has_periods and not has_scenarios: + # Simple case: no extra dimensions + return self._cluster_simple(params, components_to_clusterize) + else: + # Multi-dimensional case: cluster independently per period/scenario + return self._cluster_multi_dimensional(params, components_to_clusterize) + + def _cluster_simple( + self, + params: ClusteringParameters, + components_to_clusterize: list | None, + ) -> FlowSystem: + """Perform clustering for simple case (no periods/scenarios).""" import numpy as np from .clustering import Clustering @@ -116,10 +175,10 @@ def cluster( f'Clustering failed due to inconsistent time step sizes: ' f'delta_t varies from {dt_min} to {dt_max} hours.' ) - ratio = parameters.hours_per_period / dt_max + ratio = params.cluster_duration_hours / dt_max if not np.isclose(ratio, round(ratio), atol=1e-9): raise ValueError( - f'The selected hours_per_period={parameters.hours_per_period} does not match the time ' + f'The selected cluster_duration={params.cluster_duration_hours}h does not match the time ' f'step size of {dt_max} hours. It must be an integer multiple of {dt_max} hours.' ) @@ -134,20 +193,16 @@ def cluster( clustering = Clustering( original_data=temporaly_changing_ds.to_dataframe(), hours_per_time_step=float(dt_min), - hours_per_period=parameters.hours_per_period, - nr_of_periods=parameters.nr_of_periods, + hours_per_period=params.cluster_duration_hours, + nr_of_periods=params.n_clusters, weights=self._calculate_clustering_weights(temporaly_changing_ds), - time_series_for_high_peaks=parameters.labels_for_high_peaks, - time_series_for_low_peaks=parameters.labels_for_low_peaks, + time_series_for_high_peaks=params.labels_for_high_peaks, + time_series_for_low_peaks=params.labels_for_low_peaks, ) clustering.cluster() # Create new FlowSystem (with aggregated data if requested) - if parameters.aggregate_data_and_fix_non_binary_vars: - # Note: A second to_dataset() call is required here because: - # 1. The first 'ds' (line 124) was processed by drop_constant_arrays() - # 2. We need the full unprocessed dataset to apply aggregated data modifications - # 3. 
The clustering used 'temporaly_changing_ds' for input, not the full 'ds' + if params.aggregate_data: ds = self._fs.to_dataset() for name, series in clustering.aggregated_data.items(): da = DataConverter.to_dataarray(series, self._fs.coords).rename(name).assign_attrs(ds[name].attrs) @@ -159,12 +214,11 @@ def cluster( clustered_fs = FlowSystem.from_dataset(ds) else: - # Copy without data modification clustered_fs = self._fs.copy() # Store clustering info for later use clustered_fs._clustering_info = { - 'parameters': parameters, + 'parameters': params, 'clustering': clustering, 'components_to_clusterize': components_to_clusterize, 'original_fs': self._fs, @@ -172,6 +226,134 @@ def cluster( return clustered_fs + def _cluster_multi_dimensional( + self, + params: ClusteringParameters, + components_to_clusterize: list | None, + ) -> FlowSystem: + """Perform clustering independently for each period/scenario combination.""" + import numpy as np + + from .clustering import Clustering + from .core import DataConverter, TimeSeriesData, drop_constant_arrays + + # Validation + dt_min = float(self._fs.hours_per_timestep.min().item()) + dt_max = float(self._fs.hours_per_timestep.max().item()) + if dt_min != dt_max: + raise ValueError( + f'Clustering failed due to inconsistent time step sizes: ' + f'delta_t varies from {dt_min} to {dt_max} hours.' + ) + ratio = params.cluster_duration_hours / dt_max + if not np.isclose(ratio, round(ratio), atol=1e-9): + raise ValueError( + f'The selected cluster_duration={params.cluster_duration_hours}h does not match the time ' + f'step size of {dt_max} hours. It must be an integer multiple of {dt_max} hours.' + ) + + logger.info(f'{"":#^80}') + logger.info(f'{" Clustering TimeSeries Data (Multi-dimensional) ":#^80}') + + # Determine iteration dimensions + periods = list(self._fs.periods) if self._fs.periods is not None else [None] + scenarios = list(self._fs.scenarios) if self._fs.scenarios is not None else [None] + + ds = self._fs.to_dataset() + clustering_results: dict[tuple, Clustering] = {} + + # Cluster each period x scenario combination independently + for period_label in periods: + for scenario_label in scenarios: + # Select slice for this combination + selector = {} + if period_label is not None: + selector['period'] = period_label + if scenario_label is not None: + selector['scenario'] = scenario_label + + if selector: + ds_slice = ds.sel(**selector) + else: + ds_slice = ds + + # Drop constant arrays for clustering + temporaly_changing_ds = drop_constant_arrays(ds_slice, dim='time') + + # Skip if no time-varying data + if len(temporaly_changing_ds.data_vars) == 0: + logger.warning(f'No time-varying data for period={period_label}, scenario={scenario_label}') + continue + + dim_info = [] + if period_label is not None: + dim_info.append(f'period={period_label}') + if scenario_label is not None: + dim_info.append(f'scenario={scenario_label}') + logger.info(f'Clustering {", ".join(dim_info) or "data"}...') + + # Perform clustering on this slice + clustering = Clustering( + original_data=temporaly_changing_ds.to_dataframe(), + hours_per_time_step=float(dt_min), + hours_per_period=params.cluster_duration_hours, + nr_of_periods=params.n_clusters, + weights=self._calculate_clustering_weights(temporaly_changing_ds), + time_series_for_high_peaks=params.labels_for_high_peaks, + time_series_for_low_peaks=params.labels_for_low_peaks, + ) + clustering.cluster() + clustering_results[(period_label, scenario_label)] = clustering + + # Apply aggregated data if requested + if 
params.aggregate_data: + for name, series in clustering.aggregated_data.items(): + if name not in ds.data_vars: + continue + # Get the original data array to update + original_da = ds[name] + # Create aggregated data array + agg_da = DataConverter.to_dataarray(series, {'time': ds_slice.coords['time']}) + + # Update the slice in the full dataset + if selector: + # Need to update just this slice in the full array + # Use xr.where or direct assignment + if 'period' in original_da.dims and period_label is not None: + if 'scenario' in original_da.dims and scenario_label is not None: + original_da.loc[{'period': period_label, 'scenario': scenario_label}] = ( + agg_da.values + ) + else: + original_da.loc[{'period': period_label}] = agg_da.values + elif 'scenario' in original_da.dims and scenario_label is not None: + original_da.loc[{'scenario': scenario_label}] = agg_da.values + + # Create new FlowSystem + from .flow_system import FlowSystem + + if params.aggregate_data: + # Ensure TimeSeriesData is preserved + for name in ds.data_vars: + da = ds[name] + if TimeSeriesData.is_timeseries_data(da): + ds[name] = TimeSeriesData.from_dataarray(da) + clustered_fs = FlowSystem.from_dataset(ds) + else: + clustered_fs = self._fs.copy() + + # Store clustering info for later use + clustered_fs._clustering_info = { + 'parameters': params, + 'clustering_results': clustering_results, # Dict of Clustering objects per dimension + 'components_to_clusterize': components_to_clusterize, + 'original_fs': self._fs, + 'has_periods': self._fs.periods is not None, + 'has_scenarios': self._fs.scenarios is not None, + } + + return clustered_fs + @staticmethod def _calculate_clustering_weights(ds) -> dict[str, float]: """Calculate weights for clustering based on dataset attributes.""" diff --git a/tests/deprecated/test_integration.py b/tests/deprecated/test_integration.py index 2f083b4fb..9b05a5c10 100644 --- a/tests/deprecated/test_integration.py +++ b/tests/deprecated/test_integration.py @@ -282,12 +282,12 @@ def modeling_calculation(self, request, flow_system_long, highs_solver): 'aggModel', flow_system, fx.ClusteringParameters( - hours_per_period=6, - nr_of_periods=4, - fix_storage_flows=False, - aggregate_data_and_fix_non_binary_vars=True, - percentage_of_period_freedom=0, - penalty_of_period_freedom=0, + n_clusters=4, + cluster_duration='6h', + include_storage=False, + aggregate_data=True, + flexibility_percent=0, + flexibility_penalty=0, time_series_for_low_peaks=[electrical_load_ts, thermal_load_ts], time_series_for_high_peaks=[thermal_load_ts], ), From e579a115652e77d6529a4ade62bdcbf65f82dacc Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 13 Dec 2025 23:40:35 +0100 Subject: [PATCH 002/191] Add n_segments --- flixopt/clustering.py | 66 +++++++++++++++++++++++++---------- flixopt/transform_accessor.py | 23 ++++++++++-- 2 files changed, 67 insertions(+), 22 deletions(-) diff --git a/flixopt/clustering.py b/flixopt/clustering.py index b3f295711..da926545a 100644 --- a/flixopt/clustering.py +++ b/flixopt/clustering.py @@ -41,7 +41,7 @@ class Clustering: """ - Clustering organizing class + Clustering organizing class for time series aggregation using tsam. 
""" def __init__( @@ -49,17 +49,21 @@ def __init__( original_data: pd.DataFrame, hours_per_time_step: Scalar, hours_per_period: Scalar, - nr_of_periods: int = 8, + nr_of_periods: int | None = 8, + n_segments: int | None = None, weights: dict[str, float] | None = None, time_series_for_high_peaks: list[str] | None = None, time_series_for_low_peaks: list[str] | None = None, ): """ Args: - original_data: The original data to aggregate + original_data: The original data to aggregate. hours_per_time_step: The duration of each timestep in hours. hours_per_period: The duration of each period in hours. nr_of_periods: The number of typical periods to use in the aggregation. + Set to None to skip period clustering and only do segmentation. + n_segments: Number of segments within each period (inner-period clustering). + If None, no inner-period segmentation is performed. weights: The weights for aggregation. If None, all time series are equally weighted. time_series_for_high_peaks: List of time series to use for explicitly selecting periods with high values. time_series_for_low_peaks: List of time series to use for explicitly selecting periods with low values. @@ -72,6 +76,7 @@ def __init__( self.hours_per_time_step = hours_per_time_step self.hours_per_period = hours_per_period self.nr_of_periods = nr_of_periods + self.n_segments = n_segments self.nr_of_time_steps = len(self.original_data.index) self.weights = weights or {} self.time_series_for_high_peaks = time_series_for_high_peaks or [] @@ -83,28 +88,35 @@ def __init__( def cluster(self) -> None: """ - Durchführung der Zeitreihenaggregation + Perform time series clustering/aggregation. """ start_time = timeit.default_timer() - # Erstellen des aggregation objects + + # Determine number of periods for clustering + # If nr_of_periods is None, use segmentation only (no inter-period clustering) + total_periods = int(self.nr_of_time_steps * self.hours_per_time_step / self.hours_per_period) + n_typical_periods = self.nr_of_periods if self.nr_of_periods is not None else total_periods + + # Create aggregation object self.tsam = tsam.TimeSeriesAggregation( self.original_data, - noTypicalPeriods=self.nr_of_periods, + noTypicalPeriods=n_typical_periods, hoursPerPeriod=self.hours_per_period, resolution=self.hours_per_time_step, clusterMethod='k_means', - extremePeriodMethod='new_cluster_center' - if self.use_extreme_periods - else 'None', # Wenn Extremperioden eingebunden werden sollen, nutze die Methode 'new_cluster_center' aus tsam + extremePeriodMethod='new_cluster_center' if self.use_extreme_periods else 'None', weightDict={name: weight for name, weight in self.weights.items() if name in self.original_data.columns}, addPeakMax=self.time_series_for_high_peaks, addPeakMin=self.time_series_for_low_peaks, + # Inner-period segmentation parameters + segmentation=self.n_segments is not None, + noSegments=self.n_segments if self.n_segments is not None else 1, ) - self.tsam.createTypicalPeriods() # Ausführen der Aggregation/Clustering + self.tsam.createTypicalPeriods() self.aggregated_data = self.tsam.predictOriginalData() - self.clustering_duration_seconds = timeit.default_timer() - start_time # Zeit messen: + self.clustering_duration_seconds = timeit.default_timer() - start_time if logger.isEnabledFor(logging.INFO): logger.info(self.describe_clusters()) @@ -310,8 +322,13 @@ class ClusteringParameters: Args: n_clusters: Number of clusters to create (e.g., 8 typical days). + Set to None to skip clustering and only do segmentation. 
cluster_duration: Duration of each cluster segment. Can be a pandas-style string ('1D', '24h', '6h') or a numeric value in hours. + n_segments: Number of segments to create within each cluster (inner-period + clustering). For example, n_segments=4 with cluster_duration='1D' will + reduce 24 hourly timesteps to 4 representative segments per day. + Default is None (no inner-period segmentation). aggregate_data: If True, aggregate time series data and fix all time-dependent variables. If False, only fix binary variables. Default is True. include_storage: Whether to include storage flows in clustering constraints. @@ -334,23 +351,28 @@ class ClusteringParameters: ... cluster_duration='1D', ... ) - With all options: + With inner-period segmentation (8 typical days × 4 segments each = 32 timesteps): >>> clustered_fs = flow_system.transform.cluster( ... n_clusters=8, - ... cluster_duration=24, # 24 hours = 1 day - ... aggregate_data=True, - ... include_storage=True, - ... flexibility_percent=5, - ... flexibility_penalty=100, - ... time_series_for_high_peaks=[heat_demand_ts], + ... cluster_duration='1D', + ... n_segments=4, # Reduce 24h to 4 segments per day + ... ) + + Segmentation only (no clustering, just reduce to 4 segments per day): + + >>> clustered_fs = flow_system.transform.cluster( + ... n_clusters=None, # Skip clustering + ... cluster_duration='1D', + ... n_segments=4, ... ) """ def __init__( self, - n_clusters: int, + n_clusters: int | None, cluster_duration: str | float, + n_segments: int | None = None, aggregate_data: bool = True, include_storage: bool = True, flexibility_percent: float = 0, @@ -360,6 +382,7 @@ def __init__( ): self.n_clusters = n_clusters self.cluster_duration_hours = _parse_cluster_duration(cluster_duration) + self.n_segments = n_segments self.aggregate_data = aggregate_data self.include_storage = include_storage self.flexibility_percent = flexibility_percent @@ -372,6 +395,11 @@ def use_extreme_periods(self) -> bool: """Whether extreme segment selection is enabled.""" return bool(self.time_series_for_high_peaks or self.time_series_for_low_peaks) + @property + def use_segmentation(self) -> bool: + """Whether inner-period segmentation is enabled.""" + return self.n_segments is not None + @property def labels_for_high_peaks(self) -> list[str]: """Names of time series used for high peak selection.""" diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 5e4164bd8..f8fc9fca2 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -54,8 +54,9 @@ def __init__(self, flow_system: FlowSystem) -> None: def cluster( self, - n_clusters: int, + n_clusters: int | None, cluster_duration: str | float, + n_segments: int | None = None, aggregate_data: bool = True, include_storage: bool = True, flexibility_percent: float = 0, @@ -82,8 +83,13 @@ def cluster( Args: n_clusters: Number of clusters (typical segments) to create. E.g., 8 for 8 typical days from a year of data. + Set to None to skip inter-period clustering (only do segmentation). cluster_duration: Duration of each cluster segment. Can be a pandas-style string ('1D', '24h', '6h') or a numeric value in hours. + n_segments: Number of segments within each cluster (inner-period clustering). + For example, n_segments=4 with cluster_duration='1D' will reduce + 24 hourly timesteps to 4 representative segments per day. + Default is None (no inner-period segmentation). aggregate_data: If True (default), aggregate time series data and fix all time-dependent variables. 
If False, only fix binary variables. include_storage: Whether to include storage flows in clustering constraints. @@ -115,12 +121,20 @@ def cluster( ... ) >>> clustered_fs.optimize(solver) - With extreme period selection: + With inner-period segmentation (8 typical days × 4 segments = 32 timesteps): >>> clustered_fs = flow_system.transform.cluster( ... n_clusters=8, ... cluster_duration='1D', - ... time_series_for_high_peaks=[heat_demand_ts], + ... n_segments=4, # Reduce 24 hours to 4 segments + ... ) + + Segmentation only (no clustering, reduce each day to 4 segments): + + >>> clustered_fs = flow_system.transform.cluster( + ... n_clusters=None, # Skip inter-period clustering + ... cluster_duration='1D', + ... n_segments=4, ... ) Multi-period FlowSystem (each year clustered independently): @@ -137,6 +151,7 @@ def cluster( params = ClusteringParameters( n_clusters=n_clusters, cluster_duration=cluster_duration, + n_segments=n_segments, aggregate_data=aggregate_data, include_storage=include_storage, flexibility_percent=flexibility_percent, @@ -195,6 +210,7 @@ def _cluster_simple( hours_per_time_step=float(dt_min), hours_per_period=params.cluster_duration_hours, nr_of_periods=params.n_clusters, + n_segments=params.n_segments, weights=self._calculate_clustering_weights(temporaly_changing_ds), time_series_for_high_peaks=params.labels_for_high_peaks, time_series_for_low_peaks=params.labels_for_low_peaks, @@ -298,6 +314,7 @@ def _cluster_multi_dimensional( hours_per_time_step=float(dt_min), hours_per_period=params.cluster_duration_hours, nr_of_periods=params.n_clusters, + n_segments=params.n_segments, weights=self._calculate_clustering_weights(temporaly_changing_ds), time_series_for_high_peaks=params.labels_for_high_peaks, time_series_for_low_peaks=params.labels_for_low_peaks, From 16cffe10a63901a573551136e5bcf65dfe329e9f Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 13 Dec 2025 23:42:06 +0100 Subject: [PATCH 003/191] Update CHANGELOG.md --- CHANGELOG.md | 78 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 63804b551..0f0dbc73d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -51,6 +51,84 @@ If upgrading from v2.x, see the [v3.0.0 release notes](https://github.com/flixOp Until here --> +## [5.1.0] - Upcoming + +**Summary**: This release significantly improves the time series clustering (tsam) integration with a simplified API, multi-period/scenario support, and inner-period segmentation. 
+ +### ✨ Added + +**Improved Clustering API**: The new `transform.cluster()` method provides a clean, keyword-based interface: + +```python +# Simple: 8 typical days +clustered_fs = flow_system.transform.cluster( + n_clusters=8, + cluster_duration='1D', +) +clustered_fs.optimize(solver) + +# With inner-period segmentation (8 days × 4 segments = 32 timesteps) +clustered_fs = flow_system.transform.cluster( + n_clusters=8, + cluster_duration='1D', + n_segments=4, # Reduce 24 hours to 4 segments per day +) + +# Segmentation only (no clustering, just reduce resolution) +clustered_fs = flow_system.transform.cluster( + n_clusters=None, + cluster_duration='1D', + n_segments=4, +) +``` + +**Multi-Period Clustering**: FlowSystems with multiple periods (e.g., multi-year investment studies) now support clustering, with each period clustered independently: + +```python +multi_year_fs = fx.FlowSystem(timesteps, periods=pd.Index([2025, 2026, 2027])) +clustered_fs = multi_year_fs.transform.cluster(n_clusters=8, cluster_duration='1D') +``` + +**Multi-Scenario Clustering**: FlowSystems with scenarios now support clustering, with each scenario clustered independently. + +**Inner-Period Segmentation**: New `n_segments` parameter enables tsam's inner-period clustering to reduce timesteps within each typical period. This provides additional computational reduction beyond regular clustering. + +### 💥 Breaking Changes + +**ClusteringParameters API Changed**: The `ClusteringParameters` class has new parameter names: + +| Old Parameter | New Parameter | +|---------------|---------------| +| `hours_per_period` | `cluster_duration` (accepts '1D', '24h', or hours) | +| `nr_of_periods` | `n_clusters` | +| `fix_storage_flows` | `include_storage` | +| `aggregate_data_and_fix_non_binary_vars` | `aggregate_data` | +| `percentage_of_period_freedom` | `flexibility_percent` | +| `penalty_of_period_freedom` | `flexibility_penalty` | + +**Migration Example**: + +```python +# Old (v5.0): +params = fx.ClusteringParameters( + hours_per_period=24, + nr_of_periods=8, + fix_storage_flows=True, + aggregate_data_and_fix_non_binary_vars=True, +) +clustered_fs = flow_system.transform.cluster(params) + +# New (v5.1): +clustered_fs = flow_system.transform.cluster( + n_clusters=8, + cluster_duration='1D', + include_storage=True, + aggregate_data=True, +) +``` + +--- + ## [Upcoming] - v5.0.0 **Summary**: This is a major release that fundamentally reimagines how users interact with flixopt. The new **FlowSystem-centric API** dramatically simplifies workflows by integrating optimization, results access, and visualization directly into the FlowSystem object. This release also completes the terminology standardization (OnOff → Status) and **removes all deprecated items from v4.x**. 
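For code that still constructs the deprecated `ClusteredOptimization` (deprecated in PATCH 001 of this series), the sketch below maps the old call pattern onto the new accessor, using the same settings as the reworked example in `example_optimization_modes.py`. It is a minimal illustration, not part of the patch itself: `flow_system` is a placeholder for an existing `fx.FlowSystem` from your own model.

```python
import flixopt as fx

# Old (deprecated):
# optimization = fx.ClusteredOptimization(
#     'Aggregated',
#     flow_system,
#     fx.ClusteringParameters(
#         hours_per_period=6,
#         nr_of_periods=4,
#         fix_storage_flows=False,
#         aggregate_data_and_fix_non_binary_vars=True,
#     ),
# )
# optimization.do_modeling()
# optimization.solve(fx.solvers.HighsSolver(0.01 / 100, 60))

# New: cluster the FlowSystem, then optimize it directly.
clustered_fs = flow_system.transform.cluster(
    n_clusters=4,            # was nr_of_periods
    cluster_duration='6h',   # was hours_per_period=6; accepts '6h', '1D', or hours
    include_storage=False,   # was fix_storage_flows
    aggregate_data=True,     # was aggregate_data_and_fix_non_binary_vars
)
clustered_fs.optimize(fx.solvers.HighsSolver(0.01 / 100, 60))
```

Because `transform.cluster()` returns a new FlowSystem and leaves the original untouched, the clustered and full models can be solved and compared side by side.
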
From 139dc89bc69e55d290f8df6e59f4f2a62e58b449 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 14 Dec 2025 00:02:06 +0100 Subject: [PATCH 004/191] Use deep copy --- flixopt/transform_accessor.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index f8fc9fca2..add8de91d 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -275,7 +275,7 @@ def _cluster_multi_dimensional( periods = list(self._fs.periods) if self._fs.periods is not None else [None] scenarios = list(self._fs.scenarios) if self._fs.scenarios is not None else [None] - ds = self._fs.to_dataset() + ds = self._fs.to_dataset().copy(deep=True) # Deep copy to allow in-place modifications clustering_results: dict[tuple, Clustering] = {} # Cluster each period x scenario combination independently @@ -330,7 +330,7 @@ def _cluster_multi_dimensional( # Get the original data array to update original_da = ds[name] # Create aggregated data array - agg_da = DataConverter.to_dataarray(series, {'time': ds_slice.coords['time']}) + agg_da = DataConverter.to_dataarray(series, {'time': ds_slice.indexes['time']}) # Update the slice in the full dataset if selector: From 60dd6708c93c0bc4f27ca9f96b8fb0478c71e99f Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 14 Dec 2025 00:02:19 +0100 Subject: [PATCH 005/191] Add notebook for clustering --- docs/notebooks/08c-clustering.ipynb | 643 ++++++++++++++++++++++++++++ 1 file changed, 643 insertions(+) create mode 100644 docs/notebooks/08c-clustering.ipynb diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb new file mode 100644 index 000000000..089c70e17 --- /dev/null +++ b/docs/notebooks/08c-clustering.ipynb @@ -0,0 +1,643 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0", + "metadata": {}, + "source": [ + "# Clustering with tsam\n", + "\n", + "Speed up large problems by identifying typical periods using time series clustering.\n", + "\n", + "This notebook demonstrates how to use **`transform.cluster()`** to reduce a year of time series data to representative days (typical periods).\n", + "\n", + "!!! 
note \"Requirements\"\n", + " This notebook requires the `tsam` package: `pip install tsam`" + ] + }, + { + "cell_type": "markdown", + "id": "1", + "metadata": {}, + "source": [ + "## Setup" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "2", + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-13T23:00:48.772821Z", + "start_time": "2025-12-13T23:00:48.630296Z" + }, + "execution": { + "iopub.execute_input": "2025-12-13T22:54:58.832003Z", + "iopub.status.busy": "2025-12-13T22:54:58.831893Z", + "iopub.status.idle": "2025-12-13T22:55:02.274843Z", + "shell.execute_reply": "2025-12-13T22:55:02.274346Z" + } + }, + "outputs": [ + { + "data": { + "text/plain": [ + "flixopt.config.CONFIG" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import timeit\n", + "\n", + "import pandas as pd\n", + "\n", + "import flixopt as fx\n", + "\n", + "fx.CONFIG.notebook()" + ] + }, + { + "cell_type": "markdown", + "id": "3", + "metadata": {}, + "source": [ + "## Load Time Series Data\n", + "\n", + "We use real-world district heating data at 15-minute resolution (one week for faster execution):" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "4", + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-13T23:00:49.089270Z", + "start_time": "2025-12-13T23:00:48.910406Z" + }, + "execution": { + "iopub.execute_input": "2025-12-13T22:55:02.280725Z", + "iopub.status.busy": "2025-12-13T22:55:02.280594Z", + "iopub.status.idle": "2025-12-13T22:55:02.312020Z", + "shell.execute_reply": "2025-12-13T22:55:02.311678Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Timesteps: 672 (7 days at 15-min resolution)\n", + "Heat demand: 122.2 - 254.0 MW\n" + ] + } + ], + "source": [ + "# Load time series data (15-min resolution)\n", + "data = pd.read_csv('../../examples/resources/Zeitreihen2020.csv', index_col=0, parse_dates=True).sort_index()\n", + "data = data['2020-01-01':'2020-01-07 23:45:00'] # One week\n", + "data.index.name = 'time'\n", + "\n", + "timesteps = data.index\n", + "\n", + "# Extract profiles\n", + "electricity_demand = data['P_Netz/MW'].to_numpy()\n", + "heat_demand = data['Q_Netz/MW'].to_numpy()\n", + "electricity_price = data['Strompr.€/MWh'].to_numpy()\n", + "gas_price = data['Gaspr.€/MWh'].to_numpy()\n", + "\n", + "print(f'Timesteps: {len(timesteps)} ({len(timesteps) / 96:.0f} days at 15-min resolution)')\n", + "print(f'Heat demand: {heat_demand.min():.1f} - {heat_demand.max():.1f} MW')" + ] + }, + { + "cell_type": "markdown", + "id": "5", + "metadata": {}, + "source": [ + "## Build a Simple FlowSystem\n", + "\n", + "A district heating system with CHP, boiler, and storage:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "6", + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-13T23:00:49.288924Z", + "start_time": "2025-12-13T23:00:49.252174Z" + }, + "execution": { + "iopub.execute_input": "2025-12-13T22:55:02.313474Z", + "iopub.status.busy": "2025-12-13T22:55:02.313388Z", + "iopub.status.idle": "2025-12-13T22:55:02.321038Z", + "shell.execute_reply": "2025-12-13T22:55:02.320463Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "System: 672 timesteps\n" + ] + } + ], + "source": [ + "def build_system(timesteps, heat_demand, electricity_demand, electricity_price, gas_price):\n", + " \"\"\"Build a district heating system.\"\"\"\n", + " fs = fx.FlowSystem(timesteps)\n", + "\n", + " fs.add_elements(\n", + 
" # Buses\n", + " fx.Bus('Electricity'),\n", + " fx.Bus('Heat'),\n", + " fx.Bus('Gas'),\n", + " fx.Bus('Coal'),\n", + " # Effects\n", + " fx.Effect('costs', '€', 'Total Costs', is_standard=True, is_objective=True),\n", + " # CHP\n", + " fx.linear_converters.CHP(\n", + " 'CHP',\n", + " thermal_efficiency=0.58,\n", + " electrical_efficiency=0.22,\n", + " status_parameters=fx.StatusParameters(effects_per_startup=1000),\n", + " electrical_flow=fx.Flow('P_el', bus='Electricity', size=200),\n", + " thermal_flow=fx.Flow('Q_th', bus='Heat', size=200, relative_minimum=0.3),\n", + " fuel_flow=fx.Flow('Q_fu', bus='Coal', size=350, previous_flow_rate=100), # size ≈ 200/0.58\n", + " ),\n", + " # Gas Boiler\n", + " fx.linear_converters.Boiler(\n", + " 'Boiler',\n", + " thermal_efficiency=0.85,\n", + " status_parameters=fx.StatusParameters(effects_per_startup=500),\n", + " thermal_flow=fx.Flow('Q_th', bus='Heat', size=100, relative_minimum=0.1),\n", + " fuel_flow=fx.Flow('Q_fu', bus='Gas', size=120, previous_flow_rate=20), # size ≈ 100/0.85\n", + " ),\n", + " # Thermal Storage\n", + " fx.Storage(\n", + " 'Storage',\n", + " capacity_in_flow_hours=500,\n", + " initial_charge_state=100,\n", + " eta_charge=0.95,\n", + " eta_discharge=0.95,\n", + " relative_loss_per_hour=0.001,\n", + " charging=fx.Flow('Charge', size=100, bus='Heat'),\n", + " discharging=fx.Flow('Discharge', size=100, bus='Heat'),\n", + " ),\n", + " # Fuel sources\n", + " fx.Source(\n", + " 'GasGrid',\n", + " outputs=[fx.Flow('Q_Gas', bus='Gas', size=1000, effects_per_flow_hour={'costs': gas_price})],\n", + " ),\n", + " fx.Source(\n", + " 'CoalSupply',\n", + " outputs=[fx.Flow('Q_Coal', bus='Coal', size=1000, effects_per_flow_hour={'costs': 4.6})],\n", + " ),\n", + " # Electricity grid\n", + " fx.Source(\n", + " 'GridBuy',\n", + " outputs=[\n", + " fx.Flow('P_el', bus='Electricity', size=1000, effects_per_flow_hour={'costs': electricity_price + 0.5})\n", + " ],\n", + " ),\n", + " fx.Sink(\n", + " 'GridSell',\n", + " inputs=[fx.Flow('P_el', bus='Electricity', size=1000, effects_per_flow_hour=-(electricity_price - 0.5))],\n", + " ),\n", + " # Demands\n", + " fx.Sink('HeatDemand', inputs=[fx.Flow('Q_th', bus='Heat', size=1, fixed_relative_profile=heat_demand)]),\n", + " fx.Sink(\n", + " 'ElecDemand', inputs=[fx.Flow('P_el', bus='Electricity', size=1, fixed_relative_profile=electricity_demand)]\n", + " ),\n", + " )\n", + "\n", + " return fs\n", + "\n", + "\n", + "flow_system = build_system(timesteps, heat_demand, electricity_demand, electricity_price, gas_price)\n", + "print(f'System: {len(timesteps)} timesteps')" + ] + }, + { + "cell_type": "markdown", + "id": "7", + "metadata": {}, + "source": [ + "## Baseline: Full Optimization\n", + "\n", + "First, solve without clustering for comparison:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "8", + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-13T23:01:00.902054Z", + "start_time": "2025-12-13T23:00:49.351114Z" + }, + "execution": { + "iopub.execute_input": "2025-12-13T22:55:02.323142Z", + "iopub.status.busy": "2025-12-13T22:55:02.322942Z", + "iopub.status.idle": "2025-12-13T22:55:08.419177Z", + "shell.execute_reply": "2025-12-13T22:55:08.417782Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[2m2025-12-14 00:00:49.360\u001b[0m \u001b[33mWARNING \u001b[0m │ FlowSystem is not connected_and_transformed. 
Connecting and transforming data now.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Writing constraints.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 71/71 [00:00<00:00, 122.37it/s]\n", + "Writing continuous variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 51/51 [00:00<00:00, 472.81it/s]\n", + "Writing binary variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 13/13 [00:00<00:00, 377.15it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", + "MIP linopy-problem-vm730vxe has 26909 rows; 24221 cols; 84703 nonzeros; 8736 integer variables (8736 binary)\n", + "Coefficient ranges:\n", + " Matrix [1e-05, 1e+03]\n", + " Cost [1e+00, 1e+00]\n", + " Bound [1e+00, 1e+03]\n", + " RHS [1e-05, 1e+02]\n", + "WARNING: Problem has some excessively small row bounds\n", + "Presolving model\n", + "17472 rows, 13440 cols, 45021 nonzeros 0s\n", + "14789 rows, 10964 cols, 45835 nonzeros 0s\n", + "12214 rows, 9019 cols, 39022 nonzeros 0s\n", + "Presolve reductions: rows 12214(-14695); columns 9019(-15202); nonzeros 39022(-45681) \n", + "\n", + "Solving MIP model with:\n", + " 12214 rows\n", + " 9019 cols (6824 binary, 0 integer, 0 implied int., 2195 continuous, 0 domain fixed)\n", + " 39022 nonzeros\n", + "\n", + "Src: B => Branching; C => Central rounding; F => Feasibility pump; H => Heuristic;\n", + " I => Shifting; J => Feasibility jump; L => Sub-MIP; P => Empty MIP; R => Randomized rounding;\n", + " S => Solve LP; T => Evaluate node; U => Unbounded; X => User solution; Y => HiGHS solution;\n", + " Z => ZI Round; l => Trivial lower; p => Trivial point; u => Trivial upper; z => Trivial zero\n", + "\n", + " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", + "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. | LpIters Time\n", + "\n", + " 0 0 0 0.00% -91086.334692 inf inf 0 0 0 0 1.2s\n", + " 0 0 0 0.00% 510476.049542 inf inf 0 0 0 4614 1.5s\n", + " C 0 0 0 0.00% 510864.416955 602921.407557 15.27% 3667 970 22 6199 2.6s\n", + " L 0 0 0 0.00% 510864.478625 510865.621324 0.00% 4102 1011 22 7020 8.2s\n", + " 1 0 1 100.00% 510864.478782 510865.621324 0.00% 4102 1011 22 8787 8.3s\n", + "\n", + "Solving report\n", + " Model linopy-problem-vm730vxe\n", + " Status Optimal\n", + " Primal bound 510865.621324\n", + " Dual bound 510864.478782\n", + " Gap 0.000224% (tolerance: 1%)\n", + " P-D integral 0.860025388867\n", + " Solution status feasible\n", + " 510865.621324 (objective)\n", + " 0 (bound viol.)\n", + " 4.75782431897e-07 (int. 
viol.)\n", + " 0 (row viol.)\n", + " Timing 8.27\n", + " Max sub-MIP depth 3\n", + " Nodes 1\n", + " Repair LPs 0\n", + " LP iterations 8787\n", + " 0 (strong br.)\n", + " 2406 (separation)\n", + " 1740 (heuristics)\n", + "Full optimization: 11.54 seconds\n", + "Cost: 510,866 €\n" + ] + } + ], + "source": [ + "solver = fx.solvers.HighsSolver(mip_gap=0.01)\n", + "\n", + "start = timeit.default_timer()\n", + "fs_full = flow_system.copy()\n", + "fs_full.optimize(solver)\n", + "time_full = timeit.default_timer() - start\n", + "\n", + "print(f'Full optimization: {time_full:.2f} seconds')\n", + "print(f'Cost: {fs_full.solution[\"costs\"].item():,.0f} €')" + ] + }, + { + "cell_type": "markdown", + "id": "9", + "metadata": {}, + "source": [ + "## Basic Clustering\n", + "\n", + "Cluster the time series into **4 typical days** (since we have 7 days of data):\n", + "\n", + "```python\n", + "clustered_fs = flow_system.transform.cluster(\n", + " n_clusters=4, # Number of typical periods\n", + " cluster_duration='1D', # Duration per cluster (1 day)\n", + ")\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "10", + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-13T23:01:08.615117Z", + "start_time": "2025-12-13T23:01:00.968429Z" + }, + "execution": { + "iopub.execute_input": "2025-12-13T22:55:08.429891Z", + "iopub.status.busy": "2025-12-13T22:55:08.429714Z", + "iopub.status.idle": "2025-12-13T22:55:13.471857Z", + "shell.execute_reply": "2025-12-13T22:55:13.471300Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Writing constraints.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 99/99 [00:00<00:00, 134.54it/s]\n", + "Writing continuous variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 51/51 [00:00<00:00, 739.63it/s]\n", + "Writing binary variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 13/13 [00:00<00:00, 407.20it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", + "MIP linopy-problem-3zos3gx7 has 34889 rows; 24221 cols; 100663 nonzeros; 8736 integer variables (8736 binary)\n", + "Coefficient ranges:\n", + " Matrix [1e-05, 1e+03]\n", + " Cost [1e+00, 1e+00]\n", + " Bound [1e+00, 1e+03]\n", + " RHS [1e-05, 1e+02]\n", + "WARNING: Problem has some excessively small row bounds\n", + "Presolving model\n", + "17852 rows, 7835 cols, 46161 nonzeros 0s\n", + "8771 rows, 6538 cols, 26638 nonzeros 0s\n", + "7501 rows, 5532 cols, 24162 nonzeros 0s\n", + "Presolve reductions: rows 7501(-27388); columns 5532(-18689); nonzeros 24162(-76501) \n", + "\n", + "Solving MIP model with:\n", + " 7501 rows\n", + " 5532 cols (4223 binary, 0 integer, 0 implied int., 1309 continuous, 0 domain fixed)\n", + " 24162 nonzeros\n", + "\n", + "Src: B => Branching; C => Central rounding; F => Feasibility pump; H => Heuristic;\n", + " I => Shifting; J => Feasibility jump; L => Sub-MIP; P => Empty MIP; R => Randomized rounding;\n", + " S => Solve LP; T => Evaluate node; U => Unbounded; X => User solution; Y => HiGHS solution;\n", + " Z => ZI Round; l => Trivial lower; p => Trivial point; u => Trivial upper; z => Trivial zero\n", + "\n", + " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", + "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. 
| LpIters Time\n", + "\n", + " 0 0 0 0.00% -134515.631486 inf inf 0 0 0 0 0.4s\n", + " 0 0 0 0.00% 510534.808573 inf inf 0 0 0 2776 0.5s\n", + "HighsMipSolverData::transformNewIntegerFeasibleSolution tmpSolver.run();\n", + "WARNING: Solution with objective 626117 has untransformed violations: bound = 9.025e-06; integrality = 0; row = 9.025e-06\n", + "HighsMipSolverData::transformNewIntegerFeasibleSolution tmpSolver.run();\n", + " L 0 0 0 0.00% 511015.64394 511017.103333 0.00% 2825 647 36 3686 4.3s\n", + " 1 0 1 100.00% 511015.64422 511017.103333 0.00% 2825 647 36 4951 4.3s\n", + "\n", + "Solving report\n", + " Model linopy-problem-3zos3gx7\n", + " Status Optimal\n", + " Primal bound 511017.103333\n", + " Dual bound 511015.64422\n", + " Gap 0.000286% (tolerance: 1%)\n", + " P-D integral 4.28507207879e-08\n", + " Solution status feasible\n", + " 511017.103333 (objective)\n", + " 0 (bound viol.)\n", + " 0 (int. viol.)\n", + " 0 (row viol.)\n", + " Timing 4.32\n", + " Max sub-MIP depth 4\n", + " Nodes 1\n", + " Repair LPs 2 (1 feasible; 367 iterations)\n", + " LP iterations 4951\n", + " 0 (strong br.)\n", + " 910 (separation)\n", + " 1247 (heuristics)\n", + "Clustered optimization: 7.64 seconds\n", + "Cost: 511,017 €\n", + "Speedup: 1.5x\n" + ] + } + ], + "source": [ + "start = timeit.default_timer()\n", + "\n", + "# Cluster into 4 typical days\n", + "fs_clustered = flow_system.transform.cluster(\n", + " n_clusters=4,\n", + " cluster_duration='1D',\n", + ")\n", + "\n", + "fs_clustered.optimize(solver)\n", + "time_clustered = timeit.default_timer() - start\n", + "\n", + "print(f'Clustered optimization: {time_clustered:.2f} seconds')\n", + "print(f'Cost: {fs_clustered.solution[\"costs\"].item():,.0f} €')\n", + "print(f'Speedup: {time_full / time_clustered:.1f}x')" + ] + }, + { + "cell_type": "markdown", + "id": "11", + "metadata": {}, + "source": [ + "## Compare Results" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "12", + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-13T23:01:08.737018Z", + "start_time": "2025-12-13T23:01:08.669367Z" + }, + "execution": { + "iopub.execute_input": "2025-12-13T22:55:13.479698Z", + "iopub.status.busy": "2025-12-13T22:55:13.479483Z", + "iopub.status.idle": "2025-12-13T22:55:13.514275Z", + "shell.execute_reply": "2025-12-13T22:55:13.513839Z" + } + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
 Time [s]Cost [€]Cost Gap [%]Speedup
Full (baseline)11.54510,8660.001.0x
Clustered (4 days)7.64511,0170.031.5x
\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "results = {\n", + " 'Full (baseline)': {'Time [s]': time_full, 'Cost [€]': fs_full.solution['costs'].item()},\n", + " 'Clustered (4 days)': {'Time [s]': time_clustered, 'Cost [€]': fs_clustered.solution['costs'].item()},\n", + "}\n", + "\n", + "comparison = pd.DataFrame(results).T\n", + "baseline_cost = comparison.loc['Full (baseline)', 'Cost [€]']\n", + "baseline_time = comparison.loc['Full (baseline)', 'Time [s]']\n", + "comparison['Cost Gap [%]'] = ((comparison['Cost [€]'] - baseline_cost) / abs(baseline_cost) * 100).round(2)\n", + "comparison['Speedup'] = (baseline_time / comparison['Time [s]']).round(1)\n", + "\n", + "comparison.style.format(\n", + " {\n", + " 'Time [s]': '{:.2f}',\n", + " 'Cost [€]': '{:,.0f}',\n", + " 'Cost Gap [%]': '{:.2f}',\n", + " 'Speedup': '{:.1f}x',\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "23", + "metadata": {}, + "source": [ + "## API Reference\n", + "\n", + "### `transform.cluster()` Parameters\n", + "\n", + "| Parameter | Type | Description |\n", + "|-----------|------|-------------|\n", + "| `n_clusters` | `int` | Number of typical periods (e.g., 8 typical days) |\n", + "| `cluster_duration` | `str \\| float` | Duration per cluster ('1D', '24h', or hours as float) |\n", + "| `aggregate_data` | `bool` | If True (default), aggregate time series data |\n", + "| `include_storage` | `bool` | Include storage in clustering constraints (default: True) |\n", + "| `flexibility_percent` | `float` | Allow binary variable deviations (default: 0) |\n", + "| `flexibility_penalty` | `float` | Penalty for deviations (default: 0) |\n", + "| `time_series_for_high_peaks` | `list` | Force inclusion of high-value periods |\n", + "| `time_series_for_low_peaks` | `list` | Force inclusion of low-value periods |\n", + "\n", + "### Common Patterns\n", + "\n", + "```python\n", + "# 8 typical days from a year\n", + "fs.transform.cluster(n_clusters=8, cluster_duration='1D')\n", + "\n", + "# 4 typical weeks\n", + "fs.transform.cluster(n_clusters=4, cluster_duration='1W')\n", + "\n", + "# Force inclusion of peak demand periods\n", + "fs.transform.cluster(\n", + " n_clusters=8,\n", + " cluster_duration='1D',\n", + " time_series_for_high_peaks=[heat_demand_ts],\n", + ")\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "24", + "metadata": {}, + "source": [ + "## Summary\n", + "\n", + "You learned how to use **`transform.cluster()`** to identify typical periods and reduce computational complexity.\n", + "\n", + "### When to Use Clustering\n", + "\n", + "| Scenario | Recommendation |\n", + "|----------|----------------|\n", + "| Annual optimization | 8-12 typical days |\n", + "| Investment decisions | Use with two-stage optimization |\n", + "| Preserve extremes | Use `time_series_for_high_peaks` |\n", + "\n", + "### Next Steps\n", + "\n", + "- **[08a-Aggregation](08a-aggregation.ipynb)**: Other aggregation techniques (resampling, two-stage)\n", + "- **[08b-Rolling Horizon](08b-rolling-horizon.ipynb)**: Sequential optimization for long time series" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + 
"version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 8c03f64e48a78e465a285794f1aef4f107c7dbb2 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 14 Dec 2025 00:13:39 +0100 Subject: [PATCH 006/191] Update notebook --- docs/notebooks/08c-clustering.ipynb | 4642 +++++++++++++++++++++++++-- 1 file changed, 4397 insertions(+), 245 deletions(-) diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb index 089c70e17..4be640586 100644 --- a/docs/notebooks/08c-clustering.ipynb +++ b/docs/notebooks/08c-clustering.ipynb @@ -25,20 +25,31 @@ }, { "cell_type": "code", - "execution_count": 7, "id": "2", "metadata": { - "ExecuteTime": { - "end_time": "2025-12-13T23:00:48.772821Z", - "start_time": "2025-12-13T23:00:48.630296Z" - }, "execution": { - "iopub.execute_input": "2025-12-13T22:54:58.832003Z", - "iopub.status.busy": "2025-12-13T22:54:58.831893Z", - "iopub.status.idle": "2025-12-13T22:55:02.274843Z", - "shell.execute_reply": "2025-12-13T22:55:02.274346Z" + "iopub.execute_input": "2025-12-13T23:10:58.301051Z", + "iopub.status.busy": "2025-12-13T23:10:58.300771Z", + "iopub.status.idle": "2025-12-13T23:11:03.374938Z", + "shell.execute_reply": "2025-12-13T23:11:03.373972Z" + }, + "ExecuteTime": { + "end_time": "2025-12-13T23:13:23.844292Z", + "start_time": "2025-12-13T23:13:18.446645Z" } }, + "source": [ + "import timeit\n", + "\n", + "import numpy as np\n", + "import pandas as pd\n", + "import plotly.graph_objects as go\n", + "from plotly.subplots import make_subplots\n", + "\n", + "import flixopt as fx\n", + "\n", + "fx.CONFIG.notebook()" + ], "outputs": [ { "data": { @@ -46,20 +57,12 @@ "flixopt.config.CONFIG" ] }, - "execution_count": 7, + "execution_count": 1, "metadata": {}, "output_type": "execute_result" } ], - "source": [ - "import timeit\n", - "\n", - "import pandas as pd\n", - "\n", - "import flixopt as fx\n", - "\n", - "fx.CONFIG.notebook()" - ] + "execution_count": 1 }, { "cell_type": "markdown", @@ -73,30 +76,19 @@ }, { "cell_type": "code", - "execution_count": 8, "id": "4", "metadata": { - "ExecuteTime": { - "end_time": "2025-12-13T23:00:49.089270Z", - "start_time": "2025-12-13T23:00:48.910406Z" - }, "execution": { - "iopub.execute_input": "2025-12-13T22:55:02.280725Z", - "iopub.status.busy": "2025-12-13T22:55:02.280594Z", - "iopub.status.idle": "2025-12-13T22:55:02.312020Z", - "shell.execute_reply": "2025-12-13T22:55:02.311678Z" + "iopub.execute_input": "2025-12-13T23:11:03.379309Z", + "iopub.status.busy": "2025-12-13T23:11:03.379129Z", + "iopub.status.idle": "2025-12-13T23:11:03.482067Z", + "shell.execute_reply": "2025-12-13T23:11:03.481664Z" + }, + "ExecuteTime": { + "end_time": "2025-12-13T23:13:24.053236Z", + "start_time": "2025-12-13T23:13:24.001969Z" } }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Timesteps: 672 (7 days at 15-min resolution)\n", - "Heat demand: 122.2 - 254.0 MW\n" - ] - } - ], "source": [ "# Load time series data (15-min resolution)\n", "data = pd.read_csv('../../examples/resources/Zeitreihen2020.csv', index_col=0, parse_dates=True).sort_index()\n", @@ -113,43 +105,4340 @@ "\n", "print(f'Timesteps: {len(timesteps)} ({len(timesteps) / 96:.0f} days at 15-min resolution)')\n", "print(f'Heat demand: {heat_demand.min():.1f} - {heat_demand.max():.1f} MW')" - ] + ], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Timesteps: 672 (7 days at 15-min resolution)\n", + "Heat demand: 122.2 - 254.0 MW\n" + ] + } 
+ ], + "execution_count": 2 }, { "cell_type": "markdown", - "id": "5", + "id": "iwuyqrpxr", "metadata": {}, "source": [ - "## Build a Simple FlowSystem\n", + "## Visualizing the Clustering Effect\n", "\n", - "A district heating system with CHP, boiler, and storage:" + "Before optimizing, let's see how clustering transforms the time series data. We'll compare:\n", + "- **Original data**: 7 days × 96 timesteps = 672 timesteps\n", + "- **Clustered data**: 4 typical days, repeated to match original structure" ] }, { "cell_type": "code", - "execution_count": 9, - "id": "6", + "id": "guysdaf98es", "metadata": { - "ExecuteTime": { - "end_time": "2025-12-13T23:00:49.288924Z", - "start_time": "2025-12-13T23:00:49.252174Z" - }, "execution": { - "iopub.execute_input": "2025-12-13T22:55:02.313474Z", - "iopub.status.busy": "2025-12-13T22:55:02.313388Z", - "iopub.status.idle": "2025-12-13T22:55:02.321038Z", - "shell.execute_reply": "2025-12-13T22:55:02.320463Z" + "iopub.execute_input": "2025-12-13T23:11:03.483963Z", + "iopub.status.busy": "2025-12-13T23:11:03.483827Z", + "iopub.status.idle": "2025-12-13T23:11:04.502966Z", + "shell.execute_reply": "2025-12-13T23:11:04.501454Z" + }, + "ExecuteTime": { + "end_time": "2025-12-13T23:13:25.676859Z", + "start_time": "2025-12-13T23:13:24.399493Z" } }, + "source": [ + "# Create a simple system to demonstrate clustering\n", + "fs_demo = fx.FlowSystem(timesteps)\n", + "fs_demo.add_elements(\n", + " fx.Bus('Heat'),\n", + " fx.Bus('Gas'),\n", + " fx.Effect('costs', '€', is_standard=True, is_objective=True),\n", + " fx.linear_converters.Boiler(\n", + " 'Boiler',\n", + " thermal_efficiency=0.9,\n", + " thermal_flow=fx.Flow('Q_th', bus='Heat', size=300),\n", + " fuel_flow=fx.Flow('Q_fu', bus='Gas'),\n", + " ),\n", + " fx.Source('GasGrid', outputs=[fx.Flow('Q_Gas', bus='Gas', size=1000, effects_per_flow_hour={'costs': gas_price})]),\n", + " fx.Sink('HeatDemand', inputs=[fx.Flow('Q_th', bus='Heat', size=1, fixed_relative_profile=heat_demand)]),\n", + ")\n", + "\n", + "# Cluster with 4 typical days\n", + "fs_clustered_demo = fs_demo.transform.cluster(n_clusters=4, cluster_duration='1D')\n", + "\n", + "# Get the clustering object to access tsam results\n", + "clustering_info = fs_clustered_demo._clustering_info\n", + "clustering = clustering_info['clustering']\n", + "\n", + "# Plot original vs aggregated data\n", + "clustering.plot()" + ], "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "System: 672 timesteps\n" + "\u001B[2m2025-12-14 00:13:24.408\u001B[0m \u001B[33mWARNING \u001B[0m │ FlowSystem is not connected_and_transformed. Connecting and transforming data now.\n" ] + }, + { + "data": { + "text/plain": [ + "PlotResult(data= Size: 27kB\n", + "Dimensions: (time: 672, variable: 2)\n", + "Coordinates:\n", + " * time (time) datetime64[ns] 5kB 2020-01-01 ... 2020-01-07T23:45:00\n", + " * variable (variable) object 16B 'GasGrid(Q_Gas)|costs|per_flow_hour' 'H...\n", + "Data variables:\n", + " original (variable, time) float64 11kB 32.46 32.46 32.46 ... 139.1 138.6\n", + " aggregated (variable, time) float64 11kB 32.46 32.46 32.46 ... 138.7 138.3, figure=Figure({\n", + " 'data': [{'hovertemplate': ('variable=Original - GasGrid(Q_' ... '}
value=%{y}'),\n", + " 'legendgroup': 'Original - GasGrid(Q_Gas)|costs|per_flow_hour',\n", + " 'line': {'color': '#636EFA', 'dash': 'dash'},\n", + " 'marker': {'symbol': 'circle'},\n", + " 'mode': 'lines',\n", + " 'name': 'Original - GasGrid(Q_Gas)|costs|per_flow_hour',\n", + " 'showlegend': True,\n", + " 'type': 'scattergl',\n", + " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", + " '2020-01-01T00:30:00.000000000', ..., '2020-01-07T23:15:00.000000000',\n", + " '2020-01-07T23:30:00.000000000', '2020-01-07T23:45:00.000000000'],\n", + " shape=(672,), dtype='datetime64[ns]'),\n", + " 'xaxis': 'x',\n", + " 'y': {'bdata': ('mG4Sg8A6QECYbhKDwDpAQJhuEoPAOk' ... '66SQxSQEA1XrpJDFJAQDVeukkMUkBA'),\n", + " 'dtype': 'f8'},\n", + " 'yaxis': 'y'},\n", + " {'hovertemplate': ('variable=Original - HeatDemand' ... '}
value=%{y}'),\n", + " 'legendgroup': 'Original - HeatDemand(Q_th)|fixed_relative_profile',\n", + " 'line': {'color': '#EF553B', 'dash': 'dash'},\n", + " 'marker': {'symbol': 'circle'},\n", + " 'mode': 'lines',\n", + " 'name': 'Original - HeatDemand(Q_th)|fixed_relative_profile',\n", + " 'showlegend': True,\n", + " 'type': 'scattergl',\n", + " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", + " '2020-01-01T00:30:00.000000000', ..., '2020-01-07T23:15:00.000000000',\n", + " '2020-01-07T23:30:00.000000000', '2020-01-07T23:45:00.000000000'],\n", + " shape=(672,), dtype='datetime64[ns]'),\n", + " 'xaxis': 'x',\n", + " 'y': {'bdata': ('sp3vp8bDX0BEi2zn+4leQO58PzVeGl' ... '6F61FcYUAGgZVDi2RhQBFYObTIUmFA'),\n", + " 'dtype': 'f8'},\n", + " 'yaxis': 'y'},\n", + " {'hovertemplate': ('variable=Aggregated - GasGrid(' ... '}
value=%{y}'),\n", + " 'legendgroup': 'Aggregated - GasGrid(Q_Gas)|costs|per_flow_hour',\n", + " 'line': {'color': '#636EFA', 'dash': 'solid'},\n", + " 'marker': {'symbol': 'circle'},\n", + " 'mode': 'lines',\n", + " 'name': 'Aggregated - GasGrid(Q_Gas)|costs|per_flow_hour',\n", + " 'showlegend': True,\n", + " 'type': 'scattergl',\n", + " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", + " '2020-01-01T00:30:00.000000000', ..., '2020-01-07T23:15:00.000000000',\n", + " '2020-01-07T23:30:00.000000000', '2020-01-07T23:45:00.000000000'],\n", + " shape=(672,), dtype='datetime64[ns]'),\n", + " 'xaxis': 'x',\n", + " 'y': {'bdata': ('mG4Sg8A6QECYbhKDwDpAQJhuEoPAOk' ... '66SQxSQEA1XrpJDFJAQDVeukkMUkBA'),\n", + " 'dtype': 'f8'},\n", + " 'yaxis': 'y'},\n", + " {'hovertemplate': ('variable=Aggregated - HeatDema' ... '}
value=%{y}'),\n", + " 'legendgroup': 'Aggregated - HeatDemand(Q_th)|fixed_relative_profile',\n", + " 'line': {'color': '#EF553B', 'dash': 'solid'},\n", + " 'marker': {'symbol': 'circle'},\n", + " 'mode': 'lines',\n", + " 'name': 'Aggregated - HeatDemand(Q_th)|fixed_relative_profile',\n", + " 'showlegend': True,\n", + " 'type': 'scattergl',\n", + " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", + " '2020-01-01T00:30:00.000000000', ..., '2020-01-07T23:15:00.000000000',\n", + " '2020-01-07T23:30:00.000000000', '2020-01-07T23:45:00.000000000'],\n", + " shape=(672,), dtype='datetime64[ns]'),\n", + " 'xaxis': 'x',\n", + " 'y': {'bdata': ('BoGVQ4vYX0DdJAaBlZteQIPAyqFFKl' ... 'eSplRMYUBWDi2ynVdhQEZvy1odS2FA'),\n", + " 'dtype': 'f8'},\n", + " 'yaxis': 'y'}],\n", + " 'layout': {'legend': {'title': {'text': 'variable'}, 'tracegroupgap': 0},\n", + " 'margin': {'t': 60},\n", + " 'template': '...',\n", + " 'title': {'text': 'Original vs Aggregated Data (original = ---)'},\n", + " 'xaxis': {'anchor': 'y', 'domain': [0.0, 1.0], 'title': {'text': 'Time in h'}},\n", + " 'yaxis': {'anchor': 'x', 'domain': [0.0, 1.0], 'title': {'text': 'Value'}}}\n", + "}))" + ], + "text/html": [ + "
\n", + "
" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": 3 + }, + { + "cell_type": "markdown", + "id": "coxd0duq3nb", + "metadata": {}, + "source": [ + "## Comparing Different Clustering Parameters\n", + "\n", + "Let's see how different numbers of clusters affect the data representation:" + ] + }, + { + "cell_type": "code", + "id": "q2xt2juvyo", + "metadata": { + "execution": { + "iopub.execute_input": "2025-12-13T23:11:04.508499Z", + "iopub.status.busy": "2025-12-13T23:11:04.508210Z", + "iopub.status.idle": "2025-12-13T23:11:06.424428Z", + "shell.execute_reply": "2025-12-13T23:11:06.417096Z" + }, + "ExecuteTime": { + "end_time": "2025-12-13T23:13:27.452394Z", + "start_time": "2025-12-13T23:13:26.223467Z" + } + }, + "source": [ + "# Test different numbers of clusters\n", + "cluster_configs = [2, 3, 4, 5]\n", + "clustering_results = {}\n", + "\n", + "for n in cluster_configs:\n", + " fs_test = fs_demo.copy()\n", + " fs_clustered = fs_test.transform.cluster(n_clusters=n, cluster_duration='1D')\n", + " clustering_results[n] = fs_clustered._clustering_info['clustering']\n", + "\n", + "# Compare the aggregated heat demand for each configuration\n", + "fig = make_subplots(\n", + " rows=2,\n", + " cols=2,\n", + " subplot_titles=[f'{n} Typical Days' for n in cluster_configs],\n", + " shared_xaxes=True,\n", + " shared_yaxes=True,\n", + " vertical_spacing=0.12,\n", + " horizontal_spacing=0.08,\n", + ")\n", + "\n", + "for i, (_n, clustering) in enumerate(clustering_results.items()):\n", + " row, col = divmod(i, 2)\n", + " row += 1\n", + " col += 1\n", + "\n", + " # Original data\n", + " original = clustering.original_data['HeatDemand(Q_th)|fixed_relative_profile']\n", + " aggregated = clustering.aggregated_data['HeatDemand(Q_th)|fixed_relative_profile']\n", + "\n", + " fig.add_trace(\n", + " go.Scatter(\n", + " x=list(range(len(original))),\n", + " y=original.values,\n", + " name='Original',\n", + " line=dict(color='lightgray'),\n", + " showlegend=(i == 0),\n", + " ),\n", + " row=row,\n", + " col=col,\n", + " )\n", + " fig.add_trace(\n", + " go.Scatter(\n", + " x=list(range(len(aggregated))),\n", + " y=aggregated.values,\n", + " name='Aggregated',\n", + " line=dict(color='blue', width=2),\n", + " showlegend=(i == 0),\n", + " ),\n", + " row=row,\n", + " col=col,\n", + " )\n", + "\n", + "fig.update_layout(\n", + " title='Heat Demand: Original vs Clustered Data',\n", + " height=500,\n", + " legend=dict(orientation='h', yanchor='bottom', y=1.02),\n", + ")\n", + "fig.update_xaxes(title_text='Timestep', row=2)\n", + "fig.update_yaxes(title_text='MW', col=1)\n", + "fig.show()" + ], + "outputs": [ + { + "data": { + "text/html": [ + " \n", + " \n", + " " + ] + }, + "metadata": {}, + "output_type": "display_data", + "jetTransient": { + "display_id": null + } + }, + { + "data": { + "text/html": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data", + "jetTransient": { + "display_id": null + } + } + ], + "execution_count": 4 + }, + { + "cell_type": "code", + "id": "3zsi1g8bokg", + "metadata": { + "execution": { + "iopub.execute_input": "2025-12-13T23:11:06.484947Z", + "iopub.status.busy": "2025-12-13T23:11:06.484493Z", + "iopub.status.idle": "2025-12-13T23:11:06.548165Z", + "shell.execute_reply": "2025-12-13T23:11:06.546796Z" + }, + "ExecuteTime": { + "end_time": "2025-12-13T23:13:28.869432Z", + "start_time": "2025-12-13T23:13:28.785666Z" + } + }, + "source": [ + "# Calculate error metrics for each configuration\n", + "metrics = []\n", + "for n, clustering in clustering_results.items():\n", + " original = clustering.original_data['HeatDemand(Q_th)|fixed_relative_profile'].values\n", + " aggregated = clustering.aggregated_data['HeatDemand(Q_th)|fixed_relative_profile'].values\n", + "\n", + " # Calculate metrics\n", + " rmse = np.sqrt(np.mean((original - aggregated) ** 2))\n", + " mae = np.mean(np.abs(original - aggregated))\n", + " max_error = np.max(np.abs(original - aggregated))\n", + " correlation = np.corrcoef(original, aggregated)[0, 1]\n", + "\n", + " metrics.append(\n", + " {\n", + " 'Clusters': n,\n", + " 'RMSE [MW]': rmse,\n", + " 'MAE [MW]': mae,\n", + " 'Max Error [MW]': max_error,\n", + " 'Correlation': correlation,\n", + " }\n", + " )\n", + "\n", + "metrics_df = pd.DataFrame(metrics).set_index('Clusters')\n", + "metrics_df.style.format(\n", + " {\n", + " 'RMSE [MW]': '{:.2f}',\n", + " 'MAE [MW]': '{:.2f}',\n", + " 'Max Error [MW]': '{:.2f}',\n", + " 'Correlation': '{:.4f}',\n", + " }\n", + ")" + ], + "outputs": [ + { + "data": { + "text/plain": [ + "" + ], + "text/html": [ + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
 RMSE [MW]MAE [MW]Max Error [MW]Correlation
Clusters    
21.871.367.890.9984
31.290.767.890.9993
40.650.373.140.9998
50.150.100.381.0000
\n" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" } ], + "execution_count": 5 + }, + { + "cell_type": "markdown", + "id": "5", + "metadata": {}, + "source": [ + "## Build a Simple FlowSystem\n", + "\n", + "A district heating system with CHP, boiler, and storage:" + ] + }, + { + "cell_type": "code", + "id": "6", + "metadata": { + "execution": { + "iopub.execute_input": "2025-12-13T23:11:06.554382Z", + "iopub.status.busy": "2025-12-13T23:11:06.554075Z", + "iopub.status.idle": "2025-12-13T23:11:06.569809Z", + "shell.execute_reply": "2025-12-13T23:11:06.569118Z" + }, + "ExecuteTime": { + "end_time": "2025-12-13T23:13:30.027254Z", + "start_time": "2025-12-13T23:13:29.977152Z" + } + }, "source": [ "def build_system(timesteps, heat_demand, electricity_demand, electricity_price, gas_price):\n", " \"\"\"Build a district heating system.\"\"\"\n", @@ -224,7 +4513,17 @@ "\n", "flow_system = build_system(timesteps, heat_demand, electricity_demand, electricity_price, gas_price)\n", "print(f'System: {len(timesteps)} timesteps')" - ] + ], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "System: 672 timesteps\n" + ] + } + ], + "execution_count": 6 }, { "cell_type": "markdown", @@ -238,35 +4537,47 @@ }, { "cell_type": "code", - "execution_count": 10, "id": "8", "metadata": { - "ExecuteTime": { - "end_time": "2025-12-13T23:01:00.902054Z", - "start_time": "2025-12-13T23:00:49.351114Z" - }, "execution": { - "iopub.execute_input": "2025-12-13T22:55:02.323142Z", - "iopub.status.busy": "2025-12-13T22:55:02.322942Z", - "iopub.status.idle": "2025-12-13T22:55:08.419177Z", - "shell.execute_reply": "2025-12-13T22:55:08.417782Z" + "iopub.execute_input": "2025-12-13T23:11:06.574029Z", + "iopub.status.busy": "2025-12-13T23:11:06.573839Z", + "iopub.status.idle": "2025-12-13T23:11:16.402730Z", + "shell.execute_reply": "2025-12-13T23:11:16.392580Z" + }, + "jupyter": { + "is_executing": true + }, + "ExecuteTime": { + "start_time": "2025-12-13T23:13:30.491960Z" } }, + "source": [ + "solver = fx.solvers.HighsSolver(mip_gap=0.01)\n", + "\n", + "start = timeit.default_timer()\n", + "fs_full = flow_system.copy()\n", + "fs_full.optimize(solver)\n", + "time_full = timeit.default_timer() - start\n", + "\n", + "print(f'Full optimization: {time_full:.2f} seconds')\n", + "print(f'Cost: {fs_full.solution[\"costs\"].item():,.0f} €')" + ], "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "\u001b[2m2025-12-14 00:00:49.360\u001b[0m \u001b[33mWARNING \u001b[0m │ FlowSystem is not connected_and_transformed. Connecting and transforming data now.\n" + "\u001B[2m2025-12-14 00:13:30.495\u001B[0m \u001B[33mWARNING \u001B[0m │ FlowSystem is not connected_and_transformed. 
Connecting and transforming data now.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Writing constraints.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 71/71 [00:00<00:00, 122.37it/s]\n", - "Writing continuous variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 51/51 [00:00<00:00, 472.81it/s]\n", - "Writing binary variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 13/13 [00:00<00:00, 377.15it/s]\n" + "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 71/71 [00:00<00:00, 101.01it/s]\n", + "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 51/51 [00:00<00:00, 243.81it/s]\n", + "Writing binary variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 13/13 [00:00<00:00, 514.63it/s]\n" ] }, { @@ -274,7 +4585,7 @@ "output_type": "stream", "text": [ "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", - "MIP linopy-problem-vm730vxe has 26909 rows; 24221 cols; 84703 nonzeros; 8736 integer variables (8736 binary)\n", + "MIP linopy-problem-ag1luz0e has 26909 rows; 24221 cols; 84703 nonzeros; 8736 integer variables (8736 binary)\n", "Coefficient ranges:\n", " Matrix [1e-05, 1e+03]\n", " Cost [1e+00, 1e+00]\n", @@ -300,48 +4611,13 @@ " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. | LpIters Time\n", "\n", - " 0 0 0 0.00% -91086.334692 inf inf 0 0 0 0 1.2s\n", - " 0 0 0 0.00% 510476.049542 inf inf 0 0 0 4614 1.5s\n", - " C 0 0 0 0.00% 510864.416955 602921.407557 15.27% 3667 970 22 6199 2.6s\n", - " L 0 0 0 0.00% 510864.478625 510865.621324 0.00% 4102 1011 22 7020 8.2s\n", - " 1 0 1 100.00% 510864.478782 510865.621324 0.00% 4102 1011 22 8787 8.3s\n", - "\n", - "Solving report\n", - " Model linopy-problem-vm730vxe\n", - " Status Optimal\n", - " Primal bound 510865.621324\n", - " Dual bound 510864.478782\n", - " Gap 0.000224% (tolerance: 1%)\n", - " P-D integral 0.860025388867\n", - " Solution status feasible\n", - " 510865.621324 (objective)\n", - " 0 (bound viol.)\n", - " 4.75782431897e-07 (int. 
viol.)\n", - " 0 (row viol.)\n", - " Timing 8.27\n", - " Max sub-MIP depth 3\n", - " Nodes 1\n", - " Repair LPs 0\n", - " LP iterations 8787\n", - " 0 (strong br.)\n", - " 2406 (separation)\n", - " 1740 (heuristics)\n", - "Full optimization: 11.54 seconds\n", - "Cost: 510,866 €\n" + " 0 0 0 0.00% -91086.334692 inf inf 0 0 0 0 0.5s\n", + " 0 0 0 0.00% 510476.049542 inf inf 0 0 0 4614 0.6s\n", + " C 0 0 0 0.00% 510864.416955 602921.407557 15.27% 3667 970 22 6199 1.6s\n" ] } ], - "source": [ - "solver = fx.solvers.HighsSolver(mip_gap=0.01)\n", - "\n", - "start = timeit.default_timer()\n", - "fs_full = flow_system.copy()\n", - "fs_full.optimize(solver)\n", - "time_full = timeit.default_timer() - start\n", - "\n", - "print(f'Full optimization: {time_full:.2f} seconds')\n", - "print(f'Cost: {fs_full.solution[\"costs\"].item():,.0f} €')" - ] + "execution_count": null }, { "cell_type": "markdown", @@ -362,95 +4638,15 @@ }, { "cell_type": "code", - "execution_count": 11, "id": "10", "metadata": { - "ExecuteTime": { - "end_time": "2025-12-13T23:01:08.615117Z", - "start_time": "2025-12-13T23:01:00.968429Z" - }, "execution": { - "iopub.execute_input": "2025-12-13T22:55:08.429891Z", - "iopub.status.busy": "2025-12-13T22:55:08.429714Z", - "iopub.status.idle": "2025-12-13T22:55:13.471857Z", - "shell.execute_reply": "2025-12-13T22:55:13.471300Z" + "iopub.execute_input": "2025-12-13T23:11:16.424863Z", + "iopub.status.busy": "2025-12-13T23:11:16.421861Z", + "iopub.status.idle": "2025-12-13T23:11:23.254537Z", + "shell.execute_reply": "2025-12-13T23:11:23.252897Z" } }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Writing constraints.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 99/99 [00:00<00:00, 134.54it/s]\n", - "Writing continuous variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 51/51 [00:00<00:00, 739.63it/s]\n", - "Writing binary variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 13/13 [00:00<00:00, 407.20it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", - "MIP linopy-problem-3zos3gx7 has 34889 rows; 24221 cols; 100663 nonzeros; 8736 integer variables (8736 binary)\n", - "Coefficient ranges:\n", - " Matrix [1e-05, 1e+03]\n", - " Cost [1e+00, 1e+00]\n", - " Bound [1e+00, 1e+03]\n", - " RHS [1e-05, 1e+02]\n", - "WARNING: Problem has some excessively small row bounds\n", - "Presolving model\n", - "17852 rows, 7835 cols, 46161 nonzeros 0s\n", - "8771 rows, 6538 cols, 26638 nonzeros 0s\n", - "7501 rows, 5532 cols, 24162 nonzeros 0s\n", - "Presolve reductions: rows 7501(-27388); columns 5532(-18689); nonzeros 24162(-76501) \n", - "\n", - "Solving MIP model with:\n", - " 7501 rows\n", - " 5532 cols (4223 binary, 0 integer, 0 implied int., 1309 continuous, 0 domain fixed)\n", - " 24162 nonzeros\n", - "\n", - "Src: B => Branching; C => Central rounding; F => Feasibility pump; H => Heuristic;\n", - " I => Shifting; J => Feasibility jump; L => Sub-MIP; P => Empty MIP; R => Randomized rounding;\n", - " S => Solve LP; T => Evaluate node; U => Unbounded; X => User solution; Y => HiGHS solution;\n", - " Z => ZI Round; l => Trivial lower; p => Trivial point; u => Trivial upper; z => Trivial zero\n", - "\n", - " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", - "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. 
| LpIters Time\n", - "\n", - " 0 0 0 0.00% -134515.631486 inf inf 0 0 0 0 0.4s\n", - " 0 0 0 0.00% 510534.808573 inf inf 0 0 0 2776 0.5s\n", - "HighsMipSolverData::transformNewIntegerFeasibleSolution tmpSolver.run();\n", - "WARNING: Solution with objective 626117 has untransformed violations: bound = 9.025e-06; integrality = 0; row = 9.025e-06\n", - "HighsMipSolverData::transformNewIntegerFeasibleSolution tmpSolver.run();\n", - " L 0 0 0 0.00% 511015.64394 511017.103333 0.00% 2825 647 36 3686 4.3s\n", - " 1 0 1 100.00% 511015.64422 511017.103333 0.00% 2825 647 36 4951 4.3s\n", - "\n", - "Solving report\n", - " Model linopy-problem-3zos3gx7\n", - " Status Optimal\n", - " Primal bound 511017.103333\n", - " Dual bound 511015.64422\n", - " Gap 0.000286% (tolerance: 1%)\n", - " P-D integral 4.28507207879e-08\n", - " Solution status feasible\n", - " 511017.103333 (objective)\n", - " 0 (bound viol.)\n", - " 0 (int. viol.)\n", - " 0 (row viol.)\n", - " Timing 4.32\n", - " Max sub-MIP depth 4\n", - " Nodes 1\n", - " Repair LPs 2 (1 feasible; 367 iterations)\n", - " LP iterations 4951\n", - " 0 (strong br.)\n", - " 910 (separation)\n", - " 1247 (heuristics)\n", - "Clustered optimization: 7.64 seconds\n", - "Cost: 511,017 €\n", - "Speedup: 1.5x\n" - ] - } - ], "source": [ "start = timeit.default_timer()\n", "\n", @@ -466,7 +4662,9 @@ "print(f'Clustered optimization: {time_clustered:.2f} seconds')\n", "print(f'Cost: {fs_clustered.solution[\"costs\"].item():,.0f} €')\n", "print(f'Speedup: {time_full / time_clustered:.1f}x')" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -478,63 +4676,15 @@ }, { "cell_type": "code", - "execution_count": 12, "id": "12", "metadata": { - "ExecuteTime": { - "end_time": "2025-12-13T23:01:08.737018Z", - "start_time": "2025-12-13T23:01:08.669367Z" - }, "execution": { - "iopub.execute_input": "2025-12-13T22:55:13.479698Z", - "iopub.status.busy": "2025-12-13T22:55:13.479483Z", - "iopub.status.idle": "2025-12-13T22:55:13.514275Z", - "shell.execute_reply": "2025-12-13T22:55:13.513839Z" + "iopub.execute_input": "2025-12-13T23:11:23.259936Z", + "iopub.status.busy": "2025-12-13T23:11:23.259776Z", + "iopub.status.idle": "2025-12-13T23:11:23.267446Z", + "shell.execute_reply": "2025-12-13T23:11:23.267185Z" } }, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
 Time [s]Cost [€]Cost Gap [%]Speedup
Full (baseline)11.54510,8660.001.0x
Clustered (4 days)7.64511,0170.031.5x
\n" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], "source": [ "results = {\n", " 'Full (baseline)': {'Time [s]': time_full, 'Cost [€]': fs_full.solution['costs'].item()},\n", @@ -555,7 +4705,9 @@ " 'Speedup': '{:.1f}x',\n", " }\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", From 9fdc53d1f5e8ca7cff29fb51e28953b502b69fba Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 14 Dec 2025 02:02:26 +0100 Subject: [PATCH 007/191] Fix multi period and multi scenario clsutering --- flixopt/transform_accessor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index add8de91d..8ff2155f1 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -289,7 +289,7 @@ def _cluster_multi_dimensional( selector['scenario'] = scenario_label if selector: - ds_slice = ds.sel(**selector) + ds_slice = ds.sel(**selector, drop=True) else: ds_slice = ds From a9a442d9f40e5d561624ec0273408522e073c807 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 14 Dec 2025 02:05:02 +0100 Subject: [PATCH 008/191] Improve --- CHANGELOG.md | 26 +- docs/notebooks/08c-clustering.ipynb | 882 ++++++++++++++++++---------- 2 files changed, 591 insertions(+), 317 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0f0dbc73d..cafee9031 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -53,46 +53,28 @@ Until here --> ## [5.1.0] - Upcoming -**Summary**: This release significantly improves the time series clustering (tsam) integration with a simplified API, multi-period/scenario support, and inner-period segmentation. +**Summary**: This release improves the time series clustering (tsam) integration with a simplified keyword-based API. ### ✨ Added **Improved Clustering API**: The new `transform.cluster()` method provides a clean, keyword-based interface: ```python -# Simple: 8 typical days +# Cluster into 8 typical days clustered_fs = flow_system.transform.cluster( n_clusters=8, cluster_duration='1D', ) clustered_fs.optimize(solver) -# With inner-period segmentation (8 days × 4 segments = 32 timesteps) +# With peak preservation clustered_fs = flow_system.transform.cluster( n_clusters=8, cluster_duration='1D', - n_segments=4, # Reduce 24 hours to 4 segments per day + time_series_for_high_peaks=[heat_demand_ts], ) - -# Segmentation only (no clustering, just reduce resolution) -clustered_fs = flow_system.transform.cluster( - n_clusters=None, - cluster_duration='1D', - n_segments=4, -) -``` - -**Multi-Period Clustering**: FlowSystems with multiple periods (e.g., multi-year investment studies) now support clustering, with each period clustered independently: - -```python -multi_year_fs = fx.FlowSystem(timesteps, periods=pd.Index([2025, 2026, 2027])) -clustered_fs = multi_year_fs.transform.cluster(n_clusters=8, cluster_duration='1D') ``` -**Multi-Scenario Clustering**: FlowSystems with scenarios now support clustering, with each scenario clustered independently. - -**Inner-Period Segmentation**: New `n_segments` parameter enables tsam's inner-period clustering to reduce timesteps within each typical period. This provides additional computational reduction beyond regular clustering. 
- ### 💥 Breaking Changes **ClusteringParameters API Changed**: The `ClusteringParameters` class has new parameter names: diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb index 4be640586..76c97049a 100644 --- a/docs/notebooks/08c-clustering.ipynb +++ b/docs/notebooks/08c-clustering.ipynb @@ -4,16 +4,7 @@ "cell_type": "markdown", "id": "0", "metadata": {}, - "source": [ - "# Clustering with tsam\n", - "\n", - "Speed up large problems by identifying typical periods using time series clustering.\n", - "\n", - "This notebook demonstrates how to use **`transform.cluster()`** to reduce a year of time series data to representative days (typical periods).\n", - "\n", - "!!! note \"Requirements\"\n", - " This notebook requires the `tsam` package: `pip install tsam`" - ] + "source": "# Clustering with tsam\n\nSpeed up large problems by identifying typical periods using time series clustering.\n\nThis notebook demonstrates:\n\n- **Basic clustering**: Reduce a week/year to representative days\n- **Compare clustering parameters**: See how data changes with different cluster counts\n- **Multi-period clustering**: Cluster multi-year investment studies\n- **Multi-scenario clustering**: Cluster scenario-based analyses\n\n!!! note \"Requirements\"\n This notebook requires the `tsam` package: `pip install tsam`" }, { "cell_type": "markdown", @@ -25,31 +16,20 @@ }, { "cell_type": "code", + "execution_count": 1, "id": "2", "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T01:03:14.969351Z", + "start_time": "2025-12-14T01:03:09.926940Z" + }, "execution": { "iopub.execute_input": "2025-12-13T23:10:58.301051Z", "iopub.status.busy": "2025-12-13T23:10:58.300771Z", "iopub.status.idle": "2025-12-13T23:11:03.374938Z", "shell.execute_reply": "2025-12-13T23:11:03.373972Z" - }, - "ExecuteTime": { - "end_time": "2025-12-13T23:13:23.844292Z", - "start_time": "2025-12-13T23:13:18.446645Z" } }, - "source": [ - "import timeit\n", - "\n", - "import numpy as np\n", - "import pandas as pd\n", - "import plotly.graph_objects as go\n", - "from plotly.subplots import make_subplots\n", - "\n", - "import flixopt as fx\n", - "\n", - "fx.CONFIG.notebook()" - ], "outputs": [ { "data": { @@ -62,7 +42,18 @@ "output_type": "execute_result" } ], - "execution_count": 1 + "source": [ + "import timeit\n", + "\n", + "import numpy as np\n", + "import pandas as pd\n", + "import plotly.graph_objects as go\n", + "from plotly.subplots import make_subplots\n", + "\n", + "import flixopt as fx\n", + "\n", + "fx.CONFIG.notebook()" + ] }, { "cell_type": "markdown", @@ -76,19 +67,30 @@ }, { "cell_type": "code", + "execution_count": 2, "id": "4", "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T01:03:15.123760Z", + "start_time": "2025-12-14T01:03:15.034204Z" + }, "execution": { "iopub.execute_input": "2025-12-13T23:11:03.379309Z", "iopub.status.busy": "2025-12-13T23:11:03.379129Z", "iopub.status.idle": "2025-12-13T23:11:03.482067Z", "shell.execute_reply": "2025-12-13T23:11:03.481664Z" - }, - "ExecuteTime": { - "end_time": "2025-12-13T23:13:24.053236Z", - "start_time": "2025-12-13T23:13:24.001969Z" } }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Timesteps: 672 (7 days at 15-min resolution)\n", + "Heat demand: 122.2 - 254.0 MW\n" + ] + } + ], "source": [ "# Load time series data (15-min resolution)\n", "data = pd.read_csv('../../examples/resources/Zeitreihen2020.csv', index_col=0, parse_dates=True).sort_index()\n", @@ -105,18 +107,7 @@ "\n", "print(f'Timesteps: 
{len(timesteps)} ({len(timesteps) / 96:.0f} days at 15-min resolution)')\n", "print(f'Heat demand: {heat_demand.min():.1f} - {heat_demand.max():.1f} MW')" - ], - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Timesteps: 672 (7 days at 15-min resolution)\n", - "Heat demand: 122.2 - 254.0 MW\n" - ] - } - ], - "execution_count": 2 + ] }, { "cell_type": "markdown", @@ -132,56 +123,34 @@ }, { "cell_type": "code", + "execution_count": 3, "id": "guysdaf98es", "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T01:03:16.301377Z", + "start_time": "2025-12-14T01:03:15.131379Z" + }, "execution": { "iopub.execute_input": "2025-12-13T23:11:03.483963Z", "iopub.status.busy": "2025-12-13T23:11:03.483827Z", "iopub.status.idle": "2025-12-13T23:11:04.502966Z", "shell.execute_reply": "2025-12-13T23:11:04.501454Z" - }, - "ExecuteTime": { - "end_time": "2025-12-13T23:13:25.676859Z", - "start_time": "2025-12-13T23:13:24.399493Z" } }, - "source": [ - "# Create a simple system to demonstrate clustering\n", - "fs_demo = fx.FlowSystem(timesteps)\n", - "fs_demo.add_elements(\n", - " fx.Bus('Heat'),\n", - " fx.Bus('Gas'),\n", - " fx.Effect('costs', '€', is_standard=True, is_objective=True),\n", - " fx.linear_converters.Boiler(\n", - " 'Boiler',\n", - " thermal_efficiency=0.9,\n", - " thermal_flow=fx.Flow('Q_th', bus='Heat', size=300),\n", - " fuel_flow=fx.Flow('Q_fu', bus='Gas'),\n", - " ),\n", - " fx.Source('GasGrid', outputs=[fx.Flow('Q_Gas', bus='Gas', size=1000, effects_per_flow_hour={'costs': gas_price})]),\n", - " fx.Sink('HeatDemand', inputs=[fx.Flow('Q_th', bus='Heat', size=1, fixed_relative_profile=heat_demand)]),\n", - ")\n", - "\n", - "# Cluster with 4 typical days\n", - "fs_clustered_demo = fs_demo.transform.cluster(n_clusters=4, cluster_duration='1D')\n", - "\n", - "# Get the clustering object to access tsam results\n", - "clustering_info = fs_clustered_demo._clustering_info\n", - "clustering = clustering_info['clustering']\n", - "\n", - "# Plot original vs aggregated data\n", - "clustering.plot()" - ], "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "\u001B[2m2025-12-14 00:13:24.408\u001B[0m \u001B[33mWARNING \u001B[0m │ FlowSystem is not connected_and_transformed. Connecting and transforming data now.\n" + "\u001b[2m2025-12-14 02:03:15.139\u001b[0m \u001b[33mWARNING \u001b[0m │ FlowSystem is not connected_and_transformed. Connecting and transforming data now.\n" ] }, { "data": { + "text/html": [ + "
\n", + "
" + ], "text/plain": [ "PlotResult(data= Size: 27kB\n", "Dimensions: (time: 672, variable: 2)\n", @@ -262,10 +231,6 @@ " 'xaxis': {'anchor': 'y', 'domain': [0.0, 1.0], 'title': {'text': 'Time in h'}},\n", " 'yaxis': {'anchor': 'x', 'domain': [0.0, 1.0], 'title': {'text': 'Value'}}}\n", "}))" - ], - "text/html": [ - "
\n", - "
" ] }, "execution_count": 3, @@ -273,7 +238,33 @@ "output_type": "execute_result" } ], - "execution_count": 3 + "source": [ + "# Create a simple system to demonstrate clustering\n", + "fs_demo = fx.FlowSystem(timesteps)\n", + "fs_demo.add_elements(\n", + " fx.Bus('Heat'),\n", + " fx.Bus('Gas'),\n", + " fx.Effect('costs', '€', is_standard=True, is_objective=True),\n", + " fx.linear_converters.Boiler(\n", + " 'Boiler',\n", + " thermal_efficiency=0.9,\n", + " thermal_flow=fx.Flow('Q_th', bus='Heat', size=300),\n", + " fuel_flow=fx.Flow('Q_fu', bus='Gas'),\n", + " ),\n", + " fx.Source('GasGrid', outputs=[fx.Flow('Q_Gas', bus='Gas', size=1000, effects_per_flow_hour={'costs': gas_price})]),\n", + " fx.Sink('HeatDemand', inputs=[fx.Flow('Q_th', bus='Heat', size=1, fixed_relative_profile=heat_demand)]),\n", + ")\n", + "\n", + "# Cluster with 4 typical days\n", + "fs_clustered_demo = fs_demo.transform.cluster(n_clusters=4, cluster_duration='1D')\n", + "\n", + "# Get the clustering object to access tsam results\n", + "clustering_info = fs_clustered_demo._clustering_info\n", + "clustering = clustering_info['clustering']\n", + "\n", + "# Plot original vs aggregated data\n", + "clustering.plot()" + ] }, { "cell_type": "markdown", @@ -287,81 +278,20 @@ }, { "cell_type": "code", + "execution_count": 4, "id": "q2xt2juvyo", "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T01:03:18.069583Z", + "start_time": "2025-12-14T01:03:16.791574Z" + }, "execution": { "iopub.execute_input": "2025-12-13T23:11:04.508499Z", "iopub.status.busy": "2025-12-13T23:11:04.508210Z", "iopub.status.idle": "2025-12-13T23:11:06.424428Z", "shell.execute_reply": "2025-12-13T23:11:06.417096Z" - }, - "ExecuteTime": { - "end_time": "2025-12-13T23:13:27.452394Z", - "start_time": "2025-12-13T23:13:26.223467Z" } }, - "source": [ - "# Test different numbers of clusters\n", - "cluster_configs = [2, 3, 4, 5]\n", - "clustering_results = {}\n", - "\n", - "for n in cluster_configs:\n", - " fs_test = fs_demo.copy()\n", - " fs_clustered = fs_test.transform.cluster(n_clusters=n, cluster_duration='1D')\n", - " clustering_results[n] = fs_clustered._clustering_info['clustering']\n", - "\n", - "# Compare the aggregated heat demand for each configuration\n", - "fig = make_subplots(\n", - " rows=2,\n", - " cols=2,\n", - " subplot_titles=[f'{n} Typical Days' for n in cluster_configs],\n", - " shared_xaxes=True,\n", - " shared_yaxes=True,\n", - " vertical_spacing=0.12,\n", - " horizontal_spacing=0.08,\n", - ")\n", - "\n", - "for i, (_n, clustering) in enumerate(clustering_results.items()):\n", - " row, col = divmod(i, 2)\n", - " row += 1\n", - " col += 1\n", - "\n", - " # Original data\n", - " original = clustering.original_data['HeatDemand(Q_th)|fixed_relative_profile']\n", - " aggregated = clustering.aggregated_data['HeatDemand(Q_th)|fixed_relative_profile']\n", - "\n", - " fig.add_trace(\n", - " go.Scatter(\n", - " x=list(range(len(original))),\n", - " y=original.values,\n", - " name='Original',\n", - " line=dict(color='lightgray'),\n", - " showlegend=(i == 0),\n", - " ),\n", - " row=row,\n", - " col=col,\n", - " )\n", - " fig.add_trace(\n", - " go.Scatter(\n", - " x=list(range(len(aggregated))),\n", - " y=aggregated.values,\n", - " name='Aggregated',\n", - " line=dict(color='blue', width=2),\n", - " showlegend=(i == 0),\n", - " ),\n", - " row=row,\n", - " col=col,\n", - " )\n", - "\n", - "fig.update_layout(\n", - " title='Heat Demand: Original vs Clustered Data',\n", - " height=500,\n", - " legend=dict(orientation='h', yanchor='bottom', 
y=1.02),\n", - ")\n", - "fig.update_xaxes(title_text='Timestep', row=2)\n", - "fig.update_yaxes(title_text='MW', col=1)\n", - "fig.show()" - ], "outputs": [ { "data": { @@ -466,7 +396,7 @@ "\n", "`).concat($R(e),`\n", "`));var s=new U_({actual:e,expected:t,message:r,operator:i,stackStartFn:n});throw s.generatedMessage=o,s}}Ef.match=function e(t,r,n){T4e(t,r,n,e,\"match\")};Ef.doesNotMatch=function e(t,r,n){T4e(t,r,n,e,\"doesNotMatch\")};function A4e(){for(var e=arguments.length,t=new Array(e),r=0;r{var xE=1e3,bE=xE*60,wE=bE*60,TE=wE*24,FEt=TE*365.25;M4e.exports=function(e,t){t=t||{};var r=typeof e;if(r===\"string\"&&e.length>0)return zEt(e);if(r===\"number\"&&isNaN(e)===!1)return t.long?qEt(e):OEt(e);throw new Error(\"val is not a non-empty string or a valid number. val=\"+JSON.stringify(e))};function zEt(e){if(e=String(e),!(e.length>100)){var t=/^((?:\\d+)?\\.?\\d+) *(milliseconds?|msecs?|ms|seconds?|secs?|s|minutes?|mins?|m|hours?|hrs?|h|days?|d|years?|yrs?|y)?$/i.exec(e);if(t){var r=parseFloat(t[1]),n=(t[2]||\"ms\").toLowerCase();switch(n){case\"years\":case\"year\":case\"yrs\":case\"yr\":case\"y\":return r*FEt;case\"days\":case\"day\":case\"d\":return r*TE;case\"hours\":case\"hour\":case\"hrs\":case\"hr\":case\"h\":return r*wE;case\"minutes\":case\"minute\":case\"mins\":case\"min\":case\"m\":return r*bE;case\"seconds\":case\"second\":case\"secs\":case\"sec\":case\"s\":return r*xE;case\"milliseconds\":case\"millisecond\":case\"msecs\":case\"msec\":case\"ms\":return r;default:return}}}}function OEt(e){return e>=TE?Math.round(e/TE)+\"d\":e>=wE?Math.round(e/wE)+\"h\":e>=bE?Math.round(e/bE)+\"m\":e>=xE?Math.round(e/xE)+\"s\":e+\"ms\"}function qEt(e){return iD(e,TE,\"day\")||iD(e,wE,\"hour\")||iD(e,bE,\"minute\")||iD(e,xE,\"second\")||e+\" ms\"}function iD(e,t,r){if(!(e{Lc=k4e.exports=nW.debug=nW.default=nW;Lc.coerce=GEt;Lc.disable=UEt;Lc.enable=NEt;Lc.enabled=VEt;Lc.humanize=E4e();Lc.names=[];Lc.skips=[];Lc.formatters={};var iW;function BEt(e){var t=0,r;for(r in e)t=(t<<5)-t+e.charCodeAt(r),t|=0;return Lc.colors[Math.abs(t)%Lc.colors.length]}function nW(e){function t(){if(t.enabled){var r=t,n=+new Date,i=n-(iW||n);r.diff=i,r.prev=iW,r.curr=n,iW=n;for(var a=new Array(arguments.length),o=0;o{lp=P4e.exports=C4e();lp.log=WEt;lp.formatArgs=jEt;lp.save=XEt;lp.load=L4e;lp.useColors=HEt;lp.storage=typeof chrome!=\"undefined\"&&typeof chrome.storage!=\"undefined\"?chrome.storage.local:ZEt();lp.colors=[\"lightseagreen\",\"forestgreen\",\"goldenrod\",\"dodgerblue\",\"darkorchid\",\"crimson\"];function HEt(){return typeof window!=\"undefined\"&&window.process&&window.process.type===\"renderer\"?!0:typeof document!=\"undefined\"&&document.documentElement&&document.documentElement.style&&document.documentElement.style.WebkitAppearance||typeof window!=\"undefined\"&&window.console&&(window.console.firebug||window.console.exception&&window.console.table)||typeof navigator!=\"undefined\"&&navigator.userAgent&&navigator.userAgent.toLowerCase().match(/firefox\\/(\\d+)/)&&parseInt(RegExp.$1,10)>=31||typeof navigator!=\"undefined\"&&navigator.userAgent&&navigator.userAgent.toLowerCase().match(/applewebkit\\/(\\d+)/)}lp.formatters.j=function(e){try{return JSON.stringify(e)}catch(t){return\"[UnexpectedJSONParseError]: \"+t.message}};function jEt(e){var t=this.useColors;if(e[0]=(t?\"%c\":\"\")+this.namespace+(t?\" %c\":\" \")+e[0]+(t?\"%c \":\" \")+\"+\"+lp.humanize(this.diff),!!t){var r=\"color: \"+this.color;e.splice(1,0,r,\"color: inherit\");var 
Pe=d[Fe][Ae];return A2.isTypedArray(Pe)&&(Pe=Array.from(Pe)),Pe}),me=ie.toDataURL(\"image/png\");else if(f._hasSource)if(h)me=f.source;else{var Se=f._canvas.el.getContext(\"2d\",{willReadFrequently:!0}),Le=Se.getImageData(0,0,b,p).data;ie=G(function(Ae,Fe){var Pe=4*(Fe*b+Ae);return[Le[Pe],Le[Pe+1],Le[Pe+2],Le[Pe+3]]}),me=ie.toDataURL(\"image/png\")}Z.attr({\"xlink:href\":me,height:z,width:T,x:S,y:C})});t._promises.push(Ce)})}});var OEe=ye((qdr,zEe)=>{\"use strict\";var dCt=Oa();zEe.exports=function(t){dCt.select(t).selectAll(\".im image\").style(\"opacity\",function(r){return r[0].trace.opacity})}});var UEe=ye((Bdr,NEe)=>{\"use strict\";var qEe=vf(),BEe=Dr(),hD=BEe.isArrayOrTypedArray,vCt=ZT();NEe.exports=function(t,r,n){var i=t.cd[0],a=i.trace,o=t.xa,s=t.ya;if(!(qEe.inbox(r-i.x0,r-(i.x0+i.w*a.dx),0)>0||qEe.inbox(n-i.y0,n-(i.y0+i.h*a.dy),0)>0)){var l=Math.floor((r-i.x0)/a.dx),u=Math.floor(Math.abs(n-i.y0)/a.dy),c;if(a._hasZ?c=i.z[u][l]:a._hasSource&&(c=a._canvas.el.getContext(\"2d\",{willReadFrequently:!0}).getImageData(l,u,1,1).data),!!c){var f=i.hi||a.hoverinfo,h;if(f){var d=f.split(\"+\");d.indexOf(\"all\")!==-1&&(d=[\"color\"]),d.indexOf(\"color\")!==-1&&(h=!0)}var v=vCt.colormodel[a.colormodel],_=v.colormodel||a.colormodel,b=_.length,p=a._scaler(c),k=v.suffix,E=[];(a.hovertemplate||h)&&(E.push(\"[\"+[p[0]+k[0],p[1]+k[1],p[2]+k[2]].join(\", \")),b===4&&E.push(\", \"+p[3]+k[3]),E.push(\"]\"),E=E.join(\"\"),t.extraText=_.toUpperCase()+\": \"+E);var S;hD(a.hovertext)&&hD(a.hovertext[u])?S=a.hovertext[u][l]:hD(a.text)&&hD(a.text[u])&&(S=a.text[u][l]);var L=s.c2p(i.y0+(u+.5)*a.dy),x=i.x0+(l+.5)*a.dx,C=i.y0+(u+.5)*a.dy,M=\"[\"+c.slice(0,a.colormodel.length).join(\", \")+\"]\";return[BEe.extendFlat(t,{index:[u,l],x0:o.c2p(i.x0+l*a.dx),x1:o.c2p(i.x0+(l+1)*a.dx),y0:L,y1:L,color:p,xVal:x,xLabelVal:x,yVal:C,yLabelVal:C,zLabelVal:M,text:S,hovertemplateLabels:{zLabel:M,colorLabel:E,\"color[0]Label\":p[0]+k[0],\"color[1]Label\":p[1]+k[1],\"color[2]Label\":p[2]+k[2],\"color[3]Label\":p[3]+k[3]}})]}}}});var GEe=ye((Ndr,VEe)=>{\"use strict\";VEe.exports=function(t,r){return\"xVal\"in r&&(t.x=r.xVal),\"yVal\"in r&&(t.y=r.yVal),r.xa&&(t.xaxis=r.xa),r.ya&&(t.yaxis=r.ya),t.color=r.color,t.colormodel=r.trace.colormodel,t.z||(t.z=r.color),t}});var jEe=ye((Udr,HEe)=>{\"use strict\";HEe.exports={attributes:uH(),supplyDefaults:U3e(),calc:IEe(),plot:FEe(),style:OEe(),hoverPoints:UEe(),eventData:GEe(),moduleType:\"trace\",name:\"image\",basePlotModule:ph(),categories:[\"cartesian\",\"svg\",\"2dMap\",\"noSortingByValue\"],animatable:!1,meta:{}}});var XEe=ye((Vdr,WEe)=>{\"use strict\";WEe.exports=jEe()});var S2=ye((Gdr,YEe)=>{\"use strict\";var 
pCt=Gl(),gCt=Cc().attributes,mCt=ec(),yCt=Lh(),{hovertemplateAttrs:_Ct,texttemplateAttrs:xCt,templatefallbackAttrs:ZEe}=Ll(),EE=Ao().extendFlat,bCt=Pd().pattern,dD=mCt({editType:\"plot\",arrayOk:!0,colorEditType:\"plot\"});YEe.exports={labels:{valType:\"data_array\",editType:\"calc\"},label0:{valType:\"number\",dflt:0,editType:\"calc\"},dlabel:{valType:\"number\",dflt:1,editType:\"calc\"},values:{valType:\"data_array\",editType:\"calc\"},marker:{colors:{valType:\"data_array\",editType:\"calc\"},line:{color:{valType:\"color\",dflt:yCt.defaultLine,arrayOk:!0,editType:\"style\"},width:{valType:\"number\",min:0,dflt:0,arrayOk:!0,editType:\"style\"},editType:\"calc\"},pattern:bCt,editType:\"calc\"},text:{valType:\"data_array\",editType:\"plot\"},hovertext:{valType:\"string\",dflt:\"\",arrayOk:!0,editType:\"style\"},scalegroup:{valType:\"string\",dflt:\"\",editType:\"calc\"},textinfo:{valType:\"flaglist\",flags:[\"label\",\"text\",\"value\",\"percent\"],extras:[\"none\"],editType:\"calc\"},hoverinfo:EE({},pCt.hoverinfo,{flags:[\"label\",\"text\",\"value\",\"percent\",\"name\"]}),hovertemplate:_Ct({},{keys:[\"label\",\"color\",\"value\",\"percent\",\"text\"]}),hovertemplatefallback:ZEe(),texttemplate:xCt({editType:\"plot\"},{keys:[\"label\",\"color\",\"value\",\"percent\",\"text\"]}),texttemplatefallback:ZEe({editType:\"plot\"}),textposition:{valType:\"enumerated\",values:[\"inside\",\"outside\",\"auto\",\"none\"],dflt:\"auto\",arrayOk:!0,editType:\"plot\"},textfont:EE({},dD,{}),insidetextorientation:{valType:\"enumerated\",values:[\"horizontal\",\"radial\",\"tangential\",\"auto\"],dflt:\"auto\",editType:\"plot\"},insidetextfont:EE({},dD,{}),outsidetextfont:EE({},dD,{}),automargin:{valType:\"boolean\",dflt:!1,editType:\"plot\"},title:{text:{valType:\"string\",dflt:\"\",editType:\"plot\"},font:EE({},dD,{}),position:{valType:\"enumerated\",values:[\"top left\",\"top center\",\"top right\",\"middle center\",\"bottom left\",\"bottom center\",\"bottom right\"],editType:\"plot\"},editType:\"plot\"},domain:gCt({name:\"pie\",trace:!0,editType:\"calc\"}),hole:{valType:\"number\",min:0,max:1,dflt:0,editType:\"calc\"},sort:{valType:\"boolean\",dflt:!0,editType:\"calc\"},direction:{valType:\"enumerated\",values:[\"clockwise\",\"counterclockwise\"],dflt:\"counterclockwise\",editType:\"calc\"},rotation:{valType:\"angle\",dflt:0,editType:\"calc\"},pull:{valType:\"number\",min:0,max:1,dflt:0,arrayOk:!0,editType:\"calc\"}}});var M2=ye((Hdr,$Ee)=>{\"use strict\";var wCt=Eo(),kE=Dr(),TCt=S2(),ACt=Cc().defaults,SCt=r0().handleText,MCt=Dr().coercePattern;function KEe(e,t){var r=kE.isArrayOrTypedArray(e),n=kE.isArrayOrTypedArray(t),i=Math.min(r?e.length:1/0,n?t.length:1/0);if(isFinite(i)||(i=0),i&&n){for(var a,o=0;o0){a=!0;break}}a||(i=0)}return{hasLabels:r,hasValues:n,len:i}}function JEe(e,t,r,n,i){var a=n(\"marker.line.width\");a&&n(\"marker.line.color\",i?void 0:r.paper_bgcolor);var o=n(\"marker.colors\");MCt(n,\"marker.pattern\",o),e.marker&&!t.marker.pattern.fgcolor&&(t.marker.pattern.fgcolor=e.marker.colors),t.marker.pattern.bgcolor||(t.marker.pattern.bgcolor=r.paper_bgcolor)}function ECt(e,t,r,n){function i(k,E){return kE.coerce(e,t,TCt,k,E)}var a=i(\"labels\"),o=i(\"values\"),s=KEe(a,o),l=s.len;if(t._hasLabels=s.hasLabels,t._hasValues=s.hasValues,!t._hasLabels&&t._hasValues&&(i(\"label0\"),i(\"dlabel\")),!l){t.visible=!1;return}t._length=l,JEe(e,t,n,i,!0),i(\"scalegroup\");var u=i(\"text\"),c=i(\"texttemplate\");i(\"texttemplatefallback\");var 
f;if(c||(f=i(\"textinfo\",kE.isArrayOrTypedArray(u)?\"text+percent\":\"percent\")),i(\"hovertext\"),i(\"hovertemplate\"),i(\"hovertemplatefallback\"),c||f&&f!==\"none\"){var h=i(\"textposition\");SCt(e,t,n,i,h,{moduleHasSelected:!1,moduleHasUnselected:!1,moduleHasConstrain:!1,moduleHasCliponaxis:!1,moduleHasTextangle:!1,moduleHasInsideanchor:!1});var d=Array.isArray(h)||h===\"auto\",v=d||h===\"outside\";v&&i(\"automargin\"),(h===\"inside\"||h===\"auto\"||Array.isArray(h))&&i(\"insidetextorientation\")}else f===\"none\"&&i(\"textposition\",\"none\");ACt(t,n,i);var _=i(\"hole\"),b=i(\"title.text\");if(b){var p=i(\"title.position\",_?\"middle center\":\"top center\");!_&&p===\"middle center\"&&(t.title.position=\"top center\"),kE.coerceFont(i,\"title.font\",n.font)}i(\"sort\"),i(\"direction\"),i(\"rotation\"),i(\"pull\")}$Ee.exports={handleLabelsAndValues:KEe,handleMarkerDefaults:JEe,supplyDefaults:ECt}});var vD=ye((jdr,QEe)=>{\"use strict\";QEe.exports={hiddenlabels:{valType:\"data_array\",editType:\"calc\"},piecolorway:{valType:\"colorlist\",editType:\"calc\"},extendpiecolors:{valType:\"boolean\",dflt:!0,editType:\"calc\"}}});var tke=ye((Wdr,eke)=>{\"use strict\";var kCt=Dr(),CCt=vD();eke.exports=function(t,r){function n(i,a){return kCt.coerce(t,r,CCt,i,a)}n(\"hiddenlabels\"),n(\"piecolorway\",r.colorway),n(\"extendpiecolors\")}});var wA=ye((Xdr,nke)=>{\"use strict\";var LCt=Eo(),vW=cd(),PCt=ka(),ICt={};function RCt(e,t){var r=[],n=e._fullLayout,i=n.hiddenlabels||[],a=t.labels,o=t.marker.colors||[],s=t.values,l=t._length,u=t._hasValues&&l,c,f;if(t.dlabel)for(a=new Array(l),c=0;c=0});var S=t.type===\"funnelarea\"?_:t.sort;return S&&r.sort(function(L,x){return x.v-L.v}),r[0]&&(r[0].vTotal=v),r}function rke(e){return function(r,n){return!r||(r=vW(r),!r.isValid())?!1:(r=PCt.addOpacity(r,r.getAlpha()),e[n]||(e[n]=r),r)}}function DCt(e,t){var r=(t||{}).type;r||(r=\"pie\");var n=e._fullLayout,i=e.calcdata,a=n[r+\"colorway\"],o=n[\"_\"+r+\"colormap\"];n[\"extend\"+r+\"colors\"]&&(a=ike(a,ICt));for(var s=0,l=0;l{\"use strict\";var FCt=ip().appendArrayMultiPointValues;ake.exports=function(t,r){var n={curveNumber:r.index,pointNumbers:t.pts,data:r._input,fullData:r,label:t.label,color:t.color,value:t.v,percent:t.percent,text:t.text,bbox:t.bbox,v:t.v};return t.pts.length===1&&(n.pointNumber=n.i=t.pts[0]),FCt(n,r,t.pts),r.type===\"funnelarea\"&&(delete n.v,delete n.i),n}});var yD=ye((Ydr,Eke)=>{\"use strict\";var Fp=Oa(),zCt=Mc(),pD=vf(),hke=ka(),Wy=So(),rv=Dr(),OCt=rv.strScale,ske=rv.strTranslate,pW=ru(),dke=bv(),qCt=dke.recordMinTextSize,BCt=dke.clearMinTextSize,vke=e2().TEXTPAD,ns=l_(),gD=oke(),lke=Dr().isValidTextValue;function NCt(e,t){var r=e._context.staticPlot,n=e._fullLayout,i=n._size;BCt(\"pie\",n),mke(t,e),Ake(t,i);var a=rv.makeTraceGroups(n._pielayer,t,\"trace\").each(function(o){var s=Fp.select(this),l=o[0],u=l.trace;YCt(o),s.attr(\"stroke-linejoin\",\"round\"),s.each(function(){var c=Fp.select(this).selectAll(\"g.slice\").data(o);c.enter().append(\"g\").classed(\"slice\",!0),c.exit().remove();var f=[[[],[]],[[],[]]],h=!1;c.each(function(S,L){if(S.hidden){Fp.select(this).selectAll(\"path,g\").remove();return}S.pointNumber=S.i,S.curveNumber=u.index,f[S.pxmid[1]<0?0:1][S.pxmid[0]<0?0:1].push(S);var x=l.cx,C=l.cy,M=Fp.select(this),g=M.selectAll(\"path.surface\").data([S]);if(g.enter().append(\"path\").classed(\"surface\",!0).style({\"pointer-events\":r?\"none\":\"all\"}),M.call(pke,e,o),u.pull){var 
P=+ns.castOption(u.pull,S.pts)||0;P>0&&(x+=P*S.pxmid[0],C+=P*S.pxmid[1])}S.cxFinal=x,S.cyFinal=C;function T(N,j,re,oe){var _e=oe*(j[0]-N[0]),Ee=oe*(j[1]-N[1]);return\"a\"+oe*l.r+\",\"+oe*l.r+\" 0 \"+S.largeArc+(re?\" 1 \":\" 0 \")+_e+\",\"+Ee}var z=u.hole;if(S.v===l.vTotal){var O=\"M\"+(x+S.px0[0])+\",\"+(C+S.px0[1])+T(S.px0,S.pxmid,!0,1)+T(S.pxmid,S.px0,!0,1)+\"Z\";z?g.attr(\"d\",\"M\"+(x+z*S.px0[0])+\",\"+(C+z*S.px0[1])+T(S.px0,S.pxmid,!1,z)+T(S.pxmid,S.px0,!1,z)+\"Z\"+O):g.attr(\"d\",O)}else{var V=T(S.px0,S.px1,!0,1);if(z){var G=1-z;g.attr(\"d\",\"M\"+(x+z*S.px1[0])+\",\"+(C+z*S.px1[1])+T(S.px1,S.px0,!1,z)+\"l\"+G*S.px0[0]+\",\"+G*S.px0[1]+V+\"Z\")}else g.attr(\"d\",\"M\"+x+\",\"+C+\"l\"+S.px0[0]+\",\"+S.px0[1]+V+\"Z\")}Ske(e,S,l);var Z=ns.castOption(u.textposition,S.pts),H=M.selectAll(\"g.slicetext\").data(S.text&&Z!==\"none\"?[0]:[]);H.enter().append(\"g\").classed(\"slicetext\",!0),H.exit().remove(),H.each(function(){var N=rv.ensureSingle(Fp.select(this),\"text\",\"\",function(ie){ie.attr(\"data-notex\",1)}),j=rv.ensureUniformFontSize(e,Z===\"outside\"?VCt(u,S,n.font):gke(u,S,n.font));N.text(S.text).attr({class:\"slicetext\",transform:\"\",\"text-anchor\":\"middle\"}).call(Wy.font,j).call(pW.convertToTspans,e);var re=Wy.bBox(N.node()),oe;if(Z===\"outside\")oe=fke(re,S);else if(oe=yke(re,S,l),Z===\"auto\"&&oe.scale<1){var _e=rv.ensureUniformFontSize(e,u.outsidetextfont);N.call(Wy.font,_e),re=Wy.bBox(N.node()),oe=fke(re,S)}var Ee=oe.textPosAngle,Ce=Ee===void 0?S.pxmid:mD(l.r,Ee);if(oe.targetX=x+Ce[0]*oe.rCenter+(oe.x||0),oe.targetY=C+Ce[1]*oe.rCenter+(oe.y||0),Mke(oe,re),oe.outside){var me=oe.targetY;S.yLabelMin=me-re.height/2,S.yLabelMid=me,S.yLabelMax=me+re.height/2,S.labelExtraX=0,S.labelExtraY=0,h=!0}oe.fontSize=j.size,qCt(u.type,oe,n),o[L].transform=oe,rv.setTransormAndDisplay(N,oe)})});var d=Fp.select(this).selectAll(\"g.titletext\").data(u.title.text?[0]:[]);if(d.enter().append(\"g\").classed(\"titletext\",!0),d.exit().remove(),d.each(function(){var S=rv.ensureSingle(Fp.select(this),\"text\",\"\",function(C){C.attr(\"data-notex\",1)}),L=u.title.text;u._meta&&(L=rv.templateString(L,u._meta)),S.text(L).attr({class:\"titletext\",transform:\"\",\"text-anchor\":\"middle\"}).call(Wy.font,u.title.font).call(pW.convertToTspans,e);var x;u.title.position===\"middle center\"?x=jCt(l):x=wke(l,i),S.attr(\"transform\",ske(x.x,x.y)+OCt(Math.min(1,x.scale))+ske(x.tx,x.ty))}),h&&XCt(f,u),UCt(c,u),h&&u.automargin){var v=Wy.bBox(s.node()),_=u.domain,b=i.w*(_.x[1]-_.x[0]),p=i.h*(_.y[1]-_.y[0]),k=(.5*b-l.r)/i.w,E=(.5*p-l.r)/i.h;zCt.autoMargin(e,\"pie.\"+u.uid+\".automargin\",{xl:_.x[0]-k,xr:_.x[1]+k,yb:_.y[0]-E,yt:_.y[1]+E,l:Math.max(l.cx-l.r-v.left,0),r:Math.max(v.right-(l.cx+l.r),0),b:Math.max(v.bottom-(l.cy+l.r),0),t:Math.max(l.cy-l.r-v.top,0),pad:5})}})});setTimeout(function(){a.selectAll(\"tspan\").each(function(){var o=Fp.select(this);o.attr(\"dy\")&&o.attr(\"dy\",o.attr(\"dy\"))})},0)}function UCt(e,t){e.each(function(r){var n=Fp.select(this);if(!r.labelExtraX&&!r.labelExtraY){n.select(\"path.textline\").remove();return}var i=n.select(\"g.slicetext text\");r.transform.targetX+=r.labelExtraX,r.transform.targetY+=r.labelExtraY,rv.setTransormAndDisplay(i,r.transform);var a=r.cxFinal+r.pxmid[0],o=r.cyFinal+r.pxmid[1],s=\"M\"+a+\",\"+o,l=(r.yLabelMax-r.yLabelMin)*(r.pxmid[0]<0?-1:1)/4;if(r.labelExtraX){var 
u=r.labelExtraX*r.pxmid[1]/r.pxmid[0],c=r.yLabelMid+r.labelExtraY-(r.cyFinal+r.pxmid[1]);Math.abs(u)>Math.abs(c)?s+=\"l\"+c*r.pxmid[0]/r.pxmid[1]+\",\"+c+\"H\"+(a+r.labelExtraX+l):s+=\"l\"+r.labelExtraX+\",\"+u+\"v\"+(c-u)+\"h\"+l}else s+=\"V\"+(r.yLabelMid+r.labelExtraY)+\"h\"+l;rv.ensureSingle(n,\"path\",\"textline\").call(hke.stroke,t.outsidetextfont.color).attr({\"stroke-width\":Math.min(2,t.outsidetextfont.size/8),d:s,fill:\"none\"})})}function pke(e,t,r){var n=r[0],i=n.cx,a=n.cy,o=n.trace,s=o.type===\"funnelarea\";\"_hasHoverLabel\"in o||(o._hasHoverLabel=!1),\"_hasHoverEvent\"in o||(o._hasHoverEvent=!1),e.on(\"mouseover\",function(l){var u=t._fullLayout,c=t._fullData[o.index];if(!(t._dragging||u.hovermode===!1)){var f=c.hoverinfo;if(Array.isArray(f)&&(f=pD.castHoverinfo({hoverinfo:[ns.castOption(f,l.pts)],_module:o._module},u,0)),f===\"all\"&&(f=\"label+text+value+percent+name\"),c.hovertemplate||f!==\"none\"&&f!==\"skip\"&&f){var h=l.rInscribed||0,d=i+l.pxmid[0]*(1-h),v=a+l.pxmid[1]*(1-h),_=u.separators,b=[];if(f&&f.indexOf(\"label\")!==-1&&b.push(l.label),l.text=ns.castOption(c.hovertext||c.text,l.pts),f&&f.indexOf(\"text\")!==-1){var p=l.text;rv.isValidTextValue(p)&&b.push(p)}l.value=l.v,l.valueLabel=ns.formatPieValue(l.v,_),f&&f.indexOf(\"value\")!==-1&&b.push(l.valueLabel),l.percent=l.v/n.vTotal,l.percentLabel=ns.formatPiePercent(l.percent,_),f&&f.indexOf(\"percent\")!==-1&&b.push(l.percentLabel);var k=c.hoverlabel,E=k.font,S=[];pD.loneHover({trace:o,x0:d-h*n.r,x1:d+h*n.r,y:v,_x0:s?i+l.TL[0]:d-h*n.r,_x1:s?i+l.TR[0]:d+h*n.r,_y0:s?a+l.TL[1]:v-h*n.r,_y1:s?a+l.BL[1]:v+h*n.r,text:b.join(\"
\"),name:c.hovertemplate||f.indexOf(\"name\")!==-1?c.name:void 0,idealAlign:l.pxmid[0]<0?\"left\":\"right\",color:ns.castOption(k.bgcolor,l.pts)||l.color,borderColor:ns.castOption(k.bordercolor,l.pts),fontFamily:ns.castOption(E.family,l.pts),fontSize:ns.castOption(E.size,l.pts),fontColor:ns.castOption(E.color,l.pts),nameLength:ns.castOption(k.namelength,l.pts),textAlign:ns.castOption(k.align,l.pts),hovertemplate:ns.castOption(c.hovertemplate,l.pts),hovertemplateLabels:l,eventData:[gD(l,c)]},{container:u._hoverlayer.node(),outerContainer:u._paper.node(),gd:t,inOut_bbox:S}),l.bbox=S[0],o._hasHoverLabel=!0}o._hasHoverEvent=!0,t.emit(\"plotly_hover\",{points:[gD(l,c)],event:Fp.event})}}),e.on(\"mouseout\",function(l){var u=t._fullLayout,c=t._fullData[o.index],f=Fp.select(this).datum();o._hasHoverEvent&&(l.originalEvent=Fp.event,t.emit(\"plotly_unhover\",{points:[gD(f,c)],event:Fp.event}),o._hasHoverEvent=!1),o._hasHoverLabel&&(pD.loneUnhover(u._hoverlayer.node()),o._hasHoverLabel=!1)}),e.on(\"click\",function(l){var u=t._fullLayout,c=t._fullData[o.index];t._dragging||u.hovermode===!1||(t._hoverdata=[gD(l,c)],pD.click(t,Fp.event))})}function VCt(e,t,r){var n=ns.castOption(e.outsidetextfont.color,t.pts)||ns.castOption(e.textfont.color,t.pts)||r.color,i=ns.castOption(e.outsidetextfont.family,t.pts)||ns.castOption(e.textfont.family,t.pts)||r.family,a=ns.castOption(e.outsidetextfont.size,t.pts)||ns.castOption(e.textfont.size,t.pts)||r.size,o=ns.castOption(e.outsidetextfont.weight,t.pts)||ns.castOption(e.textfont.weight,t.pts)||r.weight,s=ns.castOption(e.outsidetextfont.style,t.pts)||ns.castOption(e.textfont.style,t.pts)||r.style,l=ns.castOption(e.outsidetextfont.variant,t.pts)||ns.castOption(e.textfont.variant,t.pts)||r.variant,u=ns.castOption(e.outsidetextfont.textcase,t.pts)||ns.castOption(e.textfont.textcase,t.pts)||r.textcase,c=ns.castOption(e.outsidetextfont.lineposition,t.pts)||ns.castOption(e.textfont.lineposition,t.pts)||r.lineposition,f=ns.castOption(e.outsidetextfont.shadow,t.pts)||ns.castOption(e.textfont.shadow,t.pts)||r.shadow;return{color:n,family:i,size:a,weight:o,style:s,variant:l,textcase:u,lineposition:c,shadow:f}}function gke(e,t,r){var n=ns.castOption(e.insidetextfont.color,t.pts);!n&&e._input.textfont&&(n=ns.castOption(e._input.textfont.color,t.pts));var i=ns.castOption(e.insidetextfont.family,t.pts)||ns.castOption(e.textfont.family,t.pts)||r.family,a=ns.castOption(e.insidetextfont.size,t.pts)||ns.castOption(e.textfont.size,t.pts)||r.size,o=ns.castOption(e.insidetextfont.weight,t.pts)||ns.castOption(e.textfont.weight,t.pts)||r.weight,s=ns.castOption(e.insidetextfont.style,t.pts)||ns.castOption(e.textfont.style,t.pts)||r.style,l=ns.castOption(e.insidetextfont.variant,t.pts)||ns.castOption(e.textfont.variant,t.pts)||r.variant,u=ns.castOption(e.insidetextfont.textcase,t.pts)||ns.castOption(e.textfont.textcase,t.pts)||r.textcase,c=ns.castOption(e.insidetextfont.lineposition,t.pts)||ns.castOption(e.textfont.lineposition,t.pts)||r.lineposition,f=ns.castOption(e.insidetextfont.shadow,t.pts)||ns.castOption(e.textfont.shadow,t.pts)||r.shadow;return{color:n||hke.contrast(t.color),family:i,size:a,weight:o,style:s,variant:l,textcase:u,lineposition:c,shadow:f}}function mke(e,t){for(var r,n,i=0;i=-4;k-=2)p(Math.PI*k,\"tan\");for(k=4;k>=-4;k-=2)p(Math.PI*(k+1),\"tan\")}if(f||d){for(k=4;k>=-4;k-=2)p(Math.PI*(k+1.5),\"rad\");for(k=4;k>=-4;k-=2)p(Math.PI*(k+.5),\"rad\")}}if(s||v||f){var 
E=Math.sqrt(e.width*e.width+e.height*e.height);if(b={scale:i*n*2/E,rCenter:1-i,rotate:0},b.textPosAngle=(t.startangle+t.stopangle)/2,b.scale>=1)return b;_.push(b)}(v||d)&&(b=uke(e,n,o,l,u),b.textPosAngle=(t.startangle+t.stopangle)/2,_.push(b)),(v||h)&&(b=cke(e,n,o,l,u),b.textPosAngle=(t.startangle+t.stopangle)/2,_.push(b));for(var S=0,L=0,x=0;x<_.length;x++){var C=_[x].scale;if(L=1)break}return _[S]}function GCt(e,t){var r=e.startangle,n=e.stopangle;return r>t&&t>n||r0?1:-1)/2,y:a/(1+r*r/(n*n)),outside:!0}}function jCt(e){var t=Math.sqrt(e.titleBox.width*e.titleBox.width+e.titleBox.height*e.titleBox.height);return{x:e.cx,y:e.cy,scale:e.trace.hole*e.r*2/t,tx:0,ty:-e.titleBox.height/2+e.trace.title.font.size}}function wke(e,t){var r=1,n=1,i,a=e.trace,o={x:e.cx,y:e.cy},s={tx:0,ty:0};s.ty+=a.title.font.size,i=Tke(a),a.title.position.indexOf(\"top\")!==-1?(o.y-=(1+i)*e.r,s.ty-=e.titleBox.height):a.title.position.indexOf(\"bottom\")!==-1&&(o.y+=(1+i)*e.r);var l=WCt(e.r,e.trace.aspectratio),u=t.w*(a.domain.x[1]-a.domain.x[0])/2;return a.title.position.indexOf(\"left\")!==-1?(u=u+l,o.x-=(1+i)*l,s.tx+=e.titleBox.width/2):a.title.position.indexOf(\"center\")!==-1?u*=2:a.title.position.indexOf(\"right\")!==-1&&(u=u+l,o.x+=(1+i)*l,s.tx-=e.titleBox.width/2),r=u/e.titleBox.width,n=gW(e,t)/e.titleBox.height,{x:o.x,y:o.y,scale:Math.min(r,n),tx:s.tx,ty:s.ty}}function WCt(e,t){return e/(t===void 0?1:t)}function gW(e,t){var r=e.trace,n=t.h*(r.domain.y[1]-r.domain.y[0]);return Math.min(e.titleBox.height,n/2)}function Tke(e){var t=e.pull;if(!t)return 0;var r;if(rv.isArrayOrTypedArray(t))for(t=0,r=0;rt&&(t=e.pull[r]);return t}function XCt(e,t){var r,n,i,a,o,s,l,u,c,f,h,d,v;function _(E,S){return E.pxmid[1]-S.pxmid[1]}function b(E,S){return S.pxmid[1]-E.pxmid[1]}function p(E,S){S||(S={});var L=S.labelExtraY+(n?S.yLabelMax:S.yLabelMin),x=n?E.yLabelMin:E.yLabelMax,C=n?E.yLabelMax:E.yLabelMin,M=E.cyFinal+o(E.px0[1],E.px1[1]),g=L-x,P,T,z,O,V,G;if(g*l>0&&(E.labelExtraY=g),!!rv.isArrayOrTypedArray(t.pull))for(T=0;T=(ns.castOption(t.pull,z.pts)||0))&&((E.pxmid[1]-z.pxmid[1])*l>0?(O=z.cyFinal+o(z.px0[1],z.px1[1]),g=O-x-E.labelExtraY,g*l>0&&(E.labelExtraY+=g)):(C+E.labelExtraY-M)*l>0&&(P=3*s*Math.abs(T-f.indexOf(E)),V=z.cxFinal+a(z.px0[0],z.px1[0]),G=V+P-(E.cxFinal+E.pxmid[0])-E.labelExtraX,G*s>0&&(E.labelExtraX+=G)))}for(n=0;n<2;n++)for(i=n?_:b,o=n?Math.max:Math.min,l=n?1:-1,r=0;r<2;r++){for(a=r?Math.max:Math.min,s=r?1:-1,u=e[n][r],u.sort(i),c=e[1-n][r],f=c.concat(u),d=[],h=0;h1?(u=r.r,c=u/i.aspectratio):(c=r.r,u=c*i.aspectratio),u*=(1+i.baseratio)/2,l=u*c}o=Math.min(o,l/r.vTotal)}for(n=0;nt.vTotal/2?1:0,u.halfangle=Math.PI*Math.min(u.v/t.vTotal,.5),u.ring=1-n.hole,u.rInscribed=HCt(u,t))}function mD(e,t){return[e*Math.sin(t),-e*Math.cos(t)]}function Ske(e,t,r){var n=e._fullLayout,i=r.trace,a=i.texttemplate,o=i.textinfo;if(!a&&o&&o!==\"none\"){var s=o.split(\"+\"),l=function(S){return s.indexOf(S)!==-1},u=l(\"label\"),c=l(\"text\"),f=l(\"value\"),h=l(\"percent\"),d=n.separators,v;if(v=u?[t.label]:[],c){var _=ns.getFirstFilled(i.text,t.pts);lke(_)&&v.push(_)}f&&v.push(ns.formatPieValue(t.v,d)),h&&v.push(ns.formatPiePercent(t.v/r.vTotal,d)),t.text=v.join(\"
\")}function b(S){return{label:S.label,value:S.v,valueLabel:ns.formatPieValue(S.v,n.separators),percent:S.v/r.vTotal,percentLabel:ns.formatPiePercent(S.v/r.vTotal,n.separators),color:S.color,text:S.text,customdata:rv.castOption(i,S.i,\"customdata\")}}if(a){var p=rv.castOption(i,t.i,\"texttemplate\");if(!p)t.text=\"\";else{var k=b(t),E=ns.getFirstFilled(i.text,t.pts);(lke(E)||E===\"\")&&(k.text=E),t.text=rv.texttemplateString({data:[k,i._meta],fallback:i.texttemplatefallback,labels:k,locale:e._fullLayout._d3locale,template:p})}}}function Mke(e,t){var r=e.rotate*Math.PI/180,n=Math.cos(r),i=Math.sin(r),a=(t.left+t.right)/2,o=(t.top+t.bottom)/2;e.textX=a*n-o*i,e.textY=a*i+o*n,e.noCenter=!0}Eke.exports={plot:NCt,formatSliceLabel:Ske,transformInsideText:yke,determineInsideTextFont:gke,positionTitleOutside:wke,prerenderTitles:mke,layoutAreas:Ake,attachFxHandlers:pke,computeTransform:Mke}});var Lke=ye((Kdr,Cke)=>{\"use strict\";var kke=Oa(),KCt=q3(),JCt=bv().resizeText;Cke.exports=function(t){var r=t._fullLayout._pielayer.selectAll(\".trace\");JCt(t,r,\"pie\"),r.each(function(n){var i=n[0],a=i.trace,o=kke.select(this);o.style({opacity:a.opacity}),o.selectAll(\"path.surface\").each(function(s){kke.select(this).call(KCt,s,a,t)})})}});var Ike=ye(TA=>{\"use strict\";var Pke=Mc();TA.name=\"pie\";TA.plot=function(e,t,r,n){Pke.plotBasePlot(TA.name,e,t,r,n)};TA.clean=function(e,t,r,n){Pke.cleanBasePlot(TA.name,e,t,r,n)}});var Dke=ye(($dr,Rke)=>{\"use strict\";Rke.exports={attributes:S2(),supplyDefaults:M2().supplyDefaults,supplyLayoutDefaults:tke(),layoutAttributes:vD(),calc:wA().calc,crossTraceCalc:wA().crossTraceCalc,plot:yD().plot,style:Lke(),styleOne:q3(),moduleType:\"trace\",name:\"pie\",basePlotModule:Ike(),categories:[\"pie-like\",\"pie\",\"showLegend\"],meta:{}}});var zke=ye((Qdr,Fke)=>{\"use strict\";Fke.exports=Dke()});var qke=ye(AA=>{\"use strict\";var Oke=Mc();AA.name=\"sunburst\";AA.plot=function(e,t,r,n){Oke.plotBasePlot(AA.name,e,t,r,n)};AA.clean=function(e,t,r,n){Oke.cleanBasePlot(AA.name,e,t,r,n)}});var mW=ye((tvr,Bke)=>{\"use strict\";Bke.exports={CLICK_TRANSITION_TIME:750,CLICK_TRANSITION_EASING:\"linear\",eventDataKeys:[\"currentPath\",\"root\",\"entry\",\"percentRoot\",\"percentEntry\",\"percentParent\"]}});var LE=ye((rvr,Vke)=>{\"use strict\";var $Ct=Gl(),{hovertemplateAttrs:QCt,texttemplateAttrs:e6t,templatefallbackAttrs:Nke}=Ll(),t6t=Tu(),r6t=Cc().attributes,Xy=S2(),Uke=mW(),CE=Ao().extendFlat,i6t=Pd().pattern;Vke.exports={labels:{valType:\"data_array\",editType:\"calc\"},parents:{valType:\"data_array\",editType:\"calc\"},values:{valType:\"data_array\",editType:\"calc\"},branchvalues:{valType:\"enumerated\",values:[\"remainder\",\"total\"],dflt:\"remainder\",editType:\"calc\"},count:{valType:\"flaglist\",flags:[\"branches\",\"leaves\"],dflt:\"leaves\",editType:\"calc\"},level:{valType:\"any\",editType:\"plot\",anim:!0},maxdepth:{valType:\"integer\",editType:\"plot\",dflt:-1},marker:CE({colors:{valType:\"data_array\",editType:\"calc\"},line:{color:CE({},Xy.marker.line.color,{dflt:null}),width:CE({},Xy.marker.line.width,{dflt:1}),editType:\"calc\"},pattern:i6t,editType:\"calc\"},t6t(\"marker\",{colorAttr:\"colors\",anim:!1})),leaf:{opacity:{valType:\"number\",editType:\"style\",min:0,max:1},editType:\"plot\"},text:Xy.text,textinfo:{valType:\"flaglist\",flags:[\"label\",\"text\",\"value\",\"current path\",\"percent root\",\"percent entry\",\"percent 
parent\"],extras:[\"none\"],editType:\"plot\"},texttemplate:e6t({editType:\"plot\"},{keys:Uke.eventDataKeys.concat([\"label\",\"value\"])}),texttemplatefallback:Nke({editType:\"plot\"}),hovertext:Xy.hovertext,hoverinfo:CE({},$Ct.hoverinfo,{flags:[\"label\",\"text\",\"value\",\"name\",\"current path\",\"percent root\",\"percent entry\",\"percent parent\"],dflt:\"label+text+value+name\"}),hovertemplate:QCt({},{keys:Uke.eventDataKeys}),hovertemplatefallback:Nke(),textfont:Xy.textfont,insidetextorientation:Xy.insidetextorientation,insidetextfont:Xy.insidetextfont,outsidetextfont:CE({},Xy.outsidetextfont,{}),rotation:{valType:\"angle\",dflt:0,editType:\"plot\"},sort:Xy.sort,root:{color:{valType:\"color\",editType:\"calc\",dflt:\"rgba(0,0,0,0)\"},editType:\"calc\"},domain:r6t({name:\"sunburst\",trace:!0,editType:\"calc\"})}});var yW=ye((ivr,Gke)=>{\"use strict\";Gke.exports={sunburstcolorway:{valType:\"colorlist\",editType:\"calc\"},extendsunburstcolors:{valType:\"boolean\",dflt:!0,editType:\"calc\"}}});var Xke=ye((nvr,Wke)=>{\"use strict\";var Hke=Dr(),n6t=LE(),a6t=Cc().defaults,o6t=r0().handleText,s6t=M2().handleMarkerDefaults,jke=tc(),l6t=jke.hasColorscale,u6t=jke.handleDefaults;Wke.exports=function(t,r,n,i){function a(h,d){return Hke.coerce(t,r,n6t,h,d)}var o=a(\"labels\"),s=a(\"parents\");if(!o||!o.length||!s||!s.length){r.visible=!1;return}var l=a(\"values\");l&&l.length?a(\"branchvalues\"):a(\"count\"),a(\"level\"),a(\"maxdepth\"),s6t(t,r,i,a);var u=r._hasColorscale=l6t(t,\"marker\",\"colors\")||(t.marker||{}).coloraxis;u&&u6t(t,r,i,a,{prefix:\"marker.\",cLetter:\"c\"}),a(\"leaf.opacity\",u?1:.7);var c=a(\"text\");a(\"texttemplate\"),a(\"texttemplatefallback\"),r.texttemplate||a(\"textinfo\",Hke.isArrayOrTypedArray(c)?\"text+label\":\"label\"),a(\"hovertext\"),a(\"hovertemplate\"),a(\"hovertemplatefallback\");var f=\"auto\";o6t(t,r,i,a,f,{moduleHasSelected:!1,moduleHasUnselected:!1,moduleHasConstrain:!1,moduleHasCliponaxis:!1,moduleHasTextangle:!1,moduleHasInsideanchor:!1}),a(\"insidetextorientation\"),a(\"sort\"),a(\"rotation\"),a(\"root.color\"),a6t(r,i,a),r._length=null}});var Yke=ye((avr,Zke)=>{\"use strict\";var c6t=Dr(),f6t=yW();Zke.exports=function(t,r){function n(i,a){return c6t.coerce(t,r,f6t,i,a)}n(\"sunburstcolorway\",r.colorway),n(\"extendsunburstcolors\")}});var PE=ye((_D,Kke)=>{(function(e,t){typeof _D==\"object\"&&typeof Kke!=\"undefined\"?t(_D):(e=e||self,t(e.d3=e.d3||{}))})(_D,function(e){\"use strict\";function t(je,tt){return je.parent===tt.parent?1:2}function r(je){return je.reduce(n,0)/je.length}function n(je,tt){return je+tt.x}function i(je){return 1+je.reduce(a,0)}function a(je,tt){return Math.max(je,tt.y)}function o(je){for(var tt;tt=je.children;)je=tt[0];return je}function s(je){for(var tt;tt=je.children;)je=tt[tt.length-1];return je}function l(){var je=t,tt=1,xt=1,Ie=!1;function xe(ke){var vt,ir=0;ke.eachAfter(function($r){var di=$r.children;di?($r.x=r(di),$r.y=i(di)):($r.x=vt?ir+=je($r,vt):0,$r.y=0,vt=$r)});var ar=o(ke),vr=s(ke),ii=ar.x-je(ar,vr)/2,pi=vr.x+je(vr,ar)/2;return ke.eachAfter(Ie?function($r){$r.x=($r.x-ke.x)*tt,$r.y=(ke.y-$r.y)*xt}:function($r){$r.x=($r.x-ii)/(pi-ii)*tt,$r.y=(1-(ke.y?$r.y/ke.y:1))*xt})}return xe.separation=function(ke){return arguments.length?(je=ke,xe):je},xe.size=function(ke){return arguments.length?(Ie=!1,tt=+ke[0],xt=+ke[1],xe):Ie?null:[tt,xt]},xe.nodeSize=function(ke){return arguments.length?(Ie=!0,tt=+ke[0],xt=+ke[1],xe):Ie?[tt,xt]:null},xe}function u(je){var tt=0,xt=je.children,Ie=xt&&xt.length;if(!Ie)tt=1;else 
for(;--Ie>=0;)tt+=xt[Ie].value;je.value=tt}function c(){return this.eachAfter(u)}function f(je){var tt=this,xt,Ie=[tt],xe,ke,vt;do for(xt=Ie.reverse(),Ie=[];tt=xt.pop();)if(je(tt),xe=tt.children,xe)for(ke=0,vt=xe.length;ke=0;--xe)xt.push(Ie[xe]);return this}function d(je){for(var tt=this,xt=[tt],Ie=[],xe,ke,vt;tt=xt.pop();)if(Ie.push(tt),xe=tt.children,xe)for(ke=0,vt=xe.length;ke=0;)xt+=Ie[xe].value;tt.value=xt})}function _(je){return this.eachBefore(function(tt){tt.children&&tt.children.sort(je)})}function b(je){for(var tt=this,xt=p(tt,je),Ie=[tt];tt!==xt;)tt=tt.parent,Ie.push(tt);for(var xe=Ie.length;je!==xt;)Ie.splice(xe,0,je),je=je.parent;return Ie}function p(je,tt){if(je===tt)return je;var xt=je.ancestors(),Ie=tt.ancestors(),xe=null;for(je=xt.pop(),tt=Ie.pop();je===tt;)xe=je,je=xt.pop(),tt=Ie.pop();return xe}function k(){for(var je=this,tt=[je];je=je.parent;)tt.push(je);return tt}function E(){var je=[];return this.each(function(tt){je.push(tt)}),je}function S(){var je=[];return this.eachBefore(function(tt){tt.children||je.push(tt)}),je}function L(){var je=this,tt=[];return je.each(function(xt){xt!==je&&tt.push({source:xt.parent,target:xt})}),tt}function x(je,tt){var xt=new T(je),Ie=+je.value&&(xt.value=je.value),xe,ke=[xt],vt,ir,ar,vr;for(tt==null&&(tt=M);xe=ke.pop();)if(Ie&&(xe.value=+xe.data.value),(ir=tt(xe.data))&&(vr=ir.length))for(xe.children=new Array(vr),ar=vr-1;ar>=0;--ar)ke.push(vt=xe.children[ar]=new T(ir[ar])),vt.parent=xe,vt.depth=xe.depth+1;return xt.eachBefore(P)}function C(){return x(this).eachBefore(g)}function M(je){return je.children}function g(je){je.data=je.data.data}function P(je){var tt=0;do je.height=tt;while((je=je.parent)&&je.height<++tt)}function T(je){this.data=je,this.depth=this.height=0,this.parent=null}T.prototype=x.prototype={constructor:T,count:c,each:f,eachAfter:d,eachBefore:h,sum:v,sort:_,path:b,ancestors:k,descendants:E,leaves:S,links:L,copy:C};var z=Array.prototype.slice;function O(je){for(var tt=je.length,xt,Ie;tt;)Ie=Math.random()*tt--|0,xt=je[tt],je[tt]=je[Ie],je[Ie]=xt;return je}function V(je){for(var tt=0,xt=(je=O(z.call(je))).length,Ie=[],xe,ke;tt0&&xt*xt>Ie*Ie+xe*xe}function N(je,tt){for(var xt=0;xtar?(xe=(vr+ar-ke)/(2*vr),ir=Math.sqrt(Math.max(0,ar/vr-xe*xe)),xt.x=je.x-xe*Ie-ir*vt,xt.y=je.y-xe*vt+ir*Ie):(xe=(vr+ke-ar)/(2*vr),ir=Math.sqrt(Math.max(0,ke/vr-xe*xe)),xt.x=tt.x+xe*Ie-ir*vt,xt.y=tt.y+xe*vt+ir*Ie)):(xt.x=tt.x+xt.r,xt.y=tt.y)}function Ce(je,tt){var xt=je.r+tt.r-1e-6,Ie=tt.x-je.x,xe=tt.y-je.y;return xt>0&&xt*xt>Ie*Ie+xe*xe}function me(je){var tt=je._,xt=je.next._,Ie=tt.r+xt.r,xe=(tt.x*xt.r+xt.x*tt.r)/Ie,ke=(tt.y*xt.r+xt.y*tt.r)/Ie;return xe*xe+ke*ke}function ie(je){this._=je,this.next=null,this.previous=null}function Se(je){if(!(xe=je.length))return 0;var tt,xt,Ie,xe,ke,vt,ir,ar,vr,ii,pi;if(tt=je[0],tt.x=0,tt.y=0,!(xe>1))return tt.r;if(xt=je[1],tt.x=-xt.r,xt.x=tt.r,xt.y=0,!(xe>2))return tt.r+xt.r;Ee(xt,tt,Ie=je[2]),tt=new ie(tt),xt=new ie(xt),Ie=new ie(Ie),tt.next=Ie.previous=xt,xt.next=tt.previous=Ie,Ie.next=xt.previous=tt;e:for(ir=3;ir0)throw new Error(\"cycle\");return ir}return xt.id=function(Ie){return arguments.length?(je=Fe(Ie),xt):je},xt.parentId=function(Ie){return arguments.length?(tt=Fe(Ie),xt):tt},xt}function $e(je,tt){return je.parent===tt.parent?1:2}function St(je){var tt=je.children;return tt?tt[0]:je.t}function Qt(je){var tt=je.children;return tt?tt[tt.length-1]:je.t}function Vt(je,tt,xt){var Ie=xt/(tt.i-je.i);tt.c-=Ie,tt.s+=xt,je.c+=Ie,tt.z+=xt,tt.m+=xt}function _t(je){for(var 
tt=0,xt=0,Ie=je.children,xe=Ie.length,ke;--xe>=0;)ke=Ie[xe],ke.z+=tt,ke.m+=tt,tt+=ke.s+(xt+=ke.c)}function It(je,tt,xt){return je.a.parent===tt.parent?je.a:xt}function mt(je,tt){this._=je,this.parent=null,this.children=null,this.A=null,this.a=this,this.z=0,this.m=0,this.c=0,this.s=0,this.t=null,this.i=tt}mt.prototype=Object.create(T.prototype);function er(je){for(var tt=new mt(je,0),xt,Ie=[tt],xe,ke,vt,ir;xt=Ie.pop();)if(ke=xt._.children)for(xt.children=new Array(ir=ke.length),vt=ir-1;vt>=0;--vt)Ie.push(xe=xt.children[vt]=new mt(ke[vt],vt)),xe.parent=xt;return(tt.parent=new mt(null,0)).children=[tt],tt}function lr(){var je=$e,tt=1,xt=1,Ie=null;function xe(vr){var ii=er(vr);if(ii.eachAfter(ke),ii.parent.m=-ii.z,ii.eachBefore(vt),Ie)vr.eachBefore(ar);else{var pi=vr,$r=vr,di=vr;vr.eachBefore(function(qn){qn.x$r.x&&($r=qn),qn.depth>di.depth&&(di=qn)});var ji=pi===$r?1:je(pi,$r)/2,In=ji-pi.x,wi=tt/($r.x+ji+In),On=xt/(di.depth||1);vr.eachBefore(function(qn){qn.x=(qn.x+In)*wi,qn.y=qn.depth*On})}return vr}function ke(vr){var ii=vr.children,pi=vr.parent.children,$r=vr.i?pi[vr.i-1]:null;if(ii){_t(vr);var di=(ii[0].z+ii[ii.length-1].z)/2;$r?(vr.z=$r.z+je(vr._,$r._),vr.m=vr.z-di):vr.z=di}else $r&&(vr.z=$r.z+je(vr._,$r._));vr.parent.A=ir(vr,$r,vr.parent.A||pi[0])}function vt(vr){vr._.x=vr.z+vr.parent.m,vr.m+=vr.parent.m}function ir(vr,ii,pi){if(ii){for(var $r=vr,di=vr,ji=ii,In=$r.parent.children[0],wi=$r.m,On=di.m,qn=ji.m,Fn=In.m,ra;ji=Qt(ji),$r=St($r),ji&&$r;)In=St(In),di=Qt(di),di.a=vr,ra=ji.z+qn-$r.z-wi+je(ji._,$r._),ra>0&&(Vt(It(ji,vr,pi),vr,ra),wi+=ra,On+=ra),qn+=ji.m,wi+=$r.m,Fn+=In.m,On+=di.m;ji&&!Qt(di)&&(di.t=ji,di.m+=qn-On),$r&&!St(In)&&(In.t=$r,In.m+=wi-Fn,pi=vr)}return pi}function ar(vr){vr.x*=tt,vr.y=vr.depth*xt}return xe.separation=function(vr){return arguments.length?(je=vr,xe):je},xe.size=function(vr){return arguments.length?(Ie=!1,tt=+vr[0],xt=+vr[1],xe):Ie?null:[tt,xt]},xe.nodeSize=function(vr){return arguments.length?(Ie=!0,tt=+vr[0],xt=+vr[1],xe):Ie?[tt,xt]:null},xe}function Tr(je,tt,xt,Ie,xe){for(var ke=je.children,vt,ir=-1,ar=ke.length,vr=je.value&&(xe-xt)/je.value;++irqn&&(qn=vr),Ut=wi*wi*la,Fn=Math.max(qn/Ut,Ut/On),Fn>ra){wi-=vr;break}ra=Fn}vt.push(ar={value:wi,dice:di1?Ie:1)},xt}(Lr);function Vr(){var je=Br,tt=!1,xt=1,Ie=1,xe=[0],ke=Pe,vt=Pe,ir=Pe,ar=Pe,vr=Pe;function ii($r){return $r.x0=$r.y0=0,$r.x1=xt,$r.y1=Ie,$r.eachBefore(pi),xe=[0],tt&&$r.eachBefore(Zt),$r}function pi($r){var di=xe[$r.depth],ji=$r.x0+di,In=$r.y0+di,wi=$r.x1-di,On=$r.y1-di;wi=$r-1){var qn=ke[pi];qn.x0=ji,qn.y0=In,qn.x1=wi,qn.y1=On;return}for(var Fn=vr[pi],ra=di/2+Fn,la=pi+1,Ut=$r-1;la>>1;vr[wt]On-In){var Er=(ji*nr+wi*rr)/di;ii(pi,la,rr,ji,In,Er,On),ii(la,$r,nr,Er,In,wi,On)}else{var Xr=(In*nr+On*rr)/di;ii(pi,la,rr,ji,In,wi,Xr),ii(la,$r,nr,ji,Xr,wi,On)}}}function Ge(je,tt,xt,Ie,xe){(je.depth&1?Tr:st)(je,tt,xt,Ie,xe)}var Je=function je(tt){function xt(Ie,xe,ke,vt,ir){if((ar=Ie._squarify)&&ar.ratio===tt)for(var ar,vr,ii,pi,$r=-1,di,ji=ar.length,In=Ie.value;++$r1?Ie:1)},xt}(Lr);e.cluster=l,e.hierarchy=x,e.pack=ce,e.packEnclose=V,e.packSiblings=Le,e.partition=lt,e.stratify=cr,e.tree=lr,e.treemap=Vr,e.treemapBinary=dt,e.treemapDice=st,e.treemapResquarify=Je,e.treemapSlice=Tr,e.treemapSliceDice=Ge,e.treemapSquarify=Br,Object.defineProperty(e,\"__esModule\",{value:!0})})});var RE=ye(IE=>{\"use strict\";var Jke=PE(),h6t=Eo(),SA=Dr(),d6t=tc().makeColorScaleFuncFromTrace,v6t=wA().makePullColorFn,p6t=wA().generateExtendedColors,g6t=tc().calc,m6t=fs().ALMOST_EQUAL,y6t={},_6t={},x6t={};IE.calc=function(e,t){var 
r=e._fullLayout,n=t.ids,i=SA.isArrayOrTypedArray(n),a=t.labels,o=t.parents,s=t.values,l=SA.isArrayOrTypedArray(s),u=[],c={},f={},h=function(H,N){c[H]?c[H].push(N):c[H]=[N],f[N]=1},d=function(H){return H||typeof H==\"number\"},v=function(H){return!l||h6t(s[H])&&s[H]>=0},_,b,p;i?(_=Math.min(n.length,o.length),b=function(H){return d(n[H])&&v(H)},p=function(H){return String(n[H])}):(_=Math.min(a.length,o.length),b=function(H){return d(a[H])&&v(H)},p=function(H){return String(a[H])}),l&&(_=Math.min(_,s.length));for(var k=0;k<_;k++)if(b(k)){var E=p(k),S=d(o[k])?String(o[k]):\"\",L={i:k,id:E,pid:S,label:d(a[k])?String(a[k]):\"\"};l&&(L.v=+s[k]),u.push(L),h(S,E)}if(c[\"\"]){if(c[\"\"].length>1){for(var M=SA.randstr(),g=0;g{});function Gm(){}function eCe(){return this.rgb().formatHex()}function k6t(){return this.rgb().formatHex8()}function C6t(){return sCe(this).formatHsl()}function tCe(){return this.rgb().formatRgb()}function j_(e){var t,r;return e=(e+\"\").trim().toLowerCase(),(t=b6t.exec(e))?(r=t[1].length,t=parseInt(t[1],16),r===6?rCe(t):r===3?new _d(t>>8&15|t>>4&240,t>>4&15|t&240,(t&15)<<4|t&15,1):r===8?bD(t>>24&255,t>>16&255,t>>8&255,(t&255)/255):r===4?bD(t>>12&15|t>>8&240,t>>8&15|t>>4&240,t>>4&15|t&240,((t&15)<<4|t&15)/255):null):(t=w6t.exec(e))?new _d(t[1],t[2],t[3],1):(t=T6t.exec(e))?new _d(t[1]*255/100,t[2]*255/100,t[3]*255/100,1):(t=A6t.exec(e))?bD(t[1],t[2],t[3],t[4]):(t=S6t.exec(e))?bD(t[1]*255/100,t[2]*255/100,t[3]*255/100,t[4]):(t=M6t.exec(e))?aCe(t[1],t[2]/100,t[3]/100,1):(t=E6t.exec(e))?aCe(t[1],t[2]/100,t[3]/100,t[4]):Qke.hasOwnProperty(e)?rCe(Qke[e]):e===\"transparent\"?new _d(NaN,NaN,NaN,0):null}function rCe(e){return new _d(e>>16&255,e>>8&255,e&255,1)}function bD(e,t,r,n){return n<=0&&(e=t=r=NaN),new _d(e,t,r,n)}function FE(e){return e instanceof Gm||(e=j_(e)),e?(e=e.rgb(),new _d(e.r,e.g,e.b,e.opacity)):new _d}function EA(e,t,r,n){return arguments.length===1?FE(e):new _d(e,t,r,n==null?1:n)}function _d(e,t,r,n){this.r=+e,this.g=+t,this.b=+r,this.opacity=+n}function iCe(){return`#${E2(this.r)}${E2(this.g)}${E2(this.b)}`}function L6t(){return`#${E2(this.r)}${E2(this.g)}${E2(this.b)}${E2((isNaN(this.opacity)?1:this.opacity)*255)}`}function nCe(){let e=TD(this.opacity);return`${e===1?\"rgb(\":\"rgba(\"}${k2(this.r)}, ${k2(this.g)}, ${k2(this.b)}${e===1?\")\":`, ${e})`}`}function TD(e){return isNaN(e)?1:Math.max(0,Math.min(1,e))}function k2(e){return Math.max(0,Math.min(255,Math.round(e)||0))}function E2(e){return e=k2(e),(e<16?\"0\":\"\")+e.toString(16)}function aCe(e,t,r,n){return n<=0?e=t=r=NaN:r<=0||r>=1?e=t=NaN:t<=0&&(e=NaN),new Xg(e,t,r,n)}function sCe(e){if(e instanceof Xg)return new Xg(e.h,e.s,e.l,e.opacity);if(e instanceof Gm||(e=j_(e)),!e)return new Xg;if(e instanceof Xg)return e;e=e.rgb();var t=e.r/255,r=e.g/255,n=e.b/255,i=Math.min(t,r,n),a=Math.max(t,r,n),o=NaN,s=a-i,l=(a+i)/2;return s?(t===a?o=(r-n)/s+(r0&&l<1?0:o,new Xg(o,s,l,e.opacity)}function zE(e,t,r,n){return arguments.length===1?sCe(e):new Xg(e,t,r,n==null?1:n)}function Xg(e,t,r,n){this.h=+e,this.s=+t,this.l=+r,this.opacity=+n}function oCe(e){return e=(e||0)%360,e<0?e+360:e}function wD(e){return Math.max(0,Math.min(1,e||0))}function _W(e,t,r){return(e<60?t+(r-t)*e/60:e<180?r:e<240?t+(r-t)*(240-e)/60:t)*255}var H_,C2,MA,DE,Vm,b6t,w6t,T6t,A6t,S6t,M6t,E6t,Qke,AD=gu(()=>{xD();H_=.7,C2=1/H_,MA=\"\\\\s*([+-]?\\\\d+)\\\\s*\",DE=\"\\\\s*([+-]?(?:\\\\d*\\\\.)?\\\\d+(?:[eE][+-]?\\\\d+)?)\\\\s*\",Vm=\"\\\\s*([+-]?(?:\\\\d*\\\\.)?\\\\d+(?:[eE][+-]?\\\\d+)?)%\\\\s*\",b6t=/^#([0-9a-f]{3,8})$/,w6t=new 
RegExp(`^rgb\\\\(${MA},${MA},${MA}\\\\)$`),T6t=new RegExp(`^rgb\\\\(${Vm},${Vm},${Vm}\\\\)$`),A6t=new RegExp(`^rgba\\\\(${MA},${MA},${MA},${DE}\\\\)$`),S6t=new RegExp(`^rgba\\\\(${Vm},${Vm},${Vm},${DE}\\\\)$`),M6t=new RegExp(`^hsl\\\\(${DE},${Vm},${Vm}\\\\)$`),E6t=new RegExp(`^hsla\\\\(${DE},${Vm},${Vm},${DE}\\\\)$`),Qke={aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,rebeccapurple:6697881,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074};Zy(Gm,j_,{copy(e){return Object.assign(new this.constructor,this,e)},displayable(){return this.rgb().displayable()},hex:eCe,formatHex:eCe,formatHex8:k6t,formatHsl:C6t,formatRgb:tCe,toString:tCe});Zy(_d,EA,G_(Gm,{brighter(e){return e=e==null?C2:Math.pow(C2,e),new _d(this.r*e,this.g*e,this.b*e,this.opacity)},darker(e){return e=e==null?H_:Math.pow(H_,e),new _d(this.r*e,this.g*e,this.b*e,this.opacity)},rgb(){return this},clamp(){return new 
_d(k2(this.r),k2(this.g),k2(this.b),TD(this.opacity))},displayable(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:iCe,formatHex:iCe,formatHex8:L6t,formatRgb:nCe,toString:nCe}));Zy(Xg,zE,G_(Gm,{brighter(e){return e=e==null?C2:Math.pow(C2,e),new Xg(this.h,this.s,this.l*e,this.opacity)},darker(e){return e=e==null?H_:Math.pow(H_,e),new Xg(this.h,this.s,this.l*e,this.opacity)},rgb(){var e=this.h%360+(this.h<0)*360,t=isNaN(e)||isNaN(this.s)?0:this.s,r=this.l,n=r+(r<.5?r:1-r)*t,i=2*r-n;return new _d(_W(e>=240?e-240:e+120,i,n),_W(e,i,n),_W(e<120?e+240:e-120,i,n),this.opacity)},clamp(){return new Xg(oCe(this.h),wD(this.s),wD(this.l),TD(this.opacity))},displayable(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl(){let e=TD(this.opacity);return`${e===1?\"hsl(\":\"hsla(\"}${oCe(this.h)}, ${wD(this.s)*100}%, ${wD(this.l)*100}%${e===1?\")\":`, ${e})`}`}}))});var SD,MD,xW=gu(()=>{SD=Math.PI/180,MD=180/Math.PI});function dCe(e){if(e instanceof Hm)return new Hm(e.l,e.a,e.b,e.opacity);if(e instanceof Yy)return vCe(e);e instanceof _d||(e=FE(e));var t=AW(e.r),r=AW(e.g),n=AW(e.b),i=bW((.2225045*t+.7168786*r+.0606169*n)/uCe),a,o;return t===r&&r===n?a=o=i:(a=bW((.4360747*t+.3850649*r+.1430804*n)/lCe),o=bW((.0139322*t+.0971045*r+.7141733*n)/cCe)),new Hm(116*i-16,500*(a-i),200*(i-o),e.opacity)}function CA(e,t,r,n){return arguments.length===1?dCe(e):new Hm(e,t,r,n==null?1:n)}function Hm(e,t,r,n){this.l=+e,this.a=+t,this.b=+r,this.opacity=+n}function bW(e){return e>P6t?Math.pow(e,1/3):e/hCe+fCe}function wW(e){return e>kA?e*e*e:hCe*(e-fCe)}function TW(e){return 255*(e<=.0031308?12.92*e:1.055*Math.pow(e,1/2.4)-.055)}function AW(e){return(e/=255)<=.04045?e/12.92:Math.pow((e+.055)/1.055,2.4)}function I6t(e){if(e instanceof Yy)return new Yy(e.h,e.c,e.l,e.opacity);if(e instanceof Hm||(e=dCe(e)),e.a===0&&e.b===0)return new Yy(NaN,0{xD();AD();xW();ED=18,lCe=.96422,uCe=1,cCe=.82521,fCe=4/29,kA=6/29,hCe=3*kA*kA,P6t=kA*kA*kA;Zy(Hm,CA,G_(Gm,{brighter(e){return new Hm(this.l+ED*(e==null?1:e),this.a,this.b,this.opacity)},darker(e){return new Hm(this.l-ED*(e==null?1:e),this.a,this.b,this.opacity)},rgb(){var e=(this.l+16)/116,t=isNaN(this.a)?e:e+this.a/500,r=isNaN(this.b)?e:e-this.b/200;return t=lCe*wW(t),e=uCe*wW(e),r=cCe*wW(r),new _d(TW(3.1338561*t-1.6168667*e-.4906146*r),TW(-.9787684*t+1.9161415*e+.033454*r),TW(.0719453*t-.2289914*e+1.4052427*r),this.opacity)}}));Zy(Yy,OE,G_(Gm,{brighter(e){return new Yy(this.h,this.c,this.l+ED*(e==null?1:e),this.opacity)},darker(e){return new Yy(this.h,this.c,this.l-ED*(e==null?1:e),this.opacity)},rgb(){return vCe(this).rgb()}}))});function R6t(e){if(e instanceof L2)return new L2(e.h,e.s,e.l,e.opacity);e instanceof _d||(e=FE(e));var t=e.r/255,r=e.g/255,n=e.b/255,i=(yCe*n+gCe*t-mCe*r)/(yCe+gCe-mCe),a=n-i,o=(qE*(r-i)-MW*a)/kD,s=Math.sqrt(o*o+a*a)/(qE*i*(1-i)),l=s?Math.atan2(o,a)*MD-120:NaN;return new L2(l<0?l+360:l,s,i,e.opacity)}function LA(e,t,r,n){return arguments.length===1?R6t(e):new L2(e,t,r,n==null?1:n)}function L2(e,t,r,n){this.h=+e,this.s=+t,this.l=+r,this.opacity=+n}var _Ce,SW,MW,kD,qE,gCe,mCe,yCe,xCe=gu(()=>{xD();AD();xW();_Ce=-.14861,SW=1.78277,MW=-.29227,kD=-.90649,qE=1.97294,gCe=qE*kD,mCe=qE*SW,yCe=SW*MW-kD*_Ce;Zy(L2,LA,G_(Gm,{brighter(e){return e=e==null?C2:Math.pow(C2,e),new L2(this.h,this.s,this.l*e,this.opacity)},darker(e){return e=e==null?H_:Math.pow(H_,e),new L2(this.h,this.s,this.l*e,this.opacity)},rgb(){var 
" ] }, - "metadata": {}, - "output_type": "display_data", "jetTransient": { "display_id": null - } + }, + "metadata": {}, + "output_type": "display_data" } ], - "execution_count": 4 - }, - { - "cell_type": "code", - "id": "3zsi1g8bokg", - "metadata": { - "execution": { - "iopub.execute_input": "2025-12-13T23:11:06.484947Z", - "iopub.status.busy": "2025-12-13T23:11:06.484493Z", - "iopub.status.idle": "2025-12-13T23:11:06.548165Z", - "shell.execute_reply": "2025-12-13T23:11:06.546796Z" - }, - "ExecuteTime": { - "end_time": "2025-12-13T23:13:28.869432Z", - "start_time": "2025-12-13T23:13:28.785666Z" - } - }, "source": [ - "# Calculate error metrics for each configuration\n", - "metrics = []\n", - "for n, clustering in clustering_results.items():\n", - " original = clustering.original_data['HeatDemand(Q_th)|fixed_relative_profile'].values\n", - " aggregated = clustering.aggregated_data['HeatDemand(Q_th)|fixed_relative_profile'].values\n", + "# Test different numbers of clusters\n", + "cluster_configs = [2, 3, 4, 5]\n", + "clustering_results = {}\n", "\n", - " # Calculate metrics\n", - " rmse = np.sqrt(np.mean((original - aggregated) ** 2))\n", - " mae = np.mean(np.abs(original - aggregated))\n", - " max_error = np.max(np.abs(original - aggregated))\n", - " correlation = np.corrcoef(original, aggregated)[0, 1]\n", + "for n in cluster_configs:\n", + " fs_test = fs_demo.copy()\n", + " fs_clustered = fs_test.transform.cluster(n_clusters=n, cluster_duration='1D')\n", + " clustering_results[n] = fs_clustered._clustering_info['clustering']\n", "\n", - " metrics.append(\n", - " {\n", - " 'Clusters': n,\n", - " 'RMSE [MW]': rmse,\n", - " 'MAE [MW]': mae,\n", - " 'Max Error [MW]': max_error,\n", - " 'Correlation': correlation,\n", - " }\n", - " )\n", + "# Compare the aggregated heat demand for each configuration\n", + "fig = make_subplots(\n", + " rows=2,\n", + " cols=2,\n", + " subplot_titles=[f'{n} Typical Days' for n in cluster_configs],\n", + " shared_xaxes=True,\n", + " shared_yaxes=True,\n", + " vertical_spacing=0.12,\n", + " horizontal_spacing=0.08,\n", + ")\n", "\n", - "metrics_df = pd.DataFrame(metrics).set_index('Clusters')\n", - "metrics_df.style.format(\n", - " {\n", - " 'RMSE [MW]': '{:.2f}',\n", - " 'MAE [MW]': '{:.2f}',\n", - " 'Max Error [MW]': '{:.2f}',\n", - " 'Correlation': '{:.4f}',\n", - " }\n", - ")" - ], - "outputs": [ + "for i, (_n, clustering) in enumerate(clustering_results.items()):\n", + " row, col = divmod(i, 2)\n", + " row += 1\n", + " col += 1\n", + "\n", + " # Original data\n", + " original = clustering.original_data['HeatDemand(Q_th)|fixed_relative_profile']\n", + " aggregated = clustering.aggregated_data['HeatDemand(Q_th)|fixed_relative_profile']\n", + "\n", + " fig.add_trace(\n", + " go.Scatter(\n", + " x=list(range(len(original))),\n", + " y=original.values,\n", + " name='Original',\n", + " line=dict(color='lightgray'),\n", + " showlegend=(i == 0),\n", + " ),\n", + " row=row,\n", + " col=col,\n", + " )\n", + " fig.add_trace(\n", + " go.Scatter(\n", + " x=list(range(len(aggregated))),\n", + " y=aggregated.values,\n", + " name='Aggregated',\n", + " line=dict(color='blue', width=2),\n", + " showlegend=(i == 0),\n", + " ),\n", + " row=row,\n", + " col=col,\n", + " )\n", + "\n", + "fig.update_layout(\n", + " title='Heat Demand: Original vs Clustered Data',\n", + " height=500,\n", + " legend=dict(orientation='h', yanchor='bottom', y=1.02),\n", + ")\n", + "fig.update_xaxes(title_text='Timestep', row=2)\n", + "fig.update_yaxes(title_text='MW', col=1)\n", + "fig.show()" + ] + 
}, + { + "cell_type": "code", + "execution_count": 5, + "id": "3zsi1g8bokg", + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T01:03:19.061384Z", + "start_time": "2025-12-14T01:03:18.929418Z" + }, + "execution": { + "iopub.execute_input": "2025-12-13T23:11:06.484947Z", + "iopub.status.busy": "2025-12-13T23:11:06.484493Z", + "iopub.status.idle": "2025-12-13T23:11:06.548165Z", + "shell.execute_reply": "2025-12-13T23:11:06.546796Z" + } + }, + "outputs": [ { "data": { - "text/plain": [ - "" - ], "text/html": [ "\n", - "\n", + "
\n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", " \n", @@ -4376,35 +4332,38 @@ " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", "
 RMSE [MW]MAE [MW]Max Error [MW]CorrelationRMSE [MW]MAE [MW]Max Error [MW]Correlation
Clusters
21.871.367.890.998421.871.367.890.9984
31.290.767.890.999331.290.767.890.9993
40.650.373.140.999840.650.373.140.9998
50.150.100.381.000050.150.100.381.0000
\n" + ], + "text/plain": [ + "" ] }, "execution_count": 5, @@ -4412,7 +4371,39 @@ "output_type": "execute_result" } ], - "execution_count": 5 + "source": [ + "# Calculate error metrics for each configuration\n", + "metrics = []\n", + "for n, clustering in clustering_results.items():\n", + " original = clustering.original_data['HeatDemand(Q_th)|fixed_relative_profile'].values\n", + " aggregated = clustering.aggregated_data['HeatDemand(Q_th)|fixed_relative_profile'].values\n", + "\n", + " # Calculate metrics\n", + " rmse = np.sqrt(np.mean((original - aggregated) ** 2))\n", + " mae = np.mean(np.abs(original - aggregated))\n", + " max_error = np.max(np.abs(original - aggregated))\n", + " correlation = np.corrcoef(original, aggregated)[0, 1]\n", + "\n", + " metrics.append(\n", + " {\n", + " 'Clusters': n,\n", + " 'RMSE [MW]': rmse,\n", + " 'MAE [MW]': mae,\n", + " 'Max Error [MW]': max_error,\n", + " 'Correlation': correlation,\n", + " }\n", + " )\n", + "\n", + "metrics_df = pd.DataFrame(metrics).set_index('Clusters')\n", + "metrics_df.style.format(\n", + " {\n", + " 'RMSE [MW]': '{:.2f}',\n", + " 'MAE [MW]': '{:.2f}',\n", + " 'Max Error [MW]': '{:.2f}',\n", + " 'Correlation': '{:.4f}',\n", + " }\n", + ")" + ] }, { "cell_type": "markdown", @@ -4426,19 +4417,29 @@ }, { "cell_type": "code", + "execution_count": 6, "id": "6", "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T01:03:19.157280Z", + "start_time": "2025-12-14T01:03:19.123190Z" + }, "execution": { "iopub.execute_input": "2025-12-13T23:11:06.554382Z", "iopub.status.busy": "2025-12-13T23:11:06.554075Z", "iopub.status.idle": "2025-12-13T23:11:06.569809Z", "shell.execute_reply": "2025-12-13T23:11:06.569118Z" - }, - "ExecuteTime": { - "end_time": "2025-12-13T23:13:30.027254Z", - "start_time": "2025-12-13T23:13:29.977152Z" } }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "System: 672 timesteps\n" + ] + } + ], "source": [ "def build_system(timesteps, heat_demand, electricity_demand, electricity_price, gas_price):\n", " \"\"\"Build a district heating system.\"\"\"\n", @@ -4513,17 +4514,7 @@ "\n", "flow_system = build_system(timesteps, heat_demand, electricity_demand, electricity_price, gas_price)\n", "print(f'System: {len(timesteps)} timesteps')" - ], - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "System: 672 timesteps\n" - ] - } - ], - "execution_count": 6 + ] }, { "cell_type": "markdown", @@ -4537,47 +4528,35 @@ }, { "cell_type": "code", + "execution_count": 7, "id": "8", "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T01:03:31.221135Z", + "start_time": "2025-12-14T01:03:19.205997Z" + }, "execution": { "iopub.execute_input": "2025-12-13T23:11:06.574029Z", "iopub.status.busy": "2025-12-13T23:11:06.573839Z", "iopub.status.idle": "2025-12-13T23:11:16.402730Z", "shell.execute_reply": "2025-12-13T23:11:16.392580Z" - }, - "jupyter": { - "is_executing": true - }, - "ExecuteTime": { - "start_time": "2025-12-13T23:13:30.491960Z" } }, - "source": [ - "solver = fx.solvers.HighsSolver(mip_gap=0.01)\n", - "\n", - "start = timeit.default_timer()\n", - "fs_full = flow_system.copy()\n", - "fs_full.optimize(solver)\n", - "time_full = timeit.default_timer() - start\n", - "\n", - "print(f'Full optimization: {time_full:.2f} seconds')\n", - "print(f'Cost: {fs_full.solution[\"costs\"].item():,.0f} €')" - ], "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "\u001B[2m2025-12-14 00:13:30.495\u001B[0m \u001B[33mWARNING \u001B[0m │ FlowSystem is not 
connected_and_transformed. Connecting and transforming data now.\n" + "\u001b[2m2025-12-14 02:03:19.211\u001b[0m \u001b[33mWARNING \u001b[0m │ FlowSystem is not connected_and_transformed. Connecting and transforming data now.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 71/71 [00:00<00:00, 101.01it/s]\n", - "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 51/51 [00:00<00:00, 243.81it/s]\n", - "Writing binary variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 13/13 [00:00<00:00, 514.63it/s]\n" + "Writing constraints.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 71/71 [00:01<00:00, 68.04it/s]\n", + "Writing continuous variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 51/51 [00:00<00:00, 349.84it/s]\n", + "Writing binary variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 13/13 [00:00<00:00, 607.50it/s]\n" ] }, { @@ -4585,7 +4564,7 @@ "output_type": "stream", "text": [ "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", - "MIP linopy-problem-ag1luz0e has 26909 rows; 24221 cols; 84703 nonzeros; 8736 integer variables (8736 binary)\n", + "MIP linopy-problem-3pp56_27 has 26909 rows; 24221 cols; 84703 nonzeros; 8736 integer variables (8736 binary)\n", "Coefficient ranges:\n", " Matrix [1e-05, 1e+03]\n", " Cost [1e+00, 1e+00]\n", @@ -4611,13 +4590,48 @@ " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. | LpIters Time\n", "\n", - " 0 0 0 0.00% -91086.334692 inf inf 0 0 0 0 0.5s\n", - " 0 0 0 0.00% 510476.049542 inf inf 0 0 0 4614 0.6s\n", - " C 0 0 0 0.00% 510864.416955 602921.407557 15.27% 3667 970 22 6199 1.6s\n" + " 0 0 0 0.00% -91086.334692 inf inf 0 0 0 0 0.7s\n", + " 0 0 0 0.00% 510476.049542 inf inf 0 0 0 4614 0.9s\n", + " C 0 0 0 0.00% 510864.416955 602921.407557 15.27% 3667 970 22 6199 2.1s\n", + " L 0 0 0 0.00% 510864.478625 510865.621324 0.00% 4102 1011 22 7020 6.8s\n", + " 1 0 1 100.00% 510864.478782 510865.621324 0.00% 4102 1011 22 8787 6.9s\n", + "\n", + "Solving report\n", + " Model linopy-problem-3pp56_27\n", + " Status Optimal\n", + " Primal bound 510865.621324\n", + " Dual bound 510864.478782\n", + " Gap 0.000224% (tolerance: 1%)\n", + " P-D integral 0.722979088888\n", + " Solution status feasible\n", + " 510865.621324 (objective)\n", + " 0 (bound viol.)\n", + " 4.75782431897e-07 (int. 
viol.)\n", + " 0 (row viol.)\n", + " Timing 6.86\n", + " Max sub-MIP depth 3\n", + " Nodes 1\n", + " Repair LPs 0\n", + " LP iterations 8787\n", + " 0 (strong br.)\n", + " 2406 (separation)\n", + " 1740 (heuristics)\n", + "Full optimization: 12.00 seconds\n", + "Cost: 510,866 €\n" ] } ], - "execution_count": null + "source": [ + "solver = fx.solvers.HighsSolver(mip_gap=0.01)\n", + "\n", + "start = timeit.default_timer()\n", + "fs_full = flow_system.copy()\n", + "fs_full.optimize(solver)\n", + "time_full = timeit.default_timer() - start\n", + "\n", + "print(f'Full optimization: {time_full:.2f} seconds')\n", + "print(f'Cost: {fs_full.solution[\"costs\"].item():,.0f} €')" + ] }, { "cell_type": "markdown", @@ -4638,8 +4652,13 @@ }, { "cell_type": "code", + "execution_count": 8, "id": "10", "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T01:03:45.862590Z", + "start_time": "2025-12-14T01:03:31.305197Z" + }, "execution": { "iopub.execute_input": "2025-12-13T23:11:16.424863Z", "iopub.status.busy": "2025-12-13T23:11:16.421861Z", @@ -4647,6 +4666,81 @@ "shell.execute_reply": "2025-12-13T23:11:23.252897Z" } }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Writing constraints.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 99/99 [00:00<00:00, 106.56it/s]\n", + "Writing continuous variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 51/51 [00:00<00:00, 341.57it/s]\n", + "Writing binary variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 13/13 [00:00<00:00, 252.77it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", + "MIP linopy-problem-2b32w568 has 34889 rows; 24221 cols; 100663 nonzeros; 8736 integer variables (8736 binary)\n", + "Coefficient ranges:\n", + " Matrix [1e-05, 1e+03]\n", + " Cost [1e+00, 1e+00]\n", + " Bound [1e+00, 1e+03]\n", + " RHS [1e-05, 1e+02]\n", + "WARNING: Problem has some excessively small row bounds\n", + "Presolving model\n", + "17852 rows, 7835 cols, 46161 nonzeros 0s\n", + "8771 rows, 6538 cols, 26638 nonzeros 0s\n", + "7501 rows, 5532 cols, 24162 nonzeros 0s\n", + "Presolve reductions: rows 7501(-27388); columns 5532(-18689); nonzeros 24162(-76501) \n", + "\n", + "Solving MIP model with:\n", + " 7501 rows\n", + " 5532 cols (4223 binary, 0 integer, 0 implied int., 1309 continuous, 0 domain fixed)\n", + " 24162 nonzeros\n", + "\n", + "Src: B => Branching; C => Central rounding; F => Feasibility pump; H => Heuristic;\n", + " I => Shifting; J => Feasibility jump; L => Sub-MIP; P => Empty MIP; R => Randomized rounding;\n", + " S => Solve LP; T => Evaluate node; U => Unbounded; X => User solution; Y => HiGHS solution;\n", + " Z => ZI Round; l => Trivial lower; p => Trivial point; u => Trivial upper; z => Trivial zero\n", + "\n", + " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", + "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. 
| LpIters Time\n", + "\n", + " 0 0 0 0.00% -134515.631486 inf inf 0 0 0 0 0.7s\n", + " 0 0 0 0.00% 510534.808573 inf inf 0 0 0 2776 0.8s\n", + "HighsMipSolverData::transformNewIntegerFeasibleSolution tmpSolver.run();\n", + "WARNING: Solution with objective 626117 has untransformed violations: bound = 9.025e-06; integrality = 0; row = 9.025e-06\n", + "HighsMipSolverData::transformNewIntegerFeasibleSolution tmpSolver.run();\n", + " L 0 0 0 0.00% 511015.64394 511017.103333 0.00% 2825 647 36 3686 8.3s\n", + " 1 0 1 100.00% 511015.64422 511017.103333 0.00% 2825 647 36 4951 8.3s\n", + "\n", + "Solving report\n", + " Model linopy-problem-2b32w568\n", + " Status Optimal\n", + " Primal bound 511017.103333\n", + " Dual bound 511015.64422\n", + " Gap 0.000286% (tolerance: 1%)\n", + " P-D integral 1.28790047777e-07\n", + " Solution status feasible\n", + " 511017.103333 (objective)\n", + " 0 (bound viol.)\n", + " 0 (int. viol.)\n", + " 0 (row viol.)\n", + " Timing 8.32\n", + " Max sub-MIP depth 4\n", + " Nodes 1\n", + " Repair LPs 2 (1 feasible; 367 iterations)\n", + " LP iterations 4951\n", + " 0 (strong br.)\n", + " 910 (separation)\n", + " 1247 (heuristics)\n", + "Clustered optimization: 14.54 seconds\n", + "Cost: 511,017 €\n", + "Speedup: 0.8x\n" + ] + } + ], "source": [ "start = timeit.default_timer()\n", "\n", @@ -4662,9 +4756,7 @@ "print(f'Clustered optimization: {time_clustered:.2f} seconds')\n", "print(f'Cost: {fs_clustered.solution[\"costs\"].item():,.0f} €')\n", "print(f'Speedup: {time_full / time_clustered:.1f}x')" - ], - "outputs": [], - "execution_count": null + ] }, { "cell_type": "markdown", @@ -4676,8 +4768,13 @@ }, { "cell_type": "code", + "execution_count": 9, "id": "12", "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T01:03:46.373652Z", + "start_time": "2025-12-14T01:03:46.196503Z" + }, "execution": { "iopub.execute_input": "2025-12-13T23:11:23.259936Z", "iopub.status.busy": "2025-12-13T23:11:23.259776Z", @@ -4685,6 +4782,49 @@ "shell.execute_reply": "2025-12-13T23:11:23.267185Z" } }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
                   | Time [s] | Cost [€] | Cost Gap [%] | Speedup
Full (baseline)    | 12.00    | 510,866  | 0.00         | 1.0x
Clustered (4 days) | 14.54    | 511,017  | 0.03         | 0.8x
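The gap and speedup columns can be re-derived directly from the two solved systems. A minimal sketch, assuming `fs_full`, `fs_clustered`, `time_full`, and `time_clustered` from the surrounding cells (the notebook's own comparison code follows below):

```python
# Sketch only: recompute the summary columns from the two solved systems.
cost_full = fs_full.solution['costs'].item()
cost_clustered = fs_clustered.solution['costs'].item()

cost_gap_pct = (cost_clustered - cost_full) / cost_full * 100  # ~0.03 % here
speedup = time_full / time_clustered                           # ~0.8x here
print(f'Cost gap: {cost_gap_pct:.2f} % | Speedup: {speedup:.1f}x')
```

Note that the clustered run is actually slower on this one-week problem: per the HiGHS logs above it has more rows than the full model (34,889 vs 26,909 at the same 24,221 columns), since inter-cluster linking constraints are added while the timestep count is unchanged. The payoff appears on long horizons where clustering shrinks the model.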
\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "results = {\n", " 'Full (baseline)': {'Time [s]': time_full, 'Cost [€]': fs_full.solution['costs'].item()},\n", @@ -4705,9 +4845,178 @@ " 'Speedup': '{:.1f}x',\n", " }\n", ")" + ] + }, + { + "cell_type": "markdown", + "id": "mn99rfcupf", + "metadata": {}, + "source": "## Multi-Period Clustering\n\nFor multi-year investment studies, clustering is applied **independently per period** (year). Each year gets its own set of typical days:" + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "24vgkxoeyqz", + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T01:03:54.299527Z", + "start_time": "2025-12-14T01:03:46.470960Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Multi-period system: 672 timesteps × 2 periods\n", + "\u001b[2m2025-12-14 02:03:46.564\u001b[0m \u001b[33mWARNING \u001b[0m │ FlowSystem is not connected_and_transformed. Connecting and transforming data now.\n", + "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", + "LP linopy-problem-o2v4edxp has 11124 rows; 9428 cols; 28956 nonzeros\n", + "Coefficient ranges:\n", + " Matrix [2e-01, 8e+00]\n", + " Cost [1e+00, 1e+00]\n", + " Bound [1e+02, 5e+02]\n", + " RHS [0e+00, 0e+00]\n", + "Presolving model\n", + "0 rows, 0 cols, 0 nonzeros 0s\n", + "0 rows, 0 cols, 0 nonzeros 0s\n", + "Presolve reductions: rows 0(-11124); columns 0(-9428); nonzeros 0(-28956) - Reduced to empty\n", + "Performed postsolve\n", + "Solving the original LP from the solution after postsolve\n", + "\n", + "Model name : linopy-problem-o2v4edxp\n", + "Model status : Optimal\n", + "Objective value : 2.4758192703e+06\n", + "P-D objective error : 6.5829287137e-16\n", + "HiGHS run time : 0.13\n", + "Multi-period clustered cost: 2,475,819 €\n" + ] + } + ], + "source": [ + "# Create a multi-period FlowSystem (simulating 2 years)\n", + "periods = pd.Index([2025, 2026], name='period')\n", + "\n", + "fs_multiperiod = fx.FlowSystem(timesteps, periods=periods)\n", + "fs_multiperiod.add_elements(\n", + " fx.Bus('Heat'),\n", + " fx.Bus('Gas'),\n", + " fx.Effect('costs', '€', 'Total Costs', is_standard=True, is_objective=True),\n", + " fx.linear_converters.Boiler(\n", + " 'Boiler',\n", + " thermal_efficiency=0.85,\n", + " thermal_flow=fx.Flow('Q_th', bus='Heat', size=300),\n", + " fuel_flow=fx.Flow('Q_fu', bus='Gas'),\n", + " ),\n", + " fx.Source(\n", + " 'GasGrid',\n", + " outputs=[fx.Flow('Q_Gas', bus='Gas', size=500, effects_per_flow_hour={'costs': gas_price})],\n", + " ),\n", + " fx.Sink('HeatDemand', inputs=[fx.Flow('Q_th', bus='Heat', size=1, fixed_relative_profile=heat_demand)]),\n", + ")\n", + "\n", + "print(f'Multi-period system: {len(timesteps)} timesteps × {len(periods)} periods')\n", + "\n", + "# Cluster - each period gets clustered independently\n", + "fs_mp_clustered = fs_multiperiod.transform.cluster(n_clusters=3, cluster_duration='1D')\n", + "fs_mp_clustered.optimize(solver)\n", + "\n", + "print(f'Multi-period clustered cost: {fs_mp_clustered.solution[\"costs\"].sum().item():,.0f} €')" + ] + }, + { + "cell_type": "markdown", + "id": "0qjtoobc40uo", + "metadata": {}, + "source": "## Multi-Scenario Clustering\n\nFor scenario-based analyses, clustering is applied **independently per scenario**:" + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "36269qvz7ti", + "metadata": { + "ExecuteTime": { + "end_time": 
"2025-12-14T01:04:01.667341Z", + "start_time": "2025-12-14T01:03:54.503092Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Multi-scenario system: 672 timesteps × 2 scenarios\n", + "\u001b[2m2025-12-14 02:03:54.554\u001b[0m \u001b[33mWARNING \u001b[0m │ FlowSystem is not connected_and_transformed. Connecting and transforming data now.\n", + "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", + "LP linopy-problem-6jfhyhl1 has 11124 rows; 9428 cols; 28956 nonzeros\n", + "Coefficient ranges:\n", + " Matrix [2e-01, 8e+00]\n", + " Cost [5e-01, 5e-01]\n", + " Bound [1e+02, 6e+02]\n", + " RHS [0e+00, 0e+00]\n", + "Presolving model\n", + "0 rows, 0 cols, 0 nonzeros 0s\n", + "0 rows, 0 cols, 0 nonzeros 0s\n", + "Presolve reductions: rows 0(-11124); columns 0(-9428); nonzeros 0(-28956) - Reduced to empty\n", + "Performed postsolve\n", + "Solving the original LP from the solution after postsolve\n", + "\n", + "Model name : linopy-problem-6jfhyhl1\n", + "Model status : Optimal\n", + "Objective value : 1.4235960804e+06\n", + "P-D objective error : 2.4532649930e-16\n", + "HiGHS run time : 0.10\n", + "Multi-scenario clustered cost: 2,847,192 €\n" + ] + } ], - "outputs": [], - "execution_count": null + "source": [ + "# Create a multi-scenario FlowSystem\n", + "scenarios = pd.Index(['base', 'high_demand'], name='scenario')\n", + "\n", + "# Different demand profiles per scenario\n", + "heat_demand_high = heat_demand * 1.3 # 30% higher demand\n", + "\n", + "fs_multiscenario = fx.FlowSystem(timesteps, scenarios=scenarios)\n", + "fs_multiscenario.add_elements(\n", + " fx.Bus('Heat'),\n", + " fx.Bus('Gas'),\n", + " fx.Effect('costs', '€', 'Total Costs', is_standard=True, is_objective=True),\n", + " fx.linear_converters.Boiler(\n", + " 'Boiler',\n", + " thermal_efficiency=0.85,\n", + " thermal_flow=fx.Flow('Q_th', bus='Heat', size=400),\n", + " fuel_flow=fx.Flow('Q_fu', bus='Gas'),\n", + " ),\n", + " fx.Source(\n", + " 'GasGrid',\n", + " outputs=[fx.Flow('Q_Gas', bus='Gas', size=600, effects_per_flow_hour={'costs': gas_price})],\n", + " ),\n", + " # Scenario-dependent demand\n", + " fx.Sink(\n", + " 'HeatDemand',\n", + " inputs=[\n", + " fx.Flow(\n", + " 'Q_th',\n", + " bus='Heat',\n", + " size=1,\n", + " fixed_relative_profile=fx.TimeSeriesData(\n", + " np.stack([heat_demand, heat_demand_high], axis=1),\n", + " dims=['time', 'scenario'],\n", + " ),\n", + " )\n", + " ],\n", + " ),\n", + ")\n", + "\n", + "print(f'Multi-scenario system: {len(timesteps)} timesteps × {len(scenarios)} scenarios')\n", + "\n", + "# Cluster - each scenario gets clustered independently\n", + "fs_ms_clustered = fs_multiscenario.transform.cluster(n_clusters=3, cluster_duration='1D')\n", + "fs_ms_clustered.optimize(solver)\n", + "\n", + "print(f'Multi-scenario clustered cost: {fs_ms_clustered.solution[\"costs\"].sum().item():,.0f} €')" + ] }, { "cell_type": "markdown", @@ -4751,24 +5060,7 @@ "cell_type": "markdown", "id": "24", "metadata": {}, - "source": [ - "## Summary\n", - "\n", - "You learned how to use **`transform.cluster()`** to identify typical periods and reduce computational complexity.\n", - "\n", - "### When to Use Clustering\n", - "\n", - "| Scenario | Recommendation |\n", - "|----------|----------------|\n", - "| Annual optimization | 8-12 typical days |\n", - "| Investment decisions | Use with two-stage optimization |\n", - "| Preserve extremes | Use `time_series_for_high_peaks` |\n", - "\n", - "### Next Steps\n", - "\n", - "- 
**[08a-Aggregation](08a-aggregation.ipynb)**: Other aggregation techniques (resampling, two-stage)\n", - "- **[08b-Rolling Horizon](08b-rolling-horizon.ipynb)**: Sequential optimization for long time series" - ] + "source": "## Summary\n\nYou learned how to:\n\n- Use **`transform.cluster()`** to identify typical periods\n- Compare different clustering parameters and their effect on data\n- Cluster **multi-period** and **multi-scenario** FlowSystems\n\n### When to Use Clustering\n\n| Scenario | Recommendation |\n|----------|----------------|\n| Annual optimization | 8-12 typical days |\n| Investment decisions | Use with two-stage optimization |\n| Preserve extremes | Use `time_series_for_high_peaks` |\n\n### Next Steps\n\n- **[08a-Aggregation](08a-aggregation.ipynb)**: Other aggregation techniques (resampling, two-stage)\n- **[08b-Rolling Horizon](08b-rolling-horizon.ipynb)**: Sequential optimization for long time series" } ], "metadata": { From b29460b5369d202a80c0c705263702153104f534 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 14 Dec 2025 02:21:19 +0100 Subject: [PATCH 009/191] Exclude solution when clustering --- flixopt/transform_accessor.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 8ff2155f1..f10bc616e 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -201,7 +201,7 @@ def _cluster_simple( logger.info(f'{" Clustering TimeSeries Data ":#^80}') # Get dataset representation - ds = self._fs.to_dataset() + ds = self._fs.to_dataset(include_solution=False) temporaly_changing_ds = drop_constant_arrays(ds, dim='time') # Perform clustering @@ -275,7 +275,7 @@ def _cluster_multi_dimensional( periods = list(self._fs.periods) if self._fs.periods is not None else [None] scenarios = list(self._fs.scenarios) if self._fs.scenarios is not None else [None] - ds = self._fs.to_dataset().copy(deep=True) # Deep copy to allow in-place modifications + ds = self._fs.to_dataset(include_solution=False).copy(deep=True) # Deep copy to allow in-place modifications clustering_results: dict[tuple, Clustering] = {} # Cluster each period x scenario combination independently From 44aa5db21553d17f3257331562efb7c756d56339 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 14 Dec 2025 02:22:59 +0100 Subject: [PATCH 010/191] Use pre-buildt flow_system --- docs/notebooks/08c-clustering.ipynb | 1232 ++++++++++++++++----------- 1 file changed, 714 insertions(+), 518 deletions(-) diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb index 76c97049a..978cd7ed8 100644 --- a/docs/notebooks/08c-clustering.ipynb +++ b/docs/notebooks/08c-clustering.ipynb @@ -4,7 +4,21 @@ "cell_type": "markdown", "id": "0", "metadata": {}, - "source": "# Clustering with tsam\n\nSpeed up large problems by identifying typical periods using time series clustering.\n\nThis notebook demonstrates:\n\n- **Basic clustering**: Reduce a week/year to representative days\n- **Compare clustering parameters**: See how data changes with different cluster counts\n- **Multi-period clustering**: Cluster multi-year investment studies\n- **Multi-scenario clustering**: Cluster scenario-based analyses\n\n!!! 
note \"Requirements\"\n This notebook requires the `tsam` package: `pip install tsam`" + "source": [ + "# Clustering with tsam\n", + "\n", + "Speed up large problems by identifying typical periods using time series clustering.\n", + "\n", + "This notebook demonstrates:\n", + "\n", + "- **Basic clustering**: Reduce a week/year to representative days\n", + "- **Compare clustering parameters**: See how data changes with different cluster counts\n", + "- **Multi-period clustering**: Cluster multi-year investment studies\n", + "- **Multi-scenario clustering**: Cluster scenario-based analyses\n", + "\n", + "!!! note \"Requirements\"\n", + " This notebook requires the `tsam` package: `pip install tsam`" + ] }, { "cell_type": "markdown", @@ -20,14 +34,14 @@ "id": "2", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T01:03:14.969351Z", - "start_time": "2025-12-14T01:03:09.926940Z" + "end_time": "2025-12-14T01:18:13.229482Z", + "start_time": "2025-12-14T01:18:06.726132Z" }, "execution": { - "iopub.execute_input": "2025-12-13T23:10:58.301051Z", - "iopub.status.busy": "2025-12-13T23:10:58.300771Z", - "iopub.status.idle": "2025-12-13T23:11:03.374938Z", - "shell.execute_reply": "2025-12-13T23:11:03.373972Z" + "iopub.execute_input": "2025-12-14T01:22:15.415309Z", + "iopub.status.busy": "2025-12-14T01:22:15.415198Z", + "iopub.status.idle": "2025-12-14T01:22:20.488460Z", + "shell.execute_reply": "2025-12-14T01:22:20.487726Z" } }, "outputs": [ @@ -60,9 +74,9 @@ "id": "3", "metadata": {}, "source": [ - "## Load Time Series Data\n", + "## Load FlowSystem from File\n", "\n", - "We use real-world district heating data at 15-minute resolution (one week for faster execution):" + "We'll use a pre-built FlowSystem with time-varying data:" ] }, { @@ -71,14 +85,14 @@ "id": "4", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T01:03:15.123760Z", - "start_time": "2025-12-14T01:03:15.034204Z" + "end_time": "2025-12-14T01:18:13.609412Z", + "start_time": "2025-12-14T01:18:13.290728Z" }, "execution": { - "iopub.execute_input": "2025-12-13T23:11:03.379309Z", - "iopub.status.busy": "2025-12-13T23:11:03.379129Z", - "iopub.status.idle": "2025-12-13T23:11:03.482067Z", - "shell.execute_reply": "2025-12-13T23:11:03.481664Z" + "iopub.execute_input": "2025-12-14T01:22:20.498521Z", + "iopub.status.busy": "2025-12-14T01:22:20.498053Z", + "iopub.status.idle": "2025-12-14T01:22:20.717894Z", + "shell.execute_reply": "2025-12-14T01:22:20.714997Z" } }, "outputs": [ @@ -86,27 +100,16 @@ "name": "stdout", "output_type": "stream", "text": [ - "Timesteps: 672 (7 days at 15-min resolution)\n", - "Heat demand: 122.2 - 254.0 MW\n" + "Loaded FlowSystem: 168 timesteps (7 days at hourly resolution)\n" ] } ], "source": [ - "# Load time series data (15-min resolution)\n", - "data = pd.read_csv('../../examples/resources/Zeitreihen2020.csv', index_col=0, parse_dates=True).sort_index()\n", - "data = data['2020-01-01':'2020-01-07 23:45:00'] # One week\n", - "data.index.name = 'time'\n", - "\n", - "timesteps = data.index\n", - "\n", - "# Extract profiles\n", - "electricity_demand = data['P_Netz/MW'].to_numpy()\n", - "heat_demand = data['Q_Netz/MW'].to_numpy()\n", - "electricity_price = data['Strompr.€/MWh'].to_numpy()\n", - "gas_price = data['Gaspr.€/MWh'].to_numpy()\n", + "# Load a simple FlowSystem with time-varying demand and prices\n", + "flow_system = fx.FlowSystem.from_netcdf('data/simple_system.nc4')\n", "\n", - "print(f'Timesteps: {len(timesteps)} ({len(timesteps) / 96:.0f} days at 15-min resolution)')\n", - "print(f'Heat demand: 
{heat_demand.min():.1f} - {heat_demand.max():.1f} MW')" + "timesteps = flow_system.timesteps\n", + "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 24:.0f} days at hourly resolution)')" ] }, { @@ -127,101 +130,422 @@ "id": "guysdaf98es", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T01:03:16.301377Z", - "start_time": "2025-12-14T01:03:15.131379Z" + "end_time": "2025-12-14T01:18:14.900789Z", + "start_time": "2025-12-14T01:18:13.619481Z" }, "execution": { - "iopub.execute_input": "2025-12-13T23:11:03.483963Z", - "iopub.status.busy": "2025-12-13T23:11:03.483827Z", - "iopub.status.idle": "2025-12-13T23:11:04.502966Z", - "shell.execute_reply": "2025-12-13T23:11:04.501454Z" + "iopub.execute_input": "2025-12-14T01:22:20.727211Z", + "iopub.status.busy": "2025-12-14T01:22:20.726087Z", + "iopub.status.idle": "2025-12-14T01:22:22.468949Z", + "shell.execute_reply": "2025-12-14T01:22:22.467805Z" } }, "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[2m2025-12-14 02:03:15.139\u001b[0m \u001b[33mWARNING \u001b[0m │ FlowSystem is not connected_and_transformed. Connecting and transforming data now.\n" - ] - }, { "data": { "text/html": [ "
-       [plot output: PlotResult with 'Original'/'Aggregated' traces for 'GasGrid(Q_Gas)|costs|per_flow_hour' and 'HeatDemand(Q_th)|fixed_relative_profile' over 672 timesteps (2020-01-01 to 2020-01-07); full xarray/plotly repr omitted]
+       [plot output: PlotResult with 'Original'/'Aggregated' traces for 'GasGrid(Gas)|costs|per_flow_hour' and 'Office(Heat)|fixed_relative_profile' over 168 timesteps (2024-01-15 to 2024-01-21); full xarray/plotly repr omitted]
'eSplRMYUBWDi2ynVdhQEZvy1odS2FA'),\n", + " 'y': {'bdata': ('5ZuWpeU9QEDMU8WNBU89QGDXQkqFnk' ... '3URJLENEAAAAAAAAA0QNdz8Y/yPTZA'),\n", " 'dtype': 'f8'},\n", " 'yaxis': 'y'}],\n", " 'layout': {'legend': {'title': {'text': 'variable'}, 'tracegroupgap': 0},\n", @@ -239,24 +563,8 @@ } ], "source": [ - "# Create a simple system to demonstrate clustering\n", - "fs_demo = fx.FlowSystem(timesteps)\n", - "fs_demo.add_elements(\n", - " fx.Bus('Heat'),\n", - " fx.Bus('Gas'),\n", - " fx.Effect('costs', '€', is_standard=True, is_objective=True),\n", - " fx.linear_converters.Boiler(\n", - " 'Boiler',\n", - " thermal_efficiency=0.9,\n", - " thermal_flow=fx.Flow('Q_th', bus='Heat', size=300),\n", - " fuel_flow=fx.Flow('Q_fu', bus='Gas'),\n", - " ),\n", - " fx.Source('GasGrid', outputs=[fx.Flow('Q_Gas', bus='Gas', size=1000, effects_per_flow_hour={'costs': gas_price})]),\n", - " fx.Sink('HeatDemand', inputs=[fx.Flow('Q_th', bus='Heat', size=1, fixed_relative_profile=heat_demand)]),\n", - ")\n", - "\n", "# Cluster with 4 typical days\n", - "fs_clustered_demo = fs_demo.transform.cluster(n_clusters=4, cluster_duration='1D')\n", + "fs_clustered_demo = flow_system.transform.cluster(n_clusters=4, cluster_duration='1D')\n", "\n", "# Get the clustering object to access tsam results\n", "clustering_info = fs_clustered_demo._clustering_info\n", @@ -282,17 +590,24 @@ "id": "q2xt2juvyo", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T01:03:18.069583Z", - "start_time": "2025-12-14T01:03:16.791574Z" + "end_time": "2025-12-14T01:18:17.555198Z", + "start_time": "2025-12-14T01:18:15.217343Z" }, "execution": { - "iopub.execute_input": "2025-12-13T23:11:04.508499Z", - "iopub.status.busy": "2025-12-13T23:11:04.508210Z", - "iopub.status.idle": "2025-12-13T23:11:06.424428Z", - "shell.execute_reply": "2025-12-13T23:11:06.417096Z" + "iopub.execute_input": "2025-12-14T01:22:22.480243Z", + "iopub.status.busy": "2025-12-14T01:22:22.480054Z", + "iopub.status.idle": "2025-12-14T01:22:23.659162Z", + "shell.execute_reply": "2025-12-14T01:22:23.657338Z" } }, "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Comparing: GasGrid(Gas)|costs|per_flow_hour\n" + ] + }, { "data": { "text/html": [ @@ -4185,18 +4500,15 @@ " " ] }, - "jetTransient": { - "display_id": null - }, "metadata": {}, "output_type": "display_data" }, { "data": { "text/html": [ - "
" ] }, - "jetTransient": { - "display_id": null - }, "metadata": {}, "output_type": "display_data" } @@ -4234,11 +4543,15 @@ "clustering_results = {}\n", "\n", "for n in cluster_configs:\n", - " fs_test = fs_demo.copy()\n", + " fs_test = flow_system.copy()\n", " fs_clustered = fs_test.transform.cluster(n_clusters=n, cluster_duration='1D')\n", " clustering_results[n] = fs_clustered._clustering_info['clustering']\n", "\n", - "# Compare the aggregated heat demand for each configuration\n", + "# Get the first time-varying variable name for comparison\n", + "first_var = list(clustering_results[2].original_data.columns)[0]\n", + "print(f'Comparing: {first_var}')\n", + "\n", + "# Compare the aggregated data for each configuration\n", "fig = make_subplots(\n", " rows=2,\n", " cols=2,\n", @@ -4254,9 +4567,8 @@ " row += 1\n", " col += 1\n", "\n", - " # Original data\n", - " original = clustering.original_data['HeatDemand(Q_th)|fixed_relative_profile']\n", - " aggregated = clustering.aggregated_data['HeatDemand(Q_th)|fixed_relative_profile']\n", + " original = clustering.original_data[first_var]\n", + " aggregated = clustering.aggregated_data[first_var]\n", "\n", " fig.add_trace(\n", " go.Scatter(\n", @@ -4282,12 +4594,12 @@ " )\n", "\n", "fig.update_layout(\n", - " title='Heat Demand: Original vs Clustered Data',\n", + " title=f'Original vs Clustered: {first_var}',\n", " height=500,\n", " legend=dict(orientation='h', yanchor='bottom', y=1.02),\n", ")\n", "fig.update_xaxes(title_text='Timestep', row=2)\n", - "fig.update_yaxes(title_text='MW', col=1)\n", + "fig.update_yaxes(title_text='Value', col=1)\n", "fig.show()" ] }, @@ -4297,14 +4609,14 @@ "id": "3zsi1g8bokg", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T01:03:19.061384Z", - "start_time": "2025-12-14T01:03:18.929418Z" + "end_time": "2025-12-14T01:18:18.818103Z", + "start_time": "2025-12-14T01:18:18.571453Z" }, "execution": { - "iopub.execute_input": "2025-12-13T23:11:06.484947Z", - "iopub.status.busy": "2025-12-13T23:11:06.484493Z", - "iopub.status.idle": "2025-12-13T23:11:06.548165Z", - "shell.execute_reply": "2025-12-13T23:11:06.546796Z" + "iopub.execute_input": "2025-12-14T01:22:23.708331Z", + "iopub.status.busy": "2025-12-14T01:22:23.708032Z", + "iopub.status.idle": "2025-12-14T01:22:23.770295Z", + "shell.execute_reply": "2025-12-14T01:22:23.768542Z" } }, "outputs": [ @@ -4313,14 +4625,14 @@ "text/html": [ "\n", - "\n", + "
\n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", " \n", @@ -4332,38 +4644,38 @@ " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", "
-           RMSE [MW]    MAE [MW]    Max Error [MW]    Correlation
+           RMSE         MAE         Max Error         Correlation
 Clusters
-  2        1.87         1.36        7.89              0.9984
+  2        0.00         0.00        0.00              1.0000
-  3        1.29         0.76        7.89              0.9993
+  3        0.00         0.00        0.00              1.0000
-  4        0.65         0.37        3.14              0.9998
+  4        0.00         0.00        0.00              1.0000
-  5        0.15         0.10        0.38              1.0000
+  5        0.00         0.00        0.00              1.0000
\n" ], "text/plain": [ - "" + "" ] }, "execution_count": 5, @@ -4375,8 +4687,8 @@ "# Calculate error metrics for each configuration\n", "metrics = []\n", "for n, clustering in clustering_results.items():\n", - " original = clustering.original_data['HeatDemand(Q_th)|fixed_relative_profile'].values\n", - " aggregated = clustering.aggregated_data['HeatDemand(Q_th)|fixed_relative_profile'].values\n", + " original = clustering.original_data[first_var].values\n", + " aggregated = clustering.aggregated_data[first_var].values\n", "\n", " # Calculate metrics\n", " rmse = np.sqrt(np.mean((original - aggregated) ** 2))\n", @@ -4387,9 +4699,9 @@ " metrics.append(\n", " {\n", " 'Clusters': n,\n", - " 'RMSE [MW]': rmse,\n", - " 'MAE [MW]': mae,\n", - " 'Max Error [MW]': max_error,\n", + " 'RMSE': rmse,\n", + " 'MAE': mae,\n", + " 'Max Error': max_error,\n", " 'Correlation': correlation,\n", " }\n", " )\n", @@ -4397,125 +4709,14 @@ "metrics_df = pd.DataFrame(metrics).set_index('Clusters')\n", "metrics_df.style.format(\n", " {\n", - " 'RMSE [MW]': '{:.2f}',\n", - " 'MAE [MW]': '{:.2f}',\n", - " 'Max Error [MW]': '{:.2f}',\n", + " 'RMSE': '{:.2f}',\n", + " 'MAE': '{:.2f}',\n", + " 'Max Error': '{:.2f}',\n", " 'Correlation': '{:.4f}',\n", " }\n", ")" ] }, - { - "cell_type": "markdown", - "id": "5", - "metadata": {}, - "source": [ - "## Build a Simple FlowSystem\n", - "\n", - "A district heating system with CHP, boiler, and storage:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "id": "6", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T01:03:19.157280Z", - "start_time": "2025-12-14T01:03:19.123190Z" - }, - "execution": { - "iopub.execute_input": "2025-12-13T23:11:06.554382Z", - "iopub.status.busy": "2025-12-13T23:11:06.554075Z", - "iopub.status.idle": "2025-12-13T23:11:06.569809Z", - "shell.execute_reply": "2025-12-13T23:11:06.569118Z" - } - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "System: 672 timesteps\n" - ] - } - ], - "source": [ - "def build_system(timesteps, heat_demand, electricity_demand, electricity_price, gas_price):\n", - " \"\"\"Build a district heating system.\"\"\"\n", - " fs = fx.FlowSystem(timesteps)\n", - "\n", - " fs.add_elements(\n", - " # Buses\n", - " fx.Bus('Electricity'),\n", - " fx.Bus('Heat'),\n", - " fx.Bus('Gas'),\n", - " fx.Bus('Coal'),\n", - " # Effects\n", - " fx.Effect('costs', '€', 'Total Costs', is_standard=True, is_objective=True),\n", - " # CHP\n", - " fx.linear_converters.CHP(\n", - " 'CHP',\n", - " thermal_efficiency=0.58,\n", - " electrical_efficiency=0.22,\n", - " status_parameters=fx.StatusParameters(effects_per_startup=1000),\n", - " electrical_flow=fx.Flow('P_el', bus='Electricity', size=200),\n", - " thermal_flow=fx.Flow('Q_th', bus='Heat', size=200, relative_minimum=0.3),\n", - " fuel_flow=fx.Flow('Q_fu', bus='Coal', size=350, previous_flow_rate=100), # size ≈ 200/0.58\n", - " ),\n", - " # Gas Boiler\n", - " fx.linear_converters.Boiler(\n", - " 'Boiler',\n", - " thermal_efficiency=0.85,\n", - " status_parameters=fx.StatusParameters(effects_per_startup=500),\n", - " thermal_flow=fx.Flow('Q_th', bus='Heat', size=100, relative_minimum=0.1),\n", - " fuel_flow=fx.Flow('Q_fu', bus='Gas', size=120, previous_flow_rate=20), # size ≈ 100/0.85\n", - " ),\n", - " # Thermal Storage\n", - " fx.Storage(\n", - " 'Storage',\n", - " capacity_in_flow_hours=500,\n", - " initial_charge_state=100,\n", - " eta_charge=0.95,\n", - " eta_discharge=0.95,\n", - " relative_loss_per_hour=0.001,\n", - " 
charging=fx.Flow('Charge', size=100, bus='Heat'),\n", - " discharging=fx.Flow('Discharge', size=100, bus='Heat'),\n", - " ),\n", - " # Fuel sources\n", - " fx.Source(\n", - " 'GasGrid',\n", - " outputs=[fx.Flow('Q_Gas', bus='Gas', size=1000, effects_per_flow_hour={'costs': gas_price})],\n", - " ),\n", - " fx.Source(\n", - " 'CoalSupply',\n", - " outputs=[fx.Flow('Q_Coal', bus='Coal', size=1000, effects_per_flow_hour={'costs': 4.6})],\n", - " ),\n", - " # Electricity grid\n", - " fx.Source(\n", - " 'GridBuy',\n", - " outputs=[\n", - " fx.Flow('P_el', bus='Electricity', size=1000, effects_per_flow_hour={'costs': electricity_price + 0.5})\n", - " ],\n", - " ),\n", - " fx.Sink(\n", - " 'GridSell',\n", - " inputs=[fx.Flow('P_el', bus='Electricity', size=1000, effects_per_flow_hour=-(electricity_price - 0.5))],\n", - " ),\n", - " # Demands\n", - " fx.Sink('HeatDemand', inputs=[fx.Flow('Q_th', bus='Heat', size=1, fixed_relative_profile=heat_demand)]),\n", - " fx.Sink(\n", - " 'ElecDemand', inputs=[fx.Flow('P_el', bus='Electricity', size=1, fixed_relative_profile=electricity_demand)]\n", - " ),\n", - " )\n", - "\n", - " return fs\n", - "\n", - "\n", - "flow_system = build_system(timesteps, heat_demand, electricity_demand, electricity_price, gas_price)\n", - "print(f'System: {len(timesteps)} timesteps')" - ] - }, { "cell_type": "markdown", "id": "7", @@ -4528,59 +4729,42 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 6, "id": "8", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T01:03:31.221135Z", - "start_time": "2025-12-14T01:03:19.205997Z" + "end_time": "2025-12-14T01:18:21.636630Z", + "start_time": "2025-12-14T01:18:18.887553Z" }, "execution": { - "iopub.execute_input": "2025-12-13T23:11:06.574029Z", - "iopub.status.busy": "2025-12-13T23:11:06.573839Z", - "iopub.status.idle": "2025-12-13T23:11:16.402730Z", - "shell.execute_reply": "2025-12-13T23:11:16.392580Z" + "iopub.execute_input": "2025-12-14T01:22:23.774996Z", + "iopub.status.busy": "2025-12-14T01:22:23.774446Z", + "iopub.status.idle": "2025-12-14T01:22:25.286112Z", + "shell.execute_reply": "2025-12-14T01:22:25.284951Z" } }, "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[2m2025-12-14 02:03:19.211\u001b[0m \u001b[33mWARNING \u001b[0m │ FlowSystem is not connected_and_transformed. 
Connecting and transforming data now.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Writing constraints.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 71/71 [00:01<00:00, 68.04it/s]\n", - "Writing continuous variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 51/51 [00:00<00:00, 349.84it/s]\n", - "Writing binary variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 13/13 [00:00<00:00, 607.50it/s]\n" - ] - }, { "name": "stdout", "output_type": "stream", "text": [ "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", - "MIP linopy-problem-3pp56_27 has 26909 rows; 24221 cols; 84703 nonzeros; 8736 integer variables (8736 binary)\n", + "MIP linopy-problem-shi3zqp7 has 2200 rows; 2199 cols; 6740 nonzeros; 336 integer variables (336 binary)\n", "Coefficient ranges:\n", - " Matrix [1e-05, 1e+03]\n", + " Matrix [1e-05, 1e+02]\n", " Cost [1e+00, 1e+00]\n", - " Bound [1e+00, 1e+03]\n", - " RHS [1e-05, 1e+02]\n", - "WARNING: Problem has some excessively small row bounds\n", + " Bound [1e+00, 5e+02]\n", + " RHS [1e+00, 2e+02]\n", "Presolving model\n", - "17472 rows, 13440 cols, 45021 nonzeros 0s\n", - "14789 rows, 10964 cols, 45835 nonzeros 0s\n", - "12214 rows, 9019 cols, 39022 nonzeros 0s\n", - "Presolve reductions: rows 12214(-14695); columns 9019(-15202); nonzeros 39022(-45681) \n", + "1176 rows, 1008 cols, 2855 nonzeros 0s\n", + "840 rows, 672 cols, 3022 nonzeros 0s\n", + "840 rows, 672 cols, 3022 nonzeros 0s\n", + "Presolve reductions: rows 840(-1360); columns 672(-1527); nonzeros 3022(-3718) \n", "\n", "Solving MIP model with:\n", - " 12214 rows\n", - " 9019 cols (6824 binary, 0 integer, 0 implied int., 2195 continuous, 0 domain fixed)\n", - " 39022 nonzeros\n", + " 840 rows\n", + " 672 cols (336 binary, 0 integer, 0 implied int., 336 continuous, 0 domain fixed)\n", + " 3022 nonzeros\n", "\n", "Src: B => Branching; C => Central rounding; F => Feasibility pump; H => Heuristic;\n", " I => Shifting; J => Feasibility jump; L => Sub-MIP; P => Empty MIP; R => Randomized rounding;\n", @@ -4590,34 +4774,39 @@ " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. | LpIters Time\n", "\n", - " 0 0 0 0.00% -91086.334692 inf inf 0 0 0 0 0.7s\n", - " 0 0 0 0.00% 510476.049542 inf inf 0 0 0 4614 0.9s\n", - " C 0 0 0 0.00% 510864.416955 602921.407557 15.27% 3667 970 22 6199 2.1s\n", - " L 0 0 0 0.00% 510864.478625 510865.621324 0.00% 4102 1011 22 7020 6.8s\n", - " 1 0 1 100.00% 510864.478782 510865.621324 0.00% 4102 1011 22 8787 6.9s\n", + " 0 0 0 0.00% 0 inf inf 0 0 0 0 0.0s\n", + " R 0 0 0 0.00% 558.830517 583.6312648 4.25% 0 0 0 465 0.1s\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + " L 0 0 0 0.00% 558.830517 558.830517 0.00% 174 58 0 523 0.3s\n", + " 1 0 1 100.00% 558.830517 558.830517 0.00% 174 58 0 731 0.3s\n", "\n", "Solving report\n", - " Model linopy-problem-3pp56_27\n", + " Model linopy-problem-shi3zqp7\n", " Status Optimal\n", - " Primal bound 510865.621324\n", - " Dual bound 510864.478782\n", - " Gap 0.000224% (tolerance: 1%)\n", - " P-D integral 0.722979088888\n", + " Primal bound 558.830516996\n", + " Dual bound 558.830516996\n", + " Gap 0% (tolerance: 1%)\n", + " P-D integral 0.00929901200095\n", " Solution status feasible\n", - " 510865.621324 (objective)\n", + " 558.830516996 (objective)\n", " 0 (bound viol.)\n", - " 4.75782431897e-07 (int. 
viol.)\n", + " 8.881784197e-16 (int. viol.)\n", " 0 (row viol.)\n", - " Timing 6.86\n", - " Max sub-MIP depth 3\n", + " Timing 0.27\n", + " Max sub-MIP depth 1\n", " Nodes 1\n", " Repair LPs 0\n", - " LP iterations 8787\n", + " LP iterations 731\n", " 0 (strong br.)\n", - " 2406 (separation)\n", - " 1740 (heuristics)\n", - "Full optimization: 12.00 seconds\n", - "Cost: 510,866 €\n" + " 58 (separation)\n", + " 208 (heuristics)\n", + "Full optimization: 1.50 seconds\n", + "Cost: 559 €\n" ] } ], @@ -4652,52 +4841,43 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 7, "id": "10", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T01:03:45.862590Z", - "start_time": "2025-12-14T01:03:31.305197Z" + "end_time": "2025-12-14T01:18:23.485055Z", + "start_time": "2025-12-14T01:18:21.711729Z" }, "execution": { - "iopub.execute_input": "2025-12-13T23:11:16.424863Z", - "iopub.status.busy": "2025-12-13T23:11:16.421861Z", - "iopub.status.idle": "2025-12-13T23:11:23.254537Z", - "shell.execute_reply": "2025-12-13T23:11:23.252897Z" + "iopub.execute_input": "2025-12-14T01:22:25.293422Z", + "iopub.status.busy": "2025-12-14T01:22:25.293044Z", + "iopub.status.idle": "2025-12-14T01:22:26.753186Z", + "shell.execute_reply": "2025-12-14T01:22:26.749869Z" } }, "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Writing constraints.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 99/99 [00:00<00:00, 106.56it/s]\n", - "Writing continuous variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 51/51 [00:00<00:00, 341.57it/s]\n", - "Writing binary variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 13/13 [00:00<00:00, 252.77it/s]\n" - ] - }, { "name": "stdout", "output_type": "stream", "text": [ "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", - "MIP linopy-problem-2b32w568 has 34889 rows; 24221 cols; 100663 nonzeros; 8736 integer variables (8736 binary)\n", + "MIP linopy-problem-wfesasa2 has 2890 rows; 2199 cols; 8120 nonzeros; 336 integer variables (336 binary)\n", "Coefficient ranges:\n", - " Matrix [1e-05, 1e+03]\n", + " Matrix [1e-05, 1e+02]\n", " Cost [1e+00, 1e+00]\n", - " Bound [1e+00, 1e+03]\n", - " RHS [1e-05, 1e+02]\n", - "WARNING: Problem has some excessively small row bounds\n", + " Bound [1e+00, 5e+02]\n", + " RHS [1e+00, 2e+02]\n", "Presolving model\n", - "17852 rows, 7835 cols, 46161 nonzeros 0s\n", - "8771 rows, 6538 cols, 26638 nonzeros 0s\n", - "7501 rows, 5532 cols, 24162 nonzeros 0s\n", - "Presolve reductions: rows 7501(-27388); columns 5532(-18689); nonzeros 24162(-76501) \n", + "1291 rows, 640 cols, 3200 nonzeros 0s\n", + "910 rows, 419 cols, 2886 nonzeros 0s\n", + "518 rows, 416 cols, 1734 nonzeros 0s\n", + "495 rows, 393 cols, 1780 nonzeros 0s\n", + "Presolve reductions: rows 495(-2395); columns 393(-1806); nonzeros 1780(-6340) \n", "\n", "Solving MIP model with:\n", - " 7501 rows\n", - " 5532 cols (4223 binary, 0 integer, 0 implied int., 1309 continuous, 0 domain fixed)\n", - " 24162 nonzeros\n", + " 495 rows\n", + " 393 cols (198 binary, 0 integer, 0 implied int., 195 continuous, 0 domain fixed)\n", + " 1780 nonzeros\n", "\n", "Src: B => Branching; C => Central rounding; F => Feasibility pump; H => Heuristic;\n", " I => Shifting; J => Feasibility jump; L => Sub-MIP; P => Empty MIP; R => Randomized rounding;\n", @@ -4707,37 +4887,34 @@ " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", "Src Proc. InQueue | Leaves Expl. 
| BestBound BestSol Gap | Cuts InLp Confl. | LpIters Time\n", "\n", - " 0 0 0 0.00% -134515.631486 inf inf 0 0 0 0 0.7s\n", - " 0 0 0 0.00% 510534.808573 inf inf 0 0 0 2776 0.8s\n", - "HighsMipSolverData::transformNewIntegerFeasibleSolution tmpSolver.run();\n", - "WARNING: Solution with objective 626117 has untransformed violations: bound = 9.025e-06; integrality = 0; row = 9.025e-06\n", - "HighsMipSolverData::transformNewIntegerFeasibleSolution tmpSolver.run();\n", - " L 0 0 0 0.00% 511015.64394 511017.103333 0.00% 2825 647 36 3686 8.3s\n", - " 1 0 1 100.00% 511015.64422 511017.103333 0.00% 2825 647 36 4951 8.3s\n", + " 0 0 0 0.00% 0 inf inf 0 0 0 0 0.0s\n", + " R 0 0 0 0.00% 562.499067 588.4592791 4.41% 0 0 0 265 0.0s\n", + " L 0 0 0 0.00% 562.499067 562.499067 0.00% 108 35 0 300 0.1s\n", + " 1 0 1 100.00% 562.499067 562.499067 0.00% 108 35 0 382 0.1s\n", "\n", "Solving report\n", - " Model linopy-problem-2b32w568\n", + " Model linopy-problem-wfesasa2\n", " Status Optimal\n", - " Primal bound 511017.103333\n", - " Dual bound 511015.64422\n", - " Gap 0.000286% (tolerance: 1%)\n", - " P-D integral 1.28790047777e-07\n", + " Primal bound 562.499067033\n", + " Dual bound 562.499067033\n", + " Gap 0% (tolerance: 1%)\n", + " P-D integral 0.00288293546382\n", " Solution status feasible\n", - " 511017.103333 (objective)\n", + " 562.499067033 (objective)\n", " 0 (bound viol.)\n", - " 0 (int. viol.)\n", + " 2.56905607898e-13 (int. viol.)\n", " 0 (row viol.)\n", - " Timing 8.32\n", - " Max sub-MIP depth 4\n", + " Timing 0.10\n", + " Max sub-MIP depth 1\n", " Nodes 1\n", - " Repair LPs 2 (1 feasible; 367 iterations)\n", - " LP iterations 4951\n", + " Repair LPs 0\n", + " LP iterations 382\n", " 0 (strong br.)\n", - " 910 (separation)\n", - " 1247 (heuristics)\n", - "Clustered optimization: 14.54 seconds\n", - "Cost: 511,017 €\n", - "Speedup: 0.8x\n" + " 35 (separation)\n", + " 82 (heuristics)\n", + "Clustered optimization: 1.45 seconds\n", + "Cost: 562 €\n", + "Speedup: 1.0x\n" ] } ], @@ -4768,18 +4945,18 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 8, "id": "12", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T01:03:46.373652Z", - "start_time": "2025-12-14T01:03:46.196503Z" + "end_time": "2025-12-14T01:18:23.574337Z", + "start_time": "2025-12-14T01:18:23.554953Z" }, "execution": { - "iopub.execute_input": "2025-12-13T23:11:23.259936Z", - "iopub.status.busy": "2025-12-13T23:11:23.259776Z", - "iopub.status.idle": "2025-12-13T23:11:23.267446Z", - "shell.execute_reply": "2025-12-13T23:11:23.267185Z" + "iopub.execute_input": "2025-12-14T01:22:26.757305Z", + "iopub.status.busy": "2025-12-14T01:22:26.757113Z", + "iopub.status.idle": "2025-12-14T01:22:26.765642Z", + "shell.execute_reply": "2025-12-14T01:22:26.765136Z" } }, "outputs": [ @@ -4788,39 +4965,39 @@ "text/html": [ "\n", - "\n", + "
\n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", "
-                        Time [s]    Cost [€]    Cost Gap [%]    Speedup
+                        Time [s]    Cost [€]    Cost Gap [%]    Speedup
-  Full (baseline)       12.00       510,866     0.00            1.0x
+  Full (baseline)       1.50        559         0.00            1.0x
-  Clustered (4 days)    14.54       511,017     0.03            0.8x
+  Clustered (4 days)    1.45        562         0.66            1.0x
\n" ], "text/plain": [ - "" + "" ] }, - "execution_count": 9, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } @@ -4851,16 +5028,26 @@ "cell_type": "markdown", "id": "mn99rfcupf", "metadata": {}, - "source": "## Multi-Period Clustering\n\nFor multi-year investment studies, clustering is applied **independently per period** (year). Each year gets its own set of typical days:" + "source": [ + "## Multi-Period Clustering\n", + "\n", + "For multi-year investment studies, clustering is applied **independently per period** (year). Each year gets its own set of typical days:" + ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 9, "id": "24vgkxoeyqz", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T01:03:54.299527Z", - "start_time": "2025-12-14T01:03:46.470960Z" + "end_time": "2025-12-14T01:18:30.759263Z", + "start_time": "2025-12-14T01:18:23.607144Z" + }, + "execution": { + "iopub.execute_input": "2025-12-14T01:22:26.768344Z", + "iopub.status.busy": "2025-12-14T01:22:26.768176Z", + "iopub.status.idle": "2025-12-14T01:22:32.730327Z", + "shell.execute_reply": "2025-12-14T01:22:32.729236Z" } }, "outputs": [ @@ -4868,57 +5055,84 @@ "name": "stdout", "output_type": "stream", "text": [ - "Multi-period system: 672 timesteps × 2 periods\n", - "\u001b[2m2025-12-14 02:03:46.564\u001b[0m \u001b[33mWARNING \u001b[0m │ FlowSystem is not connected_and_transformed. Connecting and transforming data now.\n", + "Multi-period system: 48 timesteps × 3 periods × 2 scenarios\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", - "LP linopy-problem-o2v4edxp has 11124 rows; 9428 cols; 28956 nonzeros\n", + "MIP linopy-problem-adnjwebr has 6420 rows; 3870 cols; 16248 nonzeros; 588 integer variables (588 binary)\n", "Coefficient ranges:\n", - " Matrix [2e-01, 8e+00]\n", - " Cost [1e+00, 1e+00]\n", - " Bound [1e+02, 5e+02]\n", - " RHS [0e+00, 0e+00]\n", + " Matrix [1e-05, 1e+03]\n", + " Cost [3e-01, 7e-01]\n", + " Bound [1e+00, 5e+02]\n", + " RHS [1e+00, 1e+00]\n", "Presolving model\n", - "0 rows, 0 cols, 0 nonzeros 0s\n", - "0 rows, 0 cols, 0 nonzeros 0s\n", - "Presolve reductions: rows 0(-11124); columns 0(-9428); nonzeros 0(-28956) - Reduced to empty\n", - "Performed postsolve\n", - "Solving the original LP from the solution after postsolve\n", + "2895 rows, 1059 cols, 6927 nonzeros 0s\n", + "2050 rows, 622 cols, 6232 nonzeros 0s\n", + "1068 rows, 612 cols, 3236 nonzeros 0s\n", + "1053 rows, 604 cols, 3203 nonzeros 0s\n", + "Presolve reductions: rows 1053(-5367); columns 604(-3266); nonzeros 3203(-13045) \n", + "\n", + "Solving MIP model with:\n", + " 1053 rows\n", + " 604 cols (305 binary, 0 integer, 0 implied int., 299 continuous, 0 domain fixed)\n", + " 3203 nonzeros\n", + "\n", + "Src: B => Branching; C => Central rounding; F => Feasibility pump; H => Heuristic;\n", + " I => Shifting; J => Feasibility jump; L => Sub-MIP; P => Empty MIP; R => Randomized rounding;\n", + " S => Solve LP; T => Evaluate node; U => Unbounded; X => User solution; Y => HiGHS solution;\n", + " Z => ZI Round; l => Trivial lower; p => Trivial point; u => Trivial upper; z => Trivial zero\n", + "\n", + " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", + "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. 
| LpIters Time\n", + "\n", + " 0 0 0 0.00% 9394.346261 inf inf 0 0 0 0 0.0s\n", + " R 0 0 0 0.00% 18357.702407 19065.666244 3.71% 0 0 0 430 0.0s\n", + " 1 0 1 100.00% 18879.998962 19065.666244 0.97% 1111 123 0 676 0.1s\n", "\n", - "Model name : linopy-problem-o2v4edxp\n", - "Model status : Optimal\n", - "Objective value : 2.4758192703e+06\n", - "P-D objective error : 6.5829287137e-16\n", - "HiGHS run time : 0.13\n", - "Multi-period clustered cost: 2,475,819 €\n" + "Solving report\n", + " Model linopy-problem-adnjwebr\n", + " Status Optimal\n", + " Primal bound 19065.6662439\n", + " Dual bound 18879.9989619\n", + " Gap 0.974% (tolerance: 1%)\n", + " P-D integral 0.0013863514249\n", + " Solution status feasible\n", + " 19065.6662439 (objective)\n", + " 0 (bound viol.)\n", + " 0 (int. viol.)\n", + " 0 (row viol.)\n", + " Timing 0.10\n", + " Max sub-MIP depth 0\n", + " Nodes 1\n", + " Repair LPs 0\n", + " LP iterations 676\n", + " 0 (strong br.)\n", + " 246 (separation)\n", + " 0 (heuristics)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Multi-period clustered cost: 38,227 €\n" ] } ], "source": [ - "# Create a multi-period FlowSystem (simulating 2 years)\n", - "periods = pd.Index([2025, 2026], name='period')\n", + "# Load a multi-period FlowSystem (also has scenarios!)\n", + "fs_multiperiod = fx.FlowSystem.from_netcdf('data/multiperiod_system.nc4')\n", "\n", - "fs_multiperiod = fx.FlowSystem(timesteps, periods=periods)\n", - "fs_multiperiod.add_elements(\n", - " fx.Bus('Heat'),\n", - " fx.Bus('Gas'),\n", - " fx.Effect('costs', '€', 'Total Costs', is_standard=True, is_objective=True),\n", - " fx.linear_converters.Boiler(\n", - " 'Boiler',\n", - " thermal_efficiency=0.85,\n", - " thermal_flow=fx.Flow('Q_th', bus='Heat', size=300),\n", - " fuel_flow=fx.Flow('Q_fu', bus='Gas'),\n", - " ),\n", - " fx.Source(\n", - " 'GasGrid',\n", - " outputs=[fx.Flow('Q_Gas', bus='Gas', size=500, effects_per_flow_hour={'costs': gas_price})],\n", - " ),\n", - " fx.Sink('HeatDemand', inputs=[fx.Flow('Q_th', bus='Heat', size=1, fixed_relative_profile=heat_demand)]),\n", + "print(\n", + " f'Multi-period system: {len(fs_multiperiod.timesteps)} timesteps × {len(fs_multiperiod.periods)} periods × {len(fs_multiperiod.scenarios)} scenarios'\n", ")\n", "\n", - "print(f'Multi-period system: {len(timesteps)} timesteps × {len(periods)} periods')\n", - "\n", - "# Cluster - each period gets clustered independently\n", - "fs_mp_clustered = fs_multiperiod.transform.cluster(n_clusters=3, cluster_duration='1D')\n", + "# Cluster - each period × scenario combination gets clustered independently\n", + "fs_mp_clustered = fs_multiperiod.transform.cluster(n_clusters=1, cluster_duration='1D')\n", "fs_mp_clustered.optimize(solver)\n", "\n", "print(f'Multi-period clustered cost: {fs_mp_clustered.solution[\"costs\"].sum().item():,.0f} €')" @@ -4928,16 +5142,24 @@ "cell_type": "markdown", "id": "0qjtoobc40uo", "metadata": {}, - "source": "## Multi-Scenario Clustering\n\nFor scenario-based analyses, clustering is applied **independently per scenario**:" + "source": [ + "The loaded system includes both **periods** and **scenarios**. 
Clustering is applied independently for each combination:" + ] }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 10, "id": "36269qvz7ti", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T01:04:01.667341Z", - "start_time": "2025-12-14T01:03:54.503092Z" + "end_time": "2025-12-14T01:18:30.895716Z", + "start_time": "2025-12-14T01:18:30.884403Z" + }, + "execution": { + "iopub.execute_input": "2025-12-14T01:22:32.735518Z", + "iopub.status.busy": "2025-12-14T01:22:32.735020Z", + "iopub.status.idle": "2025-12-14T01:22:32.742489Z", + "shell.execute_reply": "2025-12-14T01:22:32.741900Z" } }, "outputs": [ @@ -4945,77 +5167,30 @@ "name": "stdout", "output_type": "stream", "text": [ - "Multi-scenario system: 672 timesteps × 2 scenarios\n", - "\u001b[2m2025-12-14 02:03:54.554\u001b[0m \u001b[33mWARNING \u001b[0m │ FlowSystem is not connected_and_transformed. Connecting and transforming data now.\n", - "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", - "LP linopy-problem-6jfhyhl1 has 11124 rows; 9428 cols; 28956 nonzeros\n", - "Coefficient ranges:\n", - " Matrix [2e-01, 8e+00]\n", - " Cost [5e-01, 5e-01]\n", - " Bound [1e+02, 6e+02]\n", - " RHS [0e+00, 0e+00]\n", - "Presolving model\n", - "0 rows, 0 cols, 0 nonzeros 0s\n", - "0 rows, 0 cols, 0 nonzeros 0s\n", - "Presolve reductions: rows 0(-11124); columns 0(-9428); nonzeros 0(-28956) - Reduced to empty\n", - "Performed postsolve\n", - "Solving the original LP from the solution after postsolve\n", + "Periods: [2024, 2025, 2026]\n", + "Scenarios: ['high_demand', 'low_demand']\n", "\n", - "Model name : linopy-problem-6jfhyhl1\n", - "Model status : Optimal\n", - "Objective value : 1.4235960804e+06\n", - "P-D objective error : 2.4532649930e-16\n", - "HiGHS run time : 0.10\n", - "Multi-scenario clustered cost: 2,847,192 €\n" + "Clustering was applied to 6 combinations:\n", + " - period=2024, scenario=high_demand\n", + " - period=2024, scenario=low_demand\n", + " - period=2025, scenario=high_demand\n", + " - period=2025, scenario=low_demand\n", + " - period=2026, scenario=high_demand\n", + " - period=2026, scenario=low_demand\n" ] } ], "source": [ - "# Create a multi-scenario FlowSystem\n", - "scenarios = pd.Index(['base', 'high_demand'], name='scenario')\n", - "\n", - "# Different demand profiles per scenario\n", - "heat_demand_high = heat_demand * 1.3 # 30% higher demand\n", - "\n", - "fs_multiscenario = fx.FlowSystem(timesteps, scenarios=scenarios)\n", - "fs_multiscenario.add_elements(\n", - " fx.Bus('Heat'),\n", - " fx.Bus('Gas'),\n", - " fx.Effect('costs', '€', 'Total Costs', is_standard=True, is_objective=True),\n", - " fx.linear_converters.Boiler(\n", - " 'Boiler',\n", - " thermal_efficiency=0.85,\n", - " thermal_flow=fx.Flow('Q_th', bus='Heat', size=400),\n", - " fuel_flow=fx.Flow('Q_fu', bus='Gas'),\n", - " ),\n", - " fx.Source(\n", - " 'GasGrid',\n", - " outputs=[fx.Flow('Q_Gas', bus='Gas', size=600, effects_per_flow_hour={'costs': gas_price})],\n", - " ),\n", - " # Scenario-dependent demand\n", - " fx.Sink(\n", - " 'HeatDemand',\n", - " inputs=[\n", - " fx.Flow(\n", - " 'Q_th',\n", - " bus='Heat',\n", - " size=1,\n", - " fixed_relative_profile=fx.TimeSeriesData(\n", - " np.stack([heat_demand, heat_demand_high], axis=1),\n", - " dims=['time', 'scenario'],\n", - " ),\n", - " )\n", - " ],\n", - " ),\n", - ")\n", + "# The multiperiod_system already has both periods AND scenarios\n", + "# Each period × scenario combination is clustered independently\n", + "print(f'Periods: 
{fs_multiperiod.periods.tolist()}')\n", + "print(f'Scenarios: {fs_multiperiod.scenarios.tolist()}')\n", "\n", - "print(f'Multi-scenario system: {len(timesteps)} timesteps × {len(scenarios)} scenarios')\n", - "\n", - "# Cluster - each scenario gets clustered independently\n", - "fs_ms_clustered = fs_multiscenario.transform.cluster(n_clusters=3, cluster_duration='1D')\n", - "fs_ms_clustered.optimize(solver)\n", - "\n", - "print(f'Multi-scenario clustered cost: {fs_ms_clustered.solution[\"costs\"].sum().item():,.0f} €')" + "# Get clustering info to see how many combinations were clustered\n", + "clustering_info = fs_mp_clustered._clustering_info\n", + "print(f'\\nClustering was applied to {len(clustering_info[\"clustering_results\"])} combinations:')\n", + "for (period, scenario), _ in clustering_info['clustering_results'].items():\n", + " print(f' - period={period}, scenario={scenario}')" ] }, { @@ -5060,7 +5235,28 @@ "cell_type": "markdown", "id": "24", "metadata": {}, - "source": "## Summary\n\nYou learned how to:\n\n- Use **`transform.cluster()`** to identify typical periods\n- Compare different clustering parameters and their effect on data\n- Cluster **multi-period** and **multi-scenario** FlowSystems\n\n### When to Use Clustering\n\n| Scenario | Recommendation |\n|----------|----------------|\n| Annual optimization | 8-12 typical days |\n| Investment decisions | Use with two-stage optimization |\n| Preserve extremes | Use `time_series_for_high_peaks` |\n\n### Next Steps\n\n- **[08a-Aggregation](08a-aggregation.ipynb)**: Other aggregation techniques (resampling, two-stage)\n- **[08b-Rolling Horizon](08b-rolling-horizon.ipynb)**: Sequential optimization for long time series" + "source": [ + "## Summary\n", + "\n", + "You learned how to:\n", + "\n", + "- Use **`transform.cluster()`** to identify typical periods\n", + "- Compare different clustering parameters and their effect on data\n", + "- Cluster **multi-period** and **multi-scenario** FlowSystems\n", + "\n", + "### When to Use Clustering\n", + "\n", + "| Scenario | Recommendation |\n", + "|----------|----------------|\n", + "| Annual optimization | 8-12 typical days |\n", + "| Investment decisions | Use with two-stage optimization |\n", + "| Preserve extremes | Use `time_series_for_high_peaks` |\n", + "\n", + "### Next Steps\n", + "\n", + "- **[08a-Aggregation](08a-aggregation.ipynb)**: Other aggregation techniques (resampling, two-stage)\n", + "- **[08b-Rolling Horizon](08b-rolling-horizon.ipynb)**: Sequential optimization for long time series" + ] } ], "metadata": { From fe8739666e95dec497c2a2ab4b8bb04bf927882e Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 14 Dec 2025 15:38:37 +0100 Subject: [PATCH 011/191] Improve notebook --- docs/notebooks/08c-clustering.ipynb | 1673 ++++++++++++++------------- 1 file changed, 856 insertions(+), 817 deletions(-) diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb index 978cd7ed8..0cbbbc09c 100644 --- a/docs/notebooks/08c-clustering.ipynb +++ b/docs/notebooks/08c-clustering.ipynb @@ -11,10 +11,10 @@ "\n", "This notebook demonstrates:\n", "\n", - "- **Basic clustering**: Reduce a week/year to representative days\n", - "- **Compare clustering parameters**: See how data changes with different cluster counts\n", + "- **Basic clustering**: Reduce a month to representative days\n", + "- **Visualize clustering**: See how data changes with clustering\n", + "- **Compare parameters**: Trade-off between accuracy and 
speed\n", "- **Multi-period clustering**: Cluster multi-year investment studies\n", - "- **Multi-scenario clustering**: Cluster scenario-based analyses\n", "\n", "!!! note \"Requirements\"\n", " This notebook requires the `tsam` package: `pip install tsam`" @@ -30,18 +30,12 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 11, "id": "2", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T01:18:13.229482Z", - "start_time": "2025-12-14T01:18:06.726132Z" - }, - "execution": { - "iopub.execute_input": "2025-12-14T01:22:15.415309Z", - "iopub.status.busy": "2025-12-14T01:22:15.415198Z", - "iopub.status.idle": "2025-12-14T01:22:20.488460Z", - "shell.execute_reply": "2025-12-14T01:22:20.487726Z" + "end_time": "2025-12-14T14:35:42.824263Z", + "start_time": "2025-12-14T14:35:42.676726Z" } }, "outputs": [ @@ -51,7 +45,7 @@ "flixopt.config.CONFIG" ] }, - "execution_count": 1, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } @@ -61,6 +55,7 @@ "\n", "import numpy as np\n", "import pandas as pd\n", + "import plotly.express as px\n", "import plotly.graph_objects as go\n", "from plotly.subplots import make_subplots\n", "\n", @@ -74,25 +69,19 @@ "id": "3", "metadata": {}, "source": [ - "## Load FlowSystem from File\n", + "## Load Time Series Data\n", "\n", - "We'll use a pre-built FlowSystem with time-varying data:" + "We use real-world district heating data at 15-minute resolution (one month):" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 12, "id": "4", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T01:18:13.609412Z", - "start_time": "2025-12-14T01:18:13.290728Z" - }, - "execution": { - "iopub.execute_input": "2025-12-14T01:22:20.498521Z", - "iopub.status.busy": "2025-12-14T01:22:20.498053Z", - "iopub.status.idle": "2025-12-14T01:22:20.717894Z", - "shell.execute_reply": "2025-12-14T01:22:20.714997Z" + "end_time": "2025-12-14T14:35:42.938317Z", + "start_time": "2025-12-14T14:35:42.830680Z" } }, "outputs": [ @@ -100,514 +89,42 @@ "name": "stdout", "output_type": "stream", "text": [ - "Loaded FlowSystem: 168 timesteps (7 days at hourly resolution)\n" + "Timesteps: 2976 (31 days at 15-min resolution)\n", + "Heat demand: 122.2 - 266.2 MW\n", + "Electricity price: -3.3 - 72.6 €/MWh\n" ] } ], "source": [ - "# Load a simple FlowSystem with time-varying demand and prices\n", - "flow_system = fx.FlowSystem.from_netcdf('data/simple_system.nc4')\n", - "\n", - "timesteps = flow_system.timesteps\n", - "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 24:.0f} days at hourly resolution)')" - ] - }, - { - "cell_type": "markdown", - "id": "iwuyqrpxr", - "metadata": {}, - "source": [ - "## Visualizing the Clustering Effect\n", - "\n", - "Before optimizing, let's see how clustering transforms the time series data. We'll compare:\n", - "- **Original data**: 7 days × 96 timesteps = 672 timesteps\n", - "- **Clustered data**: 4 typical days, repeated to match original structure" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "guysdaf98es", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T01:18:14.900789Z", - "start_time": "2025-12-14T01:18:13.619481Z" - }, - "execution": { - "iopub.execute_input": "2025-12-14T01:22:20.727211Z", - "iopub.status.busy": "2025-12-14T01:22:20.726087Z", - "iopub.status.idle": "2025-12-14T01:22:22.468949Z", - "shell.execute_reply": "2025-12-14T01:22:22.467805Z" - } - }, - "outputs": [ - { - "data": { - "text/html": [ - "
\n", - "
" - ], - "text/plain": [ - "PlotResult(data= Size: 7kB\n", - "Dimensions: (time: 168, variable: 2)\n", - "Coordinates:\n", - " * time (time) datetime64[ns] 1kB 2024-01-15 ... 2024-01-21T23:00:00\n", - " * variable (variable) object 16B 'GasGrid(Gas)|costs|per_flow_hour' 'Off...\n", - "Data variables:\n", - " original (variable, time) float64 3kB 0.05 0.05 0.05 ... 20.0 20.0 24.48\n", - " aggregated (variable, time) float64 3kB 0.05 0.05 0.05 ... 20.77 20.0 22.24, figure=Figure({\n", - " 'data': [{'hovertemplate': ('variable=Original - GasGrid(Ga' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Original - GasGrid(Gas)|costs|per_flow_hour',\n", - " 'line': {'color': '#636EFA', 'dash': 'dash'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Original - GasGrid(Gas)|costs|per_flow_hour',\n", - " 'orientation': 'v',\n", - " 'showlegend': True,\n", - " 'type': 'scatter',\n", - " 'x': array(['2024-01-15T00:00:00.000000000', '2024-01-15T01:00:00.000000000',\n", - " '2024-01-15T02:00:00.000000000', '2024-01-15T03:00:00.000000000',\n", - " '2024-01-15T04:00:00.000000000', '2024-01-15T05:00:00.000000000',\n", - " '2024-01-15T06:00:00.000000000', '2024-01-15T07:00:00.000000000',\n", - " '2024-01-15T08:00:00.000000000', '2024-01-15T09:00:00.000000000',\n", - " '2024-01-15T10:00:00.000000000', '2024-01-15T11:00:00.000000000',\n", - " '2024-01-15T12:00:00.000000000', '2024-01-15T13:00:00.000000000',\n", - " '2024-01-15T14:00:00.000000000', '2024-01-15T15:00:00.000000000',\n", - " '2024-01-15T16:00:00.000000000', '2024-01-15T17:00:00.000000000',\n", - " '2024-01-15T18:00:00.000000000', '2024-01-15T19:00:00.000000000',\n", - " '2024-01-15T20:00:00.000000000', '2024-01-15T21:00:00.000000000',\n", - " '2024-01-15T22:00:00.000000000', '2024-01-15T23:00:00.000000000',\n", - " '2024-01-16T00:00:00.000000000', '2024-01-16T01:00:00.000000000',\n", - " '2024-01-16T02:00:00.000000000', '2024-01-16T03:00:00.000000000',\n", - " '2024-01-16T04:00:00.000000000', '2024-01-16T05:00:00.000000000',\n", - " '2024-01-16T06:00:00.000000000', '2024-01-16T07:00:00.000000000',\n", - " '2024-01-16T08:00:00.000000000', '2024-01-16T09:00:00.000000000',\n", - " '2024-01-16T10:00:00.000000000', '2024-01-16T11:00:00.000000000',\n", - " '2024-01-16T12:00:00.000000000', '2024-01-16T13:00:00.000000000',\n", - " '2024-01-16T14:00:00.000000000', '2024-01-16T15:00:00.000000000',\n", - " '2024-01-16T16:00:00.000000000', '2024-01-16T17:00:00.000000000',\n", - " '2024-01-16T18:00:00.000000000', '2024-01-16T19:00:00.000000000',\n", - " '2024-01-16T20:00:00.000000000', '2024-01-16T21:00:00.000000000',\n", - " '2024-01-16T22:00:00.000000000', '2024-01-16T23:00:00.000000000',\n", - " '2024-01-17T00:00:00.000000000', '2024-01-17T01:00:00.000000000',\n", - " '2024-01-17T02:00:00.000000000', '2024-01-17T03:00:00.000000000',\n", - " '2024-01-17T04:00:00.000000000', '2024-01-17T05:00:00.000000000',\n", - " '2024-01-17T06:00:00.000000000', '2024-01-17T07:00:00.000000000',\n", - " '2024-01-17T08:00:00.000000000', '2024-01-17T09:00:00.000000000',\n", - " '2024-01-17T10:00:00.000000000', '2024-01-17T11:00:00.000000000',\n", - " '2024-01-17T12:00:00.000000000', '2024-01-17T13:00:00.000000000',\n", - " '2024-01-17T14:00:00.000000000', '2024-01-17T15:00:00.000000000',\n", - " '2024-01-17T16:00:00.000000000', '2024-01-17T17:00:00.000000000',\n", - " '2024-01-17T18:00:00.000000000', '2024-01-17T19:00:00.000000000',\n", - " '2024-01-17T20:00:00.000000000', '2024-01-17T21:00:00.000000000',\n", - " '2024-01-17T22:00:00.000000000', '2024-01-17T23:00:00.000000000',\n", - " '2024-01-18T00:00:00.000000000', '2024-01-18T01:00:00.000000000',\n", - " '2024-01-18T02:00:00.000000000', '2024-01-18T03:00:00.000000000',\n", - " '2024-01-18T04:00:00.000000000', '2024-01-18T05:00:00.000000000',\n", - " '2024-01-18T06:00:00.000000000', '2024-01-18T07:00:00.000000000',\n", - " '2024-01-18T08:00:00.000000000', '2024-01-18T09:00:00.000000000',\n", - " '2024-01-18T10:00:00.000000000', '2024-01-18T11:00:00.000000000',\n", - " '2024-01-18T12:00:00.000000000', '2024-01-18T13:00:00.000000000',\n", - " 
'2024-01-18T14:00:00.000000000', '2024-01-18T15:00:00.000000000',\n", - " '2024-01-18T16:00:00.000000000', '2024-01-18T17:00:00.000000000',\n", - " '2024-01-18T18:00:00.000000000', '2024-01-18T19:00:00.000000000',\n", - " '2024-01-18T20:00:00.000000000', '2024-01-18T21:00:00.000000000',\n", - " '2024-01-18T22:00:00.000000000', '2024-01-18T23:00:00.000000000',\n", - " '2024-01-19T00:00:00.000000000', '2024-01-19T01:00:00.000000000',\n", - " '2024-01-19T02:00:00.000000000', '2024-01-19T03:00:00.000000000',\n", - " '2024-01-19T04:00:00.000000000', '2024-01-19T05:00:00.000000000',\n", - " '2024-01-19T06:00:00.000000000', '2024-01-19T07:00:00.000000000',\n", - " '2024-01-19T08:00:00.000000000', '2024-01-19T09:00:00.000000000',\n", - " '2024-01-19T10:00:00.000000000', '2024-01-19T11:00:00.000000000',\n", - " '2024-01-19T12:00:00.000000000', '2024-01-19T13:00:00.000000000',\n", - " '2024-01-19T14:00:00.000000000', '2024-01-19T15:00:00.000000000',\n", - " '2024-01-19T16:00:00.000000000', '2024-01-19T17:00:00.000000000',\n", - " '2024-01-19T18:00:00.000000000', '2024-01-19T19:00:00.000000000',\n", - " '2024-01-19T20:00:00.000000000', '2024-01-19T21:00:00.000000000',\n", - " '2024-01-19T22:00:00.000000000', '2024-01-19T23:00:00.000000000',\n", - " '2024-01-20T00:00:00.000000000', '2024-01-20T01:00:00.000000000',\n", - " '2024-01-20T02:00:00.000000000', '2024-01-20T03:00:00.000000000',\n", - " '2024-01-20T04:00:00.000000000', '2024-01-20T05:00:00.000000000',\n", - " '2024-01-20T06:00:00.000000000', '2024-01-20T07:00:00.000000000',\n", - " '2024-01-20T08:00:00.000000000', '2024-01-20T09:00:00.000000000',\n", - " '2024-01-20T10:00:00.000000000', '2024-01-20T11:00:00.000000000',\n", - " '2024-01-20T12:00:00.000000000', '2024-01-20T13:00:00.000000000',\n", - " '2024-01-20T14:00:00.000000000', '2024-01-20T15:00:00.000000000',\n", - " '2024-01-20T16:00:00.000000000', '2024-01-20T17:00:00.000000000',\n", - " '2024-01-20T18:00:00.000000000', '2024-01-20T19:00:00.000000000',\n", - " '2024-01-20T20:00:00.000000000', '2024-01-20T21:00:00.000000000',\n", - " '2024-01-20T22:00:00.000000000', '2024-01-20T23:00:00.000000000',\n", - " '2024-01-21T00:00:00.000000000', '2024-01-21T01:00:00.000000000',\n", - " '2024-01-21T02:00:00.000000000', '2024-01-21T03:00:00.000000000',\n", - " '2024-01-21T04:00:00.000000000', '2024-01-21T05:00:00.000000000',\n", - " '2024-01-21T06:00:00.000000000', '2024-01-21T07:00:00.000000000',\n", - " '2024-01-21T08:00:00.000000000', '2024-01-21T09:00:00.000000000',\n", - " '2024-01-21T10:00:00.000000000', '2024-01-21T11:00:00.000000000',\n", - " '2024-01-21T12:00:00.000000000', '2024-01-21T13:00:00.000000000',\n", - " '2024-01-21T14:00:00.000000000', '2024-01-21T15:00:00.000000000',\n", - " '2024-01-21T16:00:00.000000000', '2024-01-21T17:00:00.000000000',\n", - " '2024-01-21T18:00:00.000000000', '2024-01-21T19:00:00.000000000',\n", - " '2024-01-21T20:00:00.000000000', '2024-01-21T21:00:00.000000000',\n", - " '2024-01-21T22:00:00.000000000', '2024-01-21T23:00:00.000000000'],\n", - " dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('mpmZmZmZqT+amZmZmZmpP5qZmZmZma' ... 'SuR+F6tD97FK5H4Xq0P5qZmZmZmak/'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Original - Office(Hea' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Original - Office(Heat)|fixed_relative_profile',\n", - " 'line': {'color': '#EF553B', 'dash': 'dash'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Original - Office(Heat)|fixed_relative_profile',\n", - " 'orientation': 'v',\n", - " 'showlegend': True,\n", - " 'type': 'scatter',\n", - " 'x': array(['2024-01-15T00:00:00.000000000', '2024-01-15T01:00:00.000000000',\n", - " '2024-01-15T02:00:00.000000000', '2024-01-15T03:00:00.000000000',\n", - " '2024-01-15T04:00:00.000000000', '2024-01-15T05:00:00.000000000',\n", - " '2024-01-15T06:00:00.000000000', '2024-01-15T07:00:00.000000000',\n", - " '2024-01-15T08:00:00.000000000', '2024-01-15T09:00:00.000000000',\n", - " '2024-01-15T10:00:00.000000000', '2024-01-15T11:00:00.000000000',\n", - " '2024-01-15T12:00:00.000000000', '2024-01-15T13:00:00.000000000',\n", - " '2024-01-15T14:00:00.000000000', '2024-01-15T15:00:00.000000000',\n", - " '2024-01-15T16:00:00.000000000', '2024-01-15T17:00:00.000000000',\n", - " '2024-01-15T18:00:00.000000000', '2024-01-15T19:00:00.000000000',\n", - " '2024-01-15T20:00:00.000000000', '2024-01-15T21:00:00.000000000',\n", - " '2024-01-15T22:00:00.000000000', '2024-01-15T23:00:00.000000000',\n", - " '2024-01-16T00:00:00.000000000', '2024-01-16T01:00:00.000000000',\n", - " '2024-01-16T02:00:00.000000000', '2024-01-16T03:00:00.000000000',\n", - " '2024-01-16T04:00:00.000000000', '2024-01-16T05:00:00.000000000',\n", - " '2024-01-16T06:00:00.000000000', '2024-01-16T07:00:00.000000000',\n", - " '2024-01-16T08:00:00.000000000', '2024-01-16T09:00:00.000000000',\n", - " '2024-01-16T10:00:00.000000000', '2024-01-16T11:00:00.000000000',\n", - " '2024-01-16T12:00:00.000000000', '2024-01-16T13:00:00.000000000',\n", - " '2024-01-16T14:00:00.000000000', '2024-01-16T15:00:00.000000000',\n", - " '2024-01-16T16:00:00.000000000', '2024-01-16T17:00:00.000000000',\n", - " '2024-01-16T18:00:00.000000000', '2024-01-16T19:00:00.000000000',\n", - " '2024-01-16T20:00:00.000000000', '2024-01-16T21:00:00.000000000',\n", - " '2024-01-16T22:00:00.000000000', '2024-01-16T23:00:00.000000000',\n", - " '2024-01-17T00:00:00.000000000', '2024-01-17T01:00:00.000000000',\n", - " '2024-01-17T02:00:00.000000000', '2024-01-17T03:00:00.000000000',\n", - " '2024-01-17T04:00:00.000000000', '2024-01-17T05:00:00.000000000',\n", - " '2024-01-17T06:00:00.000000000', '2024-01-17T07:00:00.000000000',\n", - " '2024-01-17T08:00:00.000000000', '2024-01-17T09:00:00.000000000',\n", - " '2024-01-17T10:00:00.000000000', '2024-01-17T11:00:00.000000000',\n", - " '2024-01-17T12:00:00.000000000', '2024-01-17T13:00:00.000000000',\n", - " '2024-01-17T14:00:00.000000000', '2024-01-17T15:00:00.000000000',\n", - " '2024-01-17T16:00:00.000000000', '2024-01-17T17:00:00.000000000',\n", - " '2024-01-17T18:00:00.000000000', '2024-01-17T19:00:00.000000000',\n", - " '2024-01-17T20:00:00.000000000', '2024-01-17T21:00:00.000000000',\n", - " '2024-01-17T22:00:00.000000000', '2024-01-17T23:00:00.000000000',\n", - " '2024-01-18T00:00:00.000000000', '2024-01-18T01:00:00.000000000',\n", - " '2024-01-18T02:00:00.000000000', '2024-01-18T03:00:00.000000000',\n", - " '2024-01-18T04:00:00.000000000', '2024-01-18T05:00:00.000000000',\n", - " '2024-01-18T06:00:00.000000000', '2024-01-18T07:00:00.000000000',\n", - " '2024-01-18T08:00:00.000000000', '2024-01-18T09:00:00.000000000',\n", - " '2024-01-18T10:00:00.000000000', '2024-01-18T11:00:00.000000000',\n", - " '2024-01-18T12:00:00.000000000', '2024-01-18T13:00:00.000000000',\n", - 
" '2024-01-18T14:00:00.000000000', '2024-01-18T15:00:00.000000000',\n", - " '2024-01-18T16:00:00.000000000', '2024-01-18T17:00:00.000000000',\n", - " '2024-01-18T18:00:00.000000000', '2024-01-18T19:00:00.000000000',\n", - " '2024-01-18T20:00:00.000000000', '2024-01-18T21:00:00.000000000',\n", - " '2024-01-18T22:00:00.000000000', '2024-01-18T23:00:00.000000000',\n", - " '2024-01-19T00:00:00.000000000', '2024-01-19T01:00:00.000000000',\n", - " '2024-01-19T02:00:00.000000000', '2024-01-19T03:00:00.000000000',\n", - " '2024-01-19T04:00:00.000000000', '2024-01-19T05:00:00.000000000',\n", - " '2024-01-19T06:00:00.000000000', '2024-01-19T07:00:00.000000000',\n", - " '2024-01-19T08:00:00.000000000', '2024-01-19T09:00:00.000000000',\n", - " '2024-01-19T10:00:00.000000000', '2024-01-19T11:00:00.000000000',\n", - " '2024-01-19T12:00:00.000000000', '2024-01-19T13:00:00.000000000',\n", - " '2024-01-19T14:00:00.000000000', '2024-01-19T15:00:00.000000000',\n", - " '2024-01-19T16:00:00.000000000', '2024-01-19T17:00:00.000000000',\n", - " '2024-01-19T18:00:00.000000000', '2024-01-19T19:00:00.000000000',\n", - " '2024-01-19T20:00:00.000000000', '2024-01-19T21:00:00.000000000',\n", - " '2024-01-19T22:00:00.000000000', '2024-01-19T23:00:00.000000000',\n", - " '2024-01-20T00:00:00.000000000', '2024-01-20T01:00:00.000000000',\n", - " '2024-01-20T02:00:00.000000000', '2024-01-20T03:00:00.000000000',\n", - " '2024-01-20T04:00:00.000000000', '2024-01-20T05:00:00.000000000',\n", - " '2024-01-20T06:00:00.000000000', '2024-01-20T07:00:00.000000000',\n", - " '2024-01-20T08:00:00.000000000', '2024-01-20T09:00:00.000000000',\n", - " '2024-01-20T10:00:00.000000000', '2024-01-20T11:00:00.000000000',\n", - " '2024-01-20T12:00:00.000000000', '2024-01-20T13:00:00.000000000',\n", - " '2024-01-20T14:00:00.000000000', '2024-01-20T15:00:00.000000000',\n", - " '2024-01-20T16:00:00.000000000', '2024-01-20T17:00:00.000000000',\n", - " '2024-01-20T18:00:00.000000000', '2024-01-20T19:00:00.000000000',\n", - " '2024-01-20T20:00:00.000000000', '2024-01-20T21:00:00.000000000',\n", - " '2024-01-20T22:00:00.000000000', '2024-01-20T23:00:00.000000000',\n", - " '2024-01-21T00:00:00.000000000', '2024-01-21T01:00:00.000000000',\n", - " '2024-01-21T02:00:00.000000000', '2024-01-21T03:00:00.000000000',\n", - " '2024-01-21T04:00:00.000000000', '2024-01-21T05:00:00.000000000',\n", - " '2024-01-21T06:00:00.000000000', '2024-01-21T07:00:00.000000000',\n", - " '2024-01-21T08:00:00.000000000', '2024-01-21T09:00:00.000000000',\n", - " '2024-01-21T10:00:00.000000000', '2024-01-21T11:00:00.000000000',\n", - " '2024-01-21T12:00:00.000000000', '2024-01-21T13:00:00.000000000',\n", - " '2024-01-21T14:00:00.000000000', '2024-01-21T15:00:00.000000000',\n", - " '2024-01-21T16:00:00.000000000', '2024-01-21T17:00:00.000000000',\n", - " '2024-01-21T18:00:00.000000000', '2024-01-21T19:00:00.000000000',\n", - " '2024-01-21T20:00:00.000000000', '2024-01-21T21:00:00.000000000',\n", - " '2024-01-21T22:00:00.000000000', '2024-01-21T23:00:00.000000000'],\n", - " dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('5ZuWpeU9QEDMU8WNBU89QGDXQkqFnk' ... 'AAAAAANEAAAAAAAAA0QK7n4h/lezhA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Aggregated - GasGrid(' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Aggregated - GasGrid(Gas)|costs|per_flow_hour',\n", - " 'line': {'color': '#636EFA', 'dash': 'solid'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Aggregated - GasGrid(Gas)|costs|per_flow_hour',\n", - " 'orientation': 'v',\n", - " 'showlegend': True,\n", - " 'type': 'scatter',\n", - " 'x': array(['2024-01-15T00:00:00.000000000', '2024-01-15T01:00:00.000000000',\n", - " '2024-01-15T02:00:00.000000000', '2024-01-15T03:00:00.000000000',\n", - " '2024-01-15T04:00:00.000000000', '2024-01-15T05:00:00.000000000',\n", - " '2024-01-15T06:00:00.000000000', '2024-01-15T07:00:00.000000000',\n", - " '2024-01-15T08:00:00.000000000', '2024-01-15T09:00:00.000000000',\n", - " '2024-01-15T10:00:00.000000000', '2024-01-15T11:00:00.000000000',\n", - " '2024-01-15T12:00:00.000000000', '2024-01-15T13:00:00.000000000',\n", - " '2024-01-15T14:00:00.000000000', '2024-01-15T15:00:00.000000000',\n", - " '2024-01-15T16:00:00.000000000', '2024-01-15T17:00:00.000000000',\n", - " '2024-01-15T18:00:00.000000000', '2024-01-15T19:00:00.000000000',\n", - " '2024-01-15T20:00:00.000000000', '2024-01-15T21:00:00.000000000',\n", - " '2024-01-15T22:00:00.000000000', '2024-01-15T23:00:00.000000000',\n", - " '2024-01-16T00:00:00.000000000', '2024-01-16T01:00:00.000000000',\n", - " '2024-01-16T02:00:00.000000000', '2024-01-16T03:00:00.000000000',\n", - " '2024-01-16T04:00:00.000000000', '2024-01-16T05:00:00.000000000',\n", - " '2024-01-16T06:00:00.000000000', '2024-01-16T07:00:00.000000000',\n", - " '2024-01-16T08:00:00.000000000', '2024-01-16T09:00:00.000000000',\n", - " '2024-01-16T10:00:00.000000000', '2024-01-16T11:00:00.000000000',\n", - " '2024-01-16T12:00:00.000000000', '2024-01-16T13:00:00.000000000',\n", - " '2024-01-16T14:00:00.000000000', '2024-01-16T15:00:00.000000000',\n", - " '2024-01-16T16:00:00.000000000', '2024-01-16T17:00:00.000000000',\n", - " '2024-01-16T18:00:00.000000000', '2024-01-16T19:00:00.000000000',\n", - " '2024-01-16T20:00:00.000000000', '2024-01-16T21:00:00.000000000',\n", - " '2024-01-16T22:00:00.000000000', '2024-01-16T23:00:00.000000000',\n", - " '2024-01-17T00:00:00.000000000', '2024-01-17T01:00:00.000000000',\n", - " '2024-01-17T02:00:00.000000000', '2024-01-17T03:00:00.000000000',\n", - " '2024-01-17T04:00:00.000000000', '2024-01-17T05:00:00.000000000',\n", - " '2024-01-17T06:00:00.000000000', '2024-01-17T07:00:00.000000000',\n", - " '2024-01-17T08:00:00.000000000', '2024-01-17T09:00:00.000000000',\n", - " '2024-01-17T10:00:00.000000000', '2024-01-17T11:00:00.000000000',\n", - " '2024-01-17T12:00:00.000000000', '2024-01-17T13:00:00.000000000',\n", - " '2024-01-17T14:00:00.000000000', '2024-01-17T15:00:00.000000000',\n", - " '2024-01-17T16:00:00.000000000', '2024-01-17T17:00:00.000000000',\n", - " '2024-01-17T18:00:00.000000000', '2024-01-17T19:00:00.000000000',\n", - " '2024-01-17T20:00:00.000000000', '2024-01-17T21:00:00.000000000',\n", - " '2024-01-17T22:00:00.000000000', '2024-01-17T23:00:00.000000000',\n", - " '2024-01-18T00:00:00.000000000', '2024-01-18T01:00:00.000000000',\n", - " '2024-01-18T02:00:00.000000000', '2024-01-18T03:00:00.000000000',\n", - " '2024-01-18T04:00:00.000000000', '2024-01-18T05:00:00.000000000',\n", - " '2024-01-18T06:00:00.000000000', '2024-01-18T07:00:00.000000000',\n", - " '2024-01-18T08:00:00.000000000', '2024-01-18T09:00:00.000000000',\n", - " '2024-01-18T10:00:00.000000000', '2024-01-18T11:00:00.000000000',\n", - " '2024-01-18T12:00:00.000000000', '2024-01-18T13:00:00.000000000',\n", - 
" '2024-01-18T14:00:00.000000000', '2024-01-18T15:00:00.000000000',\n", - " '2024-01-18T16:00:00.000000000', '2024-01-18T17:00:00.000000000',\n", - " '2024-01-18T18:00:00.000000000', '2024-01-18T19:00:00.000000000',\n", - " '2024-01-18T20:00:00.000000000', '2024-01-18T21:00:00.000000000',\n", - " '2024-01-18T22:00:00.000000000', '2024-01-18T23:00:00.000000000',\n", - " '2024-01-19T00:00:00.000000000', '2024-01-19T01:00:00.000000000',\n", - " '2024-01-19T02:00:00.000000000', '2024-01-19T03:00:00.000000000',\n", - " '2024-01-19T04:00:00.000000000', '2024-01-19T05:00:00.000000000',\n", - " '2024-01-19T06:00:00.000000000', '2024-01-19T07:00:00.000000000',\n", - " '2024-01-19T08:00:00.000000000', '2024-01-19T09:00:00.000000000',\n", - " '2024-01-19T10:00:00.000000000', '2024-01-19T11:00:00.000000000',\n", - " '2024-01-19T12:00:00.000000000', '2024-01-19T13:00:00.000000000',\n", - " '2024-01-19T14:00:00.000000000', '2024-01-19T15:00:00.000000000',\n", - " '2024-01-19T16:00:00.000000000', '2024-01-19T17:00:00.000000000',\n", - " '2024-01-19T18:00:00.000000000', '2024-01-19T19:00:00.000000000',\n", - " '2024-01-19T20:00:00.000000000', '2024-01-19T21:00:00.000000000',\n", - " '2024-01-19T22:00:00.000000000', '2024-01-19T23:00:00.000000000',\n", - " '2024-01-20T00:00:00.000000000', '2024-01-20T01:00:00.000000000',\n", - " '2024-01-20T02:00:00.000000000', '2024-01-20T03:00:00.000000000',\n", - " '2024-01-20T04:00:00.000000000', '2024-01-20T05:00:00.000000000',\n", - " '2024-01-20T06:00:00.000000000', '2024-01-20T07:00:00.000000000',\n", - " '2024-01-20T08:00:00.000000000', '2024-01-20T09:00:00.000000000',\n", - " '2024-01-20T10:00:00.000000000', '2024-01-20T11:00:00.000000000',\n", - " '2024-01-20T12:00:00.000000000', '2024-01-20T13:00:00.000000000',\n", - " '2024-01-20T14:00:00.000000000', '2024-01-20T15:00:00.000000000',\n", - " '2024-01-20T16:00:00.000000000', '2024-01-20T17:00:00.000000000',\n", - " '2024-01-20T18:00:00.000000000', '2024-01-20T19:00:00.000000000',\n", - " '2024-01-20T20:00:00.000000000', '2024-01-20T21:00:00.000000000',\n", - " '2024-01-20T22:00:00.000000000', '2024-01-20T23:00:00.000000000',\n", - " '2024-01-21T00:00:00.000000000', '2024-01-21T01:00:00.000000000',\n", - " '2024-01-21T02:00:00.000000000', '2024-01-21T03:00:00.000000000',\n", - " '2024-01-21T04:00:00.000000000', '2024-01-21T05:00:00.000000000',\n", - " '2024-01-21T06:00:00.000000000', '2024-01-21T07:00:00.000000000',\n", - " '2024-01-21T08:00:00.000000000', '2024-01-21T09:00:00.000000000',\n", - " '2024-01-21T10:00:00.000000000', '2024-01-21T11:00:00.000000000',\n", - " '2024-01-21T12:00:00.000000000', '2024-01-21T13:00:00.000000000',\n", - " '2024-01-21T14:00:00.000000000', '2024-01-21T15:00:00.000000000',\n", - " '2024-01-21T16:00:00.000000000', '2024-01-21T17:00:00.000000000',\n", - " '2024-01-21T18:00:00.000000000', '2024-01-21T19:00:00.000000000',\n", - " '2024-01-21T20:00:00.000000000', '2024-01-21T21:00:00.000000000',\n", - " '2024-01-21T22:00:00.000000000', '2024-01-21T23:00:00.000000000'],\n", - " dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('mpmZmZmZqT+amZmZmZmpP5qZmZmZma' ... 'SuR+F6tD97FK5H4Xq0P5qZmZmZmak/'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Aggregated - Office(H' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Aggregated - Office(Heat)|fixed_relative_profile',\n", - " 'line': {'color': '#EF553B', 'dash': 'solid'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Aggregated - Office(Heat)|fixed_relative_profile',\n", - " 'orientation': 'v',\n", - " 'showlegend': True,\n", - " 'type': 'scatter',\n", - " 'x': array(['2024-01-15T00:00:00.000000000', '2024-01-15T01:00:00.000000000',\n", - " '2024-01-15T02:00:00.000000000', '2024-01-15T03:00:00.000000000',\n", - " '2024-01-15T04:00:00.000000000', '2024-01-15T05:00:00.000000000',\n", - " '2024-01-15T06:00:00.000000000', '2024-01-15T07:00:00.000000000',\n", - " '2024-01-15T08:00:00.000000000', '2024-01-15T09:00:00.000000000',\n", - " '2024-01-15T10:00:00.000000000', '2024-01-15T11:00:00.000000000',\n", - " '2024-01-15T12:00:00.000000000', '2024-01-15T13:00:00.000000000',\n", - " '2024-01-15T14:00:00.000000000', '2024-01-15T15:00:00.000000000',\n", - " '2024-01-15T16:00:00.000000000', '2024-01-15T17:00:00.000000000',\n", - " '2024-01-15T18:00:00.000000000', '2024-01-15T19:00:00.000000000',\n", - " '2024-01-15T20:00:00.000000000', '2024-01-15T21:00:00.000000000',\n", - " '2024-01-15T22:00:00.000000000', '2024-01-15T23:00:00.000000000',\n", - " '2024-01-16T00:00:00.000000000', '2024-01-16T01:00:00.000000000',\n", - " '2024-01-16T02:00:00.000000000', '2024-01-16T03:00:00.000000000',\n", - " '2024-01-16T04:00:00.000000000', '2024-01-16T05:00:00.000000000',\n", - " '2024-01-16T06:00:00.000000000', '2024-01-16T07:00:00.000000000',\n", - " '2024-01-16T08:00:00.000000000', '2024-01-16T09:00:00.000000000',\n", - " '2024-01-16T10:00:00.000000000', '2024-01-16T11:00:00.000000000',\n", - " '2024-01-16T12:00:00.000000000', '2024-01-16T13:00:00.000000000',\n", - " '2024-01-16T14:00:00.000000000', '2024-01-16T15:00:00.000000000',\n", - " '2024-01-16T16:00:00.000000000', '2024-01-16T17:00:00.000000000',\n", - " '2024-01-16T18:00:00.000000000', '2024-01-16T19:00:00.000000000',\n", - " '2024-01-16T20:00:00.000000000', '2024-01-16T21:00:00.000000000',\n", - " '2024-01-16T22:00:00.000000000', '2024-01-16T23:00:00.000000000',\n", - " '2024-01-17T00:00:00.000000000', '2024-01-17T01:00:00.000000000',\n", - " '2024-01-17T02:00:00.000000000', '2024-01-17T03:00:00.000000000',\n", - " '2024-01-17T04:00:00.000000000', '2024-01-17T05:00:00.000000000',\n", - " '2024-01-17T06:00:00.000000000', '2024-01-17T07:00:00.000000000',\n", - " '2024-01-17T08:00:00.000000000', '2024-01-17T09:00:00.000000000',\n", - " '2024-01-17T10:00:00.000000000', '2024-01-17T11:00:00.000000000',\n", - " '2024-01-17T12:00:00.000000000', '2024-01-17T13:00:00.000000000',\n", - " '2024-01-17T14:00:00.000000000', '2024-01-17T15:00:00.000000000',\n", - " '2024-01-17T16:00:00.000000000', '2024-01-17T17:00:00.000000000',\n", - " '2024-01-17T18:00:00.000000000', '2024-01-17T19:00:00.000000000',\n", - " '2024-01-17T20:00:00.000000000', '2024-01-17T21:00:00.000000000',\n", - " '2024-01-17T22:00:00.000000000', '2024-01-17T23:00:00.000000000',\n", - " '2024-01-18T00:00:00.000000000', '2024-01-18T01:00:00.000000000',\n", - " '2024-01-18T02:00:00.000000000', '2024-01-18T03:00:00.000000000',\n", - " '2024-01-18T04:00:00.000000000', '2024-01-18T05:00:00.000000000',\n", - " '2024-01-18T06:00:00.000000000', '2024-01-18T07:00:00.000000000',\n", - " '2024-01-18T08:00:00.000000000', '2024-01-18T09:00:00.000000000',\n", - " '2024-01-18T10:00:00.000000000', '2024-01-18T11:00:00.000000000',\n", - " '2024-01-18T12:00:00.000000000', 
'2024-01-18T13:00:00.000000000',\n", - " '2024-01-18T14:00:00.000000000', '2024-01-18T15:00:00.000000000',\n", - " '2024-01-18T16:00:00.000000000', '2024-01-18T17:00:00.000000000',\n", - " '2024-01-18T18:00:00.000000000', '2024-01-18T19:00:00.000000000',\n", - " '2024-01-18T20:00:00.000000000', '2024-01-18T21:00:00.000000000',\n", - " '2024-01-18T22:00:00.000000000', '2024-01-18T23:00:00.000000000',\n", - " '2024-01-19T00:00:00.000000000', '2024-01-19T01:00:00.000000000',\n", - " '2024-01-19T02:00:00.000000000', '2024-01-19T03:00:00.000000000',\n", - " '2024-01-19T04:00:00.000000000', '2024-01-19T05:00:00.000000000',\n", - " '2024-01-19T06:00:00.000000000', '2024-01-19T07:00:00.000000000',\n", - " '2024-01-19T08:00:00.000000000', '2024-01-19T09:00:00.000000000',\n", - " '2024-01-19T10:00:00.000000000', '2024-01-19T11:00:00.000000000',\n", - " '2024-01-19T12:00:00.000000000', '2024-01-19T13:00:00.000000000',\n", - " '2024-01-19T14:00:00.000000000', '2024-01-19T15:00:00.000000000',\n", - " '2024-01-19T16:00:00.000000000', '2024-01-19T17:00:00.000000000',\n", - " '2024-01-19T18:00:00.000000000', '2024-01-19T19:00:00.000000000',\n", - " '2024-01-19T20:00:00.000000000', '2024-01-19T21:00:00.000000000',\n", - " '2024-01-19T22:00:00.000000000', '2024-01-19T23:00:00.000000000',\n", - " '2024-01-20T00:00:00.000000000', '2024-01-20T01:00:00.000000000',\n", - " '2024-01-20T02:00:00.000000000', '2024-01-20T03:00:00.000000000',\n", - " '2024-01-20T04:00:00.000000000', '2024-01-20T05:00:00.000000000',\n", - " '2024-01-20T06:00:00.000000000', '2024-01-20T07:00:00.000000000',\n", - " '2024-01-20T08:00:00.000000000', '2024-01-20T09:00:00.000000000',\n", - " '2024-01-20T10:00:00.000000000', '2024-01-20T11:00:00.000000000',\n", - " '2024-01-20T12:00:00.000000000', '2024-01-20T13:00:00.000000000',\n", - " '2024-01-20T14:00:00.000000000', '2024-01-20T15:00:00.000000000',\n", - " '2024-01-20T16:00:00.000000000', '2024-01-20T17:00:00.000000000',\n", - " '2024-01-20T18:00:00.000000000', '2024-01-20T19:00:00.000000000',\n", - " '2024-01-20T20:00:00.000000000', '2024-01-20T21:00:00.000000000',\n", - " '2024-01-20T22:00:00.000000000', '2024-01-20T23:00:00.000000000',\n", - " '2024-01-21T00:00:00.000000000', '2024-01-21T01:00:00.000000000',\n", - " '2024-01-21T02:00:00.000000000', '2024-01-21T03:00:00.000000000',\n", - " '2024-01-21T04:00:00.000000000', '2024-01-21T05:00:00.000000000',\n", - " '2024-01-21T06:00:00.000000000', '2024-01-21T07:00:00.000000000',\n", - " '2024-01-21T08:00:00.000000000', '2024-01-21T09:00:00.000000000',\n", - " '2024-01-21T10:00:00.000000000', '2024-01-21T11:00:00.000000000',\n", - " '2024-01-21T12:00:00.000000000', '2024-01-21T13:00:00.000000000',\n", - " '2024-01-21T14:00:00.000000000', '2024-01-21T15:00:00.000000000',\n", - " '2024-01-21T16:00:00.000000000', '2024-01-21T17:00:00.000000000',\n", - " '2024-01-21T18:00:00.000000000', '2024-01-21T19:00:00.000000000',\n", - " '2024-01-21T20:00:00.000000000', '2024-01-21T21:00:00.000000000',\n", - " '2024-01-21T22:00:00.000000000', '2024-01-21T23:00:00.000000000'],\n", - " dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('5ZuWpeU9QEDMU8WNBU89QGDXQkqFnk' ... 
-     "          [figure data truncated: Plotly chart 'Original vs Aggregated Data (original = ---)';\n",
-     "           x-axis 'Time in h', y-axis 'Value'; original vs. clustered traces such as\n",
-     "           'Aggregated - Office(Heat)|fixed_relative_profile', 2024-01-15 to 2024-01-21]\n",
-     "}))"
-    ]
-   },
-   "execution_count": 3,
-   "metadata": {},
-   "output_type": "execute_result"
-  }
- ],
   "source": [
-    "# Cluster with 4 typical days\n",
-    "fs_clustered_demo = flow_system.transform.cluster(n_clusters=4, cluster_duration='1D')\n",
+    "# Load time series data (15-min resolution)\n",
+    "data = pd.read_csv('../../examples/resources/Zeitreihen2020.csv', index_col=0, parse_dates=True).sort_index()\n",
+    "data = data['2020-01-01':'2020-01-31 23:45:00']  # One month\n",
+    "data.index.name = 'time'\n",
     "\n",
-    "# Get the clustering object to access tsam results\n",
-    "clustering_info = fs_clustered_demo._clustering_info\n",
-    "clustering = clustering_info['clustering']\n",
+    "timesteps = data.index\n",
     "\n",
-    "# Plot original vs aggregated data\n",
-    "clustering.plot()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "id": "coxd0duq3nb",
-   "metadata": {},
-   "source": [
-    "## Comparing Different Clustering Parameters\n",
+    "# Extract profiles\n",
+    "electricity_demand = data['P_Netz/MW'].to_numpy()\n",
+    "heat_demand = data['Q_Netz/MW'].to_numpy()\n",
+    "electricity_price = data['Strompr.€/MWh'].to_numpy()\n",
+    "gas_price = data['Gaspr.€/MWh'].to_numpy()\n",
     "\n",
-    "Let's see how different numbers of clusters affect the data representation:"
+    "print(f'Timesteps: {len(timesteps)} ({len(timesteps) / 96:.0f} days at 15-min resolution)')\n",
+    "print(f'Heat demand: {heat_demand.min():.1f} - {heat_demand.max():.1f} MW')\n",
+    "print(f'Electricity price: {electricity_price.min():.1f} - {electricity_price.max():.1f} €/MWh')"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 4,
-   "id": "q2xt2juvyo",
+   "execution_count": 13,
+   "id": "5",
    "metadata": {
     "ExecuteTime": {
-     "end_time": "2025-12-14T01:18:17.555198Z",
-     "start_time": "2025-12-14T01:18:15.217343Z"
-    },
-    "execution": {
-     "iopub.execute_input": "2025-12-14T01:22:22.480243Z",
-     "iopub.status.busy": "2025-12-14T01:22:22.480054Z",
-     "iopub.status.idle": "2025-12-14T01:22:23.659162Z",
-     "shell.execute_reply": "2025-12-14T01:22:23.657338Z"
+     "end_time": "2025-12-14T14:35:43.057128Z",
+     "start_time": "2025-12-14T14:35:42.948041Z"
     }
    },
    "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Comparing: GasGrid(Gas)|costs|per_flow_hour\n"
-     ]
-    },
     {
      "data": {
       "text/html": [
        [output truncated: embedded plotly.js library bundle and interactive figure HTML omitted]
val=\"+JSON.stringify(e))};function zEt(e){if(e=String(e),!(e.length>100)){var t=/^((?:\\d+)?\\.?\\d+) *(milliseconds?|msecs?|ms|seconds?|secs?|s|minutes?|mins?|m|hours?|hrs?|h|days?|d|years?|yrs?|y)?$/i.exec(e);if(t){var r=parseFloat(t[1]),n=(t[2]||\"ms\").toLowerCase();switch(n){case\"years\":case\"year\":case\"yrs\":case\"yr\":case\"y\":return r*FEt;case\"days\":case\"day\":case\"d\":return r*TE;case\"hours\":case\"hour\":case\"hrs\":case\"hr\":case\"h\":return r*wE;case\"minutes\":case\"minute\":case\"mins\":case\"min\":case\"m\":return r*bE;case\"seconds\":case\"second\":case\"secs\":case\"sec\":case\"s\":return r*xE;case\"milliseconds\":case\"millisecond\":case\"msecs\":case\"msec\":case\"ms\":return r;default:return}}}}function OEt(e){return e>=TE?Math.round(e/TE)+\"d\":e>=wE?Math.round(e/wE)+\"h\":e>=bE?Math.round(e/bE)+\"m\":e>=xE?Math.round(e/xE)+\"s\":e+\"ms\"}function qEt(e){return iD(e,TE,\"day\")||iD(e,wE,\"hour\")||iD(e,bE,\"minute\")||iD(e,xE,\"second\")||e+\" ms\"}function iD(e,t,r){if(!(e{Lc=k4e.exports=nW.debug=nW.default=nW;Lc.coerce=GEt;Lc.disable=UEt;Lc.enable=NEt;Lc.enabled=VEt;Lc.humanize=E4e();Lc.names=[];Lc.skips=[];Lc.formatters={};var iW;function BEt(e){var t=0,r;for(r in e)t=(t<<5)-t+e.charCodeAt(r),t|=0;return Lc.colors[Math.abs(t)%Lc.colors.length]}function nW(e){function t(){if(t.enabled){var r=t,n=+new Date,i=n-(iW||n);r.diff=i,r.prev=iW,r.curr=n,iW=n;for(var a=new Array(arguments.length),o=0;o{lp=P4e.exports=C4e();lp.log=WEt;lp.formatArgs=jEt;lp.save=XEt;lp.load=L4e;lp.useColors=HEt;lp.storage=typeof chrome!=\"undefined\"&&typeof chrome.storage!=\"undefined\"?chrome.storage.local:ZEt();lp.colors=[\"lightseagreen\",\"forestgreen\",\"goldenrod\",\"dodgerblue\",\"darkorchid\",\"crimson\"];function HEt(){return typeof window!=\"undefined\"&&window.process&&window.process.type===\"renderer\"?!0:typeof document!=\"undefined\"&&document.documentElement&&document.documentElement.style&&document.documentElement.style.WebkitAppearance||typeof window!=\"undefined\"&&window.console&&(window.console.firebug||window.console.exception&&window.console.table)||typeof navigator!=\"undefined\"&&navigator.userAgent&&navigator.userAgent.toLowerCase().match(/firefox\\/(\\d+)/)&&parseInt(RegExp.$1,10)>=31||typeof navigator!=\"undefined\"&&navigator.userAgent&&navigator.userAgent.toLowerCase().match(/applewebkit\\/(\\d+)/)}lp.formatters.j=function(e){try{return JSON.stringify(e)}catch(t){return\"[UnexpectedJSONParseError]: \"+t.message}};function jEt(e){var t=this.useColors;if(e[0]=(t?\"%c\":\"\")+this.namespace+(t?\" %c\":\" \")+e[0]+(t?\"%c \":\" \")+\"+\"+lp.humanize(this.diff),!!t){var r=\"color: \"+this.color;e.splice(1,0,r,\"color: inherit\");var n=0,i=0;e[0].replace(/%[a-zA-Z%]/g,function(a){a!==\"%%\"&&(n++,a===\"%c\"&&(i=n))}),e.splice(i,0,r)}}function WEt(){return typeof console==\"object\"&&console.log&&Function.prototype.apply.call(console.log,console,arguments)}function XEt(e){try{e==null?lp.storage.removeItem(\"debug\"):lp.storage.debug=e}catch(t){}}function L4e(){var e;try{e=lp.storage.debug}catch(t){}return!e&&typeof process!=\"undefined\"&&\"env\"in process&&(e=process.env.DEBUG),e}lp.enable(L4e());function ZEt(){try{return window.localStorage}catch(e){}}});var N4e=ye((_dr,B4e)=>{var _A=sE(),V_=I4e()(\"stream-parser\");B4e.exports=KEt;var D4e=-1,nD=0,YEt=1,F4e=2;function KEt(e){var t=e&&typeof e._transform==\"function\",r=e&&typeof e._write==\"function\";if(!t&&!r)throw new Error(\"must pass a Writable or Transform stream in\");V_(\"extending Parser into 
stream\"),e._bytes=JEt,e._skipBytes=$Et,t&&(e._passthrough=QEt),t?e._transform=tkt:e._write=ekt}function AE(e){V_(\"initializing parser stream\"),e._parserBytesLeft=0,e._parserBuffers=[],e._parserBuffered=0,e._parserState=D4e,e._parserCallback=null,typeof e.push==\"function\"&&(e._parserOutput=e.push.bind(e)),e._parserInit=!0}function JEt(e,t){_A(!this._parserCallback,'there is already a \"callback\" set!'),_A(isFinite(e)&&e>0,'can only buffer a finite number of bytes > 0, got \"'+e+'\"'),this._parserInit||AE(this),V_(\"buffering %o bytes\",e),this._parserBytesLeft=e,this._parserCallback=t,this._parserState=nD}function $Et(e,t){_A(!this._parserCallback,'there is already a \"callback\" set!'),_A(e>0,'can only skip > 0 bytes, got \"'+e+'\"'),this._parserInit||AE(this),V_(\"skipping %o bytes\",e),this._parserBytesLeft=e,this._parserCallback=t,this._parserState=YEt}function QEt(e,t){_A(!this._parserCallback,'There is already a \"callback\" set!'),_A(e>0,'can only pass through > 0 bytes, got \"'+e+'\"'),this._parserInit||AE(this),V_(\"passing through %o bytes\",e),this._parserBytesLeft=e,this._parserCallback=t,this._parserState=F4e}function ekt(e,t,r){this._parserInit||AE(this),V_(\"write(%o bytes)\",e.length),typeof t==\"function\"&&(r=t),O4e(this,e,null,r)}function tkt(e,t,r){this._parserInit||AE(this),V_(\"transform(%o bytes)\",e.length),typeof t!=\"function\"&&(t=this._parserOutput),O4e(this,e,t,r)}function z4e(e,t,r,n){return e._parserBytesLeft<=0?n(new Error(\"got data but not currently parsing anything\")):t.length<=e._parserBytesLeft?function(){return R4e(e,t,r,n)}:function(){var i=t.slice(0,e._parserBytesLeft);return R4e(e,i,r,function(a){if(a)return n(a);if(t.length>i.length)return function(){return z4e(e,t.slice(i.length),r,n)}})}}function R4e(e,t,r,n){if(e._parserBytesLeft-=t.length,V_(\"%o bytes left for stream piece\",e._parserBytesLeft),e._parserState===nD?(e._parserBuffers.push(t),e._parserBuffered+=t.length):e._parserState===F4e&&r(t),e._parserBytesLeft===0){var i=e._parserCallback;if(i&&e._parserState===nD&&e._parserBuffers.length>1&&(t=Buffer.concat(e._parserBuffers,e._parserBuffered)),e._parserState!==nD&&(t=null),e._parserCallback=null,e._parserBuffered=0,e._parserState=D4e,e._parserBuffers.splice(0),i){var a=[];t&&a.push(t),r&&a.push(r);var o=i.length>a.length;o&&a.push(q4e(n));var s=i.apply(e,a);if(!o||n===s)return n}}else return n}var O4e=q4e(z4e);function q4e(e){return function(){for(var t=e.apply(this,arguments);typeof t==\"function\";)t=t();return t}}});var rc=ye(Hy=>{\"use strict\";var U4e=RSe().Transform,rkt=N4e();function SE(){U4e.call(this,{readableObjectMode:!0})}SE.prototype=Object.create(U4e.prototype);SE.prototype.constructor=SE;rkt(SE.prototype);Hy.ParserStream=SE;Hy.sliceEq=function(e,t,r){for(var n=t,i=0;i{\"use strict\";var xA=rc().readUInt16BE,oW=rc().readUInt32BE;function ME(e,t){if(e.length<4+t)return null;var r=oW(e,t);return e.length>4&15,n=e[4]&15,i=e[5]>>4&15,a=xA(e,6),o=8,s=0;sa.width||i.width===a.width&&i.height>a.height?i:a}),r=e.reduce(function(i,a){return i.height>a.height||i.height===a.height&&i.width>a.width?i:a}),n;return t.width>r.height||t.width===r.height&&t.height>r.width?n=t:n=r,n}oD.exports.readSizeFromMeta=function(e){var t={sizes:[],transforms:[],item_inf:{},item_loc:{}};if(skt(e,t),!!t.sizes.length){var r=lkt(t.sizes),n=1;t.transforms.forEach(function(a){var o={1:6,2:5,3:8,4:7,5:4,6:3,7:2,8:1},s={1:4,2:3,3:2,4:1,5:6,6:5,7:8,8:7};if(a.type===\"imir\"&&(a.value===0?n=s[n]:(n=s[n],n=o[n],n=o[n])),a.type===\"irot\")for(var l=0;l{\"use 
strict\";function sD(e,t){var r=new Error(e);return r.code=t,r}function ukt(e){try{return decodeURIComponent(escape(e))}catch(t){return e}}function jy(e,t,r){this.input=e.subarray(t,r),this.start=t;var n=String.fromCharCode.apply(null,this.input.subarray(0,4));if(n!==\"II*\\0\"&&n!==\"MM\\0*\")throw sD(\"invalid TIFF signature\",\"EBADDATA\");this.big_endian=n[0]===\"M\"}jy.prototype.each=function(e){this.aborted=!1;var t=this.read_uint32(4);for(this.ifds_to_read=[{id:0,offset:t}];this.ifds_to_read.length>0&&!this.aborted;){var r=this.ifds_to_read.shift();r.offset&&this.scan_ifd(r.id,r.offset,e)}};jy.prototype.read_uint16=function(e){var t=this.input;if(e+2>t.length)throw sD(\"unexpected EOF\",\"EBADDATA\");return this.big_endian?t[e]*256+t[e+1]:t[e]+t[e+1]*256};jy.prototype.read_uint32=function(e){var t=this.input;if(e+4>t.length)throw sD(\"unexpected EOF\",\"EBADDATA\");return this.big_endian?t[e]*16777216+t[e+1]*65536+t[e+2]*256+t[e+3]:t[e]+t[e+1]*256+t[e+2]*65536+t[e+3]*16777216};jy.prototype.is_subifd_link=function(e,t){return e===0&&t===34665||e===0&&t===34853||e===34665&&t===40965};jy.prototype.exif_format_length=function(e){switch(e){case 1:case 2:case 6:case 7:return 1;case 3:case 8:return 2;case 4:case 9:case 11:return 4;case 5:case 10:case 12:return 8;default:return 0}};jy.prototype.exif_format_read=function(e,t){var r;switch(e){case 1:case 2:return r=this.input[t],r;case 6:return r=this.input[t],r|(r&128)*33554430;case 3:return r=this.read_uint16(t),r;case 8:return r=this.read_uint16(t),r|(r&32768)*131070;case 4:return r=this.read_uint32(t),r;case 9:return r=this.read_uint32(t),r|0;case 5:case 10:case 11:case 12:return null;case 7:return null;default:return null}};jy.prototype.scan_ifd=function(e,t,r){var n=this.read_uint16(t);t+=2;for(var i=0;ithis.input.length)throw sD(\"unexpected EOF\",\"EBADDATA\");for(var h=[],d=c,v=0;v0&&(this.ifds_to_read.push({id:a,offset:h[0]}),f=!0);var b={is_big_endian:this.big_endian,ifd:e,tag:a,format:o,count:s,entry_offset:t+this.start,data_length:u,data_offset:c+this.start,value:h,is_subifd_link:f};if(r(b)===!1){this.aborted=!0;return}t+=12}e===0&&this.ifds_to_read.push({id:1,offset:this.read_uint32(t)})};sW.exports.ExifParser=jy;sW.exports.get_orientation=function(e){var t=0;try{return new jy(e,0,e.length).each(function(r){if(r.ifd===0&&r.tag===274&&Array.isArray(r.value))return t=r.value[0],!1}),t}catch(r){return-1}}});var H4e=ye((Tdr,G4e)=>{\"use strict\";var ckt=rc().str2arr,fkt=rc().sliceEq,hkt=rc().readUInt32BE,uD=V4e(),dkt=lD(),vkt=ckt(\"ftyp\");G4e.exports=function(e){if(fkt(e,4,vkt)){var t=uD.unbox(e,0);if(t){var r=uD.getMimeType(t.data);if(r){for(var n,i=t.end;;){var a=uD.unbox(e,i);if(!a)break;if(i=a.end,a.boxtype===\"mdat\")return;if(a.boxtype===\"meta\"){n=a.data;break}}if(n){var o=uD.readSizeFromMeta(n);if(o){var s={width:o.width,height:o.height,type:r.type,mime:r.mime,wUnits:\"px\",hUnits:\"px\"};if(o.variants.length>1&&(s.variants=o.variants),o.orientation&&(s.orientation=o.orientation),o.exif_location&&o.exif_location.offset+o.exif_location.length<=e.length){var l=hkt(e,o.exif_location.offset),u=e.slice(o.exif_location.offset+l+4,o.exif_location.offset+o.exif_location.length),c=dkt.get_orientation(u);c>0&&(s.orientation=c)}return s}}}}}}});var X4e=ye((Adr,W4e)=>{\"use strict\";var pkt=rc().str2arr,gkt=rc().sliceEq,j4e=rc().readUInt16LE,mkt=pkt(\"BM\");W4e.exports=function(e){if(!(e.length<26)&&gkt(e,0,mkt))return{width:j4e(e,18),height:j4e(e,22),type:\"bmp\",mime:\"image/bmp\",wUnits:\"px\",hUnits:\"px\"}}});var 
$4e=ye((Sdr,J4e)=>{\"use strict\";var K4e=rc().str2arr,Z4e=rc().sliceEq,Y4e=rc().readUInt16LE,ykt=K4e(\"GIF87a\"),_kt=K4e(\"GIF89a\");J4e.exports=function(e){if(!(e.length<10)&&!(!Z4e(e,0,ykt)&&!Z4e(e,0,_kt)))return{width:Y4e(e,6),height:Y4e(e,8),type:\"gif\",mime:\"image/gif\",wUnits:\"px\",hUnits:\"px\"}}});var tEe=ye((Mdr,eEe)=>{\"use strict\";var lW=rc().readUInt16LE,xkt=0,bkt=1,Q4e=16;eEe.exports=function(e){var t=lW(e,0),r=lW(e,2),n=lW(e,4);if(!(t!==xkt||r!==bkt||!n)){for(var i=[],a={width:0,height:0},o=0;oa.width||l>a.height)&&(a=u)}return{width:a.width,height:a.height,variants:i,type:\"ico\",mime:\"image/x-icon\",wUnits:\"px\",hUnits:\"px\"}}}});var iEe=ye((Edr,rEe)=>{\"use strict\";var uW=rc().readUInt16BE,wkt=rc().str2arr,Tkt=rc().sliceEq,Akt=lD(),Skt=wkt(\"Exif\\0\\0\");rEe.exports=function(e){if(!(e.length<2)&&!(e[0]!==255||e[1]!==216||e[2]!==255))for(var t=2;;){for(;;){if(e.length-t<2)return;if(e[t++]===255)break}for(var r=e[t++],n;r===255;)r=e[t++];if(208<=r&&r<=217||r===1)n=0;else if(192<=r&&r<=254){if(e.length-t<2)return;n=uW(e,t)-2,t+=2}else return;if(r===217||r===218)return;var i;if(r===225&&n>=10&&Tkt(e,t,Skt)&&(i=Akt.get_orientation(e.slice(t+6,t+n))),n>=5&&192<=r&&r<=207&&r!==196&&r!==200&&r!==204){if(e.length-t0&&(a.orientation=i),a}t+=n}}});var lEe=ye((kdr,sEe)=>{\"use strict\";var oEe=rc().str2arr,nEe=rc().sliceEq,aEe=rc().readUInt32BE,Mkt=oEe(`\\x89PNG\\r\n", - "\u001a\n", + "\u001A\n", "`),Ekt=oEe(\"IHDR\");sEe.exports=function(e){if(!(e.length<24)&&nEe(e,0,Mkt)&&nEe(e,12,Ekt))return{width:aEe(e,16),height:aEe(e,20),type:\"png\",mime:\"image/png\",wUnits:\"px\",hUnits:\"px\"}}});var fEe=ye((Cdr,cEe)=>{\"use strict\";var kkt=rc().str2arr,Ckt=rc().sliceEq,uEe=rc().readUInt32BE,Lkt=kkt(\"8BPS\\0\u0001\");cEe.exports=function(e){if(!(e.length<22)&&Ckt(e,0,Lkt))return{width:uEe(e,18),height:uEe(e,14),type:\"psd\",mime:\"image/vnd.adobe.photoshop\",wUnits:\"px\",hUnits:\"px\"}}});var vEe=ye((Ldr,dEe)=>{\"use strict\";function Pkt(e){return e===32||e===9||e===13||e===10}function bA(e){return typeof e==\"number\"&&isFinite(e)&&e>0}function Ikt(e){var t=0,r=e.length;for(e[0]===239&&e[1]===187&&e[2]===191&&(t=3);t]*>/,Dkt=/^<([-_.:a-zA-Z0-9]+:)?svg\\s/,Fkt=/[^-]\\bwidth=\"([^%]+?)\"|[^-]\\bwidth='([^%]+?)'/,zkt=/\\bheight=\"([^%]+?)\"|\\bheight='([^%]+?)'/,Okt=/\\bview[bB]ox=\"(.+?)\"|\\bview[bB]ox='(.+?)'/,hEe=/in$|mm$|cm$|pt$|pc$|px$|em$|ex$/;function qkt(e){var t=e.match(Fkt),r=e.match(zkt),n=e.match(Okt);return{width:t&&(t[1]||t[2]),height:r&&(r[1]||r[2]),viewbox:n&&(n[1]||n[2])}}function Um(e){return hEe.test(e)?e.match(hEe)[0]:\"px\"}dEe.exports=function(e){if(Ikt(e)){for(var t=\"\",r=0;r{\"use strict\";var mEe=rc().str2arr,pEe=rc().sliceEq,Bkt=rc().readUInt16LE,Nkt=rc().readUInt16BE,Ukt=rc().readUInt32LE,Vkt=rc().readUInt32BE,Gkt=mEe(\"II*\\0\"),Hkt=mEe(\"MM\\0*\");function cD(e,t,r){return r?Nkt(e,t):Bkt(e,t)}function cW(e,t,r){return r?Vkt(e,t):Ukt(e,t)}function gEe(e,t,r){var n=cD(e,t+2,r),i=cW(e,t+4,r);return i!==1||n!==3&&n!==4?null:n===3?cD(e,t+8,r):cW(e,t+8,r)}yEe.exports=function(e){if(!(e.length<8)&&!(!pEe(e,0,Gkt)&&!pEe(e,0,Hkt))){var t=e[0]===77,r=cW(e,4,t)-8;if(!(r<0)){var n=r+8;if(!(e.length-n<2)){var i=cD(e,n+0,t)*12;if(!(i<=0)&&(n+=2,!(e.length-n{\"use strict\";var wEe=rc().str2arr,xEe=rc().sliceEq,bEe=rc().readUInt16LE,fW=rc().readUInt32LE,jkt=lD(),Wkt=wEe(\"RIFF\"),Xkt=wEe(\"WEBP\");function 
Zkt(e,t){if(!(e[t+3]!==157||e[t+4]!==1||e[t+5]!==42))return{width:bEe(e,t+6)&16383,height:bEe(e,t+8)&16383,type:\"webp\",mime:\"image/webp\",wUnits:\"px\",hUnits:\"px\"}}function Ykt(e,t){if(e[t]===47){var r=fW(e,t+1);return{width:(r&16383)+1,height:(r>>14&16383)+1,type:\"webp\",mime:\"image/webp\",wUnits:\"px\",hUnits:\"px\"}}}function Kkt(e,t){return{width:(e[t+6]<<16|e[t+5]<<8|e[t+4])+1,height:(e[t+9]<e.length)){for(;t+8=10?r=r||Zkt(e,t+8):a===\"VP8L\"&&o>=9?r=r||Ykt(e,t+8):a===\"VP8X\"&&o>=10?r=r||Kkt(e,t+8):a===\"EXIF\"&&(n=jkt.get_orientation(e.slice(t+8,t+8+o)),t=1/0),t+=8+o}if(r)return n>0&&(r.orientation=n),r}}}});var MEe=ye((Rdr,SEe)=>{\"use strict\";SEe.exports={avif:H4e(),bmp:X4e(),gif:$4e(),ico:tEe(),jpeg:iEe(),png:lEe(),psd:fEe(),svg:vEe(),tiff:_Ee(),webp:AEe()}});var EEe=ye((Ddr,dW)=>{\"use strict\";var hW=MEe();function Jkt(e){for(var t=Object.keys(hW),r=0;r{\"use strict\";var $kt=EEe(),Qkt=Py().IMAGE_URL_PREFIX,eCt=c2().Buffer;kEe.getImageSize=function(e){var t=e.replace(Qkt,\"\"),r=new eCt(t,\"base64\");return $kt(r)}});var IEe=ye((zdr,PEe)=>{\"use strict\";var LEe=Dr(),tCt=ZT(),rCt=Eo(),fD=ho(),iCt=Dr().maxRowLength,nCt=CEe().getImageSize;PEe.exports=function(t,r){var n,i;if(r._hasZ)n=r.z.length,i=iCt(r.z);else if(r._hasSource){var a=nCt(r.source);n=a.height,i=a.width}var o=fD.getFromId(t,r.xaxis||\"x\"),s=fD.getFromId(t,r.yaxis||\"y\"),l=o.d2c(r.x0)-r.dx/2,u=s.d2c(r.y0)-r.dy/2,c,f=[l,l+i*r.dx],h=[u,u+n*r.dy];if(o&&o.type===\"log\")for(c=0;c{\"use strict\";var lCt=Oa(),A2=Dr(),REe=A2.strTranslate,uCt=Wp(),cCt=ZT(),fCt=QV(),hCt=f8().STYLE;DEe.exports=function(t,r,n,i){var a=r.xaxis,o=r.yaxis,s=!t._context._exportedPlot&&fCt();A2.makeTraceGroups(i,n,\"im\").each(function(l){var u=lCt.select(this),c=l[0],f=c.trace,h=(f.zsmooth===\"fast\"||f.zsmooth===!1&&s)&&!f._hasZ&&f._hasSource&&a.type===\"linear\"&&o.type===\"linear\";f._realImage=h;var d=c.z,v=c.x0,_=c.y0,b=c.w,p=c.h,k=f.dx,E=f.dy,S,L,x,C,M,g;for(g=0;S===void 0&&g0;)L=a.c2p(v+g*k),g--;for(g=0;C===void 0&&g0;)M=o.c2p(_+g*E),g--;if(Lj[0];if(re||oe){var _e=S+T/2,Ee=C+z/2;H+=\"transform:\"+REe(_e+\"px\",Ee+\"px\")+\"scale(\"+(re?-1:1)+\",\"+(oe?-1:1)+\")\"+REe(-_e+\"px\",-Ee+\"px\")+\";\"}}Z.attr(\"style\",H);var Ce=new Promise(function(me){if(f._hasZ)me();else if(f._hasSource)if(f._canvas&&f._canvas.el.width===b&&f._canvas.el.height===p&&f._canvas.source===f.source)me();else{var ie=document.createElement(\"canvas\");ie.width=b,ie.height=p;var Se=ie.getContext(\"2d\",{willReadFrequently:!0});f._image=f._image||new Image;var Le=f._image;Le.onload=function(){Se.drawImage(Le,0,0),f._canvas={el:ie,source:f.source},me()},Le.setAttribute(\"src\",f.source)}}).then(function(){var me,ie;if(f._hasZ)ie=G(function(Ae,Fe){var Pe=d[Fe][Ae];return A2.isTypedArray(Pe)&&(Pe=Array.from(Pe)),Pe}),me=ie.toDataURL(\"image/png\");else if(f._hasSource)if(h)me=f.source;else{var Se=f._canvas.el.getContext(\"2d\",{willReadFrequently:!0}),Le=Se.getImageData(0,0,b,p).data;ie=G(function(Ae,Fe){var Pe=4*(Fe*b+Ae);return[Le[Pe],Le[Pe+1],Le[Pe+2],Le[Pe+3]]}),me=ie.toDataURL(\"image/png\")}Z.attr({\"xlink:href\":me,height:z,width:T,x:S,y:C})});t._promises.push(Ce)})}});var OEe=ye((qdr,zEe)=>{\"use strict\";var dCt=Oa();zEe.exports=function(t){dCt.select(t).selectAll(\".im image\").style(\"opacity\",function(r){return r[0].trace.opacity})}});var UEe=ye((Bdr,NEe)=>{\"use strict\";var qEe=vf(),BEe=Dr(),hD=BEe.isArrayOrTypedArray,vCt=ZT();NEe.exports=function(t,r,n){var 
i=t.cd[0],a=i.trace,o=t.xa,s=t.ya;if(!(qEe.inbox(r-i.x0,r-(i.x0+i.w*a.dx),0)>0||qEe.inbox(n-i.y0,n-(i.y0+i.h*a.dy),0)>0)){var l=Math.floor((r-i.x0)/a.dx),u=Math.floor(Math.abs(n-i.y0)/a.dy),c;if(a._hasZ?c=i.z[u][l]:a._hasSource&&(c=a._canvas.el.getContext(\"2d\",{willReadFrequently:!0}).getImageData(l,u,1,1).data),!!c){var f=i.hi||a.hoverinfo,h;if(f){var d=f.split(\"+\");d.indexOf(\"all\")!==-1&&(d=[\"color\"]),d.indexOf(\"color\")!==-1&&(h=!0)}var v=vCt.colormodel[a.colormodel],_=v.colormodel||a.colormodel,b=_.length,p=a._scaler(c),k=v.suffix,E=[];(a.hovertemplate||h)&&(E.push(\"[\"+[p[0]+k[0],p[1]+k[1],p[2]+k[2]].join(\", \")),b===4&&E.push(\", \"+p[3]+k[3]),E.push(\"]\"),E=E.join(\"\"),t.extraText=_.toUpperCase()+\": \"+E);var S;hD(a.hovertext)&&hD(a.hovertext[u])?S=a.hovertext[u][l]:hD(a.text)&&hD(a.text[u])&&(S=a.text[u][l]);var L=s.c2p(i.y0+(u+.5)*a.dy),x=i.x0+(l+.5)*a.dx,C=i.y0+(u+.5)*a.dy,M=\"[\"+c.slice(0,a.colormodel.length).join(\", \")+\"]\";return[BEe.extendFlat(t,{index:[u,l],x0:o.c2p(i.x0+l*a.dx),x1:o.c2p(i.x0+(l+1)*a.dx),y0:L,y1:L,color:p,xVal:x,xLabelVal:x,yVal:C,yLabelVal:C,zLabelVal:M,text:S,hovertemplateLabels:{zLabel:M,colorLabel:E,\"color[0]Label\":p[0]+k[0],\"color[1]Label\":p[1]+k[1],\"color[2]Label\":p[2]+k[2],\"color[3]Label\":p[3]+k[3]}})]}}}});var GEe=ye((Ndr,VEe)=>{\"use strict\";VEe.exports=function(t,r){return\"xVal\"in r&&(t.x=r.xVal),\"yVal\"in r&&(t.y=r.yVal),r.xa&&(t.xaxis=r.xa),r.ya&&(t.yaxis=r.ya),t.color=r.color,t.colormodel=r.trace.colormodel,t.z||(t.z=r.color),t}});var jEe=ye((Udr,HEe)=>{\"use strict\";HEe.exports={attributes:uH(),supplyDefaults:U3e(),calc:IEe(),plot:FEe(),style:OEe(),hoverPoints:UEe(),eventData:GEe(),moduleType:\"trace\",name:\"image\",basePlotModule:ph(),categories:[\"cartesian\",\"svg\",\"2dMap\",\"noSortingByValue\"],animatable:!1,meta:{}}});var XEe=ye((Vdr,WEe)=>{\"use strict\";WEe.exports=jEe()});var S2=ye((Gdr,YEe)=>{\"use strict\";var 
pCt=Gl(),gCt=Cc().attributes,mCt=ec(),yCt=Lh(),{hovertemplateAttrs:_Ct,texttemplateAttrs:xCt,templatefallbackAttrs:ZEe}=Ll(),EE=Ao().extendFlat,bCt=Pd().pattern,dD=mCt({editType:\"plot\",arrayOk:!0,colorEditType:\"plot\"});YEe.exports={labels:{valType:\"data_array\",editType:\"calc\"},label0:{valType:\"number\",dflt:0,editType:\"calc\"},dlabel:{valType:\"number\",dflt:1,editType:\"calc\"},values:{valType:\"data_array\",editType:\"calc\"},marker:{colors:{valType:\"data_array\",editType:\"calc\"},line:{color:{valType:\"color\",dflt:yCt.defaultLine,arrayOk:!0,editType:\"style\"},width:{valType:\"number\",min:0,dflt:0,arrayOk:!0,editType:\"style\"},editType:\"calc\"},pattern:bCt,editType:\"calc\"},text:{valType:\"data_array\",editType:\"plot\"},hovertext:{valType:\"string\",dflt:\"\",arrayOk:!0,editType:\"style\"},scalegroup:{valType:\"string\",dflt:\"\",editType:\"calc\"},textinfo:{valType:\"flaglist\",flags:[\"label\",\"text\",\"value\",\"percent\"],extras:[\"none\"],editType:\"calc\"},hoverinfo:EE({},pCt.hoverinfo,{flags:[\"label\",\"text\",\"value\",\"percent\",\"name\"]}),hovertemplate:_Ct({},{keys:[\"label\",\"color\",\"value\",\"percent\",\"text\"]}),hovertemplatefallback:ZEe(),texttemplate:xCt({editType:\"plot\"},{keys:[\"label\",\"color\",\"value\",\"percent\",\"text\"]}),texttemplatefallback:ZEe({editType:\"plot\"}),textposition:{valType:\"enumerated\",values:[\"inside\",\"outside\",\"auto\",\"none\"],dflt:\"auto\",arrayOk:!0,editType:\"plot\"},textfont:EE({},dD,{}),insidetextorientation:{valType:\"enumerated\",values:[\"horizontal\",\"radial\",\"tangential\",\"auto\"],dflt:\"auto\",editType:\"plot\"},insidetextfont:EE({},dD,{}),outsidetextfont:EE({},dD,{}),automargin:{valType:\"boolean\",dflt:!1,editType:\"plot\"},title:{text:{valType:\"string\",dflt:\"\",editType:\"plot\"},font:EE({},dD,{}),position:{valType:\"enumerated\",values:[\"top left\",\"top center\",\"top right\",\"middle center\",\"bottom left\",\"bottom center\",\"bottom right\"],editType:\"plot\"},editType:\"plot\"},domain:gCt({name:\"pie\",trace:!0,editType:\"calc\"}),hole:{valType:\"number\",min:0,max:1,dflt:0,editType:\"calc\"},sort:{valType:\"boolean\",dflt:!0,editType:\"calc\"},direction:{valType:\"enumerated\",values:[\"clockwise\",\"counterclockwise\"],dflt:\"counterclockwise\",editType:\"calc\"},rotation:{valType:\"angle\",dflt:0,editType:\"calc\"},pull:{valType:\"number\",min:0,max:1,dflt:0,arrayOk:!0,editType:\"calc\"}}});var M2=ye((Hdr,$Ee)=>{\"use strict\";var wCt=Eo(),kE=Dr(),TCt=S2(),ACt=Cc().defaults,SCt=r0().handleText,MCt=Dr().coercePattern;function KEe(e,t){var r=kE.isArrayOrTypedArray(e),n=kE.isArrayOrTypedArray(t),i=Math.min(r?e.length:1/0,n?t.length:1/0);if(isFinite(i)||(i=0),i&&n){for(var a,o=0;o0){a=!0;break}}a||(i=0)}return{hasLabels:r,hasValues:n,len:i}}function JEe(e,t,r,n,i){var a=n(\"marker.line.width\");a&&n(\"marker.line.color\",i?void 0:r.paper_bgcolor);var o=n(\"marker.colors\");MCt(n,\"marker.pattern\",o),e.marker&&!t.marker.pattern.fgcolor&&(t.marker.pattern.fgcolor=e.marker.colors),t.marker.pattern.bgcolor||(t.marker.pattern.bgcolor=r.paper_bgcolor)}function ECt(e,t,r,n){function i(k,E){return kE.coerce(e,t,TCt,k,E)}var a=i(\"labels\"),o=i(\"values\"),s=KEe(a,o),l=s.len;if(t._hasLabels=s.hasLabels,t._hasValues=s.hasValues,!t._hasLabels&&t._hasValues&&(i(\"label0\"),i(\"dlabel\")),!l){t.visible=!1;return}t._length=l,JEe(e,t,n,i,!0),i(\"scalegroup\");var u=i(\"text\"),c=i(\"texttemplate\");i(\"texttemplatefallback\");var 
f;if(c||(f=i(\"textinfo\",kE.isArrayOrTypedArray(u)?\"text+percent\":\"percent\")),i(\"hovertext\"),i(\"hovertemplate\"),i(\"hovertemplatefallback\"),c||f&&f!==\"none\"){var h=i(\"textposition\");SCt(e,t,n,i,h,{moduleHasSelected:!1,moduleHasUnselected:!1,moduleHasConstrain:!1,moduleHasCliponaxis:!1,moduleHasTextangle:!1,moduleHasInsideanchor:!1});var d=Array.isArray(h)||h===\"auto\",v=d||h===\"outside\";v&&i(\"automargin\"),(h===\"inside\"||h===\"auto\"||Array.isArray(h))&&i(\"insidetextorientation\")}else f===\"none\"&&i(\"textposition\",\"none\");ACt(t,n,i);var _=i(\"hole\"),b=i(\"title.text\");if(b){var p=i(\"title.position\",_?\"middle center\":\"top center\");!_&&p===\"middle center\"&&(t.title.position=\"top center\"),kE.coerceFont(i,\"title.font\",n.font)}i(\"sort\"),i(\"direction\"),i(\"rotation\"),i(\"pull\")}$Ee.exports={handleLabelsAndValues:KEe,handleMarkerDefaults:JEe,supplyDefaults:ECt}});var vD=ye((jdr,QEe)=>{\"use strict\";QEe.exports={hiddenlabels:{valType:\"data_array\",editType:\"calc\"},piecolorway:{valType:\"colorlist\",editType:\"calc\"},extendpiecolors:{valType:\"boolean\",dflt:!0,editType:\"calc\"}}});var tke=ye((Wdr,eke)=>{\"use strict\";var kCt=Dr(),CCt=vD();eke.exports=function(t,r){function n(i,a){return kCt.coerce(t,r,CCt,i,a)}n(\"hiddenlabels\"),n(\"piecolorway\",r.colorway),n(\"extendpiecolors\")}});var wA=ye((Xdr,nke)=>{\"use strict\";var LCt=Eo(),vW=cd(),PCt=ka(),ICt={};function RCt(e,t){var r=[],n=e._fullLayout,i=n.hiddenlabels||[],a=t.labels,o=t.marker.colors||[],s=t.values,l=t._length,u=t._hasValues&&l,c,f;if(t.dlabel)for(a=new Array(l),c=0;c=0});var S=t.type===\"funnelarea\"?_:t.sort;return S&&r.sort(function(L,x){return x.v-L.v}),r[0]&&(r[0].vTotal=v),r}function rke(e){return function(r,n){return!r||(r=vW(r),!r.isValid())?!1:(r=PCt.addOpacity(r,r.getAlpha()),e[n]||(e[n]=r),r)}}function DCt(e,t){var r=(t||{}).type;r||(r=\"pie\");var n=e._fullLayout,i=e.calcdata,a=n[r+\"colorway\"],o=n[\"_\"+r+\"colormap\"];n[\"extend\"+r+\"colors\"]&&(a=ike(a,ICt));for(var s=0,l=0;l{\"use strict\";var FCt=ip().appendArrayMultiPointValues;ake.exports=function(t,r){var n={curveNumber:r.index,pointNumbers:t.pts,data:r._input,fullData:r,label:t.label,color:t.color,value:t.v,percent:t.percent,text:t.text,bbox:t.bbox,v:t.v};return t.pts.length===1&&(n.pointNumber=n.i=t.pts[0]),FCt(n,r,t.pts),r.type===\"funnelarea\"&&(delete n.v,delete n.i),n}});var yD=ye((Ydr,Eke)=>{\"use strict\";var Fp=Oa(),zCt=Mc(),pD=vf(),hke=ka(),Wy=So(),rv=Dr(),OCt=rv.strScale,ske=rv.strTranslate,pW=ru(),dke=bv(),qCt=dke.recordMinTextSize,BCt=dke.clearMinTextSize,vke=e2().TEXTPAD,ns=l_(),gD=oke(),lke=Dr().isValidTextValue;function NCt(e,t){var r=e._context.staticPlot,n=e._fullLayout,i=n._size;BCt(\"pie\",n),mke(t,e),Ake(t,i);var a=rv.makeTraceGroups(n._pielayer,t,\"trace\").each(function(o){var s=Fp.select(this),l=o[0],u=l.trace;YCt(o),s.attr(\"stroke-linejoin\",\"round\"),s.each(function(){var c=Fp.select(this).selectAll(\"g.slice\").data(o);c.enter().append(\"g\").classed(\"slice\",!0),c.exit().remove();var f=[[[],[]],[[],[]]],h=!1;c.each(function(S,L){if(S.hidden){Fp.select(this).selectAll(\"path,g\").remove();return}S.pointNumber=S.i,S.curveNumber=u.index,f[S.pxmid[1]<0?0:1][S.pxmid[0]<0?0:1].push(S);var x=l.cx,C=l.cy,M=Fp.select(this),g=M.selectAll(\"path.surface\").data([S]);if(g.enter().append(\"path\").classed(\"surface\",!0).style({\"pointer-events\":r?\"none\":\"all\"}),M.call(pke,e,o),u.pull){var 
P=+ns.castOption(u.pull,S.pts)||0;P>0&&(x+=P*S.pxmid[0],C+=P*S.pxmid[1])}S.cxFinal=x,S.cyFinal=C;function T(N,j,re,oe){var _e=oe*(j[0]-N[0]),Ee=oe*(j[1]-N[1]);return\"a\"+oe*l.r+\",\"+oe*l.r+\" 0 \"+S.largeArc+(re?\" 1 \":\" 0 \")+_e+\",\"+Ee}var z=u.hole;if(S.v===l.vTotal){var O=\"M\"+(x+S.px0[0])+\",\"+(C+S.px0[1])+T(S.px0,S.pxmid,!0,1)+T(S.pxmid,S.px0,!0,1)+\"Z\";z?g.attr(\"d\",\"M\"+(x+z*S.px0[0])+\",\"+(C+z*S.px0[1])+T(S.px0,S.pxmid,!1,z)+T(S.pxmid,S.px0,!1,z)+\"Z\"+O):g.attr(\"d\",O)}else{var V=T(S.px0,S.px1,!0,1);if(z){var G=1-z;g.attr(\"d\",\"M\"+(x+z*S.px1[0])+\",\"+(C+z*S.px1[1])+T(S.px1,S.px0,!1,z)+\"l\"+G*S.px0[0]+\",\"+G*S.px0[1]+V+\"Z\")}else g.attr(\"d\",\"M\"+x+\",\"+C+\"l\"+S.px0[0]+\",\"+S.px0[1]+V+\"Z\")}Ske(e,S,l);var Z=ns.castOption(u.textposition,S.pts),H=M.selectAll(\"g.slicetext\").data(S.text&&Z!==\"none\"?[0]:[]);H.enter().append(\"g\").classed(\"slicetext\",!0),H.exit().remove(),H.each(function(){var N=rv.ensureSingle(Fp.select(this),\"text\",\"\",function(ie){ie.attr(\"data-notex\",1)}),j=rv.ensureUniformFontSize(e,Z===\"outside\"?VCt(u,S,n.font):gke(u,S,n.font));N.text(S.text).attr({class:\"slicetext\",transform:\"\",\"text-anchor\":\"middle\"}).call(Wy.font,j).call(pW.convertToTspans,e);var re=Wy.bBox(N.node()),oe;if(Z===\"outside\")oe=fke(re,S);else if(oe=yke(re,S,l),Z===\"auto\"&&oe.scale<1){var _e=rv.ensureUniformFontSize(e,u.outsidetextfont);N.call(Wy.font,_e),re=Wy.bBox(N.node()),oe=fke(re,S)}var Ee=oe.textPosAngle,Ce=Ee===void 0?S.pxmid:mD(l.r,Ee);if(oe.targetX=x+Ce[0]*oe.rCenter+(oe.x||0),oe.targetY=C+Ce[1]*oe.rCenter+(oe.y||0),Mke(oe,re),oe.outside){var me=oe.targetY;S.yLabelMin=me-re.height/2,S.yLabelMid=me,S.yLabelMax=me+re.height/2,S.labelExtraX=0,S.labelExtraY=0,h=!0}oe.fontSize=j.size,qCt(u.type,oe,n),o[L].transform=oe,rv.setTransormAndDisplay(N,oe)})});var d=Fp.select(this).selectAll(\"g.titletext\").data(u.title.text?[0]:[]);if(d.enter().append(\"g\").classed(\"titletext\",!0),d.exit().remove(),d.each(function(){var S=rv.ensureSingle(Fp.select(this),\"text\",\"\",function(C){C.attr(\"data-notex\",1)}),L=u.title.text;u._meta&&(L=rv.templateString(L,u._meta)),S.text(L).attr({class:\"titletext\",transform:\"\",\"text-anchor\":\"middle\"}).call(Wy.font,u.title.font).call(pW.convertToTspans,e);var x;u.title.position===\"middle center\"?x=jCt(l):x=wke(l,i),S.attr(\"transform\",ske(x.x,x.y)+OCt(Math.min(1,x.scale))+ske(x.tx,x.ty))}),h&&XCt(f,u),UCt(c,u),h&&u.automargin){var v=Wy.bBox(s.node()),_=u.domain,b=i.w*(_.x[1]-_.x[0]),p=i.h*(_.y[1]-_.y[0]),k=(.5*b-l.r)/i.w,E=(.5*p-l.r)/i.h;zCt.autoMargin(e,\"pie.\"+u.uid+\".automargin\",{xl:_.x[0]-k,xr:_.x[1]+k,yb:_.y[0]-E,yt:_.y[1]+E,l:Math.max(l.cx-l.r-v.left,0),r:Math.max(v.right-(l.cx+l.r),0),b:Math.max(v.bottom-(l.cy+l.r),0),t:Math.max(l.cy-l.r-v.top,0),pad:5})}})});setTimeout(function(){a.selectAll(\"tspan\").each(function(){var o=Fp.select(this);o.attr(\"dy\")&&o.attr(\"dy\",o.attr(\"dy\"))})},0)}function UCt(e,t){e.each(function(r){var n=Fp.select(this);if(!r.labelExtraX&&!r.labelExtraY){n.select(\"path.textline\").remove();return}var i=n.select(\"g.slicetext text\");r.transform.targetX+=r.labelExtraX,r.transform.targetY+=r.labelExtraY,rv.setTransormAndDisplay(i,r.transform);var a=r.cxFinal+r.pxmid[0],o=r.cyFinal+r.pxmid[1],s=\"M\"+a+\",\"+o,l=(r.yLabelMax-r.yLabelMin)*(r.pxmid[0]<0?-1:1)/4;if(r.labelExtraX){var 
u=r.labelExtraX*r.pxmid[1]/r.pxmid[0],c=r.yLabelMid+r.labelExtraY-(r.cyFinal+r.pxmid[1]);Math.abs(u)>Math.abs(c)?s+=\"l\"+c*r.pxmid[0]/r.pxmid[1]+\",\"+c+\"H\"+(a+r.labelExtraX+l):s+=\"l\"+r.labelExtraX+\",\"+u+\"v\"+(c-u)+\"h\"+l}else s+=\"V\"+(r.yLabelMid+r.labelExtraY)+\"h\"+l;rv.ensureSingle(n,\"path\",\"textline\").call(hke.stroke,t.outsidetextfont.color).attr({\"stroke-width\":Math.min(2,t.outsidetextfont.size/8),d:s,fill:\"none\"})})}function pke(e,t,r){var n=r[0],i=n.cx,a=n.cy,o=n.trace,s=o.type===\"funnelarea\";\"_hasHoverLabel\"in o||(o._hasHoverLabel=!1),\"_hasHoverEvent\"in o||(o._hasHoverEvent=!1),e.on(\"mouseover\",function(l){var u=t._fullLayout,c=t._fullData[o.index];if(!(t._dragging||u.hovermode===!1)){var f=c.hoverinfo;if(Array.isArray(f)&&(f=pD.castHoverinfo({hoverinfo:[ns.castOption(f,l.pts)],_module:o._module},u,0)),f===\"all\"&&(f=\"label+text+value+percent+name\"),c.hovertemplate||f!==\"none\"&&f!==\"skip\"&&f){var h=l.rInscribed||0,d=i+l.pxmid[0]*(1-h),v=a+l.pxmid[1]*(1-h),_=u.separators,b=[];if(f&&f.indexOf(\"label\")!==-1&&b.push(l.label),l.text=ns.castOption(c.hovertext||c.text,l.pts),f&&f.indexOf(\"text\")!==-1){var p=l.text;rv.isValidTextValue(p)&&b.push(p)}l.value=l.v,l.valueLabel=ns.formatPieValue(l.v,_),f&&f.indexOf(\"value\")!==-1&&b.push(l.valueLabel),l.percent=l.v/n.vTotal,l.percentLabel=ns.formatPiePercent(l.percent,_),f&&f.indexOf(\"percent\")!==-1&&b.push(l.percentLabel);var k=c.hoverlabel,E=k.font,S=[];pD.loneHover({trace:o,x0:d-h*n.r,x1:d+h*n.r,y:v,_x0:s?i+l.TL[0]:d-h*n.r,_x1:s?i+l.TR[0]:d+h*n.r,_y0:s?a+l.TL[1]:v-h*n.r,_y1:s?a+l.BL[1]:v+h*n.r,text:b.join(\"
\"),name:c.hovertemplate||f.indexOf(\"name\")!==-1?c.name:void 0,idealAlign:l.pxmid[0]<0?\"left\":\"right\",color:ns.castOption(k.bgcolor,l.pts)||l.color,borderColor:ns.castOption(k.bordercolor,l.pts),fontFamily:ns.castOption(E.family,l.pts),fontSize:ns.castOption(E.size,l.pts),fontColor:ns.castOption(E.color,l.pts),nameLength:ns.castOption(k.namelength,l.pts),textAlign:ns.castOption(k.align,l.pts),hovertemplate:ns.castOption(c.hovertemplate,l.pts),hovertemplateLabels:l,eventData:[gD(l,c)]},{container:u._hoverlayer.node(),outerContainer:u._paper.node(),gd:t,inOut_bbox:S}),l.bbox=S[0],o._hasHoverLabel=!0}o._hasHoverEvent=!0,t.emit(\"plotly_hover\",{points:[gD(l,c)],event:Fp.event})}}),e.on(\"mouseout\",function(l){var u=t._fullLayout,c=t._fullData[o.index],f=Fp.select(this).datum();o._hasHoverEvent&&(l.originalEvent=Fp.event,t.emit(\"plotly_unhover\",{points:[gD(f,c)],event:Fp.event}),o._hasHoverEvent=!1),o._hasHoverLabel&&(pD.loneUnhover(u._hoverlayer.node()),o._hasHoverLabel=!1)}),e.on(\"click\",function(l){var u=t._fullLayout,c=t._fullData[o.index];t._dragging||u.hovermode===!1||(t._hoverdata=[gD(l,c)],pD.click(t,Fp.event))})}function VCt(e,t,r){var n=ns.castOption(e.outsidetextfont.color,t.pts)||ns.castOption(e.textfont.color,t.pts)||r.color,i=ns.castOption(e.outsidetextfont.family,t.pts)||ns.castOption(e.textfont.family,t.pts)||r.family,a=ns.castOption(e.outsidetextfont.size,t.pts)||ns.castOption(e.textfont.size,t.pts)||r.size,o=ns.castOption(e.outsidetextfont.weight,t.pts)||ns.castOption(e.textfont.weight,t.pts)||r.weight,s=ns.castOption(e.outsidetextfont.style,t.pts)||ns.castOption(e.textfont.style,t.pts)||r.style,l=ns.castOption(e.outsidetextfont.variant,t.pts)||ns.castOption(e.textfont.variant,t.pts)||r.variant,u=ns.castOption(e.outsidetextfont.textcase,t.pts)||ns.castOption(e.textfont.textcase,t.pts)||r.textcase,c=ns.castOption(e.outsidetextfont.lineposition,t.pts)||ns.castOption(e.textfont.lineposition,t.pts)||r.lineposition,f=ns.castOption(e.outsidetextfont.shadow,t.pts)||ns.castOption(e.textfont.shadow,t.pts)||r.shadow;return{color:n,family:i,size:a,weight:o,style:s,variant:l,textcase:u,lineposition:c,shadow:f}}function gke(e,t,r){var n=ns.castOption(e.insidetextfont.color,t.pts);!n&&e._input.textfont&&(n=ns.castOption(e._input.textfont.color,t.pts));var i=ns.castOption(e.insidetextfont.family,t.pts)||ns.castOption(e.textfont.family,t.pts)||r.family,a=ns.castOption(e.insidetextfont.size,t.pts)||ns.castOption(e.textfont.size,t.pts)||r.size,o=ns.castOption(e.insidetextfont.weight,t.pts)||ns.castOption(e.textfont.weight,t.pts)||r.weight,s=ns.castOption(e.insidetextfont.style,t.pts)||ns.castOption(e.textfont.style,t.pts)||r.style,l=ns.castOption(e.insidetextfont.variant,t.pts)||ns.castOption(e.textfont.variant,t.pts)||r.variant,u=ns.castOption(e.insidetextfont.textcase,t.pts)||ns.castOption(e.textfont.textcase,t.pts)||r.textcase,c=ns.castOption(e.insidetextfont.lineposition,t.pts)||ns.castOption(e.textfont.lineposition,t.pts)||r.lineposition,f=ns.castOption(e.insidetextfont.shadow,t.pts)||ns.castOption(e.textfont.shadow,t.pts)||r.shadow;return{color:n||hke.contrast(t.color),family:i,size:a,weight:o,style:s,variant:l,textcase:u,lineposition:c,shadow:f}}function mke(e,t){for(var r,n,i=0;i=-4;k-=2)p(Math.PI*k,\"tan\");for(k=4;k>=-4;k-=2)p(Math.PI*(k+1),\"tan\")}if(f||d){for(k=4;k>=-4;k-=2)p(Math.PI*(k+1.5),\"rad\");for(k=4;k>=-4;k-=2)p(Math.PI*(k+.5),\"rad\")}}if(s||v||f){var 
E=Math.sqrt(e.width*e.width+e.height*e.height);if(b={scale:i*n*2/E,rCenter:1-i,rotate:0},b.textPosAngle=(t.startangle+t.stopangle)/2,b.scale>=1)return b;_.push(b)}(v||d)&&(b=uke(e,n,o,l,u),b.textPosAngle=(t.startangle+t.stopangle)/2,_.push(b)),(v||h)&&(b=cke(e,n,o,l,u),b.textPosAngle=(t.startangle+t.stopangle)/2,_.push(b));for(var S=0,L=0,x=0;x<_.length;x++){var C=_[x].scale;if(L=1)break}return _[S]}function GCt(e,t){var r=e.startangle,n=e.stopangle;return r>t&&t>n||r0?1:-1)/2,y:a/(1+r*r/(n*n)),outside:!0}}function jCt(e){var t=Math.sqrt(e.titleBox.width*e.titleBox.width+e.titleBox.height*e.titleBox.height);return{x:e.cx,y:e.cy,scale:e.trace.hole*e.r*2/t,tx:0,ty:-e.titleBox.height/2+e.trace.title.font.size}}function wke(e,t){var r=1,n=1,i,a=e.trace,o={x:e.cx,y:e.cy},s={tx:0,ty:0};s.ty+=a.title.font.size,i=Tke(a),a.title.position.indexOf(\"top\")!==-1?(o.y-=(1+i)*e.r,s.ty-=e.titleBox.height):a.title.position.indexOf(\"bottom\")!==-1&&(o.y+=(1+i)*e.r);var l=WCt(e.r,e.trace.aspectratio),u=t.w*(a.domain.x[1]-a.domain.x[0])/2;return a.title.position.indexOf(\"left\")!==-1?(u=u+l,o.x-=(1+i)*l,s.tx+=e.titleBox.width/2):a.title.position.indexOf(\"center\")!==-1?u*=2:a.title.position.indexOf(\"right\")!==-1&&(u=u+l,o.x+=(1+i)*l,s.tx-=e.titleBox.width/2),r=u/e.titleBox.width,n=gW(e,t)/e.titleBox.height,{x:o.x,y:o.y,scale:Math.min(r,n),tx:s.tx,ty:s.ty}}function WCt(e,t){return e/(t===void 0?1:t)}function gW(e,t){var r=e.trace,n=t.h*(r.domain.y[1]-r.domain.y[0]);return Math.min(e.titleBox.height,n/2)}function Tke(e){var t=e.pull;if(!t)return 0;var r;if(rv.isArrayOrTypedArray(t))for(t=0,r=0;rt&&(t=e.pull[r]);return t}function XCt(e,t){var r,n,i,a,o,s,l,u,c,f,h,d,v;function _(E,S){return E.pxmid[1]-S.pxmid[1]}function b(E,S){return S.pxmid[1]-E.pxmid[1]}function p(E,S){S||(S={});var L=S.labelExtraY+(n?S.yLabelMax:S.yLabelMin),x=n?E.yLabelMin:E.yLabelMax,C=n?E.yLabelMax:E.yLabelMin,M=E.cyFinal+o(E.px0[1],E.px1[1]),g=L-x,P,T,z,O,V,G;if(g*l>0&&(E.labelExtraY=g),!!rv.isArrayOrTypedArray(t.pull))for(T=0;T=(ns.castOption(t.pull,z.pts)||0))&&((E.pxmid[1]-z.pxmid[1])*l>0?(O=z.cyFinal+o(z.px0[1],z.px1[1]),g=O-x-E.labelExtraY,g*l>0&&(E.labelExtraY+=g)):(C+E.labelExtraY-M)*l>0&&(P=3*s*Math.abs(T-f.indexOf(E)),V=z.cxFinal+a(z.px0[0],z.px1[0]),G=V+P-(E.cxFinal+E.pxmid[0])-E.labelExtraX,G*s>0&&(E.labelExtraX+=G)))}for(n=0;n<2;n++)for(i=n?_:b,o=n?Math.max:Math.min,l=n?1:-1,r=0;r<2;r++){for(a=r?Math.max:Math.min,s=r?1:-1,u=e[n][r],u.sort(i),c=e[1-n][r],f=c.concat(u),d=[],h=0;h1?(u=r.r,c=u/i.aspectratio):(c=r.r,u=c*i.aspectratio),u*=(1+i.baseratio)/2,l=u*c}o=Math.min(o,l/r.vTotal)}for(n=0;nt.vTotal/2?1:0,u.halfangle=Math.PI*Math.min(u.v/t.vTotal,.5),u.ring=1-n.hole,u.rInscribed=HCt(u,t))}function mD(e,t){return[e*Math.sin(t),-e*Math.cos(t)]}function Ske(e,t,r){var n=e._fullLayout,i=r.trace,a=i.texttemplate,o=i.textinfo;if(!a&&o&&o!==\"none\"){var s=o.split(\"+\"),l=function(S){return s.indexOf(S)!==-1},u=l(\"label\"),c=l(\"text\"),f=l(\"value\"),h=l(\"percent\"),d=n.separators,v;if(v=u?[t.label]:[],c){var _=ns.getFirstFilled(i.text,t.pts);lke(_)&&v.push(_)}f&&v.push(ns.formatPieValue(t.v,d)),h&&v.push(ns.formatPiePercent(t.v/r.vTotal,d)),t.text=v.join(\"
\")}function b(S){return{label:S.label,value:S.v,valueLabel:ns.formatPieValue(S.v,n.separators),percent:S.v/r.vTotal,percentLabel:ns.formatPiePercent(S.v/r.vTotal,n.separators),color:S.color,text:S.text,customdata:rv.castOption(i,S.i,\"customdata\")}}if(a){var p=rv.castOption(i,t.i,\"texttemplate\");if(!p)t.text=\"\";else{var k=b(t),E=ns.getFirstFilled(i.text,t.pts);(lke(E)||E===\"\")&&(k.text=E),t.text=rv.texttemplateString({data:[k,i._meta],fallback:i.texttemplatefallback,labels:k,locale:e._fullLayout._d3locale,template:p})}}}function Mke(e,t){var r=e.rotate*Math.PI/180,n=Math.cos(r),i=Math.sin(r),a=(t.left+t.right)/2,o=(t.top+t.bottom)/2;e.textX=a*n-o*i,e.textY=a*i+o*n,e.noCenter=!0}Eke.exports={plot:NCt,formatSliceLabel:Ske,transformInsideText:yke,determineInsideTextFont:gke,positionTitleOutside:wke,prerenderTitles:mke,layoutAreas:Ake,attachFxHandlers:pke,computeTransform:Mke}});var Lke=ye((Kdr,Cke)=>{\"use strict\";var kke=Oa(),KCt=q3(),JCt=bv().resizeText;Cke.exports=function(t){var r=t._fullLayout._pielayer.selectAll(\".trace\");JCt(t,r,\"pie\"),r.each(function(n){var i=n[0],a=i.trace,o=kke.select(this);o.style({opacity:a.opacity}),o.selectAll(\"path.surface\").each(function(s){kke.select(this).call(KCt,s,a,t)})})}});var Ike=ye(TA=>{\"use strict\";var Pke=Mc();TA.name=\"pie\";TA.plot=function(e,t,r,n){Pke.plotBasePlot(TA.name,e,t,r,n)};TA.clean=function(e,t,r,n){Pke.cleanBasePlot(TA.name,e,t,r,n)}});var Dke=ye(($dr,Rke)=>{\"use strict\";Rke.exports={attributes:S2(),supplyDefaults:M2().supplyDefaults,supplyLayoutDefaults:tke(),layoutAttributes:vD(),calc:wA().calc,crossTraceCalc:wA().crossTraceCalc,plot:yD().plot,style:Lke(),styleOne:q3(),moduleType:\"trace\",name:\"pie\",basePlotModule:Ike(),categories:[\"pie-like\",\"pie\",\"showLegend\"],meta:{}}});var zke=ye((Qdr,Fke)=>{\"use strict\";Fke.exports=Dke()});var qke=ye(AA=>{\"use strict\";var Oke=Mc();AA.name=\"sunburst\";AA.plot=function(e,t,r,n){Oke.plotBasePlot(AA.name,e,t,r,n)};AA.clean=function(e,t,r,n){Oke.cleanBasePlot(AA.name,e,t,r,n)}});var mW=ye((tvr,Bke)=>{\"use strict\";Bke.exports={CLICK_TRANSITION_TIME:750,CLICK_TRANSITION_EASING:\"linear\",eventDataKeys:[\"currentPath\",\"root\",\"entry\",\"percentRoot\",\"percentEntry\",\"percentParent\"]}});var LE=ye((rvr,Vke)=>{\"use strict\";var $Ct=Gl(),{hovertemplateAttrs:QCt,texttemplateAttrs:e6t,templatefallbackAttrs:Nke}=Ll(),t6t=Tu(),r6t=Cc().attributes,Xy=S2(),Uke=mW(),CE=Ao().extendFlat,i6t=Pd().pattern;Vke.exports={labels:{valType:\"data_array\",editType:\"calc\"},parents:{valType:\"data_array\",editType:\"calc\"},values:{valType:\"data_array\",editType:\"calc\"},branchvalues:{valType:\"enumerated\",values:[\"remainder\",\"total\"],dflt:\"remainder\",editType:\"calc\"},count:{valType:\"flaglist\",flags:[\"branches\",\"leaves\"],dflt:\"leaves\",editType:\"calc\"},level:{valType:\"any\",editType:\"plot\",anim:!0},maxdepth:{valType:\"integer\",editType:\"plot\",dflt:-1},marker:CE({colors:{valType:\"data_array\",editType:\"calc\"},line:{color:CE({},Xy.marker.line.color,{dflt:null}),width:CE({},Xy.marker.line.width,{dflt:1}),editType:\"calc\"},pattern:i6t,editType:\"calc\"},t6t(\"marker\",{colorAttr:\"colors\",anim:!1})),leaf:{opacity:{valType:\"number\",editType:\"style\",min:0,max:1},editType:\"plot\"},text:Xy.text,textinfo:{valType:\"flaglist\",flags:[\"label\",\"text\",\"value\",\"current path\",\"percent root\",\"percent entry\",\"percent 
parent\"],extras:[\"none\"],editType:\"plot\"},texttemplate:e6t({editType:\"plot\"},{keys:Uke.eventDataKeys.concat([\"label\",\"value\"])}),texttemplatefallback:Nke({editType:\"plot\"}),hovertext:Xy.hovertext,hoverinfo:CE({},$Ct.hoverinfo,{flags:[\"label\",\"text\",\"value\",\"name\",\"current path\",\"percent root\",\"percent entry\",\"percent parent\"],dflt:\"label+text+value+name\"}),hovertemplate:QCt({},{keys:Uke.eventDataKeys}),hovertemplatefallback:Nke(),textfont:Xy.textfont,insidetextorientation:Xy.insidetextorientation,insidetextfont:Xy.insidetextfont,outsidetextfont:CE({},Xy.outsidetextfont,{}),rotation:{valType:\"angle\",dflt:0,editType:\"plot\"},sort:Xy.sort,root:{color:{valType:\"color\",editType:\"calc\",dflt:\"rgba(0,0,0,0)\"},editType:\"calc\"},domain:r6t({name:\"sunburst\",trace:!0,editType:\"calc\"})}});var yW=ye((ivr,Gke)=>{\"use strict\";Gke.exports={sunburstcolorway:{valType:\"colorlist\",editType:\"calc\"},extendsunburstcolors:{valType:\"boolean\",dflt:!0,editType:\"calc\"}}});var Xke=ye((nvr,Wke)=>{\"use strict\";var Hke=Dr(),n6t=LE(),a6t=Cc().defaults,o6t=r0().handleText,s6t=M2().handleMarkerDefaults,jke=tc(),l6t=jke.hasColorscale,u6t=jke.handleDefaults;Wke.exports=function(t,r,n,i){function a(h,d){return Hke.coerce(t,r,n6t,h,d)}var o=a(\"labels\"),s=a(\"parents\");if(!o||!o.length||!s||!s.length){r.visible=!1;return}var l=a(\"values\");l&&l.length?a(\"branchvalues\"):a(\"count\"),a(\"level\"),a(\"maxdepth\"),s6t(t,r,i,a);var u=r._hasColorscale=l6t(t,\"marker\",\"colors\")||(t.marker||{}).coloraxis;u&&u6t(t,r,i,a,{prefix:\"marker.\",cLetter:\"c\"}),a(\"leaf.opacity\",u?1:.7);var c=a(\"text\");a(\"texttemplate\"),a(\"texttemplatefallback\"),r.texttemplate||a(\"textinfo\",Hke.isArrayOrTypedArray(c)?\"text+label\":\"label\"),a(\"hovertext\"),a(\"hovertemplate\"),a(\"hovertemplatefallback\");var f=\"auto\";o6t(t,r,i,a,f,{moduleHasSelected:!1,moduleHasUnselected:!1,moduleHasConstrain:!1,moduleHasCliponaxis:!1,moduleHasTextangle:!1,moduleHasInsideanchor:!1}),a(\"insidetextorientation\"),a(\"sort\"),a(\"rotation\"),a(\"root.color\"),a6t(r,i,a),r._length=null}});var Yke=ye((avr,Zke)=>{\"use strict\";var c6t=Dr(),f6t=yW();Zke.exports=function(t,r){function n(i,a){return c6t.coerce(t,r,f6t,i,a)}n(\"sunburstcolorway\",r.colorway),n(\"extendsunburstcolors\")}});var PE=ye((_D,Kke)=>{(function(e,t){typeof _D==\"object\"&&typeof Kke!=\"undefined\"?t(_D):(e=e||self,t(e.d3=e.d3||{}))})(_D,function(e){\"use strict\";function t(je,tt){return je.parent===tt.parent?1:2}function r(je){return je.reduce(n,0)/je.length}function n(je,tt){return je+tt.x}function i(je){return 1+je.reduce(a,0)}function a(je,tt){return Math.max(je,tt.y)}function o(je){for(var tt;tt=je.children;)je=tt[0];return je}function s(je){for(var tt;tt=je.children;)je=tt[tt.length-1];return je}function l(){var je=t,tt=1,xt=1,Ie=!1;function xe(ke){var vt,ir=0;ke.eachAfter(function($r){var di=$r.children;di?($r.x=r(di),$r.y=i(di)):($r.x=vt?ir+=je($r,vt):0,$r.y=0,vt=$r)});var ar=o(ke),vr=s(ke),ii=ar.x-je(ar,vr)/2,pi=vr.x+je(vr,ar)/2;return ke.eachAfter(Ie?function($r){$r.x=($r.x-ke.x)*tt,$r.y=(ke.y-$r.y)*xt}:function($r){$r.x=($r.x-ii)/(pi-ii)*tt,$r.y=(1-(ke.y?$r.y/ke.y:1))*xt})}return xe.separation=function(ke){return arguments.length?(je=ke,xe):je},xe.size=function(ke){return arguments.length?(Ie=!1,tt=+ke[0],xt=+ke[1],xe):Ie?null:[tt,xt]},xe.nodeSize=function(ke){return arguments.length?(Ie=!0,tt=+ke[0],xt=+ke[1],xe):Ie?[tt,xt]:null},xe}function u(je){var tt=0,xt=je.children,Ie=xt&&xt.length;if(!Ie)tt=1;else 
for(;--Ie>=0;)tt+=xt[Ie].value;je.value=tt}function c(){return this.eachAfter(u)}function f(je){var tt=this,xt,Ie=[tt],xe,ke,vt;do for(xt=Ie.reverse(),Ie=[];tt=xt.pop();)if(je(tt),xe=tt.children,xe)for(ke=0,vt=xe.length;ke=0;--xe)xt.push(Ie[xe]);return this}function d(je){for(var tt=this,xt=[tt],Ie=[],xe,ke,vt;tt=xt.pop();)if(Ie.push(tt),xe=tt.children,xe)for(ke=0,vt=xe.length;ke=0;)xt+=Ie[xe].value;tt.value=xt})}function _(je){return this.eachBefore(function(tt){tt.children&&tt.children.sort(je)})}function b(je){for(var tt=this,xt=p(tt,je),Ie=[tt];tt!==xt;)tt=tt.parent,Ie.push(tt);for(var xe=Ie.length;je!==xt;)Ie.splice(xe,0,je),je=je.parent;return Ie}function p(je,tt){if(je===tt)return je;var xt=je.ancestors(),Ie=tt.ancestors(),xe=null;for(je=xt.pop(),tt=Ie.pop();je===tt;)xe=je,je=xt.pop(),tt=Ie.pop();return xe}function k(){for(var je=this,tt=[je];je=je.parent;)tt.push(je);return tt}function E(){var je=[];return this.each(function(tt){je.push(tt)}),je}function S(){var je=[];return this.eachBefore(function(tt){tt.children||je.push(tt)}),je}function L(){var je=this,tt=[];return je.each(function(xt){xt!==je&&tt.push({source:xt.parent,target:xt})}),tt}function x(je,tt){var xt=new T(je),Ie=+je.value&&(xt.value=je.value),xe,ke=[xt],vt,ir,ar,vr;for(tt==null&&(tt=M);xe=ke.pop();)if(Ie&&(xe.value=+xe.data.value),(ir=tt(xe.data))&&(vr=ir.length))for(xe.children=new Array(vr),ar=vr-1;ar>=0;--ar)ke.push(vt=xe.children[ar]=new T(ir[ar])),vt.parent=xe,vt.depth=xe.depth+1;return xt.eachBefore(P)}function C(){return x(this).eachBefore(g)}function M(je){return je.children}function g(je){je.data=je.data.data}function P(je){var tt=0;do je.height=tt;while((je=je.parent)&&je.height<++tt)}function T(je){this.data=je,this.depth=this.height=0,this.parent=null}T.prototype=x.prototype={constructor:T,count:c,each:f,eachAfter:d,eachBefore:h,sum:v,sort:_,path:b,ancestors:k,descendants:E,leaves:S,links:L,copy:C};var z=Array.prototype.slice;function O(je){for(var tt=je.length,xt,Ie;tt;)Ie=Math.random()*tt--|0,xt=je[tt],je[tt]=je[Ie],je[Ie]=xt;return je}function V(je){for(var tt=0,xt=(je=O(z.call(je))).length,Ie=[],xe,ke;tt0&&xt*xt>Ie*Ie+xe*xe}function N(je,tt){for(var xt=0;xtar?(xe=(vr+ar-ke)/(2*vr),ir=Math.sqrt(Math.max(0,ar/vr-xe*xe)),xt.x=je.x-xe*Ie-ir*vt,xt.y=je.y-xe*vt+ir*Ie):(xe=(vr+ke-ar)/(2*vr),ir=Math.sqrt(Math.max(0,ke/vr-xe*xe)),xt.x=tt.x+xe*Ie-ir*vt,xt.y=tt.y+xe*vt+ir*Ie)):(xt.x=tt.x+xt.r,xt.y=tt.y)}function Ce(je,tt){var xt=je.r+tt.r-1e-6,Ie=tt.x-je.x,xe=tt.y-je.y;return xt>0&&xt*xt>Ie*Ie+xe*xe}function me(je){var tt=je._,xt=je.next._,Ie=tt.r+xt.r,xe=(tt.x*xt.r+xt.x*tt.r)/Ie,ke=(tt.y*xt.r+xt.y*tt.r)/Ie;return xe*xe+ke*ke}function ie(je){this._=je,this.next=null,this.previous=null}function Se(je){if(!(xe=je.length))return 0;var tt,xt,Ie,xe,ke,vt,ir,ar,vr,ii,pi;if(tt=je[0],tt.x=0,tt.y=0,!(xe>1))return tt.r;if(xt=je[1],tt.x=-xt.r,xt.x=tt.r,xt.y=0,!(xe>2))return tt.r+xt.r;Ee(xt,tt,Ie=je[2]),tt=new ie(tt),xt=new ie(xt),Ie=new ie(Ie),tt.next=Ie.previous=xt,xt.next=tt.previous=Ie,Ie.next=xt.previous=tt;e:for(ir=3;ir0)throw new Error(\"cycle\");return ir}return xt.id=function(Ie){return arguments.length?(je=Fe(Ie),xt):je},xt.parentId=function(Ie){return arguments.length?(tt=Fe(Ie),xt):tt},xt}function $e(je,tt){return je.parent===tt.parent?1:2}function St(je){var tt=je.children;return tt?tt[0]:je.t}function Qt(je){var tt=je.children;return tt?tt[tt.length-1]:je.t}function Vt(je,tt,xt){var Ie=xt/(tt.i-je.i);tt.c-=Ie,tt.s+=xt,je.c+=Ie,tt.z+=xt,tt.m+=xt}function _t(je){for(var 
tt=0,xt=0,Ie=je.children,xe=Ie.length,ke;--xe>=0;)ke=Ie[xe],ke.z+=tt,ke.m+=tt,tt+=ke.s+(xt+=ke.c)}function It(je,tt,xt){return je.a.parent===tt.parent?je.a:xt}function mt(je,tt){this._=je,this.parent=null,this.children=null,this.A=null,this.a=this,this.z=0,this.m=0,this.c=0,this.s=0,this.t=null,this.i=tt}mt.prototype=Object.create(T.prototype);function er(je){for(var tt=new mt(je,0),xt,Ie=[tt],xe,ke,vt,ir;xt=Ie.pop();)if(ke=xt._.children)for(xt.children=new Array(ir=ke.length),vt=ir-1;vt>=0;--vt)Ie.push(xe=xt.children[vt]=new mt(ke[vt],vt)),xe.parent=xt;return(tt.parent=new mt(null,0)).children=[tt],tt}function lr(){var je=$e,tt=1,xt=1,Ie=null;function xe(vr){var ii=er(vr);if(ii.eachAfter(ke),ii.parent.m=-ii.z,ii.eachBefore(vt),Ie)vr.eachBefore(ar);else{var pi=vr,$r=vr,di=vr;vr.eachBefore(function(qn){qn.x$r.x&&($r=qn),qn.depth>di.depth&&(di=qn)});var ji=pi===$r?1:je(pi,$r)/2,In=ji-pi.x,wi=tt/($r.x+ji+In),On=xt/(di.depth||1);vr.eachBefore(function(qn){qn.x=(qn.x+In)*wi,qn.y=qn.depth*On})}return vr}function ke(vr){var ii=vr.children,pi=vr.parent.children,$r=vr.i?pi[vr.i-1]:null;if(ii){_t(vr);var di=(ii[0].z+ii[ii.length-1].z)/2;$r?(vr.z=$r.z+je(vr._,$r._),vr.m=vr.z-di):vr.z=di}else $r&&(vr.z=$r.z+je(vr._,$r._));vr.parent.A=ir(vr,$r,vr.parent.A||pi[0])}function vt(vr){vr._.x=vr.z+vr.parent.m,vr.m+=vr.parent.m}function ir(vr,ii,pi){if(ii){for(var $r=vr,di=vr,ji=ii,In=$r.parent.children[0],wi=$r.m,On=di.m,qn=ji.m,Fn=In.m,ra;ji=Qt(ji),$r=St($r),ji&&$r;)In=St(In),di=Qt(di),di.a=vr,ra=ji.z+qn-$r.z-wi+je(ji._,$r._),ra>0&&(Vt(It(ji,vr,pi),vr,ra),wi+=ra,On+=ra),qn+=ji.m,wi+=$r.m,Fn+=In.m,On+=di.m;ji&&!Qt(di)&&(di.t=ji,di.m+=qn-On),$r&&!St(In)&&(In.t=$r,In.m+=wi-Fn,pi=vr)}return pi}function ar(vr){vr.x*=tt,vr.y=vr.depth*xt}return xe.separation=function(vr){return arguments.length?(je=vr,xe):je},xe.size=function(vr){return arguments.length?(Ie=!1,tt=+vr[0],xt=+vr[1],xe):Ie?null:[tt,xt]},xe.nodeSize=function(vr){return arguments.length?(Ie=!0,tt=+vr[0],xt=+vr[1],xe):Ie?[tt,xt]:null},xe}function Tr(je,tt,xt,Ie,xe){for(var ke=je.children,vt,ir=-1,ar=ke.length,vr=je.value&&(xe-xt)/je.value;++irqn&&(qn=vr),Ut=wi*wi*la,Fn=Math.max(qn/Ut,Ut/On),Fn>ra){wi-=vr;break}ra=Fn}vt.push(ar={value:wi,dice:di1?Ie:1)},xt}(Lr);function Vr(){var je=Br,tt=!1,xt=1,Ie=1,xe=[0],ke=Pe,vt=Pe,ir=Pe,ar=Pe,vr=Pe;function ii($r){return $r.x0=$r.y0=0,$r.x1=xt,$r.y1=Ie,$r.eachBefore(pi),xe=[0],tt&&$r.eachBefore(Zt),$r}function pi($r){var di=xe[$r.depth],ji=$r.x0+di,In=$r.y0+di,wi=$r.x1-di,On=$r.y1-di;wi=$r-1){var qn=ke[pi];qn.x0=ji,qn.y0=In,qn.x1=wi,qn.y1=On;return}for(var Fn=vr[pi],ra=di/2+Fn,la=pi+1,Ut=$r-1;la>>1;vr[wt]On-In){var Er=(ji*nr+wi*rr)/di;ii(pi,la,rr,ji,In,Er,On),ii(la,$r,nr,Er,In,wi,On)}else{var Xr=(In*nr+On*rr)/di;ii(pi,la,rr,ji,In,wi,Xr),ii(la,$r,nr,ji,Xr,wi,On)}}}function Ge(je,tt,xt,Ie,xe){(je.depth&1?Tr:st)(je,tt,xt,Ie,xe)}var Je=function je(tt){function xt(Ie,xe,ke,vt,ir){if((ar=Ie._squarify)&&ar.ratio===tt)for(var ar,vr,ii,pi,$r=-1,di,ji=ar.length,In=Ie.value;++$r1?Ie:1)},xt}(Lr);e.cluster=l,e.hierarchy=x,e.pack=ce,e.packEnclose=V,e.packSiblings=Le,e.partition=lt,e.stratify=cr,e.tree=lr,e.treemap=Vr,e.treemapBinary=dt,e.treemapDice=st,e.treemapResquarify=Je,e.treemapSlice=Tr,e.treemapSliceDice=Ge,e.treemapSquarify=Br,Object.defineProperty(e,\"__esModule\",{value:!0})})});var RE=ye(IE=>{\"use strict\";var Jke=PE(),h6t=Eo(),SA=Dr(),d6t=tc().makeColorScaleFuncFromTrace,v6t=wA().makePullColorFn,p6t=wA().generateExtendedColors,g6t=tc().calc,m6t=fs().ALMOST_EQUAL,y6t={},_6t={},x6t={};IE.calc=function(e,t){var 
r=e._fullLayout,n=t.ids,i=SA.isArrayOrTypedArray(n),a=t.labels,o=t.parents,s=t.values,l=SA.isArrayOrTypedArray(s),u=[],c={},f={},h=function(H,N){c[H]?c[H].push(N):c[H]=[N],f[N]=1},d=function(H){return H||typeof H==\"number\"},v=function(H){return!l||h6t(s[H])&&s[H]>=0},_,b,p;i?(_=Math.min(n.length,o.length),b=function(H){return d(n[H])&&v(H)},p=function(H){return String(n[H])}):(_=Math.min(a.length,o.length),b=function(H){return d(a[H])&&v(H)},p=function(H){return String(a[H])}),l&&(_=Math.min(_,s.length));for(var k=0;k<_;k++)if(b(k)){var E=p(k),S=d(o[k])?String(o[k]):\"\",L={i:k,id:E,pid:S,label:d(a[k])?String(a[k]):\"\"};l&&(L.v=+s[k]),u.push(L),h(S,E)}if(c[\"\"]){if(c[\"\"].length>1){for(var M=SA.randstr(),g=0;g{});function Gm(){}function eCe(){return this.rgb().formatHex()}function k6t(){return this.rgb().formatHex8()}function C6t(){return sCe(this).formatHsl()}function tCe(){return this.rgb().formatRgb()}function j_(e){var t,r;return e=(e+\"\").trim().toLowerCase(),(t=b6t.exec(e))?(r=t[1].length,t=parseInt(t[1],16),r===6?rCe(t):r===3?new _d(t>>8&15|t>>4&240,t>>4&15|t&240,(t&15)<<4|t&15,1):r===8?bD(t>>24&255,t>>16&255,t>>8&255,(t&255)/255):r===4?bD(t>>12&15|t>>8&240,t>>8&15|t>>4&240,t>>4&15|t&240,((t&15)<<4|t&15)/255):null):(t=w6t.exec(e))?new _d(t[1],t[2],t[3],1):(t=T6t.exec(e))?new _d(t[1]*255/100,t[2]*255/100,t[3]*255/100,1):(t=A6t.exec(e))?bD(t[1],t[2],t[3],t[4]):(t=S6t.exec(e))?bD(t[1]*255/100,t[2]*255/100,t[3]*255/100,t[4]):(t=M6t.exec(e))?aCe(t[1],t[2]/100,t[3]/100,1):(t=E6t.exec(e))?aCe(t[1],t[2]/100,t[3]/100,t[4]):Qke.hasOwnProperty(e)?rCe(Qke[e]):e===\"transparent\"?new _d(NaN,NaN,NaN,0):null}function rCe(e){return new _d(e>>16&255,e>>8&255,e&255,1)}function bD(e,t,r,n){return n<=0&&(e=t=r=NaN),new _d(e,t,r,n)}function FE(e){return e instanceof Gm||(e=j_(e)),e?(e=e.rgb(),new _d(e.r,e.g,e.b,e.opacity)):new _d}function EA(e,t,r,n){return arguments.length===1?FE(e):new _d(e,t,r,n==null?1:n)}function _d(e,t,r,n){this.r=+e,this.g=+t,this.b=+r,this.opacity=+n}function iCe(){return`#${E2(this.r)}${E2(this.g)}${E2(this.b)}`}function L6t(){return`#${E2(this.r)}${E2(this.g)}${E2(this.b)}${E2((isNaN(this.opacity)?1:this.opacity)*255)}`}function nCe(){let e=TD(this.opacity);return`${e===1?\"rgb(\":\"rgba(\"}${k2(this.r)}, ${k2(this.g)}, ${k2(this.b)}${e===1?\")\":`, ${e})`}`}function TD(e){return isNaN(e)?1:Math.max(0,Math.min(1,e))}function k2(e){return Math.max(0,Math.min(255,Math.round(e)||0))}function E2(e){return e=k2(e),(e<16?\"0\":\"\")+e.toString(16)}function aCe(e,t,r,n){return n<=0?e=t=r=NaN:r<=0||r>=1?e=t=NaN:t<=0&&(e=NaN),new Xg(e,t,r,n)}function sCe(e){if(e instanceof Xg)return new Xg(e.h,e.s,e.l,e.opacity);if(e instanceof Gm||(e=j_(e)),!e)return new Xg;if(e instanceof Xg)return e;e=e.rgb();var t=e.r/255,r=e.g/255,n=e.b/255,i=Math.min(t,r,n),a=Math.max(t,r,n),o=NaN,s=a-i,l=(a+i)/2;return s?(t===a?o=(r-n)/s+(r0&&l<1?0:o,new Xg(o,s,l,e.opacity)}function zE(e,t,r,n){return arguments.length===1?sCe(e):new Xg(e,t,r,n==null?1:n)}function Xg(e,t,r,n){this.h=+e,this.s=+t,this.l=+r,this.opacity=+n}function oCe(e){return e=(e||0)%360,e<0?e+360:e}function wD(e){return Math.max(0,Math.min(1,e||0))}function _W(e,t,r){return(e<60?t+(r-t)*e/60:e<180?r:e<240?t+(r-t)*(240-e)/60:t)*255}var H_,C2,MA,DE,Vm,b6t,w6t,T6t,A6t,S6t,M6t,E6t,Qke,AD=gu(()=>{xD();H_=.7,C2=1/H_,MA=\"\\\\s*([+-]?\\\\d+)\\\\s*\",DE=\"\\\\s*([+-]?(?:\\\\d*\\\\.)?\\\\d+(?:[eE][+-]?\\\\d+)?)\\\\s*\",Vm=\"\\\\s*([+-]?(?:\\\\d*\\\\.)?\\\\d+(?:[eE][+-]?\\\\d+)?)%\\\\s*\",b6t=/^#([0-9a-f]{3,8})$/,w6t=new 
RegExp(`^rgb\\\\(${MA},${MA},${MA}\\\\)$`),T6t=new RegExp(`^rgb\\\\(${Vm},${Vm},${Vm}\\\\)$`),A6t=new RegExp(`^rgba\\\\(${MA},${MA},${MA},${DE}\\\\)$`),S6t=new RegExp(`^rgba\\\\(${Vm},${Vm},${Vm},${DE}\\\\)$`),M6t=new RegExp(`^hsl\\\\(${DE},${Vm},${Vm}\\\\)$`),E6t=new RegExp(`^hsla\\\\(${DE},${Vm},${Vm},${DE}\\\\)$`),Qke={aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,rebeccapurple:6697881,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074};Zy(Gm,j_,{copy(e){return Object.assign(new this.constructor,this,e)},displayable(){return this.rgb().displayable()},hex:eCe,formatHex:eCe,formatHex8:k6t,formatHsl:C6t,formatRgb:tCe,toString:tCe});Zy(_d,EA,G_(Gm,{brighter(e){return e=e==null?C2:Math.pow(C2,e),new _d(this.r*e,this.g*e,this.b*e,this.opacity)},darker(e){return e=e==null?H_:Math.pow(H_,e),new _d(this.r*e,this.g*e,this.b*e,this.opacity)},rgb(){return this},clamp(){return new 
_d(k2(this.r),k2(this.g),k2(this.b),TD(this.opacity))},displayable(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:iCe,formatHex:iCe,formatHex8:L6t,formatRgb:nCe,toString:nCe}));Zy(Xg,zE,G_(Gm,{brighter(e){return e=e==null?C2:Math.pow(C2,e),new Xg(this.h,this.s,this.l*e,this.opacity)},darker(e){return e=e==null?H_:Math.pow(H_,e),new Xg(this.h,this.s,this.l*e,this.opacity)},rgb(){var e=this.h%360+(this.h<0)*360,t=isNaN(e)||isNaN(this.s)?0:this.s,r=this.l,n=r+(r<.5?r:1-r)*t,i=2*r-n;return new _d(_W(e>=240?e-240:e+120,i,n),_W(e,i,n),_W(e<120?e+240:e-120,i,n),this.opacity)},clamp(){return new Xg(oCe(this.h),wD(this.s),wD(this.l),TD(this.opacity))},displayable(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl(){let e=TD(this.opacity);return`${e===1?\"hsl(\":\"hsla(\"}${oCe(this.h)}, ${wD(this.s)*100}%, ${wD(this.l)*100}%${e===1?\")\":`, ${e})`}`}}))});var SD,MD,xW=gu(()=>{SD=Math.PI/180,MD=180/Math.PI});function dCe(e){if(e instanceof Hm)return new Hm(e.l,e.a,e.b,e.opacity);if(e instanceof Yy)return vCe(e);e instanceof _d||(e=FE(e));var t=AW(e.r),r=AW(e.g),n=AW(e.b),i=bW((.2225045*t+.7168786*r+.0606169*n)/uCe),a,o;return t===r&&r===n?a=o=i:(a=bW((.4360747*t+.3850649*r+.1430804*n)/lCe),o=bW((.0139322*t+.0971045*r+.7141733*n)/cCe)),new Hm(116*i-16,500*(a-i),200*(i-o),e.opacity)}function CA(e,t,r,n){return arguments.length===1?dCe(e):new Hm(e,t,r,n==null?1:n)}function Hm(e,t,r,n){this.l=+e,this.a=+t,this.b=+r,this.opacity=+n}function bW(e){return e>P6t?Math.pow(e,1/3):e/hCe+fCe}function wW(e){return e>kA?e*e*e:hCe*(e-fCe)}function TW(e){return 255*(e<=.0031308?12.92*e:1.055*Math.pow(e,1/2.4)-.055)}function AW(e){return(e/=255)<=.04045?e/12.92:Math.pow((e+.055)/1.055,2.4)}function I6t(e){if(e instanceof Yy)return new Yy(e.h,e.c,e.l,e.opacity);if(e instanceof Hm||(e=dCe(e)),e.a===0&&e.b===0)return new Yy(NaN,0{xD();AD();xW();ED=18,lCe=.96422,uCe=1,cCe=.82521,fCe=4/29,kA=6/29,hCe=3*kA*kA,P6t=kA*kA*kA;Zy(Hm,CA,G_(Gm,{brighter(e){return new Hm(this.l+ED*(e==null?1:e),this.a,this.b,this.opacity)},darker(e){return new Hm(this.l-ED*(e==null?1:e),this.a,this.b,this.opacity)},rgb(){var e=(this.l+16)/116,t=isNaN(this.a)?e:e+this.a/500,r=isNaN(this.b)?e:e-this.b/200;return t=lCe*wW(t),e=uCe*wW(e),r=cCe*wW(r),new _d(TW(3.1338561*t-1.6168667*e-.4906146*r),TW(-.9787684*t+1.9161415*e+.033454*r),TW(.0719453*t-.2289914*e+1.4052427*r),this.opacity)}}));Zy(Yy,OE,G_(Gm,{brighter(e){return new Yy(this.h,this.c,this.l+ED*(e==null?1:e),this.opacity)},darker(e){return new Yy(this.h,this.c,this.l-ED*(e==null?1:e),this.opacity)},rgb(){return vCe(this).rgb()}}))});function R6t(e){if(e instanceof L2)return new L2(e.h,e.s,e.l,e.opacity);e instanceof _d||(e=FE(e));var t=e.r/255,r=e.g/255,n=e.b/255,i=(yCe*n+gCe*t-mCe*r)/(yCe+gCe-mCe),a=n-i,o=(qE*(r-i)-MW*a)/kD,s=Math.sqrt(o*o+a*a)/(qE*i*(1-i)),l=s?Math.atan2(o,a)*MD-120:NaN;return new L2(l<0?l+360:l,s,i,e.opacity)}function LA(e,t,r,n){return arguments.length===1?R6t(e):new L2(e,t,r,n==null?1:n)}function L2(e,t,r,n){this.h=+e,this.s=+t,this.l=+r,this.opacity=+n}var _Ce,SW,MW,kD,qE,gCe,mCe,yCe,xCe=gu(()=>{xD();AD();xW();_Ce=-.14861,SW=1.78277,MW=-.29227,kD=-.90649,qE=1.97294,gCe=qE*kD,mCe=qE*SW,yCe=SW*MW-kD*_Ce;Zy(L2,LA,G_(Gm,{brighter(e){return e=e==null?C2:Math.pow(C2,e),new L2(this.h,this.s,this.l*e,this.opacity)},darker(e){return e=e==null?H_:Math.pow(H_,e),new L2(this.h,this.s,this.l*e,this.opacity)},rgb(){var 
e=isNaN(this.h)?0:(this.h+120)*SD,t=+this.l,r=isNaN(this.s)?0:this.s*t*(1-t),n=Math.cos(e),i=Math.sin(e);return new _d(255*(t+r*(_Ce*n+SW*i)),255*(t+r*(MW*n+kD*i)),255*(t+r*(qE*n)),this.opacity)}}))});var P2=gu(()=>{AD();pCe();xCe()});function EW(e,t,r,n,i){var a=e*e,o=a*e;return((1-3*e+3*a-o)*t+(4-6*a+3*o)*r+(1+3*e+3*a-3*o)*n+o*i)/6}function CD(e){var t=e.length-1;return function(r){var n=r<=0?r=0:r>=1?(r=1,t-1):Math.floor(r*t),i=e[n],a=e[n+1],o=n>0?e[n-1]:2*i-a,s=n{});function PD(e){var t=e.length;return function(r){var n=Math.floor(((r%=1)<0?++r:r)*t),i=e[(n+t-1)%t],a=e[n%t],o=e[(n+1)%t],s=e[(n+2)%t];return EW((r-n/t)*t,i,a,o,s)}}var kW=gu(()=>{LD()});var PA,CW=gu(()=>{PA=e=>()=>e});function bCe(e,t){return function(r){return e+r*t}}function D6t(e,t,r){return e=Math.pow(e,r),t=Math.pow(t,r)-e,r=1/r,function(n){return Math.pow(e+n*t,r)}}function W_(e,t){var r=t-e;return r?bCe(e,r>180||r<-180?r-360*Math.round(r/360):r):PA(isNaN(e)?t:e)}function wCe(e){return(e=+e)==1?$f:function(t,r){return r-t?D6t(t,r,e):PA(isNaN(t)?r:t)}}function $f(e,t){var r=t-e;return r?bCe(e,r):PA(isNaN(e)?t:e)}var I2=gu(()=>{CW()});function TCe(e){return function(t){var r=t.length,n=new Array(r),i=new Array(r),a=new Array(r),o,s;for(o=0;o{P2();LD();kW();I2();BE=function e(t){var r=wCe(t);function n(i,a){var o=r((i=EA(i)).r,(a=EA(a)).r),s=r(i.g,a.g),l=r(i.b,a.b),u=$f(i.opacity,a.opacity);return function(c){return i.r=o(c),i.g=s(c),i.b=l(c),i.opacity=u(c),i+\"\"}}return n.gamma=e,n}(1);ACe=TCe(CD),SCe=TCe(PD)});function IA(e,t){t||(t=[]);var r=e?Math.min(t.length,e.length):0,n=t.slice(),i;return function(a){for(i=0;i{});function MCe(e,t){return(ID(t)?IA:PW)(e,t)}function PW(e,t){var r=t?t.length:0,n=e?Math.min(r,e.length):0,i=new Array(n),a=new Array(r),o;for(o=0;o{NE();RD()});function DD(e,t){var r=new Date;return e=+e,t=+t,function(n){return r.setTime(e*(1-n)+t*n),r}}var RW=gu(()=>{});function zp(e,t){return e=+e,t=+t,function(r){return e*(1-r)+t*r}}var UE=gu(()=>{});function FD(e,t){var r={},n={},i;(e===null||typeof e!=\"object\")&&(e={}),(t===null||typeof t!=\"object\")&&(t={});for(i in t)i in e?r[i]=X_(e[i],t[i]):n[i]=t[i];return function(a){for(i in r)n[i]=r[i](a);return n}}var DW=gu(()=>{NE()});function F6t(e){return function(){return e}}function z6t(e){return function(t){return e(t)+\"\"}}function zD(e,t){var r=zW.lastIndex=FW.lastIndex=0,n,i,a,o=-1,s=[],l=[];for(e=e+\"\",t=t+\"\";(n=zW.exec(e))&&(i=FW.exec(t));)(a=i.index)>r&&(a=t.slice(r,a),s[o]?s[o]+=a:s[++o]=a),(n=n[0])===(i=i[0])?s[o]?s[o]+=i:s[++o]=i:(s[++o]=null,l.push({i:o,x:zp(n,i)})),r=FW.lastIndex;return r{UE();zW=/[-+]?(?:\\d+\\.?\\d*|\\.?\\d+)(?:[eE][-+]?\\d+)?/g,FW=new RegExp(zW.source,\"g\")});function X_(e,t){var r=typeof t,n;return t==null||r===\"boolean\"?PA(t):(r===\"number\"?zp:r===\"string\"?(n=j_(t))?(t=n,BE):zD:t instanceof j_?BE:t instanceof Date?DD:ID(t)?IA:Array.isArray(t)?PW:typeof t.valueOf!=\"function\"&&typeof t.toString!=\"function\"||isNaN(t)?FD:zp)(e,t)}var NE=gu(()=>{P2();LW();IW();RW();UE();DW();OW();CW();RD()});function ECe(e){var t=e.length;return function(r){return e[Math.max(0,Math.min(t-1,Math.floor(r*t)))]}}var kCe=gu(()=>{});function CCe(e,t){var r=W_(+e,+t);return function(n){var i=r(n);return i-360*Math.floor(i/360)}}var LCe=gu(()=>{I2()});function PCe(e,t){return e=+e,t=+t,function(r){return Math.round(e*(1-r)+t*r)}}var ICe=gu(()=>{});function qW(e,t,r,n,i,a){var 
o,s,l;return(o=Math.sqrt(e*e+t*t))&&(e/=o,t/=o),(l=e*r+t*n)&&(r-=e*l,n-=t*l),(s=Math.sqrt(r*r+n*n))&&(r/=s,n/=s,l/=s),e*n{RCe=180/Math.PI,OD={translateX:0,translateY:0,rotate:0,skewX:0,scaleX:1,scaleY:1}});function FCe(e){let t=new(typeof DOMMatrix==\"function\"?DOMMatrix:WebKitCSSMatrix)(e+\"\");return t.isIdentity?OD:qW(t.a,t.b,t.c,t.d,t.e,t.f)}function zCe(e){return e==null?OD:(qD||(qD=document.createElementNS(\"http://www.w3.org/2000/svg\",\"g\")),qD.setAttribute(\"transform\",e),(e=qD.transform.baseVal.consolidate())?(e=e.matrix,qW(e.a,e.b,e.c,e.d,e.e,e.f)):OD)}var qD,OCe=gu(()=>{DCe()});function qCe(e,t,r,n){function i(u){return u.length?u.pop()+\" \":\"\"}function a(u,c,f,h,d,v){if(u!==f||c!==h){var _=d.push(\"translate(\",null,t,null,r);v.push({i:_-4,x:zp(u,f)},{i:_-2,x:zp(c,h)})}else(f||h)&&d.push(\"translate(\"+f+t+h+r)}function o(u,c,f,h){u!==c?(u-c>180?c+=360:c-u>180&&(u+=360),h.push({i:f.push(i(f)+\"rotate(\",null,n)-2,x:zp(u,c)})):c&&f.push(i(f)+\"rotate(\"+c+n)}function s(u,c,f,h){u!==c?h.push({i:f.push(i(f)+\"skewX(\",null,n)-2,x:zp(u,c)}):c&&f.push(i(f)+\"skewX(\"+c+n)}function l(u,c,f,h,d,v){if(u!==f||c!==h){var _=d.push(i(d)+\"scale(\",null,\",\",null,\")\");v.push({i:_-4,x:zp(u,f)},{i:_-2,x:zp(c,h)})}else(f!==1||h!==1)&&d.push(i(d)+\"scale(\"+f+\",\"+h+\")\")}return function(u,c){var f=[],h=[];return u=e(u),c=e(c),a(u.translateX,u.translateY,c.translateX,c.translateY,f,h),o(u.rotate,c.rotate,f,h),s(u.skewX,c.skewX,f,h),l(u.scaleX,u.scaleY,c.scaleX,c.scaleY,f,h),u=c=null,function(d){for(var v=-1,_=h.length,b;++v<_;)f[(b=h[v]).i]=b.x(d);return f.join(\"\")}}}var BCe,NCe,UCe=gu(()=>{UE();OCe();BCe=qCe(FCe,\"px, \",\"px)\",\"deg)\"),NCe=qCe(zCe,\", \",\")\",\")\")});function VCe(e){return((e=Math.exp(e))+1/e)/2}function q6t(e){return((e=Math.exp(e))-1/e)/2}function B6t(e){return((e=Math.exp(2*e))-1)/(e+1)}var O6t,GCe,HCe=gu(()=>{O6t=1e-12;GCe=function e(t,r,n){function i(a,o){var s=a[0],l=a[1],u=a[2],c=o[0],f=o[1],h=o[2],d=c-s,v=f-l,_=d*d+v*v,b,p;if(_{P2();I2();WCe=jCe(W_),XCe=jCe($f)});function BW(e,t){var r=$f((e=CA(e)).l,(t=CA(t)).l),n=$f(e.a,t.a),i=$f(e.b,t.b),a=$f(e.opacity,t.opacity);return function(o){return e.l=r(o),e.a=n(o),e.b=i(o),e.opacity=a(o),e+\"\"}}var YCe=gu(()=>{P2();I2()});function KCe(e){return function(t,r){var n=e((t=OE(t)).h,(r=OE(r)).h),i=$f(t.c,r.c),a=$f(t.l,r.l),o=$f(t.opacity,r.opacity);return function(s){return t.h=n(s),t.c=i(s),t.l=a(s),t.opacity=o(s),t+\"\"}}}var JCe,$Ce,QCe=gu(()=>{P2();I2();JCe=KCe(W_),$Ce=KCe($f)});function e6e(e){return function t(r){r=+r;function n(i,a){var o=e((i=LA(i)).h,(a=LA(a)).h),s=$f(i.s,a.s),l=$f(i.l,a.l),u=$f(i.opacity,a.opacity);return function(c){return i.h=o(c),i.s=s(c),i.l=l(Math.pow(c,r)),i.opacity=u(c),i+\"\"}}return n.gamma=t,n}(1)}var t6e,r6e,i6e=gu(()=>{P2();I2();t6e=e6e(W_),r6e=e6e($f)});function NW(e,t){t===void 0&&(t=e,e=X_);for(var r=0,n=t.length-1,i=t[0],a=new Array(n<0?0:n);r{NE()});function a6e(e,t){for(var r=new Array(t),n=0;n{});var 
R2={};uee(R2,{interpolate:()=>X_,interpolateArray:()=>MCe,interpolateBasis:()=>CD,interpolateBasisClosed:()=>PD,interpolateCubehelix:()=>t6e,interpolateCubehelixLong:()=>r6e,interpolateDate:()=>DD,interpolateDiscrete:()=>ECe,interpolateHcl:()=>JCe,interpolateHclLong:()=>$Ce,interpolateHsl:()=>WCe,interpolateHslLong:()=>XCe,interpolateHue:()=>CCe,interpolateLab:()=>BW,interpolateNumber:()=>zp,interpolateNumberArray:()=>IA,interpolateObject:()=>FD,interpolateRgb:()=>BE,interpolateRgbBasis:()=>ACe,interpolateRgbBasisClosed:()=>SCe,interpolateRound:()=>PCe,interpolateString:()=>zD,interpolateTransformCss:()=>BCe,interpolateTransformSvg:()=>NCe,interpolateZoom:()=>GCe,piecewise:()=>NW,quantize:()=>a6e});var D2=gu(()=>{NE();IW();LD();kW();RW();kCe();LCe();UE();RD();DW();ICe();OW();UCe();HCe();LW();ZCe();YCe();QCe();i6e();n6e();o6e()});var BD=ye((Ypr,s6e)=>{\"use strict\";var N6t=So(),U6t=ka();s6e.exports=function(t,r,n,i,a){var o=r.data.data,s=o.i,l=a||o.color;if(s>=0){r.i=o.i;var u=n.marker;u.pattern?(!u.colors||!u.pattern.shape)&&(u.color=l,r.color=l):(u.color=l,r.color=l),N6t.pointStyle(t,n,i,r)}else U6t.fill(t,l)}});var UW=ye((Kpr,h6e)=>{\"use strict\";var l6e=Oa(),u6e=ka(),c6e=Dr(),V6t=bv().resizeText,G6t=BD();function H6t(e){var t=e._fullLayout._sunburstlayer.selectAll(\".trace\");V6t(e,t,\"sunburst\"),t.each(function(r){var n=l6e.select(this),i=r[0],a=i.trace;n.style(\"opacity\",a.opacity),n.selectAll(\"path.surface\").each(function(o){l6e.select(this).call(f6e,o,a,e)})})}function f6e(e,t,r,n){var i=t.data.data,a=!t.children,o=i.i,s=c6e.castOption(r,o,\"marker.line.color\")||u6e.defaultLine,l=c6e.castOption(r,o,\"marker.line.width\")||0;e.call(G6t,t,r,n).style(\"stroke-width\",l).call(u6e.stroke,s).style(\"opacity\",a?r.leaf.opacity:null)}h6e.exports={style:H6t,styleOne:f6e}});var Ky=ye(Bs=>{\"use strict\";var F2=Dr(),j6t=ka(),W6t=Ag(),d6e=l_();Bs.findEntryWithLevel=function(e,t){var r;return t&&e.eachAfter(function(n){if(Bs.getPtId(n)===t)return r=n.copy()}),r||e};Bs.findEntryWithChild=function(e,t){var r;return e.eachAfter(function(n){for(var i=n.children||[],a=0;a0)};Bs.getMaxDepth=function(e){return e.maxdepth>=0?e.maxdepth:1/0};Bs.isHeader=function(e,t){return!(Bs.isLeaf(e)||e.depth===t._maxDepth-1)};function v6e(e){return e.data.data.pid}Bs.getParent=function(e,t){return Bs.findEntryWithLevel(e,v6e(t))};Bs.listPath=function(e,t){var r=e.parent;if(!r)return[];var n=t?[r.data[t]]:[r];return Bs.listPath(r,t).concat(n)};Bs.getPath=function(e){return Bs.listPath(e,\"label\").join(\"/\")+\"/\"};Bs.formatValue=d6e.formatPieValue;Bs.formatPercent=function(e,t){var r=F2.formatPercent(e,0);return r===\"0%\"&&(r=d6e.formatPiePercent(e,t)),r}});var HE=ye(($pr,m6e)=>{\"use strict\";var RA=Oa(),p6e=qa(),Y6t=ip().appendArrayPointValue,VE=vf(),g6e=Dr(),K6t=y3(),rd=Ky(),J6t=l_(),$6t=J6t.formatPieValue;m6e.exports=function(t,r,n,i,a){var o=i[0],s=o.trace,l=o.hierarchy,u=s.type===\"sunburst\",c=s.type===\"treemap\"||s.type===\"icicle\";\"_hasHoverLabel\"in s||(s._hasHoverLabel=!1),\"_hasHoverEvent\"in s||(s._hasHoverEvent=!1);var f=function(v){var _=n._fullLayout;if(!(n._dragging||_.hovermode===!1)){var b=n._fullData[s.index],p=v.data.data,k=p.i,E=rd.isHierarchyRoot(v),S=rd.getParent(l,v),L=rd.getValue(v),x=function(Ee){return g6e.castOption(b,k,Ee)},C=x(\"hovertemplate\"),M=VE.castHoverinfo(b,_,k),g=_.separators,P;if(C||M&&M!==\"none\"&&M!==\"skip\"){var T,z;u&&(T=o.cx+v.pxmid[0]*(1-v.rInscribed),z=o.cy+v.pxmid[1]*(1-v.rInscribed)),c&&(T=v._hoverX,z=v._hoverY);var O={},V=[],G=[],Z=function(Ee){return 
V.indexOf(Ee)!==-1};M&&(V=M===\"all\"?b._module.attributes.hoverinfo.flags:M.split(\"+\")),O.label=p.label,Z(\"label\")&&O.label&&G.push(O.label),p.hasOwnProperty(\"v\")&&(O.value=p.v,O.valueLabel=$6t(O.value,g),Z(\"value\")&&G.push(O.valueLabel)),O.currentPath=v.currentPath=rd.getPath(v.data),Z(\"current path\")&&!E&&G.push(O.currentPath);var H,N=[],j=function(){N.indexOf(H)===-1&&(G.push(H),N.push(H))};O.percentParent=v.percentParent=L/rd.getValue(S),O.parent=v.parentString=rd.getPtLabel(S),Z(\"percent parent\")&&(H=rd.formatPercent(O.percentParent,g)+\" of \"+O.parent,j()),O.percentEntry=v.percentEntry=L/rd.getValue(r),O.entry=v.entry=rd.getPtLabel(r),Z(\"percent entry\")&&!E&&!v.onPathbar&&(H=rd.formatPercent(O.percentEntry,g)+\" of \"+O.entry,j()),O.percentRoot=v.percentRoot=L/rd.getValue(l),O.root=v.root=rd.getPtLabel(l),Z(\"percent root\")&&!E&&(H=rd.formatPercent(O.percentRoot,g)+\" of \"+O.root,j()),O.text=x(\"hovertext\")||x(\"text\"),Z(\"text\")&&(H=O.text,g6e.isValidTextValue(H)&&G.push(H)),P=[GE(v,b,a.eventDataKeys)];var re={trace:b,y:z,_x0:v._x0,_x1:v._x1,_y0:v._y0,_y1:v._y1,text:G.join(\"
\"),name:C||Z(\"name\")?b.name:void 0,color:x(\"hoverlabel.bgcolor\")||p.color,borderColor:x(\"hoverlabel.bordercolor\"),fontFamily:x(\"hoverlabel.font.family\"),fontSize:x(\"hoverlabel.font.size\"),fontColor:x(\"hoverlabel.font.color\"),fontWeight:x(\"hoverlabel.font.weight\"),fontStyle:x(\"hoverlabel.font.style\"),fontVariant:x(\"hoverlabel.font.variant\"),nameLength:x(\"hoverlabel.namelength\"),textAlign:x(\"hoverlabel.align\"),hovertemplate:C,hovertemplateLabels:O,eventData:P};u&&(re.x0=T-v.rInscribed*v.rpx1,re.x1=T+v.rInscribed*v.rpx1,re.idealAlign=v.pxmid[0]<0?\"left\":\"right\"),c&&(re.x=T,re.idealAlign=T<0?\"left\":\"right\");var oe=[];VE.loneHover(re,{container:_._hoverlayer.node(),outerContainer:_._paper.node(),gd:n,inOut_bbox:oe}),P[0].bbox=oe[0],s._hasHoverLabel=!0}if(c){var _e=t.select(\"path.surface\");a.styleOne(_e,v,b,n,{hovered:!0})}s._hasHoverEvent=!0,n.emit(\"plotly_hover\",{points:P||[GE(v,b,a.eventDataKeys)],event:RA.event})}},h=function(v){var _=n._fullLayout,b=n._fullData[s.index],p=RA.select(this).datum();if(s._hasHoverEvent&&(v.originalEvent=RA.event,n.emit(\"plotly_unhover\",{points:[GE(p,b,a.eventDataKeys)],event:RA.event}),s._hasHoverEvent=!1),s._hasHoverLabel&&(VE.loneUnhover(_._hoverlayer.node()),s._hasHoverLabel=!1),c){var k=t.select(\"path.surface\");a.styleOne(k,p,b,n,{hovered:!1})}},d=function(v){var _=n._fullLayout,b=n._fullData[s.index],p=u&&(rd.isHierarchyRoot(v)||rd.isLeaf(v)),k=rd.getPtId(v),E=rd.isEntry(v)?rd.findEntryWithChild(l,k):rd.findEntryWithLevel(l,k),S=rd.getPtId(E),L={points:[GE(v,b,a.eventDataKeys)],event:RA.event};p||(L.nextLevel=S);var x=K6t.triggerHandler(n,\"plotly_\"+s.type+\"click\",L);if(x!==!1&&_.hovermode&&(n._hoverdata=[GE(v,b,a.eventDataKeys)],VE.click(n,RA.event)),!p&&x!==!1&&!n._dragging&&!n._transitioning){p6e.call(\"_storeDirectGUIEdit\",b,_._tracePreGUI[b.uid],{level:b.level});var C={data:[{level:S}],traces:[s.index]},M={frame:{redraw:!1,duration:a.transitionTime},transition:{duration:a.transitionTime,easing:a.transitionEasing},mode:\"immediate\",fromcurrent:!0};VE.loneUnhover(_._hoverlayer.node()),p6e.call(\"animate\",n,C,M)}};t.on(\"mouseover\",f),t.on(\"mouseout\",h),t.on(\"click\",d)};function GE(e,t,r){for(var n=e.data.data,i={curveNumber:t.index,pointNumber:n.i,data:t._input,fullData:t},a=0;a{\"use strict\";var jE=Oa(),Q6t=PE(),Zg=(D2(),ob(R2)).interpolate,y6e=So(),Av=Dr(),eLt=ru(),w6e=bv(),_6e=w6e.recordMinTextSize,tLt=w6e.clearMinTextSize,T6e=yD(),rLt=l_().getRotationAngle,iLt=T6e.computeTransform,nLt=T6e.transformInsideText,aLt=UW().styleOne,oLt=N0().resizeText,sLt=HE(),VW=mW(),Rl=Ky();ND.plot=function(e,t,r,n){var i=e._fullLayout,a=i._sunburstlayer,o,s,l=!r,u=!i.uniformtext.mode&&Rl.hasTransition(r);if(tLt(\"sunburst\",i),o=a.selectAll(\"g.trace.sunburst\").data(t,function(f){return f[0].trace.uid}),o.enter().append(\"g\").classed(\"trace\",!0).classed(\"sunburst\",!0).attr(\"stroke-linejoin\",\"round\"),o.order(),u){n&&(s=n());var c=jE.transition().duration(r.duration).ease(r.easing).each(\"end\",function(){s&&s()}).each(\"interrupt\",function(){s&&s()});c.each(function(){a.selectAll(\"g.trace\").each(function(f){x6e(e,f,this,r)})})}else o.each(function(f){x6e(e,f,this,r)}),i.uniformtext.mode&&oLt(e,i._sunburstlayer.selectAll(\".trace\"),\"sunburst\");l&&o.exit().remove()};function x6e(e,t,r,n){var 
i=e._context.staticPlot,a=e._fullLayout,o=!a.uniformtext.mode&&Rl.hasTransition(n),s=jE.select(r),l=s.selectAll(\"g.slice\"),u=t[0],c=u.trace,f=u.hierarchy,h=Rl.findEntryWithLevel(f,c.level),d=Rl.getMaxDepth(c),v=a._size,_=c.domain,b=v.w*(_.x[1]-_.x[0]),p=v.h*(_.y[1]-_.y[0]),k=.5*Math.min(b,p),E=u.cx=v.l+v.w*(_.x[1]+_.x[0])/2,S=u.cy=v.t+v.h*(1-_.y[0])-p/2;if(!h)return l.remove();var L=null,x={};o&&l.each(function(me){x[Rl.getPtId(me)]={rpx0:me.rpx0,rpx1:me.rpx1,x0:me.x0,x1:me.x1,transform:me.transform},!L&&Rl.isEntry(me)&&(L=me)});var C=lLt(h).descendants(),M=h.height+1,g=0,P=d;u.hasMultipleRoots&&Rl.isHierarchyRoot(h)&&(C=C.slice(1),M-=1,g=1,P+=1),C=C.filter(function(me){return me.y1<=P});var T=rLt(c.rotation);T&&C.forEach(function(me){me.x0+=T,me.x1+=T});var z=Math.min(M,d),O=function(me){return(me-g)/z*k},V=function(me,ie){return[me*Math.cos(ie),-me*Math.sin(ie)]},G=function(me){return Av.pathAnnulus(me.rpx0,me.rpx1,me.x0,me.x1,E,S)},Z=function(me){return E+b6e(me)[0]*(me.transform.rCenter||0)+(me.transform.x||0)},H=function(me){return S+b6e(me)[1]*(me.transform.rCenter||0)+(me.transform.y||0)};l=l.data(C,Rl.getPtId),l.enter().append(\"g\").classed(\"slice\",!0),o?l.exit().transition().each(function(){var me=jE.select(this),ie=me.select(\"path.surface\");ie.transition().attrTween(\"d\",function(Le){var Ae=oe(Le);return function(Fe){return G(Ae(Fe))}});var Se=me.select(\"g.slicetext\");Se.attr(\"opacity\",0)}).remove():l.exit().remove(),l.order();var N=null;if(o&&L){var j=Rl.getPtId(L);l.each(function(me){N===null&&Rl.getPtId(me)===j&&(N=me.x1)})}var re=l;o&&(re=re.transition().each(\"end\",function(){var me=jE.select(this);Rl.setSliceCursor(me,e,{hideOnRoot:!0,hideOnLeaves:!0,isTransitioning:!1})})),re.each(function(me){var ie=jE.select(this),Se=Av.ensureSingle(ie,\"path\",\"surface\",function(Re){Re.style(\"pointer-events\",i?\"none\":\"all\")});me.rpx0=O(me.y0),me.rpx1=O(me.y1),me.xmid=(me.x0+me.x1)/2,me.pxmid=V(me.rpx1,me.xmid),me.midangle=-(me.xmid-Math.PI/2),me.startangle=-(me.x0-Math.PI/2),me.stopangle=-(me.x1-Math.PI/2),me.halfangle=.5*Math.min(Av.angleDelta(me.x0,me.x1)||Math.PI,Math.PI),me.ring=1-me.rpx0/me.rpx1,me.rInscribed=uLt(me,c),o?Se.transition().attrTween(\"d\",function(Re){var ce=_e(Re);return function(Ze){return G(ce(Ze))}}):Se.attr(\"d\",G),ie.call(sLt,h,e,t,{eventDataKeys:VW.eventDataKeys,transitionTime:VW.CLICK_TRANSITION_TIME,transitionEasing:VW.CLICK_TRANSITION_EASING}).call(Rl.setSliceCursor,e,{hideOnRoot:!0,hideOnLeaves:!0,isTransitioning:e._transitioning}),Se.call(aLt,me,c,e);var Le=Av.ensureSingle(ie,\"g\",\"slicetext\"),Ae=Av.ensureSingle(Le,\"text\",\"\",function(Re){Re.attr(\"data-notex\",1)}),Fe=Av.ensureUniformFontSize(e,Rl.determineTextFont(c,me,a.font));Ae.text(ND.formatSliceLabel(me,h,c,t,a)).classed(\"slicetext\",!0).attr(\"text-anchor\",\"middle\").call(y6e.font,Fe).call(eLt.convertToTspans,e);var Pe=y6e.bBox(Ae.node());me.transform=nLt(Pe,me,u),me.transform.targetX=Z(me),me.transform.targetY=H(me);var ge=function(Re,ce){var Ze=Re.transform;return iLt(Ze,ce),Ze.fontSize=Fe.size,_6e(c.type,Ze,a),Av.getTextTransform(Ze)};o?Ae.transition().attrTween(\"transform\",function(Re){var ce=Ee(Re);return function(Ze){return ge(ce(Ze),Pe)}}):Ae.attr(\"transform\",ge(me,Pe))});function oe(me){var ie=Rl.getPtId(me),Se=x[ie],Le=x[Rl.getPtId(h)],Ae;if(Le){var Fe=(me.x1>Le.x1?2*Math.PI:0)+T;Ae=me.rpx1N?2*Math.PI:0)+T;Se={x0:Ae,x1:Ae}}else Se={rpx0:k,rpx1:k},Av.extendFlat(Se,Ce(me));else Se={rpx0:0,rpx1:0};else Se={x0:T,x1:T};return Zg(Se,Le)}function Ee(me){var 
ie=x[Rl.getPtId(me)],Se,Le=me.transform;if(ie)Se=ie;else if(Se={rpx1:me.rpx1,transform:{textPosAngle:Le.textPosAngle,scale:0,rotate:Le.rotate,rCenter:Le.rCenter,x:Le.x,y:Le.y}},L)if(me.parent)if(N){var Ae=me.x1>N?2*Math.PI:0;Se.x0=Se.x1=Ae}else Av.extendFlat(Se,Ce(me));else Se.x0=Se.x1=T;else Se.x0=Se.x1=T;var Fe=Zg(Se.transform.textPosAngle,me.transform.textPosAngle),Pe=Zg(Se.rpx1,me.rpx1),ge=Zg(Se.x0,me.x0),Re=Zg(Se.x1,me.x1),ce=Zg(Se.transform.scale,Le.scale),Ze=Zg(Se.transform.rotate,Le.rotate),ut=Le.rCenter===0?3:Se.transform.rCenter===0?1/3:1,pt=Zg(Se.transform.rCenter,Le.rCenter),Zt=function(st){return pt(Math.pow(st,ut))};return function(st){var lt=Pe(st),Gt=ge(st),Nt=Re(st),Jt=Zt(st),sr=V(lt,(Gt+Nt)/2),wr=Fe(st),cr={pxmid:sr,rpx1:lt,transform:{textPosAngle:wr,rCenter:Jt,x:Le.x,y:Le.y}};return _6e(c.type,Le,a),{transform:{targetX:Z(cr),targetY:H(cr),scale:ce(st),rotate:Ze(st),rCenter:Jt}}}}function Ce(me){var ie=me.parent,Se=x[Rl.getPtId(ie)],Le={};if(Se){var Ae=ie.children,Fe=Ae.indexOf(me),Pe=Ae.length,ge=Zg(Se.x0,Se.x1);Le.x0=ge(Fe/Pe),Le.x1=ge(Fe/Pe)}else Le.x0=Le.x1=0;return Le}}function lLt(e){return Q6t.partition().size([2*Math.PI,e.height+1])(e)}ND.formatSliceLabel=function(e,t,r,n,i){var a=r.texttemplate,o=r.textinfo;if(!a&&(!o||o===\"none\"))return\"\";var s=i.separators,l=n[0],u=e.data.data,c=l.hierarchy,f=Rl.isHierarchyRoot(e),h=Rl.getParent(c,e),d=Rl.getValue(e);if(!a){var v=o.split(\"+\"),_=function(g){return v.indexOf(g)!==-1},b=[],p;if(_(\"label\")&&u.label&&b.push(u.label),u.hasOwnProperty(\"v\")&&_(\"value\")&&b.push(Rl.formatValue(u.v,s)),!f){_(\"current path\")&&b.push(Rl.getPath(e.data));var k=0;_(\"percent parent\")&&k++,_(\"percent entry\")&&k++,_(\"percent root\")&&k++;var E=k>1;if(k){var S,L=function(g){p=Rl.formatPercent(S,s),E&&(p+=\" of \"+g),b.push(p)};_(\"percent parent\")&&!f&&(S=d/Rl.getValue(h),L(\"parent\")),_(\"percent entry\")&&(S=d/Rl.getValue(t),L(\"entry\")),_(\"percent root\")&&(S=d/Rl.getValue(c),L(\"root\"))}}return _(\"text\")&&(p=Av.castOption(r,u.i,\"text\"),Av.isValidTextValue(p)&&b.push(p)),b.join(\"
\")}var x=Av.castOption(r,u.i,\"texttemplate\");if(!x)return\"\";var C={};u.label&&(C.label=u.label),u.hasOwnProperty(\"v\")&&(C.value=u.v,C.valueLabel=Rl.formatValue(u.v,s)),C.currentPath=Rl.getPath(e.data),f||(C.percentParent=d/Rl.getValue(h),C.percentParentLabel=Rl.formatPercent(C.percentParent,s),C.parent=Rl.getPtLabel(h)),C.percentEntry=d/Rl.getValue(t),C.percentEntryLabel=Rl.formatPercent(C.percentEntry,s),C.entry=Rl.getPtLabel(t),C.percentRoot=d/Rl.getValue(c),C.percentRootLabel=Rl.formatPercent(C.percentRoot,s),C.root=Rl.getPtLabel(c),u.hasOwnProperty(\"color\")&&(C.color=u.color);var M=Av.castOption(r,u.i,\"text\");return(Av.isValidTextValue(M)||M===\"\")&&(C.text=M),C.customdata=Av.castOption(r,u.i,\"customdata\"),Av.texttemplateString({data:[C,r._meta],fallback:r.texttemplatefallback,labels:C,locale:i._d3locale,template:x})};function uLt(e){return e.rpx0===0&&Av.isFullCircle([e.x0,e.x1])?1:Math.max(0,Math.min(1/(1+1/Math.sin(e.halfangle)),e.ring/2))}function b6e(e){return cLt(e.rpx1,e.transform.textPosAngle)}function cLt(e,t){return[e*Math.sin(t),-e*Math.cos(t)]}});var S6e=ye((e0r,A6e)=>{\"use strict\";A6e.exports={moduleType:\"trace\",name:\"sunburst\",basePlotModule:qke(),categories:[],animatable:!0,attributes:LE(),layoutAttributes:yW(),supplyDefaults:Xke(),supplyLayoutDefaults:Yke(),calc:RE().calc,crossTraceCalc:RE().crossTraceCalc,plot:UD().plot,style:UW().style,colorbar:$d(),meta:{}}});var E6e=ye((t0r,M6e)=>{\"use strict\";M6e.exports=S6e()});var C6e=ye(DA=>{\"use strict\";var k6e=Mc();DA.name=\"treemap\";DA.plot=function(e,t,r,n){k6e.plotBasePlot(DA.name,e,t,r,n)};DA.clean=function(e,t,r,n){k6e.cleanBasePlot(DA.name,e,t,r,n)}});var z2=ye((i0r,L6e)=>{\"use strict\";L6e.exports={CLICK_TRANSITION_TIME:750,CLICK_TRANSITION_EASING:\"poly\",eventDataKeys:[\"currentPath\",\"root\",\"entry\",\"percentRoot\",\"percentEntry\",\"percentParent\"],gapWithPathbar:1}});var VD=ye((n0r,R6e)=>{\"use 
strict\";var{hovertemplateAttrs:fLt,texttemplateAttrs:hLt,templatefallbackAttrs:P6e}=Ll(),dLt=Tu(),vLt=Cc().attributes,O2=S2(),Q0=LE(),I6e=z2(),GW=Ao().extendFlat,pLt=Pd().pattern;R6e.exports={labels:Q0.labels,parents:Q0.parents,values:Q0.values,branchvalues:Q0.branchvalues,count:Q0.count,level:Q0.level,maxdepth:Q0.maxdepth,tiling:{packing:{valType:\"enumerated\",values:[\"squarify\",\"binary\",\"dice\",\"slice\",\"slice-dice\",\"dice-slice\"],dflt:\"squarify\",editType:\"plot\"},squarifyratio:{valType:\"number\",min:1,dflt:1,editType:\"plot\"},flip:{valType:\"flaglist\",flags:[\"x\",\"y\"],dflt:\"\",editType:\"plot\"},pad:{valType:\"number\",min:0,dflt:3,editType:\"plot\"},editType:\"calc\"},marker:GW({pad:{t:{valType:\"number\",min:0,editType:\"plot\"},l:{valType:\"number\",min:0,editType:\"plot\"},r:{valType:\"number\",min:0,editType:\"plot\"},b:{valType:\"number\",min:0,editType:\"plot\"},editType:\"calc\"},colors:Q0.marker.colors,pattern:pLt,depthfade:{valType:\"enumerated\",values:[!0,!1,\"reversed\"],editType:\"style\"},line:Q0.marker.line,cornerradius:{valType:\"number\",min:0,dflt:0,editType:\"plot\"},editType:\"calc\"},dLt(\"marker\",{colorAttr:\"colors\",anim:!1})),pathbar:{visible:{valType:\"boolean\",dflt:!0,editType:\"plot\"},side:{valType:\"enumerated\",values:[\"top\",\"bottom\"],dflt:\"top\",editType:\"plot\"},edgeshape:{valType:\"enumerated\",values:[\">\",\"<\",\"|\",\"/\",\"\\\\\"],dflt:\">\",editType:\"plot\"},thickness:{valType:\"number\",min:12,editType:\"plot\"},textfont:GW({},O2.textfont,{}),editType:\"calc\"},text:O2.text,textinfo:Q0.textinfo,texttemplate:hLt({editType:\"plot\"},{keys:I6e.eventDataKeys.concat([\"label\",\"value\"])}),texttemplatefallback:P6e({editType:\"plot\"}),hovertext:O2.hovertext,hoverinfo:Q0.hoverinfo,hovertemplate:fLt({},{keys:I6e.eventDataKeys}),hovertemplatefallback:P6e(),textfont:O2.textfont,insidetextfont:O2.insidetextfont,outsidetextfont:GW({},O2.outsidetextfont,{}),textposition:{valType:\"enumerated\",values:[\"top left\",\"top center\",\"top right\",\"middle left\",\"middle center\",\"middle right\",\"bottom left\",\"bottom center\",\"bottom right\"],dflt:\"top left\",editType:\"plot\"},sort:O2.sort,root:Q0.root,domain:vLt({name:\"treemap\",trace:!0,editType:\"calc\"})}});var HW=ye((a0r,D6e)=>{\"use strict\";D6e.exports={treemapcolorway:{valType:\"colorlist\",editType:\"calc\"},extendtreemapcolors:{valType:\"boolean\",dflt:!0,editType:\"calc\"}}});var q6e=ye((o0r,O6e)=>{\"use strict\";var F6e=Dr(),gLt=VD(),mLt=ka(),yLt=Cc().defaults,_Lt=r0().handleText,xLt=e2().TEXTPAD,bLt=M2().handleMarkerDefaults,z6e=tc(),wLt=z6e.hasColorscale,TLt=z6e.handleDefaults;O6e.exports=function(t,r,n,i){function a(b,p){return F6e.coerce(t,r,gLt,b,p)}var o=a(\"labels\"),s=a(\"parents\");if(!o||!o.length||!s||!s.length){r.visible=!1;return}var l=a(\"values\");l&&l.length?a(\"branchvalues\"):a(\"count\"),a(\"level\"),a(\"maxdepth\");var u=a(\"tiling.packing\");u===\"squarify\"&&a(\"tiling.squarifyratio\"),a(\"tiling.flip\"),a(\"tiling.pad\");var c=a(\"text\");a(\"texttemplate\"),a(\"texttemplatefallback\"),r.texttemplate||a(\"textinfo\",F6e.isArrayOrTypedArray(c)?\"text+label\":\"label\"),a(\"hovertext\"),a(\"hovertemplate\"),a(\"hovertemplatefallback\");var f=a(\"pathbar.visible\"),h=\"auto\";_Lt(t,r,i,a,h,{hasPathbar:f,moduleHasSelected:!1,moduleHasUnselected:!1,moduleHasConstrain:!1,moduleHasCliponaxis:!1,moduleHasTextangle:!1,moduleHasInsideanchor:!1}),a(\"textposition\");var d=r.textposition.indexOf(\"bottom\")!==-1;bLt(t,r,i,a);var 
v=r._hasColorscale=wLt(t,\"marker\",\"colors\")||(t.marker||{}).coloraxis;v?TLt(t,r,i,a,{prefix:\"marker.\",cLetter:\"c\"}):a(\"marker.depthfade\",!(r.marker.colors||[]).length);var _=r.textfont.size*2;a(\"marker.pad.t\",d?_/4:_),a(\"marker.pad.l\",_/4),a(\"marker.pad.r\",_/4),a(\"marker.pad.b\",d?_:_/4),a(\"marker.cornerradius\"),r._hovered={marker:{line:{width:2,color:mLt.contrast(i.paper_bgcolor)}}},f&&(a(\"pathbar.thickness\",r.pathbar.textfont.size+2*xLt),a(\"pathbar.side\"),a(\"pathbar.edgeshape\")),a(\"sort\"),a(\"root.color\"),yLt(r,i,a),r._length=null}});var N6e=ye((s0r,B6e)=>{\"use strict\";var ALt=Dr(),SLt=HW();B6e.exports=function(t,r){function n(i,a){return ALt.coerce(t,r,SLt,i,a)}n(\"treemapcolorway\",r.colorway),n(\"extendtreemapcolors\")}});var WW=ye(jW=>{\"use strict\";var U6e=RE();jW.calc=function(e,t){return U6e.calc(e,t)};jW.crossTraceCalc=function(e){return U6e._runCrossTraceCalc(\"treemap\",e)}});var XW=ye((u0r,V6e)=>{\"use strict\";V6e.exports=function e(t,r,n){var i;n.swapXY&&(i=t.x0,t.x0=t.y0,t.y0=i,i=t.x1,t.x1=t.y1,t.y1=i),n.flipX&&(i=t.x0,t.x0=r[0]-t.x1,t.x1=r[0]-i),n.flipY&&(i=t.y0,t.y0=r[1]-t.y1,t.y1=r[1]-i);var a=t.children;if(a)for(var o=0;o{\"use strict\";var FA=PE(),MLt=XW();G6e.exports=function(t,r,n){var i=n.flipX,a=n.flipY,o=n.packing===\"dice-slice\",s=n.pad[a?\"bottom\":\"top\"],l=n.pad[i?\"right\":\"left\"],u=n.pad[i?\"left\":\"right\"],c=n.pad[a?\"top\":\"bottom\"],f;o&&(f=l,l=s,s=f,f=u,u=c,c=f);var h=FA.treemap().tile(ELt(n.packing,n.squarifyratio)).paddingInner(n.pad.inner).paddingLeft(l).paddingRight(u).paddingTop(s).paddingBottom(c).size(o?[r[1],r[0]]:r)(t);return(o||i||a)&&MLt(h,r,{swapXY:o,flipX:i,flipY:a}),h};function ELt(e,t){switch(e){case\"squarify\":return FA.treemapSquarify.ratio(t);case\"binary\":return FA.treemapBinary;case\"dice\":return FA.treemapDice;case\"slice\":return FA.treemapSlice;default:return FA.treemapSliceDice}}});var GD=ye((f0r,X6e)=>{\"use strict\";var H6e=Oa(),zA=ka(),j6e=Dr(),YW=Ky(),kLt=bv().resizeText,CLt=BD();function LLt(e){var t=e._fullLayout._treemaplayer.selectAll(\".trace\");kLt(e,t,\"treemap\"),t.each(function(r){var n=H6e.select(this),i=r[0],a=i.trace;n.style(\"opacity\",a.opacity),n.selectAll(\"path.surface\").each(function(o){H6e.select(this).call(W6e,o,a,e,{hovered:!1})})})}function W6e(e,t,r,n,i){var a=(i||{}).hovered,o=t.data.data,s=o.i,l,u,c=o.color,f=YW.isHierarchyRoot(t),h=1;if(a)l=r._hovered.marker.line.color,u=r._hovered.marker.line.width;else if(f&&c===r.root.color)h=100,l=\"rgba(0,0,0,0)\",u=0;else if(l=j6e.castOption(r,s,\"marker.line.color\")||zA.defaultLine,u=j6e.castOption(r,s,\"marker.line.width\")||0,!r._hasColorscale&&!t.onPathbar){var d=r.marker.depthfade;if(d){var v=zA.combine(zA.addOpacity(r._backgroundColor,.75),c),_;if(d===!0){var b=YW.getMaxDepth(r);isFinite(b)?YW.isLeaf(t)?_=0:_=r._maxVisibleLayers-(t.data.depth-r._entryDepth):_=t.data.height+1}else _=t.data.depth-r._entryDepth,r._atRootLevel||_++;if(_>0)for(var p=0;p<_;p++){var k=.5*p/_;c=zA.combine(zA.addOpacity(v,k),c)}}}e.call(CLt,t,r,n,c).style(\"stroke-width\",u).call(zA.stroke,l).style(\"opacity\",h)}X6e.exports={style:LLt,styleOne:W6e}});var $6e=ye((h0r,J6e)=>{\"use strict\";var Z6e=Oa(),HD=Dr(),Y6e=So(),PLt=ru(),ILt=ZW(),K6e=GD().styleOne,KW=z2(),OA=Ky(),RLt=HE(),JW=!0;J6e.exports=function(t,r,n,i,a){var 
o=a.barDifY,s=a.width,l=a.height,u=a.viewX,c=a.viewY,f=a.pathSlice,h=a.toMoveInsideSlice,d=a.strTransform,v=a.hasTransition,_=a.handleSlicesExit,b=a.makeUpdateSliceInterpolator,p=a.makeUpdateTextInterpolator,k={},E=t._context.staticPlot,S=t._fullLayout,L=r[0],x=L.trace,C=L.hierarchy,M=s/x._entryDepth,g=OA.listPath(n.data,\"id\"),P=ILt(C.copy(),[s,l],{packing:\"dice\",pad:{inner:0,top:0,left:0,right:0,bottom:0}}).descendants();P=P.filter(function(z){var O=g.indexOf(z.data.id);return O===-1?!1:(z.x0=M*O,z.x1=M*(O+1),z.y0=o,z.y1=o+l,z.onPathbar=!0,!0)}),P.reverse(),i=i.data(P,OA.getPtId),i.enter().append(\"g\").classed(\"pathbar\",!0),_(i,JW,k,[s,l],f),i.order();var T=i;v&&(T=T.transition().each(\"end\",function(){var z=Z6e.select(this);OA.setSliceCursor(z,t,{hideOnRoot:!1,hideOnLeaves:!1,isTransitioning:!1})})),T.each(function(z){z._x0=u(z.x0),z._x1=u(z.x1),z._y0=c(z.y0),z._y1=c(z.y1),z._hoverX=u(z.x1-Math.min(s,l)/2),z._hoverY=c(z.y1-l/2);var O=Z6e.select(this),V=HD.ensureSingle(O,\"path\",\"surface\",function(N){N.style(\"pointer-events\",E?\"none\":\"all\")});v?V.transition().attrTween(\"d\",function(N){var j=b(N,JW,k,[s,l]);return function(re){return f(j(re))}}):V.attr(\"d\",f),O.call(RLt,n,t,r,{styleOne:K6e,eventDataKeys:KW.eventDataKeys,transitionTime:KW.CLICK_TRANSITION_TIME,transitionEasing:KW.CLICK_TRANSITION_EASING}).call(OA.setSliceCursor,t,{hideOnRoot:!1,hideOnLeaves:!1,isTransitioning:t._transitioning}),V.call(K6e,z,x,t,{hovered:!1}),z._text=(OA.getPtLabel(z)||\"\").split(\"
\").join(\" \")||\"\";var G=HD.ensureSingle(O,\"g\",\"slicetext\"),Z=HD.ensureSingle(G,\"text\",\"\",function(N){N.attr(\"data-notex\",1)}),H=HD.ensureUniformFontSize(t,OA.determineTextFont(x,z,S.font,{onPathbar:!0}));Z.text(z._text||\" \").classed(\"slicetext\",!0).attr(\"text-anchor\",\"start\").call(Y6e.font,H).call(PLt.convertToTspans,t),z.textBB=Y6e.bBox(Z.node()),z.transform=h(z,{fontSize:H.size,onPathbar:!0}),z.transform.fontSize=H.size,v?Z.transition().attrTween(\"transform\",function(N){var j=p(N,JW,k,[s,l]);return function(re){return d(j(re))}}):Z.attr(\"transform\",d(z))})}});var rLe=ye((d0r,tLe)=>{\"use strict\";var Q6e=Oa(),$W=(D2(),ob(R2)).interpolate,Z_=Ky(),WE=Dr(),eLe=e2().TEXTPAD,DLt=n2(),FLt=DLt.toMoveInsideBar,zLt=bv(),QW=zLt.recordMinTextSize,OLt=z2(),qLt=$6e();function q2(e){return Z_.isHierarchyRoot(e)?\"\":Z_.getPtId(e)}tLe.exports=function(t,r,n,i,a){var o=t._fullLayout,s=r[0],l=s.trace,u=l.type,c=u===\"icicle\",f=s.hierarchy,h=Z_.findEntryWithLevel(f,l.level),d=Q6e.select(n),v=d.selectAll(\"g.pathbar\"),_=d.selectAll(\"g.slice\");if(!h){v.remove(),_.remove();return}var b=Z_.isHierarchyRoot(h),p=!o.uniformtext.mode&&Z_.hasTransition(i),k=Z_.getMaxDepth(l),E=function($e){return $e.data.depth-h.data.depth-1?C+P:-(g+P):0,z={x0:M,x1:M,y0:T,y1:T+g},O=function($e,St,Qt){var Vt=l.tiling.pad,_t=function(lr){return lr-Vt<=St.x0},It=function(lr){return lr+Vt>=St.x1},mt=function(lr){return lr-Vt<=St.y0},er=function(lr){return lr+Vt>=St.y1};return $e.x0===St.x0&&$e.x1===St.x1&&$e.y0===St.y0&&$e.y1===St.y1?{x0:$e.x0,x1:$e.x1,y0:$e.y0,y1:$e.y1}:{x0:_t($e.x0-Vt)?0:It($e.x0-Vt)?Qt[0]:$e.x0,x1:_t($e.x1+Vt)?0:It($e.x1+Vt)?Qt[0]:$e.x1,y0:mt($e.y0-Vt)?0:er($e.y0-Vt)?Qt[1]:$e.y0,y1:mt($e.y1+Vt)?0:er($e.y1+Vt)?Qt[1]:$e.y1}},V=null,G={},Z={},H=null,N=function($e,St){return St?G[q2($e)]:Z[q2($e)]},j=function($e,St,Qt,Vt){if(St)return G[q2(f)]||z;var _t=Z[l.level]||Qt;return E($e)?O($e,_t,Vt):{}};s.hasMultipleRoots&&b&&k++,l._maxDepth=k,l._backgroundColor=o.paper_bgcolor,l._entryDepth=h.data.depth,l._atRootLevel=b;var re=-x/2+S.l+S.w*(L.x[1]+L.x[0])/2,oe=-C/2+S.t+S.h*(1-(L.y[1]+L.y[0])/2),_e=function($e){return re+$e},Ee=function($e){return oe+$e},Ce=Ee(0),me=_e(0),ie=function($e){return me+$e},Se=function($e){return Ce+$e};function Le($e,St){return $e+\",\"+St}var Ae=ie(0),Fe=function($e){$e.x=Math.max(Ae,$e.x)},Pe=l.pathbar.edgeshape,ge=function($e){var St=ie(Math.max(Math.min($e.x0,$e.x0),0)),Qt=ie(Math.min(Math.max($e.x1,$e.x1),M)),Vt=Se($e.y0),_t=Se($e.y1),It=g/2,mt={},er={};mt.x=St,er.x=Qt,mt.y=er.y=(Vt+_t)/2;var lr={x:St,y:Vt},Tr={x:Qt,y:Vt},Lr={x:Qt,y:_t},ti={x:St,y:_t};return Pe===\">\"?(lr.x-=It,Tr.x-=It,Lr.x-=It,ti.x-=It):Pe===\"/\"?(Lr.x-=It,ti.x-=It,mt.x-=It/2,er.x-=It/2):Pe===\"\\\\\"?(lr.x-=It,Tr.x-=It,mt.x-=It/2,er.x-=It/2):Pe===\"<\"&&(mt.x-=It,er.x-=It),Fe(lr),Fe(ti),Fe(mt),Fe(Tr),Fe(Lr),Fe(er),\"M\"+Le(lr.x,lr.y)+\"L\"+Le(Tr.x,Tr.y)+\"L\"+Le(er.x,er.y)+\"L\"+Le(Lr.x,Lr.y)+\"L\"+Le(ti.x,ti.y)+\"L\"+Le(mt.x,mt.y)+\"Z\"},Re=l[c?\"tiling\":\"marker\"].pad,ce=function($e){return l.textposition.indexOf($e)!==-1},Ze=ce(\"top\"),ut=ce(\"left\"),pt=ce(\"right\"),Zt=ce(\"bottom\"),st=function($e){var St=_e($e.x0),Qt=_e($e.x1),Vt=Ee($e.y0),_t=Ee($e.y1),It=Qt-St,mt=_t-Vt;if(!It||!mt)return\"\";var er=l.marker.cornerradius||0,lr=Math.min(er,It/2,mt/2);lr&&$e.data&&$e.data.data&&$e.data.data.label&&(Ze&&(lr=Math.min(lr,Re.t)),ut&&(lr=Math.min(lr,Re.l)),pt&&(lr=Math.min(lr,Re.r)),Zt&&(lr=Math.min(lr,Re.b)));var Tr=function(Lr,ti){return lr?\"a\"+Le(lr,lr)+\" 0 0 1 
\"+Le(Lr,ti):\"\"};return\"M\"+Le(St,Vt+lr)+Tr(lr,-lr)+\"L\"+Le(Qt-lr,Vt)+Tr(lr,lr)+\"L\"+Le(Qt,_t-lr)+Tr(-lr,lr)+\"L\"+Le(St+lr,_t)+Tr(-lr,-lr)+\"Z\"},lt=function($e,St){var Qt=$e.x0,Vt=$e.x1,_t=$e.y0,It=$e.y1,mt=$e.textBB,er=Ze||St.isHeader&&!Zt,lr=er?\"start\":Zt?\"end\":\"middle\",Tr=ce(\"right\"),Lr=ce(\"left\")||St.onPathbar,ti=Lr?-1:Tr?1:0;if(St.isHeader){if(Qt+=(c?Re:Re.l)-eLe,Vt-=(c?Re:Re.r)-eLe,Qt>=Vt){var Br=(Qt+Vt)/2;Qt=Br,Vt=Br}var Vr;Zt?(Vr=It-(c?Re:Re.b),_t{\"use strict\";var BLt=Oa(),NLt=Ky(),ULt=bv(),VLt=ULt.clearMinTextSize,GLt=N0().resizeText,iLe=rLe();nLe.exports=function(t,r,n,i,a){var o=a.type,s=a.drawDescendants,l=t._fullLayout,u=l[\"_\"+o+\"layer\"],c,f,h=!n;if(VLt(o,l),c=u.selectAll(\"g.trace.\"+o).data(r,function(v){return v[0].trace.uid}),c.enter().append(\"g\").classed(\"trace\",!0).classed(o,!0),c.order(),!l.uniformtext.mode&&NLt.hasTransition(n)){i&&(f=i());var d=BLt.transition().duration(n.duration).ease(n.easing).each(\"end\",function(){f&&f()}).each(\"interrupt\",function(){f&&f()});d.each(function(){u.selectAll(\"g.trace\").each(function(v){iLe(t,v,this,n,s)})})}else c.each(function(v){iLe(t,v,this,n,s)}),l.uniformtext.mode&&GLt(t,u.selectAll(\".trace\"),o);h&&c.exit().remove()}});var uLe=ye((p0r,lLe)=>{\"use strict\";var aLe=Oa(),jD=Dr(),oLe=So(),HLt=ru(),jLt=ZW(),sLe=GD().styleOne,tX=z2(),Y_=Ky(),WLt=HE(),XLt=UD().formatSliceLabel,rX=!1;lLe.exports=function(t,r,n,i,a){var o=a.width,s=a.height,l=a.viewX,u=a.viewY,c=a.pathSlice,f=a.toMoveInsideSlice,h=a.strTransform,d=a.hasTransition,v=a.handleSlicesExit,_=a.makeUpdateSliceInterpolator,b=a.makeUpdateTextInterpolator,p=a.prevEntry,k={},E=t._context.staticPlot,S=t._fullLayout,L=r[0],x=L.trace,C=x.textposition.indexOf(\"left\")!==-1,M=x.textposition.indexOf(\"right\")!==-1,g=x.textposition.indexOf(\"bottom\")!==-1,P=!g&&!x.marker.pad.t||g&&!x.marker.pad.b,T=jLt(n,[o,s],{packing:x.tiling.packing,squarifyratio:x.tiling.squarifyratio,flipX:x.tiling.flip.indexOf(\"x\")>-1,flipY:x.tiling.flip.indexOf(\"y\")>-1,pad:{inner:x.tiling.pad,top:x.marker.pad.t,left:x.marker.pad.l,right:x.marker.pad.r,bottom:x.marker.pad.b}}),z=T.descendants(),O=1/0,V=-1/0;z.forEach(function(j){var re=j.depth;re>=x._maxDepth?(j.x0=j.x1=(j.x0+j.x1)/2,j.y0=j.y1=(j.y0+j.y1)/2):(O=Math.min(O,re),V=Math.max(V,re))}),i=i.data(z,Y_.getPtId),x._maxVisibleLayers=isFinite(V)?V-O+1:0,i.enter().append(\"g\").classed(\"slice\",!0),v(i,rX,k,[o,s],c),i.order();var G=null;if(d&&p){var Z=Y_.getPtId(p);i.each(function(j){G===null&&Y_.getPtId(j)===Z&&(G={x0:j.x0,x1:j.x1,y0:j.y0,y1:j.y1})})}var H=function(){return G||{x0:0,x1:o,y0:0,y1:s}},N=i;return d&&(N=N.transition().each(\"end\",function(){var j=aLe.select(this);Y_.setSliceCursor(j,t,{hideOnRoot:!0,hideOnLeaves:!1,isTransitioning:!1})})),N.each(function(j){var re=Y_.isHeader(j,x);j._x0=l(j.x0),j._x1=l(j.x1),j._y0=u(j.y0),j._y1=u(j.y1),j._hoverX=l(j.x1-x.marker.pad.r),j._hoverY=u(g?j.y1-x.marker.pad.b/2:j.y0+x.marker.pad.t/2);var oe=aLe.select(this),_e=jD.ensureSingle(oe,\"path\",\"surface\",function(Le){Le.style(\"pointer-events\",E?\"none\":\"all\")});d?_e.transition().attrTween(\"d\",function(Le){var Ae=_(Le,rX,H(),[o,s]);return function(Fe){return 
c(Ae(Fe))}}):_e.attr(\"d\",c),oe.call(WLt,n,t,r,{styleOne:sLe,eventDataKeys:tX.eventDataKeys,transitionTime:tX.CLICK_TRANSITION_TIME,transitionEasing:tX.CLICK_TRANSITION_EASING}).call(Y_.setSliceCursor,t,{isTransitioning:t._transitioning}),_e.call(sLe,j,x,t,{hovered:!1}),j.x0===j.x1||j.y0===j.y1?j._text=\"\":re?j._text=P?\"\":Y_.getPtLabel(j)||\"\":j._text=XLt(j,n,x,r,S)||\"\";var Ee=jD.ensureSingle(oe,\"g\",\"slicetext\"),Ce=jD.ensureSingle(Ee,\"text\",\"\",function(Le){Le.attr(\"data-notex\",1)}),me=jD.ensureUniformFontSize(t,Y_.determineTextFont(x,j,S.font)),ie=j._text||\" \",Se=re&&ie.indexOf(\"
\")===-1;Ce.text(ie).classed(\"slicetext\",!0).attr(\"text-anchor\",M?\"end\":C||Se?\"start\":\"middle\").call(oLe.font,me).call(HLt.convertToTspans,t),j.textBB=oLe.bBox(Ce.node()),j.transform=f(j,{fontSize:me.size,isHeader:re}),j.transform.fontSize=me.size,d?Ce.transition().attrTween(\"transform\",function(Le){var Ae=b(Le,rX,H(),[o,s]);return function(Fe){return h(Ae(Fe))}}):Ce.attr(\"transform\",h(j))}),G}});var fLe=ye((g0r,cLe)=>{\"use strict\";var ZLt=eX(),YLt=uLe();cLe.exports=function(t,r,n,i){return ZLt(t,r,n,i,{type:\"treemap\",drawDescendants:YLt})}});var dLe=ye((m0r,hLe)=>{\"use strict\";hLe.exports={moduleType:\"trace\",name:\"treemap\",basePlotModule:C6e(),categories:[],animatable:!0,attributes:VD(),layoutAttributes:HW(),supplyDefaults:q6e(),supplyLayoutDefaults:N6e(),calc:WW().calc,crossTraceCalc:WW().crossTraceCalc,plot:fLe(),style:GD().style,colorbar:$d(),meta:{}}});var pLe=ye((y0r,vLe)=>{\"use strict\";vLe.exports=dLe()});var mLe=ye(qA=>{\"use strict\";var gLe=Mc();qA.name=\"icicle\";qA.plot=function(e,t,r,n){gLe.plotBasePlot(qA.name,e,t,r,n)};qA.clean=function(e,t,r,n){gLe.cleanBasePlot(qA.name,e,t,r,n)}});var iX=ye((x0r,xLe)=>{\"use strict\";var{hovertemplateAttrs:KLt,texttemplateAttrs:JLt,templatefallbackAttrs:yLe}=Ll(),$Lt=Tu(),QLt=Cc().attributes,XE=S2(),o0=LE(),WD=VD(),_Le=z2(),ePt=Ao().extendFlat,tPt=Pd().pattern;xLe.exports={labels:o0.labels,parents:o0.parents,values:o0.values,branchvalues:o0.branchvalues,count:o0.count,level:o0.level,maxdepth:o0.maxdepth,tiling:{orientation:{valType:\"enumerated\",values:[\"v\",\"h\"],dflt:\"h\",editType:\"plot\"},flip:WD.tiling.flip,pad:{valType:\"number\",min:0,dflt:0,editType:\"plot\"},editType:\"calc\"},marker:ePt({colors:o0.marker.colors,line:o0.marker.line,pattern:tPt,editType:\"calc\"},$Lt(\"marker\",{colorAttr:\"colors\",anim:!1})),leaf:o0.leaf,pathbar:WD.pathbar,text:XE.text,textinfo:o0.textinfo,texttemplate:JLt({editType:\"plot\"},{keys:_Le.eventDataKeys.concat([\"label\",\"value\"])}),texttemplatefallback:yLe({editType:\"plot\"}),hovertext:XE.hovertext,hoverinfo:o0.hoverinfo,hovertemplate:KLt({},{keys:_Le.eventDataKeys}),hovertemplatefallback:yLe(),textfont:XE.textfont,insidetextfont:XE.insidetextfont,outsidetextfont:WD.outsidetextfont,textposition:WD.textposition,sort:XE.sort,root:o0.root,domain:QLt({name:\"icicle\",trace:!0,editType:\"calc\"})}});var nX=ye((b0r,bLe)=>{\"use strict\";bLe.exports={iciclecolorway:{valType:\"colorlist\",editType:\"calc\"},extendiciclecolors:{valType:\"boolean\",dflt:!0,editType:\"calc\"}}});var SLe=ye((w0r,ALe)=>{\"use strict\";var wLe=Dr(),rPt=iX(),iPt=ka(),nPt=Cc().defaults,aPt=r0().handleText,oPt=e2().TEXTPAD,sPt=M2().handleMarkerDefaults,TLe=tc(),lPt=TLe.hasColorscale,uPt=TLe.handleDefaults;ALe.exports=function(t,r,n,i){function a(d,v){return wLe.coerce(t,r,rPt,d,v)}var o=a(\"labels\"),s=a(\"parents\");if(!o||!o.length||!s||!s.length){r.visible=!1;return}var l=a(\"values\");l&&l.length?a(\"branchvalues\"):a(\"count\"),a(\"level\"),a(\"maxdepth\"),a(\"tiling.orientation\"),a(\"tiling.flip\"),a(\"tiling.pad\");var u=a(\"text\");a(\"texttemplate\"),a(\"texttemplatefallback\"),r.texttemplate||a(\"textinfo\",wLe.isArrayOrTypedArray(u)?\"text+label\":\"label\"),a(\"hovertext\"),a(\"hovertemplate\"),a(\"hovertemplatefallback\");var c=a(\"pathbar.visible\"),f=\"auto\";aPt(t,r,i,a,f,{hasPathbar:c,moduleHasSelected:!1,moduleHasUnselected:!1,moduleHasConstrain:!1,moduleHasCliponaxis:!1,moduleHasTextangle:!1,moduleHasInsideanchor:!1}),a(\"textposition\"),sPt(t,r,i,a);var 
[minified plotly.js library bundle omitted]
" ] }, + "jetTransient": { + "display_id": null + }, "metadata": {}, "output_type": "display_data" } ], + "source": [ + "# Visualize first two weeks\n", + "import xarray as xr\n", + "\n", + "profiles = xr.Dataset(\n", + " {\n", + " 'Heat Demand [MW]': xr.DataArray(heat_demand[:1344], dims=['time'], coords={'time': timesteps[:1344]}),\n", + " 'Electricity Price [€/MWh]': xr.DataArray(\n", + " electricity_price[:1344], dims=['time'], coords={'time': timesteps[:1344]}\n", + " ),\n", + " }\n", + ")\n", + "\n", + "df = profiles.to_dataframe().reset_index().melt(id_vars='time', var_name='variable', value_name='value')\n", + "fig = px.line(df, x='time', y='value', facet_col='variable', height=300)\n", + "fig.update_yaxes(matches=None, showticklabels=True)\n", + "fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1]))\n", + "fig" + ] + }, + { + "cell_type": "markdown", + "id": "6", + "metadata": {}, + "source": [ + "## Build the FlowSystem\n", + "\n", + "A district heating system with CHP, boiler, and storage:" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "7", + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T14:35:43.493407Z", + "start_time": "2025-12-14T14:35:43.461305Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "System: 2976 timesteps (31 days)\n" + ] + } + ], + "source": [ + "def build_system(timesteps, heat_demand, electricity_demand, electricity_price, gas_price):\n", + " \"\"\"Build a district heating system with CHP, boiler, and storage.\"\"\"\n", + " fs = fx.FlowSystem(timesteps)\n", + "\n", + " fs.add_elements(\n", + " # Buses\n", + " fx.Bus('Electricity'),\n", + " fx.Bus('Heat'),\n", + " fx.Bus('Gas'),\n", + " fx.Bus('Coal'),\n", + " # Effects\n", + " fx.Effect('costs', '€', 'Total Costs', is_standard=True, is_objective=True),\n", + " fx.Effect('CO2', 'kg', 'CO2 Emissions'),\n", + " # CHP unit\n", + " fx.linear_converters.CHP(\n", + " 'CHP',\n", + " thermal_efficiency=0.58,\n", + " electrical_efficiency=0.22,\n", + " electrical_flow=fx.Flow('P_el', bus='Electricity', size=200),\n", + " thermal_flow=fx.Flow(\n", + " 'Q_th',\n", + " bus='Heat',\n", + " size=fx.InvestParameters(\n", + " minimum_size=100,\n", + " maximum_size=300,\n", + " effects_of_investment_per_size={'costs': 10},\n", + " ),\n", + " relative_minimum=0.3,\n", + " ),\n", + " fuel_flow=fx.Flow('Q_fu', bus='Coal'),\n", + " ),\n", + " # Gas Boiler\n", + " fx.linear_converters.Boiler(\n", + " 'Boiler',\n", + " thermal_efficiency=0.85,\n", + " thermal_flow=fx.Flow(\n", + " 'Q_th',\n", + " bus='Heat',\n", + " size=fx.InvestParameters(\n", + " minimum_size=0,\n", + " maximum_size=150,\n", + " effects_of_investment_per_size={'costs': 5},\n", + " ),\n", + " relative_minimum=0.1,\n", + " ),\n", + " fuel_flow=fx.Flow('Q_fu', bus='Gas'),\n", + " ),\n", + " # Thermal Storage\n", + " fx.Storage(\n", + " 'Storage',\n", + " capacity_in_flow_hours=fx.InvestParameters(\n", + " minimum_size=0,\n", + " maximum_size=1000,\n", + " effects_of_investment_per_size={'costs': 0.5},\n", + " ),\n", + " initial_charge_state=0,\n", + " eta_charge=1,\n", + " eta_discharge=1,\n", + " relative_loss_per_hour=0.001,\n", + " charging=fx.Flow('Charge', size=137, bus='Heat'),\n", + " discharging=fx.Flow('Discharge', size=158, bus='Heat'),\n", + " ),\n", + " # Fuel sources\n", + " fx.Source(\n", + " 'GasGrid',\n", + " outputs=[fx.Flow('Q_Gas', bus='Gas', size=1000, effects_per_flow_hour={'costs': gas_price, 'CO2': 0.3})],\n", + " ),\n", + " fx.Source(\n", + " 
'CoalSupply',\n", + " outputs=[fx.Flow('Q_Coal', bus='Coal', size=1000, effects_per_flow_hour={'costs': 4.6, 'CO2': 0.3})],\n", + " ),\n", + " # Electricity grid\n", + " fx.Source(\n", + " 'GridBuy',\n", + " outputs=[\n", + " fx.Flow(\n", + " 'P_el',\n", + " bus='Electricity',\n", + " size=1000,\n", + " effects_per_flow_hour={'costs': electricity_price + 0.5, 'CO2': 0.3},\n", + " )\n", + " ],\n", + " ),\n", + " fx.Sink(\n", + " 'GridSell',\n", + " inputs=[fx.Flow('P_el', bus='Electricity', size=1000, effects_per_flow_hour=-(electricity_price - 0.5))],\n", + " ),\n", + " # Demands\n", + " fx.Sink('HeatDemand', inputs=[fx.Flow('Q_th', bus='Heat', size=1, fixed_relative_profile=heat_demand)]),\n", + " fx.Sink(\n", + " 'ElecDemand', inputs=[fx.Flow('P_el', bus='Electricity', size=1, fixed_relative_profile=electricity_demand)]\n", + " ),\n", + " )\n", + "\n", + " return fs\n", + "\n", + "\n", + "flow_system = build_system(timesteps, heat_demand, electricity_demand, electricity_price, gas_price)\n", + "print(f'System: {len(timesteps)} timesteps ({len(timesteps) / 96:.0f} days)')" + ] + }, + { + "cell_type": "markdown", + "id": "8", + "metadata": {}, + "source": [ + "## Visualizing the Clustering Effect\n", + "\n", + "Before optimizing, let's see how clustering transforms the time series data:" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "9", + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T14:35:46.501672Z", + "start_time": "2025-12-14T14:35:43.520577Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001B[2m2025-12-14 15:35:43.523\u001B[0m \u001B[33mWARNING \u001B[0m │ FlowSystem is not connected_and_transformed. Connecting and transforming data now.\n" + ] + }, + { + "data": { + "text/html": [ + "
[interactive figure HTML omitted]\n"
    ],
    "text/plain": [
     "PlotResult(data=<xarray.Dataset> Size: 262kB\n",
     "Dimensions:  (time: 2976, variable: 5)\n",
     "Coordinates:\n",
     "  * time        (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00\n",
     "  * variable    (variable) object 40B 'ElecDemand(P_el)|fixed_relative_profil...\n",
     "Data variables:\n",
     "    original    (variable, time) float64 119kB 58.39 58.36 58.11 ... 152.1 151.0\n",
     "    aggregated  (variable, time) float64 119kB 58.39 58.36 58.11 ... 152.1 151.0, figure=Figure({\n",
     "    'data': [... nine scattergl traces elided: Original (dashed) vs. Aggregated (solid) lines per profile ...\n",
     "             {'hovertemplate': ('variable=Aggregated - HeatDema' ... '}<br>
value=%{y}'),\n", + " 'legendgroup': 'Aggregated - HeatDemand(Q_th)|fixed_relative_profile',\n", + " 'line': {'color': '#FFA15A', 'dash': 'solid'},\n", + " 'marker': {'symbol': 'circle'},\n", + " 'mode': 'lines',\n", + " 'name': 'Aggregated - HeatDemand(Q_th)|fixed_relative_profile',\n", + " 'showlegend': True,\n", + " 'type': 'scattergl',\n", + " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", + " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", + " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", + " shape=(2976,), dtype='datetime64[ns]'),\n", + " 'xaxis': 'x',\n", + " 'y': {'bdata': ('sp3vp8bDX0BEi2zn+4leQO58PzVeGl' ... 'MzMzMjY0BeukkMAgNjQL+fGi/d4GJA'),\n", + " 'dtype': 'f8'},\n", + " 'yaxis': 'y'}],\n", + " 'layout': {'legend': {'title': {'text': 'variable'}, 'tracegroupgap': 0},\n", + " 'margin': {'t': 60},\n", + " 'template': '...',\n", + " 'title': {'text': 'Original vs Aggregated Data (original = ---)'},\n", + " 'xaxis': {'anchor': 'y', 'domain': [0.0, 1.0], 'title': {'text': 'Time in h'}},\n", + " 'yaxis': {'anchor': 'x', 'domain': [0.0, 1.0], 'title': {'text': 'Value'}}}\n", + "}))" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Cluster with 8 typical days (from 31 days)\n", + "fs_demo = flow_system.copy()\n", + "fs_clustered_demo = fs_demo.transform.cluster(n_clusters=8, cluster_duration='1D')\n", + "\n", + "# Get the clustering object to access tsam results\n", + "clustering = fs_clustered_demo._clustering_info['clustering']\n", + "\n", + "# Plot original vs aggregated data\n", + "clustering.plot()" + ] + }, + { + "cell_type": "markdown", + "id": "10", + "metadata": {}, + "source": [ + "## Comparing Different Clustering Parameters\n", + "\n", + "Let's see how different numbers of typical days affect the data representation:" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "11", + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T14:35:51.309343Z", + "start_time": "2025-12-14T14:35:46.889873Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Comparing: HeatDemand(Q_th)|fixed_relative_profile\n" + ] + } + ], "source": [ "# Test different numbers of clusters\n", - "cluster_configs = [2, 3, 4, 5]\n", + "cluster_configs = [4, 8, 12, 16]\n", "clustering_results = {}\n", "\n", "for n in cluster_configs:\n", @@ -4547,10 +4479,60 @@ " fs_clustered = fs_test.transform.cluster(n_clusters=n, cluster_duration='1D')\n", " clustering_results[n] = fs_clustered._clustering_info['clustering']\n", "\n", - "# Get the first time-varying variable name for comparison\n", - "first_var = list(clustering_results[2].original_data.columns)[0]\n", - "print(f'Comparing: {first_var}')\n", - "\n", + "# Use heat demand for comparison (most relevant for district heating)\n", + "heat_demand_col = [c for c in clustering_results[4].original_data.columns if 'Heat' in c or 'Q_th' in c][0]\n", + "print(f'Comparing: {heat_demand_col}')" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "12", + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T14:35:51.557060Z", + "start_time": "2025-12-14T14:35:51.433611Z" + } + }, + "outputs": [ + { + "data": { + "text/html": [ + "
" + ] + }, + "jetTransient": { + "display_id": null + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ "# Compare the aggregated data for each configuration\n", "fig = make_subplots(\n", " rows=2,\n", @@ -4567,8 +4549,8 @@ " row += 1\n", " col += 1\n", "\n", - " original = clustering.original_data[first_var]\n", - " aggregated = clustering.aggregated_data[first_var]\n", + " original = clustering.original_data[heat_demand_col]\n", + " aggregated = clustering.aggregated_data[heat_demand_col]\n", "\n", " fig.add_trace(\n", " go.Scatter(\n", @@ -4585,7 +4567,7 @@ " go.Scatter(\n", " x=list(range(len(aggregated))),\n", " y=aggregated.values,\n", - " name='Aggregated',\n", + " name='Clustered',\n", " line=dict(color='blue', width=2),\n", " showlegend=(i == 0),\n", " ),\n", @@ -4594,29 +4576,23 @@ " )\n", "\n", "fig.update_layout(\n", - " title=f'Original vs Clustered: {first_var}',\n", + " title='Heat Demand: Original vs Clustered',\n", " height=500,\n", " legend=dict(orientation='h', yanchor='bottom', y=1.02),\n", ")\n", "fig.update_xaxes(title_text='Timestep', row=2)\n", - "fig.update_yaxes(title_text='Value', col=1)\n", + "fig.update_yaxes(title_text='Heat Demand [MW]', col=1)\n", "fig.show()" ] }, { "cell_type": "code", - "execution_count": 5, - "id": "3zsi1g8bokg", + "execution_count": 18, + "id": "13", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T01:18:18.818103Z", - "start_time": "2025-12-14T01:18:18.571453Z" - }, - "execution": { - "iopub.execute_input": "2025-12-14T01:22:23.708331Z", - "iopub.status.busy": "2025-12-14T01:22:23.708032Z", - "iopub.status.idle": "2025-12-14T01:22:23.770295Z", - "shell.execute_reply": "2025-12-14T01:22:23.768542Z" + "end_time": "2025-12-14T14:35:51.666590Z", + "start_time": "2025-12-14T14:35:51.615328Z" } }, "outputs": [ @@ -4625,17 +4601,17 @@ "text/html": [ "\n", - "\n", + "
[styler CSS omitted]\n",
    "[flattened HTML table reconstructed — clustering error metrics, old → new]\n",
-   "  Clusters |  RMSE |  MAE | Max Error | Correlation\n",
-   "         2 |  0.00 | 0.00 |      0.00 |      1.0000\n",
-   "         3 |  0.00 | 0.00 |      0.00 |      1.0000\n",
-   "         4 |  0.00 | 0.00 |      0.00 |      1.0000\n",
-   "         5 |  0.00 | 0.00 |      0.00 |      1.0000\n",
+   "  Typical Days |  RMSE |  MAE | Max Error | Correlation\n",
+   "             4 |  4.84 | 4.52 |     12.19 |      0.9905\n",
+   "             8 |  3.45 | 2.60 |      6.89 |      0.9952\n",
+   "            12 |  1.68 | 0.83 |      6.39 |      0.9989\n",
+   "            16 |  0.37 | 0.25 |      1.86 |      0.9999\n",
    "[end of table]
\n" ], "text/plain": [ - "" + "" ] }, - "execution_count": 5, + "execution_count": 18, "metadata": {}, "output_type": "execute_result" } @@ -4687,10 +4663,9 @@ "# Calculate error metrics for each configuration\n", "metrics = []\n", "for n, clustering in clustering_results.items():\n", - " original = clustering.original_data[first_var].values\n", - " aggregated = clustering.aggregated_data[first_var].values\n", + " original = clustering.original_data[heat_demand_col].values\n", + " aggregated = clustering.aggregated_data[heat_demand_col].values\n", "\n", - " # Calculate metrics\n", " rmse = np.sqrt(np.mean((original - aggregated) ** 2))\n", " mae = np.mean(np.abs(original - aggregated))\n", " max_error = np.max(np.abs(original - aggregated))\n", @@ -4698,7 +4673,7 @@ "\n", " metrics.append(\n", " {\n", - " 'Clusters': n,\n", + " 'Typical Days': n,\n", " 'RMSE': rmse,\n", " 'MAE': mae,\n", " 'Max Error': max_error,\n", @@ -4706,7 +4681,7 @@ " }\n", " )\n", "\n", - "metrics_df = pd.DataFrame(metrics).set_index('Clusters')\n", + "metrics_df = pd.DataFrame(metrics).set_index('Typical Days')\n", "metrics_df.style.format(\n", " {\n", " 'RMSE': '{:.2f}',\n", @@ -4719,7 +4694,7 @@ }, { "cell_type": "markdown", - "id": "7", + "id": "14", "metadata": {}, "source": [ "## Baseline: Full Optimization\n", @@ -4729,42 +4704,59 @@ }, { "cell_type": "code", - "execution_count": 6, - "id": "8", + "execution_count": 19, + "id": "15", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T01:18:21.636630Z", - "start_time": "2025-12-14T01:18:18.887553Z" - }, - "execution": { - "iopub.execute_input": "2025-12-14T01:22:23.774996Z", - "iopub.status.busy": "2025-12-14T01:22:23.774446Z", - "iopub.status.idle": "2025-12-14T01:22:25.286112Z", - "shell.execute_reply": "2025-12-14T01:22:25.284951Z" + "end_time": "2025-12-14T14:36:18.822362Z", + "start_time": "2025-12-14T14:35:51.686260Z" } }, "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001B[2m2025-12-14 15:35:52.048\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", + "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", + "\u001B[2m2025-12-14 15:35:52.216\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", + "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). 
Consider using status_parameters to allow the Flow to be switched active and inactive.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 64/64 [00:00<00:00, 75.17it/s] \n", + "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 55/55 [00:00<00:00, 421.23it/s]\n", + "Writing binary variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 5/5 [00:00<00:00, 368.46it/s]\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", - "MIP linopy-problem-shi3zqp7 has 2200 rows; 2199 cols; 6740 nonzeros; 336 integer variables (336 binary)\n", + "MIP linopy-problem-dqtvcofp has 89316 rows; 80386 cols; 264919 nonzeros; 5955 integer variables (5955 binary)\n", "Coefficient ranges:\n", - " Matrix [1e-05, 1e+02]\n", + " Matrix [1e-05, 1e+03]\n", " Cost [1e+00, 1e+00]\n", - " Bound [1e+00, 5e+02]\n", - " RHS [1e+00, 2e+02]\n", + " Bound [1e+00, 1e+03]\n", + " RHS [1e+00, 1e+00]\n", "Presolving model\n", - "1176 rows, 1008 cols, 2855 nonzeros 0s\n", - "840 rows, 672 cols, 3022 nonzeros 0s\n", - "840 rows, 672 cols, 3022 nonzeros 0s\n", - "Presolve reductions: rows 840(-1360); columns 672(-1527); nonzeros 3022(-3718) \n", + "38694 rows, 26790 cols, 92267 nonzeros 0s\n", + "31169 rows, 18018 cols, 88849 nonzeros 0s\n", + "30836 rows, 17685 cols, 89182 nonzeros 0s\n", + "Presolve reductions: rows 30836(-58480); columns 17685(-62701); nonzeros 89182(-175737) \n", "\n", "Solving MIP model with:\n", - " 840 rows\n", - " 672 cols (336 binary, 0 integer, 0 implied int., 336 continuous, 0 domain fixed)\n", - " 3022 nonzeros\n", + " 30836 rows\n", + " 17685 cols (5955 binary, 0 integer, 0 implied int., 11730 continuous, 0 domain fixed)\n", + " 89182 nonzeros\n", "\n", "Src: B => Branching; C => Central rounding; F => Feasibility pump; H => Heuristic;\n", " I => Shifting; J => Feasibility jump; L => Sub-MIP; P => Empty MIP; R => Randomized rounding;\n", @@ -4774,39 +4766,40 @@ " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. 
| LpIters Time\n", "\n", - " 0 0 0 0.00% 0 inf inf 0 0 0 0 0.0s\n", - " R 0 0 0 0.00% 558.830517 583.6312648 4.25% 0 0 0 465 0.1s\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - " L 0 0 0 0.00% 558.830517 558.830517 0.00% 174 58 0 523 0.3s\n", - " 1 0 1 100.00% 558.830517 558.830517 0.00% 174 58 0 731 0.3s\n", + " 0 0 0 0.00% -48251946.82856 inf inf 0 0 0 0 0.8s\n", + " R 0 0 0 0.00% 2209206.133553 2278967.860722 3.06% 0 0 0 15439 2.3s\n", + " C 0 0 0 0.00% 2209206.133553 2276813.637485 2.97% 7380 2937 0 18513 6.5s\n", + " 0 0 0 0.00% 2209206.133553 2276813.637485 2.97% 7578 2982 0 18623 11.5s\n", + " L 0 0 0 0.00% 2209206.133553 2209206.150262 0.00% 7578 2989 0 18631 23.4s\n", + " 1 0 1 100.00% 2209206.133553 2209206.150262 0.00% 7578 2989 0 21605 23.5s\n", "\n", "Solving report\n", - " Model linopy-problem-shi3zqp7\n", + " Model linopy-problem-dqtvcofp\n", " Status Optimal\n", - " Primal bound 558.830516996\n", - " Dual bound 558.830516996\n", + " Primal bound 2209206.15026\n", + " Dual bound 2209206.13355\n", " Gap 0% (tolerance: 1%)\n", - " P-D integral 0.00929901200095\n", + " P-D integral 0.629336568023\n", " Solution status feasible\n", - " 558.830516996 (objective)\n", + " 2209206.15026 (objective)\n", " 0 (bound viol.)\n", - " 8.881784197e-16 (int. viol.)\n", + " 0 (int. viol.)\n", " 0 (row viol.)\n", - " Timing 0.27\n", - " Max sub-MIP depth 1\n", + " Timing 23.46\n", + " Max sub-MIP depth 2\n", " Nodes 1\n", " Repair LPs 0\n", - " LP iterations 731\n", + " LP iterations 21605\n", " 0 (strong br.)\n", - " 58 (separation)\n", - " 208 (heuristics)\n", - "Full optimization: 1.50 seconds\n", - "Cost: 559 €\n" + " 3192 (separation)\n", + " 2974 (heuristics)\n", + "Full optimization: 27.13 seconds\n", + "Cost: 2,209,206 €\n", + "\n", + "Optimized sizes:\n", + " CHP(Q_th): 300.0\n", + " Boiler(Q_th): 0.0\n", + " Storage: 1000.0\n" ] } ], @@ -4819,21 +4812,24 @@ "time_full = timeit.default_timer() - start\n", "\n", "print(f'Full optimization: {time_full:.2f} seconds')\n", - "print(f'Cost: {fs_full.solution[\"costs\"].item():,.0f} €')" + "print(f'Cost: {fs_full.solution[\"costs\"].item():,.0f} €')\n", + "print('\\nOptimized sizes:')\n", + "for name, size in fs_full.statistics.sizes.items():\n", + " print(f' {name}: {float(size.item()):.1f}')" ] }, { "cell_type": "markdown", - "id": "9", + "id": "16", "metadata": {}, "source": [ "## Basic Clustering\n", "\n", - "Cluster the time series into **4 typical days** (since we have 7 days of data):\n", + "Cluster the time series into **8 typical days** (from 31 days of data):\n", "\n", "```python\n", "clustered_fs = flow_system.transform.cluster(\n", - " n_clusters=4, # Number of typical periods\n", + " n_clusters=8, # Number of typical periods\n", " cluster_duration='1D', # Duration per cluster (1 day)\n", ")\n", "```" @@ -4841,43 +4837,59 @@ }, { "cell_type": "code", - "execution_count": 7, - "id": "10", + "execution_count": 20, + "id": "17", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T01:18:23.485055Z", - "start_time": "2025-12-14T01:18:21.711729Z" - }, - "execution": { - "iopub.execute_input": "2025-12-14T01:22:25.293422Z", - "iopub.status.busy": "2025-12-14T01:22:25.293044Z", - "iopub.status.idle": "2025-12-14T01:22:26.753186Z", - "shell.execute_reply": "2025-12-14T01:22:26.749869Z" + "end_time": "2025-12-14T14:36:25.518948Z", + "start_time": "2025-12-14T14:36:18.969306Z" } }, "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001B[2m2025-12-14 15:36:19.954\u001B[0m 
\u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", + "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", + "\u001B[2m2025-12-14 15:36:20.127\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", + "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 81/81 [00:01<00:00, 65.44it/s]\n", + "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 55/55 [00:00<00:00, 808.42it/s]\n", + "Writing binary variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 5/5 [00:00<00:00, 766.39it/s]\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", - "MIP linopy-problem-wfesasa2 has 2890 rows; 2199 cols; 8120 nonzeros; 336 integer variables (336 binary)\n", + "MIP linopy-problem-bhnhp1id has 126461 rows; 80386 cols; 339209 nonzeros; 5955 integer variables (5955 binary)\n", "Coefficient ranges:\n", - " Matrix [1e-05, 1e+02]\n", + " Matrix [1e-05, 1e+03]\n", " Cost [1e+00, 1e+00]\n", - " Bound [1e+00, 5e+02]\n", - " RHS [1e+00, 2e+02]\n", + " Bound [1e+00, 1e+03]\n", + " RHS [1e+00, 1e+00]\n", "Presolving model\n", - "1291 rows, 640 cols, 3200 nonzeros 0s\n", - "910 rows, 419 cols, 2886 nonzeros 0s\n", - "518 rows, 416 cols, 1734 nonzeros 0s\n", - "495 rows, 393 cols, 1780 nonzeros 0s\n", - "Presolve reductions: rows 495(-2395); columns 393(-1806); nonzeros 1780(-6340) \n", + "41449 rows, 7695 cols, 100532 nonzeros 0s\n", + "9148 rows, 5691 cols, 23883 nonzeros 0s\n", + "8222 rows, 4788 cols, 23865 nonzeros 0s\n", + "Presolve reductions: rows 8222(-118239); columns 4788(-75598); nonzeros 23865(-315344) \n", "\n", "Solving MIP model with:\n", - " 495 rows\n", - " 393 cols (198 binary, 0 integer, 0 implied int., 195 continuous, 0 domain fixed)\n", - " 1780 nonzeros\n", + " 8222 rows\n", + " 4788 cols (1585 binary, 0 integer, 0 implied int., 3203 continuous, 0 domain fixed)\n", + " 23865 nonzeros\n", "\n", "Src: B => Branching; C => Central rounding; F => Feasibility pump; H => Heuristic;\n", " I => Shifting; J => Feasibility jump; L => Sub-MIP; P => Empty MIP; R => Randomized rounding;\n", @@ -4887,43 +4899,48 @@ " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. 
| LpIters Time\n", "\n", - " 0 0 0 0.00% 0 inf inf 0 0 0 0 0.0s\n", - " R 0 0 0 0.00% 562.499067 588.4592791 4.41% 0 0 0 265 0.0s\n", - " L 0 0 0 0.00% 562.499067 562.499067 0.00% 108 35 0 300 0.1s\n", - " 1 0 1 100.00% 562.499067 562.499067 0.00% 108 35 0 382 0.1s\n", + " 0 0 0 0.00% -35212528.89731 inf inf 0 0 0 0 0.4s\n", + " 0 0 0 0.00% 2215408.582854 inf inf 0 0 0 3609 0.6s\n", + " R 0 0 0 0.00% 2215408.582854 2215424.331523 0.00% 4826 751 0 4378 1.5s\n", + " 1 0 1 100.00% 2215408.582854 2215424.331523 0.00% 4826 751 0 4378 1.5s\n", "\n", "Solving report\n", - " Model linopy-problem-wfesasa2\n", + " Model linopy-problem-bhnhp1id\n", " Status Optimal\n", - " Primal bound 562.499067033\n", - " Dual bound 562.499067033\n", - " Gap 0% (tolerance: 1%)\n", - " P-D integral 0.00288293546382\n", + " Primal bound 2215424.33152\n", + " Dual bound 2215408.58285\n", + " Gap 0.000711% (tolerance: 1%)\n", + " P-D integral 3.61566538396e-08\n", " Solution status feasible\n", - " 562.499067033 (objective)\n", + " 2215424.33152 (objective)\n", " 0 (bound viol.)\n", - " 2.56905607898e-13 (int. viol.)\n", + " 0 (int. viol.)\n", " 0 (row viol.)\n", - " Timing 0.10\n", - " Max sub-MIP depth 1\n", + " Timing 1.53\n", + " Max sub-MIP depth 0\n", " Nodes 1\n", " Repair LPs 0\n", - " LP iterations 382\n", + " LP iterations 4378\n", " 0 (strong br.)\n", - " 35 (separation)\n", - " 82 (heuristics)\n", - "Clustered optimization: 1.45 seconds\n", - "Cost: 562 €\n", - "Speedup: 1.0x\n" + " 769 (separation)\n", + " 0 (heuristics)\n", + "Clustered optimization: 6.54 seconds\n", + "Cost: 2,215,424 €\n", + "Speedup: 4.1x\n", + "\n", + "Optimized sizes:\n", + " CHP(Q_th): 300.0\n", + " Boiler(Q_th): 0.0\n", + " Storage: 1000.0\n" ] } ], "source": [ "start = timeit.default_timer()\n", "\n", - "# Cluster into 4 typical days\n", + "# Cluster into 8 typical days\n", "fs_clustered = flow_system.transform.cluster(\n", - " n_clusters=4,\n", + " n_clusters=8,\n", " cluster_duration='1D',\n", ")\n", "\n", @@ -4932,12 +4949,15 @@ "\n", "print(f'Clustered optimization: {time_clustered:.2f} seconds')\n", "print(f'Cost: {fs_clustered.solution[\"costs\"].item():,.0f} €')\n", - "print(f'Speedup: {time_full / time_clustered:.1f}x')" + "print(f'Speedup: {time_full / time_clustered:.1f}x')\n", + "print('\\nOptimized sizes:')\n", + "for name, size in fs_clustered.statistics.sizes.items():\n", + " print(f' {name}: {float(size.item()):.1f}')" ] }, { "cell_type": "markdown", - "id": "11", + "id": "18", "metadata": {}, "source": [ "## Compare Results" @@ -4945,18 +4965,12 @@ }, { "cell_type": "code", - "execution_count": 8, - "id": "12", + "execution_count": 21, + "id": "19", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T01:18:23.574337Z", - "start_time": "2025-12-14T01:18:23.554953Z" - }, - "execution": { - "iopub.execute_input": "2025-12-14T01:22:26.757305Z", - "iopub.status.busy": "2025-12-14T01:22:26.757113Z", - "iopub.status.idle": "2025-12-14T01:22:26.765642Z", - "shell.execute_reply": "2025-12-14T01:22:26.765136Z" + "end_time": "2025-12-14T14:36:25.618993Z", + "start_time": "2025-12-14T14:36:25.608382Z" } }, "outputs": [ @@ -4965,47 +4979,68 @@ "text/html": [ "\n", - "\n", + "
[styler CSS omitted]\n",
    "[flattened HTML table reconstructed — full vs. clustered run, old → new]\n",
-   "                     | Time [s] | Cost [€] | Cost Gap [%] | Speedup\n",
-   "  Full (baseline)    |     1.50 |      559 |         0.00 |    1.0x\n",
-   "  Clustered (4 days) |     1.45 |      562 |         0.66 |    1.0x\n",
+   "                     | Time [s] |  Cost [€] | CHP Size | Boiler Size | Storage Size | Cost Gap [%] | Speedup\n",
+   "  Full (baseline)    |    27.13 | 2,209,206 |    300.0 |         0.0 |         1000 |         0.00 |    1.0x\n",
+   "  Clustered (8 days) |     6.54 | 2,215,424 |    300.0 |         0.0 |         1000 |         0.28 |    4.1x\n",
    "[end of table]
\n" ], "text/plain": [ - "" + "" ] }, - "execution_count": 8, + "execution_count": 21, "metadata": {}, "output_type": "execute_result" } ], "source": [ "results = {\n", - " 'Full (baseline)': {'Time [s]': time_full, 'Cost [€]': fs_full.solution['costs'].item()},\n", - " 'Clustered (4 days)': {'Time [s]': time_clustered, 'Cost [€]': fs_clustered.solution['costs'].item()},\n", + " 'Full (baseline)': {\n", + " 'Time [s]': time_full,\n", + " 'Cost [€]': fs_full.solution['costs'].item(),\n", + " 'CHP Size': fs_full.statistics.sizes['CHP(Q_th)'].item(),\n", + " 'Boiler Size': fs_full.statistics.sizes['Boiler(Q_th)'].item(),\n", + " 'Storage Size': fs_full.statistics.sizes['Storage'].item(),\n", + " },\n", + " 'Clustered (8 days)': {\n", + " 'Time [s]': time_clustered,\n", + " 'Cost [€]': fs_clustered.solution['costs'].item(),\n", + " 'CHP Size': fs_clustered.statistics.sizes['CHP(Q_th)'].item(),\n", + " 'Boiler Size': fs_clustered.statistics.sizes['Boiler(Q_th)'].item(),\n", + " 'Storage Size': fs_clustered.statistics.sizes['Storage'].item(),\n", + " },\n", "}\n", "\n", "comparison = pd.DataFrame(results).T\n", @@ -5018,6 +5053,9 @@ " {\n", " 'Time [s]': '{:.2f}',\n", " 'Cost [€]': '{:,.0f}',\n", + " 'CHP Size': '{:.1f}',\n", + " 'Boiler Size': '{:.1f}',\n", + " 'Storage Size': '{:.0f}',\n", " 'Cost Gap [%]': '{:.2f}',\n", " 'Speedup': '{:.1f}x',\n", " }\n", @@ -5026,28 +5064,23 @@ }, { "cell_type": "markdown", - "id": "mn99rfcupf", + "id": "20", "metadata": {}, "source": [ "## Multi-Period Clustering\n", "\n", - "For multi-year investment studies, clustering is applied **independently per period** (year). Each year gets its own set of typical days:" + "For multi-year investment studies, clustering is applied **independently per period** (year).\n", + "Each year gets its own set of typical days:" ] }, { "cell_type": "code", - "execution_count": 9, - "id": "24vgkxoeyqz", + "execution_count": 22, + "id": "21", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T01:18:30.759263Z", - "start_time": "2025-12-14T01:18:23.607144Z" - }, - "execution": { - "iopub.execute_input": "2025-12-14T01:22:26.768344Z", - "iopub.status.busy": "2025-12-14T01:22:26.768176Z", - "iopub.status.idle": "2025-12-14T01:22:32.730327Z", - "shell.execute_reply": "2025-12-14T01:22:32.729236Z" + "end_time": "2025-12-14T14:36:25.680716Z", + "start_time": "2025-12-14T14:36:25.657257Z" } }, "outputs": [ @@ -5055,147 +5088,152 @@ "name": "stdout", "output_type": "stream", "text": [ - "Multi-period system: 48 timesteps × 3 periods × 2 scenarios\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", - "MIP linopy-problem-adnjwebr has 6420 rows; 3870 cols; 16248 nonzeros; 588 integer variables (588 binary)\n", - "Coefficient ranges:\n", - " Matrix [1e-05, 1e+03]\n", - " Cost [3e-01, 7e-01]\n", - " Bound [1e+00, 5e+02]\n", - " RHS [1e+00, 1e+00]\n", - "Presolving model\n", - "2895 rows, 1059 cols, 6927 nonzeros 0s\n", - "2050 rows, 622 cols, 6232 nonzeros 0s\n", - "1068 rows, 612 cols, 3236 nonzeros 0s\n", - "1053 rows, 604 cols, 3203 nonzeros 0s\n", - "Presolve reductions: rows 1053(-5367); columns 604(-3266); nonzeros 3203(-13045) \n", - "\n", - "Solving MIP model with:\n", - " 1053 rows\n", - " 604 cols (305 binary, 0 integer, 0 implied int., 299 continuous, 0 domain fixed)\n", - " 3203 nonzeros\n", - "\n", - "Src: B => Branching; C => Central rounding; F => Feasibility pump; H => Heuristic;\n", - " I => Shifting; 
J => Feasibility jump; L => Sub-MIP; P => Empty MIP; R => Randomized rounding;\n", - " S => Solve LP; T => Evaluate node; U => Unbounded; X => User solution; Y => HiGHS solution;\n", - " Z => ZI Round; l => Trivial lower; p => Trivial point; u => Trivial upper; z => Trivial zero\n", - "\n", - " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", - "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. | LpIters Time\n", - "\n", - " 0 0 0 0.00% 9394.346261 inf inf 0 0 0 0 0.0s\n", - " R 0 0 0 0.00% 18357.702407 19065.666244 3.71% 0 0 0 430 0.0s\n", - " 1 0 1 100.00% 18879.998962 19065.666244 0.97% 1111 123 0 676 0.1s\n", - "\n", - "Solving report\n", - " Model linopy-problem-adnjwebr\n", - " Status Optimal\n", - " Primal bound 19065.6662439\n", - " Dual bound 18879.9989619\n", - " Gap 0.974% (tolerance: 1%)\n", - " P-D integral 0.0013863514249\n", - " Solution status feasible\n", - " 19065.6662439 (objective)\n", - " 0 (bound viol.)\n", - " 0 (int. viol.)\n", - " 0 (row viol.)\n", - " Timing 0.10\n", - " Max sub-MIP depth 0\n", - " Nodes 1\n", - " Repair LPs 0\n", - " LP iterations 676\n", - " 0 (strong br.)\n", - " 246 (separation)\n", - " 0 (heuristics)\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Multi-period clustered cost: 38,227 €\n" + "Multi-period system: 1344 timesteps × 3 periods\n" ] } ], "source": [ - "# Load a multi-period FlowSystem (also has scenarios!)\n", - "fs_multiperiod = fx.FlowSystem.from_netcdf('data/multiperiod_system.nc4')\n", + "# Create a multi-period system (3 years, each with 2 weeks of data)\n", + "data_2w = data['2020-01-01':'2020-01-14 23:45:00'] # Two weeks\n", + "timesteps_2w = data_2w.index\n", "\n", - "print(\n", - " f'Multi-period system: {len(fs_multiperiod.timesteps)} timesteps × {len(fs_multiperiod.periods)} periods × {len(fs_multiperiod.scenarios)} scenarios'\n", + "# Build system with periods\n", + "fs_mp = fx.FlowSystem(\n", + " timesteps_2w,\n", + " periods=pd.Index([2024, 2025, 2026], name='year'),\n", ")\n", "\n", - "# Cluster - each period × scenario combination gets clustered independently\n", - "fs_mp_clustered = fs_multiperiod.transform.cluster(n_clusters=1, cluster_duration='1D')\n", - "fs_mp_clustered.optimize(solver)\n", + "# Scale demands by year (growing demand)\n", + "heat_demand_2w = data_2w['Q_Netz/MW'].to_numpy()\n", + "elec_demand_2w = data_2w['P_Netz/MW'].to_numpy()\n", + "elec_price_2w = data_2w['Strompr.€/MWh'].to_numpy()\n", + "gas_price_2w = data_2w['Gaspr.€/MWh'].to_numpy()\n", "\n", - "print(f'Multi-period clustered cost: {fs_mp_clustered.solution[\"costs\"].sum().item():,.0f} €')" + "# Create period-varying profiles (demand grows 5% per year)\n", + "heat_profile = fx.TimeSeriesData(\n", + " np.stack([heat_demand_2w * 1.0, heat_demand_2w * 1.05, heat_demand_2w * 1.10]),\n", + " dims=['period', 'time'],\n", + ")\n", + "elec_profile = fx.TimeSeriesData(\n", + " np.stack([elec_demand_2w * 1.0, elec_demand_2w * 1.05, elec_demand_2w * 1.10]),\n", + " dims=['period', 'time'],\n", + ")\n", + "\n", + "fs_mp.add_elements(\n", + " fx.Bus('Electricity'),\n", + " fx.Bus('Heat'),\n", + " fx.Bus('Gas'),\n", + " fx.Effect('costs', '€', is_standard=True, is_objective=True),\n", + " fx.linear_converters.Boiler(\n", + " 'Boiler',\n", + " thermal_efficiency=0.85,\n", + " thermal_flow=fx.Flow('Q_th', bus='Heat', size=350),\n", + " fuel_flow=fx.Flow('Q_fu', bus='Gas'),\n", + " ),\n", + " fx.Source(\n", + " 'GasGrid',\n", + " outputs=[fx.Flow('Q_Gas', bus='Gas', size=1000, 
effects_per_flow_hour={'costs': gas_price_2w})],\n", + " ),\n", + " fx.Source(\n", + " 'GridBuy',\n", + " outputs=[fx.Flow('P_el', bus='Electricity', size=1000, effects_per_flow_hour={'costs': elec_price_2w})],\n", + " ),\n", + " fx.Sink('HeatDemand', inputs=[fx.Flow('Q_th', bus='Heat', size=1, fixed_relative_profile=heat_profile)]),\n", + " fx.Sink('ElecDemand', inputs=[fx.Flow('P_el', bus='Electricity', size=1, fixed_relative_profile=elec_profile)]),\n", + ")\n", + "\n", + "print(f'Multi-period system: {len(fs_mp.timesteps)} timesteps × {len(fs_mp.periods)} periods')" ] }, { - "cell_type": "markdown", - "id": "0qjtoobc40uo", - "metadata": {}, + "cell_type": "code", + "execution_count": 23, + "id": "22", + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T14:36:27.218698Z", + "start_time": "2025-12-14T14:36:25.699998Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001B[2m2025-12-14 15:36:25.701\u001B[0m \u001B[33mWARNING \u001B[0m │ FlowSystem is not connected_and_transformed. Connecting and transforming data now.\n", + "Clustering was applied to 3 period(s):\n", + " - period=2024\n", + " - period=2025\n", + " - period=2026\n" + ] + } + ], "source": [ - "The loaded system includes both **periods** and **scenarios**. Clustering is applied independently for each combination:" + "# Cluster - each period gets clustered independently\n", + "fs_mp_clustered = fs_mp.transform.cluster(n_clusters=4, cluster_duration='1D')\n", + "\n", + "# Get clustering info\n", + "clustering_info = fs_mp_clustered._clustering_info\n", + "print(f'Clustering was applied to {len(clustering_info[\"clustering_results\"])} period(s):')\n", + "for (period, _scenario), _ in clustering_info['clustering_results'].items():\n", + " print(f' - period={period}')" ] }, { "cell_type": "code", - "execution_count": 10, - "id": "36269qvz7ti", + "execution_count": 24, + "id": "23", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T01:18:30.895716Z", - "start_time": "2025-12-14T01:18:30.884403Z" - }, - "execution": { - "iopub.execute_input": "2025-12-14T01:22:32.735518Z", - "iopub.status.busy": "2025-12-14T01:22:32.735020Z", - "iopub.status.idle": "2025-12-14T01:22:32.742489Z", - "shell.execute_reply": "2025-12-14T01:22:32.741900Z" + "end_time": "2025-12-14T14:36:29.672690Z", + "start_time": "2025-12-14T14:36:27.402738Z" } }, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 38/38 [00:00<00:00, 80.29it/s]\n", + "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 22/22 [00:00<00:00, 398.66it/s]\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ - "Periods: [2024, 2025, 2026]\n", - "Scenarios: ['high_demand', 'low_demand']\n", + "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", + "LP linopy-problem-u73pgf9e has 49392 rows; 40356 cols; 131016 nonzeros\n", + "Coefficient ranges:\n", + " Matrix [2e-01, 2e+01]\n", + " Cost [1e+00, 1e+00]\n", + " Bound [5e+01, 1e+03]\n", + " RHS [0e+00, 0e+00]\n", + "Presolving model\n", + "0 rows, 0 cols, 0 nonzeros 0s\n", + "0 rows, 0 cols, 0 nonzeros 0s\n", + "Presolve reductions: rows 0(-49392); columns 0(-40356); nonzeros 0(-131016) - Reduced to empty\n", + "Performed postsolve\n", + "Solving the original LP from the solution after postsolve\n", "\n", - "Clustering was applied to 6 combinations:\n", - " - period=2024, scenario=high_demand\n", - " - 
period=2024, scenario=low_demand\n", - " - period=2025, scenario=high_demand\n", - " - period=2025, scenario=low_demand\n", - " - period=2026, scenario=high_demand\n", - " - period=2026, scenario=low_demand\n" + "Model name : linopy-problem-u73pgf9e\n", + "Model status : Optimal\n", + "Objective value : 1.3352558890e+07\n", + "P-D objective error : 1.7437154695e-15\n", + "HiGHS run time : 0.17\n", + "Multi-period clustered cost: 13,352,559 €\n" ] } ], "source": [ - "# The multiperiod_system already has both periods AND scenarios\n", - "# Each period × scenario combination is clustered independently\n", - "print(f'Periods: {fs_multiperiod.periods.tolist()}')\n", - "print(f'Scenarios: {fs_multiperiod.scenarios.tolist()}')\n", - "\n", - "# Get clustering info to see how many combinations were clustered\n", - "clustering_info = fs_mp_clustered._clustering_info\n", - "print(f'\\nClustering was applied to {len(clustering_info[\"clustering_results\"])} combinations:')\n", - "for (period, scenario), _ in clustering_info['clustering_results'].items():\n", - " print(f' - period={period}, scenario={scenario}')" + "# Optimize\n", + "fs_mp_clustered.optimize(solver)\n", + "print(f'Multi-period clustered cost: {fs_mp_clustered.solution[\"costs\"].sum().item():,.0f} €')" ] }, { "cell_type": "markdown", - "id": "23", + "id": "24", "metadata": {}, "source": [ "## API Reference\n", @@ -5233,7 +5271,7 @@ }, { "cell_type": "markdown", - "id": "24", + "id": "25", "metadata": {}, "source": [ "## Summary\n", @@ -5241,8 +5279,9 @@ "You learned how to:\n", "\n", "- Use **`transform.cluster()`** to identify typical periods\n", - "- Compare different clustering parameters and their effect on data\n", - "- Cluster **multi-period** and **multi-scenario** FlowSystems\n", + "- Visualize the **clustering effect** on time series data\n", + "- Compare different clustering parameters and their **accuracy trade-offs**\n", + "- Cluster **multi-period** FlowSystems (each period independently)\n", "\n", "### When to Use Clustering\n", "\n", From 584e9070e344d4fb92c06f2507803efad95c5427 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 14 Dec 2025 15:39:30 +0100 Subject: [PATCH 012/191] Added new system to notebook defaults --- .../data/generate_example_systems.py | 121 +++++++++++++++++- 1 file changed, 119 insertions(+), 2 deletions(-) diff --git a/docs/notebooks/data/generate_example_systems.py b/docs/notebooks/data/generate_example_systems.py index 556463302..f0a6db405 100644 --- a/docs/notebooks/data/generate_example_systems.py +++ b/docs/notebooks/data/generate_example_systems.py @@ -1,9 +1,10 @@ -"""Generate example FlowSystem files for the plotting notebook. +"""Generate example FlowSystem files for notebooks. -This script creates three FlowSystems of varying complexity: +This script creates FlowSystems of varying complexity: 1. simple_system - Basic heat system (boiler + storage + sink) 2. complex_system - Multi-carrier with multiple effects and piecewise efficiency 3. multiperiod_system - System with periods and scenarios +4. district_heating_system - Real-world district heating data (from Zeitreihen2020.csv) Run this script to regenerate the example data files. """ @@ -229,6 +230,121 @@ def create_complex_system() -> fx.FlowSystem: return fs +def create_district_heating_system() -> fx.FlowSystem: + """Create a district heating system using real-world data. 
+ + Based on Zeitreihen2020.csv data: + - One month of data at 15-minute resolution + - CHP, boiler, storage, and grid connections + - Investment optimization for sizing + + Used by: 08a-aggregation, 08b-rolling-horizon, 08c-clustering notebooks + """ + # Load real data (relative to examples/resources) + data_path = Path(__file__).parent.parent.parent.parent / 'examples' / 'resources' / 'Zeitreihen2020.csv' + data = pd.read_csv(data_path, index_col=0, parse_dates=True).sort_index() + data = data['2020-01-01':'2020-01-31 23:45:00'] # One month + data.index.name = 'time' + + timesteps = data.index + electricity_demand = data['P_Netz/MW'].to_numpy() + heat_demand = data['Q_Netz/MW'].to_numpy() + electricity_price = data['Strompr.€/MWh'].to_numpy() + gas_price = data['Gaspr.€/MWh'].to_numpy() + + fs = fx.FlowSystem(timesteps) + fs.add_elements( + # Buses + fx.Bus('Electricity'), + fx.Bus('Heat'), + fx.Bus('Gas'), + fx.Bus('Coal'), + # Effects + fx.Effect('costs', '€', 'Total Costs', is_standard=True, is_objective=True), + fx.Effect('CO2', 'kg', 'CO2 Emissions'), + # CHP unit with investment + fx.linear_converters.CHP( + 'CHP', + thermal_efficiency=0.58, + electrical_efficiency=0.22, + electrical_flow=fx.Flow('P_el', bus='Electricity', size=200), + thermal_flow=fx.Flow( + 'Q_th', + bus='Heat', + size=fx.InvestParameters( + minimum_size=100, + maximum_size=300, + effects_of_investment_per_size={'costs': 10}, + ), + relative_minimum=0.3, + ), + fuel_flow=fx.Flow('Q_fu', bus='Coal'), + ), + # Gas Boiler with investment + fx.linear_converters.Boiler( + 'Boiler', + thermal_efficiency=0.85, + thermal_flow=fx.Flow( + 'Q_th', + bus='Heat', + size=fx.InvestParameters( + minimum_size=0, + maximum_size=150, + effects_of_investment_per_size={'costs': 5}, + ), + relative_minimum=0.1, + ), + fuel_flow=fx.Flow('Q_fu', bus='Gas'), + ), + # Thermal Storage with investment + fx.Storage( + 'Storage', + capacity_in_flow_hours=fx.InvestParameters( + minimum_size=0, + maximum_size=1000, + effects_of_investment_per_size={'costs': 0.5}, + ), + initial_charge_state=0, + eta_charge=1, + eta_discharge=1, + relative_loss_per_hour=0.001, + charging=fx.Flow('Charge', size=137, bus='Heat'), + discharging=fx.Flow('Discharge', size=158, bus='Heat'), + ), + # Fuel sources + fx.Source( + 'GasGrid', + outputs=[fx.Flow('Q_Gas', bus='Gas', size=1000, effects_per_flow_hour={'costs': gas_price, 'CO2': 0.3})], + ), + fx.Source( + 'CoalSupply', + outputs=[fx.Flow('Q_Coal', bus='Coal', size=1000, effects_per_flow_hour={'costs': 4.6, 'CO2': 0.3})], + ), + # Electricity grid + fx.Source( + 'GridBuy', + outputs=[ + fx.Flow( + 'P_el', + bus='Electricity', + size=1000, + effects_per_flow_hour={'costs': electricity_price + 0.5, 'CO2': 0.3}, + ) + ], + ), + fx.Sink( + 'GridSell', + inputs=[fx.Flow('P_el', bus='Electricity', size=1000, effects_per_flow_hour=-(electricity_price - 0.5))], + ), + # Demands + fx.Sink('HeatDemand', inputs=[fx.Flow('Q_th', bus='Heat', size=1, fixed_relative_profile=heat_demand)]), + fx.Sink( + 'ElecDemand', inputs=[fx.Flow('P_el', bus='Electricity', size=1, fixed_relative_profile=electricity_demand)] + ), + ) + return fs + + def create_multiperiod_system() -> fx.FlowSystem: """Create a system with multiple periods and scenarios. 
@@ -322,6 +438,7 @@ def main(): ('simple_system', create_simple_system), ('complex_system', create_complex_system), ('multiperiod_system', create_multiperiod_system), + ('district_heating_system', create_district_heating_system), ] for name, create_func in systems: From f47ef3865ccab51664b1f8c2731d35e93ebcf46a Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 14 Dec 2025 15:47:06 +0100 Subject: [PATCH 013/191] Use realistic flow system in notebooks --- docs/notebooks/08a-aggregation.ipynb | 270 +- docs/notebooks/08b-rolling-horizon.ipynb | 109 +- docs/notebooks/08c-clustering.ipynb | 4224 +---------------- .../data/generate_example_systems.py | 104 +- 4 files changed, 260 insertions(+), 4447 deletions(-) diff --git a/docs/notebooks/08a-aggregation.ipynb b/docs/notebooks/08a-aggregation.ipynb index f3d5d1686..24f0883fd 100644 --- a/docs/notebooks/08a-aggregation.ipynb +++ b/docs/notebooks/08a-aggregation.ipynb @@ -4,7 +4,17 @@ "cell_type": "markdown", "id": "0", "metadata": {}, - "source": "# Aggregation\n\nSpeed up large problems with time series aggregation techniques.\n\nThis notebook introduces:\n\n- **Resampling**: Reduce time resolution (e.g., hourly → 4-hourly)\n- **Clustering**: Identify typical periods (e.g., 8 representative days)\n- **Two-stage optimization**: Size with reduced data, dispatch at full resolution\n- **Speed vs. accuracy trade-offs**: When to use each technique" + "source": [ + "# Aggregation\n", + "\n", + "Speed up large problems with time series aggregation techniques.\n", + "\n", + "This notebook introduces:\n", + "\n", + "- **Resampling**: Reduce time resolution (e.g., hourly → 4-hourly)\n", + "- **Two-stage optimization**: Size with reduced data, dispatch at full resolution\n", + "- **Speed vs. 
accuracy trade-offs**: When to use each technique" + ] }, { "cell_type": "markdown", @@ -24,8 +34,8 @@ "import timeit\n", "\n", "import pandas as pd\n", - "import plotly.express as px\n", - "import xarray as xr\n", + "import plotly.graph_objects as go\n", + "from plotly.subplots import make_subplots\n", "\n", "import flixopt as fx\n", "\n", @@ -36,178 +46,52 @@ "cell_type": "markdown", "id": "3", "metadata": {}, - "source": "## Load Time Series Data\n\nWe use real-world district heating data at 15-minute resolution (one month):" - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], "source": [ - "# Load time series data (15-min resolution)\n", - "data = pd.read_csv('../../examples/resources/Zeitreihen2020.csv', index_col=0, parse_dates=True).sort_index()\n", - "data = data['2020-01-01':'2020-01-31 23:45:00'] # One month\n", - "data.index.name = 'time' # Rename index for consistency\n", - "\n", - "timesteps = data.index\n", - "\n", - "# Extract profiles\n", - "electricity_demand = data['P_Netz/MW'].to_numpy()\n", - "heat_demand = data['Q_Netz/MW'].to_numpy()\n", - "electricity_price = data['Strompr.€/MWh'].to_numpy()\n", - "gas_price = data['Gaspr.€/MWh'].to_numpy()\n", - "\n", - "print(f'Timesteps: {len(timesteps)} ({len(timesteps) / 96:.0f} days at 15-min resolution)')\n", - "print(f'Heat demand: {heat_demand.min():.1f} - {heat_demand.max():.1f} MW')\n", - "print(f'Electricity price: {electricity_price.min():.1f} - {electricity_price.max():.1f} €/MWh')" + "## Load the FlowSystem\n", + "\n", + "We use a pre-built district heating system with real-world time series data (one month at 15-min resolution):" ] }, { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "4", "metadata": {}, "outputs": [], "source": [ - "# Visualize first week\n", - "profiles = xr.Dataset(\n", - " {\n", - " 'Heat Demand [MW]': xr.DataArray(heat_demand[:672], dims=['time'], coords={'time': timesteps[:672]}),\n", - " 'Electricity Price [€/MWh]': xr.DataArray(\n", - " electricity_price[:672], dims=['time'], coords={'time': timesteps[:672]}\n", - " ),\n", - " }\n", - ")\n", + "# Load the district heating system (real data from Zeitreihen2020.csv)\n", + "flow_system = fx.FlowSystem.from_netcdf('data/district_heating_system.nc4')\n", "\n", - "df = profiles.to_dataframe().reset_index().melt(id_vars='time', var_name='variable', value_name='value')\n", - "fig = px.line(df, x='time', y='value', facet_col='variable', height=300)\n", - "fig.update_yaxes(matches=None, showticklabels=True)\n", - "fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1]))\n", - "fig" - ] - }, - { - "cell_type": "markdown", - "id": "7", - "metadata": {}, - "source": [ - "## Build the Base FlowSystem\n", - "\n", - "A typical district heating system with investment decisions:" + "timesteps = flow_system.timesteps\n", + "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 96:.0f} days at 15-min resolution)')\n", + "print(f'Components: {list(flow_system.components.keys())}')" ] }, { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "5", "metadata": {}, "outputs": [], "source": [ - "def build_system(timesteps, heat_demand, electricity_demand, electricity_price, gas_price):\n", - " \"\"\"Build a district heating system with CHP, boiler, and storage (with investment options).\"\"\"\n", - " fs = fx.FlowSystem(timesteps)\n", - "\n", - " fs.add_elements(\n", - " # Buses\n", - " fx.Bus('Electricity'),\n", - " fx.Bus('Heat'),\n", - " 
fx.Bus('Gas'),\n", - " fx.Bus('Coal'),\n", - " # Effects\n", - " fx.Effect('costs', '€', 'Total Costs', is_standard=True, is_objective=True),\n", - " fx.Effect('CO2', 'kg', 'CO2 Emissions'),\n", - " # CHP with investment optimization\n", - " fx.linear_converters.CHP(\n", - " 'CHP',\n", - " thermal_efficiency=0.58,\n", - " electrical_efficiency=0.22,\n", - " electrical_flow=fx.Flow('P_el', bus='Electricity', size=200),\n", - " thermal_flow=fx.Flow(\n", - " 'Q_th',\n", - " bus='Heat',\n", - " size=fx.InvestParameters(\n", - " minimum_size=100,\n", - " maximum_size=300,\n", - " effects_of_investment_per_size={'costs': 10},\n", - " ),\n", - " relative_minimum=0.3,\n", - " ),\n", - " fuel_flow=fx.Flow('Q_fu', bus='Coal'),\n", - " ),\n", - " # Gas Boiler with investment optimization\n", - " fx.linear_converters.Boiler(\n", - " 'Boiler',\n", - " thermal_efficiency=0.85,\n", - " thermal_flow=fx.Flow(\n", - " 'Q_th',\n", - " bus='Heat',\n", - " size=fx.InvestParameters(\n", - " minimum_size=0,\n", - " maximum_size=150,\n", - " effects_of_investment_per_size={'costs': 5},\n", - " ),\n", - " relative_minimum=0.1,\n", - " ),\n", - " fuel_flow=fx.Flow('Q_fu', bus='Gas'),\n", - " ),\n", - " # Thermal Storage with investment optimization\n", - " fx.Storage(\n", - " 'Storage',\n", - " capacity_in_flow_hours=fx.InvestParameters(\n", - " minimum_size=0,\n", - " maximum_size=1000,\n", - " effects_of_investment_per_size={'costs': 0.5},\n", - " ),\n", - " initial_charge_state=0,\n", - " eta_charge=1,\n", - " eta_discharge=1,\n", - " relative_loss_per_hour=0.001,\n", - " charging=fx.Flow('Charge', size=137, bus='Heat'),\n", - " discharging=fx.Flow('Discharge', size=158, bus='Heat'),\n", - " ),\n", - " # Fuel sources\n", - " fx.Source(\n", - " 'GasGrid',\n", - " outputs=[fx.Flow('Q_Gas', bus='Gas', size=1000, effects_per_flow_hour={'costs': gas_price, 'CO2': 0.3})],\n", - " ),\n", - " fx.Source(\n", - " 'CoalSupply',\n", - " outputs=[fx.Flow('Q_Coal', bus='Coal', size=1000, effects_per_flow_hour={'costs': 4.6, 'CO2': 0.3})],\n", - " ),\n", - " # Electricity grid connection\n", - " fx.Source(\n", - " 'GridBuy',\n", - " outputs=[\n", - " fx.Flow(\n", - " 'P_el',\n", - " bus='Electricity',\n", - " size=1000,\n", - " effects_per_flow_hour={'costs': electricity_price + 0.5, 'CO2': 0.3},\n", - " )\n", - " ],\n", - " ),\n", - " fx.Sink(\n", - " 'GridSell',\n", - " inputs=[fx.Flow('P_el', bus='Electricity', size=1000, effects_per_flow_hour=-(electricity_price - 0.5))],\n", - " ),\n", - " # Demands\n", - " fx.Sink('HeatDemand', inputs=[fx.Flow('Q_th', bus='Heat', size=1, fixed_relative_profile=heat_demand)]),\n", - " fx.Sink(\n", - " 'ElecDemand', inputs=[fx.Flow('P_el', bus='Electricity', size=1, fixed_relative_profile=electricity_demand)]\n", - " ),\n", - " )\n", - "\n", - " return fs\n", - "\n", - "\n", - "flow_system = build_system(timesteps, heat_demand, electricity_demand, electricity_price, gas_price)\n", - "print(f'System: {len(timesteps)} timesteps')" + "# Visualize first week of data\n", + "heat_demand = flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile\n", + "electricity_price = flow_system.components['GridBuy'].outputs[0].effects_per_flow_hour['costs']\n", + "\n", + "fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.1)\n", + "\n", + "fig.add_trace(go.Scatter(x=timesteps[:672], y=heat_demand.values[:672], name='Heat Demand'), row=1, col=1)\n", + "fig.add_trace(go.Scatter(x=timesteps[:672], y=electricity_price.values[:672], name='Electricity Price'), row=2, col=1)\n", 
+ "\n", + "fig.update_layout(height=400, title='First Week of Data')\n", + "fig.update_yaxes(title_text='Heat Demand [MW]', row=1, col=1)\n", + "fig.update_yaxes(title_text='El. Price [€/MWh]', row=2, col=1)\n", + "fig.show()" ] }, { "cell_type": "markdown", - "id": "9", + "id": "6", "metadata": {}, "source": [ "## Technique 1: Resampling\n", @@ -218,13 +102,13 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "7", "metadata": {}, "outputs": [], "source": [ "solver = fx.solvers.HighsSolver(mip_gap=0.01)\n", "\n", - "# Resample from 1h to 4h resolution\n", + "# Resample from 15-min to 4h resolution\n", "fs_resampled = flow_system.transform.resample('4h')\n", "\n", "print(f'Original: {len(flow_system.timesteps)} timesteps')\n", @@ -235,7 +119,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -245,12 +129,12 @@ "time_resampled = timeit.default_timer() - start\n", "\n", "print(f'\\nResampled optimization: {time_resampled:.2f} seconds')\n", - "print(f'Cost: {fs_resampled.solution[\"costs\"].item():.2f} €')" + "print(f'Cost: {fs_resampled.solution[\"costs\"].item():,.0f} €')" ] }, { "cell_type": "markdown", - "id": "12", + "id": "9", "metadata": {}, "source": [ "## Technique 2: Two-Stage Optimization\n", @@ -262,7 +146,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -282,7 +166,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -294,13 +178,13 @@ "\n", "print('=== Stage 2: Dispatch ===')\n", "print(f'Time: {time_stage2:.2f} seconds')\n", - "print(f'Cost: {fs_dispatch.solution[\"costs\"].item():.2f} €')\n", + "print(f'Cost: {fs_dispatch.solution[\"costs\"].item():,.0f} €')\n", "print(f'\\nTotal two-stage time: {time_stage1 + time_stage2:.2f} seconds')" ] }, { "cell_type": "markdown", - "id": "15", + "id": "12", "metadata": {}, "source": [ "## Technique 3: Full Optimization (Baseline)\n", @@ -311,7 +195,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -322,12 +206,12 @@ "\n", "print('=== Full Optimization ===')\n", "print(f'Time: {time_full:.2f} seconds')\n", - "print(f'Cost: {fs_full.solution[\"costs\"].item():.2f} €')" + "print(f'Cost: {fs_full.solution[\"costs\"].item():,.0f} €')" ] }, { "cell_type": "markdown", - "id": "17", + "id": "14", "metadata": {}, "source": [ "## Compare Results" @@ -336,7 +220,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -388,7 +272,7 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "16", "metadata": {}, "source": [ "## Visual Comparison: Heat Balance" @@ -397,7 +281,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -408,7 +292,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "18", "metadata": {}, "outputs": [], "source": [ @@ -418,7 +302,7 @@ }, { "cell_type": "markdown", - "id": "22", + "id": "19", "metadata": {}, "source": [ "### Energy Flow Sankey (Full Optimization)\n", @@ -429,7 +313,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -438,7 +322,7 @@ }, { "cell_type": "markdown", - "id": "24", + "id": "21", "metadata": {}, "source": [ "## When to Use Each Technique\n", @@ 
-478,20 +362,54 @@ }, { "cell_type": "markdown", - "id": "25", + "id": "22", "metadata": {}, - "source": "## Summary\n\nYou learned how to:\n\n- Use **`transform.resample()`** to reduce time resolution\n- Apply **two-stage optimization** for large investment problems\n- Use **`transform.fix_sizes()`** to lock in investment decisions\n- Compare **speed vs. accuracy** trade-offs\n\n### Key Takeaways\n\n1. **Start fast**: Use resampling for initial exploration\n2. **Iterate**: Refine with two-stage optimization\n3. **Validate**: Run full optimization for final results\n4. **Monitor**: Check cost gaps to ensure acceptable accuracy\n\n### Next Steps\n\n- **[08b-Rolling Horizon](08b-rolling-horizon.ipynb)**: For operational problems without investment decisions, decompose time into sequential segments\n\n### Further Reading\n\n- For clustering with typical periods, see `transform.cluster()` (requires `tsam` package)\n- For time selection, see `transform.sel()` and `transform.isel()`" + "source": [ + "## Summary\n", + "\n", + "You learned how to:\n", + "\n", + "- Use **`transform.resample()`** to reduce time resolution\n", + "- Apply **two-stage optimization** for large investment problems\n", + "- Use **`transform.fix_sizes()`** to lock in investment decisions\n", + "- Compare **speed vs. accuracy** trade-offs\n", + "\n", + "### Key Takeaways\n", + "\n", + "1. **Start fast**: Use resampling for initial exploration\n", + "2. **Iterate**: Refine with two-stage optimization\n", + "3. **Validate**: Run full optimization for final results\n", + "4. **Monitor**: Check cost gaps to ensure acceptable accuracy\n", + "\n", + "### Next Steps\n", + "\n", + "- **[08b-Rolling Horizon](08b-rolling-horizon.ipynb)**: For operational problems, decompose time into sequential segments\n", + "- **[08c-Clustering](08c-clustering.ipynb)**: Use typical periods with the `tsam` package\n", + "\n", + "### Further Reading\n", + "\n", + "- For clustering with typical periods, see `transform.cluster()` (requires `tsam` package)\n", + "- For time selection, see `transform.sel()` and `transform.isel()`" + ] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", "name": "python", - "version": "3.11.0" + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" } }, "nbformat": 4, diff --git a/docs/notebooks/08b-rolling-horizon.ipynb b/docs/notebooks/08b-rolling-horizon.ipynb index 67edf9aa8..bad3fe983 100644 --- a/docs/notebooks/08b-rolling-horizon.ipynb +++ b/docs/notebooks/08b-rolling-horizon.ipynb @@ -54,7 +54,7 @@ "cell_type": "markdown", "id": "3", "metadata": {}, - "source": "## Load Time Series Data\n\nWe use real-world district heating data at 15-minute resolution (two weeks):" + "source": "## Load the FlowSystem\n\nWe use a pre-built operational district heating system with real-world data (two weeks at 15-min resolution):" }, { "cell_type": "code", @@ -65,115 +65,10 @@ "start_time": "2025-12-13T19:01:44.973157Z" } }, - "source": "# Load time series data (15-min resolution)\ndata = pd.read_csv('../../examples/resources/Zeitreihen2020.csv', index_col=0, parse_dates=True).sort_index()\ndata = data['2020-01-01':'2020-01-14 23:45:00'] # Two weeks\ndata.index.name = 'time' # Rename index for consistency\n\ntimesteps = data.index\n\n# Extract 
profiles\nelectricity_demand = data['P_Netz/MW'].to_numpy()\nheat_demand = data['Q_Netz/MW'].to_numpy()\nelectricity_price = data['Strompr.€/MWh'].to_numpy()\ngas_price = data['Gaspr.€/MWh'].to_numpy()\n\nprint(f'Timesteps: {len(timesteps)} ({len(timesteps) / 96:.0f} days at 15-min resolution)')\nprint(f'Heat demand: {heat_demand.min():.1f} - {heat_demand.max():.1f} MW')\nprint(f'Electricity price: {electricity_price.min():.1f} - {electricity_price.max():.1f} €/MWh')", + "source": "# Load the operational system (real data from Zeitreihen2020.csv, two weeks)\nflow_system = fx.FlowSystem.from_netcdf('data/operational_system.nc4')\n\ntimesteps = flow_system.timesteps\nprint(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 96:.0f} days at 15-min resolution)')\nprint(f'Components: {list(flow_system.components.keys())}')", "outputs": [], "execution_count": null }, - { - "cell_type": "code", - "id": "5", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-13T19:01:45.204918Z", - "start_time": "2025-12-13T19:01:45.183230Z" - } - }, - "source": [ - "def build_system(timesteps, heat_demand, electricity_demand, electricity_price, gas_price):\n", - " \"\"\"Build a district heating system with CHP, boiler, and storage.\"\"\"\n", - " fs = fx.FlowSystem(timesteps)\n", - "\n", - " # Effects\n", - "\n", - " # Buses\n", - " fs.add_elements(\n", - " fx.Bus('Electricity'),\n", - " fx.Bus('Heat'),\n", - " fx.Bus('Gas'),\n", - " fx.Bus('Coal'),\n", - " fx.Effect('costs', '€', 'Total Costs', is_standard=True, is_objective=True),\n", - " fx.Effect('CO2', 'kg', 'CO2 Emissions'),\n", - " fx.linear_converters.CHP(\n", - " 'CHP',\n", - " thermal_efficiency=0.58,\n", - " electrical_efficiency=0.22,\n", - " status_parameters=fx.StatusParameters(effects_per_startup=24000),\n", - " electrical_flow=fx.Flow('P_el', bus='Electricity', size=200),\n", - " thermal_flow=fx.Flow('Q_th', bus='Heat', size=200),\n", - " fuel_flow=fx.Flow('Q_fu', bus='Coal', size=288, relative_minimum=87 / 288, previous_flow_rate=100),\n", - " ),\n", - " fx.linear_converters.Boiler(\n", - " 'Boiler',\n", - " thermal_efficiency=0.85,\n", - " thermal_flow=fx.Flow('Q_th', bus='Heat'),\n", - " fuel_flow=fx.Flow(\n", - " 'Q_fu',\n", - " bus='Gas',\n", - " size=95,\n", - " relative_minimum=12 / 95,\n", - " previous_flow_rate=20,\n", - " status_parameters=fx.StatusParameters(effects_per_startup=1000),\n", - " ),\n", - " ),\n", - " fx.Storage(\n", - " 'Storage',\n", - " capacity_in_flow_hours=684,\n", - " initial_charge_state=137,\n", - " minimal_final_charge_state=137,\n", - " maximal_final_charge_state=158,\n", - " eta_charge=1,\n", - " eta_discharge=1,\n", - " relative_loss_per_hour=0.001,\n", - " prevent_simultaneous_charge_and_discharge=True,\n", - " charging=fx.Flow('Charge', size=137, bus='Heat'),\n", - " discharging=fx.Flow('Discharge', size=158, bus='Heat'),\n", - " ),\n", - " fx.Source(\n", - " 'GasGrid',\n", - " outputs=[fx.Flow('Q_Gas', bus='Gas', size=1000, effects_per_flow_hour={'costs': gas_price, 'CO2': 0.3})],\n", - " ),\n", - " fx.Source(\n", - " 'CoalSupply',\n", - " outputs=[fx.Flow('Q_Coal', bus='Coal', size=1000, effects_per_flow_hour={'costs': 4.6, 'CO2': 0.3})],\n", - " ),\n", - " fx.Source(\n", - " 'GridBuy',\n", - " outputs=[\n", - " fx.Flow(\n", - " 'P_el',\n", - " bus='Electricity',\n", - " size=1000,\n", - " effects_per_flow_hour={'costs': electricity_price + 0.5, 'CO2': 0.3},\n", - " )\n", - " ],\n", - " ),\n", - " fx.Sink(\n", - " 'GridSell',\n", - " inputs=[fx.Flow('P_el', bus='Electricity', size=1000, 
effects_per_flow_hour=-(electricity_price - 0.5))],\n", - " ),\n", - " fx.Sink('HeatDemand', inputs=[fx.Flow('Q_th', bus='Heat', size=1, fixed_relative_profile=heat_demand)]),\n", - " fx.Sink(\n", - " 'ElecDemand', inputs=[fx.Flow('P_el', bus='Electricity', size=1, fixed_relative_profile=electricity_demand)]\n", - " ),\n", - " )\n", - "\n", - " return fs\n", - "\n", - "\n", - "flow_system = build_system(timesteps, heat_demand, electricity_demand, electricity_price, gas_price)\n", - "print(f'System: {len(timesteps)} timesteps')" - ], - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "System: 1344 timesteps\n" - ] - } - ], - "execution_count": 3 - }, { "cell_type": "markdown", "id": "6", diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb index 0cbbbc09c..db4b79b77 100644 --- a/docs/notebooks/08c-clustering.ipynb +++ b/docs/notebooks/08c-clustering.ipynb @@ -55,7 +55,6 @@ "\n", "import numpy as np\n", "import pandas as pd\n", - "import plotly.express as px\n", "import plotly.graph_objects as go\n", "from plotly.subplots import make_subplots\n", "\n", @@ -68,4147 +67,53 @@ "cell_type": "markdown", "id": "3", "metadata": {}, - "source": [ - "## Load Time Series Data\n", - "\n", - "We use real-world district heating data at 15-minute resolution (one month):" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "id": "4", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T14:35:42.938317Z", - "start_time": "2025-12-14T14:35:42.830680Z" - } - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Timesteps: 2976 (31 days at 15-min resolution)\n", - "Heat demand: 122.2 - 266.2 MW\n", - "Electricity price: -3.3 - 72.6 €/MWh\n" - ] - } - ], - "source": [ - "# Load time series data (15-min resolution)\n", - "data = pd.read_csv('../../examples/resources/Zeitreihen2020.csv', index_col=0, parse_dates=True).sort_index()\n", - "data = data['2020-01-01':'2020-01-31 23:45:00'] # One month\n", - "data.index.name = 'time'\n", - "\n", - "timesteps = data.index\n", - "\n", - "# Extract profiles\n", - "electricity_demand = data['P_Netz/MW'].to_numpy()\n", - "heat_demand = data['Q_Netz/MW'].to_numpy()\n", - "electricity_price = data['Strompr.€/MWh'].to_numpy()\n", - "gas_price = data['Gaspr.€/MWh'].to_numpy()\n", - "\n", - "print(f'Timesteps: {len(timesteps)} ({len(timesteps) / 96:.0f} days at 15-min resolution)')\n", - "print(f'Heat demand: {heat_demand.min():.1f} - {heat_demand.max():.1f} MW')\n", - "print(f'Electricity price: {electricity_price.min():.1f} - {electricity_price.max():.1f} €/MWh')" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "5", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T14:35:43.057128Z", - "start_time": "2025-12-14T14:35:42.948041Z" - } - }, - "outputs": [ - { - "data": { - "text/html": [ - " \n", - " \n", - " " - ] - }, - "jetTransient": { - "display_id": null - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "
" - ] - }, - "jetTransient": { - "display_id": null - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "# Visualize first two weeks\n", - "import xarray as xr\n", - "\n", - "profiles = xr.Dataset(\n", - " {\n", - " 'Heat Demand [MW]': xr.DataArray(heat_demand[:1344], dims=['time'], coords={'time': timesteps[:1344]}),\n", - " 'Electricity Price [€/MWh]': xr.DataArray(\n", - " electricity_price[:1344], dims=['time'], coords={'time': timesteps[:1344]}\n", - " ),\n", - " }\n", - ")\n", - "\n", - "df = profiles.to_dataframe().reset_index().melt(id_vars='time', var_name='variable', value_name='value')\n", - "fig = px.line(df, x='time', y='value', facet_col='variable', height=300)\n", - "fig.update_yaxes(matches=None, showticklabels=True)\n", - "fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1]))\n", - "fig" - ] + "source": "## Load the FlowSystem\n\nWe use a pre-built district heating system with real-world time series data (one month at 15-min resolution):" }, { - "cell_type": "markdown", - "id": "6", - "metadata": {}, + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T14:35:42.938317Z", + "start_time": "2025-12-14T14:35:42.830680Z" + } + }, + "outputs": [], "source": [ - "## Build the FlowSystem\n", + "# Load the district heating system (real data from Zeitreihen2020.csv)\n", + "flow_system = fx.FlowSystem.from_netcdf('data/district_heating_system.nc4')\n", "\n", - "A district heating system with CHP, boiler, and storage:" + "timesteps = flow_system.timesteps\n", + "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 96:.0f} days at 15-min resolution)')\n", + "print(f'Components: {list(flow_system.components.keys())}')" ] }, { "cell_type": "code", - "execution_count": 14, - "id": "7", + "execution_count": null, + "id": "5", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T14:35:43.493407Z", - "start_time": "2025-12-14T14:35:43.461305Z" + "end_time": "2025-12-14T14:35:43.057128Z", + "start_time": "2025-12-14T14:35:42.948041Z" } }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "System: 2976 timesteps (31 days)\n" - ] - } - ], + "outputs": [], "source": [ - "def build_system(timesteps, heat_demand, electricity_demand, electricity_price, gas_price):\n", - " \"\"\"Build a district heating system with CHP, boiler, and storage.\"\"\"\n", - " fs = fx.FlowSystem(timesteps)\n", - "\n", - " fs.add_elements(\n", - " # Buses\n", - " fx.Bus('Electricity'),\n", - " fx.Bus('Heat'),\n", - " fx.Bus('Gas'),\n", - " fx.Bus('Coal'),\n", - " # Effects\n", - " fx.Effect('costs', '€', 'Total Costs', is_standard=True, is_objective=True),\n", - " fx.Effect('CO2', 'kg', 'CO2 Emissions'),\n", - " # CHP unit\n", - " fx.linear_converters.CHP(\n", - " 'CHP',\n", - " thermal_efficiency=0.58,\n", - " electrical_efficiency=0.22,\n", - " electrical_flow=fx.Flow('P_el', bus='Electricity', size=200),\n", - " thermal_flow=fx.Flow(\n", - " 'Q_th',\n", - " bus='Heat',\n", - " size=fx.InvestParameters(\n", - " minimum_size=100,\n", - " maximum_size=300,\n", - " effects_of_investment_per_size={'costs': 10},\n", - " ),\n", - " relative_minimum=0.3,\n", - " ),\n", - " fuel_flow=fx.Flow('Q_fu', bus='Coal'),\n", - " ),\n", - " # Gas Boiler\n", - " fx.linear_converters.Boiler(\n", - " 'Boiler',\n", - " thermal_efficiency=0.85,\n", - " thermal_flow=fx.Flow(\n", - " 'Q_th',\n", - " bus='Heat',\n", - " size=fx.InvestParameters(\n", - " minimum_size=0,\n", - " 
maximum_size=150,\n", - " effects_of_investment_per_size={'costs': 5},\n", - " ),\n", - " relative_minimum=0.1,\n", - " ),\n", - " fuel_flow=fx.Flow('Q_fu', bus='Gas'),\n", - " ),\n", - " # Thermal Storage\n", - " fx.Storage(\n", - " 'Storage',\n", - " capacity_in_flow_hours=fx.InvestParameters(\n", - " minimum_size=0,\n", - " maximum_size=1000,\n", - " effects_of_investment_per_size={'costs': 0.5},\n", - " ),\n", - " initial_charge_state=0,\n", - " eta_charge=1,\n", - " eta_discharge=1,\n", - " relative_loss_per_hour=0.001,\n", - " charging=fx.Flow('Charge', size=137, bus='Heat'),\n", - " discharging=fx.Flow('Discharge', size=158, bus='Heat'),\n", - " ),\n", - " # Fuel sources\n", - " fx.Source(\n", - " 'GasGrid',\n", - " outputs=[fx.Flow('Q_Gas', bus='Gas', size=1000, effects_per_flow_hour={'costs': gas_price, 'CO2': 0.3})],\n", - " ),\n", - " fx.Source(\n", - " 'CoalSupply',\n", - " outputs=[fx.Flow('Q_Coal', bus='Coal', size=1000, effects_per_flow_hour={'costs': 4.6, 'CO2': 0.3})],\n", - " ),\n", - " # Electricity grid\n", - " fx.Source(\n", - " 'GridBuy',\n", - " outputs=[\n", - " fx.Flow(\n", - " 'P_el',\n", - " bus='Electricity',\n", - " size=1000,\n", - " effects_per_flow_hour={'costs': electricity_price + 0.5, 'CO2': 0.3},\n", - " )\n", - " ],\n", - " ),\n", - " fx.Sink(\n", - " 'GridSell',\n", - " inputs=[fx.Flow('P_el', bus='Electricity', size=1000, effects_per_flow_hour=-(electricity_price - 0.5))],\n", - " ),\n", - " # Demands\n", - " fx.Sink('HeatDemand', inputs=[fx.Flow('Q_th', bus='Heat', size=1, fixed_relative_profile=heat_demand)]),\n", - " fx.Sink(\n", - " 'ElecDemand', inputs=[fx.Flow('P_el', bus='Electricity', size=1, fixed_relative_profile=electricity_demand)]\n", - " ),\n", - " )\n", + "# Visualize first two weeks of data\n", + "heat_demand = flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile\n", + "electricity_price = flow_system.components['GridBuy'].outputs[0].effects_per_flow_hour['costs']\n", "\n", - " return fs\n", + "fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.1)\n", "\n", + "fig.add_trace(go.Scatter(x=timesteps[:1344], y=heat_demand.values[:1344], name='Heat Demand'), row=1, col=1)\n", + "fig.add_trace(go.Scatter(x=timesteps[:1344], y=electricity_price.values[:1344], name='Electricity Price'), row=2, col=1)\n", "\n", - "flow_system = build_system(timesteps, heat_demand, electricity_demand, electricity_price, gas_price)\n", - "print(f'System: {len(timesteps)} timesteps ({len(timesteps) / 96:.0f} days)')" + "fig.update_layout(height=400, title='First Two Weeks of Data')\n", + "fig.update_yaxes(title_text='Heat Demand [MW]', row=1, col=1)\n", + "fig.update_yaxes(title_text='El. Price [€/MWh]', row=2, col=1)\n", + "fig.show()" ] }, { @@ -4236,7 +141,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001B[2m2025-12-14 15:35:43.523\u001B[0m \u001B[33mWARNING \u001B[0m │ FlowSystem is not connected_and_transformed. Connecting and transforming data now.\n" + "\u001b[2m2025-12-14 15:35:43.523\u001b[0m \u001b[33mWARNING \u001b[0m │ FlowSystem is not connected_and_transformed. 
Connecting and transforming data now.\n" ] }, { @@ -4717,23 +622,23 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001B[2m2025-12-14 15:35:52.048\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", - "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", - "\u001B[2m2025-12-14 15:35:52.216\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", - "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n" + "\u001b[2m2025-12-14 15:35:52.048\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", + "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", + "\u001b[2m2025-12-14 15:35:52.216\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", + "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). 
Consider using status_parameters to allow the Flow to be switched active and inactive.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 64/64 [00:00<00:00, 75.17it/s] \n", - "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 55/55 [00:00<00:00, 421.23it/s]\n", - "Writing binary variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 5/5 [00:00<00:00, 368.46it/s]\n" + "Writing constraints.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 64/64 [00:00<00:00, 75.17it/s] \n", + "Writing continuous variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 55/55 [00:00<00:00, 421.23it/s]\n", + "Writing binary variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 5/5 [00:00<00:00, 368.46it/s]\n" ] }, { @@ -4850,23 +755,23 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001B[2m2025-12-14 15:36:19.954\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", - "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", - "\u001B[2m2025-12-14 15:36:20.127\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", - "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n" + "\u001b[2m2025-12-14 15:36:19.954\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", + "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", + "\u001b[2m2025-12-14 15:36:20.127\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", + "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). 
Consider using status_parameters to allow the Flow to be switched active and inactive.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 81/81 [00:01<00:00, 65.44it/s]\n", - "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 55/55 [00:00<00:00, 808.42it/s]\n", - "Writing binary variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 5/5 [00:00<00:00, 766.39it/s]\n" + "Writing constraints.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 81/81 [00:01<00:00, 65.44it/s]\n", + "Writing continuous variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 55/55 [00:00<00:00, 808.42it/s]\n", + "Writing binary variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 5/5 [00:00<00:00, 766.39it/s]\n" ] }, { @@ -5075,7 +980,7 @@ }, { "cell_type": "code", - "execution_count": 22, + "execution_count": null, "id": "21", "metadata": { "ExecuteTime": { @@ -5083,17 +988,10 @@ "start_time": "2025-12-14T14:36:25.657257Z" } }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Multi-period system: 1344 timesteps × 3 periods\n" - ] - } - ], + "outputs": [], "source": [ - "# Create a multi-period system (3 years, each with 2 weeks of data)\n", + "# Load raw data for multi-period example\n", + "data = pd.read_csv('../../examples/resources/Zeitreihen2020.csv', index_col=0, parse_dates=True).sort_index()\n", "data_2w = data['2020-01-01':'2020-01-14 23:45:00'] # Two weeks\n", "timesteps_2w = data_2w.index\n", "\n", @@ -5160,7 +1058,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001B[2m2025-12-14 15:36:25.701\u001B[0m \u001B[33mWARNING \u001B[0m │ FlowSystem is not connected_and_transformed. Connecting and transforming data now.\n", + "\u001b[2m2025-12-14 15:36:25.701\u001b[0m \u001b[33mWARNING \u001b[0m │ FlowSystem is not connected_and_transformed. Connecting and transforming data now.\n", "Clustering was applied to 3 period(s):\n", " - period=2024\n", " - period=2025\n", @@ -5194,8 +1092,8 @@ "name": "stderr", "output_type": "stream", "text": [ - "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 38/38 [00:00<00:00, 80.29it/s]\n", - "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 22/22 [00:00<00:00, 398.66it/s]\n" + "Writing constraints.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 38/38 [00:00<00:00, 80.29it/s]\n", + "Writing continuous variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 22/22 [00:00<00:00, 398.66it/s]\n" ] }, { diff --git a/docs/notebooks/data/generate_example_systems.py b/docs/notebooks/data/generate_example_systems.py index f0a6db405..e42968de7 100644 --- a/docs/notebooks/data/generate_example_systems.py +++ b/docs/notebooks/data/generate_example_systems.py @@ -4,7 +4,8 @@ 1. simple_system - Basic heat system (boiler + storage + sink) 2. complex_system - Multi-carrier with multiple effects and piecewise efficiency 3. multiperiod_system - System with periods and scenarios -4. district_heating_system - Real-world district heating data (from Zeitreihen2020.csv) +4. district_heating_system - Real-world district heating data with investments (1 month) +5. operational_system - Real-world district heating for operational planning (2 weeks, no investments) Run this script to regenerate the example data files. 
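+The notebooks load the generated files via fx.FlowSystem.from_netcdf('data/<name>.nc4').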
""" @@ -345,6 +346,106 @@ def create_district_heating_system() -> fx.FlowSystem: return fs +def create_operational_system() -> fx.FlowSystem: + """Create an operational district heating system (no investments). + + Based on Zeitreihen2020.csv data (two weeks): + - CHP with startup costs + - Boiler with startup costs + - Storage with fixed capacity + - No investment parameters (for rolling horizon optimization) + + Used by: 08b-rolling-horizon notebook + """ + # Load real data + data_path = Path(__file__).parent.parent.parent.parent / 'examples' / 'resources' / 'Zeitreihen2020.csv' + data = pd.read_csv(data_path, index_col=0, parse_dates=True).sort_index() + data = data['2020-01-01':'2020-01-14 23:45:00'] # Two weeks + data.index.name = 'time' + + timesteps = data.index + electricity_demand = data['P_Netz/MW'].to_numpy() + heat_demand = data['Q_Netz/MW'].to_numpy() + electricity_price = data['Strompr.€/MWh'].to_numpy() + gas_price = data['Gaspr.€/MWh'].to_numpy() + + fs = fx.FlowSystem(timesteps) + fs.add_elements( + fx.Bus('Electricity'), + fx.Bus('Heat'), + fx.Bus('Gas'), + fx.Bus('Coal'), + fx.Effect('costs', '€', 'Total Costs', is_standard=True, is_objective=True), + fx.Effect('CO2', 'kg', 'CO2 Emissions'), + # CHP with startup costs + fx.linear_converters.CHP( + 'CHP', + thermal_efficiency=0.58, + electrical_efficiency=0.22, + status_parameters=fx.StatusParameters(effects_per_startup=24000), + electrical_flow=fx.Flow('P_el', bus='Electricity', size=200), + thermal_flow=fx.Flow('Q_th', bus='Heat', size=200), + fuel_flow=fx.Flow('Q_fu', bus='Coal', size=288, relative_minimum=87 / 288, previous_flow_rate=100), + ), + # Boiler with startup costs + fx.linear_converters.Boiler( + 'Boiler', + thermal_efficiency=0.85, + thermal_flow=fx.Flow('Q_th', bus='Heat'), + fuel_flow=fx.Flow( + 'Q_fu', + bus='Gas', + size=95, + relative_minimum=12 / 95, + previous_flow_rate=20, + status_parameters=fx.StatusParameters(effects_per_startup=1000), + ), + ), + # Storage with fixed capacity + fx.Storage( + 'Storage', + capacity_in_flow_hours=684, + initial_charge_state=137, + minimal_final_charge_state=137, + maximal_final_charge_state=158, + eta_charge=1, + eta_discharge=1, + relative_loss_per_hour=0.001, + prevent_simultaneous_charge_and_discharge=True, + charging=fx.Flow('Charge', size=137, bus='Heat'), + discharging=fx.Flow('Discharge', size=158, bus='Heat'), + ), + fx.Source( + 'GasGrid', + outputs=[fx.Flow('Q_Gas', bus='Gas', size=1000, effects_per_flow_hour={'costs': gas_price, 'CO2': 0.3})], + ), + fx.Source( + 'CoalSupply', + outputs=[fx.Flow('Q_Coal', bus='Coal', size=1000, effects_per_flow_hour={'costs': 4.6, 'CO2': 0.3})], + ), + fx.Source( + 'GridBuy', + outputs=[ + fx.Flow( + 'P_el', + bus='Electricity', + size=1000, + effects_per_flow_hour={'costs': electricity_price + 0.5, 'CO2': 0.3}, + ) + ], + ), + fx.Sink( + 'GridSell', + inputs=[fx.Flow('P_el', bus='Electricity', size=1000, effects_per_flow_hour=-(electricity_price - 0.5))], + ), + fx.Sink('HeatDemand', inputs=[fx.Flow('Q_th', bus='Heat', size=1, fixed_relative_profile=heat_demand)]), + fx.Sink( + 'ElecDemand', inputs=[fx.Flow('P_el', bus='Electricity', size=1, fixed_relative_profile=electricity_demand)] + ), + ) + return fs + + def create_multiperiod_system() -> fx.FlowSystem: """Create a system with multiple periods and scenarios. 
@@ -439,6 +540,7 @@ def main(): ('complex_system', create_complex_system), ('multiperiod_system', create_multiperiod_system), ('district_heating_system', create_district_heating_system), + ('operational_system', create_operational_system), ] for name, create_func in systems: From 1bfdc56de854abb46916a58ef79ffa6443a61eec Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 14 Dec 2025 16:18:29 +0100 Subject: [PATCH 014/191] add segmentation to notebooks --- docs/notebooks/08c-clustering.ipynb | 761 ++++++++++++++-------------- 1 file changed, 367 insertions(+), 394 deletions(-) diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb index db4b79b77..b1e083cc5 100644 --- a/docs/notebooks/08c-clustering.ipynb +++ b/docs/notebooks/08c-clustering.ipynb @@ -4,21 +4,7 @@ "cell_type": "markdown", "id": "0", "metadata": {}, - "source": [ - "# Clustering with tsam\n", - "\n", - "Speed up large problems by identifying typical periods using time series clustering.\n", - "\n", - "This notebook demonstrates:\n", - "\n", - "- **Basic clustering**: Reduce a month to representative days\n", - "- **Visualize clustering**: See how data changes with clustering\n", - "- **Compare parameters**: Trade-off between accuracy and speed\n", - "- **Multi-period clustering**: Cluster multi-year investment studies\n", - "\n", - "!!! note \"Requirements\"\n", - " This notebook requires the `tsam` package: `pip install tsam`" - ] + "source": "# Clustering and Segmentation with tsam\n\nSpeed up large problems by reducing time series complexity using the [tsam](https://github.com/FZJ-IEK3-VSA/tsam) package.\n\nThis notebook demonstrates two complementary techniques:\n\n- **Clustering** (inter-period): Identify typical periods (e.g., 8 typical days from 365 days)\n- **Segmentation** (inner-period): Reduce timesteps within periods (e.g., 24 hours to 4 segments)\n\nBoth can be used independently or combined for maximum speedup.\n\n!!! 
note \"Requirements\"\n This notebook requires the `tsam` package: `pip install tsam`" }, { "cell_type": "markdown", @@ -30,12 +16,12 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 3, "id": "2", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T14:35:42.824263Z", - "start_time": "2025-12-14T14:35:42.676726Z" + "end_time": "2025-12-14T15:15:03.886557Z", + "start_time": "2025-12-14T15:15:03.823696Z" } }, "outputs": [ @@ -45,7 +31,7 @@ "flixopt.config.CONFIG" ] }, - "execution_count": 11, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" } @@ -71,15 +57,51 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "id": "4", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T14:35:42.938317Z", - "start_time": "2025-12-14T14:35:42.830680Z" + "end_time": "2025-12-14T15:15:04.193792Z", + "start_time": "2025-12-14T15:15:03.954424Z" } }, - "outputs": [], + "outputs": [ + { + "ename": "OSError", + "evalue": "Failed to load FlowSystem from NetCDF file data/district_heating_system.nc4: [Errno 2] No such file or directory: '/Users/felix/PycharmProjects/flixopt_182303/docs/notebooks/data/district_heating_system.nc4'", + "output_type": "error", + "traceback": [ + "\u001B[31m---------------------------------------------------------------------------\u001B[39m", + "\u001B[31mKeyError\u001B[39m Traceback (most recent call last)", + "\u001B[36mFile \u001B[39m\u001B[32m~/PycharmProjects/flixopt_182303/.venv/lib/python3.11/site-packages/xarray/backends/file_manager.py:219\u001B[39m, in \u001B[36mCachingFileManager._acquire_with_cache_info\u001B[39m\u001B[34m(self, needs_lock)\u001B[39m\n\u001B[32m 218\u001B[39m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[32m--> \u001B[39m\u001B[32m219\u001B[39m file = \u001B[38;5;28;43mself\u001B[39;49m\u001B[43m.\u001B[49m\u001B[43m_cache\u001B[49m\u001B[43m[\u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[43m.\u001B[49m\u001B[43m_key\u001B[49m\u001B[43m]\u001B[49m\n\u001B[32m 220\u001B[39m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mKeyError\u001B[39;00m:\n", + "\u001B[36mFile \u001B[39m\u001B[32m~/PycharmProjects/flixopt_182303/.venv/lib/python3.11/site-packages/xarray/backends/lru_cache.py:56\u001B[39m, in \u001B[36mLRUCache.__getitem__\u001B[39m\u001B[34m(self, key)\u001B[39m\n\u001B[32m 55\u001B[39m \u001B[38;5;28;01mwith\u001B[39;00m \u001B[38;5;28mself\u001B[39m._lock:\n\u001B[32m---> \u001B[39m\u001B[32m56\u001B[39m value = \u001B[38;5;28;43mself\u001B[39;49m\u001B[43m.\u001B[49m\u001B[43m_cache\u001B[49m\u001B[43m[\u001B[49m\u001B[43mkey\u001B[49m\u001B[43m]\u001B[49m\n\u001B[32m 57\u001B[39m \u001B[38;5;28mself\u001B[39m._cache.move_to_end(key)\n", + "\u001B[31mKeyError\u001B[39m: [, ('/Users/felix/PycharmProjects/flixopt_182303/docs/notebooks/data/district_heating_system.nc4',), 'r', (('clobber', True), ('diskless', False), ('format', 'NETCDF4'), ('persist', False)), '7662be6e-fdfd-436b-880a-38f84ad236df']", + "\nDuring handling of the above exception, another exception occurred:\n", + "\u001B[31mFileNotFoundError\u001B[39m Traceback (most recent call last)", + "\u001B[36mFile \u001B[39m\u001B[32m~/PycharmProjects/flixopt_182303/flixopt/structure.py:975\u001B[39m, in \u001B[36mInterface.from_netcdf\u001B[39m\u001B[34m(cls, path)\u001B[39m\n\u001B[32m 974\u001B[39m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[32m--> \u001B[39m\u001B[32m975\u001B[39m ds = 
fx_io.load_dataset_from_netcdf(path)\n",
      "... (intermediate flixopt, xarray, and netCDF4 backend frames elided) ...",
      "\u001B[31mFileNotFoundError\u001B[39m: [Errno 2] No such file or directory: '/Users/felix/PycharmProjects/flixopt_182303/docs/notebooks/data/district_heating_system.nc4'",
      "\nThe above exception was the direct cause of the following exception:\n",
      "\u001B[31mOSError\u001B[39m                                  Traceback (most recent call last)",
      "\u001B[36mCell\u001B[39m\u001B[36m \u001B[39m\u001B[32mIn[4]\u001B[39m\u001B[32m, line 2\u001B[39m\n--> 2 flow_system = fx.FlowSystem.from_netcdf('data/district_heating_system.nc4')\n      4 timesteps = flow_system.timesteps\n      5 print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 96:.0f} days at 15-min 
resolution)\u001B[39m\u001B[33m'\u001B[39m)\n", + "\u001B[36mFile \u001B[39m\u001B[32m~/PycharmProjects/flixopt_182303/flixopt/flow_system.py:771\u001B[39m, in \u001B[36mFlowSystem.from_netcdf\u001B[39m\u001B[34m(cls, path)\u001B[39m\n\u001B[32m 758\u001B[39m \u001B[38;5;250m\u001B[39m\u001B[33;03m\"\"\"\u001B[39;00m\n\u001B[32m 759\u001B[39m \u001B[33;03mLoad a FlowSystem from a NetCDF file.\u001B[39;00m\n\u001B[32m 760\u001B[39m \n\u001B[32m (...)\u001B[39m\u001B[32m 768\u001B[39m \u001B[33;03m FlowSystem instance with name set from filename\u001B[39;00m\n\u001B[32m 769\u001B[39m \u001B[33;03m\"\"\"\u001B[39;00m\n\u001B[32m 770\u001B[39m path = pathlib.Path(path)\n\u001B[32m--> \u001B[39m\u001B[32m771\u001B[39m flow_system = \u001B[38;5;28;43msuper\u001B[39;49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\u001B[43m.\u001B[49m\u001B[43mfrom_netcdf\u001B[49m\u001B[43m(\u001B[49m\u001B[43mpath\u001B[49m\u001B[43m)\u001B[49m\n\u001B[32m 772\u001B[39m \u001B[38;5;66;03m# Derive name from filename (without extension)\u001B[39;00m\n\u001B[32m 773\u001B[39m flow_system.name = path.stem\n", + "\u001B[36mFile \u001B[39m\u001B[32m~/PycharmProjects/flixopt_182303/flixopt/structure.py:978\u001B[39m, in \u001B[36mInterface.from_netcdf\u001B[39m\u001B[34m(cls, path)\u001B[39m\n\u001B[32m 976\u001B[39m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mcls\u001B[39m.from_dataset(ds)\n\u001B[32m 977\u001B[39m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mException\u001B[39;00m \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[32m--> \u001B[39m\u001B[32m978\u001B[39m \u001B[38;5;28;01mraise\u001B[39;00m \u001B[38;5;167;01mOSError\u001B[39;00m(\u001B[33mf\u001B[39m\u001B[33m'\u001B[39m\u001B[33mFailed to load \u001B[39m\u001B[38;5;132;01m{\u001B[39;00m\u001B[38;5;28mcls\u001B[39m.\u001B[34m__name__\u001B[39m\u001B[38;5;132;01m}\u001B[39;00m\u001B[33m from NetCDF file \u001B[39m\u001B[38;5;132;01m{\u001B[39;00mpath\u001B[38;5;132;01m}\u001B[39;00m\u001B[33m: \u001B[39m\u001B[38;5;132;01m{\u001B[39;00me\u001B[38;5;132;01m}\u001B[39;00m\u001B[33m'\u001B[39m) \u001B[38;5;28;01mfrom\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[34;01me\u001B[39;00m\n", + "\u001B[31mOSError\u001B[39m: Failed to load FlowSystem from NetCDF file data/district_heating_system.nc4: [Errno 2] No such file or directory: '/Users/felix/PycharmProjects/flixopt_182303/docs/notebooks/data/district_heating_system.nc4'" + ] + } + ], "source": [ "# Load the district heating system (real data from Zeitreihen2020.csv)\n", "flow_system = fx.FlowSystem.from_netcdf('data/district_heating_system.nc4')\n", @@ -95,7 +117,7 @@ "id": "5", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T14:35:43.057128Z", + "end_time": "2025-12-14T15:15:04.199645Z", "start_time": "2025-12-14T14:35:42.948041Z" } }, @@ -120,227 +142,31 @@ "cell_type": "markdown", "id": "8", "metadata": {}, - "source": [ - "## Visualizing the Clustering Effect\n", - "\n", - "Before optimizing, let's see how clustering transforms the time series data:" - ] + "source": "## Part 1: Clustering (Inter-Period Aggregation)\n\n**Clustering** groups similar periods together to find representative \"typical\" periods.\n\nFor example, with 31 days of data:\n- Original: 31 days × 96 timesteps/day = 2,976 timesteps \n- Clustered (8 typical days): 8 days × 96 timesteps/day = 768 representative timesteps\n\nThe optimizer only solves for 8 unique days, but weights results by how often each typical day occurred.\n\n```python\nfs.transform.cluster(\n n_clusters=8, # Find 8 typical days\n 
cluster_duration='1D', # Each cluster is 1 day\n)\n```" }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "id": "9", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T14:35:46.501672Z", + "end_time": "2025-12-14T15:15:04.206436Z", "start_time": "2025-12-14T14:35:43.520577Z" } }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[2m2025-12-14 15:35:43.523\u001b[0m \u001b[33mWARNING \u001b[0m │ FlowSystem is not connected_and_transformed. Connecting and transforming data now.\n" - ] - }, - { - "data": { - "text/html": [ - "
\n", - "
" - ], - "text/plain": [ - "PlotResult(data= Size: 262kB\n", - "Dimensions: (time: 2976, variable: 5)\n", - "Coordinates:\n", - " * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00\n", - " * variable (variable) object 40B 'ElecDemand(P_el)|fixed_relative_profil...\n", - "Data variables:\n", - " original (variable, time) float64 119kB 58.39 58.36 58.11 ... 152.1 151.0\n", - " aggregated (variable, time) float64 119kB 58.39 58.36 58.11 ... 152.1 151.0, figure=Figure({\n", - " 'data': [{'hovertemplate': ('variable=Original - ElecDemand' ... '}
value=%{y}'),\n",
    "... (individual scattergl trace definitions omitted: Original and Aggregated variants of ElecDemand, GasGrid, GridBuy, GridSell and HeatDemand — dashed original vs. solid aggregated lines over the same 2,976 timesteps) ...",
    "          {'hovertemplate': ('variable=Aggregated - HeatDema' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Aggregated - HeatDemand(Q_th)|fixed_relative_profile',\n", - " 'line': {'color': '#FFA15A', 'dash': 'solid'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Aggregated - HeatDemand(Q_th)|fixed_relative_profile',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('sp3vp8bDX0BEi2zn+4leQO58PzVeGl' ... 'MzMzMjY0BeukkMAgNjQL+fGi/d4GJA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'}],\n", - " 'layout': {'legend': {'title': {'text': 'variable'}, 'tracegroupgap': 0},\n", - " 'margin': {'t': 60},\n", - " 'template': '...',\n", - " 'title': {'text': 'Original vs Aggregated Data (original = ---)'},\n", - " 'xaxis': {'anchor': 'y', 'domain': [0.0, 1.0], 'title': {'text': 'Time in h'}},\n", - " 'yaxis': {'anchor': 'x', 'domain': [0.0, 1.0], 'title': {'text': 'Value'}}}\n", - "}))" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "# Cluster with 8 typical days (from 31 days)\n", - "fs_demo = flow_system.copy()\n", - "fs_clustered_demo = fs_demo.transform.cluster(n_clusters=8, cluster_duration='1D')\n", + "fs_clustering_demo = flow_system.copy()\n", + "fs_clustered_demo = fs_clustering_demo.transform.cluster(n_clusters=8, cluster_duration='1D')\n", "\n", "# Get the clustering object to access tsam results\n", "clustering = fs_clustered_demo._clustering_info['clustering']\n", "\n", + "print(f'Original: {len(flow_system.timesteps)} timesteps ({len(flow_system.timesteps) / 96:.0f} days)')\n", + "print(f'Clustered: {clustering.nr_of_periods} typical days')\n", + "print(f'Cluster assignments: {list(clustering.tsam.clusterOrder)}')\n", + "\n", "# Plot original vs aggregated data\n", "clustering.plot()" ] @@ -349,11 +175,7 @@ "cell_type": "markdown", "id": "10", "metadata": {}, - "source": [ - "## Comparing Different Clustering Parameters\n", - "\n", - "Let's see how different numbers of typical days affect the data representation:" - ] + "source": "### Comparing Different Cluster Counts\n\nMore clusters = better accuracy but less speedup. 
Let's compare:" }, { "cell_type": "code", @@ -361,7 +183,7 @@ "id": "11", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T14:35:51.309343Z", + "end_time": "2025-12-14T15:15:04.206969Z", "start_time": "2025-12-14T14:35:46.889873Z" } }, @@ -395,7 +217,7 @@ "id": "12", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T14:35:51.557060Z", + "end_time": "2025-12-14T15:15:04.207122Z", "start_time": "2025-12-14T14:35:51.433611Z" } }, @@ -496,7 +318,7 @@ "id": "13", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T14:35:51.666590Z", + "end_time": "2025-12-14T15:15:04.211399Z", "start_time": "2025-12-14T14:35:51.615328Z" } }, @@ -599,21 +421,208 @@ }, { "cell_type": "markdown", - "id": "14", + "id": "t8et37i26k", "metadata": {}, + "source": "## Part 2: Segmentation (Inner-Period Aggregation)\n\n**Segmentation** reduces the number of timesteps *within* each period by grouping similar consecutive timesteps.\n\nFor example, with 15-minute resolution data:\n- Original day: 96 timesteps (24h × 4 per hour)\n- Segmented (4 segments): 4 representative timesteps per day\n\nThis is useful when you have high-resolution data but don't need that granularity for your analysis.\n\n```python\nfs.transform.cluster(\n n_clusters=None, # Skip clustering (keep all periods)\n cluster_duration='1D', # Segment within each day\n n_segments=4, # Reduce to 4 segments per day\n)\n```" + }, + { + "cell_type": "code", + "execution_count": null, + "id": "lbpmw6mnb5k", + "metadata": {}, + "outputs": [], "source": [ - "## Baseline: Full Optimization\n", + "# Segmentation only: reduce 96 timesteps/day to 4 segments/day\n", + "fs_segmentation_demo = flow_system.copy()\n", + "fs_segmented_demo = fs_segmentation_demo.transform.cluster(\n", + " n_clusters=None, # No clustering - keep all 31 days\n", + " cluster_duration='1D', # Segment within each day\n", + " n_segments=4, # 4 segments per day\n", + ")\n", + "\n", + "# Get the clustering object\n", + "segmentation = fs_segmented_demo._clustering_info['clustering']\n", "\n", - "First, solve without clustering for comparison:" + "print('Original: 96 timesteps per day (15-min resolution)')\n", + "print(f'Segmented: {segmentation.n_segments} segments per day')\n", + "\n", + "# Plot original vs segmented data\n", + "segmentation.plot()" + ] + }, + { + "cell_type": "markdown", + "id": "6bgh7f0vsj", + "metadata": {}, + "source": "### Comparing Different Segment Counts\n\nMore segments = better accuracy but less speedup:" + }, + { + "cell_type": "code", + "execution_count": null, + "id": "do29lhcinx7", + "metadata": {}, + "outputs": [], + "source": [ + "# Test different numbers of segments\n", + "segment_configs = [4, 8, 12, 24]\n", + "segmentation_results = {}\n", + "\n", + "for n_seg in segment_configs:\n", + " fs_test = flow_system.copy()\n", + " fs_seg = fs_test.transform.cluster(n_clusters=None, cluster_duration='1D', n_segments=n_seg)\n", + " segmentation_results[n_seg] = fs_seg._clustering_info['clustering']\n", + "\n", + "# Use heat demand for comparison\n", + "heat_demand_col = [c for c in segmentation_results[4].original_data.columns if 'Heat' in c or 'Q_th' in c][0]\n", + "print(f'Comparing: {heat_demand_col}')" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "21athrtuavw", + "metadata": {}, + "outputs": [], + "source": [ + "# Compare the segmented data for first day only (clearer visualization)\n", + "fig = make_subplots(\n", + " rows=2,\n", + " cols=2,\n", + " subplot_titles=[f'{n} Segments per Day' for n in segment_configs],\n", + " 
shared_xaxes=True,\n", + " shared_yaxes=True,\n", + " vertical_spacing=0.12,\n", + " horizontal_spacing=0.08,\n", + ")\n", + "\n", + "# Only show first day (96 timesteps) for clarity\n", + "day_length = 96\n", + "\n", + "for i, (_n_seg, seg_result) in enumerate(segmentation_results.items()):\n", + " row, col = divmod(i, 2)\n", + " row += 1\n", + " col += 1\n", + "\n", + " original = seg_result.original_data[heat_demand_col][:day_length]\n", + " aggregated = seg_result.aggregated_data[heat_demand_col][:day_length]\n", + "\n", + " fig.add_trace(\n", + " go.Scatter(\n", + " x=list(range(len(original))),\n", + " y=original.values,\n", + " name='Original',\n", + " line=dict(color='lightgray'),\n", + " showlegend=(i == 0),\n", + " ),\n", + " row=row,\n", + " col=col,\n", + " )\n", + " fig.add_trace(\n", + " go.Scatter(\n", + " x=list(range(len(aggregated))),\n", + " y=aggregated.values,\n", + " name='Segmented',\n", + " line=dict(color='green', width=2),\n", + " showlegend=(i == 0),\n", + " ),\n", + " row=row,\n", + " col=col,\n", + " )\n", + "\n", + "fig.update_layout(\n", + " title='Heat Demand (First Day): Original vs Segmented',\n", + " height=500,\n", + " legend=dict(orientation='h', yanchor='bottom', y=1.02),\n", + ")\n", + "fig.update_xaxes(title_text='Timestep', row=2)\n", + "fig.update_yaxes(title_text='Heat Demand [MW]', col=1)\n", + "fig.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "phpx36k23p", + "metadata": {}, + "outputs": [], + "source": [ + "# Calculate error metrics for segmentation\n", + "seg_metrics = []\n", + "for n_seg, seg_result in segmentation_results.items():\n", + " original = seg_result.original_data[heat_demand_col].values\n", + " aggregated = seg_result.aggregated_data[heat_demand_col].values\n", + "\n", + " rmse = np.sqrt(np.mean((original - aggregated) ** 2))\n", + " mae = np.mean(np.abs(original - aggregated))\n", + " max_error = np.max(np.abs(original - aggregated))\n", + " correlation = np.corrcoef(original, aggregated)[0, 1]\n", + "\n", + " seg_metrics.append(\n", + " {\n", + " 'Segments': n_seg,\n", + " 'RMSE': rmse,\n", + " 'MAE': mae,\n", + " 'Max Error': max_error,\n", + " 'Correlation': correlation,\n", + " }\n", + " )\n", + "\n", + "seg_metrics_df = pd.DataFrame(seg_metrics).set_index('Segments')\n", + "seg_metrics_df.style.format(\n", + " {\n", + " 'RMSE': '{:.2f}',\n", + " 'MAE': '{:.2f}',\n", + " 'Max Error': '{:.2f}',\n", + " 'Correlation': '{:.4f}',\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "u6sc5ek0rya", + "metadata": {}, + "source": "## Part 3: Combined Clustering + Segmentation\n\nFor maximum speedup, combine both techniques:\n\n```python\nfs.transform.cluster(\n n_clusters=8, # 8 typical days (inter-period)\n cluster_duration='1D',\n n_segments=4, # 4 segments per day (inner-period)\n)\n```\n\nThis reduces 2,976 timesteps to just 8 × 4 = 32 representative timesteps!" 
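+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "combined-reduction-check",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Illustrative sketch added in review (cell id 'combined-reduction-check' is not from the\n",
+    "# original notebook): verify the timestep-reduction arithmetic claimed above.\n",
+    "n_days, steps_per_day = 31, 96\n",
+    "n_clusters, n_segments = 8, 4\n",
+    "print(f'Original: {n_days * steps_per_day} timesteps')\n",
+    "print(f'Combined: {n_clusters} typical days x {n_segments} segments = {n_clusters * n_segments} representative timesteps')"
+   ]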
+ }, + { + "cell_type": "code", + "execution_count": null, + "id": "j24sbfpl0x", + "metadata": {}, + "outputs": [], + "source": [ + "# Combined: 8 typical days × 4 segments each\n", + "fs_combined_demo = flow_system.copy()\n", + "fs_combined = fs_combined_demo.transform.cluster(\n", + " n_clusters=8,\n", + " cluster_duration='1D',\n", + " n_segments=4,\n", + ")\n", + "\n", + "combined_clustering = fs_combined._clustering_info['clustering']\n", + "\n", + "print(f'Original: {len(flow_system.timesteps)} timesteps')\n", + "print(\n", + " f'Combined: {combined_clustering.nr_of_periods} typical days × {combined_clustering.n_segments} segments = {combined_clustering.nr_of_periods * combined_clustering.n_segments} representative timesteps'\n", + ")\n", + "\n", + "# Plot the combined result\n", + "combined_clustering.plot()" + ] + }, + { + "cell_type": "markdown", + "id": "14", + "metadata": {}, + "source": "## Performance Comparison\n\nNow let's compare the optimization performance of all approaches.\n\n### Baseline: Full Optimization (No Aggregation)" + }, { "cell_type": "code", "execution_count": 19, "id": "15", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T14:36:18.822362Z", + "end_time": "2025-12-14T15:15:04.217161Z", "start_time": "2025-12-14T14:35:51.686260Z" } }, @@ -622,23 +631,23 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001b[2m2025-12-14 15:35:52.048\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", - "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", - "\u001b[2m2025-12-14 15:35:52.216\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", - "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n" + "\u001B[2m2025-12-14 15:35:52.048\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", + "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). 
Consider using status_parameters to allow the Flow to be switched active and inactive.\n", + "\u001B[2m2025-12-14 15:35:52.216\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", + "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Writing constraints.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 64/64 [00:00<00:00, 75.17it/s] \n", - "Writing continuous variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 55/55 [00:00<00:00, 421.23it/s]\n", - "Writing binary variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 5/5 [00:00<00:00, 368.46it/s]\n" + "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 64/64 [00:00<00:00, 75.17it/s] \n", + "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 55/55 [00:00<00:00, 421.23it/s]\n", + "Writing binary variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 5/5 [00:00<00:00, 368.46it/s]\n" ] }, { @@ -727,18 +736,7 @@ "cell_type": "markdown", "id": "16", "metadata": {}, - "source": [ - "## Basic Clustering\n", - "\n", - "Cluster the time series into **8 typical days** (from 31 days of data):\n", - "\n", - "```python\n", - "clustered_fs = flow_system.transform.cluster(\n", - " n_clusters=8, # Number of typical periods\n", - " cluster_duration='1D', # Duration per cluster (1 day)\n", - ")\n", - "```" - ] + "source": "### Clustering Only (8 Typical Days)" }, { "cell_type": "code", @@ -746,7 +744,7 @@ "id": "17", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T14:36:25.518948Z", + "end_time": "2025-12-14T15:15:04.218001Z", "start_time": "2025-12-14T14:36:18.969306Z" } }, @@ -755,23 +753,23 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001b[2m2025-12-14 15:36:19.954\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", - "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", - "\u001b[2m2025-12-14 15:36:20.127\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", - "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). 
Consider using status_parameters to allow the Flow to be switched active and inactive.\n" + "\u001B[2m2025-12-14 15:36:19.954\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", + "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", + "\u001B[2m2025-12-14 15:36:20.127\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", + "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Writing constraints.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 81/81 [00:01<00:00, 65.44it/s]\n", - "Writing continuous variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 55/55 [00:00<00:00, 808.42it/s]\n", - "Writing binary variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 5/5 [00:00<00:00, 766.39it/s]\n" + "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 81/81 [00:01<00:00, 65.44it/s]\n", + "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 55/55 [00:00<00:00, 808.42it/s]\n", + "Writing binary variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 5/5 [00:00<00:00, 766.39it/s]\n" ] }, { @@ -860,6 +858,72 @@ " print(f' {name}: {float(size.item()):.1f}')" ] }, + { + "cell_type": "markdown", + "id": "qk9l29yv32p", + "metadata": {}, + "source": "### Segmentation Only (4 Segments per Day)" + }, + { + "cell_type": "code", + "execution_count": null, + "id": "puisldf6fa", + "metadata": {}, + "outputs": [], + "source": [ + "start = timeit.default_timer()\n", + "\n", + "# Segmentation only: reduce timesteps within each day\n", + "fs_segmented = flow_system.transform.cluster(\n", + " n_clusters=None, # No clustering\n", + " cluster_duration='1D',\n", + " n_segments=4, # 4 segments per day\n", + ")\n", + "\n", + "fs_segmented.optimize(solver)\n", + "time_segmented = timeit.default_timer() - start\n", + "\n", + "print(f'Segmentation optimization: {time_segmented:.2f} seconds')\n", + "print(f'Cost: {fs_segmented.solution[\"costs\"].item():,.0f} €')\n", + "print(f'Speedup: {time_full / time_segmented:.1f}x')\n", + "print('\\nOptimized sizes:')\n", + "for name, size in fs_segmented.statistics.sizes.items():\n", + " print(f' {name}: {float(size.item()):.1f}')" + ] + }, + { + "cell_type": "markdown", + "id": "6nlsdmx326d", + "metadata": {}, + "source": "### Combined: Clustering + Segmentation" + }, + { + "cell_type": "code", + "execution_count": null, + "id": "frq1vct5l4v", + "metadata": {}, + "outputs": [], + "source": [ + "start = timeit.default_timer()\n", + "\n", + "# Combined: 8 typical days × 4 segments each\n", + "fs_combined_opt = 
flow_system.transform.cluster(\n", + " n_clusters=8,\n", + " cluster_duration='1D',\n", + " n_segments=4,\n", + ")\n", + "\n", + "fs_combined_opt.optimize(solver)\n", + "time_combined = timeit.default_timer() - start\n", + "\n", + "print(f'Combined optimization: {time_combined:.2f} seconds')\n", + "print(f'Cost: {fs_combined_opt.solution[\"costs\"].item():,.0f} €')\n", + "print(f'Speedup: {time_full / time_combined:.1f}x')\n", + "print('\\nOptimized sizes:')\n", + "for name, size in fs_combined_opt.statistics.sizes.items():\n", + " print(f' {name}: {float(size.item()):.1f}')" + ] + }, { "cell_type": "markdown", "id": "18", @@ -870,66 +934,15 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": null, "id": "19", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T14:36:25.618993Z", + "end_time": "2025-12-14T15:15:04.218208Z", "start_time": "2025-12-14T14:36:25.608382Z" } }, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
 Time [s] | Cost [€] | CHP Size | Boiler Size | Storage Size | Cost Gap [%] | Speedup
Full (baseline) | 27.13 | 2,209,206 | 300.0 | 0.0 | 1000 | 0.00 | 1.0x
Clustered (8 days) | 6.54 | 2,215,424 | 300.0 | 0.0 | 1000 | 0.28 | 4.1x
\n" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "results = {\n", " 'Full (baseline)': {\n", @@ -939,13 +952,27 @@ " 'Boiler Size': fs_full.statistics.sizes['Boiler(Q_th)'].item(),\n", " 'Storage Size': fs_full.statistics.sizes['Storage'].item(),\n", " },\n", - " 'Clustered (8 days)': {\n", + " 'Clustering (8 days)': {\n", " 'Time [s]': time_clustered,\n", " 'Cost [€]': fs_clustered.solution['costs'].item(),\n", " 'CHP Size': fs_clustered.statistics.sizes['CHP(Q_th)'].item(),\n", " 'Boiler Size': fs_clustered.statistics.sizes['Boiler(Q_th)'].item(),\n", " 'Storage Size': fs_clustered.statistics.sizes['Storage'].item(),\n", " },\n", + " 'Segmentation (4 seg)': {\n", + " 'Time [s]': time_segmented,\n", + " 'Cost [€]': fs_segmented.solution['costs'].item(),\n", + " 'CHP Size': fs_segmented.statistics.sizes['CHP(Q_th)'].item(),\n", + " 'Boiler Size': fs_segmented.statistics.sizes['Boiler(Q_th)'].item(),\n", + " 'Storage Size': fs_segmented.statistics.sizes['Storage'].item(),\n", + " },\n", + " 'Combined (8×4)': {\n", + " 'Time [s]': time_combined,\n", + " 'Cost [€]': fs_combined_opt.solution['costs'].item(),\n", + " 'CHP Size': fs_combined_opt.statistics.sizes['CHP(Q_th)'].item(),\n", + " 'Boiler Size': fs_combined_opt.statistics.sizes['Boiler(Q_th)'].item(),\n", + " 'Storage Size': fs_combined_opt.statistics.sizes['Storage'].item(),\n", + " },\n", "}\n", "\n", "comparison = pd.DataFrame(results).T\n", @@ -984,7 +1011,7 @@ "id": "21", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T14:36:25.680716Z", + "end_time": "2025-12-14T15:15:04.218834Z", "start_time": "2025-12-14T14:36:25.657257Z" } }, @@ -1049,7 +1076,7 @@ "id": "22", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T14:36:27.218698Z", + "end_time": "2025-12-14T15:15:04.219477Z", "start_time": "2025-12-14T14:36:25.699998Z" } }, @@ -1058,7 +1085,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001b[2m2025-12-14 15:36:25.701\u001b[0m \u001b[33mWARNING \u001b[0m │ FlowSystem is not connected_and_transformed. Connecting and transforming data now.\n", + "\u001B[2m2025-12-14 15:36:25.701\u001B[0m \u001B[33mWARNING \u001B[0m │ FlowSystem is not connected_and_transformed. 
Connecting and transforming data now.\n", "Clustering was applied to 3 period(s):\n", " - period=2024\n", " - period=2025\n", @@ -1083,7 +1110,7 @@ "id": "23", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T14:36:29.672690Z", + "end_time": "2025-12-14T15:15:04.219676Z", "start_time": "2025-12-14T14:36:27.402738Z" } }, @@ -1092,8 +1119,8 @@ "name": "stderr", "output_type": "stream", "text": [ - "Writing constraints.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 38/38 [00:00<00:00, 80.29it/s]\n", - "Writing continuous variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 22/22 [00:00<00:00, 398.66it/s]\n" + "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 38/38 [00:00<00:00, 80.29it/s]\n", + "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 22/22 [00:00<00:00, 398.66it/s]\n" ] }, { @@ -1133,67 +1160,13 @@ "cell_type": "markdown", "id": "24", "metadata": {}, - "source": [ - "## API Reference\n", - "\n", - "### `transform.cluster()` Parameters\n", - "\n", - "| Parameter | Type | Description |\n", - "|-----------|------|-------------|\n", - "| `n_clusters` | `int` | Number of typical periods (e.g., 8 typical days) |\n", - "| `cluster_duration` | `str \\| float` | Duration per cluster ('1D', '24h', or hours as float) |\n", - "| `aggregate_data` | `bool` | If True (default), aggregate time series data |\n", - "| `include_storage` | `bool` | Include storage in clustering constraints (default: True) |\n", - "| `flexibility_percent` | `float` | Allow binary variable deviations (default: 0) |\n", - "| `flexibility_penalty` | `float` | Penalty for deviations (default: 0) |\n", - "| `time_series_for_high_peaks` | `list` | Force inclusion of high-value periods |\n", - "| `time_series_for_low_peaks` | `list` | Force inclusion of low-value periods |\n", - "\n", - "### Common Patterns\n", - "\n", - "```python\n", - "# 8 typical days from a year\n", - "fs.transform.cluster(n_clusters=8, cluster_duration='1D')\n", - "\n", - "# 4 typical weeks\n", - "fs.transform.cluster(n_clusters=4, cluster_duration='1W')\n", - "\n", - "# Force inclusion of peak demand periods\n", - "fs.transform.cluster(\n", - " n_clusters=8,\n", - " cluster_duration='1D',\n", - " time_series_for_high_peaks=[heat_demand_ts],\n", - ")\n", - "```" - ] + "source": "## API Reference\n\n### `transform.cluster()` Parameters\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `n_clusters` | `int \\| None` | Number of typical periods (e.g., 8 typical days). Set to `None` for segmentation-only. |\n| `cluster_duration` | `str \\| float` | Duration per cluster ('1D', '24h', or hours as float) |\n| `n_segments` | `int \\| None` | Segments within each period (inner-period aggregation). 
Default: `None` (no segmentation) |\n| `aggregate_data` | `bool` | If True (default), aggregate time series data |\n| `include_storage` | `bool` | Include storage in clustering constraints (default: True) |\n| `flexibility_percent` | `float` | Allow binary variable deviations (default: 0) |\n| `flexibility_penalty` | `float` | Penalty for deviations (default: 0) |\n| `time_series_for_high_peaks` | `list` | Force inclusion of high-value periods |\n| `time_series_for_low_peaks` | `list` | Force inclusion of low-value periods |\n\n### Common Patterns\n\n```python\n# Clustering only: 8 typical days from a year\nfs.transform.cluster(n_clusters=8, cluster_duration='1D')\n\n# Segmentation only: reduce to 4 segments per day\nfs.transform.cluster(n_clusters=None, cluster_duration='1D', n_segments=4)\n\n# Combined: 8 typical days × 4 segments each\nfs.transform.cluster(n_clusters=8, cluster_duration='1D', n_segments=4)\n\n# Force inclusion of peak demand periods\nfs.transform.cluster(\n n_clusters=8,\n cluster_duration='1D',\n time_series_for_high_peaks=[heat_demand_ts],\n)\n```" }, { "cell_type": "markdown", "id": "25", "metadata": {}, - "source": [ - "## Summary\n", - "\n", - "You learned how to:\n", - "\n", - "- Use **`transform.cluster()`** to identify typical periods\n", - "- Visualize the **clustering effect** on time series data\n", - "- Compare different clustering parameters and their **accuracy trade-offs**\n", - "- Cluster **multi-period** FlowSystems (each period independently)\n", - "\n", - "### When to Use Clustering\n", - "\n", - "| Scenario | Recommendation |\n", - "|----------|----------------|\n", - "| Annual optimization | 8-12 typical days |\n", - "| Investment decisions | Use with two-stage optimization |\n", - "| Preserve extremes | Use `time_series_for_high_peaks` |\n", - "\n", - "### Next Steps\n", - "\n", - "- **[08a-Aggregation](08a-aggregation.ipynb)**: Other aggregation techniques (resampling, two-stage)\n", - "- **[08b-Rolling Horizon](08b-rolling-horizon.ipynb)**: Sequential optimization for long time series" - ] + "source": "## Summary\n\nYou learned how to:\n\n- Use **clustering** (`n_clusters`) to identify typical periods (inter-period aggregation)\n- Use **segmentation** (`n_segments`) to reduce timesteps within periods (inner-period aggregation)\n- **Combine both** techniques for maximum speedup\n- Cluster **multi-period** FlowSystems (each period independently)\n\n### When to Use Each Technique\n\n| Technique | Use Case | Example |\n|-----------|----------|---------|\n| **Clustering** | Many similar periods (days, weeks) | 365 days → 12 typical days |\n| **Segmentation** | High-resolution data not needed | 96 timesteps/day → 4 segments |\n| **Combined** | Large problems with high resolution | 365 × 96 → 12 × 4 = 48 timesteps |\n\n### Accuracy vs. 
Speed Trade-off\n\n| Approach | Speedup | Accuracy | Best For |\n|----------|---------|----------|----------|\n| More clusters/segments | Lower | Higher | Final results |\n| Fewer clusters/segments | Higher | Lower | Screening, exploration |\n\n### Next Steps\n\n- **[08a-Aggregation](08a-aggregation.ipynb)**: Other aggregation techniques (resampling, two-stage)\n- **[08b-Rolling Horizon](08b-rolling-horizon.ipynb)**: Sequential optimization for long time series" } ], "metadata": { From 6aebd183f10328366245372b44f67652aa60b5f9 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 14 Dec 2025 16:26:17 +0100 Subject: [PATCH 015/191] fix cluster_multi_dimensional_data --- flixopt/transform_accessor.py | 1 + 1 file changed, 1 insertion(+) diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index f10bc616e..6ab2bca9f 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -362,6 +362,7 @@ def _cluster_multi_dimensional( # Store clustering info for later use clustered_fs._clustering_info = { 'parameters': params, + 'clustering': clustering_results, # Required by _add_clustering_constraints 'clustering_results': clustering_results, # Dict of Clustering objects per dimension 'components_to_clusterize': components_to_clusterize, 'original_fs': self._fs, From c0c7c459996072e37ef4c6ce1b43c688ac5aabc5 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 14 Dec 2025 16:27:13 +0100 Subject: [PATCH 016/191] fix notebooks to only create flow_system if needed --- docs/notebooks/08a-aggregation.ipynb | 9 +- docs/notebooks/08b-rolling-horizon.ipynb | 2 +- docs/notebooks/08c-clustering.ipynb | 5594 ++++++++++++++++++++-- 3 files changed, 5167 insertions(+), 438 deletions(-) diff --git a/docs/notebooks/08a-aggregation.ipynb b/docs/notebooks/08a-aggregation.ipynb index 24f0883fd..560829492 100644 --- a/docs/notebooks/08a-aggregation.ipynb +++ b/docs/notebooks/08a-aggregation.ipynb @@ -58,14 +58,7 @@ "id": "4", "metadata": {}, "outputs": [], - "source": [ - "# Load the district heating system (real data from Zeitreihen2020.csv)\n", - "flow_system = fx.FlowSystem.from_netcdf('data/district_heating_system.nc4')\n", - "\n", - "timesteps = flow_system.timesteps\n", - "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 96:.0f} days at 15-min resolution)')\n", - "print(f'Components: {list(flow_system.components.keys())}')" - ] + "source": "from pathlib import Path\n\n# Generate example data if not present (for local development)\ndata_file = Path('data/district_heating_system.nc4')\nif not data_file.exists():\n from data.generate_example_systems import create_district_heating_system\n\n fs = create_district_heating_system()\n fs.optimize(fx.solvers.HighsSolver(log_to_console=False))\n fs.to_netcdf(data_file, overwrite=True)\n\n# Load the district heating system (real data from Zeitreihen2020.csv)\nflow_system = fx.FlowSystem.from_netcdf(data_file)\n\ntimesteps = flow_system.timesteps\nprint(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 96:.0f} days at 15-min resolution)')\nprint(f'Components: {list(flow_system.components.keys())}')" }, { "cell_type": "code", diff --git a/docs/notebooks/08b-rolling-horizon.ipynb b/docs/notebooks/08b-rolling-horizon.ipynb index bad3fe983..4f868374b 100644 --- a/docs/notebooks/08b-rolling-horizon.ipynb +++ b/docs/notebooks/08b-rolling-horizon.ipynb @@ -65,7 +65,7 @@ "start_time": "2025-12-13T19:01:44.973157Z" } }, - 
"source": "# Load the operational system (real data from Zeitreihen2020.csv, two weeks)\nflow_system = fx.FlowSystem.from_netcdf('data/operational_system.nc4')\n\ntimesteps = flow_system.timesteps\nprint(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 96:.0f} days at 15-min resolution)')\nprint(f'Components: {list(flow_system.components.keys())}')", + "source": "from pathlib import Path\n\n# Generate example data if not present (for local development)\ndata_file = Path('data/operational_system.nc4')\nif not data_file.exists():\n from data.generate_example_systems import create_operational_system\n\n fs = create_operational_system()\n fs.optimize(fx.solvers.HighsSolver(log_to_console=False))\n fs.to_netcdf(data_file, overwrite=True)\n\n# Load the operational system (real data from Zeitreihen2020.csv, two weeks)\nflow_system = fx.FlowSystem.from_netcdf(data_file)\n\ntimesteps = flow_system.timesteps\nprint(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 96:.0f} days at 15-min resolution)')\nprint(f'Components: {list(flow_system.components.keys())}')", "outputs": [], "execution_count": null }, diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb index b1e083cc5..93cc6a70c 100644 --- a/docs/notebooks/08c-clustering.ipynb +++ b/docs/notebooks/08c-clustering.ipynb @@ -16,127 +16,4019 @@ }, { "cell_type": "code", - "execution_count": 3, "id": "2", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:15:03.886557Z", - "start_time": "2025-12-14T15:15:03.823696Z" + "end_time": "2025-12-14T15:23:07.849032Z", + "start_time": "2025-12-14T15:23:03.981261Z" } }, + "source": [ + "import timeit\n", + "\n", + "import numpy as np\n", + "import pandas as pd\n", + "import plotly.graph_objects as go\n", + "from plotly.subplots import make_subplots\n", + "\n", + "import flixopt as fx\n", + "\n", + "fx.CONFIG.notebook()" + ], + "outputs": [ + { + "data": { + "text/plain": [ + "flixopt.config.CONFIG" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": 2 + }, + { + "cell_type": "markdown", + "id": "3", + "metadata": {}, + "source": "## Load the FlowSystem\n\nWe use a pre-built district heating system with real-world time series data (one month at 15-min resolution):" + }, + { + "cell_type": "code", + "id": "4", + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T15:23:08.098050Z", + "start_time": "2025-12-14T15:23:07.872444Z" + } + }, + "source": "from pathlib import Path\n\n# Generate example data if not present (for local development)\ndata_file = Path('data/district_heating_system.nc4')\nif not data_file.exists():\n from data.generate_example_systems import create_district_heating_system\n\n fs = create_district_heating_system()\n fs.optimize(fx.solvers.HighsSolver(log_to_console=False))\n fs.to_netcdf(data_file, overwrite=True)\n\n# Load the district heating system (real data from Zeitreihen2020.csv)\nflow_system = fx.FlowSystem.from_netcdf(data_file)\n\ntimesteps = flow_system.timesteps\nprint(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 96:.0f} days at 15-min resolution)')\nprint(f'Components: {list(flow_system.components.keys())}')", + "outputs": [], + "execution_count": null + }, + { + "cell_type": "code", + "id": "5", + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T15:23:08.202452Z", + "start_time": "2025-12-14T15:23:08.106640Z" + } + }, + "source": [ + "# Visualize first two weeks of data\n", + "heat_demand = 
flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile\n", + "electricity_price = flow_system.components['GridBuy'].outputs[0].effects_per_flow_hour['costs']\n", + "\n", + "fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.1)\n", + "\n", + "fig.add_trace(go.Scatter(x=timesteps[:1344], y=heat_demand.values[:1344], name='Heat Demand'), row=1, col=1)\n", + "fig.add_trace(go.Scatter(x=timesteps[:1344], y=electricity_price.values[:1344], name='Electricity Price'), row=2, col=1)\n", + "\n", + "fig.update_layout(height=400, title='First Two Weeks of Data')\n", + "fig.update_yaxes(title_text='Heat Demand [MW]', row=1, col=1)\n", + "fig.update_yaxes(title_text='El. Price [€/MWh]', row=2, col=1)\n", + "fig.show()" + ], "outputs": [ { "data": { - "text/plain": [ - "flixopt.config.CONFIG" + "text/html": [ + " \n", + " \n", + " " + ] + }, + "metadata": {}, + "output_type": "display_data", + "jetTransient": { + "display_id": null + } + }, + { + "data": { + "text/html": [ + "
" ] }, - "execution_count": 3, "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "import timeit\n", - "\n", - "import numpy as np\n", - "import pandas as pd\n", - "import plotly.graph_objects as go\n", - "from plotly.subplots import make_subplots\n", - "\n", - "import flixopt as fx\n", - "\n", - "fx.CONFIG.notebook()" - ] - }, - { - "cell_type": "markdown", - "id": "3", - "metadata": {}, - "source": "## Load the FlowSystem\n\nWe use a pre-built district heating system with real-world time series data (one month at 15-min resolution):" - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "4", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:15:04.193792Z", - "start_time": "2025-12-14T15:15:03.954424Z" - } - }, - "outputs": [ - { - "ename": "OSError", - "evalue": "Failed to load FlowSystem from NetCDF file data/district_heating_system.nc4: [Errno 2] No such file or directory: '/Users/felix/PycharmProjects/flixopt_182303/docs/notebooks/data/district_heating_system.nc4'", - "output_type": "error", - "traceback": [ - "\u001B[31m---------------------------------------------------------------------------\u001B[39m", - "\u001B[31mKeyError\u001B[39m Traceback (most recent call last)", - "\u001B[36mFile \u001B[39m\u001B[32m~/PycharmProjects/flixopt_182303/.venv/lib/python3.11/site-packages/xarray/backends/file_manager.py:219\u001B[39m, in \u001B[36mCachingFileManager._acquire_with_cache_info\u001B[39m\u001B[34m(self, needs_lock)\u001B[39m\n\u001B[32m 218\u001B[39m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[32m--> \u001B[39m\u001B[32m219\u001B[39m file = \u001B[38;5;28;43mself\u001B[39;49m\u001B[43m.\u001B[49m\u001B[43m_cache\u001B[49m\u001B[43m[\u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[43m.\u001B[49m\u001B[43m_key\u001B[49m\u001B[43m]\u001B[49m\n\u001B[32m 220\u001B[39m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mKeyError\u001B[39;00m:\n", - "\u001B[36mFile \u001B[39m\u001B[32m~/PycharmProjects/flixopt_182303/.venv/lib/python3.11/site-packages/xarray/backends/lru_cache.py:56\u001B[39m, in \u001B[36mLRUCache.__getitem__\u001B[39m\u001B[34m(self, key)\u001B[39m\n\u001B[32m 55\u001B[39m \u001B[38;5;28;01mwith\u001B[39;00m \u001B[38;5;28mself\u001B[39m._lock:\n\u001B[32m---> \u001B[39m\u001B[32m56\u001B[39m value = \u001B[38;5;28;43mself\u001B[39;49m\u001B[43m.\u001B[49m\u001B[43m_cache\u001B[49m\u001B[43m[\u001B[49m\u001B[43mkey\u001B[49m\u001B[43m]\u001B[49m\n\u001B[32m 57\u001B[39m \u001B[38;5;28mself\u001B[39m._cache.move_to_end(key)\n", - "\u001B[31mKeyError\u001B[39m: [, ('/Users/felix/PycharmProjects/flixopt_182303/docs/notebooks/data/district_heating_system.nc4',), 'r', (('clobber', True), ('diskless', False), ('format', 'NETCDF4'), ('persist', False)), '7662be6e-fdfd-436b-880a-38f84ad236df']", - "\nDuring handling of the above exception, another exception occurred:\n", - "\u001B[31mFileNotFoundError\u001B[39m Traceback (most recent call last)", - "\u001B[36mFile \u001B[39m\u001B[32m~/PycharmProjects/flixopt_182303/flixopt/structure.py:975\u001B[39m, in \u001B[36mInterface.from_netcdf\u001B[39m\u001B[34m(cls, path)\u001B[39m\n\u001B[32m 974\u001B[39m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[32m--> \u001B[39m\u001B[32m975\u001B[39m ds = \u001B[43mfx_io\u001B[49m\u001B[43m.\u001B[49m\u001B[43mload_dataset_from_netcdf\u001B[49m\u001B[43m(\u001B[49m\u001B[43mpath\u001B[49m\u001B[43m)\u001B[49m\n\u001B[32m 976\u001B[39m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mcls\u001B[39m.from_dataset(ds)\n", - "\u001B[36mFile 
       "[... intermediate stack frames omitted: xarray load_dataset -> netCDF4 Dataset.__init__ raises FileNotFoundError for the missing data file, re-raised below as OSError ...]",
759\u001B[39m \u001B[33;03mLoad a FlowSystem from a NetCDF file.\u001B[39;00m\n\u001B[32m 760\u001B[39m \n\u001B[32m (...)\u001B[39m\u001B[32m 768\u001B[39m \u001B[33;03m FlowSystem instance with name set from filename\u001B[39;00m\n\u001B[32m 769\u001B[39m \u001B[33;03m\"\"\"\u001B[39;00m\n\u001B[32m 770\u001B[39m path = pathlib.Path(path)\n\u001B[32m--> \u001B[39m\u001B[32m771\u001B[39m flow_system = \u001B[38;5;28;43msuper\u001B[39;49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\u001B[43m.\u001B[49m\u001B[43mfrom_netcdf\u001B[49m\u001B[43m(\u001B[49m\u001B[43mpath\u001B[49m\u001B[43m)\u001B[49m\n\u001B[32m 772\u001B[39m \u001B[38;5;66;03m# Derive name from filename (without extension)\u001B[39;00m\n\u001B[32m 773\u001B[39m flow_system.name = path.stem\n", - "\u001B[36mFile \u001B[39m\u001B[32m~/PycharmProjects/flixopt_182303/flixopt/structure.py:978\u001B[39m, in \u001B[36mInterface.from_netcdf\u001B[39m\u001B[34m(cls, path)\u001B[39m\n\u001B[32m 976\u001B[39m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mcls\u001B[39m.from_dataset(ds)\n\u001B[32m 977\u001B[39m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mException\u001B[39;00m \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[32m--> \u001B[39m\u001B[32m978\u001B[39m \u001B[38;5;28;01mraise\u001B[39;00m \u001B[38;5;167;01mOSError\u001B[39;00m(\u001B[33mf\u001B[39m\u001B[33m'\u001B[39m\u001B[33mFailed to load \u001B[39m\u001B[38;5;132;01m{\u001B[39;00m\u001B[38;5;28mcls\u001B[39m.\u001B[34m__name__\u001B[39m\u001B[38;5;132;01m}\u001B[39;00m\u001B[33m from NetCDF file \u001B[39m\u001B[38;5;132;01m{\u001B[39;00mpath\u001B[38;5;132;01m}\u001B[39;00m\u001B[33m: \u001B[39m\u001B[38;5;132;01m{\u001B[39;00me\u001B[38;5;132;01m}\u001B[39;00m\u001B[33m'\u001B[39m) \u001B[38;5;28;01mfrom\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[34;01me\u001B[39;00m\n", - "\u001B[31mOSError\u001B[39m: Failed to load FlowSystem from NetCDF file data/district_heating_system.nc4: [Errno 2] No such file or directory: '/Users/felix/PycharmProjects/flixopt_182303/docs/notebooks/data/district_heating_system.nc4'" - ] + "output_type": "display_data", + "jetTransient": { + "display_id": null + } } ], - "source": [ - "# Load the district heating system (real data from Zeitreihen2020.csv)\n", - "flow_system = fx.FlowSystem.from_netcdf('data/district_heating_system.nc4')\n", - "\n", - "timesteps = flow_system.timesteps\n", - "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 96:.0f} days at 15-min resolution)')\n", - "print(f'Components: {list(flow_system.components.keys())}')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:15:04.199645Z", - "start_time": "2025-12-14T14:35:42.948041Z" - } - }, - "outputs": [], - "source": [ - "# Visualize first two weeks of data\n", - "heat_demand = flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile\n", - "electricity_price = flow_system.components['GridBuy'].outputs[0].effects_per_flow_hour['costs']\n", - "\n", - "fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.1)\n", - "\n", - "fig.add_trace(go.Scatter(x=timesteps[:1344], y=heat_demand.values[:1344], name='Heat Demand'), row=1, col=1)\n", - "fig.add_trace(go.Scatter(x=timesteps[:1344], y=electricity_price.values[:1344], name='Electricity Price'), row=2, col=1)\n", - "\n", - "fig.update_layout(height=400, title='First Two Weeks of Data')\n", - "fig.update_yaxes(title_text='Heat Demand [MW]', row=1, col=1)\n", - 
"fig.update_yaxes(title_text='El. Price [€/MWh]', row=2, col=1)\n", - "fig.show()" - ] + "execution_count": 4 }, { "cell_type": "markdown", @@ -146,15 +4038,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "9", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:15:04.206436Z", - "start_time": "2025-12-14T14:35:43.520577Z" + "end_time": "2025-12-14T15:23:09.916554Z", + "start_time": "2025-12-14T15:23:08.633006Z" } }, - "outputs": [], "source": [ "# Cluster with 8 typical days (from 31 days)\n", "fs_clustering_demo = flow_system.copy()\n", @@ -169,7 +4059,207 @@ "\n", "# Plot original vs aggregated data\n", "clustering.plot()" - ] + ], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Original: 2976 timesteps (31 days)\n", + "Clustered: 8 typical days\n", + "Cluster assignments: [np.int32(3), np.int32(7), np.int32(5), np.int32(2), np.int32(1), np.int32(4), np.int32(7), np.int32(7), np.int32(7), np.int32(5), np.int32(2), np.int32(1), np.int32(4), np.int32(7), np.int32(7), np.int32(0), np.int32(5), np.int32(2), np.int32(1), np.int32(4), np.int32(0), np.int32(0), np.int32(0), np.int32(5), np.int32(2), np.int32(1), np.int32(4), np.int32(0), np.int32(0), np.int32(0), np.int32(6)]\n" + ] + }, + { + "data": { + "text/plain": [ + "PlotResult(data= Size: 262kB\n", + "Dimensions: (time: 2976, variable: 5)\n", + "Coordinates:\n", + " * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00\n", + " * variable (variable) object 40B 'ElecDemand(P_el)|fixed_relative_profil...\n", + "Data variables:\n", + " original (variable, time) float64 119kB 58.39 58.36 58.11 ... 152.1 151.0\n", + " aggregated (variable, time) float64 119kB 58.39 58.36 58.11 ... 152.1 151.0, figure=Figure({\n", + " 'data': [{'hovertemplate': ('variable=Original - ElecDemand' ... '}
value=%{y}') ... [Plotly Figure repr truncated: ten scattergl traces (dashed 'Original' vs solid 'Aggregated') for ElecDemand(P_el)|fixed_relative_profile, GasGrid(Q_Gas)|costs|per_flow_hour, GridBuy(P_el)|costs|per_flow_hour, GridSell(P_el)|costs|per_flow_hour and HeatDemand(Q_th)|fixed_relative_profile over 2020-01-01 to 2020-01-31; layout: title 'Original vs Aggregated Data (original = ---)', x-axis 'Time in h', y-axis 'Value'] ...\n",
       "}))"
      ],
      "text/html": [
       "[interactive Plotly chart omitted]
" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": 5 }, { "cell_type": "markdown", @@ -179,23 +4269,13 @@ }, { "cell_type": "code", - "execution_count": 16, "id": "11", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:15:04.206969Z", - "start_time": "2025-12-14T14:35:46.889873Z" + "end_time": "2025-12-14T15:23:12.938789Z", + "start_time": "2025-12-14T15:23:10.060235Z" } }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Comparing: HeatDemand(Q_th)|fixed_relative_profile\n" - ] - } - ], "source": [ "# Test different numbers of clusters\n", "cluster_configs = [4, 8, 12, 16]\n", @@ -209,56 +4289,27 @@ "# Use heat demand for comparison (most relevant for district heating)\n", "heat_demand_col = [c for c in clustering_results[4].original_data.columns if 'Heat' in c or 'Q_th' in c][0]\n", "print(f'Comparing: {heat_demand_col}')" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "id": "12", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:15:04.207122Z", - "start_time": "2025-12-14T14:35:51.433611Z" - } - }, + ], "outputs": [ { - "data": { - "text/html": [ - "
" - ] - }, - "jetTransient": { - "display_id": null - }, - "metadata": {}, - "output_type": "display_data" + "name": "stdout", + "output_type": "stream", + "text": [ + "Comparing: HeatDemand(Q_th)|fixed_relative_profile\n" + ] } ], + "execution_count": 6 + }, + { + "cell_type": "code", + "id": "12", + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T15:23:13.151753Z", + "start_time": "2025-12-14T15:23:13.034772Z" + } + }, "source": [ "# Compare the aggregated data for each configuration\n", "fig = make_subplots(\n", @@ -310,32 +4361,105 @@ "fig.update_xaxes(title_text='Timestep', row=2)\n", "fig.update_yaxes(title_text='Heat Demand [MW]', col=1)\n", "fig.show()" - ] + ], + "outputs": [ + { + "data": { + "text/html": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data", + "jetTransient": { + "display_id": null + } + } + ], + "execution_count": 7 }, { "cell_type": "code", - "execution_count": 18, "id": "13", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:15:04.211399Z", - "start_time": "2025-12-14T14:35:51.615328Z" + "end_time": "2025-12-14T15:23:13.465042Z", + "start_time": "2025-12-14T15:23:13.306250Z" } }, + "source": [ + "# Calculate error metrics for each configuration\n", + "metrics = []\n", + "for n, clustering in clustering_results.items():\n", + " original = clustering.original_data[heat_demand_col].values\n", + " aggregated = clustering.aggregated_data[heat_demand_col].values\n", + "\n", + " rmse = np.sqrt(np.mean((original - aggregated) ** 2))\n", + " mae = np.mean(np.abs(original - aggregated))\n", + " max_error = np.max(np.abs(original - aggregated))\n", + " correlation = np.corrcoef(original, aggregated)[0, 1]\n", + "\n", + " metrics.append(\n", + " {\n", + " 'Typical Days': n,\n", + " 'RMSE': rmse,\n", + " 'MAE': mae,\n", + " 'Max Error': max_error,\n", + " 'Correlation': correlation,\n", + " }\n", + " )\n", + "\n", + "metrics_df = pd.DataFrame(metrics).set_index('Typical Days')\n", + "metrics_df.style.format(\n", + " {\n", + " 'RMSE': '{:.2f}',\n", + " 'MAE': '{:.2f}',\n", + " 'Max Error': '{:.2f}',\n", + " 'Correlation': '{:.4f}',\n", + " }\n", + ")" + ], "outputs": [ { "data": { + "text/plain": [ + "" + ], "text/html": [ "\n", - "\n", + "
\n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", " \n", @@ -347,77 +4471,43 @@ " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", "
 RMSEMAEMax ErrorCorrelationRMSEMAEMax ErrorCorrelation
Typical Days
44.844.5212.190.990544.844.5212.190.9905
83.452.606.890.995283.452.606.890.9952
121.680.836.390.9989121.680.836.390.9989
160.370.251.860.9999160.370.251.860.9999
\n" - ], - "text/plain": [ - "" ] }, - "execution_count": 18, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], - "source": [ - "# Calculate error metrics for each configuration\n", - "metrics = []\n", - "for n, clustering in clustering_results.items():\n", - " original = clustering.original_data[heat_demand_col].values\n", - " aggregated = clustering.aggregated_data[heat_demand_col].values\n", - "\n", - " rmse = np.sqrt(np.mean((original - aggregated) ** 2))\n", - " mae = np.mean(np.abs(original - aggregated))\n", - " max_error = np.max(np.abs(original - aggregated))\n", - " correlation = np.corrcoef(original, aggregated)[0, 1]\n", - "\n", - " metrics.append(\n", - " {\n", - " 'Typical Days': n,\n", - " 'RMSE': rmse,\n", - " 'MAE': mae,\n", - " 'Max Error': max_error,\n", - " 'Correlation': correlation,\n", - " }\n", - " )\n", - "\n", - "metrics_df = pd.DataFrame(metrics).set_index('Typical Days')\n", - "metrics_df.style.format(\n", - " {\n", - " 'RMSE': '{:.2f}',\n", - " 'MAE': '{:.2f}',\n", - " 'Max Error': '{:.2f}',\n", - " 'Correlation': '{:.4f}',\n", - " }\n", - ")" - ] + "execution_count": 8 }, { "cell_type": "markdown", @@ -427,10 +4517,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "lbpmw6mnb5k", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T15:23:14.883574Z", + "start_time": "2025-12-14T15:23:13.565649Z" + } + }, "source": [ "# Segmentation only: reduce 96 timesteps/day to 4 segments/day\n", "fs_segmentation_demo = flow_system.copy()\n", @@ -448,7 +4541,206 @@ "\n", "# Plot original vs segmented data\n", "segmentation.plot()" - ] + ], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Original: 96 timesteps per day (15-min resolution)\n", + "Segmented: 4 segments per day\n" + ] + }, + { + "data": { + "text/plain": [ + "PlotResult(data= Size: 262kB\n", + "Dimensions: (time: 2976, variable: 5)\n", + "Coordinates:\n", + " * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00\n", + " * variable (variable) object 40B 'ElecDemand(P_el)|fixed_relative_profil...\n", + "Data variables:\n", + " original (variable, time) float64 119kB 58.39 58.36 58.11 ... 152.1 151.0\n", + " aggregated (variable, time) float64 119kB 51.79 51.79 51.79 ... 163.1 163.1, figure=Figure({\n", + " 'data': [{'hovertemplate': ('variable=Original - ElecDemand' ... '}
value=%{y}') ... [Plotly Figure repr truncated: dashed 'Original' vs solid 'Aggregated' (4 segments per day) traces for the same five time series; layout: title 'Original vs Aggregated Data (original = ---)', x-axis 'Time in h', y-axis 'Value'] ...\n",
       "}))"
      ],
      "text/html": [
       "[interactive Plotly chart omitted]
" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": 9 }, { "cell_type": "markdown", @@ -458,10 +4750,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "do29lhcinx7", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T15:23:18.123816Z", + "start_time": "2025-12-14T15:23:15.186456Z" + } + }, "source": [ "# Test different numbers of segments\n", "segment_configs = [4, 8, 12, 24]\n", @@ -475,14 +4770,27 @@ "# Use heat demand for comparison\n", "heat_demand_col = [c for c in segmentation_results[4].original_data.columns if 'Heat' in c or 'Q_th' in c][0]\n", "print(f'Comparing: {heat_demand_col}')" - ] + ], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Comparing: HeatDemand(Q_th)|fixed_relative_profile\n" + ] + } + ], + "execution_count": 10 }, { "cell_type": "code", - "execution_count": null, "id": "21athrtuavw", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T15:23:18.237902Z", + "start_time": "2025-12-14T15:23:18.213686Z" + } + }, "source": [ "# Compare the segmented data for first day only (clearer visualization)\n", "fig = make_subplots(\n", @@ -537,14 +4845,56 @@ "fig.update_xaxes(title_text='Timestep', row=2)\n", "fig.update_yaxes(title_text='Heat Demand [MW]', col=1)\n", "fig.show()" - ] + ], + "outputs": [ + { + "data": { + "text/html": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data", + "jetTransient": { + "display_id": null + } + } + ], + "execution_count": 11 }, { "cell_type": "code", - "execution_count": null, "id": "phpx36k23p", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T15:23:18.572642Z", + "start_time": "2025-12-14T15:23:18.550552Z" + } + }, "source": [ "# Calculate error metrics for segmentation\n", "seg_metrics = []\n", @@ -576,7 +4926,72 @@ " 'Correlation': '{:.4f}',\n", " }\n", ")" - ] + ], + "outputs": [ + { + "data": { + "text/plain": [ + "" + ], + "text/html": [ + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
 RMSEMAEMax ErrorCorrelation
Segments    
415.6712.3443.150.8954
88.246.4935.360.9722
125.894.5323.950.9859
242.732.1211.380.9970
\n" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": 12 }, { "cell_type": "markdown", @@ -586,10 +5001,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "j24sbfpl0x", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T15:23:19.470745Z", + "start_time": "2025-12-14T15:23:18.603866Z" + } + }, "source": [ "# Combined: 8 typical days × 4 segments each\n", "fs_combined_demo = flow_system.copy()\n", @@ -608,7 +5026,206 @@ "\n", "# Plot the combined result\n", "combined_clustering.plot()" - ] + ], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Original: 2976 timesteps\n", + "Combined: 8 typical days × 4 segments = 32 representative timesteps\n" + ] + }, + { + "data": { + "text/plain": [ + "PlotResult(data= Size: 262kB\n", + "Dimensions: (time: 2976, variable: 5)\n", + "Coordinates:\n", + " * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00\n", + " * variable (variable) object 40B 'ElecDemand(P_el)|fixed_relative_profil...\n", + "Data variables:\n", + " original (variable, time) float64 119kB 58.39 58.36 58.11 ... 152.1 151.0\n", + " aggregated (variable, time) float64 119kB 51.79 51.79 51.79 ... 163.1 163.1, figure=Figure({\n", + " 'data': [{'hovertemplate': ('variable=Original - ElecDemand' ... '}
value=%{y}') ... [Plotly Figure repr truncated: dashed 'Original' vs solid 'Aggregated' (8 typical days × 4 segments) traces for the same five time series; layout: title 'Original vs Aggregated Data (original = ---)', x-axis 'Time in h', y-axis 'Value'] ...\n",
       "}))"
      ],
      "text/html": [
       "[interactive Plotly chart omitted]
" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "execution_count": 13 }, { "cell_type": "markdown", @@ -618,36 +5235,49 @@ }, { "cell_type": "code", - "execution_count": 19, "id": "15", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:15:04.217161Z", - "start_time": "2025-12-14T14:35:51.686260Z" + "end_time": "2025-12-14T15:23:35.046838Z", + "start_time": "2025-12-14T15:23:19.572483Z" } }, + "source": [ + "solver = fx.solvers.HighsSolver(mip_gap=0.01)\n", + "\n", + "start = timeit.default_timer()\n", + "fs_full = flow_system.copy()\n", + "fs_full.optimize(solver)\n", + "time_full = timeit.default_timer() - start\n", + "\n", + "print(f'Full optimization: {time_full:.2f} seconds')\n", + "print(f'Cost: {fs_full.solution[\"costs\"].item():,.0f} €')\n", + "print('\\nOptimized sizes:')\n", + "for name, size in fs_full.statistics.sizes.items():\n", + " print(f' {name}: {float(size.item()):.1f}')" + ], "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "\u001B[2m2025-12-14 15:35:52.048\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", - "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", - "\u001B[2m2025-12-14 15:35:52.216\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", - "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n" + "\u001b[2m2025-12-14 16:23:19.851\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", + "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", + "\u001b[2m2025-12-14 16:23:19.935\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", + "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). 
Consider using status_parameters to allow the Flow to be switched active and inactive.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 64/64 [00:00<00:00, 75.17it/s] \n", - "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 55/55 [00:00<00:00, 421.23it/s]\n", - "Writing binary variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 5/5 [00:00<00:00, 368.46it/s]\n" + "Writing constraints.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 64/64 [00:00<00:00, 160.20it/s]\n", + "Writing continuous variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 55/55 [00:00<00:00, 1037.01it/s]\n", + "Writing binary variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 5/5 [00:00<00:00, 1103.71it/s]\n" ] }, { @@ -655,7 +5285,7 @@ "output_type": "stream", "text": [ "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", - "MIP linopy-problem-dqtvcofp has 89316 rows; 80386 cols; 264919 nonzeros; 5955 integer variables (5955 binary)\n", + "MIP linopy-problem-56t2tceu has 89316 rows; 80386 cols; 264919 nonzeros; 5955 integer variables (5955 binary)\n", "Coefficient ranges:\n", " Matrix [1e-05, 1e+03]\n", " Cost [1e+00, 1e+00]\n", @@ -680,26 +5310,26 @@ " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. | LpIters Time\n", "\n", - " 0 0 0 0.00% -48251946.82856 inf inf 0 0 0 0 0.8s\n", - " R 0 0 0 0.00% 2209206.133553 2278967.860722 3.06% 0 0 0 15439 2.3s\n", - " C 0 0 0 0.00% 2209206.133553 2276813.637485 2.97% 7380 2937 0 18513 6.5s\n", - " 0 0 0 0.00% 2209206.133553 2276813.637485 2.97% 7578 2982 0 18623 11.5s\n", - " L 0 0 0 0.00% 2209206.133553 2209206.150262 0.00% 7578 2989 0 18631 23.4s\n", - " 1 0 1 100.00% 2209206.133553 2209206.150262 0.00% 7578 2989 0 21605 23.5s\n", + " 0 0 0 0.00% -48251946.82856 inf inf 0 0 0 0 0.4s\n", + " R 0 0 0 0.00% 2209206.133553 2278967.860722 3.06% 0 0 0 15439 1.1s\n", + " C 0 0 0 0.00% 2209206.133553 2276813.637485 2.97% 7380 2937 0 18513 3.5s\n", + " 0 0 0 0.00% 2209206.133553 2276813.637485 2.97% 7578 2988 0 18630 8.6s\n", + " L 0 0 0 0.00% 2209206.133553 2209206.150262 0.00% 7578 2989 0 18631 13.4s\n", + " 1 0 1 100.00% 2209206.133553 2209206.150262 0.00% 7578 2989 0 21605 13.5s\n", "\n", "Solving report\n", - " Model linopy-problem-dqtvcofp\n", + " Model linopy-problem-56t2tceu\n", " Status Optimal\n", " Primal bound 2209206.15026\n", " Dual bound 2209206.13355\n", " Gap 0% (tolerance: 1%)\n", - " P-D integral 0.629336568023\n", + " P-D integral 0.366913523912\n", " Solution status feasible\n", " 2209206.15026 (objective)\n", " 0 (bound viol.)\n", " 0 (int. 
viol.)\n", " 0 (row viol.)\n", - " Timing 23.46\n", + " Timing 13.45\n", " Max sub-MIP depth 2\n", " Nodes 1\n", " Repair LPs 0\n", @@ -707,7 +5337,7 @@ " 0 (strong br.)\n", " 3192 (separation)\n", " 2974 (heuristics)\n", - "Full optimization: 27.13 seconds\n", + "Full optimization: 15.47 seconds\n", "Cost: 2,209,206 €\n", "\n", "Optimized sizes:\n", @@ -717,20 +5347,7 @@ ] } ], - "source": [ - "solver = fx.solvers.HighsSolver(mip_gap=0.01)\n", - "\n", - "start = timeit.default_timer()\n", - "fs_full = flow_system.copy()\n", - "fs_full.optimize(solver)\n", - "time_full = timeit.default_timer() - start\n", - "\n", - "print(f'Full optimization: {time_full:.2f} seconds')\n", - "print(f'Cost: {fs_full.solution[\"costs\"].item():,.0f} €')\n", - "print('\\nOptimized sizes:')\n", - "for name, size in fs_full.statistics.sizes.items():\n", - " print(f' {name}: {float(size.item()):.1f}')" - ] + "execution_count": 14 }, { "cell_type": "markdown", @@ -740,36 +5357,54 @@ }, { "cell_type": "code", - "execution_count": 20, "id": "17", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:15:04.218001Z", - "start_time": "2025-12-14T14:36:18.969306Z" + "end_time": "2025-12-14T15:23:38.482737Z", + "start_time": "2025-12-14T15:23:35.095109Z" } }, + "source": [ + "start = timeit.default_timer()\n", + "\n", + "# Cluster into 8 typical days\n", + "fs_clustered = flow_system.transform.cluster(\n", + " n_clusters=8,\n", + " cluster_duration='1D',\n", + ")\n", + "\n", + "fs_clustered.optimize(solver)\n", + "time_clustered = timeit.default_timer() - start\n", + "\n", + "print(f'Clustered optimization: {time_clustered:.2f} seconds')\n", + "print(f'Cost: {fs_clustered.solution[\"costs\"].item():,.0f} €')\n", + "print(f'Speedup: {time_full / time_clustered:.1f}x')\n", + "print('\\nOptimized sizes:')\n", + "for name, size in fs_clustered.statistics.sizes.items():\n", + " print(f' {name}: {float(size.item()):.1f}')" + ], "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "\u001B[2m2025-12-14 15:36:19.954\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", - "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", - "\u001B[2m2025-12-14 15:36:20.127\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", - "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). 
Consider using status_parameters to allow the Flow to be switched active and inactive.\n" + "\u001b[2m2025-12-14 16:23:35.770\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", + "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", + "\u001b[2m2025-12-14 16:23:35.843\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", + "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 81/81 [00:01<00:00, 65.44it/s]\n", - "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 55/55 [00:00<00:00, 808.42it/s]\n", - "Writing binary variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 5/5 [00:00<00:00, 766.39it/s]\n" + "Writing constraints.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 81/81 [00:00<00:00, 190.84it/s]\n", + "Writing continuous variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 55/55 [00:00<00:00, 830.91it/s]\n", + "Writing binary variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 5/5 [00:00<00:00, 1081.84it/s]\n" ] }, { @@ -777,7 +5412,7 @@ "output_type": "stream", "text": [ "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", - "MIP linopy-problem-bhnhp1id has 126461 rows; 80386 cols; 339209 nonzeros; 5955 integer variables (5955 binary)\n", + "MIP linopy-problem-7bbt94cv has 126461 rows; 80386 cols; 339209 nonzeros; 5955 integer variables (5955 binary)\n", "Coefficient ranges:\n", " Matrix [1e-05, 1e+03]\n", " Cost [1e+00, 1e+00]\n", @@ -802,24 +5437,24 @@ " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. 
| LpIters Time\n", "\n", - " 0 0 0 0.00% -35212528.89731 inf inf 0 0 0 0 0.4s\n", - " 0 0 0 0.00% 2215408.582854 inf inf 0 0 0 3609 0.6s\n", - " R 0 0 0 0.00% 2215408.582854 2215424.331523 0.00% 4826 751 0 4378 1.5s\n", - " 1 0 1 100.00% 2215408.582854 2215424.331523 0.00% 4826 751 0 4378 1.5s\n", + " 0 0 0 0.00% -35212528.89731 inf inf 0 0 0 0 0.2s\n", + " 0 0 0 0.00% 2215408.582854 inf inf 0 0 0 3609 0.3s\n", + " R 0 0 0 0.00% 2215408.582854 2215424.331523 0.00% 4826 751 0 4378 0.7s\n", + " 1 0 1 100.00% 2215408.582854 2215424.331523 0.00% 4826 751 0 4378 0.8s\n", "\n", "Solving report\n", - " Model linopy-problem-bhnhp1id\n", + " Model linopy-problem-7bbt94cv\n", " Status Optimal\n", " Primal bound 2215424.33152\n", " Dual bound 2215408.58285\n", " Gap 0.000711% (tolerance: 1%)\n", - " P-D integral 3.61566538396e-08\n", + " P-D integral 3.17706101743e-08\n", " Solution status feasible\n", " 2215424.33152 (objective)\n", " 0 (bound viol.)\n", " 0 (int. viol.)\n", " 0 (row viol.)\n", - " Timing 1.53\n", + " Timing 0.75\n", " Max sub-MIP depth 0\n", " Nodes 1\n", " Repair LPs 0\n", @@ -827,9 +5462,9 @@ " 0 (strong br.)\n", " 769 (separation)\n", " 0 (heuristics)\n", - "Clustered optimization: 6.54 seconds\n", + "Clustered optimization: 3.38 seconds\n", "Cost: 2,215,424 €\n", - "Speedup: 4.1x\n", + "Speedup: 4.6x\n", "\n", "Optimized sizes:\n", " CHP(Q_th): 300.0\n", @@ -838,25 +5473,7 @@ ] } ], - "source": [ - "start = timeit.default_timer()\n", - "\n", - "# Cluster into 8 typical days\n", - "fs_clustered = flow_system.transform.cluster(\n", - " n_clusters=8,\n", - " cluster_duration='1D',\n", - ")\n", - "\n", - "fs_clustered.optimize(solver)\n", - "time_clustered = timeit.default_timer() - start\n", - "\n", - "print(f'Clustered optimization: {time_clustered:.2f} seconds')\n", - "print(f'Cost: {fs_clustered.solution[\"costs\"].item():,.0f} €')\n", - "print(f'Speedup: {time_full / time_clustered:.1f}x')\n", - "print('\\nOptimized sizes:')\n", - "for name, size in fs_clustered.statistics.sizes.items():\n", - " print(f' {name}: {float(size.item()):.1f}')" - ] + "execution_count": 15 }, { "cell_type": "markdown", @@ -866,10 +5483,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "puisldf6fa", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T15:23:41.658320Z", + "start_time": "2025-12-14T15:23:38.516100Z" + } + }, "source": [ "start = timeit.default_timer()\n", "\n", @@ -889,7 +5509,99 @@ "print('\\nOptimized sizes:')\n", "for name, size in fs_segmented.statistics.sizes.items():\n", " print(f' {name}: {float(size.item()):.1f}')" - ] + ], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[2m2025-12-14 16:23:39.304\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", + "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). 
Consider using status_parameters to allow the Flow to be switched active and inactive.\n", + "\u001b[2m2025-12-14 16:23:39.395\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", + "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Writing constraints.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 81/81 [00:00<00:00, 182.90it/s]\n", + "Writing continuous variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 55/55 [00:00<00:00, 756.45it/s]\n", + "Writing binary variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 5/5 [00:00<00:00, 900.41it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", + "MIP linopy-problem-0bf83fdn has 137800 rows; 80386 cols; 361887 nonzeros; 5955 integer variables (5955 binary)\n", + "Coefficient ranges:\n", + " Matrix [1e-05, 1e+03]\n", + " Cost [1e+00, 1e+00]\n", + " Bound [1e+00, 1e+03]\n", + " RHS [1e+00, 1e+00]\n", + "Presolving model\n", + "41647 rows, 1246 cols, 98274 nonzeros 0s\n", + "29666 rows, 749 cols, 62308 nonzeros 0s\n", + "1125 rows, 534 cols, 2267 nonzeros 0s\n", + "1123 rows, 159 cols, 1027 nonzeros 0s\n", + "501 rows, 159 cols, 1025 nonzeros 0s\n", + "Presolve reductions: rows 501(-137299); columns 159(-80227); nonzeros 1025(-360862) \n", + "\n", + "Solving MIP model with:\n", + " 501 rows\n", + " 159 cols (1 binary, 0 integer, 0 implied int., 158 continuous, 0 domain fixed)\n", + " 1025 nonzeros\n", + "\n", + "Src: B => Branching; C => Central rounding; F => Feasibility pump; H => Heuristic;\n", + " I => Shifting; J => Feasibility jump; L => Sub-MIP; P => Empty MIP; R => Randomized rounding;\n", + " S => Solve LP; T => Evaluate node; U => Unbounded; X => User solution; Y => HiGHS solution;\n", + " Z => ZI Round; l => Trivial lower; p => Trivial point; u => Trivial upper; z => Trivial zero\n", + "\n", + " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", + "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. | LpIters Time\n", + "\n", + " J 0 0 0 0.00% -inf 2426442.894624 Large 0 0 0 0 0.2s\n", + " T 0 0 0 0.00% -144158.139812 2407140.32574 105.99% 0 0 0 18 0.2s\n", + " 1 0 1 100.00% 2407140.32574 2407140.32574 0.00% 0 0 0 18 0.2s\n", + "\n", + "Solving report\n", + " Model linopy-problem-0bf83fdn\n", + " Status Optimal\n", + " Primal bound 2407140.32574\n", + " Dual bound 2407140.32574\n", + " Gap 0% (tolerance: 1%)\n", + " P-D integral 0.00398780947211\n", + " Solution status feasible\n", + " 2407140.32574 (objective)\n", + " 0 (bound viol.)\n", + " 0 (int. 
viol.)\n", + " 0 (row viol.)\n", + " Timing 0.19\n", + " Max sub-MIP depth 0\n", + " Nodes 1\n", + " Repair LPs 0\n", + " LP iterations 18\n", + " 0 (strong br.)\n", + " 0 (separation)\n", + " 0 (heuristics)\n", + "Segmentation optimization: 3.14 seconds\n", + "Cost: 2,407,140 €\n", + "Speedup: 4.9x\n", + "\n", + "Optimized sizes:\n", + " CHP(Q_th): 248.4\n", + " Boiler(Q_th): 0.0\n", + " Storage: 0.0\n" + ] + } + ], + "execution_count": 16 }, { "cell_type": "markdown", @@ -899,10 +5611,13 @@ }, { "cell_type": "code", - "execution_count": null, "id": "frq1vct5l4v", - "metadata": {}, - "outputs": [], + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T15:23:45.087307Z", + "start_time": "2025-12-14T15:23:41.695745Z" + } + }, "source": [ "start = timeit.default_timer()\n", "\n", @@ -922,7 +5637,95 @@ "print('\\nOptimized sizes:')\n", "for name, size in fs_combined_opt.statistics.sizes.items():\n", " print(f' {name}: {float(size.item()):.1f}')" - ] + ], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[2m2025-12-14 16:23:42.373\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", + "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", + "\u001b[2m2025-12-14 16:23:42.449\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", + "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). 
Consider using status_parameters to allow the Flow to be switched active and inactive.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Writing constraints.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 98/98 [00:00<00:00, 175.72it/s]\n", + "Writing continuous variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 55/55 [00:00<00:00, 1011.42it/s]\n", + "Writing binary variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 5/5 [00:00<00:00, 1102.60it/s]\n", + "Optimization potentially failed: \n", + "Status: warning\n", + "Termination condition: infeasible\n", + "Solution: 0 primals, 0 duals\n", + "Objective: nan\n", + "Solver model: available\n", + "Solver message: Infeasible\n", + "\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", + "MIP linopy-problem-mhjwmmv5 has 138973 rows; 80386 cols; 364233 nonzeros; 5955 integer variables (5955 binary)\n", + "Coefficient ranges:\n", + " Matrix [1e-05, 1e+03]\n", + " Cost [1e+00, 1e+00]\n", + " Bound [1e+00, 1e+03]\n", + " RHS [1e+00, 1e+00]\n", + "Presolving model\n", + "Presolve: Infeasible\n", + "\n", + "Src: B => Branching; C => Central rounding; F => Feasibility pump; H => Heuristic;\n", + " I => Shifting; J => Feasibility jump; L => Sub-MIP; P => Empty MIP; R => Randomized rounding;\n", + " S => Solve LP; T => Evaluate node; U => Unbounded; X => User solution; Y => HiGHS solution;\n", + " Z => ZI Round; l => Trivial lower; p => Trivial point; u => Trivial upper; z => Trivial zero\n", + "\n", + " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", + "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. | LpIters Time\n", + "\n", + " 0 0 0 0.00% -inf inf inf 0 0 0 0 0.1s\n", + "\n", + "Solving report\n", + " Model linopy-problem-mhjwmmv5\n", + " Status Infeasible\n", + " Primal bound inf\n", + " Dual bound -inf\n", + " Gap inf\n", + " P-D integral 0\n", + " Solution status -\n", + " Timing 0.12\n", + " Max sub-MIP depth 0\n", + " Nodes 0\n", + " Repair LPs 0\n", + " LP iterations 0\n" + ] + }, + { + "ename": "NotImplementedError", + "evalue": "Computing infeasibilities is only supported for Gurobi and Xpress solvers. 
Current solver model type: Highs", + "output_type": "error", + "traceback": [ + "\u001b[31m---------------------------------------------------------------------------\u001b[39m", + "\u001b[31mNotImplementedError\u001b[39m Traceback (most recent call last)", + "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[17]\u001b[39m\u001b[32m, line 10\u001b[39m\n\u001b[32m 3\u001b[39m \u001b[38;5;66;03m# Combined: 8 typical days × 4 segments each\u001b[39;00m\n\u001b[32m 4\u001b[39m fs_combined_opt = flow_system.transform.cluster(\n\u001b[32m 5\u001b[39m n_clusters=\u001b[32m8\u001b[39m,\n\u001b[32m 6\u001b[39m cluster_duration=\u001b[33m'\u001b[39m\u001b[33m1D\u001b[39m\u001b[33m'\u001b[39m,\n\u001b[32m 7\u001b[39m n_segments=\u001b[32m4\u001b[39m,\n\u001b[32m 8\u001b[39m )\n\u001b[32m---> \u001b[39m\u001b[32m10\u001b[39m \u001b[43mfs_combined_opt\u001b[49m\u001b[43m.\u001b[49m\u001b[43moptimize\u001b[49m\u001b[43m(\u001b[49m\u001b[43msolver\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 11\u001b[39m time_combined = timeit.default_timer() - start\n\u001b[32m 13\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33mf\u001b[39m\u001b[33m'\u001b[39m\u001b[33mCombined optimization: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtime_combined\u001b[38;5;132;01m:\u001b[39;00m\u001b[33m.2f\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m seconds\u001b[39m\u001b[33m'\u001b[39m)\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/PycharmProjects/flixopt_182303/flixopt/optimize_accessor.py:89\u001b[39m, in \u001b[36mOptimizeAccessor.__call__\u001b[39m\u001b[34m(self, solver, normalize_weights)\u001b[39m\n\u001b[32m 57\u001b[39m \u001b[38;5;250m\u001b[39m\u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 58\u001b[39m \u001b[33;03mBuild and solve the optimization model in one step.\u001b[39;00m\n\u001b[32m 59\u001b[39m \n\u001b[32m (...)\u001b[39m\u001b[32m 86\u001b[39m \u001b[33;03m >>> solution = flow_system.optimize(solver).solution\u001b[39;00m\n\u001b[32m 87\u001b[39m \u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 88\u001b[39m \u001b[38;5;28mself\u001b[39m._fs.build_model(normalize_weights)\n\u001b[32m---> \u001b[39m\u001b[32m89\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_fs\u001b[49m\u001b[43m.\u001b[49m\u001b[43msolve\u001b[49m\u001b[43m(\u001b[49m\u001b[43msolver\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 90\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m._fs\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/PycharmProjects/flixopt_182303/flixopt/flow_system.py:1341\u001b[39m, in \u001b[36mFlowSystem.solve\u001b[39m\u001b[34m(self, solver)\u001b[39m\n\u001b[32m 1339\u001b[39m \u001b[38;5;66;03m# Redirect stdout to our buffer\u001b[39;00m\n\u001b[32m 1340\u001b[39m \u001b[38;5;28;01mwith\u001b[39;00m redirect_stdout(f):\n\u001b[32m-> \u001b[39m\u001b[32m1341\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m.\u001b[49m\u001b[43mprint_infeasibilities\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 1343\u001b[39m infeasibilities = f.getvalue()\n\u001b[32m 1344\u001b[39m logger.error(\u001b[33m'\u001b[39m\u001b[33mSuccessfully extracted infeasibilities: \u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[33m'\u001b[39m, infeasibilities)\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/PycharmProjects/flixopt_182303/.venv/lib/python3.11/site-packages/linopy/model.py:1470\u001b[39m, in \u001b[36mModel.print_infeasibilities\u001b[39m\u001b[34m(self, 
display_max_terms)\u001b[39m\n\u001b[32m 1451\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mprint_infeasibilities\u001b[39m(\u001b[38;5;28mself\u001b[39m, display_max_terms: \u001b[38;5;28mint\u001b[39m | \u001b[38;5;28;01mNone\u001b[39;00m = \u001b[38;5;28;01mNone\u001b[39;00m) -> \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[32m 1452\u001b[39m \u001b[38;5;250m \u001b[39m\u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 1453\u001b[39m \u001b[33;03m Print a list of infeasible constraints.\u001b[39;00m\n\u001b[32m 1454\u001b[39m \n\u001b[32m (...)\u001b[39m\u001b[32m 1468\u001b[39m \u001b[33;03m infeasible constraints.\u001b[39;00m\n\u001b[32m 1469\u001b[39m \u001b[33;03m \"\"\"\u001b[39;00m\n\u001b[32m-> \u001b[39m\u001b[32m1470\u001b[39m labels = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mcompute_infeasibilities\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 1471\u001b[39m \u001b[38;5;28mself\u001b[39m.constraints.print_labels(labels, display_max_terms=display_max_terms)\n", + "\u001b[36mFile \u001b[39m\u001b[32m~/PycharmProjects/flixopt_182303/.venv/lib/python3.11/site-packages/linopy/model.py:1355\u001b[39m, in \u001b[36mModel.compute_infeasibilities\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 1349\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mNotImplementedError\u001b[39;00m(\n\u001b[32m 1350\u001b[39m \u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mComputing infeasibilities is not supported for \u001b[39m\u001b[33m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00msolver_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m'\u001b[39m\u001b[33m solver. \u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 1351\u001b[39m \u001b[33m\"\u001b[39m\u001b[33mOnly Gurobi and Xpress solvers support IIS computation.\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 1352\u001b[39m )\n\u001b[32m 1353\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m 1354\u001b[39m \u001b[38;5;66;03m# We have a solver model but it's not a supported type\u001b[39;00m\n\u001b[32m-> \u001b[39m\u001b[32m1355\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mNotImplementedError\u001b[39;00m(\n\u001b[32m 1356\u001b[39m \u001b[33m\"\u001b[39m\u001b[33mComputing infeasibilities is only supported for Gurobi and Xpress solvers. \u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 1357\u001b[39m \u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mCurrent solver model type: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mtype\u001b[39m(solver_model).\u001b[34m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\n\u001b[32m 1358\u001b[39m )\n", + "\u001b[31mNotImplementedError\u001b[39m: Computing infeasibilities is only supported for Gurobi and Xpress solvers. 
Current solver model type: Highs" + ] + } + ], + "execution_count": 17 }, { "cell_type": "markdown", @@ -934,15 +5737,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "19", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:15:04.218208Z", - "start_time": "2025-12-14T14:36:25.608382Z" - } - }, - "outputs": [], + "metadata": {}, "source": [ "results = {\n", " 'Full (baseline)': {\n", @@ -992,7 +5788,9 @@ " 'Speedup': '{:.1f}x',\n", " }\n", ")" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", @@ -1007,15 +5805,8 @@ }, { "cell_type": "code", - "execution_count": null, "id": "21", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:15:04.218834Z", - "start_time": "2025-12-14T14:36:25.657257Z" - } - }, - "outputs": [], + "metadata": {}, "source": [ "# Load raw data for multi-period example\n", "data = pd.read_csv('../../examples/resources/Zeitreihen2020.csv', index_col=0, parse_dates=True).sort_index()\n", @@ -1068,31 +5859,14 @@ ")\n", "\n", "print(f'Multi-period system: {len(fs_mp.timesteps)} timesteps × {len(fs_mp.periods)} periods')" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": 23, "id": "22", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:15:04.219477Z", - "start_time": "2025-12-14T14:36:25.699998Z" - } - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001B[2m2025-12-14 15:36:25.701\u001B[0m \u001B[33mWARNING \u001B[0m │ FlowSystem is not connected_and_transformed. Connecting and transforming data now.\n", - "Clustering was applied to 3 period(s):\n", - " - period=2024\n", - " - period=2025\n", - " - period=2026\n" - ] - } - ], + "metadata": {}, "source": [ "# Cluster - each period gets clustered independently\n", "fs_mp_clustered = fs_mp.transform.cluster(n_clusters=4, cluster_duration='1D')\n", @@ -1102,59 +5876,21 @@ "print(f'Clustering was applied to {len(clustering_info[\"clustering_results\"])} period(s):')\n", "for (period, _scenario), _ in clustering_info['clustering_results'].items():\n", " print(f' - period={period}')" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "code", - "execution_count": 24, "id": "23", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:15:04.219676Z", - "start_time": "2025-12-14T14:36:27.402738Z" - } - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 38/38 [00:00<00:00, 80.29it/s]\n", - "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 22/22 [00:00<00:00, 398.66it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", - "LP linopy-problem-u73pgf9e has 49392 rows; 40356 cols; 131016 nonzeros\n", - "Coefficient ranges:\n", - " Matrix [2e-01, 2e+01]\n", - " Cost [1e+00, 1e+00]\n", - " Bound [5e+01, 1e+03]\n", - " RHS [0e+00, 0e+00]\n", - "Presolving model\n", - "0 rows, 0 cols, 0 nonzeros 0s\n", - "0 rows, 0 cols, 0 nonzeros 0s\n", - "Presolve reductions: rows 0(-49392); columns 0(-40356); nonzeros 0(-131016) - Reduced to empty\n", - "Performed postsolve\n", - "Solving the original LP from the solution after postsolve\n", - "\n", - "Model name : linopy-problem-u73pgf9e\n", - "Model status : Optimal\n", - "Objective value : 1.3352558890e+07\n", - "P-D objective error : 1.7437154695e-15\n", - 
"HiGHS run time : 0.17\n", - "Multi-period clustered cost: 13,352,559 €\n" - ] - } - ], + "metadata": {}, "source": [ "# Optimize\n", "fs_mp_clustered.optimize(solver)\n", "print(f'Multi-period clustered cost: {fs_mp_clustered.solution[\"costs\"].sum().item():,.0f} €')" - ] + ], + "outputs": [], + "execution_count": null }, { "cell_type": "markdown", From db3e37e5e47885f4cf46375299fa59f6d66b12e4 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 14 Dec 2025 16:31:45 +0100 Subject: [PATCH 017/191] Fix inter-cluster segmentation --- flixopt/clustering.py | 61 +++++++++++++++++++++++++++++++++++++------ 1 file changed, 53 insertions(+), 8 deletions(-) diff --git a/flixopt/clustering.py b/flixopt/clustering.py index da926545a..94e117102 100644 --- a/flixopt/clustering.py +++ b/flixopt/clustering.py @@ -236,7 +236,8 @@ def get_cluster_indices(self) -> dict[str, list[np.ndarray]]: clusters = self.tsam.clusterPeriodNoOccur.keys() index_vectors = {cluster: [] for cluster in clusters} - period_length = len(self.tsam.stepIdx) + # Use actual timesteps per period, not segment count + period_length = int(self.hours_per_period / self.hours_per_time_step) total_steps = len(self.tsam.timeSeries) for period, cluster_id in enumerate(self.tsam.clusterOrder): @@ -281,6 +282,48 @@ def get_equation_indices(self, skip_first_index_of_period: bool = True) -> tuple # Convert lists to numpy arrays return np.array(idx_var1), np.array(idx_var2) + def get_segment_equation_indices(self) -> tuple[np.ndarray, np.ndarray]: + """ + Generates pairs of indices for intra-segment equalization. + + When segmentation is enabled, all timesteps within the same segment should have + equal values. This method returns index pairs where each timestep in a segment + is paired with the first timestep of that segment. + + Returns: + tuple[np.ndarray, np.ndarray]: Two arrays of indices. For each pair (i, j), + variable[i] should equal variable[j]. + + Note: + Only generates constraints when n_segments is set. Returns empty arrays otherwise. + """ + if self.n_segments is None: + return np.array([]), np.array([]) + + idx_var1 = [] + idx_var2 = [] + + period_length = int(self.hours_per_period / self.hours_per_time_step) + segment_duration_dict = self.tsam.segmentDurationDict['Segment Duration'] + + for period_idx, cluster_id in enumerate(self.tsam.clusterOrder): + period_offset = period_idx * period_length + start_step = 0 + + for seg_idx in range(self.n_segments): + # Get duration for this (cluster, segment) + duration = segment_duration_dict[(cluster_id, seg_idx)] + + # Equate all timesteps in this segment to the first timestep + first_ts = period_offset + start_step + for step in range(1, duration): + idx_var1.append(first_ts) + idx_var2.append(period_offset + start_step + step) + + start_step += duration + + return np.array(idx_var1), np.array(idx_var2) + def _parse_cluster_duration(duration: str | float) -> float: """Convert cluster duration to hours. 
@@ -495,11 +538,6 @@ def _equate_indices_multi_dimensional(self, variable: linopy.Variable) -> None: has_scenario = 'scenario' in var_dims for (period_label, scenario_label), clustering in self.clustering_data_dict.items(): - indices = clustering.get_equation_indices(skip_first_index_of_period=True) - - if len(indices[0]) == 0: - continue # No constraints needed for this cluster - # Build selector for this period/scenario combination selector = {} if has_period and period_label is not None: @@ -520,8 +558,15 @@ def _equate_indices_multi_dimensional(self, variable: linopy.Variable) -> None: if scenario_label is not None: dim_suffix += f'_s{scenario_label}' - # Equate indices within this slice - self._equate_indices(var_slice, indices, dim_suffix, variable.name) + # 1. Inter-period clustering constraints (equate timesteps across periods in same cluster) + cluster_indices = clustering.get_equation_indices(skip_first_index_of_period=True) + if len(cluster_indices[0]) > 0: + self._equate_indices(var_slice, cluster_indices, dim_suffix + '_cluster', variable.name) + + # 2. Intra-segment constraints (equate timesteps within same segment) + segment_indices = clustering.get_segment_equation_indices() + if len(segment_indices[0]) > 0: + self._equate_indices(var_slice, segment_indices, dim_suffix + '_segment', variable.name) def _equate_indices( self, From bcf713641b14eddfaafd99b37956883283d185b3 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 14 Dec 2025 16:34:48 +0100 Subject: [PATCH 018/191] Improve notebook to use more segments --- docs/notebooks/08c-clustering.ipynb | 1648 +++++++++++---------------- 1 file changed, 674 insertions(+), 974 deletions(-) diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb index 93cc6a70c..f5c22c5b5 100644 --- a/docs/notebooks/08c-clustering.ipynb +++ b/docs/notebooks/08c-clustering.ipynb @@ -16,25 +16,14 @@ }, { "cell_type": "code", + "execution_count": 21, "id": "2", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:23:07.849032Z", - "start_time": "2025-12-14T15:23:03.981261Z" + "end_time": "2025-12-14T15:34:02.670767Z", + "start_time": "2025-12-14T15:34:02.373234Z" } }, - "source": [ - "import timeit\n", - "\n", - "import numpy as np\n", - "import pandas as pd\n", - "import plotly.graph_objects as go\n", - "from plotly.subplots import make_subplots\n", - "\n", - "import flixopt as fx\n", - "\n", - "fx.CONFIG.notebook()" - ], "outputs": [ { "data": { @@ -42,12 +31,23 @@ "flixopt.config.CONFIG" ] }, - "execution_count": 2, + "execution_count": 21, "metadata": {}, "output_type": "execute_result" } ], - "execution_count": 2 + "source": [ + "import timeit\n", + "\n", + "import numpy as np\n", + "import pandas as pd\n", + "import plotly.graph_objects as go\n", + "from plotly.subplots import make_subplots\n", + "\n", + "import flixopt as fx\n", + "\n", + "fx.CONFIG.notebook()" + ] }, { "cell_type": "markdown", @@ -57,41 +57,44 @@ }, { "cell_type": "code", + "execution_count": null, "id": "4", "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:23:08.098050Z", - "start_time": "2025-12-14T15:23:07.872444Z" + "jupyter": { + "is_executing": true } }, - "source": "from pathlib import Path\n\n# Generate example data if not present (for local development)\ndata_file = Path('data/district_heating_system.nc4')\nif not data_file.exists():\n from data.generate_example_systems import create_district_heating_system\n\n fs = create_district_heating_system()\n 
fs.optimize(fx.solvers.HighsSolver(log_to_console=False))\n    fs.to_netcdf(data_file, overwrite=True)\n\n# Load the district heating system (real data from Zeitreihen2020.csv)\nflow_system = fx.FlowSystem.from_netcdf(data_file)\n\ntimesteps = flow_system.timesteps\nprint(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 96:.0f} days at 15-min resolution)')\nprint(f'Components: {list(flow_system.components.keys())}')",
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "5",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-12-14T15:30:51.930035Z",
     "start_time": "2025-12-14T15:30:51.835194Z"
    }
   },
   "source": [
    "# Visualize first two weeks of data\n",
    "heat_demand = flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile\n",
    "electricity_price = flow_system.components['GridBuy'].outputs[0].effects_per_flow_hour['costs']\n",
    "\n",
    "fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.1)\n",
    "\n",
    "fig.add_trace(go.Scatter(x=timesteps[:1344], y=heat_demand.values[:1344], name='Heat Demand'), row=1, col=1)\n",
    "fig.add_trace(go.Scatter(x=timesteps[:1344], y=electricity_price.values[:1344], name='Electricity Price'), row=2, col=1)\n",
    "\n",
    "fig.update_layout(height=400, title='First Two Weeks of Data')\n",
    "fig.update_yaxes(title_text='Heat Demand [MW]', row=1, col=1)\n",
    "fig.update_yaxes(title_text='El. Price [€/MWh]', row=2, col=1)\n",
    "fig.show()"
   ],
   "outputs": [
    [elided: rendered Plotly figure. The raw notebook embeds the full minified plotly.js bundle plus the figure HTML as a text/html output at this point, several thousand lines of diff context with no reviewable content. Recoverable information: figure title 'First Two Weeks of Data', top panel 'Heat Demand [MW]', bottom panel 'El. Price [€/MWh]'.]
u=r.labelExtraX*r.pxmid[1]/r.pxmid[0],c=r.yLabelMid+r.labelExtraY-(r.cyFinal+r.pxmid[1]);Math.abs(u)>Math.abs(c)?s+=\"l\"+c*r.pxmid[0]/r.pxmid[1]+\",\"+c+\"H\"+(a+r.labelExtraX+l):s+=\"l\"+r.labelExtraX+\",\"+u+\"v\"+(c-u)+\"h\"+l}else s+=\"V\"+(r.yLabelMid+r.labelExtraY)+\"h\"+l;rv.ensureSingle(n,\"path\",\"textline\").call(hke.stroke,t.outsidetextfont.color).attr({\"stroke-width\":Math.min(2,t.outsidetextfont.size/8),d:s,fill:\"none\"})})}function pke(e,t,r){var n=r[0],i=n.cx,a=n.cy,o=n.trace,s=o.type===\"funnelarea\";\"_hasHoverLabel\"in o||(o._hasHoverLabel=!1),\"_hasHoverEvent\"in o||(o._hasHoverEvent=!1),e.on(\"mouseover\",function(l){var u=t._fullLayout,c=t._fullData[o.index];if(!(t._dragging||u.hovermode===!1)){var f=c.hoverinfo;if(Array.isArray(f)&&(f=pD.castHoverinfo({hoverinfo:[ns.castOption(f,l.pts)],_module:o._module},u,0)),f===\"all\"&&(f=\"label+text+value+percent+name\"),c.hovertemplate||f!==\"none\"&&f!==\"skip\"&&f){var h=l.rInscribed||0,d=i+l.pxmid[0]*(1-h),v=a+l.pxmid[1]*(1-h),_=u.separators,b=[];if(f&&f.indexOf(\"label\")!==-1&&b.push(l.label),l.text=ns.castOption(c.hovertext||c.text,l.pts),f&&f.indexOf(\"text\")!==-1){var p=l.text;rv.isValidTextValue(p)&&b.push(p)}l.value=l.v,l.valueLabel=ns.formatPieValue(l.v,_),f&&f.indexOf(\"value\")!==-1&&b.push(l.valueLabel),l.percent=l.v/n.vTotal,l.percentLabel=ns.formatPiePercent(l.percent,_),f&&f.indexOf(\"percent\")!==-1&&b.push(l.percentLabel);var k=c.hoverlabel,E=k.font,S=[];pD.loneHover({trace:o,x0:d-h*n.r,x1:d+h*n.r,y:v,_x0:s?i+l.TL[0]:d-h*n.r,_x1:s?i+l.TR[0]:d+h*n.r,_y0:s?a+l.TL[1]:v-h*n.r,_y1:s?a+l.BL[1]:v+h*n.r,text:b.join(\"
\"),name:c.hovertemplate||f.indexOf(\"name\")!==-1?c.name:void 0,idealAlign:l.pxmid[0]<0?\"left\":\"right\",color:ns.castOption(k.bgcolor,l.pts)||l.color,borderColor:ns.castOption(k.bordercolor,l.pts),fontFamily:ns.castOption(E.family,l.pts),fontSize:ns.castOption(E.size,l.pts),fontColor:ns.castOption(E.color,l.pts),nameLength:ns.castOption(k.namelength,l.pts),textAlign:ns.castOption(k.align,l.pts),hovertemplate:ns.castOption(c.hovertemplate,l.pts),hovertemplateLabels:l,eventData:[gD(l,c)]},{container:u._hoverlayer.node(),outerContainer:u._paper.node(),gd:t,inOut_bbox:S}),l.bbox=S[0],o._hasHoverLabel=!0}o._hasHoverEvent=!0,t.emit(\"plotly_hover\",{points:[gD(l,c)],event:Fp.event})}}),e.on(\"mouseout\",function(l){var u=t._fullLayout,c=t._fullData[o.index],f=Fp.select(this).datum();o._hasHoverEvent&&(l.originalEvent=Fp.event,t.emit(\"plotly_unhover\",{points:[gD(f,c)],event:Fp.event}),o._hasHoverEvent=!1),o._hasHoverLabel&&(pD.loneUnhover(u._hoverlayer.node()),o._hasHoverLabel=!1)}),e.on(\"click\",function(l){var u=t._fullLayout,c=t._fullData[o.index];t._dragging||u.hovermode===!1||(t._hoverdata=[gD(l,c)],pD.click(t,Fp.event))})}function VCt(e,t,r){var n=ns.castOption(e.outsidetextfont.color,t.pts)||ns.castOption(e.textfont.color,t.pts)||r.color,i=ns.castOption(e.outsidetextfont.family,t.pts)||ns.castOption(e.textfont.family,t.pts)||r.family,a=ns.castOption(e.outsidetextfont.size,t.pts)||ns.castOption(e.textfont.size,t.pts)||r.size,o=ns.castOption(e.outsidetextfont.weight,t.pts)||ns.castOption(e.textfont.weight,t.pts)||r.weight,s=ns.castOption(e.outsidetextfont.style,t.pts)||ns.castOption(e.textfont.style,t.pts)||r.style,l=ns.castOption(e.outsidetextfont.variant,t.pts)||ns.castOption(e.textfont.variant,t.pts)||r.variant,u=ns.castOption(e.outsidetextfont.textcase,t.pts)||ns.castOption(e.textfont.textcase,t.pts)||r.textcase,c=ns.castOption(e.outsidetextfont.lineposition,t.pts)||ns.castOption(e.textfont.lineposition,t.pts)||r.lineposition,f=ns.castOption(e.outsidetextfont.shadow,t.pts)||ns.castOption(e.textfont.shadow,t.pts)||r.shadow;return{color:n,family:i,size:a,weight:o,style:s,variant:l,textcase:u,lineposition:c,shadow:f}}function gke(e,t,r){var n=ns.castOption(e.insidetextfont.color,t.pts);!n&&e._input.textfont&&(n=ns.castOption(e._input.textfont.color,t.pts));var i=ns.castOption(e.insidetextfont.family,t.pts)||ns.castOption(e.textfont.family,t.pts)||r.family,a=ns.castOption(e.insidetextfont.size,t.pts)||ns.castOption(e.textfont.size,t.pts)||r.size,o=ns.castOption(e.insidetextfont.weight,t.pts)||ns.castOption(e.textfont.weight,t.pts)||r.weight,s=ns.castOption(e.insidetextfont.style,t.pts)||ns.castOption(e.textfont.style,t.pts)||r.style,l=ns.castOption(e.insidetextfont.variant,t.pts)||ns.castOption(e.textfont.variant,t.pts)||r.variant,u=ns.castOption(e.insidetextfont.textcase,t.pts)||ns.castOption(e.textfont.textcase,t.pts)||r.textcase,c=ns.castOption(e.insidetextfont.lineposition,t.pts)||ns.castOption(e.textfont.lineposition,t.pts)||r.lineposition,f=ns.castOption(e.insidetextfont.shadow,t.pts)||ns.castOption(e.textfont.shadow,t.pts)||r.shadow;return{color:n||hke.contrast(t.color),family:i,size:a,weight:o,style:s,variant:l,textcase:u,lineposition:c,shadow:f}}function mke(e,t){for(var r,n,i=0;i=-4;k-=2)p(Math.PI*k,\"tan\");for(k=4;k>=-4;k-=2)p(Math.PI*(k+1),\"tan\")}if(f||d){for(k=4;k>=-4;k-=2)p(Math.PI*(k+1.5),\"rad\");for(k=4;k>=-4;k-=2)p(Math.PI*(k+.5),\"rad\")}}if(s||v||f){var 
E=Math.sqrt(e.width*e.width+e.height*e.height);if(b={scale:i*n*2/E,rCenter:1-i,rotate:0},b.textPosAngle=(t.startangle+t.stopangle)/2,b.scale>=1)return b;_.push(b)}(v||d)&&(b=uke(e,n,o,l,u),b.textPosAngle=(t.startangle+t.stopangle)/2,_.push(b)),(v||h)&&(b=cke(e,n,o,l,u),b.textPosAngle=(t.startangle+t.stopangle)/2,_.push(b));for(var S=0,L=0,x=0;x<_.length;x++){var C=_[x].scale;if(L=1)break}return _[S]}function GCt(e,t){var r=e.startangle,n=e.stopangle;return r>t&&t>n||r0?1:-1)/2,y:a/(1+r*r/(n*n)),outside:!0}}function jCt(e){var t=Math.sqrt(e.titleBox.width*e.titleBox.width+e.titleBox.height*e.titleBox.height);return{x:e.cx,y:e.cy,scale:e.trace.hole*e.r*2/t,tx:0,ty:-e.titleBox.height/2+e.trace.title.font.size}}function wke(e,t){var r=1,n=1,i,a=e.trace,o={x:e.cx,y:e.cy},s={tx:0,ty:0};s.ty+=a.title.font.size,i=Tke(a),a.title.position.indexOf(\"top\")!==-1?(o.y-=(1+i)*e.r,s.ty-=e.titleBox.height):a.title.position.indexOf(\"bottom\")!==-1&&(o.y+=(1+i)*e.r);var l=WCt(e.r,e.trace.aspectratio),u=t.w*(a.domain.x[1]-a.domain.x[0])/2;return a.title.position.indexOf(\"left\")!==-1?(u=u+l,o.x-=(1+i)*l,s.tx+=e.titleBox.width/2):a.title.position.indexOf(\"center\")!==-1?u*=2:a.title.position.indexOf(\"right\")!==-1&&(u=u+l,o.x+=(1+i)*l,s.tx-=e.titleBox.width/2),r=u/e.titleBox.width,n=gW(e,t)/e.titleBox.height,{x:o.x,y:o.y,scale:Math.min(r,n),tx:s.tx,ty:s.ty}}function WCt(e,t){return e/(t===void 0?1:t)}function gW(e,t){var r=e.trace,n=t.h*(r.domain.y[1]-r.domain.y[0]);return Math.min(e.titleBox.height,n/2)}function Tke(e){var t=e.pull;if(!t)return 0;var r;if(rv.isArrayOrTypedArray(t))for(t=0,r=0;rt&&(t=e.pull[r]);return t}function XCt(e,t){var r,n,i,a,o,s,l,u,c,f,h,d,v;function _(E,S){return E.pxmid[1]-S.pxmid[1]}function b(E,S){return S.pxmid[1]-E.pxmid[1]}function p(E,S){S||(S={});var L=S.labelExtraY+(n?S.yLabelMax:S.yLabelMin),x=n?E.yLabelMin:E.yLabelMax,C=n?E.yLabelMax:E.yLabelMin,M=E.cyFinal+o(E.px0[1],E.px1[1]),g=L-x,P,T,z,O,V,G;if(g*l>0&&(E.labelExtraY=g),!!rv.isArrayOrTypedArray(t.pull))for(T=0;T=(ns.castOption(t.pull,z.pts)||0))&&((E.pxmid[1]-z.pxmid[1])*l>0?(O=z.cyFinal+o(z.px0[1],z.px1[1]),g=O-x-E.labelExtraY,g*l>0&&(E.labelExtraY+=g)):(C+E.labelExtraY-M)*l>0&&(P=3*s*Math.abs(T-f.indexOf(E)),V=z.cxFinal+a(z.px0[0],z.px1[0]),G=V+P-(E.cxFinal+E.pxmid[0])-E.labelExtraX,G*s>0&&(E.labelExtraX+=G)))}for(n=0;n<2;n++)for(i=n?_:b,o=n?Math.max:Math.min,l=n?1:-1,r=0;r<2;r++){for(a=r?Math.max:Math.min,s=r?1:-1,u=e[n][r],u.sort(i),c=e[1-n][r],f=c.concat(u),d=[],h=0;h1?(u=r.r,c=u/i.aspectratio):(c=r.r,u=c*i.aspectratio),u*=(1+i.baseratio)/2,l=u*c}o=Math.min(o,l/r.vTotal)}for(n=0;nt.vTotal/2?1:0,u.halfangle=Math.PI*Math.min(u.v/t.vTotal,.5),u.ring=1-n.hole,u.rInscribed=HCt(u,t))}function mD(e,t){return[e*Math.sin(t),-e*Math.cos(t)]}function Ske(e,t,r){var n=e._fullLayout,i=r.trace,a=i.texttemplate,o=i.textinfo;if(!a&&o&&o!==\"none\"){var s=o.split(\"+\"),l=function(S){return s.indexOf(S)!==-1},u=l(\"label\"),c=l(\"text\"),f=l(\"value\"),h=l(\"percent\"),d=n.separators,v;if(v=u?[t.label]:[],c){var _=ns.getFirstFilled(i.text,t.pts);lke(_)&&v.push(_)}f&&v.push(ns.formatPieValue(t.v,d)),h&&v.push(ns.formatPiePercent(t.v/r.vTotal,d)),t.text=v.join(\"
\")}function b(S){return{label:S.label,value:S.v,valueLabel:ns.formatPieValue(S.v,n.separators),percent:S.v/r.vTotal,percentLabel:ns.formatPiePercent(S.v/r.vTotal,n.separators),color:S.color,text:S.text,customdata:rv.castOption(i,S.i,\"customdata\")}}if(a){var p=rv.castOption(i,t.i,\"texttemplate\");if(!p)t.text=\"\";else{var k=b(t),E=ns.getFirstFilled(i.text,t.pts);(lke(E)||E===\"\")&&(k.text=E),t.text=rv.texttemplateString({data:[k,i._meta],fallback:i.texttemplatefallback,labels:k,locale:e._fullLayout._d3locale,template:p})}}}function Mke(e,t){var r=e.rotate*Math.PI/180,n=Math.cos(r),i=Math.sin(r),a=(t.left+t.right)/2,o=(t.top+t.bottom)/2;e.textX=a*n-o*i,e.textY=a*i+o*n,e.noCenter=!0}Eke.exports={plot:NCt,formatSliceLabel:Ske,transformInsideText:yke,determineInsideTextFont:gke,positionTitleOutside:wke,prerenderTitles:mke,layoutAreas:Ake,attachFxHandlers:pke,computeTransform:Mke}});var Lke=ye((Kdr,Cke)=>{\"use strict\";var kke=Oa(),KCt=q3(),JCt=bv().resizeText;Cke.exports=function(t){var r=t._fullLayout._pielayer.selectAll(\".trace\");JCt(t,r,\"pie\"),r.each(function(n){var i=n[0],a=i.trace,o=kke.select(this);o.style({opacity:a.opacity}),o.selectAll(\"path.surface\").each(function(s){kke.select(this).call(KCt,s,a,t)})})}});var Ike=ye(TA=>{\"use strict\";var Pke=Mc();TA.name=\"pie\";TA.plot=function(e,t,r,n){Pke.plotBasePlot(TA.name,e,t,r,n)};TA.clean=function(e,t,r,n){Pke.cleanBasePlot(TA.name,e,t,r,n)}});var Dke=ye(($dr,Rke)=>{\"use strict\";Rke.exports={attributes:S2(),supplyDefaults:M2().supplyDefaults,supplyLayoutDefaults:tke(),layoutAttributes:vD(),calc:wA().calc,crossTraceCalc:wA().crossTraceCalc,plot:yD().plot,style:Lke(),styleOne:q3(),moduleType:\"trace\",name:\"pie\",basePlotModule:Ike(),categories:[\"pie-like\",\"pie\",\"showLegend\"],meta:{}}});var zke=ye((Qdr,Fke)=>{\"use strict\";Fke.exports=Dke()});var qke=ye(AA=>{\"use strict\";var Oke=Mc();AA.name=\"sunburst\";AA.plot=function(e,t,r,n){Oke.plotBasePlot(AA.name,e,t,r,n)};AA.clean=function(e,t,r,n){Oke.cleanBasePlot(AA.name,e,t,r,n)}});var mW=ye((tvr,Bke)=>{\"use strict\";Bke.exports={CLICK_TRANSITION_TIME:750,CLICK_TRANSITION_EASING:\"linear\",eventDataKeys:[\"currentPath\",\"root\",\"entry\",\"percentRoot\",\"percentEntry\",\"percentParent\"]}});var LE=ye((rvr,Vke)=>{\"use strict\";var $Ct=Gl(),{hovertemplateAttrs:QCt,texttemplateAttrs:e6t,templatefallbackAttrs:Nke}=Ll(),t6t=Tu(),r6t=Cc().attributes,Xy=S2(),Uke=mW(),CE=Ao().extendFlat,i6t=Pd().pattern;Vke.exports={labels:{valType:\"data_array\",editType:\"calc\"},parents:{valType:\"data_array\",editType:\"calc\"},values:{valType:\"data_array\",editType:\"calc\"},branchvalues:{valType:\"enumerated\",values:[\"remainder\",\"total\"],dflt:\"remainder\",editType:\"calc\"},count:{valType:\"flaglist\",flags:[\"branches\",\"leaves\"],dflt:\"leaves\",editType:\"calc\"},level:{valType:\"any\",editType:\"plot\",anim:!0},maxdepth:{valType:\"integer\",editType:\"plot\",dflt:-1},marker:CE({colors:{valType:\"data_array\",editType:\"calc\"},line:{color:CE({},Xy.marker.line.color,{dflt:null}),width:CE({},Xy.marker.line.width,{dflt:1}),editType:\"calc\"},pattern:i6t,editType:\"calc\"},t6t(\"marker\",{colorAttr:\"colors\",anim:!1})),leaf:{opacity:{valType:\"number\",editType:\"style\",min:0,max:1},editType:\"plot\"},text:Xy.text,textinfo:{valType:\"flaglist\",flags:[\"label\",\"text\",\"value\",\"current path\",\"percent root\",\"percent entry\",\"percent 
parent\"],extras:[\"none\"],editType:\"plot\"},texttemplate:e6t({editType:\"plot\"},{keys:Uke.eventDataKeys.concat([\"label\",\"value\"])}),texttemplatefallback:Nke({editType:\"plot\"}),hovertext:Xy.hovertext,hoverinfo:CE({},$Ct.hoverinfo,{flags:[\"label\",\"text\",\"value\",\"name\",\"current path\",\"percent root\",\"percent entry\",\"percent parent\"],dflt:\"label+text+value+name\"}),hovertemplate:QCt({},{keys:Uke.eventDataKeys}),hovertemplatefallback:Nke(),textfont:Xy.textfont,insidetextorientation:Xy.insidetextorientation,insidetextfont:Xy.insidetextfont,outsidetextfont:CE({},Xy.outsidetextfont,{}),rotation:{valType:\"angle\",dflt:0,editType:\"plot\"},sort:Xy.sort,root:{color:{valType:\"color\",editType:\"calc\",dflt:\"rgba(0,0,0,0)\"},editType:\"calc\"},domain:r6t({name:\"sunburst\",trace:!0,editType:\"calc\"})}});var yW=ye((ivr,Gke)=>{\"use strict\";Gke.exports={sunburstcolorway:{valType:\"colorlist\",editType:\"calc\"},extendsunburstcolors:{valType:\"boolean\",dflt:!0,editType:\"calc\"}}});var Xke=ye((nvr,Wke)=>{\"use strict\";var Hke=Dr(),n6t=LE(),a6t=Cc().defaults,o6t=r0().handleText,s6t=M2().handleMarkerDefaults,jke=tc(),l6t=jke.hasColorscale,u6t=jke.handleDefaults;Wke.exports=function(t,r,n,i){function a(h,d){return Hke.coerce(t,r,n6t,h,d)}var o=a(\"labels\"),s=a(\"parents\");if(!o||!o.length||!s||!s.length){r.visible=!1;return}var l=a(\"values\");l&&l.length?a(\"branchvalues\"):a(\"count\"),a(\"level\"),a(\"maxdepth\"),s6t(t,r,i,a);var u=r._hasColorscale=l6t(t,\"marker\",\"colors\")||(t.marker||{}).coloraxis;u&&u6t(t,r,i,a,{prefix:\"marker.\",cLetter:\"c\"}),a(\"leaf.opacity\",u?1:.7);var c=a(\"text\");a(\"texttemplate\"),a(\"texttemplatefallback\"),r.texttemplate||a(\"textinfo\",Hke.isArrayOrTypedArray(c)?\"text+label\":\"label\"),a(\"hovertext\"),a(\"hovertemplate\"),a(\"hovertemplatefallback\");var f=\"auto\";o6t(t,r,i,a,f,{moduleHasSelected:!1,moduleHasUnselected:!1,moduleHasConstrain:!1,moduleHasCliponaxis:!1,moduleHasTextangle:!1,moduleHasInsideanchor:!1}),a(\"insidetextorientation\"),a(\"sort\"),a(\"rotation\"),a(\"root.color\"),a6t(r,i,a),r._length=null}});var Yke=ye((avr,Zke)=>{\"use strict\";var c6t=Dr(),f6t=yW();Zke.exports=function(t,r){function n(i,a){return c6t.coerce(t,r,f6t,i,a)}n(\"sunburstcolorway\",r.colorway),n(\"extendsunburstcolors\")}});var PE=ye((_D,Kke)=>{(function(e,t){typeof _D==\"object\"&&typeof Kke!=\"undefined\"?t(_D):(e=e||self,t(e.d3=e.d3||{}))})(_D,function(e){\"use strict\";function t(je,tt){return je.parent===tt.parent?1:2}function r(je){return je.reduce(n,0)/je.length}function n(je,tt){return je+tt.x}function i(je){return 1+je.reduce(a,0)}function a(je,tt){return Math.max(je,tt.y)}function o(je){for(var tt;tt=je.children;)je=tt[0];return je}function s(je){for(var tt;tt=je.children;)je=tt[tt.length-1];return je}function l(){var je=t,tt=1,xt=1,Ie=!1;function xe(ke){var vt,ir=0;ke.eachAfter(function($r){var di=$r.children;di?($r.x=r(di),$r.y=i(di)):($r.x=vt?ir+=je($r,vt):0,$r.y=0,vt=$r)});var ar=o(ke),vr=s(ke),ii=ar.x-je(ar,vr)/2,pi=vr.x+je(vr,ar)/2;return ke.eachAfter(Ie?function($r){$r.x=($r.x-ke.x)*tt,$r.y=(ke.y-$r.y)*xt}:function($r){$r.x=($r.x-ii)/(pi-ii)*tt,$r.y=(1-(ke.y?$r.y/ke.y:1))*xt})}return xe.separation=function(ke){return arguments.length?(je=ke,xe):je},xe.size=function(ke){return arguments.length?(Ie=!1,tt=+ke[0],xt=+ke[1],xe):Ie?null:[tt,xt]},xe.nodeSize=function(ke){return arguments.length?(Ie=!0,tt=+ke[0],xt=+ke[1],xe):Ie?[tt,xt]:null},xe}function u(je){var tt=0,xt=je.children,Ie=xt&&xt.length;if(!Ie)tt=1;else 
for(;--Ie>=0;)tt+=xt[Ie].value;je.value=tt}function c(){return this.eachAfter(u)}function f(je){var tt=this,xt,Ie=[tt],xe,ke,vt;do for(xt=Ie.reverse(),Ie=[];tt=xt.pop();)if(je(tt),xe=tt.children,xe)for(ke=0,vt=xe.length;ke=0;--xe)xt.push(Ie[xe]);return this}function d(je){for(var tt=this,xt=[tt],Ie=[],xe,ke,vt;tt=xt.pop();)if(Ie.push(tt),xe=tt.children,xe)for(ke=0,vt=xe.length;ke=0;)xt+=Ie[xe].value;tt.value=xt})}function _(je){return this.eachBefore(function(tt){tt.children&&tt.children.sort(je)})}function b(je){for(var tt=this,xt=p(tt,je),Ie=[tt];tt!==xt;)tt=tt.parent,Ie.push(tt);for(var xe=Ie.length;je!==xt;)Ie.splice(xe,0,je),je=je.parent;return Ie}function p(je,tt){if(je===tt)return je;var xt=je.ancestors(),Ie=tt.ancestors(),xe=null;for(je=xt.pop(),tt=Ie.pop();je===tt;)xe=je,je=xt.pop(),tt=Ie.pop();return xe}function k(){for(var je=this,tt=[je];je=je.parent;)tt.push(je);return tt}function E(){var je=[];return this.each(function(tt){je.push(tt)}),je}function S(){var je=[];return this.eachBefore(function(tt){tt.children||je.push(tt)}),je}function L(){var je=this,tt=[];return je.each(function(xt){xt!==je&&tt.push({source:xt.parent,target:xt})}),tt}function x(je,tt){var xt=new T(je),Ie=+je.value&&(xt.value=je.value),xe,ke=[xt],vt,ir,ar,vr;for(tt==null&&(tt=M);xe=ke.pop();)if(Ie&&(xe.value=+xe.data.value),(ir=tt(xe.data))&&(vr=ir.length))for(xe.children=new Array(vr),ar=vr-1;ar>=0;--ar)ke.push(vt=xe.children[ar]=new T(ir[ar])),vt.parent=xe,vt.depth=xe.depth+1;return xt.eachBefore(P)}function C(){return x(this).eachBefore(g)}function M(je){return je.children}function g(je){je.data=je.data.data}function P(je){var tt=0;do je.height=tt;while((je=je.parent)&&je.height<++tt)}function T(je){this.data=je,this.depth=this.height=0,this.parent=null}T.prototype=x.prototype={constructor:T,count:c,each:f,eachAfter:d,eachBefore:h,sum:v,sort:_,path:b,ancestors:k,descendants:E,leaves:S,links:L,copy:C};var z=Array.prototype.slice;function O(je){for(var tt=je.length,xt,Ie;tt;)Ie=Math.random()*tt--|0,xt=je[tt],je[tt]=je[Ie],je[Ie]=xt;return je}function V(je){for(var tt=0,xt=(je=O(z.call(je))).length,Ie=[],xe,ke;tt0&&xt*xt>Ie*Ie+xe*xe}function N(je,tt){for(var xt=0;xtar?(xe=(vr+ar-ke)/(2*vr),ir=Math.sqrt(Math.max(0,ar/vr-xe*xe)),xt.x=je.x-xe*Ie-ir*vt,xt.y=je.y-xe*vt+ir*Ie):(xe=(vr+ke-ar)/(2*vr),ir=Math.sqrt(Math.max(0,ke/vr-xe*xe)),xt.x=tt.x+xe*Ie-ir*vt,xt.y=tt.y+xe*vt+ir*Ie)):(xt.x=tt.x+xt.r,xt.y=tt.y)}function Ce(je,tt){var xt=je.r+tt.r-1e-6,Ie=tt.x-je.x,xe=tt.y-je.y;return xt>0&&xt*xt>Ie*Ie+xe*xe}function me(je){var tt=je._,xt=je.next._,Ie=tt.r+xt.r,xe=(tt.x*xt.r+xt.x*tt.r)/Ie,ke=(tt.y*xt.r+xt.y*tt.r)/Ie;return xe*xe+ke*ke}function ie(je){this._=je,this.next=null,this.previous=null}function Se(je){if(!(xe=je.length))return 0;var tt,xt,Ie,xe,ke,vt,ir,ar,vr,ii,pi;if(tt=je[0],tt.x=0,tt.y=0,!(xe>1))return tt.r;if(xt=je[1],tt.x=-xt.r,xt.x=tt.r,xt.y=0,!(xe>2))return tt.r+xt.r;Ee(xt,tt,Ie=je[2]),tt=new ie(tt),xt=new ie(xt),Ie=new ie(Ie),tt.next=Ie.previous=xt,xt.next=tt.previous=Ie,Ie.next=xt.previous=tt;e:for(ir=3;ir0)throw new Error(\"cycle\");return ir}return xt.id=function(Ie){return arguments.length?(je=Fe(Ie),xt):je},xt.parentId=function(Ie){return arguments.length?(tt=Fe(Ie),xt):tt},xt}function $e(je,tt){return je.parent===tt.parent?1:2}function St(je){var tt=je.children;return tt?tt[0]:je.t}function Qt(je){var tt=je.children;return tt?tt[tt.length-1]:je.t}function Vt(je,tt,xt){var Ie=xt/(tt.i-je.i);tt.c-=Ie,tt.s+=xt,je.c+=Ie,tt.z+=xt,tt.m+=xt}function _t(je){for(var 
tt=0,xt=0,Ie=je.children,xe=Ie.length,ke;--xe>=0;)ke=Ie[xe],ke.z+=tt,ke.m+=tt,tt+=ke.s+(xt+=ke.c)}function It(je,tt,xt){return je.a.parent===tt.parent?je.a:xt}function mt(je,tt){this._=je,this.parent=null,this.children=null,this.A=null,this.a=this,this.z=0,this.m=0,this.c=0,this.s=0,this.t=null,this.i=tt}mt.prototype=Object.create(T.prototype);function er(je){for(var tt=new mt(je,0),xt,Ie=[tt],xe,ke,vt,ir;xt=Ie.pop();)if(ke=xt._.children)for(xt.children=new Array(ir=ke.length),vt=ir-1;vt>=0;--vt)Ie.push(xe=xt.children[vt]=new mt(ke[vt],vt)),xe.parent=xt;return(tt.parent=new mt(null,0)).children=[tt],tt}function lr(){var je=$e,tt=1,xt=1,Ie=null;function xe(vr){var ii=er(vr);if(ii.eachAfter(ke),ii.parent.m=-ii.z,ii.eachBefore(vt),Ie)vr.eachBefore(ar);else{var pi=vr,$r=vr,di=vr;vr.eachBefore(function(qn){qn.x$r.x&&($r=qn),qn.depth>di.depth&&(di=qn)});var ji=pi===$r?1:je(pi,$r)/2,In=ji-pi.x,wi=tt/($r.x+ji+In),On=xt/(di.depth||1);vr.eachBefore(function(qn){qn.x=(qn.x+In)*wi,qn.y=qn.depth*On})}return vr}function ke(vr){var ii=vr.children,pi=vr.parent.children,$r=vr.i?pi[vr.i-1]:null;if(ii){_t(vr);var di=(ii[0].z+ii[ii.length-1].z)/2;$r?(vr.z=$r.z+je(vr._,$r._),vr.m=vr.z-di):vr.z=di}else $r&&(vr.z=$r.z+je(vr._,$r._));vr.parent.A=ir(vr,$r,vr.parent.A||pi[0])}function vt(vr){vr._.x=vr.z+vr.parent.m,vr.m+=vr.parent.m}function ir(vr,ii,pi){if(ii){for(var $r=vr,di=vr,ji=ii,In=$r.parent.children[0],wi=$r.m,On=di.m,qn=ji.m,Fn=In.m,ra;ji=Qt(ji),$r=St($r),ji&&$r;)In=St(In),di=Qt(di),di.a=vr,ra=ji.z+qn-$r.z-wi+je(ji._,$r._),ra>0&&(Vt(It(ji,vr,pi),vr,ra),wi+=ra,On+=ra),qn+=ji.m,wi+=$r.m,Fn+=In.m,On+=di.m;ji&&!Qt(di)&&(di.t=ji,di.m+=qn-On),$r&&!St(In)&&(In.t=$r,In.m+=wi-Fn,pi=vr)}return pi}function ar(vr){vr.x*=tt,vr.y=vr.depth*xt}return xe.separation=function(vr){return arguments.length?(je=vr,xe):je},xe.size=function(vr){return arguments.length?(Ie=!1,tt=+vr[0],xt=+vr[1],xe):Ie?null:[tt,xt]},xe.nodeSize=function(vr){return arguments.length?(Ie=!0,tt=+vr[0],xt=+vr[1],xe):Ie?[tt,xt]:null},xe}function Tr(je,tt,xt,Ie,xe){for(var ke=je.children,vt,ir=-1,ar=ke.length,vr=je.value&&(xe-xt)/je.value;++irqn&&(qn=vr),Ut=wi*wi*la,Fn=Math.max(qn/Ut,Ut/On),Fn>ra){wi-=vr;break}ra=Fn}vt.push(ar={value:wi,dice:di1?Ie:1)},xt}(Lr);function Vr(){var je=Br,tt=!1,xt=1,Ie=1,xe=[0],ke=Pe,vt=Pe,ir=Pe,ar=Pe,vr=Pe;function ii($r){return $r.x0=$r.y0=0,$r.x1=xt,$r.y1=Ie,$r.eachBefore(pi),xe=[0],tt&&$r.eachBefore(Zt),$r}function pi($r){var di=xe[$r.depth],ji=$r.x0+di,In=$r.y0+di,wi=$r.x1-di,On=$r.y1-di;wi=$r-1){var qn=ke[pi];qn.x0=ji,qn.y0=In,qn.x1=wi,qn.y1=On;return}for(var Fn=vr[pi],ra=di/2+Fn,la=pi+1,Ut=$r-1;la>>1;vr[wt]On-In){var Er=(ji*nr+wi*rr)/di;ii(pi,la,rr,ji,In,Er,On),ii(la,$r,nr,Er,In,wi,On)}else{var Xr=(In*nr+On*rr)/di;ii(pi,la,rr,ji,In,wi,Xr),ii(la,$r,nr,ji,Xr,wi,On)}}}function Ge(je,tt,xt,Ie,xe){(je.depth&1?Tr:st)(je,tt,xt,Ie,xe)}var Je=function je(tt){function xt(Ie,xe,ke,vt,ir){if((ar=Ie._squarify)&&ar.ratio===tt)for(var ar,vr,ii,pi,$r=-1,di,ji=ar.length,In=Ie.value;++$r1?Ie:1)},xt}(Lr);e.cluster=l,e.hierarchy=x,e.pack=ce,e.packEnclose=V,e.packSiblings=Le,e.partition=lt,e.stratify=cr,e.tree=lr,e.treemap=Vr,e.treemapBinary=dt,e.treemapDice=st,e.treemapResquarify=Je,e.treemapSlice=Tr,e.treemapSliceDice=Ge,e.treemapSquarify=Br,Object.defineProperty(e,\"__esModule\",{value:!0})})});var RE=ye(IE=>{\"use strict\";var Jke=PE(),h6t=Eo(),SA=Dr(),d6t=tc().makeColorScaleFuncFromTrace,v6t=wA().makePullColorFn,p6t=wA().generateExtendedColors,g6t=tc().calc,m6t=fs().ALMOST_EQUAL,y6t={},_6t={},x6t={};IE.calc=function(e,t){var 
r=e._fullLayout,n=t.ids,i=SA.isArrayOrTypedArray(n),a=t.labels,o=t.parents,s=t.values,l=SA.isArrayOrTypedArray(s),u=[],c={},f={},h=function(H,N){c[H]?c[H].push(N):c[H]=[N],f[N]=1},d=function(H){return H||typeof H==\"number\"},v=function(H){return!l||h6t(s[H])&&s[H]>=0},_,b,p;i?(_=Math.min(n.length,o.length),b=function(H){return d(n[H])&&v(H)},p=function(H){return String(n[H])}):(_=Math.min(a.length,o.length),b=function(H){return d(a[H])&&v(H)},p=function(H){return String(a[H])}),l&&(_=Math.min(_,s.length));for(var k=0;k<_;k++)if(b(k)){var E=p(k),S=d(o[k])?String(o[k]):\"\",L={i:k,id:E,pid:S,label:d(a[k])?String(a[k]):\"\"};l&&(L.v=+s[k]),u.push(L),h(S,E)}if(c[\"\"]){if(c[\"\"].length>1){for(var M=SA.randstr(),g=0;g{});function Gm(){}function eCe(){return this.rgb().formatHex()}function k6t(){return this.rgb().formatHex8()}function C6t(){return sCe(this).formatHsl()}function tCe(){return this.rgb().formatRgb()}function j_(e){var t,r;return e=(e+\"\").trim().toLowerCase(),(t=b6t.exec(e))?(r=t[1].length,t=parseInt(t[1],16),r===6?rCe(t):r===3?new _d(t>>8&15|t>>4&240,t>>4&15|t&240,(t&15)<<4|t&15,1):r===8?bD(t>>24&255,t>>16&255,t>>8&255,(t&255)/255):r===4?bD(t>>12&15|t>>8&240,t>>8&15|t>>4&240,t>>4&15|t&240,((t&15)<<4|t&15)/255):null):(t=w6t.exec(e))?new _d(t[1],t[2],t[3],1):(t=T6t.exec(e))?new _d(t[1]*255/100,t[2]*255/100,t[3]*255/100,1):(t=A6t.exec(e))?bD(t[1],t[2],t[3],t[4]):(t=S6t.exec(e))?bD(t[1]*255/100,t[2]*255/100,t[3]*255/100,t[4]):(t=M6t.exec(e))?aCe(t[1],t[2]/100,t[3]/100,1):(t=E6t.exec(e))?aCe(t[1],t[2]/100,t[3]/100,t[4]):Qke.hasOwnProperty(e)?rCe(Qke[e]):e===\"transparent\"?new _d(NaN,NaN,NaN,0):null}function rCe(e){return new _d(e>>16&255,e>>8&255,e&255,1)}function bD(e,t,r,n){return n<=0&&(e=t=r=NaN),new _d(e,t,r,n)}function FE(e){return e instanceof Gm||(e=j_(e)),e?(e=e.rgb(),new _d(e.r,e.g,e.b,e.opacity)):new _d}function EA(e,t,r,n){return arguments.length===1?FE(e):new _d(e,t,r,n==null?1:n)}function _d(e,t,r,n){this.r=+e,this.g=+t,this.b=+r,this.opacity=+n}function iCe(){return`#${E2(this.r)}${E2(this.g)}${E2(this.b)}`}function L6t(){return`#${E2(this.r)}${E2(this.g)}${E2(this.b)}${E2((isNaN(this.opacity)?1:this.opacity)*255)}`}function nCe(){let e=TD(this.opacity);return`${e===1?\"rgb(\":\"rgba(\"}${k2(this.r)}, ${k2(this.g)}, ${k2(this.b)}${e===1?\")\":`, ${e})`}`}function TD(e){return isNaN(e)?1:Math.max(0,Math.min(1,e))}function k2(e){return Math.max(0,Math.min(255,Math.round(e)||0))}function E2(e){return e=k2(e),(e<16?\"0\":\"\")+e.toString(16)}function aCe(e,t,r,n){return n<=0?e=t=r=NaN:r<=0||r>=1?e=t=NaN:t<=0&&(e=NaN),new Xg(e,t,r,n)}function sCe(e){if(e instanceof Xg)return new Xg(e.h,e.s,e.l,e.opacity);if(e instanceof Gm||(e=j_(e)),!e)return new Xg;if(e instanceof Xg)return e;e=e.rgb();var t=e.r/255,r=e.g/255,n=e.b/255,i=Math.min(t,r,n),a=Math.max(t,r,n),o=NaN,s=a-i,l=(a+i)/2;return s?(t===a?o=(r-n)/s+(r0&&l<1?0:o,new Xg(o,s,l,e.opacity)}function zE(e,t,r,n){return arguments.length===1?sCe(e):new Xg(e,t,r,n==null?1:n)}function Xg(e,t,r,n){this.h=+e,this.s=+t,this.l=+r,this.opacity=+n}function oCe(e){return e=(e||0)%360,e<0?e+360:e}function wD(e){return Math.max(0,Math.min(1,e||0))}function _W(e,t,r){return(e<60?t+(r-t)*e/60:e<180?r:e<240?t+(r-t)*(240-e)/60:t)*255}var H_,C2,MA,DE,Vm,b6t,w6t,T6t,A6t,S6t,M6t,E6t,Qke,AD=gu(()=>{xD();H_=.7,C2=1/H_,MA=\"\\\\s*([+-]?\\\\d+)\\\\s*\",DE=\"\\\\s*([+-]?(?:\\\\d*\\\\.)?\\\\d+(?:[eE][+-]?\\\\d+)?)\\\\s*\",Vm=\"\\\\s*([+-]?(?:\\\\d*\\\\.)?\\\\d+(?:[eE][+-]?\\\\d+)?)%\\\\s*\",b6t=/^#([0-9a-f]{3,8})$/,w6t=new 
RegExp(`^rgb\\\\(${MA},${MA},${MA}\\\\)$`),T6t=new RegExp(`^rgb\\\\(${Vm},${Vm},${Vm}\\\\)$`),A6t=new RegExp(`^rgba\\\\(${MA},${MA},${MA},${DE}\\\\)$`),S6t=new RegExp(`^rgba\\\\(${Vm},${Vm},${Vm},${DE}\\\\)$`),M6t=new RegExp(`^hsl\\\\(${DE},${Vm},${Vm}\\\\)$`),E6t=new RegExp(`^hsla\\\\(${DE},${Vm},${Vm},${DE}\\\\)$`),Qke={aliceblue:15792383,antiquewhite:16444375,aqua:65535,aquamarine:8388564,azure:15794175,beige:16119260,bisque:16770244,black:0,blanchedalmond:16772045,blue:255,blueviolet:9055202,brown:10824234,burlywood:14596231,cadetblue:6266528,chartreuse:8388352,chocolate:13789470,coral:16744272,cornflowerblue:6591981,cornsilk:16775388,crimson:14423100,cyan:65535,darkblue:139,darkcyan:35723,darkgoldenrod:12092939,darkgray:11119017,darkgreen:25600,darkgrey:11119017,darkkhaki:12433259,darkmagenta:9109643,darkolivegreen:5597999,darkorange:16747520,darkorchid:10040012,darkred:9109504,darksalmon:15308410,darkseagreen:9419919,darkslateblue:4734347,darkslategray:3100495,darkslategrey:3100495,darkturquoise:52945,darkviolet:9699539,deeppink:16716947,deepskyblue:49151,dimgray:6908265,dimgrey:6908265,dodgerblue:2003199,firebrick:11674146,floralwhite:16775920,forestgreen:2263842,fuchsia:16711935,gainsboro:14474460,ghostwhite:16316671,gold:16766720,goldenrod:14329120,gray:8421504,green:32768,greenyellow:11403055,grey:8421504,honeydew:15794160,hotpink:16738740,indianred:13458524,indigo:4915330,ivory:16777200,khaki:15787660,lavender:15132410,lavenderblush:16773365,lawngreen:8190976,lemonchiffon:16775885,lightblue:11393254,lightcoral:15761536,lightcyan:14745599,lightgoldenrodyellow:16448210,lightgray:13882323,lightgreen:9498256,lightgrey:13882323,lightpink:16758465,lightsalmon:16752762,lightseagreen:2142890,lightskyblue:8900346,lightslategray:7833753,lightslategrey:7833753,lightsteelblue:11584734,lightyellow:16777184,lime:65280,limegreen:3329330,linen:16445670,magenta:16711935,maroon:8388608,mediumaquamarine:6737322,mediumblue:205,mediumorchid:12211667,mediumpurple:9662683,mediumseagreen:3978097,mediumslateblue:8087790,mediumspringgreen:64154,mediumturquoise:4772300,mediumvioletred:13047173,midnightblue:1644912,mintcream:16121850,mistyrose:16770273,moccasin:16770229,navajowhite:16768685,navy:128,oldlace:16643558,olive:8421376,olivedrab:7048739,orange:16753920,orangered:16729344,orchid:14315734,palegoldenrod:15657130,palegreen:10025880,paleturquoise:11529966,palevioletred:14381203,papayawhip:16773077,peachpuff:16767673,peru:13468991,pink:16761035,plum:14524637,powderblue:11591910,purple:8388736,rebeccapurple:6697881,red:16711680,rosybrown:12357519,royalblue:4286945,saddlebrown:9127187,salmon:16416882,sandybrown:16032864,seagreen:3050327,seashell:16774638,sienna:10506797,silver:12632256,skyblue:8900331,slateblue:6970061,slategray:7372944,slategrey:7372944,snow:16775930,springgreen:65407,steelblue:4620980,tan:13808780,teal:32896,thistle:14204888,tomato:16737095,turquoise:4251856,violet:15631086,wheat:16113331,white:16777215,whitesmoke:16119285,yellow:16776960,yellowgreen:10145074};Zy(Gm,j_,{copy(e){return Object.assign(new this.constructor,this,e)},displayable(){return this.rgb().displayable()},hex:eCe,formatHex:eCe,formatHex8:k6t,formatHsl:C6t,formatRgb:tCe,toString:tCe});Zy(_d,EA,G_(Gm,{brighter(e){return e=e==null?C2:Math.pow(C2,e),new _d(this.r*e,this.g*e,this.b*e,this.opacity)},darker(e){return e=e==null?H_:Math.pow(H_,e),new _d(this.r*e,this.g*e,this.b*e,this.opacity)},rgb(){return this},clamp(){return new 
_d(k2(this.r),k2(this.g),k2(this.b),TD(this.opacity))},displayable(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:iCe,formatHex:iCe,formatHex8:L6t,formatRgb:nCe,toString:nCe}));Zy(Xg,zE,G_(Gm,{brighter(e){return e=e==null?C2:Math.pow(C2,e),new Xg(this.h,this.s,this.l*e,this.opacity)},darker(e){return e=e==null?H_:Math.pow(H_,e),new Xg(this.h,this.s,this.l*e,this.opacity)},rgb(){var e=this.h%360+(this.h<0)*360,t=isNaN(e)||isNaN(this.s)?0:this.s,r=this.l,n=r+(r<.5?r:1-r)*t,i=2*r-n;return new _d(_W(e>=240?e-240:e+120,i,n),_W(e,i,n),_W(e<120?e+240:e-120,i,n),this.opacity)},clamp(){return new Xg(oCe(this.h),wD(this.s),wD(this.l),TD(this.opacity))},displayable(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl(){let e=TD(this.opacity);return`${e===1?\"hsl(\":\"hsla(\"}${oCe(this.h)}, ${wD(this.s)*100}%, ${wD(this.l)*100}%${e===1?\")\":`, ${e})`}`}}))});var SD,MD,xW=gu(()=>{SD=Math.PI/180,MD=180/Math.PI});function dCe(e){if(e instanceof Hm)return new Hm(e.l,e.a,e.b,e.opacity);if(e instanceof Yy)return vCe(e);e instanceof _d||(e=FE(e));var t=AW(e.r),r=AW(e.g),n=AW(e.b),i=bW((.2225045*t+.7168786*r+.0606169*n)/uCe),a,o;return t===r&&r===n?a=o=i:(a=bW((.4360747*t+.3850649*r+.1430804*n)/lCe),o=bW((.0139322*t+.0971045*r+.7141733*n)/cCe)),new Hm(116*i-16,500*(a-i),200*(i-o),e.opacity)}function CA(e,t,r,n){return arguments.length===1?dCe(e):new Hm(e,t,r,n==null?1:n)}function Hm(e,t,r,n){this.l=+e,this.a=+t,this.b=+r,this.opacity=+n}function bW(e){return e>P6t?Math.pow(e,1/3):e/hCe+fCe}function wW(e){return e>kA?e*e*e:hCe*(e-fCe)}function TW(e){return 255*(e<=.0031308?12.92*e:1.055*Math.pow(e,1/2.4)-.055)}function AW(e){return(e/=255)<=.04045?e/12.92:Math.pow((e+.055)/1.055,2.4)}function I6t(e){if(e instanceof Yy)return new Yy(e.h,e.c,e.l,e.opacity);if(e instanceof Hm||(e=dCe(e)),e.a===0&&e.b===0)return new Yy(NaN,0{xD();AD();xW();ED=18,lCe=.96422,uCe=1,cCe=.82521,fCe=4/29,kA=6/29,hCe=3*kA*kA,P6t=kA*kA*kA;Zy(Hm,CA,G_(Gm,{brighter(e){return new Hm(this.l+ED*(e==null?1:e),this.a,this.b,this.opacity)},darker(e){return new Hm(this.l-ED*(e==null?1:e),this.a,this.b,this.opacity)},rgb(){var e=(this.l+16)/116,t=isNaN(this.a)?e:e+this.a/500,r=isNaN(this.b)?e:e-this.b/200;return t=lCe*wW(t),e=uCe*wW(e),r=cCe*wW(r),new _d(TW(3.1338561*t-1.6168667*e-.4906146*r),TW(-.9787684*t+1.9161415*e+.033454*r),TW(.0719453*t-.2289914*e+1.4052427*r),this.opacity)}}));Zy(Yy,OE,G_(Gm,{brighter(e){return new Yy(this.h,this.c,this.l+ED*(e==null?1:e),this.opacity)},darker(e){return new Yy(this.h,this.c,this.l-ED*(e==null?1:e),this.opacity)},rgb(){return vCe(this).rgb()}}))});function R6t(e){if(e instanceof L2)return new L2(e.h,e.s,e.l,e.opacity);e instanceof _d||(e=FE(e));var t=e.r/255,r=e.g/255,n=e.b/255,i=(yCe*n+gCe*t-mCe*r)/(yCe+gCe-mCe),a=n-i,o=(qE*(r-i)-MW*a)/kD,s=Math.sqrt(o*o+a*a)/(qE*i*(1-i)),l=s?Math.atan2(o,a)*MD-120:NaN;return new L2(l<0?l+360:l,s,i,e.opacity)}function LA(e,t,r,n){return arguments.length===1?R6t(e):new L2(e,t,r,n==null?1:n)}function L2(e,t,r,n){this.h=+e,this.s=+t,this.l=+r,this.opacity=+n}var _Ce,SW,MW,kD,qE,gCe,mCe,yCe,xCe=gu(()=>{xD();AD();xW();_Ce=-.14861,SW=1.78277,MW=-.29227,kD=-.90649,qE=1.97294,gCe=qE*kD,mCe=qE*SW,yCe=SW*MW-kD*_Ce;Zy(L2,LA,G_(Gm,{brighter(e){return e=e==null?C2:Math.pow(C2,e),new L2(this.h,this.s,this.l*e,this.opacity)},darker(e){return e=e==null?H_:Math.pow(H_,e),new L2(this.h,this.s,this.l*e,this.opacity)},rgb(){var 
e=isNaN(this.h)?0:(this.h+120)*SD,t=+this.l,r=isNaN(this.s)?0:this.s*t*(1-t),n=Math.cos(e),i=Math.sin(e);return new _d(255*(t+r*(_Ce*n+SW*i)),255*(t+r*(MW*n+kD*i)),255*(t+r*(qE*n)),this.opacity)}}))});var P2=gu(()=>{AD();pCe();xCe()});function EW(e,t,r,n,i){var a=e*e,o=a*e;return((1-3*e+3*a-o)*t+(4-6*a+3*o)*r+(1+3*e+3*a-3*o)*n+o*i)/6}function CD(e){var t=e.length-1;return function(r){var n=r<=0?r=0:r>=1?(r=1,t-1):Math.floor(r*t),i=e[n],a=e[n+1],o=n>0?e[n-1]:2*i-a,s=n{});function PD(e){var t=e.length;return function(r){var n=Math.floor(((r%=1)<0?++r:r)*t),i=e[(n+t-1)%t],a=e[n%t],o=e[(n+1)%t],s=e[(n+2)%t];return EW((r-n/t)*t,i,a,o,s)}}var kW=gu(()=>{LD()});var PA,CW=gu(()=>{PA=e=>()=>e});function bCe(e,t){return function(r){return e+r*t}}function D6t(e,t,r){return e=Math.pow(e,r),t=Math.pow(t,r)-e,r=1/r,function(n){return Math.pow(e+n*t,r)}}function W_(e,t){var r=t-e;return r?bCe(e,r>180||r<-180?r-360*Math.round(r/360):r):PA(isNaN(e)?t:e)}function wCe(e){return(e=+e)==1?$f:function(t,r){return r-t?D6t(t,r,e):PA(isNaN(t)?r:t)}}function $f(e,t){var r=t-e;return r?bCe(e,r):PA(isNaN(e)?t:e)}var I2=gu(()=>{CW()});function TCe(e){return function(t){var r=t.length,n=new Array(r),i=new Array(r),a=new Array(r),o,s;for(o=0;o{P2();LD();kW();I2();BE=function e(t){var r=wCe(t);function n(i,a){var o=r((i=EA(i)).r,(a=EA(a)).r),s=r(i.g,a.g),l=r(i.b,a.b),u=$f(i.opacity,a.opacity);return function(c){return i.r=o(c),i.g=s(c),i.b=l(c),i.opacity=u(c),i+\"\"}}return n.gamma=e,n}(1);ACe=TCe(CD),SCe=TCe(PD)});function IA(e,t){t||(t=[]);var r=e?Math.min(t.length,e.length):0,n=t.slice(),i;return function(a){for(i=0;i{});function MCe(e,t){return(ID(t)?IA:PW)(e,t)}function PW(e,t){var r=t?t.length:0,n=e?Math.min(r,e.length):0,i=new Array(n),a=new Array(r),o;for(o=0;o{NE();RD()});function DD(e,t){var r=new Date;return e=+e,t=+t,function(n){return r.setTime(e*(1-n)+t*n),r}}var RW=gu(()=>{});function zp(e,t){return e=+e,t=+t,function(r){return e*(1-r)+t*r}}var UE=gu(()=>{});function FD(e,t){var r={},n={},i;(e===null||typeof e!=\"object\")&&(e={}),(t===null||typeof t!=\"object\")&&(t={});for(i in t)i in e?r[i]=X_(e[i],t[i]):n[i]=t[i];return function(a){for(i in r)n[i]=r[i](a);return n}}var DW=gu(()=>{NE()});function F6t(e){return function(){return e}}function z6t(e){return function(t){return e(t)+\"\"}}function zD(e,t){var r=zW.lastIndex=FW.lastIndex=0,n,i,a,o=-1,s=[],l=[];for(e=e+\"\",t=t+\"\";(n=zW.exec(e))&&(i=FW.exec(t));)(a=i.index)>r&&(a=t.slice(r,a),s[o]?s[o]+=a:s[++o]=a),(n=n[0])===(i=i[0])?s[o]?s[o]+=i:s[++o]=i:(s[++o]=null,l.push({i:o,x:zp(n,i)})),r=FW.lastIndex;return r{UE();zW=/[-+]?(?:\\d+\\.?\\d*|\\.?\\d+)(?:[eE][-+]?\\d+)?/g,FW=new RegExp(zW.source,\"g\")});function X_(e,t){var r=typeof t,n;return t==null||r===\"boolean\"?PA(t):(r===\"number\"?zp:r===\"string\"?(n=j_(t))?(t=n,BE):zD:t instanceof j_?BE:t instanceof Date?DD:ID(t)?IA:Array.isArray(t)?PW:typeof t.valueOf!=\"function\"&&typeof t.toString!=\"function\"||isNaN(t)?FD:zp)(e,t)}var NE=gu(()=>{P2();LW();IW();RW();UE();DW();OW();CW();RD()});function ECe(e){var t=e.length;return function(r){return e[Math.max(0,Math.min(t-1,Math.floor(r*t)))]}}var kCe=gu(()=>{});function CCe(e,t){var r=W_(+e,+t);return function(n){var i=r(n);return i-360*Math.floor(i/360)}}var LCe=gu(()=>{I2()});function PCe(e,t){return e=+e,t=+t,function(r){return Math.round(e*(1-r)+t*r)}}var ICe=gu(()=>{});function qW(e,t,r,n,i,a){var 
o,s,l;return(o=Math.sqrt(e*e+t*t))&&(e/=o,t/=o),(l=e*r+t*n)&&(r-=e*l,n-=t*l),(s=Math.sqrt(r*r+n*n))&&(r/=s,n/=s,l/=s),e*n{RCe=180/Math.PI,OD={translateX:0,translateY:0,rotate:0,skewX:0,scaleX:1,scaleY:1}});function FCe(e){let t=new(typeof DOMMatrix==\"function\"?DOMMatrix:WebKitCSSMatrix)(e+\"\");return t.isIdentity?OD:qW(t.a,t.b,t.c,t.d,t.e,t.f)}function zCe(e){return e==null?OD:(qD||(qD=document.createElementNS(\"http://www.w3.org/2000/svg\",\"g\")),qD.setAttribute(\"transform\",e),(e=qD.transform.baseVal.consolidate())?(e=e.matrix,qW(e.a,e.b,e.c,e.d,e.e,e.f)):OD)}var qD,OCe=gu(()=>{DCe()});function qCe(e,t,r,n){function i(u){return u.length?u.pop()+\" \":\"\"}function a(u,c,f,h,d,v){if(u!==f||c!==h){var _=d.push(\"translate(\",null,t,null,r);v.push({i:_-4,x:zp(u,f)},{i:_-2,x:zp(c,h)})}else(f||h)&&d.push(\"translate(\"+f+t+h+r)}function o(u,c,f,h){u!==c?(u-c>180?c+=360:c-u>180&&(u+=360),h.push({i:f.push(i(f)+\"rotate(\",null,n)-2,x:zp(u,c)})):c&&f.push(i(f)+\"rotate(\"+c+n)}function s(u,c,f,h){u!==c?h.push({i:f.push(i(f)+\"skewX(\",null,n)-2,x:zp(u,c)}):c&&f.push(i(f)+\"skewX(\"+c+n)}function l(u,c,f,h,d,v){if(u!==f||c!==h){var _=d.push(i(d)+\"scale(\",null,\",\",null,\")\");v.push({i:_-4,x:zp(u,f)},{i:_-2,x:zp(c,h)})}else(f!==1||h!==1)&&d.push(i(d)+\"scale(\"+f+\",\"+h+\")\")}return function(u,c){var f=[],h=[];return u=e(u),c=e(c),a(u.translateX,u.translateY,c.translateX,c.translateY,f,h),o(u.rotate,c.rotate,f,h),s(u.skewX,c.skewX,f,h),l(u.scaleX,u.scaleY,c.scaleX,c.scaleY,f,h),u=c=null,function(d){for(var v=-1,_=h.length,b;++v<_;)f[(b=h[v]).i]=b.x(d);return f.join(\"\")}}}var BCe,NCe,UCe=gu(()=>{UE();OCe();BCe=qCe(FCe,\"px, \",\"px)\",\"deg)\"),NCe=qCe(zCe,\", \",\")\",\")\")});function VCe(e){return((e=Math.exp(e))+1/e)/2}function q6t(e){return((e=Math.exp(e))-1/e)/2}function B6t(e){return((e=Math.exp(2*e))-1)/(e+1)}var O6t,GCe,HCe=gu(()=>{O6t=1e-12;GCe=function e(t,r,n){function i(a,o){var s=a[0],l=a[1],u=a[2],c=o[0],f=o[1],h=o[2],d=c-s,v=f-l,_=d*d+v*v,b,p;if(_{P2();I2();WCe=jCe(W_),XCe=jCe($f)});function BW(e,t){var r=$f((e=CA(e)).l,(t=CA(t)).l),n=$f(e.a,t.a),i=$f(e.b,t.b),a=$f(e.opacity,t.opacity);return function(o){return e.l=r(o),e.a=n(o),e.b=i(o),e.opacity=a(o),e+\"\"}}var YCe=gu(()=>{P2();I2()});function KCe(e){return function(t,r){var n=e((t=OE(t)).h,(r=OE(r)).h),i=$f(t.c,r.c),a=$f(t.l,r.l),o=$f(t.opacity,r.opacity);return function(s){return t.h=n(s),t.c=i(s),t.l=a(s),t.opacity=o(s),t+\"\"}}}var JCe,$Ce,QCe=gu(()=>{P2();I2();JCe=KCe(W_),$Ce=KCe($f)});function e6e(e){return function t(r){r=+r;function n(i,a){var o=e((i=LA(i)).h,(a=LA(a)).h),s=$f(i.s,a.s),l=$f(i.l,a.l),u=$f(i.opacity,a.opacity);return function(c){return i.h=o(c),i.s=s(c),i.l=l(Math.pow(c,r)),i.opacity=u(c),i+\"\"}}return n.gamma=t,n}(1)}var t6e,r6e,i6e=gu(()=>{P2();I2();t6e=e6e(W_),r6e=e6e($f)});function NW(e,t){t===void 0&&(t=e,e=X_);for(var r=0,n=t.length-1,i=t[0],a=new Array(n<0?0:n);r{NE()});function a6e(e,t){for(var r=new Array(t),n=0;n{});var 
R2={};uee(R2,{interpolate:()=>X_,interpolateArray:()=>MCe,interpolateBasis:()=>CD,interpolateBasisClosed:()=>PD,interpolateCubehelix:()=>t6e,interpolateCubehelixLong:()=>r6e,interpolateDate:()=>DD,interpolateDiscrete:()=>ECe,interpolateHcl:()=>JCe,interpolateHclLong:()=>$Ce,interpolateHsl:()=>WCe,interpolateHslLong:()=>XCe,interpolateHue:()=>CCe,interpolateLab:()=>BW,interpolateNumber:()=>zp,interpolateNumberArray:()=>IA,interpolateObject:()=>FD,interpolateRgb:()=>BE,interpolateRgbBasis:()=>ACe,interpolateRgbBasisClosed:()=>SCe,interpolateRound:()=>PCe,interpolateString:()=>zD,interpolateTransformCss:()=>BCe,interpolateTransformSvg:()=>NCe,interpolateZoom:()=>GCe,piecewise:()=>NW,quantize:()=>a6e});var D2=gu(()=>{NE();IW();LD();kW();RW();kCe();LCe();UE();RD();DW();ICe();OW();UCe();HCe();LW();ZCe();YCe();QCe();i6e();n6e();o6e()});var BD=ye((Ypr,s6e)=>{\"use strict\";var N6t=So(),U6t=ka();s6e.exports=function(t,r,n,i,a){var o=r.data.data,s=o.i,l=a||o.color;if(s>=0){r.i=o.i;var u=n.marker;u.pattern?(!u.colors||!u.pattern.shape)&&(u.color=l,r.color=l):(u.color=l,r.color=l),N6t.pointStyle(t,n,i,r)}else U6t.fill(t,l)}});var UW=ye((Kpr,h6e)=>{\"use strict\";var l6e=Oa(),u6e=ka(),c6e=Dr(),V6t=bv().resizeText,G6t=BD();function H6t(e){var t=e._fullLayout._sunburstlayer.selectAll(\".trace\");V6t(e,t,\"sunburst\"),t.each(function(r){var n=l6e.select(this),i=r[0],a=i.trace;n.style(\"opacity\",a.opacity),n.selectAll(\"path.surface\").each(function(o){l6e.select(this).call(f6e,o,a,e)})})}function f6e(e,t,r,n){var i=t.data.data,a=!t.children,o=i.i,s=c6e.castOption(r,o,\"marker.line.color\")||u6e.defaultLine,l=c6e.castOption(r,o,\"marker.line.width\")||0;e.call(G6t,t,r,n).style(\"stroke-width\",l).call(u6e.stroke,s).style(\"opacity\",a?r.leaf.opacity:null)}h6e.exports={style:H6t,styleOne:f6e}});var Ky=ye(Bs=>{\"use strict\";var F2=Dr(),j6t=ka(),W6t=Ag(),d6e=l_();Bs.findEntryWithLevel=function(e,t){var r;return t&&e.eachAfter(function(n){if(Bs.getPtId(n)===t)return r=n.copy()}),r||e};Bs.findEntryWithChild=function(e,t){var r;return e.eachAfter(function(n){for(var i=n.children||[],a=0;a0)};Bs.getMaxDepth=function(e){return e.maxdepth>=0?e.maxdepth:1/0};Bs.isHeader=function(e,t){return!(Bs.isLeaf(e)||e.depth===t._maxDepth-1)};function v6e(e){return e.data.data.pid}Bs.getParent=function(e,t){return Bs.findEntryWithLevel(e,v6e(t))};Bs.listPath=function(e,t){var r=e.parent;if(!r)return[];var n=t?[r.data[t]]:[r];return Bs.listPath(r,t).concat(n)};Bs.getPath=function(e){return Bs.listPath(e,\"label\").join(\"/\")+\"/\"};Bs.formatValue=d6e.formatPieValue;Bs.formatPercent=function(e,t){var r=F2.formatPercent(e,0);return r===\"0%\"&&(r=d6e.formatPiePercent(e,t)),r}});var HE=ye(($pr,m6e)=>{\"use strict\";var RA=Oa(),p6e=qa(),Y6t=ip().appendArrayPointValue,VE=vf(),g6e=Dr(),K6t=y3(),rd=Ky(),J6t=l_(),$6t=J6t.formatPieValue;m6e.exports=function(t,r,n,i,a){var o=i[0],s=o.trace,l=o.hierarchy,u=s.type===\"sunburst\",c=s.type===\"treemap\"||s.type===\"icicle\";\"_hasHoverLabel\"in s||(s._hasHoverLabel=!1),\"_hasHoverEvent\"in s||(s._hasHoverEvent=!1);var f=function(v){var _=n._fullLayout;if(!(n._dragging||_.hovermode===!1)){var b=n._fullData[s.index],p=v.data.data,k=p.i,E=rd.isHierarchyRoot(v),S=rd.getParent(l,v),L=rd.getValue(v),x=function(Ee){return g6e.castOption(b,k,Ee)},C=x(\"hovertemplate\"),M=VE.castHoverinfo(b,_,k),g=_.separators,P;if(C||M&&M!==\"none\"&&M!==\"skip\"){var T,z;u&&(T=o.cx+v.pxmid[0]*(1-v.rInscribed),z=o.cy+v.pxmid[1]*(1-v.rInscribed)),c&&(T=v._hoverX,z=v._hoverY);var O={},V=[],G=[],Z=function(Ee){return 
V.indexOf(Ee)!==-1};M&&(V=M===\"all\"?b._module.attributes.hoverinfo.flags:M.split(\"+\")),O.label=p.label,Z(\"label\")&&O.label&&G.push(O.label),p.hasOwnProperty(\"v\")&&(O.value=p.v,O.valueLabel=$6t(O.value,g),Z(\"value\")&&G.push(O.valueLabel)),O.currentPath=v.currentPath=rd.getPath(v.data),Z(\"current path\")&&!E&&G.push(O.currentPath);var H,N=[],j=function(){N.indexOf(H)===-1&&(G.push(H),N.push(H))};O.percentParent=v.percentParent=L/rd.getValue(S),O.parent=v.parentString=rd.getPtLabel(S),Z(\"percent parent\")&&(H=rd.formatPercent(O.percentParent,g)+\" of \"+O.parent,j()),O.percentEntry=v.percentEntry=L/rd.getValue(r),O.entry=v.entry=rd.getPtLabel(r),Z(\"percent entry\")&&!E&&!v.onPathbar&&(H=rd.formatPercent(O.percentEntry,g)+\" of \"+O.entry,j()),O.percentRoot=v.percentRoot=L/rd.getValue(l),O.root=v.root=rd.getPtLabel(l),Z(\"percent root\")&&!E&&(H=rd.formatPercent(O.percentRoot,g)+\" of \"+O.root,j()),O.text=x(\"hovertext\")||x(\"text\"),Z(\"text\")&&(H=O.text,g6e.isValidTextValue(H)&&G.push(H)),P=[GE(v,b,a.eventDataKeys)];var re={trace:b,y:z,_x0:v._x0,_x1:v._x1,_y0:v._y0,_y1:v._y1,text:G.join(\"
[... minified plotly.js library bundle (sunburst/treemap/icicle/funnelarea trace modules) embedded in a notebook HTML output — omitted ...]
" ] }, - "metadata": {}, - "output_type": "display_data", "jetTransient": { "display_id": null - } + }, + "metadata": {}, + "output_type": "display_data" } ], - "execution_count": 4 + "source": [ + "# Visualize first two weeks of data\n", + "heat_demand = flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile\n", + "electricity_price = flow_system.components['GridBuy'].outputs[0].effects_per_flow_hour['costs']\n", + "\n", + "fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.1)\n", + "\n", + "fig.add_trace(go.Scatter(x=timesteps[:1344], y=heat_demand.values[:1344], name='Heat Demand'), row=1, col=1)\n", + "fig.add_trace(go.Scatter(x=timesteps[:1344], y=electricity_price.values[:1344], name='Electricity Price'), row=2, col=1)\n", + "\n", + "fig.update_layout(height=400, title='First Two Weeks of Data')\n", + "fig.update_yaxes(title_text='Heat Demand [MW]', row=1, col=1)\n", + "fig.update_yaxes(title_text='El. Price [€/MWh]', row=2, col=1)\n", + "fig.show()" + ] }, { "cell_type": "markdown", @@ -4038,28 +4055,14 @@ }, { "cell_type": "code", + "execution_count": 4, "id": "9", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:23:09.916554Z", - "start_time": "2025-12-14T15:23:08.633006Z" + "end_time": "2025-12-14T15:30:55.105224Z", + "start_time": "2025-12-14T15:30:52.805969Z" } }, - "source": [ - "# Cluster with 8 typical days (from 31 days)\n", - "fs_clustering_demo = flow_system.copy()\n", - "fs_clustered_demo = fs_clustering_demo.transform.cluster(n_clusters=8, cluster_duration='1D')\n", - "\n", - "# Get the clustering object to access tsam results\n", - "clustering = fs_clustered_demo._clustering_info['clustering']\n", - "\n", - "print(f'Original: {len(flow_system.timesteps)} timesteps ({len(flow_system.timesteps) / 96:.0f} days)')\n", - "print(f'Clustered: {clustering.nr_of_periods} typical days')\n", - "print(f'Cluster assignments: {list(clustering.tsam.clusterOrder)}')\n", - "\n", - "# Plot original vs aggregated data\n", - "clustering.plot()" - ], "outputs": [ { "name": "stdout", @@ -4067,11 +4070,15 @@ "text": [ "Original: 2976 timesteps (31 days)\n", "Clustered: 8 typical days\n", - "Cluster assignments: [np.int32(3), np.int32(7), np.int32(5), np.int32(2), np.int32(1), np.int32(4), np.int32(7), np.int32(7), np.int32(7), np.int32(5), np.int32(2), np.int32(1), np.int32(4), np.int32(7), np.int32(7), np.int32(0), np.int32(5), np.int32(2), np.int32(1), np.int32(4), np.int32(0), np.int32(0), np.int32(0), np.int32(5), np.int32(2), np.int32(1), np.int32(4), np.int32(0), np.int32(0), np.int32(0), np.int32(6)]\n" + "Cluster assignments: [np.int32(2), np.int32(6), np.int32(7), np.int32(3), np.int32(0), np.int32(4), np.int32(6), np.int32(6), np.int32(6), np.int32(7), np.int32(3), np.int32(0), np.int32(4), np.int32(6), np.int32(6), np.int32(1), np.int32(7), np.int32(3), np.int32(0), np.int32(4), np.int32(1), np.int32(1), np.int32(1), np.int32(7), np.int32(3), np.int32(0), np.int32(4), np.int32(1), np.int32(1), np.int32(1), np.int32(5)]\n" ] }, { "data": { + "text/html": [ + "
\n", + "
" + ], "text/plain": [ "PlotResult(data= Size: 262kB\n", "Dimensions: (time: 2976, variable: 5)\n", @@ -4248,18 +4255,28 @@ " 'xaxis': {'anchor': 'y', 'domain': [0.0, 1.0], 'title': {'text': 'Time in h'}},\n", " 'yaxis': {'anchor': 'x', 'domain': [0.0, 1.0], 'title': {'text': 'Value'}}}\n", "}))" - ], - "text/html": [ - "
\n", - "
" ] }, - "execution_count": 5, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], - "execution_count": 5 + "source": [ + "# Cluster with 8 typical days (from 31 days)\n", + "fs_clustering_demo = flow_system.copy()\n", + "fs_clustered_demo = fs_clustering_demo.transform.cluster(n_clusters=8, cluster_duration='1D')\n", + "\n", + "# Get the clustering object to access tsam results\n", + "clustering = fs_clustered_demo._clustering_info['clustering']\n", + "\n", + "print(f'Original: {len(flow_system.timesteps)} timesteps ({len(flow_system.timesteps) / 96:.0f} days)')\n", + "print(f'Clustered: {clustering.nr_of_periods} typical days')\n", + "print(f'Cluster assignments: {list(clustering.tsam.clusterOrder)}')\n", + "\n", + "# Plot original vs aggregated data\n", + "clustering.plot()" + ] }, { "cell_type": "markdown", @@ -4269,13 +4286,23 @@ }, { "cell_type": "code", + "execution_count": 5, "id": "11", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:23:12.938789Z", - "start_time": "2025-12-14T15:23:10.060235Z" + "end_time": "2025-12-14T15:30:57.850628Z", + "start_time": "2025-12-14T15:30:55.260086Z" } }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Comparing: HeatDemand(Q_th)|fixed_relative_profile\n" + ] + } + ], "source": [ "# Test different numbers of clusters\n", "cluster_configs = [4, 8, 12, 16]\n", @@ -4289,27 +4316,56 @@ "# Use heat demand for comparison (most relevant for district heating)\n", "heat_demand_col = [c for c in clustering_results[4].original_data.columns if 'Heat' in c or 'Q_th' in c][0]\n", "print(f'Comparing: {heat_demand_col}')" - ], - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Comparing: HeatDemand(Q_th)|fixed_relative_profile\n" - ] - } - ], - "execution_count": 6 + ] }, { "cell_type": "code", + "execution_count": 6, "id": "12", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:23:13.151753Z", - "start_time": "2025-12-14T15:23:13.034772Z" + "end_time": "2025-12-14T15:30:57.963460Z", + "start_time": "2025-12-14T15:30:57.895913Z" } }, + "outputs": [ + { + "data": { + "text/html": [ + "
" + ] + }, + "jetTransient": { + "display_id": null + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "# Compare the aggregated data for each configuration\n", "fig = make_subplots(\n", @@ -4361,105 +4417,32 @@ "fig.update_xaxes(title_text='Timestep', row=2)\n", "fig.update_yaxes(title_text='Heat Demand [MW]', col=1)\n", "fig.show()" - ], - "outputs": [ - { - "data": { - "text/html": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data", - "jetTransient": { - "display_id": null - } - } - ], - "execution_count": 7 + ] }, { "cell_type": "code", + "execution_count": 7, "id": "13", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:23:13.465042Z", - "start_time": "2025-12-14T15:23:13.306250Z" + "end_time": "2025-12-14T15:30:58.102726Z", + "start_time": "2025-12-14T15:30:58.030163Z" } }, - "source": [ - "# Calculate error metrics for each configuration\n", - "metrics = []\n", - "for n, clustering in clustering_results.items():\n", - " original = clustering.original_data[heat_demand_col].values\n", - " aggregated = clustering.aggregated_data[heat_demand_col].values\n", - "\n", - " rmse = np.sqrt(np.mean((original - aggregated) ** 2))\n", - " mae = np.mean(np.abs(original - aggregated))\n", - " max_error = np.max(np.abs(original - aggregated))\n", - " correlation = np.corrcoef(original, aggregated)[0, 1]\n", - "\n", - " metrics.append(\n", - " {\n", - " 'Typical Days': n,\n", - " 'RMSE': rmse,\n", - " 'MAE': mae,\n", - " 'Max Error': max_error,\n", - " 'Correlation': correlation,\n", - " }\n", - " )\n", - "\n", - "metrics_df = pd.DataFrame(metrics).set_index('Typical Days')\n", - "metrics_df.style.format(\n", - " {\n", - " 'RMSE': '{:.2f}',\n", - " 'MAE': '{:.2f}',\n", - " 'Max Error': '{:.2f}',\n", - " 'Correlation': '{:.4f}',\n", - " }\n", - ")" - ], "outputs": [ { "data": { - "text/plain": [ - "" - ], "text/html": [ "\n", - "\n", + "
... [styled-table markup stripped during extraction; recoverable content — error metrics vs. number of typical days:
  Typical Days |  RMSE |  MAE | Max Error | Correlation
             4 |  4.84 | 4.52 |     12.19 |      0.9905
             8 |  3.45 | 2.60 |      6.89 |      0.9952
            12 |  1.68 | 0.83 |      6.39 |      0.9989
            16 |  0.37 | 0.25 |      1.86 |      0.9999
] ...
\n" + ], + "text/plain": [ + "" ] }, - "execution_count": 8, + "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], - "execution_count": 8 - }, - { - "cell_type": "markdown", - "id": "t8et37i26k", - "metadata": {}, - "source": "## Part 2: Segmentation (Inner-Period Aggregation)\n\n**Segmentation** reduces the number of timesteps *within* each period by grouping similar consecutive timesteps.\n\nFor example, with 15-minute resolution data:\n- Original day: 96 timesteps (24h × 4 per hour)\n- Segmented (4 segments): 4 representative timesteps per day\n\nThis is useful when you have high-resolution data but don't need that granularity for your analysis.\n\n```python\nfs.transform.cluster(\n n_clusters=None, # Skip clustering (keep all periods)\n cluster_duration='1D', # Segment within each day\n n_segments=4, # Reduce to 4 segments per day\n)\n```" - }, - { - "cell_type": "code", - "id": "lbpmw6mnb5k", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:23:14.883574Z", - "start_time": "2025-12-14T15:23:13.565649Z" - } - }, "source": [ - "# Segmentation only: reduce 96 timesteps/day to 4 segments/day\n", - "fs_segmentation_demo = flow_system.copy()\n", - "fs_segmented_demo = fs_segmentation_demo.transform.cluster(\n", - " n_clusters=None, # No clustering - keep all 31 days\n", - " cluster_duration='1D', # Segment within each day\n", - " n_segments=4, # 4 segments per day\n", - ")\n", + "# Calculate error metrics for each configuration\n", + "metrics = []\n", + "for n, clustering in clustering_results.items():\n", + " original = clustering.original_data[heat_demand_col].values\n", + " aggregated = clustering.aggregated_data[heat_demand_col].values\n", "\n", - "# Get the clustering object\n", + " rmse = np.sqrt(np.mean((original - aggregated) ** 2))\n", + " mae = np.mean(np.abs(original - aggregated))\n", + " max_error = np.max(np.abs(original - aggregated))\n", + " correlation = np.corrcoef(original, aggregated)[0, 1]\n", + "\n", + " metrics.append(\n", + " {\n", + " 'Typical Days': n,\n", + " 'RMSE': rmse,\n", + " 'MAE': mae,\n", + " 'Max Error': max_error,\n", + " 'Correlation': correlation,\n", + " }\n", + " )\n", + "\n", + "metrics_df = pd.DataFrame(metrics).set_index('Typical Days')\n", + "metrics_df.style.format(\n", + " {\n", + " 'RMSE': '{:.2f}',\n", + " 'MAE': '{:.2f}',\n", + " 'Max Error': '{:.2f}',\n", + " 'Correlation': '{:.4f}',\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "t8et37i26k", + "metadata": {}, + "source": "## Part 2: Segmentation (Inner-Period Aggregation)\n\n**Segmentation** reduces the number of timesteps *within* each period by grouping similar consecutive timesteps.\n\nFor example, with 15-minute resolution data:\n- Original day: 96 timesteps (24h × 4 per hour)\n- Segmented (12 segments): 12 representative timesteps per day (~2 hours each)\n\nThis is useful when you have high-resolution data but don't need that granularity for your analysis.\n\n```python\nfs.transform.cluster(\n n_clusters=None, # Skip clustering (keep all periods)\n cluster_duration='1D', # Segment within each day\n n_segments=12, # Reduce to 12 segments per day\n)\n```" + }, + { + "cell_type": "code", + "execution_count": null, + "id": "lbpmw6mnb5k", + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T15:30:59.400877Z", + "start_time": "2025-12-14T15:30:58.128623Z" + } + }, + "outputs": [], + "source": [ + "# Segmentation only: reduce 96 timesteps/day to 12 segments/day\n", + "fs_segmentation_demo = flow_system.copy()\n", + 
"fs_segmented_demo = fs_segmentation_demo.transform.cluster(\n", + " n_clusters=None, # No clustering - keep all 31 days\n", + " cluster_duration='1D', # Segment within each day\n", + " n_segments=12, # 12 segments per day (~2 hours each)\n", + ")\n", + "\n", + "# Get the clustering object\n", "segmentation = fs_segmented_demo._clustering_info['clustering']\n", "\n", "print('Original: 96 timesteps per day (15-min resolution)')\n", - "print(f'Segmented: {segmentation.n_segments} segments per day')\n", + "print(f'Segmented: {segmentation.n_segments} segments per day (~2 hours each)')\n", "\n", "# Plot original vs segmented data\n", "segmentation.plot()" - ], - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Original: 96 timesteps per day (15-min resolution)\n", - "Segmented: 4 segments per day\n" - ] - }, - { - "data": { - "text/plain": [ - "PlotResult(data= Size: 262kB\n", - "Dimensions: (time: 2976, variable: 5)\n", - "Coordinates:\n", - " * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00\n", - " * variable (variable) object 40B 'ElecDemand(P_el)|fixed_relative_profil...\n", - "Data variables:\n", - " original (variable, time) float64 119kB 58.39 58.36 58.11 ... 152.1 151.0\n", - " aggregated (variable, time) float64 119kB 51.79 51.79 51.79 ... 163.1 163.1, figure=Figure({\n", - " 'data': [{'hovertemplate': ('variable=Original - ElecDemand' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Original - ElecDemand(P_el)|fixed_relative_profile',\n", - " 'line': {'color': '#636EFA', 'dash': 'dash'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Original - ElecDemand(P_el)|fixed_relative_profile',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('UrgehesxTUCuR+F6FC5NQK5H4XoUDk' ... 'G4HoXLVEDhehSuR8FTQAAAAAAA8FRA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Original - GasGrid(Q_' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Original - GasGrid(Q_Gas)|costs|per_flow_hour',\n", - " 'line': {'color': '#EF553B', 'dash': 'dash'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Original - GasGrid(Q_Gas)|costs|per_flow_hour',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('mG4Sg8A6QECYbhKDwDpAQJhuEoPAOk' ... '66SQxSQEA1XrpJDFJAQDVeukkMUkBA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Original - GridBuy(P_' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Original - GridBuy(P_el)|costs|per_flow_hour',\n", - " 'line': {'color': '#00CC96', 'dash': 'dash'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Original - GridBuy(P_el)|costs|per_flow_hour',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('8tJNYhDYH0Dy0k1iENgfQPLSTWIQ2B' ... 'bz/dT4RkBGtvP91PhGQEa28/3U+EZA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Original - GridSell(P' ... '}
 [plotly figure output: 'Original vs Aggregated Data (original = ---)' — dashed traces = original, solid traces = aggregated profiles for ElecDemand(P_el)|fixed_relative_profile, GasGrid(Q_Gas)|costs|per_flow_hour, GridBuy(P_el)|costs|per_flow_hour, GridSell(P_el)|costs|per_flow_hour and HeatDemand(Q_th)|fixed_relative_profile; x-axis 'Time in h', y-axis 'Value']
" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "execution_count": 9 + ] }, { "cell_type": "markdown", @@ -4750,16 +4570,18 @@ }, { "cell_type": "code", + "execution_count": null, "id": "do29lhcinx7", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:23:18.123816Z", - "start_time": "2025-12-14T15:23:15.186456Z" + "end_time": "2025-12-14T15:31:06.393171Z", + "start_time": "2025-12-14T15:31:00.010062Z" } }, + "outputs": [], "source": [ "# Test different numbers of segments\n", - "segment_configs = [4, 8, 12, 24]\n", + "segment_configs = [6, 12, 24, 48]\n", "segmentation_results = {}\n", "\n", "for n_seg in segment_configs:\n", @@ -4768,29 +4590,21 @@ " segmentation_results[n_seg] = fs_seg._clustering_info['clustering']\n", "\n", "# Use heat demand for comparison\n", - "heat_demand_col = [c for c in segmentation_results[4].original_data.columns if 'Heat' in c or 'Q_th' in c][0]\n", + "heat_demand_col = [c for c in segmentation_results[6].original_data.columns if 'Heat' in c or 'Q_th' in c][0]\n", "print(f'Comparing: {heat_demand_col}')" - ], - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Comparing: HeatDemand(Q_th)|fixed_relative_profile\n" - ] - } - ], - "execution_count": 10 + ] }, { "cell_type": "code", + "execution_count": null, "id": "21athrtuavw", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:23:18.237902Z", - "start_time": "2025-12-14T15:23:18.213686Z" + "end_time": "2025-12-14T15:31:07.706095Z", + "start_time": "2025-12-14T15:31:07.626274Z" } }, + "outputs": [], "source": [ "# Compare the segmented data for first day only (clearer visualization)\n", "fig = make_subplots(\n", @@ -4845,105 +4659,32 @@ "fig.update_xaxes(title_text='Timestep', row=2)\n", "fig.update_yaxes(title_text='Heat Demand [MW]', col=1)\n", "fig.show()" - ], - "outputs": [ - { - "data": { - "text/html": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data", - "jetTransient": { - "display_id": null - } - } - ], - "execution_count": 11 + ] }, { "cell_type": "code", + "execution_count": 11, "id": "phpx36k23p", "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:23:18.572642Z", - "start_time": "2025-12-14T15:23:18.550552Z" - } - }, - "source": [ - "# Calculate error metrics for segmentation\n", - "seg_metrics = []\n", - "for n_seg, seg_result in segmentation_results.items():\n", - " original = seg_result.original_data[heat_demand_col].values\n", - " aggregated = seg_result.aggregated_data[heat_demand_col].values\n", - "\n", - " rmse = np.sqrt(np.mean((original - aggregated) ** 2))\n", - " mae = np.mean(np.abs(original - aggregated))\n", - " max_error = np.max(np.abs(original - aggregated))\n", - " correlation = np.corrcoef(original, aggregated)[0, 1]\n", - "\n", - " seg_metrics.append(\n", - " {\n", - " 'Segments': n_seg,\n", - " 'RMSE': rmse,\n", - " 'MAE': mae,\n", - " 'Max Error': max_error,\n", - " 'Correlation': correlation,\n", - " }\n", - " )\n", - "\n", - "seg_metrics_df = pd.DataFrame(seg_metrics).set_index('Segments')\n", - "seg_metrics_df.style.format(\n", - " {\n", - " 'RMSE': '{:.2f}',\n", - " 'MAE': '{:.2f}',\n", - " 'Max Error': '{:.2f}',\n", - " 'Correlation': '{:.4f}',\n", - " }\n", - ")" - ], + "ExecuteTime": { + "end_time": "2025-12-14T15:31:08.542772Z", + "start_time": "2025-12-14T15:31:08.441233Z" + } + }, "outputs": [ { "data": { - "text/plain": [ - "" - ], "text/html": [ "\n", - "\n", + "
\n", " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", " \n", @@ -4955,66 +4696,102 @@ " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", "
 RMSEMAEMax ErrorCorrelationRMSEMAEMax ErrorCorrelation
Segments
415.6712.3443.150.8954415.6712.3443.150.8954
88.246.4935.360.972288.246.4935.360.9722
125.894.5323.950.9859125.894.5323.950.9859
242.732.1211.380.9970242.732.1211.380.9970
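These metrics can also drive the choice of segment count programmatically, e.g. picking the coarsest segmentation that still meets an accuracy target. A small sketch, assuming the `segmentation_results` dict and `heat_demand_col` built above (the 0.99 threshold is an assumption, not a recommendation from this notebook):

```python
# Pick the smallest segment count whose aggregated profile stays
# highly correlated with the original heat demand.
target_correlation = 0.99  # accuracy threshold (assumption)

for n_seg in sorted(segmentation_results):
    res = segmentation_results[n_seg]
    original = res.original_data[heat_demand_col].values
    aggregated = res.aggregated_data[heat_demand_col].values
    corr = np.corrcoef(original, aggregated)[0, 1]
    if corr >= target_correlation:
        print(f'{n_seg} segments per day suffice (r = {corr:.4f})')
        break
```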
\n" + ], + "text/plain": [ + "" ] }, - "execution_count": 12, + "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], - "execution_count": 12 + "source": [ + "# Calculate error metrics for segmentation\n", + "seg_metrics = []\n", + "for n_seg, seg_result in segmentation_results.items():\n", + " original = seg_result.original_data[heat_demand_col].values\n", + " aggregated = seg_result.aggregated_data[heat_demand_col].values\n", + "\n", + " rmse = np.sqrt(np.mean((original - aggregated) ** 2))\n", + " mae = np.mean(np.abs(original - aggregated))\n", + " max_error = np.max(np.abs(original - aggregated))\n", + " correlation = np.corrcoef(original, aggregated)[0, 1]\n", + "\n", + " seg_metrics.append(\n", + " {\n", + " 'Segments': n_seg,\n", + " 'RMSE': rmse,\n", + " 'MAE': mae,\n", + " 'Max Error': max_error,\n", + " 'Correlation': correlation,\n", + " }\n", + " )\n", + "\n", + "seg_metrics_df = pd.DataFrame(seg_metrics).set_index('Segments')\n", + "seg_metrics_df.style.format(\n", + " {\n", + " 'RMSE': '{:.2f}',\n", + " 'MAE': '{:.2f}',\n", + " 'Max Error': '{:.2f}',\n", + " 'Correlation': '{:.4f}',\n", + " }\n", + ")" + ] }, { "cell_type": "markdown", "id": "u6sc5ek0rya", "metadata": {}, - "source": "## Part 3: Combined Clustering + Segmentation\n\nFor maximum speedup, combine both techniques:\n\n```python\nfs.transform.cluster(\n n_clusters=8, # 8 typical days (inter-period)\n cluster_duration='1D',\n n_segments=4, # 4 segments per day (inner-period)\n)\n```\n\nThis reduces 2,976 timesteps to just 8 × 4 = 32 representative timesteps!" + "source": "## Part 3: Combined Clustering + Segmentation\n\nFor maximum speedup, combine both techniques:\n\n```python\nfs.transform.cluster(\n n_clusters=8, # 8 typical days (inter-period)\n cluster_duration='1D',\n n_segments=12, # 12 segments per day (inner-period)\n)\n```\n\nThis reduces 2,976 timesteps to just 8 × 12 = 96 representative timesteps!" }, { "cell_type": "code", + "execution_count": null, "id": "j24sbfpl0x", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:23:19.470745Z", - "start_time": "2025-12-14T15:23:18.603866Z" + "end_time": "2025-12-14T15:31:10.678388Z", + "start_time": "2025-12-14T15:31:09.101883Z" } }, + "outputs": [], "source": [ - "# Combined: 8 typical days × 4 segments each\n", + "# Combined: 8 typical days × 12 segments each\n", "fs_combined_demo = flow_system.copy()\n", "fs_combined = fs_combined_demo.transform.cluster(\n", " n_clusters=8,\n", " cluster_duration='1D',\n", - " n_segments=4,\n", + " n_segments=12,\n", ")\n", "\n", "combined_clustering = fs_combined._clustering_info['clustering']\n", @@ -5026,206 +4803,7 @@ "\n", "# Plot the combined result\n", "combined_clustering.plot()" - ], - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Original: 2976 timesteps\n", - "Combined: 8 typical days × 4 segments = 32 representative timesteps\n" - ] - }, - { - "data": { - "text/plain": [ - "PlotResult(data= Size: 262kB\n", - "Dimensions: (time: 2976, variable: 5)\n", - "Coordinates:\n", - " * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00\n", - " * variable (variable) object 40B 'ElecDemand(P_el)|fixed_relative_profil...\n", - "Data variables:\n", - " original (variable, time) float64 119kB 58.39 58.36 58.11 ... 152.1 151.0\n", - " aggregated (variable, time) float64 119kB 51.79 51.79 51.79 ... 163.1 163.1, figure=Figure({\n", - " 'data': [{'hovertemplate': ('variable=Original - ElecDemand' ... '}
 [plotly figure output: 'Original vs Aggregated Data (original = ---)' for the combined clustering — dashed traces = original, solid traces = aggregated profiles for ElecDemand(P_el), GasGrid(Q_Gas), GridBuy(P_el), GridSell(P_el) and HeatDemand(Q_th); x-axis 'Time in h', y-axis 'Value']
" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], - "execution_count": 13 + ] }, { "cell_type": "markdown", @@ -5237,9 +4815,11 @@ "cell_type": "code", "id": "15", "metadata": { + "jupyter": { + "is_executing": true + }, "ExecuteTime": { - "end_time": "2025-12-14T15:23:35.046838Z", - "start_time": "2025-12-14T15:23:19.572483Z" + "start_time": "2025-12-14T15:34:32.897171Z" } }, "source": [ @@ -5261,23 +4841,23 @@ "name": "stdout", "output_type": "stream", "text": [ - "\u001b[2m2025-12-14 16:23:19.851\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", - "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", - "\u001b[2m2025-12-14 16:23:19.935\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", - "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n" + "\u001B[2m2025-12-14 16:34:33.058\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", + "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", + "\u001B[2m2025-12-14 16:34:33.146\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", + "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). 
Consider using status_parameters to allow the Flow to be switched active and inactive.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Writing constraints.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 64/64 [00:00<00:00, 160.20it/s]\n", - "Writing continuous variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 55/55 [00:00<00:00, 1037.01it/s]\n", - "Writing binary variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 5/5 [00:00<00:00, 1103.71it/s]\n" + "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 64/64 [00:00<00:00, 157.71it/s]\n", + "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 55/55 [00:00<00:00, 1053.62it/s]\n", + "Writing binary variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 5/5 [00:00<00:00, 886.33it/s]\n" ] }, { @@ -5285,7 +4865,7 @@ "output_type": "stream", "text": [ "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", - "MIP linopy-problem-56t2tceu has 89316 rows; 80386 cols; 264919 nonzeros; 5955 integer variables (5955 binary)\n", + "MIP linopy-problem-fnex2i30 has 89316 rows; 80386 cols; 264919 nonzeros; 5955 integer variables (5955 binary)\n", "Coefficient ranges:\n", " Matrix [1e-05, 1e+03]\n", " Cost [1e+00, 1e+00]\n", @@ -5299,55 +4879,25 @@ "\n", "Solving MIP model with:\n", " 30836 rows\n", - " 17685 cols (5955 binary, 0 integer, 0 implied int., 11730 continuous, 0 domain fixed)\n", - " 89182 nonzeros\n", - "\n", - "Src: B => Branching; C => Central rounding; F => Feasibility pump; H => Heuristic;\n", - " I => Shifting; J => Feasibility jump; L => Sub-MIP; P => Empty MIP; R => Randomized rounding;\n", - " S => Solve LP; T => Evaluate node; U => Unbounded; X => User solution; Y => HiGHS solution;\n", - " Z => ZI Round; l => Trivial lower; p => Trivial point; u => Trivial upper; z => Trivial zero\n", - "\n", - " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", - "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. | LpIters Time\n", - "\n", - " 0 0 0 0.00% -48251946.82856 inf inf 0 0 0 0 0.4s\n", - " R 0 0 0 0.00% 2209206.133553 2278967.860722 3.06% 0 0 0 15439 1.1s\n", - " C 0 0 0 0.00% 2209206.133553 2276813.637485 2.97% 7380 2937 0 18513 3.5s\n", - " 0 0 0 0.00% 2209206.133553 2276813.637485 2.97% 7578 2988 0 18630 8.6s\n", - " L 0 0 0 0.00% 2209206.133553 2209206.150262 0.00% 7578 2989 0 18631 13.4s\n", - " 1 0 1 100.00% 2209206.133553 2209206.150262 0.00% 7578 2989 0 21605 13.5s\n", - "\n", - "Solving report\n", - " Model linopy-problem-56t2tceu\n", - " Status Optimal\n", - " Primal bound 2209206.15026\n", - " Dual bound 2209206.13355\n", - " Gap 0% (tolerance: 1%)\n", - " P-D integral 0.366913523912\n", - " Solution status feasible\n", - " 2209206.15026 (objective)\n", - " 0 (bound viol.)\n", - " 0 (int. 
viol.)\n", - " 0 (row viol.)\n", - " Timing 13.45\n", - " Max sub-MIP depth 2\n", - " Nodes 1\n", - " Repair LPs 0\n", - " LP iterations 21605\n", - " 0 (strong br.)\n", - " 3192 (separation)\n", - " 2974 (heuristics)\n", - "Full optimization: 15.47 seconds\n", - "Cost: 2,209,206 €\n", + " 17685 cols (5955 binary, 0 integer, 0 implied int., 11730 continuous, 0 domain fixed)\n", + " 89182 nonzeros\n", "\n", - "Optimized sizes:\n", - " CHP(Q_th): 300.0\n", - " Boiler(Q_th): 0.0\n", - " Storage: 1000.0\n" + "Src: B => Branching; C => Central rounding; F => Feasibility pump; H => Heuristic;\n", + " I => Shifting; J => Feasibility jump; L => Sub-MIP; P => Empty MIP; R => Randomized rounding;\n", + " S => Solve LP; T => Evaluate node; U => Unbounded; X => User solution; Y => HiGHS solution;\n", + " Z => ZI Round; l => Trivial lower; p => Trivial point; u => Trivial upper; z => Trivial zero\n", + "\n", + " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", + "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. | LpIters Time\n", + "\n", + " 0 0 0 0.00% -48251946.82856 inf inf 0 0 0 0 0.8s\n", + " R 0 0 0 0.00% 2209206.133553 2278967.860722 3.06% 0 0 0 15439 2.4s\n", + " C 0 0 0 0.00% 2209206.133553 2276813.637485 2.97% 7380 2937 0 18513 5.0s\n", + " 0 0 0 0.00% 2209206.133553 2276813.637485 2.97% 7578 2989 0 18631 10.0s\n" ] } ], - "execution_count": 14 + "execution_count": null }, { "cell_type": "markdown", @@ -5357,54 +4907,36 @@ }, { "cell_type": "code", + "execution_count": 14, "id": "17", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:23:38.482737Z", - "start_time": "2025-12-14T15:23:35.095109Z" + "end_time": "2025-12-14T15:31:34.009387Z", + "start_time": "2025-12-14T15:31:30.196907Z" } }, - "source": [ - "start = timeit.default_timer()\n", - "\n", - "# Cluster into 8 typical days\n", - "fs_clustered = flow_system.transform.cluster(\n", - " n_clusters=8,\n", - " cluster_duration='1D',\n", - ")\n", - "\n", - "fs_clustered.optimize(solver)\n", - "time_clustered = timeit.default_timer() - start\n", - "\n", - "print(f'Clustered optimization: {time_clustered:.2f} seconds')\n", - "print(f'Cost: {fs_clustered.solution[\"costs\"].item():,.0f} €')\n", - "print(f'Speedup: {time_full / time_clustered:.1f}x')\n", - "print('\\nOptimized sizes:')\n", - "for name, size in fs_clustered.statistics.sizes.items():\n", - " print(f' {name}: {float(size.item()):.1f}')" - ], "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "\u001b[2m2025-12-14 16:23:35.770\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", - "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). 
Consider using status_parameters to allow the Flow to be switched active and inactive.\n", - "\u001b[2m2025-12-14 16:23:35.843\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", - "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n" + "\u001B[2m2025-12-14 16:31:30.931\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", + "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", + "\u001B[2m2025-12-14 16:31:31.015\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", + "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Writing constraints.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 81/81 [00:00<00:00, 190.84it/s]\n", - "Writing continuous variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 55/55 [00:00<00:00, 830.91it/s]\n", - "Writing binary variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 5/5 [00:00<00:00, 1081.84it/s]\n" + "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 81/81 [00:00<00:00, 131.46it/s]\n", + "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 55/55 [00:00<00:00, 956.49it/s]\n", + "Writing binary variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 5/5 [00:00<00:00, 747.94it/s]\n" ] }, { @@ -5412,7 +4944,7 @@ "output_type": "stream", "text": [ "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", - "MIP linopy-problem-7bbt94cv has 126461 rows; 80386 cols; 339209 nonzeros; 5955 integer variables (5955 binary)\n", + "MIP linopy-problem-gj5jp5dp has 126461 rows; 80386 cols; 339209 nonzeros; 5955 integer variables (5955 binary)\n", "Coefficient ranges:\n", " Matrix [1e-05, 1e+03]\n", " Cost [1e+00, 1e+00]\n", @@ -5437,24 +4969,24 @@ " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. 
| LpIters Time\n", "\n", - " 0 0 0 0.00% -35212528.89731 inf inf 0 0 0 0 0.2s\n", - " 0 0 0 0.00% 2215408.582854 inf inf 0 0 0 3609 0.3s\n", - " R 0 0 0 0.00% 2215408.582854 2215424.331523 0.00% 4826 751 0 4378 0.7s\n", - " 1 0 1 100.00% 2215408.582854 2215424.331523 0.00% 4826 751 0 4378 0.8s\n", + " 0 0 0 0.00% -35212528.89731 inf inf 0 0 0 0 0.3s\n", + " 0 0 0 0.00% 2215408.582854 inf inf 0 0 0 3609 0.4s\n", + " R 0 0 0 0.00% 2215408.582854 2215424.331523 0.00% 4826 751 0 4378 0.9s\n", + " 1 0 1 100.00% 2215408.582854 2215424.331523 0.00% 4826 751 0 4378 0.9s\n", "\n", "Solving report\n", - " Model linopy-problem-7bbt94cv\n", + " Model linopy-problem-gj5jp5dp\n", " Status Optimal\n", " Primal bound 2215424.33152\n", " Dual bound 2215408.58285\n", " Gap 0.000711% (tolerance: 1%)\n", - " P-D integral 3.17706101743e-08\n", + " P-D integral 7.89234528479e-08\n", " Solution status feasible\n", " 2215424.33152 (objective)\n", " 0 (bound viol.)\n", " 0 (int. viol.)\n", " 0 (row viol.)\n", - " Timing 0.75\n", + " Timing 0.91\n", " Max sub-MIP depth 0\n", " Nodes 1\n", " Repair LPs 0\n", @@ -5462,9 +4994,9 @@ " 0 (strong br.)\n", " 769 (separation)\n", " 0 (heuristics)\n", - "Clustered optimization: 3.38 seconds\n", + "Clustered optimization: 3.81 seconds\n", "Cost: 2,215,424 €\n", - "Speedup: 4.6x\n", + "Speedup: 4.9x\n", "\n", "Optimized sizes:\n", " CHP(Q_th): 300.0\n", @@ -5473,65 +5005,64 @@ ] } ], - "execution_count": 15 + "source": [ + "start = timeit.default_timer()\n", + "\n", + "# Cluster into 8 typical days\n", + "fs_clustered = flow_system.transform.cluster(\n", + " n_clusters=8,\n", + " cluster_duration='1D',\n", + ")\n", + "\n", + "fs_clustered.optimize(solver)\n", + "time_clustered = timeit.default_timer() - start\n", + "\n", + "print(f'Clustered optimization: {time_clustered:.2f} seconds')\n", + "print(f'Cost: {fs_clustered.solution[\"costs\"].item():,.0f} €')\n", + "print(f'Speedup: {time_full / time_clustered:.1f}x')\n", + "print('\\nOptimized sizes:')\n", + "for name, size in fs_clustered.statistics.sizes.items():\n", + " print(f' {name}: {float(size.item()):.1f}')" + ] }, { "cell_type": "markdown", "id": "qk9l29yv32p", "metadata": {}, - "source": "### Segmentation Only (4 Segments per Day)" + "source": "### Segmentation Only (12 Segments per Day)" }, { "cell_type": "code", + "execution_count": 15, "id": "puisldf6fa", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:23:41.658320Z", - "start_time": "2025-12-14T15:23:38.516100Z" + "end_time": "2025-12-14T15:31:37.427466Z", + "start_time": "2025-12-14T15:31:34.040817Z" } }, - "source": [ - "start = timeit.default_timer()\n", - "\n", - "# Segmentation only: reduce timesteps within each day\n", - "fs_segmented = flow_system.transform.cluster(\n", - " n_clusters=None, # No clustering\n", - " cluster_duration='1D',\n", - " n_segments=4, # 4 segments per day\n", - ")\n", - "\n", - "fs_segmented.optimize(solver)\n", - "time_segmented = timeit.default_timer() - start\n", - "\n", - "print(f'Segmentation optimization: {time_segmented:.2f} seconds')\n", - "print(f'Cost: {fs_segmented.solution[\"costs\"].item():,.0f} €')\n", - "print(f'Speedup: {time_full / time_segmented:.1f}x')\n", - "print('\\nOptimized sizes:')\n", - "for name, size in fs_segmented.statistics.sizes.items():\n", - " print(f' {name}: {float(size.item()):.1f}')" - ], "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "\u001b[2m2025-12-14 16:23:39.304\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow CHP(Q_th) has a 
relative_minimum of Size: 24kB\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", - "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", - "\u001b[2m2025-12-14 16:23:39.395\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", - "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n" + "\u001B[2m2025-12-14 16:31:34.760\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", + "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", + "\u001B[2m2025-12-14 16:31:34.852\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", + "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). 
Consider using status_parameters to allow the Flow to be switched active and inactive.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Writing constraints.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 81/81 [00:00<00:00, 182.90it/s]\n", - "Writing continuous variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 55/55 [00:00<00:00, 756.45it/s]\n", - "Writing binary variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 5/5 [00:00<00:00, 900.41it/s]\n" + "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 81/81 [00:00<00:00, 117.21it/s]\n", + "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 55/55 [00:00<00:00, 858.30it/s]\n", + "Writing binary variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 5/5 [00:00<00:00, 993.25it/s]\n" ] }, { @@ -5539,7 +5070,7 @@ "output_type": "stream", "text": [ "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", - "MIP linopy-problem-0bf83fdn has 137800 rows; 80386 cols; 361887 nonzeros; 5955 integer variables (5955 binary)\n", + "MIP linopy-problem-bjm6577n has 137800 rows; 80386 cols; 361887 nonzeros; 5955 integer variables (5955 binary)\n", "Coefficient ranges:\n", " Matrix [1e-05, 1e+03]\n", " Cost [1e+00, 1e+00]\n", @@ -5571,18 +5102,18 @@ " 1 0 1 100.00% 2407140.32574 2407140.32574 0.00% 0 0 0 18 0.2s\n", "\n", "Solving report\n", - " Model linopy-problem-0bf83fdn\n", + " Model linopy-problem-bjm6577n\n", " Status Optimal\n", " Primal bound 2407140.32574\n", " Dual bound 2407140.32574\n", " Gap 0% (tolerance: 1%)\n", - " P-D integral 0.00398780947211\n", + " P-D integral 0.00409873540413\n", " Solution status feasible\n", " 2407140.32574 (objective)\n", " 0 (bound viol.)\n", " 0 (int. 
viol.)\n", " 0 (row viol.)\n", - " Timing 0.19\n", + " Timing 0.17\n", " Max sub-MIP depth 0\n", " Nodes 1\n", " Repair LPs 0\n", @@ -5590,9 +5121,9 @@ " 0 (strong br.)\n", " 0 (separation)\n", " 0 (heuristics)\n", - "Segmentation optimization: 3.14 seconds\n", + "Segmentation optimization: 3.38 seconds\n", "Cost: 2,407,140 €\n", - "Speedup: 4.9x\n", + "Speedup: 5.5x\n", "\n", "Optimized sizes:\n", " CHP(Q_th): 248.4\n", @@ -5601,7 +5132,26 @@ ] } ], - "execution_count": 16 + "source": [ + "start = timeit.default_timer()\n", + "\n", + "# Segmentation only: reduce timesteps within each day\n", + "fs_segmented = flow_system.transform.cluster(\n", + " n_clusters=None, # No clustering\n", + " cluster_duration='1D',\n", + " n_segments=4, # 4 segments per day\n", + ")\n", + "\n", + "fs_segmented.optimize(solver)\n", + "time_segmented = timeit.default_timer() - start\n", + "\n", + "print(f'Segmentation optimization: {time_segmented:.2f} seconds')\n", + "print(f'Cost: {fs_segmented.solution[\"costs\"].item():,.0f} €')\n", + "print(f'Speedup: {time_full / time_segmented:.1f}x')\n", + "print('\\nOptimized sizes:')\n", + "for name, size in fs_segmented.statistics.sizes.items():\n", + " print(f' {name}: {float(size.item()):.1f}')" + ] }, { "cell_type": "markdown", @@ -5611,63 +5161,36 @@ }, { "cell_type": "code", + "execution_count": 16, "id": "frq1vct5l4v", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:23:45.087307Z", - "start_time": "2025-12-14T15:23:41.695745Z" + "end_time": "2025-12-14T15:31:40.701389Z", + "start_time": "2025-12-14T15:31:37.455352Z" } }, - "source": [ - "start = timeit.default_timer()\n", - "\n", - "# Combined: 8 typical days × 4 segments each\n", - "fs_combined_opt = flow_system.transform.cluster(\n", - " n_clusters=8,\n", - " cluster_duration='1D',\n", - " n_segments=4,\n", - ")\n", - "\n", - "fs_combined_opt.optimize(solver)\n", - "time_combined = timeit.default_timer() - start\n", - "\n", - "print(f'Combined optimization: {time_combined:.2f} seconds')\n", - "print(f'Cost: {fs_combined_opt.solution[\"costs\"].item():,.0f} €')\n", - "print(f'Speedup: {time_full / time_combined:.1f}x')\n", - "print('\\nOptimized sizes:')\n", - "for name, size in fs_combined_opt.statistics.sizes.items():\n", - " print(f' {name}: {float(size.item()):.1f}')" - ], "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "\u001b[2m2025-12-14 16:23:42.373\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", - "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", - "\u001b[2m2025-12-14 16:23:42.449\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", - "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). 
Consider using status_parameters to allow the Flow to be switched active and inactive.\n" + "\u001B[2m2025-12-14 16:31:38.177\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", + "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", + "\u001B[2m2025-12-14 16:31:38.252\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", + "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", + "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Writing constraints.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 98/98 [00:00<00:00, 175.72it/s]\n", - "Writing continuous variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 55/55 [00:00<00:00, 1011.42it/s]\n", - "Writing binary variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 5/5 [00:00<00:00, 1102.60it/s]\n", - "Optimization potentially failed: \n", - "Status: warning\n", - "Termination condition: infeasible\n", - "Solution: 0 primals, 0 duals\n", - "Objective: nan\n", - "Solver model: available\n", - "Solver message: Infeasible\n", - "\n" + "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 98/98 [00:00<00:00, 173.62it/s]\n", + "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 55/55 [00:00<00:00, 1005.32it/s]\n", + "Writing binary variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 5/5 [00:00<00:00, 846.58it/s]\n" ] }, { @@ -5675,14 +5198,24 @@ "output_type": "stream", "text": [ "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", - "MIP linopy-problem-mhjwmmv5 has 138973 rows; 80386 cols; 364233 nonzeros; 5955 integer variables (5955 binary)\n", + "MIP linopy-problem-s6_9mxfb has 174945 rows; 80386 cols; 436177 nonzeros; 5955 integer variables (5955 binary)\n", "Coefficient ranges:\n", " Matrix [1e-05, 1e+03]\n", " Cost [1e+00, 1e+00]\n", " Bound [1e+00, 1e+03]\n", " RHS [1e+00, 1e+00]\n", "Presolving model\n", - "Presolve: Infeasible\n", + "41647 rows, 326 cols, 98274 nonzeros 0s\n", + "29654 rows, 197 cols, 62284 nonzeros 0s\n", + "295 rows, 144 cols, 596 nonzeros 0s\n", + "294 rows, 46 cols, 275 nonzeros 0s\n", + "132 rows, 46 cols, 273 nonzeros 0s\n", + "Presolve reductions: rows 132(-174813); columns 46(-80340); nonzeros 273(-435904) \n", + "\n", + "Solving MIP model with:\n", + " 132 rows\n", + " 46 cols (1 binary, 0 integer, 0 implied int., 45 continuous, 0 domain fixed)\n", + " 273 nonzeros\n", "\n", "Src: B => Branching; C => Central rounding; F => Feasibility pump; H => Heuristic;\n", " I => Shifting; J => Feasibility jump; L => Sub-MIP; P => Empty MIP; R => Randomized 
rounding;\n", @@ -5692,40 +5225,61 @@ " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. | LpIters Time\n", "\n", - " 0 0 0 0.00% -inf inf inf 0 0 0 0 0.1s\n", + " J 0 0 0 0.00% -inf 2461461.294894 Large 0 0 0 0 0.2s\n", + " T 0 0 0 0.00% 148461.143926 2407158.736494 93.83% 0 0 0 7 0.2s\n", + " 1 0 1 100.00% 2407158.736494 2407158.736494 0.00% 0 0 0 7 0.2s\n", "\n", "Solving report\n", - " Model linopy-problem-mhjwmmv5\n", - " Status Infeasible\n", - " Primal bound inf\n", - " Dual bound -inf\n", - " Gap inf\n", - " P-D integral 0\n", - " Solution status -\n", - " Timing 0.12\n", + " Model linopy-problem-s6_9mxfb\n", + " Status Optimal\n", + " Primal bound 2407158.73649\n", + " Dual bound 2407158.73649\n", + " Gap 0% (tolerance: 1%)\n", + " P-D integral 0.00427957544687\n", + " Solution status feasible\n", + " 2407158.73649 (objective)\n", + " 0 (bound viol.)\n", + " 0 (int. viol.)\n", + " 0 (row viol.)\n", + " Timing 0.22\n", " Max sub-MIP depth 0\n", - " Nodes 0\n", + " Nodes 1\n", " Repair LPs 0\n", - " LP iterations 0\n" - ] - }, - { - "ename": "NotImplementedError", - "evalue": "Computing infeasibilities is only supported for Gurobi and Xpress solvers. Current solver model type: Highs", - "output_type": "error", - "traceback": [ - "\u001b[31m---------------------------------------------------------------------------\u001b[39m", - "\u001b[31mNotImplementedError\u001b[39m Traceback (most recent call last)", - "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[17]\u001b[39m\u001b[32m, line 10\u001b[39m\n\u001b[32m 3\u001b[39m \u001b[38;5;66;03m# Combined: 8 typical days × 4 segments each\u001b[39;00m\n\u001b[32m 4\u001b[39m fs_combined_opt = flow_system.transform.cluster(\n\u001b[32m 5\u001b[39m n_clusters=\u001b[32m8\u001b[39m,\n\u001b[32m 6\u001b[39m cluster_duration=\u001b[33m'\u001b[39m\u001b[33m1D\u001b[39m\u001b[33m'\u001b[39m,\n\u001b[32m 7\u001b[39m n_segments=\u001b[32m4\u001b[39m,\n\u001b[32m 8\u001b[39m )\n\u001b[32m---> \u001b[39m\u001b[32m10\u001b[39m \u001b[43mfs_combined_opt\u001b[49m\u001b[43m.\u001b[49m\u001b[43moptimize\u001b[49m\u001b[43m(\u001b[49m\u001b[43msolver\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 11\u001b[39m time_combined = timeit.default_timer() - start\n\u001b[32m 13\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33mf\u001b[39m\u001b[33m'\u001b[39m\u001b[33mCombined optimization: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mtime_combined\u001b[38;5;132;01m:\u001b[39;00m\u001b[33m.2f\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m seconds\u001b[39m\u001b[33m'\u001b[39m)\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/PycharmProjects/flixopt_182303/flixopt/optimize_accessor.py:89\u001b[39m, in \u001b[36mOptimizeAccessor.__call__\u001b[39m\u001b[34m(self, solver, normalize_weights)\u001b[39m\n\u001b[32m 57\u001b[39m \u001b[38;5;250m\u001b[39m\u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 58\u001b[39m \u001b[33;03mBuild and solve the optimization model in one step.\u001b[39;00m\n\u001b[32m 59\u001b[39m \n\u001b[32m (...)\u001b[39m\u001b[32m 86\u001b[39m \u001b[33;03m >>> solution = flow_system.optimize(solver).solution\u001b[39;00m\n\u001b[32m 87\u001b[39m \u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 88\u001b[39m \u001b[38;5;28mself\u001b[39m._fs.build_model(normalize_weights)\n\u001b[32m---> \u001b[39m\u001b[32m89\u001b[39m 
\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_fs\u001b[49m\u001b[43m.\u001b[49m\u001b[43msolve\u001b[49m\u001b[43m(\u001b[49m\u001b[43msolver\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 90\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m._fs\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/PycharmProjects/flixopt_182303/flixopt/flow_system.py:1341\u001b[39m, in \u001b[36mFlowSystem.solve\u001b[39m\u001b[34m(self, solver)\u001b[39m\n\u001b[32m 1339\u001b[39m \u001b[38;5;66;03m# Redirect stdout to our buffer\u001b[39;00m\n\u001b[32m 1340\u001b[39m \u001b[38;5;28;01mwith\u001b[39;00m redirect_stdout(f):\n\u001b[32m-> \u001b[39m\u001b[32m1341\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m.\u001b[49m\u001b[43mprint_infeasibilities\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 1343\u001b[39m infeasibilities = f.getvalue()\n\u001b[32m 1344\u001b[39m logger.error(\u001b[33m'\u001b[39m\u001b[33mSuccessfully extracted infeasibilities: \u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;132;01m%s\u001b[39;00m\u001b[33m'\u001b[39m, infeasibilities)\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/PycharmProjects/flixopt_182303/.venv/lib/python3.11/site-packages/linopy/model.py:1470\u001b[39m, in \u001b[36mModel.print_infeasibilities\u001b[39m\u001b[34m(self, display_max_terms)\u001b[39m\n\u001b[32m 1451\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mprint_infeasibilities\u001b[39m(\u001b[38;5;28mself\u001b[39m, display_max_terms: \u001b[38;5;28mint\u001b[39m | \u001b[38;5;28;01mNone\u001b[39;00m = \u001b[38;5;28;01mNone\u001b[39;00m) -> \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[32m 1452\u001b[39m \u001b[38;5;250m \u001b[39m\u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 1453\u001b[39m \u001b[33;03m Print a list of infeasible constraints.\u001b[39;00m\n\u001b[32m 1454\u001b[39m \n\u001b[32m (...)\u001b[39m\u001b[32m 1468\u001b[39m \u001b[33;03m infeasible constraints.\u001b[39;00m\n\u001b[32m 1469\u001b[39m \u001b[33;03m \"\"\"\u001b[39;00m\n\u001b[32m-> \u001b[39m\u001b[32m1470\u001b[39m labels = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mcompute_infeasibilities\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 1471\u001b[39m \u001b[38;5;28mself\u001b[39m.constraints.print_labels(labels, display_max_terms=display_max_terms)\n", - "\u001b[36mFile \u001b[39m\u001b[32m~/PycharmProjects/flixopt_182303/.venv/lib/python3.11/site-packages/linopy/model.py:1355\u001b[39m, in \u001b[36mModel.compute_infeasibilities\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 1349\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mNotImplementedError\u001b[39;00m(\n\u001b[32m 1350\u001b[39m \u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mComputing infeasibilities is not supported for \u001b[39m\u001b[33m'\u001b[39m\u001b[38;5;132;01m{\u001b[39;00msolver_name\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m'\u001b[39m\u001b[33m solver. 
\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 1351\u001b[39m \u001b[33m\"\u001b[39m\u001b[33mOnly Gurobi and Xpress solvers support IIS computation.\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 1352\u001b[39m )\n\u001b[32m 1353\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m 1354\u001b[39m \u001b[38;5;66;03m# We have a solver model but it's not a supported type\u001b[39;00m\n\u001b[32m-> \u001b[39m\u001b[32m1355\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mNotImplementedError\u001b[39;00m(\n\u001b[32m 1356\u001b[39m \u001b[33m\"\u001b[39m\u001b[33mComputing infeasibilities is only supported for Gurobi and Xpress solvers. \u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 1357\u001b[39m \u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mCurrent solver model type: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mtype\u001b[39m(solver_model).\u001b[34m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\n\u001b[32m 1358\u001b[39m )\n", - "\u001b[31mNotImplementedError\u001b[39m: Computing infeasibilities is only supported for Gurobi and Xpress solvers. Current solver model type: Highs" + " LP iterations 7\n", + " 0 (strong br.)\n", + " 0 (separation)\n", + " 0 (heuristics)\n", + "Combined optimization: 3.24 seconds\n", + "Cost: 2,407,159 €\n", + "Speedup: 5.7x\n", + "\n", + "Optimized sizes:\n", + " CHP(Q_th): 248.4\n", + " Boiler(Q_th): 0.0\n", + " Storage: 0.0\n" ] } ], - "execution_count": 17 + "source": [ + "start = timeit.default_timer()\n", + "\n", + "# Combined: 8 typical days × 4 segments each\n", + "fs_combined_opt = flow_system.transform.cluster(\n", + " n_clusters=8,\n", + " cluster_duration='1D',\n", + " n_segments=4,\n", + ")\n", + "\n", + "fs_combined_opt.optimize(solver)\n", + "time_combined = timeit.default_timer() - start\n", + "\n", + "print(f'Combined optimization: {time_combined:.2f} seconds')\n", + "print(f'Cost: {fs_combined_opt.solution[\"costs\"].item():,.0f} €')\n", + "print(f'Speedup: {time_full / time_combined:.1f}x')\n", + "print('\\nOptimized sizes:')\n", + "for name, size in fs_combined_opt.statistics.sizes.items():\n", + " print(f' {name}: {float(size.item()):.1f}')" + ] }, { "cell_type": "markdown", @@ -5737,8 +5291,86 @@ }, { "cell_type": "code", + "execution_count": 17, "id": "19", - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T15:31:40.744033Z", + "start_time": "2025-12-14T15:31:40.734687Z" + } + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
 [styler table output]
                       Time [s]   Cost [€]   CHP Size   Boiler Size   Storage Size   Cost Gap [%]   Speedup
 Full (baseline)          18.62  2,209,206      300.0           0.0           1000           0.00      1.0x
 Clustering (8 days)       3.81  2,215,424      300.0           0.0           1000           0.28      4.9x
 Segmentation (4 seg)      3.38  2,407,140      248.4           0.0              0           8.96      5.5x
 Combined (8×4)            3.24  2,407,159      248.4           0.0              0           8.96      5.7x
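The last two columns follow directly from the recorded times and costs. A quick sanity check against the baseline row, using only values from the table above:

```python
time_full, cost_full = 18.62, 2_209_206  # baseline row

runs = {
    'Clustering (8 days)':  (3.81, 2_215_424),
    'Segmentation (4 seg)': (3.38, 2_407_140),
    'Combined (8×4)':       (3.24, 2_407_159),
}
for name, (t, cost) in runs.items():
    gap = (cost / cost_full - 1) * 100
    print(f'{name}: cost gap {gap:.2f} %, speedup {time_full / t:.1f}x')
# -> 0.28 % / 4.9x, 8.96 % / 5.5x, 8.96 % / 5.7x, matching the table
```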
\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "results = {\n", " 'Full (baseline)': {\n", @@ -5788,9 +5420,7 @@ " 'Speedup': '{:.1f}x',\n", " }\n", ")" - ], - "outputs": [], - "execution_count": null + ] }, { "cell_type": "markdown", @@ -5805,8 +5435,23 @@ }, { "cell_type": "code", + "execution_count": 18, "id": "21", - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T15:31:40.802963Z", + "start_time": "2025-12-14T15:31:40.748748Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Multi-period system: 1344 timesteps × 3 periods\n" + ] + } + ], "source": [ "# Load raw data for multi-period example\n", "data = pd.read_csv('../../examples/resources/Zeitreihen2020.csv', index_col=0, parse_dates=True).sort_index()\n", @@ -5859,14 +5504,31 @@ ")\n", "\n", "print(f'Multi-period system: {len(fs_mp.timesteps)} timesteps × {len(fs_mp.periods)} periods')" - ], - "outputs": [], - "execution_count": null + ] }, { "cell_type": "code", + "execution_count": 19, "id": "22", - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T15:31:41.653370Z", + "start_time": "2025-12-14T15:31:40.807443Z" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001B[2m2025-12-14 16:31:40.808\u001B[0m \u001B[33mWARNING \u001B[0m │ FlowSystem is not connected_and_transformed. Connecting and transforming data now.\n", + "Clustering was applied to 3 period(s):\n", + " - period=2024\n", + " - period=2025\n", + " - period=2026\n" + ] + } + ], "source": [ "# Cluster - each period gets clustered independently\n", "fs_mp_clustered = fs_mp.transform.cluster(n_clusters=4, cluster_duration='1D')\n", @@ -5876,21 +5538,59 @@ "print(f'Clustering was applied to {len(clustering_info[\"clustering_results\"])} period(s):')\n", "for (period, _scenario), _ in clustering_info['clustering_results'].items():\n", " print(f' - period={period}')" - ], - "outputs": [], - "execution_count": null + ] }, { "cell_type": "code", + "execution_count": 20, "id": "23", - "metadata": {}, + "metadata": { + "ExecuteTime": { + "end_time": "2025-12-14T15:31:43.152426Z", + "start_time": "2025-12-14T15:31:41.711392Z" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 38/38 [00:00<00:00, 155.98it/s]\n", + "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 22/22 [00:00<00:00, 686.93it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", + "LP linopy-problem-9o95ub30 has 49392 rows; 40356 cols; 131016 nonzeros\n", + "Coefficient ranges:\n", + " Matrix [2e-01, 2e+01]\n", + " Cost [1e+00, 1e+00]\n", + " Bound [5e+01, 1e+03]\n", + " RHS [0e+00, 0e+00]\n", + "Presolving model\n", + "0 rows, 0 cols, 0 nonzeros 0s\n", + "0 rows, 0 cols, 0 nonzeros 0s\n", + "Presolve reductions: rows 0(-49392); columns 0(-40356); nonzeros 0(-131016) - Reduced to empty\n", + "Performed postsolve\n", + "Solving the original LP from the solution after postsolve\n", + "\n", + "Model name : linopy-problem-9o95ub30\n", + "Model status : Optimal\n", + "Objective value : 1.3352558890e+07\n", + "P-D objective error : 1.7437154695e-15\n", + "HiGHS run time : 0.06\n", + "Multi-period clustered cost: 13,352,559 €\n" + ] + } + 
], "source": [ "# Optimize\n", "fs_mp_clustered.optimize(solver)\n", "print(f'Multi-period clustered cost: {fs_mp_clustered.solution[\"costs\"].sum().item():,.0f} €')" - ], - "outputs": [], - "execution_count": null + ] }, { "cell_type": "markdown", From 6b5a6381fa0a88a0f856dc6c1c474c967df164ee Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 14 Dec 2025 16:37:19 +0100 Subject: [PATCH 019/191] Improve notebook to use more segments --- docs/notebooks/08c-clustering.ipynb | 1222 ++++++++++++++------------- 1 file changed, 634 insertions(+), 588 deletions(-) diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb index f5c22c5b5..c477cfcbe 100644 --- a/docs/notebooks/08c-clustering.ipynb +++ b/docs/notebooks/08c-clustering.ipynb @@ -16,12 +16,12 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": 1, "id": "2", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:34:02.670767Z", - "start_time": "2025-12-14T15:34:02.373234Z" + "end_time": "2025-12-14T15:36:41.528074Z", + "start_time": "2025-12-14T15:36:38.134971Z" } }, "outputs": [ @@ -31,7 +31,7 @@ "flixopt.config.CONFIG" ] }, - "execution_count": 21, + "execution_count": 1, "metadata": {}, "output_type": "execute_result" } @@ -57,14 +57,24 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "id": "4", "metadata": { - "jupyter": { - "is_executing": true + "ExecuteTime": { + "end_time": "2025-12-14T15:36:47.851549Z", + "start_time": "2025-12-14T15:36:47.598380Z" } }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Loaded FlowSystem: 2976 timesteps (31 days at 15-min resolution)\n", + "Components: ['CHP', 'Boiler', 'Storage', 'GasGrid', 'CoalSupply', 'GridBuy', 'GridSell', 'HeatDemand', 'ElecDemand']\n" + ] + } + ], "source": [ "from pathlib import Path\n", "\n", @@ -91,8 +101,8 @@ "id": "5", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:30:51.930035Z", - "start_time": "2025-12-14T15:30:51.835194Z" + "end_time": "2025-12-14T15:36:47.942912Z", + "start_time": "2025-12-14T15:36:47.863137Z" } }, "outputs": [ @@ -199,7 +209,7 @@ "\n", "`).concat($R(e),`\n", "`));var s=new U_({actual:e,expected:t,message:r,operator:i,stackStartFn:n});throw s.generatedMessage=o,s}}Ef.match=function e(t,r,n){T4e(t,r,n,e,\"match\")};Ef.doesNotMatch=function e(t,r,n){T4e(t,r,n,e,\"doesNotMatch\")};function A4e(){for(var e=arguments.length,t=new Array(e),r=0;r{var xE=1e3,bE=xE*60,wE=bE*60,TE=wE*24,FEt=TE*365.25;M4e.exports=function(e,t){t=t||{};var r=typeof e;if(r===\"string\"&&e.length>0)return zEt(e);if(r===\"number\"&&isNaN(e)===!1)return t.long?qEt(e):OEt(e);throw new Error(\"val is not a non-empty string or a valid number. 
[... remainder of the minified Plotly.js bundle embedded in this cell's HTML output — vendored JavaScript with no documentation content; omitted ...]
e=isNaN(this.h)?0:(this.h+120)*SD,t=+this.l,r=isNaN(this.s)?0:this.s*t*(1-t),n=Math.cos(e),i=Math.sin(e);return new _d(255*(t+r*(_Ce*n+SW*i)),255*(t+r*(MW*n+kD*i)),255*(t+r*(qE*n)),this.opacity)}}))});var P2=gu(()=>{AD();pCe();xCe()});function EW(e,t,r,n,i){var a=e*e,o=a*e;return((1-3*e+3*a-o)*t+(4-6*a+3*o)*r+(1+3*e+3*a-3*o)*n+o*i)/6}function CD(e){var t=e.length-1;return function(r){var n=r<=0?r=0:r>=1?(r=1,t-1):Math.floor(r*t),i=e[n],a=e[n+1],o=n>0?e[n-1]:2*i-a,s=n{});function PD(e){var t=e.length;return function(r){var n=Math.floor(((r%=1)<0?++r:r)*t),i=e[(n+t-1)%t],a=e[n%t],o=e[(n+1)%t],s=e[(n+2)%t];return EW((r-n/t)*t,i,a,o,s)}}var kW=gu(()=>{LD()});var PA,CW=gu(()=>{PA=e=>()=>e});function bCe(e,t){return function(r){return e+r*t}}function D6t(e,t,r){return e=Math.pow(e,r),t=Math.pow(t,r)-e,r=1/r,function(n){return Math.pow(e+n*t,r)}}function W_(e,t){var r=t-e;return r?bCe(e,r>180||r<-180?r-360*Math.round(r/360):r):PA(isNaN(e)?t:e)}function wCe(e){return(e=+e)==1?$f:function(t,r){return r-t?D6t(t,r,e):PA(isNaN(t)?r:t)}}function $f(e,t){var r=t-e;return r?bCe(e,r):PA(isNaN(e)?t:e)}var I2=gu(()=>{CW()});function TCe(e){return function(t){var r=t.length,n=new Array(r),i=new Array(r),a=new Array(r),o,s;for(o=0;o{P2();LD();kW();I2();BE=function e(t){var r=wCe(t);function n(i,a){var o=r((i=EA(i)).r,(a=EA(a)).r),s=r(i.g,a.g),l=r(i.b,a.b),u=$f(i.opacity,a.opacity);return function(c){return i.r=o(c),i.g=s(c),i.b=l(c),i.opacity=u(c),i+\"\"}}return n.gamma=e,n}(1);ACe=TCe(CD),SCe=TCe(PD)});function IA(e,t){t||(t=[]);var r=e?Math.min(t.length,e.length):0,n=t.slice(),i;return function(a){for(i=0;i{});function MCe(e,t){return(ID(t)?IA:PW)(e,t)}function PW(e,t){var r=t?t.length:0,n=e?Math.min(r,e.length):0,i=new Array(n),a=new Array(r),o;for(o=0;o{NE();RD()});function DD(e,t){var r=new Date;return e=+e,t=+t,function(n){return r.setTime(e*(1-n)+t*n),r}}var RW=gu(()=>{});function zp(e,t){return e=+e,t=+t,function(r){return e*(1-r)+t*r}}var UE=gu(()=>{});function FD(e,t){var r={},n={},i;(e===null||typeof e!=\"object\")&&(e={}),(t===null||typeof t!=\"object\")&&(t={});for(i in t)i in e?r[i]=X_(e[i],t[i]):n[i]=t[i];return function(a){for(i in r)n[i]=r[i](a);return n}}var DW=gu(()=>{NE()});function F6t(e){return function(){return e}}function z6t(e){return function(t){return e(t)+\"\"}}function zD(e,t){var r=zW.lastIndex=FW.lastIndex=0,n,i,a,o=-1,s=[],l=[];for(e=e+\"\",t=t+\"\";(n=zW.exec(e))&&(i=FW.exec(t));)(a=i.index)>r&&(a=t.slice(r,a),s[o]?s[o]+=a:s[++o]=a),(n=n[0])===(i=i[0])?s[o]?s[o]+=i:s[++o]=i:(s[++o]=null,l.push({i:o,x:zp(n,i)})),r=FW.lastIndex;return r{UE();zW=/[-+]?(?:\\d+\\.?\\d*|\\.?\\d+)(?:[eE][-+]?\\d+)?/g,FW=new RegExp(zW.source,\"g\")});function X_(e,t){var r=typeof t,n;return t==null||r===\"boolean\"?PA(t):(r===\"number\"?zp:r===\"string\"?(n=j_(t))?(t=n,BE):zD:t instanceof j_?BE:t instanceof Date?DD:ID(t)?IA:Array.isArray(t)?PW:typeof t.valueOf!=\"function\"&&typeof t.toString!=\"function\"||isNaN(t)?FD:zp)(e,t)}var NE=gu(()=>{P2();LW();IW();RW();UE();DW();OW();CW();RD()});function ECe(e){var t=e.length;return function(r){return e[Math.max(0,Math.min(t-1,Math.floor(r*t)))]}}var kCe=gu(()=>{});function CCe(e,t){var r=W_(+e,+t);return function(n){var i=r(n);return i-360*Math.floor(i/360)}}var LCe=gu(()=>{I2()});function PCe(e,t){return e=+e,t=+t,function(r){return Math.round(e*(1-r)+t*r)}}var ICe=gu(()=>{});function qW(e,t,r,n,i,a){var 
o,s,l;return(o=Math.sqrt(e*e+t*t))&&(e/=o,t/=o),(l=e*r+t*n)&&(r-=e*l,n-=t*l),(s=Math.sqrt(r*r+n*n))&&(r/=s,n/=s,l/=s),e*n{RCe=180/Math.PI,OD={translateX:0,translateY:0,rotate:0,skewX:0,scaleX:1,scaleY:1}});function FCe(e){let t=new(typeof DOMMatrix==\"function\"?DOMMatrix:WebKitCSSMatrix)(e+\"\");return t.isIdentity?OD:qW(t.a,t.b,t.c,t.d,t.e,t.f)}function zCe(e){return e==null?OD:(qD||(qD=document.createElementNS(\"http://www.w3.org/2000/svg\",\"g\")),qD.setAttribute(\"transform\",e),(e=qD.transform.baseVal.consolidate())?(e=e.matrix,qW(e.a,e.b,e.c,e.d,e.e,e.f)):OD)}var qD,OCe=gu(()=>{DCe()});function qCe(e,t,r,n){function i(u){return u.length?u.pop()+\" \":\"\"}function a(u,c,f,h,d,v){if(u!==f||c!==h){var _=d.push(\"translate(\",null,t,null,r);v.push({i:_-4,x:zp(u,f)},{i:_-2,x:zp(c,h)})}else(f||h)&&d.push(\"translate(\"+f+t+h+r)}function o(u,c,f,h){u!==c?(u-c>180?c+=360:c-u>180&&(u+=360),h.push({i:f.push(i(f)+\"rotate(\",null,n)-2,x:zp(u,c)})):c&&f.push(i(f)+\"rotate(\"+c+n)}function s(u,c,f,h){u!==c?h.push({i:f.push(i(f)+\"skewX(\",null,n)-2,x:zp(u,c)}):c&&f.push(i(f)+\"skewX(\"+c+n)}function l(u,c,f,h,d,v){if(u!==f||c!==h){var _=d.push(i(d)+\"scale(\",null,\",\",null,\")\");v.push({i:_-4,x:zp(u,f)},{i:_-2,x:zp(c,h)})}else(f!==1||h!==1)&&d.push(i(d)+\"scale(\"+f+\",\"+h+\")\")}return function(u,c){var f=[],h=[];return u=e(u),c=e(c),a(u.translateX,u.translateY,c.translateX,c.translateY,f,h),o(u.rotate,c.rotate,f,h),s(u.skewX,c.skewX,f,h),l(u.scaleX,u.scaleY,c.scaleX,c.scaleY,f,h),u=c=null,function(d){for(var v=-1,_=h.length,b;++v<_;)f[(b=h[v]).i]=b.x(d);return f.join(\"\")}}}var BCe,NCe,UCe=gu(()=>{UE();OCe();BCe=qCe(FCe,\"px, \",\"px)\",\"deg)\"),NCe=qCe(zCe,\", \",\")\",\")\")});function VCe(e){return((e=Math.exp(e))+1/e)/2}function q6t(e){return((e=Math.exp(e))-1/e)/2}function B6t(e){return((e=Math.exp(2*e))-1)/(e+1)}var O6t,GCe,HCe=gu(()=>{O6t=1e-12;GCe=function e(t,r,n){function i(a,o){var s=a[0],l=a[1],u=a[2],c=o[0],f=o[1],h=o[2],d=c-s,v=f-l,_=d*d+v*v,b,p;if(_{P2();I2();WCe=jCe(W_),XCe=jCe($f)});function BW(e,t){var r=$f((e=CA(e)).l,(t=CA(t)).l),n=$f(e.a,t.a),i=$f(e.b,t.b),a=$f(e.opacity,t.opacity);return function(o){return e.l=r(o),e.a=n(o),e.b=i(o),e.opacity=a(o),e+\"\"}}var YCe=gu(()=>{P2();I2()});function KCe(e){return function(t,r){var n=e((t=OE(t)).h,(r=OE(r)).h),i=$f(t.c,r.c),a=$f(t.l,r.l),o=$f(t.opacity,r.opacity);return function(s){return t.h=n(s),t.c=i(s),t.l=a(s),t.opacity=o(s),t+\"\"}}}var JCe,$Ce,QCe=gu(()=>{P2();I2();JCe=KCe(W_),$Ce=KCe($f)});function e6e(e){return function t(r){r=+r;function n(i,a){var o=e((i=LA(i)).h,(a=LA(a)).h),s=$f(i.s,a.s),l=$f(i.l,a.l),u=$f(i.opacity,a.opacity);return function(c){return i.h=o(c),i.s=s(c),i.l=l(Math.pow(c,r)),i.opacity=u(c),i+\"\"}}return n.gamma=t,n}(1)}var t6e,r6e,i6e=gu(()=>{P2();I2();t6e=e6e(W_),r6e=e6e($f)});function NW(e,t){t===void 0&&(t=e,e=X_);for(var r=0,n=t.length-1,i=t[0],a=new Array(n<0?0:n);r{NE()});function a6e(e,t){for(var r=new Array(t),n=0;n{});var 
R2={};uee(R2,{interpolate:()=>X_,interpolateArray:()=>MCe,interpolateBasis:()=>CD,interpolateBasisClosed:()=>PD,interpolateCubehelix:()=>t6e,interpolateCubehelixLong:()=>r6e,interpolateDate:()=>DD,interpolateDiscrete:()=>ECe,interpolateHcl:()=>JCe,interpolateHclLong:()=>$Ce,interpolateHsl:()=>WCe,interpolateHslLong:()=>XCe,interpolateHue:()=>CCe,interpolateLab:()=>BW,interpolateNumber:()=>zp,interpolateNumberArray:()=>IA,interpolateObject:()=>FD,interpolateRgb:()=>BE,interpolateRgbBasis:()=>ACe,interpolateRgbBasisClosed:()=>SCe,interpolateRound:()=>PCe,interpolateString:()=>zD,interpolateTransformCss:()=>BCe,interpolateTransformSvg:()=>NCe,interpolateZoom:()=>GCe,piecewise:()=>NW,quantize:()=>a6e});var D2=gu(()=>{NE();IW();LD();kW();RW();kCe();LCe();UE();RD();DW();ICe();OW();UCe();HCe();LW();ZCe();YCe();QCe();i6e();n6e();o6e()});var BD=ye((Ypr,s6e)=>{\"use strict\";var N6t=So(),U6t=ka();s6e.exports=function(t,r,n,i,a){var o=r.data.data,s=o.i,l=a||o.color;if(s>=0){r.i=o.i;var u=n.marker;u.pattern?(!u.colors||!u.pattern.shape)&&(u.color=l,r.color=l):(u.color=l,r.color=l),N6t.pointStyle(t,n,i,r)}else U6t.fill(t,l)}});var UW=ye((Kpr,h6e)=>{\"use strict\";var l6e=Oa(),u6e=ka(),c6e=Dr(),V6t=bv().resizeText,G6t=BD();function H6t(e){var t=e._fullLayout._sunburstlayer.selectAll(\".trace\");V6t(e,t,\"sunburst\"),t.each(function(r){var n=l6e.select(this),i=r[0],a=i.trace;n.style(\"opacity\",a.opacity),n.selectAll(\"path.surface\").each(function(o){l6e.select(this).call(f6e,o,a,e)})})}function f6e(e,t,r,n){var i=t.data.data,a=!t.children,o=i.i,s=c6e.castOption(r,o,\"marker.line.color\")||u6e.defaultLine,l=c6e.castOption(r,o,\"marker.line.width\")||0;e.call(G6t,t,r,n).style(\"stroke-width\",l).call(u6e.stroke,s).style(\"opacity\",a?r.leaf.opacity:null)}h6e.exports={style:H6t,styleOne:f6e}});var Ky=ye(Bs=>{\"use strict\";var F2=Dr(),j6t=ka(),W6t=Ag(),d6e=l_();Bs.findEntryWithLevel=function(e,t){var r;return t&&e.eachAfter(function(n){if(Bs.getPtId(n)===t)return r=n.copy()}),r||e};Bs.findEntryWithChild=function(e,t){var r;return e.eachAfter(function(n){for(var i=n.children||[],a=0;a0)};Bs.getMaxDepth=function(e){return e.maxdepth>=0?e.maxdepth:1/0};Bs.isHeader=function(e,t){return!(Bs.isLeaf(e)||e.depth===t._maxDepth-1)};function v6e(e){return e.data.data.pid}Bs.getParent=function(e,t){return Bs.findEntryWithLevel(e,v6e(t))};Bs.listPath=function(e,t){var r=e.parent;if(!r)return[];var n=t?[r.data[t]]:[r];return Bs.listPath(r,t).concat(n)};Bs.getPath=function(e){return Bs.listPath(e,\"label\").join(\"/\")+\"/\"};Bs.formatValue=d6e.formatPieValue;Bs.formatPercent=function(e,t){var r=F2.formatPercent(e,0);return r===\"0%\"&&(r=d6e.formatPiePercent(e,t)),r}});var HE=ye(($pr,m6e)=>{\"use strict\";var RA=Oa(),p6e=qa(),Y6t=ip().appendArrayPointValue,VE=vf(),g6e=Dr(),K6t=y3(),rd=Ky(),J6t=l_(),$6t=J6t.formatPieValue;m6e.exports=function(t,r,n,i,a){var o=i[0],s=o.trace,l=o.hierarchy,u=s.type===\"sunburst\",c=s.type===\"treemap\"||s.type===\"icicle\";\"_hasHoverLabel\"in s||(s._hasHoverLabel=!1),\"_hasHoverEvent\"in s||(s._hasHoverEvent=!1);var f=function(v){var _=n._fullLayout;if(!(n._dragging||_.hovermode===!1)){var b=n._fullData[s.index],p=v.data.data,k=p.i,E=rd.isHierarchyRoot(v),S=rd.getParent(l,v),L=rd.getValue(v),x=function(Ee){return g6e.castOption(b,k,Ee)},C=x(\"hovertemplate\"),M=VE.castHoverinfo(b,_,k),g=_.separators,P;if(C||M&&M!==\"none\"&&M!==\"skip\"){var T,z;u&&(T=o.cx+v.pxmid[0]*(1-v.rInscribed),z=o.cy+v.pxmid[1]*(1-v.rInscribed)),c&&(T=v._hoverX,z=v._hoverY);var O={},V=[],G=[],Z=function(Ee){return 
V.indexOf(Ee)!==-1};M&&(V=M===\"all\"?b._module.attributes.hoverinfo.flags:M.split(\"+\")),O.label=p.label,Z(\"label\")&&O.label&&G.push(O.label),p.hasOwnProperty(\"v\")&&(O.value=p.v,O.valueLabel=$6t(O.value,g),Z(\"value\")&&G.push(O.valueLabel)),O.currentPath=v.currentPath=rd.getPath(v.data),Z(\"current path\")&&!E&&G.push(O.currentPath);var H,N=[],j=function(){N.indexOf(H)===-1&&(G.push(H),N.push(H))};O.percentParent=v.percentParent=L/rd.getValue(S),O.parent=v.parentString=rd.getPtLabel(S),Z(\"percent parent\")&&(H=rd.formatPercent(O.percentParent,g)+\" of \"+O.parent,j()),O.percentEntry=v.percentEntry=L/rd.getValue(r),O.entry=v.entry=rd.getPtLabel(r),Z(\"percent entry\")&&!E&&!v.onPathbar&&(H=rd.formatPercent(O.percentEntry,g)+\" of \"+O.entry,j()),O.percentRoot=v.percentRoot=L/rd.getValue(l),O.root=v.root=rd.getPtLabel(l),Z(\"percent root\")&&!E&&(H=rd.formatPercent(O.percentRoot,g)+\" of \"+O.root,j()),O.text=x(\"hovertext\")||x(\"text\"),Z(\"text\")&&(H=O.text,g6e.isValidTextValue(H)&&G.push(H)),P=[GE(v,b,a.eventDataKeys)];var re={trace:b,y:z,_x0:v._x0,_x1:v._x1,_y0:v._y0,_y1:v._y1,text:G.join(\"
\"),name:C||Z(\"name\")?b.name:void 0,color:x(\"hoverlabel.bgcolor\")||p.color,borderColor:x(\"hoverlabel.bordercolor\"),fontFamily:x(\"hoverlabel.font.family\"),fontSize:x(\"hoverlabel.font.size\"),fontColor:x(\"hoverlabel.font.color\"),fontWeight:x(\"hoverlabel.font.weight\"),fontStyle:x(\"hoverlabel.font.style\"),fontVariant:x(\"hoverlabel.font.variant\"),nameLength:x(\"hoverlabel.namelength\"),textAlign:x(\"hoverlabel.align\"),hovertemplate:C,hovertemplateLabels:O,eventData:P};u&&(re.x0=T-v.rInscribed*v.rpx1,re.x1=T+v.rInscribed*v.rpx1,re.idealAlign=v.pxmid[0]<0?\"left\":\"right\"),c&&(re.x=T,re.idealAlign=T<0?\"left\":\"right\");var oe=[];VE.loneHover(re,{container:_._hoverlayer.node(),outerContainer:_._paper.node(),gd:n,inOut_bbox:oe}),P[0].bbox=oe[0],s._hasHoverLabel=!0}if(c){var _e=t.select(\"path.surface\");a.styleOne(_e,v,b,n,{hovered:!0})}s._hasHoverEvent=!0,n.emit(\"plotly_hover\",{points:P||[GE(v,b,a.eventDataKeys)],event:RA.event})}},h=function(v){var _=n._fullLayout,b=n._fullData[s.index],p=RA.select(this).datum();if(s._hasHoverEvent&&(v.originalEvent=RA.event,n.emit(\"plotly_unhover\",{points:[GE(p,b,a.eventDataKeys)],event:RA.event}),s._hasHoverEvent=!1),s._hasHoverLabel&&(VE.loneUnhover(_._hoverlayer.node()),s._hasHoverLabel=!1),c){var k=t.select(\"path.surface\");a.styleOne(k,p,b,n,{hovered:!1})}},d=function(v){var _=n._fullLayout,b=n._fullData[s.index],p=u&&(rd.isHierarchyRoot(v)||rd.isLeaf(v)),k=rd.getPtId(v),E=rd.isEntry(v)?rd.findEntryWithChild(l,k):rd.findEntryWithLevel(l,k),S=rd.getPtId(E),L={points:[GE(v,b,a.eventDataKeys)],event:RA.event};p||(L.nextLevel=S);var x=K6t.triggerHandler(n,\"plotly_\"+s.type+\"click\",L);if(x!==!1&&_.hovermode&&(n._hoverdata=[GE(v,b,a.eventDataKeys)],VE.click(n,RA.event)),!p&&x!==!1&&!n._dragging&&!n._transitioning){p6e.call(\"_storeDirectGUIEdit\",b,_._tracePreGUI[b.uid],{level:b.level});var C={data:[{level:S}],traces:[s.index]},M={frame:{redraw:!1,duration:a.transitionTime},transition:{duration:a.transitionTime,easing:a.transitionEasing},mode:\"immediate\",fromcurrent:!0};VE.loneUnhover(_._hoverlayer.node()),p6e.call(\"animate\",n,C,M)}};t.on(\"mouseover\",f),t.on(\"mouseout\",h),t.on(\"click\",d)};function GE(e,t,r){for(var n=e.data.data,i={curveNumber:t.index,pointNumber:n.i,data:t._input,fullData:t},a=0;a{\"use strict\";var jE=Oa(),Q6t=PE(),Zg=(D2(),ob(R2)).interpolate,y6e=So(),Av=Dr(),eLt=ru(),w6e=bv(),_6e=w6e.recordMinTextSize,tLt=w6e.clearMinTextSize,T6e=yD(),rLt=l_().getRotationAngle,iLt=T6e.computeTransform,nLt=T6e.transformInsideText,aLt=UW().styleOne,oLt=N0().resizeText,sLt=HE(),VW=mW(),Rl=Ky();ND.plot=function(e,t,r,n){var i=e._fullLayout,a=i._sunburstlayer,o,s,l=!r,u=!i.uniformtext.mode&&Rl.hasTransition(r);if(tLt(\"sunburst\",i),o=a.selectAll(\"g.trace.sunburst\").data(t,function(f){return f[0].trace.uid}),o.enter().append(\"g\").classed(\"trace\",!0).classed(\"sunburst\",!0).attr(\"stroke-linejoin\",\"round\"),o.order(),u){n&&(s=n());var c=jE.transition().duration(r.duration).ease(r.easing).each(\"end\",function(){s&&s()}).each(\"interrupt\",function(){s&&s()});c.each(function(){a.selectAll(\"g.trace\").each(function(f){x6e(e,f,this,r)})})}else o.each(function(f){x6e(e,f,this,r)}),i.uniformtext.mode&&oLt(e,i._sunburstlayer.selectAll(\".trace\"),\"sunburst\");l&&o.exit().remove()};function x6e(e,t,r,n){var 
i=e._context.staticPlot,a=e._fullLayout,o=!a.uniformtext.mode&&Rl.hasTransition(n),s=jE.select(r),l=s.selectAll(\"g.slice\"),u=t[0],c=u.trace,f=u.hierarchy,h=Rl.findEntryWithLevel(f,c.level),d=Rl.getMaxDepth(c),v=a._size,_=c.domain,b=v.w*(_.x[1]-_.x[0]),p=v.h*(_.y[1]-_.y[0]),k=.5*Math.min(b,p),E=u.cx=v.l+v.w*(_.x[1]+_.x[0])/2,S=u.cy=v.t+v.h*(1-_.y[0])-p/2;if(!h)return l.remove();var L=null,x={};o&&l.each(function(me){x[Rl.getPtId(me)]={rpx0:me.rpx0,rpx1:me.rpx1,x0:me.x0,x1:me.x1,transform:me.transform},!L&&Rl.isEntry(me)&&(L=me)});var C=lLt(h).descendants(),M=h.height+1,g=0,P=d;u.hasMultipleRoots&&Rl.isHierarchyRoot(h)&&(C=C.slice(1),M-=1,g=1,P+=1),C=C.filter(function(me){return me.y1<=P});var T=rLt(c.rotation);T&&C.forEach(function(me){me.x0+=T,me.x1+=T});var z=Math.min(M,d),O=function(me){return(me-g)/z*k},V=function(me,ie){return[me*Math.cos(ie),-me*Math.sin(ie)]},G=function(me){return Av.pathAnnulus(me.rpx0,me.rpx1,me.x0,me.x1,E,S)},Z=function(me){return E+b6e(me)[0]*(me.transform.rCenter||0)+(me.transform.x||0)},H=function(me){return S+b6e(me)[1]*(me.transform.rCenter||0)+(me.transform.y||0)};l=l.data(C,Rl.getPtId),l.enter().append(\"g\").classed(\"slice\",!0),o?l.exit().transition().each(function(){var me=jE.select(this),ie=me.select(\"path.surface\");ie.transition().attrTween(\"d\",function(Le){var Ae=oe(Le);return function(Fe){return G(Ae(Fe))}});var Se=me.select(\"g.slicetext\");Se.attr(\"opacity\",0)}).remove():l.exit().remove(),l.order();var N=null;if(o&&L){var j=Rl.getPtId(L);l.each(function(me){N===null&&Rl.getPtId(me)===j&&(N=me.x1)})}var re=l;o&&(re=re.transition().each(\"end\",function(){var me=jE.select(this);Rl.setSliceCursor(me,e,{hideOnRoot:!0,hideOnLeaves:!0,isTransitioning:!1})})),re.each(function(me){var ie=jE.select(this),Se=Av.ensureSingle(ie,\"path\",\"surface\",function(Re){Re.style(\"pointer-events\",i?\"none\":\"all\")});me.rpx0=O(me.y0),me.rpx1=O(me.y1),me.xmid=(me.x0+me.x1)/2,me.pxmid=V(me.rpx1,me.xmid),me.midangle=-(me.xmid-Math.PI/2),me.startangle=-(me.x0-Math.PI/2),me.stopangle=-(me.x1-Math.PI/2),me.halfangle=.5*Math.min(Av.angleDelta(me.x0,me.x1)||Math.PI,Math.PI),me.ring=1-me.rpx0/me.rpx1,me.rInscribed=uLt(me,c),o?Se.transition().attrTween(\"d\",function(Re){var ce=_e(Re);return function(Ze){return G(ce(Ze))}}):Se.attr(\"d\",G),ie.call(sLt,h,e,t,{eventDataKeys:VW.eventDataKeys,transitionTime:VW.CLICK_TRANSITION_TIME,transitionEasing:VW.CLICK_TRANSITION_EASING}).call(Rl.setSliceCursor,e,{hideOnRoot:!0,hideOnLeaves:!0,isTransitioning:e._transitioning}),Se.call(aLt,me,c,e);var Le=Av.ensureSingle(ie,\"g\",\"slicetext\"),Ae=Av.ensureSingle(Le,\"text\",\"\",function(Re){Re.attr(\"data-notex\",1)}),Fe=Av.ensureUniformFontSize(e,Rl.determineTextFont(c,me,a.font));Ae.text(ND.formatSliceLabel(me,h,c,t,a)).classed(\"slicetext\",!0).attr(\"text-anchor\",\"middle\").call(y6e.font,Fe).call(eLt.convertToTspans,e);var Pe=y6e.bBox(Ae.node());me.transform=nLt(Pe,me,u),me.transform.targetX=Z(me),me.transform.targetY=H(me);var ge=function(Re,ce){var Ze=Re.transform;return iLt(Ze,ce),Ze.fontSize=Fe.size,_6e(c.type,Ze,a),Av.getTextTransform(Ze)};o?Ae.transition().attrTween(\"transform\",function(Re){var ce=Ee(Re);return function(Ze){return ge(ce(Ze),Pe)}}):Ae.attr(\"transform\",ge(me,Pe))});function oe(me){var ie=Rl.getPtId(me),Se=x[ie],Le=x[Rl.getPtId(h)],Ae;if(Le){var Fe=(me.x1>Le.x1?2*Math.PI:0)+T;Ae=me.rpx1N?2*Math.PI:0)+T;Se={x0:Ae,x1:Ae}}else Se={rpx0:k,rpx1:k},Av.extendFlat(Se,Ce(me));else Se={rpx0:0,rpx1:0};else Se={x0:T,x1:T};return Zg(Se,Le)}function Ee(me){var 
ie=x[Rl.getPtId(me)],Se,Le=me.transform;if(ie)Se=ie;else if(Se={rpx1:me.rpx1,transform:{textPosAngle:Le.textPosAngle,scale:0,rotate:Le.rotate,rCenter:Le.rCenter,x:Le.x,y:Le.y}},L)if(me.parent)if(N){var Ae=me.x1>N?2*Math.PI:0;Se.x0=Se.x1=Ae}else Av.extendFlat(Se,Ce(me));else Se.x0=Se.x1=T;else Se.x0=Se.x1=T;var Fe=Zg(Se.transform.textPosAngle,me.transform.textPosAngle),Pe=Zg(Se.rpx1,me.rpx1),ge=Zg(Se.x0,me.x0),Re=Zg(Se.x1,me.x1),ce=Zg(Se.transform.scale,Le.scale),Ze=Zg(Se.transform.rotate,Le.rotate),ut=Le.rCenter===0?3:Se.transform.rCenter===0?1/3:1,pt=Zg(Se.transform.rCenter,Le.rCenter),Zt=function(st){return pt(Math.pow(st,ut))};return function(st){var lt=Pe(st),Gt=ge(st),Nt=Re(st),Jt=Zt(st),sr=V(lt,(Gt+Nt)/2),wr=Fe(st),cr={pxmid:sr,rpx1:lt,transform:{textPosAngle:wr,rCenter:Jt,x:Le.x,y:Le.y}};return _6e(c.type,Le,a),{transform:{targetX:Z(cr),targetY:H(cr),scale:ce(st),rotate:Ze(st),rCenter:Jt}}}}function Ce(me){var ie=me.parent,Se=x[Rl.getPtId(ie)],Le={};if(Se){var Ae=ie.children,Fe=Ae.indexOf(me),Pe=Ae.length,ge=Zg(Se.x0,Se.x1);Le.x0=ge(Fe/Pe),Le.x1=ge(Fe/Pe)}else Le.x0=Le.x1=0;return Le}}function lLt(e){return Q6t.partition().size([2*Math.PI,e.height+1])(e)}ND.formatSliceLabel=function(e,t,r,n,i){var a=r.texttemplate,o=r.textinfo;if(!a&&(!o||o===\"none\"))return\"\";var s=i.separators,l=n[0],u=e.data.data,c=l.hierarchy,f=Rl.isHierarchyRoot(e),h=Rl.getParent(c,e),d=Rl.getValue(e);if(!a){var v=o.split(\"+\"),_=function(g){return v.indexOf(g)!==-1},b=[],p;if(_(\"label\")&&u.label&&b.push(u.label),u.hasOwnProperty(\"v\")&&_(\"value\")&&b.push(Rl.formatValue(u.v,s)),!f){_(\"current path\")&&b.push(Rl.getPath(e.data));var k=0;_(\"percent parent\")&&k++,_(\"percent entry\")&&k++,_(\"percent root\")&&k++;var E=k>1;if(k){var S,L=function(g){p=Rl.formatPercent(S,s),E&&(p+=\" of \"+g),b.push(p)};_(\"percent parent\")&&!f&&(S=d/Rl.getValue(h),L(\"parent\")),_(\"percent entry\")&&(S=d/Rl.getValue(t),L(\"entry\")),_(\"percent root\")&&(S=d/Rl.getValue(c),L(\"root\"))}}return _(\"text\")&&(p=Av.castOption(r,u.i,\"text\"),Av.isValidTextValue(p)&&b.push(p)),b.join(\"
\")}var x=Av.castOption(r,u.i,\"texttemplate\");if(!x)return\"\";var C={};u.label&&(C.label=u.label),u.hasOwnProperty(\"v\")&&(C.value=u.v,C.valueLabel=Rl.formatValue(u.v,s)),C.currentPath=Rl.getPath(e.data),f||(C.percentParent=d/Rl.getValue(h),C.percentParentLabel=Rl.formatPercent(C.percentParent,s),C.parent=Rl.getPtLabel(h)),C.percentEntry=d/Rl.getValue(t),C.percentEntryLabel=Rl.formatPercent(C.percentEntry,s),C.entry=Rl.getPtLabel(t),C.percentRoot=d/Rl.getValue(c),C.percentRootLabel=Rl.formatPercent(C.percentRoot,s),C.root=Rl.getPtLabel(c),u.hasOwnProperty(\"color\")&&(C.color=u.color);var M=Av.castOption(r,u.i,\"text\");return(Av.isValidTextValue(M)||M===\"\")&&(C.text=M),C.customdata=Av.castOption(r,u.i,\"customdata\"),Av.texttemplateString({data:[C,r._meta],fallback:r.texttemplatefallback,labels:C,locale:i._d3locale,template:x})};function uLt(e){return e.rpx0===0&&Av.isFullCircle([e.x0,e.x1])?1:Math.max(0,Math.min(1/(1+1/Math.sin(e.halfangle)),e.ring/2))}function b6e(e){return cLt(e.rpx1,e.transform.textPosAngle)}function cLt(e,t){return[e*Math.sin(t),-e*Math.cos(t)]}});var S6e=ye((e0r,A6e)=>{\"use strict\";A6e.exports={moduleType:\"trace\",name:\"sunburst\",basePlotModule:qke(),categories:[],animatable:!0,attributes:LE(),layoutAttributes:yW(),supplyDefaults:Xke(),supplyLayoutDefaults:Yke(),calc:RE().calc,crossTraceCalc:RE().crossTraceCalc,plot:UD().plot,style:UW().style,colorbar:$d(),meta:{}}});var E6e=ye((t0r,M6e)=>{\"use strict\";M6e.exports=S6e()});var C6e=ye(DA=>{\"use strict\";var k6e=Mc();DA.name=\"treemap\";DA.plot=function(e,t,r,n){k6e.plotBasePlot(DA.name,e,t,r,n)};DA.clean=function(e,t,r,n){k6e.cleanBasePlot(DA.name,e,t,r,n)}});var z2=ye((i0r,L6e)=>{\"use strict\";L6e.exports={CLICK_TRANSITION_TIME:750,CLICK_TRANSITION_EASING:\"poly\",eventDataKeys:[\"currentPath\",\"root\",\"entry\",\"percentRoot\",\"percentEntry\",\"percentParent\"],gapWithPathbar:1}});var VD=ye((n0r,R6e)=>{\"use 
strict\";var{hovertemplateAttrs:fLt,texttemplateAttrs:hLt,templatefallbackAttrs:P6e}=Ll(),dLt=Tu(),vLt=Cc().attributes,O2=S2(),Q0=LE(),I6e=z2(),GW=Ao().extendFlat,pLt=Pd().pattern;R6e.exports={labels:Q0.labels,parents:Q0.parents,values:Q0.values,branchvalues:Q0.branchvalues,count:Q0.count,level:Q0.level,maxdepth:Q0.maxdepth,tiling:{packing:{valType:\"enumerated\",values:[\"squarify\",\"binary\",\"dice\",\"slice\",\"slice-dice\",\"dice-slice\"],dflt:\"squarify\",editType:\"plot\"},squarifyratio:{valType:\"number\",min:1,dflt:1,editType:\"plot\"},flip:{valType:\"flaglist\",flags:[\"x\",\"y\"],dflt:\"\",editType:\"plot\"},pad:{valType:\"number\",min:0,dflt:3,editType:\"plot\"},editType:\"calc\"},marker:GW({pad:{t:{valType:\"number\",min:0,editType:\"plot\"},l:{valType:\"number\",min:0,editType:\"plot\"},r:{valType:\"number\",min:0,editType:\"plot\"},b:{valType:\"number\",min:0,editType:\"plot\"},editType:\"calc\"},colors:Q0.marker.colors,pattern:pLt,depthfade:{valType:\"enumerated\",values:[!0,!1,\"reversed\"],editType:\"style\"},line:Q0.marker.line,cornerradius:{valType:\"number\",min:0,dflt:0,editType:\"plot\"},editType:\"calc\"},dLt(\"marker\",{colorAttr:\"colors\",anim:!1})),pathbar:{visible:{valType:\"boolean\",dflt:!0,editType:\"plot\"},side:{valType:\"enumerated\",values:[\"top\",\"bottom\"],dflt:\"top\",editType:\"plot\"},edgeshape:{valType:\"enumerated\",values:[\">\",\"<\",\"|\",\"/\",\"\\\\\"],dflt:\">\",editType:\"plot\"},thickness:{valType:\"number\",min:12,editType:\"plot\"},textfont:GW({},O2.textfont,{}),editType:\"calc\"},text:O2.text,textinfo:Q0.textinfo,texttemplate:hLt({editType:\"plot\"},{keys:I6e.eventDataKeys.concat([\"label\",\"value\"])}),texttemplatefallback:P6e({editType:\"plot\"}),hovertext:O2.hovertext,hoverinfo:Q0.hoverinfo,hovertemplate:fLt({},{keys:I6e.eventDataKeys}),hovertemplatefallback:P6e(),textfont:O2.textfont,insidetextfont:O2.insidetextfont,outsidetextfont:GW({},O2.outsidetextfont,{}),textposition:{valType:\"enumerated\",values:[\"top left\",\"top center\",\"top right\",\"middle left\",\"middle center\",\"middle right\",\"bottom left\",\"bottom center\",\"bottom right\"],dflt:\"top left\",editType:\"plot\"},sort:O2.sort,root:Q0.root,domain:vLt({name:\"treemap\",trace:!0,editType:\"calc\"})}});var HW=ye((a0r,D6e)=>{\"use strict\";D6e.exports={treemapcolorway:{valType:\"colorlist\",editType:\"calc\"},extendtreemapcolors:{valType:\"boolean\",dflt:!0,editType:\"calc\"}}});var q6e=ye((o0r,O6e)=>{\"use strict\";var F6e=Dr(),gLt=VD(),mLt=ka(),yLt=Cc().defaults,_Lt=r0().handleText,xLt=e2().TEXTPAD,bLt=M2().handleMarkerDefaults,z6e=tc(),wLt=z6e.hasColorscale,TLt=z6e.handleDefaults;O6e.exports=function(t,r,n,i){function a(b,p){return F6e.coerce(t,r,gLt,b,p)}var o=a(\"labels\"),s=a(\"parents\");if(!o||!o.length||!s||!s.length){r.visible=!1;return}var l=a(\"values\");l&&l.length?a(\"branchvalues\"):a(\"count\"),a(\"level\"),a(\"maxdepth\");var u=a(\"tiling.packing\");u===\"squarify\"&&a(\"tiling.squarifyratio\"),a(\"tiling.flip\"),a(\"tiling.pad\");var c=a(\"text\");a(\"texttemplate\"),a(\"texttemplatefallback\"),r.texttemplate||a(\"textinfo\",F6e.isArrayOrTypedArray(c)?\"text+label\":\"label\"),a(\"hovertext\"),a(\"hovertemplate\"),a(\"hovertemplatefallback\");var f=a(\"pathbar.visible\"),h=\"auto\";_Lt(t,r,i,a,h,{hasPathbar:f,moduleHasSelected:!1,moduleHasUnselected:!1,moduleHasConstrain:!1,moduleHasCliponaxis:!1,moduleHasTextangle:!1,moduleHasInsideanchor:!1}),a(\"textposition\");var d=r.textposition.indexOf(\"bottom\")!==-1;bLt(t,r,i,a);var 
v=r._hasColorscale=wLt(t,\"marker\",\"colors\")||(t.marker||{}).coloraxis;v?TLt(t,r,i,a,{prefix:\"marker.\",cLetter:\"c\"}):a(\"marker.depthfade\",!(r.marker.colors||[]).length);var _=r.textfont.size*2;a(\"marker.pad.t\",d?_/4:_),a(\"marker.pad.l\",_/4),a(\"marker.pad.r\",_/4),a(\"marker.pad.b\",d?_:_/4),a(\"marker.cornerradius\"),r._hovered={marker:{line:{width:2,color:mLt.contrast(i.paper_bgcolor)}}},f&&(a(\"pathbar.thickness\",r.pathbar.textfont.size+2*xLt),a(\"pathbar.side\"),a(\"pathbar.edgeshape\")),a(\"sort\"),a(\"root.color\"),yLt(r,i,a),r._length=null}});var N6e=ye((s0r,B6e)=>{\"use strict\";var ALt=Dr(),SLt=HW();B6e.exports=function(t,r){function n(i,a){return ALt.coerce(t,r,SLt,i,a)}n(\"treemapcolorway\",r.colorway),n(\"extendtreemapcolors\")}});var WW=ye(jW=>{\"use strict\";var U6e=RE();jW.calc=function(e,t){return U6e.calc(e,t)};jW.crossTraceCalc=function(e){return U6e._runCrossTraceCalc(\"treemap\",e)}});var XW=ye((u0r,V6e)=>{\"use strict\";V6e.exports=function e(t,r,n){var i;n.swapXY&&(i=t.x0,t.x0=t.y0,t.y0=i,i=t.x1,t.x1=t.y1,t.y1=i),n.flipX&&(i=t.x0,t.x0=r[0]-t.x1,t.x1=r[0]-i),n.flipY&&(i=t.y0,t.y0=r[1]-t.y1,t.y1=r[1]-i);var a=t.children;if(a)for(var o=0;o{\"use strict\";var FA=PE(),MLt=XW();G6e.exports=function(t,r,n){var i=n.flipX,a=n.flipY,o=n.packing===\"dice-slice\",s=n.pad[a?\"bottom\":\"top\"],l=n.pad[i?\"right\":\"left\"],u=n.pad[i?\"left\":\"right\"],c=n.pad[a?\"top\":\"bottom\"],f;o&&(f=l,l=s,s=f,f=u,u=c,c=f);var h=FA.treemap().tile(ELt(n.packing,n.squarifyratio)).paddingInner(n.pad.inner).paddingLeft(l).paddingRight(u).paddingTop(s).paddingBottom(c).size(o?[r[1],r[0]]:r)(t);return(o||i||a)&&MLt(h,r,{swapXY:o,flipX:i,flipY:a}),h};function ELt(e,t){switch(e){case\"squarify\":return FA.treemapSquarify.ratio(t);case\"binary\":return FA.treemapBinary;case\"dice\":return FA.treemapDice;case\"slice\":return FA.treemapSlice;default:return FA.treemapSliceDice}}});var GD=ye((f0r,X6e)=>{\"use strict\";var H6e=Oa(),zA=ka(),j6e=Dr(),YW=Ky(),kLt=bv().resizeText,CLt=BD();function LLt(e){var t=e._fullLayout._treemaplayer.selectAll(\".trace\");kLt(e,t,\"treemap\"),t.each(function(r){var n=H6e.select(this),i=r[0],a=i.trace;n.style(\"opacity\",a.opacity),n.selectAll(\"path.surface\").each(function(o){H6e.select(this).call(W6e,o,a,e,{hovered:!1})})})}function W6e(e,t,r,n,i){var a=(i||{}).hovered,o=t.data.data,s=o.i,l,u,c=o.color,f=YW.isHierarchyRoot(t),h=1;if(a)l=r._hovered.marker.line.color,u=r._hovered.marker.line.width;else if(f&&c===r.root.color)h=100,l=\"rgba(0,0,0,0)\",u=0;else if(l=j6e.castOption(r,s,\"marker.line.color\")||zA.defaultLine,u=j6e.castOption(r,s,\"marker.line.width\")||0,!r._hasColorscale&&!t.onPathbar){var d=r.marker.depthfade;if(d){var v=zA.combine(zA.addOpacity(r._backgroundColor,.75),c),_;if(d===!0){var b=YW.getMaxDepth(r);isFinite(b)?YW.isLeaf(t)?_=0:_=r._maxVisibleLayers-(t.data.depth-r._entryDepth):_=t.data.height+1}else _=t.data.depth-r._entryDepth,r._atRootLevel||_++;if(_>0)for(var p=0;p<_;p++){var k=.5*p/_;c=zA.combine(zA.addOpacity(v,k),c)}}}e.call(CLt,t,r,n,c).style(\"stroke-width\",u).call(zA.stroke,l).style(\"opacity\",h)}X6e.exports={style:LLt,styleOne:W6e}});var $6e=ye((h0r,J6e)=>{\"use strict\";var Z6e=Oa(),HD=Dr(),Y6e=So(),PLt=ru(),ILt=ZW(),K6e=GD().styleOne,KW=z2(),OA=Ky(),RLt=HE(),JW=!0;J6e.exports=function(t,r,n,i,a){var 
o=a.barDifY,s=a.width,l=a.height,u=a.viewX,c=a.viewY,f=a.pathSlice,h=a.toMoveInsideSlice,d=a.strTransform,v=a.hasTransition,_=a.handleSlicesExit,b=a.makeUpdateSliceInterpolator,p=a.makeUpdateTextInterpolator,k={},E=t._context.staticPlot,S=t._fullLayout,L=r[0],x=L.trace,C=L.hierarchy,M=s/x._entryDepth,g=OA.listPath(n.data,\"id\"),P=ILt(C.copy(),[s,l],{packing:\"dice\",pad:{inner:0,top:0,left:0,right:0,bottom:0}}).descendants();P=P.filter(function(z){var O=g.indexOf(z.data.id);return O===-1?!1:(z.x0=M*O,z.x1=M*(O+1),z.y0=o,z.y1=o+l,z.onPathbar=!0,!0)}),P.reverse(),i=i.data(P,OA.getPtId),i.enter().append(\"g\").classed(\"pathbar\",!0),_(i,JW,k,[s,l],f),i.order();var T=i;v&&(T=T.transition().each(\"end\",function(){var z=Z6e.select(this);OA.setSliceCursor(z,t,{hideOnRoot:!1,hideOnLeaves:!1,isTransitioning:!1})})),T.each(function(z){z._x0=u(z.x0),z._x1=u(z.x1),z._y0=c(z.y0),z._y1=c(z.y1),z._hoverX=u(z.x1-Math.min(s,l)/2),z._hoverY=c(z.y1-l/2);var O=Z6e.select(this),V=HD.ensureSingle(O,\"path\",\"surface\",function(N){N.style(\"pointer-events\",E?\"none\":\"all\")});v?V.transition().attrTween(\"d\",function(N){var j=b(N,JW,k,[s,l]);return function(re){return f(j(re))}}):V.attr(\"d\",f),O.call(RLt,n,t,r,{styleOne:K6e,eventDataKeys:KW.eventDataKeys,transitionTime:KW.CLICK_TRANSITION_TIME,transitionEasing:KW.CLICK_TRANSITION_EASING}).call(OA.setSliceCursor,t,{hideOnRoot:!1,hideOnLeaves:!1,isTransitioning:t._transitioning}),V.call(K6e,z,x,t,{hovered:!1}),z._text=(OA.getPtLabel(z)||\"\").split(\"
\").join(\" \")||\"\";var G=HD.ensureSingle(O,\"g\",\"slicetext\"),Z=HD.ensureSingle(G,\"text\",\"\",function(N){N.attr(\"data-notex\",1)}),H=HD.ensureUniformFontSize(t,OA.determineTextFont(x,z,S.font,{onPathbar:!0}));Z.text(z._text||\" \").classed(\"slicetext\",!0).attr(\"text-anchor\",\"start\").call(Y6e.font,H).call(PLt.convertToTspans,t),z.textBB=Y6e.bBox(Z.node()),z.transform=h(z,{fontSize:H.size,onPathbar:!0}),z.transform.fontSize=H.size,v?Z.transition().attrTween(\"transform\",function(N){var j=p(N,JW,k,[s,l]);return function(re){return d(j(re))}}):Z.attr(\"transform\",d(z))})}});var rLe=ye((d0r,tLe)=>{\"use strict\";var Q6e=Oa(),$W=(D2(),ob(R2)).interpolate,Z_=Ky(),WE=Dr(),eLe=e2().TEXTPAD,DLt=n2(),FLt=DLt.toMoveInsideBar,zLt=bv(),QW=zLt.recordMinTextSize,OLt=z2(),qLt=$6e();function q2(e){return Z_.isHierarchyRoot(e)?\"\":Z_.getPtId(e)}tLe.exports=function(t,r,n,i,a){var o=t._fullLayout,s=r[0],l=s.trace,u=l.type,c=u===\"icicle\",f=s.hierarchy,h=Z_.findEntryWithLevel(f,l.level),d=Q6e.select(n),v=d.selectAll(\"g.pathbar\"),_=d.selectAll(\"g.slice\");if(!h){v.remove(),_.remove();return}var b=Z_.isHierarchyRoot(h),p=!o.uniformtext.mode&&Z_.hasTransition(i),k=Z_.getMaxDepth(l),E=function($e){return $e.data.depth-h.data.depth-1?C+P:-(g+P):0,z={x0:M,x1:M,y0:T,y1:T+g},O=function($e,St,Qt){var Vt=l.tiling.pad,_t=function(lr){return lr-Vt<=St.x0},It=function(lr){return lr+Vt>=St.x1},mt=function(lr){return lr-Vt<=St.y0},er=function(lr){return lr+Vt>=St.y1};return $e.x0===St.x0&&$e.x1===St.x1&&$e.y0===St.y0&&$e.y1===St.y1?{x0:$e.x0,x1:$e.x1,y0:$e.y0,y1:$e.y1}:{x0:_t($e.x0-Vt)?0:It($e.x0-Vt)?Qt[0]:$e.x0,x1:_t($e.x1+Vt)?0:It($e.x1+Vt)?Qt[0]:$e.x1,y0:mt($e.y0-Vt)?0:er($e.y0-Vt)?Qt[1]:$e.y0,y1:mt($e.y1+Vt)?0:er($e.y1+Vt)?Qt[1]:$e.y1}},V=null,G={},Z={},H=null,N=function($e,St){return St?G[q2($e)]:Z[q2($e)]},j=function($e,St,Qt,Vt){if(St)return G[q2(f)]||z;var _t=Z[l.level]||Qt;return E($e)?O($e,_t,Vt):{}};s.hasMultipleRoots&&b&&k++,l._maxDepth=k,l._backgroundColor=o.paper_bgcolor,l._entryDepth=h.data.depth,l._atRootLevel=b;var re=-x/2+S.l+S.w*(L.x[1]+L.x[0])/2,oe=-C/2+S.t+S.h*(1-(L.y[1]+L.y[0])/2),_e=function($e){return re+$e},Ee=function($e){return oe+$e},Ce=Ee(0),me=_e(0),ie=function($e){return me+$e},Se=function($e){return Ce+$e};function Le($e,St){return $e+\",\"+St}var Ae=ie(0),Fe=function($e){$e.x=Math.max(Ae,$e.x)},Pe=l.pathbar.edgeshape,ge=function($e){var St=ie(Math.max(Math.min($e.x0,$e.x0),0)),Qt=ie(Math.min(Math.max($e.x1,$e.x1),M)),Vt=Se($e.y0),_t=Se($e.y1),It=g/2,mt={},er={};mt.x=St,er.x=Qt,mt.y=er.y=(Vt+_t)/2;var lr={x:St,y:Vt},Tr={x:Qt,y:Vt},Lr={x:Qt,y:_t},ti={x:St,y:_t};return Pe===\">\"?(lr.x-=It,Tr.x-=It,Lr.x-=It,ti.x-=It):Pe===\"/\"?(Lr.x-=It,ti.x-=It,mt.x-=It/2,er.x-=It/2):Pe===\"\\\\\"?(lr.x-=It,Tr.x-=It,mt.x-=It/2,er.x-=It/2):Pe===\"<\"&&(mt.x-=It,er.x-=It),Fe(lr),Fe(ti),Fe(mt),Fe(Tr),Fe(Lr),Fe(er),\"M\"+Le(lr.x,lr.y)+\"L\"+Le(Tr.x,Tr.y)+\"L\"+Le(er.x,er.y)+\"L\"+Le(Lr.x,Lr.y)+\"L\"+Le(ti.x,ti.y)+\"L\"+Le(mt.x,mt.y)+\"Z\"},Re=l[c?\"tiling\":\"marker\"].pad,ce=function($e){return l.textposition.indexOf($e)!==-1},Ze=ce(\"top\"),ut=ce(\"left\"),pt=ce(\"right\"),Zt=ce(\"bottom\"),st=function($e){var St=_e($e.x0),Qt=_e($e.x1),Vt=Ee($e.y0),_t=Ee($e.y1),It=Qt-St,mt=_t-Vt;if(!It||!mt)return\"\";var er=l.marker.cornerradius||0,lr=Math.min(er,It/2,mt/2);lr&&$e.data&&$e.data.data&&$e.data.data.label&&(Ze&&(lr=Math.min(lr,Re.t)),ut&&(lr=Math.min(lr,Re.l)),pt&&(lr=Math.min(lr,Re.r)),Zt&&(lr=Math.min(lr,Re.b)));var Tr=function(Lr,ti){return lr?\"a\"+Le(lr,lr)+\" 0 0 1 
\"+Le(Lr,ti):\"\"};return\"M\"+Le(St,Vt+lr)+Tr(lr,-lr)+\"L\"+Le(Qt-lr,Vt)+Tr(lr,lr)+\"L\"+Le(Qt,_t-lr)+Tr(-lr,lr)+\"L\"+Le(St+lr,_t)+Tr(-lr,-lr)+\"Z\"},lt=function($e,St){var Qt=$e.x0,Vt=$e.x1,_t=$e.y0,It=$e.y1,mt=$e.textBB,er=Ze||St.isHeader&&!Zt,lr=er?\"start\":Zt?\"end\":\"middle\",Tr=ce(\"right\"),Lr=ce(\"left\")||St.onPathbar,ti=Lr?-1:Tr?1:0;if(St.isHeader){if(Qt+=(c?Re:Re.l)-eLe,Vt-=(c?Re:Re.r)-eLe,Qt>=Vt){var Br=(Qt+Vt)/2;Qt=Br,Vt=Br}var Vr;Zt?(Vr=It-(c?Re:Re.b),_t{\"use strict\";var BLt=Oa(),NLt=Ky(),ULt=bv(),VLt=ULt.clearMinTextSize,GLt=N0().resizeText,iLe=rLe();nLe.exports=function(t,r,n,i,a){var o=a.type,s=a.drawDescendants,l=t._fullLayout,u=l[\"_\"+o+\"layer\"],c,f,h=!n;if(VLt(o,l),c=u.selectAll(\"g.trace.\"+o).data(r,function(v){return v[0].trace.uid}),c.enter().append(\"g\").classed(\"trace\",!0).classed(o,!0),c.order(),!l.uniformtext.mode&&NLt.hasTransition(n)){i&&(f=i());var d=BLt.transition().duration(n.duration).ease(n.easing).each(\"end\",function(){f&&f()}).each(\"interrupt\",function(){f&&f()});d.each(function(){u.selectAll(\"g.trace\").each(function(v){iLe(t,v,this,n,s)})})}else c.each(function(v){iLe(t,v,this,n,s)}),l.uniformtext.mode&&GLt(t,u.selectAll(\".trace\"),o);h&&c.exit().remove()}});var uLe=ye((p0r,lLe)=>{\"use strict\";var aLe=Oa(),jD=Dr(),oLe=So(),HLt=ru(),jLt=ZW(),sLe=GD().styleOne,tX=z2(),Y_=Ky(),WLt=HE(),XLt=UD().formatSliceLabel,rX=!1;lLe.exports=function(t,r,n,i,a){var o=a.width,s=a.height,l=a.viewX,u=a.viewY,c=a.pathSlice,f=a.toMoveInsideSlice,h=a.strTransform,d=a.hasTransition,v=a.handleSlicesExit,_=a.makeUpdateSliceInterpolator,b=a.makeUpdateTextInterpolator,p=a.prevEntry,k={},E=t._context.staticPlot,S=t._fullLayout,L=r[0],x=L.trace,C=x.textposition.indexOf(\"left\")!==-1,M=x.textposition.indexOf(\"right\")!==-1,g=x.textposition.indexOf(\"bottom\")!==-1,P=!g&&!x.marker.pad.t||g&&!x.marker.pad.b,T=jLt(n,[o,s],{packing:x.tiling.packing,squarifyratio:x.tiling.squarifyratio,flipX:x.tiling.flip.indexOf(\"x\")>-1,flipY:x.tiling.flip.indexOf(\"y\")>-1,pad:{inner:x.tiling.pad,top:x.marker.pad.t,left:x.marker.pad.l,right:x.marker.pad.r,bottom:x.marker.pad.b}}),z=T.descendants(),O=1/0,V=-1/0;z.forEach(function(j){var re=j.depth;re>=x._maxDepth?(j.x0=j.x1=(j.x0+j.x1)/2,j.y0=j.y1=(j.y0+j.y1)/2):(O=Math.min(O,re),V=Math.max(V,re))}),i=i.data(z,Y_.getPtId),x._maxVisibleLayers=isFinite(V)?V-O+1:0,i.enter().append(\"g\").classed(\"slice\",!0),v(i,rX,k,[o,s],c),i.order();var G=null;if(d&&p){var Z=Y_.getPtId(p);i.each(function(j){G===null&&Y_.getPtId(j)===Z&&(G={x0:j.x0,x1:j.x1,y0:j.y0,y1:j.y1})})}var H=function(){return G||{x0:0,x1:o,y0:0,y1:s}},N=i;return d&&(N=N.transition().each(\"end\",function(){var j=aLe.select(this);Y_.setSliceCursor(j,t,{hideOnRoot:!0,hideOnLeaves:!1,isTransitioning:!1})})),N.each(function(j){var re=Y_.isHeader(j,x);j._x0=l(j.x0),j._x1=l(j.x1),j._y0=u(j.y0),j._y1=u(j.y1),j._hoverX=l(j.x1-x.marker.pad.r),j._hoverY=u(g?j.y1-x.marker.pad.b/2:j.y0+x.marker.pad.t/2);var oe=aLe.select(this),_e=jD.ensureSingle(oe,\"path\",\"surface\",function(Le){Le.style(\"pointer-events\",E?\"none\":\"all\")});d?_e.transition().attrTween(\"d\",function(Le){var Ae=_(Le,rX,H(),[o,s]);return function(Fe){return 
c(Ae(Fe))}}):_e.attr(\"d\",c),oe.call(WLt,n,t,r,{styleOne:sLe,eventDataKeys:tX.eventDataKeys,transitionTime:tX.CLICK_TRANSITION_TIME,transitionEasing:tX.CLICK_TRANSITION_EASING}).call(Y_.setSliceCursor,t,{isTransitioning:t._transitioning}),_e.call(sLe,j,x,t,{hovered:!1}),j.x0===j.x1||j.y0===j.y1?j._text=\"\":re?j._text=P?\"\":Y_.getPtLabel(j)||\"\":j._text=XLt(j,n,x,r,S)||\"\";var Ee=jD.ensureSingle(oe,\"g\",\"slicetext\"),Ce=jD.ensureSingle(Ee,\"text\",\"\",function(Le){Le.attr(\"data-notex\",1)}),me=jD.ensureUniformFontSize(t,Y_.determineTextFont(x,j,S.font)),ie=j._text||\" \",Se=re&&ie.indexOf(\"
\")===-1;Ce.text(ie).classed(\"slicetext\",!0).attr(\"text-anchor\",M?\"end\":C||Se?\"start\":\"middle\").call(oLe.font,me).call(HLt.convertToTspans,t),j.textBB=oLe.bBox(Ce.node()),j.transform=f(j,{fontSize:me.size,isHeader:re}),j.transform.fontSize=me.size,d?Ce.transition().attrTween(\"transform\",function(Le){var Ae=b(Le,rX,H(),[o,s]);return function(Fe){return h(Ae(Fe))}}):Ce.attr(\"transform\",h(j))}),G}});var fLe=ye((g0r,cLe)=>{\"use strict\";var ZLt=eX(),YLt=uLe();cLe.exports=function(t,r,n,i){return ZLt(t,r,n,i,{type:\"treemap\",drawDescendants:YLt})}});var dLe=ye((m0r,hLe)=>{\"use strict\";hLe.exports={moduleType:\"trace\",name:\"treemap\",basePlotModule:C6e(),categories:[],animatable:!0,attributes:VD(),layoutAttributes:HW(),supplyDefaults:q6e(),supplyLayoutDefaults:N6e(),calc:WW().calc,crossTraceCalc:WW().crossTraceCalc,plot:fLe(),style:GD().style,colorbar:$d(),meta:{}}});var pLe=ye((y0r,vLe)=>{\"use strict\";vLe.exports=dLe()});var mLe=ye(qA=>{\"use strict\";var gLe=Mc();qA.name=\"icicle\";qA.plot=function(e,t,r,n){gLe.plotBasePlot(qA.name,e,t,r,n)};qA.clean=function(e,t,r,n){gLe.cleanBasePlot(qA.name,e,t,r,n)}});var iX=ye((x0r,xLe)=>{\"use strict\";var{hovertemplateAttrs:KLt,texttemplateAttrs:JLt,templatefallbackAttrs:yLe}=Ll(),$Lt=Tu(),QLt=Cc().attributes,XE=S2(),o0=LE(),WD=VD(),_Le=z2(),ePt=Ao().extendFlat,tPt=Pd().pattern;xLe.exports={labels:o0.labels,parents:o0.parents,values:o0.values,branchvalues:o0.branchvalues,count:o0.count,level:o0.level,maxdepth:o0.maxdepth,tiling:{orientation:{valType:\"enumerated\",values:[\"v\",\"h\"],dflt:\"h\",editType:\"plot\"},flip:WD.tiling.flip,pad:{valType:\"number\",min:0,dflt:0,editType:\"plot\"},editType:\"calc\"},marker:ePt({colors:o0.marker.colors,line:o0.marker.line,pattern:tPt,editType:\"calc\"},$Lt(\"marker\",{colorAttr:\"colors\",anim:!1})),leaf:o0.leaf,pathbar:WD.pathbar,text:XE.text,textinfo:o0.textinfo,texttemplate:JLt({editType:\"plot\"},{keys:_Le.eventDataKeys.concat([\"label\",\"value\"])}),texttemplatefallback:yLe({editType:\"plot\"}),hovertext:XE.hovertext,hoverinfo:o0.hoverinfo,hovertemplate:KLt({},{keys:_Le.eventDataKeys}),hovertemplatefallback:yLe(),textfont:XE.textfont,insidetextfont:XE.insidetextfont,outsidetextfont:WD.outsidetextfont,textposition:WD.textposition,sort:XE.sort,root:o0.root,domain:QLt({name:\"icicle\",trace:!0,editType:\"calc\"})}});var nX=ye((b0r,bLe)=>{\"use strict\";bLe.exports={iciclecolorway:{valType:\"colorlist\",editType:\"calc\"},extendiciclecolors:{valType:\"boolean\",dflt:!0,editType:\"calc\"}}});var SLe=ye((w0r,ALe)=>{\"use strict\";var wLe=Dr(),rPt=iX(),iPt=ka(),nPt=Cc().defaults,aPt=r0().handleText,oPt=e2().TEXTPAD,sPt=M2().handleMarkerDefaults,TLe=tc(),lPt=TLe.hasColorscale,uPt=TLe.handleDefaults;ALe.exports=function(t,r,n,i){function a(d,v){return wLe.coerce(t,r,rPt,d,v)}var o=a(\"labels\"),s=a(\"parents\");if(!o||!o.length||!s||!s.length){r.visible=!1;return}var l=a(\"values\");l&&l.length?a(\"branchvalues\"):a(\"count\"),a(\"level\"),a(\"maxdepth\"),a(\"tiling.orientation\"),a(\"tiling.flip\"),a(\"tiling.pad\");var u=a(\"text\");a(\"texttemplate\"),a(\"texttemplatefallback\"),r.texttemplate||a(\"textinfo\",wLe.isArrayOrTypedArray(u)?\"text+label\":\"label\"),a(\"hovertext\"),a(\"hovertemplate\"),a(\"hovertemplatefallback\");var c=a(\"pathbar.visible\"),f=\"auto\";aPt(t,r,i,a,f,{hasPathbar:c,moduleHasSelected:!1,moduleHasUnselected:!1,moduleHasConstrain:!1,moduleHasCliponaxis:!1,moduleHasTextangle:!1,moduleHasInsideanchor:!1}),a(\"textposition\"),sPt(t,r,i,a);var 
h=r._hasColorscale=lPt(t,\"marker\",\"colors\")||(t.marker||{}).coloraxis;h&&uPt(t,r,i,a,{prefix:\"marker.\",cLetter:\"c\"}),a(\"leaf.opacity\",h?1:.7),r._hovered={marker:{line:{width:2,color:iPt.contrast(i.paper_bgcolor)}}},c&&(a(\"pathbar.thickness\",r.pathbar.textfont.size+2*oPt),a(\"pathbar.side\"),a(\"pathbar.edgeshape\")),a(\"sort\"),a(\"root.color\"),nPt(r,i,a),r._length=null}});var ELe=ye((T0r,MLe)=>{\"use strict\";var cPt=Dr(),fPt=nX();MLe.exports=function(t,r){function n(i,a){return cPt.coerce(t,r,fPt,i,a)}n(\"iciclecolorway\",r.colorway),n(\"extendiciclecolors\")}});var oX=ye(aX=>{\"use strict\";var kLe=RE();aX.calc=function(e,t){return kLe.calc(e,t)};aX.crossTraceCalc=function(e){return kLe._runCrossTraceCalc(\"icicle\",e)}});var LLe=ye((S0r,CLe)=>{\"use strict\";var hPt=PE(),dPt=XW();CLe.exports=function(t,r,n){var i=n.flipX,a=n.flipY,o=n.orientation===\"h\",s=n.maxDepth,l=r[0],u=r[1];s&&(l=(t.height+1)*r[0]/Math.min(t.height+1,s),u=(t.height+1)*r[1]/Math.min(t.height+1,s));var c=hPt.partition().padding(n.pad.inner).size(o?[r[1],l]:[r[0],u])(t);return(o||i||a)&&dPt(c,r,{swapXY:o,flipX:i,flipY:a}),c}});var sX=ye((M0r,FLe)=>{\"use strict\";var PLe=Oa(),ILe=ka(),RLe=Dr(),vPt=bv().resizeText,pPt=BD();function gPt(e){var t=e._fullLayout._iciclelayer.selectAll(\".trace\");vPt(e,t,\"icicle\"),t.each(function(r){var n=PLe.select(this),i=r[0],a=i.trace;n.style(\"opacity\",a.opacity),n.selectAll(\"path.surface\").each(function(o){PLe.select(this).call(DLe,o,a,e)})})}function DLe(e,t,r,n){var i=t.data.data,a=!t.children,o=i.i,s=RLe.castOption(r,o,\"marker.line.color\")||ILe.defaultLine,l=RLe.castOption(r,o,\"marker.line.width\")||0;e.call(pPt,t,r,n).style(\"stroke-width\",l).call(ILe.stroke,s).style(\"opacity\",a?r.leaf.opacity:null)}FLe.exports={style:gPt,styleOne:DLe}});var NLe=ye((E0r,BLe)=>{\"use strict\";var zLe=Oa(),XD=Dr(),OLe=So(),mPt=ru(),yPt=LLe(),qLe=sX().styleOne,lX=z2(),BA=Ky(),_Pt=HE(),xPt=UD().formatSliceLabel,uX=!1;BLe.exports=function(t,r,n,i,a){var o=a.width,s=a.height,l=a.viewX,u=a.viewY,c=a.pathSlice,f=a.toMoveInsideSlice,h=a.strTransform,d=a.hasTransition,v=a.handleSlicesExit,_=a.makeUpdateSliceInterpolator,b=a.makeUpdateTextInterpolator,p=a.prevEntry,k={},E=t._context.staticPlot,S=t._fullLayout,L=r[0],x=L.trace,C=x.textposition.indexOf(\"left\")!==-1,M=x.textposition.indexOf(\"right\")!==-1,g=x.textposition.indexOf(\"bottom\")!==-1,P=yPt(n,[o,s],{flipX:x.tiling.flip.indexOf(\"x\")>-1,flipY:x.tiling.flip.indexOf(\"y\")>-1,orientation:x.tiling.orientation,pad:{inner:x.tiling.pad},maxDepth:x._maxDepth}),T=P.descendants(),z=1/0,O=-1/0;T.forEach(function(N){var j=N.depth;j>=x._maxDepth?(N.x0=N.x1=(N.x0+N.x1)/2,N.y0=N.y1=(N.y0+N.y1)/2):(z=Math.min(z,j),O=Math.max(O,j))}),i=i.data(T,BA.getPtId),x._maxVisibleLayers=isFinite(O)?O-z+1:0,i.enter().append(\"g\").classed(\"slice\",!0),v(i,uX,k,[o,s],c),i.order();var V=null;if(d&&p){var G=BA.getPtId(p);i.each(function(N){V===null&&BA.getPtId(N)===G&&(V={x0:N.x0,x1:N.x1,y0:N.y0,y1:N.y1})})}var Z=function(){return V||{x0:0,x1:o,y0:0,y1:s}},H=i;return d&&(H=H.transition().each(\"end\",function(){var N=zLe.select(this);BA.setSliceCursor(N,t,{hideOnRoot:!0,hideOnLeaves:!1,isTransitioning:!1})})),H.each(function(N){N._x0=l(N.x0),N._x1=l(N.x1),N._y0=u(N.y0),N._y1=u(N.y1),N._hoverX=l(N.x1-x.tiling.pad),N._hoverY=u(g?N.y1-x.tiling.pad/2:N.y0+x.tiling.pad/2);var j=zLe.select(this),re=XD.ensureSingle(j,\"path\",\"surface\",function(Ce){Ce.style(\"pointer-events\",E?\"none\":\"all\")});d?re.transition().attrTween(\"d\",function(Ce){var 
\n", - "
" + "
" ], "text/plain": [ "PlotResult(data= Size: 262kB\n", @@ -4290,8 +4300,8 @@ "id": "11", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:30:57.850628Z", - "start_time": "2025-12-14T15:30:55.260086Z" + "end_time": "2025-12-14T15:36:52.191535Z", + "start_time": "2025-12-14T15:36:50.141148Z" } }, "outputs": [ @@ -4324,17 +4334,17 @@ "id": "12", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:30:57.963460Z", - "start_time": "2025-12-14T15:30:57.895913Z" + "end_time": "2025-12-14T15:37:01.054663Z", + "start_time": "2025-12-14T15:37:00.991311Z" } }, "outputs": [ { "data": { "text/html": [ - "
\n", + "
" + ], + "text/plain": [ + "PlotResult(data= Size: 262kB\n", + "Dimensions: (time: 2976, variable: 5)\n", + "Coordinates:\n", + " * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00\n", + " * variable (variable) object 40B 'ElecDemand(P_el)|fixed_relative_profil...\n", + "Data variables:\n", + " original (variable, time) float64 119kB 58.39 58.36 58.11 ... 152.1 151.0\n", + " aggregated (variable, time) float64 119kB 56.75 56.75 56.75 ... 153.1 153.1, figure=Figure({\n", + " 'data': [{'hovertemplate': ('variable=Original - ElecDemand' ... '}
value=%{y}'),\n",
       "              ... eight further scattergl traces (Original and Aggregated series of the remaining\n",
       "              variables, each carrying base64-encoded 'bdata' value arrays) omitted ...\n",
       "              {'hovertemplate': ('variable=Aggregated - HeatDema' ... '}
value=%{y}'),\n", + " 'legendgroup': 'Aggregated - HeatDemand(Q_th)|fixed_relative_profile',\n", + " 'line': {'color': '#FFA15A', 'dash': 'solid'},\n", + " 'marker': {'symbol': 'circle'},\n", + " 'mode': 'lines',\n", + " 'name': 'Aggregated - HeatDemand(Q_th)|fixed_relative_profile',\n", + " 'showlegend': True,\n", + " 'type': 'scattergl',\n", + " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", + " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", + " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", + " shape=(2976,), dtype='datetime64[ns]'),\n", + " 'xaxis': 'x',\n", + " 'y': {'bdata': ('hetRuB4UYECF61G4HhRgQIXrUbgeFG' ... 'XrUbgkY0AfhetRuCRjQB+F61G4JGNA'),\n", + " 'dtype': 'f8'},\n", + " 'yaxis': 'y'}],\n", + " 'layout': {'legend': {'title': {'text': 'variable'}, 'tracegroupgap': 0},\n", + " 'margin': {'t': 60},\n", + " 'template': '...',\n", + " 'title': {'text': 'Original vs Aggregated Data (original = ---)'},\n", + " 'xaxis': {'anchor': 'y', 'domain': [0.0, 1.0], 'title': {'text': 'Time in h'}},\n", + " 'yaxis': {'anchor': 'x', 'domain': [0.0, 1.0], 'title': {'text': 'Value'}}}\n", + "}))" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "# Segmentation only: reduce 96 timesteps/day to 12 segments/day\n", "fs_segmentation_demo = flow_system.copy()\n", @@ -4570,15 +4777,23 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 9, "id": "do29lhcinx7", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:31:06.393171Z", - "start_time": "2025-12-14T15:31:00.010062Z" + "end_time": "2025-12-14T15:37:05.038629Z", + "start_time": "2025-12-14T15:37:02.095516Z" } }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Comparing: HeatDemand(Q_th)|fixed_relative_profile\n" + ] + } + ], "source": [ "# Test different numbers of segments\n", "segment_configs = [6, 12, 24, 48]\n", @@ -4596,15 +4811,52 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 10, "id": "21athrtuavw", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:31:07.706095Z", - "start_time": "2025-12-14T15:31:07.626274Z" + "end_time": "2025-12-14T15:37:05.124205Z", + "start_time": "2025-12-14T15:37:05.100783Z" } }, - "outputs": [], + "outputs": [ + { + "data": { + "text/html": [ + "
" + ] + }, + "jetTransient": { + "display_id": null + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "# Compare the segmented data for first day only (clearer visualization)\n", "fig = make_subplots(\n", @@ -4667,8 +4919,8 @@ "id": "phpx36k23p", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:31:08.542772Z", - "start_time": "2025-12-14T15:31:08.441233Z" + "end_time": "2025-12-14T15:37:05.145417Z", + "start_time": "2025-12-14T15:37:05.131259Z" } }, "outputs": [ @@ -4677,14 +4929,14 @@ "text/html": [ "\n", - "\n", + "
\n",
       "| Segments | RMSE  | MAE   | Max Error | Correlation |\n",
       "|----------|-------|-------|-----------|-------------|\n",
       "| 4        | 15.67 | 12.34 | 43.15     | 0.8954      |\n",
       "| 6        | 10.19 |  7.93 | 36.38     | 0.9572      |\n",
       "| 8        |  8.24 |  6.49 | 35.36     | 0.9722      |\n",
       "| 12       |  5.89 |  4.53 | 23.95     | 0.9859      |\n",
       "| 24       |  2.73 |  2.12 | 11.38     | 0.9970      |\n",
       "| 48       |  1.20 |  0.86 |  3.91     | 0.9994      |\n",
       "
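The four error metrics above are the standard reconstruction-error measures between an original profile and its segment-averaged counterpart. A minimal numpy sketch of how such values can be computed; the notebook's own helper is not visible in this diff, so the function name and signature here are illustrative:

```python
import numpy as np

def accuracy_metrics(original: np.ndarray, aggregated: np.ndarray) -> dict[str, float]:
    """Reconstruction-error metrics between two equally long series."""
    err = aggregated - original
    return {
        'RMSE': float(np.sqrt(np.mean(err**2))),         # root mean square error
        'MAE': float(np.mean(np.abs(err))),              # mean absolute error
        'Max Error': float(np.max(np.abs(err))),         # worst single timestep
        'Correlation': float(np.corrcoef(original, aggregated)[0, 1]),
    }
```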
\n" ], "text/plain": [ - "" + "" ] }, "execution_count": 11, @@ -4776,16 +5028,213 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 12, "id": "j24sbfpl0x", "metadata": { "ExecuteTime": { - "end_time": "2025-12-14T15:31:10.678388Z", - "start_time": "2025-12-14T15:31:09.101883Z" + "end_time": "2025-12-14T15:37:05.735963Z", + "start_time": "2025-12-14T15:37:05.163786Z" } }, - "outputs": [], - "source": [ + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Original: 2976 timesteps\n", + "Combined: 8 typical days × 12 segments = 96 representative timesteps\n" + ] + }, + { + "data": { + "text/html": [ + "
\n", + "
" + ], + "text/plain": [ + "PlotResult(data= Size: 262kB\n", + "Dimensions: (time: 2976, variable: 5)\n", + "Coordinates:\n", + " * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00\n", + " * variable (variable) object 40B 'ElecDemand(P_el)|fixed_relative_profil...\n", + "Data variables:\n", + " original (variable, time) float64 119kB 58.39 58.36 58.11 ... 152.1 151.0\n", + " aggregated (variable, time) float64 119kB 56.75 56.75 56.75 ... 153.1 153.1, figure=Figure({\n", + " 'data': [{'hovertemplate': ('variable=Original - ElecDemand' ... '}
value=%{y}'),\n",
       "              ... eight further scattergl traces (Original and Aggregated series of the remaining\n",
       "              variables, each carrying base64-encoded 'bdata' value arrays) omitted ...\n",
       "              {'hovertemplate': ('variable=Aggregated - HeatDema' ... '}
value=%{y}'),\n", + " 'legendgroup': 'Aggregated - HeatDemand(Q_th)|fixed_relative_profile',\n", + " 'line': {'color': '#FFA15A', 'dash': 'solid'},\n", + " 'marker': {'symbol': 'circle'},\n", + " 'mode': 'lines',\n", + " 'name': 'Aggregated - HeatDemand(Q_th)|fixed_relative_profile',\n", + " 'showlegend': True,\n", + " 'type': 'scattergl',\n", + " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", + " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", + " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", + " shape=(2976,), dtype='datetime64[ns]'),\n", + " 'xaxis': 'x',\n", + " 'y': {'bdata': ('hetRuB4UYECF61G4HhRgQIXrUbgeFG' ... 'XrUbgkY0AfhetRuCRjQB+F61G4JGNA'),\n", + " 'dtype': 'f8'},\n", + " 'yaxis': 'y'}],\n", + " 'layout': {'legend': {'title': {'text': 'variable'}, 'tracegroupgap': 0},\n", + " 'margin': {'t': 60},\n", + " 'template': '...',\n", + " 'title': {'text': 'Original vs Aggregated Data (original = ---)'},\n", + " 'xaxis': {'anchor': 'y', 'domain': [0.0, 1.0], 'title': {'text': 'Time in h'}},\n", + " 'yaxis': {'anchor': 'x', 'domain': [0.0, 1.0], 'title': {'text': 'Value'}}}\n", + "}))" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ "# Combined: 8 typical days × 12 segments each\n", "fs_combined_demo = flow_system.copy()\n", "fs_combined = fs_combined_demo.transform.cluster(\n", @@ -4813,51 +5262,38 @@ }, { "cell_type": "code", + "execution_count": null, "id": "15", "metadata": { + "ExecuteTime": { + "start_time": "2025-12-14T15:37:06.084308Z" + }, "jupyter": { "is_executing": true - }, - "ExecuteTime": { - "start_time": "2025-12-14T15:34:32.897171Z" } }, - "source": [ - "solver = fx.solvers.HighsSolver(mip_gap=0.01)\n", - "\n", - "start = timeit.default_timer()\n", - "fs_full = flow_system.copy()\n", - "fs_full.optimize(solver)\n", - "time_full = timeit.default_timer() - start\n", - "\n", - "print(f'Full optimization: {time_full:.2f} seconds')\n", - "print(f'Cost: {fs_full.solution[\"costs\"].item():,.0f} €')\n", - "print('\\nOptimized sizes:')\n", - "for name, size in fs_full.statistics.sizes.items():\n", - " print(f' {name}: {float(size.item()):.1f}')" - ], "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "\u001B[2m2025-12-14 16:34:33.058\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", - "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", - "\u001B[2m2025-12-14 16:34:33.146\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", - "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). 
Consider using status_parameters to allow the Flow to be switched active and inactive.\n" + "\u001b[2m2025-12-14 16:37:06.388\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", + "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", + "\u001b[2m2025-12-14 16:37:06.492\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", + "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", + "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ - "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 64/64 [00:00<00:00, 157.71it/s]\n", - "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 55/55 [00:00<00:00, 1053.62it/s]\n", - "Writing binary variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 5/5 [00:00<00:00, 886.33it/s]\n" + "Writing constraints.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 64/64 [00:00<00:00, 131.16it/s]\n", + "Writing continuous variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 55/55 [00:00<00:00, 830.38it/s]\n", + "Writing binary variables.: 100%|\u001b[38;2;128;191;255m██████████\u001b[0m| 5/5 [00:00<00:00, 1075.96it/s]\n" ] }, { @@ -4865,7 +5301,7 @@ "output_type": "stream", "text": [ "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", - "MIP linopy-problem-fnex2i30 has 89316 rows; 80386 cols; 264919 nonzeros; 5955 integer variables (5955 binary)\n", + "MIP linopy-problem-thzxg8od has 89316 rows; 80386 cols; 264919 nonzeros; 5955 integer variables (5955 binary)\n", "Coefficient ranges:\n", " Matrix [1e-05, 1e+03]\n", " Cost [1e+00, 1e+00]\n", @@ -4890,14 +5326,25 @@ " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. 
| LpIters Time\n", "\n", - " 0 0 0 0.00% -48251946.82856 inf inf 0 0 0 0 0.8s\n", - " R 0 0 0 0.00% 2209206.133553 2278967.860722 3.06% 0 0 0 15439 2.4s\n", - " C 0 0 0 0.00% 2209206.133553 2276813.637485 2.97% 7380 2937 0 18513 5.0s\n", - " 0 0 0 0.00% 2209206.133553 2276813.637485 2.97% 7578 2989 0 18631 10.0s\n" + " 0 0 0 0.00% -48251946.82856 inf inf 0 0 0 0 0.4s\n", + " R 0 0 0 0.00% 2209206.133553 2278967.860722 3.06% 0 0 0 15439 1.5s\n" ] } ], - "execution_count": null + "source": [ + "solver = fx.solvers.HighsSolver(mip_gap=0.01)\n", + "\n", + "start = timeit.default_timer()\n", + "fs_full = flow_system.copy()\n", + "fs_full.optimize(solver)\n", + "time_full = timeit.default_timer() - start\n", + "\n", + "print(f'Full optimization: {time_full:.2f} seconds')\n", + "print(f'Cost: {fs_full.solution[\"costs\"].item():,.0f} €')\n", + "print('\\nOptimized sizes:')\n", + "for name, size in fs_full.statistics.sizes.items():\n", + " print(f' {name}: {float(size.item()):.1f}')" + ] }, { "cell_type": "markdown", @@ -4907,104 +5354,10 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "id": "17", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:31:34.009387Z", - "start_time": "2025-12-14T15:31:30.196907Z" - } - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001B[2m2025-12-14 16:31:30.931\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", - "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", - "\u001B[2m2025-12-14 16:31:31.015\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", - "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). 
Consider using status_parameters to allow the Flow to be switched active and inactive.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 81/81 [00:00<00:00, 131.46it/s]\n", - "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 55/55 [00:00<00:00, 956.49it/s]\n", - "Writing binary variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 5/5 [00:00<00:00, 747.94it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", - "MIP linopy-problem-gj5jp5dp has 126461 rows; 80386 cols; 339209 nonzeros; 5955 integer variables (5955 binary)\n", - "Coefficient ranges:\n", - " Matrix [1e-05, 1e+03]\n", - " Cost [1e+00, 1e+00]\n", - " Bound [1e+00, 1e+03]\n", - " RHS [1e+00, 1e+00]\n", - "Presolving model\n", - "41449 rows, 7695 cols, 100532 nonzeros 0s\n", - "9148 rows, 5691 cols, 23883 nonzeros 0s\n", - "8222 rows, 4788 cols, 23865 nonzeros 0s\n", - "Presolve reductions: rows 8222(-118239); columns 4788(-75598); nonzeros 23865(-315344) \n", - "\n", - "Solving MIP model with:\n", - " 8222 rows\n", - " 4788 cols (1585 binary, 0 integer, 0 implied int., 3203 continuous, 0 domain fixed)\n", - " 23865 nonzeros\n", - "\n", - "Src: B => Branching; C => Central rounding; F => Feasibility pump; H => Heuristic;\n", - " I => Shifting; J => Feasibility jump; L => Sub-MIP; P => Empty MIP; R => Randomized rounding;\n", - " S => Solve LP; T => Evaluate node; U => Unbounded; X => User solution; Y => HiGHS solution;\n", - " Z => ZI Round; l => Trivial lower; p => Trivial point; u => Trivial upper; z => Trivial zero\n", - "\n", - " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", - "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. | LpIters Time\n", - "\n", - " 0 0 0 0.00% -35212528.89731 inf inf 0 0 0 0 0.3s\n", - " 0 0 0 0.00% 2215408.582854 inf inf 0 0 0 3609 0.4s\n", - " R 0 0 0 0.00% 2215408.582854 2215424.331523 0.00% 4826 751 0 4378 0.9s\n", - " 1 0 1 100.00% 2215408.582854 2215424.331523 0.00% 4826 751 0 4378 0.9s\n", - "\n", - "Solving report\n", - " Model linopy-problem-gj5jp5dp\n", - " Status Optimal\n", - " Primal bound 2215424.33152\n", - " Dual bound 2215408.58285\n", - " Gap 0.000711% (tolerance: 1%)\n", - " P-D integral 7.89234528479e-08\n", - " Solution status feasible\n", - " 2215424.33152 (objective)\n", - " 0 (bound viol.)\n", - " 0 (int. 
viol.)\n", - " 0 (row viol.)\n", - " Timing 0.91\n", - " Max sub-MIP depth 0\n", - " Nodes 1\n", - " Repair LPs 0\n", - " LP iterations 4378\n", - " 0 (strong br.)\n", - " 769 (separation)\n", - " 0 (heuristics)\n", - "Clustered optimization: 3.81 seconds\n", - "Cost: 2,215,424 €\n", - "Speedup: 4.9x\n", - "\n", - "Optimized sizes:\n", - " CHP(Q_th): 300.0\n", - " Boiler(Q_th): 0.0\n", - " Storage: 1000.0\n" - ] - } - ], + "metadata": {}, + "outputs": [], "source": [ "start = timeit.default_timer()\n", "\n", @@ -5033,105 +5386,10 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "id": "puisldf6fa", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:31:37.427466Z", - "start_time": "2025-12-14T15:31:34.040817Z" - } - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001B[2m2025-12-14 16:31:34.760\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", - "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", - "\u001B[2m2025-12-14 16:31:34.852\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", - "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). 
Consider using status_parameters to allow the Flow to be switched active and inactive.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 81/81 [00:00<00:00, 117.21it/s]\n", - "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 55/55 [00:00<00:00, 858.30it/s]\n", - "Writing binary variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 5/5 [00:00<00:00, 993.25it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", - "MIP linopy-problem-bjm6577n has 137800 rows; 80386 cols; 361887 nonzeros; 5955 integer variables (5955 binary)\n", - "Coefficient ranges:\n", - " Matrix [1e-05, 1e+03]\n", - " Cost [1e+00, 1e+00]\n", - " Bound [1e+00, 1e+03]\n", - " RHS [1e+00, 1e+00]\n", - "Presolving model\n", - "41647 rows, 1246 cols, 98274 nonzeros 0s\n", - "29666 rows, 749 cols, 62308 nonzeros 0s\n", - "1125 rows, 534 cols, 2267 nonzeros 0s\n", - "1123 rows, 159 cols, 1027 nonzeros 0s\n", - "501 rows, 159 cols, 1025 nonzeros 0s\n", - "Presolve reductions: rows 501(-137299); columns 159(-80227); nonzeros 1025(-360862) \n", - "\n", - "Solving MIP model with:\n", - " 501 rows\n", - " 159 cols (1 binary, 0 integer, 0 implied int., 158 continuous, 0 domain fixed)\n", - " 1025 nonzeros\n", - "\n", - "Src: B => Branching; C => Central rounding; F => Feasibility pump; H => Heuristic;\n", - " I => Shifting; J => Feasibility jump; L => Sub-MIP; P => Empty MIP; R => Randomized rounding;\n", - " S => Solve LP; T => Evaluate node; U => Unbounded; X => User solution; Y => HiGHS solution;\n", - " Z => ZI Round; l => Trivial lower; p => Trivial point; u => Trivial upper; z => Trivial zero\n", - "\n", - " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", - "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. | LpIters Time\n", - "\n", - " J 0 0 0 0.00% -inf 2426442.894624 Large 0 0 0 0 0.2s\n", - " T 0 0 0 0.00% -144158.139812 2407140.32574 105.99% 0 0 0 18 0.2s\n", - " 1 0 1 100.00% 2407140.32574 2407140.32574 0.00% 0 0 0 18 0.2s\n", - "\n", - "Solving report\n", - " Model linopy-problem-bjm6577n\n", - " Status Optimal\n", - " Primal bound 2407140.32574\n", - " Dual bound 2407140.32574\n", - " Gap 0% (tolerance: 1%)\n", - " P-D integral 0.00409873540413\n", - " Solution status feasible\n", - " 2407140.32574 (objective)\n", - " 0 (bound viol.)\n", - " 0 (int. 
viol.)\n", - " 0 (row viol.)\n", - " Timing 0.17\n", - " Max sub-MIP depth 0\n", - " Nodes 1\n", - " Repair LPs 0\n", - " LP iterations 18\n", - " 0 (strong br.)\n", - " 0 (separation)\n", - " 0 (heuristics)\n", - "Segmentation optimization: 3.38 seconds\n", - "Cost: 2,407,140 €\n", - "Speedup: 5.5x\n", - "\n", - "Optimized sizes:\n", - " CHP(Q_th): 248.4\n", - " Boiler(Q_th): 0.0\n", - " Storage: 0.0\n" - ] - } - ], + "metadata": {}, + "outputs": [], "source": [ "start = timeit.default_timer()\n", "\n", @@ -5139,7 +5397,7 @@ "fs_segmented = flow_system.transform.cluster(\n", " n_clusters=None, # No clustering\n", " cluster_duration='1D',\n", - " n_segments=4, # 4 segments per day\n", + " n_segments=12, # 12 segments per day\n", ")\n", "\n", "fs_segmented.optimize(solver)\n", @@ -5161,113 +5419,18 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "id": "frq1vct5l4v", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:31:40.701389Z", - "start_time": "2025-12-14T15:31:37.455352Z" - } - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001B[2m2025-12-14 16:31:38.177\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", - "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", - "\u001B[2m2025-12-14 16:31:38.252\u001B[0m \u001B[33mWARNING \u001B[0m │ \u001B[33m┌─\u001B[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", - "\u001B[2m \u001B[0m │ \u001B[33m│\u001B[0m Coordinates:\n", - "\u001B[2m \u001B[0m │ \u001B[33m└─\u001B[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). 
Consider using status_parameters to allow the Flow to be switched active and inactive.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 98/98 [00:00<00:00, 173.62it/s]\n", - "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 55/55 [00:00<00:00, 1005.32it/s]\n", - "Writing binary variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 5/5 [00:00<00:00, 846.58it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", - "MIP linopy-problem-s6_9mxfb has 174945 rows; 80386 cols; 436177 nonzeros; 5955 integer variables (5955 binary)\n", - "Coefficient ranges:\n", - " Matrix [1e-05, 1e+03]\n", - " Cost [1e+00, 1e+00]\n", - " Bound [1e+00, 1e+03]\n", - " RHS [1e+00, 1e+00]\n", - "Presolving model\n", - "41647 rows, 326 cols, 98274 nonzeros 0s\n", - "29654 rows, 197 cols, 62284 nonzeros 0s\n", - "295 rows, 144 cols, 596 nonzeros 0s\n", - "294 rows, 46 cols, 275 nonzeros 0s\n", - "132 rows, 46 cols, 273 nonzeros 0s\n", - "Presolve reductions: rows 132(-174813); columns 46(-80340); nonzeros 273(-435904) \n", - "\n", - "Solving MIP model with:\n", - " 132 rows\n", - " 46 cols (1 binary, 0 integer, 0 implied int., 45 continuous, 0 domain fixed)\n", - " 273 nonzeros\n", - "\n", - "Src: B => Branching; C => Central rounding; F => Feasibility pump; H => Heuristic;\n", - " I => Shifting; J => Feasibility jump; L => Sub-MIP; P => Empty MIP; R => Randomized rounding;\n", - " S => Solve LP; T => Evaluate node; U => Unbounded; X => User solution; Y => HiGHS solution;\n", - " Z => ZI Round; l => Trivial lower; p => Trivial point; u => Trivial upper; z => Trivial zero\n", - "\n", - " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n", - "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. | LpIters Time\n", - "\n", - " J 0 0 0 0.00% -inf 2461461.294894 Large 0 0 0 0 0.2s\n", - " T 0 0 0 0.00% 148461.143926 2407158.736494 93.83% 0 0 0 7 0.2s\n", - " 1 0 1 100.00% 2407158.736494 2407158.736494 0.00% 0 0 0 7 0.2s\n", - "\n", - "Solving report\n", - " Model linopy-problem-s6_9mxfb\n", - " Status Optimal\n", - " Primal bound 2407158.73649\n", - " Dual bound 2407158.73649\n", - " Gap 0% (tolerance: 1%)\n", - " P-D integral 0.00427957544687\n", - " Solution status feasible\n", - " 2407158.73649 (objective)\n", - " 0 (bound viol.)\n", - " 0 (int. 
viol.)\n", - " 0 (row viol.)\n", - " Timing 0.22\n", - " Max sub-MIP depth 0\n", - " Nodes 1\n", - " Repair LPs 0\n", - " LP iterations 7\n", - " 0 (strong br.)\n", - " 0 (separation)\n", - " 0 (heuristics)\n", - "Combined optimization: 3.24 seconds\n", - "Cost: 2,407,159 €\n", - "Speedup: 5.7x\n", - "\n", - "Optimized sizes:\n", - " CHP(Q_th): 248.4\n", - " Boiler(Q_th): 0.0\n", - " Storage: 0.0\n" - ] - } - ], + "metadata": {}, + "outputs": [], "source": [ "start = timeit.default_timer()\n", "\n", - "# Combined: 8 typical days × 4 segments each\n", + "# Combined: 8 typical days × 12 segments each\n", "fs_combined_opt = flow_system.transform.cluster(\n", " n_clusters=8,\n", " cluster_duration='1D',\n", - " n_segments=4,\n", + " n_segments=12,\n", ")\n", "\n", "fs_combined_opt.optimize(solver)\n", @@ -5291,86 +5454,10 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": null, "id": "19", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:31:40.744033Z", - "start_time": "2025-12-14T15:31:40.734687Z" - } - }, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
|                      | Time [s] | Cost [€]  | CHP Size | Boiler Size | Storage Size | Cost Gap [%] | Speedup |
|----------------------|----------|-----------|----------|-------------|--------------|--------------|---------|
| Full (baseline)      | 18.62    | 2,209,206 | 300.0    | 0.0         | 1000         | 0.00         | 1.0x    |
| Clustering (8 days)  | 3.81     | 2,215,424 | 300.0    | 0.0         | 1000         | 0.28         | 4.9x    |
| Segmentation (4 seg) | 3.38     | 2,407,140 | 248.4    | 0.0         | 0            | 8.96         | 5.5x    |
| Combined (8×4)       | 3.24     | 2,407,159 | 248.4    | 0.0         | 0            | 8.96         | 5.7x    |
\n" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], + "metadata": {}, + "outputs": [], "source": [ "results = {\n", " 'Full (baseline)': {\n", @@ -5387,14 +5474,14 @@ " 'Boiler Size': fs_clustered.statistics.sizes['Boiler(Q_th)'].item(),\n", " 'Storage Size': fs_clustered.statistics.sizes['Storage'].item(),\n", " },\n", - " 'Segmentation (4 seg)': {\n", + " 'Segmentation (12 seg)': {\n", " 'Time [s]': time_segmented,\n", " 'Cost [€]': fs_segmented.solution['costs'].item(),\n", " 'CHP Size': fs_segmented.statistics.sizes['CHP(Q_th)'].item(),\n", " 'Boiler Size': fs_segmented.statistics.sizes['Boiler(Q_th)'].item(),\n", " 'Storage Size': fs_segmented.statistics.sizes['Storage'].item(),\n", " },\n", - " 'Combined (8×4)': {\n", + " 'Combined (8×12)': {\n", " 'Time [s]': time_combined,\n", " 'Cost [€]': fs_combined_opt.solution['costs'].item(),\n", " 'CHP Size': fs_combined_opt.statistics.sizes['CHP(Q_th)'].item(),\n", @@ -5435,23 +5522,10 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": null, "id": "21", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:31:40.802963Z", - "start_time": "2025-12-14T15:31:40.748748Z" - } - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Multi-period system: 1344 timesteps × 3 periods\n" - ] - } - ], + "metadata": {}, + "outputs": [], "source": [ "# Load raw data for multi-period example\n", "data = pd.read_csv('../../examples/resources/Zeitreihen2020.csv', index_col=0, parse_dates=True).sort_index()\n", @@ -5508,27 +5582,10 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": null, "id": "22", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:31:41.653370Z", - "start_time": "2025-12-14T15:31:40.807443Z" - } - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001B[2m2025-12-14 16:31:40.808\u001B[0m \u001B[33mWARNING \u001B[0m │ FlowSystem is not connected_and_transformed. 
Connecting and transforming data now.\n", - "Clustering was applied to 3 period(s):\n", - " - period=2024\n", - " - period=2025\n", - " - period=2026\n" - ] - } - ], + "metadata": {}, + "outputs": [], "source": [ "# Cluster - each period gets clustered independently\n", "fs_mp_clustered = fs_mp.transform.cluster(n_clusters=4, cluster_duration='1D')\n", @@ -5542,50 +5599,10 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": null, "id": "23", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:31:43.152426Z", - "start_time": "2025-12-14T15:31:41.711392Z" - } - }, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Writing constraints.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 38/38 [00:00<00:00, 155.98it/s]\n", - "Writing continuous variables.: 100%|\u001B[38;2;128;191;255m██████████\u001B[0m| 22/22 [00:00<00:00, 686.93it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Running HiGHS 1.12.0 (git hash: 755a8e0): Copyright (c) 2025 HiGHS under MIT licence terms\n", - "LP linopy-problem-9o95ub30 has 49392 rows; 40356 cols; 131016 nonzeros\n", - "Coefficient ranges:\n", - " Matrix [2e-01, 2e+01]\n", - " Cost [1e+00, 1e+00]\n", - " Bound [5e+01, 1e+03]\n", - " RHS [0e+00, 0e+00]\n", - "Presolving model\n", - "0 rows, 0 cols, 0 nonzeros 0s\n", - "0 rows, 0 cols, 0 nonzeros 0s\n", - "Presolve reductions: rows 0(-49392); columns 0(-40356); nonzeros 0(-131016) - Reduced to empty\n", - "Performed postsolve\n", - "Solving the original LP from the solution after postsolve\n", - "\n", - "Model name : linopy-problem-9o95ub30\n", - "Model status : Optimal\n", - "Objective value : 1.3352558890e+07\n", - "P-D objective error : 1.7437154695e-15\n", - "HiGHS run time : 0.06\n", - "Multi-period clustered cost: 13,352,559 €\n" - ] - } - ], + "metadata": {}, + "outputs": [], "source": [ "# Optimize\n", "fs_mp_clustered.optimize(solver)\n", @@ -5596,13 +5613,42 @@ "cell_type": "markdown", "id": "24", "metadata": {}, - "source": "## API Reference\n\n### `transform.cluster()` Parameters\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `n_clusters` | `int \\| None` | Number of typical periods (e.g., 8 typical days). Set to `None` for segmentation-only. |\n| `cluster_duration` | `str \\| float` | Duration per cluster ('1D', '24h', or hours as float) |\n| `n_segments` | `int \\| None` | Segments within each period (inner-period aggregation). 
Default: `None` (no segmentation) |\n| `aggregate_data` | `bool` | If True (default), aggregate time series data |\n| `include_storage` | `bool` | Include storage in clustering constraints (default: True) |\n| `flexibility_percent` | `float` | Allow binary variable deviations (default: 0) |\n| `flexibility_penalty` | `float` | Penalty for deviations (default: 0) |\n| `time_series_for_high_peaks` | `list` | Force inclusion of high-value periods |\n| `time_series_for_low_peaks` | `list` | Force inclusion of low-value periods |\n\n### Common Patterns\n\n```python\n# Clustering only: 8 typical days from a year\nfs.transform.cluster(n_clusters=8, cluster_duration='1D')\n\n# Segmentation only: reduce to 4 segments per day\nfs.transform.cluster(n_clusters=None, cluster_duration='1D', n_segments=4)\n\n# Combined: 8 typical days × 4 segments each\nfs.transform.cluster(n_clusters=8, cluster_duration='1D', n_segments=4)\n\n# Force inclusion of peak demand periods\nfs.transform.cluster(\n n_clusters=8,\n cluster_duration='1D',\n time_series_for_high_peaks=[heat_demand_ts],\n)\n```" + "source": "## API Reference\n\n### `transform.cluster()` Parameters\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `n_clusters` | `int \\| None` | Number of typical periods (e.g., 8 typical days). Set to `None` for segmentation-only. |\n| `cluster_duration` | `str \\| float` | Duration per cluster ('1D', '24h', or hours as float) |\n| `n_segments` | `int \\| None` | Segments within each period (inner-period aggregation). Default: `None` (no segmentation) |\n| `aggregate_data` | `bool` | If True (default), aggregate time series data |\n| `include_storage` | `bool` | Include storage in clustering constraints (default: True) |\n| `flexibility_percent` | `float` | Allow binary variable deviations (default: 0) |\n| `flexibility_penalty` | `float` | Penalty for deviations (default: 0) |\n| `time_series_for_high_peaks` | `list` | Force inclusion of high-value periods |\n| `time_series_for_low_peaks` | `list` | Force inclusion of low-value periods |\n\n### Common Patterns\n\n```python\n# Clustering only: 8 typical days from a year\nfs.transform.cluster(n_clusters=8, cluster_duration='1D')\n\n# Segmentation only: reduce to 12 segments per day\nfs.transform.cluster(n_clusters=None, cluster_duration='1D', n_segments=12)\n\n# Combined: 8 typical days × 12 segments each\nfs.transform.cluster(n_clusters=8, cluster_duration='1D', n_segments=12)\n\n# Force inclusion of peak demand periods\nfs.transform.cluster(\n n_clusters=8,\n cluster_duration='1D',\n time_series_for_high_peaks=[heat_demand_ts],\n)\n```" }, { "cell_type": "markdown", "id": "25", "metadata": {}, - "source": "## Summary\n\nYou learned how to:\n\n- Use **clustering** (`n_clusters`) to identify typical periods (inter-period aggregation)\n- Use **segmentation** (`n_segments`) to reduce timesteps within periods (inner-period aggregation)\n- **Combine both** techniques for maximum speedup\n- Cluster **multi-period** FlowSystems (each period independently)\n\n### When to Use Each Technique\n\n| Technique | Use Case | Example |\n|-----------|----------|---------|\n| **Clustering** | Many similar periods (days, weeks) | 365 days → 12 typical days |\n| **Segmentation** | High-resolution data not needed | 96 timesteps/day → 4 segments |\n| **Combined** | Large problems with high resolution | 365 × 96 → 12 × 4 = 48 timesteps |\n\n### Accuracy vs. 
Speed Trade-off\n\n| Approach | Speedup | Accuracy | Best For |\n|----------|---------|----------|----------|\n| More clusters/segments | Lower | Higher | Final results |\n| Fewer clusters/segments | Higher | Lower | Screening, exploration |\n\n### Next Steps\n\n- **[08a-Aggregation](08a-aggregation.ipynb)**: Other aggregation techniques (resampling, two-stage)\n- **[08b-Rolling Horizon](08b-rolling-horizon.ipynb)**: Sequential optimization for long time series" + "source": [ + "## Summary\n", + "\n", + "You learned how to:\n", + "\n", + "- Use **clustering** (`n_clusters`) to identify typical periods (inter-period aggregation)\n", + "- Use **segmentation** (`n_segments`) to reduce timesteps within periods (inner-period aggregation)\n", + "- **Combine both** techniques for maximum speedup\n", + "- Cluster **multi-period** FlowSystems (each period independently)\n", + "\n", + "### When to Use Each Technique\n", + "\n", + "| Technique | Use Case | Example |\n", + "|-----------|----------|---------|\n", + "| **Clustering** | Many similar periods (days, weeks) | 365 days → 12 typical days |\n", + "| **Segmentation** | High-resolution data not needed | 96 timesteps/day → 12 segments |\n", + "| **Combined** | Large problems with high resolution | 365 × 96 → 12 × 12 = 144 timesteps |\n", + "\n", + "### Accuracy vs. Speed Trade-off\n", + "\n", + "| Approach | Speedup | Accuracy | Best For |\n", + "|----------|---------|----------|----------|\n", + "| More clusters/segments | Lower | Higher | Final results |\n", + "| Fewer clusters/segments | Higher | Lower | Screening, exploration |\n", + "\n", + "### Next Steps\n", + "\n", + "- **[08a-Aggregation](08a-aggregation.ipynb)**: Other aggregation techniques (resampling, two-stage)\n", + "- **[08b-Rolling Horizon](08b-rolling-horizon.ipynb)**: Sequential optimization for long time series" + ] } ], "metadata": { From e20775f0f85e17e202517b0a1d6d1faf064106e6 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 15 Dec 2025 10:05:43 +0100 Subject: [PATCH 020/191] Fix Data_DIR in notebooks --- docs/notebooks/data/generate_example_systems.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/docs/notebooks/data/generate_example_systems.py b/docs/notebooks/data/generate_example_systems.py index e42968de7..639db3a29 100644 --- a/docs/notebooks/data/generate_example_systems.py +++ b/docs/notebooks/data/generate_example_systems.py @@ -20,9 +20,11 @@ # Output directory (same as this script) try: OUTPUT_DIR = Path(__file__).parent + DATA_DIR = Path(__file__).parent.parent.parent.parent / 'examples' / 'resources' except NameError: # Running in notebook context (e.g., mkdocs-jupyter) OUTPUT_DIR = Path('docs/notebooks/data') + DATA_DIR = Path('examples/resources') def create_simple_system() -> fx.FlowSystem: @@ -241,8 +243,8 @@ def create_district_heating_system() -> fx.FlowSystem: Used by: 08a-aggregation, 08b-rolling-horizon, 08c-clustering notebooks """ - # Load real data (relative to examples/resources) - data_path = Path(__file__).parent.parent.parent.parent / 'examples' / 'resources' / 'Zeitreihen2020.csv' + # Load real data + data_path = DATA_DIR / 'Zeitreihen2020.csv' data = pd.read_csv(data_path, index_col=0, parse_dates=True).sort_index() data = data['2020-01-01':'2020-01-31 23:45:00'] # One month data.index.name = 'time' @@ -358,7 +360,7 @@ def create_operational_system() -> fx.FlowSystem: Used by: 08b-rolling-horizon notebook """ # Load real data - data_path = 
Path(__file__).parent.parent.parent.parent / 'examples' / 'resources' / 'Zeitreihen2020.csv'
+    data_path = DATA_DIR / 'Zeitreihen2020.csv'
     data = pd.read_csv(data_path, index_col=0, parse_dates=True).sort_index()
     data = data['2020-01-01':'2020-01-14 23:45:00']  # Two weeks
     data.index.name = 'time'

From 6fdd6847b65f217c684a1a200f52c7f6fceb4e54 Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Mon, 15 Dec 2025 12:34:55 +0100
Subject: [PATCH 021/191] Group constraints and variables from clustering
 together

---
 flixopt/clustering.py | 142 ++++++++++++++++++++++++++++++++++++++----
 1 file changed, 129 insertions(+), 13 deletions(-)

diff --git a/flixopt/clustering.py b/flixopt/clustering.py
index 94e117102..8fa43b347 100644
--- a/flixopt/clustering.py
+++ b/flixopt/clustering.py
@@ -504,6 +504,10 @@ def do_modeling(self):
         binary_variables: set[str] = set(self._model.variables.binaries)
         binary_time_variables: set[str] = time_variables & binary_variables

+        # Group variables by dimension signature: (has_period, has_scenario, is_binary)
+        # This allows creating batched constraints with a 'variable' dimension
+        variable_groups: dict[tuple[bool, bool, bool], dict[str, linopy.Variable]] = {}
+
         for component in components:
             if isinstance(component, Storage) and not self.clustering_parameters.include_storage:
                 continue  # Skip storage if not included
@@ -511,12 +515,26 @@ def do_modeling(self):
             all_variables_of_component = set(component.submodel.variables)

             if self.clustering_parameters.aggregate_data:
-                relevant_variables = component.submodel.variables[all_variables_of_component & time_variables]
+                relevant_var_names = all_variables_of_component & time_variables
             else:
-                relevant_variables = component.submodel.variables[all_variables_of_component & binary_time_variables]
-
-            for variable in relevant_variables:
-                self._equate_indices_multi_dimensional(component.submodel.variables[variable])
+                relevant_var_names = all_variables_of_component & binary_time_variables
+
+            for var_name in relevant_var_names:
+                variable = component.submodel.variables[var_name]
+                var_dims = set(variable.dims)
+                key = ('period' in var_dims, 'scenario' in var_dims, var_name in binary_variables)
+                variable_groups.setdefault(key, {})[var_name] = variable
+
+        # Process each group with batched constraint creation
+        # Binary variables are handled separately with per-variable constraints (simpler, avoids dimension conflicts)
+        for (has_period, has_scenario, is_binary), variables in variable_groups.items():
+            if is_binary:
+                # Handle binaries individually to avoid dimension conflicts with correction variables
+                for variable in variables.values():
+                    self._equate_indices_multi_dimensional(variable)
+            else:
+                # Batch continuous variables for efficiency
+                self._equate_indices_batched(variables, has_period, has_scenario)

         # Add penalty for flexibility deviations
         penalty = self.clustering_parameters.flexibility_penalty
@@ -525,14 +543,103 @@ def do_modeling(self):

         for variable_name in self.variables_direct:
             variable = self.variables_direct[variable_name]
+            # Correction vars use eq_idx dimension (not time) to avoid duplicate coord issues
+            sum_dim = 'eq_idx' if 'eq_idx' in variable.dims else 'time'
             self._model.effects.add_share_to_effects(
                 name='Clustering',
-                expressions={PENALTY_EFFECT_LABEL: (variable * penalty).sum('time')},
+                expressions={PENALTY_EFFECT_LABEL: (variable * penalty).sum(sum_dim)},
                 target='periodic',
             )

+    def _equate_indices_batched(
+        self,
+        variables: dict[str, linopy.Variable],
+
has_period: bool, + has_scenario: bool, + ) -> None: + """Create batched constraints for a group of continuous variables. + + Instead of creating one constraint per variable, this method creates a single constraint + with a 'variable' dimension, reducing the number of constraint objects. + + Args: + variables: Dict mapping variable names to linopy Variables. + has_period: Whether these variables have a 'period' dimension. + has_scenario: Whether these variables have a 'scenario' dimension. + """ + + # Create group suffix for unique constraint names + group_suffix = f'_{"P" if has_period else ""}{"S" if has_scenario else ""}' + if group_suffix == '_': + group_suffix = '_base' + + for (period_label, scenario_label), clustering in self.clustering_data_dict.items(): + # Build selector for this period/scenario combination + selector = {} + if has_period and period_label is not None: + selector['period'] = period_label + if has_scenario and scenario_label is not None: + selector['scenario'] = scenario_label + + # Create constraint name suffix with dimension info + dim_suffix = group_suffix + if period_label is not None: + dim_suffix += f'_p{period_label}' + if scenario_label is not None: + dim_suffix += f'_s{scenario_label}' + + # 1. Inter-period clustering constraints + cluster_indices = clustering.get_equation_indices(skip_first_index_of_period=True) + if len(cluster_indices[0]) > 0: + self._create_batched_constraint(variables, selector, cluster_indices, f'{dim_suffix}_cluster') + + # 2. Intra-segment constraints + segment_indices = clustering.get_segment_equation_indices() + if len(segment_indices[0]) > 0: + self._create_batched_constraint(variables, selector, segment_indices, f'{dim_suffix}_segment') + + def _create_batched_constraint( + self, + variables: dict[str, linopy.Variable], + selector: dict, + indices: tuple[np.ndarray, np.ndarray], + dim_suffix: str, + ) -> None: + """Create a single constraint with 'variable' dimension for multiple variables. + + Args: + variables: Dict mapping variable names to linopy Variables. + selector: Dict for selecting period/scenario slice (e.g., {'period': 2024}). + indices: Tuple of (idx_a, idx_b) arrays for equating timesteps. + dim_suffix: Suffix for constraint name (e.g., '_cluster' or '_p2024_cluster'). + """ + import linopy + + # Build list of expressions, each expanded with variable dimension + lhs_parts = [] + + for var_name, variable in variables.items(): + # Select period/scenario slice if needed + var_slice = variable.sel(**selector) if selector else variable + + # Create difference expression: var[idx_a] - var[idx_b] + diff = var_slice.isel(time=indices[0]) - var_slice.isel(time=indices[1]) + + # Expand dims to add 'variable' dimension + lhs_parts.append(diff.expand_dims(variable=[var_name])) + + # Merge all expressions along 'variable' dimension + combined_lhs = linopy.merge(*lhs_parts, dim='variable') + + # Create single constraint for all variables + self.add_constraints(combined_lhs == 0, short_name=f'equate_indices{dim_suffix}') + def _equate_indices_multi_dimensional(self, variable: linopy.Variable) -> None: - """Equate indices across clustered segments, handling multi-dimensional cases.""" + """Equate indices across clustered segments, handling multi-dimensional cases. + + Note: This method is kept for backwards compatibility but is no longer used + by the default do_modeling(). Use _equate_indices_batched() instead. 
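+
+        A typical call (variable name illustrative, not from this codebase):
+            self._equate_indices_multi_dimensional(component.submodel.variables['Boiler(Q_th)|flow_rate'])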
+ """ var_dims = set(variable.dims) has_period = 'period' in var_dims has_scenario = 'scenario' in var_dims @@ -588,13 +695,22 @@ def _equate_indices( # Add correction variables for binary flexibility if var_name in self._model.variables.binaries and self.clustering_parameters.flexibility_percent > 0: - sel = variable.isel(time=indices[0]) - coords = {d: sel.indexes[d] for d in sel.dims} - var_k1 = self.add_variables(binary=True, coords=coords, short_name=f'correction1{dim_suffix}|{var_name}') - var_k0 = self.add_variables(binary=True, coords=coords, short_name=f'correction0{dim_suffix}|{var_name}') + # Use integer indices for correction variables to avoid duplicate datetime coords + # (indices[0] can have duplicates since same timestep may be compared to multiple others) + coords = [np.arange(length)] + dims = ['eq_idx'] + var_k1 = self.add_variables( + binary=True, coords=coords, dims=dims, short_name=f'correction1{dim_suffix}|{var_name}' + ) + var_k0 = self.add_variables( + binary=True, coords=coords, dims=dims, short_name=f'correction0{dim_suffix}|{var_name}' + ) # Extend equation to allow deviation: On(a,t) - On(b,t) + K1 - K0 = 0 - con.lhs += 1 * var_k1 - 1 * var_k0 + # Rename constraint's time dim to eq_idx for alignment, then rename back + lhs_renamed = con.lhs.rename({'time': 'eq_idx'}) + new_lhs = lhs_renamed + 1 * var_k1 - 1 * var_k0 + con.lhs = new_lhs.rename({'eq_idx': 'time'}) # Interlock K0 and K1: can't both be 1 self.add_constraints(var_k0 + var_k1 <= 1, short_name=f'lock_k0_and_k1{dim_suffix}|{var_name}') @@ -602,6 +718,6 @@ def _equate_indices( # Limit total corrections limit = int(np.floor(self.clustering_parameters.flexibility_percent / 100 * length)) self.add_constraints( - var_k0.sum(dim='time') + var_k1.sum(dim='time') <= limit, + var_k0.sum(dim='eq_idx') + var_k1.sum(dim='eq_idx') <= limit, short_name=f'limit_corrections{dim_suffix}|{var_name}', ) From 76af0192ef4137f2df60ecadd7225f0725e92499 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 15 Dec 2025 14:30:44 +0100 Subject: [PATCH 022/191] Only equalize SOME variables --- flixopt/clustering.py | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/flixopt/clustering.py b/flixopt/clustering.py index 8fa43b347..07cdfb705 100644 --- a/flixopt/clustering.py +++ b/flixopt/clustering.py @@ -498,11 +498,7 @@ def do_modeling(self): else: components = list(self.components_to_clusterize) - time_variables: set[str] = { - name for name in self._model.variables if 'time' in self._model.variables[name].dims - } binary_variables: set[str] = set(self._model.variables.binaries) - binary_time_variables: set[str] = time_variables & binary_variables # Group variables by dimension signature: (has_period, has_scenario, is_binary) # This allows creating batched constraints with a 'variable' dimension @@ -512,15 +508,29 @@ def do_modeling(self): if isinstance(component, Storage) and not self.clustering_parameters.include_storage: continue # Skip storage if not included - all_variables_of_component = set(component.submodel.variables) + # Only equalize specific variable types: + # - flow_rate: main continuous decision variables + # - status: binary on/off variables (only if aggregate_data=False or binary flexibility) + relevant_var_names: list[str] = [] + # Always include flow_rate variables when aggregate_data=True if self.clustering_parameters.aggregate_data: - relevant_var_names = all_variables_of_component & time_variables - else: - relevant_var_names 
= all_variables_of_component & binary_time_variables
+            for flow in component.inputs + component.outputs:
+                flow_rate_name = f'{flow.label_full}|flow_rate'
+                if flow_rate_name in component.submodel.variables:
+                    relevant_var_names.append(flow_rate_name)
+
+        # Include status variables (binary on/off) when needed
+        if not self.clustering_parameters.aggregate_data or self.clustering_parameters.flexibility_percent > 0:
+            for flow in component.inputs + component.outputs:
+                status_name = f'{flow.label_full}|status'
+                if status_name in component.submodel.variables:
+                    relevant_var_names.append(status_name)

         for var_name in relevant_var_names:
             variable = component.submodel.variables[var_name]
+            if 'time' not in variable.dims:
+                continue  # Skip non-time variables
             var_dims = set(variable.dims)
             key = ('period' in var_dims, 'scenario' in var_dims, var_name in binary_variables)
             variable_groups.setdefault(key, {})[var_name] = variable

From b7c5d6026ab71029db5fdb09c713552b26190ae8 Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Mon, 15 Dec 2025 15:01:39 +0100
Subject: [PATCH 023/191] Always fix binaries for better pre-solve

---
 flixopt/clustering.py | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)

diff --git a/flixopt/clustering.py b/flixopt/clustering.py
index 07cdfb705..3bc3415be 100644
--- a/flixopt/clustering.py
+++ b/flixopt/clustering.py
@@ -510,22 +510,26 @@ def do_modeling(self):

         # Only equalize specific variable types:
         # - flow_rate: main continuous decision variables
-        # - status: binary on/off variables (only if aggregate_data=False or binary flexibility)
+        # - status: binary on/off variables
+        # - inside_piece: binary for piecewise segment selection
         relevant_var_names: list[str] = []

-        # Always include flow_rate variables when aggregate_data=True
-        if self.clustering_parameters.aggregate_data:
-            for flow in component.inputs + component.outputs:
+        for flow in component.inputs + component.outputs:
+            if self.clustering_parameters.aggregate_data:
+                # Continuous flow rate
                 flow_rate_name = f'{flow.label_full}|flow_rate'
                 if flow_rate_name in component.submodel.variables:
                     relevant_var_names.append(flow_rate_name)

-        # Include status variables (binary on/off) when needed
-        if not self.clustering_parameters.aggregate_data or self.clustering_parameters.flexibility_percent > 0:
-            for flow in component.inputs + component.outputs:
-                status_name = f'{flow.label_full}|status'
-                if status_name in component.submodel.variables:
-                    relevant_var_names.append(status_name)
+            # Binary variables - always include for better solver presolve
+            # On/off status
+            status_name = f'{flow.label_full}|status'
+            if status_name in component.submodel.variables:
+                relevant_var_names.append(status_name)
+            # Piecewise segment selection
+            inside_piece_name = f'{flow.label_full}|inside_piece'
+            if inside_piece_name in component.submodel.variables:
+                relevant_var_names.append(inside_piece_name)

         for var_name in relevant_var_names:
             variable = component.submodel.variables[var_name]

From 737f3544309ff8d8d23edfb62f075a9c3b2b5c76 Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Mon, 15 Dec 2025 15:23:14 +0100
Subject: [PATCH 024/191] Group constraints and variables from clustering
 together

---
 flixopt/clustering.py | 50 ++++++++++++++++++++++++++++---------------
 1 file changed, 33 insertions(+), 17 deletions(-)

diff --git a/flixopt/clustering.py b/flixopt/clustering.py
index 3bc3415be..78b9f7133 100644
--- a/flixopt/clustering.py
+++
b/flixopt/clustering.py @@ -526,13 +526,22 @@ def do_modeling(self): status_name = f'{flow.label_full}|status' if status_name in component.submodel.variables: relevant_var_names.append(status_name) - # Piecewise segment selection - inside_piece_name = f'{flow.label_full}|inside_piece' - if inside_piece_name in component.submodel.variables: - relevant_var_names.append(inside_piece_name) + + # Piecewise segment binaries (inside_piece for each piece) + piecewise_model = getattr(component.submodel, 'piecewise_conversion', None) + if piecewise_model is not None: + for piece in piecewise_model.pieces: + if piece.inside_piece is not None: + relevant_var_names.append(piece.inside_piece.name) for var_name in relevant_var_names: - variable = component.submodel.variables[var_name] + # Look up variable - first in component submodel, then in model + if var_name in component.submodel.variables: + variable = component.submodel.variables[var_name] + elif var_name in self._model.variables: + variable = self._model.variables[var_name] + else: + continue # Variable not found if 'time' not in variable.dims: continue # Skip non-time variables var_dims = set(variable.dims) @@ -631,6 +640,7 @@ def _create_batched_constraint( # Build list of expressions, each expanded with variable dimension lhs_parts = [] + length = len(indices[0]) for var_name, variable in variables.items(): # Select period/scenario slice if needed @@ -639,8 +649,13 @@ def _create_batched_constraint( # Create difference expression: var[idx_a] - var[idx_b] diff = var_slice.isel(time=indices[0]) - var_slice.isel(time=indices[1]) + # Rename time dimension to eq_idx and assign integer coordinates + # (indices[0] can have duplicates, causing duplicate datetime coords) + diff_renamed = diff.rename({'time': 'eq_idx'}) + diff_renamed = diff_renamed.assign_coords(eq_idx=np.arange(length)) + # Expand dims to add 'variable' dimension - lhs_parts.append(diff.expand_dims(variable=[var_name])) + lhs_parts.append(diff_renamed.expand_dims(variable=[var_name])) # Merge all expressions along 'variable' dimension combined_lhs = linopy.merge(*lhs_parts, dim='variable') @@ -701,16 +716,17 @@ def _equate_indices( length = len(indices[0]) var_name = original_var_name or variable.name - # Main constraint: x(cluster_a, t) - x(cluster_b, t) = 0 - con = self.add_constraints( - variable.isel(time=indices[0]) - variable.isel(time=indices[1]) == 0, - short_name=f'equate_indices{dim_suffix}|{var_name}', - ) + # Create constraint expression: x(cluster_a, t) - x(cluster_b, t) + # indices[0] can have duplicate values (same timestep compared to multiple others), + # so we use eq_idx dimension with integer coordinates to avoid duplicate datetime coords + lhs = variable.isel(time=indices[0]) - variable.isel(time=indices[1]) + + # Rename time dimension to eq_idx and assign integer coordinates + lhs_renamed = lhs.rename({'time': 'eq_idx'}) + lhs_renamed = lhs_renamed.assign_coords(eq_idx=np.arange(length)) # Add correction variables for binary flexibility if var_name in self._model.variables.binaries and self.clustering_parameters.flexibility_percent > 0: - # Use integer indices for correction variables to avoid duplicate datetime coords - # (indices[0] can have duplicates since same timestep may be compared to multiple others) coords = [np.arange(length)] dims = ['eq_idx'] var_k1 = self.add_variables( @@ -721,10 +737,7 @@ def _equate_indices( ) # Extend equation to allow deviation: On(a,t) - On(b,t) + K1 - K0 = 0 - # Rename constraint's time dim to eq_idx for alignment, then rename 
back
-        lhs_renamed = con.lhs.rename({'time': 'eq_idx'})
-        new_lhs = lhs_renamed + 1 * var_k1 - 1 * var_k0
-        con.lhs = new_lhs.rename({'eq_idx': 'time'})
+        lhs_renamed = lhs_renamed + 1 * var_k1 - 1 * var_k0

         # Interlock K0 and K1: can't both be 1
         self.add_constraints(var_k0 + var_k1 <= 1, short_name=f'lock_k0_and_k1{dim_suffix}|{var_name}')
@@ -735,3 +748,6 @@
             var_k0.sum(dim='eq_idx') + var_k1.sum(dim='eq_idx') <= limit,
             short_name=f'limit_corrections{dim_suffix}|{var_name}',
         )
+
+        # Add the main constraint
+        self.add_constraints(lhs_renamed == 0, short_name=f'equate_indices{dim_suffix}|{var_name}')

From 79db532eb41e6f6f12ae8085f79e0fb36b5cc237 Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Mon, 15 Dec 2025 15:38:10 +0100
Subject: [PATCH 025/191] Improve readability of clustering equations

---
 flixopt/clustering.py | 366 ++++++++++++++++------------------------
 1 file changed, 140 insertions(+), 226 deletions(-)

diff --git a/flixopt/clustering.py b/flixopt/clustering.py
index 78b9f7133..685fea1ca 100644
--- a/flixopt/clustering.py
+++ b/flixopt/clustering.py
@@ -493,261 +493,175 @@ def do_modeling(self):
     def do_modeling(self):
+        """Create equality constraints for clustered time indices.
+
+        Equalizes:
+        - flow_rate: continuous flow variables (batched into single constraint)
+        - status: binary on/off variables (individual constraints)
+        - inside_piece: piecewise segment binaries (individual constraints)
+        """

-        if not self.components_to_clusterize:
-            components = list(self.flow_system.components.values())
-        else:
-            components = list(self.components_to_clusterize)
+        components = self.components_to_clusterize or list(self.flow_system.components.values())

-        binary_variables: set[str] = set(self._model.variables.binaries)
+        # Collect variables to equalize, grouped by type
+        continuous_vars: dict[str, linopy.Variable] = {}
+        binary_vars: dict[str, linopy.Variable] = {}

-        # Group variables by dimension signature: (has_period, has_scenario, is_binary)
-        # This allows creating batched constraints with a 'variable' dimension
-        variable_groups: dict[tuple[bool, bool, bool], dict[str, linopy.Variable]] = {}

         for component in components:
             if isinstance(component, Storage) and not self.clustering_parameters.include_storage:
-                continue  # Skip storage if not included
+                continue

             for flow in component.inputs + component.outputs:
+                # Continuous: flow_rate (when aggregating data)
                 if self.clustering_parameters.aggregate_data:
-                    # Continuous flow rate
-                    flow_rate_name = f'{flow.label_full}|flow_rate'
-                    if flow_rate_name in component.submodel.variables:
-                        relevant_var_names.append(flow_rate_name)
+                    name = f'{flow.label_full}|flow_rate'
+                    if name in component.submodel.variables:
+                        continuous_vars[name] = component.submodel.variables[name]

-                # Binary variables - always include for better solver presolve
-                # On/off status
-                status_name = f'{flow.label_full}|status'
-                if status_name in component.submodel.variables:
-                    relevant_var_names.append(status_name)
+                # Binary: status
+                name =
f'{flow.label_full}|status' + if name in component.submodel.variables: + binary_vars[name] = component.submodel.variables[name] + + # Binary: piecewise segment selection + piecewise = getattr(component.submodel, 'piecewise_conversion', None) + if piecewise is not None: + for piece in piecewise.pieces: if piece.inside_piece is not None: - relevant_var_names.append(piece.inside_piece.name) - - for var_name in relevant_var_names: - # Look up variable - first in component submodel, then in model - if var_name in component.submodel.variables: - variable = component.submodel.variables[var_name] - elif var_name in self._model.variables: - variable = self._model.variables[var_name] - else: - continue # Variable not found - if 'time' not in variable.dims: - continue # Skip non-time variables - var_dims = set(variable.dims) - key = ('period' in var_dims, 'scenario' in var_dims, var_name in binary_variables) - variable_groups.setdefault(key, {})[var_name] = variable - - # Process each group with batched constraint creation - # Binary variables are handled separately with per-variable constraints (simpler, avoids dimension conflicts) - for (has_period, has_scenario, is_binary), variables in variable_groups.items(): - if is_binary: - # Handle binaries individually to avoid dimension conflicts with correction variables - for variable in variables.values(): - self._equate_indices_multi_dimensional(variable) - else: - # Batch continuous variables for efficiency - self._equate_indices_batched(variables, has_period, has_scenario) + binary_vars[piece.inside_piece.name] = piece.inside_piece + + # Create constraints for each clustering (period/scenario combination) + for (period, scenario), clustering in self.clustering_data_dict.items(): + suffix = self._make_suffix(period, scenario) + + for constraint_type, indices in [ + ('cluster', clustering.get_equation_indices(skip_first_index_of_period=True)), + ('segment', clustering.get_segment_equation_indices()), + ]: + if len(indices[0]) == 0: + continue + + # Batch continuous variables into single constraint + if continuous_vars: + self._add_equality_constraint( + continuous_vars, indices, period, scenario, f'{suffix}_{constraint_type}' + ) + + # Individual constraints for binaries (needed for flexibility correction vars) + for var in binary_vars.values(): + self._add_equality_constraint( + {var.name: var}, + indices, + period, + scenario, + f'{suffix}_{constraint_type}|{var.name}', + allow_flexibility=True, + ) # Add penalty for flexibility deviations - penalty = self.clustering_parameters.flexibility_penalty - if self.clustering_parameters.flexibility_percent > 0 and penalty != 0: - from .effects import PENALTY_EFFECT_LABEL - - for variable_name in self.variables_direct: - variable = self.variables_direct[variable_name] - # Correction vars use eq_idx dimension (not time) to avoid duplicate coord issues - sum_dim = 'eq_idx' if 'eq_idx' in variable.dims else 'time' - self._model.effects.add_share_to_effects( - name='Clustering', - expressions={PENALTY_EFFECT_LABEL: (variable * penalty).sum(sum_dim)}, - target='periodic', - ) - - def _equate_indices_batched( + self._add_flexibility_penalty() + + def _make_suffix(self, period, scenario) -> str: + """Create constraint name suffix from period/scenario labels.""" + parts = [] + if period is not None: + parts.append(f'p{period}') + if scenario is not None: + parts.append(f's{scenario}') + return '_'.join(parts) if parts else 'base' + + def _add_equality_constraint( self, variables: dict[str, linopy.Variable], - 
has_period: bool, - has_scenario: bool, - ) -> None: - """Create batched constraints for a group of continuous variables. - - Instead of creating one constraint per variable, this method creates a single constraint - with a 'variable' dimension, reducing the number of constraint objects. - - Args: - variables: Dict mapping variable names to linopy Variables. - has_period: Whether these variables have a 'period' dimension. - has_scenario: Whether these variables have a 'scenario' dimension. - """ - - # Create group suffix for unique constraint names - group_suffix = f'_{"P" if has_period else ""}{"S" if has_scenario else ""}' - if group_suffix == '_': - group_suffix = '_base' - - for (period_label, scenario_label), clustering in self.clustering_data_dict.items(): - # Build selector for this period/scenario combination - selector = {} - if has_period and period_label is not None: - selector['period'] = period_label - if has_scenario and scenario_label is not None: - selector['scenario'] = scenario_label - - # Create constraint name suffix with dimension info - dim_suffix = group_suffix - if period_label is not None: - dim_suffix += f'_p{period_label}' - if scenario_label is not None: - dim_suffix += f'_s{scenario_label}' - - # 1. Inter-period clustering constraints - cluster_indices = clustering.get_equation_indices(skip_first_index_of_period=True) - if len(cluster_indices[0]) > 0: - self._create_batched_constraint(variables, selector, cluster_indices, f'{dim_suffix}_cluster') - - # 2. Intra-segment constraints - segment_indices = clustering.get_segment_equation_indices() - if len(segment_indices[0]) > 0: - self._create_batched_constraint(variables, selector, segment_indices, f'{dim_suffix}_segment') - - def _create_batched_constraint( - self, - variables: dict[str, linopy.Variable], - selector: dict, indices: tuple[np.ndarray, np.ndarray], - dim_suffix: str, + period, + scenario, + suffix: str, + allow_flexibility: bool = False, ) -> None: - """Create a single constraint with 'variable' dimension for multiple variables. + """Add equality constraint: var[idx_a] == var[idx_b] for all index pairs. Args: - variables: Dict mapping variable names to linopy Variables. - selector: Dict for selecting period/scenario slice (e.g., {'period': 2024}). - indices: Tuple of (idx_a, idx_b) arrays for equating timesteps. - dim_suffix: Suffix for constraint name (e.g., '_cluster' or '_p2024_cluster'). + variables: Variables to constrain (batched if multiple). + indices: Tuple of (idx_a, idx_b) arrays - timesteps to equate. + period: Period label for selecting variable slice (or None). + scenario: Scenario label for selecting variable slice (or None). + suffix: Constraint name suffix. + allow_flexibility: If True, add correction variables for binaries. 
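+
+        Example (illustrative, indices made up):
+            With indices = (np.array([1, 2]), np.array([97, 98])), the
+            constraint enforces var[t=1] == var[t=97] and var[t=2] == var[t=98]
+            for every variable in `variables`.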
""" import linopy - # Build list of expressions, each expanded with variable dimension - lhs_parts = [] - length = len(indices[0]) + idx_a, idx_b = indices + n_equations = len(idx_a) + + # Build constraint expression for each variable + expressions = [] + for name, var in variables.items(): + # Select period/scenario slice if variable has those dimensions + if period is not None and 'period' in var.dims: + var = var.sel(period=period) + if scenario is not None and 'scenario' in var.dims: + var = var.sel(scenario=scenario) - for var_name, variable in variables.items(): - # Select period/scenario slice if needed - var_slice = variable.sel(**selector) if selector else variable + if 'time' not in var.dims: + continue - # Create difference expression: var[idx_a] - var[idx_b] - diff = var_slice.isel(time=indices[0]) - var_slice.isel(time=indices[1]) + # Compute difference: var[idx_a] - var[idx_b] + diff = var.isel(time=idx_a) - var.isel(time=idx_b) - # Rename time dimension to eq_idx and assign integer coordinates - # (indices[0] can have duplicates, causing duplicate datetime coords) - diff_renamed = diff.rename({'time': 'eq_idx'}) - diff_renamed = diff_renamed.assign_coords(eq_idx=np.arange(length)) + # Replace time dim with integer eq_idx (avoids duplicate datetime coords) + diff = diff.rename({'time': 'eq_idx'}).assign_coords(eq_idx=np.arange(n_equations)) + expressions.append(diff.expand_dims(variable=[name])) - # Expand dims to add 'variable' dimension - lhs_parts.append(diff_renamed.expand_dims(variable=[var_name])) + if not expressions: + return - # Merge all expressions along 'variable' dimension - combined_lhs = linopy.merge(*lhs_parts, dim='variable') + # Merge into single expression with 'variable' dimension + lhs = linopy.merge(*expressions, dim='variable') if len(expressions) > 1 else expressions[0] - # Create single constraint for all variables - self.add_constraints(combined_lhs == 0, short_name=f'equate_indices{dim_suffix}') + # Add flexibility for binaries + if allow_flexibility and self.clustering_parameters.flexibility_percent > 0: + var_name = next(iter(variables)) # Single variable for binary case + if var_name in self._model.variables.binaries: + lhs = self._add_binary_flexibility(lhs, n_equations, suffix, var_name) - def _equate_indices_multi_dimensional(self, variable: linopy.Variable) -> None: - """Equate indices across clustered segments, handling multi-dimensional cases. + self.add_constraints(lhs == 0, short_name=f'equate_{suffix}') - Note: This method is kept for backwards compatibility but is no longer used - by the default do_modeling(). Use _equate_indices_batched() instead. - """ - var_dims = set(variable.dims) - has_period = 'period' in var_dims - has_scenario = 'scenario' in var_dims - - for (period_label, scenario_label), clustering in self.clustering_data_dict.items(): - # Build selector for this period/scenario combination - selector = {} - if has_period and period_label is not None: - selector['period'] = period_label - if has_scenario and scenario_label is not None: - selector['scenario'] = scenario_label - - # Select variable slice for this dimension combination - if selector: - var_slice = variable.sel(**selector) - else: - var_slice = variable - - # Create constraint name with dimension info - dim_suffix = '' - if period_label is not None: - dim_suffix += f'_p{period_label}' - if scenario_label is not None: - dim_suffix += f'_s{scenario_label}' - - # 1. 
Inter-period clustering constraints (equate timesteps across periods in same cluster) - cluster_indices = clustering.get_equation_indices(skip_first_index_of_period=True) - if len(cluster_indices[0]) > 0: - self._equate_indices(var_slice, cluster_indices, dim_suffix + '_cluster', variable.name) - - # 2. Intra-segment constraints (equate timesteps within same segment) - segment_indices = clustering.get_segment_equation_indices() - if len(segment_indices[0]) > 0: - self._equate_indices(var_slice, segment_indices, dim_suffix + '_segment', variable.name) - - def _equate_indices( - self, - variable: linopy.Variable, - indices: tuple[np.ndarray, np.ndarray], - dim_suffix: str = '', - original_var_name: str | None = None, - ) -> None: - """Add constraints to equate variable values at corresponding cluster indices.""" - assert len(indices[0]) == len(indices[1]), 'The length of the indices must match!' - length = len(indices[0]) - var_name = original_var_name or variable.name - - # Create constraint expression: x(cluster_a, t) - x(cluster_b, t) - # indices[0] can have duplicate values (same timestep compared to multiple others), - # so we use eq_idx dimension with integer coordinates to avoid duplicate datetime coords - lhs = variable.isel(time=indices[0]) - variable.isel(time=indices[1]) - - # Rename time dimension to eq_idx and assign integer coordinates - lhs_renamed = lhs.rename({'time': 'eq_idx'}) - lhs_renamed = lhs_renamed.assign_coords(eq_idx=np.arange(length)) - - # Add correction variables for binary flexibility - if var_name in self._model.variables.binaries and self.clustering_parameters.flexibility_percent > 0: - coords = [np.arange(length)] - dims = ['eq_idx'] - var_k1 = self.add_variables( - binary=True, coords=coords, dims=dims, short_name=f'correction1{dim_suffix}|{var_name}' - ) - var_k0 = self.add_variables( - binary=True, coords=coords, dims=dims, short_name=f'correction0{dim_suffix}|{var_name}' - ) + def _add_binary_flexibility(self, lhs, n_equations: int, suffix: str, var_name: str): + """Add correction variables to allow limited binary deviations.""" + coords = [np.arange(n_equations)] + dims = ['eq_idx'] - # Extend equation to allow deviation: On(a,t) - On(b,t) + K1 - K0 = 0 - lhs_renamed = lhs_renamed + 1 * var_k1 - 1 * var_k0 + k_up = self.add_variables(binary=True, coords=coords, dims=dims, short_name=f'k_up_{suffix}|{var_name}') + k_down = self.add_variables(binary=True, coords=coords, dims=dims, short_name=f'k_down_{suffix}|{var_name}') - # Interlock K0 and K1: can't both be 1 - self.add_constraints(var_k0 + var_k1 <= 1, short_name=f'lock_k0_and_k1{dim_suffix}|{var_name}') + # Modified equation: diff + k_up - k_down == 0 + lhs = lhs + k_up - k_down - # Limit total corrections - limit = int(np.floor(self.clustering_parameters.flexibility_percent / 100 * length)) - self.add_constraints( - var_k0.sum(dim='eq_idx') + var_k1.sum(dim='eq_idx') <= limit, - short_name=f'limit_corrections{dim_suffix}|{var_name}', - ) + # At most one correction per equation + self.add_constraints(k_up + k_down <= 1, short_name=f'lock_k_{suffix}|{var_name}') - # Add the main constraint - self.add_constraints(lhs_renamed == 0, short_name=f'equate_indices{dim_suffix}|{var_name}') + # Limit total corrections + max_corrections = int(self.clustering_parameters.flexibility_percent / 100 * n_equations) + self.add_constraints( + k_up.sum('eq_idx') + k_down.sum('eq_idx') <= max_corrections, + short_name=f'limit_k_{suffix}|{var_name}', + ) + + return lhs + + def _add_flexibility_penalty(self): + """Add 
penalty cost for flexibility correction variables.""" + penalty = self.clustering_parameters.flexibility_penalty + if self.clustering_parameters.flexibility_percent == 0 or penalty == 0: + return + + from .effects import PENALTY_EFFECT_LABEL + + for var in self.variables_direct.values(): + sum_dim = 'eq_idx' if 'eq_idx' in var.dims else 'time' + self._model.effects.add_share_to_effects( + name='Clustering', + expressions={PENALTY_EFFECT_LABEL: (var * penalty).sum(sum_dim)}, + target='periodic', + ) From 2f38e7c89d2cdc7a25e9b532f67e78eccfa8e182 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 15 Dec 2025 16:03:25 +0100 Subject: [PATCH 026/191] Add IO for clustering --- flixopt/clustering.py | 91 ++++++++++++++++++++++++++++++++++++--- flixopt/flow_system.py | 98 +++++++++++++++++++++++++++++++++++++++--- 2 files changed, 177 insertions(+), 12 deletions(-) diff --git a/flixopt/clustering.py b/flixopt/clustering.py index 685fea1ca..ffcc69fca 100644 --- a/flixopt/clustering.py +++ b/flixopt/clustering.py @@ -25,7 +25,9 @@ from .plot_result import PlotResult from .structure import ( FlowSystemModel, + Interface, Submodel, + register_class_for_io, ) if TYPE_CHECKING: @@ -353,7 +355,8 @@ def _parse_cluster_duration(duration: str | float) -> float: return td.total_seconds() / 3600 -class ClusteringParameters: +@register_class_for_io +class ClusteringParameters(Interface): """Parameters for time series clustering. This class configures how time series data is clustered into representative @@ -424,6 +427,7 @@ def __init__( time_series_for_low_peaks: list[TimeSeriesData] | None = None, ): self.n_clusters = n_clusters + self.cluster_duration = cluster_duration # Store original for serialization self.cluster_duration_hours = _parse_cluster_duration(cluster_duration) self.n_segments = n_segments self.aggregate_data = aggregate_data @@ -454,6 +458,71 @@ def labels_for_low_peaks(self) -> list[str]: return [ts.name for ts in self.time_series_for_low_peaks] +@register_class_for_io +class ClusteringIndices(Interface): + """Stores computed clustering equation indices for serialization. + + This class stores the precomputed indices from tsam clustering, allowing + clustering constraints to be recreated without re-running tsam. + + Each index pair `(i, j)` in `cluster_equations` or `segment_equations` means: + "equate var[i] == var[j]" - i.e., the variable values at timesteps i and j must be equal. + + Args: + cluster_equations: List of (i, j) pairs for inter-cluster equality constraints. + segment_equations: List of (i, j) pairs for intra-segment equality constraints. + period: Period label this clustering applies to (None for single-period). + scenario: Scenario label this clustering applies to (None for single-scenario). 
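+
+    Example (illustrative):
+        cluster_equations=[(5, 197), (6, 198)] restores the constraints
+        var[5] == var[197] and var[6] == var[198] without re-running tsam.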
+ """ + + def __init__( + self, + cluster_equations: list[list[int]] | None = None, + segment_equations: list[list[int]] | None = None, + period: str | int | None = None, + scenario: str | None = None, + ): + self.cluster_equations = cluster_equations or [] + self.segment_equations = segment_equations or [] + self.period = period + self.scenario = scenario + + @classmethod + def from_clustering(cls, clustering: Clustering, period=None, scenario=None) -> ClusteringIndices: + """Create from a Clustering object by extracting equation indices.""" + cluster_idx = clustering.get_equation_indices(skip_first_index_of_period=True) + segment_idx = clustering.get_segment_equation_indices() + + # Convert parallel arrays to list of pairs: [(i, j), (i, j), ...] + cluster_equations = ( + list(zip(cluster_idx[0].tolist(), cluster_idx[1].tolist(), strict=False)) if len(cluster_idx[0]) > 0 else [] + ) + segment_equations = ( + list(zip(segment_idx[0].tolist(), segment_idx[1].tolist(), strict=False)) if len(segment_idx[0]) > 0 else [] + ) + + return cls( + cluster_equations=cluster_equations, + segment_equations=segment_equations, + period=period, + scenario=scenario, + ) + + def get_cluster_indices(self) -> tuple[np.ndarray, np.ndarray]: + """Get cluster equation indices as parallel numpy arrays.""" + if not self.cluster_equations: + return np.array([]), np.array([]) + idx_i, idx_j = zip(*self.cluster_equations, strict=False) + return np.array(idx_i), np.array(idx_j) + + def get_segment_indices(self) -> tuple[np.ndarray, np.ndarray]: + """Get segment equation indices as parallel numpy arrays.""" + if not self.segment_equations: + return np.array([]), np.array([]) + idx_i, idx_j = zip(*self.segment_equations, strict=False) + return np.array(idx_i), np.array(idx_j) + + class ClusteringModel(Submodel): """Model that adds clustering constraints to equate variables across clustered time segments. 
@@ -531,13 +600,23 @@ def do_modeling(self): binary_vars[piece.inside_piece.name] = piece.inside_piece # Create constraints for each clustering (period/scenario combination) - for (period, scenario), clustering in self.clustering_data_dict.items(): + for (period, scenario), clustering_or_indices in self.clustering_data_dict.items(): suffix = self._make_suffix(period, scenario) - for constraint_type, indices in [ - ('cluster', clustering.get_equation_indices(skip_first_index_of_period=True)), - ('segment', clustering.get_segment_equation_indices()), - ]: + # Support both Clustering objects (fresh) and ClusteringIndices (restored from file) + if isinstance(clustering_or_indices, ClusteringIndices): + indices_pairs = [ + ('cluster', clustering_or_indices.get_cluster_indices()), + ('segment', clustering_or_indices.get_segment_indices()), + ] + else: + # Original Clustering object + indices_pairs = [ + ('cluster', clustering_or_indices.get_equation_indices(skip_first_index_of_period=True)), + ('segment', clustering_or_indices.get_segment_equation_indices()), + ] + + for constraint_type, indices in indices_pairs: if len(indices[0]) == 0: continue diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 8f2dba51b..c4798126d 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -627,6 +627,42 @@ def to_dataset(self, include_solution: bool = True) -> xr.Dataset: carriers_structure[name] = carrier_ref ds.attrs['carriers'] = json.dumps(carriers_structure) + # Include clustering info if present + if self._clustering_info is not None: + from .clustering import ClusteringIndices, ClusteringParameters + + clustering_data = {} + + # Serialize parameters + params = self._clustering_info.get('parameters') + if isinstance(params, ClusteringParameters): + params_ref, _ = params._create_reference_structure() + clustering_data['parameters'] = params_ref + + # Serialize equation indices from Clustering objects + clustering_dict = self._clustering_info.get('clustering', {}) + indices_list = [] + if isinstance(clustering_dict, dict): + # Multi-dimensional: {(period, scenario): Clustering} + for (period, scenario), clustering in clustering_dict.items(): + indices = ClusteringIndices.from_clustering(clustering, period, scenario) + indices_ref, _ = indices._create_reference_structure() + indices_list.append(indices_ref) + else: + # Single Clustering object + indices = ClusteringIndices.from_clustering(clustering_dict) + indices_ref, _ = indices._create_reference_structure() + indices_list.append(indices_ref) + + clustering_data['indices'] = indices_list + + # Store component labels to clusterize (not the component objects) + components = self._clustering_info.get('components_to_clusterize') + if components: + clustering_data['component_labels'] = [c.label for c in components] + + ds.attrs['clustering_info'] = json.dumps(clustering_data) + # Add version info ds.attrs['flixopt_version'] = __version__ @@ -721,6 +757,45 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: carrier = cls._resolve_reference_structure(carrier_data, {}) flow_system._carriers.add(carrier) + # Restore clustering info if present + if 'clustering_info' in reference_structure: + from .clustering import ClusteringIndices + + clustering_data = json.loads(reference_structure['clustering_info']) + + # Restore parameters + params = None + if 'parameters' in clustering_data: + params = cls._resolve_reference_structure(clustering_data['parameters'], {}) + + # Restore indices + indices_dict = {} + for indices_ref in 
clustering_data.get('indices', []): + indices = cls._resolve_reference_structure(indices_ref, {}) + if isinstance(indices, ClusteringIndices): + key = (indices.period, indices.scenario) + indices_dict[key] = indices + + # Restore component references + component_labels = clustering_data.get('component_labels', []) + components_to_clusterize = None + if component_labels: + components_to_clusterize = [ + flow_system.components[label] for label in component_labels if label in flow_system.components + ] + + flow_system._clustering_info = { + 'parameters': params, + 'clustering_indices': indices_dict, # ClusteringIndices instead of Clustering + 'components_to_clusterize': components_to_clusterize, + 'restored_from_file': True, # Flag to indicate this was loaded, not computed + } + logger.info( + f'Restored clustering info: n_clusters={params.n_clusters}, ' + f'duration={params.cluster_duration}, n_segments={params.n_segments}. ' + f'Clustering constraints will be recreated from stored indices.' + ) + # Reconnect network to populate bus inputs/outputs (not stored in NetCDF). flow_system.connect_and_transform() @@ -1283,17 +1358,28 @@ def _add_clustering_constraints(self) -> None: from .clustering import ClusteringModel info = self._clustering_info or {} - required_keys = {'parameters', 'clustering', 'components_to_clusterize'} - missing_keys = required_keys - set(info) - if missing_keys: - raise KeyError(f'_clustering_info missing required keys: {sorted(missing_keys)}') + + # Check for required keys - support both fresh clustering and restored from file + if 'clustering' in info: + # Fresh clustering with Clustering objects + clustering_data = info['clustering'] + elif 'clustering_indices' in info: + # Restored from file with ClusteringIndices objects + clustering_data = info['clustering_indices'] + else: + raise KeyError( + '_clustering_info missing required key: either "clustering" (fresh) or "clustering_indices" (restored)' + ) + + if 'parameters' not in info: + raise KeyError('_clustering_info missing required key: "parameters"') clustering_model = ClusteringModel( model=self.model, clustering_parameters=info['parameters'], flow_system=self, - clustering_data=info['clustering'], - components_to_clusterize=info['components_to_clusterize'], + clustering_data=clustering_data, + components_to_clusterize=info.get('components_to_clusterize'), ) clustering_model.do_modeling() From 96e0826e0671aa186ff4ea75ebd82070f04933ae Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 15 Dec 2025 16:09:56 +0100 Subject: [PATCH 027/191] Add IO for clustering --- flixopt/clustering.py | 127 +++++++++++++++++++++++++++++++----------- 1 file changed, 96 insertions(+), 31 deletions(-) diff --git a/flixopt/clustering.py b/flixopt/clustering.py index ffcc69fca..b3dee19ac 100644 --- a/flixopt/clustering.py +++ b/flixopt/clustering.py @@ -284,6 +284,65 @@ def get_equation_indices(self, skip_first_index_of_period: bool = True) -> tuple # Convert lists to numpy arrays return np.array(idx_var1), np.array(idx_var2) + def get_equation_groups(self, skip_first_index_of_period: bool = True) -> list[list[int]]: + """Get groups of timestep indices that should be equal (inter-cluster). + + Each group contains timesteps at the same position within periods of the same cluster. + E.g., if cluster 0 has periods [0-95] and [192-287], position 5 gives group [5, 197]. + + Args: + skip_first_index_of_period: Skip first timestep of each period (for storage continuity). 
+ + Returns: + List of groups, where each group is a list of timestep indices to equate. + """ + groups = [] + + for index_vectors in self.get_cluster_indices().values(): + if len(index_vectors) <= 1: + continue + + # Determine the length and starting offset + start_offset = 1 if skip_first_index_of_period else 0 + min_len = min(len(v) for v in index_vectors) - start_offset + + # Create a group for each position across all periods in this cluster + for pos in range(min_len): + group = [int(v[pos + start_offset]) for v in index_vectors] + if len(group) > 1: + groups.append(group) + + return groups + + def get_segment_equation_groups(self) -> list[list[int]]: + """Get groups of timestep indices that should be equal (intra-segment). + + Each group contains all timesteps within the same segment. + + Returns: + List of groups, where each group is a list of timestep indices to equate. + """ + if self.n_segments is None: + return [] + + groups = [] + period_length = int(self.hours_per_period / self.hours_per_time_step) + segment_duration_dict = self.tsam.segmentDurationDict['Segment Duration'] + + for period_idx, cluster_id in enumerate(self.tsam.clusterOrder): + period_offset = period_idx * period_length + start_step = 0 + + for seg_idx in range(self.n_segments): + duration = segment_duration_dict[(cluster_id, seg_idx)] + if duration > 1: + # Group all timesteps in this segment + group = [period_offset + start_step + step for step in range(duration)] + groups.append(group) + start_step += duration + + return groups + def get_segment_equation_indices(self) -> tuple[np.ndarray, np.ndarray]: """ Generates pairs of indices for intra-segment equalization. @@ -465,62 +524,68 @@ class ClusteringIndices(Interface): This class stores the precomputed indices from tsam clustering, allowing clustering constraints to be recreated without re-running tsam. - Each index pair `(i, j)` in `cluster_equations` or `segment_equations` means: - "equate var[i] == var[j]" - i.e., the variable values at timesteps i and j must be equal. + Each group in `cluster_groups` or `segment_groups` contains timestep indices + that should all be equal: e.g., `[0, 96, 192]` means `var[0] == var[96] == var[192]`. Args: - cluster_equations: List of (i, j) pairs for inter-cluster equality constraints. - segment_equations: List of (i, j) pairs for intra-segment equality constraints. + cluster_groups: List of groups for inter-cluster equality constraints. + Each group is a list of timestep indices that should be equal. + segment_groups: List of groups for intra-segment equality constraints. period: Period label this clustering applies to (None for single-period). scenario: Scenario label this clustering applies to (None for single-scenario). 
""" def __init__( self, - cluster_equations: list[list[int]] | None = None, - segment_equations: list[list[int]] | None = None, + cluster_groups: list[list[int]] | None = None, + segment_groups: list[list[int]] | None = None, period: str | int | None = None, scenario: str | None = None, ): - self.cluster_equations = cluster_equations or [] - self.segment_equations = segment_equations or [] + self.cluster_groups = cluster_groups or [] + self.segment_groups = segment_groups or [] self.period = period self.scenario = scenario @classmethod def from_clustering(cls, clustering: Clustering, period=None, scenario=None) -> ClusteringIndices: """Create from a Clustering object by extracting equation indices.""" - cluster_idx = clustering.get_equation_indices(skip_first_index_of_period=True) - segment_idx = clustering.get_segment_equation_indices() - - # Convert parallel arrays to list of pairs: [(i, j), (i, j), ...] - cluster_equations = ( - list(zip(cluster_idx[0].tolist(), cluster_idx[1].tolist(), strict=False)) if len(cluster_idx[0]) > 0 else [] - ) - segment_equations = ( - list(zip(segment_idx[0].tolist(), segment_idx[1].tolist(), strict=False)) if len(segment_idx[0]) > 0 else [] - ) - return cls( - cluster_equations=cluster_equations, - segment_equations=segment_equations, + cluster_groups=clustering.get_equation_groups(skip_first_index_of_period=True), + segment_groups=clustering.get_segment_equation_groups(), period=period, scenario=scenario, ) def get_cluster_indices(self) -> tuple[np.ndarray, np.ndarray]: - """Get cluster equation indices as parallel numpy arrays.""" - if not self.cluster_equations: - return np.array([]), np.array([]) - idx_i, idx_j = zip(*self.cluster_equations, strict=False) - return np.array(idx_i), np.array(idx_j) + """Get cluster equation indices as parallel numpy arrays for constraint creation.""" + return self._groups_to_indices(self.cluster_groups) def get_segment_indices(self) -> tuple[np.ndarray, np.ndarray]: - """Get segment equation indices as parallel numpy arrays.""" - if not self.segment_equations: - return np.array([]), np.array([]) - idx_i, idx_j = zip(*self.segment_equations, strict=False) - return np.array(idx_i), np.array(idx_j) + """Get segment equation indices as parallel numpy arrays for constraint creation.""" + return self._groups_to_indices(self.segment_groups) + + @staticmethod + def _groups_to_indices(groups: list[list[int]]) -> tuple[np.ndarray, np.ndarray]: + """Convert groups to parallel index arrays for constraint creation. + + Each group [a, b, c, d] generates pairs: (a,b), (a,c), (a,d) + i.e., equate all elements to the first element of the group. 
+ """ + if not groups: + return np.array([], dtype=int), np.array([], dtype=int) + + idx_a = [] + idx_b = [] + for group in groups: + if len(group) < 2: + continue + first = group[0] + for other in group[1:]: + idx_a.append(first) + idx_b.append(other) + + return np.array(idx_a, dtype=int), np.array(idx_b, dtype=int) class ClusteringModel(Submodel): From e8ab4b802ce05393005fddc6456175fcaa840e30 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 15 Dec 2025 16:19:33 +0100 Subject: [PATCH 028/191] Add IO for clustering --- flixopt/clustering.py | 79 +++++++++++++++--------------------------- flixopt/flow_system.py | 79 ++++++++++++++++++------------------------ 2 files changed, 61 insertions(+), 97 deletions(-) diff --git a/flixopt/clustering.py b/flixopt/clustering.py index b3dee19ac..6ee24ff34 100644 --- a/flixopt/clustering.py +++ b/flixopt/clustering.py @@ -28,7 +28,7 @@ Interface, Submodel, register_class_for_io, -) +) # Interface and register_class_for_io used by ClusteringParameters if TYPE_CHECKING: import linopy @@ -517,75 +517,50 @@ def labels_for_low_peaks(self) -> list[str]: return [ts.name for ts in self.time_series_for_low_peaks] -@register_class_for_io -class ClusteringIndices(Interface): +class ClusteringIndices: """Stores computed clustering equation indices for serialization. - This class stores the precomputed indices from tsam clustering, allowing - clustering constraints to be recreated without re-running tsam. - - Each group in `cluster_groups` or `segment_groups` contains timestep indices - that should all be equal: e.g., `[0, 96, 192]` means `var[0] == var[96] == var[192]`. + Stores pairs of timestep indices (i, j) where var[i] must equal var[j]. + Uses numpy arrays for efficient storage in NetCDF. Args: - cluster_groups: List of groups for inter-cluster equality constraints. - Each group is a list of timestep indices that should be equal. - segment_groups: List of groups for intra-segment equality constraints. - period: Period label this clustering applies to (None for single-period). - scenario: Scenario label this clustering applies to (None for single-scenario). + cluster_idx_i: First indices for inter-cluster equality constraints. + cluster_idx_j: Second indices for inter-cluster equality constraints. + segment_idx_i: First indices for intra-segment equality constraints. + segment_idx_j: Second indices for intra-segment equality constraints. 
""" def __init__( self, - cluster_groups: list[list[int]] | None = None, - segment_groups: list[list[int]] | None = None, - period: str | int | None = None, - scenario: str | None = None, + cluster_idx_i: np.ndarray | None = None, + cluster_idx_j: np.ndarray | None = None, + segment_idx_i: np.ndarray | None = None, + segment_idx_j: np.ndarray | None = None, ): - self.cluster_groups = cluster_groups or [] - self.segment_groups = segment_groups or [] - self.period = period - self.scenario = scenario + self.cluster_idx_i = cluster_idx_i if cluster_idx_i is not None else np.array([], dtype=np.int32) + self.cluster_idx_j = cluster_idx_j if cluster_idx_j is not None else np.array([], dtype=np.int32) + self.segment_idx_i = segment_idx_i if segment_idx_i is not None else np.array([], dtype=np.int32) + self.segment_idx_j = segment_idx_j if segment_idx_j is not None else np.array([], dtype=np.int32) @classmethod - def from_clustering(cls, clustering: Clustering, period=None, scenario=None) -> ClusteringIndices: + def from_clustering(cls, clustering: Clustering) -> ClusteringIndices: """Create from a Clustering object by extracting equation indices.""" + cluster_idx = clustering.get_equation_indices(skip_first_index_of_period=True) + segment_idx = clustering.get_segment_equation_indices() return cls( - cluster_groups=clustering.get_equation_groups(skip_first_index_of_period=True), - segment_groups=clustering.get_segment_equation_groups(), - period=period, - scenario=scenario, + cluster_idx_i=cluster_idx[0].astype(np.int32), + cluster_idx_j=cluster_idx[1].astype(np.int32), + segment_idx_i=segment_idx[0].astype(np.int32), + segment_idx_j=segment_idx[1].astype(np.int32), ) def get_cluster_indices(self) -> tuple[np.ndarray, np.ndarray]: - """Get cluster equation indices as parallel numpy arrays for constraint creation.""" - return self._groups_to_indices(self.cluster_groups) + """Get cluster equation indices as parallel numpy arrays.""" + return self.cluster_idx_i, self.cluster_idx_j def get_segment_indices(self) -> tuple[np.ndarray, np.ndarray]: - """Get segment equation indices as parallel numpy arrays for constraint creation.""" - return self._groups_to_indices(self.segment_groups) - - @staticmethod - def _groups_to_indices(groups: list[list[int]]) -> tuple[np.ndarray, np.ndarray]: - """Convert groups to parallel index arrays for constraint creation. - - Each group [a, b, c, d] generates pairs: (a,b), (a,c), (a,d) - i.e., equate all elements to the first element of the group. 
- """ - if not groups: - return np.array([], dtype=int), np.array([], dtype=int) - - idx_a = [] - idx_b = [] - for group in groups: - if len(group) < 2: - continue - first = group[0] - for other in group[1:]: - idx_a.append(first) - idx_b.append(other) - - return np.array(idx_a, dtype=int), np.array(idx_b, dtype=int) + """Get segment equation indices as parallel numpy arrays.""" + return self.segment_idx_i, self.segment_idx_j class ClusteringModel(Submodel): diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index c4798126d..f3e18d44a 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -631,37 +631,30 @@ def to_dataset(self, include_solution: bool = True) -> xr.Dataset: if self._clustering_info is not None: from .clustering import ClusteringIndices, ClusteringParameters - clustering_data = {} - # Serialize parameters params = self._clustering_info.get('parameters') if isinstance(params, ClusteringParameters): params_ref, _ = params._create_reference_structure() - clustering_data['parameters'] = params_ref - - # Serialize equation indices from Clustering objects - clustering_dict = self._clustering_info.get('clustering', {}) - indices_list = [] - if isinstance(clustering_dict, dict): - # Multi-dimensional: {(period, scenario): Clustering} - for (period, scenario), clustering in clustering_dict.items(): - indices = ClusteringIndices.from_clustering(clustering, period, scenario) - indices_ref, _ = indices._create_reference_structure() - indices_list.append(indices_ref) - else: - # Single Clustering object - indices = ClusteringIndices.from_clustering(clustering_dict) - indices_ref, _ = indices._create_reference_structure() - indices_list.append(indices_ref) - - clustering_data['indices'] = indices_list + ds.attrs['_clustering_params'] = json.dumps(params_ref) - # Store component labels to clusterize (not the component objects) + # Store component labels to clusterize components = self._clustering_info.get('components_to_clusterize') if components: - clustering_data['component_labels'] = [c.label for c in components] - - ds.attrs['clustering_info'] = json.dumps(clustering_data) + ds.attrs['_clustering_components'] = json.dumps([c.label for c in components]) + + # Store equation indices as DataArrays (efficient binary storage) + clustering_obj = self._clustering_info.get('clustering') + if clustering_obj is not None: + if isinstance(clustering_obj, dict): + # Multi-dimensional: {(period, scenario): Clustering} + # For now, only support single clustering (most common case) + clustering_obj = next(iter(clustering_obj.values())) + + indices = ClusteringIndices.from_clustering(clustering_obj) + ds['_clustering_cluster_idx_i'] = xr.DataArray(indices.cluster_idx_i, dims=['_cluster_eq']) + ds['_clustering_cluster_idx_j'] = xr.DataArray(indices.cluster_idx_j, dims=['_cluster_eq']) + ds['_clustering_segment_idx_i'] = xr.DataArray(indices.segment_idx_i, dims=['_segment_eq']) + ds['_clustering_segment_idx_j'] = xr.DataArray(indices.segment_idx_j, dims=['_segment_eq']) # Add version info ds.attrs['flixopt_version'] = __version__ @@ -758,42 +751,38 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: flow_system._carriers.add(carrier) # Restore clustering info if present - if 'clustering_info' in reference_structure: + if '_clustering_params' in reference_structure: from .clustering import ClusteringIndices - clustering_data = json.loads(reference_structure['clustering_info']) - # Restore parameters - params = None - if 'parameters' in clustering_data: - params = 
cls._resolve_reference_structure(clustering_data['parameters'], {}) - - # Restore indices - indices_dict = {} - for indices_ref in clustering_data.get('indices', []): - indices = cls._resolve_reference_structure(indices_ref, {}) - if isinstance(indices, ClusteringIndices): - key = (indices.period, indices.scenario) - indices_dict[key] = indices + params = cls._resolve_reference_structure(json.loads(reference_structure['_clustering_params']), {}) + + # Restore indices from DataArrays + indices = ClusteringIndices( + cluster_idx_i=ds['_clustering_cluster_idx_i'].values if '_clustering_cluster_idx_i' in ds else None, + cluster_idx_j=ds['_clustering_cluster_idx_j'].values if '_clustering_cluster_idx_j' in ds else None, + segment_idx_i=ds['_clustering_segment_idx_i'].values if '_clustering_segment_idx_i' in ds else None, + segment_idx_j=ds['_clustering_segment_idx_j'].values if '_clustering_segment_idx_j' in ds else None, + ) # Restore component references - component_labels = clustering_data.get('component_labels', []) components_to_clusterize = None - if component_labels: + if '_clustering_components' in reference_structure: + component_labels = json.loads(reference_structure['_clustering_components']) components_to_clusterize = [ flow_system.components[label] for label in component_labels if label in flow_system.components ] flow_system._clustering_info = { 'parameters': params, - 'clustering_indices': indices_dict, # ClusteringIndices instead of Clustering + 'clustering_indices': {(None, None): indices}, # ClusteringIndices keyed by (period, scenario) 'components_to_clusterize': components_to_clusterize, - 'restored_from_file': True, # Flag to indicate this was loaded, not computed + 'restored_from_file': True, } logger.info( - f'Restored clustering info: n_clusters={params.n_clusters}, ' - f'duration={params.cluster_duration}, n_segments={params.n_segments}. ' - f'Clustering constraints will be recreated from stored indices.' + f'Restored clustering: n_clusters={params.n_clusters}, duration={params.cluster_duration}, ' + f'n_segments={params.n_segments}, {len(indices.cluster_idx_i)} cluster + ' + f'{len(indices.segment_idx_i)} segment equations.' ) # Reconnect network to populate bus inputs/outputs (not stored in NetCDF). From c74c5e7058fd29840b779daeb3312b877149ba70 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 16 Dec 2025 08:56:55 +0100 Subject: [PATCH 029/191] Improve clustering --- flixopt/__init__.py | 3 +- flixopt/clustering.py | 336 +++++++++++++++++++++++----------- flixopt/flow_system.py | 67 ++++--- flixopt/transform_accessor.py | 92 +++++++++- 4 files changed, 366 insertions(+), 132 deletions(-) diff --git a/flixopt/__init__.py b/flixopt/__init__.py index 1e3fee5bd..e5f3fd78e 100644 --- a/flixopt/__init__.py +++ b/flixopt/__init__.py @@ -15,7 +15,7 @@ # Import commonly used classes and functions from . 
import linear_converters, plotting, results, solvers from .carrier import Carrier, CarrierContainer -from .clustering import ClusteringParameters +from .clustering import ClusteringIndices, ClusteringParameters from .components import ( LinearConverter, Sink, @@ -58,6 +58,7 @@ 'Piecewise', 'PiecewiseConversion', 'PiecewiseEffects', + 'ClusteringIndices', 'ClusteringParameters', 'PlotResult', 'plotting', diff --git a/flixopt/clustering.py b/flixopt/clustering.py index 6ee24ff34..79d9f992c 100644 --- a/flixopt/clustering.py +++ b/flixopt/clustering.py @@ -518,49 +518,225 @@ def labels_for_low_peaks(self) -> list[str]: class ClusteringIndices: - """Stores computed clustering equation indices for serialization. + """Stores clustering group assignments for timesteps. - Stores pairs of timestep indices (i, j) where var[i] must equal var[j]. - Uses numpy arrays for efficient storage in NetCDF. + Each timestep is assigned to a cluster group and optionally a segment group. + Timesteps in the same group should have equal variable values. + + The group assignments are stored as integer arrays where: + - A value >= 0 indicates the group ID + - A value of -1 indicates "not in any group" (no equalization) Args: - cluster_idx_i: First indices for inter-cluster equality constraints. - cluster_idx_j: Second indices for inter-cluster equality constraints. - segment_idx_i: First indices for intra-segment equality constraints. - segment_idx_j: Second indices for intra-segment equality constraints. + cluster_groups: Array of shape (n_timesteps,) mapping each timestep to + a cluster group ID. Timesteps with the same group ID (except -1) + will be equalized. + segment_groups: Array of shape (n_timesteps,) mapping each timestep to + a segment group ID. Timesteps with the same group ID (except -1) + will be equalized. 
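A minimal worked example of the sentinel convention just described (toy arrays, not taken from the diff):

    import numpy as np

    # Timesteps 1 and 4 share group 0, timesteps 2 and 5 share group 1;
    # timesteps 0 and 3 carry -1 and stay unconstrained.
    cluster_groups = np.array([-1, 0, 1, -1, 0, 1], dtype=np.int32)

    for gid in np.unique(cluster_groups[cluster_groups >= 0]):
        members = np.where(cluster_groups == gid)[0]
        print(gid, members)  # 0 [1 4] -> var[1] == var[4]; then 1 [2 5]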
""" def __init__( self, - cluster_idx_i: np.ndarray | None = None, - cluster_idx_j: np.ndarray | None = None, - segment_idx_i: np.ndarray | None = None, - segment_idx_j: np.ndarray | None = None, + cluster_groups: np.ndarray | None = None, + segment_groups: np.ndarray | None = None, ): - self.cluster_idx_i = cluster_idx_i if cluster_idx_i is not None else np.array([], dtype=np.int32) - self.cluster_idx_j = cluster_idx_j if cluster_idx_j is not None else np.array([], dtype=np.int32) - self.segment_idx_i = segment_idx_i if segment_idx_i is not None else np.array([], dtype=np.int32) - self.segment_idx_j = segment_idx_j if segment_idx_j is not None else np.array([], dtype=np.int32) + self.cluster_groups = cluster_groups if cluster_groups is not None else np.array([], dtype=np.int32) + self.segment_groups = segment_groups if segment_groups is not None else np.array([], dtype=np.int32) @classmethod def from_clustering(cls, clustering: Clustering) -> ClusteringIndices: - """Create from a Clustering object by extracting equation indices.""" - cluster_idx = clustering.get_equation_indices(skip_first_index_of_period=True) - segment_idx = clustering.get_segment_equation_indices() - return cls( - cluster_idx_i=cluster_idx[0].astype(np.int32), - cluster_idx_j=cluster_idx[1].astype(np.int32), - segment_idx_i=segment_idx[0].astype(np.int32), - segment_idx_j=segment_idx[1].astype(np.int32), - ) + """Create from a Clustering object by extracting group assignments.""" + n_timesteps = clustering.nr_of_time_steps + period_length = int(clustering.hours_per_period / clustering.hours_per_time_step) + + # Build cluster groups: cluster_id * period_length + position_in_period + # Skip first timestep of each period for storage continuity + cluster_groups = np.full(n_timesteps, -1, dtype=np.int32) + for period_idx, cluster_id in enumerate(clustering.tsam.clusterOrder): + start_idx = period_idx * period_length + # Skip first timestep (position 0) for storage continuity + for pos in range(1, period_length): + ts_idx = start_idx + pos + if ts_idx < n_timesteps: + cluster_groups[ts_idx] = cluster_id * period_length + pos + + # Build segment groups + segment_groups = np.full(n_timesteps, -1, dtype=np.int32) + if clustering.n_segments is not None: + segment_counter = 0 + segment_duration_dict = clustering.tsam.segmentDurationDict['Segment Duration'] + + for period_idx, cluster_id in enumerate(clustering.tsam.clusterOrder): + period_offset = period_idx * period_length + start_step = 0 + + for seg_idx in range(clustering.n_segments): + duration = segment_duration_dict[(cluster_id, seg_idx)] + # All timesteps in this segment get the same group ID + for step in range(duration): + ts_idx = period_offset + start_step + step + if ts_idx < n_timesteps: + segment_groups[ts_idx] = segment_counter + segment_counter += 1 + start_step += duration + + return cls(cluster_groups=cluster_groups, segment_groups=segment_groups) + + @classmethod + def from_tsam( + cls, + aggregation: tsam.TimeSeriesAggregation, + hours_per_timestep: float, + hours_per_period: float, + skip_first_index_of_period: bool = True, + ) -> ClusteringIndices: + """Create from a tsam TimeSeriesAggregation object directly. + + This allows users to run tsam on a subset of their time series data + (e.g., only key drivers like prices and demands) and then apply the + resulting clustering to a full FlowSystem. + + Args: + aggregation: A tsam TimeSeriesAggregation object after calling + createTypicalPeriods(). 
+ hours_per_timestep: Duration of each timestep in hours (must match + the resolution used when creating the tsam aggregation). + hours_per_period: Duration of each period in hours (must match + hoursPerPeriod used when creating the tsam aggregation). + skip_first_index_of_period: Skip first timestep of each period when + creating inter-cluster constraints. Default True (recommended + for correct storage state transitions). + + Returns: + ClusteringIndices with group assignments for constraint generation. + + Examples: + >>> import tsam.timeseriesaggregation as tsam + >>> import pandas as pd + >>> + >>> # Create subset DataFrame with key time series + >>> subset_df = pd.DataFrame( + ... { + ... 'electricity_price': prices, + ... 'heat_demand': demand, + ... }, + ... index=timesteps, + ... ) + >>> + >>> # Run tsam clustering + >>> aggregation = tsam.TimeSeriesAggregation( + ... subset_df, + ... noTypicalPeriods=8, + ... hoursPerPeriod=24, + ... resolution=1, # 1-hour timesteps + ... ) + >>> aggregation.createTypicalPeriods() + >>> + >>> # Convert to ClusteringIndices + >>> indices = ClusteringIndices.from_tsam( + ... aggregation, + ... hours_per_timestep=1, + ... hours_per_period=24, + ... ) + >>> + >>> # Apply to FlowSystem + >>> clustered_fs = flow_system.transform.add_clustering(indices) + + With inner-period segmentation: + + >>> aggregation = tsam.TimeSeriesAggregation( + ... subset_df, + ... noTypicalPeriods=8, + ... hoursPerPeriod=24, + ... resolution=1, + ... segmentation=True, + ... noSegments=4, + ... ) + >>> aggregation.createTypicalPeriods() + >>> indices = ClusteringIndices.from_tsam(aggregation, 1, 24) + """ + if not TSAM_AVAILABLE: + raise ImportError("The 'tsam' package is required. Install it with 'pip install tsam'.") + + period_length = int(hours_per_period / hours_per_timestep) + n_timesteps = len(aggregation.timeSeries) + + # Build cluster groups: cluster_id * period_length + position_in_period + cluster_groups = np.full(n_timesteps, -1, dtype=np.int32) + start_pos = 1 if skip_first_index_of_period else 0 + + for period_idx, cluster_id in enumerate(aggregation.clusterOrder): + start_idx = period_idx * period_length + for pos in range(start_pos, period_length): + ts_idx = start_idx + pos + if ts_idx < n_timesteps: + cluster_groups[ts_idx] = cluster_id * period_length + pos + + # Build segment groups + segment_groups = np.full(n_timesteps, -1, dtype=np.int32) + if aggregation.segmentation and hasattr(aggregation, 'segmentDurationDict'): + segment_counter = 0 + n_segments = aggregation.noSegments + segment_duration_dict = aggregation.segmentDurationDict['Segment Duration'] + + for period_idx, cluster_id in enumerate(aggregation.clusterOrder): + period_offset = period_idx * period_length + start_step = 0 + + for seg_idx in range(n_segments): + duration = segment_duration_dict[(cluster_id, seg_idx)] + # All timesteps in this segment get the same group ID + for step in range(duration): + ts_idx = period_offset + start_step + step + if ts_idx < n_timesteps: + segment_groups[ts_idx] = segment_counter + segment_counter += 1 + start_step += duration + + return cls(cluster_groups=cluster_groups, segment_groups=segment_groups) def get_cluster_indices(self) -> tuple[np.ndarray, np.ndarray]: - """Get cluster equation indices as parallel numpy arrays.""" - return self.cluster_idx_i, self.cluster_idx_j + """Get cluster equation indices as parallel numpy arrays. + + Converts group assignments to pairs (i, j) where var[i] == var[j]. + Returns indices for all pairs within each group. 
+ """ + return self._groups_to_pairs(self.cluster_groups) def get_segment_indices(self) -> tuple[np.ndarray, np.ndarray]: - """Get segment equation indices as parallel numpy arrays.""" - return self.segment_idx_i, self.segment_idx_j + """Get segment equation indices as parallel numpy arrays. + + Converts group assignments to pairs (i, j) where var[i] == var[j]. + Returns indices for all pairs within each group. + """ + return self._groups_to_pairs(self.segment_groups) + + @staticmethod + def _groups_to_pairs(groups: np.ndarray) -> tuple[np.ndarray, np.ndarray]: + """Convert group assignments to equation pairs. + + For each group with members [a, b, c, ...], generates pairs: + (a, b), (a, c), ... to equate all members to the first. + """ + if len(groups) == 0: + return np.array([], dtype=np.int32), np.array([], dtype=np.int32) + + # Find unique groups (excluding -1) + unique_groups = np.unique(groups) + unique_groups = unique_groups[unique_groups >= 0] + + idx_i, idx_j = [], [] + for group_id in unique_groups: + members = np.where(groups == group_id)[0] + if len(members) > 1: + # Equate all members to the first + first = members[0] + for other in members[1:]: + idx_i.append(first) + idx_j.append(other) + + return np.array(idx_i, dtype=np.int32), np.array(idx_j, dtype=np.int32) class ClusteringModel(Submodel): @@ -575,31 +751,22 @@ def __init__( model: FlowSystemModel, clustering_parameters: ClusteringParameters, flow_system: FlowSystem, - clustering_data: Clustering | dict[tuple, Clustering], - components_to_clusterize: list[Component] | None, + clustering_indices: ClusteringIndices, + components_to_clusterize: list[Component] | None = None, ): """ Args: model: The FlowSystemModel to add constraints to. clustering_parameters: Parameters controlling clustering behavior. flow_system: The FlowSystem being optimized. - clustering_data: Either a single Clustering object (simple case) or a dict - mapping (period_label, scenario_label) tuples to Clustering objects - (multi-dimensional case). + clustering_indices: Precomputed equation indices (from Clustering or user-provided). components_to_clusterize: Components to apply clustering to. If None, all components. """ super().__init__(model, label_of_element='Clustering', label_of_model='Clustering') self.flow_system = flow_system self.clustering_parameters = clustering_parameters self.components_to_clusterize = components_to_clusterize - - # Handle both single and multi-dimensional clustering - if isinstance(clustering_data, dict): - self.clustering_data_dict = clustering_data - self.is_multi_dimensional = True - else: - self.clustering_data_dict = {(None, None): clustering_data} - self.is_multi_dimensional = False + self.clustering_indices = clustering_indices def do_modeling(self): """Create equality constraints for clustered time indices. 
@@ -639,94 +806,57 @@ def do_modeling(self): if piece.inside_piece is not None: binary_vars[piece.inside_piece.name] = piece.inside_piece - # Create constraints for each clustering (period/scenario combination) - for (period, scenario), clustering_or_indices in self.clustering_data_dict.items(): - suffix = self._make_suffix(period, scenario) - - # Support both Clustering objects (fresh) and ClusteringIndices (restored from file) - if isinstance(clustering_or_indices, ClusteringIndices): - indices_pairs = [ - ('cluster', clustering_or_indices.get_cluster_indices()), - ('segment', clustering_or_indices.get_segment_indices()), - ] - else: - # Original Clustering object - indices_pairs = [ - ('cluster', clustering_or_indices.get_equation_indices(skip_first_index_of_period=True)), - ('segment', clustering_or_indices.get_segment_equation_indices()), - ] - - for constraint_type, indices in indices_pairs: - if len(indices[0]) == 0: - continue - - # Batch continuous variables into single constraint - if continuous_vars: - self._add_equality_constraint( - continuous_vars, indices, period, scenario, f'{suffix}_{constraint_type}' - ) - - # Individual constraints for binaries (needed for flexibility correction vars) - for var in binary_vars.values(): - self._add_equality_constraint( - {var.name: var}, - indices, - period, - scenario, - f'{suffix}_{constraint_type}|{var.name}', - allow_flexibility=True, - ) + # Create constraints from clustering indices + indices = self.clustering_indices + + for constraint_type, idx_pair in [ + ('cluster', indices.get_cluster_indices()), + ('segment', indices.get_segment_indices()), + ]: + if len(idx_pair[0]) == 0: + continue + + # Batch continuous variables into single constraint + if continuous_vars: + self._add_equality_constraint(continuous_vars, idx_pair, f'base_{constraint_type}') + + # Individual constraints for binaries (needed for flexibility correction vars) + for var in binary_vars.values(): + self._add_equality_constraint( + {var.name: var}, idx_pair, f'base_{constraint_type}|{var.name}', allow_flexibility=True + ) # Add penalty for flexibility deviations self._add_flexibility_penalty() - def _make_suffix(self, period, scenario) -> str: - """Create constraint name suffix from period/scenario labels.""" - parts = [] - if period is not None: - parts.append(f'p{period}') - if scenario is not None: - parts.append(f's{scenario}') - return '_'.join(parts) if parts else 'base' - def _add_equality_constraint( self, variables: dict[str, linopy.Variable], indices: tuple[np.ndarray, np.ndarray], - period, - scenario, suffix: str, allow_flexibility: bool = False, ) -> None: - """Add equality constraint: var[idx_a] == var[idx_b] for all index pairs. + """Add equality constraint: var[idx_i] == var[idx_j] for all index pairs. Args: variables: Variables to constrain (batched if multiple). - indices: Tuple of (idx_a, idx_b) arrays - timesteps to equate. - period: Period label for selecting variable slice (or None). - scenario: Scenario label for selecting variable slice (or None). + indices: Tuple of (idx_i, idx_j) arrays - timesteps to equate. suffix: Constraint name suffix. allow_flexibility: If True, add correction variables for binaries. 
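For the binary case, the relaxation behind `flexibility_percent` and `flexibility_penalty` can be sketched as follows (an assumption-laden illustration: the deviation-variable names are hypothetical and the exact formulation in flixopt may differ):

    import linopy
    import numpy as np
    import pandas as pd

    m = linopy.Model()
    time = pd.RangeIndex(6, name='time')
    k = m.add_variables(binary=True, coords=[time], name='k')

    idx_i, idx_j = np.array([1, 2]), np.array([4, 5])
    eq = pd.RangeIndex(len(idx_i), name='eq_idx')
    s = m.add_variables(binary=True, coords=[eq], name='deviation')  # hypothetical slack

    diff = (k.isel(time=idx_i) - k.isel(time=idx_j)).rename({'time': 'eq_idx'}).assign_coords(eq_idx=eq)
    m.add_constraints(diff - s <= 0)   # a deviation must be "bought" where k_i > k_j
    m.add_constraints(-diff - s <= 0)  # ... and where k_j > k_i
    m.add_constraints(s.sum() <= 0.05 * len(idx_i))  # flexibility_percent = 5
    # The objective would additionally pay flexibility_penalty * s.sum()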
""" import linopy - idx_a, idx_b = indices - n_equations = len(idx_a) + idx_i, idx_j = indices + n_equations = len(idx_i) # Build constraint expression for each variable expressions = [] for name, var in variables.items(): - # Select period/scenario slice if variable has those dimensions - if period is not None and 'period' in var.dims: - var = var.sel(period=period) - if scenario is not None and 'scenario' in var.dims: - var = var.sel(scenario=scenario) - if 'time' not in var.dims: continue - # Compute difference: var[idx_a] - var[idx_b] - diff = var.isel(time=idx_a) - var.isel(time=idx_b) + # Compute difference: var[idx_i] - var[idx_j] + diff = var.isel(time=idx_i) - var.isel(time=idx_j) # Replace time dim with integer eq_idx (avoids duplicate datetime coords) diff = diff.rename({'time': 'eq_idx'}).assign_coords(eq_idx=np.arange(n_equations)) diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index f3e18d44a..427cc5a55 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -642,19 +642,27 @@ def to_dataset(self, include_solution: bool = True) -> xr.Dataset: if components: ds.attrs['_clustering_components'] = json.dumps([c.label for c in components]) - # Store equation indices as DataArrays (efficient binary storage) + # Store group assignments as DataArrays (efficient binary storage) + # Get or create ClusteringIndices clustering_obj = self._clustering_info.get('clustering') + indices_dict = self._clustering_info.get('clustering_indices') + if clustering_obj is not None: if isinstance(clustering_obj, dict): - # Multi-dimensional: {(period, scenario): Clustering} - # For now, only support single clustering (most common case) clustering_obj = next(iter(clustering_obj.values())) - indices = ClusteringIndices.from_clustering(clustering_obj) - ds['_clustering_cluster_idx_i'] = xr.DataArray(indices.cluster_idx_i, dims=['_cluster_eq']) - ds['_clustering_cluster_idx_j'] = xr.DataArray(indices.cluster_idx_j, dims=['_cluster_eq']) - ds['_clustering_segment_idx_i'] = xr.DataArray(indices.segment_idx_i, dims=['_segment_eq']) - ds['_clustering_segment_idx_j'] = xr.DataArray(indices.segment_idx_j, dims=['_segment_eq']) + elif indices_dict is not None: + indices = next(iter(indices_dict.values())) + else: + indices = None + + if indices is not None: + ds['_clustering_cluster_groups'] = xr.DataArray( + indices.cluster_groups, dims=['time'], coords={'time': self.timesteps} + ) + ds['_clustering_segment_groups'] = xr.DataArray( + indices.segment_groups, dims=['time'], coords={'time': self.timesteps} + ) # Add version info ds.attrs['flixopt_version'] = __version__ @@ -757,12 +765,10 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: # Restore parameters params = cls._resolve_reference_structure(json.loads(reference_structure['_clustering_params']), {}) - # Restore indices from DataArrays + # Restore group assignments from DataArrays indices = ClusteringIndices( - cluster_idx_i=ds['_clustering_cluster_idx_i'].values if '_clustering_cluster_idx_i' in ds else None, - cluster_idx_j=ds['_clustering_cluster_idx_j'].values if '_clustering_cluster_idx_j' in ds else None, - segment_idx_i=ds['_clustering_segment_idx_i'].values if '_clustering_segment_idx_i' in ds else None, - segment_idx_j=ds['_clustering_segment_idx_j'].values if '_clustering_segment_idx_j' in ds else None, + cluster_groups=ds['_clustering_cluster_groups'].values if '_clustering_cluster_groups' in ds else None, + segment_groups=ds['_clustering_segment_groups'].values if '_clustering_segment_groups' in ds else None, ) # 
Restore component references @@ -779,10 +785,12 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: 'components_to_clusterize': components_to_clusterize, 'restored_from_file': True, } + n_cluster_groups = len(np.unique(indices.cluster_groups[indices.cluster_groups >= 0])) + n_segment_groups = len(np.unique(indices.segment_groups[indices.segment_groups >= 0])) logger.info( f'Restored clustering: n_clusters={params.n_clusters}, duration={params.cluster_duration}, ' - f'n_segments={params.n_segments}, {len(indices.cluster_idx_i)} cluster + ' - f'{len(indices.segment_idx_i)} segment equations.' + f'n_segments={params.n_segments}, {n_cluster_groups} cluster groups + ' + f'{n_segment_groups} segment groups.' ) # Reconnect network to populate bus inputs/outputs (not stored in NetCDF). @@ -1344,30 +1352,35 @@ def build_model(self, normalize_weights: bool = True) -> FlowSystem: def _add_clustering_constraints(self) -> None: """Add clustering constraints to the model.""" - from .clustering import ClusteringModel + from .clustering import ClusteringIndices, ClusteringModel info = self._clustering_info or {} - # Check for required keys - support both fresh clustering and restored from file - if 'clustering' in info: - # Fresh clustering with Clustering objects - clustering_data = info['clustering'] - elif 'clustering_indices' in info: - # Restored from file with ClusteringIndices objects - clustering_data = info['clustering_indices'] + if 'parameters' not in info: + raise KeyError('_clustering_info missing required key: "parameters"') + + # Get or create ClusteringIndices + if 'clustering_indices' in info: + # Restored from file - use directly + indices_dict = info['clustering_indices'] + # Get the single ClusteringIndices (for now, only support single clustering) + clustering_indices = next(iter(indices_dict.values())) + elif 'clustering' in info: + # Fresh clustering - convert Clustering to ClusteringIndices + clustering_obj = info['clustering'] + if isinstance(clustering_obj, dict): + clustering_obj = next(iter(clustering_obj.values())) + clustering_indices = ClusteringIndices.from_clustering(clustering_obj) else: raise KeyError( '_clustering_info missing required key: either "clustering" (fresh) or "clustering_indices" (restored)' ) - if 'parameters' not in info: - raise KeyError('_clustering_info missing required key: "parameters"') - clustering_model = ClusteringModel( model=self.model, clustering_parameters=info['parameters'], flow_system=self, - clustering_data=clustering_data, + clustering_indices=clustering_indices, components_to_clusterize=info.get('components_to_clusterize'), ) clustering_model.do_modeling() diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 6ab2bca9f..2b3329182 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -17,7 +17,7 @@ if TYPE_CHECKING: import numpy as np - from .clustering import ClusteringParameters + from .clustering import ClusteringIndices, ClusteringParameters from .flow_system import FlowSystem logger = logging.getLogger('flixopt') @@ -399,6 +399,96 @@ def _calculate_clustering_weights(ds) -> dict[str, float]: return weights + def add_clustering( + self, + indices: ClusteringIndices, + parameters: ClusteringParameters | None = None, + components_to_clusterize: list | None = None, + ) -> FlowSystem: + """Add clustering constraints using externally computed indices. + + This method allows applying clustering to a FlowSystem using indices + computed outside of flixopt. 
This is useful when:
+        - You want to cluster based on a subset of time series data (faster tsam)
+        - You have custom clustering logic or algorithms
+        - You want to reuse clustering results across multiple FlowSystems
+
+        The clustering indices define equality constraints that equate variable values
+        at specific timestep pairs. For example, if indices specify (10, 50), then
+        for all clustered variables: var[10] == var[50].
+
+        Args:
+            indices: ClusteringIndices object with precomputed equation indices.
+                Use ClusteringIndices.from_tsam() to create from tsam results.
+            parameters: Optional ClusteringParameters. If None, default parameters
+                are created (no flexibility, include storage). Required parameters
+                like n_clusters and cluster_duration are only used for metadata.
+            components_to_clusterize: Components to apply clustering to.
+                If None, all components are clustered.
+
+        Returns:
+            A new FlowSystem with clustering constraints configured.
+
+        Examples:
+            External clustering with tsam on subset of data:
+
+            >>> import tsam.timeseriesaggregation as tsam
+            >>> # Extract subset of timeseries for clustering
+            >>> subset_df = pd.DataFrame(
+            ...     {
+            ...         'price': flow_system['prices'].values,
+            ...         'demand': flow_system['heat_demand'].values,
+            ...     },
+            ...     index=flow_system.timesteps,
+            ... )
+            >>>
+            >>> # Run tsam on subset
+            >>> aggregation = tsam.TimeSeriesAggregation(subset_df, noTypicalPeriods=8, hoursPerPeriod=24)
+            >>> aggregation.createTypicalPeriods()
+            >>>
+            >>> # Convert to ClusteringIndices (hours must match the tsam setup above)
+            >>> from flixopt.clustering import ClusteringIndices
+            >>> indices = ClusteringIndices.from_tsam(aggregation, hours_per_timestep=1, hours_per_period=24)
+            >>>
+            >>> # Apply to FlowSystem
+            >>> clustered_fs = flow_system.transform.add_clustering(indices)
+            >>> clustered_fs.optimize(solver)
+
+            With custom parameters:
+
+            >>> from flixopt.clustering import ClusteringParameters, ClusteringIndices
+            >>> params = ClusteringParameters(
+            ...     n_clusters=8,
+            ...     cluster_duration='1D',
+            ...     flexibility_percent=5,  # Allow 5% binary deviation
+            ... 
) + >>> clustered_fs = flow_system.transform.add_clustering(indices, parameters=params) + """ + from .clustering import ClusteringIndices, ClusteringParameters + + # Validate indices type + if not isinstance(indices, ClusteringIndices): + raise TypeError(f'indices must be ClusteringIndices, got {type(indices).__name__}') + + # Create default parameters if not provided + if parameters is None: + parameters = ClusteringParameters( + n_clusters=None, # Unknown when using external indices + cluster_duration=1.0, # Placeholder - not used for constraint generation + ) + + # Create a copy of the FlowSystem to avoid modifying the original + clustered_fs = self._fs.copy() + + # Store clustering info + clustered_fs._clustering_info = { + 'parameters': parameters, + 'clustering_indices': {(None, None): indices}, # Single clustering + 'components_to_clusterize': components_to_clusterize, + } + + return clustered_fs + def sel( self, time: str | slice | list[str] | pd.Timestamp | pd.DatetimeIndex | None = None, From 933c878e3cfaafe13ab0aeb4fdb122a45bef3a17 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 16 Dec 2025 09:06:14 +0100 Subject: [PATCH 030/191] Improve clustering organization --- flixopt/clustering.py | 336 ++++++++++++++++++++++------------------- flixopt/flow_system.py | 57 ++++--- 2 files changed, 219 insertions(+), 174 deletions(-) diff --git a/flixopt/clustering.py b/flixopt/clustering.py index 79d9f992c..66ad54ced 100644 --- a/flixopt/clustering.py +++ b/flixopt/clustering.py @@ -33,6 +33,7 @@ if TYPE_CHECKING: import linopy import pandas as pd + import xarray as xr from .core import Scalar, TimeSeriesData from .elements import Component @@ -518,78 +519,97 @@ def labels_for_low_peaks(self) -> list[str]: class ClusteringIndices: - """Stores clustering group assignments for timesteps. + """Compact storage for clustering assignments. - Each timestep is assigned to a cluster group and optionally a segment group. - Timesteps in the same group should have equal variable values. + Stores clustering information in a compact format using: + - `cluster_order`: Which cluster each clustering-period (e.g., day) belongs to + - `period_length`: Number of timesteps per clustering-period + - `segment_durations`: Optional segment durations per cluster (for inner-period segmentation) - The group assignments are stored as integer arrays where: - - A value >= 0 indicates the group ID - - A value of -1 indicates "not in any group" (no equalization) + This is much more compact than storing per-timestep assignments: + - For 365 days: 365 values instead of 8760 + + The cluster_order can optionally have period/scenario dimensions for multi-dimensional + FlowSystems, where the integer values are reused across dimensions. Args: - cluster_groups: Array of shape (n_timesteps,) mapping each timestep to - a cluster group ID. Timesteps with the same group ID (except -1) - will be equalized. - segment_groups: Array of shape (n_timesteps,) mapping each timestep to - a segment group ID. Timesteps with the same group ID (except -1) - will be equalized. + cluster_order: Array of shape (n_cluster_periods,) or DataArray with additional + period/scenario dimensions. Values are cluster IDs (0 to n_clusters-1). + period_length: Number of timesteps per clustering-period (e.g., 24 for daily). + segment_durations: Optional dict mapping (cluster_id, segment_idx) to duration, + or 2D array of shape (n_clusters, n_segments) with durations. 
+ skip_first_of_period: Whether to skip the first timestep of each period + when generating inter-cluster constraints (for storage continuity). """ def __init__( self, - cluster_groups: np.ndarray | None = None, - segment_groups: np.ndarray | None = None, + cluster_order: np.ndarray, + period_length: int, + segment_durations: dict[tuple[int, int], int] | np.ndarray | None = None, + skip_first_of_period: bool = True, ): - self.cluster_groups = cluster_groups if cluster_groups is not None else np.array([], dtype=np.int32) - self.segment_groups = segment_groups if segment_groups is not None else np.array([], dtype=np.int32) + self.cluster_order = np.asarray(cluster_order, dtype=np.int32) + self.period_length = period_length + self.skip_first_of_period = skip_first_of_period + + # Normalize segment_durations to dict format + if segment_durations is None: + self.segment_durations: dict[tuple[int, int], int] | None = None + elif isinstance(segment_durations, dict): + self.segment_durations = segment_durations + else: + # Convert 2D array to dict + arr = np.asarray(segment_durations) + self.segment_durations = {(i, j): int(arr[i, j]) for i in range(arr.shape[0]) for j in range(arr.shape[1])} + + @property + def n_cluster_periods(self) -> int: + """Number of clustering-periods (e.g., days).""" + return len(self.cluster_order) + + @property + def n_clusters(self) -> int: + """Number of unique clusters.""" + return int(np.max(self.cluster_order)) + 1 + + @property + def n_timesteps(self) -> int: + """Total number of timesteps.""" + return self.n_cluster_periods * self.period_length + + @property + def n_segments(self) -> int | None: + """Number of segments per cluster, or None if no segmentation.""" + if self.segment_durations is None: + return None + # Get max segment index + 1 + return max(seg_idx for _, seg_idx in self.segment_durations.keys()) + 1 @classmethod def from_clustering(cls, clustering: Clustering) -> ClusteringIndices: - """Create from a Clustering object by extracting group assignments.""" - n_timesteps = clustering.nr_of_time_steps + """Create from a Clustering object.""" period_length = int(clustering.hours_per_period / clustering.hours_per_time_step) - # Build cluster groups: cluster_id * period_length + position_in_period - # Skip first timestep of each period for storage continuity - cluster_groups = np.full(n_timesteps, -1, dtype=np.int32) - for period_idx, cluster_id in enumerate(clustering.tsam.clusterOrder): - start_idx = period_idx * period_length - # Skip first timestep (position 0) for storage continuity - for pos in range(1, period_length): - ts_idx = start_idx + pos - if ts_idx < n_timesteps: - cluster_groups[ts_idx] = cluster_id * period_length + pos - - # Build segment groups - segment_groups = np.full(n_timesteps, -1, dtype=np.int32) + # Extract segment durations if segmentation is used + segment_durations = None if clustering.n_segments is not None: - segment_counter = 0 - segment_duration_dict = clustering.tsam.segmentDurationDict['Segment Duration'] - - for period_idx, cluster_id in enumerate(clustering.tsam.clusterOrder): - period_offset = period_idx * period_length - start_step = 0 + segment_durations = dict(clustering.tsam.segmentDurationDict['Segment Duration']) - for seg_idx in range(clustering.n_segments): - duration = segment_duration_dict[(cluster_id, seg_idx)] - # All timesteps in this segment get the same group ID - for step in range(duration): - ts_idx = period_offset + start_step + step - if ts_idx < n_timesteps: - segment_groups[ts_idx] = 
segment_counter - segment_counter += 1 - start_step += duration - - return cls(cluster_groups=cluster_groups, segment_groups=segment_groups) + return cls( + cluster_order=np.array(clustering.tsam.clusterOrder, dtype=np.int32), + period_length=period_length, + segment_durations=segment_durations, + skip_first_of_period=True, + ) @classmethod def from_tsam( cls, aggregation: tsam.TimeSeriesAggregation, - hours_per_timestep: float, - hours_per_period: float, - skip_first_index_of_period: bool = True, + hours_per_timestep: float | None = None, + hours_per_period: float | None = None, + skip_first_of_period: bool = True, ) -> ClusteringIndices: """Create from a tsam TimeSeriesAggregation object directly. @@ -600,144 +620,152 @@ def from_tsam( Args: aggregation: A tsam TimeSeriesAggregation object after calling createTypicalPeriods(). - hours_per_timestep: Duration of each timestep in hours (must match - the resolution used when creating the tsam aggregation). - hours_per_period: Duration of each period in hours (must match - hoursPerPeriod used when creating the tsam aggregation). - skip_first_index_of_period: Skip first timestep of each period when + hours_per_timestep: Duration of each timestep in hours. If None, + uses aggregation.resolution. + hours_per_period: Duration of each period in hours. If None, + uses aggregation.hoursPerPeriod. + skip_first_of_period: Skip first timestep of each period when creating inter-cluster constraints. Default True (recommended for correct storage state transitions). Returns: - ClusteringIndices with group assignments for constraint generation. + ClusteringIndices with compact cluster assignments. Examples: >>> import tsam.timeseriesaggregation as tsam - >>> import pandas as pd - >>> - >>> # Create subset DataFrame with key time series - >>> subset_df = pd.DataFrame( - ... { - ... 'electricity_price': prices, - ... 'heat_demand': demand, - ... }, - ... index=timesteps, - ... ) >>> - >>> # Run tsam clustering - >>> aggregation = tsam.TimeSeriesAggregation( - ... subset_df, - ... noTypicalPeriods=8, - ... hoursPerPeriod=24, - ... resolution=1, # 1-hour timesteps - ... ) + >>> # Run tsam on subset of data + >>> aggregation = tsam.TimeSeriesAggregation(subset_df, noTypicalPeriods=8, hoursPerPeriod=24, resolution=1) >>> aggregation.createTypicalPeriods() >>> - >>> # Convert to ClusteringIndices - >>> indices = ClusteringIndices.from_tsam( - ... aggregation, - ... hours_per_timestep=1, - ... hours_per_period=24, - ... ) + >>> # Convert to ClusteringIndices (auto-detects parameters) + >>> indices = ClusteringIndices.from_tsam(aggregation) >>> >>> # Apply to FlowSystem >>> clustered_fs = flow_system.transform.add_clustering(indices) - - With inner-period segmentation: - - >>> aggregation = tsam.TimeSeriesAggregation( - ... subset_df, - ... noTypicalPeriods=8, - ... hoursPerPeriod=24, - ... resolution=1, - ... segmentation=True, - ... noSegments=4, - ... ) - >>> aggregation.createTypicalPeriods() - >>> indices = ClusteringIndices.from_tsam(aggregation, 1, 24) """ if not TSAM_AVAILABLE: raise ImportError("The 'tsam' package is required. 
Install it with 'pip install tsam'.") - period_length = int(hours_per_period / hours_per_timestep) - n_timesteps = len(aggregation.timeSeries) + # Auto-detect parameters from aggregation + if hours_per_timestep is None: + hours_per_timestep = aggregation.resolution + if hours_per_period is None: + hours_per_period = aggregation.hoursPerPeriod - # Build cluster groups: cluster_id * period_length + position_in_period - cluster_groups = np.full(n_timesteps, -1, dtype=np.int32) - start_pos = 1 if skip_first_index_of_period else 0 - - for period_idx, cluster_id in enumerate(aggregation.clusterOrder): - start_idx = period_idx * period_length - for pos in range(start_pos, period_length): - ts_idx = start_idx + pos - if ts_idx < n_timesteps: - cluster_groups[ts_idx] = cluster_id * period_length + pos + period_length = int(hours_per_period / hours_per_timestep) - # Build segment groups - segment_groups = np.full(n_timesteps, -1, dtype=np.int32) + # Extract segment durations if segmentation is used + segment_durations = None if aggregation.segmentation and hasattr(aggregation, 'segmentDurationDict'): - segment_counter = 0 - n_segments = aggregation.noSegments - segment_duration_dict = aggregation.segmentDurationDict['Segment Duration'] - - for period_idx, cluster_id in enumerate(aggregation.clusterOrder): - period_offset = period_idx * period_length - start_step = 0 - - for seg_idx in range(n_segments): - duration = segment_duration_dict[(cluster_id, seg_idx)] - # All timesteps in this segment get the same group ID - for step in range(duration): - ts_idx = period_offset + start_step + step - if ts_idx < n_timesteps: - segment_groups[ts_idx] = segment_counter - segment_counter += 1 - start_step += duration - - return cls(cluster_groups=cluster_groups, segment_groups=segment_groups) + segment_durations = dict(aggregation.segmentDurationDict['Segment Duration']) + + return cls( + cluster_order=np.array(aggregation.clusterOrder, dtype=np.int32), + period_length=period_length, + segment_durations=segment_durations, + skip_first_of_period=skip_first_of_period, + ) def get_cluster_indices(self) -> tuple[np.ndarray, np.ndarray]: - """Get cluster equation indices as parallel numpy arrays. + """Get inter-cluster equation indices as parallel numpy arrays. - Converts group assignments to pairs (i, j) where var[i] == var[j]. - Returns indices for all pairs within each group. + Returns pairs (i, j) where var[i] == var[j] for timesteps at the same + position within periods belonging to the same cluster. """ - return self._groups_to_pairs(self.cluster_groups) + # Group periods by cluster + cluster_to_periods: dict[int, list[int]] = {} + for period_idx, cluster_id in enumerate(self.cluster_order): + cluster_to_periods.setdefault(int(cluster_id), []).append(period_idx) - def get_segment_indices(self) -> tuple[np.ndarray, np.ndarray]: - """Get segment equation indices as parallel numpy arrays. + idx_i, idx_j = [], [] + start_pos = 1 if self.skip_first_of_period else 0 - Converts group assignments to pairs (i, j) where var[i] == var[j]. - Returns indices for all pairs within each group. 
- """ - return self._groups_to_pairs(self.segment_groups) + for periods in cluster_to_periods.values(): + if len(periods) <= 1: + continue + # Equate all periods to the first one at each position + first_period = periods[0] + for pos in range(start_pos, self.period_length): + first_ts = first_period * self.period_length + pos + for other_period in periods[1:]: + other_ts = other_period * self.period_length + pos + idx_i.append(first_ts) + idx_j.append(other_ts) - @staticmethod - def _groups_to_pairs(groups: np.ndarray) -> tuple[np.ndarray, np.ndarray]: - """Convert group assignments to equation pairs. + return np.array(idx_i, dtype=np.int32), np.array(idx_j, dtype=np.int32) - For each group with members [a, b, c, ...], generates pairs: - (a, b), (a, c), ... to equate all members to the first. + def get_segment_indices(self) -> tuple[np.ndarray, np.ndarray]: + """Get intra-segment equation indices as parallel numpy arrays. + + Returns pairs (i, j) where var[i] == var[j] for timesteps within + the same segment (when segmentation is enabled). """ - if len(groups) == 0: + if self.segment_durations is None: return np.array([], dtype=np.int32), np.array([], dtype=np.int32) - # Find unique groups (excluding -1) - unique_groups = np.unique(groups) - unique_groups = unique_groups[unique_groups >= 0] - idx_i, idx_j = [], [] - for group_id in unique_groups: - members = np.where(groups == group_id)[0] - if len(members) > 1: - # Equate all members to the first - first = members[0] - for other in members[1:]: - idx_i.append(first) - idx_j.append(other) + n_segments = self.n_segments + + for period_idx, cluster_id in enumerate(self.cluster_order): + period_offset = period_idx * self.period_length + start_step = 0 + + for seg_idx in range(n_segments): + duration = self.segment_durations[(int(cluster_id), seg_idx)] + # Equate all timesteps in segment to the first + first_ts = period_offset + start_step + for step in range(1, duration): + idx_i.append(first_ts) + idx_j.append(period_offset + start_step + step) + start_step += duration return np.array(idx_i, dtype=np.int32), np.array(idx_j, dtype=np.int32) + def to_dataarray(self, time_index: pd.DatetimeIndex | None = None) -> xr.DataArray: + """Convert cluster_order to a DataArray for storage. + + Args: + time_index: Optional time index to derive cluster_period coordinates. + If provided, uses the start time of each period as coordinate. + + Returns: + DataArray with cluster assignments, shape (n_cluster_periods,). + """ + import xarray as xr + + if time_index is not None and len(time_index) >= self.n_cluster_periods * self.period_length: + # Use start of each period as coordinate + coords = {'cluster_period': time_index[:: self.period_length][: self.n_cluster_periods]} + else: + coords = {'cluster_period': np.arange(self.n_cluster_periods)} + + return xr.DataArray( + self.cluster_order, + dims=['cluster_period'], + coords=coords, + attrs={'period_length': self.period_length, 'skip_first_of_period': self.skip_first_of_period}, + ) + + def segment_durations_to_dataarray(self) -> xr.DataArray | None: + """Convert segment_durations to a DataArray for storage. + + Returns: + DataArray of shape (n_clusters, n_segments), or None if no segmentation. 
+ """ + import xarray as xr + + if self.segment_durations is None: + return None + + n_clusters = self.n_clusters + n_segments = self.n_segments + arr = np.zeros((n_clusters, n_segments), dtype=np.int32) + for (cluster_id, seg_idx), duration in self.segment_durations.items(): + arr[cluster_id, seg_idx] = duration + + return xr.DataArray(arr, dims=['cluster', 'segment']) + class ClusteringModel(Submodel): """Model that adds clustering constraints to equate variables across clustered time segments. diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 427cc5a55..8103bbfd1 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -642,7 +642,7 @@ def to_dataset(self, include_solution: bool = True) -> xr.Dataset: if components: ds.attrs['_clustering_components'] = json.dumps([c.label for c in components]) - # Store group assignments as DataArrays (efficient binary storage) + # Store clustering indices in compact format # Get or create ClusteringIndices clustering_obj = self._clustering_info.get('clustering') indices_dict = self._clustering_info.get('clustering_indices') @@ -657,12 +657,13 @@ def to_dataset(self, include_solution: bool = True) -> xr.Dataset: indices = None if indices is not None: - ds['_clustering_cluster_groups'] = xr.DataArray( - indices.cluster_groups, dims=['time'], coords={'time': self.timesteps} - ) - ds['_clustering_segment_groups'] = xr.DataArray( - indices.segment_groups, dims=['time'], coords={'time': self.timesteps} - ) + # Store cluster_order (compact: n_cluster_periods instead of n_timesteps) + ds['_clustering_cluster_order'] = indices.to_dataarray(self.timesteps) + + # Store segment durations if segmentation is used + segment_da = indices.segment_durations_to_dataarray() + if segment_da is not None: + ds['_clustering_segment_durations'] = segment_da # Add version info ds.attrs['flixopt_version'] = __version__ @@ -765,11 +766,27 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: # Restore parameters params = cls._resolve_reference_structure(json.loads(reference_structure['_clustering_params']), {}) - # Restore group assignments from DataArrays - indices = ClusteringIndices( - cluster_groups=ds['_clustering_cluster_groups'].values if '_clustering_cluster_groups' in ds else None, - segment_groups=ds['_clustering_segment_groups'].values if '_clustering_segment_groups' in ds else None, - ) + # Restore from compact format + if '_clustering_cluster_order' in ds: + cluster_order_da = ds['_clustering_cluster_order'] + period_length = int(cluster_order_da.attrs.get('period_length', 24)) + skip_first = cluster_order_da.attrs.get('skip_first_of_period', True) + + # Restore segment durations if present + segment_durations = None + if '_clustering_segment_durations' in ds: + segment_da = ds['_clustering_segment_durations'] + segment_durations = segment_da.values # 2D array + + indices = ClusteringIndices( + cluster_order=cluster_order_da.values, + period_length=period_length, + segment_durations=segment_durations, + skip_first_of_period=skip_first, + ) + else: + # Fallback for old format - shouldn't normally happen + indices = None # Restore component references components_to_clusterize = None @@ -781,17 +798,17 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: flow_system._clustering_info = { 'parameters': params, - 'clustering_indices': {(None, None): indices}, # ClusteringIndices keyed by (period, scenario) + 'clustering_indices': {(None, None): indices} if indices else {}, 'components_to_clusterize': components_to_clusterize, 
'restored_from_file': True, } - n_cluster_groups = len(np.unique(indices.cluster_groups[indices.cluster_groups >= 0])) - n_segment_groups = len(np.unique(indices.segment_groups[indices.segment_groups >= 0])) - logger.info( - f'Restored clustering: n_clusters={params.n_clusters}, duration={params.cluster_duration}, ' - f'n_segments={params.n_segments}, {n_cluster_groups} cluster groups + ' - f'{n_segment_groups} segment groups.' - ) + if indices: + logger.info( + f'Restored clustering: n_clusters={indices.n_clusters}, ' + f'n_cluster_periods={indices.n_cluster_periods}, ' + f'period_length={indices.period_length}, ' + f'n_segments={indices.n_segments}.' + ) # Reconnect network to populate bus inputs/outputs (not stored in NetCDF). flow_system.connect_and_transform() From f96d9f25d120b62ed9227ec2a78d0a4c94bf6c86 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 16 Dec 2025 09:33:24 +0100 Subject: [PATCH 031/191] Improve clustering organization --- flixopt/clustering.py | 251 +++++++++++------------------------------ flixopt/flow_system.py | 67 ++++------- 2 files changed, 92 insertions(+), 226 deletions(-) diff --git a/flixopt/clustering.py b/flixopt/clustering.py index 66ad54ced..9aad8718b 100644 --- a/flixopt/clustering.py +++ b/flixopt/clustering.py @@ -518,164 +518,97 @@ def labels_for_low_peaks(self) -> list[str]: return [ts.name for ts in self.time_series_for_low_peaks] -class ClusteringIndices: +@register_class_for_io +class ClusteringIndices(Interface): """Compact storage for clustering assignments. - Stores clustering information in a compact format using: - - `cluster_order`: Which cluster each clustering-period (e.g., day) belongs to - - `period_length`: Number of timesteps per clustering-period - - `segment_durations`: Optional segment durations per cluster (for inner-period segmentation) - - This is much more compact than storing per-timestep assignments: - - For 365 days: 365 values instead of 8760 + Stores clustering in a compact format: + - `cluster_order`: DataArray (cluster_period,) - which cluster each period belongs to + - `segment_assignment`: DataArray (cluster, position) - segment ID per position in cluster - The cluster_order can optionally have period/scenario dimensions for multi-dimensional - FlowSystems, where the integer values are reused across dimensions. + For 365 days with 24h periods: stores 365 + 8×24 = 557 values instead of 8760. Args: - cluster_order: Array of shape (n_cluster_periods,) or DataArray with additional - period/scenario dimensions. Values are cluster IDs (0 to n_clusters-1). - period_length: Number of timesteps per clustering-period (e.g., 24 for daily). - segment_durations: Optional dict mapping (cluster_id, segment_idx) to duration, - or 2D array of shape (n_clusters, n_segments) with durations. - skip_first_of_period: Whether to skip the first timestep of each period - when generating inter-cluster constraints (for storage continuity). + cluster_order: DataArray of shape (cluster_period,) with cluster IDs. + period_length: Number of timesteps per clustering-period. + segment_assignment: Optional DataArray (cluster, position) with segment IDs. + skip_first_of_period: Skip first timestep for storage continuity. 
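To make the storage saving concrete, a quick sketch (365 daily periods of 24 hourly steps; random cluster IDs stand in for a real tsam result):

    import numpy as np
    import xarray as xr

    period_length = 24
    cluster_order = xr.DataArray(
        np.random.default_rng(0).integers(0, 8, size=365),  # each day -> one of 8 typical days
        dims=['cluster_period'],
        name='cluster_order',
    )
    # 365 stored values instead of 8760 per-timestep assignments; the timestep
    # for (period p, position t) is recovered as p * period_length + t.
    print(cluster_order.sizes)  # {'cluster_period': 365}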
""" def __init__( self, - cluster_order: np.ndarray, + cluster_order: xr.DataArray, period_length: int, - segment_durations: dict[tuple[int, int], int] | np.ndarray | None = None, + segment_assignment: xr.DataArray | None = None, skip_first_of_period: bool = True, ): - self.cluster_order = np.asarray(cluster_order, dtype=np.int32) - self.period_length = period_length - self.skip_first_of_period = skip_first_of_period + import xarray as xr - # Normalize segment_durations to dict format - if segment_durations is None: - self.segment_durations: dict[tuple[int, int], int] | None = None - elif isinstance(segment_durations, dict): - self.segment_durations = segment_durations + if isinstance(cluster_order, xr.DataArray): + self.cluster_order = cluster_order.rename('cluster_order') if cluster_order.name is None else cluster_order else: - # Convert 2D array to dict - arr = np.asarray(segment_durations) - self.segment_durations = {(i, j): int(arr[i, j]) for i in range(arr.shape[0]) for j in range(arr.shape[1])} - - @property - def n_cluster_periods(self) -> int: - """Number of clustering-periods (e.g., days).""" - return len(self.cluster_order) - - @property - def n_clusters(self) -> int: - """Number of unique clusters.""" - return int(np.max(self.cluster_order)) + 1 - - @property - def n_timesteps(self) -> int: - """Total number of timesteps.""" - return self.n_cluster_periods * self.period_length + self.cluster_order = xr.DataArray(cluster_order, dims=['cluster_period'], name='cluster_order') - @property - def n_segments(self) -> int | None: - """Number of segments per cluster, or None if no segmentation.""" - if self.segment_durations is None: - return None - # Get max segment index + 1 - return max(seg_idx for _, seg_idx in self.segment_durations.keys()) + 1 + self.period_length = int(period_length) + self.skip_first_of_period = skip_first_of_period - @classmethod - def from_clustering(cls, clustering: Clustering) -> ClusteringIndices: - """Create from a Clustering object.""" - period_length = int(clustering.hours_per_period / clustering.hours_per_time_step) - - # Extract segment durations if segmentation is used - segment_durations = None - if clustering.n_segments is not None: - segment_durations = dict(clustering.tsam.segmentDurationDict['Segment Duration']) - - return cls( - cluster_order=np.array(clustering.tsam.clusterOrder, dtype=np.int32), - period_length=period_length, - segment_durations=segment_durations, - skip_first_of_period=True, - ) + if segment_assignment is not None and isinstance(segment_assignment, xr.DataArray): + self.segment_assignment = ( + segment_assignment.rename('segment_assignment') + if segment_assignment.name is None + else segment_assignment + ) + else: + self.segment_assignment = segment_assignment @classmethod def from_tsam( cls, aggregation: tsam.TimeSeriesAggregation, - hours_per_timestep: float | None = None, - hours_per_period: float | None = None, skip_first_of_period: bool = True, ) -> ClusteringIndices: - """Create from a tsam TimeSeriesAggregation object directly. - - This allows users to run tsam on a subset of their time series data - (e.g., only key drivers like prices and demands) and then apply the - resulting clustering to a full FlowSystem. + """Create from a tsam TimeSeriesAggregation object. Args: - aggregation: A tsam TimeSeriesAggregation object after calling - createTypicalPeriods(). - hours_per_timestep: Duration of each timestep in hours. If None, - uses aggregation.resolution. - hours_per_period: Duration of each period in hours. 
If None, - uses aggregation.hoursPerPeriod. - skip_first_of_period: Skip first timestep of each period when - creating inter-cluster constraints. Default True (recommended - for correct storage state transitions). - - Returns: - ClusteringIndices with compact cluster assignments. + aggregation: tsam object after calling createTypicalPeriods(). + skip_first_of_period: Skip first timestep of each period (for storage). Examples: - >>> import tsam.timeseriesaggregation as tsam - >>> - >>> # Run tsam on subset of data - >>> aggregation = tsam.TimeSeriesAggregation(subset_df, noTypicalPeriods=8, hoursPerPeriod=24, resolution=1) + >>> aggregation = tsam.TimeSeriesAggregation(df, noTypicalPeriods=8, hoursPerPeriod=24) >>> aggregation.createTypicalPeriods() - >>> - >>> # Convert to ClusteringIndices (auto-detects parameters) >>> indices = ClusteringIndices.from_tsam(aggregation) - >>> - >>> # Apply to FlowSystem - >>> clustered_fs = flow_system.transform.add_clustering(indices) """ - if not TSAM_AVAILABLE: - raise ImportError("The 'tsam' package is required. Install it with 'pip install tsam'.") + import xarray as xr - # Auto-detect parameters from aggregation - if hours_per_timestep is None: - hours_per_timestep = aggregation.resolution - if hours_per_period is None: - hours_per_period = aggregation.hoursPerPeriod + if not TSAM_AVAILABLE: + raise ImportError("The 'tsam' package is required. Install with 'pip install tsam'.") - period_length = int(hours_per_period / hours_per_timestep) + period_length = int(aggregation.hoursPerPeriod / aggregation.resolution) + cluster_order = xr.DataArray(aggregation.clusterOrder, dims=['cluster_period'], name='cluster_order') - # Extract segment durations if segmentation is used - segment_durations = None + # Build segment assignment if segmentation is used + segment_assignment = None if aggregation.segmentation and hasattr(aggregation, 'segmentDurationDict'): - segment_durations = dict(aggregation.segmentDurationDict['Segment Duration']) + n_clusters = aggregation.noTypicalPeriods + segment_duration_dict = aggregation.segmentDurationDict['Segment Duration'] - return cls( - cluster_order=np.array(aggregation.clusterOrder, dtype=np.int32), - period_length=period_length, - segment_durations=segment_durations, - skip_first_of_period=skip_first_of_period, - ) + # Build (cluster, position) -> segment_id mapping + arr = np.zeros((n_clusters, period_length), dtype=np.int32) + for cluster_id in range(n_clusters): + pos = 0 + for seg_idx in range(aggregation.noSegments): + duration = segment_duration_dict[(cluster_id, seg_idx)] + arr[cluster_id, pos : pos + duration] = seg_idx + pos += duration - def get_cluster_indices(self) -> tuple[np.ndarray, np.ndarray]: - """Get inter-cluster equation indices as parallel numpy arrays. + segment_assignment = xr.DataArray(arr, dims=['cluster', 'position'], name='segment_assignment') - Returns pairs (i, j) where var[i] == var[j] for timesteps at the same - position within periods belonging to the same cluster. 
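A small sketch of the (cluster, position) layout built above (toy values, assuming one cluster with a 6-step period split into segments of 2 and 4 steps):

    import numpy as np
    import xarray as xr

    # segment_assignment maps (cluster, position) -> segment ID within that cluster.
    segment_assignment = xr.DataArray(
        np.array([[0, 0, 1, 1, 1, 1]], dtype=np.int32),
        dims=['cluster', 'position'],
        name='segment_assignment',
    )
    # Positions sharing a segment ID are equalized in every period of this cluster.
    print(segment_assignment.isel(cluster=0).values)  # [0 0 1 1 1 1]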
- """ - # Group periods by cluster + return cls(cluster_order, period_length, segment_assignment, skip_first_of_period) + + def get_cluster_indices(self) -> tuple[np.ndarray, np.ndarray]: + """Get inter-cluster equation pairs (i, j) where var[i] == var[j].""" cluster_to_periods: dict[int, list[int]] = {} - for period_idx, cluster_id in enumerate(self.cluster_order): + for period_idx, cluster_id in enumerate(self.cluster_order.values): cluster_to_periods.setdefault(int(cluster_id), []).append(period_idx) idx_i, idx_j = [], [] @@ -684,88 +617,38 @@ def get_cluster_indices(self) -> tuple[np.ndarray, np.ndarray]: for periods in cluster_to_periods.values(): if len(periods) <= 1: continue - # Equate all periods to the first one at each position first_period = periods[0] for pos in range(start_pos, self.period_length): first_ts = first_period * self.period_length + pos for other_period in periods[1:]: - other_ts = other_period * self.period_length + pos idx_i.append(first_ts) - idx_j.append(other_ts) + idx_j.append(other_period * self.period_length + pos) return np.array(idx_i, dtype=np.int32), np.array(idx_j, dtype=np.int32) def get_segment_indices(self) -> tuple[np.ndarray, np.ndarray]: - """Get intra-segment equation indices as parallel numpy arrays. - - Returns pairs (i, j) where var[i] == var[j] for timesteps within - the same segment (when segmentation is enabled). - """ - if self.segment_durations is None: + """Get intra-segment equation pairs (i, j) where var[i] == var[j].""" + if self.segment_assignment is None: return np.array([], dtype=np.int32), np.array([], dtype=np.int32) idx_i, idx_j = [], [] - n_segments = self.n_segments + seg_arr = self.segment_assignment.values # (cluster, position) - for period_idx, cluster_id in enumerate(self.cluster_order): + for period_idx, cluster_id in enumerate(self.cluster_order.values): period_offset = period_idx * self.period_length - start_step = 0 + segment_ids = seg_arr[int(cluster_id)] # (position,) - for seg_idx in range(n_segments): - duration = self.segment_durations[(int(cluster_id), seg_idx)] - # Equate all timesteps in segment to the first - first_ts = period_offset + start_step - for step in range(1, duration): - idx_i.append(first_ts) - idx_j.append(period_offset + start_step + step) - start_step += duration + # Group positions by segment + for seg_id in np.unique(segment_ids): + positions = np.where(segment_ids == seg_id)[0] + if len(positions) > 1: + first_ts = period_offset + positions[0] + for pos in positions[1:]: + idx_i.append(first_ts) + idx_j.append(period_offset + pos) return np.array(idx_i, dtype=np.int32), np.array(idx_j, dtype=np.int32) - def to_dataarray(self, time_index: pd.DatetimeIndex | None = None) -> xr.DataArray: - """Convert cluster_order to a DataArray for storage. - - Args: - time_index: Optional time index to derive cluster_period coordinates. - If provided, uses the start time of each period as coordinate. - - Returns: - DataArray with cluster assignments, shape (n_cluster_periods,). 
- """ - import xarray as xr - - if time_index is not None and len(time_index) >= self.n_cluster_periods * self.period_length: - # Use start of each period as coordinate - coords = {'cluster_period': time_index[:: self.period_length][: self.n_cluster_periods]} - else: - coords = {'cluster_period': np.arange(self.n_cluster_periods)} - - return xr.DataArray( - self.cluster_order, - dims=['cluster_period'], - coords=coords, - attrs={'period_length': self.period_length, 'skip_first_of_period': self.skip_first_of_period}, - ) - - def segment_durations_to_dataarray(self) -> xr.DataArray | None: - """Convert segment_durations to a DataArray for storage. - - Returns: - DataArray of shape (n_clusters, n_segments), or None if no segmentation. - """ - import xarray as xr - - if self.segment_durations is None: - return None - - n_clusters = self.n_clusters - n_segments = self.n_segments - arr = np.zeros((n_clusters, n_segments), dtype=np.int32) - for (cluster_id, seg_idx), duration in self.segment_durations.items(): - arr[cluster_id, seg_idx] = duration - - return xr.DataArray(arr, dims=['cluster', 'segment']) - class ClusteringModel(Submodel): """Model that adds clustering constraints to equate variables across clustered time segments. diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 8103bbfd1..156254f5a 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -631,39 +631,35 @@ def to_dataset(self, include_solution: bool = True) -> xr.Dataset: if self._clustering_info is not None: from .clustering import ClusteringIndices, ClusteringParameters - # Serialize parameters + # Serialize parameters using Interface pattern params = self._clustering_info.get('parameters') if isinstance(params, ClusteringParameters): - params_ref, _ = params._create_reference_structure() + params_ref, params_arrays = params._create_reference_structure() ds.attrs['_clustering_params'] = json.dumps(params_ref) + ds.update(params_arrays) # Store component labels to clusterize components = self._clustering_info.get('components_to_clusterize') if components: ds.attrs['_clustering_components'] = json.dumps([c.label for c in components]) - # Store clustering indices in compact format - # Get or create ClusteringIndices + # Serialize ClusteringIndices using Interface pattern clustering_obj = self._clustering_info.get('clustering') indices_dict = self._clustering_info.get('clustering_indices') if clustering_obj is not None: if isinstance(clustering_obj, dict): clustering_obj = next(iter(clustering_obj.values())) - indices = ClusteringIndices.from_clustering(clustering_obj) + indices = ClusteringIndices.from_tsam(clustering_obj.tsam) elif indices_dict is not None: indices = next(iter(indices_dict.values())) else: indices = None if indices is not None: - # Store cluster_order (compact: n_cluster_periods instead of n_timesteps) - ds['_clustering_cluster_order'] = indices.to_dataarray(self.timesteps) - - # Store segment durations if segmentation is used - segment_da = indices.segment_durations_to_dataarray() - if segment_da is not None: - ds['_clustering_segment_durations'] = segment_da + indices_ref, indices_arrays = indices._create_reference_structure() + ds.attrs['_clustering_indices'] = json.dumps(indices_ref) + ds.update(indices_arrays) # Add version info ds.attrs['flixopt_version'] = __version__ @@ -759,34 +755,21 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: carrier = cls._resolve_reference_structure(carrier_data, {}) flow_system._carriers.add(carrier) - # Restore clustering info if present - 
if '_clustering_params' in reference_structure: - from .clustering import ClusteringIndices - + # Restore clustering info if present (using Interface pattern) + if '_clustering_params' in reference_structure or '_clustering_indices' in reference_structure: # Restore parameters - params = cls._resolve_reference_structure(json.loads(reference_structure['_clustering_params']), {}) - - # Restore from compact format - if '_clustering_cluster_order' in ds: - cluster_order_da = ds['_clustering_cluster_order'] - period_length = int(cluster_order_da.attrs.get('period_length', 24)) - skip_first = cluster_order_da.attrs.get('skip_first_of_period', True) - - # Restore segment durations if present - segment_durations = None - if '_clustering_segment_durations' in ds: - segment_da = ds['_clustering_segment_durations'] - segment_durations = segment_da.values # 2D array - - indices = ClusteringIndices( - cluster_order=cluster_order_da.values, - period_length=period_length, - segment_durations=segment_durations, - skip_first_of_period=skip_first, + params = None + if '_clustering_params' in reference_structure: + params = cls._resolve_reference_structure( + json.loads(reference_structure['_clustering_params']), arrays_dict + ) + + # Restore ClusteringIndices using Interface pattern + indices = None + if '_clustering_indices' in reference_structure: + indices = cls._resolve_reference_structure( + json.loads(reference_structure['_clustering_indices']), arrays_dict ) - else: - # Fallback for old format - shouldn't normally happen - indices = None # Restore component references components_to_clusterize = None @@ -803,11 +786,11 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: 'restored_from_file': True, } if indices: + n_cluster_periods = len(indices.cluster_order) + n_clusters = int(indices.cluster_order.max()) + 1 logger.info( - f'Restored clustering: n_clusters={indices.n_clusters}, ' - f'n_cluster_periods={indices.n_cluster_periods}, ' - f'period_length={indices.period_length}, ' - f'n_segments={indices.n_segments}.' + f'Restored clustering: {n_clusters} clusters, ' + f'{n_cluster_periods} periods, period_length={indices.period_length}.' ) # Reconnect network to populate bus inputs/outputs (not stored in NetCDF). 
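
A minimal round-trip sketch of the behaviour this patch is aiming for (the
file name and solver settings are placeholders, not taken from the patch
itself):

    import flixopt as fx

    clustered_fs = flow_system.transform.cluster(n_clusters=4, cluster_duration='6h')
    clustered_fs.to_netcdf('clustered.nc4')  # stores the compact cluster_order alongside the data

    restored = fx.FlowSystem.from_netcdf('clustered.nc4')
    # _clustering_info is rebuilt from the dataset attrs and arrays, so the
    # clustering constraints can be re-created without re-running tsam:
    restored.optimize(fx.solvers.HighsSolver(0.01 / 100, 60))
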
From 2eb0ee4b5ea77b258a851ba1db6ceb69546822b2 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 16 Dec 2025 09:40:40 +0100 Subject: [PATCH 032/191] Improve clustering organization --- flixopt/flow_system.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 156254f5a..22be780f3 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -1366,11 +1366,11 @@ def _add_clustering_constraints(self) -> None: # Get the single ClusteringIndices (for now, only support single clustering) clustering_indices = next(iter(indices_dict.values())) elif 'clustering' in info: - # Fresh clustering - convert Clustering to ClusteringIndices + # Fresh clustering - convert Clustering to ClusteringIndices via tsam clustering_obj = info['clustering'] if isinstance(clustering_obj, dict): clustering_obj = next(iter(clustering_obj.values())) - clustering_indices = ClusteringIndices.from_clustering(clustering_obj) + clustering_indices = ClusteringIndices.from_tsam(clustering_obj.tsam) else: raise KeyError( '_clustering_info missing required key: either "clustering" (fresh) or "clustering_indices" (restored)' From 5f114f5c071625790342f0a8369428e1626bc61f Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 16 Dec 2025 09:43:30 +0100 Subject: [PATCH 033/191] Improve clustering organization --- flixopt/optimization.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/flixopt/optimization.py b/flixopt/optimization.py index 32bdd7410..80f1f2c50 100644 --- a/flixopt/optimization.py +++ b/flixopt/optimization.py @@ -422,8 +422,12 @@ def do_modeling(self) -> ClusteredOptimization: self.model = self.flow_system.create_model(self.normalize_weights) self.model.do_modeling() # Add Clustering Submodel after modeling the rest + # Convert Clustering to ClusteringIndices via tsam + from .clustering import ClusteringIndices + + clustering_indices = ClusteringIndices.from_tsam(self.clustering.tsam) self.clustering_model = ClusteringModel( - self.model, self.clustering_parameters, self.flow_system, self.clustering, self.components_to_clusterize + self.model, self.clustering_parameters, self.flow_system, clustering_indices, self.components_to_clusterize ) self.clustering_model.do_modeling() self.durations['modeling'] = round(timeit.default_timer() - t_start, 2) From 529ef507c7bde61a916f6dc2288790ddf9c273bf Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Tue, 16 Dec 2025 10:20:09 +0100 Subject: [PATCH 034/191] Improve clustering organization --- flixopt/__init__.py | 3 +- flixopt/clustering.py | 165 ++++++++++++++++++---------------- flixopt/flow_system.py | 87 +++++++----------- flixopt/optimization.py | 8 +- flixopt/transform_accessor.py | 55 ++++++------ 5 files changed, 149 insertions(+), 169 deletions(-) diff --git a/flixopt/__init__.py b/flixopt/__init__.py index e5f3fd78e..1e3fee5bd 100644 --- a/flixopt/__init__.py +++ b/flixopt/__init__.py @@ -15,7 +15,7 @@ # Import commonly used classes and functions from . 
import linear_converters, plotting, results, solvers from .carrier import Carrier, CarrierContainer -from .clustering import ClusteringIndices, ClusteringParameters +from .clustering import ClusteringParameters from .components import ( LinearConverter, Sink, @@ -58,7 +58,6 @@ 'Piecewise', 'PiecewiseConversion', 'PiecewiseEffects', - 'ClusteringIndices', 'ClusteringParameters', 'PlotResult', 'plotting', diff --git a/flixopt/clustering.py b/flixopt/clustering.py index 9aad8718b..baa1b2a17 100644 --- a/flixopt/clustering.py +++ b/flixopt/clustering.py @@ -448,6 +448,15 @@ class ClusteringParameters(Interface): segments with high values. time_series_for_low_peaks: List of TimeSeriesData to force inclusion of segments with low values. + cluster_order: Pre-computed cluster assignments. DataArray of shape (cluster_period,) + specifying which cluster each period belongs to. If provided, tsam clustering + is skipped. + period_length: Number of timesteps per clustering-period. Required if cluster_order + is provided. + segment_assignment: Pre-computed segment assignments. DataArray of shape (cluster, position) + specifying segment ID for each position. Optional. + skip_first_of_period: Whether to skip the first timestep of each period for storage + constraints (to maintain inter-period continuity). Default is True. Examples: Basic usage (8 typical days): @@ -465,13 +474,15 @@ class ClusteringParameters(Interface): ... n_segments=4, # Reduce 24h to 4 segments per day ... ) - Segmentation only (no clustering, just reduce to 4 segments per day): + With pre-computed cluster assignments (external clustering): - >>> clustered_fs = flow_system.transform.cluster( - ... n_clusters=None, # Skip clustering + >>> params = fx.ClusteringParameters( + ... n_clusters=8, ... cluster_duration='1D', - ... n_segments=4, + ... cluster_order=xr.DataArray([0, 1, 2, 0, 1, ...], dims=['cluster_period']), + ... period_length=24, ... 
) + >>> clustered_fs = flow_system.transform.cluster(parameters=params) """ def __init__( @@ -485,7 +496,14 @@ def __init__( flexibility_penalty: float = 0, time_series_for_high_peaks: list[TimeSeriesData] | None = None, time_series_for_low_peaks: list[TimeSeriesData] | None = None, + # Clustering indices (optional - computed from tsam if not provided) + cluster_order: xr.DataArray | None = None, + period_length: int | None = None, + segment_assignment: xr.DataArray | None = None, + skip_first_of_period: bool = True, ): + import xarray as xr + self.n_clusters = n_clusters self.cluster_duration = cluster_duration # Store original for serialization self.cluster_duration_hours = _parse_cluster_duration(cluster_duration) @@ -496,6 +514,39 @@ def __init__( self.flexibility_penalty = flexibility_penalty self.time_series_for_high_peaks: list[TimeSeriesData] = time_series_for_high_peaks or [] self.time_series_for_low_peaks: list[TimeSeriesData] = time_series_for_low_peaks or [] + self.skip_first_of_period = skip_first_of_period + + # Clustering indices - ensure DataArrays have names for IO + if cluster_order is not None: + if isinstance(cluster_order, xr.DataArray): + self.cluster_order = ( + cluster_order.rename('cluster_order') if cluster_order.name is None else cluster_order + ) + else: + self.cluster_order = xr.DataArray(cluster_order, dims=['cluster_period'], name='cluster_order') + else: + self.cluster_order = None + + self.period_length = int(period_length) if period_length is not None else None + + if segment_assignment is not None: + if isinstance(segment_assignment, xr.DataArray): + self.segment_assignment = ( + segment_assignment.rename('segment_assignment') + if segment_assignment.name is None + else segment_assignment + ) + else: + self.segment_assignment = xr.DataArray( + segment_assignment, dims=['cluster', 'position'], name='segment_assignment' + ) + else: + self.segment_assignment = None + + @property + def has_indices(self) -> bool: + """Whether clustering indices have been computed/provided.""" + return self.cluster_order is not None and self.period_length is not None @property def use_extreme_periods(self) -> bool: @@ -517,83 +568,27 @@ def labels_for_low_peaks(self) -> list[str]: """Names of time series used for low peak selection.""" return [ts.name for ts in self.time_series_for_low_peaks] - -@register_class_for_io -class ClusteringIndices(Interface): - """Compact storage for clustering assignments. - - Stores clustering in a compact format: - - `cluster_order`: DataArray (cluster_period,) - which cluster each period belongs to - - `segment_assignment`: DataArray (cluster, position) - segment ID per position in cluster - - For 365 days with 24h periods: stores 365 + 8×24 = 557 values instead of 8760. - - Args: - cluster_order: DataArray of shape (cluster_period,) with cluster IDs. - period_length: Number of timesteps per clustering-period. - segment_assignment: Optional DataArray (cluster, position) with segment IDs. - skip_first_of_period: Skip first timestep for storage continuity. 
- """ - - def __init__( - self, - cluster_order: xr.DataArray, - period_length: int, - segment_assignment: xr.DataArray | None = None, - skip_first_of_period: bool = True, - ): - import xarray as xr - - if isinstance(cluster_order, xr.DataArray): - self.cluster_order = cluster_order.rename('cluster_order') if cluster_order.name is None else cluster_order - else: - self.cluster_order = xr.DataArray(cluster_order, dims=['cluster_period'], name='cluster_order') - - self.period_length = int(period_length) - self.skip_first_of_period = skip_first_of_period - - if segment_assignment is not None and isinstance(segment_assignment, xr.DataArray): - self.segment_assignment = ( - segment_assignment.rename('segment_assignment') - if segment_assignment.name is None - else segment_assignment - ) - else: - self.segment_assignment = segment_assignment - - @classmethod - def from_tsam( - cls, - aggregation: tsam.TimeSeriesAggregation, - skip_first_of_period: bool = True, - ) -> ClusteringIndices: - """Create from a tsam TimeSeriesAggregation object. + def populate_from_tsam(self, aggregation: tsam.TimeSeriesAggregation) -> None: + """Populate clustering indices from a tsam TimeSeriesAggregation object. Args: aggregation: tsam object after calling createTypicalPeriods(). - skip_first_of_period: Skip first timestep of each period (for storage). - - Examples: - >>> aggregation = tsam.TimeSeriesAggregation(df, noTypicalPeriods=8, hoursPerPeriod=24) - >>> aggregation.createTypicalPeriods() - >>> indices = ClusteringIndices.from_tsam(aggregation) """ import xarray as xr if not TSAM_AVAILABLE: raise ImportError("The 'tsam' package is required. Install with 'pip install tsam'.") - period_length = int(aggregation.hoursPerPeriod / aggregation.resolution) - cluster_order = xr.DataArray(aggregation.clusterOrder, dims=['cluster_period'], name='cluster_order') + self.period_length = int(aggregation.hoursPerPeriod / aggregation.resolution) + self.cluster_order = xr.DataArray(aggregation.clusterOrder, dims=['cluster_period'], name='cluster_order') # Build segment assignment if segmentation is used - segment_assignment = None if aggregation.segmentation and hasattr(aggregation, 'segmentDurationDict'): n_clusters = aggregation.noTypicalPeriods segment_duration_dict = aggregation.segmentDurationDict['Segment Duration'] # Build (cluster, position) -> segment_id mapping - arr = np.zeros((n_clusters, period_length), dtype=np.int32) + arr = np.zeros((n_clusters, self.period_length), dtype=np.int32) for cluster_id in range(n_clusters): pos = 0 for seg_idx in range(aggregation.noSegments): @@ -601,12 +596,17 @@ def from_tsam( arr[cluster_id, pos : pos + duration] = seg_idx pos += duration - segment_assignment = xr.DataArray(arr, dims=['cluster', 'position'], name='segment_assignment') - - return cls(cluster_order, period_length, segment_assignment, skip_first_of_period) + self.segment_assignment = xr.DataArray(arr, dims=['cluster', 'position'], name='segment_assignment') def get_cluster_indices(self) -> tuple[np.ndarray, np.ndarray]: - """Get inter-cluster equation pairs (i, j) where var[i] == var[j].""" + """Get inter-cluster equation pairs (i, j) where var[i] == var[j]. + + Returns: + Tuple of (idx_i, idx_j) arrays of timestep indices to equate. + """ + if self.cluster_order is None or self.period_length is None: + raise ValueError('Clustering indices not set. 
Call populate_from_tsam() first or provide cluster_order.') + cluster_to_periods: dict[int, list[int]] = {} for period_idx, cluster_id in enumerate(self.cluster_order.values): cluster_to_periods.setdefault(int(cluster_id), []).append(period_idx) @@ -627,10 +627,17 @@ def get_cluster_indices(self) -> tuple[np.ndarray, np.ndarray]: return np.array(idx_i, dtype=np.int32), np.array(idx_j, dtype=np.int32) def get_segment_indices(self) -> tuple[np.ndarray, np.ndarray]: - """Get intra-segment equation pairs (i, j) where var[i] == var[j].""" + """Get intra-segment equation pairs (i, j) where var[i] == var[j]. + + Returns: + Tuple of (idx_i, idx_j) arrays of timestep indices to equate. + """ if self.segment_assignment is None: return np.array([], dtype=np.int32), np.array([], dtype=np.int32) + if self.cluster_order is None or self.period_length is None: + raise ValueError('Clustering indices not set. Call populate_from_tsam() first or provide cluster_order.') + idx_i, idx_j = [], [] seg_arr = self.segment_assignment.values # (cluster, position) @@ -662,22 +669,19 @@ def __init__( model: FlowSystemModel, clustering_parameters: ClusteringParameters, flow_system: FlowSystem, - clustering_indices: ClusteringIndices, components_to_clusterize: list[Component] | None = None, ): """ Args: model: The FlowSystemModel to add constraints to. - clustering_parameters: Parameters controlling clustering behavior. + clustering_parameters: Parameters controlling clustering behavior (must have indices populated). flow_system: The FlowSystem being optimized. - clustering_indices: Precomputed equation indices (from Clustering or user-provided). components_to_clusterize: Components to apply clustering to. If None, all components. """ super().__init__(model, label_of_element='Clustering', label_of_model='Clustering') self.flow_system = flow_system self.clustering_parameters = clustering_parameters self.components_to_clusterize = components_to_clusterize - self.clustering_indices = clustering_indices def do_modeling(self): """Create equality constraints for clustered time indices. @@ -687,6 +691,11 @@ def do_modeling(self): - status: binary on/off variables (individual constraints) - inside_piece: piecewise segment binaries (individual constraints) """ + if not self.clustering_parameters.has_indices: + raise ValueError( + 'ClusteringParameters must have indices populated. ' + 'Call populate_from_tsam() or provide cluster_order/period_length directly.' 
+ ) components = self.components_to_clusterize or list(self.flow_system.components.values()) @@ -717,12 +726,12 @@ def do_modeling(self): if piece.inside_piece is not None: binary_vars[piece.inside_piece.name] = piece.inside_piece - # Create constraints from clustering indices - indices = self.clustering_indices + # Create constraints from clustering parameters + params = self.clustering_parameters for constraint_type, idx_pair in [ - ('cluster', indices.get_cluster_indices()), - ('segment', indices.get_segment_indices()), + ('cluster', params.get_cluster_indices()), + ('segment', params.get_segment_indices()), ]: if len(idx_pair[0]) == 0: continue diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 22be780f3..541be3ab3 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -629,11 +629,20 @@ def to_dataset(self, include_solution: bool = True) -> xr.Dataset: # Include clustering info if present if self._clustering_info is not None: - from .clustering import ClusteringIndices, ClusteringParameters + from .clustering import ClusteringParameters - # Serialize parameters using Interface pattern + # Ensure parameters have indices populated before saving params = self._clustering_info.get('parameters') if isinstance(params, ClusteringParameters): + # Populate indices from tsam if not already set + if not params.has_indices: + clustering_obj = self._clustering_info.get('clustering') + if clustering_obj is not None: + if isinstance(clustering_obj, dict): + clustering_obj = next(iter(clustering_obj.values())) + params.populate_from_tsam(clustering_obj.tsam) + + # Serialize parameters (now includes indices) using Interface pattern params_ref, params_arrays = params._create_reference_structure() ds.attrs['_clustering_params'] = json.dumps(params_ref) ds.update(params_arrays) @@ -643,24 +652,6 @@ def to_dataset(self, include_solution: bool = True) -> xr.Dataset: if components: ds.attrs['_clustering_components'] = json.dumps([c.label for c in components]) - # Serialize ClusteringIndices using Interface pattern - clustering_obj = self._clustering_info.get('clustering') - indices_dict = self._clustering_info.get('clustering_indices') - - if clustering_obj is not None: - if isinstance(clustering_obj, dict): - clustering_obj = next(iter(clustering_obj.values())) - indices = ClusteringIndices.from_tsam(clustering_obj.tsam) - elif indices_dict is not None: - indices = next(iter(indices_dict.values())) - else: - indices = None - - if indices is not None: - indices_ref, indices_arrays = indices._create_reference_structure() - ds.attrs['_clustering_indices'] = json.dumps(indices_ref) - ds.update(indices_arrays) - # Add version info ds.attrs['flixopt_version'] = __version__ @@ -756,20 +747,11 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: flow_system._carriers.add(carrier) # Restore clustering info if present (using Interface pattern) - if '_clustering_params' in reference_structure or '_clustering_indices' in reference_structure: - # Restore parameters - params = None - if '_clustering_params' in reference_structure: - params = cls._resolve_reference_structure( - json.loads(reference_structure['_clustering_params']), arrays_dict - ) - - # Restore ClusteringIndices using Interface pattern - indices = None - if '_clustering_indices' in reference_structure: - indices = cls._resolve_reference_structure( - json.loads(reference_structure['_clustering_indices']), arrays_dict - ) + if '_clustering_params' in reference_structure: + # Restore parameters (now includes indices via 
Interface pattern) + params = cls._resolve_reference_structure( + json.loads(reference_structure['_clustering_params']), arrays_dict + ) # Restore component references components_to_clusterize = None @@ -781,16 +763,15 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: flow_system._clustering_info = { 'parameters': params, - 'clustering_indices': {(None, None): indices} if indices else {}, 'components_to_clusterize': components_to_clusterize, 'restored_from_file': True, } - if indices: - n_cluster_periods = len(indices.cluster_order) - n_clusters = int(indices.cluster_order.max()) + 1 + if params.has_indices: + n_cluster_periods = len(params.cluster_order) + n_clusters = int(params.cluster_order.max()) + 1 logger.info( f'Restored clustering: {n_clusters} clusters, ' - f'{n_cluster_periods} periods, period_length={indices.period_length}.' + f'{n_cluster_periods} periods, period_length={params.period_length}.' ) # Reconnect network to populate bus inputs/outputs (not stored in NetCDF). @@ -1352,35 +1333,31 @@ def build_model(self, normalize_weights: bool = True) -> FlowSystem: def _add_clustering_constraints(self) -> None: """Add clustering constraints to the model.""" - from .clustering import ClusteringIndices, ClusteringModel + from .clustering import ClusteringModel info = self._clustering_info or {} if 'parameters' not in info: raise KeyError('_clustering_info missing required key: "parameters"') - # Get or create ClusteringIndices - if 'clustering_indices' in info: - # Restored from file - use directly - indices_dict = info['clustering_indices'] - # Get the single ClusteringIndices (for now, only support single clustering) - clustering_indices = next(iter(indices_dict.values())) - elif 'clustering' in info: - # Fresh clustering - convert Clustering to ClusteringIndices via tsam + parameters = info['parameters'] + + # Populate indices from tsam if not already set + if not parameters.has_indices: + if 'clustering' not in info: + raise KeyError( + '_clustering_info missing "clustering" and parameters have no indices. ' + 'Either provide cluster_order/period_length or run transform.cluster() first.' 
+ ) clustering_obj = info['clustering'] if isinstance(clustering_obj, dict): clustering_obj = next(iter(clustering_obj.values())) - clustering_indices = ClusteringIndices.from_tsam(clustering_obj.tsam) - else: - raise KeyError( - '_clustering_info missing required key: either "clustering" (fresh) or "clustering_indices" (restored)' - ) + parameters.populate_from_tsam(clustering_obj.tsam) clustering_model = ClusteringModel( model=self.model, - clustering_parameters=info['parameters'], + clustering_parameters=parameters, flow_system=self, - clustering_indices=clustering_indices, components_to_clusterize=info.get('components_to_clusterize'), ) clustering_model.do_modeling() diff --git a/flixopt/optimization.py b/flixopt/optimization.py index 80f1f2c50..68ff169d9 100644 --- a/flixopt/optimization.py +++ b/flixopt/optimization.py @@ -422,12 +422,10 @@ def do_modeling(self) -> ClusteredOptimization: self.model = self.flow_system.create_model(self.normalize_weights) self.model.do_modeling() # Add Clustering Submodel after modeling the rest - # Convert Clustering to ClusteringIndices via tsam - from .clustering import ClusteringIndices - - clustering_indices = ClusteringIndices.from_tsam(self.clustering.tsam) + # Populate clustering indices from tsam + self.clustering_parameters.populate_from_tsam(self.clustering.tsam) self.clustering_model = ClusteringModel( - self.model, self.clustering_parameters, self.flow_system, clustering_indices, self.components_to_clusterize + self.model, self.clustering_parameters, self.flow_system, self.components_to_clusterize ) self.clustering_model.do_modeling() self.durations['modeling'] = round(timeit.default_timer() - t_start, 2) diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 2b3329182..40341b4c2 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -17,7 +17,7 @@ if TYPE_CHECKING: import numpy as np - from .clustering import ClusteringIndices, ClusteringParameters + from .clustering import ClusteringParameters from .flow_system import FlowSystem logger = logging.getLogger('flixopt') @@ -401,11 +401,10 @@ def _calculate_clustering_weights(ds) -> dict[str, float]: def add_clustering( self, - indices: ClusteringIndices, - parameters: ClusteringParameters | None = None, + parameters: ClusteringParameters, components_to_clusterize: list | None = None, ) -> FlowSystem: - """Add clustering constraints using externally computed indices. + """Add clustering constraints using ClusteringParameters with pre-set indices. This method allows applying clustering to a FlowSystem using indices computed outside of flixopt. This is useful when: @@ -414,15 +413,12 @@ def add_clustering( - You want to reuse clustering results across multiple FlowSystems The clustering indices define equality constraints that equate variable values - at specific timestep pairs. For example, if indices specify (10, 50), then - for all clustered variables: var[10] == var[50]. + at specific timestep pairs. The parameters must have `cluster_order` and + `period_length` set (either directly or via `populate_from_tsam()`). Args: - indices: ClusteringIndices object with precomputed equation indices. - Use ClusteringIndices.from_tsam() to create from tsam results. - parameters: Optional ClusteringParameters. If None, default parameters - are created (no flexibility, include storage). Required parameters - like n_clusters and cluster_duration are only used for metadata. + parameters: ClusteringParameters with clustering indices set. 
+ Must have `cluster_order` and `period_length` populated. components_to_clusterize: Components to apply clustering to. If None, all components are clustered. @@ -446,35 +442,37 @@ def add_clustering( >>> aggregation = tsam.TimeSeriesAggregation(subset_df, noTypicalPeriods=8, hoursPerPeriod=24) >>> aggregation.createTypicalPeriods() >>> - >>> # Convert to ClusteringIndices - >>> from flixopt.clustering import ClusteringIndices - >>> indices = ClusteringIndices.from_tsam(aggregation) + >>> # Create parameters and populate from tsam + >>> params = fx.ClusteringParameters(n_clusters=8, cluster_duration='1D') + >>> params.populate_from_tsam(aggregation) >>> >>> # Apply to FlowSystem - >>> clustered_fs = flow_system.transform.add_clustering(indices) + >>> clustered_fs = flow_system.transform.add_clustering(params) >>> clustered_fs.optimize(solver) - With custom parameters: + With pre-computed cluster assignments: - >>> from flixopt.clustering import ClusteringParameters, ClusteringIndices - >>> params = ClusteringParameters( + >>> import xarray as xr + >>> params = fx.ClusteringParameters( ... n_clusters=8, ... cluster_duration='1D', + ... cluster_order=xr.DataArray([0, 1, 2, 0, 1, 2, 0, 1], dims=['cluster_period']), + ... period_length=24, ... flexibility_percent=5, # Allow 5% binary deviation ... ) - >>> clustered_fs = flow_system.transform.add_clustering(indices, parameters=params) + >>> clustered_fs = flow_system.transform.add_clustering(params) """ - from .clustering import ClusteringIndices, ClusteringParameters + from .clustering import ClusteringParameters - # Validate indices type - if not isinstance(indices, ClusteringIndices): - raise TypeError(f'indices must be ClusteringIndices, got {type(indices).__name__}') + # Validate parameters type + if not isinstance(parameters, ClusteringParameters): + raise TypeError(f'parameters must be ClusteringParameters, got {type(parameters).__name__}') - # Create default parameters if not provided - if parameters is None: - parameters = ClusteringParameters( - n_clusters=None, # Unknown when using external indices - cluster_duration=1.0, # Placeholder - not used for constraint generation + # Validate that indices are set + if not parameters.has_indices: + raise ValueError( + 'ClusteringParameters must have indices set. ' + 'Either provide cluster_order/period_length directly or call populate_from_tsam().' 
) # Create a copy of the FlowSystem to avoid modifying the original @@ -483,7 +481,6 @@ def add_clustering( # Store clustering info clustered_fs._clustering_info = { 'parameters': parameters, - 'clustering_indices': {(None, None): indices}, # Single clustering 'components_to_clusterize': components_to_clusterize, } From f25383252bc4145e8073f5b037b830a625c2f471 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 12:04:37 +0100 Subject: [PATCH 035/191] Add external tsam support with data aggregation - Add tsam_aggregation parameter to ClusteringParameters for passing pre-computed tsam objects from external clustering - Update add_clustering() to aggregate FlowSystem data using the external tsam's cluster assignments - Auto-populate cluster indices when tsam_aggregation is provided - Add notebook 08d-external-clustering.ipynb demonstrating: - Built-in clustering via transform.cluster() - External tsam with data aggregation - Custom cluster indices (binary-only mode) --- docs/notebooks/08d-external-clustering.ipynb | 406 +++++++++++++++++++ flixopt/clustering.py | 7 + flixopt/transform_accessor.py | 68 +++- 3 files changed, 478 insertions(+), 3 deletions(-) create mode 100644 docs/notebooks/08d-external-clustering.ipynb diff --git a/docs/notebooks/08d-external-clustering.ipynb b/docs/notebooks/08d-external-clustering.ipynb new file mode 100644 index 000000000..351f15210 --- /dev/null +++ b/docs/notebooks/08d-external-clustering.ipynb @@ -0,0 +1,406 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# External Clustering\n", + "\n", + "This notebook demonstrates different ways to apply clustering to a FlowSystem:\n", + "\n", + "1. **Built-in clustering** - Let flixopt handle everything via `transform.cluster()`\n", + "2. **External tsam** - Run tsam yourself on a data subset and pass results to flixopt\n", + "3. **Custom indices** - Provide your own cluster assignments directly\n", + "\n", + "The latter two options are useful when:\n", + "- You want to cluster on a subset of time series (faster tsam computation)\n", + "- You have custom clustering algorithms\n", + "- You want to reuse clustering results across multiple FlowSystems" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "import xarray as xr\n", + "\n", + "import flixopt as fx\n", + "\n", + "fx.CONFIG.notebook()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Load a Pre-built FlowSystem\n", + "\n", + "We'll use the district heating system from the data directory." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from pathlib import Path\n", + "\n", + "# Generate example data if not present\n", + "data_file = Path('data/district_heating_system.nc4')\n", + "if not data_file.exists():\n", + " from data.generate_example_systems import create_district_heating_system\n", + "\n", + " fs = create_district_heating_system()\n", + " fs.optimize(fx.solvers.HighsSolver(log_to_console=False))\n", + " fs.to_netcdf(data_file, overwrite=True)\n", + "\n", + "# Load the FlowSystem\n", + "flow_system = fx.FlowSystem.from_netcdf(data_file)\n", + "print(f'Loaded FlowSystem: {len(flow_system.timesteps)} timesteps ({len(flow_system.timesteps) / 96:.0f} days)')\n", + "print(f'Components: {list(flow_system.components.keys())}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Extract key time series from the FlowSystem for later use\n", + "heat_demand = flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile\n", + "elec_price = flow_system.components['GridBuy'].outputs[0].effects_per_flow_hour['costs']\n", + "\n", + "print(f'Heat demand shape: {heat_demand.shape}')\n", + "print(f'Electricity price shape: {elec_price.shape}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Baseline: solve without clustering\n", + "solver = fx.solvers.HighsSolver(mip_gap=0.01, log_to_console=False)\n", + "fs_baseline = flow_system.copy()\n", + "fs_baseline.optimize(solver)\n", + "print(f'Baseline cost (no clustering): {fs_baseline.solution[\"costs\"].item():,.0f} €')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Option 1: Built-in Clustering\n", + "\n", + "The simplest approach - let flixopt handle clustering internally using tsam.\n", + "This extracts ALL time series from the FlowSystem and clusters on them." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create clustered system using built-in method\n", + "fs_builtin = flow_system.transform.cluster(\n", + " n_clusters=8, # Find 8 typical days\n", + " cluster_duration='1D',\n", + ")\n", + "\n", + "fs_builtin.optimize(solver)\n", + "print(f'Built-in clustering cost: {fs_builtin.solution[\"costs\"].item():,.0f} €')\n", + "\n", + "# Access the clustering parameters\n", + "params = fs_builtin._clustering_info['parameters']\n", + "print(f'\\nCluster assignments: {params.cluster_order.values}')\n", + "print(f'Period length: {params.period_length} timesteps')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Option 2: External tsam on Data Subset\n", + "\n", + "Run tsam yourself on a **subset** of time series data, then pass results to flixopt.\n", + "\n", + "This is useful when:\n", + "- You only want to cluster based on the most important time series (faster tsam)\n", + "- You want more control over tsam parameters\n", + "- You want to reuse the same clustering for multiple FlowSystems" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import tsam.timeseriesaggregation as tsam\n", + "\n", + "# Create DataFrame with only the KEY time series\n", + "# (Much faster than letting flixopt extract ALL time series)\n", + "clustering_data = pd.DataFrame(\n", + " {\n", + " 'heat_demand': heat_demand.values,\n", + " 'elec_price': elec_price.values,\n", + " },\n", + " index=flow_system.timesteps,\n", + ")\n", + "\n", + "print(f'Clustering on {len(clustering_data.columns)} time series (subset of FlowSystem data)')\n", + "print(f'Columns: {list(clustering_data.columns)}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Run tsam with custom parameters\n", + "aggregation = tsam.TimeSeriesAggregation(\n", + " clustering_data,\n", + " noTypicalPeriods=8,\n", + " hoursPerPeriod=24,\n", + " resolution=0.25, # 15-min resolution\n", + " clusterMethod='hierarchical',\n", + ")\n", + "aggregation.createTypicalPeriods()\n", + "\n", + "print(f'tsam cluster order: {aggregation.clusterOrder}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create ClusteringParameters with the external tsam aggregation\n", + "# This allows flixopt to use the tsam results to aggregate ALL FlowSystem data\n", + "params_external = fx.ClusteringParameters(\n", + " n_clusters=8,\n", + " cluster_duration='1D',\n", + " tsam_aggregation=aggregation, # Pass the tsam object for data aggregation\n", + ")\n", + "\n", + "print(f'Indices populated: {params_external.has_indices}')\n", + "print(f'Cluster order: {params_external.cluster_order.values}')\n", + "print(f'Period length: {params_external.period_length}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Apply to FlowSystem using add_clustering()\n", + "fs_external = flow_system.transform.add_clustering(params_external)\n", + "\n", + "fs_external.optimize(solver)\n", + "print(f'External tsam clustering cost: {fs_external.solution[\"costs\"].item():,.0f} €')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Option 3: Custom Indices\n", + "\n", + "Provide your own cluster assignments directly - no tsam required.\n", + "\n", + "This is useful when:\n", + "- You 
have a custom clustering algorithm\n",
    "- You want to manually define typical periods (e.g., weekdays vs weekends)\n",
    "- You're loading clustering results from another source"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define custom cluster assignments directly (no tsam involved)\n",
    "# We have 31 days; assign them to 8 clusters with a simple repeating pattern\n",
    "n_days = len(flow_system.timesteps) // 96  # 96 timesteps per day (15-min)\n",
    "print(f'Number of days: {n_days}')\n",
    "\n",
    "# Repeating pattern: day i goes to cluster i % 8, so days 0, 8, 16, ... share a cluster\n",
    "custom_cluster_order = [i % 8 for i in range(n_days)]\n",
    "\n",
    "# Note: With custom indices (no tsam object), we use aggregate_data=False\n",
    "# because we don't have a tsam to transform the data. This only equalizes\n",
    "# binary (on/off) decisions across similar periods.\n",
    "params_custom = fx.ClusteringParameters(\n",
    "    n_clusters=8,\n",
    "    cluster_duration='1D',\n",
    "    aggregate_data=False,  # No tsam available for data transformation\n",
    "    # Provide indices directly\n",
    "    cluster_order=xr.DataArray(custom_cluster_order, dims=['cluster_period'], name='cluster_order'),\n",
    "    period_length=96,  # 96 timesteps per day (15-min resolution)\n",
    ")\n",
    "\n",
    "print(f'Custom indices set: {params_custom.has_indices}')\n",
    "print(f'Cluster order: {params_custom.cluster_order.values}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Apply to FlowSystem\n",
    "fs_custom = flow_system.transform.add_clustering(params_custom)\n",
    "\n",
    "fs_custom.optimize(solver)\n",
    "print(f'Custom clustering cost: {fs_custom.solution[\"costs\"].item():,.0f} €')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Comparison"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "results = pd.DataFrame(\n",
    "    {\n",
    "        'Method': ['Baseline (no clustering)', 'Built-in clustering', 'External tsam (subset)', 'Custom indices'],\n",
    "        'Cost [€]': [\n",
    "            fs_baseline.solution['costs'].item(),\n",
    "            fs_builtin.solution['costs'].item(),\n",
    "            fs_external.solution['costs'].item(),\n",
    "            fs_custom.solution['costs'].item(),\n",
    "        ],\n",
    "    }\n",
    ").set_index('Method')\n",
    "\n",
    "results['Gap vs Baseline [%]'] = (results['Cost [€]'] / results.loc['Baseline (no clustering)', 'Cost [€]'] - 1) * 100\n",
    "results.style.format({'Cost [€]': '{:,.0f}', 'Gap vs Baseline [%]': '{:.2f}'})"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## IO: Save and Reload\n",
    "\n",
    "Clustering indices are automatically saved with the FlowSystem and restored on load."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import tempfile\n", + "\n", + "# Save clustered FlowSystem\n", + "with tempfile.TemporaryDirectory() as tmpdir:\n", + " path = Path(tmpdir) / 'clustered_system.nc4'\n", + " fs_external.to_netcdf(path)\n", + " print(f'Saved to: {path}')\n", + "\n", + " # Reload\n", + " fs_loaded = fx.FlowSystem.from_netcdf(path)\n", + "\n", + " # Check clustering was restored\n", + " params_loaded = fs_loaded._clustering_info['parameters']\n", + " print('\\nRestored clustering:')\n", + " print(f' has_indices: {params_loaded.has_indices}')\n", + " print(f' cluster_order: {params_loaded.cluster_order.values}')\n", + " print(f' period_length: {params_loaded.period_length}')\n", + "\n", + " # Solve reloaded system\n", + " fs_loaded.optimize(solver)\n", + " print(f'\\nReloaded cost: {fs_loaded.solution[\"costs\"].item():,.0f} €')\n", + " print(f'Original cost: {fs_external.solution[\"costs\"].item():,.0f} €')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Advanced: Segmentation with External tsam\n", + "\n", + "You can also provide segment assignments for intra-period aggregation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Run tsam with segmentation on the data subset\n", + "aggregation_seg = tsam.TimeSeriesAggregation(\n", + " clustering_data,\n", + " noTypicalPeriods=8,\n", + " hoursPerPeriod=24,\n", + " resolution=0.25,\n", + " segmentation=True,\n", + " noSegments=12, # 12 segments per day (~2 hours each)\n", + ")\n", + "aggregation_seg.createTypicalPeriods()\n", + "\n", + "# Create parameters with segmentation and tsam for data aggregation\n", + "params_seg = fx.ClusteringParameters(\n", + " n_clusters=8,\n", + " cluster_duration='1D',\n", + " n_segments=12,\n", + " tsam_aggregation=aggregation_seg, # Pass tsam for data aggregation\n", + ")\n", + "\n", + "print(f'Segment assignment shape: {params_seg.segment_assignment.shape}')\n", + "print(f'Segment assignment for cluster 0:\\n{params_seg.segment_assignment.sel(cluster=0).values}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Apply segmented clustering\n", + "fs_segmented = flow_system.transform.add_clustering(params_seg)\n", + "fs_segmented.optimize(solver)\n", + "print(f'Segmented clustering cost: {fs_segmented.solution[\"costs\"].item():,.0f} €')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": "## Summary\n\n| Method | Data Aggregation | When to Use |\n|--------|------------------|-------------|\n| `transform.cluster()` | Yes | Default - let flixopt handle everything |\n| `tsam_aggregation=...` | Yes | External tsam on data subset, with data aggregation |\n| Direct `cluster_order` | No | Custom algorithms or manual period grouping (binary only) |\n\nAll methods use `ClusteringParameters` which stores:\n- `cluster_order`: Which cluster each period belongs to\n- `period_length`: Timesteps per period\n- `segment_assignment`: (optional) Segment IDs within each cluster\n- `tsam_aggregation`: (optional) tsam object for data transformation" + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.11" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/flixopt/clustering.py b/flixopt/clustering.py index 
baa1b2a17..e5323e152 100644 --- a/flixopt/clustering.py +++ b/flixopt/clustering.py @@ -501,6 +501,8 @@ def __init__( period_length: int | None = None, segment_assignment: xr.DataArray | None = None, skip_first_of_period: bool = True, + # External tsam aggregation for data transformation + tsam_aggregation: tsam.TimeSeriesAggregation | None = None, ): import xarray as xr @@ -515,6 +517,7 @@ def __init__( self.time_series_for_high_peaks: list[TimeSeriesData] = time_series_for_high_peaks or [] self.time_series_for_low_peaks: list[TimeSeriesData] = time_series_for_low_peaks or [] self.skip_first_of_period = skip_first_of_period + self.tsam_aggregation = tsam_aggregation # Not serialized - runtime only # Clustering indices - ensure DataArrays have names for IO if cluster_order is not None: @@ -543,6 +546,10 @@ def __init__( else: self.segment_assignment = None + # Auto-populate indices from tsam if provided + if tsam_aggregation is not None and not self.has_indices: + self.populate_from_tsam(tsam_aggregation) + @property def has_indices(self) -> bool: """Whether clustering indices have been computed/provided.""" diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 40341b4c2..d6a44a6a9 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -463,6 +463,7 @@ def add_clustering( >>> clustered_fs = flow_system.transform.add_clustering(params) """ from .clustering import ClusteringParameters + from .core import DataConverter, TimeSeriesData # Validate parameters type if not isinstance(parameters, ClusteringParameters): @@ -472,11 +473,72 @@ def add_clustering( if not parameters.has_indices: raise ValueError( 'ClusteringParameters must have indices set. ' - 'Either provide cluster_order/period_length directly or call populate_from_tsam().' + 'Either provide cluster_order/period_length directly, pass tsam_aggregation, or call populate_from_tsam().' 
             )
 
-        # Create a copy of the FlowSystem to avoid modifying the original
-        clustered_fs = self._fs.copy()
+        # Aggregate data if tsam_aggregation is provided and aggregate_data=True
+        if parameters.aggregate_data and parameters.tsam_aggregation is not None:
+            import numpy as np
+
+            ds = self._fs.to_dataset()
+            tsam_agg = parameters.tsam_aggregation
+
+            # Get aggregated data from tsam (this is pre-computed for the subset that was clustered)
+            aggregated_df = tsam_agg.predictOriginalData()
+
+            # For variables not in the clustering subset, compute aggregation manually
+            # using the cluster assignments
+            period_length = parameters.period_length
+            cluster_order = parameters.cluster_order.values
+            n_timesteps = len(self._fs.timesteps)
+
+            for name in ds.data_vars:
+                da = ds[name]
+                if 'time' not in da.dims:
+                    continue
+
+                if name in aggregated_df.columns:
+                    # Use tsam's aggregated result for columns that were clustered
+                    series = aggregated_df[name]
+                    da_new = DataConverter.to_dataarray(series, self._fs.coords).rename(name).assign_attrs(da.attrs)
+                else:
+                    # Manually aggregate using cluster assignments: each timestep is
+                    # replaced by the mean over the matching positions of all
+                    # periods in the same cluster
+                    values = da.values
+                    # Start from a copy so that any trailing timesteps outside a
+                    # full period keep their original values
+                    aggregated_values = values.copy()
+
+                    # Build mapping: for each cluster, collect all (timestep, position) pairs
+                    n_clusters = int(cluster_order.max()) + 1
+                    cluster_to_timesteps: dict[int, list[tuple[int, int]]] = {c: [] for c in range(n_clusters)}
+                    for period_idx, cluster_id in enumerate(cluster_order):
+                        for pos in range(period_length):
+                            ts_idx = period_idx * period_length + pos
+                            if ts_idx < n_timesteps:
+                                cluster_to_timesteps[int(cluster_id)].append((ts_idx, pos))
+
+                    # For each cluster, compute the mean per position once, then assign back
+                    for _cluster_id, ts_list in cluster_to_timesteps.items():
+                        # Group values by position within the period
+                        position_values: dict[int, list] = {}
+                        for ts_idx, pos in ts_list:
+                            position_values.setdefault(pos, []).append(values[ts_idx])
+
+                        position_means = {pos: np.mean(vals) for pos, vals in position_values.items()}
+                        for ts_idx, pos in ts_list:
+                            aggregated_values[ts_idx] = position_means[pos]
+
+                    da_new = da.copy(data=aggregated_values)
+
+                if TimeSeriesData.is_timeseries_data(da_new):
+                    da_new = TimeSeriesData.from_dataarray(da_new)
+                ds[name] = da_new
+
+            from .flow_system import FlowSystem
+
+            clustered_fs = FlowSystem.from_dataset(ds)
+        else:
+            # No data aggregation - just copy
+            clustered_fs = self._fs.copy()
 
         # Store clustering info
         clustered_fs._clustering_info = {

From 088bf4dd796851b8d22473113529c2d0f13d80f2 Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Thu, 18 Dec 2025 12:24:17 +0100
Subject: [PATCH 036/191] fix: Multi-period clustering: Each period now gets
 its own clustering constraints based on that period's actual cluster
 assignments, rather than using a single set of indices for all periods.
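
In effect (an illustrative sketch, not part of the diff below; it mirrors the
new ClusteringModel signature): constraints are now built once per
(period, scenario) slice instead of once overall:

    for (period, scenario), clustering in clustering_obj.items():
        params = copy.copy(base_parameters)
        params.populate_from_tsam(clustering.tsam)  # this slice's assignments
        ClusteringModel(
            model, params, flow_system,
            period_selector=period,
            scenario_selector=scenario,
        ).do_modeling()  # constrains only var.sel(period=..., scenario=...)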
--- flixopt/clustering.py | 22 +++++++++++++- flixopt/flow_system.py | 67 +++++++++++++++++++++++++++++------------- 2 files changed, 68 insertions(+), 21 deletions(-) diff --git a/flixopt/clustering.py b/flixopt/clustering.py index e5323e152..e67a1a134 100644 --- a/flixopt/clustering.py +++ b/flixopt/clustering.py @@ -677,6 +677,8 @@ def __init__( clustering_parameters: ClusteringParameters, flow_system: FlowSystem, components_to_clusterize: list[Component] | None = None, + period_selector: int | str | None = None, + scenario_selector: str | None = None, ): """ Args: @@ -684,11 +686,22 @@ def __init__( clustering_parameters: Parameters controlling clustering behavior (must have indices populated). flow_system: The FlowSystem being optimized. components_to_clusterize: Components to apply clustering to. If None, all components. + period_selector: If provided, only add constraints for this period (for multi-period FlowSystems). + scenario_selector: If provided, only add constraints for this scenario (for multi-scenario FlowSystems). """ - super().__init__(model, label_of_element='Clustering', label_of_model='Clustering') + # Include period/scenario in label for multi-dimensional cases + label_suffix = '' + if period_selector is not None: + label_suffix += f'|{period_selector}' + if scenario_selector is not None: + label_suffix += f'|{scenario_selector}' + + super().__init__(model, label_of_element='Clustering', label_of_model=f'Clustering{label_suffix}') self.flow_system = flow_system self.clustering_parameters = clustering_parameters self.components_to_clusterize = components_to_clusterize + self.period_selector = period_selector + self.scenario_selector = scenario_selector def do_modeling(self): """Create equality constraints for clustered time indices. @@ -782,6 +795,13 @@ def _add_equality_constraint( if 'time' not in var.dims: continue + # For multi-period/scenario, select only the relevant slice + # Each period/scenario has its own clustering indices + if self.period_selector is not None and 'period' in var.dims: + var = var.sel(period=self.period_selector) + if self.scenario_selector is not None and 'scenario' in var.dims: + var = var.sel(scenario=self.scenario_selector) + # Compute difference: var[idx_i] - var[idx_j] diff = var.isel(time=idx_i) - var.isel(time=idx_j) diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index fedfa7718..5d997c299 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -1333,6 +1333,8 @@ def build_model(self, normalize_weights: bool = True) -> FlowSystem: def _add_clustering_constraints(self) -> None: """Add clustering constraints to the model.""" + import copy + from .clustering import ClusteringModel info = self._clustering_info or {} @@ -1340,27 +1342,52 @@ def _add_clustering_constraints(self) -> None: if 'parameters' not in info: raise KeyError('_clustering_info missing required key: "parameters"') - parameters = info['parameters'] - - # Populate indices from tsam if not already set - if not parameters.has_indices: - if 'clustering' not in info: - raise KeyError( - '_clustering_info missing "clustering" and parameters have no indices. ' - 'Either provide cluster_order/period_length or run transform.cluster() first.' 
+ base_parameters = info['parameters'] + clustering_obj = info.get('clustering') + + # Check if this is a multi-period/scenario clustering + is_multi_dimensional = isinstance(clustering_obj, dict) and len(clustering_obj) > 1 + + if is_multi_dimensional: + # For multi-period/scenario, create separate constraints for each combination + # Each (period, scenario) has its own clustering with different cluster assignments + for (period_label, scenario_label), clustering in clustering_obj.items(): + # Create a copy of parameters with this period's indices + params_copy = copy.copy(base_parameters) + params_copy.populate_from_tsam(clustering.tsam) + + # Determine period/scenario selector + period_selector = period_label if period_label is not None else None + scenario_selector = scenario_label if scenario_label is not None else None + + clustering_model = ClusteringModel( + model=self.model, + clustering_parameters=params_copy, + flow_system=self, + components_to_clusterize=info.get('components_to_clusterize'), + period_selector=period_selector, + scenario_selector=scenario_selector, ) - clustering_obj = info['clustering'] - if isinstance(clustering_obj, dict): - clustering_obj = next(iter(clustering_obj.values())) - parameters.populate_from_tsam(clustering_obj.tsam) - - clustering_model = ClusteringModel( - model=self.model, - clustering_parameters=parameters, - flow_system=self, - components_to_clusterize=info.get('components_to_clusterize'), - ) - clustering_model.do_modeling() + clustering_model.do_modeling() + else: + # Single dimension - use original logic + if not base_parameters.has_indices: + if clustering_obj is None: + raise KeyError( + '_clustering_info missing "clustering" and parameters have no indices. ' + 'Either provide cluster_order/period_length or run transform.cluster() first.' + ) + if isinstance(clustering_obj, dict): + clustering_obj = next(iter(clustering_obj.values())) + base_parameters.populate_from_tsam(clustering_obj.tsam) + + clustering_model = ClusteringModel( + model=self.model, + clustering_parameters=base_parameters, + flow_system=self, + components_to_clusterize=info.get('components_to_clusterize'), + ) + clustering_model.do_modeling() def solve(self, solver: _Solver) -> FlowSystem: """ From 189b7d0c2c9e1a7ac7cdd7341b80ed87cdfca3d5 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 14:25:08 +0100 Subject: [PATCH 037/191] Temp --- flixopt/clustering.py | 151 ++++++++++++++++++++++++++ flixopt/features.py | 7 +- flixopt/flow_system.py | 62 +++++++++++ flixopt/transform_accessor.py | 199 +++++++++++++++++++++++++++++++++- 4 files changed, 415 insertions(+), 4 deletions(-) diff --git a/flixopt/clustering.py b/flixopt/clustering.py index e67a1a134..f9d96924e 100644 --- a/flixopt/clustering.py +++ b/flixopt/clustering.py @@ -861,3 +861,154 @@ def _add_flexibility_penalty(self): expressions={PENALTY_EFFECT_LABEL: (var * penalty).sum(sum_dim)}, target='periodic', ) + + +class TypicalPeriodsModel(Submodel): + """Model that adds storage inter-period linking for typical periods optimization. + + When using cluster_reduce(), timesteps are reduced to only typical (representative) + periods. This model creates variables and constraints to track storage state + across the full original time horizon using boundary state variables. + + The approach: + 1. Create SOC_boundary[d] for each original period d (0 to n_original_periods) + 2. Compute delta_SOC[c] for each typical period c (change in SOC during period) + 3. 
Link: SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]] + 4. Optionally enforce cyclic: SOC_boundary[0] = SOC_boundary[n_original_periods] + + This allows the optimizer to properly value storage for long-term (seasonal) + patterns while only solving for the typical period timesteps. + """ + + def __init__( + self, + model: FlowSystemModel, + flow_system: FlowSystem, + cluster_order: np.ndarray | list, + cluster_occurrences: dict[int, int], + nr_of_typical_periods: int, + timesteps_per_period: int, + storage_cyclic: bool = True, + ): + """ + Args: + model: The FlowSystemModel to add constraints to. + flow_system: The FlowSystem being optimized. + cluster_order: Array indicating which typical period (cluster) each original + period belongs to. Length = n_original_periods. + cluster_occurrences: Dict mapping cluster_id to number of original periods + it represents. + nr_of_typical_periods: Number of typical (representative) periods. + timesteps_per_period: Number of timesteps in each period. + storage_cyclic: If True, enforce SOC_boundary[0] = SOC_boundary[end]. + """ + super().__init__(model, label_of_element='TypicalPeriods', label_of_model='TypicalPeriods') + self.flow_system = flow_system + self.cluster_order = np.array(cluster_order) + self.cluster_occurrences = cluster_occurrences + self.nr_of_typical_periods = nr_of_typical_periods + self.timesteps_per_period = timesteps_per_period + self.storage_cyclic = storage_cyclic + self.n_original_periods = len(self.cluster_order) + + def do_modeling(self): + """Create SOC boundary variables and inter-period linking constraints. + + For each storage: + - SOC_boundary[d]: State of charge at start of original period d + - delta_SOC[c]: Change in SOC during typical period c + - Linking: SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]] + """ + + storages = list(self.flow_system.storages.values()) + if not storages: + logger.info('No storages found - skipping inter-period linking') + return + + logger.info( + f'Adding inter-period storage linking for {len(storages)} storages ' + f'({self.n_original_periods} original periods, {self.nr_of_typical_periods} typical)' + ) + + for storage in storages: + self._add_storage_linking(storage) + + def _add_storage_linking(self, storage) -> None: + """Add inter-period linking constraints for a single storage. + + Args: + storage: Storage component to add linking for. 
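+
+        Example:
+            A minimal numpy sketch (toy values, not flixopt API) of how the recurrence
+            SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]]
+            unrolls over an assumed cluster_order:
+
+            >>> import numpy as np
+            >>> cluster_order = np.array([0, 1, 1, 0, 1])  # assumed: original day -> typical period
+            >>> delta_soc = {0: 3.0, 1: -1.0}  # assumed net SOC change per typical period
+            >>> soc_boundary = np.zeros(len(cluster_order) + 1)
+            >>> for d, c in enumerate(cluster_order):
+            ...     soc_boundary[d + 1] = soc_boundary[d] + delta_soc[int(c)]
+            >>> soc_boundary
+            array([0., 3., 2., 1., 4., 3.])
+
+            A cyclic constraint would additionally force soc_boundary[0] == soc_boundary[-1].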
+ """ + import xarray as xr + + label = storage.label + + # Get the charge state variable from the storage's submodel + charge_state_name = f'{label}|charge_state' + if charge_state_name not in storage.submodel.variables: + logger.warning(f'Storage {label} has no charge_state variable - skipping') + return + + charge_state = storage.submodel.variables[charge_state_name] + + # Get storage capacity bounds + capacity = storage.capacity_in_flow_hours + if hasattr(capacity, 'fixed_size') and capacity.fixed_size is not None: + cap_value = capacity.fixed_size + elif hasattr(capacity, 'maximum') and capacity.maximum is not None: + cap_value = float(capacity.maximum.max().item()) if hasattr(capacity.maximum, 'max') else capacity.maximum + else: + cap_value = 1e9 # Large default + + # Create SOC_boundary variables for each original period boundary + # We need n_original_periods + 1 boundaries (start of first period through end of last) + n_boundaries = self.n_original_periods + 1 + boundary_coords = [np.arange(n_boundaries)] + boundary_dims = ['period_boundary'] + + # Bounds: 0 <= SOC_boundary <= capacity + lb = xr.DataArray(0.0, coords={'period_boundary': np.arange(n_boundaries)}, dims=['period_boundary']) + ub = xr.DataArray(cap_value, coords={'period_boundary': np.arange(n_boundaries)}, dims=['period_boundary']) + + soc_boundary = self.add_variables( + lower=lb, + upper=ub, + coords=boundary_coords, + dims=boundary_dims, + short_name=f'SOC_boundary|{label}', + ) + + # Compute delta_SOC for each typical period + # delta_SOC[c] = charge_state[c, end] - charge_state[c, start] + delta_soc_list = [] + for c in range(self.nr_of_typical_periods): + # Get start and end timestep indices for this typical period + start_idx = c * self.timesteps_per_period + end_idx = (c + 1) * self.timesteps_per_period # charge_state has extra timestep at end + + # charge_state at end - charge_state at start of typical period c + # Note: charge_state is indexed by time with extra timestep + delta = charge_state.isel(time=end_idx) - charge_state.isel(time=start_idx) + delta_soc_list.append(delta) + + # Stack into array indexed by typical_period + delta_soc = xr.concat(delta_soc_list, dim='typical_period') + delta_soc = delta_soc.assign_coords(typical_period=np.arange(self.nr_of_typical_periods)) + + # Create linking constraints: + # SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]] + for d in range(self.n_original_periods): + c = int(self.cluster_order[d]) # Which typical period this original period maps to + lhs = ( + soc_boundary.isel(period_boundary=d + 1) + - soc_boundary.isel(period_boundary=d) + - delta_soc.isel(typical_period=c) + ) + self.add_constraints(lhs == 0, short_name=f'inter_period_link|{label}|{d}') + + # Cyclic constraint: SOC_boundary[0] = SOC_boundary[end] + if self.storage_cyclic: + lhs = soc_boundary.isel(period_boundary=0) - soc_boundary.isel(period_boundary=self.n_original_periods) + self.add_constraints(lhs == 0, short_name=f'cyclic|{label}') + + logger.debug(f'Added inter-period linking for storage {label}') diff --git a/flixopt/features.py b/flixopt/features.py index 4dfe48964..ef1d1e4c8 100644 --- a/flixopt/features.py +++ b/flixopt/features.py @@ -620,8 +620,11 @@ def _do_modeling(self): self._eq_total_per_timestep = self.add_constraints(self.total_per_timestep == 0, short_name='per_timestep') - # Add it to the total - self._eq_total.lhs -= self.total_per_timestep.sum(dim='time') + # Add it to the total (apply timestep weights if available for typical periods) + if 
hasattr(self._model, 'timestep_weights') and self._model.timestep_weights is not None: + self._eq_total.lhs -= (self.total_per_timestep * self._model.timestep_weights).sum(dim='time') + else: + self._eq_total.lhs -= self.total_per_timestep.sum(dim='time') def add_share( self, diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 5d997c299..7abf3f55c 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -219,6 +219,9 @@ def __init__( # Clustering info - populated by transform.cluster() self._clustering_info: dict | None = None + # Typical periods info - populated by transform.cluster_reduce() + self._typical_periods_info: dict | None = None + # Statistics accessor cache - lazily initialized, invalidated on new solution self._statistics: StatisticsAccessor | None = None @@ -1306,6 +1309,7 @@ def build_model(self, normalize_weights: bool = True) -> FlowSystem: 1. Connecting and transforming all elements (if not already done) 2. Creating the FlowSystemModel with all variables and constraints 3. Adding clustering constraints (if this is a clustered FlowSystem) + 4. Adding typical periods modeling (if this is a reduced FlowSystem) After calling this method, `self.model` will be available for inspection before solving. @@ -1323,14 +1327,72 @@ def build_model(self, normalize_weights: bool = True) -> FlowSystem: """ self.connect_and_transform() self.create_model(normalize_weights) + + # Apply timestep weighting before do_modeling() for typical periods + if self._typical_periods_info is not None: + self._apply_timestep_weights() + self.model.do_modeling() # Add clustering constraints if this is a clustered FlowSystem if self._clustering_info is not None: self._add_clustering_constraints() + # Add typical periods storage modeling if this is a reduced FlowSystem + if self._typical_periods_info is not None: + self._add_typical_periods_modeling() + return self + def _apply_timestep_weights(self) -> None: + """Apply timestep weights to the model for typical periods optimization. + + This multiplies operational effects (costs, emissions) by the number of + original periods each typical period represents. + """ + info = self._typical_periods_info + if info is None: + return + + timestep_weights = info['timestep_weights'] + + # Store timestep weights on the model for use in effect calculations + self.model.timestep_weights = xr.DataArray( + timestep_weights, + coords={'time': self.timesteps}, + dims=['time'], + name='timestep_weights', + ) + logger.info(f'Applied timestep weights for typical periods: sum={sum(timestep_weights)}') + + def _add_typical_periods_modeling(self) -> None: + """Add storage inter-period linking for typical periods optimization. + + Creates SOC_boundary variables that link storage states between sequential + periods in the original time series, using the delta SOC from typical periods. 
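+
+        Note:
+            This complements the timestep weighting applied by ``_apply_timestep_weights``:
+            the weights scale operational effects within each typical period, while the
+            boundary variables added here carry storage state across the full horizon.
+            A minimal sketch (toy values, not flixopt API) of what occurrence weighting
+            recovers:
+
+            >>> import numpy as np
+            >>> cluster_occurrences = {0: 200, 1: 165}  # assumed: original days per typical day
+            >>> weights = np.repeat(list(cluster_occurrences.values()), 24)  # 24 timesteps per day
+            >>> cost_per_timestep = np.full(weights.size, 2.5)  # assumed constant unit cost
+            >>> float((cost_per_timestep * weights).sum()) == 365 * 24 * 2.5
+            True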
+ """ + from .clustering import TypicalPeriodsModel + + info = self._typical_periods_info + if info is None: + return + + if not info.get('storage_inter_period_linking', True): + logger.info('Storage inter-period linking disabled') + return + + # Create typical periods model for storage linking + typical_periods_model = TypicalPeriodsModel( + model=self.model, + flow_system=self, + cluster_order=info['cluster_order'], + cluster_occurrences=info['cluster_occurrences'], + nr_of_typical_periods=info['nr_of_typical_periods'], + timesteps_per_period=info['timesteps_per_period'], + storage_cyclic=info.get('storage_cyclic', True), + ) + typical_periods_model.do_modeling() + def _add_clustering_constraints(self) -> None: """Add clustering constraints to the model.""" import copy diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index d6a44a6a9..27286794f 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -11,12 +11,11 @@ from collections import defaultdict from typing import TYPE_CHECKING, Any, Literal +import numpy as np import pandas as pd import xarray as xr if TYPE_CHECKING: - import numpy as np - from .clustering import ClusteringParameters from .flow_system import FlowSystem @@ -1045,6 +1044,202 @@ def fix_sizes( return new_fs + def cluster_reduce( + self, + n_typical_periods: int, + period_duration: str | float, + weights: dict[str, float] | None = None, + time_series_for_high_peaks: list[str] | None = None, + time_series_for_low_peaks: list[str] | None = None, + storage_inter_period_linking: bool = True, + storage_cyclic: bool = True, + ) -> FlowSystem: + """ + Create a FlowSystem with reduced timesteps using typical periods. + + This method creates a new FlowSystem optimized for sizing studies by reducing + the number of timesteps to only the typical (representative) periods identified + through time series aggregation. Unlike `cluster()` which uses equality constraints, + this method actually reduces the problem size for faster solving. + + The method: + 1. Performs time series clustering using tsam + 2. Extracts only the typical periods (not all original timesteps) + 3. Applies timestep weighting for accurate cost representation + 4. Optionally links storage states between periods via boundary variables + + Use this for initial sizing optimization, then use `fix_sizes()` to re-optimize + at full resolution for accurate dispatch results. + + Args: + n_typical_periods: Number of typical periods to extract (e.g., 8 typical days). + period_duration: Duration of each period. Can be a pandas-style string + ('1D', '24h', '6h') or a numeric value in hours. + weights: Optional clustering weights per time series. Keys are time series labels. + time_series_for_high_peaks: Time series labels for explicitly selecting high-value + periods. **Recommended** for demand time series to capture peak demand days. + time_series_for_low_peaks: Time series labels for explicitly selecting low-value periods. + storage_inter_period_linking: If True, link storage states between periods using + boundary variables. This preserves long-term storage behavior. Default: True. + storage_cyclic: If True, enforce SOC_boundary[0] = SOC_boundary[end] for storages. + Only used when storage_inter_period_linking=True. Default: True. + + Returns: + A new FlowSystem with reduced timesteps (only typical periods). + The FlowSystem has metadata stored in `_typical_periods_info` for weighting. + + Raises: + ValueError: If timestep sizes are inconsistent. 
+ ValueError: If period_duration is not a multiple of timestep size. + + Examples: + Two-stage sizing optimization: + + >>> # Stage 1: Size with reduced timesteps (fast) + >>> fs_sizing = flow_system.transform.cluster_reduce( + ... n_typical_periods=8, + ... period_duration='1D', + ... time_series_for_high_peaks=['HeatDemand(Q_th)|fixed_relative_profile'], + ... ) + >>> fs_sizing.optimize(solver) + >>> + >>> # Apply safety margin (typical periods may smooth peaks) + >>> sizes_with_margin = { + ... name: float(size.item()) * 1.05 for name, size in fs_sizing.statistics.sizes.items() + ... } + >>> + >>> # Stage 2: Fix sizes and re-optimize at full resolution + >>> fs_dispatch = flow_system.transform.fix_sizes(sizes_with_margin) + >>> fs_dispatch.optimize(solver) + + Note: + - This is best suited for initial sizing, not final dispatch optimization + - Use `time_series_for_high_peaks` to ensure peak demand periods are captured + - A 5-10% safety margin on sizes is recommended for the dispatch stage + - Storage linking adds SOC_boundary variables to track state between periods + """ + import pandas as pd + + from .clustering import Clustering + from .core import DataConverter, TimeSeriesData, drop_constant_arrays + from .flow_system import FlowSystem + + # Parse period_duration to hours + if isinstance(period_duration, str): + hours_per_period = pd.Timedelta(period_duration).total_seconds() / 3600 + else: + hours_per_period = float(period_duration) + + # Validation + dt_min = float(self._fs.hours_per_timestep.min().item()) + dt_max = float(self._fs.hours_per_timestep.max().item()) + if dt_min != dt_max: + raise ValueError( + f'cluster_reduce() failed due to inconsistent time step sizes: ' + f'delta_t varies from {dt_min} to {dt_max} hours.' + ) + ratio = hours_per_period / dt_max + if not np.isclose(ratio, round(ratio), atol=1e-9): + raise ValueError( + f'The selected period_duration={hours_per_period}h does not match the time ' + f'step size of {dt_max} hours. It must be an integer multiple of {dt_max} hours.' 
+ ) + + timesteps_per_period = int(round(hours_per_period / dt_max)) + + logger.info(f'{"":#^80}') + logger.info(f'{" Creating Typical Periods (Reduced Timesteps) ":#^80}') + + # Get dataset representation + ds = self._fs.to_dataset(include_solution=False) + temporaly_changing_ds = drop_constant_arrays(ds, dim='time') + + # Perform clustering + clustering = Clustering( + original_data=temporaly_changing_ds.to_dataframe(), + hours_per_time_step=float(dt_min), + hours_per_period=hours_per_period, + nr_of_periods=n_typical_periods, + weights=weights or self._calculate_clustering_weights(temporaly_changing_ds), + time_series_for_high_peaks=time_series_for_high_peaks or [], + time_series_for_low_peaks=time_series_for_low_peaks or [], + ) + clustering.cluster() + + # Extract typical periods data from tsam + typical_periods_df = clustering.tsam.typicalPeriods + cluster_order = clustering.tsam.clusterOrder # Order in which clusters appear + cluster_occurrences = clustering.tsam.clusterPeriodNoOccur # {cluster_id: count} + + # Actual number of typical periods (may differ from requested if peak forcing is used) + actual_nr_of_typical_periods = len(cluster_occurrences) + + # Create timestep weights: each typical period timestep represents multiple original timesteps + # Weight = number of original periods this typical period represents + timestep_weights = [] + for typical_period_idx in range(actual_nr_of_typical_periods): + weight = cluster_occurrences.get(typical_period_idx, 1) + timestep_weights.extend([weight] * timesteps_per_period) + + timestep_weights = np.array(timestep_weights) + + logger.info(f'Reduced from {len(self._fs.timesteps)} to {len(typical_periods_df)} timesteps') + logger.info(f'Typical periods: {actual_nr_of_typical_periods} (requested: {n_typical_periods})') + logger.info(f'Cluster occurrences: {cluster_occurrences}') + + # Create new time index for typical periods + # Use a synthetic time index starting from the original start time + original_time = self._fs.timesteps + time_start = original_time[0] + freq = pd.Timedelta(hours=dt_min) + new_time_index = pd.date_range( + start=time_start, + periods=len(typical_periods_df), + freq=freq, + ) + + # Build new dataset with typical periods data + ds_new = self._fs.to_dataset(include_solution=False) + + # Update time-varying data arrays with typical periods values + typical_periods_df.index = new_time_index # Reindex with our new time + for name in typical_periods_df.columns: + if name in ds_new.data_vars: + series = typical_periods_df[name] + da = DataConverter.to_dataarray( + series, + {'time': new_time_index, **{k: v for k, v in self._fs.coords.items() if k != 'time'}}, + ).rename(name) + da = da.assign_attrs(ds_new[name].attrs) + if TimeSeriesData.is_timeseries_data(da): + da = TimeSeriesData.from_dataarray(da) + ds_new[name] = da + + # Update time coordinate + ds_new = ds_new.reindex(time=new_time_index) + + # Update metadata + ds_new.attrs['timesteps_per_period'] = timesteps_per_period + ds_new.attrs['hours_per_timestep'] = dt_min + + # Create new FlowSystem with reduced timesteps + reduced_fs = FlowSystem.from_dataset(ds_new) + + # Store typical periods info for later use during modeling + reduced_fs._typical_periods_info = { + 'clustering': clustering, + 'timestep_weights': timestep_weights, + 'cluster_order': cluster_order, + 'cluster_occurrences': cluster_occurrences, + 'nr_of_typical_periods': actual_nr_of_typical_periods, + 'timesteps_per_period': timesteps_per_period, + 'storage_inter_period_linking': 
storage_inter_period_linking, + 'storage_cyclic': storage_cyclic, + 'original_fs': self._fs, + } + + return reduced_fs + # Future methods can be added here: # # def mga(self, alternatives: int = 5) -> FlowSystem: From f92311a2d614e6bc80304d470e993536457d5e3c Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 14:27:16 +0100 Subject: [PATCH 038/191] Add expand_solution() --- flixopt/transform_accessor.py | 133 ++++++++++++++++++++++++++++++++++ 1 file changed, 133 insertions(+) diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 27286794f..71a30bb75 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -1240,6 +1240,139 @@ def cluster_reduce( return reduced_fs + def expand_solution(self) -> FlowSystem: + """Expand a reduced (typical periods) FlowSystem back to full original timesteps. + + After solving a FlowSystem created with ``cluster_reduce()``, this method + disaggregates the FlowSystem by: + 1. Expanding all time series data from typical periods to full timesteps + 2. Expanding the solution by mapping each typical period back to all + original periods it represents + + This enables using all existing solution accessors (``statistics``, ``plot``, etc.) + with full time resolution, where both the data and solution are consistently + expanded from the typical periods. + + Returns: + FlowSystem: A new FlowSystem with full timesteps and expanded solution. + + Raises: + ValueError: If the FlowSystem was not created with ``cluster_reduce()``. + ValueError: If the FlowSystem has no solution. + + Examples: + Two-stage optimization with solution expansion: + + >>> # Stage 1: Size with reduced timesteps + >>> fs_reduced = flow_system.transform.cluster_reduce( + ... n_typical_periods=8, + ... period_duration='1D', + ... ) + >>> fs_reduced.optimize(solver) + >>> + >>> # Expand to full resolution FlowSystem + >>> fs_expanded = fs_reduced.transform.expand_solution() + >>> + >>> # Use all existing accessors with full timesteps + >>> fs_expanded.statistics.flow_rates # Full 8760 timesteps + >>> fs_expanded.statistics.plot.balance('HeatBus') # Full resolution plots + >>> fs_expanded.statistics.plot.heatmap('Boiler(Q_th)|flow_rate') + + Note: + The expanded FlowSystem repeats the typical period values for all + periods belonging to the same cluster. Both input data and solution + are consistently expanded, so they match. This is an approximation - + the actual dispatch at full resolution would differ due to + intra-period variations in time series data. + + For accurate dispatch results, use ``fix_sizes()`` to fix the sizes + from the reduced optimization and re-optimize at full resolution. + """ + import numpy as np + + from .flow_system import FlowSystem + + # Validate + if not hasattr(self._fs, '_typical_periods_info') or self._fs._typical_periods_info is None: + raise ValueError( + 'expand_solution() requires a FlowSystem created with cluster_reduce(). ' + 'This FlowSystem has no typical periods info.' + ) + + if self._fs.solution is None: + raise ValueError('FlowSystem has no solution. 
Run optimize() or solve() first.') + + info = self._fs._typical_periods_info + cluster_order = info['cluster_order'] + timesteps_per_period = info['timesteps_per_period'] + original_fs: FlowSystem = info['original_fs'] + n_typical_periods = info['nr_of_typical_periods'] + + # Get original timesteps from the original FlowSystem + original_timesteps = original_fs.timesteps + n_original_timesteps = len(original_timesteps) + n_reduced_timesteps = n_typical_periods * timesteps_per_period + + # Build mapping: for each original timestep, which reduced timestep to copy from + mapping = np.zeros(n_original_timesteps, dtype=np.int32) + + for orig_ts_idx in range(n_original_timesteps): + # Which original period does this timestep belong to? + orig_period_idx = orig_ts_idx // timesteps_per_period + # Position within the period + pos_in_period = orig_ts_idx % timesteps_per_period + + # Which cluster (typical period) does this original period map to? + cluster_id = cluster_order[orig_period_idx] if orig_period_idx < len(cluster_order) else 0 + + # The corresponding timestep in the reduced solution + reduced_ts_idx = cluster_id * timesteps_per_period + pos_in_period + + # Ensure we don't exceed reduced solution bounds + mapping[orig_ts_idx] = min(reduced_ts_idx, n_reduced_timesteps - 1) + + # Helper function to expand time-dependent data + def expand_time_data(da: xr.DataArray) -> xr.DataArray: + if 'time' not in da.dims: + return da.copy() + expanded_da = da.isel(time=xr.DataArray(mapping, dims=['time'])) + expanded_da = expanded_da.assign_coords(time=original_timesteps) + return expanded_da.assign_attrs(da.attrs) + + # 1. Expand the FlowSystem's data (input time series) + reduced_ds = self._fs.to_dataset(include_solution=False) + expanded_ds_data = {} + + for var_name in reduced_ds.data_vars: + expanded_ds_data[var_name] = expand_time_data(reduced_ds[var_name]) + + # Update coordinates + expanded_ds = xr.Dataset(expanded_ds_data, attrs=reduced_ds.attrs) + expanded_ds = expanded_ds.assign_coords(time=original_timesteps) + + # Copy hours_per_timestep from original + expanded_ds.attrs['hours_per_timestep'] = original_fs.hours_per_timestep.values.tolist() + + # Create the expanded FlowSystem from the expanded dataset + expanded_fs = FlowSystem.from_dataset(expanded_ds) + + # 2. 
Expand the solution + reduced_solution = self._fs.solution + expanded_solution_data = {} + + for var_name in reduced_solution.data_vars: + expanded_solution_data[var_name] = expand_time_data(reduced_solution[var_name]) + + expanded_solution = xr.Dataset(expanded_solution_data, attrs=reduced_solution.attrs) + expanded_fs._solution = expanded_solution + + logger.info( + f'Expanded FlowSystem from {n_reduced_timesteps} to {n_original_timesteps} timesteps ' + f'({n_typical_periods} typical periods → {len(cluster_order)} original periods)' + ) + + return expanded_fs + # Future methods can be added here: # # def mga(self, alternatives: int = 5) -> FlowSystem: From 68be8f59d18d99dd3464ebcaf5e09aea45a2bc68 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 14:32:40 +0100 Subject: [PATCH 039/191] fix --- flixopt/clustering.py | 24 ++++++++---------------- flixopt/transform_accessor.py | 10 ++++------ 2 files changed, 12 insertions(+), 22 deletions(-) diff --git a/flixopt/clustering.py b/flixopt/clustering.py index f9d96924e..4660b24f0 100644 --- a/flixopt/clustering.py +++ b/flixopt/clustering.py @@ -886,7 +886,7 @@ def __init__( flow_system: FlowSystem, cluster_order: np.ndarray | list, cluster_occurrences: dict[int, int], - nr_of_typical_periods: int, + n_typical_periods: int, timesteps_per_period: int, storage_cyclic: bool = True, ): @@ -898,7 +898,7 @@ def __init__( period belongs to. Length = n_original_periods. cluster_occurrences: Dict mapping cluster_id to number of original periods it represents. - nr_of_typical_periods: Number of typical (representative) periods. + n_typical_periods: Number of typical (representative) periods. timesteps_per_period: Number of timesteps in each period. storage_cyclic: If True, enforce SOC_boundary[0] = SOC_boundary[end]. 
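+
+            Example:
+                A toy illustration (assumed values, not flixopt API) of how
+                ``cluster_occurrences`` relates to ``cluster_order``:
+
+                >>> from collections import Counter
+                >>> cluster_order = [0, 1, 1, 0, 1]  # 5 original periods
+                >>> dict(Counter(cluster_order))
+                {0: 2, 1: 3}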
""" @@ -906,7 +906,7 @@ def __init__( self.flow_system = flow_system self.cluster_order = np.array(cluster_order) self.cluster_occurrences = cluster_occurrences - self.nr_of_typical_periods = nr_of_typical_periods + self.n_typical_periods = n_typical_periods self.timesteps_per_period = timesteps_per_period self.storage_cyclic = storage_cyclic self.n_original_periods = len(self.cluster_order) @@ -978,9 +978,10 @@ def _add_storage_linking(self, storage) -> None: short_name=f'SOC_boundary|{label}', ) - # Compute delta_SOC for each typical period + # Pre-compute delta_SOC for each typical period # delta_SOC[c] = charge_state[c, end] - charge_state[c, start] - delta_soc_list = [] + # We store these as a dict since linopy expressions can't be concat'd with xr.concat + delta_soc_dict = {} for c in range(self.nr_of_typical_periods): # Get start and end timestep indices for this typical period start_idx = c * self.timesteps_per_period @@ -988,22 +989,13 @@ def _add_storage_linking(self, storage) -> None: # charge_state at end - charge_state at start of typical period c # Note: charge_state is indexed by time with extra timestep - delta = charge_state.isel(time=end_idx) - charge_state.isel(time=start_idx) - delta_soc_list.append(delta) - - # Stack into array indexed by typical_period - delta_soc = xr.concat(delta_soc_list, dim='typical_period') - delta_soc = delta_soc.assign_coords(typical_period=np.arange(self.nr_of_typical_periods)) + delta_soc_dict[c] = charge_state.isel(time=end_idx) - charge_state.isel(time=start_idx) # Create linking constraints: # SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]] for d in range(self.n_original_periods): c = int(self.cluster_order[d]) # Which typical period this original period maps to - lhs = ( - soc_boundary.isel(period_boundary=d + 1) - - soc_boundary.isel(period_boundary=d) - - delta_soc.isel(typical_period=c) - ) + lhs = soc_boundary.isel(period_boundary=d + 1) - soc_boundary.isel(period_boundary=d) - delta_soc_dict[c] self.add_constraints(lhs == 0, short_name=f'inter_period_link|{label}|{d}') # Cyclic constraint: SOC_boundary[0] = SOC_boundary[end] diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 71a30bb75..2574e4521 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -1118,8 +1118,6 @@ def cluster_reduce( - A 5-10% safety margin on sizes is recommended for the dispatch stage - Storage linking adds SOC_boundary variables to track state between periods """ - import pandas as pd - from .clustering import Clustering from .core import DataConverter, TimeSeriesData, drop_constant_arrays from .flow_system import FlowSystem @@ -1172,19 +1170,19 @@ def cluster_reduce( cluster_occurrences = clustering.tsam.clusterPeriodNoOccur # {cluster_id: count} # Actual number of typical periods (may differ from requested if peak forcing is used) - actual_nr_of_typical_periods = len(cluster_occurrences) + actual_n_typical_periods = len(cluster_occurrences) # Create timestep weights: each typical period timestep represents multiple original timesteps # Weight = number of original periods this typical period represents timestep_weights = [] - for typical_period_idx in range(actual_nr_of_typical_periods): + for typical_period_idx in range(actual_n_typical_periods): weight = cluster_occurrences.get(typical_period_idx, 1) timestep_weights.extend([weight] * timesteps_per_period) timestep_weights = np.array(timestep_weights) logger.info(f'Reduced from {len(self._fs.timesteps)} to {len(typical_periods_df)} 
timesteps') - logger.info(f'Typical periods: {actual_nr_of_typical_periods} (requested: {n_typical_periods})') + logger.info(f'Typical periods: {actual_n_typical_periods} (requested: {n_typical_periods})') logger.info(f'Cluster occurrences: {cluster_occurrences}') # Create new time index for typical periods @@ -1231,7 +1229,7 @@ def cluster_reduce( 'timestep_weights': timestep_weights, 'cluster_order': cluster_order, 'cluster_occurrences': cluster_occurrences, - 'nr_of_typical_periods': actual_nr_of_typical_periods, + 'n_typical_periods': actual_n_typical_periods, 'timesteps_per_period': timesteps_per_period, 'storage_inter_period_linking': storage_inter_period_linking, 'storage_cyclic': storage_cyclic, From 4102148a2fb927402e04d2ac4cbe8f061b1209f1 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 14:32:48 +0100 Subject: [PATCH 040/191] Add notebook --- docs/notebooks/08e-cluster-and-reduce.ipynb | 525 ++++++++++++++++++++ 1 file changed, 525 insertions(+) create mode 100644 docs/notebooks/08e-cluster-and-reduce.ipynb diff --git a/docs/notebooks/08e-cluster-and-reduce.ipynb b/docs/notebooks/08e-cluster-and-reduce.ipynb new file mode 100644 index 000000000..090f88167 --- /dev/null +++ b/docs/notebooks/08e-cluster-and-reduce.ipynb @@ -0,0 +1,525 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Typical Periods Optimization with `cluster_reduce()`\n", + "\n", + "This notebook demonstrates the new `cluster_reduce()` method for fast sizing optimization using typical periods.\n", + "\n", + "## Key Concept\n", + "\n", + "Unlike `cluster()` which uses equality constraints (same number of timesteps), `cluster_reduce()` **actually reduces** the number of timesteps:\n", + "\n", + "| Method | Timesteps | Mechanism | Use Case |\n", + "|--------|-----------|-----------|----------|\n", + "| `cluster()` | 8760 | Equality constraints | Accurate operational dispatch |\n", + "| `cluster_reduce()` | 192 (8×24) | Typical periods only | Fast initial sizing |\n", + "\n", + "## Features\n", + "\n", + "- **Actual timestep reduction**: Only solves for typical periods (e.g., 8 days × 24h = 192 instead of 8760)\n", + "- **Timestep weighting**: Operational costs are weighted by cluster occurrence\n", + "- **Inter-period storage linking**: SOC_boundary variables track storage state across original periods\n", + "- **Cyclic constraint**: Optional cyclic storage constraint for long-term balance\n", + "\n", + "!!! note \"Requirements\"\n", + " This notebook requires the `tsam` package: `pip install tsam`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import timeit\n", + "\n", + "import numpy as np\n", + "import pandas as pd\n", + "import plotly.graph_objects as go\n", + "from plotly.subplots import make_subplots\n", + "\n", + "import flixopt as fx\n", + "\n", + "fx.CONFIG.notebook()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create a Full-Year Example System\n", + "\n", + "We'll create a simple district heating system with a full year of hourly data." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Generate synthetic yearly data\n", + "np.random.seed(42)\n", + "hours = 8760 # Full year hourly\n", + "\n", + "# Create realistic heat demand profile (seasonal + daily patterns)\n", + "t = np.arange(hours)\n", + "seasonal = 50 + 40 * np.cos(2 * np.pi * t / 8760) # Higher in winter\n", + "daily = 10 * np.sin(2 * np.pi * t / 24 - np.pi / 2) # Peak in morning/evening\n", + "noise = np.random.normal(0, 5, hours)\n", + "heat_demand = np.maximum(seasonal + daily + noise, 10)\n", + "\n", + "# Create electricity price profile (higher during day, lower at night)\n", + "hour_of_day = t % 24\n", + "elec_price = 50 + 30 * np.sin(np.pi * hour_of_day / 12) + np.random.normal(0, 5, hours)\n", + "elec_price = np.maximum(elec_price, 20)\n", + "\n", + "timesteps = pd.date_range('2020-01-01', periods=hours, freq='h')\n", + "\n", + "print(f'Created {hours} hourly timesteps ({hours / 24:.0f} days)')\n", + "print(f'Heat demand range: {heat_demand.min():.1f} - {heat_demand.max():.1f} MW')\n", + "print(f'Electricity price range: {elec_price.min():.1f} - {elec_price.max():.1f} EUR/MWh')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Visualize first month of data\n", + "fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.1)\n", + "\n", + "fig.add_trace(go.Scatter(x=timesteps[:720], y=heat_demand[:720], name='Heat Demand'), row=1, col=1)\n", + "fig.add_trace(go.Scatter(x=timesteps[:720], y=elec_price[:720], name='Electricity Price'), row=2, col=1)\n", + "\n", + "fig.update_layout(height=400, title='First Month of Data')\n", + "fig.update_yaxes(title_text='Heat Demand [MW]', row=1, col=1)\n", + "fig.update_yaxes(title_text='El. 
Price [EUR/MWh]', row=2, col=1)\n", + "fig.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def create_flow_system():\n", + " \"\"\"Create the district heating FlowSystem.\"\"\"\n", + " fs = fx.FlowSystem(timesteps=timesteps)\n", + "\n", + " # Effects\n", + " costs = fx.Effect(label='costs', unit='EUR', is_objective=True)\n", + "\n", + " # Buses\n", + " heat_bus = fx.Bus('Heat')\n", + " elec_bus = fx.Bus('Electricity')\n", + " gas_bus = fx.Bus('Gas')\n", + "\n", + " fs.add_elements(costs, heat_bus, elec_bus, gas_bus)\n", + "\n", + " # Gas supply\n", + " gas_supply = fx.Source(\n", + " 'GasSupply',\n", + " outputs=[fx.Flow('gas_out', bus='Gas', size=500, effects_per_flow_hour={'costs': 35})],\n", + " )\n", + "\n", + " # Electricity grid\n", + " grid_buy = fx.Source(\n", + " 'GridBuy',\n", + " outputs=[fx.Flow('elec_out', bus='Electricity', size=200, effects_per_flow_hour={'costs': elec_price})],\n", + " )\n", + "\n", + " grid_sell = fx.Sink(\n", + " 'GridSell',\n", + " inputs=[fx.Flow('elec_in', bus='Electricity', size=200, effects_per_flow_hour={'costs': -elec_price * 0.9})],\n", + " )\n", + "\n", + " # Boiler (investment)\n", + " boiler = fx.linear_converters.Boiler(\n", + " 'Boiler',\n", + " thermal_efficiency=0.9,\n", + " thermal_flow=fx.Flow(\n", + " 'Q_th',\n", + " bus='Heat',\n", + " size=fx.InvestParameters(minimum_size=0, maximum_size=200, effects_of_investment_per_size={'costs': 50000}),\n", + " ),\n", + " fuel_flow=fx.Flow('Q_fu', bus='Gas'),\n", + " )\n", + "\n", + " # CHP (investment)\n", + " chp = fx.linear_converters.CHP(\n", + " 'CHP',\n", + " thermal_efficiency=0.45,\n", + " electrical_efficiency=0.35,\n", + " thermal_flow=fx.Flow(\n", + " 'Q_th',\n", + " bus='Heat',\n", + " size=fx.InvestParameters(\n", + " minimum_size=0, maximum_size=150, effects_of_investment_per_size={'costs': 150000}\n", + " ),\n", + " ),\n", + " electrical_flow=fx.Flow('P_el', bus='Electricity'),\n", + " fuel_flow=fx.Flow('Q_fu', bus='Gas'),\n", + " )\n", + "\n", + " # Heat storage (investment)\n", + " storage = fx.Storage(\n", + " 'ThermalStorage',\n", + " charging=fx.Flow('charge', bus='Heat', size=50),\n", + " discharging=fx.Flow('discharge', bus='Heat', size=50),\n", + " capacity_in_flow_hours=fx.InvestParameters(\n", + " minimum_size=0, maximum_size=500, effects_of_investment_per_size={'costs': 20000}\n", + " ),\n", + " eta_charge=0.95,\n", + " eta_discharge=0.95,\n", + " relative_loss_per_hour=0.005,\n", + " initial_charge_state='equals_final',\n", + " )\n", + "\n", + " # Heat demand\n", + " demand = fx.Sink(\n", + " 'HeatDemand',\n", + " inputs=[fx.Flow('Q_th', bus='Heat', size=1, fixed_relative_profile=heat_demand)],\n", + " )\n", + "\n", + " fs.add_elements(gas_supply, grid_buy, grid_sell, boiler, chp, storage, demand)\n", + "\n", + " return fs\n", + "\n", + "\n", + "# Create the system\n", + "flow_system = create_flow_system()\n", + "print(f'FlowSystem created with {len(flow_system.timesteps)} timesteps')\n", + "print(f'Components: {list(flow_system.components.keys())}')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Method 1: Full Optimization (Baseline)\n", + "\n", + "First, let's solve the full problem with all 8760 timesteps." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "solver = fx.solvers.HighsSolver(mip_gap=0.01)\n", + "\n", + "start = timeit.default_timer()\n", + "fs_full = create_flow_system()\n", + "fs_full.optimize(solver)\n", + "time_full = timeit.default_timer() - start\n", + "\n", + "print(f'Full optimization: {time_full:.2f} seconds')\n", + "print(f'Total cost: {fs_full.solution[\"costs\"].item():,.0f} EUR')\n", + "print('\\nOptimized sizes:')\n", + "for name, size in fs_full.statistics.sizes.items():\n", + " print(f' {name}: {float(size.item()):.1f}')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Method 2: Typical Periods with `cluster_reduce()`\n", + "\n", + "Now let's use the new `cluster_reduce()` method to solve with only 8 typical days (192 timesteps).\n", + "\n", + "**Important**: Use `time_series_for_high_peaks` to force inclusion of peak demand periods. Without this, the typical periods may miss extreme peaks, leading to undersized components that cause infeasibility in the full-resolution dispatch stage." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "start = timeit.default_timer()\n", + "\n", + "# IMPORTANT: Use time_series_for_high_peaks to force inclusion of peak demand periods!\n", + "# Without this, the typical periods may miss extreme peaks, leading to undersized components.\n", + "# The format is the column name in the internal dataframe: 'ComponentName(FlowName)|attribute'\n", + "peak_forcing_series = ['HeatDemand(Q_th)|fixed_relative_profile']\n", + "\n", + "# Create reduced FlowSystem with 8 typical days\n", + "fs_reduced = create_flow_system().transform.cluster_reduce(\n", + " hours_per_period=24, # 24 hours per period (daily)\n", + " nr_of_typical_periods=8, # 8 typical days\n", + " time_series_for_high_peaks=peak_forcing_series, # Force inclusion of peak demand day!\n", + " storage_inter_period_linking=True, # Link storage states between periods\n", + " storage_cyclic=True, # Cyclic constraint: SOC[0] = SOC[end]\n", + ")\n", + "\n", + "time_clustering = timeit.default_timer() - start\n", + "print(f'Clustering time: {time_clustering:.2f} seconds')\n", + "print(f'Reduced from {len(flow_system.timesteps)} to {len(fs_reduced.timesteps)} timesteps')\n", + "print(f'Timestep weights (cluster occurrences): {np.unique(fs_reduced._typical_periods_info[\"timestep_weights\"])}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Optimize the reduced system\n", + "start = timeit.default_timer()\n", + "fs_reduced.optimize(solver)\n", + "time_reduced = timeit.default_timer() - start\n", + "\n", + "print(f'Reduced optimization: {time_reduced:.2f} seconds')\n", + "print(f'Total cost: {fs_reduced.solution[\"costs\"].item():,.0f} EUR')\n", + "print(f'Speedup vs full: {time_full / (time_clustering + time_reduced):.1f}x')\n", + "print('\\nOptimized sizes:')\n", + "for name, size in fs_reduced.statistics.sizes.items():\n", + " print(f' {name}: {float(size.item()):.1f}')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Method 3: Two-Stage Workflow\n", + "\n", + "The recommended workflow:\n", + "1. **Stage 1**: Fast sizing with `cluster_reduce()`\n", + "2. 
**Stage 2**: Fix sizes (with safety margin) and re-optimize for accurate dispatch\n", + "\n", + "**Note**: Typical periods aggregate similar days, so individual days within a cluster may have higher demand than the typical day. Adding a 5-10% safety margin to sizes helps ensure feasibility." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Stage 1: Fast sizing (already done above)\n", + "print('Stage 1: Sizing with typical periods')\n", + "print(f' Time: {time_clustering + time_reduced:.2f} seconds')\n", + "print(f' Cost estimate: {fs_reduced.solution[\"costs\"].item():,.0f} EUR')\n", + "\n", + "# Apply safety margin to sizes (5-10% buffer for demand variability)\n", + "SAFETY_MARGIN = 1.05 # 5% buffer\n", + "sizes_with_margin = {name: float(size.item()) * SAFETY_MARGIN for name, size in fs_reduced.statistics.sizes.items()}\n", + "print(f'\\nSizes with {(SAFETY_MARGIN - 1) * 100:.0f}% safety margin:')\n", + "for name, size in sizes_with_margin.items():\n", + " original = fs_reduced.statistics.sizes[name].item()\n", + " print(f' {name}: {original:.1f} -> {size:.1f}')\n", + "\n", + "# Stage 2: Fix sizes and re-optimize at full resolution\n", + "print('\\nStage 2: Dispatch at full resolution')\n", + "start = timeit.default_timer()\n", + "\n", + "fs_dispatch = create_flow_system().transform.fix_sizes(sizes_with_margin)\n", + "fs_dispatch.optimize(solver)\n", + "\n", + "time_dispatch = timeit.default_timer() - start\n", + "print(f' Time: {time_dispatch:.2f} seconds')\n", + "print(f' Actual cost: {fs_dispatch.solution[\"costs\"].item():,.0f} EUR')\n", + "\n", + "# Total time comparison\n", + "total_two_stage = time_clustering + time_reduced + time_dispatch\n", + "print(f'\\nTotal two-stage time: {total_two_stage:.2f} seconds')\n", + "print(f'Full optimization time: {time_full:.2f} seconds')\n", + "print(f'Two-stage speedup: {time_full / total_two_stage:.1f}x')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Compare Results" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "results = {\n", + " 'Full (baseline)': {\n", + " 'Time [s]': time_full,\n", + " 'Cost [EUR]': fs_full.solution['costs'].item(),\n", + " 'Boiler Size': fs_full.statistics.sizes['Boiler(Q_th)'].item(),\n", + " 'CHP Size': fs_full.statistics.sizes['CHP(Q_th)'].item(),\n", + " 'Storage Size': fs_full.statistics.sizes['ThermalStorage'].item(),\n", + " },\n", + " 'Typical Periods (sizing)': {\n", + " 'Time [s]': time_clustering + time_reduced,\n", + " 'Cost [EUR]': fs_reduced.solution['costs'].item(),\n", + " 'Boiler Size': fs_reduced.statistics.sizes['Boiler(Q_th)'].item(),\n", + " 'CHP Size': fs_reduced.statistics.sizes['CHP(Q_th)'].item(),\n", + " 'Storage Size': fs_reduced.statistics.sizes['ThermalStorage'].item(),\n", + " },\n", + " 'Two-Stage (with margin)': {\n", + " 'Time [s]': total_two_stage,\n", + " 'Cost [EUR]': fs_dispatch.solution['costs'].item(),\n", + " 'Boiler Size': sizes_with_margin['Boiler(Q_th)'],\n", + " 'CHP Size': sizes_with_margin['CHP(Q_th)'],\n", + " 'Storage Size': sizes_with_margin['ThermalStorage'],\n", + " },\n", + "}\n", + "\n", + "comparison = pd.DataFrame(results).T\n", + "baseline_cost = comparison.loc['Full (baseline)', 'Cost [EUR]']\n", + "baseline_time = comparison.loc['Full (baseline)', 'Time [s]']\n", + "comparison['Cost Gap [%]'] = ((comparison['Cost [EUR]'] - baseline_cost) / abs(baseline_cost) * 100).round(2)\n", + 
"comparison['Speedup'] = (baseline_time / comparison['Time [s]']).round(1)\n", + "\n", + "comparison.style.format(\n", + " {\n", + " 'Time [s]': '{:.2f}',\n", + " 'Cost [EUR]': '{:,.0f}',\n", + " 'Boiler Size': '{:.1f}',\n", + " 'CHP Size': '{:.1f}',\n", + " 'Storage Size': '{:.0f}',\n", + " 'Cost Gap [%]': '{:.2f}',\n", + " 'Speedup': '{:.1f}x',\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Inter-Period Storage Linking\n", + "\n", + "The `cluster_reduce()` method creates special constraints to track storage state across original periods:\n", + "\n", + "- **SOC_boundary[d]**: Storage state at the boundary of original period d\n", + "- **delta_SOC[c]**: Change in SOC during typical period c\n", + "- **Linking**: `SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]]`\n", + "- **Cyclic**: `SOC_boundary[0] = SOC_boundary[end]` (optional)\n", + "\n", + "This ensures long-term storage behavior is captured correctly even though we only solve for typical periods." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Show clustering info\n", + "info = fs_reduced._typical_periods_info\n", + "print('Typical Periods Configuration:')\n", + "print(f' Number of typical periods: {info[\"nr_of_typical_periods\"]}')\n", + "print(f' Timesteps per period: {info[\"timesteps_per_period\"]}')\n", + "print(f' Total reduced timesteps: {info[\"nr_of_typical_periods\"] * info[\"timesteps_per_period\"]}')\n", + "print(f' Cluster order (first 10): {info[\"cluster_order\"][:10]}...')\n", + "print(f' Cluster occurrences: {dict(info[\"cluster_occurrences\"])}')\n", + "print(f' Storage inter-period linking: {info[\"storage_inter_period_linking\"]}')\n", + "print(f' Storage cyclic: {info[\"storage_cyclic\"]}')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## API Reference\n", + "\n", + "### `transform.cluster_reduce()` Parameters\n", + "\n", + "| Parameter | Type | Description |\n", + "|-----------|------|-------------|\n", + "| `hours_per_period` | `float` | Duration of each period in hours (e.g., 24 for daily) |\n", + "| `nr_of_typical_periods` | `int` | Number of typical periods to extract (e.g., 8) |\n", + "| `weights` | `dict[str, float]` | Optional weights for clustering each time series |\n", + "| `time_series_for_high_peaks` | `list[str]` | **IMPORTANT**: Force inclusion of high-value periods to capture peak demands |\n", + "| `time_series_for_low_peaks` | `list[str]` | Force inclusion of low-value periods |\n", + "| `storage_inter_period_linking` | `bool` | Link storage states between periods (default: True) |\n", + "| `storage_cyclic` | `bool` | Enforce cyclic storage constraint (default: True) |\n", + "\n", + "### Peak Forcing\n", + "\n", + "**Always use `time_series_for_high_peaks`** for demand time series to ensure extreme peaks are captured. 
The format is:\n", + "```python\n", + "time_series_for_high_peaks=['ComponentName(FlowName)|fixed_relative_profile']\n", + "```\n", + "\n", + "Without peak forcing, the clustering algorithm may select typical periods that don't include the peak demand day, leading to undersized components and infeasibility in the dispatch stage.\n", + "\n", + "### Comparison with `cluster()`\n", + "\n", + "| Feature | `cluster()` | `cluster_reduce()` |\n", + "|---------|-------------|--------------------|\n", + "| Timesteps | Original (8760) | Reduced (e.g., 192) |\n", + "| Mechanism | Equality constraints | Typical periods only |\n", + "| Solve time | Moderate reduction | Dramatic reduction |\n", + "| Accuracy | Higher | Lower (sizing only) |\n", + "| Storage handling | Via constraints | SOC boundary linking |\n", + "| Use case | Final dispatch | Initial sizing |" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Summary\n", + "\n", + "The new `cluster_reduce()` method provides:\n", + "\n", + "1. **Dramatic speedup** for sizing optimization by reducing timesteps\n", + "2. **Proper cost weighting** so operational costs reflect cluster occurrences\n", + "3. **Storage state tracking** across original periods via SOC_boundary variables\n", + "4. **Two-stage workflow** support via `fix_sizes()` for accurate dispatch\n", + "\n", + "### Recommended Workflow\n", + "\n", + "```python\n", + "# Stage 1: Fast sizing with typical periods\n", + "fs_sizing = flow_system.transform.cluster_reduce(\n", + " hours_per_period=24,\n", + " nr_of_typical_periods=8,\n", + " time_series_for_high_peaks=['DemandComponent(FlowName)|fixed_relative_profile'],\n", + ")\n", + "fs_sizing.optimize(solver)\n", + "\n", + "# Apply safety margin (typical periods aggregate, so individual days may exceed)\n", + "SAFETY_MARGIN = 1.05 # 5% buffer\n", + "sizes_with_margin = {\n", + " name: float(size.item()) * SAFETY_MARGIN\n", + " for name, size in fs_sizing.statistics.sizes.items()\n", + "}\n", + "\n", + "# Stage 2: Fix sizes and optimize dispatch at full resolution\n", + "fs_dispatch = flow_system.transform.fix_sizes(sizes_with_margin)\n", + "fs_dispatch.optimize(solver)\n", + "```\n", + "\n", + "### Key Considerations\n", + "\n", + "- **Peak forcing is essential**: Use `time_series_for_high_peaks` to capture peak demand days\n", + "- **Safety margin recommended**: Add 5-10% buffer to sizes since aggregation smooths peaks\n", + "- **Two-stage is recommended**: Use `cluster_reduce()` for fast sizing, then `fix_sizes()` for dispatch\n", + "- **Storage linking preserves long-term behavior**: SOC_boundary variables ensure correct storage cycling" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.11.0" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} From 0eac20a7543b847bbc5ff0ebb72c01e9e07f84d1 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 14:34:34 +0100 Subject: [PATCH 041/191] fix --- flixopt/clustering.py | 4 ++-- flixopt/flow_system.py | 2 +- flixopt/transform_accessor.py | 10 +++++----- 3 files changed, 8 insertions(+), 8 deletions(-) diff --git a/flixopt/clustering.py b/flixopt/clustering.py index 4660b24f0..a92181010 100644 --- a/flixopt/clustering.py +++ b/flixopt/clustering.py @@ -927,7 +927,7 @@ def do_modeling(self): logger.info( f'Adding inter-period storage linking for {len(storages)} storages ' - 
f'({self.n_original_periods} original periods, {self.nr_of_typical_periods} typical)' + f'({self.n_original_periods} original periods, {self.n_typical_periods} typical)' ) for storage in storages: @@ -982,7 +982,7 @@ def _add_storage_linking(self, storage) -> None: # delta_SOC[c] = charge_state[c, end] - charge_state[c, start] # We store these as a dict since linopy expressions can't be concat'd with xr.concat delta_soc_dict = {} - for c in range(self.nr_of_typical_periods): + for c in range(self.n_typical_periods): # Get start and end timestep indices for this typical period start_idx = c * self.timesteps_per_period end_idx = (c + 1) * self.timesteps_per_period # charge_state has extra timestep at end diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 7abf3f55c..d900a3f7b 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -1387,7 +1387,7 @@ def _add_typical_periods_modeling(self) -> None: flow_system=self, cluster_order=info['cluster_order'], cluster_occurrences=info['cluster_occurrences'], - nr_of_typical_periods=info['nr_of_typical_periods'], + n_typical_periods=info['n_typical_periods'], timesteps_per_period=info['timesteps_per_period'], storage_cyclic=info.get('storage_cyclic', True), ) diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 2574e4521..d72f0c036 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -1046,8 +1046,8 @@ def fix_sizes( def cluster_reduce( self, - n_typical_periods: int, period_duration: str | float, + n_typical_periods: int, weights: dict[str, float] | None = None, time_series_for_high_peaks: list[str] | None = None, time_series_for_low_peaks: list[str] | None = None, @@ -1072,9 +1072,9 @@ def cluster_reduce( at full resolution for accurate dispatch results. Args: - n_typical_periods: Number of typical periods to extract (e.g., 8 typical days). period_duration: Duration of each period. Can be a pandas-style string ('1D', '24h', '6h') or a numeric value in hours. + n_typical_periods: Number of typical periods to extract (e.g., 8 typical days). weights: Optional clustering weights per time series. Keys are time series labels. time_series_for_high_peaks: Time series labels for explicitly selecting high-value periods. **Recommended** for demand time series to capture peak demand days. @@ -1097,8 +1097,8 @@ def cluster_reduce( >>> # Stage 1: Size with reduced timesteps (fast) >>> fs_sizing = flow_system.transform.cluster_reduce( - ... n_typical_periods=8, ... period_duration='1D', + ... n_typical_periods=8, ... time_series_for_high_peaks=['HeatDemand(Q_th)|fixed_relative_profile'], ... ) >>> fs_sizing.optimize(solver) @@ -1263,8 +1263,8 @@ def expand_solution(self) -> FlowSystem: >>> # Stage 1: Size with reduced timesteps >>> fs_reduced = flow_system.transform.cluster_reduce( - ... n_typical_periods=8, ... period_duration='1D', + ... n_typical_periods=8, ... 
) >>> fs_reduced.optimize(solver) >>> @@ -1304,7 +1304,7 @@ def expand_solution(self) -> FlowSystem: cluster_order = info['cluster_order'] timesteps_per_period = info['timesteps_per_period'] original_fs: FlowSystem = info['original_fs'] - n_typical_periods = info['nr_of_typical_periods'] + n_typical_periods = info['n_typical_periods'] # Get original timesteps from the original FlowSystem original_timesteps = original_fs.timesteps From 85a73cb7e33be08dac06b05c495e97775c0dfc09 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 14:53:23 +0100 Subject: [PATCH 042/191] Improve wording --- flixopt/flow_system.py | 20 +++--- flixopt/transform_accessor.py | 132 +++++++++++++++++----------------- 2 files changed, 76 insertions(+), 76 deletions(-) diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index d900a3f7b..70aaa55ee 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -220,7 +220,7 @@ def __init__( self._clustering_info: dict | None = None # Typical periods info - populated by transform.cluster_reduce() - self._typical_periods_info: dict | None = None + self._cluster_info: dict | None = None # Statistics accessor cache - lazily initialized, invalidated on new solution self._statistics: StatisticsAccessor | None = None @@ -1328,8 +1328,8 @@ def build_model(self, normalize_weights: bool = True) -> FlowSystem: self.connect_and_transform() self.create_model(normalize_weights) - # Apply timestep weighting before do_modeling() for typical periods - if self._typical_periods_info is not None: + # Apply timestep weighting before do_modeling() for cluster_reduce() + if self._cluster_info is not None: self._apply_timestep_weights() self.model.do_modeling() @@ -1339,18 +1339,18 @@ def build_model(self, normalize_weights: bool = True) -> FlowSystem: self._add_clustering_constraints() # Add typical periods storage modeling if this is a reduced FlowSystem - if self._typical_periods_info is not None: + if self._cluster_info is not None: self._add_typical_periods_modeling() return self def _apply_timestep_weights(self) -> None: - """Apply timestep weights to the model for typical periods optimization. + """Apply timestep weights to the model for cluster_reduce() optimization. This multiplies operational effects (costs, emissions) by the number of - original periods each typical period represents. + original segments each typical cluster represents. 
""" - info = self._typical_periods_info + info = self._cluster_info if info is None: return @@ -1373,7 +1373,7 @@ def _add_typical_periods_modeling(self) -> None: """ from .clustering import TypicalPeriodsModel - info = self._typical_periods_info + info = self._cluster_info if info is None: return @@ -1387,8 +1387,8 @@ def _add_typical_periods_modeling(self) -> None: flow_system=self, cluster_order=info['cluster_order'], cluster_occurrences=info['cluster_occurrences'], - n_typical_periods=info['n_typical_periods'], - timesteps_per_period=info['timesteps_per_period'], + n_typical_periods=info['n_clusters'], + timesteps_per_period=info['timesteps_per_cluster'], storage_cyclic=info.get('storage_cyclic', True), ) typical_periods_model.do_modeling() diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index d72f0c036..25e011ef4 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -1046,8 +1046,8 @@ def fix_sizes( def cluster_reduce( self, - period_duration: str | float, - n_typical_periods: int, + n_clusters: int, + cluster_duration: str | float, weights: dict[str, float] | None = None, time_series_for_high_peaks: list[str] | None = None, time_series_for_low_peaks: list[str] | None = None, @@ -1055,55 +1055,55 @@ def cluster_reduce( storage_cyclic: bool = True, ) -> FlowSystem: """ - Create a FlowSystem with reduced timesteps using typical periods. + Create a FlowSystem with reduced timesteps using typical clusters. This method creates a new FlowSystem optimized for sizing studies by reducing - the number of timesteps to only the typical (representative) periods identified + the number of timesteps to only the typical (representative) clusters identified through time series aggregation. Unlike `cluster()` which uses equality constraints, this method actually reduces the problem size for faster solving. The method: 1. Performs time series clustering using tsam - 2. Extracts only the typical periods (not all original timesteps) + 2. Extracts only the typical clusters (not all original timesteps) 3. Applies timestep weighting for accurate cost representation - 4. Optionally links storage states between periods via boundary variables + 4. Optionally links storage states between clusters via boundary variables Use this for initial sizing optimization, then use `fix_sizes()` to re-optimize at full resolution for accurate dispatch results. Args: - period_duration: Duration of each period. Can be a pandas-style string + n_clusters: Number of clusters (typical segments) to extract (e.g., 8 typical days). + cluster_duration: Duration of each cluster. Can be a pandas-style string ('1D', '24h', '6h') or a numeric value in hours. - n_typical_periods: Number of typical periods to extract (e.g., 8 typical days). weights: Optional clustering weights per time series. Keys are time series labels. time_series_for_high_peaks: Time series labels for explicitly selecting high-value - periods. **Recommended** for demand time series to capture peak demand days. - time_series_for_low_peaks: Time series labels for explicitly selecting low-value periods. - storage_inter_period_linking: If True, link storage states between periods using + clusters. **Recommended** for demand time series to capture peak demand days. + time_series_for_low_peaks: Time series labels for explicitly selecting low-value clusters. + storage_inter_period_linking: If True, link storage states between clusters using boundary variables. This preserves long-term storage behavior. Default: True. 
storage_cyclic: If True, enforce SOC_boundary[0] = SOC_boundary[end] for storages. Only used when storage_inter_period_linking=True. Default: True. Returns: - A new FlowSystem with reduced timesteps (only typical periods). - The FlowSystem has metadata stored in `_typical_periods_info` for weighting. + A new FlowSystem with reduced timesteps (only typical clusters). + The FlowSystem has metadata stored in `_cluster_info` for weighting. Raises: ValueError: If timestep sizes are inconsistent. - ValueError: If period_duration is not a multiple of timestep size. + ValueError: If cluster_duration is not a multiple of timestep size. Examples: Two-stage sizing optimization: >>> # Stage 1: Size with reduced timesteps (fast) >>> fs_sizing = flow_system.transform.cluster_reduce( - ... period_duration='1D', - ... n_typical_periods=8, + ... n_clusters=8, + ... cluster_duration='1D', ... time_series_for_high_peaks=['HeatDemand(Q_th)|fixed_relative_profile'], ... ) >>> fs_sizing.optimize(solver) >>> - >>> # Apply safety margin (typical periods may smooth peaks) + >>> # Apply safety margin (typical clusters may smooth peaks) >>> sizes_with_margin = { ... name: float(size.item()) * 1.05 for name, size in fs_sizing.statistics.sizes.items() ... } @@ -1114,19 +1114,19 @@ def cluster_reduce( Note: - This is best suited for initial sizing, not final dispatch optimization - - Use `time_series_for_high_peaks` to ensure peak demand periods are captured + - Use `time_series_for_high_peaks` to ensure peak demand clusters are captured - A 5-10% safety margin on sizes is recommended for the dispatch stage - - Storage linking adds SOC_boundary variables to track state between periods + - Storage linking adds SOC_boundary variables to track state between clusters """ from .clustering import Clustering from .core import DataConverter, TimeSeriesData, drop_constant_arrays from .flow_system import FlowSystem - # Parse period_duration to hours - if isinstance(period_duration, str): - hours_per_period = pd.Timedelta(period_duration).total_seconds() / 3600 + # Parse cluster_duration to hours + if isinstance(cluster_duration, str): + hours_per_cluster = pd.Timedelta(cluster_duration).total_seconds() / 3600 else: - hours_per_period = float(period_duration) + hours_per_cluster = float(cluster_duration) # Validation dt_min = float(self._fs.hours_per_timestep.min().item()) @@ -1136,17 +1136,17 @@ def cluster_reduce( f'cluster_reduce() failed due to inconsistent time step sizes: ' f'delta_t varies from {dt_min} to {dt_max} hours.' ) - ratio = hours_per_period / dt_max + ratio = hours_per_cluster / dt_max if not np.isclose(ratio, round(ratio), atol=1e-9): raise ValueError( - f'The selected period_duration={hours_per_period}h does not match the time ' + f'The selected cluster_duration={hours_per_cluster}h does not match the time ' f'step size of {dt_max} hours. It must be an integer multiple of {dt_max} hours.' 
) - timesteps_per_period = int(round(hours_per_period / dt_max)) + timesteps_per_cluster = int(round(hours_per_cluster / dt_max)) logger.info(f'{"":#^80}') - logger.info(f'{" Creating Typical Periods (Reduced Timesteps) ":#^80}') + logger.info(f'{" Creating Typical Clusters (Reduced Timesteps) ":#^80}') # Get dataset representation ds = self._fs.to_dataset(include_solution=False) @@ -1156,8 +1156,8 @@ def cluster_reduce( clustering = Clustering( original_data=temporaly_changing_ds.to_dataframe(), hours_per_time_step=float(dt_min), - hours_per_period=hours_per_period, - nr_of_periods=n_typical_periods, + hours_per_period=hours_per_cluster, + nr_of_periods=n_clusters, weights=weights or self._calculate_clustering_weights(temporaly_changing_ds), time_series_for_high_peaks=time_series_for_high_peaks or [], time_series_for_low_peaks=time_series_for_low_peaks or [], @@ -1169,20 +1169,20 @@ def cluster_reduce( cluster_order = clustering.tsam.clusterOrder # Order in which clusters appear cluster_occurrences = clustering.tsam.clusterPeriodNoOccur # {cluster_id: count} - # Actual number of typical periods (may differ from requested if peak forcing is used) - actual_n_typical_periods = len(cluster_occurrences) + # Actual number of clusters (may differ from requested if peak forcing is used) + actual_n_clusters = len(cluster_occurrences) - # Create timestep weights: each typical period timestep represents multiple original timesteps - # Weight = number of original periods this typical period represents + # Create timestep weights: each typical cluster timestep represents multiple original timesteps + # Weight = number of original clusters this typical cluster represents timestep_weights = [] - for typical_period_idx in range(actual_n_typical_periods): - weight = cluster_occurrences.get(typical_period_idx, 1) - timestep_weights.extend([weight] * timesteps_per_period) + for cluster_idx in range(actual_n_clusters): + weight = cluster_occurrences.get(cluster_idx, 1) + timestep_weights.extend([weight] * timesteps_per_cluster) timestep_weights = np.array(timestep_weights) logger.info(f'Reduced from {len(self._fs.timesteps)} to {len(typical_periods_df)} timesteps') - logger.info(f'Typical periods: {actual_n_typical_periods} (requested: {n_typical_periods})') + logger.info(f'Clusters: {actual_n_clusters} (requested: {n_clusters})') logger.info(f'Cluster occurrences: {cluster_occurrences}') # Create new time index for typical periods @@ -1217,20 +1217,20 @@ def cluster_reduce( ds_new = ds_new.reindex(time=new_time_index) # Update metadata - ds_new.attrs['timesteps_per_period'] = timesteps_per_period + ds_new.attrs['timesteps_per_cluster'] = timesteps_per_cluster ds_new.attrs['hours_per_timestep'] = dt_min # Create new FlowSystem with reduced timesteps reduced_fs = FlowSystem.from_dataset(ds_new) - # Store typical periods info for later use during modeling - reduced_fs._typical_periods_info = { + # Store cluster info for later use during modeling + reduced_fs._cluster_info = { 'clustering': clustering, 'timestep_weights': timestep_weights, 'cluster_order': cluster_order, 'cluster_occurrences': cluster_occurrences, - 'n_typical_periods': actual_n_typical_periods, - 'timesteps_per_period': timesteps_per_period, + 'n_clusters': actual_n_clusters, + 'timesteps_per_cluster': timesteps_per_cluster, 'storage_inter_period_linking': storage_inter_period_linking, 'storage_cyclic': storage_cyclic, 'original_fs': self._fs, @@ -1239,17 +1239,17 @@ def cluster_reduce( return reduced_fs def expand_solution(self) -> 
FlowSystem: - """Expand a reduced (typical periods) FlowSystem back to full original timesteps. + """Expand a reduced (clustered) FlowSystem back to full original timesteps. After solving a FlowSystem created with ``cluster_reduce()``, this method disaggregates the FlowSystem by: - 1. Expanding all time series data from typical periods to full timesteps - 2. Expanding the solution by mapping each typical period back to all - original periods it represents + 1. Expanding all time series data from typical clusters to full timesteps + 2. Expanding the solution by mapping each typical cluster back to all + original segments it represents This enables using all existing solution accessors (``statistics``, ``plot``, etc.) with full time resolution, where both the data and solution are consistently - expanded from the typical periods. + expanded from the typical clusters. Returns: FlowSystem: A new FlowSystem with full timesteps and expanded solution. @@ -1263,8 +1263,8 @@ def expand_solution(self) -> FlowSystem: >>> # Stage 1: Size with reduced timesteps >>> fs_reduced = flow_system.transform.cluster_reduce( - ... period_duration='1D', - ... n_typical_periods=8, + ... n_clusters=8, + ... cluster_duration='1D', ... ) >>> fs_reduced.optimize(solver) >>> @@ -1277,11 +1277,11 @@ def expand_solution(self) -> FlowSystem: >>> fs_expanded.statistics.plot.heatmap('Boiler(Q_th)|flow_rate') Note: - The expanded FlowSystem repeats the typical period values for all - periods belonging to the same cluster. Both input data and solution + The expanded FlowSystem repeats the typical cluster values for all + segments belonging to the same cluster. Both input data and solution are consistently expanded, so they match. This is an approximation - the actual dispatch at full resolution would differ due to - intra-period variations in time series data. + intra-cluster variations in time series data. For accurate dispatch results, use ``fix_sizes()`` to fix the sizes from the reduced optimization and re-optimize at full resolution. @@ -1291,40 +1291,40 @@ def expand_solution(self) -> FlowSystem: from .flow_system import FlowSystem # Validate - if not hasattr(self._fs, '_typical_periods_info') or self._fs._typical_periods_info is None: + if not hasattr(self._fs, '_cluster_info') or self._fs._cluster_info is None: raise ValueError( 'expand_solution() requires a FlowSystem created with cluster_reduce(). ' - 'This FlowSystem has no typical periods info.' + 'This FlowSystem has no cluster info.' ) if self._fs.solution is None: raise ValueError('FlowSystem has no solution. Run optimize() or solve() first.') - info = self._fs._typical_periods_info + info = self._fs._cluster_info cluster_order = info['cluster_order'] - timesteps_per_period = info['timesteps_per_period'] + timesteps_per_cluster = info['timesteps_per_cluster'] original_fs: FlowSystem = info['original_fs'] - n_typical_periods = info['n_typical_periods'] + n_clusters = info['n_clusters'] # Get original timesteps from the original FlowSystem original_timesteps = original_fs.timesteps n_original_timesteps = len(original_timesteps) - n_reduced_timesteps = n_typical_periods * timesteps_per_period + n_reduced_timesteps = n_clusters * timesteps_per_cluster # Build mapping: for each original timestep, which reduced timestep to copy from mapping = np.zeros(n_original_timesteps, dtype=np.int32) for orig_ts_idx in range(n_original_timesteps): - # Which original period does this timestep belong to? 
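A worked example of the mapping arithmetic in this hunk, with invented numbers (the hunk continues below):

```python
# Toy setup: 3 original segments, 2 typical clusters of 4 timesteps each.
timesteps_per_cluster = 4
cluster_order = [1, 0, 1]  # segment i was assigned to cluster_order[i]

orig_ts_idx = 9                                          # the 10th original timestep
orig_segment_idx = orig_ts_idx // timesteps_per_cluster  # -> segment 2
pos_in_cluster = orig_ts_idx % timesteps_per_cluster     # -> position 1
cluster_id = cluster_order[orig_segment_idx]             # -> cluster 1
reduced_ts_idx = cluster_id * timesteps_per_cluster + pos_in_cluster
assert reduced_ts_idx == 5  # original timestep 9 copies reduced timestep 5
```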
- orig_period_idx = orig_ts_idx // timesteps_per_period - # Position within the period - pos_in_period = orig_ts_idx % timesteps_per_period + # Which original segment does this timestep belong to? + orig_segment_idx = orig_ts_idx // timesteps_per_cluster + # Position within the cluster + pos_in_cluster = orig_ts_idx % timesteps_per_cluster - # Which cluster (typical period) does this original period map to? - cluster_id = cluster_order[orig_period_idx] if orig_period_idx < len(cluster_order) else 0 + # Which cluster does this original segment map to? + cluster_id = cluster_order[orig_segment_idx] if orig_segment_idx < len(cluster_order) else 0 # The corresponding timestep in the reduced solution - reduced_ts_idx = cluster_id * timesteps_per_period + pos_in_period + reduced_ts_idx = cluster_id * timesteps_per_cluster + pos_in_cluster # Ensure we don't exceed reduced solution bounds mapping[orig_ts_idx] = min(reduced_ts_idx, n_reduced_timesteps - 1) @@ -1366,7 +1366,7 @@ def expand_time_data(da: xr.DataArray) -> xr.DataArray: logger.info( f'Expanded FlowSystem from {n_reduced_timesteps} to {n_original_timesteps} timesteps ' - f'({n_typical_periods} typical periods → {len(cluster_order)} original periods)' + f'({n_clusters} clusters → {len(cluster_order)} original segments)' ) return expanded_fs From e727652eed6c2dc1c7363bb8f41dc90dc704ca23 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 15:13:32 +0100 Subject: [PATCH 043/191] Temp --- docs/notebooks/08e-cluster-and-reduce.ipynb | 34 +- flixopt/transform_accessor.py | 364 ++++++++++++++++---- 2 files changed, 321 insertions(+), 77 deletions(-) diff --git a/docs/notebooks/08e-cluster-and-reduce.ipynb b/docs/notebooks/08e-cluster-and-reduce.ipynb index 090f88167..5f67adf03 100644 --- a/docs/notebooks/08e-cluster-and-reduce.ipynb +++ b/docs/notebooks/08e-cluster-and-reduce.ipynb @@ -253,8 +253,8 @@ "\n", "# Create reduced FlowSystem with 8 typical days\n", "fs_reduced = create_flow_system().transform.cluster_reduce(\n", - " hours_per_period=24, # 24 hours per period (daily)\n", - " nr_of_typical_periods=8, # 8 typical days\n", + " period_duration='1D', # Daily periods (can also use hours, e.g., 24)\n", + " n_typical_periods=8, # 8 typical days\n", " time_series_for_high_peaks=peak_forcing_series, # Force inclusion of peak demand day!\n", " storage_inter_period_linking=True, # Link storage states between periods\n", " storage_cyclic=True, # Cyclic constraint: SOC[0] = SOC[end]\n", @@ -416,9 +416,9 @@ "# Show clustering info\n", "info = fs_reduced._typical_periods_info\n", "print('Typical Periods Configuration:')\n", - "print(f' Number of typical periods: {info[\"nr_of_typical_periods\"]}')\n", + "print(f' Number of typical periods: {info[\"n_typical_periods\"]}')\n", "print(f' Timesteps per period: {info[\"timesteps_per_period\"]}')\n", - "print(f' Total reduced timesteps: {info[\"nr_of_typical_periods\"] * info[\"timesteps_per_period\"]}')\n", + "print(f' Total reduced timesteps: {info[\"n_typical_periods\"] * info[\"timesteps_per_period\"]}')\n", "print(f' Cluster order (first 10): {info[\"cluster_order\"][:10]}...')\n", "print(f' Cluster occurrences: {dict(info[\"cluster_occurrences\"])}')\n", "print(f' Storage inter-period linking: {info[\"storage_inter_period_linking\"]}')\n", @@ -435,8 +435,8 @@ "\n", "| Parameter | Type | Description |\n", "|-----------|------|-------------|\n", - "| `hours_per_period` | `float` | Duration of each period in hours (e.g., 24 for daily) |\n", - "| 
`nr_of_typical_periods` | `int` | Number of typical periods to extract (e.g., 8) |\n", + "| `period_duration` | `str \\| float` | Duration of each period ('1D', '24h') or hours as float |\n", + "| `n_typical_periods` | `int` | Number of typical periods to extract (e.g., 8) |\n", "| `weights` | `dict[str, float]` | Optional weights for clustering each time series |\n", "| `time_series_for_high_peaks` | `list[str]` | **IMPORTANT**: Force inclusion of high-value periods to capture peak demands |\n", "| `time_series_for_low_peaks` | `list[str]` | Force inclusion of low-value periods |\n", @@ -482,8 +482,8 @@ "```python\n", "# Stage 1: Fast sizing with typical periods\n", "fs_sizing = flow_system.transform.cluster_reduce(\n", - " hours_per_period=24,\n", - " nr_of_typical_periods=8,\n", + " period_duration='1D',\n", + " n_typical_periods=8,\n", " time_series_for_high_peaks=['DemandComponent(FlowName)|fixed_relative_profile'],\n", ")\n", "fs_sizing.optimize(solver)\n", @@ -507,6 +507,24 @@ "- **Two-stage is recommended**: Use `cluster_reduce()` for fast sizing, then `fix_sizes()` for dispatch\n", "- **Storage linking preserves long-term behavior**: SOC_boundary variables ensure correct storage cycling" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fs_expanded = fs_reduced.transform.expand_solution()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fs_expanded.statistics.plot.effects()" + ] } ], "metadata": { diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 25e011ef4..be8d71ec2 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -1145,48 +1145,96 @@ def cluster_reduce( timesteps_per_cluster = int(round(hours_per_cluster / dt_max)) + # Check for multi-period/scenario dimensions + has_periods = self._fs.periods is not None + has_scenarios = self._fs.scenarios is not None + logger.info(f'{"":#^80}') - logger.info(f'{" Creating Typical Clusters (Reduced Timesteps) ":#^80}') + if has_periods or has_scenarios: + logger.info(f'{" Creating Typical Clusters (Multi-dimensional) ":#^80}') + else: + logger.info(f'{" Creating Typical Clusters (Reduced Timesteps) ":#^80}') + + # Determine iteration dimensions + periods = list(self._fs.periods) if has_periods else [None] + scenarios = list(self._fs.scenarios) if has_scenarios else [None] # Get dataset representation ds = self._fs.to_dataset(include_solution=False) - temporaly_changing_ds = drop_constant_arrays(ds, dim='time') - # Perform clustering - clustering = Clustering( - original_data=temporaly_changing_ds.to_dataframe(), - hours_per_time_step=float(dt_min), - hours_per_period=hours_per_cluster, - nr_of_periods=n_clusters, - weights=weights or self._calculate_clustering_weights(temporaly_changing_ds), - time_series_for_high_peaks=time_series_for_high_peaks or [], - time_series_for_low_peaks=time_series_for_low_peaks or [], - ) - clustering.cluster() + # Store clustering results per (period, scenario) combination + clustering_results: dict[tuple, Clustering] = {} + cluster_orders: dict[tuple, np.ndarray] = {} + cluster_occurrences_all: dict[tuple, dict] = {} + + # Track actual n_clusters (may vary per combination if peak forcing is used) + all_n_clusters = [] + + # Cluster each period x scenario combination independently + for period_label in periods: + for scenario_label in scenarios: + key = (period_label, scenario_label) + + # Select slice for this combination 
+ selector = {} + if period_label is not None: + selector['period'] = period_label + if scenario_label is not None: + selector['scenario'] = scenario_label + + if selector: + ds_slice = ds.sel(**selector, drop=True) + else: + ds_slice = ds + + # Drop constant arrays for clustering + temporaly_changing_ds = drop_constant_arrays(ds_slice, dim='time') + + # Log dimension info + dim_info = [] + if period_label is not None: + dim_info.append(f'period={period_label}') + if scenario_label is not None: + dim_info.append(f'scenario={scenario_label}') + if dim_info: + logger.info(f'Clustering {", ".join(dim_info)}...') + + # Perform clustering on this slice + clustering = Clustering( + original_data=temporaly_changing_ds.to_dataframe(), + hours_per_time_step=float(dt_min), + hours_per_period=hours_per_cluster, + nr_of_periods=n_clusters, + weights=weights or self._calculate_clustering_weights(temporaly_changing_ds), + time_series_for_high_peaks=time_series_for_high_peaks or [], + time_series_for_low_peaks=time_series_for_low_peaks or [], + ) + clustering.cluster() - # Extract typical periods data from tsam - typical_periods_df = clustering.tsam.typicalPeriods - cluster_order = clustering.tsam.clusterOrder # Order in which clusters appear - cluster_occurrences = clustering.tsam.clusterPeriodNoOccur # {cluster_id: count} + clustering_results[key] = clustering + cluster_orders[key] = clustering.tsam.clusterOrder + cluster_occurrences_all[key] = clustering.tsam.clusterPeriodNoOccur + all_n_clusters.append(len(clustering.tsam.clusterPeriodNoOccur)) - # Actual number of clusters (may differ from requested if peak forcing is used) - actual_n_clusters = len(cluster_occurrences) + # Use first clustering result for building reduced dataset + # (all should have same structure, just different cluster assignments) + first_key = (periods[0], scenarios[0]) + first_clustering = clustering_results[first_key] + typical_periods_df = first_clustering.tsam.typicalPeriods + actual_n_clusters = len(first_clustering.tsam.clusterPeriodNoOccur) - # Create timestep weights: each typical cluster timestep represents multiple original timesteps - # Weight = number of original clusters this typical cluster represents + # Create timestep weights (use first combination - weights should be consistent) + cluster_occurrences = cluster_occurrences_all[first_key] timestep_weights = [] for cluster_idx in range(actual_n_clusters): weight = cluster_occurrences.get(cluster_idx, 1) timestep_weights.extend([weight] * timesteps_per_cluster) - timestep_weights = np.array(timestep_weights) logger.info(f'Reduced from {len(self._fs.timesteps)} to {len(typical_periods_df)} timesteps') logger.info(f'Clusters: {actual_n_clusters} (requested: {n_clusters})') - logger.info(f'Cluster occurrences: {cluster_occurrences}') - # Create new time index for typical periods - # Use a synthetic time index starting from the original start time + # Create new time index for typical clusters original_time = self._fs.timesteps time_start = original_time[0] freq = pd.Timedelta(hours=dt_min) @@ -1196,25 +1244,110 @@ def cluster_reduce( freq=freq, ) - # Build new dataset with typical periods data - ds_new = self._fs.to_dataset(include_solution=False) - - # Update time-varying data arrays with typical periods values - typical_periods_df.index = new_time_index # Reindex with our new time - for name in typical_periods_df.columns: - if name in ds_new.data_vars: - series = typical_periods_df[name] + # Build new dataset with typical clusters data + ds_original = 
self._fs.to_dataset(include_solution=False) + + # Collect typical periods data per (period, scenario) combination + # Key: (period, scenario), Value: DataFrame with typical period data + typical_dfs = {} + for key, clustering in clustering_results.items(): + typical_df = clustering.tsam.typicalPeriods.copy() + typical_df.index = new_time_index + typical_dfs[key] = typical_df + + # Build new data arrays with reduced time dimension + ds_new_vars = {} + for name in ds_original.data_vars: + original_da = ds_original[name] + + # Check if this variable is in the typical periods (time-varying and non-constant) + first_key = (periods[0], scenarios[0]) + in_typical = name in typical_dfs[first_key].columns + + if 'time' not in original_da.dims: + # Time-independent variable: copy as-is + ds_new_vars[name] = original_da.copy() + elif not in_typical: + # Time-dependent but constant (not clustered): slice to new time length + # Take first timesteps_per_cluster * n_clusters timesteps + ds_new_vars[name] = original_da.isel(time=slice(0, len(new_time_index))).assign_coords( + time=new_time_index + ) + elif not has_periods and not has_scenarios: + # Simple case: single clustering, use typical periods directly + series = typical_dfs[first_key][name] da = DataConverter.to_dataarray( series, {'time': new_time_index, **{k: v for k, v in self._fs.coords.items() if k != 'time'}}, ).rename(name) - da = da.assign_attrs(ds_new[name].attrs) + da = da.assign_attrs(original_da.attrs) if TimeSeriesData.is_timeseries_data(da): da = TimeSeriesData.from_dataarray(da) - ds_new[name] = da + ds_new_vars[name] = da + else: + # Multi-dimensional: build new array with all dims but reduced time + new_dims = list(original_da.dims) + new_shape = list(original_da.shape) + time_idx = new_dims.index('time') + new_shape[time_idx] = len(new_time_index) + + # Build coordinates + new_coords = {} + for dim in new_dims: + if dim == 'time': + new_coords[dim] = new_time_index + else: + new_coords[dim] = original_da.coords[dim].values + + # Initialize array and fill per (period, scenario) + new_data = np.zeros(new_shape, dtype=original_da.dtype) + + for period_label in periods: + for scenario_label in scenarios: + key = (period_label, scenario_label) + typical_df = typical_dfs[key] + + if name not in typical_df.columns: + continue - # Update time coordinate - ds_new = ds_new.reindex(time=new_time_index) + series_values = typical_df[name].values + + # Determine indices for this slice + if 'period' in new_dims and 'scenario' in new_dims: + if period_label is not None and scenario_label is not None: + period_idx = list(new_coords['period']).index(period_label) + scenario_idx = list(new_coords['scenario']).index(scenario_label) + if new_dims == ['time', 'period', 'scenario']: + new_data[:, period_idx, scenario_idx] = series_values + elif new_dims == ['time', 'scenario', 'period']: + new_data[:, scenario_idx, period_idx] = series_values + elif 'period' in new_dims: + if period_label is not None: + period_idx = list(new_coords['period']).index(period_label) + if new_dims == ['time', 'period']: + new_data[:, period_idx] = series_values + elif new_dims == ['period', 'time']: + new_data[period_idx, :] = series_values + elif 'scenario' in new_dims: + if scenario_label is not None: + scenario_idx = list(new_coords['scenario']).index(scenario_label) + if new_dims == ['time', 'scenario']: + new_data[:, scenario_idx] = series_values + elif new_dims == ['scenario', 'time']: + new_data[scenario_idx, :] = series_values + else: + # Has time but no 
period/scenario: use first key's data + new_data[:] = series_values + break # Only need to fill once + + da = xr.DataArray(data=new_data, dims=new_dims, coords=new_coords, attrs=original_da.attrs) + if TimeSeriesData.is_timeseries_data(da): + da = TimeSeriesData.from_dataarray(da) + ds_new_vars[name] = da + + # Create new dataset with updated variables + ds_new = xr.Dataset(ds_new_vars, attrs=ds_original.attrs) + ds_new = ds_new.assign_coords(time=new_time_index) # Update metadata ds_new.attrs['timesteps_per_cluster'] = timesteps_per_cluster @@ -1223,17 +1356,22 @@ def cluster_reduce( # Create new FlowSystem with reduced timesteps reduced_fs = FlowSystem.from_dataset(ds_new) - # Store cluster info for later use during modeling + # Store cluster info for later use during modeling and expand_solution() reduced_fs._cluster_info = { - 'clustering': clustering, + 'clustering_results': clustering_results, # Dict keyed by (period, scenario) + 'cluster_orders': cluster_orders, # Dict keyed by (period, scenario) + 'cluster_occurrences': cluster_occurrences_all, # Dict keyed by (period, scenario) 'timestep_weights': timestep_weights, - 'cluster_order': cluster_order, - 'cluster_occurrences': cluster_occurrences, 'n_clusters': actual_n_clusters, 'timesteps_per_cluster': timesteps_per_cluster, 'storage_inter_period_linking': storage_inter_period_linking, 'storage_cyclic': storage_cyclic, 'original_fs': self._fs, + 'has_periods': has_periods, + 'has_scenarios': has_scenarios, + # For backwards compatibility with simple case + 'cluster_order': cluster_orders[first_key], + 'clustering': first_clustering, } return reduced_fs @@ -1247,6 +1385,9 @@ def expand_solution(self) -> FlowSystem: 2. Expanding the solution by mapping each typical cluster back to all original segments it represents + For FlowSystems with periods and/or scenarios, each (period, scenario) + combination is expanded using its own cluster assignment. + This enables using all existing solution accessors (``statistics``, ``plot``, etc.) with full time resolution, where both the data and solution are consistently expanded from the typical clusters. @@ -1301,48 +1442,126 @@ def expand_solution(self) -> FlowSystem: raise ValueError('FlowSystem has no solution. Run optimize() or solve() first.') info = self._fs._cluster_info - cluster_order = info['cluster_order'] timesteps_per_cluster = info['timesteps_per_cluster'] original_fs: FlowSystem = info['original_fs'] n_clusters = info['n_clusters'] + has_periods = info.get('has_periods', False) + has_scenarios = info.get('has_scenarios', False) + + # Get cluster_orders dict (keyed by (period, scenario) tuples) + # For backwards compatibility, create dict from single cluster_order if needed + cluster_orders = info.get('cluster_orders', {(None, None): info['cluster_order']}) + + # Determine iteration dimensions + periods = list(original_fs.periods) if has_periods else [None] + scenarios = list(original_fs.scenarios) if has_scenarios else [None] # Get original timesteps from the original FlowSystem original_timesteps = original_fs.timesteps n_original_timesteps = len(original_timesteps) n_reduced_timesteps = n_clusters * timesteps_per_cluster - # Build mapping: for each original timestep, which reduced timestep to copy from - mapping = np.zeros(n_original_timesteps, dtype=np.int32) - - for orig_ts_idx in range(n_original_timesteps): - # Which original segment does this timestep belong to? 
- orig_segment_idx = orig_ts_idx // timesteps_per_cluster - # Position within the cluster - pos_in_cluster = orig_ts_idx % timesteps_per_cluster - - # Which cluster does this original segment map to? - cluster_id = cluster_order[orig_segment_idx] if orig_segment_idx < len(cluster_order) else 0 - - # The corresponding timestep in the reduced solution - reduced_ts_idx = cluster_id * timesteps_per_cluster + pos_in_cluster + # Helper to build mapping for a specific cluster_order + def build_mapping(cluster_order: np.ndarray) -> np.ndarray: + mapping = np.zeros(n_original_timesteps, dtype=np.int32) + for orig_ts_idx in range(n_original_timesteps): + orig_segment_idx = orig_ts_idx // timesteps_per_cluster + pos_in_cluster = orig_ts_idx % timesteps_per_cluster + cluster_id = cluster_order[orig_segment_idx] if orig_segment_idx < len(cluster_order) else 0 + reduced_ts_idx = cluster_id * timesteps_per_cluster + pos_in_cluster + mapping[orig_ts_idx] = min(reduced_ts_idx, n_reduced_timesteps - 1) + return mapping + + # Build mappings per (period, scenario) + mappings = {key: build_mapping(order) for key, order in cluster_orders.items()} + first_key = (periods[0], scenarios[0]) + + # Helper function to expand time-dependent data (simple case) + def expand_simple(da: xr.DataArray, mapping: np.ndarray) -> xr.DataArray: + expanded_da = da.isel(time=xr.DataArray(mapping, dims=['time'])) + expanded_da = expanded_da.assign_coords(time=original_timesteps) + return expanded_da.assign_attrs(da.attrs) - # Ensure we don't exceed reduced solution bounds - mapping[orig_ts_idx] = min(reduced_ts_idx, n_reduced_timesteps - 1) + # Helper function to expand multi-dimensional data + def expand_multi_dimensional(da: xr.DataArray) -> xr.DataArray: + # Create output array with expanded time dimension + new_dims = list(da.dims) + new_shape = list(da.shape) + time_idx = new_dims.index('time') + new_shape[time_idx] = n_original_timesteps + + # Build new coordinates + new_coords = dict(da.coords) + new_coords['time'] = original_timesteps + + # Initialize output with zeros + expanded_data = np.zeros(new_shape, dtype=da.dtype) + + # Expand each (period, scenario) slice independently + for period_label in periods: + for scenario_label in scenarios: + key = (period_label, scenario_label) + mapping = mappings[key] + + # Build selector for this slice + if 'period' in da.dims and 'scenario' in da.dims: + if period_label is not None and scenario_label is not None: + slice_data = da.sel(period=period_label, scenario=scenario_label) + expanded_slice = slice_data.values[mapping] + # Assign back to the correct position + period_idx = list(da.coords['period'].values).index(period_label) + scenario_idx = list(da.coords['scenario'].values).index(scenario_label) + if da.dims == ('time', 'period', 'scenario'): + expanded_data[:, period_idx, scenario_idx] = expanded_slice + elif da.dims == ('time', 'scenario', 'period'): + expanded_data[:, scenario_idx, period_idx] = expanded_slice + elif 'period' in da.dims: + if period_label is not None: + slice_data = da.sel(period=period_label) + expanded_slice = slice_data.values[mapping] + period_idx = list(da.coords['period'].values).index(period_label) + if da.dims == ('time', 'period'): + expanded_data[:, period_idx] = expanded_slice + elif da.dims == ('period', 'time'): + expanded_data[period_idx, :] = expanded_slice + elif 'scenario' in da.dims: + if scenario_label is not None: + slice_data = da.sel(scenario=scenario_label) + expanded_slice = slice_data.values[mapping] + scenario_idx = 
list(da.coords['scenario'].values).index(scenario_label) + if da.dims == ('time', 'scenario'): + expanded_data[:, scenario_idx] = expanded_slice + elif da.dims == ('scenario', 'time'): + expanded_data[scenario_idx, :] = expanded_slice + + return xr.DataArray( + data=expanded_data, + dims=new_dims, + coords=new_coords, + attrs=da.attrs, + ) - # Helper function to expand time-dependent data - def expand_time_data(da: xr.DataArray) -> xr.DataArray: + # Helper function to expand any data array + def expand_data(da: xr.DataArray) -> xr.DataArray: if 'time' not in da.dims: + # Time-independent: copy as-is return da.copy() - expanded_da = da.isel(time=xr.DataArray(mapping, dims=['time'])) - expanded_da = expanded_da.assign_coords(time=original_timesteps) - return expanded_da.assign_attrs(da.attrs) + elif not has_periods and not has_scenarios: + # Simple case: use first mapping + return expand_simple(da, mappings[first_key]) + elif 'period' not in da.dims and 'scenario' not in da.dims: + # Has time but no period/scenario dims: use first mapping + return expand_simple(da, mappings[first_key]) + else: + # Multi-dimensional: expand each slice independently + return expand_multi_dimensional(da) # 1. Expand the FlowSystem's data (input time series) reduced_ds = self._fs.to_dataset(include_solution=False) expanded_ds_data = {} for var_name in reduced_ds.data_vars: - expanded_ds_data[var_name] = expand_time_data(reduced_ds[var_name]) + expanded_ds_data[var_name] = expand_data(reduced_ds[var_name]) # Update coordinates expanded_ds = xr.Dataset(expanded_ds_data, attrs=reduced_ds.attrs) @@ -1359,15 +1578,22 @@ def expand_time_data(da: xr.DataArray) -> xr.DataArray: expanded_solution_data = {} for var_name in reduced_solution.data_vars: - expanded_solution_data[var_name] = expand_time_data(reduced_solution[var_name]) + expanded_solution_data[var_name] = expand_data(reduced_solution[var_name]) expanded_solution = xr.Dataset(expanded_solution_data, attrs=reduced_solution.attrs) expanded_fs._solution = expanded_solution - logger.info( - f'Expanded FlowSystem from {n_reduced_timesteps} to {n_original_timesteps} timesteps ' - f'({n_clusters} clusters → {len(cluster_order)} original segments)' - ) + n_combinations = len(periods) * len(scenarios) + if n_combinations > 1: + logger.info( + f'Expanded FlowSystem from {n_reduced_timesteps} to {n_original_timesteps} timesteps ' + f'({n_clusters} clusters, {n_combinations} period/scenario combinations)' + ) + else: + logger.info( + f'Expanded FlowSystem from {n_reduced_timesteps} to {n_original_timesteps} timesteps ' + f'({n_clusters} clusters → {len(cluster_orders[first_key])} original segments)' + ) return expanded_fs From 1827578052c033aaf8726d16ee58feccefe580bf Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 15:30:46 +0100 Subject: [PATCH 044/191] Temp --- flixopt/clustering.py | 38 ++++++++++++++++++++++++++++++----- flixopt/elements.py | 12 ++++++----- flixopt/features.py | 17 +++++++--------- flixopt/flow_system.py | 34 ++++++++++++++++++++++++------- flixopt/structure.py | 18 +++++++++++++++++ flixopt/transform_accessor.py | 3 +++ 6 files changed, 95 insertions(+), 27 deletions(-) diff --git a/flixopt/clustering.py b/flixopt/clustering.py index a92181010..b6224e838 100644 --- a/flixopt/clustering.py +++ b/flixopt/clustering.py @@ -951,12 +951,12 @@ def _add_storage_linking(self, storage) -> None: charge_state = storage.submodel.variables[charge_state_name] - # Get storage capacity bounds + # Get storage 
capacity bounds (may have period/scenario dimensions) capacity = storage.capacity_in_flow_hours if hasattr(capacity, 'fixed_size') and capacity.fixed_size is not None: cap_value = capacity.fixed_size elif hasattr(capacity, 'maximum') and capacity.maximum is not None: - cap_value = float(capacity.maximum.max().item()) if hasattr(capacity.maximum, 'max') else capacity.maximum + cap_value = capacity.maximum else: cap_value = 1e9 # Large default @@ -966,9 +966,37 @@ def _add_storage_linking(self, storage) -> None: boundary_coords = [np.arange(n_boundaries)] boundary_dims = ['period_boundary'] - # Bounds: 0 <= SOC_boundary <= capacity - lb = xr.DataArray(0.0, coords={'period_boundary': np.arange(n_boundaries)}, dims=['period_boundary']) - ub = xr.DataArray(cap_value, coords={'period_boundary': np.arange(n_boundaries)}, dims=['period_boundary']) + # Build bounds - handle both scalar and multi-dimensional cap_value + # If cap_value has period/scenario dims, we need to include them + if isinstance(cap_value, xr.DataArray) and cap_value.dims: + # cap_value has dimensions (e.g., period, scenario) - need to broadcast + extra_dims = list(cap_value.dims) + extra_coords = {dim: cap_value.coords[dim].values for dim in extra_dims} + + # Add extra dims/coords to the variable + boundary_dims = ['period_boundary'] + extra_dims + boundary_coords = [np.arange(n_boundaries)] + [extra_coords[d] for d in extra_dims] + + # Build lb and ub with all dimensions + lb_coords = {'period_boundary': np.arange(n_boundaries), **extra_coords} + lb_shape = [n_boundaries] + [len(extra_coords[d]) for d in extra_dims] + lb = xr.DataArray( + np.zeros(lb_shape), + coords=lb_coords, + dims=boundary_dims, + ) + + # Broadcast cap_value across period_boundary dimension + ub = cap_value.expand_dims({'period_boundary': n_boundaries}, axis=0) + ub = ub.assign_coords(period_boundary=np.arange(n_boundaries)) + else: + # Scalar cap_value - simple case + if hasattr(cap_value, 'item'): + cap_value = float(cap_value.item()) + else: + cap_value = float(cap_value) + lb = xr.DataArray(0.0, coords={'period_boundary': np.arange(n_boundaries)}, dims=['period_boundary']) + ub = xr.DataArray(cap_value, coords={'period_boundary': np.arange(n_boundaries)}, dims=['period_boundary']) soc_boundary = self.add_variables( lower=lb, diff --git a/flixopt/elements.py b/flixopt/elements.py index 2933eb95a..1dc92ec66 100644 --- a/flixopt/elements.py +++ b/flixopt/elements.py @@ -680,7 +680,7 @@ def _do_modeling(self): ModelingPrimitives.expression_tracking_variable( model=self, name=f'{self.label_full}|total_flow_hours', - tracked_expression=(self.flow_rate * self._model.hours_per_step).sum('time'), + tracked_expression=(self.flow_rate * self._model.aggregation_weight).sum('time'), bounds=( self.element.flow_hours_min if self.element.flow_hours_min is not None else 0, self.element.flow_hours_max if self.element.flow_hours_max is not None else None, @@ -826,7 +826,7 @@ def _create_shares(self): self._model.effects.add_share_to_effects( name=self.label_full, expressions={ - effect: self.flow_rate * self._model.hours_per_step * factor + effect: self.flow_rate * self._model.aggregation_weight * factor for effect, factor in self.element.effects_per_flow_hour.items() }, target='temporal', @@ -839,7 +839,7 @@ def _create_bounds_for_load_factor(self): # Maximum load factor constraint if self.element.load_factor_max is not None: - flow_hours_per_size_max = self._model.hours_per_step.sum('time') * self.element.load_factor_max + flow_hours_per_size_max = 
self._model.aggregation_weight.sum('time') * self.element.load_factor_max self.add_constraints( self.total_flow_hours <= size * flow_hours_per_size_max, short_name='load_factor_max', @@ -847,7 +847,7 @@ def _create_bounds_for_load_factor(self): # Minimum load factor constraint if self.element.load_factor_min is not None: - flow_hours_per_size_min = self._model.hours_per_step.sum('time') * self.element.load_factor_min + flow_hours_per_size_min = self._model.aggregation_weight.sum('time') * self.element.load_factor_min self.add_constraints( self.total_flow_hours >= size * flow_hours_per_size_min, short_name='load_factor_min', @@ -951,7 +951,9 @@ def _do_modeling(self): # Add virtual supply/demand to balance and penalty if needed if self.element.allows_imbalance: - imbalance_penalty = np.multiply(self._model.hours_per_step, self.element.imbalance_penalty_per_flow_hour) + imbalance_penalty = np.multiply( + self._model.aggregation_weight, self.element.imbalance_penalty_per_flow_hour + ) self.virtual_supply = self.add_variables( lower=0, coords=self._model.get_coords(), short_name='virtual_supply' diff --git a/flixopt/features.py b/flixopt/features.py index ef1d1e4c8..a52e0e128 100644 --- a/flixopt/features.py +++ b/flixopt/features.py @@ -199,12 +199,12 @@ def _do_modeling(self): # 3. Total duration tracking using existing pattern ModelingPrimitives.expression_tracking_variable( self, - tracked_expression=(self.status * self._model.hours_per_step).sum('time'), + tracked_expression=(self.status * self._model.aggregation_weight).sum('time'), bounds=( self.parameters.active_hours_min if self.parameters.active_hours_min is not None else 0, self.parameters.active_hours_max if self.parameters.active_hours_max is not None - else self._model.hours_per_step.sum('time').max().item(), + else self._model.aggregation_weight.sum('time').max().item(), ), short_name='active_hours', coords=['period', 'scenario'], @@ -268,7 +268,7 @@ def _add_effects(self): self._model.effects.add_share_to_effects( name=self.label_of_element, expressions={ - effect: self.status * factor * self._model.hours_per_step + effect: self.status * factor * self._model.aggregation_weight for effect, factor in self.parameters.effects_per_active_hour.items() }, target='temporal', @@ -612,19 +612,16 @@ def _do_modeling(self): if 'time' in self._dims: self.total_per_timestep = self.add_variables( - lower=-np.inf if (self._min_per_hour is None) else self._min_per_hour * self._model.hours_per_step, - upper=np.inf if (self._max_per_hour is None) else self._max_per_hour * self._model.hours_per_step, + lower=-np.inf if (self._min_per_hour is None) else self._min_per_hour * self._model.aggregation_weight, + upper=np.inf if (self._max_per_hour is None) else self._max_per_hour * self._model.aggregation_weight, coords=self._model.get_coords(self._dims), short_name='per_timestep', ) self._eq_total_per_timestep = self.add_constraints(self.total_per_timestep == 0, short_name='per_timestep') - # Add it to the total (apply timestep weights if available for typical periods) - if hasattr(self._model, 'timestep_weights') and self._model.timestep_weights is not None: - self._eq_total.lhs -= (self.total_per_timestep * self._model.timestep_weights).sum(dim='time') - else: - self._eq_total.lhs -= self.total_per_timestep.sum(dim='time') + # Add it to the total (timestep_weight handles cluster representation, defaults to 1.0) + self._eq_total.lhs -= (self.total_per_timestep * self._model.timestep_weight).sum(dim='time') def add_share( self, diff --git 
a/flixopt/flow_system.py b/flixopt/flow_system.py index 70aaa55ee..1a60c508c 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -67,6 +67,10 @@ class FlowSystem(Interface, CompositeContainerMixin[Element]): scenario_weights: The weights of each scenario. If None, all scenarios have the same weight (normalized to 1). Period weights are always computed internally from the period index (like hours_per_timestep for time). The final `weights` array (accessible via `flow_system.model.objective_weights`) is computed as period_weights × normalized_scenario_weights, with normalization applied to the scenario weights by default. + timestep_weight: Weight for each timestep representing cluster representation count. + If None (default), all timesteps have weight 1.0. Used by cluster_reduce() to specify + how many original timesteps each cluster represents. Combined with hours_per_timestep + via aggregation_weight for proper time aggregation in clustered models. scenario_independent_sizes: Controls whether investment sizes are equalized across scenarios. - True: All sizes are shared/equalized across scenarios - False: All sizes are optimized separately per scenario @@ -170,6 +174,7 @@ def __init__( hours_of_previous_timesteps: int | float | np.ndarray | None = None, weight_of_last_period: int | float | None = None, scenario_weights: Numeric_S | None = None, + timestep_weight: Numeric_TPS | None = None, scenario_independent_sizes: bool | list[str] = True, scenario_independent_flow_rates: bool | list[str] = False, name: str | None = None, @@ -189,6 +194,14 @@ def __init__( self.hours_per_timestep = self.fit_to_model_coords('hours_per_timestep', hours_per_timestep) + # Timestep weight for cluster_reduce optimization (default 1.0) + # Represents how many original timesteps each cluster represents + self.timestep_weight = self.fit_to_model_coords( + 'timestep_weight', + np.ones(len(self.timesteps)) if timestep_weight is None else timestep_weight, + dims=['time'], + ) + self.scenario_weights = scenario_weights # Use setter # Compute all period-related metadata using shared helper @@ -705,6 +718,9 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: scenario_weights=cls._resolve_dataarray_reference(reference_structure['scenario_weights'], arrays_dict) if 'scenario_weights' in reference_structure else None, + timestep_weight=cls._resolve_dataarray_reference(reference_structure['timestep_weight'], arrays_dict) + if 'timestep_weight' in reference_structure + else None, scenario_independent_sizes=reference_structure.get('scenario_independent_sizes', True), scenario_independent_flow_rates=reference_structure.get('scenario_independent_flow_rates', False), name=reference_structure.get('name'), @@ -1328,10 +1344,6 @@ def build_model(self, normalize_weights: bool = True) -> FlowSystem: self.connect_and_transform() self.create_model(normalize_weights) - # Apply timestep weighting before do_modeling() for cluster_reduce() - if self._cluster_info is not None: - self._apply_timestep_weights() - self.model.do_modeling() # Add clustering constraints if this is a clustered FlowSystem @@ -1347,16 +1359,24 @@ def build_model(self, normalize_weights: bool = True) -> FlowSystem: def _apply_timestep_weights(self) -> None: """Apply timestep weights to the model for cluster_reduce() optimization. - This multiplies operational effects (costs, emissions) by the number of - original segments each typical cluster represents. + .. deprecated:: + This method is deprecated. 
Timestep weights are now stored directly on FlowSystem + as `timestep_weight` and accessed via `FlowSystemModel.timestep_weight` and + `FlowSystemModel.aggregation_weight`. """ + warnings.warn( + '_apply_timestep_weights() is deprecated. Timestep weights are now stored directly ' + 'on FlowSystem as `timestep_weight` and accessed via FlowSystemModel.timestep_weight.', + DeprecationWarning, + stacklevel=2, + ) info = self._cluster_info if info is None: return timestep_weights = info['timestep_weights'] - # Store timestep weights on the model for use in effect calculations + # Store timestep weights on the model for backward compatibility self.model.timestep_weights = xr.DataArray( timestep_weights, coords={'time': self.timesteps}, diff --git a/flixopt/structure.py b/flixopt/structure.py index 88fd9ce31..a67dbb404 100644 --- a/flixopt/structure.py +++ b/flixopt/structure.py @@ -216,6 +216,24 @@ def hours_per_step(self): def hours_of_previous_timesteps(self): return self.flow_system.hours_of_previous_timesteps + @property + def timestep_weight(self) -> xr.DataArray: + """Timestep weight for cluster_reduce optimization. + + Represents how many original timesteps each cluster represents. + Default is 1.0 for all timesteps. + """ + return self.flow_system.timestep_weight + + @property + def aggregation_weight(self) -> xr.DataArray: + """Combined weight for time aggregation. + + Combines hours_per_step (timestep duration) and timestep_weight (cluster representation). + Use this for proper time aggregation in clustered models. + """ + return self.hours_per_step * self.timestep_weight + @property def scenario_weights(self) -> xr.DataArray: """ diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index be8d71ec2..6178ca55c 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -1356,6 +1356,9 @@ def cluster_reduce( # Create new FlowSystem with reduced timesteps reduced_fs = FlowSystem.from_dataset(ds_new) + # Set timestep_weight for proper aggregation in the reduced FlowSystem + reduced_fs.timestep_weight = reduced_fs.fit_to_model_coords('timestep_weight', timestep_weights, dims=['time']) + # Store cluster info for later use during modeling and expand_solution() reduced_fs._cluster_info = { 'clustering_results': clustering_results, # Dict keyed by (period, scenario) From f33d31bf0dcc740f84d15cd40627aa6526a1b384 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 16:05:33 +0100 Subject: [PATCH 045/191] Weight Concepts (Final Naming) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit | Property | Stored On | Purpose | |--------------------|------------------------------|-------------------------------------------------------------------| | timestep_duration | FlowSystem / FlowSystemModel | Physical duration of each timestep in hours | | cluster_weight | FlowSystem / FlowSystemModel | How many original timesteps each cluster represents (default 1.0) | | aggregation_weight | FlowSystemModel (computed) | Combined = timestep_duration × cluster_weight | --- flixopt/components.py | 8 +-- flixopt/features.py | 12 ++-- flixopt/flow_system.py | 58 ++++++++--------- flixopt/optimization.py | 4 +- flixopt/results.py | 16 ++--- flixopt/statistics_accessor.py | 2 +- flixopt/structure.py | 19 +++--- flixopt/transform_accessor.py | 22 +++---- tests/deprecated/test_bus.py | 4 +- tests/deprecated/test_effect.py | 4 +- tests/deprecated/test_flow.py | 64 ++++++++++--------- 
tests/deprecated/test_flow_system_resample.py | 2 +- tests/deprecated/test_linear_converter.py | 8 +-- tests/deprecated/test_on_hours_computation.py | 14 ++-- tests/deprecated/test_storage.py | 16 ++--- tests/test_bus.py | 4 +- tests/test_effect.py | 4 +- tests/test_flow.py | 64 ++++++++++--------- tests/test_flow_system_resample.py | 2 +- tests/test_linear_converter.py | 8 +-- tests/test_on_hours_computation.py | 14 ++-- tests/test_storage.py | 16 ++--- 22 files changed, 185 insertions(+), 180 deletions(-) diff --git a/flixopt/components.py b/flixopt/components.py index 267c144af..6f982066c 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -901,7 +901,7 @@ def _do_modeling(self): charge_state = self.charge_state rel_loss = self.element.relative_loss_per_hour - hours_per_step = self._model.hours_per_step + timestep_duration = self._model.timestep_duration charge_rate = self.element.charging.submodel.flow_rate discharge_rate = self.element.discharging.submodel.flow_rate eff_charge = self.element.eta_charge @@ -909,9 +909,9 @@ def _do_modeling(self): self.add_constraints( charge_state.isel(time=slice(1, None)) - == charge_state.isel(time=slice(None, -1)) * ((1 - rel_loss) ** hours_per_step) - + charge_rate * eff_charge * hours_per_step - - discharge_rate * hours_per_step / eff_discharge, + == charge_state.isel(time=slice(None, -1)) * ((1 - rel_loss) ** timestep_duration) + + charge_rate * eff_charge * timestep_duration + - discharge_rate * timestep_duration / eff_discharge, short_name='charge_state', ) diff --git a/flixopt/features.py b/flixopt/features.py index a52e0e128..5d890ebf9 100644 --- a/flixopt/features.py +++ b/flixopt/features.py @@ -242,7 +242,7 @@ def _do_modeling(self): short_name='uptime', minimum_duration=self.parameters.min_uptime, maximum_duration=self.parameters.max_uptime, - duration_per_step=self.hours_per_step, + duration_per_step=self.timestep_duration, duration_dim='time', previous_duration=self._get_previous_uptime(), ) @@ -255,7 +255,7 @@ def _do_modeling(self): short_name='downtime', minimum_duration=self.parameters.min_downtime, maximum_duration=self.parameters.max_downtime, - duration_per_step=self.hours_per_step, + duration_per_step=self.timestep_duration, duration_dim='time', previous_duration=self._get_previous_downtime(), ) @@ -330,7 +330,7 @@ def _get_previous_uptime(self): Returns 0 if no previous status is provided (assumes previously inactive). """ - hours_per_step = self._model.hours_per_step.isel(time=0).min().item() + hours_per_step = self._model.timestep_duration.isel(time=0).min().item() if self._previous_status is None: return 0 else: @@ -341,7 +341,7 @@ def _get_previous_downtime(self): Returns one timestep duration if no previous status is provided (assumes previously inactive). 
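The storage balance rewritten in the components.py hunk above can be sanity-checked numerically; all values here are invented and the names mirror, but are not, the patch's identifiers:

```python
# charge_state[t+1] = charge_state[t] * (1 - rel_loss)**dt
#                     + charge_rate * eta_charge * dt
#                     - discharge_rate * dt / eta_discharge
rel_loss = 0.01               # 1 % loss per hour
dt = 1.0                      # timestep_duration in hours
eta_charge = eta_discharge = 0.9
charge_state_t = 100.0        # kWh currently stored
charge_rate = 10.0            # kW charging
discharge_rate = 5.0          # kW discharging

charge_state_next = (
    charge_state_t * (1 - rel_loss) ** dt
    + charge_rate * eta_charge * dt
    - discharge_rate * dt / eta_discharge
)
# 100 * 0.99 + 9.0 - 5.556 is roughly 102.44 kWh
assert abs(charge_state_next - 102.4444) < 1e-3
```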
""" - hours_per_step = self._model.hours_per_step.isel(time=0).min().item() + hours_per_step = self._model.timestep_duration.isel(time=0).min().item() if self._previous_status is None: return hours_per_step else: @@ -620,8 +620,8 @@ def _do_modeling(self): self._eq_total_per_timestep = self.add_constraints(self.total_per_timestep == 0, short_name='per_timestep') - # Add it to the total (timestep_weight handles cluster representation, defaults to 1.0) - self._eq_total.lhs -= (self.total_per_timestep * self._model.timestep_weight).sum(dim='time') + # Add it to the total (cluster_weight handles cluster representation, defaults to 1.0) + self._eq_total.lhs -= (self.total_per_timestep * self._model.cluster_weight).sum(dim='time') def add_share( self, diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 1a60c508c..357b66493 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -65,11 +65,11 @@ class FlowSystem(Interface, CompositeContainerMixin[Element]): weight_of_last_period: Weight/duration of the last period. If None, computed from the last period interval. Used for calculating sums over periods in multi-period models. scenario_weights: The weights of each scenario. If None, all scenarios have the same weight (normalized to 1). - Period weights are always computed internally from the period index (like hours_per_timestep for time). + Period weights are always computed internally from the period index (like timestep_duration for time). The final `weights` array (accessible via `flow_system.model.objective_weights`) is computed as period_weights × normalized_scenario_weights, with normalization applied to the scenario weights by default. - timestep_weight: Weight for each timestep representing cluster representation count. + cluster_weight: Weight for each timestep representing cluster representation count. If None (default), all timesteps have weight 1.0. Used by cluster_reduce() to specify - how many original timesteps each cluster represents. Combined with hours_per_timestep + how many original timesteps each cluster represents. Combined with timestep_duration via aggregation_weight for proper time aggregation in clustered models. scenario_independent_sizes: Controls whether investment sizes are equalized across scenarios. 
- True: All sizes are shared/equalized across scenarios @@ -174,7 +174,7 @@ def __init__( hours_of_previous_timesteps: int | float | np.ndarray | None = None, weight_of_last_period: int | float | None = None, scenario_weights: Numeric_S | None = None, - timestep_weight: Numeric_TPS | None = None, + cluster_weight: Numeric_TPS | None = None, scenario_independent_sizes: bool | list[str] = True, scenario_independent_flow_rates: bool | list[str] = False, name: str | None = None, @@ -186,19 +186,19 @@ def __init__( self.timesteps_extra, self.hours_of_last_timestep, self.hours_of_previous_timesteps, - hours_per_timestep, + timestep_duration, ) = self._compute_time_metadata(self.timesteps, hours_of_last_timestep, hours_of_previous_timesteps) self.periods = None if periods is None else self._validate_periods(periods) self.scenarios = None if scenarios is None else self._validate_scenarios(scenarios) - self.hours_per_timestep = self.fit_to_model_coords('hours_per_timestep', hours_per_timestep) + self.timestep_duration = self.fit_to_model_coords('timestep_duration', timestep_duration) - # Timestep weight for cluster_reduce optimization (default 1.0) + # Cluster weight for cluster_reduce optimization (default 1.0) # Represents how many original timesteps each cluster represents - self.timestep_weight = self.fit_to_model_coords( - 'timestep_weight', - np.ones(len(self.timesteps)) if timestep_weight is None else timestep_weight, + self.cluster_weight = self.fit_to_model_coords( + 'cluster_weight', + np.ones(len(self.timesteps)) if cluster_weight is None else cluster_weight, dims=['time'], ) @@ -318,11 +318,11 @@ def _create_timesteps_with_extra( return pd.DatetimeIndex(timesteps.append(last_date), name='time') @staticmethod - def calculate_hours_per_timestep(timesteps_extra: pd.DatetimeIndex) -> xr.DataArray: - """Calculate duration of each timestep as a 1D DataArray.""" + def calculate_timestep_duration(timesteps_extra: pd.DatetimeIndex) -> xr.DataArray: + """Calculate duration of each timestep in hours as a 1D DataArray.""" hours_per_step = np.diff(timesteps_extra) / pd.Timedelta(hours=1) return xr.DataArray( - hours_per_step, coords={'time': timesteps_extra[:-1]}, dims='time', name='hours_per_timestep' + hours_per_step, coords={'time': timesteps_extra[:-1]}, dims='time', name='timestep_duration' ) @staticmethod @@ -393,22 +393,22 @@ def _compute_time_metadata( Can be a scalar or array. 
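What `calculate_timestep_duration` (shown above) computes, sketched with hypothetical timesteps:

```python
import numpy as np
import pandas as pd

# Four boundary timestamps include the extra step appended at the end,
# so three real timesteps of 2 hours each remain.
timesteps_extra = pd.date_range('2025-01-01', periods=4, freq='2h')
hours = np.diff(timesteps_extra) / pd.Timedelta(hours=1)
assert list(hours) == [2.0, 2.0, 2.0]  # one duration per real timestep
```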
Returns: - Tuple of (timesteps_extra, hours_of_last_timestep, hours_of_previous_timesteps, hours_per_timestep) + Tuple of (timesteps_extra, hours_of_last_timestep, hours_of_previous_timesteps, timestep_duration) """ # Create timesteps with extra step at the end timesteps_extra = cls._create_timesteps_with_extra(timesteps, hours_of_last_timestep) - # Calculate hours per timestep - hours_per_timestep = cls.calculate_hours_per_timestep(timesteps_extra) + # Calculate timestep duration + timestep_duration = cls.calculate_timestep_duration(timesteps_extra) # Extract hours_of_last_timestep if not provided if hours_of_last_timestep is None: - hours_of_last_timestep = hours_per_timestep.isel(time=-1).item() + hours_of_last_timestep = timestep_duration.isel(time=-1).item() # Compute hours_of_previous_timesteps (handles both None and provided cases) hours_of_previous_timesteps = cls._calculate_hours_of_previous_timesteps(timesteps, hours_of_previous_timesteps) - return timesteps_extra, hours_of_last_timestep, hours_of_previous_timesteps, hours_per_timestep + return timesteps_extra, hours_of_last_timestep, hours_of_previous_timesteps, timestep_duration @classmethod def _compute_period_metadata( @@ -453,7 +453,7 @@ def _update_time_metadata( """ Update time-related attributes and data variables in dataset based on its time index. - Recomputes hours_of_last_timestep, hours_of_previous_timesteps, and hours_per_timestep + Recomputes hours_of_last_timestep, hours_of_previous_timesteps, and timestep_duration from the dataset's time index when these parameters are None. This ensures time metadata stays synchronized with the actual timesteps after operations like resampling or selection. @@ -469,14 +469,14 @@ def _update_time_metadata( new_time_index = dataset.indexes.get('time') if new_time_index is not None and len(new_time_index) >= 2: # Use shared helper to compute all time metadata - _, hours_of_last_timestep, hours_of_previous_timesteps, hours_per_timestep = cls._compute_time_metadata( + _, hours_of_last_timestep, hours_of_previous_timesteps, timestep_duration = cls._compute_time_metadata( new_time_index, hours_of_last_timestep, hours_of_previous_timesteps ) - # Update hours_per_timestep DataArray if it exists in the dataset + # Update timestep_duration DataArray if it exists in the dataset # This prevents stale data after resampling operations - if 'hours_per_timestep' in dataset.data_vars: - dataset['hours_per_timestep'] = hours_per_timestep + if 'timestep_duration' in dataset.data_vars: + dataset['timestep_duration'] = timestep_duration # Update time-related attributes only when new values are provided/computed # This preserves existing metadata instead of overwriting with None @@ -718,8 +718,8 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: scenario_weights=cls._resolve_dataarray_reference(reference_structure['scenario_weights'], arrays_dict) if 'scenario_weights' in reference_structure else None, - timestep_weight=cls._resolve_dataarray_reference(reference_structure['timestep_weight'], arrays_dict) - if 'timestep_weight' in reference_structure + cluster_weight=cls._resolve_dataarray_reference(reference_structure['cluster_weight'], arrays_dict) + if 'cluster_weight' in reference_structure else None, scenario_independent_sizes=reference_structure.get('scenario_independent_sizes', True), scenario_independent_flow_rates=reference_structure.get('scenario_independent_flow_rates', False), @@ -1360,13 +1360,13 @@ def _apply_timestep_weights(self) -> None: """Apply timestep weights to the model for 
cluster_reduce() optimization. .. deprecated:: - This method is deprecated. Timestep weights are now stored directly on FlowSystem - as `timestep_weight` and accessed via `FlowSystemModel.timestep_weight` and + This method is deprecated. Cluster weights are now stored directly on FlowSystem + as `cluster_weight` and accessed via `FlowSystemModel.cluster_weight` and `FlowSystemModel.aggregation_weight`. """ warnings.warn( - '_apply_timestep_weights() is deprecated. Timestep weights are now stored directly ' - 'on FlowSystem as `timestep_weight` and accessed via FlowSystemModel.timestep_weight.', + '_apply_timestep_weights() is deprecated. Cluster weights are now stored directly ' + 'on FlowSystem as `cluster_weight` and accessed via FlowSystemModel.cluster_weight.', DeprecationWarning, stacklevel=2, ) diff --git a/flixopt/optimization.py b/flixopt/optimization.py index f9526caab..a576477e0 100644 --- a/flixopt/optimization.py +++ b/flixopt/optimization.py @@ -438,8 +438,8 @@ def _perform_clustering(self): t_start_agg = timeit.default_timer() # Validation - dt_min = float(self.flow_system.hours_per_timestep.min().item()) - dt_max = float(self.flow_system.hours_per_timestep.max().item()) + dt_min = float(self.flow_system.timestep_duration.min().item()) + dt_max = float(self.flow_system.timestep_duration.max().item()) if not dt_min == dt_max: raise ValueError( f'Clustering failed due to inconsistent time step sizes:delta_t varies from {dt_min} to {dt_max} hours.' diff --git a/flixopt/results.py b/flixopt/results.py index 16d88743a..8ec860244 100644 --- a/flixopt/results.py +++ b/flixopt/results.py @@ -99,7 +99,7 @@ class Results(CompositeContainerMixin['ComponentResults | BusResults | EffectRes buses: Dictionary mapping bus labels to BusResults objects effects: Dictionary mapping effect names to EffectResults objects timesteps_extra: Extended time index including boundary conditions - hours_per_timestep: Duration of each timestep for proper energy optimizations + timestep_duration: Duration of each timestep in hours for proper energy calculations Examples: Load and analyze saved results: @@ -285,7 +285,7 @@ def __init__( self.flows = ResultsContainer(elements=flows_dict, element_type_name='flow results', truncate_repr=10) self.timesteps_extra = self.solution.indexes['time'] - self.hours_per_timestep = FlowSystem.calculate_hours_per_timestep(self.timesteps_extra) + self.timestep_duration = FlowSystem.calculate_timestep_duration(self.timesteps_extra) self.scenarios = self.solution.indexes['scenario'] if 'scenario' in self.solution.indexes else None self.periods = self.solution.indexes['period'] if 'period' in self.solution.indexes else None @@ -623,7 +623,7 @@ def flow_hours( .. deprecated:: Use `results.plot.all_flow_hours` (Dataset) or - `results.flows['FlowLabel'].flow_rate * results.hours_per_timestep` instead. + `results.flows['FlowLabel'].flow_rate * results.timestep_duration` instead. 
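            A minimal sketch of the replacement pattern (the flow label
            'Boiler(Q_th)' is illustrative, not part of this API):

            >>> flow_hours = (
            ...     results.flows['Boiler(Q_th)'].flow_rate * results.timestep_duration
            ... )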
**Note**: The new API differs from this method: @@ -675,7 +675,7 @@ def flow_hours( stacklevel=2, ) if self._flow_hours is None: - self._flow_hours = (self.flow_rates() * self.hours_per_timestep).rename('flow_hours') + self._flow_hours = (self.flow_rates() * self.timestep_duration).rename('flow_hours') filters = {k: v for k, v in {'start': start, 'end': end, 'component': component}.items() if v is not None} return filter_dataarray_by_coord(self._flow_hours, **filters) @@ -1577,14 +1577,14 @@ def plot_node_balance_pie( dpi = plot_kwargs.pop('dpi', None) # None uses CONFIG.Plotting.default_dpi inputs = sanitize_dataset( - ds=self.solution[self.inputs] * self._results.hours_per_timestep, + ds=self.solution[self.inputs] * self._results.timestep_duration, threshold=1e-5, drop_small_vars=True, zero_small_values=True, drop_suffix='|', ) outputs = sanitize_dataset( - ds=self.solution[self.outputs] * self._results.hours_per_timestep, + ds=self.solution[self.outputs] * self._results.timestep_duration, threshold=1e-5, drop_small_vars=True, zero_small_values=True, @@ -1715,7 +1715,7 @@ def node_balance( ds, _ = _apply_selection_to_data(ds, select=select, drop=True) if unit_type == 'flow_hours': - ds = ds * self._results.hours_per_timestep + ds = ds * self._results.timestep_duration ds = ds.rename_vars({var: var.replace('flow_rate', 'flow_hours') for var in ds.data_vars}) return ds @@ -2016,7 +2016,7 @@ def flow_rate(self) -> xr.DataArray: @property def flow_hours(self) -> xr.DataArray: - return (self.flow_rate * self._results.hours_per_timestep).rename(f'{self.label}|flow_hours') + return (self.flow_rate * self._results.timestep_duration).rename(f'{self.label}|flow_hours') @property def size(self) -> xr.DataArray: diff --git a/flixopt/statistics_accessor.py b/flixopt/statistics_accessor.py index 952047cc5..535970840 100644 --- a/flixopt/statistics_accessor.py +++ b/flixopt/statistics_accessor.py @@ -471,7 +471,7 @@ def flow_hours(self) -> xr.Dataset: """ self._require_solution() if self._flow_hours is None: - hours = self._fs.hours_per_timestep + hours = self._fs.timestep_duration flow_rates = self.flow_rates # Multiply and preserve/transform attributes data_vars = {} diff --git a/flixopt/structure.py b/flixopt/structure.py index a67dbb404..15666c86b 100644 --- a/flixopt/structure.py +++ b/flixopt/structure.py @@ -209,30 +209,31 @@ def solution(self): return solution @property - def hours_per_step(self): - return self.flow_system.hours_per_timestep + def timestep_duration(self) -> xr.DataArray: + """Duration of each timestep in hours.""" + return self.flow_system.timestep_duration @property def hours_of_previous_timesteps(self): return self.flow_system.hours_of_previous_timesteps @property - def timestep_weight(self) -> xr.DataArray: - """Timestep weight for cluster_reduce optimization. + def cluster_weight(self) -> xr.DataArray: + """Cluster weight for cluster_reduce optimization. Represents how many original timesteps each cluster represents. Default is 1.0 for all timesteps. """ - return self.flow_system.timestep_weight + return self.flow_system.cluster_weight @property def aggregation_weight(self) -> xr.DataArray: """Combined weight for time aggregation. - Combines hours_per_step (timestep duration) and timestep_weight (cluster representation). + Combines timestep_duration (physical duration) and cluster_weight (cluster representation). Use this for proper time aggregation in clustered models. 
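
        A hedged sketch of typical use (the variable label is illustrative);
        in an unclustered system cluster_weight defaults to 1.0, so this
        reduces to a plain timestep_duration weighting:

        >>> energy = (
        ...     model.variables['Sink(Wärme)|flow_rate'] * model.aggregation_weight
        ... ).sum('time')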
""" - return self.hours_per_step * self.timestep_weight + return self.timestep_duration * self.cluster_weight @property def scenario_weights(self) -> xr.DataArray: @@ -1721,8 +1722,8 @@ def __repr__(self) -> str: return f'{model_string}\n{"=" * len(model_string)}\n\n{all_sections}' @property - def hours_per_step(self): - return self._model.hours_per_step + def timestep_duration(self): + return self._model.timestep_duration def _do_modeling(self): """ diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 6178ca55c..37fc508af 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -182,8 +182,8 @@ def _cluster_simple( from .core import DataConverter, TimeSeriesData, drop_constant_arrays # Validation - dt_min = float(self._fs.hours_per_timestep.min().item()) - dt_max = float(self._fs.hours_per_timestep.max().item()) + dt_min = float(self._fs.timestep_duration.min().item()) + dt_max = float(self._fs.timestep_duration.max().item()) if dt_min != dt_max: raise ValueError( f'Clustering failed due to inconsistent time step sizes: ' @@ -253,8 +253,8 @@ def _cluster_multi_dimensional( from .core import DataConverter, TimeSeriesData, drop_constant_arrays # Validation - dt_min = float(self._fs.hours_per_timestep.min().item()) - dt_max = float(self._fs.hours_per_timestep.max().item()) + dt_min = float(self._fs.timestep_duration.min().item()) + dt_max = float(self._fs.timestep_duration.max().item()) if dt_min != dt_max: raise ValueError( f'Clustering failed due to inconsistent time step sizes: ' @@ -1129,8 +1129,8 @@ def cluster_reduce( hours_per_cluster = float(cluster_duration) # Validation - dt_min = float(self._fs.hours_per_timestep.min().item()) - dt_max = float(self._fs.hours_per_timestep.max().item()) + dt_min = float(self._fs.timestep_duration.min().item()) + dt_max = float(self._fs.timestep_duration.max().item()) if dt_min != dt_max: raise ValueError( f'cluster_reduce() failed due to inconsistent time step sizes: ' @@ -1351,13 +1351,13 @@ def cluster_reduce( # Update metadata ds_new.attrs['timesteps_per_cluster'] = timesteps_per_cluster - ds_new.attrs['hours_per_timestep'] = dt_min + ds_new.attrs['timestep_duration'] = dt_min # Create new FlowSystem with reduced timesteps reduced_fs = FlowSystem.from_dataset(ds_new) - # Set timestep_weight for proper aggregation in the reduced FlowSystem - reduced_fs.timestep_weight = reduced_fs.fit_to_model_coords('timestep_weight', timestep_weights, dims=['time']) + # Set cluster_weight for proper aggregation in the reduced FlowSystem + reduced_fs.cluster_weight = reduced_fs.fit_to_model_coords('cluster_weight', timestep_weights, dims=['time']) # Store cluster info for later use during modeling and expand_solution() reduced_fs._cluster_info = { @@ -1570,8 +1570,8 @@ def expand_data(da: xr.DataArray) -> xr.DataArray: expanded_ds = xr.Dataset(expanded_ds_data, attrs=reduced_ds.attrs) expanded_ds = expanded_ds.assign_coords(time=original_timesteps) - # Copy hours_per_timestep from original - expanded_ds.attrs['hours_per_timestep'] = original_fs.hours_per_timestep.values.tolist() + # Copy timestep_duration from original + expanded_ds.attrs['timestep_duration'] = original_fs.timestep_duration.values.tolist() # Create the expanded FlowSystem from the expanded dataset expanded_fs = FlowSystem.from_dataset(expanded_ds) diff --git a/tests/deprecated/test_bus.py b/tests/deprecated/test_bus.py index cc49a2073..9bb7ddbe3 100644 --- a/tests/deprecated/test_bus.py +++ b/tests/deprecated/test_bus.py @@ -74,8 +74,8 @@ def 
test_bus_penalty(self, basic_flow_system_linopy_coords, coords_config): assert_conequal( model.constraints['TestBus->Penalty(temporal)'], model.variables['TestBus->Penalty(temporal)'] - == model.variables['TestBus|virtual_supply'] * 1e5 * model.hours_per_step - + model.variables['TestBus|virtual_demand'] * 1e5 * model.hours_per_step, + == model.variables['TestBus|virtual_supply'] * 1e5 * model.timestep_duration + + model.variables['TestBus|virtual_demand'] * 1e5 * model.timestep_duration, ) def test_bus_with_coords(self, basic_flow_system_linopy_coords, coords_config): diff --git a/tests/deprecated/test_effect.py b/tests/deprecated/test_effect.py index b3bb278f0..1cf625c1b 100644 --- a/tests/deprecated/test_effect.py +++ b/tests/deprecated/test_effect.py @@ -130,8 +130,8 @@ def test_bounds(self, basic_flow_system_linopy_coords, coords_config): assert_var_equal( model.variables['Effect1(temporal)|per_timestep'], model.add_variables( - lower=4.0 * model.hours_per_step, - upper=4.1 * model.hours_per_step, + lower=4.0 * model.timestep_duration, + upper=4.1 * model.timestep_duration, coords=model.get_coords(['time', 'period', 'scenario']), ), ) diff --git a/tests/deprecated/test_flow.py b/tests/deprecated/test_flow.py index 594bc1fbb..8e1ce1f53 100644 --- a/tests/deprecated/test_flow.py +++ b/tests/deprecated/test_flow.py @@ -23,7 +23,7 @@ def test_flow_minimal(self, basic_flow_system_linopy_coords, coords_config): assert_conequal( model.constraints['Sink(Wärme)|total_flow_hours'], flow.submodel.variables['Sink(Wärme)|total_flow_hours'] - == (flow.submodel.variables['Sink(Wärme)|flow_rate'] * model.hours_per_step).sum('time'), + == (flow.submodel.variables['Sink(Wärme)|flow_rate'] * model.timestep_duration).sum('time'), ) assert_var_equal(flow.submodel.flow_rate, model.add_variables(lower=0, upper=100, coords=model.get_coords())) assert_var_equal( @@ -61,7 +61,7 @@ def test_flow(self, basic_flow_system_linopy_coords, coords_config): assert_conequal( model.constraints['Sink(Wärme)|total_flow_hours'], flow.submodel.variables['Sink(Wärme)|total_flow_hours'] - == (flow.submodel.variables['Sink(Wärme)|flow_rate'] * model.hours_per_step).sum('time'), + == (flow.submodel.variables['Sink(Wärme)|flow_rate'] * model.timestep_duration).sum('time'), ) assert_var_equal( @@ -83,12 +83,12 @@ def test_flow(self, basic_flow_system_linopy_coords, coords_config): assert_conequal( model.constraints['Sink(Wärme)|load_factor_min'], - flow.submodel.variables['Sink(Wärme)|total_flow_hours'] >= model.hours_per_step.sum('time') * 0.1 * 100, + flow.submodel.variables['Sink(Wärme)|total_flow_hours'] >= model.timestep_duration.sum('time') * 0.1 * 100, ) assert_conequal( model.constraints['Sink(Wärme)|load_factor_max'], - flow.submodel.variables['Sink(Wärme)|total_flow_hours'] <= model.hours_per_step.sum('time') * 0.9 * 100, + flow.submodel.variables['Sink(Wärme)|total_flow_hours'] <= model.timestep_duration.sum('time') * 0.9 * 100, ) assert_sets_equal( @@ -129,13 +129,13 @@ def test_effects_per_flow_hour(self, basic_flow_system_linopy_coords, coords_con assert_conequal( model.constraints['Sink(Wärme)->costs(temporal)'], model.variables['Sink(Wärme)->costs(temporal)'] - == flow.submodel.variables['Sink(Wärme)|flow_rate'] * model.hours_per_step * costs_per_flow_hour, + == flow.submodel.variables['Sink(Wärme)|flow_rate'] * model.timestep_duration * costs_per_flow_hour, ) assert_conequal( model.constraints['Sink(Wärme)->CO2(temporal)'], model.variables['Sink(Wärme)->CO2(temporal)'] - == 
flow.submodel.variables['Sink(Wärme)|flow_rate'] * model.hours_per_step * co2_per_flow_hour, + == flow.submodel.variables['Sink(Wärme)|flow_rate'] * model.timestep_duration * co2_per_flow_hour, ) @@ -561,7 +561,7 @@ def test_flow_on(self, basic_flow_system_linopy_coords, coords_config): model.add_variables(binary=True, coords=model.get_coords()), ) # Upper bound is total hours when active_hours_max is not specified - total_hours = model.hours_per_step.sum('time') + total_hours = model.timestep_duration.sum('time') assert_var_equal( model.variables['Sink(Wärme)|active_hours'], model.add_variables(lower=0, upper=total_hours, coords=model.get_coords(['period', 'scenario'])), @@ -580,7 +580,7 @@ def test_flow_on(self, basic_flow_system_linopy_coords, coords_config): assert_conequal( model.constraints['Sink(Wärme)|active_hours'], flow.submodel.variables['Sink(Wärme)|active_hours'] - == (flow.submodel.variables['Sink(Wärme)|status'] * model.hours_per_step).sum('time'), + == (flow.submodel.variables['Sink(Wärme)|status'] * model.timestep_duration).sum('time'), ) def test_effects_per_active_hour(self, basic_flow_system_linopy_coords, coords_config): @@ -635,13 +635,13 @@ def test_effects_per_active_hour(self, basic_flow_system_linopy_coords, coords_c assert_conequal( model.constraints['Sink(Wärme)->costs(temporal)'], model.variables['Sink(Wärme)->costs(temporal)'] - == flow.submodel.variables['Sink(Wärme)|status'] * model.hours_per_step * costs_per_running_hour, + == flow.submodel.variables['Sink(Wärme)|status'] * model.timestep_duration * costs_per_running_hour, ) assert_conequal( model.constraints['Sink(Wärme)->CO2(temporal)'], model.variables['Sink(Wärme)->CO2(temporal)'] - == flow.submodel.variables['Sink(Wärme)|status'] * model.hours_per_step * co2_per_running_hour, + == flow.submodel.variables['Sink(Wärme)|status'] * model.timestep_duration * co2_per_running_hour, ) def test_consecutive_on_hours(self, basic_flow_system_linopy_coords, coords_config): @@ -687,7 +687,7 @@ def test_consecutive_on_hours(self, basic_flow_system_linopy_coords, coords_conf model.add_variables(lower=0, upper=8, coords=model.get_coords()), ) - mega = model.hours_per_step.sum('time') + mega = model.timestep_duration.sum('time') assert_conequal( model.constraints['Sink(Wärme)|uptime|ub'], @@ -698,7 +698,7 @@ def test_consecutive_on_hours(self, basic_flow_system_linopy_coords, coords_conf model.constraints['Sink(Wärme)|uptime|forward'], model.variables['Sink(Wärme)|uptime'].isel(time=slice(1, None)) <= model.variables['Sink(Wärme)|uptime'].isel(time=slice(None, -1)) - + model.hours_per_step.isel(time=slice(None, -1)), + + model.timestep_duration.isel(time=slice(None, -1)), ) # eq: duration(t) >= duration(t - 1) + dt(t) + (On(t) - 1) * BIG @@ -706,14 +706,14 @@ def test_consecutive_on_hours(self, basic_flow_system_linopy_coords, coords_conf model.constraints['Sink(Wärme)|uptime|backward'], model.variables['Sink(Wärme)|uptime'].isel(time=slice(1, None)) >= model.variables['Sink(Wärme)|uptime'].isel(time=slice(None, -1)) - + model.hours_per_step.isel(time=slice(None, -1)) + + model.timestep_duration.isel(time=slice(None, -1)) + (model.variables['Sink(Wärme)|status'].isel(time=slice(1, None)) - 1) * mega, ) assert_conequal( model.constraints['Sink(Wärme)|uptime|initial'], model.variables['Sink(Wärme)|uptime'].isel(time=0) - == model.variables['Sink(Wärme)|status'].isel(time=0) * model.hours_per_step.isel(time=0), + == model.variables['Sink(Wärme)|status'].isel(time=0) * model.timestep_duration.isel(time=0), ) 
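            # The initial constraint asserted above pins uptime(0) to
            # status(0) * dt(0): a unit that starts active has accrued exactly
            # one timestep's duration of uptime at t=0.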
assert_conequal( @@ -768,7 +768,7 @@ def test_consecutive_on_hours_previous(self, basic_flow_system_linopy_coords, co model.add_variables(lower=0, upper=8, coords=model.get_coords()), ) - mega = model.hours_per_step.sum('time') + model.hours_per_step.isel(time=0) * 3 + mega = model.timestep_duration.sum('time') + model.timestep_duration.isel(time=0) * 3 assert_conequal( model.constraints['Sink(Wärme)|uptime|ub'], @@ -779,7 +779,7 @@ def test_consecutive_on_hours_previous(self, basic_flow_system_linopy_coords, co model.constraints['Sink(Wärme)|uptime|forward'], model.variables['Sink(Wärme)|uptime'].isel(time=slice(1, None)) <= model.variables['Sink(Wärme)|uptime'].isel(time=slice(None, -1)) - + model.hours_per_step.isel(time=slice(None, -1)), + + model.timestep_duration.isel(time=slice(None, -1)), ) # eq: duration(t) >= duration(t - 1) + dt(t) + (On(t) - 1) * BIG @@ -787,14 +787,14 @@ def test_consecutive_on_hours_previous(self, basic_flow_system_linopy_coords, co model.constraints['Sink(Wärme)|uptime|backward'], model.variables['Sink(Wärme)|uptime'].isel(time=slice(1, None)) >= model.variables['Sink(Wärme)|uptime'].isel(time=slice(None, -1)) - + model.hours_per_step.isel(time=slice(None, -1)) + + model.timestep_duration.isel(time=slice(None, -1)) + (model.variables['Sink(Wärme)|status'].isel(time=slice(1, None)) - 1) * mega, ) assert_conequal( model.constraints['Sink(Wärme)|uptime|initial'], model.variables['Sink(Wärme)|uptime'].isel(time=0) - == model.variables['Sink(Wärme)|status'].isel(time=0) * (model.hours_per_step.isel(time=0) * (1 + 3)), + == model.variables['Sink(Wärme)|status'].isel(time=0) * (model.timestep_duration.isel(time=0) * (1 + 3)), ) assert_conequal( @@ -850,7 +850,9 @@ def test_consecutive_off_hours(self, basic_flow_system_linopy_coords, coords_con model.add_variables(lower=0, upper=12, coords=model.get_coords()), ) - mega = model.hours_per_step.sum('time') + model.hours_per_step.isel(time=0) * 1 # previously inactive for 1h + mega = ( + model.timestep_duration.sum('time') + model.timestep_duration.isel(time=0) * 1 + ) # previously inactive for 1h assert_conequal( model.constraints['Sink(Wärme)|downtime|ub'], @@ -861,7 +863,7 @@ def test_consecutive_off_hours(self, basic_flow_system_linopy_coords, coords_con model.constraints['Sink(Wärme)|downtime|forward'], model.variables['Sink(Wärme)|downtime'].isel(time=slice(1, None)) <= model.variables['Sink(Wärme)|downtime'].isel(time=slice(None, -1)) - + model.hours_per_step.isel(time=slice(None, -1)), + + model.timestep_duration.isel(time=slice(None, -1)), ) # eq: duration(t) >= duration(t - 1) + dt(t) + (On(t) - 1) * BIG @@ -869,14 +871,14 @@ def test_consecutive_off_hours(self, basic_flow_system_linopy_coords, coords_con model.constraints['Sink(Wärme)|downtime|backward'], model.variables['Sink(Wärme)|downtime'].isel(time=slice(1, None)) >= model.variables['Sink(Wärme)|downtime'].isel(time=slice(None, -1)) - + model.hours_per_step.isel(time=slice(None, -1)) + + model.timestep_duration.isel(time=slice(None, -1)) + (model.variables['Sink(Wärme)|inactive'].isel(time=slice(1, None)) - 1) * mega, ) assert_conequal( model.constraints['Sink(Wärme)|downtime|initial'], model.variables['Sink(Wärme)|downtime'].isel(time=0) - == model.variables['Sink(Wärme)|inactive'].isel(time=0) * (model.hours_per_step.isel(time=0) * (1 + 1)), + == model.variables['Sink(Wärme)|inactive'].isel(time=0) * (model.timestep_duration.isel(time=0) * (1 + 1)), ) assert_conequal( @@ -933,7 +935,7 @@ def test_consecutive_off_hours_previous(self, 
basic_flow_system_linopy_coords, c model.add_variables(lower=0, upper=12, coords=model.get_coords()), ) - mega = model.hours_per_step.sum('time') + model.hours_per_step.isel(time=0) * 2 + mega = model.timestep_duration.sum('time') + model.timestep_duration.isel(time=0) * 2 assert_conequal( model.constraints['Sink(Wärme)|downtime|ub'], @@ -944,7 +946,7 @@ def test_consecutive_off_hours_previous(self, basic_flow_system_linopy_coords, c model.constraints['Sink(Wärme)|downtime|forward'], model.variables['Sink(Wärme)|downtime'].isel(time=slice(1, None)) <= model.variables['Sink(Wärme)|downtime'].isel(time=slice(None, -1)) - + model.hours_per_step.isel(time=slice(None, -1)), + + model.timestep_duration.isel(time=slice(None, -1)), ) # eq: duration(t) >= duration(t - 1) + dt(t) + (On(t) - 1) * BIG @@ -952,14 +954,14 @@ def test_consecutive_off_hours_previous(self, basic_flow_system_linopy_coords, c model.constraints['Sink(Wärme)|downtime|backward'], model.variables['Sink(Wärme)|downtime'].isel(time=slice(1, None)) >= model.variables['Sink(Wärme)|downtime'].isel(time=slice(None, -1)) - + model.hours_per_step.isel(time=slice(None, -1)) + + model.timestep_duration.isel(time=slice(None, -1)) + (model.variables['Sink(Wärme)|inactive'].isel(time=slice(1, None)) - 1) * mega, ) assert_conequal( model.constraints['Sink(Wärme)|downtime|initial'], model.variables['Sink(Wärme)|downtime'].isel(time=0) - == model.variables['Sink(Wärme)|inactive'].isel(time=0) * (model.hours_per_step.isel(time=0) * (1 + 2)), + == model.variables['Sink(Wärme)|inactive'].isel(time=0) * (model.timestep_duration.isel(time=0) * (1 + 2)), ) assert_conequal( @@ -1067,7 +1069,7 @@ def test_on_hours_limits(self, basic_flow_system_linopy_coords, coords_config): assert_conequal( model.constraints['Sink(Wärme)|active_hours'], flow.submodel.variables['Sink(Wärme)|active_hours'] - == (flow.submodel.variables['Sink(Wärme)|status'] * model.hours_per_step).sum('time'), + == (flow.submodel.variables['Sink(Wärme)|status'] * model.timestep_duration).sum('time'), ) @@ -1131,7 +1133,7 @@ def test_flow_on_invest_optional(self, basic_flow_system_linopy_coords, coords_c model.add_variables(binary=True, coords=model.get_coords()), ) # Upper bound is total hours when active_hours_max is not specified - total_hours = model.hours_per_step.sum('time') + total_hours = model.timestep_duration.sum('time') assert_var_equal( model.variables['Sink(Wärme)|active_hours'], model.add_variables(lower=0, upper=total_hours, coords=model.get_coords(['period', 'scenario'])), @@ -1157,7 +1159,7 @@ def test_flow_on_invest_optional(self, basic_flow_system_linopy_coords, coords_c assert_conequal( model.constraints['Sink(Wärme)|active_hours'], flow.submodel.variables['Sink(Wärme)|active_hours'] - == (flow.submodel.variables['Sink(Wärme)|status'] * model.hours_per_step).sum('time'), + == (flow.submodel.variables['Sink(Wärme)|status'] * model.timestep_duration).sum('time'), ) # Investment @@ -1233,7 +1235,7 @@ def test_flow_on_invest_non_optional(self, basic_flow_system_linopy_coords, coor model.add_variables(binary=True, coords=model.get_coords()), ) # Upper bound is total hours when active_hours_max is not specified - total_hours = model.hours_per_step.sum('time') + total_hours = model.timestep_duration.sum('time') assert_var_equal( model.variables['Sink(Wärme)|active_hours'], model.add_variables(lower=0, upper=total_hours, coords=model.get_coords(['period', 'scenario'])), @@ -1251,7 +1253,7 @@ def test_flow_on_invest_non_optional(self, basic_flow_system_linopy_coords, coor 
assert_conequal( model.constraints['Sink(Wärme)|active_hours'], flow.submodel.variables['Sink(Wärme)|active_hours'] - == (flow.submodel.variables['Sink(Wärme)|status'] * model.hours_per_step).sum('time'), + == (flow.submodel.variables['Sink(Wärme)|status'] * model.timestep_duration).sum('time'), ) # Investment diff --git a/tests/deprecated/test_flow_system_resample.py b/tests/deprecated/test_flow_system_resample.py index c76946f80..549f05208 100644 --- a/tests/deprecated/test_flow_system_resample.py +++ b/tests/deprecated/test_flow_system_resample.py @@ -128,7 +128,7 @@ def test_time_metadata_updated(simple_fs): """Test time metadata correctly updated.""" fs_r = simple_fs.resample('3h', method='mean') assert len(fs_r.timesteps) == 8 - assert_allclose(fs_r.hours_per_timestep.values, 3.0) + assert_allclose(fs_r.timestep_duration.values, 3.0) assert fs_r.hours_of_last_timestep == 3.0 diff --git a/tests/deprecated/test_linear_converter.py b/tests/deprecated/test_linear_converter.py index 57b911d64..d20d104d0 100644 --- a/tests/deprecated/test_linear_converter.py +++ b/tests/deprecated/test_linear_converter.py @@ -174,7 +174,7 @@ def test_linear_converter_with_status(self, basic_flow_system_linopy_coords, coo assert_conequal( model.constraints['Converter|active_hours'], model.variables['Converter|active_hours'] - == (model.variables['Converter|status'] * model.hours_per_step).sum('time'), + == (model.variables['Converter|status'] * model.timestep_duration).sum('time'), ) # Check conversion constraint @@ -188,7 +188,7 @@ def test_linear_converter_with_status(self, basic_flow_system_linopy_coords, coo assert_conequal( model.constraints['Converter->costs(temporal)'], model.variables['Converter->costs(temporal)'] - == model.variables['Converter|status'] * model.hours_per_step * 5, + == model.variables['Converter|status'] * model.timestep_duration * 5, ) def test_linear_converter_multidimensional(self, basic_flow_system_linopy_coords, coords_config): @@ -485,7 +485,7 @@ def test_piecewise_conversion_with_status(self, basic_flow_system_linopy_coords, assert 'Converter|active_hours' in model.constraints assert_conequal( model.constraints['Converter|active_hours'], - model['Converter|active_hours'] == (model['Converter|status'] * model.hours_per_step).sum('time'), + model['Converter|active_hours'] == (model['Converter|status'] * model.timestep_duration).sum('time'), ) # Verify that the costs effect is applied @@ -493,7 +493,7 @@ def test_piecewise_conversion_with_status(self, basic_flow_system_linopy_coords, assert_conequal( model.constraints['Converter->costs(temporal)'], model.variables['Converter->costs(temporal)'] - == model.variables['Converter|status'] * model.hours_per_step * 5, + == model.variables['Converter|status'] * model.timestep_duration * 5, ) diff --git a/tests/deprecated/test_on_hours_computation.py b/tests/deprecated/test_on_hours_computation.py index 578fd7792..c74332565 100644 --- a/tests/deprecated/test_on_hours_computation.py +++ b/tests/deprecated/test_on_hours_computation.py @@ -9,7 +9,7 @@ class TestComputeConsecutiveDuration: """Tests for the compute_consecutive_hours_in_state static method.""" @pytest.mark.parametrize( - 'binary_values, hours_per_timestep, expected', + 'binary_values, timestep_duration, expected', [ # Case 1: Single timestep DataArrays (xr.DataArray([1], dims=['time']), 5, 5), @@ -26,22 +26,22 @@ class TestComputeConsecutiveDuration: (xr.DataArray([0, 1, 1, 1, 0, 0], dims=['time']), 1, 0), # ends with 0 ], ) - def test_compute_duration(self, binary_values, 
hours_per_timestep, expected): + def test_compute_duration(self, binary_values, timestep_duration, expected): """Test compute_consecutive_hours_in_state with various inputs.""" - result = ModelingUtilities.compute_consecutive_hours_in_state(binary_values, hours_per_timestep) + result = ModelingUtilities.compute_consecutive_hours_in_state(binary_values, timestep_duration) assert np.isclose(result, expected) @pytest.mark.parametrize( - 'binary_values, hours_per_timestep', + 'binary_values, timestep_duration', [ - # Case: hours_per_timestep must be scalar + # Case: timestep_duration must be scalar (xr.DataArray([1, 1, 1, 1, 1], dims=['time']), np.array([1, 2])), ], ) - def test_compute_duration_raises_error(self, binary_values, hours_per_timestep): + def test_compute_duration_raises_error(self, binary_values, timestep_duration): """Test error conditions.""" with pytest.raises(TypeError): - ModelingUtilities.compute_consecutive_hours_in_state(binary_values, hours_per_timestep) + ModelingUtilities.compute_consecutive_hours_in_state(binary_values, timestep_duration) class TestComputePreviousOnStates: diff --git a/tests/deprecated/test_storage.py b/tests/deprecated/test_storage.py index 15170a321..3fd47fbf8 100644 --- a/tests/deprecated/test_storage.py +++ b/tests/deprecated/test_storage.py @@ -73,8 +73,8 @@ def test_basic_storage(self, basic_flow_system_linopy_coords, coords_config): model.constraints['TestStorage|charge_state'], charge_state.isel(time=slice(1, None)) == charge_state.isel(time=slice(None, -1)) - + model.variables['TestStorage(Q_th_in)|flow_rate'] * model.hours_per_step - - model.variables['TestStorage(Q_th_out)|flow_rate'] * model.hours_per_step, + + model.variables['TestStorage(Q_th_in)|flow_rate'] * model.timestep_duration + - model.variables['TestStorage(Q_th_out)|flow_rate'] * model.timestep_duration, ) # Check initial charge state constraint assert_conequal( @@ -146,7 +146,7 @@ def test_lossy_storage(self, basic_flow_system_linopy_coords, coords_config): charge_state = model.variables['TestStorage|charge_state'] rel_loss = 0.05 - hours_per_step = model.hours_per_step + timestep_duration = model.timestep_duration charge_rate = model.variables['TestStorage(Q_th_in)|flow_rate'] discharge_rate = model.variables['TestStorage(Q_th_out)|flow_rate'] eff_charge = 0.9 @@ -155,9 +155,9 @@ def test_lossy_storage(self, basic_flow_system_linopy_coords, coords_config): assert_conequal( model.constraints['TestStorage|charge_state'], charge_state.isel(time=slice(1, None)) - == charge_state.isel(time=slice(None, -1)) * (1 - rel_loss) ** hours_per_step - + charge_rate * eff_charge * hours_per_step - - discharge_rate / eff_discharge * hours_per_step, + == charge_state.isel(time=slice(None, -1)) * (1 - rel_loss) ** timestep_duration + + charge_rate * eff_charge * timestep_duration + - discharge_rate / eff_discharge * timestep_duration, ) # Check initial charge state constraint @@ -242,8 +242,8 @@ def test_charge_state_bounds(self, basic_flow_system_linopy_coords, coords_confi model.constraints['TestStorage|charge_state'], charge_state.isel(time=slice(1, None)) == charge_state.isel(time=slice(None, -1)) - + model.variables['TestStorage(Q_th_in)|flow_rate'] * model.hours_per_step - - model.variables['TestStorage(Q_th_out)|flow_rate'] * model.hours_per_step, + + model.variables['TestStorage(Q_th_in)|flow_rate'] * model.timestep_duration + - model.variables['TestStorage(Q_th_out)|flow_rate'] * model.timestep_duration, ) # Check initial charge state constraint assert_conequal( diff --git 
a/tests/test_bus.py b/tests/test_bus.py index cc49a2073..9bb7ddbe3 100644 --- a/tests/test_bus.py +++ b/tests/test_bus.py @@ -74,8 +74,8 @@ def test_bus_penalty(self, basic_flow_system_linopy_coords, coords_config): assert_conequal( model.constraints['TestBus->Penalty(temporal)'], model.variables['TestBus->Penalty(temporal)'] - == model.variables['TestBus|virtual_supply'] * 1e5 * model.hours_per_step - + model.variables['TestBus|virtual_demand'] * 1e5 * model.hours_per_step, + == model.variables['TestBus|virtual_supply'] * 1e5 * model.timestep_duration + + model.variables['TestBus|virtual_demand'] * 1e5 * model.timestep_duration, ) def test_bus_with_coords(self, basic_flow_system_linopy_coords, coords_config): diff --git a/tests/test_effect.py b/tests/test_effect.py index 015e054eb..60fbb0166 100644 --- a/tests/test_effect.py +++ b/tests/test_effect.py @@ -129,8 +129,8 @@ def test_bounds(self, basic_flow_system_linopy_coords, coords_config): assert_var_equal( model.variables['Effect1(temporal)|per_timestep'], model.add_variables( - lower=4.0 * model.hours_per_step, - upper=4.1 * model.hours_per_step, + lower=4.0 * model.timestep_duration, + upper=4.1 * model.timestep_duration, coords=model.get_coords(['time', 'period', 'scenario']), ), ) diff --git a/tests/test_flow.py b/tests/test_flow.py index 594bc1fbb..8e1ce1f53 100644 --- a/tests/test_flow.py +++ b/tests/test_flow.py @@ -23,7 +23,7 @@ def test_flow_minimal(self, basic_flow_system_linopy_coords, coords_config): assert_conequal( model.constraints['Sink(Wärme)|total_flow_hours'], flow.submodel.variables['Sink(Wärme)|total_flow_hours'] - == (flow.submodel.variables['Sink(Wärme)|flow_rate'] * model.hours_per_step).sum('time'), + == (flow.submodel.variables['Sink(Wärme)|flow_rate'] * model.timestep_duration).sum('time'), ) assert_var_equal(flow.submodel.flow_rate, model.add_variables(lower=0, upper=100, coords=model.get_coords())) assert_var_equal( @@ -61,7 +61,7 @@ def test_flow(self, basic_flow_system_linopy_coords, coords_config): assert_conequal( model.constraints['Sink(Wärme)|total_flow_hours'], flow.submodel.variables['Sink(Wärme)|total_flow_hours'] - == (flow.submodel.variables['Sink(Wärme)|flow_rate'] * model.hours_per_step).sum('time'), + == (flow.submodel.variables['Sink(Wärme)|flow_rate'] * model.timestep_duration).sum('time'), ) assert_var_equal( @@ -83,12 +83,12 @@ def test_flow(self, basic_flow_system_linopy_coords, coords_config): assert_conequal( model.constraints['Sink(Wärme)|load_factor_min'], - flow.submodel.variables['Sink(Wärme)|total_flow_hours'] >= model.hours_per_step.sum('time') * 0.1 * 100, + flow.submodel.variables['Sink(Wärme)|total_flow_hours'] >= model.timestep_duration.sum('time') * 0.1 * 100, ) assert_conequal( model.constraints['Sink(Wärme)|load_factor_max'], - flow.submodel.variables['Sink(Wärme)|total_flow_hours'] <= model.hours_per_step.sum('time') * 0.9 * 100, + flow.submodel.variables['Sink(Wärme)|total_flow_hours'] <= model.timestep_duration.sum('time') * 0.9 * 100, ) assert_sets_equal( @@ -129,13 +129,13 @@ def test_effects_per_flow_hour(self, basic_flow_system_linopy_coords, coords_con assert_conequal( model.constraints['Sink(Wärme)->costs(temporal)'], model.variables['Sink(Wärme)->costs(temporal)'] - == flow.submodel.variables['Sink(Wärme)|flow_rate'] * model.hours_per_step * costs_per_flow_hour, + == flow.submodel.variables['Sink(Wärme)|flow_rate'] * model.timestep_duration * costs_per_flow_hour, ) assert_conequal( model.constraints['Sink(Wärme)->CO2(temporal)'], 
model.variables['Sink(Wärme)->CO2(temporal)'] - == flow.submodel.variables['Sink(Wärme)|flow_rate'] * model.hours_per_step * co2_per_flow_hour, + == flow.submodel.variables['Sink(Wärme)|flow_rate'] * model.timestep_duration * co2_per_flow_hour, ) @@ -561,7 +561,7 @@ def test_flow_on(self, basic_flow_system_linopy_coords, coords_config): model.add_variables(binary=True, coords=model.get_coords()), ) # Upper bound is total hours when active_hours_max is not specified - total_hours = model.hours_per_step.sum('time') + total_hours = model.timestep_duration.sum('time') assert_var_equal( model.variables['Sink(Wärme)|active_hours'], model.add_variables(lower=0, upper=total_hours, coords=model.get_coords(['period', 'scenario'])), @@ -580,7 +580,7 @@ def test_flow_on(self, basic_flow_system_linopy_coords, coords_config): assert_conequal( model.constraints['Sink(Wärme)|active_hours'], flow.submodel.variables['Sink(Wärme)|active_hours'] - == (flow.submodel.variables['Sink(Wärme)|status'] * model.hours_per_step).sum('time'), + == (flow.submodel.variables['Sink(Wärme)|status'] * model.timestep_duration).sum('time'), ) def test_effects_per_active_hour(self, basic_flow_system_linopy_coords, coords_config): @@ -635,13 +635,13 @@ def test_effects_per_active_hour(self, basic_flow_system_linopy_coords, coords_c assert_conequal( model.constraints['Sink(Wärme)->costs(temporal)'], model.variables['Sink(Wärme)->costs(temporal)'] - == flow.submodel.variables['Sink(Wärme)|status'] * model.hours_per_step * costs_per_running_hour, + == flow.submodel.variables['Sink(Wärme)|status'] * model.timestep_duration * costs_per_running_hour, ) assert_conequal( model.constraints['Sink(Wärme)->CO2(temporal)'], model.variables['Sink(Wärme)->CO2(temporal)'] - == flow.submodel.variables['Sink(Wärme)|status'] * model.hours_per_step * co2_per_running_hour, + == flow.submodel.variables['Sink(Wärme)|status'] * model.timestep_duration * co2_per_running_hour, ) def test_consecutive_on_hours(self, basic_flow_system_linopy_coords, coords_config): @@ -687,7 +687,7 @@ def test_consecutive_on_hours(self, basic_flow_system_linopy_coords, coords_conf model.add_variables(lower=0, upper=8, coords=model.get_coords()), ) - mega = model.hours_per_step.sum('time') + mega = model.timestep_duration.sum('time') assert_conequal( model.constraints['Sink(Wärme)|uptime|ub'], @@ -698,7 +698,7 @@ def test_consecutive_on_hours(self, basic_flow_system_linopy_coords, coords_conf model.constraints['Sink(Wärme)|uptime|forward'], model.variables['Sink(Wärme)|uptime'].isel(time=slice(1, None)) <= model.variables['Sink(Wärme)|uptime'].isel(time=slice(None, -1)) - + model.hours_per_step.isel(time=slice(None, -1)), + + model.timestep_duration.isel(time=slice(None, -1)), ) # eq: duration(t) >= duration(t - 1) + dt(t) + (On(t) - 1) * BIG @@ -706,14 +706,14 @@ def test_consecutive_on_hours(self, basic_flow_system_linopy_coords, coords_conf model.constraints['Sink(Wärme)|uptime|backward'], model.variables['Sink(Wärme)|uptime'].isel(time=slice(1, None)) >= model.variables['Sink(Wärme)|uptime'].isel(time=slice(None, -1)) - + model.hours_per_step.isel(time=slice(None, -1)) + + model.timestep_duration.isel(time=slice(None, -1)) + (model.variables['Sink(Wärme)|status'].isel(time=slice(1, None)) - 1) * mega, ) assert_conequal( model.constraints['Sink(Wärme)|uptime|initial'], model.variables['Sink(Wärme)|uptime'].isel(time=0) - == model.variables['Sink(Wärme)|status'].isel(time=0) * model.hours_per_step.isel(time=0), + == model.variables['Sink(Wärme)|status'].isel(time=0) * 
model.timestep_duration.isel(time=0), ) assert_conequal( @@ -768,7 +768,7 @@ def test_consecutive_on_hours_previous(self, basic_flow_system_linopy_coords, co model.add_variables(lower=0, upper=8, coords=model.get_coords()), ) - mega = model.hours_per_step.sum('time') + model.hours_per_step.isel(time=0) * 3 + mega = model.timestep_duration.sum('time') + model.timestep_duration.isel(time=0) * 3 assert_conequal( model.constraints['Sink(Wärme)|uptime|ub'], @@ -779,7 +779,7 @@ def test_consecutive_on_hours_previous(self, basic_flow_system_linopy_coords, co model.constraints['Sink(Wärme)|uptime|forward'], model.variables['Sink(Wärme)|uptime'].isel(time=slice(1, None)) <= model.variables['Sink(Wärme)|uptime'].isel(time=slice(None, -1)) - + model.hours_per_step.isel(time=slice(None, -1)), + + model.timestep_duration.isel(time=slice(None, -1)), ) # eq: duration(t) >= duration(t - 1) + dt(t) + (On(t) - 1) * BIG @@ -787,14 +787,14 @@ def test_consecutive_on_hours_previous(self, basic_flow_system_linopy_coords, co model.constraints['Sink(Wärme)|uptime|backward'], model.variables['Sink(Wärme)|uptime'].isel(time=slice(1, None)) >= model.variables['Sink(Wärme)|uptime'].isel(time=slice(None, -1)) - + model.hours_per_step.isel(time=slice(None, -1)) + + model.timestep_duration.isel(time=slice(None, -1)) + (model.variables['Sink(Wärme)|status'].isel(time=slice(1, None)) - 1) * mega, ) assert_conequal( model.constraints['Sink(Wärme)|uptime|initial'], model.variables['Sink(Wärme)|uptime'].isel(time=0) - == model.variables['Sink(Wärme)|status'].isel(time=0) * (model.hours_per_step.isel(time=0) * (1 + 3)), + == model.variables['Sink(Wärme)|status'].isel(time=0) * (model.timestep_duration.isel(time=0) * (1 + 3)), ) assert_conequal( @@ -850,7 +850,9 @@ def test_consecutive_off_hours(self, basic_flow_system_linopy_coords, coords_con model.add_variables(lower=0, upper=12, coords=model.get_coords()), ) - mega = model.hours_per_step.sum('time') + model.hours_per_step.isel(time=0) * 1 # previously inactive for 1h + mega = ( + model.timestep_duration.sum('time') + model.timestep_duration.isel(time=0) * 1 + ) # previously inactive for 1h assert_conequal( model.constraints['Sink(Wärme)|downtime|ub'], @@ -861,7 +863,7 @@ def test_consecutive_off_hours(self, basic_flow_system_linopy_coords, coords_con model.constraints['Sink(Wärme)|downtime|forward'], model.variables['Sink(Wärme)|downtime'].isel(time=slice(1, None)) <= model.variables['Sink(Wärme)|downtime'].isel(time=slice(None, -1)) - + model.hours_per_step.isel(time=slice(None, -1)), + + model.timestep_duration.isel(time=slice(None, -1)), ) # eq: duration(t) >= duration(t - 1) + dt(t) + (On(t) - 1) * BIG @@ -869,14 +871,14 @@ def test_consecutive_off_hours(self, basic_flow_system_linopy_coords, coords_con model.constraints['Sink(Wärme)|downtime|backward'], model.variables['Sink(Wärme)|downtime'].isel(time=slice(1, None)) >= model.variables['Sink(Wärme)|downtime'].isel(time=slice(None, -1)) - + model.hours_per_step.isel(time=slice(None, -1)) + + model.timestep_duration.isel(time=slice(None, -1)) + (model.variables['Sink(Wärme)|inactive'].isel(time=slice(1, None)) - 1) * mega, ) assert_conequal( model.constraints['Sink(Wärme)|downtime|initial'], model.variables['Sink(Wärme)|downtime'].isel(time=0) - == model.variables['Sink(Wärme)|inactive'].isel(time=0) * (model.hours_per_step.isel(time=0) * (1 + 1)), + == model.variables['Sink(Wärme)|inactive'].isel(time=0) * (model.timestep_duration.isel(time=0) * (1 + 1)), ) assert_conequal( @@ -933,7 +935,7 @@ def 
test_consecutive_off_hours_previous(self, basic_flow_system_linopy_coords, c model.add_variables(lower=0, upper=12, coords=model.get_coords()), ) - mega = model.hours_per_step.sum('time') + model.hours_per_step.isel(time=0) * 2 + mega = model.timestep_duration.sum('time') + model.timestep_duration.isel(time=0) * 2 assert_conequal( model.constraints['Sink(Wärme)|downtime|ub'], @@ -944,7 +946,7 @@ def test_consecutive_off_hours_previous(self, basic_flow_system_linopy_coords, c model.constraints['Sink(Wärme)|downtime|forward'], model.variables['Sink(Wärme)|downtime'].isel(time=slice(1, None)) <= model.variables['Sink(Wärme)|downtime'].isel(time=slice(None, -1)) - + model.hours_per_step.isel(time=slice(None, -1)), + + model.timestep_duration.isel(time=slice(None, -1)), ) # eq: duration(t) >= duration(t - 1) + dt(t) + (On(t) - 1) * BIG @@ -952,14 +954,14 @@ def test_consecutive_off_hours_previous(self, basic_flow_system_linopy_coords, c model.constraints['Sink(Wärme)|downtime|backward'], model.variables['Sink(Wärme)|downtime'].isel(time=slice(1, None)) >= model.variables['Sink(Wärme)|downtime'].isel(time=slice(None, -1)) - + model.hours_per_step.isel(time=slice(None, -1)) + + model.timestep_duration.isel(time=slice(None, -1)) + (model.variables['Sink(Wärme)|inactive'].isel(time=slice(1, None)) - 1) * mega, ) assert_conequal( model.constraints['Sink(Wärme)|downtime|initial'], model.variables['Sink(Wärme)|downtime'].isel(time=0) - == model.variables['Sink(Wärme)|inactive'].isel(time=0) * (model.hours_per_step.isel(time=0) * (1 + 2)), + == model.variables['Sink(Wärme)|inactive'].isel(time=0) * (model.timestep_duration.isel(time=0) * (1 + 2)), ) assert_conequal( @@ -1067,7 +1069,7 @@ def test_on_hours_limits(self, basic_flow_system_linopy_coords, coords_config): assert_conequal( model.constraints['Sink(Wärme)|active_hours'], flow.submodel.variables['Sink(Wärme)|active_hours'] - == (flow.submodel.variables['Sink(Wärme)|status'] * model.hours_per_step).sum('time'), + == (flow.submodel.variables['Sink(Wärme)|status'] * model.timestep_duration).sum('time'), ) @@ -1131,7 +1133,7 @@ def test_flow_on_invest_optional(self, basic_flow_system_linopy_coords, coords_c model.add_variables(binary=True, coords=model.get_coords()), ) # Upper bound is total hours when active_hours_max is not specified - total_hours = model.hours_per_step.sum('time') + total_hours = model.timestep_duration.sum('time') assert_var_equal( model.variables['Sink(Wärme)|active_hours'], model.add_variables(lower=0, upper=total_hours, coords=model.get_coords(['period', 'scenario'])), @@ -1157,7 +1159,7 @@ def test_flow_on_invest_optional(self, basic_flow_system_linopy_coords, coords_c assert_conequal( model.constraints['Sink(Wärme)|active_hours'], flow.submodel.variables['Sink(Wärme)|active_hours'] - == (flow.submodel.variables['Sink(Wärme)|status'] * model.hours_per_step).sum('time'), + == (flow.submodel.variables['Sink(Wärme)|status'] * model.timestep_duration).sum('time'), ) # Investment @@ -1233,7 +1235,7 @@ def test_flow_on_invest_non_optional(self, basic_flow_system_linopy_coords, coor model.add_variables(binary=True, coords=model.get_coords()), ) # Upper bound is total hours when active_hours_max is not specified - total_hours = model.hours_per_step.sum('time') + total_hours = model.timestep_duration.sum('time') assert_var_equal( model.variables['Sink(Wärme)|active_hours'], model.add_variables(lower=0, upper=total_hours, coords=model.get_coords(['period', 'scenario'])), @@ -1251,7 +1253,7 @@ def test_flow_on_invest_non_optional(self, 
basic_flow_system_linopy_coords, coor assert_conequal( model.constraints['Sink(Wärme)|active_hours'], flow.submodel.variables['Sink(Wärme)|active_hours'] - == (flow.submodel.variables['Sink(Wärme)|status'] * model.hours_per_step).sum('time'), + == (flow.submodel.variables['Sink(Wärme)|status'] * model.timestep_duration).sum('time'), ) # Investment diff --git a/tests/test_flow_system_resample.py b/tests/test_flow_system_resample.py index 7486b173c..dd5e19176 100644 --- a/tests/test_flow_system_resample.py +++ b/tests/test_flow_system_resample.py @@ -128,7 +128,7 @@ def test_time_metadata_updated(simple_fs): """Test time metadata correctly updated.""" fs_r = simple_fs.resample('3h', method='mean') assert len(fs_r.timesteps) == 8 - assert_allclose(fs_r.hours_per_timestep.values, 3.0) + assert_allclose(fs_r.timestep_duration.values, 3.0) assert fs_r.hours_of_last_timestep == 3.0 diff --git a/tests/test_linear_converter.py b/tests/test_linear_converter.py index 57b911d64..d20d104d0 100644 --- a/tests/test_linear_converter.py +++ b/tests/test_linear_converter.py @@ -174,7 +174,7 @@ def test_linear_converter_with_status(self, basic_flow_system_linopy_coords, coo assert_conequal( model.constraints['Converter|active_hours'], model.variables['Converter|active_hours'] - == (model.variables['Converter|status'] * model.hours_per_step).sum('time'), + == (model.variables['Converter|status'] * model.timestep_duration).sum('time'), ) # Check conversion constraint @@ -188,7 +188,7 @@ def test_linear_converter_with_status(self, basic_flow_system_linopy_coords, coo assert_conequal( model.constraints['Converter->costs(temporal)'], model.variables['Converter->costs(temporal)'] - == model.variables['Converter|status'] * model.hours_per_step * 5, + == model.variables['Converter|status'] * model.timestep_duration * 5, ) def test_linear_converter_multidimensional(self, basic_flow_system_linopy_coords, coords_config): @@ -485,7 +485,7 @@ def test_piecewise_conversion_with_status(self, basic_flow_system_linopy_coords, assert 'Converter|active_hours' in model.constraints assert_conequal( model.constraints['Converter|active_hours'], - model['Converter|active_hours'] == (model['Converter|status'] * model.hours_per_step).sum('time'), + model['Converter|active_hours'] == (model['Converter|status'] * model.timestep_duration).sum('time'), ) # Verify that the costs effect is applied @@ -493,7 +493,7 @@ def test_piecewise_conversion_with_status(self, basic_flow_system_linopy_coords, assert_conequal( model.constraints['Converter->costs(temporal)'], model.variables['Converter->costs(temporal)'] - == model.variables['Converter|status'] * model.hours_per_step * 5, + == model.variables['Converter|status'] * model.timestep_duration * 5, ) diff --git a/tests/test_on_hours_computation.py b/tests/test_on_hours_computation.py index 578fd7792..c74332565 100644 --- a/tests/test_on_hours_computation.py +++ b/tests/test_on_hours_computation.py @@ -9,7 +9,7 @@ class TestComputeConsecutiveDuration: """Tests for the compute_consecutive_hours_in_state static method.""" @pytest.mark.parametrize( - 'binary_values, hours_per_timestep, expected', + 'binary_values, timestep_duration, expected', [ # Case 1: Single timestep DataArrays (xr.DataArray([1], dims=['time']), 5, 5), @@ -26,22 +26,22 @@ class TestComputeConsecutiveDuration: (xr.DataArray([0, 1, 1, 1, 0, 0], dims=['time']), 1, 0), # ends with 0 ], ) - def test_compute_duration(self, binary_values, hours_per_timestep, expected): + def test_compute_duration(self, binary_values, 
timestep_duration, expected): """Test compute_consecutive_hours_in_state with various inputs.""" - result = ModelingUtilities.compute_consecutive_hours_in_state(binary_values, hours_per_timestep) + result = ModelingUtilities.compute_consecutive_hours_in_state(binary_values, timestep_duration) assert np.isclose(result, expected) @pytest.mark.parametrize( - 'binary_values, hours_per_timestep', + 'binary_values, timestep_duration', [ - # Case: hours_per_timestep must be scalar + # Case: timestep_duration must be scalar (xr.DataArray([1, 1, 1, 1, 1], dims=['time']), np.array([1, 2])), ], ) - def test_compute_duration_raises_error(self, binary_values, hours_per_timestep): + def test_compute_duration_raises_error(self, binary_values, timestep_duration): """Test error conditions.""" with pytest.raises(TypeError): - ModelingUtilities.compute_consecutive_hours_in_state(binary_values, hours_per_timestep) + ModelingUtilities.compute_consecutive_hours_in_state(binary_values, timestep_duration) class TestComputePreviousOnStates: diff --git a/tests/test_storage.py b/tests/test_storage.py index 15170a321..3fd47fbf8 100644 --- a/tests/test_storage.py +++ b/tests/test_storage.py @@ -73,8 +73,8 @@ def test_basic_storage(self, basic_flow_system_linopy_coords, coords_config): model.constraints['TestStorage|charge_state'], charge_state.isel(time=slice(1, None)) == charge_state.isel(time=slice(None, -1)) - + model.variables['TestStorage(Q_th_in)|flow_rate'] * model.hours_per_step - - model.variables['TestStorage(Q_th_out)|flow_rate'] * model.hours_per_step, + + model.variables['TestStorage(Q_th_in)|flow_rate'] * model.timestep_duration + - model.variables['TestStorage(Q_th_out)|flow_rate'] * model.timestep_duration, ) # Check initial charge state constraint assert_conequal( @@ -146,7 +146,7 @@ def test_lossy_storage(self, basic_flow_system_linopy_coords, coords_config): charge_state = model.variables['TestStorage|charge_state'] rel_loss = 0.05 - hours_per_step = model.hours_per_step + timestep_duration = model.timestep_duration charge_rate = model.variables['TestStorage(Q_th_in)|flow_rate'] discharge_rate = model.variables['TestStorage(Q_th_out)|flow_rate'] eff_charge = 0.9 @@ -155,9 +155,9 @@ def test_lossy_storage(self, basic_flow_system_linopy_coords, coords_config): assert_conequal( model.constraints['TestStorage|charge_state'], charge_state.isel(time=slice(1, None)) - == charge_state.isel(time=slice(None, -1)) * (1 - rel_loss) ** hours_per_step - + charge_rate * eff_charge * hours_per_step - - discharge_rate / eff_discharge * hours_per_step, + == charge_state.isel(time=slice(None, -1)) * (1 - rel_loss) ** timestep_duration + + charge_rate * eff_charge * timestep_duration + - discharge_rate / eff_discharge * timestep_duration, ) # Check initial charge state constraint @@ -242,8 +242,8 @@ def test_charge_state_bounds(self, basic_flow_system_linopy_coords, coords_confi model.constraints['TestStorage|charge_state'], charge_state.isel(time=slice(1, None)) == charge_state.isel(time=slice(None, -1)) - + model.variables['TestStorage(Q_th_in)|flow_rate'] * model.hours_per_step - - model.variables['TestStorage(Q_th_out)|flow_rate'] * model.hours_per_step, + + model.variables['TestStorage(Q_th_in)|flow_rate'] * model.timestep_duration + - model.variables['TestStorage(Q_th_out)|flow_rate'] * model.timestep_duration, ) # Check initial charge state constraint assert_conequal( From 7050db3b0290ab40a687b2326b551fdee53d9395 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 
Dec 2025 16:10:53 +0100 Subject: [PATCH 046/191] Temp --- flixopt/transform_accessor.py | 512 ++++++++++++++-------------------- 1 file changed, 209 insertions(+), 303 deletions(-) diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 37fc508af..ab231bde0 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -1119,90 +1119,57 @@ def cluster_reduce( - Storage linking adds SOC_boundary variables to track state between clusters """ from .clustering import Clustering - from .core import DataConverter, TimeSeriesData, drop_constant_arrays + from .core import TimeSeriesData, drop_constant_arrays from .flow_system import FlowSystem # Parse cluster_duration to hours - if isinstance(cluster_duration, str): - hours_per_cluster = pd.Timedelta(cluster_duration).total_seconds() / 3600 - else: - hours_per_cluster = float(cluster_duration) + hours_per_cluster = ( + pd.Timedelta(cluster_duration).total_seconds() / 3600 + if isinstance(cluster_duration, str) + else float(cluster_duration) + ) # Validation - dt_min = float(self._fs.timestep_duration.min().item()) - dt_max = float(self._fs.timestep_duration.max().item()) - if dt_min != dt_max: + dt = float(self._fs.timestep_duration.min().item()) + if not np.isclose(dt, float(self._fs.timestep_duration.max().item())): raise ValueError( - f'cluster_reduce() failed due to inconsistent time step sizes: ' - f'delta_t varies from {dt_min} to {dt_max} hours.' + f'cluster_reduce() requires uniform timestep sizes, got min={dt}h, ' + f'max={float(self._fs.timestep_duration.max().item())}h.' ) - ratio = hours_per_cluster / dt_max - if not np.isclose(ratio, round(ratio), atol=1e-9): - raise ValueError( - f'The selected cluster_duration={hours_per_cluster}h does not match the time ' - f'step size of {dt_max} hours. It must be an integer multiple of {dt_max} hours.' 
- ) - - timesteps_per_cluster = int(round(hours_per_cluster / dt_max)) + if not np.isclose(hours_per_cluster / dt, round(hours_per_cluster / dt), atol=1e-9): + raise ValueError(f'cluster_duration={hours_per_cluster}h must be a multiple of timestep size ({dt}h).') - # Check for multi-period/scenario dimensions + timesteps_per_cluster = int(round(hours_per_cluster / dt)) has_periods = self._fs.periods is not None has_scenarios = self._fs.scenarios is not None logger.info(f'{"":#^80}') - if has_periods or has_scenarios: - logger.info(f'{" Creating Typical Clusters (Multi-dimensional) ":#^80}') - else: - logger.info(f'{" Creating Typical Clusters (Reduced Timesteps) ":#^80}') + logger.info(f'{" Creating Typical Clusters ":#^80}') # Determine iteration dimensions periods = list(self._fs.periods) if has_periods else [None] scenarios = list(self._fs.scenarios) if has_scenarios else [None] - # Get dataset representation ds = self._fs.to_dataset(include_solution=False) - # Store clustering results per (period, scenario) combination + # Cluster each (period, scenario) combination clustering_results: dict[tuple, Clustering] = {} cluster_orders: dict[tuple, np.ndarray] = {} cluster_occurrences_all: dict[tuple, dict] = {} - # Track actual n_clusters (may vary per combination if peak forcing is used) - all_n_clusters = [] - - # Cluster each period x scenario combination independently for period_label in periods: for scenario_label in scenarios: key = (period_label, scenario_label) - - # Select slice for this combination - selector = {} - if period_label is not None: - selector['period'] = period_label - if scenario_label is not None: - selector['scenario'] = scenario_label - - if selector: - ds_slice = ds.sel(**selector, drop=True) - else: - ds_slice = ds - - # Drop constant arrays for clustering + selector = {k: v for k, v in [('period', period_label), ('scenario', scenario_label)] if v is not None} + ds_slice = ds.sel(**selector, drop=True) if selector else ds temporaly_changing_ds = drop_constant_arrays(ds_slice, dim='time') - # Log dimension info - dim_info = [] - if period_label is not None: - dim_info.append(f'period={period_label}') - if scenario_label is not None: - dim_info.append(f'scenario={scenario_label}') - if dim_info: - logger.info(f'Clustering {", ".join(dim_info)}...') + if selector: + logger.info(f'Clustering {", ".join(f"{k}={v}" for k, v in selector.items())}...') - # Perform clustering on this slice clustering = Clustering( original_data=temporaly_changing_ds.to_dataframe(), - hours_per_time_step=float(dt_min), + hours_per_time_step=dt, hours_per_period=hours_per_cluster, nr_of_periods=n_clusters, weights=weights or self._calculate_clustering_weights(temporaly_changing_ds), @@ -1214,156 +1181,70 @@ def cluster_reduce( clustering_results[key] = clustering cluster_orders[key] = clustering.tsam.clusterOrder cluster_occurrences_all[key] = clustering.tsam.clusterPeriodNoOccur - all_n_clusters.append(len(clustering.tsam.clusterPeriodNoOccur)) - # Use first clustering result for building reduced dataset - # (all should have same structure, just different cluster assignments) + # Use first clustering for structure first_key = (periods[0], scenarios[0]) first_clustering = clustering_results[first_key] - typical_periods_df = first_clustering.tsam.typicalPeriods + n_reduced_timesteps = len(first_clustering.tsam.typicalPeriods) actual_n_clusters = len(first_clustering.tsam.clusterPeriodNoOccur) - # Create timestep weights (use first combination - weights should be consistent) + # Create 
timestep weights from cluster occurrences cluster_occurrences = cluster_occurrences_all[first_key] - timestep_weights = [] - for cluster_idx in range(actual_n_clusters): - weight = cluster_occurrences.get(cluster_idx, 1) - timestep_weights.extend([weight] * timesteps_per_cluster) - timestep_weights = np.array(timestep_weights) + timestep_weights = np.repeat( + [cluster_occurrences.get(c, 1) for c in range(actual_n_clusters)], timesteps_per_cluster + ) - logger.info(f'Reduced from {len(self._fs.timesteps)} to {len(typical_periods_df)} timesteps') + logger.info(f'Reduced from {len(self._fs.timesteps)} to {n_reduced_timesteps} timesteps') logger.info(f'Clusters: {actual_n_clusters} (requested: {n_clusters})') - # Create new time index for typical clusters - original_time = self._fs.timesteps - time_start = original_time[0] - freq = pd.Timedelta(hours=dt_min) + # Create new time index new_time_index = pd.date_range( - start=time_start, - periods=len(typical_periods_df), - freq=freq, + start=self._fs.timesteps[0], periods=n_reduced_timesteps, freq=pd.Timedelta(hours=dt) ) - # Build new dataset with typical clusters data - ds_original = self._fs.to_dataset(include_solution=False) - - # Collect typical periods data per (period, scenario) combination - # Key: (period, scenario), Value: DataFrame with typical period data - typical_dfs = {} + # Build typical periods DataArrays keyed by (variable_name, (period, scenario)) + typical_das: dict[str, dict[tuple, xr.DataArray]] = {} for key, clustering in clustering_results.items(): - typical_df = clustering.tsam.typicalPeriods.copy() - typical_df.index = new_time_index - typical_dfs[key] = typical_df + typical_df = clustering.tsam.typicalPeriods + for col in typical_df.columns: + typical_das.setdefault(col, {})[key] = xr.DataArray( + typical_df[col].values, dims=['time'], coords={'time': new_time_index} + ) - # Build new data arrays with reduced time dimension + # Build reduced dataset ds_new_vars = {} - for name in ds_original.data_vars: - original_da = ds_original[name] - - # Check if this variable is in the typical periods (time-varying and non-constant) - first_key = (periods[0], scenarios[0]) - in_typical = name in typical_dfs[first_key].columns - + for name, original_da in ds.data_vars.items(): if 'time' not in original_da.dims: - # Time-independent variable: copy as-is ds_new_vars[name] = original_da.copy() - elif not in_typical: - # Time-dependent but constant (not clustered): slice to new time length - # Take first timesteps_per_cluster * n_clusters timesteps - ds_new_vars[name] = original_da.isel(time=slice(0, len(new_time_index))).assign_coords( + elif name not in typical_das: + # Time-dependent but constant: slice to new time length + ds_new_vars[name] = original_da.isel(time=slice(0, n_reduced_timesteps)).assign_coords( time=new_time_index ) - elif not has_periods and not has_scenarios: - # Simple case: single clustering, use typical periods directly - series = typical_dfs[first_key][name] - da = DataConverter.to_dataarray( - series, - {'time': new_time_index, **{k: v for k, v in self._fs.coords.items() if k != 'time'}}, - ).rename(name) - da = da.assign_attrs(original_da.attrs) - if TimeSeriesData.is_timeseries_data(da): - da = TimeSeriesData.from_dataarray(da) - ds_new_vars[name] = da else: - # Multi-dimensional: build new array with all dims but reduced time - new_dims = list(original_da.dims) - new_shape = list(original_da.shape) - time_idx = new_dims.index('time') - new_shape[time_idx] = len(new_time_index) - - # Build coordinates - 
new_coords = {} - for dim in new_dims: - if dim == 'time': - new_coords[dim] = new_time_index - else: - new_coords[dim] = original_da.coords[dim].values - - # Initialize array and fill per (period, scenario) - new_data = np.zeros(new_shape, dtype=original_da.dtype) - - for period_label in periods: - for scenario_label in scenarios: - key = (period_label, scenario_label) - typical_df = typical_dfs[key] - - if name not in typical_df.columns: - continue - - series_values = typical_df[name].values - - # Determine indices for this slice - if 'period' in new_dims and 'scenario' in new_dims: - if period_label is not None and scenario_label is not None: - period_idx = list(new_coords['period']).index(period_label) - scenario_idx = list(new_coords['scenario']).index(scenario_label) - if new_dims == ['time', 'period', 'scenario']: - new_data[:, period_idx, scenario_idx] = series_values - elif new_dims == ['time', 'scenario', 'period']: - new_data[:, scenario_idx, period_idx] = series_values - elif 'period' in new_dims: - if period_label is not None: - period_idx = list(new_coords['period']).index(period_label) - if new_dims == ['time', 'period']: - new_data[:, period_idx] = series_values - elif new_dims == ['period', 'time']: - new_data[period_idx, :] = series_values - elif 'scenario' in new_dims: - if scenario_label is not None: - scenario_idx = list(new_coords['scenario']).index(scenario_label) - if new_dims == ['time', 'scenario']: - new_data[:, scenario_idx] = series_values - elif new_dims == ['scenario', 'time']: - new_data[scenario_idx, :] = series_values - else: - # Has time but no period/scenario: use first key's data - new_data[:] = series_values - break # Only need to fill once - - da = xr.DataArray(data=new_data, dims=new_dims, coords=new_coords, attrs=original_da.attrs) - if TimeSeriesData.is_timeseries_data(da): - da = TimeSeriesData.from_dataarray(da) + # Time-varying: combine per-(period, scenario) slices + da = self._combine_slices_to_dataarray( + slices=typical_das[name], + original_da=original_da, + new_time_index=new_time_index, + periods=periods, + scenarios=scenarios, + ) + if TimeSeriesData.is_timeseries_data(original_da): + da = TimeSeriesData.from_dataarray(da.assign_attrs(original_da.attrs)) ds_new_vars[name] = da - # Create new dataset with updated variables - ds_new = xr.Dataset(ds_new_vars, attrs=ds_original.attrs) - ds_new = ds_new.assign_coords(time=new_time_index) - - # Update metadata + ds_new = xr.Dataset(ds_new_vars, attrs=ds.attrs) ds_new.attrs['timesteps_per_cluster'] = timesteps_per_cluster - ds_new.attrs['timestep_duration'] = dt_min + ds_new.attrs['timestep_duration'] = dt - # Create new FlowSystem with reduced timesteps reduced_fs = FlowSystem.from_dataset(ds_new) - - # Set cluster_weight for proper aggregation in the reduced FlowSystem reduced_fs.cluster_weight = reduced_fs.fit_to_model_coords('cluster_weight', timestep_weights, dims=['time']) - # Store cluster info for later use during modeling and expand_solution() reduced_fs._cluster_info = { - 'clustering_results': clustering_results, # Dict keyed by (period, scenario) - 'cluster_orders': cluster_orders, # Dict keyed by (period, scenario) - 'cluster_occurrences': cluster_occurrences_all, # Dict keyed by (period, scenario) + 'clustering_results': clustering_results, + 'cluster_orders': cluster_orders, + 'cluster_occurrences': cluster_occurrences_all, 'timestep_weights': timestep_weights, 'n_clusters': actual_n_clusters, 'timesteps_per_cluster': timesteps_per_cluster, @@ -1372,13 +1253,60 @@ def 
cluster_reduce( 'original_fs': self._fs, 'has_periods': has_periods, 'has_scenarios': has_scenarios, - # For backwards compatibility with simple case 'cluster_order': cluster_orders[first_key], 'clustering': first_clustering, } return reduced_fs + @staticmethod + def _combine_slices_to_dataarray( + slices: dict[tuple, xr.DataArray], + original_da: xr.DataArray, + new_time_index: pd.DatetimeIndex, + periods: list, + scenarios: list, + ) -> xr.DataArray: + """Combine per-(period, scenario) slices into a multi-dimensional DataArray using xr.concat. + + Args: + slices: Dict mapping (period, scenario) tuples to 1D DataArrays (time only). + original_da: Original DataArray to get dimension order and attrs from. + new_time_index: New time coordinate for the output. + periods: List of period labels ([None] if no periods dimension). + scenarios: List of scenario labels ([None] if no scenarios dimension). + + Returns: + DataArray with dimensions matching original_da but reduced time. + """ + first_key = (periods[0], scenarios[0]) + has_periods = periods != [None] + has_scenarios = scenarios != [None] + + # Simple case: no period/scenario dimensions + if not has_periods and not has_scenarios: + return slices[first_key].assign_attrs(original_da.attrs) + + # Multi-dimensional: use xr.concat to stack along period/scenario dims + if has_periods and has_scenarios: + # Stack scenarios first, then periods + period_arrays = [] + for p in periods: + scenario_arrays = [slices[(p, s)] for s in scenarios] + period_arrays.append(xr.concat(scenario_arrays, dim=pd.Index(scenarios, name='scenario'))) + result = xr.concat(period_arrays, dim=pd.Index(periods, name='period')) + elif has_periods: + result = xr.concat([slices[(p, None)] for p in periods], dim=pd.Index(periods, name='period')) + else: + result = xr.concat([slices[(None, s)] for s in scenarios], dim=pd.Index(scenarios, name='scenario')) + + # Match original dimension order + target_dims = [d for d in original_da.dims if d in result.dims] + if target_dims and tuple(target_dims) != result.dims: + result = result.transpose(*target_dims) + + return result.assign_attrs(original_da.attrs) + def expand_solution(self) -> FlowSystem: """Expand a reduced (clustered) FlowSystem back to full original timesteps. @@ -1430,8 +1358,6 @@ def expand_solution(self) -> FlowSystem: For accurate dispatch results, use ``fix_sizes()`` to fix the sizes from the reduced optimization and re-optimize at full resolution. """ - import numpy as np - from .flow_system import FlowSystem # Validate @@ -1440,7 +1366,6 @@ def expand_solution(self) -> FlowSystem: 'expand_solution() requires a FlowSystem created with cluster_reduce(). ' 'This FlowSystem has no cluster info.' ) - if self._fs.solution is None: raise ValueError('FlowSystem has no solution. 
Run optimize() or solve() first.') @@ -1450,156 +1375,137 @@ def expand_solution(self) -> FlowSystem: n_clusters = info['n_clusters'] has_periods = info.get('has_periods', False) has_scenarios = info.get('has_scenarios', False) - - # Get cluster_orders dict (keyed by (period, scenario) tuples) - # For backwards compatibility, create dict from single cluster_order if needed cluster_orders = info.get('cluster_orders', {(None, None): info['cluster_order']}) - # Determine iteration dimensions periods = list(original_fs.periods) if has_periods else [None] scenarios = list(original_fs.scenarios) if has_scenarios else [None] - # Get original timesteps from the original FlowSystem original_timesteps = original_fs.timesteps n_original_timesteps = len(original_timesteps) n_reduced_timesteps = n_clusters * timesteps_per_cluster - - # Helper to build mapping for a specific cluster_order - def build_mapping(cluster_order: np.ndarray) -> np.ndarray: - mapping = np.zeros(n_original_timesteps, dtype=np.int32) - for orig_ts_idx in range(n_original_timesteps): - orig_segment_idx = orig_ts_idx // timesteps_per_cluster - pos_in_cluster = orig_ts_idx % timesteps_per_cluster - cluster_id = cluster_order[orig_segment_idx] if orig_segment_idx < len(cluster_order) else 0 - reduced_ts_idx = cluster_id * timesteps_per_cluster + pos_in_cluster - mapping[orig_ts_idx] = min(reduced_ts_idx, n_reduced_timesteps - 1) - return mapping - - # Build mappings per (period, scenario) - mappings = {key: build_mapping(order) for key, order in cluster_orders.items()} first_key = (periods[0], scenarios[0]) - # Helper function to expand time-dependent data (simple case) - def expand_simple(da: xr.DataArray, mapping: np.ndarray) -> xr.DataArray: - expanded_da = da.isel(time=xr.DataArray(mapping, dims=['time'])) - expanded_da = expanded_da.assign_coords(time=original_timesteps) - return expanded_da.assign_attrs(da.attrs) - - # Helper function to expand multi-dimensional data - def expand_multi_dimensional(da: xr.DataArray) -> xr.DataArray: - # Create output array with expanded time dimension - new_dims = list(da.dims) - new_shape = list(da.shape) - time_idx = new_dims.index('time') - new_shape[time_idx] = n_original_timesteps - - # Build new coordinates - new_coords = dict(da.coords) - new_coords['time'] = original_timesteps - - # Initialize output with zeros - expanded_data = np.zeros(new_shape, dtype=da.dtype) - - # Expand each (period, scenario) slice independently - for period_label in periods: - for scenario_label in scenarios: - key = (period_label, scenario_label) - mapping = mappings[key] - - # Build selector for this slice - if 'period' in da.dims and 'scenario' in da.dims: - if period_label is not None and scenario_label is not None: - slice_data = da.sel(period=period_label, scenario=scenario_label) - expanded_slice = slice_data.values[mapping] - # Assign back to the correct position - period_idx = list(da.coords['period'].values).index(period_label) - scenario_idx = list(da.coords['scenario'].values).index(scenario_label) - if da.dims == ('time', 'period', 'scenario'): - expanded_data[:, period_idx, scenario_idx] = expanded_slice - elif da.dims == ('time', 'scenario', 'period'): - expanded_data[:, scenario_idx, period_idx] = expanded_slice - elif 'period' in da.dims: - if period_label is not None: - slice_data = da.sel(period=period_label) - expanded_slice = slice_data.values[mapping] - period_idx = list(da.coords['period'].values).index(period_label) - if da.dims == ('time', 'period'): - expanded_data[:, period_idx] = 
expanded_slice - elif da.dims == ('period', 'time'): - expanded_data[period_idx, :] = expanded_slice - elif 'scenario' in da.dims: - if scenario_label is not None: - slice_data = da.sel(scenario=scenario_label) - expanded_slice = slice_data.values[mapping] - scenario_idx = list(da.coords['scenario'].values).index(scenario_label) - if da.dims == ('time', 'scenario'): - expanded_data[:, scenario_idx] = expanded_slice - elif da.dims == ('scenario', 'time'): - expanded_data[scenario_idx, :] = expanded_slice - - return xr.DataArray( - data=expanded_data, - dims=new_dims, - coords=new_coords, - attrs=da.attrs, - ) + # Build expansion mappings per (period, scenario) + mappings = { + key: self._build_expansion_mapping(order, timesteps_per_cluster, n_original_timesteps) + for key, order in cluster_orders.items() + } - # Helper function to expand any data array - def expand_data(da: xr.DataArray) -> xr.DataArray: + # Expand function for DataArrays + def expand_da(da: xr.DataArray) -> xr.DataArray: if 'time' not in da.dims: - # Time-independent: copy as-is return da.copy() - elif not has_periods and not has_scenarios: - # Simple case: use first mapping - return expand_simple(da, mappings[first_key]) - elif 'period' not in da.dims and 'scenario' not in da.dims: - # Has time but no period/scenario dims: use first mapping - return expand_simple(da, mappings[first_key]) - else: - # Multi-dimensional: expand each slice independently - return expand_multi_dimensional(da) + return self._expand_dataarray(da, mappings, original_timesteps, periods, scenarios) - # 1. Expand the FlowSystem's data (input time series) + # 1. Expand FlowSystem data reduced_ds = self._fs.to_dataset(include_solution=False) - expanded_ds_data = {} - - for var_name in reduced_ds.data_vars: - expanded_ds_data[var_name] = expand_data(reduced_ds[var_name]) - - # Update coordinates - expanded_ds = xr.Dataset(expanded_ds_data, attrs=reduced_ds.attrs) - expanded_ds = expanded_ds.assign_coords(time=original_timesteps) - - # Copy timestep_duration from original + expanded_ds = xr.Dataset( + {name: expand_da(da) for name, da in reduced_ds.data_vars.items()}, attrs=reduced_ds.attrs + ) expanded_ds.attrs['timestep_duration'] = original_fs.timestep_duration.values.tolist() - # Create the expanded FlowSystem from the expanded dataset expanded_fs = FlowSystem.from_dataset(expanded_ds) - # 2. Expand the solution + # 2. 
Expand solution reduced_solution = self._fs.solution - expanded_solution_data = {} - - for var_name in reduced_solution.data_vars: - expanded_solution_data[var_name] = expand_data(reduced_solution[var_name]) - - expanded_solution = xr.Dataset(expanded_solution_data, attrs=reduced_solution.attrs) - expanded_fs._solution = expanded_solution + expanded_fs._solution = xr.Dataset( + {name: expand_da(da) for name, da in reduced_solution.data_vars.items()}, + attrs=reduced_solution.attrs, + ) n_combinations = len(periods) * len(scenarios) - if n_combinations > 1: - logger.info( - f'Expanded FlowSystem from {n_reduced_timesteps} to {n_original_timesteps} timesteps ' - f'({n_clusters} clusters, {n_combinations} period/scenario combinations)' - ) - else: - logger.info( - f'Expanded FlowSystem from {n_reduced_timesteps} to {n_original_timesteps} timesteps ' - f'({n_clusters} clusters → {len(cluster_orders[first_key])} original segments)' + logger.info( + f'Expanded FlowSystem from {n_reduced_timesteps} to {n_original_timesteps} timesteps ' + f'({n_clusters} clusters' + + ( + f', {n_combinations} period/scenario combinations)' + if n_combinations > 1 + else f' → {len(cluster_orders[first_key])} original segments)' ) + ) return expanded_fs + @staticmethod + def _build_expansion_mapping( + cluster_order: np.ndarray, timesteps_per_cluster: int, n_original_timesteps: int + ) -> np.ndarray: + """Build mapping from original timesteps to reduced (typical) timesteps. + + Args: + cluster_order: Array mapping each original segment to its cluster ID. + timesteps_per_cluster: Number of timesteps per cluster. + n_original_timesteps: Total number of original timesteps. + + Returns: + Array where mapping[i] gives the reduced timestep index for original timestep i. + """ + n_reduced = len(set(cluster_order)) * timesteps_per_cluster + segment_indices = np.arange(n_original_timesteps) // timesteps_per_cluster + pos_in_segment = np.arange(n_original_timesteps) % timesteps_per_cluster + # Handle edge case where segment_indices exceed cluster_order length + safe_segment_indices = np.minimum(segment_indices, len(cluster_order) - 1) + cluster_ids = cluster_order[safe_segment_indices] + mapping = cluster_ids * timesteps_per_cluster + pos_in_segment + return np.minimum(mapping, n_reduced - 1).astype(np.int32) + + @staticmethod + def _expand_dataarray( + da: xr.DataArray, + mappings: dict[tuple, np.ndarray], + original_timesteps: pd.DatetimeIndex, + periods: list, + scenarios: list, + ) -> xr.DataArray: + """Expand a DataArray from reduced to original timesteps using cluster mappings. + + Args: + da: DataArray with reduced time dimension. + mappings: Dict mapping (period, scenario) tuples to expansion index arrays. + original_timesteps: Original time coordinates. + periods: List of period labels ([None] if no periods). + scenarios: List of scenario labels ([None] if no scenarios). + + Returns: + DataArray with expanded time dimension. 
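+
+        Example:
+            A minimal sketch with hypothetical values (assumes ``TransformAccessor``
+            and ``np`` are available in the doctest namespace). Each entry of
+            ``mappings`` has the form produced by ``_build_expansion_mapping``:
+
+            >>> TransformAccessor._build_expansion_mapping(
+            ...     np.array([1, 0, 0]), timesteps_per_cluster=2, n_original_timesteps=6
+            ... )
+            array([2, 3, 0, 1, 0, 1], dtype=int32)
+
+            Applied to a reduced series ``[a, b, c, d]``, this mapping yields
+            ``[c, d, a, b, a, b]``, i.e. ``expanded[i] = reduced[mapping[i]]``.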
+ """ + first_key = (periods[0], scenarios[0]) + has_periods = periods != [None] + has_scenarios = scenarios != [None] + + # Simple case: no period/scenario dimensions in the data + if (not has_periods and not has_scenarios) or ('period' not in da.dims and 'scenario' not in da.dims): + mapping = mappings[first_key] + expanded = da.isel(time=xr.DataArray(mapping, dims=['time'])) + return expanded.assign_coords(time=original_timesteps).assign_attrs(da.attrs) + + # Multi-dimensional: expand each (period, scenario) slice and recombine + expanded_slices: dict[tuple, xr.DataArray] = {} + for p in periods: + for s in scenarios: + key = (p, s) + mapping = mappings[key] + + # Select the slice for this (period, scenario) combination + selector = {} + if p is not None and 'period' in da.dims: + selector['period'] = p + if s is not None and 'scenario' in da.dims: + selector['scenario'] = s + + slice_da = da.sel(**selector, drop=True) if selector else da + expanded = slice_da.isel(time=xr.DataArray(mapping, dims=['time'])) + expanded_slices[key] = expanded.assign_coords(time=original_timesteps) + + # Recombine slices using _combine_slices_to_dataarray + return TransformAccessor._combine_slices_to_dataarray( + slices=expanded_slices, + original_da=da, + new_time_index=original_timesteps, + periods=periods, + scenarios=scenarios, + ) + # Future methods can be added here: # # def mga(self, alternatives: int = 5) -> FlowSystem: From 4673bc59bacd4002d695b77efe5fc8c688b5e160 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 16:16:53 +0100 Subject: [PATCH 047/191] Temp --- flixopt/transform_accessor.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index ab231bde0..7380b03d6 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -1212,12 +1212,13 @@ def cluster_reduce( ) # Build reduced dataset + all_keys = {(p, s) for p in periods for s in scenarios} ds_new_vars = {} for name, original_da in ds.data_vars.items(): if 'time' not in original_da.dims: ds_new_vars[name] = original_da.copy() - elif name not in typical_das: - # Time-dependent but constant: slice to new time length + elif name not in typical_das or set(typical_das[name].keys()) != all_keys: + # Time-dependent but constant (or not present in all clustering results): slice to new time length ds_new_vars[name] = original_da.isel(time=slice(0, n_reduced_timesteps)).assign_coords( time=new_time_index ) From b2e71d760d40c9e3bab3b29df99a4beb18e227d7 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 16:29:30 +0100 Subject: [PATCH 048/191] Fix script for docs --- docs/notebooks/data/generate_example_systems.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/notebooks/data/generate_example_systems.py b/docs/notebooks/data/generate_example_systems.py index 639db3a29..53070eeda 100644 --- a/docs/notebooks/data/generate_example_systems.py +++ b/docs/notebooks/data/generate_example_systems.py @@ -20,11 +20,11 @@ # Output directory (same as this script) try: OUTPUT_DIR = Path(__file__).parent - DATA_DIR = Path(__file__).parent.parent.parent.parent / 'examples' / 'resources' + DATA_DIR = Path(__file__).parent # Zeitreihen2020.csv is in the same directory except NameError: # Running in notebook context (e.g., mkdocs-jupyter) OUTPUT_DIR = Path('docs/notebooks/data') - DATA_DIR = Path('examples/resources') + DATA_DIR 
= Path('docs/notebooks/data') def create_simple_system() -> fx.FlowSystem: @@ -280,6 +280,7 @@ def create_district_heating_system() -> fx.FlowSystem: effects_of_investment_per_size={'costs': 10}, ), relative_minimum=0.3, + status_parameters=fx.StatusParameters(), ), fuel_flow=fx.Flow('Q_fu', bus='Coal'), ), @@ -296,6 +297,7 @@ def create_district_heating_system() -> fx.FlowSystem: effects_of_investment_per_size={'costs': 5}, ), relative_minimum=0.1, + status_parameters=fx.StatusParameters(), ), fuel_flow=fx.Flow('Q_fu', bus='Gas'), ), From 05dd8dae6ffca66fad687d785da0bcde1ef5a7d9 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 16:31:20 +0100 Subject: [PATCH 049/191] Update notebook --- docs/notebooks/08e-cluster-and-reduce.ipynb | 232 ++++++-------------- 1 file changed, 68 insertions(+), 164 deletions(-) diff --git a/docs/notebooks/08e-cluster-and-reduce.ipynb b/docs/notebooks/08e-cluster-and-reduce.ipynb index 5f67adf03..9d8d70015 100644 --- a/docs/notebooks/08e-cluster-and-reduce.ipynb +++ b/docs/notebooks/08e-cluster-and-reduce.ipynb @@ -6,7 +6,7 @@ "source": [ "# Typical Periods Optimization with `cluster_reduce()`\n", "\n", - "This notebook demonstrates the new `cluster_reduce()` method for fast sizing optimization using typical periods.\n", + "This notebook demonstrates the `cluster_reduce()` method for fast sizing optimization using typical periods.\n", "\n", "## Key Concept\n", "\n", @@ -14,12 +14,12 @@ "\n", "| Method | Timesteps | Mechanism | Use Case |\n", "|--------|-----------|-----------|----------|\n", - "| `cluster()` | 8760 | Equality constraints | Accurate operational dispatch |\n", - "| `cluster_reduce()` | 192 (8×24) | Typical periods only | Fast initial sizing |\n", + "| `cluster()` | 2976 | Equality constraints | Accurate operational dispatch |\n", + "| `cluster_reduce()` | 768 (8×96) | Typical periods only | Fast initial sizing |\n", "\n", "## Features\n", "\n", - "- **Actual timestep reduction**: Only solves for typical periods (e.g., 8 days × 24h = 192 instead of 8760)\n", + "- **Actual timestep reduction**: Only solves for typical periods (e.g., 8 days × 96 timesteps = 768 instead of 2976)\n", "- **Timestep weighting**: Operational costs are weighted by cluster occurrence\n", "- **Inter-period storage linking**: SOC_boundary variables track storage state across original periods\n", "- **Cyclic constraint**: Optional cyclic storage constraint for long-term balance\n", @@ -50,9 +50,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Create a Full-Year Example System\n", + "## Load the FlowSystem\n", "\n", - "We'll create a simple district heating system with a full year of hourly data." 
+ "We use a pre-built district heating system with real-world time series data (one month at 15-min resolution):" ] }, { @@ -61,27 +61,23 @@ "metadata": {}, "outputs": [], "source": [ - "# Generate synthetic yearly data\n", - "np.random.seed(42)\n", - "hours = 8760 # Full year hourly\n", - "\n", - "# Create realistic heat demand profile (seasonal + daily patterns)\n", - "t = np.arange(hours)\n", - "seasonal = 50 + 40 * np.cos(2 * np.pi * t / 8760) # Higher in winter\n", - "daily = 10 * np.sin(2 * np.pi * t / 24 - np.pi / 2) # Peak in morning/evening\n", - "noise = np.random.normal(0, 5, hours)\n", - "heat_demand = np.maximum(seasonal + daily + noise, 10)\n", - "\n", - "# Create electricity price profile (higher during day, lower at night)\n", - "hour_of_day = t % 24\n", - "elec_price = 50 + 30 * np.sin(np.pi * hour_of_day / 12) + np.random.normal(0, 5, hours)\n", - "elec_price = np.maximum(elec_price, 20)\n", - "\n", - "timesteps = pd.date_range('2020-01-01', periods=hours, freq='h')\n", - "\n", - "print(f'Created {hours} hourly timesteps ({hours / 24:.0f} days)')\n", - "print(f'Heat demand range: {heat_demand.min():.1f} - {heat_demand.max():.1f} MW')\n", - "print(f'Electricity price range: {elec_price.min():.1f} - {elec_price.max():.1f} EUR/MWh')" + "from pathlib import Path\n", + "\n", + "# Generate example data if not present (for local development)\n", + "data_file = Path('data/district_heating_system.nc4')\n", + "if not data_file.exists():\n", + " from data.generate_example_systems import create_district_heating_system\n", + "\n", + " fs = create_district_heating_system()\n", + " fs.optimize(fx.solvers.HighsSolver(log_to_console=False))\n", + " fs.to_netcdf(data_file, overwrite=True)\n", + "\n", + "# Load the district heating system (real data from Zeitreihen2020.csv)\n", + "flow_system = fx.FlowSystem.from_netcdf(data_file)\n", + "\n", + "timesteps = flow_system.timesteps\n", + "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 96:.0f} days at 15-min resolution)')\n", + "print(f'Components: {list(flow_system.components.keys())}')" ] }, { @@ -90,121 +86,28 @@ "metadata": {}, "outputs": [], "source": [ - "# Visualize first month of data\n", + "# Visualize first two weeks of data\n", + "heat_demand = flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile\n", + "electricity_price = flow_system.components['GridBuy'].outputs[0].effects_per_flow_hour['costs']\n", + "\n", "fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.1)\n", "\n", - "fig.add_trace(go.Scatter(x=timesteps[:720], y=heat_demand[:720], name='Heat Demand'), row=1, col=1)\n", - "fig.add_trace(go.Scatter(x=timesteps[:720], y=elec_price[:720], name='Electricity Price'), row=2, col=1)\n", + "fig.add_trace(go.Scatter(x=timesteps[:1344], y=heat_demand.values[:1344], name='Heat Demand'), row=1, col=1)\n", + "fig.add_trace(go.Scatter(x=timesteps[:1344], y=electricity_price.values[:1344], name='Electricity Price'), row=2, col=1)\n", "\n", - "fig.update_layout(height=400, title='First Month of Data')\n", + "fig.update_layout(height=400, title='First Two Weeks of Data')\n", "fig.update_yaxes(title_text='Heat Demand [MW]', row=1, col=1)\n", - "fig.update_yaxes(title_text='El. Price [EUR/MWh]', row=2, col=1)\n", + "fig.update_yaxes(title_text='El. 
Price [€/MWh]', row=2, col=1)\n", "fig.show()" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def create_flow_system():\n", - " \"\"\"Create the district heating FlowSystem.\"\"\"\n", - " fs = fx.FlowSystem(timesteps=timesteps)\n", - "\n", - " # Effects\n", - " costs = fx.Effect(label='costs', unit='EUR', is_objective=True)\n", - "\n", - " # Buses\n", - " heat_bus = fx.Bus('Heat')\n", - " elec_bus = fx.Bus('Electricity')\n", - " gas_bus = fx.Bus('Gas')\n", - "\n", - " fs.add_elements(costs, heat_bus, elec_bus, gas_bus)\n", - "\n", - " # Gas supply\n", - " gas_supply = fx.Source(\n", - " 'GasSupply',\n", - " outputs=[fx.Flow('gas_out', bus='Gas', size=500, effects_per_flow_hour={'costs': 35})],\n", - " )\n", - "\n", - " # Electricity grid\n", - " grid_buy = fx.Source(\n", - " 'GridBuy',\n", - " outputs=[fx.Flow('elec_out', bus='Electricity', size=200, effects_per_flow_hour={'costs': elec_price})],\n", - " )\n", - "\n", - " grid_sell = fx.Sink(\n", - " 'GridSell',\n", - " inputs=[fx.Flow('elec_in', bus='Electricity', size=200, effects_per_flow_hour={'costs': -elec_price * 0.9})],\n", - " )\n", - "\n", - " # Boiler (investment)\n", - " boiler = fx.linear_converters.Boiler(\n", - " 'Boiler',\n", - " thermal_efficiency=0.9,\n", - " thermal_flow=fx.Flow(\n", - " 'Q_th',\n", - " bus='Heat',\n", - " size=fx.InvestParameters(minimum_size=0, maximum_size=200, effects_of_investment_per_size={'costs': 50000}),\n", - " ),\n", - " fuel_flow=fx.Flow('Q_fu', bus='Gas'),\n", - " )\n", - "\n", - " # CHP (investment)\n", - " chp = fx.linear_converters.CHP(\n", - " 'CHP',\n", - " thermal_efficiency=0.45,\n", - " electrical_efficiency=0.35,\n", - " thermal_flow=fx.Flow(\n", - " 'Q_th',\n", - " bus='Heat',\n", - " size=fx.InvestParameters(\n", - " minimum_size=0, maximum_size=150, effects_of_investment_per_size={'costs': 150000}\n", - " ),\n", - " ),\n", - " electrical_flow=fx.Flow('P_el', bus='Electricity'),\n", - " fuel_flow=fx.Flow('Q_fu', bus='Gas'),\n", - " )\n", - "\n", - " # Heat storage (investment)\n", - " storage = fx.Storage(\n", - " 'ThermalStorage',\n", - " charging=fx.Flow('charge', bus='Heat', size=50),\n", - " discharging=fx.Flow('discharge', bus='Heat', size=50),\n", - " capacity_in_flow_hours=fx.InvestParameters(\n", - " minimum_size=0, maximum_size=500, effects_of_investment_per_size={'costs': 20000}\n", - " ),\n", - " eta_charge=0.95,\n", - " eta_discharge=0.95,\n", - " relative_loss_per_hour=0.005,\n", - " initial_charge_state='equals_final',\n", - " )\n", - "\n", - " # Heat demand\n", - " demand = fx.Sink(\n", - " 'HeatDemand',\n", - " inputs=[fx.Flow('Q_th', bus='Heat', size=1, fixed_relative_profile=heat_demand)],\n", - " )\n", - "\n", - " fs.add_elements(gas_supply, grid_buy, grid_sell, boiler, chp, storage, demand)\n", - "\n", - " return fs\n", - "\n", - "\n", - "# Create the system\n", - "flow_system = create_flow_system()\n", - "print(f'FlowSystem created with {len(flow_system.timesteps)} timesteps')\n", - "print(f'Components: {list(flow_system.components.keys())}')" - ] - }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Method 1: Full Optimization (Baseline)\n", "\n", - "First, let's solve the full problem with all 8760 timesteps." + "First, let's solve the full problem with all timesteps." 
] }, { @@ -216,12 +119,12 @@ "solver = fx.solvers.HighsSolver(mip_gap=0.01)\n", "\n", "start = timeit.default_timer()\n", - "fs_full = create_flow_system()\n", + "fs_full = flow_system.copy()\n", "fs_full.optimize(solver)\n", "time_full = timeit.default_timer() - start\n", "\n", "print(f'Full optimization: {time_full:.2f} seconds')\n", - "print(f'Total cost: {fs_full.solution[\"costs\"].item():,.0f} EUR')\n", + "print(f'Total cost: {fs_full.solution[\"costs\"].item():,.0f} €')\n", "print('\\nOptimized sizes:')\n", "for name, size in fs_full.statistics.sizes.items():\n", " print(f' {name}: {float(size.item()):.1f}')" @@ -233,7 +136,7 @@ "source": [ "## Method 2: Typical Periods with `cluster_reduce()`\n", "\n", - "Now let's use the new `cluster_reduce()` method to solve with only 8 typical days (192 timesteps).\n", + "Now let's use the `cluster_reduce()` method to solve with only 8 typical days (768 timesteps).\n", "\n", "**Important**: Use `time_series_for_high_peaks` to force inclusion of peak demand periods. Without this, the typical periods may miss extreme peaks, leading to undersized components that cause infeasibility in the full-resolution dispatch stage." ] @@ -252,9 +155,9 @@ "peak_forcing_series = ['HeatDemand(Q_th)|fixed_relative_profile']\n", "\n", "# Create reduced FlowSystem with 8 typical days\n", - "fs_reduced = create_flow_system().transform.cluster_reduce(\n", - " period_duration='1D', # Daily periods (can also use hours, e.g., 24)\n", - " n_typical_periods=8, # 8 typical days\n", + "fs_reduced = flow_system.transform.cluster_reduce(\n", + " n_clusters=8, # 8 typical days\n", + " cluster_duration='1D', # Daily periods (can also use hours, e.g., 24)\n", " time_series_for_high_peaks=peak_forcing_series, # Force inclusion of peak demand day!\n", " storage_inter_period_linking=True, # Link storage states between periods\n", " storage_cyclic=True, # Cyclic constraint: SOC[0] = SOC[end]\n", @@ -263,7 +166,7 @@ "time_clustering = timeit.default_timer() - start\n", "print(f'Clustering time: {time_clustering:.2f} seconds')\n", "print(f'Reduced from {len(flow_system.timesteps)} to {len(fs_reduced.timesteps)} timesteps')\n", - "print(f'Timestep weights (cluster occurrences): {np.unique(fs_reduced._typical_periods_info[\"timestep_weights\"])}')" + "print(f'Timestep weights (cluster occurrences): {np.unique(fs_reduced._cluster_info[\"timestep_weights\"])}')" ] }, { @@ -278,7 +181,7 @@ "time_reduced = timeit.default_timer() - start\n", "\n", "print(f'Reduced optimization: {time_reduced:.2f} seconds')\n", - "print(f'Total cost: {fs_reduced.solution[\"costs\"].item():,.0f} EUR')\n", + "print(f'Total cost: {fs_reduced.solution[\"costs\"].item():,.0f} €')\n", "print(f'Speedup vs full: {time_full / (time_clustering + time_reduced):.1f}x')\n", "print('\\nOptimized sizes:')\n", "for name, size in fs_reduced.statistics.sizes.items():\n", @@ -307,7 +210,7 @@ "# Stage 1: Fast sizing (already done above)\n", "print('Stage 1: Sizing with typical periods')\n", "print(f' Time: {time_clustering + time_reduced:.2f} seconds')\n", - "print(f' Cost estimate: {fs_reduced.solution[\"costs\"].item():,.0f} EUR')\n", + "print(f' Cost estimate: {fs_reduced.solution[\"costs\"].item():,.0f} €')\n", "\n", "# Apply safety margin to sizes (5-10% buffer for demand variability)\n", "SAFETY_MARGIN = 1.05 # 5% buffer\n", @@ -321,12 +224,12 @@ "print('\\nStage 2: Dispatch at full resolution')\n", "start = timeit.default_timer()\n", "\n", - "fs_dispatch = create_flow_system().transform.fix_sizes(sizes_with_margin)\n", + 
"fs_dispatch = flow_system.transform.fix_sizes(sizes_with_margin)\n", "fs_dispatch.optimize(solver)\n", "\n", "time_dispatch = timeit.default_timer() - start\n", "print(f' Time: {time_dispatch:.2f} seconds')\n", - "print(f' Actual cost: {fs_dispatch.solution[\"costs\"].item():,.0f} EUR')\n", + "print(f' Actual cost: {fs_dispatch.solution[\"costs\"].item():,.0f} €')\n", "\n", "# Total time comparison\n", "total_two_stage = time_clustering + time_reduced + time_dispatch\n", @@ -351,39 +254,39 @@ "results = {\n", " 'Full (baseline)': {\n", " 'Time [s]': time_full,\n", - " 'Cost [EUR]': fs_full.solution['costs'].item(),\n", - " 'Boiler Size': fs_full.statistics.sizes['Boiler(Q_th)'].item(),\n", + " 'Cost [€]': fs_full.solution['costs'].item(),\n", " 'CHP Size': fs_full.statistics.sizes['CHP(Q_th)'].item(),\n", - " 'Storage Size': fs_full.statistics.sizes['ThermalStorage'].item(),\n", + " 'Boiler Size': fs_full.statistics.sizes['Boiler(Q_th)'].item(),\n", + " 'Storage Size': fs_full.statistics.sizes['Storage'].item(),\n", " },\n", " 'Typical Periods (sizing)': {\n", " 'Time [s]': time_clustering + time_reduced,\n", - " 'Cost [EUR]': fs_reduced.solution['costs'].item(),\n", - " 'Boiler Size': fs_reduced.statistics.sizes['Boiler(Q_th)'].item(),\n", + " 'Cost [€]': fs_reduced.solution['costs'].item(),\n", " 'CHP Size': fs_reduced.statistics.sizes['CHP(Q_th)'].item(),\n", - " 'Storage Size': fs_reduced.statistics.sizes['ThermalStorage'].item(),\n", + " 'Boiler Size': fs_reduced.statistics.sizes['Boiler(Q_th)'].item(),\n", + " 'Storage Size': fs_reduced.statistics.sizes['Storage'].item(),\n", " },\n", " 'Two-Stage (with margin)': {\n", " 'Time [s]': total_two_stage,\n", - " 'Cost [EUR]': fs_dispatch.solution['costs'].item(),\n", - " 'Boiler Size': sizes_with_margin['Boiler(Q_th)'],\n", + " 'Cost [€]': fs_dispatch.solution['costs'].item(),\n", " 'CHP Size': sizes_with_margin['CHP(Q_th)'],\n", - " 'Storage Size': sizes_with_margin['ThermalStorage'],\n", + " 'Boiler Size': sizes_with_margin['Boiler(Q_th)'],\n", + " 'Storage Size': sizes_with_margin['Storage'],\n", " },\n", "}\n", "\n", "comparison = pd.DataFrame(results).T\n", - "baseline_cost = comparison.loc['Full (baseline)', 'Cost [EUR]']\n", + "baseline_cost = comparison.loc['Full (baseline)', 'Cost [€]']\n", "baseline_time = comparison.loc['Full (baseline)', 'Time [s]']\n", - "comparison['Cost Gap [%]'] = ((comparison['Cost [EUR]'] - baseline_cost) / abs(baseline_cost) * 100).round(2)\n", + "comparison['Cost Gap [%]'] = ((comparison['Cost [€]'] - baseline_cost) / abs(baseline_cost) * 100).round(2)\n", "comparison['Speedup'] = (baseline_time / comparison['Time [s]']).round(1)\n", "\n", "comparison.style.format(\n", " {\n", " 'Time [s]': '{:.2f}',\n", - " 'Cost [EUR]': '{:,.0f}',\n", - " 'Boiler Size': '{:.1f}',\n", + " 'Cost [€]': '{:,.0f}',\n", " 'CHP Size': '{:.1f}',\n", + " 'Boiler Size': '{:.1f}',\n", " 'Storage Size': '{:.0f}',\n", " 'Cost Gap [%]': '{:.2f}',\n", " 'Speedup': '{:.1f}x',\n", @@ -414,13 +317,14 @@ "outputs": [], "source": [ "# Show clustering info\n", - "info = fs_reduced._typical_periods_info\n", + "info = fs_reduced._cluster_info\n", "print('Typical Periods Configuration:')\n", - "print(f' Number of typical periods: {info[\"n_typical_periods\"]}')\n", - "print(f' Timesteps per period: {info[\"timesteps_per_period\"]}')\n", - "print(f' Total reduced timesteps: {info[\"n_typical_periods\"] * info[\"timesteps_per_period\"]}')\n", + "print(f' Number of typical periods: {info[\"n_clusters\"]}')\n", + "print(f' Timesteps per 
period: {info[\"timesteps_per_cluster\"]}')\n", + "print(f' Total reduced timesteps: {info[\"n_clusters\"] * info[\"timesteps_per_cluster\"]}')\n", "print(f' Cluster order (first 10): {info[\"cluster_order\"][:10]}...')\n", - "print(f' Cluster occurrences: {dict(info[\"cluster_occurrences\"])}')\n", + "cluster_occurrences = info['cluster_occurrences'][(None, None)]\n", + "print(f' Cluster occurrences: {dict(cluster_occurrences)}')\n", "print(f' Storage inter-period linking: {info[\"storage_inter_period_linking\"]}')\n", "print(f' Storage cyclic: {info[\"storage_cyclic\"]}')" ] @@ -435,8 +339,8 @@ "\n", "| Parameter | Type | Description |\n", "|-----------|------|-------------|\n", - "| `period_duration` | `str \\| float` | Duration of each period ('1D', '24h') or hours as float |\n", - "| `n_typical_periods` | `int` | Number of typical periods to extract (e.g., 8) |\n", + "| `n_clusters` | `int` | Number of typical periods to extract (e.g., 8) |\n", + "| `cluster_duration` | `str \\| float` | Duration of each period ('1D', '24h') or hours as float |\n", "| `weights` | `dict[str, float]` | Optional weights for clustering each time series |\n", "| `time_series_for_high_peaks` | `list[str]` | **IMPORTANT**: Force inclusion of high-value periods to capture peak demands |\n", "| `time_series_for_low_peaks` | `list[str]` | Force inclusion of low-value periods |\n", @@ -456,7 +360,7 @@ "\n", "| Feature | `cluster()` | `cluster_reduce()` |\n", "|---------|-------------|--------------------|\n", - "| Timesteps | Original (8760) | Reduced (e.g., 192) |\n", + "| Timesteps | Original (2976) | Reduced (e.g., 768) |\n", "| Mechanism | Equality constraints | Typical periods only |\n", "| Solve time | Moderate reduction | Dramatic reduction |\n", "| Accuracy | Higher | Lower (sizing only) |\n", @@ -470,7 +374,7 @@ "source": [ "## Summary\n", "\n", - "The new `cluster_reduce()` method provides:\n", + "The `cluster_reduce()` method provides:\n", "\n", "1. **Dramatic speedup** for sizing optimization by reducing timesteps\n", "2. 
**Proper cost weighting** so operational costs reflect cluster occurrences\n", @@ -482,8 +386,8 @@ "```python\n", "# Stage 1: Fast sizing with typical periods\n", "fs_sizing = flow_system.transform.cluster_reduce(\n", - " period_duration='1D',\n", - " n_typical_periods=8,\n", + " n_clusters=8,\n", + " cluster_duration='1D',\n", " time_series_for_high_peaks=['DemandComponent(FlowName)|fixed_relative_profile'],\n", ")\n", "fs_sizing.optimize(solver)\n", From 4341cec752cb79bee0457687dbc16b845f8e7883 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 16:39:14 +0100 Subject: [PATCH 050/191] Fix clsuter weight applying --- flixopt/elements.py | 4 ++-- flixopt/features.py | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/flixopt/elements.py b/flixopt/elements.py index 1dc92ec66..608b6ac70 100644 --- a/flixopt/elements.py +++ b/flixopt/elements.py @@ -821,12 +821,12 @@ def results_structure(self): } def _create_shares(self): - # Effects per flow hour + # Effects per flow hour (use timestep_duration only, cluster_weight is applied when summing to total) if self.element.effects_per_flow_hour: self._model.effects.add_share_to_effects( name=self.label_full, expressions={ - effect: self.flow_rate * self._model.aggregation_weight * factor + effect: self.flow_rate * self._model.timestep_duration * factor for effect, factor in self.element.effects_per_flow_hour.items() }, target='temporal', diff --git a/flixopt/features.py b/flixopt/features.py index 5d890ebf9..75cb3d92c 100644 --- a/flixopt/features.py +++ b/flixopt/features.py @@ -263,12 +263,12 @@ def _do_modeling(self): self._add_effects() def _add_effects(self): - """Add operational effects""" + """Add operational effects (use timestep_duration only, cluster_weight is applied when summing to total)""" if self.parameters.effects_per_active_hour: self._model.effects.add_share_to_effects( name=self.label_of_element, expressions={ - effect: self.status * factor * self._model.aggregation_weight + effect: self.status * factor * self._model.timestep_duration for effect, factor in self.parameters.effects_per_active_hour.items() }, target='temporal', @@ -612,8 +612,8 @@ def _do_modeling(self): if 'time' in self._dims: self.total_per_timestep = self.add_variables( - lower=-np.inf if (self._min_per_hour is None) else self._min_per_hour * self._model.aggregation_weight, - upper=np.inf if (self._max_per_hour is None) else self._max_per_hour * self._model.aggregation_weight, + lower=-np.inf if (self._min_per_hour is None) else self._min_per_hour * self._model.timestep_duration, + upper=np.inf if (self._max_per_hour is None) else self._max_per_hour * self._model.timestep_duration, coords=self._model.get_coords(self._dims), short_name='per_timestep', ) From 87fd661ffee90ed42c1e3a8cf6137662b40afe6c Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 16:42:49 +0100 Subject: [PATCH 051/191] Fix storage initial=final issue in clustering --- flixopt/transform_accessor.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 7380b03d6..9431481de 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -1242,6 +1242,13 @@ def cluster_reduce( reduced_fs = FlowSystem.from_dataset(ds_new) reduced_fs.cluster_weight = reduced_fs.fit_to_model_coords('cluster_weight', timestep_weights, dims=['time']) + # If storage_cyclic=False, also disable cyclic 
constraint on individual storages + if not storage_cyclic: + for storage in reduced_fs.storages.values(): + if storage.initial_charge_state == 'equals_final': + storage.initial_charge_state = 0 + logger.debug(f"Set {storage.label}.initial_charge_state=0 (was 'equals_final')") + reduced_fs._cluster_info = { 'clustering_results': clustering_results, 'cluster_orders': cluster_orders, From 2dc39e4670ddfcf0d9725d9fe3e45ae244b6ed34 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 16:46:52 +0100 Subject: [PATCH 052/191] Improve notebooks --- docs/notebooks/08a-aggregation.ipynb | 3 +- docs/notebooks/08b-rolling-horizon.ipynb | 3 +- docs/notebooks/08c-clustering.ipynb | 5168 +----------------- docs/notebooks/08d-external-clustering.ipynb | 19 +- docs/notebooks/08e-cluster-and-reduce.ipynb | 5 +- 5 files changed, 223 insertions(+), 4975 deletions(-) diff --git a/docs/notebooks/08a-aggregation.ipynb b/docs/notebooks/08a-aggregation.ipynb index 24dc3279a..6d0260539 100644 --- a/docs/notebooks/08a-aggregation.ipynb +++ b/docs/notebooks/08a-aggregation.ipynb @@ -67,8 +67,7 @@ " from data.generate_example_systems import create_district_heating_system\n", "\n", " fs = create_district_heating_system()\n", - " fs.optimize(fx.solvers.HighsSolver(log_to_console=False))\n", - " fs.to_netcdf(data_file, overwrite=True)\n", + " fs.to_netcdf(data_file)\n", "\n", "# Load the district heating system (real data from Zeitreihen2020.csv)\n", "flow_system = fx.FlowSystem.from_netcdf(data_file)\n", diff --git a/docs/notebooks/08b-rolling-horizon.ipynb b/docs/notebooks/08b-rolling-horizon.ipynb index 68a45303e..e43da8f2c 100644 --- a/docs/notebooks/08b-rolling-horizon.ipynb +++ b/docs/notebooks/08b-rolling-horizon.ipynb @@ -71,8 +71,7 @@ " from data.generate_example_systems import create_operational_system\n", "\n", " fs = create_operational_system()\n", - " fs.optimize(fx.solvers.HighsSolver(log_to_console=False))\n", - " fs.to_netcdf(data_file, overwrite=True)\n", + " fs.to_netcdf(data_file)\n", "\n", "# Load the operational system (real data from Zeitreihen2020.csv, two weeks)\n", "flow_system = fx.FlowSystem.from_netcdf(data_file)\n", diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb index c477cfcbe..5967fda45 100644 --- a/docs/notebooks/08c-clustering.ipynb +++ b/docs/notebooks/08c-clustering.ipynb @@ -4,7 +4,21 @@ "cell_type": "markdown", "id": "0", "metadata": {}, - "source": "# Clustering and Segmentation with tsam\n\nSpeed up large problems by reducing time series complexity using the [tsam](https://github.com/FZJ-IEK3-VSA/tsam) package.\n\nThis notebook demonstrates two complementary techniques:\n\n- **Clustering** (inter-period): Identify typical periods (e.g., 8 typical days from 365 days)\n- **Segmentation** (inner-period): Reduce timesteps within periods (e.g., 24 hours to 4 segments)\n\nBoth can be used independently or combined for maximum speedup.\n\n!!! 
note \"Requirements\"\n This notebook requires the `tsam` package: `pip install tsam`" + "source": [ + "# Clustering and Segmentation with tsam\n", + "\n", + "Speed up large problems by reducing time series complexity using the [tsam](https://github.com/FZJ-IEK3-VSA/tsam) package.\n", + "\n", + "This notebook demonstrates two complementary techniques:\n", + "\n", + "- **Clustering** (inter-period): Identify typical periods (e.g., 8 typical days from 365 days)\n", + "- **Segmentation** (inner-period): Reduce timesteps within periods (e.g., 24 hours to 4 segments)\n", + "\n", + "Both can be used independently or combined for maximum speedup.\n", + "\n", + "!!! note \"Requirements\"\n", + " This notebook requires the `tsam` package: `pip install tsam`" + ] }, { "cell_type": "markdown", @@ -16,26 +30,10 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "id": "2", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:36:41.528074Z", - "start_time": "2025-12-14T15:36:38.134971Z" - } - }, - "outputs": [ - { - "data": { - "text/plain": [ - "flixopt.config.CONFIG" - ] - }, - "execution_count": 1, - "metadata": {}, - "output_type": "execute_result" - } - ], + "metadata": {}, + "outputs": [], "source": [ "import timeit\n", "\n", @@ -53,28 +51,18 @@ "cell_type": "markdown", "id": "3", "metadata": {}, - "source": "## Load the FlowSystem\n\nWe use a pre-built district heating system with real-world time series data (one month at 15-min resolution):" + "source": [ + "## Load the FlowSystem\n", + "\n", + "We use a pre-built district heating system with real-world time series data (one month at 15-min resolution):" + ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "id": "4", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:36:47.851549Z", - "start_time": "2025-12-14T15:36:47.598380Z" - } - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Loaded FlowSystem: 2976 timesteps (31 days at 15-min resolution)\n", - "Components: ['CHP', 'Boiler', 'Storage', 'GasGrid', 'CoalSupply', 'GridBuy', 'GridSell', 'HeatDemand', 'ElecDemand']\n" - ] - } - ], + "metadata": {}, + "outputs": [], "source": [ "from pathlib import Path\n", "\n", @@ -84,8 +72,7 @@ " from data.generate_example_systems import create_district_heating_system\n", "\n", " fs = create_district_heating_system()\n", - " fs.optimize(fx.solvers.HighsSolver(log_to_console=False))\n", - " fs.to_netcdf(data_file, overwrite=True)\n", + " fs.to_netcdf(data_file)\n", "\n", "# Load the district heating system (real data from Zeitreihen2020.csv)\n", "flow_system = fx.FlowSystem.from_netcdf(data_file)\n", @@ -97,3950 +84,10 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "id": "5", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:36:47.942912Z", - "start_time": "2025-12-14T15:36:47.863137Z" - } - }, - "outputs": [ - { - "data": { - "text/html": [ - " \n", - " \n", - " " - ] - }, - "jetTransient": { - "display_id": null - }, - "metadata": {}, - "output_type": "display_data" - }, - { - "data": { - "text/html": [ - "
" - ] - }, - "jetTransient": { - "display_id": null - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "metadata": {}, + "outputs": [], "source": [ "# Visualize first two weeks of data\n", "heat_demand = flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile\n", @@ -4059,219 +106,33 @@ }, { "cell_type": "markdown", - "id": "8", + "id": "6", "metadata": {}, - "source": "## Part 1: Clustering (Inter-Period Aggregation)\n\n**Clustering** groups similar periods together to find representative \"typical\" periods.\n\nFor example, with 31 days of data:\n- Original: 31 days × 96 timesteps/day = 2,976 timesteps \n- Clustered (8 typical days): 8 days × 96 timesteps/day = 768 representative timesteps\n\nThe optimizer only solves for 8 unique days, but weights results by how often each typical day occurred.\n\n```python\nfs.transform.cluster(\n n_clusters=8, # Find 8 typical days\n cluster_duration='1D', # Each cluster is 1 day\n)\n```" + "source": [ + "## Part 1: Clustering (Inter-Period Aggregation)\n", + "\n", + "**Clustering** groups similar periods together to find representative \"typical\" periods.\n", + "\n", + "For example, with 31 days of data:\n", + "- Original: 31 days × 96 timesteps/day = 2,976 timesteps \n", + "- Clustered (8 typical days): 8 days × 96 timesteps/day = 768 representative timesteps\n", + "\n", + "The optimizer only solves for 8 unique days, but weights results by how often each typical day occurred.\n", + "\n", + "```python\n", + "fs.transform.cluster(\n", + " n_clusters=8, # Find 8 typical days\n", + " cluster_duration='1D', # Each cluster is 1 day\n", + ")\n", + "```" + ] }, { "cell_type": "code", - "execution_count": 4, - "id": "9", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:36:49.785497Z", - "start_time": "2025-12-14T15:36:48.689947Z" - } - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Original: 2976 timesteps (31 days)\n", - "Clustered: 8 typical days\n", - "Cluster assignments: [np.int32(2), np.int32(5), np.int32(7), np.int32(3), np.int32(0), np.int32(4), np.int32(5), np.int32(5), np.int32(5), np.int32(7), np.int32(3), np.int32(0), np.int32(4), np.int32(5), np.int32(5), np.int32(1), np.int32(7), np.int32(3), np.int32(0), np.int32(4), np.int32(1), np.int32(1), np.int32(1), np.int32(7), np.int32(3), np.int32(0), np.int32(4), np.int32(1), np.int32(1), np.int32(1), np.int32(6)]\n" - ] - }, - { - "data": { - "text/html": [ - "
\n", - "
" - ], - "text/plain": [ - "PlotResult(data= Size: 262kB\n", - "Dimensions: (time: 2976, variable: 5)\n", - "Coordinates:\n", - " * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00\n", - " * variable (variable) object 40B 'ElecDemand(P_el)|fixed_relative_profil...\n", - "Data variables:\n", - " original (variable, time) float64 119kB 58.39 58.36 58.11 ... 152.1 151.0\n", - " aggregated (variable, time) float64 119kB 58.39 58.36 58.11 ... 152.1 151.0, figure=Figure({\n", - " 'data': [{'hovertemplate': ('variable=Original - ElecDemand' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Original - ElecDemand(P_el)|fixed_relative_profile',\n", - " 'line': {'color': '#636EFA', 'dash': 'dash'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Original - ElecDemand(P_el)|fixed_relative_profile',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('UrgehesxTUCuR+F6FC5NQK5H4XoUDk' ... 'G4HoXLVEDhehSuR8FTQAAAAAAA8FRA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Original - GasGrid(Q_' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Original - GasGrid(Q_Gas)|costs|per_flow_hour',\n", - " 'line': {'color': '#EF553B', 'dash': 'dash'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Original - GasGrid(Q_Gas)|costs|per_flow_hour',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('mG4Sg8A6QECYbhKDwDpAQJhuEoPAOk' ... '66SQxSQEA1XrpJDFJAQDVeukkMUkBA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Original - GridBuy(P_' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Original - GridBuy(P_el)|costs|per_flow_hour',\n", - " 'line': {'color': '#00CC96', 'dash': 'dash'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Original - GridBuy(P_el)|costs|per_flow_hour',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('8tJNYhDYH0Dy0k1iENgfQPLSTWIQ2B' ... 'bz/dT4RkBGtvP91PhGQEa28/3U+EZA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Original - GridSell(P' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Original - GridSell(P_el)|costs|per_flow_hour',\n", - " 'line': {'color': '#AB63FA', 'dash': 'dash'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Original - GridSell(P_el)|costs|per_flow_hour',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('8tJNYhDYG8Dy0k1iENgbwPLSTWIQ2B' ... 'bz/dR4RsBGtvP91HhGwEa28/3UeEbA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Original - HeatDemand' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Original - HeatDemand(Q_th)|fixed_relative_profile',\n", - " 'line': {'color': '#FFA15A', 'dash': 'dash'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Original - HeatDemand(Q_th)|fixed_relative_profile',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('sp3vp8bDX0BEi2zn+4leQO58PzVeGl' ... 'MzMzMjY0BeukkMAgNjQL+fGi/d4GJA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Aggregated - ElecDema' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Aggregated - ElecDemand(P_el)|fixed_relative_profile',\n", - " 'line': {'color': '#636EFA', 'dash': 'solid'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Aggregated - ElecDemand(P_el)|fixed_relative_profile',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('UbgehesxTUCuR+F6FC5NQK5H4XoUDk' ... 'G4HoXLVEDhehSuR8FTQAAAAAAA8FRA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Aggregated - GasGrid(' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Aggregated - GasGrid(Q_Gas)|costs|per_flow_hour',\n", - " 'line': {'color': '#EF553B', 'dash': 'solid'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Aggregated - GasGrid(Q_Gas)|costs|per_flow_hour',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('mG4Sg8A6QECYbhKDwDpAQJhuEoPAOk' ... '66SQxSQEA1XrpJDFJAQDVeukkMUkBA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Aggregated - GridBuy(' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Aggregated - GridBuy(P_el)|costs|per_flow_hour',\n", - " 'line': {'color': '#00CC96', 'dash': 'solid'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Aggregated - GridBuy(P_el)|costs|per_flow_hour',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('8tJNYhDYH0Dy0k1iENgfQPLSTWIQ2B' ... 'bz/dT4RkBGtvP91PhGQEa28/3U+EZA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Aggregated - GridSell' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Aggregated - GridSell(P_el)|costs|per_flow_hour',\n", - " 'line': {'color': '#AB63FA', 'dash': 'solid'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Aggregated - GridSell(P_el)|costs|per_flow_hour',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('9dJNYhDYG8D10k1iENgbwPXSTWIQ2B' ... 'bz/dR4RsBGtvP91HhGwEa28/3UeEbA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Aggregated - HeatDema' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Aggregated - HeatDemand(Q_th)|fixed_relative_profile',\n", - " 'line': {'color': '#FFA15A', 'dash': 'solid'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Aggregated - HeatDemand(Q_th)|fixed_relative_profile',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('sp3vp8bDX0BEi2zn+4leQO58PzVeGl' ... 'MzMzMjY0BeukkMAgNjQL+fGi/d4GJA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'}],\n", - " 'layout': {'legend': {'title': {'text': 'variable'}, 'tracegroupgap': 0},\n", - " 'margin': {'t': 60},\n", - " 'template': '...',\n", - " 'title': {'text': 'Original vs Aggregated Data (original = ---)'},\n", - " 'xaxis': {'anchor': 'y', 'domain': [0.0, 1.0], 'title': {'text': 'Time in h'}},\n", - " 'yaxis': {'anchor': 'x', 'domain': [0.0, 1.0], 'title': {'text': 'Value'}}}\n", - "}))" - ] - }, - "execution_count": 4, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], "source": [ "# Cluster with 8 typical days (from 31 days)\n", "fs_clustering_demo = flow_system.copy()\n", @@ -4290,29 +151,20 @@ }, { "cell_type": "markdown", - "id": "10", + "id": "8", "metadata": {}, - "source": "### Comparing Different Cluster Counts\n\nMore clusters = better accuracy but less speedup. Let's compare:" + "source": [ + "### Comparing Different Cluster Counts\n", + "\n", + "More clusters = better accuracy but less speedup. Let's compare:" + ] }, { "cell_type": "code", - "execution_count": 5, - "id": "11", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:36:52.191535Z", - "start_time": "2025-12-14T15:36:50.141148Z" - } - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Comparing: HeatDemand(Q_th)|fixed_relative_profile\n" - ] - } - ], + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], "source": [ "# Test different numbers of clusters\n", "cluster_configs = [4, 8, 12, 16]\n", @@ -4330,52 +182,10 @@ }, { "cell_type": "code", - "execution_count": 6, - "id": "12", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:37:01.054663Z", - "start_time": "2025-12-14T15:37:00.991311Z" - } - }, - "outputs": [ - { - "data": { - "text/html": [ - "
" - ] - }, - "jetTransient": { - "display_id": null - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], "source": [ "# Compare the aggregated data for each configuration\n", "fig = make_subplots(\n", @@ -4431,78 +241,10 @@ }, { "cell_type": "code", - "execution_count": 7, - "id": "13", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:37:01.104163Z", - "start_time": "2025-12-14T15:37:01.071223Z" - } - }, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
[removed output: styled error-metrics table, reconstructed]
Typical Days    RMSE    MAE    Max Error    Correlation
4               4.84    4.52       12.19         0.9905
8               3.45    2.60        6.89         0.9952
12              1.68    0.83        6.39         0.9989
16              0.37    0.25        1.86         0.9999
\n" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], "source": [ "# Calculate error metrics for each configuration\n", "metrics = []\n", @@ -4538,218 +280,34 @@ }, { "cell_type": "markdown", - "id": "t8et37i26k", + "id": "12", "metadata": {}, - "source": "## Part 2: Segmentation (Inner-Period Aggregation)\n\n**Segmentation** reduces the number of timesteps *within* each period by grouping similar consecutive timesteps.\n\nFor example, with 15-minute resolution data:\n- Original day: 96 timesteps (24h × 4 per hour)\n- Segmented (12 segments): 12 representative timesteps per day (~2 hours each)\n\nThis is useful when you have high-resolution data but don't need that granularity for your analysis.\n\n```python\nfs.transform.cluster(\n n_clusters=None, # Skip clustering (keep all periods)\n cluster_duration='1D', # Segment within each day\n n_segments=12, # Reduce to 12 segments per day\n)\n```" + "source": [ + "## Part 2: Segmentation (Inner-Period Aggregation)\n", + "\n", + "**Segmentation** reduces the number of timesteps *within* each period by grouping similar consecutive timesteps.\n", + "\n", + "For example, with 15-minute resolution data:\n", + "- Original day: 96 timesteps (24h × 4 per hour)\n", + "- Segmented (12 segments): 12 representative timesteps per day (~2 hours each)\n", + "\n", + "This is useful when you have high-resolution data but don't need that granularity for your analysis.\n", + "\n", + "```python\n", + "fs.transform.cluster(\n", + " n_clusters=None, # Skip clustering (keep all periods)\n", + " cluster_duration='1D', # Segment within each day\n", + " n_segments=12, # Reduce to 12 segments per day\n", + ")\n", + "```" + ] }, { "cell_type": "code", - "execution_count": 8, - "id": "lbpmw6mnb5k", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:37:01.984111Z", - "start_time": "2025-12-14T15:37:01.119694Z" - } - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Original: 96 timesteps per day (15-min resolution)\n", - "Segmented: 12 segments per day (~2 hours each)\n" - ] - }, - { - "data": { - "text/html": [ - "
\n", - "
" - ], - "text/plain": [ - "PlotResult(data= Size: 262kB\n", - "Dimensions: (time: 2976, variable: 5)\n", - "Coordinates:\n", - " * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00\n", - " * variable (variable) object 40B 'ElecDemand(P_el)|fixed_relative_profil...\n", - "Data variables:\n", - " original (variable, time) float64 119kB 58.39 58.36 58.11 ... 152.1 151.0\n", - " aggregated (variable, time) float64 119kB 56.75 56.75 56.75 ... 153.1 153.1, figure=Figure({\n", - " 'data': [{'hovertemplate': ('variable=Original - ElecDemand' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Original - ElecDemand(P_el)|fixed_relative_profile',\n", - " 'line': {'color': '#636EFA', 'dash': 'dash'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Original - ElecDemand(P_el)|fixed_relative_profile',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('UrgehesxTUCuR+F6FC5NQK5H4XoUDk' ... 'G4HoXLVEDhehSuR8FTQAAAAAAA8FRA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Original - GasGrid(Q_' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Original - GasGrid(Q_Gas)|costs|per_flow_hour',\n", - " 'line': {'color': '#EF553B', 'dash': 'dash'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Original - GasGrid(Q_Gas)|costs|per_flow_hour',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('mG4Sg8A6QECYbhKDwDpAQJhuEoPAOk' ... '66SQxSQEA1XrpJDFJAQDVeukkMUkBA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Original - GridBuy(P_' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Original - GridBuy(P_el)|costs|per_flow_hour',\n", - " 'line': {'color': '#00CC96', 'dash': 'dash'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Original - GridBuy(P_el)|costs|per_flow_hour',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('8tJNYhDYH0Dy0k1iENgfQPLSTWIQ2B' ... 'bz/dT4RkBGtvP91PhGQEa28/3U+EZA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Original - GridSell(P' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Original - GridSell(P_el)|costs|per_flow_hour',\n", - " 'line': {'color': '#AB63FA', 'dash': 'dash'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Original - GridSell(P_el)|costs|per_flow_hour',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('8tJNYhDYG8Dy0k1iENgbwPLSTWIQ2B' ... 'bz/dR4RsBGtvP91HhGwEa28/3UeEbA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Original - HeatDemand' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Original - HeatDemand(Q_th)|fixed_relative_profile',\n", - " 'line': {'color': '#FFA15A', 'dash': 'dash'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Original - HeatDemand(Q_th)|fixed_relative_profile',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('sp3vp8bDX0BEi2zn+4leQO58PzVeGl' ... 'MzMzMjY0BeukkMAgNjQL+fGi/d4GJA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Aggregated - ElecDema' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Aggregated - ElecDemand(P_el)|fixed_relative_profile',\n", - " 'line': {'color': '#636EFA', 'dash': 'solid'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Aggregated - ElecDemand(P_el)|fixed_relative_profile',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('XY/C9ShgTEBdj8L1KGBMQF2PwvUoYE' ... 'AAAAC4VEAAAAAAALhUQAAAAAAAuFRA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Aggregated - GasGrid(' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Aggregated - GasGrid(Q_Gas)|costs|per_flow_hour',\n", - " 'line': {'color': '#EF553B', 'dash': 'solid'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Aggregated - GasGrid(Q_Gas)|costs|per_flow_hour',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('mG4Sg8A6QECYbhKDwDpAQJhuEoPAOk' ... '66SQxSQEA1XrpJDFJAQDVeukkMUkBA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Aggregated - GridBuy(' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Aggregated - GridBuy(P_el)|costs|per_flow_hour',\n", - " 'line': {'color': '#00CC96', 'dash': 'solid'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Aggregated - GridBuy(P_el)|costs|per_flow_hour',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('Rrbz/dQ4FkBGtvP91DgWQEa28/3UOB' ... 'bz/dT4RkBGtvP91PhGQEa28/3U+EZA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Aggregated - GridSell' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Aggregated - GridSell(P_el)|costs|per_flow_hour',\n", - " 'line': {'color': '#AB63FA', 'dash': 'solid'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Aggregated - GridSell(P_el)|costs|per_flow_hour',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('Qrbz/dQ4EsBCtvP91DgSwEK28/3UOB' ... 'bz/dR4RsBGtvP91HhGwEa28/3UeEbA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Aggregated - HeatDema' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Aggregated - HeatDemand(Q_th)|fixed_relative_profile',\n", - " 'line': {'color': '#FFA15A', 'dash': 'solid'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Aggregated - HeatDemand(Q_th)|fixed_relative_profile',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('hetRuB4UYECF61G4HhRgQIXrUbgeFG' ... 'XrUbgkY0AfhetRuCRjQB+F61G4JGNA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'}],\n", - " 'layout': {'legend': {'title': {'text': 'variable'}, 'tracegroupgap': 0},\n", - " 'margin': {'t': 60},\n", - " 'template': '...',\n", - " 'title': {'text': 'Original vs Aggregated Data (original = ---)'},\n", - " 'xaxis': {'anchor': 'y', 'domain': [0.0, 1.0], 'title': {'text': 'Time in h'}},\n", - " 'yaxis': {'anchor': 'x', 'domain': [0.0, 1.0], 'title': {'text': 'Value'}}}\n", - "}))" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], "source": [ "# Segmentation only: reduce 96 timesteps/day to 12 segments/day\n", "fs_segmentation_demo = flow_system.copy()\n", @@ -4771,29 +329,20 @@ }, { "cell_type": "markdown", - "id": "6bgh7f0vsj", + "id": "14", "metadata": {}, - "source": "### Comparing Different Segment Counts\n\nMore segments = better accuracy but less speedup:" + "source": [ + "### Comparing Different Segment Counts\n", + "\n", + "More segments = better accuracy but less speedup:" + ] }, { "cell_type": "code", - "execution_count": 9, - "id": "do29lhcinx7", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:37:05.038629Z", - "start_time": "2025-12-14T15:37:02.095516Z" - } - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Comparing: HeatDemand(Q_th)|fixed_relative_profile\n" - ] - } - ], + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], "source": [ "# Test different numbers of segments\n", "segment_configs = [6, 12, 24, 48]\n", @@ -4811,52 +360,10 @@ }, { "cell_type": "code", - "execution_count": 10, - "id": "21athrtuavw", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:37:05.124205Z", - "start_time": "2025-12-14T15:37:05.100783Z" - } - }, - "outputs": [ - { - "data": { - "text/html": [ - "
" - ] - }, - "jetTransient": { - "display_id": null - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], "source": [ "# Compare the segmented data for first day only (clearer visualization)\n", "fig = make_subplots(\n", @@ -4915,78 +422,10 @@ }, { "cell_type": "code", - "execution_count": 11, - "id": "phpx36k23p", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:37:05.145417Z", - "start_time": "2025-12-14T15:37:05.131259Z" - } - }, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - "
[removed output: styled error-metrics table, reconstructed]
Segments    RMSE     MAE     Max Error    Correlation
6           10.19    7.93        36.38         0.9572
12           5.89    4.53        23.95         0.9859
24           2.73    2.12        11.38         0.9970
48           1.20    0.86         3.91         0.9994
\n" - ], - "text/plain": [ - "" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], "source": [ "# Calculate error metrics for segmentation\n", "seg_metrics = []\n", @@ -5022,218 +461,30 @@ }, { "cell_type": "markdown", - "id": "u6sc5ek0rya", + "id": "18", "metadata": {}, - "source": "## Part 3: Combined Clustering + Segmentation\n\nFor maximum speedup, combine both techniques:\n\n```python\nfs.transform.cluster(\n n_clusters=8, # 8 typical days (inter-period)\n cluster_duration='1D',\n n_segments=12, # 12 segments per day (inner-period)\n)\n```\n\nThis reduces 2,976 timesteps to just 8 × 12 = 96 representative timesteps!" + "source": [ + "## Part 3: Combined Clustering + Segmentation\n", + "\n", + "For maximum speedup, combine both techniques:\n", + "\n", + "```python\n", + "fs.transform.cluster(\n", + " n_clusters=8, # 8 typical days (inter-period)\n", + " cluster_duration='1D',\n", + " n_segments=12, # 12 segments per day (inner-period)\n", + ")\n", + "```\n", + "\n", + "This reduces 2,976 timesteps to just 8 × 12 = 96 representative timesteps!" + ] }, { "cell_type": "code", - "execution_count": 12, - "id": "j24sbfpl0x", - "metadata": { - "ExecuteTime": { - "end_time": "2025-12-14T15:37:05.735963Z", - "start_time": "2025-12-14T15:37:05.163786Z" - } - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Original: 2976 timesteps\n", - "Combined: 8 typical days × 12 segments = 96 representative timesteps\n" - ] - }, - { - "data": { - "text/html": [ - "
\n", - "
" - ], - "text/plain": [ - "PlotResult(data= Size: 262kB\n", - "Dimensions: (time: 2976, variable: 5)\n", - "Coordinates:\n", - " * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00\n", - " * variable (variable) object 40B 'ElecDemand(P_el)|fixed_relative_profil...\n", - "Data variables:\n", - " original (variable, time) float64 119kB 58.39 58.36 58.11 ... 152.1 151.0\n", - " aggregated (variable, time) float64 119kB 56.75 56.75 56.75 ... 153.1 153.1, figure=Figure({\n", - " 'data': [{'hovertemplate': ('variable=Original - ElecDemand' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Original - ElecDemand(P_el)|fixed_relative_profile',\n", - " 'line': {'color': '#636EFA', 'dash': 'dash'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Original - ElecDemand(P_el)|fixed_relative_profile',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('UrgehesxTUCuR+F6FC5NQK5H4XoUDk' ... 'G4HoXLVEDhehSuR8FTQAAAAAAA8FRA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Original - GasGrid(Q_' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Original - GasGrid(Q_Gas)|costs|per_flow_hour',\n", - " 'line': {'color': '#EF553B', 'dash': 'dash'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Original - GasGrid(Q_Gas)|costs|per_flow_hour',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('mG4Sg8A6QECYbhKDwDpAQJhuEoPAOk' ... '66SQxSQEA1XrpJDFJAQDVeukkMUkBA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Original - GridBuy(P_' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Original - GridBuy(P_el)|costs|per_flow_hour',\n", - " 'line': {'color': '#00CC96', 'dash': 'dash'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Original - GridBuy(P_el)|costs|per_flow_hour',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('8tJNYhDYH0Dy0k1iENgfQPLSTWIQ2B' ... 'bz/dT4RkBGtvP91PhGQEa28/3U+EZA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Original - GridSell(P' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Original - GridSell(P_el)|costs|per_flow_hour',\n", - " 'line': {'color': '#AB63FA', 'dash': 'dash'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Original - GridSell(P_el)|costs|per_flow_hour',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('8tJNYhDYG8Dy0k1iENgbwPLSTWIQ2B' ... 'bz/dR4RsBGtvP91HhGwEa28/3UeEbA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Original - HeatDemand' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Original - HeatDemand(Q_th)|fixed_relative_profile',\n", - " 'line': {'color': '#FFA15A', 'dash': 'dash'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Original - HeatDemand(Q_th)|fixed_relative_profile',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('sp3vp8bDX0BEi2zn+4leQO58PzVeGl' ... 'MzMzMjY0BeukkMAgNjQL+fGi/d4GJA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Aggregated - ElecDema' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Aggregated - ElecDemand(P_el)|fixed_relative_profile',\n", - " 'line': {'color': '#636EFA', 'dash': 'solid'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Aggregated - ElecDemand(P_el)|fixed_relative_profile',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('XY/C9ShgTEBdj8L1KGBMQF2PwvUoYE' ... 'AAAAC4VEAAAAAAALhUQAAAAAAAuFRA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Aggregated - GasGrid(' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Aggregated - GasGrid(Q_Gas)|costs|per_flow_hour',\n", - " 'line': {'color': '#EF553B', 'dash': 'solid'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Aggregated - GasGrid(Q_Gas)|costs|per_flow_hour',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('mG4Sg8A6QECYbhKDwDpAQJhuEoPAOk' ... '66SQxSQEA1XrpJDFJAQDVeukkMUkBA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Aggregated - GridBuy(' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Aggregated - GridBuy(P_el)|costs|per_flow_hour',\n", - " 'line': {'color': '#00CC96', 'dash': 'solid'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Aggregated - GridBuy(P_el)|costs|per_flow_hour',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('Rrbz/dQ4FkBGtvP91DgWQEa28/3UOB' ... 'bz/dT4RkBGtvP91PhGQEa28/3U+EZA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Aggregated - GridSell' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Aggregated - GridSell(P_el)|costs|per_flow_hour',\n", - " 'line': {'color': '#AB63FA', 'dash': 'solid'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Aggregated - GridSell(P_el)|costs|per_flow_hour',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('Qrbz/dQ4EsBCtvP91DgSwEK28/3UOB' ... 'bz/dR4RsBGtvP91HhGwEa28/3UeEbA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'},\n", - " {'hovertemplate': ('variable=Aggregated - HeatDema' ... '}
value=%{y}'),\n", - " 'legendgroup': 'Aggregated - HeatDemand(Q_th)|fixed_relative_profile',\n", - " 'line': {'color': '#FFA15A', 'dash': 'solid'},\n", - " 'marker': {'symbol': 'circle'},\n", - " 'mode': 'lines',\n", - " 'name': 'Aggregated - HeatDemand(Q_th)|fixed_relative_profile',\n", - " 'showlegend': True,\n", - " 'type': 'scattergl',\n", - " 'x': array(['2020-01-01T00:00:00.000000000', '2020-01-01T00:15:00.000000000',\n", - " '2020-01-01T00:30:00.000000000', ..., '2020-01-31T23:15:00.000000000',\n", - " '2020-01-31T23:30:00.000000000', '2020-01-31T23:45:00.000000000'],\n", - " shape=(2976,), dtype='datetime64[ns]'),\n", - " 'xaxis': 'x',\n", - " 'y': {'bdata': ('hetRuB4UYECF61G4HhRgQIXrUbgeFG' ... 'XrUbgkY0AfhetRuCRjQB+F61G4JGNA'),\n", - " 'dtype': 'f8'},\n", - " 'yaxis': 'y'}],\n", - " 'layout': {'legend': {'title': {'text': 'variable'}, 'tracegroupgap': 0},\n", - " 'margin': {'t': 60},\n", - " 'template': '...',\n", - " 'title': {'text': 'Original vs Aggregated Data (original = ---)'},\n", - " 'xaxis': {'anchor': 'y', 'domain': [0.0, 1.0], 'title': {'text': 'Time in h'}},\n", - " 'yaxis': {'anchor': 'x', 'domain': [0.0, 1.0], 'title': {'text': 'Value'}}}\n", - "}))" - ] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" - } - ], + "execution_count": null, + "id": "19", + "metadata": {}, + "outputs": [], "source": [ "# Combined: 8 typical days × 12 segments each\n", "fs_combined_demo = flow_system.copy()\n", @@ -5256,81 +507,26 @@ }, { "cell_type": "markdown", - "id": "14", + "id": "20", "metadata": {}, - "source": "## Performance Comparison\n\nNow let's compare the optimization performance of all approaches.\n\n### Baseline: Full Optimization (No Aggregation)" + "source": [ + "## Performance Comparison\n", + "\n", + "Now let's compare the optimization performance of all approaches.\n", + "\n", + "### Baseline: Full Optimization (No Aggregation)" + ] }, { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "21", "metadata": { - "ExecuteTime": { - "start_time": "2025-12-14T15:37:06.084308Z" - }, "jupyter": { "is_executing": true } }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[2m2025-12-14 16:37:06.388\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow CHP(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.3, 0.3, 0.3, ..., 0.3, 0.3, 0.3], shape=(2976,))\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", - "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). Consider using status_parameters to allow the Flow to be switched active and inactive.\n", - "\u001b[2m2025-12-14 16:37:06.492\u001b[0m \u001b[33mWARNING \u001b[0m │ \u001b[33m┌─\u001b[0m Flow Boiler(Q_th) has a relative_minimum of Size: 24kB\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m array([0.1, 0.1, 0.1, ..., 0.1, 0.1, 0.1], shape=(2976,))\n", - "\u001b[2m \u001b[0m │ \u001b[33m│\u001b[0m Coordinates:\n", - "\u001b[2m \u001b[0m │ \u001b[33m└─\u001b[0m * time (time) datetime64[ns] 24kB 2020-01-01 ... 2020-01-31T23:45:00 and no status_parameters. This prevents the Flow from switching inactive (flow_rate = 0). 
Consider using status_parameters to allow the Flow to be switched active and inactive.\n"
-     ]
-    },
-    [... removed outputs elided: tqdm progress bars for writing constraints/variables, and the HiGHS 1.12.0 solver log (MIP: 89316 rows, 80386 cols, 5955 binaries; presolve, then branch-and-bound) ...]
-      " Nodes | B&B Tree | Objective Bounds | Dynamic Constraints | Work \n",
-      "Src Proc. InQueue | Leaves Expl. | BestBound BestSol Gap | Cuts InLp Confl. 
| LpIters Time\n", - "\n", - " 0 0 0 0.00% -48251946.82856 inf inf 0 0 0 0 0.4s\n", - " R 0 0 0 0.00% 2209206.133553 2278967.860722 3.06% 0 0 0 15439 1.5s\n" - ] - } - ], + "outputs": [], "source": [ "solver = fx.solvers.HighsSolver(mip_gap=0.01)\n", "\n", @@ -5348,14 +544,16 @@ }, { "cell_type": "markdown", - "id": "16", + "id": "22", "metadata": {}, - "source": "### Clustering Only (8 Typical Days)" + "source": [ + "### Clustering Only (8 Typical Days)" + ] }, { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -5380,14 +578,16 @@ }, { "cell_type": "markdown", - "id": "qk9l29yv32p", + "id": "24", "metadata": {}, - "source": "### Segmentation Only (12 Segments per Day)" + "source": [ + "### Segmentation Only (12 Segments per Day)" + ] }, { "cell_type": "code", "execution_count": null, - "id": "puisldf6fa", + "id": "25", "metadata": {}, "outputs": [], "source": [ @@ -5413,14 +613,16 @@ }, { "cell_type": "markdown", - "id": "6nlsdmx326d", + "id": "26", "metadata": {}, - "source": "### Combined: Clustering + Segmentation" + "source": [ + "### Combined: Clustering + Segmentation" + ] }, { "cell_type": "code", "execution_count": null, - "id": "frq1vct5l4v", + "id": "27", "metadata": {}, "outputs": [], "source": [ @@ -5446,7 +648,7 @@ }, { "cell_type": "markdown", - "id": "18", + "id": "28", "metadata": {}, "source": [ "## Compare Results" @@ -5455,7 +657,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "29", "metadata": {}, "outputs": [], "source": [ @@ -5511,7 +713,7 @@ }, { "cell_type": "markdown", - "id": "20", + "id": "30", "metadata": {}, "source": [ "## Multi-Period Clustering\n", @@ -5523,7 +725,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "31", "metadata": {}, "outputs": [], "source": [ @@ -5583,7 +785,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "32", "metadata": {}, "outputs": [], "source": [ @@ -5600,7 +802,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "33", "metadata": {}, "outputs": [], "source": [ @@ -5611,13 +813,49 @@ }, { "cell_type": "markdown", - "id": "24", + "id": "34", "metadata": {}, - "source": "## API Reference\n\n### `transform.cluster()` Parameters\n\n| Parameter | Type | Description |\n|-----------|------|-------------|\n| `n_clusters` | `int \\| None` | Number of typical periods (e.g., 8 typical days). Set to `None` for segmentation-only. |\n| `cluster_duration` | `str \\| float` | Duration per cluster ('1D', '24h', or hours as float) |\n| `n_segments` | `int \\| None` | Segments within each period (inner-period aggregation). 
Default: `None` (no segmentation) |\n| `aggregate_data` | `bool` | If True (default), aggregate time series data |\n| `include_storage` | `bool` | Include storage in clustering constraints (default: True) |\n| `flexibility_percent` | `float` | Allow binary variable deviations (default: 0) |\n| `flexibility_penalty` | `float` | Penalty for deviations (default: 0) |\n| `time_series_for_high_peaks` | `list` | Force inclusion of high-value periods |\n| `time_series_for_low_peaks` | `list` | Force inclusion of low-value periods |\n\n### Common Patterns\n\n```python\n# Clustering only: 8 typical days from a year\nfs.transform.cluster(n_clusters=8, cluster_duration='1D')\n\n# Segmentation only: reduce to 12 segments per day\nfs.transform.cluster(n_clusters=None, cluster_duration='1D', n_segments=12)\n\n# Combined: 8 typical days × 12 segments each\nfs.transform.cluster(n_clusters=8, cluster_duration='1D', n_segments=12)\n\n# Force inclusion of peak demand periods\nfs.transform.cluster(\n n_clusters=8,\n cluster_duration='1D',\n time_series_for_high_peaks=[heat_demand_ts],\n)\n```" + "source": [ + "## API Reference\n", + "\n", + "### `transform.cluster()` Parameters\n", + "\n", + "| Parameter | Type | Description |\n", + "|-----------|------|-------------|\n", + "| `n_clusters` | `int \\| None` | Number of typical periods (e.g., 8 typical days). Set to `None` for segmentation-only. |\n", + "| `cluster_duration` | `str \\| float` | Duration per cluster ('1D', '24h', or hours as float) |\n", + "| `n_segments` | `int \\| None` | Segments within each period (inner-period aggregation). Default: `None` (no segmentation) |\n", + "| `aggregate_data` | `bool` | If True (default), aggregate time series data |\n", + "| `include_storage` | `bool` | Include storage in clustering constraints (default: True) |\n", + "| `flexibility_percent` | `float` | Allow binary variable deviations (default: 0) |\n", + "| `flexibility_penalty` | `float` | Penalty for deviations (default: 0) |\n", + "| `time_series_for_high_peaks` | `list` | Force inclusion of high-value periods |\n", + "| `time_series_for_low_peaks` | `list` | Force inclusion of low-value periods |\n", + "\n", + "### Common Patterns\n", + "\n", + "```python\n", + "# Clustering only: 8 typical days from a year\n", + "fs.transform.cluster(n_clusters=8, cluster_duration='1D')\n", + "\n", + "# Segmentation only: reduce to 12 segments per day\n", + "fs.transform.cluster(n_clusters=None, cluster_duration='1D', n_segments=12)\n", + "\n", + "# Combined: 8 typical days × 12 segments each\n", + "fs.transform.cluster(n_clusters=8, cluster_duration='1D', n_segments=12)\n", + "\n", + "# Force inclusion of peak demand periods\n", + "fs.transform.cluster(\n", + " n_clusters=8,\n", + " cluster_duration='1D',\n", + " time_series_for_high_peaks=[heat_demand_ts],\n", + ")\n", + "```" + ] }, { "cell_type": "markdown", - "id": "25", + "id": "35", "metadata": {}, "source": [ "## Summary\n", diff --git a/docs/notebooks/08d-external-clustering.ipynb b/docs/notebooks/08d-external-clustering.ipynb index 351f15210..da213fba2 100644 --- a/docs/notebooks/08d-external-clustering.ipynb +++ b/docs/notebooks/08d-external-clustering.ipynb @@ -55,8 +55,7 @@ " from data.generate_example_systems import create_district_heating_system\n", "\n", " fs = create_district_heating_system()\n", - " fs.optimize(fx.solvers.HighsSolver(log_to_console=False))\n", - " fs.to_netcdf(data_file, overwrite=True)\n", + " fs.to_netcdf(data_file)\n", "\n", "# Load the FlowSystem\n", "flow_system = 
fx.FlowSystem.from_netcdf(data_file)\n", @@ -387,7 +386,21 @@ { "cell_type": "markdown", "metadata": {}, - "source": "## Summary\n\n| Method | Data Aggregation | When to Use |\n|--------|------------------|-------------|\n| `transform.cluster()` | Yes | Default - let flixopt handle everything |\n| `tsam_aggregation=...` | Yes | External tsam on data subset, with data aggregation |\n| Direct `cluster_order` | No | Custom algorithms or manual period grouping (binary only) |\n\nAll methods use `ClusteringParameters` which stores:\n- `cluster_order`: Which cluster each period belongs to\n- `period_length`: Timesteps per period\n- `segment_assignment`: (optional) Segment IDs within each cluster\n- `tsam_aggregation`: (optional) tsam object for data transformation" + "source": [ + "## Summary\n", + "\n", + "| Method | Data Aggregation | When to Use |\n", + "|--------|------------------|-------------|\n", + "| `transform.cluster()` | Yes | Default - let flixopt handle everything |\n", + "| `tsam_aggregation=...` | Yes | External tsam on data subset, with data aggregation |\n", + "| Direct `cluster_order` | No | Custom algorithms or manual period grouping (binary only) |\n", + "\n", + "All methods use `ClusteringParameters` which stores:\n", + "- `cluster_order`: Which cluster each period belongs to\n", + "- `period_length`: Timesteps per period\n", + "- `segment_assignment`: (optional) Segment IDs within each cluster\n", + "- `tsam_aggregation`: (optional) tsam object for data transformation" + ] } ], "metadata": { diff --git a/docs/notebooks/08e-cluster-and-reduce.ipynb b/docs/notebooks/08e-cluster-and-reduce.ipynb index 9d8d70015..8ab2418fd 100644 --- a/docs/notebooks/08e-cluster-and-reduce.ipynb +++ b/docs/notebooks/08e-cluster-and-reduce.ipynb @@ -69,8 +69,7 @@ " from data.generate_example_systems import create_district_heating_system\n", "\n", " fs = create_district_heating_system()\n", - " fs.optimize(fx.solvers.HighsSolver(log_to_console=False))\n", - " fs.to_netcdf(data_file, overwrite=True)\n", + " fs.to_netcdf(data_file)\n", "\n", "# Load the district heating system (real data from Zeitreihen2020.csv)\n", "flow_system = fx.FlowSystem.from_netcdf(data_file)\n", @@ -427,7 +426,7 @@ "metadata": {}, "outputs": [], "source": [ - "fs_expanded.statistics.plot.effects()" + "fs_reduced.statistics.plot.balance('Heat')" ] } ], From 3c0e986387b46b2a99f0df05c33063f9edc7597f Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 17:08:21 +0100 Subject: [PATCH 053/191] clsuter reduce does remoce regular cyclic storage behaviour --- flixopt/transform_accessor.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 9431481de..f5460c07d 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -1242,12 +1242,10 @@ def cluster_reduce( reduced_fs = FlowSystem.from_dataset(ds_new) reduced_fs.cluster_weight = reduced_fs.fit_to_model_coords('cluster_weight', timestep_weights, dims=['time']) - # If storage_cyclic=False, also disable cyclic constraint on individual storages - if not storage_cyclic: - for storage in reduced_fs.storages.values(): - if storage.initial_charge_state == 'equals_final': - storage.initial_charge_state = 0 - logger.debug(f"Set {storage.label}.initial_charge_state=0 (was 'equals_final')") + # Remove 'equals_final' from storages - doesn't make sense on reduced timesteps + for storage in reduced_fs.storages.values(): + if 
storage.initial_charge_state == 'equals_final': + storage.initial_charge_state = 0 reduced_fs._cluster_info = { 'clustering_results': clustering_results, From f2272d02315d3e3c8bfce3da9539bea4f2aaa9b9 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 17:14:58 +0100 Subject: [PATCH 054/191] Fix cluster weightings when expanding and in statistics_accessor.py --- flixopt/statistics_accessor.py | 4 ++-- flixopt/transform_accessor.py | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/flixopt/statistics_accessor.py b/flixopt/statistics_accessor.py index 535970840..572363be8 100644 --- a/flixopt/statistics_accessor.py +++ b/flixopt/statistics_accessor.py @@ -784,9 +784,9 @@ def get_contributor_type(contributor: str) -> str: label = f'{contributor}->{source_effect}({current_mode})' if label in solution: da = solution[label] * factor - # For total mode, sum temporal over time + # For total mode, sum temporal over time (apply cluster_weight for proper weighting) if mode == 'total' and current_mode == 'temporal' and 'time' in da.dims: - da = da.sum('time') + da = (da * self._fs.cluster_weight).sum('time') if share_total is None: share_total = da else: diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index f5460c07d..c35bfa027 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -1412,6 +1412,11 @@ def expand_da(da: xr.DataArray) -> xr.DataArray: expanded_fs = FlowSystem.from_dataset(expanded_ds) + # Reset cluster_weight to 1.0 - values are already expanded, no weighting needed + expanded_fs.cluster_weight = expanded_fs.fit_to_model_coords( + 'cluster_weight', np.ones(n_original_timesteps), dims=['time'] + ) + # 2. Expand solution reduced_solution = self._fs.solution expanded_fs._solution = xr.Dataset( From c7155e84af4ce28045e7d3e0b2329e812a909974 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 18:25:39 +0100 Subject: [PATCH 055/191] Improve cluster weighting --- flixopt/transform_accessor.py | 72 ++++++++++++++++++++++++++++++----- 1 file changed, 62 insertions(+), 10 deletions(-) diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index c35bfa027..8c2b562b1 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -1188,12 +1188,42 @@ def cluster_reduce( n_reduced_timesteps = len(first_clustering.tsam.typicalPeriods) actual_n_clusters = len(first_clustering.tsam.clusterPeriodNoOccur) - # Create timestep weights from cluster occurrences - cluster_occurrences = cluster_occurrences_all[first_key] - timestep_weights = np.repeat( - [cluster_occurrences.get(c, 1) for c in range(actual_n_clusters)], timesteps_per_cluster + # Create new time index (needed for weights and typical periods) + new_time_index = pd.date_range( + start=self._fs.timesteps[0], periods=n_reduced_timesteps, freq=pd.Timedelta(hours=dt) ) + # Create timestep weights from cluster occurrences (per period/scenario if needed) + def _build_weights_for_key(key: tuple) -> np.ndarray: + occurrences = cluster_occurrences_all[key] + return np.repeat([occurrences.get(c, 1) for c in range(actual_n_clusters)], timesteps_per_cluster) + + # Build weights array - might need period/scenario dimensions + if has_periods or has_scenarios: + # Build multi-dimensional weights + weights_dict: dict[tuple, xr.DataArray] = {} + for key in cluster_occurrences_all: + weights_dict[key] = xr.DataArray( + _build_weights_for_key(key), 
dims=['time'], coords={'time': new_time_index} + ) + # Combine into single DataArray with appropriate dimensions + if has_periods and has_scenarios: + period_arrays = [] + for p in periods: + scenario_arrays = [weights_dict[(p, s)] for s in scenarios] + period_arrays.append(xr.concat(scenario_arrays, dim=pd.Index(scenarios, name='scenario'))) + timestep_weights = xr.concat(period_arrays, dim=pd.Index(periods, name='period')).transpose('time', ...) + elif has_periods: + timestep_weights = xr.concat( + [weights_dict[(p, None)] for p in periods], dim=pd.Index(periods, name='period') + ).transpose('time', 'period') + else: + timestep_weights = xr.concat( + [weights_dict[(None, s)] for s in scenarios], dim=pd.Index(scenarios, name='scenario') + ).transpose('time', 'scenario') + else: + timestep_weights = _build_weights_for_key(first_key) + logger.info(f'Reduced from {len(self._fs.timesteps)} to {n_reduced_timesteps} timesteps') logger.info(f'Clusters: {actual_n_clusters} (requested: {n_clusters})') @@ -1240,7 +1270,10 @@ def cluster_reduce( ds_new.attrs['timestep_duration'] = dt reduced_fs = FlowSystem.from_dataset(ds_new) - reduced_fs.cluster_weight = reduced_fs.fit_to_model_coords('cluster_weight', timestep_weights, dims=['time']) + # Set cluster_weight - might have period/scenario dimensions + reduced_fs.cluster_weight = reduced_fs.fit_to_model_coords( + 'cluster_weight', timestep_weights, dims=['scenario', 'period', 'time'] + ) # Remove 'equals_final' from storages - doesn't make sense on reduced timesteps for storage in reduced_fs.storages.values(): @@ -1403,19 +1436,38 @@ def expand_da(da: xr.DataArray) -> xr.DataArray: return da.copy() return self._expand_dataarray(da, mappings, original_timesteps, periods, scenarios) - # 1. Expand FlowSystem data + # 1. Expand FlowSystem data (exclude cluster_weight - we'll set it manually) reduced_ds = self._fs.to_dataset(include_solution=False) expanded_ds = xr.Dataset( - {name: expand_da(da) for name, da in reduced_ds.data_vars.items()}, attrs=reduced_ds.attrs + {name: expand_da(da) for name, da in reduced_ds.data_vars.items() if name != 'cluster_weight'}, + attrs=reduced_ds.attrs, ) expanded_ds.attrs['timestep_duration'] = original_fs.timestep_duration.values.tolist() expanded_fs = FlowSystem.from_dataset(expanded_ds) # Reset cluster_weight to 1.0 - values are already expanded, no weighting needed - expanded_fs.cluster_weight = expanded_fs.fit_to_model_coords( - 'cluster_weight', np.ones(n_original_timesteps), dims=['time'] - ) + # Match dimensions of original clustered cluster_weight + if has_periods or has_scenarios: + ones_da = xr.DataArray(np.ones(n_original_timesteps), dims=['time'], coords={'time': original_timesteps}) + if has_periods and has_scenarios: + expanded_fs.cluster_weight = ( + ones_da.expand_dims(period=list(periods), scenario=list(scenarios)) + .transpose('time', 'period', 'scenario') + .rename('cluster_weight') + ) + elif has_periods: + expanded_fs.cluster_weight = ( + ones_da.expand_dims(period=list(periods)).transpose('time', 'period').rename('cluster_weight') + ) + else: + expanded_fs.cluster_weight = ( + ones_da.expand_dims(scenario=list(scenarios)).transpose('time', 'scenario').rename('cluster_weight') + ) + else: + expanded_fs.cluster_weight = expanded_fs.fit_to_model_coords( + 'cluster_weight', np.ones(n_original_timesteps), dims=['time'] + ) # 2. 
Expand solution reduced_solution = self._fs.solution From 7126207659bd3e12acb7238c28a1a7c3a18e6899 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 18:28:47 +0100 Subject: [PATCH 056/191] 1. Each period gets its own cluster weights based on its clustering 2. Statistics correctly apply weights when computing totals 3. Expanded FlowSystem has no weighting (all 1.0) since values are already repeated --- flixopt/flow_system.py | 15 ++++++++---- flixopt/transform_accessor.py | 45 ++++++++++++++++------------------- 2 files changed, 31 insertions(+), 29 deletions(-) diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 357b66493..368228071 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -196,11 +196,16 @@ def __init__( # Cluster weight for cluster_reduce optimization (default 1.0) # Represents how many original timesteps each cluster represents - self.cluster_weight = self.fit_to_model_coords( - 'cluster_weight', - np.ones(len(self.timesteps)) if cluster_weight is None else cluster_weight, - dims=['time'], - ) + # May have period/scenario dimensions if cluster_reduce was used with those + if cluster_weight is None: + self.cluster_weight = self.fit_to_model_coords( + 'cluster_weight', np.ones(len(self.timesteps)), dims=['time'] + ) + elif isinstance(cluster_weight, xr.DataArray) and len(cluster_weight.dims) > 1: + # Multi-dimensional cluster_weight from cluster_reduce - use directly + self.cluster_weight = cluster_weight.rename('cluster_weight') + else: + self.cluster_weight = self.fit_to_model_coords('cluster_weight', cluster_weight, dims=['time']) self.scenario_weights = scenario_weights # Use setter diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 8c2b562b1..fe9606f45 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -1436,7 +1436,7 @@ def expand_da(da: xr.DataArray) -> xr.DataArray: return da.copy() return self._expand_dataarray(da, mappings, original_timesteps, periods, scenarios) - # 1. Expand FlowSystem data (exclude cluster_weight - we'll set it manually) + # 1. 
Expand FlowSystem data (with cluster_weight set to 1.0 for all timesteps) reduced_ds = self._fs.to_dataset(include_solution=False) expanded_ds = xr.Dataset( {name: expand_da(da) for name, da in reduced_ds.data_vars.items() if name != 'cluster_weight'}, @@ -1444,30 +1444,27 @@ def expand_da(da: xr.DataArray) -> xr.DataArray: ) expanded_ds.attrs['timestep_duration'] = original_fs.timestep_duration.values.tolist() - expanded_fs = FlowSystem.from_dataset(expanded_ds) - - # Reset cluster_weight to 1.0 - values are already expanded, no weighting needed - # Match dimensions of original clustered cluster_weight - if has_periods or has_scenarios: - ones_da = xr.DataArray(np.ones(n_original_timesteps), dims=['time'], coords={'time': original_timesteps}) - if has_periods and has_scenarios: - expanded_fs.cluster_weight = ( - ones_da.expand_dims(period=list(periods), scenario=list(scenarios)) - .transpose('time', 'period', 'scenario') - .rename('cluster_weight') - ) - elif has_periods: - expanded_fs.cluster_weight = ( - ones_da.expand_dims(period=list(periods)).transpose('time', 'period').rename('cluster_weight') - ) - else: - expanded_fs.cluster_weight = ( - ones_da.expand_dims(scenario=list(scenarios)).transpose('time', 'scenario').rename('cluster_weight') - ) - else: - expanded_fs.cluster_weight = expanded_fs.fit_to_model_coords( - 'cluster_weight', np.ones(n_original_timesteps), dims=['time'] + # Create cluster_weight with value 1.0 for all timesteps (no weighting needed for expanded) + ones_da = xr.DataArray(np.ones(n_original_timesteps), dims=['time'], coords={'time': original_timesteps}) + if has_periods and has_scenarios: + cluster_weight = ( + ones_da.expand_dims(period=list(periods), scenario=list(scenarios)) + .transpose('time', 'period', 'scenario') + .rename('cluster_weight') + ) + elif has_periods: + cluster_weight = ( + ones_da.expand_dims(period=list(periods)).transpose('time', 'period').rename('cluster_weight') ) + elif has_scenarios: + cluster_weight = ( + ones_da.expand_dims(scenario=list(scenarios)).transpose('time', 'scenario').rename('cluster_weight') + ) + else: + cluster_weight = ones_da.rename('cluster_weight') + expanded_ds['cluster_weight'] = cluster_weight + + expanded_fs = FlowSystem.from_dataset(expanded_ds) # 2. 
Expand solution reduced_solution = self._fs.solution From acff2b16bce1fd6f980bb8333b8d19e2968f4351 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 18:34:28 +0100 Subject: [PATCH 057/191] Improve code --- flixopt/flow_system.py | 14 +++---- flixopt/transform_accessor.py | 73 +++++++++-------------------------- 2 files changed, 24 insertions(+), 63 deletions(-) diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 368228071..1e6759b5f 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -197,15 +197,11 @@ def __init__( # Cluster weight for cluster_reduce optimization (default 1.0) # Represents how many original timesteps each cluster represents # May have period/scenario dimensions if cluster_reduce was used with those - if cluster_weight is None: - self.cluster_weight = self.fit_to_model_coords( - 'cluster_weight', np.ones(len(self.timesteps)), dims=['time'] - ) - elif isinstance(cluster_weight, xr.DataArray) and len(cluster_weight.dims) > 1: - # Multi-dimensional cluster_weight from cluster_reduce - use directly - self.cluster_weight = cluster_weight.rename('cluster_weight') - else: - self.cluster_weight = self.fit_to_model_coords('cluster_weight', cluster_weight, dims=['time']) + self.cluster_weight = self.fit_to_model_coords( + 'cluster_weight', + np.ones(len(self.timesteps)) if cluster_weight is None else cluster_weight, + dims=['time', 'period', 'scenario'], # Gracefully ignores dims not present + ) self.scenario_weights = scenario_weights # Use setter diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index fe9606f45..846f8d004 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -1193,45 +1193,23 @@ def cluster_reduce( start=self._fs.timesteps[0], periods=n_reduced_timesteps, freq=pd.Timedelta(hours=dt) ) - # Create timestep weights from cluster occurrences (per period/scenario if needed) - def _build_weights_for_key(key: tuple) -> np.ndarray: + # Create timestep weights from cluster occurrences (per period/scenario) + def _build_weights_for_key(key: tuple) -> xr.DataArray: occurrences = cluster_occurrences_all[key] - return np.repeat([occurrences.get(c, 1) for c in range(actual_n_clusters)], timesteps_per_cluster) - - # Build weights array - might need period/scenario dimensions - if has_periods or has_scenarios: - # Build multi-dimensional weights - weights_dict: dict[tuple, xr.DataArray] = {} - for key in cluster_occurrences_all: - weights_dict[key] = xr.DataArray( - _build_weights_for_key(key), dims=['time'], coords={'time': new_time_index} - ) - # Combine into single DataArray with appropriate dimensions - if has_periods and has_scenarios: - period_arrays = [] - for p in periods: - scenario_arrays = [weights_dict[(p, s)] for s in scenarios] - period_arrays.append(xr.concat(scenario_arrays, dim=pd.Index(scenarios, name='scenario'))) - timestep_weights = xr.concat(period_arrays, dim=pd.Index(periods, name='period')).transpose('time', ...) 
- elif has_periods: - timestep_weights = xr.concat( - [weights_dict[(p, None)] for p in periods], dim=pd.Index(periods, name='period') - ).transpose('time', 'period') - else: - timestep_weights = xr.concat( - [weights_dict[(None, s)] for s in scenarios], dim=pd.Index(scenarios, name='scenario') - ).transpose('time', 'scenario') - else: - timestep_weights = _build_weights_for_key(first_key) + weights = np.repeat([occurrences.get(c, 1) for c in range(actual_n_clusters)], timesteps_per_cluster) + return xr.DataArray(weights, dims=['time'], coords={'time': new_time_index}) + + # Build weights - use _combine_slices_to_dataarray for consistent multi-dim handling + weights_slices = {key: _build_weights_for_key(key) for key in cluster_occurrences_all} + # Create a dummy 1D DataArray as template for _combine_slices_to_dataarray + dummy_template = xr.DataArray(np.zeros(n_reduced_timesteps), dims=['time']) + timestep_weights = self._combine_slices_to_dataarray( + weights_slices, dummy_template, new_time_index, periods, scenarios + ) logger.info(f'Reduced from {len(self._fs.timesteps)} to {n_reduced_timesteps} timesteps') logger.info(f'Clusters: {actual_n_clusters} (requested: {n_clusters})') - # Create new time index - new_time_index = pd.date_range( - start=self._fs.timesteps[0], periods=n_reduced_timesteps, freq=pd.Timedelta(hours=dt) - ) - # Build typical periods DataArrays keyed by (variable_name, (period, scenario)) typical_das: dict[str, dict[tuple, xr.DataArray]] = {} for key, clustering in clustering_results.items(): @@ -1339,10 +1317,8 @@ def _combine_slices_to_dataarray( else: result = xr.concat([slices[(None, s)] for s in scenarios], dim=pd.Index(scenarios, name='scenario')) - # Match original dimension order - target_dims = [d for d in original_da.dims if d in result.dims] - if target_dims and tuple(target_dims) != result.dims: - result = result.transpose(*target_dims) + # Put time dimension first (standard order), preserve other dims + result = result.transpose('time', ...) 
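+        # NOTE: transpose('time', ...) relies on xarray's Ellipsis support: any
+        # remaining dims (period, scenario) keep their existing relative order,
+        # so slices without those dims pass through unchanged.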
return result.assign_attrs(original_da.attrs) @@ -1445,23 +1421,12 @@ def expand_da(da: xr.DataArray) -> xr.DataArray: expanded_ds.attrs['timestep_duration'] = original_fs.timestep_duration.values.tolist() # Create cluster_weight with value 1.0 for all timesteps (no weighting needed for expanded) + # Use _combine_slices_to_dataarray for consistent multi-dim handling ones_da = xr.DataArray(np.ones(n_original_timesteps), dims=['time'], coords={'time': original_timesteps}) - if has_periods and has_scenarios: - cluster_weight = ( - ones_da.expand_dims(period=list(periods), scenario=list(scenarios)) - .transpose('time', 'period', 'scenario') - .rename('cluster_weight') - ) - elif has_periods: - cluster_weight = ( - ones_da.expand_dims(period=list(periods)).transpose('time', 'period').rename('cluster_weight') - ) - elif has_scenarios: - cluster_weight = ( - ones_da.expand_dims(scenario=list(scenarios)).transpose('time', 'scenario').rename('cluster_weight') - ) - else: - cluster_weight = ones_da.rename('cluster_weight') + ones_slices = {(p, s): ones_da for p in periods for s in scenarios} + cluster_weight = self._combine_slices_to_dataarray( + ones_slices, ones_da, original_timesteps, periods, scenarios + ).rename('cluster_weight') expanded_ds['cluster_weight'] = cluster_weight expanded_fs = FlowSystem.from_dataset(expanded_ds) From 85c2d7eea443d70450e49d94b399cccb26ec8ed7 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 19:53:08 +0100 Subject: [PATCH 058/191] Add new aggregation module --- flixopt/__init__.py | 5 +- flixopt/aggregation/__init__.py | 116 +++++ flixopt/aggregation/base.py | 336 +++++++++++++ flixopt/aggregation/manual.py | 334 +++++++++++++ flixopt/aggregation/tsam_backend.py | 443 ++++++++++++++++++ flixopt/clustering.py | 52 ++ flixopt/flow_system.py | 42 ++ flixopt/structure.py | 86 ++++ flixopt/transform_accessor.py | 240 ++++++++++ tests/test_aggregation/__init__.py | 1 + tests/test_aggregation/test_base.py | 161 +++++++ tests/test_aggregation/test_integration.py | 194 ++++++++ tests/test_aggregation/test_manual_backend.py | 131 ++++++ tests/test_cluster_reduce_expand.py | 349 ++++++++++++++ 14 files changed, 2489 insertions(+), 1 deletion(-) create mode 100644 flixopt/aggregation/__init__.py create mode 100644 flixopt/aggregation/base.py create mode 100644 flixopt/aggregation/manual.py create mode 100644 flixopt/aggregation/tsam_backend.py create mode 100644 tests/test_aggregation/__init__.py create mode 100644 tests/test_aggregation/test_base.py create mode 100644 tests/test_aggregation/test_integration.py create mode 100644 tests/test_aggregation/test_manual_backend.py create mode 100644 tests/test_cluster_reduce_expand.py diff --git a/flixopt/__init__.py b/flixopt/__init__.py index 1e3fee5bd..00555c7e0 100644 --- a/flixopt/__init__.py +++ b/flixopt/__init__.py @@ -13,7 +13,7 @@ __version__ = '0.0.0.dev0' # Import commonly used classes and functions -from . import linear_converters, plotting, results, solvers +from . 
import aggregation, linear_converters, plotting, results, solvers from .carrier import Carrier, CarrierContainer from .clustering import ClusteringParameters from .components import ( @@ -32,6 +32,7 @@ from .interface import InvestParameters, Piece, Piecewise, PiecewiseConversion, PiecewiseEffects, StatusParameters from .optimization import ClusteredOptimization, Optimization, SegmentedOptimization from .plot_result import PlotResult +from .structure import TimeSeriesWeights __all__ = [ 'TimeSeriesData', @@ -60,6 +61,8 @@ 'PiecewiseEffects', 'ClusteringParameters', 'PlotResult', + 'TimeSeriesWeights', + 'aggregation', 'plotting', 'results', 'linear_converters', diff --git a/flixopt/aggregation/__init__.py b/flixopt/aggregation/__init__.py new file mode 100644 index 000000000..02d7552a4 --- /dev/null +++ b/flixopt/aggregation/__init__.py @@ -0,0 +1,116 @@ +""" +Time Series Aggregation Module for flixopt. + +This module provides an abstraction layer for time series aggregation that +supports multiple backends while maintaining proper handling of multi-dimensional +data (period, scenario dimensions). + +Available backends: +- TSAMBackend: Uses tsam package for k-means clustering into typical periods +- ManualBackend: Accepts user-provided mapping/weights for external aggregation + +Key classes: +- AggregationResult: Universal result container from any aggregation backend +- ClusterStructure: Hierarchical structure info for storage inter-period linking +- Aggregator: Protocol that all backends implement + +Example usage: + + # Using TSAM backend + from flixopt.aggregation import TSAMBackend + + backend = TSAMBackend(cluster_duration='1D', n_segments=4) + result = backend.aggregate(data, n_representatives=8) + + # Using manual/external aggregation (PyPSA-style) + from flixopt.aggregation import ManualBackend + import xarray as xr + + backend = ManualBackend( + timestep_mapping=xr.DataArray(my_mapping, dims=['original_time']), + representative_weights=xr.DataArray(my_weights, dims=['time']), + ) + result = backend.aggregate(data) + + # Or via transform accessor + fs_aggregated = fs.transform.aggregate(method='tsam', n_representatives=8) + fs_aggregated = fs.transform.set_aggregation(my_mapping, my_weights) +""" + +from .base import ( + AggregationInfo, + AggregationResult, + Aggregator, + ClusterStructure, + create_cluster_structure_from_mapping, +) +from .manual import ( + ManualBackend, + create_manual_backend_from_labels, + create_manual_backend_from_selection, +) + +# Conditional imports based on package availability +_BACKENDS = {'manual': ManualBackend} + +try: + from .tsam_backend import TSAMBackend, create_tsam_backend_from_clustering + + _BACKENDS['tsam'] = TSAMBackend +except ImportError: + # tsam not installed - TSAMBackend not available + TSAMBackend = None + create_tsam_backend_from_clustering = None + + +def get_backend(name: str): + """Get aggregation backend by name. + + Args: + name: Backend name ('tsam', 'manual'). + + Returns: + Backend class. + + Raises: + ValueError: If backend is not available. + """ + if name not in _BACKENDS: + available = list(_BACKENDS.keys()) + raise ValueError(f"Unknown backend '{name}'. Available: {available}") + + backend_class = _BACKENDS[name] + if backend_class is None: + raise ImportError( + f"Backend '{name}' is not available. Install required dependencies (e.g., 'pip install tsam' for TSAM)." + ) + + return backend_class + + +def list_backends() -> list[str]: + """List available aggregation backends. 
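+    Backends whose optional dependencies are missing (e.g. tsam) are excluded
+    from the returned list.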
+ + Returns: + List of backend names that are currently available. + """ + return [name for name, cls in _BACKENDS.items() if cls is not None] + + +__all__ = [ + # Core classes + 'AggregationResult', + 'AggregationInfo', + 'ClusterStructure', + 'Aggregator', + # Backends + 'TSAMBackend', + 'ManualBackend', + # Utilities + 'create_cluster_structure_from_mapping', + 'create_tsam_backend_from_clustering', + 'create_manual_backend_from_labels', + 'create_manual_backend_from_selection', + 'get_backend', + 'list_backends', +] diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py new file mode 100644 index 000000000..1ed40c0bb --- /dev/null +++ b/flixopt/aggregation/base.py @@ -0,0 +1,336 @@ +""" +Base classes and data structures for time series aggregation. + +This module provides an abstraction layer for time series aggregation that +supports multiple backends (TSAM, manual/external, etc.) while maintaining +proper handling of multi-dimensional data (period, scenario dimensions). + +All data structures use xarray for consistent multi-dimensional support. +""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import Protocol, runtime_checkable + +import numpy as np +import xarray as xr + + +@dataclass +class ClusterStructure: + """Structure information for inter-period storage linking. + + This class captures the hierarchical structure of time series clustering, + which is needed for proper storage state-of-charge tracking across + typical periods when using cluster_reduce(). + + All arrays use xarray DataArrays to properly handle multi-dimensional + cases (period, scenario dimensions). + + Attributes: + cluster_order: Maps original periods to cluster IDs. + dims: [original_period] or [original_period, period, scenario] + Each value indicates which typical period (cluster) the original + period belongs to. + cluster_occurrences: Count of how many original periods each cluster represents. + dims: [cluster] or [cluster, period, scenario] + n_clusters: Number of distinct clusters (typical periods). + Can be int (same for all) or DataArray (varies by period/scenario). + timesteps_per_cluster: Number of timesteps in each cluster period. 
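+            A single integer, i.e. all clusters are assumed to have the same length.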
+ + Example: + For 365 days clustered into 8 typical days: + - cluster_order: shape (365,), values 0-7 + - cluster_occurrences: shape (8,), e.g., [45, 46, 46, 46, 46, 45, 45, 46] + - n_clusters: 8 + - timesteps_per_cluster: 24 (for hourly data) + """ + + cluster_order: xr.DataArray + cluster_occurrences: xr.DataArray + n_clusters: int | xr.DataArray + timesteps_per_cluster: int + + def __post_init__(self): + """Validate and ensure proper DataArray formatting.""" + # Ensure cluster_order is a DataArray with proper dims + if not isinstance(self.cluster_order, xr.DataArray): + self.cluster_order = xr.DataArray(self.cluster_order, dims=['original_period'], name='cluster_order') + elif self.cluster_order.name is None: + self.cluster_order = self.cluster_order.rename('cluster_order') + + # Ensure cluster_occurrences is a DataArray with proper dims + if not isinstance(self.cluster_occurrences, xr.DataArray): + self.cluster_occurrences = xr.DataArray( + self.cluster_occurrences, dims=['cluster'], name='cluster_occurrences' + ) + elif self.cluster_occurrences.name is None: + self.cluster_occurrences = self.cluster_occurrences.rename('cluster_occurrences') + + @property + def n_original_periods(self) -> int: + """Number of original periods (before clustering).""" + return len(self.cluster_order.coords['original_period']) + + def get_cluster_weight_per_timestep(self) -> xr.DataArray: + """Get weight for each representative timestep. + + Returns an array where each timestep's weight equals the number of + original periods its cluster represents. + + Returns: + DataArray with dims [time] or [time, period, scenario]. + """ + # Expand cluster_occurrences to timesteps + n_clusters = ( + int(self.n_clusters) if isinstance(self.n_clusters, (int, np.integer)) else int(self.n_clusters.values) + ) + + # Get occurrence for each cluster, then repeat for timesteps + weights_list = [] + for c in range(n_clusters): + occ = self.cluster_occurrences.sel(cluster=c) + weights_list.append(np.repeat(float(occ.values), self.timesteps_per_cluster)) + + weights = np.concatenate(weights_list) + return xr.DataArray( + weights, + dims=['time'], + coords={'time': np.arange(len(weights))}, + name='cluster_weight', + ) + + +@dataclass +class AggregationResult: + """Universal result from any time series aggregation method. + + This dataclass captures all information needed to: + 1. Transform a FlowSystem to use aggregated timesteps + 2. Expand a solution back to original resolution + 3. Properly weight results for statistics + + All arrays use xarray DataArrays to properly handle multi-dimensional + cases (period, scenario dimensions). + + Attributes: + timestep_mapping: Maps each original timestep to its representative index. + dims: [original_time] or [original_time, period, scenario] + Values are indices into the representative timesteps (0 to n_representatives-1). + n_representatives: Number of representative timesteps after aggregation. + Can be int (same for all) or DataArray (varies by period/scenario). + representative_weights: Weight for each representative timestep. + dims: [time] or [time, period, scenario] + Typically equals the number of original timesteps each representative covers. + aggregated_data: Time series data aggregated to representative timesteps. + Optional - some backends may not aggregate data. + cluster_structure: Hierarchical clustering structure for storage linking. + Optional - only needed when using cluster_reduce() mode. + original_data: Reference to original data before aggregation. 
+ Optional - useful for expand_solution(). + + Example: + For 8760 hourly timesteps -> 192 representative timesteps (8 days x 24h): + - timestep_mapping: shape (8760,), values 0-191 + - n_representatives: 192 + - representative_weights: shape (192,), summing to 8760 + """ + + timestep_mapping: xr.DataArray + n_representatives: int | xr.DataArray + representative_weights: xr.DataArray + aggregated_data: xr.Dataset | None = None + cluster_structure: ClusterStructure | None = None + original_data: xr.Dataset | None = None + + def __post_init__(self): + """Validate and ensure proper DataArray formatting.""" + # Ensure timestep_mapping is a DataArray + if not isinstance(self.timestep_mapping, xr.DataArray): + self.timestep_mapping = xr.DataArray(self.timestep_mapping, dims=['original_time'], name='timestep_mapping') + elif self.timestep_mapping.name is None: + self.timestep_mapping = self.timestep_mapping.rename('timestep_mapping') + + # Ensure representative_weights is a DataArray + if not isinstance(self.representative_weights, xr.DataArray): + self.representative_weights = xr.DataArray( + self.representative_weights, dims=['time'], name='representative_weights' + ) + elif self.representative_weights.name is None: + self.representative_weights = self.representative_weights.rename('representative_weights') + + @property + def n_original_timesteps(self) -> int: + """Number of original timesteps (before aggregation).""" + return len(self.timestep_mapping.coords['original_time']) + + def get_expansion_mapping(self) -> xr.DataArray: + """Get mapping from original timesteps to representative indices. + + This is the same as timestep_mapping but ensures proper naming + for use in expand_solution(). + + Returns: + DataArray mapping original timesteps to representative indices. + """ + return self.timestep_mapping.rename('expansion_mapping') + + def validate(self) -> None: + """Validate that all fields are consistent. + + Raises: + ValueError: If validation fails. + """ + n_rep = ( + int(self.n_representatives) + if isinstance(self.n_representatives, (int, np.integer)) + else int(self.n_representatives.max().values) + ) + + # Check mapping values are within range + max_idx = int(self.timestep_mapping.max().values) + if max_idx >= n_rep: + raise ValueError(f'timestep_mapping contains index {max_idx} but n_representatives is {n_rep}') + + # Check weights length matches n_representatives + if len(self.representative_weights) != n_rep: + raise ValueError( + f'representative_weights has {len(self.representative_weights)} elements ' + f'but n_representatives is {n_rep}' + ) + + # Check weights sum roughly equals original timesteps + weight_sum = float(self.representative_weights.sum().values) + n_original = self.n_original_timesteps + if abs(weight_sum - n_original) > 1e-6: + # Warning only - some aggregation methods may not preserve this exactly + import warnings + + warnings.warn( + f'representative_weights sum ({weight_sum}) does not match n_original_timesteps ({n_original})', + stacklevel=2, + ) + + +@runtime_checkable +class Aggregator(Protocol): + """Protocol that any aggregation backend must implement. + + This protocol defines the interface for time series aggregation backends. + Implementations can use any aggregation algorithm (TSAM, sklearn k-means, + hierarchical clustering, etc.) as long as they return an AggregationResult. + + The input data is an xarray Dataset to properly handle multi-dimensional + time series with period and scenario dimensions. 
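+    Because the protocol is decorated with ``runtime_checkable``, ``isinstance()``
+    checks only verify that an ``aggregate`` method exists; no subclassing is
+    required.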
+ + Example implementation: + class MyAggregator: + def aggregate( + self, + data: xr.Dataset, + n_representatives: int, + **kwargs + ) -> AggregationResult: + # Custom aggregation logic + ... + return AggregationResult( + timestep_mapping=mapping, + n_representatives=n_representatives, + representative_weights=weights, + ) + """ + + def aggregate( + self, + data: xr.Dataset, + n_representatives: int, + **kwargs, + ) -> AggregationResult: + """Perform time series aggregation. + + Args: + data: Input time series data as xarray Dataset. + Must have 'time' dimension. May also have 'period' and/or + 'scenario' dimensions for multi-dimensional optimization. + n_representatives: Target number of representative timesteps. + **kwargs: Backend-specific options. + + Returns: + AggregationResult containing mapping, weights, and optionally + aggregated data and cluster structure. + """ + ... + + +@dataclass +class AggregationInfo: + """Information about an aggregation stored on a FlowSystem. + + This is stored on the FlowSystem after aggregation to enable: + - expand_solution() to map back to original timesteps + - Statistics to properly weight results + - Serialization/deserialization of aggregated models + + Attributes: + result: The AggregationResult from the aggregation backend. + original_flow_system: Reference to the FlowSystem before aggregation. + mode: Whether aggregation used 'reduce' (fewer timesteps) or + 'constrain' (same timesteps with equality constraints). + backend_name: Name of the aggregation backend used (e.g., 'tsam', 'manual'). + """ + + result: AggregationResult + original_flow_system: object # FlowSystem - avoid circular import + mode: str # 'reduce' or 'constrain' + backend_name: str = 'unknown' + + +def create_cluster_structure_from_mapping( + timestep_mapping: xr.DataArray, + timesteps_per_cluster: int, +) -> ClusterStructure: + """Create ClusterStructure from a timestep mapping. + + This is a convenience function for creating ClusterStructure when you + have the timestep mapping but not the full clustering metadata. + + Args: + timestep_mapping: Mapping from original timesteps to representative indices. + timesteps_per_cluster: Number of timesteps per cluster period. + + Returns: + ClusterStructure derived from the mapping. 
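+
+    Example:
+        A minimal sketch: 4 original periods of 2 timesteps, mapped onto 2 clusters.
+
+        >>> import xarray as xr
+        >>> mapping = xr.DataArray([0, 1, 2, 3, 0, 1, 2, 3], dims=['original_time'])
+        >>> structure = create_cluster_structure_from_mapping(mapping, timesteps_per_cluster=2)
+        >>> structure.n_clusters
+        2
+        >>> structure.cluster_occurrences.values.tolist()
+        [2, 2]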
+ """ + n_original = len(timestep_mapping) + n_original_periods = n_original // timesteps_per_cluster + + # Determine cluster order from the mapping + # Each original period maps to the cluster of its first timestep + cluster_order = [] + for p in range(n_original_periods): + start_idx = p * timesteps_per_cluster + cluster_idx = int(timestep_mapping.isel(original_time=start_idx).values) // timesteps_per_cluster + cluster_order.append(cluster_idx) + + cluster_order_da = xr.DataArray(cluster_order, dims=['original_period'], name='cluster_order') + + # Count occurrences of each cluster + unique_clusters = np.unique(cluster_order) + occurrences = {} + for c in unique_clusters: + occurrences[int(c)] = sum(1 for x in cluster_order if x == c) + + n_clusters = len(unique_clusters) + cluster_occurrences_da = xr.DataArray( + [occurrences.get(c, 0) for c in range(n_clusters)], + dims=['cluster'], + name='cluster_occurrences', + ) + + return ClusterStructure( + cluster_order=cluster_order_da, + cluster_occurrences=cluster_occurrences_da, + n_clusters=n_clusters, + timesteps_per_cluster=timesteps_per_cluster, + ) diff --git a/flixopt/aggregation/manual.py b/flixopt/aggregation/manual.py new file mode 100644 index 000000000..c77dc6a84 --- /dev/null +++ b/flixopt/aggregation/manual.py @@ -0,0 +1,334 @@ +""" +Manual aggregation backend for user-provided clustering results. + +This backend enables PyPSA-style workflows where users perform aggregation +externally (using sklearn, custom algorithms, etc.) and then provide the +mapping and weights to flixopt. +""" + +from __future__ import annotations + +import numpy as np +import xarray as xr + +from .base import AggregationResult, ClusterStructure, create_cluster_structure_from_mapping + + +class ManualBackend: + """Backend for user-provided aggregation results. + + This backend accepts pre-computed aggregation mapping and weights, + enabling users to use any external clustering tool (sklearn k-means, + hierarchical clustering, etc.) with flixopt. + + This is similar to PyPSA's approach where aggregation is done externally + and the framework just accepts the results. + + Args: + timestep_mapping: Mapping from original timesteps to representative indices. + DataArray with dims [original_time] or [original_time, period, scenario]. + Values should be integers in range [0, n_representatives). + representative_weights: Weight for each representative timestep. + DataArray with dims [time] or [time, period, scenario]. + Typically equals count of original timesteps each representative covers. + cluster_structure: Optional cluster structure for storage inter-period linking. + If not provided and timesteps_per_cluster is given, will be inferred from mapping. + timesteps_per_cluster: Number of timesteps per cluster period. + Required to infer cluster_structure if not explicitly provided. + + Example: + >>> # External clustering with sklearn + >>> from sklearn.cluster import KMeans + >>> kmeans = KMeans(n_clusters=8) + >>> labels = kmeans.fit_predict(my_data) + >>> + >>> # Create mapping (original timestep -> representative) + >>> mapping = ... # compute from labels + >>> weights = ... # count occurrences + >>> + >>> # Use with flixopt + >>> backend = ManualBackend( + ... timestep_mapping=xr.DataArray(mapping, dims=['original_time']), + ... representative_weights=xr.DataArray(weights, dims=['time']), + ... 
) + >>> result = backend.aggregate(data, n_representatives=192) + """ + + def __init__( + self, + timestep_mapping: xr.DataArray, + representative_weights: xr.DataArray, + cluster_structure: ClusterStructure | None = None, + timesteps_per_cluster: int | None = None, + ): + # Validate and store mapping + if not isinstance(timestep_mapping, xr.DataArray): + timestep_mapping = xr.DataArray(timestep_mapping, dims=['original_time'], name='timestep_mapping') + self.timestep_mapping = timestep_mapping + + # Validate and store weights + if not isinstance(representative_weights, xr.DataArray): + representative_weights = xr.DataArray(representative_weights, dims=['time'], name='representative_weights') + self.representative_weights = representative_weights + + # Store or infer cluster structure + self.cluster_structure = cluster_structure + self.timesteps_per_cluster = timesteps_per_cluster + + # Validate + self._validate() + + def _validate(self) -> None: + """Validate input arrays.""" + # Check mapping has required dimension + if 'original_time' not in self.timestep_mapping.dims: + if 'time' in self.timestep_mapping.dims: + # Rename for clarity + self.timestep_mapping = self.timestep_mapping.rename({'time': 'original_time'}) + else: + raise ValueError("timestep_mapping must have 'original_time' or 'time' dimension") + + # Check weights has required dimension + if 'time' not in self.representative_weights.dims: + raise ValueError("representative_weights must have 'time' dimension") + + # Check mapping values are non-negative integers + min_val = int(self.timestep_mapping.min().values) + if min_val < 0: + raise ValueError(f'timestep_mapping contains negative value: {min_val}') + + # Check mapping values are within bounds + max_val = int(self.timestep_mapping.max().values) + n_weights = len(self.representative_weights.coords['time']) + if max_val >= n_weights: + raise ValueError( + f'timestep_mapping contains index {max_val} but representative_weights only has {n_weights} elements' + ) + + def aggregate( + self, + data: xr.Dataset, + n_representatives: int | None = None, + **kwargs, + ) -> AggregationResult: + """Create AggregationResult from stored mapping and weights. + + The data parameter is used to: + 1. Validate dimensions match the mapping + 2. Create aggregated data by indexing with the mapping + + Args: + data: Input time series data as xarray Dataset. + Used for validation and to create aggregated_data. + n_representatives: Number of representatives. If None, inferred from weights. + **kwargs: Ignored (for protocol compatibility). + + Returns: + AggregationResult with the stored mapping and weights. 
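+
+        Note:
+            ``aggregated_data`` is built by simple selection: each representative
+            takes the value of the first original timestep that maps to it, rather
+            than an average over all mapped timesteps.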
+ """ + # Infer n_representatives if not provided + if n_representatives is None: + n_representatives = len(self.representative_weights.coords['time']) + + # Validate data dimensions match mapping + self._validate_data_dimensions(data) + + # Create aggregated data by indexing original data + aggregated_data = self._create_aggregated_data(data, n_representatives) + + # Infer cluster structure if needed + cluster_structure = self.cluster_structure + if cluster_structure is None and self.timesteps_per_cluster is not None: + cluster_structure = create_cluster_structure_from_mapping(self.timestep_mapping, self.timesteps_per_cluster) + + return AggregationResult( + timestep_mapping=self.timestep_mapping, + n_representatives=n_representatives, + representative_weights=self.representative_weights, + aggregated_data=aggregated_data, + cluster_structure=cluster_structure, + original_data=data, + ) + + def _validate_data_dimensions(self, data: xr.Dataset) -> None: + """Validate that data dimensions are compatible with mapping.""" + # Check time dimension length + if 'time' not in data.dims: + raise ValueError("Input data must have 'time' dimension") + + n_data_timesteps = len(data.coords['time']) + n_mapping_timesteps = len(self.timestep_mapping.coords['original_time']) + + if n_data_timesteps != n_mapping_timesteps: + raise ValueError(f'Data has {n_data_timesteps} timesteps but mapping expects {n_mapping_timesteps}') + + # Check period/scenario dimensions if present in mapping + for dim in ['period', 'scenario']: + if dim in self.timestep_mapping.dims: + if dim not in data.dims: + raise ValueError(f"Mapping has '{dim}' dimension but data does not") + mapping_coords = self.timestep_mapping.coords[dim].values + data_coords = data.coords[dim].values + if not np.array_equal(mapping_coords, data_coords): + raise ValueError(f"'{dim}' coordinates don't match between mapping and data") + + def _create_aggregated_data( + self, + data: xr.Dataset, + n_representatives: int, + ) -> xr.Dataset: + """Create aggregated data by extracting representative timesteps. + + For each representative timestep, we take the value from the first + original timestep that maps to it (simple selection, not averaging). + """ + # Find first original timestep for each representative + mapping_vals = self.timestep_mapping.values + if mapping_vals.ndim > 1: + # Multi-dimensional - use first slice + mapping_vals = mapping_vals[:, 0] if mapping_vals.ndim == 2 else mapping_vals[:, 0, 0] + + # For each representative, find the first original that maps to it + first_original = {} + for orig_idx, rep_idx in enumerate(mapping_vals): + if rep_idx not in first_original: + first_original[int(rep_idx)] = orig_idx + + # Build index array for selecting representative values + rep_indices = [first_original.get(i, 0) for i in range(n_representatives)] + + # Select from data + aggregated_vars = {} + for var_name, var_data in data.data_vars.items(): + if 'time' in var_data.dims: + # Select representative timesteps + selected = var_data.isel(time=rep_indices) + # Reassign time coordinate + selected = selected.assign_coords(time=np.arange(n_representatives)) + aggregated_vars[var_name] = selected + else: + # Non-time variable - keep as is + aggregated_vars[var_name] = var_data + + return xr.Dataset(aggregated_vars) + + +def create_manual_backend_from_labels( + labels: np.ndarray, + timesteps_per_cluster: int, + n_timesteps: int | None = None, +) -> ManualBackend: + """Create ManualBackend from cluster labels (e.g., from sklearn KMeans). 
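+    Labels are assumed to be constant within each cluster period; only the label
+    of the first timestep in each period is used.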
+ + This is a convenience function for creating a ManualBackend when you have + cluster labels from a standard clustering algorithm. + + Args: + labels: Cluster label for each timestep (from KMeans.fit_predict, etc.). + Shape: (n_timesteps,) with values in [0, n_clusters). + timesteps_per_cluster: Number of timesteps per cluster period. + n_timesteps: Total number of timesteps. If None, inferred from labels. + + Returns: + ManualBackend configured with the label-derived mapping. + + Example: + >>> from sklearn.cluster import KMeans + >>> kmeans = KMeans(n_clusters=8).fit(daily_profiles) + >>> labels = np.repeat(kmeans.labels_, 24) # Expand to hourly + >>> backend = create_manual_backend_from_labels(labels, timesteps_per_cluster=24) + """ + if n_timesteps is None: + n_timesteps = len(labels) + + # Get unique clusters and count occurrences + unique_clusters = np.unique(labels) + n_clusters = len(unique_clusters) + + # Remap labels to 0..n_clusters-1 if needed + if not np.array_equal(unique_clusters, np.arange(n_clusters)): + label_map = {old: new for new, old in enumerate(unique_clusters)} + labels = np.array([label_map[label] for label in labels]) + + # Build timestep mapping + # Each original timestep maps to: cluster_id * timesteps_per_cluster + position_in_period + n_original_periods = n_timesteps // timesteps_per_cluster + timestep_mapping = np.zeros(n_timesteps, dtype=np.int32) + + for period_idx in range(n_original_periods): + cluster_id = labels[period_idx * timesteps_per_cluster] # Label of first timestep in period + for pos in range(timesteps_per_cluster): + orig_idx = period_idx * timesteps_per_cluster + pos + if orig_idx < n_timesteps: + timestep_mapping[orig_idx] = cluster_id * timesteps_per_cluster + pos + + # Build weights (count of originals per representative) + n_representative_timesteps = n_clusters * timesteps_per_cluster + representative_weights = np.zeros(n_representative_timesteps, dtype=np.float64) + + # Count occurrences of each cluster + cluster_counts = {} + for period_idx in range(n_original_periods): + cluster_id = labels[period_idx * timesteps_per_cluster] + cluster_counts[cluster_id] = cluster_counts.get(cluster_id, 0) + 1 + + for cluster_id, count in cluster_counts.items(): + for pos in range(timesteps_per_cluster): + rep_idx = cluster_id * timesteps_per_cluster + pos + if rep_idx < n_representative_timesteps: + representative_weights[rep_idx] = count + + return ManualBackend( + timestep_mapping=xr.DataArray(timestep_mapping, dims=['original_time'], name='timestep_mapping'), + representative_weights=xr.DataArray(representative_weights, dims=['time'], name='representative_weights'), + timesteps_per_cluster=timesteps_per_cluster, + ) + + +def create_manual_backend_from_selection( + selected_indices: np.ndarray, + weights: np.ndarray, + n_original_timesteps: int, + timesteps_per_period: int | None = None, +) -> ManualBackend: + """Create ManualBackend from selected representative timesteps. + + This is useful when you have a simple selection-based aggregation + (e.g., select every Nth timestep, select specific representative days). + + Args: + selected_indices: Indices of selected representative timesteps. + These become the new time axis. + weights: Weight for each selected timestep (how many originals it represents). + n_original_timesteps: Total number of original timesteps. + timesteps_per_period: Optional, for creating cluster structure. + + Returns: + ManualBackend configured with the selection-based mapping. 
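+
+    Note:
+        Each original timestep is assigned to the nearest selected index (by raw
+        index distance), so the supplied weights should be consistent with that
+        nearest-neighbour assignment.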
+ + Example: + >>> # Select every 7th day as representative + >>> selected = np.arange(0, 365 * 24, 7 * 24) # Weekly representatives + >>> weights = np.ones(len(selected)) * 7 # Each represents 7 days + >>> backend = create_manual_backend_from_selection(selected, weights, n_original_timesteps=365 * 24) + """ + n_representatives = len(selected_indices) + + if len(weights) != n_representatives: + raise ValueError(f'weights has {len(weights)} elements but selected_indices has {n_representatives}') + + # Build mapping: each original maps to nearest selected + timestep_mapping = np.zeros(n_original_timesteps, dtype=np.int32) + + # Simple nearest-neighbor assignment + for orig_idx in range(n_original_timesteps): + # Find nearest selected index + distances = np.abs(selected_indices - orig_idx) + nearest_rep = np.argmin(distances) + timestep_mapping[orig_idx] = nearest_rep + + return ManualBackend( + timestep_mapping=xr.DataArray(timestep_mapping, dims=['original_time'], name='timestep_mapping'), + representative_weights=xr.DataArray(weights, dims=['time'], name='representative_weights'), + timesteps_per_cluster=timesteps_per_period, + ) diff --git a/flixopt/aggregation/tsam_backend.py b/flixopt/aggregation/tsam_backend.py new file mode 100644 index 000000000..dbe9ed1e0 --- /dev/null +++ b/flixopt/aggregation/tsam_backend.py @@ -0,0 +1,443 @@ +""" +TSAM (Time Series Aggregation Module) backend for time series aggregation. + +This backend wraps the existing flixopt Clustering class which uses the +tsam package to perform k-means clustering of time series into typical periods. +""" + +from __future__ import annotations + +import logging + +import numpy as np +import xarray as xr + +from .base import AggregationResult, ClusterStructure + +logger = logging.getLogger('flixopt') + +# Check if tsam is available +try: + import tsam.timeseriesaggregation as tsam + + TSAM_AVAILABLE = True +except ImportError: + TSAM_AVAILABLE = False + + +def _parse_cluster_duration(duration: str | float) -> float: + """Convert cluster duration to hours. + + Args: + duration: Either a pandas-style duration string ('1D', '24h', '6h') + or a numeric value in hours. + + Returns: + Duration in hours. + """ + import pandas as pd + + if isinstance(duration, (int, float)): + return float(duration) + + # Parse pandas-style duration strings + td = pd.Timedelta(duration) + return td.total_seconds() / 3600 + + +class TSAMBackend: + """TSAM-based time series aggregation backend. + + This backend uses the tsam (Time Series Aggregation Module) package + to perform k-means clustering of time series into typical periods. + + Features: + - Inter-period clustering (typical days/weeks) + - Intra-period segmentation (reduce timesteps within periods) + - Extreme period preservation (high/low peaks) + - Custom weighting of time series for clustering + + Args: + cluster_duration: Duration of each cluster period. + Can be pandas-style string ('1D', '24h') or hours as float. + n_segments: Number of segments within each period for intra-period + clustering. None for no segmentation. + time_series_for_high_peaks: Column names to preserve high-value periods for. + time_series_for_low_peaks: Column names to preserve low-value periods for. + weights: Dict mapping column names to clustering weights. 
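+            Higher weights make a series count more in tsam's clustering distance.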
+ + Example: + >>> backend = TSAMBackend(cluster_duration='1D', n_segments=4) + >>> result = backend.aggregate(data, n_representatives=8) + """ + + def __init__( + self, + cluster_duration: str | float = '1D', + n_segments: int | None = None, + time_series_for_high_peaks: list[str] | None = None, + time_series_for_low_peaks: list[str] | None = None, + weights: dict[str, float] | None = None, + ): + if not TSAM_AVAILABLE: + raise ImportError("The 'tsam' package is required for TSAMBackend. Install it with 'pip install tsam'.") + + self.cluster_duration = cluster_duration + self.cluster_duration_hours = _parse_cluster_duration(cluster_duration) + self.n_segments = n_segments + self.time_series_for_high_peaks = time_series_for_high_peaks or [] + self.time_series_for_low_peaks = time_series_for_low_peaks or [] + self.weights = weights or {} + + @property + def use_extreme_periods(self) -> bool: + """Whether extreme period selection is enabled.""" + return bool(self.time_series_for_high_peaks or self.time_series_for_low_peaks) + + def aggregate( + self, + data: xr.Dataset, + n_representatives: int, + hours_per_timestep: float | None = None, + **kwargs, + ) -> AggregationResult: + """Perform TSAM aggregation on the input data. + + For multi-dimensional data (period/scenario), aggregation is performed + independently for each (period, scenario) combination. + + Args: + data: Input time series data as xarray Dataset. + Must have 'time' dimension. + n_representatives: Target number of typical periods (clusters). + hours_per_timestep: Duration of each timestep in hours. + If None, inferred from time coordinates. + **kwargs: Additional options passed to tsam. + + Returns: + AggregationResult with mapping, weights, and aggregated data. + """ + # Convert Dataset to DataFrame for tsam + # Handle multi-dimensional case + has_period = 'period' in data.dims + has_scenario = 'scenario' in data.dims + + if has_period or has_scenario: + return self._aggregate_multi_dimensional(data, n_representatives, hours_per_timestep, **kwargs) + else: + return self._aggregate_single(data, n_representatives, hours_per_timestep, **kwargs) + + def _aggregate_single( + self, + data: xr.Dataset, + n_representatives: int, + hours_per_timestep: float | None = None, + **kwargs, + ) -> AggregationResult: + """Aggregate a single-dimensional time series.""" + import pandas as pd + + # Convert to DataFrame + df = data.to_dataframe() + if isinstance(df.index, pd.MultiIndex): + # Flatten multi-index (shouldn't happen for single-dim, but be safe) + df = df.reset_index(drop=True) + + n_timesteps = len(df) + + # Infer hours_per_timestep if not provided + if hours_per_timestep is None: + if 'time' in data.coords and hasattr(data.coords['time'], 'values'): + time_vals = pd.to_datetime(data.coords['time'].values) + if len(time_vals) > 1: + hours_per_timestep = (time_vals[1] - time_vals[0]).total_seconds() / 3600 + else: + hours_per_timestep = 1.0 + else: + hours_per_timestep = 1.0 + + # Calculate number of timesteps per period + timesteps_per_period = int(self.cluster_duration_hours / hours_per_timestep) + total_periods = n_timesteps // timesteps_per_period + + # Determine actual number of clusters + n_clusters = min(n_representatives, total_periods) + + # Create tsam aggregation + tsam_agg = tsam.TimeSeriesAggregation( + df, + noTypicalPeriods=n_clusters, + hoursPerPeriod=self.cluster_duration_hours, + resolution=hours_per_timestep, + clusterMethod='k_means', + extremePeriodMethod='new_cluster_center' if self.use_extreme_periods else 
'None', + weightDict={name: w for name, w in self.weights.items() if name in df.columns}, + addPeakMax=self.time_series_for_high_peaks, + addPeakMin=self.time_series_for_low_peaks, + segmentation=self.n_segments is not None, + noSegments=self.n_segments if self.n_segments is not None else 1, + ) + + tsam_agg.createTypicalPeriods() + aggregated_df = tsam_agg.predictOriginalData() + + # Build timestep mapping + # For each original timestep, find which representative timestep it maps to + cluster_order = tsam_agg.clusterOrder + timestep_mapping = np.zeros(n_timesteps, dtype=np.int32) + + for period_idx, cluster_id in enumerate(cluster_order): + for pos in range(timesteps_per_period): + original_idx = period_idx * timesteps_per_period + pos + if original_idx < n_timesteps: + representative_idx = cluster_id * timesteps_per_period + pos + timestep_mapping[original_idx] = representative_idx + + # Build representative weights (how many originals each representative covers) + n_representative_timesteps = n_clusters * timesteps_per_period + representative_weights = np.zeros(n_representative_timesteps, dtype=np.float64) + + for cluster_id, count in tsam_agg.clusterPeriodNoOccur.items(): + for pos in range(timesteps_per_period): + rep_idx = cluster_id * timesteps_per_period + pos + if rep_idx < n_representative_timesteps: + representative_weights[rep_idx] = count + + # Create cluster structure for storage linking + cluster_occurrences = xr.DataArray( + [tsam_agg.clusterPeriodNoOccur.get(c, 0) for c in range(n_clusters)], + dims=['cluster'], + name='cluster_occurrences', + ) + + cluster_structure = ClusterStructure( + cluster_order=xr.DataArray(cluster_order, dims=['original_period'], name='cluster_order'), + cluster_occurrences=cluster_occurrences, + n_clusters=n_clusters, + timesteps_per_cluster=timesteps_per_period, + ) + + # Convert aggregated data to xarray Dataset + # Extract only the typical period timesteps + typical_timesteps = n_clusters * timesteps_per_period + aggregated_ds = xr.Dataset( + {col: (['time'], aggregated_df[col].values[:typical_timesteps]) for col in aggregated_df.columns}, + coords={'time': np.arange(typical_timesteps)}, + ) + + return AggregationResult( + timestep_mapping=xr.DataArray(timestep_mapping, dims=['original_time'], name='timestep_mapping'), + n_representatives=n_representative_timesteps, + representative_weights=xr.DataArray(representative_weights, dims=['time'], name='representative_weights'), + aggregated_data=aggregated_ds, + cluster_structure=cluster_structure, + original_data=data, + ) + + def _aggregate_multi_dimensional( + self, + data: xr.Dataset, + n_representatives: int, + hours_per_timestep: float | None = None, + **kwargs, + ) -> AggregationResult: + """Aggregate multi-dimensional data (with period/scenario dims). + + Performs independent aggregation for each (period, scenario) combination, + then combines results into multi-dimensional arrays. 
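+
+        Note:
+            All (period, scenario) slices are assumed to yield the same number of
+            representatives; ``aggregated_data`` and ``cluster_structure`` are taken
+            from the first slice only, which is a known simplification.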
+ """ + + has_period = 'period' in data.dims + has_scenario = 'scenario' in data.dims + + periods = data.coords['period'].values if has_period else [None] + scenarios = data.coords['scenario'].values if has_scenario else [None] + + # Collect results for each combination + results: dict[tuple, AggregationResult] = {} + + for period in periods: + for scenario in scenarios: + # Select slice + slice_data = data + if period is not None: + slice_data = slice_data.sel(period=period) + if scenario is not None: + slice_data = slice_data.sel(scenario=scenario) + + # Aggregate this slice + result = self._aggregate_single(slice_data, n_representatives, hours_per_timestep, **kwargs) + results[(period, scenario)] = result + + # Combine results into multi-dimensional arrays + # For now, assume all slices have same n_representatives (simplification) + first_result = next(iter(results.values())) + n_rep = first_result.n_representatives + n_original = first_result.n_original_timesteps + + # Build multi-dimensional timestep_mapping + if has_period and has_scenario: + mapping_data = np.zeros((n_original, len(periods), len(scenarios)), dtype=np.int32) + weights_data = np.zeros((n_rep, len(periods), len(scenarios)), dtype=np.float64) + for (p, s), res in results.items(): + pi = list(periods).index(p) + si = list(scenarios).index(s) + mapping_data[:, pi, si] = res.timestep_mapping.values + weights_data[:, pi, si] = res.representative_weights.values + + timestep_mapping = xr.DataArray( + mapping_data, + dims=['original_time', 'period', 'scenario'], + coords={'original_time': np.arange(n_original), 'period': periods, 'scenario': scenarios}, + name='timestep_mapping', + ) + representative_weights = xr.DataArray( + weights_data, + dims=['time', 'period', 'scenario'], + coords={'time': np.arange(n_rep), 'period': periods, 'scenario': scenarios}, + name='representative_weights', + ) + elif has_period: + mapping_data = np.zeros((n_original, len(periods)), dtype=np.int32) + weights_data = np.zeros((n_rep, len(periods)), dtype=np.float64) + for (p, _), res in results.items(): + pi = list(periods).index(p) + mapping_data[:, pi] = res.timestep_mapping.values + weights_data[:, pi] = res.representative_weights.values + + timestep_mapping = xr.DataArray( + mapping_data, + dims=['original_time', 'period'], + coords={'original_time': np.arange(n_original), 'period': periods}, + name='timestep_mapping', + ) + representative_weights = xr.DataArray( + weights_data, + dims=['time', 'period'], + coords={'time': np.arange(n_rep), 'period': periods}, + name='representative_weights', + ) + else: # has_scenario only + mapping_data = np.zeros((n_original, len(scenarios)), dtype=np.int32) + weights_data = np.zeros((n_rep, len(scenarios)), dtype=np.float64) + for (_, s), res in results.items(): + si = list(scenarios).index(s) + mapping_data[:, si] = res.timestep_mapping.values + weights_data[:, si] = res.representative_weights.values + + timestep_mapping = xr.DataArray( + mapping_data, + dims=['original_time', 'scenario'], + coords={'original_time': np.arange(n_original), 'scenario': scenarios}, + name='timestep_mapping', + ) + representative_weights = xr.DataArray( + weights_data, + dims=['time', 'scenario'], + coords={'time': np.arange(n_rep), 'scenario': scenarios}, + name='representative_weights', + ) + + # Use cluster structure from first result (for now - could be enhanced) + # In multi-dimensional case, cluster structure may vary by period/scenario + cluster_structure = first_result.cluster_structure + + return AggregationResult( + 
timestep_mapping=timestep_mapping, + n_representatives=n_rep, + representative_weights=representative_weights, + aggregated_data=first_result.aggregated_data, # Simplified - use first slice's data + cluster_structure=cluster_structure, + original_data=data, + ) + + +def create_tsam_backend_from_clustering( + clustering, # flixopt.clustering.Clustering +) -> tuple[TSAMBackend, AggregationResult]: + """Create TSAMBackend and AggregationResult from existing Clustering object. + + This is a bridge function to help migrate from the old Clustering class + to the new aggregation abstraction. + + Args: + clustering: Existing flixopt Clustering object (after calling cluster()). + + Returns: + Tuple of (TSAMBackend, AggregationResult). + """ + if clustering.tsam is None: + raise ValueError('Clustering has not been executed. Call cluster() first.') + + tsam_agg = clustering.tsam + + backend = TSAMBackend( + cluster_duration=clustering.hours_per_period, + n_segments=clustering.n_segments, + time_series_for_high_peaks=clustering.time_series_for_high_peaks, + time_series_for_low_peaks=clustering.time_series_for_low_peaks, + weights=clustering.weights, + ) + + # Build AggregationResult from Clustering state + n_timesteps = clustering.nr_of_time_steps + timesteps_per_period = int(clustering.hours_per_period / clustering.hours_per_time_step) + cluster_order = tsam_agg.clusterOrder + n_clusters = len(tsam_agg.clusterPeriodNoOccur) + + # Build timestep mapping + timestep_mapping = np.zeros(n_timesteps, dtype=np.int32) + for period_idx, cluster_id in enumerate(cluster_order): + for pos in range(timesteps_per_period): + original_idx = period_idx * timesteps_per_period + pos + if original_idx < n_timesteps: + representative_idx = cluster_id * timesteps_per_period + pos + timestep_mapping[original_idx] = representative_idx + + # Build weights + n_representative_timesteps = n_clusters * timesteps_per_period + representative_weights = np.zeros(n_representative_timesteps, dtype=np.float64) + for cluster_id, count in tsam_agg.clusterPeriodNoOccur.items(): + for pos in range(timesteps_per_period): + rep_idx = cluster_id * timesteps_per_period + pos + if rep_idx < n_representative_timesteps: + representative_weights[rep_idx] = count + + # Create cluster structure + cluster_occurrences = xr.DataArray( + [tsam_agg.clusterPeriodNoOccur.get(c, 0) for c in range(n_clusters)], + dims=['cluster'], + name='cluster_occurrences', + ) + + cluster_structure = ClusterStructure( + cluster_order=xr.DataArray(cluster_order, dims=['original_period'], name='cluster_order'), + cluster_occurrences=cluster_occurrences, + n_clusters=n_clusters, + timesteps_per_cluster=timesteps_per_period, + ) + + # Build aggregated data as xarray Dataset + aggregated_df = clustering.aggregated_data + aggregated_ds = xr.Dataset( + {col: (['time'], aggregated_df[col].values[:n_representative_timesteps]) for col in aggregated_df.columns}, + coords={'time': np.arange(n_representative_timesteps)}, + ) + + # Original data as xarray Dataset + original_df = clustering.original_data + original_ds = xr.Dataset( + {col: (['time'], original_df[col].values) for col in original_df.columns}, + coords={'time': np.arange(n_timesteps)}, + ) + + result = AggregationResult( + timestep_mapping=xr.DataArray(timestep_mapping, dims=['original_time'], name='timestep_mapping'), + n_representatives=n_representative_timesteps, + representative_weights=xr.DataArray(representative_weights, dims=['time'], name='representative_weights'), + aggregated_data=aggregated_ds, + 
cluster_structure=cluster_structure, + original_data=original_ds, + ) + + return backend, result diff --git a/flixopt/clustering.py b/flixopt/clustering.py index b6224e838..db5153aa9 100644 --- a/flixopt/clustering.py +++ b/flixopt/clustering.py @@ -911,6 +911,58 @@ def __init__( self.storage_cyclic = storage_cyclic self.n_original_periods = len(self.cluster_order) + @classmethod + def from_cluster_structure( + cls, + model: FlowSystemModel, + flow_system: FlowSystem, + cluster_structure, # aggregation.ClusterStructure + storage_cyclic: bool = True, + ) -> TypicalPeriodsModel: + """Create TypicalPeriodsModel from a ClusterStructure. + + This is the recommended way to create TypicalPeriodsModel when using + the new aggregation API, as it accepts the generic ClusterStructure + from any aggregation backend. + + Args: + model: The FlowSystemModel to add constraints to. + flow_system: The FlowSystem being optimized. + cluster_structure: ClusterStructure from flixopt.aggregation module. + storage_cyclic: If True, enforce SOC_boundary[0] = SOC_boundary[end]. + + Returns: + Configured TypicalPeriodsModel instance. + + Example: + >>> from flixopt.aggregation import ClusterStructure + >>> structure = ClusterStructure(...) + >>> model = TypicalPeriodsModel.from_cluster_structure(model, flow_system, structure) + """ + # Extract data from ClusterStructure + cluster_order = cluster_structure.cluster_order.values + n_clusters = ( + int(cluster_structure.n_clusters) + if isinstance(cluster_structure.n_clusters, (int, np.integer)) + else int(cluster_structure.n_clusters.values) + ) + + # Convert cluster_occurrences DataArray to dict + cluster_occurrences = {} + for c in range(n_clusters): + occ = cluster_structure.cluster_occurrences.sel(cluster=c) + cluster_occurrences[c] = int(occ.values) + + return cls( + model=model, + flow_system=flow_system, + cluster_order=cluster_order, + cluster_occurrences=cluster_occurrences, + n_typical_periods=n_clusters, + timesteps_per_period=cluster_structure.timesteps_per_cluster, + storage_cyclic=storage_cyclic, + ) + def do_modeling(self): """Create SOC boundary variables and inter-period linking constraints. diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 1e6759b5f..cc6bc9117 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -39,6 +39,7 @@ import pyvis from .solvers import _Solver + from .structure import TimeSeriesWeights from .types import Effect_TPS, Numeric_S, Numeric_TPS, NumericOrBool from .carrier import Carrier, CarrierContainer @@ -2061,6 +2062,47 @@ def scenario_weights(self, value: Numeric_S | None) -> None: self._scenario_weights = self.fit_to_model_coords('scenario_weights', value, dims=['scenario']) + @property + def weights(self) -> TimeSeriesWeights: + """Unified weighting system for time series aggregation. + + Returns a TimeSeriesWeights object providing a clean, unified interface + for all weight types used in flixopt. This is the recommended way to + access weights for new code (PyPSA-inspired design). + + The temporal weight combines timestep_duration and cluster_weight, + which is the proper weight for summing over time. + + Returns: + TimeSeriesWeights with temporal, period, and scenario weights. 
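+
+        Note:
+            ``weights.temporal`` equals ``timestep_duration * cluster_weight`` and is
+            therefore identical to the ``aggregation_weight`` property.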
+ + Example: + >>> weights = flow_system.weights + >>> weighted_total = (flow_rate * weights.temporal).sum('time') + >>> # Or use the convenience method: + >>> weighted_total = weights.sum_over_time(flow_rate) + """ + from .structure import TimeSeriesWeights + + return TimeSeriesWeights( + temporal=self.timestep_duration * self.cluster_weight, + period=self.period_weights, + scenario=self._scenario_weights, + ) + + @property + def aggregation_weight(self) -> xr.DataArray: + """Combined weight for time aggregation. + + Combines timestep_duration (physical duration) and cluster_weight (cluster representation). + Use this for proper time aggregation in clustered models. + + Note: + This is equivalent to `weights.temporal`. The unified TimeSeriesWeights + interface (via `flow_system.weights`) is recommended for new code. + """ + return self.timestep_duration * self.cluster_weight + def _validate_scenario_parameter(self, value: bool | list[str], param_name: str, element_type: str) -> None: """ Validate scenario parameter value. diff --git a/flixopt/structure.py b/flixopt/structure.py index 15666c86b..eafee4e0c 100644 --- a/flixopt/structure.py +++ b/flixopt/structure.py @@ -42,6 +42,92 @@ CLASS_REGISTRY = {} +@dataclass +class TimeSeriesWeights: + """Unified weighting system for time series aggregation (PyPSA-inspired). + + This class provides a clean, unified interface for time series weights, + combining the various weight types used in flixopt into a single object. + + Attributes: + temporal: Combined weight for temporal operations (timestep_duration × cluster_weight). + Applied to all time-summing operations. dims: [time] or [time, period, scenario] + period: Weight for each period in multi-period optimization. + dims: [period] or None + scenario: Weight for each scenario in stochastic optimization. + dims: [scenario] or None + objective: Optional override weight for objective function calculations. + If None, uses temporal weight. dims: [time] or [time, period, scenario] + storage: Optional override weight for storage balance equations. + If None, uses temporal weight. dims: [time] or [time, period, scenario] + + Example: + >>> # Access via FlowSystem + >>> weights = flow_system.weights + >>> weighted_sum = (flow_rate * weights.temporal).sum('time') + >>> + >>> # With period/scenario weighting + >>> total = weighted_sum * weights.period * weights.scenario + + Note: + For backwards compatibility, the existing properties (cluster_weight, + timestep_duration, aggregation_weight) are still available on FlowSystem + and FlowSystemModel. + """ + + temporal: xr.DataArray + period: xr.DataArray | None = None + scenario: xr.DataArray | None = None + objective: xr.DataArray | None = None + storage: xr.DataArray | None = None + + def __post_init__(self): + """Validate weights.""" + if not isinstance(self.temporal, xr.DataArray): + raise TypeError('temporal must be an xarray DataArray') + if 'time' not in self.temporal.dims: + raise ValueError("temporal must have 'time' dimension") + + @property + def effective_objective(self) -> xr.DataArray: + """Get effective objective weight (override or temporal).""" + return self.objective if self.objective is not None else self.temporal + + @property + def effective_storage(self) -> xr.DataArray: + """Get effective storage weight (override or temporal).""" + return self.storage if self.storage is not None else self.temporal + + def sum_over_time(self, data: xr.DataArray) -> xr.DataArray: + """Sum data over time dimension with proper weighting. 
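+
+        Equivalent to ``(data * self.temporal).sum('time')``; data without a 'time'
+        dimension is returned unchanged. A minimal sketch:
+
+        >>> import numpy as np, xarray as xr
+        >>> w = TimeSeriesWeights(temporal=xr.DataArray(np.array([2.0, 2.0]), dims=['time']))
+        >>> float(w.sum_over_time(xr.DataArray(np.array([1.0, 3.0]), dims=['time'])))
+        8.0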
+ + Args: + data: DataArray with 'time' dimension. + + Returns: + Data summed over time with temporal weighting applied. + """ + if 'time' not in data.dims: + return data + return (data * self.temporal).sum('time') + + def apply_period_scenario_weights(self, data: xr.DataArray) -> xr.DataArray: + """Apply period and scenario weights to data. + + Args: + data: DataArray, optionally with 'period' and/or 'scenario' dims. + + Returns: + Data with period and scenario weights applied. + """ + result = data + if self.period is not None and 'period' in data.dims: + result = result * self.period + if self.scenario is not None and 'scenario' in data.dims: + result = result * self.scenario + return result + + def register_class_for_io(cls): """Register a class for serialization/deserialization.""" name = cls.__name__ diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 846f8d004..55fe39bf2 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -1531,6 +1531,246 @@ def _expand_dataarray( scenarios=scenarios, ) + # ===================================================================== + # New Aggregation API (Phase 3 - Backend-agnostic interface) + # ===================================================================== + + def aggregate( + self, + method: str | Any = 'tsam', + n_representatives: int | None = None, + reduce: bool = True, + **kwargs, + ) -> FlowSystem: + """Unified aggregation method supporting multiple backends. + + This is the recommended API for time series aggregation. It supports + multiple backends (TSAM, manual, etc.) through a unified interface. + + For TSAM backend, this delegates to cluster() or cluster_reduce() + based on the ``reduce`` parameter. + + Args: + method: Aggregation backend. Options: + - 'tsam': Use TSAM package for k-means clustering (default) + - 'manual': Use ManualBackend with pre-computed mapping + - Custom Aggregator instance + n_representatives: Target number of representative timesteps. + For 'tsam' with cluster_duration='1D', this is the number of + typical days. + reduce: Aggregation mode: + - True: Reduce timesteps (cluster_reduce mode) + - False: Keep all timesteps with equality constraints (cluster mode) + **kwargs: Backend-specific options. For 'tsam': + - cluster_duration: Duration per cluster ('1D', '24h', etc.) + - n_segments: Inner-period segmentation + - time_series_for_high_peaks: Force high-value period inclusion + - time_series_for_low_peaks: Force low-value period inclusion + - aggregate_data: Whether to aggregate time series data + - include_storage: Include storage in constraints + + Returns: + New FlowSystem with aggregation applied. + + Example: + >>> # TSAM clustering with 8 typical days + >>> fs_agg = fs.transform.aggregate( + ... method='tsam', + ... n_representatives=8, + ... reduce=True, + ... cluster_duration='1D', + ... 
) + + >>> # Manual aggregation with external clustering + >>> fs_agg = fs.transform.set_aggregation(my_mapping, my_weights) + + See Also: + set_aggregation: For PyPSA-style manual aggregation + cluster: TSAM constraint-based clustering + cluster_reduce: TSAM reduction-based clustering + """ + from .aggregation import Aggregator, get_backend + + # Handle string backend names + if isinstance(method, str): + backend_cls = get_backend(method) + if method == 'tsam': + # Delegate to existing TSAM methods for backwards compatibility + return self._aggregate_tsam(n_representatives, reduce, **kwargs) + elif method == 'manual': + raise ValueError("Use set_aggregation() for manual aggregation, not aggregate(method='manual')") + else: + # Custom registered backend + _backend = backend_cls(**kwargs) # noqa: F841 + elif isinstance(method, Aggregator): + _backend = method # noqa: F841 + else: + raise TypeError(f'method must be str or Aggregator, got {type(method)}') + + # Use backend to aggregate + raise NotImplementedError( + "Generic backend aggregation not yet implemented. Use method='tsam' or set_aggregation() for now." + ) + + def _aggregate_tsam( + self, + n_representatives: int | None, + reduce: bool, + **kwargs, + ) -> FlowSystem: + """Internal: delegate to existing TSAM methods.""" + # Extract TSAM-specific kwargs + cluster_duration = kwargs.pop('cluster_duration', '1D') + n_segments = kwargs.pop('n_segments', None) + aggregate_data = kwargs.pop('aggregate_data', True) + include_storage = kwargs.pop('include_storage', True) + flexibility_percent = kwargs.pop('flexibility_percent', 0) + flexibility_penalty = kwargs.pop('flexibility_penalty', 0) + time_series_for_high_peaks = kwargs.pop('time_series_for_high_peaks', None) + time_series_for_low_peaks = kwargs.pop('time_series_for_low_peaks', None) + components_to_clusterize = kwargs.pop('components_to_clusterize', None) + weights = kwargs.pop('weights', None) + + if reduce: + # cluster_reduce doesn't support n_segments + return self.cluster_reduce( + n_clusters=n_representatives, + cluster_duration=cluster_duration, + weights=weights, + time_series_for_high_peaks=time_series_for_high_peaks, + time_series_for_low_peaks=time_series_for_low_peaks, + storage_cyclic=kwargs.pop('storage_cyclic', True), + ) + else: + return self.cluster( + n_clusters=n_representatives, + cluster_duration=cluster_duration, + n_segments=n_segments, + aggregate_data=aggregate_data, + include_storage=include_storage, + flexibility_percent=flexibility_percent, + flexibility_penalty=flexibility_penalty, + time_series_for_high_peaks=time_series_for_high_peaks, + time_series_for_low_peaks=time_series_for_low_peaks, + components_to_clusterize=components_to_clusterize, + ) + + def set_aggregation( + self, + timestep_mapping: xr.DataArray, + weights: xr.DataArray, + reduce: bool = True, + cluster_structure: Any = None, + aggregated_data: xr.Dataset | None = None, + ) -> FlowSystem: + """Set aggregation from external tool (PyPSA-style workflow). + + This enables users to bring their own aggregation results from any tool + (sklearn, custom algorithms, hierarchical clustering, etc.) and apply + them to flixopt. + + This is similar to PyPSA's approach where aggregation is done externally + and the framework just accepts the results. + + Args: + timestep_mapping: Maps each original timestep to representative index. + DataArray with dims [original_time] or [original_time, period, scenario]. + Values should be integers in range [0, n_representatives). 
+ weights: Weight for each representative timestep. + DataArray with dims [time] or [time, period, scenario]. + Typically equals count of original timesteps each representative covers. + reduce: Aggregation mode: + - True (default): Reduce timesteps (like cluster_reduce) + - False: Keep all timesteps with equality constraints (like cluster) + cluster_structure: Optional ClusterStructure for storage inter-period linking. + Required for proper storage optimization in reduce mode. + aggregated_data: Optional pre-aggregated time series data. + If not provided, data will be extracted from mapping. + + Returns: + New FlowSystem with aggregation applied. + + Example: + >>> # External clustering with sklearn + >>> from sklearn.cluster import KMeans + >>> import xarray as xr + >>> + >>> # ... perform clustering ... + >>> mapping = xr.DataArray(my_mapping, dims=['original_time']) + >>> weights = xr.DataArray(my_weights, dims=['time']) + >>> + >>> fs_agg = fs.transform.set_aggregation( + ... timestep_mapping=mapping, + ... weights=weights, + ... reduce=True, + ... ) + + See Also: + aggregate: Unified aggregation API with backend support + flixopt.aggregation.ManualBackend: Backend class for manual aggregation + flixopt.aggregation.create_manual_backend_from_labels: Helper for sklearn labels + """ + from .aggregation import ManualBackend + + # Create ManualBackend from provided data + backend = ManualBackend( + timestep_mapping=timestep_mapping, + representative_weights=weights, + cluster_structure=cluster_structure, + ) + + # Build aggregation result + # For now, we need to convert flow_system data to xr.Dataset for the backend + data = self._fs_data_to_dataset() + n_representatives = len(weights) + + result = backend.aggregate(data, n_representatives) + + # Apply aggregation based on mode + if reduce: + return self._apply_reduce_aggregation(result) + else: + return self._apply_constraint_aggregation(result) + + def _fs_data_to_dataset(self) -> xr.Dataset: + """Convert FlowSystem time series data to xarray Dataset.""" + from .core import TimeSeriesData + + data_vars = {} + for element in self._fs.values(): + for attr_name, attr_value in element.__dict__.items(): + if isinstance(attr_value, TimeSeriesData) and attr_value.has_data: + name = f'{element.label_full}|{attr_name}' + data_vars[name] = (['time'], attr_value.data.values) + + return xr.Dataset( + data_vars, + coords={'time': self._fs.timesteps}, + ) + + def _apply_reduce_aggregation(self, result) -> FlowSystem: + """Apply reduce-mode aggregation using AggregationResult. + + This creates a new FlowSystem with reduced timesteps, similar to + cluster_reduce() but using the generic AggregationResult. + """ + # For now, delegate to existing cluster_reduce infrastructure + # Full implementation would create FlowSystem directly from result + raise NotImplementedError( + 'set_aggregation with reduce=True not yet fully implemented. ' + 'Use cluster_reduce() for now, or set_aggregation with reduce=False.' + ) + + def _apply_constraint_aggregation(self, result) -> FlowSystem: + """Apply constraint-mode aggregation using AggregationResult. + + This creates equality constraints to equate clustered timesteps, + similar to cluster() but using the generic AggregationResult. + """ + # For now, delegate to existing cluster infrastructure + # Full implementation would create constraints from result.timestep_mapping + raise NotImplementedError('set_aggregation with reduce=False not yet fully implemented. 
Use cluster() for now.') + # Future methods can be added here: # # def mga(self, alternatives: int = 5) -> FlowSystem: diff --git a/tests/test_aggregation/__init__.py b/tests/test_aggregation/__init__.py new file mode 100644 index 000000000..4a026052c --- /dev/null +++ b/tests/test_aggregation/__init__.py @@ -0,0 +1 @@ +"""Tests for the flixopt.aggregation module.""" diff --git a/tests/test_aggregation/test_base.py b/tests/test_aggregation/test_base.py new file mode 100644 index 000000000..ce0439590 --- /dev/null +++ b/tests/test_aggregation/test_base.py @@ -0,0 +1,161 @@ +"""Tests for flixopt.aggregation.base module.""" + +import numpy as np +import pytest +import xarray as xr + +from flixopt.aggregation import ( + AggregationInfo, + AggregationResult, + ClusterStructure, + create_cluster_structure_from_mapping, +) + + +class TestClusterStructure: + """Tests for ClusterStructure dataclass.""" + + def test_basic_creation(self): + """Test basic ClusterStructure creation.""" + cluster_order = xr.DataArray([0, 1, 0, 1, 2, 0], dims=['original_period']) + cluster_occurrences = xr.DataArray([3, 2, 1], dims=['cluster']) + + structure = ClusterStructure( + cluster_order=cluster_order, + cluster_occurrences=cluster_occurrences, + n_clusters=3, + timesteps_per_cluster=24, + ) + + assert structure.n_clusters == 3 + assert structure.timesteps_per_cluster == 24 + assert structure.n_original_periods == 6 + + def test_creation_from_numpy(self): + """Test ClusterStructure creation from numpy arrays.""" + structure = ClusterStructure( + cluster_order=np.array([0, 0, 1, 1, 0]), + cluster_occurrences=np.array([3, 2]), + n_clusters=2, + timesteps_per_cluster=12, + ) + + assert isinstance(structure.cluster_order, xr.DataArray) + assert isinstance(structure.cluster_occurrences, xr.DataArray) + assert structure.n_original_periods == 5 + + def test_get_cluster_weight_per_timestep(self): + """Test weight calculation per timestep.""" + structure = ClusterStructure( + cluster_order=xr.DataArray([0, 1, 0], dims=['original_period']), + cluster_occurrences=xr.DataArray([2, 1], dims=['cluster']), + n_clusters=2, + timesteps_per_cluster=4, + ) + + weights = structure.get_cluster_weight_per_timestep() + + # Cluster 0 has 4 timesteps, each with weight 2 + # Cluster 1 has 4 timesteps, each with weight 1 + assert len(weights) == 8 + assert float(weights.isel(time=0).values) == 2.0 + assert float(weights.isel(time=4).values) == 1.0 + + +class TestAggregationResult: + """Tests for AggregationResult dataclass.""" + + def test_basic_creation(self): + """Test basic AggregationResult creation.""" + result = AggregationResult( + timestep_mapping=xr.DataArray([0, 0, 1, 1, 2, 2], dims=['original_time']), + n_representatives=3, + representative_weights=xr.DataArray([2, 2, 2], dims=['time']), + ) + + assert result.n_representatives == 3 + assert result.n_original_timesteps == 6 + + def test_creation_from_numpy(self): + """Test AggregationResult creation from numpy arrays.""" + result = AggregationResult( + timestep_mapping=np.array([0, 1, 0, 1]), + n_representatives=2, + representative_weights=np.array([2.0, 2.0]), + ) + + assert isinstance(result.timestep_mapping, xr.DataArray) + assert isinstance(result.representative_weights, xr.DataArray) + + def test_validation_success(self): + """Test validation passes for valid result.""" + result = AggregationResult( + timestep_mapping=xr.DataArray([0, 1, 0, 1], dims=['original_time']), + n_representatives=2, + representative_weights=xr.DataArray([2.0, 2.0], dims=['time']), + ) + + # Should not 
raise + result.validate() + + def test_validation_invalid_mapping(self): + """Test validation fails for out-of-range mapping.""" + result = AggregationResult( + timestep_mapping=xr.DataArray([0, 5, 0, 1], dims=['original_time']), # 5 is out of range + n_representatives=2, + representative_weights=xr.DataArray([2.0, 2.0], dims=['time']), + ) + + with pytest.raises(ValueError, match='timestep_mapping contains index'): + result.validate() + + def test_get_expansion_mapping(self): + """Test get_expansion_mapping returns named DataArray.""" + result = AggregationResult( + timestep_mapping=xr.DataArray([0, 1, 0], dims=['original_time']), + n_representatives=2, + representative_weights=xr.DataArray([2.0, 1.0], dims=['time']), + ) + + mapping = result.get_expansion_mapping() + assert mapping.name == 'expansion_mapping' + + +class TestCreateClusterStructureFromMapping: + """Tests for create_cluster_structure_from_mapping function.""" + + def test_basic_creation(self): + """Test creating ClusterStructure from timestep mapping.""" + # 12 original timesteps, 4 per period, 3 periods + # Mapping: period 0 -> cluster 0, period 1 -> cluster 1, period 2 -> cluster 0 + mapping = xr.DataArray( + [0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3], # First and third period map to cluster 0 + dims=['original_time'], + ) + + structure = create_cluster_structure_from_mapping(mapping, timesteps_per_cluster=4) + + assert structure.timesteps_per_cluster == 4 + assert structure.n_original_periods == 3 + + +class TestAggregationInfo: + """Tests for AggregationInfo dataclass.""" + + def test_creation(self): + """Test AggregationInfo creation.""" + result = AggregationResult( + timestep_mapping=xr.DataArray([0, 1], dims=['original_time']), + n_representatives=2, + representative_weights=xr.DataArray([1.0, 1.0], dims=['time']), + ) + + info = AggregationInfo( + result=result, + original_flow_system=None, # Would be FlowSystem in practice + mode='reduce', + backend_name='tsam', + ) + + assert info.mode == 'reduce' + assert info.backend_name == 'tsam' diff --git a/tests/test_aggregation/test_integration.py b/tests/test_aggregation/test_integration.py new file mode 100644 index 000000000..a7a560cf3 --- /dev/null +++ b/tests/test_aggregation/test_integration.py @@ -0,0 +1,194 @@ +"""Integration tests for flixopt.aggregation module with FlowSystem.""" + +import numpy as np +import pandas as pd +import pytest +import xarray as xr + +from flixopt import FlowSystem, TimeSeriesWeights + + +class TestTimeSeriesWeights: + """Tests for TimeSeriesWeights class.""" + + def test_creation(self): + """Test TimeSeriesWeights creation.""" + temporal = xr.DataArray([1.0, 1.0, 1.0], dims=['time']) + weights = TimeSeriesWeights(temporal=temporal) + + assert 'time' in weights.temporal.dims + assert float(weights.temporal.sum().values) == 3.0 + + def test_invalid_no_time_dim(self): + """Test error when temporal has no time dimension.""" + temporal = xr.DataArray([1.0, 1.0], dims=['other']) + + with pytest.raises(ValueError, match='time'): + TimeSeriesWeights(temporal=temporal) + + def test_sum_over_time(self): + """Test sum_over_time convenience method.""" + temporal = xr.DataArray([2.0, 3.0, 1.0], dims=['time'], coords={'time': [0, 1, 2]}) + weights = TimeSeriesWeights(temporal=temporal) + + data = xr.DataArray([10.0, 20.0, 30.0], dims=['time'], coords={'time': [0, 1, 2]}) + result = weights.sum_over_time(data) + + # 10*2 + 20*3 + 30*1 = 20 + 60 + 30 = 110 + assert float(result.values) == 110.0 + + def test_effective_objective(self): + """Test 
effective_objective property.""" + temporal = xr.DataArray([1.0, 1.0], dims=['time']) + objective = xr.DataArray([2.0, 2.0], dims=['time']) + + # Without override + weights1 = TimeSeriesWeights(temporal=temporal) + assert np.array_equal(weights1.effective_objective.values, temporal.values) + + # With override + weights2 = TimeSeriesWeights(temporal=temporal, objective=objective) + assert np.array_equal(weights2.effective_objective.values, objective.values) + + +class TestFlowSystemWeightsProperty: + """Tests for FlowSystem.weights property.""" + + def test_weights_property_exists(self): + """Test that FlowSystem has weights property.""" + fs = FlowSystem(timesteps=pd.date_range('2024-01-01', periods=24, freq='h')) + + weights = fs.weights + assert isinstance(weights, TimeSeriesWeights) + + def test_weights_temporal_equals_aggregation_weight(self): + """Test that weights.temporal equals aggregation_weight.""" + fs = FlowSystem(timesteps=pd.date_range('2024-01-01', periods=24, freq='h')) + + weights = fs.weights + aggregation_weight = fs.aggregation_weight + + np.testing.assert_array_almost_equal(weights.temporal.values, aggregation_weight.values) + + def test_weights_with_cluster_weight(self): + """Test weights property includes cluster_weight.""" + # Create FlowSystem with custom cluster_weight + timesteps = pd.date_range('2024-01-01', periods=24, freq='h') + cluster_weight = np.array([2.0] * 12 + [1.0] * 12) # First 12h weighted 2x + + fs = FlowSystem(timesteps=timesteps, cluster_weight=cluster_weight) + + weights = fs.weights + + # temporal = timestep_duration * cluster_weight + # timestep_duration is 1h for all, so temporal = cluster_weight + expected = 1.0 * cluster_weight + np.testing.assert_array_almost_equal(weights.temporal.values, expected) + + +class TestAggregateMethod: + """Tests for FlowSystem.transform.aggregate method.""" + + def test_aggregate_method_exists(self): + """Test that transform.aggregate method exists.""" + fs = FlowSystem(timesteps=pd.date_range('2024-01-01', periods=48, freq='h')) + + assert hasattr(fs.transform, 'aggregate') + assert callable(fs.transform.aggregate) + + def test_aggregate_tsam_delegates_to_cluster_reduce(self): + """Test that aggregate with method='tsam' works.""" + # This test requires tsam to be installed + pytest.importorskip('tsam') + from flixopt import Bus, Flow, Sink, Source + from flixopt.core import TimeSeriesData + + # Create FlowSystem with 7 days of data (168 hours) + n_hours = 168 # 7 days + fs = FlowSystem(timesteps=pd.date_range('2024-01-01', periods=n_hours, freq='h')) + + # Add some basic components with time series data + demand_data = np.sin(np.linspace(0, 14 * np.pi, n_hours)) + 2 # Varying demand over 7 days + bus = Bus('electricity') + # Bus label is passed as string to Flow + grid_flow = Flow('grid_in', bus='electricity', size=100) + demand_flow = Flow( + 'demand_out', bus='electricity', size=100, fixed_relative_profile=TimeSeriesData(demand_data / 100) + ) + source = Source('grid', outputs=[grid_flow]) + sink = Sink('demand', inputs=[demand_flow]) + fs.add_elements(source, sink, bus) + + # Should delegate to cluster_reduce - reduce 7 days to 2 representative days + fs_agg = fs.transform.aggregate( + method='tsam', + n_representatives=2, + reduce=True, + cluster_duration='1D', + ) + + # Check that timesteps were reduced (from 168 hours to 48 hours = 2 days x 24 hours) + assert len(fs_agg.timesteps) < len(fs.timesteps) + assert len(fs_agg.timesteps) == 48 # 2 representative days x 24 hours + + +class 
TestSetAggregationMethod: + """Tests for FlowSystem.transform.set_aggregation method.""" + + def test_set_aggregation_method_exists(self): + """Test that transform.set_aggregation method exists.""" + fs = FlowSystem(timesteps=pd.date_range('2024-01-01', periods=24, freq='h')) + + assert hasattr(fs.transform, 'set_aggregation') + assert callable(fs.transform.set_aggregation) + + def test_set_aggregation_raises_not_implemented(self): + """Test that set_aggregation raises NotImplementedError for now.""" + fs = FlowSystem(timesteps=pd.date_range('2024-01-01', periods=24, freq='h')) + + mapping = xr.DataArray(np.arange(24) % 4, dims=['original_time']) + weights = xr.DataArray([6.0, 6.0, 6.0, 6.0], dims=['time']) + + # For now, should raise NotImplementedError + with pytest.raises(NotImplementedError): + fs.transform.set_aggregation( + timestep_mapping=mapping, + weights=weights, + reduce=True, + ) + + +class TestAggregationModuleImports: + """Tests for flixopt.aggregation module imports.""" + + def test_import_from_flixopt(self): + """Test that aggregation module can be imported from flixopt.""" + from flixopt import aggregation + + assert hasattr(aggregation, 'AggregationResult') + assert hasattr(aggregation, 'ClusterStructure') + assert hasattr(aggregation, 'Aggregator') + assert hasattr(aggregation, 'TSAMBackend') + assert hasattr(aggregation, 'ManualBackend') + + def test_list_backends(self): + """Test list_backends function.""" + from flixopt.aggregation import list_backends + + backends = list_backends() + assert 'manual' in backends + # 'tsam' may or may not be available depending on installation + + def test_get_backend(self): + """Test get_backend function.""" + from flixopt.aggregation import ManualBackend, get_backend + + backend_cls = get_backend('manual') + assert backend_cls is ManualBackend + + def test_get_backend_invalid(self): + """Test get_backend raises for invalid backend.""" + from flixopt.aggregation import get_backend + + with pytest.raises(ValueError, match='Unknown backend'): + get_backend('nonexistent') diff --git a/tests/test_aggregation/test_manual_backend.py b/tests/test_aggregation/test_manual_backend.py new file mode 100644 index 000000000..8ba8f3c79 --- /dev/null +++ b/tests/test_aggregation/test_manual_backend.py @@ -0,0 +1,131 @@ +"""Tests for flixopt.aggregation.manual module.""" + +import numpy as np +import pytest +import xarray as xr + +from flixopt.aggregation import ( + ManualBackend, + create_manual_backend_from_labels, + create_manual_backend_from_selection, +) + + +class TestManualBackend: + """Tests for ManualBackend class.""" + + def test_basic_creation(self): + """Test basic ManualBackend creation.""" + mapping = xr.DataArray([0, 1, 0, 1, 2, 2], dims=['original_time']) + weights = xr.DataArray([2.0, 2.0, 2.0], dims=['time']) + + backend = ManualBackend(timestep_mapping=mapping, representative_weights=weights) + + assert len(backend.timestep_mapping) == 6 + assert len(backend.representative_weights) == 3 + + def test_validation_dimension_mismatch(self): + """Test validation fails for mismatched dimensions.""" + mapping = xr.DataArray([0, 1, 5], dims=['original_time']) # 5 is out of range + weights = xr.DataArray([2.0, 2.0], dims=['time']) # Only 2 weights + + with pytest.raises(ValueError, match='timestep_mapping contains index'): + ManualBackend(timestep_mapping=mapping, representative_weights=weights) + + def test_aggregate_creates_result(self): + """Test aggregate method creates proper AggregationResult.""" + mapping = xr.DataArray([0, 1, 0, 1], 
dims=['original_time'])
+        weights = xr.DataArray([2.0, 2.0], dims=['time'])
+
+        backend = ManualBackend(timestep_mapping=mapping, representative_weights=weights)
+
+        # Create test data
+        data = xr.Dataset(
+            {'var1': (['time'], [1.0, 2.0, 3.0, 4.0])},
+            coords={'time': range(4)},
+        )
+
+        result = backend.aggregate(data)
+
+        assert result.n_representatives == 2
+        assert result.n_original_timesteps == 4
+        assert result.aggregated_data is not None
+
+    def test_aggregate_validates_data_dimensions(self):
+        """Test aggregate validates data dimensions match mapping."""
+        mapping = xr.DataArray([0, 1, 0], dims=['original_time'])  # 3 timesteps
+        weights = xr.DataArray([2.0, 1.0], dims=['time'])
+
+        backend = ManualBackend(timestep_mapping=mapping, representative_weights=weights)
+
+        # Data has wrong number of timesteps
+        data = xr.Dataset(
+            {'var1': (['time'], [1.0, 2.0, 3.0, 4.0, 5.0])},  # 5 timesteps
+            coords={'time': range(5)},
+        )
+
+        with pytest.raises(ValueError, match='timesteps'):
+            backend.aggregate(data)
+
+
+class TestCreateManualBackendFromLabels:
+    """Tests for create_manual_backend_from_labels function."""
+
+    def test_basic_creation(self):
+        """Test creating ManualBackend from cluster labels."""
+        # 3 periods of 4 timesteps each, labeled [0, 1, 0]
+        labels = np.array([0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0])
+
+        backend = create_manual_backend_from_labels(labels, timesteps_per_cluster=4)
+
+        assert len(backend.representative_weights) == 8  # 2 clusters x 4 timesteps
+        # Cluster 0 appears 2 times, cluster 1 appears 1 time
+        assert float(backend.representative_weights.isel(time=0).values) == 2.0
+        assert float(backend.representative_weights.isel(time=4).values) == 1.0
+
+    def test_non_consecutive_labels(self):
+        """Test handling of non-consecutive cluster labels."""
+        # Labels are 0, 2, 0 (skipping 1)
+        labels = np.array([0, 0, 2, 2, 0, 0])
+
+        backend = create_manual_backend_from_labels(labels, timesteps_per_cluster=2)
+
+        # Should remap to consecutive 0, 1
+        assert len(backend.representative_weights) == 4  # 2 unique clusters x 2 timesteps
+
+
+class TestCreateManualBackendFromSelection:
+    """Tests for create_manual_backend_from_selection function."""
+
+    def test_basic_creation(self):
+        """Test creating ManualBackend from selected indices."""
+        # Select every 3rd timestep from 12 original timesteps
+        selected_indices = np.array([0, 3, 6, 9])
+        weights = np.array([3.0, 3.0, 3.0, 3.0])
+
+        backend = create_manual_backend_from_selection(
+            selected_indices=selected_indices,
+            weights=weights,
+            n_original_timesteps=12,
+        )
+
+        assert len(backend.representative_weights) == 4
+        # Check mapping assigns nearby timesteps to nearest representative
+        mapping = backend.timestep_mapping.values
+        assert mapping[0] == 0  # Timestep 0 -> representative 0 (at index 0)
+        assert mapping[1] == 0  # Timestep 1 -> representative 0 (nearest to 0)
+        # Distances from timestep 5 to [0, 3, 6, 9] are [5, 2, 1, 4], so argmin picks rep 2 (at index 6)
+        assert mapping[5] == 2  # Timestep 5 -> representative 2 (at index 6)
+
+    def test_weights_length_mismatch(self):
+        """Test error when weights length doesn't match selected indices."""
+        selected_indices = np.array([0, 3, 6])
+        weights = np.array([3.0, 3.0])  # Wrong length
+
+        with pytest.raises(ValueError, match='weights'):
+            create_manual_backend_from_selection(
+                selected_indices=selected_indices,
+                weights=weights,
+                n_original_timesteps=12,
+            )
diff --git a/tests/test_cluster_reduce_expand.py 
b/tests/test_cluster_reduce_expand.py new file mode 100644 index 000000000..54e83033c --- /dev/null +++ b/tests/test_cluster_reduce_expand.py @@ -0,0 +1,349 @@ +"""Tests for cluster_reduce() and expand_solution() functionality.""" + +import numpy as np +import pandas as pd +import pytest +from numpy.testing import assert_allclose + +import flixopt as fx + + +def create_simple_system(timesteps: pd.DatetimeIndex) -> fx.FlowSystem: + """Create a simple FlowSystem for testing clustering.""" + # Create varying demand - different for each day to test clustering + hours = len(timesteps) + demand = np.sin(np.linspace(0, 4 * np.pi, hours)) * 10 + 15 # Oscillating demand + + flow_system = fx.FlowSystem(timesteps) + flow_system.add_elements( + fx.Bus('Heat'), + fx.Bus('Gas'), + fx.Effect('costs', '€', is_standard=True, is_objective=True), + fx.Sink('HeatDemand', inputs=[fx.Flow('Q', bus='Heat', fixed_relative_profile=demand, size=1)]), + fx.Source('GasSource', outputs=[fx.Flow('Gas', bus='Gas', effects_per_flow_hour=0.05)]), + fx.linear_converters.Boiler( + 'Boiler', + thermal_efficiency=0.9, + fuel_flow=fx.Flow('Q_fu', bus='Gas'), + thermal_flow=fx.Flow('Q_th', bus='Heat'), + ), + ) + return flow_system + + +@pytest.fixture +def timesteps_2_days(): + """48 hour timesteps (2 days).""" + return pd.date_range('2020-01-01', periods=48, freq='h') + + +@pytest.fixture +def timesteps_8_days(): + """192 hour timesteps (8 days) - more realistic for clustering.""" + return pd.date_range('2020-01-01', periods=192, freq='h') + + +def test_cluster_reduce_creates_reduced_timesteps(timesteps_8_days): + """Test that cluster_reduce creates a FlowSystem with fewer timesteps.""" + fs = create_simple_system(timesteps_8_days) + + # Reduce to 2 typical clusters (days) + fs_reduced = fs.transform.cluster_reduce( + n_clusters=2, + cluster_duration='1D', + ) + + # Should have 2 * 24 = 48 timesteps instead of 192 + assert len(fs_reduced.timesteps) == 48 + assert hasattr(fs_reduced, '_cluster_info') + assert fs_reduced._cluster_info['n_clusters'] == 2 + + +def test_expand_solution_restores_full_timesteps(solver_fixture, timesteps_8_days): + """Test that expand_solution restores full timestep count.""" + fs = create_simple_system(timesteps_8_days) + + # Reduce to 2 typical clusters + fs_reduced = fs.transform.cluster_reduce( + n_clusters=2, + cluster_duration='1D', + ) + + # Optimize + fs_reduced.optimize(solver_fixture) + assert fs_reduced.solution is not None + assert len(fs_reduced.timesteps) == 48 + + # Expand back to full + fs_expanded = fs_reduced.transform.expand_solution() + + # Should have original timestep count + assert len(fs_expanded.timesteps) == 192 + assert fs_expanded.solution is not None + + +def test_expand_solution_preserves_solution_variables(solver_fixture, timesteps_8_days): + """Test that expand_solution keeps all solution variables.""" + fs = create_simple_system(timesteps_8_days) + + fs_reduced = fs.transform.cluster_reduce( + n_clusters=2, + cluster_duration='1D', + ) + fs_reduced.optimize(solver_fixture) + + reduced_vars = set(fs_reduced.solution.data_vars) + + fs_expanded = fs_reduced.transform.expand_solution() + expanded_vars = set(fs_expanded.solution.data_vars) + + # Should have all the same variables + assert reduced_vars == expanded_vars + + +def test_expand_solution_maps_values_correctly(solver_fixture, timesteps_8_days): + """Test that expand_solution correctly maps typical cluster values to all segments.""" + fs = create_simple_system(timesteps_8_days) + + fs_reduced = 
fs.transform.cluster_reduce( + n_clusters=2, + cluster_duration='1D', + ) + fs_reduced.optimize(solver_fixture) + + # Get cluster_order to know mapping + info = fs_reduced._cluster_info + cluster_order = info['cluster_order'] + timesteps_per_cluster = info['timesteps_per_cluster'] # 24 + + reduced_flow = fs_reduced.solution['Boiler(Q_th)|flow_rate'].values + + fs_expanded = fs_reduced.transform.expand_solution() + expanded_flow = fs_expanded.solution['Boiler(Q_th)|flow_rate'].values + + # Check that values are correctly mapped + # For each original segment, values should match the corresponding typical cluster + for orig_segment_idx, cluster_id in enumerate(cluster_order): + orig_start = orig_segment_idx * timesteps_per_cluster + orig_end = orig_start + timesteps_per_cluster + + typical_start = cluster_id * timesteps_per_cluster + typical_end = typical_start + timesteps_per_cluster + + # Values in the expanded solution for this original segment + # should match the reduced solution for the corresponding typical cluster + expected = reduced_flow[typical_start:typical_end] + actual = expanded_flow[orig_start:orig_end] + + assert_allclose(actual, expected, rtol=1e-10) + + +def test_expand_solution_enables_statistics_accessor(solver_fixture, timesteps_8_days): + """Test that statistics accessor works on expanded FlowSystem.""" + fs = create_simple_system(timesteps_8_days) + + fs_reduced = fs.transform.cluster_reduce( + n_clusters=2, + cluster_duration='1D', + ) + fs_reduced.optimize(solver_fixture) + + fs_expanded = fs_reduced.transform.expand_solution() + + # These should work without errors + flow_rates = fs_expanded.statistics.flow_rates + assert 'Boiler(Q_th)' in flow_rates + assert len(flow_rates['Boiler(Q_th)'].coords['time']) == 192 + + flow_hours = fs_expanded.statistics.flow_hours + assert 'Boiler(Q_th)' in flow_hours + + +def test_expand_solution_statistics_match_clustered(solver_fixture, timesteps_8_days): + """Test that total_effects match between clustered and expanded FlowSystem.""" + fs = create_simple_system(timesteps_8_days) + + fs_reduced = fs.transform.cluster_reduce( + n_clusters=2, + cluster_duration='1D', + ) + fs_reduced.optimize(solver_fixture) + + fs_expanded = fs_reduced.transform.expand_solution() + + # Total effects should match between clustered and expanded + reduced_total = fs_reduced.statistics.total_effects['costs'].sum('contributor').item() + expanded_total = fs_expanded.statistics.total_effects['costs'].sum('contributor').item() + + assert_allclose(reduced_total, expanded_total, rtol=1e-6) + + # Flow hours should also match (need to sum over time with proper weighting) + reduced_flow_hours = ( + (fs_reduced.statistics.flow_hours['Boiler(Q_th)'] * fs_reduced.cluster_weight).sum('time').item() + ) + expanded_flow_hours = ( + (fs_expanded.statistics.flow_hours['Boiler(Q_th)'] * fs_expanded.cluster_weight).sum('time').item() + ) + + assert_allclose(reduced_flow_hours, expanded_flow_hours, rtol=1e-6) + + +def test_expand_solution_without_cluster_info_raises(solver_fixture, timesteps_2_days): + """Test that expand_solution raises error if not a reduced FlowSystem.""" + fs = create_simple_system(timesteps_2_days) + fs.optimize(solver_fixture) + + with pytest.raises(ValueError, match='cluster_reduce'): + fs.transform.expand_solution() + + +def test_expand_solution_without_solution_raises(timesteps_8_days): + """Test that expand_solution raises error if no solution.""" + fs = create_simple_system(timesteps_8_days) + + fs_reduced = fs.transform.cluster_reduce( + 
n_clusters=2, + cluster_duration='1D', + ) + # Don't optimize - no solution + + with pytest.raises(ValueError, match='no solution'): + fs_reduced.transform.expand_solution() + + +# ==================== Multi-dimensional Tests ==================== + + +def create_system_with_scenarios(timesteps: pd.DatetimeIndex, scenarios: pd.Index) -> fx.FlowSystem: + """Create a FlowSystem with scenarios for testing.""" + hours = len(timesteps) + + # Create different demand profiles per scenario + demands = {} + for i, scenario in enumerate(scenarios): + # Different pattern per scenario + base_demand = np.sin(np.linspace(0, 4 * np.pi, hours)) * 10 + 15 + demands[scenario] = base_demand * (1 + 0.2 * i) # Scale differently per scenario + + # Create DataFrame with scenarios as columns + demand_df = pd.DataFrame(demands, index=timesteps) + + flow_system = fx.FlowSystem(timesteps, scenarios=scenarios) + flow_system.add_elements( + fx.Bus('Heat'), + fx.Bus('Gas'), + fx.Effect('costs', '€', is_standard=True, is_objective=True), + fx.Sink( + 'HeatDemand', + inputs=[fx.Flow('Q', bus='Heat', fixed_relative_profile=demand_df, size=1)], + ), + fx.Source('GasSource', outputs=[fx.Flow('Gas', bus='Gas', effects_per_flow_hour=0.05)]), + fx.linear_converters.Boiler( + 'Boiler', + thermal_efficiency=0.9, + fuel_flow=fx.Flow('Q_fu', bus='Gas'), + thermal_flow=fx.Flow('Q_th', bus='Heat'), + ), + ) + return flow_system + + +@pytest.fixture +def scenarios_2(): + """Two scenarios for testing.""" + return pd.Index(['base', 'high'], name='scenario') + + +def test_cluster_reduce_with_scenarios(timesteps_8_days, scenarios_2): + """Test that cluster_reduce handles scenarios correctly.""" + fs = create_system_with_scenarios(timesteps_8_days, scenarios_2) + + # Verify scenarios are set up correctly + assert fs.scenarios is not None + assert len(fs.scenarios) == 2 + + # Reduce to 2 typical clusters + fs_reduced = fs.transform.cluster_reduce( + n_clusters=2, + cluster_duration='1D', + ) + + # Should have 2 * 24 = 48 timesteps + assert len(fs_reduced.timesteps) == 48 + + # Should have cluster_orders for each scenario + info = fs_reduced._cluster_info + assert 'cluster_orders' in info + assert info['has_scenarios'] is True + + # Each scenario should have its own cluster_order + for scenario in scenarios_2: + key = (None, scenario) + assert key in info['cluster_orders'] + + +def test_cluster_reduce_and_expand_with_scenarios(solver_fixture, timesteps_8_days, scenarios_2): + """Test full cluster_reduce -> optimize -> expand_solution cycle with scenarios.""" + fs = create_system_with_scenarios(timesteps_8_days, scenarios_2) + + # Reduce + fs_reduced = fs.transform.cluster_reduce( + n_clusters=2, + cluster_duration='1D', + ) + + # Optimize + fs_reduced.optimize(solver_fixture) + assert fs_reduced.solution is not None + + # Expand + fs_expanded = fs_reduced.transform.expand_solution() + + # Should have original timesteps + assert len(fs_expanded.timesteps) == 192 + + # Solution should have scenario dimension + flow_var = 'Boiler(Q_th)|flow_rate' + assert flow_var in fs_expanded.solution + assert 'scenario' in fs_expanded.solution[flow_var].dims + assert len(fs_expanded.solution[flow_var].coords['time']) == 192 + + +def test_expand_solution_maps_scenarios_independently(solver_fixture, timesteps_8_days, scenarios_2): + """Test that each scenario uses its own cluster_order in expand_solution.""" + fs = create_system_with_scenarios(timesteps_8_days, scenarios_2) + + fs_reduced = fs.transform.cluster_reduce( + n_clusters=2, + cluster_duration='1D', 
+ ) + fs_reduced.optimize(solver_fixture) + + info = fs_reduced._cluster_info + cluster_orders = info['cluster_orders'] + timesteps_per_cluster = info['timesteps_per_cluster'] # 24 + + reduced_flow = fs_reduced.solution['Boiler(Q_th)|flow_rate'] + fs_expanded = fs_reduced.transform.expand_solution() + expanded_flow = fs_expanded.solution['Boiler(Q_th)|flow_rate'] + + # Check mapping for each scenario independently + for scenario in scenarios_2: + key = (None, scenario) + cluster_order = cluster_orders[key] + + reduced_scenario = reduced_flow.sel(scenario=scenario).values + expanded_scenario = expanded_flow.sel(scenario=scenario).values + + # Verify mapping is correct for this scenario + for orig_segment_idx, cluster_id in enumerate(cluster_order): + orig_start = orig_segment_idx * timesteps_per_cluster + orig_end = orig_start + timesteps_per_cluster + + typical_start = cluster_id * timesteps_per_cluster + typical_end = typical_start + timesteps_per_cluster + + expected = reduced_scenario[typical_start:typical_end] + actual = expanded_scenario[orig_start:orig_end] + + assert_allclose(actual, expected, rtol=1e-10, err_msg=f'Mismatch for scenario {scenario}') From 78639dff2002a296fefa13f6383d3062080262a3 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 20:00:05 +0100 Subject: [PATCH 059/191] Improve terminology --- flixopt/aggregation/base.py | 68 ++++++++++++++--------------- flixopt/aggregation/manual.py | 9 ++-- flixopt/aggregation/tsam_backend.py | 7 +++ 3 files changed, 46 insertions(+), 38 deletions(-) diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index 1ed40c0bb..dbacb509f 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -1,11 +1,18 @@ """ -Base classes and data structures for time series aggregation. +Base classes and data structures for time series aggregation (clustering). This module provides an abstraction layer for time series aggregation that -supports multiple backends (TSAM, manual/external, etc.) while maintaining -proper handling of multi-dimensional data (period, scenario dimensions). +supports multiple backends (TSAM, manual/external, etc.). -All data structures use xarray for consistent multi-dimensional support. +Terminology: +- "cluster" = a group of similar time chunks (e.g., similar days grouped together) +- "typical period" = a representative time chunk for a cluster (TSAM terminology) +- "cluster duration" = the length of each time chunk (e.g., 24h for daily clustering) + +Note: This is separate from the model's "period" dimension (years/months) and +"scenario" dimension. The aggregation operates on the 'time' dimension. + +All data structures use xarray for consistent handling of coordinates. """ from __future__ import annotations @@ -19,29 +26,28 @@ @dataclass class ClusterStructure: - """Structure information for inter-period storage linking. + """Structure information for inter-cluster storage linking. This class captures the hierarchical structure of time series clustering, which is needed for proper storage state-of-charge tracking across typical periods when using cluster_reduce(). - All arrays use xarray DataArrays to properly handle multi-dimensional - cases (period, scenario dimensions). + Note: "original_period" here refers to the original time chunks before + clustering (e.g., 365 original days), NOT the model's "period" dimension + (years/months). Each original time chunk gets assigned to a cluster. 
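+
+    A minimal construction sketch (illustrative values, mirroring the unit tests):
+
+    >>> import xarray as xr
+    >>> structure = ClusterStructure(
+    ...     cluster_order=xr.DataArray([0, 1, 0], dims=['original_period']),
+    ...     cluster_occurrences=xr.DataArray([2, 1], dims=['cluster']),
+    ...     n_clusters=2,
+    ...     timesteps_per_cluster=4,
+    ... )
+    >>> structure.n_original_periods
+    3
+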
Attributes: - cluster_order: Maps original periods to cluster IDs. - dims: [original_period] or [original_period, period, scenario] - Each value indicates which typical period (cluster) the original - period belongs to. - cluster_occurrences: Count of how many original periods each cluster represents. - dims: [cluster] or [cluster, period, scenario] + cluster_order: Maps each original time chunk index to its cluster ID. + dims: [original_period] where original_period indexes the time chunks + (e.g., days) before clustering. Values are cluster indices (0 to n_clusters-1). + cluster_occurrences: Count of how many original time chunks each cluster represents. + dims: [cluster] n_clusters: Number of distinct clusters (typical periods). - Can be int (same for all) or DataArray (varies by period/scenario). - timesteps_per_cluster: Number of timesteps in each cluster period. + timesteps_per_cluster: Number of timesteps in each cluster (e.g., 24 for daily). Example: For 365 days clustered into 8 typical days: - - cluster_order: shape (365,), values 0-7 + - cluster_order: shape (365,), values 0-7 indicating which cluster each day belongs to - cluster_occurrences: shape (8,), e.g., [45, 46, 46, 46, 46, 45, 45, 46] - n_clusters: 8 - timesteps_per_cluster: 24 (for hourly data) @@ -107,22 +113,19 @@ class AggregationResult: """Universal result from any time series aggregation method. This dataclass captures all information needed to: - 1. Transform a FlowSystem to use aggregated timesteps + 1. Transform a FlowSystem to use aggregated (clustered) timesteps 2. Expand a solution back to original resolution 3. Properly weight results for statistics - All arrays use xarray DataArrays to properly handle multi-dimensional - cases (period, scenario dimensions). - Attributes: timestep_mapping: Maps each original timestep to its representative index. - dims: [original_time] or [original_time, period, scenario] + dims: [original_time] Values are indices into the representative timesteps (0 to n_representatives-1). n_representatives: Number of representative timesteps after aggregation. - Can be int (same for all) or DataArray (varies by period/scenario). representative_weights: Weight for each representative timestep. - dims: [time] or [time, period, scenario] + dims: [time] Typically equals the number of original timesteps each representative covers. + Used as cluster_weight in the FlowSystem. aggregated_data: Time series data aggregated to representative timesteps. Optional - some backends may not aggregate data. cluster_structure: Hierarchical clustering structure for storage linking. @@ -131,7 +134,7 @@ class AggregationResult: Optional - useful for expand_solution(). Example: - For 8760 hourly timesteps -> 192 representative timesteps (8 days x 24h): + For 8760 hourly timesteps clustered into 192 representative timesteps (8 clusters x 24h): - timestep_mapping: shape (8760,), values 0-191 - n_representatives: 192 - representative_weights: shape (192,), summing to 8760 @@ -218,12 +221,9 @@ class Aggregator(Protocol): """Protocol that any aggregation backend must implement. This protocol defines the interface for time series aggregation backends. - Implementations can use any aggregation algorithm (TSAM, sklearn k-means, + Implementations can use any clustering algorithm (TSAM, sklearn k-means, hierarchical clustering, etc.) as long as they return an AggregationResult. - The input data is an xarray Dataset to properly handle multi-dimensional - time series with period and scenario dimensions. 
- Example implementation: class MyAggregator: def aggregate( @@ -232,7 +232,7 @@ def aggregate( n_representatives: int, **kwargs ) -> AggregationResult: - # Custom aggregation logic + # Custom clustering logic ... return AggregationResult( timestep_mapping=mapping, @@ -247,14 +247,14 @@ def aggregate( n_representatives: int, **kwargs, ) -> AggregationResult: - """Perform time series aggregation. + """Perform time series aggregation (clustering). Args: data: Input time series data as xarray Dataset. - Must have 'time' dimension. May also have 'period' and/or - 'scenario' dimensions for multi-dimensional optimization. - n_representatives: Target number of representative timesteps. - **kwargs: Backend-specific options. + Must have 'time' dimension. + n_representatives: Target number of representative timesteps + (n_clusters * timesteps_per_cluster). + **kwargs: Backend-specific options (e.g., cluster_duration). Returns: AggregationResult containing mapping, weights, and optionally diff --git a/flixopt/aggregation/manual.py b/flixopt/aggregation/manual.py index c77dc6a84..159fc9a7a 100644 --- a/flixopt/aggregation/manual.py +++ b/flixopt/aggregation/manual.py @@ -26,14 +26,15 @@ class ManualBackend: Args: timestep_mapping: Mapping from original timesteps to representative indices. - DataArray with dims [original_time] or [original_time, period, scenario]. + DataArray with dims [original_time]. Values should be integers in range [0, n_representatives). representative_weights: Weight for each representative timestep. - DataArray with dims [time] or [time, period, scenario]. + DataArray with dims [time]. Typically equals count of original timesteps each representative covers. - cluster_structure: Optional cluster structure for storage inter-period linking. + This becomes the cluster_weight in the FlowSystem. + cluster_structure: Optional cluster structure for storage inter-cluster linking. If not provided and timesteps_per_cluster is given, will be inferred from mapping. - timesteps_per_cluster: Number of timesteps per cluster period. + timesteps_per_cluster: Number of timesteps per cluster (e.g., 24 for daily clusters). Required to infer cluster_structure if not explicitly provided. Example: diff --git a/flixopt/aggregation/tsam_backend.py b/flixopt/aggregation/tsam_backend.py index dbe9ed1e0..f7a4bd867 100644 --- a/flixopt/aggregation/tsam_backend.py +++ b/flixopt/aggregation/tsam_backend.py @@ -3,6 +3,13 @@ This backend wraps the existing flixopt Clustering class which uses the tsam package to perform k-means clustering of time series into typical periods. 
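+
+Typical usage goes through the FlowSystem transform accessor rather than this
+module directly (a sketch; assumes the optional tsam dependency is installed):
+
+    >>> fs_reduced = flow_system.transform.cluster_reduce(n_clusters=8, cluster_duration='1D')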
+ +Terminology note: +- TSAM uses "typical periods" to mean representative time chunks (e.g., typical days) +- "cluster" = a group of similar time chunks (e.g., similar days) +- "cluster_duration" = length of each time chunk (e.g., 24h for daily clustering) +- "period" and "scenario" in method signatures refer to the MODEL's dimensions + (years/months and scenarios), NOT the clustering time chunks """ from __future__ import annotations From 97befe1b0d4363869c68eea13814d8489cd7ea81 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 20:10:24 +0100 Subject: [PATCH 060/191] Remove legacy clustering --- flixopt/aggregation/base.py | 3 - flixopt/flow_system.py | 118 ----- flixopt/transform_accessor.py | 589 ++------------------- tests/test_aggregation/test_base.py | 2 - tests/test_aggregation/test_integration.py | 2 - 5 files changed, 30 insertions(+), 684 deletions(-) diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index dbacb509f..1bd638110 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -275,14 +275,11 @@ class AggregationInfo: Attributes: result: The AggregationResult from the aggregation backend. original_flow_system: Reference to the FlowSystem before aggregation. - mode: Whether aggregation used 'reduce' (fewer timesteps) or - 'constrain' (same timesteps with equality constraints). backend_name: Name of the aggregation backend used (e.g., 'tsam', 'manual'). """ result: AggregationResult original_flow_system: object # FlowSystem - avoid circular import - mode: str # 'reduce' or 'constrain' backend_name: str = 'unknown' diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index cc6bc9117..f93bda411 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -231,9 +231,6 @@ def __init__( # Solution dataset - populated after optimization or loaded from file self._solution: xr.Dataset | None = None - # Clustering info - populated by transform.cluster() - self._clustering_info: dict | None = None - # Typical periods info - populated by transform.cluster_reduce() self._cluster_info: dict | None = None @@ -645,31 +642,6 @@ def to_dataset(self, include_solution: bool = True) -> xr.Dataset: carriers_structure[name] = carrier_ref ds.attrs['carriers'] = json.dumps(carriers_structure) - # Include clustering info if present - if self._clustering_info is not None: - from .clustering import ClusteringParameters - - # Ensure parameters have indices populated before saving - params = self._clustering_info.get('parameters') - if isinstance(params, ClusteringParameters): - # Populate indices from tsam if not already set - if not params.has_indices: - clustering_obj = self._clustering_info.get('clustering') - if clustering_obj is not None: - if isinstance(clustering_obj, dict): - clustering_obj = next(iter(clustering_obj.values())) - params.populate_from_tsam(clustering_obj.tsam) - - # Serialize parameters (now includes indices) using Interface pattern - params_ref, params_arrays = params._create_reference_structure() - ds.attrs['_clustering_params'] = json.dumps(params_ref) - ds.update(params_arrays) - - # Store component labels to clusterize - components = self._clustering_info.get('components_to_clusterize') - if components: - ds.attrs['_clustering_components'] = json.dumps([c.label for c in components]) - # Add version info ds.attrs['flixopt_version'] = __version__ @@ -767,34 +739,6 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: carrier = 
cls._resolve_reference_structure(carrier_data, {}) flow_system._carriers.add(carrier) - # Restore clustering info if present (using Interface pattern) - if '_clustering_params' in reference_structure: - # Restore parameters (now includes indices via Interface pattern) - params = cls._resolve_reference_structure( - json.loads(reference_structure['_clustering_params']), arrays_dict - ) - - # Restore component references - components_to_clusterize = None - if '_clustering_components' in reference_structure: - component_labels = json.loads(reference_structure['_clustering_components']) - components_to_clusterize = [ - flow_system.components[label] for label in component_labels if label in flow_system.components - ] - - flow_system._clustering_info = { - 'parameters': params, - 'components_to_clusterize': components_to_clusterize, - 'restored_from_file': True, - } - if params.has_indices: - n_cluster_periods = len(params.cluster_order) - n_clusters = int(params.cluster_order.max()) + 1 - logger.info( - f'Restored clustering: {n_clusters} clusters, ' - f'{n_cluster_periods} periods, period_length={params.period_length}.' - ) - # Reconnect network to populate bus inputs/outputs (not stored in NetCDF). flow_system.connect_and_transform() @@ -1348,10 +1292,6 @@ def build_model(self, normalize_weights: bool = True) -> FlowSystem: self.model.do_modeling() - # Add clustering constraints if this is a clustered FlowSystem - if self._clustering_info is not None: - self._add_clustering_constraints() - # Add typical periods storage modeling if this is a reduced FlowSystem if self._cluster_info is not None: self._add_typical_periods_modeling() @@ -1415,64 +1355,6 @@ def _add_typical_periods_modeling(self) -> None: ) typical_periods_model.do_modeling() - def _add_clustering_constraints(self) -> None: - """Add clustering constraints to the model.""" - import copy - - from .clustering import ClusteringModel - - info = self._clustering_info or {} - - if 'parameters' not in info: - raise KeyError('_clustering_info missing required key: "parameters"') - - base_parameters = info['parameters'] - clustering_obj = info.get('clustering') - - # Check if this is a multi-period/scenario clustering - is_multi_dimensional = isinstance(clustering_obj, dict) and len(clustering_obj) > 1 - - if is_multi_dimensional: - # For multi-period/scenario, create separate constraints for each combination - # Each (period, scenario) has its own clustering with different cluster assignments - for (period_label, scenario_label), clustering in clustering_obj.items(): - # Create a copy of parameters with this period's indices - params_copy = copy.copy(base_parameters) - params_copy.populate_from_tsam(clustering.tsam) - - # Determine period/scenario selector - period_selector = period_label if period_label is not None else None - scenario_selector = scenario_label if scenario_label is not None else None - - clustering_model = ClusteringModel( - model=self.model, - clustering_parameters=params_copy, - flow_system=self, - components_to_clusterize=info.get('components_to_clusterize'), - period_selector=period_selector, - scenario_selector=scenario_selector, - ) - clustering_model.do_modeling() - else: - # Single dimension - use original logic - if not base_parameters.has_indices: - if clustering_obj is None: - raise KeyError( - '_clustering_info missing "clustering" and parameters have no indices. ' - 'Either provide cluster_order/period_length or run transform.cluster() first.' 
- ) - if isinstance(clustering_obj, dict): - clustering_obj = next(iter(clustering_obj.values())) - base_parameters.populate_from_tsam(clustering_obj.tsam) - - clustering_model = ClusteringModel( - model=self.model, - clustering_parameters=base_parameters, - flow_system=self, - components_to_clusterize=info.get('components_to_clusterize'), - ) - clustering_model.do_modeling() - def solve(self, solver: _Solver) -> FlowSystem: """ Solve the optimization model and populate the solution. diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 55fe39bf2..8243d5bc3 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -16,7 +16,6 @@ import xarray as xr if TYPE_CHECKING: - from .clustering import ClusteringParameters from .flow_system import FlowSystem logger = logging.getLogger('flixopt') @@ -30,11 +29,11 @@ class TransformAccessor: with modified structure or data, accessible via `flow_system.transform`. Examples: - Clustered optimization (8 typical days): + Time series aggregation (8 typical days): - >>> clustered_fs = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D') - >>> clustered_fs.optimize(solver) - >>> print(clustered_fs.solution) + >>> reduced_fs = flow_system.transform.cluster_reduce(n_clusters=8, cluster_duration='1D') + >>> reduced_fs.optimize(solver) + >>> expanded_fs = reduced_fs.transform.expand_solution() Future MGA: @@ -51,326 +50,6 @@ def __init__(self, flow_system: FlowSystem) -> None: """ self._fs = flow_system - def cluster( - self, - n_clusters: int | None, - cluster_duration: str | float, - n_segments: int | None = None, - aggregate_data: bool = True, - include_storage: bool = True, - flexibility_percent: float = 0, - flexibility_penalty: float = 0, - time_series_for_high_peaks: list | None = None, - time_series_for_low_peaks: list | None = None, - components_to_clusterize: list | None = None, - ) -> FlowSystem: - """ - Create a clustered FlowSystem for time series aggregation. - - This method creates a new FlowSystem that can be optimized with - clustered time series data. The clustering reduces computational - complexity by identifying representative time segments (e.g., typical days). - - For FlowSystems with multiple periods or scenarios, clustering is performed - independently for each period/scenario combination. - - The returned FlowSystem: - - Has the same timesteps as the original (clustering works via constraints, not reduction) - - Has aggregated time series data (if ``aggregate_data=True``) - - Will have clustering constraints added during ``build_model()`` - - Args: - n_clusters: Number of clusters (typical segments) to create. - E.g., 8 for 8 typical days from a year of data. - Set to None to skip inter-period clustering (only do segmentation). - cluster_duration: Duration of each cluster segment. Can be a pandas-style - string ('1D', '24h', '6h') or a numeric value in hours. - n_segments: Number of segments within each cluster (inner-period clustering). - For example, n_segments=4 with cluster_duration='1D' will reduce - 24 hourly timesteps to 4 representative segments per day. - Default is None (no inner-period segmentation). - aggregate_data: If True (default), aggregate time series data and fix - all time-dependent variables. If False, only fix binary variables. - include_storage: Whether to include storage flows in clustering constraints. - Default is True. - flexibility_percent: Maximum percentage (0-100) of binary values that can - deviate from the clustered pattern. 
Default is 0 (no flexibility). - flexibility_penalty: Penalty added to objective for each deviation. - Only applies when flexibility_percent > 0. Default is 0. - time_series_for_high_peaks: List of TimeSeriesData to force inclusion of - segments with high values. - time_series_for_low_peaks: List of TimeSeriesData to force inclusion of - segments with low values. - components_to_clusterize: List of components to apply clustering to. - If None, all components are clustered. - - Returns: - A new FlowSystem configured for clustered optimization. - - Raises: - ValueError: If timestep sizes are inconsistent. - ValueError: If cluster_duration is not a multiple of timestep size. - - Examples: - Basic clustered optimization (8 typical days): - - >>> clustered_fs = flow_system.transform.cluster( - ... n_clusters=8, - ... cluster_duration='1D', - ... ) - >>> clustered_fs.optimize(solver) - - With inner-period segmentation (8 typical days × 4 segments = 32 timesteps): - - >>> clustered_fs = flow_system.transform.cluster( - ... n_clusters=8, - ... cluster_duration='1D', - ... n_segments=4, # Reduce 24 hours to 4 segments - ... ) - - Segmentation only (no clustering, reduce each day to 4 segments): - - >>> clustered_fs = flow_system.transform.cluster( - ... n_clusters=None, # Skip inter-period clustering - ... cluster_duration='1D', - ... n_segments=4, - ... ) - - Multi-period FlowSystem (each year clustered independently): - - >>> multi_year_fs = fx.FlowSystem(timesteps, periods=pd.Index([2025, 2026, 2027])) - >>> clustered_fs = multi_year_fs.transform.cluster( - ... n_clusters=8, - ... cluster_duration='1D', - ... ) - """ - from .clustering import ClusteringParameters - - # Create ClusteringParameters from keyword arguments - params = ClusteringParameters( - n_clusters=n_clusters, - cluster_duration=cluster_duration, - n_segments=n_segments, - aggregate_data=aggregate_data, - include_storage=include_storage, - flexibility_percent=flexibility_percent, - flexibility_penalty=flexibility_penalty, - time_series_for_high_peaks=time_series_for_high_peaks, - time_series_for_low_peaks=time_series_for_low_peaks, - ) - - # Check for multi-period/scenario dimensions - has_periods = self._fs.periods is not None - has_scenarios = self._fs.scenarios is not None - - if not has_periods and not has_scenarios: - # Simple case: no extra dimensions - return self._cluster_simple(params, components_to_clusterize) - else: - # Multi-dimensional case: cluster independently per period/scenario - return self._cluster_multi_dimensional(params, components_to_clusterize) - - def _cluster_simple( - self, - params: ClusteringParameters, - components_to_clusterize: list | None, - ) -> FlowSystem: - """Perform clustering for simple case (no periods/scenarios).""" - import numpy as np - - from .clustering import Clustering - from .core import DataConverter, TimeSeriesData, drop_constant_arrays - - # Validation - dt_min = float(self._fs.timestep_duration.min().item()) - dt_max = float(self._fs.timestep_duration.max().item()) - if dt_min != dt_max: - raise ValueError( - f'Clustering failed due to inconsistent time step sizes: ' - f'delta_t varies from {dt_min} to {dt_max} hours.' - ) - ratio = params.cluster_duration_hours / dt_max - if not np.isclose(ratio, round(ratio), atol=1e-9): - raise ValueError( - f'The selected cluster_duration={params.cluster_duration_hours}h does not match the time ' - f'step size of {dt_max} hours. It must be an integer multiple of {dt_max} hours.' 
- ) - - logger.info(f'{"":#^80}') - logger.info(f'{" Clustering TimeSeries Data ":#^80}') - - # Get dataset representation - ds = self._fs.to_dataset(include_solution=False) - temporaly_changing_ds = drop_constant_arrays(ds, dim='time') - - # Perform clustering - clustering = Clustering( - original_data=temporaly_changing_ds.to_dataframe(), - hours_per_time_step=float(dt_min), - hours_per_period=params.cluster_duration_hours, - nr_of_periods=params.n_clusters, - n_segments=params.n_segments, - weights=self._calculate_clustering_weights(temporaly_changing_ds), - time_series_for_high_peaks=params.labels_for_high_peaks, - time_series_for_low_peaks=params.labels_for_low_peaks, - ) - clustering.cluster() - - # Create new FlowSystem (with aggregated data if requested) - if params.aggregate_data: - ds = self._fs.to_dataset() - for name, series in clustering.aggregated_data.items(): - da = DataConverter.to_dataarray(series, self._fs.coords).rename(name).assign_attrs(ds[name].attrs) - if TimeSeriesData.is_timeseries_data(da): - da = TimeSeriesData.from_dataarray(da) - ds[name] = da - - from .flow_system import FlowSystem - - clustered_fs = FlowSystem.from_dataset(ds) - else: - clustered_fs = self._fs.copy() - - # Store clustering info for later use - clustered_fs._clustering_info = { - 'parameters': params, - 'clustering': clustering, - 'components_to_clusterize': components_to_clusterize, - 'original_fs': self._fs, - } - - return clustered_fs - - def _cluster_multi_dimensional( - self, - params: ClusteringParameters, - components_to_clusterize: list | None, - ) -> FlowSystem: - """Perform clustering independently for each period/scenario combination.""" - import numpy as np - - from .clustering import Clustering - from .core import DataConverter, TimeSeriesData, drop_constant_arrays - - # Validation - dt_min = float(self._fs.timestep_duration.min().item()) - dt_max = float(self._fs.timestep_duration.max().item()) - if dt_min != dt_max: - raise ValueError( - f'Clustering failed due to inconsistent time step sizes: ' - f'delta_t varies from {dt_min} to {dt_max} hours.' - ) - ratio = params.cluster_duration_hours / dt_max - if not np.isclose(ratio, round(ratio), atol=1e-9): - raise ValueError( - f'The selected cluster_duration={params.cluster_duration_hours}h does not match the time ' - f'step size of {dt_max} hours. It must be an integer multiple of {dt_max} hours.' 
- ) - - logger.info(f'{"":#^80}') - logger.info(f'{" Clustering TimeSeries Data (Multi-dimensional) ":#^80}') - - # Determine iteration dimensions - periods = list(self._fs.periods) if self._fs.periods is not None else [None] - scenarios = list(self._fs.scenarios) if self._fs.scenarios is not None else [None] - - ds = self._fs.to_dataset(include_solution=False).copy(deep=True) # Deep copy to allow in-place modifications - clustering_results: dict[tuple, Clustering] = {} - - # Cluster each period x scenario combination independently - for period_label in periods: - for scenario_label in scenarios: - # Select slice for this combination - selector = {} - if period_label is not None: - selector['period'] = period_label - if scenario_label is not None: - selector['scenario'] = scenario_label - - if selector: - ds_slice = ds.sel(**selector, drop=True) - else: - ds_slice = ds - - # Drop constant arrays for clustering - temporaly_changing_ds = drop_constant_arrays(ds_slice, dim='time') - - # Skip if no time-varying data - if len(temporaly_changing_ds.data_vars) == 0: - logger.warning(f'No time-varying data for period={period_label}, scenario={scenario_label}') - continue - - dim_info = [] - if period_label is not None: - dim_info.append(f'period={period_label}') - if scenario_label is not None: - dim_info.append(f'scenario={scenario_label}') - logger.info(f'Clustering {", ".join(dim_info) or "data"}...') - - # Perform clustering on this slice - clustering = Clustering( - original_data=temporaly_changing_ds.to_dataframe(), - hours_per_time_step=float(dt_min), - hours_per_period=params.cluster_duration_hours, - nr_of_periods=params.n_clusters, - n_segments=params.n_segments, - weights=self._calculate_clustering_weights(temporaly_changing_ds), - time_series_for_high_peaks=params.labels_for_high_peaks, - time_series_for_low_peaks=params.labels_for_low_peaks, - ) - clustering.cluster() - clustering_results[(period_label, scenario_label)] = clustering - - # Apply aggregated data if requested - if params.aggregate_data: - for name, series in clustering.aggregated_data.items(): - if name not in ds.data_vars: - continue - # Get the original data array to update - original_da = ds[name] - # Create aggregated data array - agg_da = DataConverter.to_dataarray(series, {'time': ds_slice.indexes['time']}) - - # Update the slice in the full dataset - if selector: - # Need to update just this slice in the full array - # Use xr.where or direct assignment - if 'period' in original_da.dims and period_label is not None: - if 'scenario' in original_da.dims and scenario_label is not None: - original_da.loc[{'period': period_label, 'scenario': scenario_label}] = ( - agg_da.values - ) - else: - original_da.loc[{'period': period_label}] = agg_da.values - elif 'scenario' in original_da.dims and scenario_label is not None: - original_da.loc[{'scenario': scenario_label}] = agg_da.values - - # Create new FlowSystem - from .flow_system import FlowSystem - - if params.aggregate_data: - # Ensure TimeSeriesData is preserved - for name in ds.data_vars: - da = ds[name] - if TimeSeriesData.is_timeseries_data(da): - ds[name] = TimeSeriesData.from_dataarray(da) - clustered_fs = FlowSystem.from_dataset(ds) - else: - clustered_fs = self._fs.copy() - - # Store clustering info for later use - clustered_fs._clustering_info = { - 'parameters': params, - 'clustering': clustering_results, # Required by _add_clustering_constraints - 'clustering_results': clustering_results, # Dict of Clustering objects per dimension - 'components_to_clusterize': 
components_to_clusterize, - 'original_fs': self._fs, - 'has_periods': self._fs.periods is not None, - 'has_scenarios': self._fs.scenarios is not None, - } - - return clustered_fs - @staticmethod def _calculate_clustering_weights(ds) -> dict[str, float]: """Calculate weights for clustering based on dataset attributes.""" @@ -398,155 +77,6 @@ def _calculate_clustering_weights(ds) -> dict[str, float]: return weights - def add_clustering( - self, - parameters: ClusteringParameters, - components_to_clusterize: list | None = None, - ) -> FlowSystem: - """Add clustering constraints using ClusteringParameters with pre-set indices. - - This method allows applying clustering to a FlowSystem using indices - computed outside of flixopt. This is useful when: - - You want to cluster based on a subset of time series data (faster tsam) - - You have custom clustering logic or algorithms - - You want to reuse clustering results across multiple FlowSystems - - The clustering indices define equality constraints that equate variable values - at specific timestep pairs. The parameters must have `cluster_order` and - `period_length` set (either directly or via `populate_from_tsam()`). - - Args: - parameters: ClusteringParameters with clustering indices set. - Must have `cluster_order` and `period_length` populated. - components_to_clusterize: Components to apply clustering to. - If None, all components are clustered. - - Returns: - A new FlowSystem with clustering constraints configured. - - Examples: - External clustering with tsam on subset of data: - - >>> import tsam.timeseriesaggregation as tsam - >>> # Extract subset of timeseries for clustering - >>> subset_df = pd.DataFrame( - ... { - ... 'price': flow_system['prices'].values, - ... 'demand': flow_system['heat_demand'].values, - ... }, - ... index=flow_system.timesteps, - ... ) - >>> - >>> # Run tsam on subset - >>> aggregation = tsam.TimeSeriesAggregation(subset_df, noTypicalPeriods=8, hoursPerPeriod=24) - >>> aggregation.createTypicalPeriods() - >>> - >>> # Create parameters and populate from tsam - >>> params = fx.ClusteringParameters(n_clusters=8, cluster_duration='1D') - >>> params.populate_from_tsam(aggregation) - >>> - >>> # Apply to FlowSystem - >>> clustered_fs = flow_system.transform.add_clustering(params) - >>> clustered_fs.optimize(solver) - - With pre-computed cluster assignments: - - >>> import xarray as xr - >>> params = fx.ClusteringParameters( - ... n_clusters=8, - ... cluster_duration='1D', - ... cluster_order=xr.DataArray([0, 1, 2, 0, 1, 2, 0, 1], dims=['cluster_period']), - ... period_length=24, - ... flexibility_percent=5, # Allow 5% binary deviation - ... ) - >>> clustered_fs = flow_system.transform.add_clustering(params) - """ - from .clustering import ClusteringParameters - from .core import DataConverter, TimeSeriesData - - # Validate parameters type - if not isinstance(parameters, ClusteringParameters): - raise TypeError(f'parameters must be ClusteringParameters, got {type(parameters).__name__}') - - # Validate that indices are set - if not parameters.has_indices: - raise ValueError( - 'ClusteringParameters must have indices set. ' - 'Either provide cluster_order/period_length directly, pass tsam_aggregation, or call populate_from_tsam().' 
- ) - - # Aggregate data if tsam_aggregation is provided and aggregate_data=True - if parameters.aggregate_data and parameters.tsam_aggregation is not None: - ds = self._fs.to_dataset() - tsam_agg = parameters.tsam_aggregation - - # Get aggregated data from tsam (this is pre-computed for the subset that was clustered) - aggregated_df = tsam_agg.predictOriginalData() - - # For variables not in the clustering subset, compute aggregation manually - # using the cluster assignments - period_length = parameters.period_length - cluster_order = parameters.cluster_order.values - n_timesteps = len(self._fs.timesteps) - - for name in ds.data_vars: - da = ds[name] - if 'time' not in da.dims: - continue - - if name in aggregated_df.columns: - # Use tsam's aggregated result for columns that were clustered - series = aggregated_df[name] - da_new = DataConverter.to_dataarray(series, self._fs.coords).rename(name).assign_attrs(da.attrs) - else: - # Manually aggregate using cluster assignments - # For each timestep, replace with mean of corresponding timesteps in same cluster - import numpy as np - - values = da.values.copy() - aggregated_values = np.zeros_like(values) - - # Build mapping: for each cluster, collect all timestep indices - n_clusters = int(cluster_order.max()) + 1 - cluster_to_timesteps: dict[int, list[int]] = {c: [] for c in range(n_clusters)} - for period_idx, cluster_id in enumerate(cluster_order): - for pos in range(period_length): - ts_idx = period_idx * period_length + pos - if ts_idx < n_timesteps: - cluster_to_timesteps[int(cluster_id)].append((ts_idx, pos)) - - # For each cluster, compute mean for each position - for _cluster_id, ts_list in cluster_to_timesteps.items(): - # Group by position within period - position_values: dict[int, list] = {} - for ts_idx, pos in ts_list: - position_values.setdefault(pos, []).append(values[ts_idx]) - - # Compute mean for each position and assign back - for ts_idx, pos in ts_list: - aggregated_values[ts_idx] = np.mean(position_values[pos]) - - da_new = da.copy(data=aggregated_values) - - if TimeSeriesData.is_timeseries_data(da_new): - da_new = TimeSeriesData.from_dataarray(da_new) - ds[name] = da_new - - from .flow_system import FlowSystem - - clustered_fs = FlowSystem.from_dataset(ds) - else: - # No data aggregation - just copy - clustered_fs = self._fs.copy() - - # Store clustering info - clustered_fs._clustering_info = { - 'parameters': parameters, - 'components_to_clusterize': components_to_clusterize, - } - - return clustered_fs - def sel( self, time: str | slice | list[str] | pd.Timestamp | pd.DatetimeIndex | None = None, @@ -1539,7 +1069,6 @@ def aggregate( self, method: str | Any = 'tsam', n_representatives: int | None = None, - reduce: bool = True, **kwargs, ) -> FlowSystem: """Unified aggregation method supporting multiple backends. @@ -1547,37 +1076,30 @@ def aggregate( This is the recommended API for time series aggregation. It supports multiple backends (TSAM, manual, etc.) through a unified interface. - For TSAM backend, this delegates to cluster() or cluster_reduce() - based on the ``reduce`` parameter. + For TSAM backend, this delegates to cluster_reduce(). Args: method: Aggregation backend. Options: - 'tsam': Use TSAM package for k-means clustering (default) - 'manual': Use ManualBackend with pre-computed mapping - Custom Aggregator instance - n_representatives: Target number of representative timesteps. + n_representatives: Target number of clusters (typical periods). 
For 'tsam' with cluster_duration='1D', this is the number of typical days. - reduce: Aggregation mode: - - True: Reduce timesteps (cluster_reduce mode) - - False: Keep all timesteps with equality constraints (cluster mode) **kwargs: Backend-specific options. For 'tsam': - cluster_duration: Duration per cluster ('1D', '24h', etc.) - - n_segments: Inner-period segmentation - time_series_for_high_peaks: Force high-value period inclusion - time_series_for_low_peaks: Force low-value period inclusion - - aggregate_data: Whether to aggregate time series data - - include_storage: Include storage in constraints + - weights: Custom clustering weights Returns: - New FlowSystem with aggregation applied. + New FlowSystem with reduced timesteps. Example: >>> # TSAM clustering with 8 typical days >>> fs_agg = fs.transform.aggregate( ... method='tsam', ... n_representatives=8, - ... reduce=True, ... cluster_duration='1D', ... ) @@ -1586,7 +1108,6 @@ def aggregate( See Also: set_aggregation: For PyPSA-style manual aggregation - cluster: TSAM constraint-based clustering cluster_reduce: TSAM reduction-based clustering """ from .aggregation import Aggregator, get_backend @@ -1595,8 +1116,8 @@ def aggregate( if isinstance(method, str): backend_cls = get_backend(method) if method == 'tsam': - # Delegate to existing TSAM methods for backwards compatibility - return self._aggregate_tsam(n_representatives, reduce, **kwargs) + # Delegate to existing TSAM method + return self._aggregate_tsam(n_representatives, **kwargs) elif method == 'manual': raise ValueError("Use set_aggregation() for manual aggregation, not aggregate(method='manual')") else: @@ -1615,51 +1136,28 @@ def aggregate( def _aggregate_tsam( self, n_representatives: int | None, - reduce: bool, **kwargs, ) -> FlowSystem: - """Internal: delegate to existing TSAM methods.""" + """Internal: delegate to cluster_reduce().""" # Extract TSAM-specific kwargs cluster_duration = kwargs.pop('cluster_duration', '1D') - n_segments = kwargs.pop('n_segments', None) - aggregate_data = kwargs.pop('aggregate_data', True) - include_storage = kwargs.pop('include_storage', True) - flexibility_percent = kwargs.pop('flexibility_percent', 0) - flexibility_penalty = kwargs.pop('flexibility_penalty', 0) time_series_for_high_peaks = kwargs.pop('time_series_for_high_peaks', None) time_series_for_low_peaks = kwargs.pop('time_series_for_low_peaks', None) - components_to_clusterize = kwargs.pop('components_to_clusterize', None) weights = kwargs.pop('weights', None) - if reduce: - # cluster_reduce doesn't support n_segments - return self.cluster_reduce( - n_clusters=n_representatives, - cluster_duration=cluster_duration, - weights=weights, - time_series_for_high_peaks=time_series_for_high_peaks, - time_series_for_low_peaks=time_series_for_low_peaks, - storage_cyclic=kwargs.pop('storage_cyclic', True), - ) - else: - return self.cluster( - n_clusters=n_representatives, - cluster_duration=cluster_duration, - n_segments=n_segments, - aggregate_data=aggregate_data, - include_storage=include_storage, - flexibility_percent=flexibility_percent, - flexibility_penalty=flexibility_penalty, - time_series_for_high_peaks=time_series_for_high_peaks, - time_series_for_low_peaks=time_series_for_low_peaks, - components_to_clusterize=components_to_clusterize, - ) + return self.cluster_reduce( + n_clusters=n_representatives, + cluster_duration=cluster_duration, + weights=weights, + time_series_for_high_peaks=time_series_for_high_peaks, + time_series_for_low_peaks=time_series_for_low_peaks, + 
storage_cyclic=kwargs.pop('storage_cyclic', True), + ) def set_aggregation( self, timestep_mapping: xr.DataArray, weights: xr.DataArray, - reduce: bool = True, cluster_structure: Any = None, aggregated_data: xr.Dataset | None = None, ) -> FlowSystem: @@ -1674,21 +1172,19 @@ def set_aggregation( Args: timestep_mapping: Maps each original timestep to representative index. - DataArray with dims [original_time] or [original_time, period, scenario]. + DataArray with dims [original_time]. Values should be integers in range [0, n_representatives). weights: Weight for each representative timestep. - DataArray with dims [time] or [time, period, scenario]. + DataArray with dims [time]. Typically equals count of original timesteps each representative covers. - reduce: Aggregation mode: - - True (default): Reduce timesteps (like cluster_reduce) - - False: Keep all timesteps with equality constraints (like cluster) - cluster_structure: Optional ClusterStructure for storage inter-period linking. - Required for proper storage optimization in reduce mode. + This becomes the cluster_weight in the reduced FlowSystem. + cluster_structure: Optional ClusterStructure for storage inter-cluster linking. + Required for proper storage optimization. aggregated_data: Optional pre-aggregated time series data. If not provided, data will be extracted from mapping. Returns: - New FlowSystem with aggregation applied. + New FlowSystem with reduced timesteps. Example: >>> # External clustering with sklearn @@ -1702,7 +1198,6 @@ def set_aggregation( >>> fs_agg = fs.transform.set_aggregation( ... timestep_mapping=mapping, ... weights=weights, - ... reduce=True, ... ) See Also: @@ -1724,13 +1219,12 @@ def set_aggregation( data = self._fs_data_to_dataset() n_representatives = len(weights) - result = backend.aggregate(data, n_representatives) + _result = backend.aggregate(data, n_representatives) # noqa: F841 - # Apply aggregation based on mode - if reduce: - return self._apply_reduce_aggregation(result) - else: - return self._apply_constraint_aggregation(result) + # Full implementation would create FlowSystem directly from result + raise NotImplementedError( + 'set_aggregation() is not yet fully implemented. Use cluster_reduce() for TSAM-based aggregation.' + ) def _fs_data_to_dataset(self) -> xr.Dataset: """Convert FlowSystem time series data to xarray Dataset.""" @@ -1748,29 +1242,6 @@ def _fs_data_to_dataset(self) -> xr.Dataset: coords={'time': self._fs.timesteps}, ) - def _apply_reduce_aggregation(self, result) -> FlowSystem: - """Apply reduce-mode aggregation using AggregationResult. - - This creates a new FlowSystem with reduced timesteps, similar to - cluster_reduce() but using the generic AggregationResult. - """ - # For now, delegate to existing cluster_reduce infrastructure - # Full implementation would create FlowSystem directly from result - raise NotImplementedError( - 'set_aggregation with reduce=True not yet fully implemented. ' - 'Use cluster_reduce() for now, or set_aggregation with reduce=False.' - ) - - def _apply_constraint_aggregation(self, result) -> FlowSystem: - """Apply constraint-mode aggregation using AggregationResult. - - This creates equality constraints to equate clustered timesteps, - similar to cluster() but using the generic AggregationResult. - """ - # For now, delegate to existing cluster infrastructure - # Full implementation would create constraints from result.timestep_mapping - raise NotImplementedError('set_aggregation with reduce=False not yet fully implemented. 
Use cluster() for now.') - # Future methods can be added here: # # def mga(self, alternatives: int = 5) -> FlowSystem: diff --git a/tests/test_aggregation/test_base.py b/tests/test_aggregation/test_base.py index ce0439590..7930efcca 100644 --- a/tests/test_aggregation/test_base.py +++ b/tests/test_aggregation/test_base.py @@ -153,9 +153,7 @@ def test_creation(self): info = AggregationInfo( result=result, original_flow_system=None, # Would be FlowSystem in practice - mode='reduce', backend_name='tsam', ) - assert info.mode == 'reduce' assert info.backend_name == 'tsam' diff --git a/tests/test_aggregation/test_integration.py b/tests/test_aggregation/test_integration.py index a7a560cf3..c0f5f3054 100644 --- a/tests/test_aggregation/test_integration.py +++ b/tests/test_aggregation/test_integration.py @@ -123,7 +123,6 @@ def test_aggregate_tsam_delegates_to_cluster_reduce(self): fs_agg = fs.transform.aggregate( method='tsam', n_representatives=2, - reduce=True, cluster_duration='1D', ) @@ -154,7 +153,6 @@ def test_set_aggregation_raises_not_implemented(self): fs.transform.set_aggregation( timestep_mapping=mapping, weights=weights, - reduce=True, ) From b987bfd9b6ff244651578239cf1cdaaca7f591ed Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 20:15:08 +0100 Subject: [PATCH 061/191] Update CHANGELOG.md --- CHANGELOG.md | 133 ++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 100 insertions(+), 33 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b9ce9f3b2..f423f4609 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -53,62 +53,129 @@ Until here --> ## [5.1.0] - Upcoming -**Summary**: This release improves the time series clustering (tsam) integration with a simplified keyword-based API. +**Summary**: This release introduces a new **aggregation abstraction layer** for time series clustering, making flixopt future-proof for alternative clustering methods beyond TSAM. The API is simplified to focus on timestep reduction (`cluster_reduce`), removing the constraint-based clustering approach. 
### ✨ Added -**Improved Clustering API**: The new `transform.cluster()` method provides a clean, keyword-based interface: +**New Aggregation Module** (`flixopt.aggregation`): A backend-agnostic abstraction for time series aggregation: ```python -# Cluster into 8 typical days -clustered_fs = flow_system.transform.cluster( - n_clusters=8, - cluster_duration='1D', -) -clustered_fs.optimize(solver) +from flixopt import aggregation -# With peak preservation -clustered_fs = flow_system.transform.cluster( - n_clusters=8, +# Available backends +aggregation.list_backends() # ['tsam', 'manual'] + +# Core data structures for any aggregation method +aggregation.AggregationResult # Universal result format +aggregation.ClusterStructure # For storage inter-cluster linking +aggregation.Aggregator # Protocol for custom backends +``` + +**Unified Aggregation API**: New `transform.aggregate()` method supporting multiple backends: + +```python +# TSAM clustering (default) - clusters 365 days into 8 typical days +fs_reduced = flow_system.transform.aggregate( + method='tsam', + n_representatives=8, cluster_duration='1D', - time_series_for_high_peaks=[heat_demand_ts], ) +fs_reduced.optimize(solver) + +# Expand back to full resolution +fs_expanded = fs_reduced.transform.expand_solution() ``` -### 💥 Breaking Changes +**TimeSeriesWeights Class**: PyPSA-inspired unified weighting system: + +```python +# Access weights on any FlowSystem +weights = flow_system.weights -**ClusteringParameters API Changed**: The `ClusteringParameters` class has new parameter names: +# temporal = timestep_duration × cluster_weight +weights.temporal # Applied to objective and constraints +weights.effective_objective # For objective function (with optional override) -| Old Parameter | New Parameter | -|---------------|---------------| -| `hours_per_period` | `cluster_duration` (accepts '1D', '24h', or hours) | -| `nr_of_periods` | `n_clusters` | -| `fix_storage_flows` | `include_storage` | -| `aggregate_data_and_fix_non_binary_vars` | `aggregate_data` | -| `percentage_of_period_freedom` | `flexibility_percent` | -| `penalty_of_period_freedom` | `flexibility_penalty` | +# Convenience method for weighted summation +total_energy = weights.sum_over_time(flow_rates) +``` -**Migration Example**: +**Manual Aggregation Backend**: Enables PyPSA-style workflow with external clustering tools: ```python -# Old (v5.0): -params = fx.ClusteringParameters( - hours_per_period=24, - nr_of_periods=8, - fix_storage_flows=True, - aggregate_data_and_fix_non_binary_vars=True, +from flixopt.aggregation import ManualBackend, create_manual_backend_from_labels + +# Use sklearn or any clustering algorithm +from sklearn.cluster import KMeans +# ... perform clustering, get labels ... 
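# Illustrative sketch, not part of the original snippet — assumes
# `demand_values` is a 1-D numpy array of hourly values covering whole days:
X = demand_values.reshape(-1, 24)  # one row per day
labels = KMeans(n_clusters=8).fit_predict(X)  # one cluster label per day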
+ +# Create backend from cluster labels +backend = create_manual_backend_from_labels(labels, timesteps_per_cluster=24) + +# Or directly with mapping and weights +backend = ManualBackend( + timestep_mapping=my_mapping, # xr.DataArray: original → representative + representative_weights=my_weights, # xr.DataArray: weight per representative ) -clustered_fs = flow_system.transform.cluster(params) +``` + +**set_aggregation() Method** (placeholder): Future PyPSA-style manual aggregation: -# New (v5.1): +```python +# Coming soon - apply external clustering results directly +fs_agg = flow_system.transform.set_aggregation( + timestep_mapping=mapping, + weights=weights, +) +``` + +### 💥 Breaking Changes + +**Removed `transform.cluster()` method**: The constraint-based clustering approach has been removed. Use `cluster_reduce()` instead: + +```python +# Old (removed): clustered_fs = flow_system.transform.cluster( n_clusters=8, cluster_duration='1D', - include_storage=True, - aggregate_data=True, +) + +# New (use cluster_reduce instead): +reduced_fs = flow_system.transform.cluster_reduce( + n_clusters=8, + cluster_duration='1D', ) ``` +**Removed constraint-based clustering infrastructure**: +- `transform.cluster()` - removed (use `cluster_reduce()`) +- `transform.add_clustering()` - removed +- `FlowSystem._clustering_info` - removed (only `_cluster_info` for `cluster_reduce` remains) +- `FlowSystem._add_clustering_constraints()` - removed + +### ♻️ Changed + +**Terminology clarification** in aggregation module: +- "cluster" = a group of similar time chunks (e.g., similar days grouped together) +- "typical period" = a representative time chunk for a cluster (TSAM terminology) +- "cluster duration" = the length of each time chunk (e.g., 24h for daily clustering) + +Note: This is separate from the model's "period" dimension (years/months) and "scenario" dimension. + +**xarray-native data structures**: All aggregation interfaces use `xr.DataArray` and `xr.Dataset` for proper coordinate handling. 
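For orientation, a minimal sketch of these shapes (dimension names follow the `set_aggregation()` docstring; the values are illustrative, not from the release):

```python
import numpy as np
import xarray as xr

# Map 4 original timesteps onto 2 representatives (illustrative values)
timestep_mapping = xr.DataArray(np.array([0, 1, 0, 1]), dims=['original_time'])

# Each representative covers 2 original timesteps
weights = xr.DataArray(np.array([2.0, 2.0]), dims=['time'])
```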
+ +### 🔥 Removed + +- `transform.cluster()` method (constraint-based clustering) +- `transform.add_clustering()` method +- `ClusteringModel` constraint generation (internal) +- `_clustering_info` storage on FlowSystem + +### 📝 Docs + +- Improved terminology: clarified distinction between clustering "typical periods" and model "period" dimension +- Added aggregation module documentation with backend examples + --- From bbebb96124b255fd43ccd66e0a41dc4700b7e9c2 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 20:24:27 +0100 Subject: [PATCH 062/191] Update notebooks --- docs/notebooks/08c-cluster-reduce.ipynb | 501 ++++++++++ docs/notebooks/08c-clustering.ipynb | 913 ------------------ docs/notebooks/08d-cluster-multiperiod.ipynb | 596 ++++++++++++ docs/notebooks/08d-external-clustering.ipynb | 419 -------- docs/notebooks/08e-cluster-and-reduce.ipynb | 446 --------- .../data/generate_example_systems.py | 21 +- 6 files changed, 1110 insertions(+), 1786 deletions(-) create mode 100644 docs/notebooks/08c-cluster-reduce.ipynb delete mode 100644 docs/notebooks/08c-clustering.ipynb create mode 100644 docs/notebooks/08d-cluster-multiperiod.ipynb delete mode 100644 docs/notebooks/08d-external-clustering.ipynb delete mode 100644 docs/notebooks/08e-cluster-and-reduce.ipynb diff --git a/docs/notebooks/08c-cluster-reduce.ipynb b/docs/notebooks/08c-cluster-reduce.ipynb new file mode 100644 index 000000000..2fc7beb85 --- /dev/null +++ b/docs/notebooks/08c-cluster-reduce.ipynb @@ -0,0 +1,501 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0", + "metadata": {}, + "source": [ + "# Time Series Clustering with `cluster_reduce()`\n", + "\n", + "Accelerate investment optimization using typical periods (clustering).\n", + "\n", + "This notebook demonstrates:\n", + "\n", + "- **Typical periods**: Cluster similar time segments (e.g., days) and solve only representative ones\n", + "- **Weighted costs**: Automatically weight operational costs by cluster occurrence\n", + "- **Storage linking**: Track storage state across original periods\n", + "- **Two-stage workflow**: Fast sizing with clustering, accurate dispatch at full resolution\n", + "\n", + "!!! 
note \"Requirements\"\n", + " This notebook requires the `tsam` package: `pip install tsam`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "import timeit\n", + "from pathlib import Path\n", + "\n", + "import numpy as np\n", + "import pandas as pd\n", + "import plotly.graph_objects as go\n", + "from plotly.subplots import make_subplots\n", + "\n", + "import flixopt as fx\n", + "\n", + "fx.CONFIG.notebook()" + ] + }, + { + "cell_type": "markdown", + "id": "2", + "metadata": {}, + "source": [ + "## Load the FlowSystem\n", + "\n", + "We use a pre-built district heating system with real-world time series data (one month at 15-min resolution):" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "# Generate example data if not present\n", + "data_file = Path('data/district_heating_system.nc4')\n", + "if not data_file.exists():\n", + " from data.generate_example_systems import create_district_heating_system\n", + "\n", + " fs = create_district_heating_system()\n", + " fs.to_netcdf(data_file)\n", + "\n", + "# Load the district heating system\n", + "flow_system = fx.FlowSystem.from_netcdf(data_file)\n", + "\n", + "timesteps = flow_system.timesteps\n", + "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 96:.0f} days at 15-min resolution)')\n", + "print(f'Components: {list(flow_system.components.keys())}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "# Visualize input data\n", + "heat_demand = flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile\n", + "electricity_price = flow_system.components['GridBuy'].outputs[0].effects_per_flow_hour['costs']\n", + "\n", + "fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.1)\n", + "fig.add_trace(go.Scatter(x=timesteps, y=heat_demand.values, name='Heat Demand', line=dict(width=0.5)), row=1, col=1)\n", + "fig.add_trace(\n", + " go.Scatter(x=timesteps, y=electricity_price.values, name='Electricity Price', line=dict(width=0.5)), row=2, col=1\n", + ")\n", + "fig.update_layout(height=400, title='One Month of Input Data')\n", + "fig.update_yaxes(title_text='Heat Demand [MW]', row=1, col=1)\n", + "fig.update_yaxes(title_text='El. Price [€/MWh]', row=2, col=1)\n", + "fig.show()" + ] + }, + { + "cell_type": "markdown", + "id": "5", + "metadata": {}, + "source": [ + "## Method 1: Full Optimization (Baseline)\n", + "\n", + "First, solve the complete problem with all 2976 timesteps:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "solver = fx.solvers.HighsSolver(mip_gap=0.01)\n", + "\n", + "start = timeit.default_timer()\n", + "fs_full = flow_system.copy()\n", + "fs_full.optimize(solver)\n", + "time_full = timeit.default_timer() - start\n", + "\n", + "print(f'Full optimization: {time_full:.1f} seconds')\n", + "print(f'Total cost: {fs_full.solution[\"costs\"].item():,.0f} €')\n", + "print('\\nOptimized sizes:')\n", + "for name, size in fs_full.statistics.sizes.items():\n", + " print(f' {name}: {float(size.item()):.1f}')" + ] + }, + { + "cell_type": "markdown", + "id": "7", + "metadata": {}, + "source": [ + "## Method 2: Clustering with `cluster_reduce()`\n", + "\n", + "The `cluster_reduce()` method:\n", + "\n", + "1. 
**Clusters similar days** using the TSAM (Time Series Aggregation Module) package\n", + "2. **Reduces timesteps** to only typical periods (e.g., 8 typical days = 768 timesteps)\n", + "3. **Weights costs** by how many original days each typical day represents\n", + "4. **Links storage states** across original periods for correct long-term behavior\n", + "\n", + "!!! warning \"Peak Forcing\"\n", + " Always use `time_series_for_high_peaks` to ensure extreme demand days are captured.\n", + " Without this, clustering may miss peak periods, causing undersized components." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "start = timeit.default_timer()\n", + "\n", + "# IMPORTANT: Force inclusion of peak demand periods!\n", + "peak_series = ['HeatDemand(Q_th)|fixed_relative_profile']\n", + "\n", + "# Create reduced FlowSystem with 8 typical days\n", + "fs_clustered = flow_system.transform.cluster_reduce(\n", + " n_clusters=8, # 8 typical days\n", + " cluster_duration='1D', # Daily clustering\n", + " time_series_for_high_peaks=peak_series, # Capture peak demand day\n", + " storage_cyclic=True, # SOC[end] = SOC[start]\n", + ")\n", + "\n", + "time_clustering = timeit.default_timer() - start\n", + "print(f'Clustering time: {time_clustering:.1f} seconds')\n", + "print(f'Reduced: {len(flow_system.timesteps)} → {len(fs_clustered.timesteps)} timesteps')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "# Optimize the reduced system\n", + "start = timeit.default_timer()\n", + "fs_clustered.optimize(solver)\n", + "time_clustered = timeit.default_timer() - start\n", + "\n", + "print(f'Clustered optimization: {time_clustered:.1f} seconds')\n", + "print(f'Total cost: {fs_clustered.solution[\"costs\"].item():,.0f} €')\n", + "print(f'\\nSpeedup vs full: {time_full / (time_clustering + time_clustered):.1f}x')\n", + "print('\\nOptimized sizes:')\n", + "for name, size in fs_clustered.statistics.sizes.items():\n", + " print(f' {name}: {float(size.item()):.1f}')" + ] + }, + { + "cell_type": "markdown", + "id": "10", + "metadata": {}, + "source": [ + "## Understanding the Clustering\n", + "\n", + "The clustering algorithm groups similar days together. Let's inspect the cluster structure:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "# Show clustering info\n", + "info = fs_clustered._cluster_info\n", + "print('Clustering Configuration:')\n", + "print(f' Number of typical periods: {info[\"n_clusters\"]}')\n", + "print(f' Timesteps per period: {info[\"timesteps_per_cluster\"]}')\n", + "print(f' Total reduced timesteps: {info[\"n_clusters\"] * info[\"timesteps_per_cluster\"]}')\n", + "print(f' Cluster order (first 10 days): {info[\"cluster_order\"][:10]}...')\n", + "\n", + "# Show how many times each cluster appears\n", + "cluster_order = info['cluster_order']\n", + "unique, counts = np.unique(cluster_order, return_counts=True)\n", + "print('\\nCluster occurrences:')\n", + "for cluster_id, count in zip(unique, counts, strict=False):\n", + " print(f' Cluster {cluster_id}: {count} days')" + ] + }, + { + "cell_type": "markdown", + "id": "12", + "metadata": {}, + "source": [ + "## Method 3: Two-Stage Workflow (Recommended)\n", + "\n", + "The recommended approach for investment optimization:\n", + "\n", + "1. **Stage 1**: Fast sizing with `cluster_reduce()` \n", + "2. 
**Stage 2**: Fix sizes (with safety margin) and dispatch at full resolution\n", + "\n", + "!!! tip \"Safety Margin\"\n", + " Typical periods aggregate similar days, so individual days may have higher demand \n", + " than the typical day. Adding a 5-10% margin ensures feasibility." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "# Stage 1 already done above\n", + "print('Stage 1: Sizing with typical periods')\n", + "print(f' Time: {time_clustering + time_clustered:.1f} seconds')\n", + "print(f' Cost estimate: {fs_clustered.solution[\"costs\"].item():,.0f} €')\n", + "\n", + "# Apply safety margin to sizes\n", + "SAFETY_MARGIN = 1.05 # 5% buffer\n", + "sizes_with_margin = {name: float(size.item()) * SAFETY_MARGIN for name, size in fs_clustered.statistics.sizes.items()}\n", + "print(f'\\nSizes with {(SAFETY_MARGIN - 1) * 100:.0f}% safety margin:')\n", + "for name, size in sizes_with_margin.items():\n", + " original = fs_clustered.statistics.sizes[name].item()\n", + " print(f' {name}: {original:.1f} → {size:.1f}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "# Stage 2: Fix sizes and optimize at full resolution\n", + "print('Stage 2: Dispatch at full resolution')\n", + "start = timeit.default_timer()\n", + "\n", + "fs_dispatch = flow_system.transform.fix_sizes(sizes_with_margin)\n", + "fs_dispatch.optimize(solver)\n", + "\n", + "time_dispatch = timeit.default_timer() - start\n", + "print(f' Time: {time_dispatch:.1f} seconds')\n", + "print(f' Actual cost: {fs_dispatch.solution[\"costs\"].item():,.0f} €')\n", + "\n", + "# Total comparison\n", + "total_two_stage = time_clustering + time_clustered + time_dispatch\n", + "print(f'\\nTotal two-stage time: {total_two_stage:.1f} seconds')\n", + "print(f'Speedup vs full: {time_full / total_two_stage:.1f}x')" + ] + }, + { + "cell_type": "markdown", + "id": "15", + "metadata": {}, + "source": [ + "## Compare Results" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "results = {\n", + " 'Full (baseline)': {\n", + " 'Time [s]': time_full,\n", + " 'Cost [€]': fs_full.solution['costs'].item(),\n", + " 'CHP': fs_full.statistics.sizes['CHP(Q_th)'].item(),\n", + " 'Boiler': fs_full.statistics.sizes['Boiler(Q_th)'].item(),\n", + " 'Storage': fs_full.statistics.sizes['Storage'].item(),\n", + " },\n", + " 'Clustered (8 days)': {\n", + " 'Time [s]': time_clustering + time_clustered,\n", + " 'Cost [€]': fs_clustered.solution['costs'].item(),\n", + " 'CHP': fs_clustered.statistics.sizes['CHP(Q_th)'].item(),\n", + " 'Boiler': fs_clustered.statistics.sizes['Boiler(Q_th)'].item(),\n", + " 'Storage': fs_clustered.statistics.sizes['Storage'].item(),\n", + " },\n", + " 'Two-Stage': {\n", + " 'Time [s]': total_two_stage,\n", + " 'Cost [€]': fs_dispatch.solution['costs'].item(),\n", + " 'CHP': sizes_with_margin['CHP(Q_th)'],\n", + " 'Boiler': sizes_with_margin['Boiler(Q_th)'],\n", + " 'Storage': sizes_with_margin['Storage'],\n", + " },\n", + "}\n", + "\n", + "comparison = pd.DataFrame(results).T\n", + "baseline_cost = comparison.loc['Full (baseline)', 'Cost [€]']\n", + "baseline_time = comparison.loc['Full (baseline)', 'Time [s]']\n", + "comparison['Cost Gap [%]'] = ((comparison['Cost [€]'] - baseline_cost) / abs(baseline_cost) * 100).round(2)\n", + "comparison['Speedup'] = (baseline_time / comparison['Time [s]']).round(1)\n", + "\n", + 
"comparison.style.format(\n", + " {\n", + " 'Time [s]': '{:.1f}',\n", + " 'Cost [€]': '{:,.0f}',\n", + " 'CHP': '{:.1f}',\n", + " 'Boiler': '{:.1f}',\n", + " 'Storage': '{:.0f}',\n", + " 'Cost Gap [%]': '{:.2f}',\n", + " 'Speedup': '{:.1f}x',\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "17", + "metadata": {}, + "source": [ + "## Expand Solution to Full Resolution\n", + "\n", + "Use `expand_solution()` to map the clustered solution back to all original timesteps.\n", + "This repeats the typical period values for all days belonging to that cluster:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "# Expand the clustered solution to full resolution\n", + "fs_expanded = fs_clustered.transform.expand_solution()\n", + "\n", + "print(f'Expanded: {len(fs_clustered.timesteps)} → {len(fs_expanded.timesteps)} timesteps')\n", + "print(f'Cost: {fs_expanded.solution[\"costs\"].item():,.0f} €')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19", + "metadata": {}, + "outputs": [], + "source": [ + "# Compare heat balance: Full vs Expanded\n", + "fig = make_subplots(rows=2, cols=1, shared_xaxes=True, subplot_titles=['Full Optimization', 'Expanded from Clustering'])\n", + "\n", + "# Full\n", + "for var in ['CHP(Q_th)', 'Boiler(Q_th)']:\n", + " values = fs_full.solution[f'{var}|flow_rate'].values\n", + " fig.add_trace(go.Scatter(x=fs_full.timesteps, y=values, name=var, legendgroup=var, showlegend=True), row=1, col=1)\n", + "\n", + "# Expanded\n", + "for var in ['CHP(Q_th)', 'Boiler(Q_th)']:\n", + " values = fs_expanded.solution[f'{var}|flow_rate'].values\n", + " fig.add_trace(\n", + " go.Scatter(x=fs_expanded.timesteps, y=values, name=var, legendgroup=var, showlegend=False), row=2, col=1\n", + " )\n", + "\n", + "fig.update_layout(height=500, title='Heat Production Comparison')\n", + "fig.update_yaxes(title_text='MW', row=1, col=1)\n", + "fig.update_yaxes(title_text='MW', row=2, col=1)\n", + "fig.show()" + ] + }, + { + "cell_type": "markdown", + "id": "20", + "metadata": {}, + "source": [ + "## Visualize Clustered Heat Balance" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "21", + "metadata": {}, + "outputs": [], + "source": [ + "fs_clustered.statistics.plot.balance('Heat');" + ] + }, + { + "cell_type": "markdown", + "id": "22", + "metadata": {}, + "source": [ + "## API Reference\n", + "\n", + "### `transform.cluster_reduce()` Parameters\n", + "\n", + "| Parameter | Type | Description |\n", + "|-----------|------|-------------|\n", + "| `n_clusters` | `int` | Number of typical periods (e.g., 8 typical days) |\n", + "| `cluster_duration` | `str \\| float` | Duration per cluster ('1D', '24h') or hours |\n", + "| `weights` | `dict[str, float]` | Optional weights for time series in clustering |\n", + "| `time_series_for_high_peaks` | `list[str]` | **Essential**: Force inclusion of peak periods |\n", + "| `time_series_for_low_peaks` | `list[str]` | Force inclusion of minimum periods |\n", + "| `storage_cyclic` | `bool` | Enforce SOC[end] = SOC[start] (default: True) |\n", + "\n", + "### Peak Forcing Format\n", + "\n", + "```python\n", + "time_series_for_high_peaks = ['ComponentName(FlowName)|fixed_relative_profile']\n", + "```\n", + "\n", + "### Recommended Workflow\n", + "\n", + "```python\n", + "# Stage 1: Fast sizing\n", + "fs_sizing = flow_system.transform.cluster_reduce(\n", + " n_clusters=8,\n", + " cluster_duration='1D',\n", + " 
time_series_for_high_peaks=['Demand(Flow)|fixed_relative_profile'],\n", + ")\n", + "fs_sizing.optimize(solver)\n", + "\n", + "# Apply safety margin\n", + "sizes = {k: v.item() * 1.05 for k, v in fs_sizing.statistics.sizes.items()}\n", + "\n", + "# Stage 2: Accurate dispatch\n", + "fs_dispatch = flow_system.transform.fix_sizes(sizes)\n", + "fs_dispatch.optimize(solver)\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "23", + "metadata": {}, + "source": [ + "## Summary\n", + "\n", + "You learned how to:\n", + "\n", + "- Use **`cluster_reduce()`** to aggregate time series into typical periods\n", + "- Apply **peak forcing** to capture extreme demand days\n", + "- Use **two-stage optimization** for fast yet accurate investment decisions\n", + "- **Expand solutions** back to full resolution with `expand_solution()`\n", + "\n", + "### Key Takeaways\n", + "\n", + "1. **Always use peak forcing** (`time_series_for_high_peaks`) for demand time series\n", + "2. **Add safety margin** (5-10%) when fixing sizes from clustering\n", + "3. **Two-stage is recommended**: clustering for sizing, full resolution for dispatch\n", + "4. **Storage linking** ensures correct long-term storage behavior\n", + "\n", + "### Next Steps\n", + "\n", + "- **[08d-cluster-multiperiod](08d-cluster-multiperiod.ipynb)**: Clustering with multiple periods and scenarios" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb deleted file mode 100644 index 5967fda45..000000000 --- a/docs/notebooks/08c-clustering.ipynb +++ /dev/null @@ -1,913 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "id": "0", - "metadata": {}, - "source": [ - "# Clustering and Segmentation with tsam\n", - "\n", - "Speed up large problems by reducing time series complexity using the [tsam](https://github.com/FZJ-IEK3-VSA/tsam) package.\n", - "\n", - "This notebook demonstrates two complementary techniques:\n", - "\n", - "- **Clustering** (inter-period): Identify typical periods (e.g., 8 typical days from 365 days)\n", - "- **Segmentation** (inner-period): Reduce timesteps within periods (e.g., 24 hours to 4 segments)\n", - "\n", - "Both can be used independently or combined for maximum speedup.\n", - "\n", - "!!! 
note \"Requirements\"\n", - " This notebook requires the `tsam` package: `pip install tsam`" - ] - }, - { - "cell_type": "markdown", - "id": "1", - "metadata": {}, - "source": [ - "## Setup" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ - "import timeit\n", - "\n", - "import numpy as np\n", - "import pandas as pd\n", - "import plotly.graph_objects as go\n", - "from plotly.subplots import make_subplots\n", - "\n", - "import flixopt as fx\n", - "\n", - "fx.CONFIG.notebook()" - ] - }, - { - "cell_type": "markdown", - "id": "3", - "metadata": {}, - "source": [ - "## Load the FlowSystem\n", - "\n", - "We use a pre-built district heating system with real-world time series data (one month at 15-min resolution):" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "from pathlib import Path\n", - "\n", - "# Generate example data if not present (for local development)\n", - "data_file = Path('data/district_heating_system.nc4')\n", - "if not data_file.exists():\n", - " from data.generate_example_systems import create_district_heating_system\n", - "\n", - " fs = create_district_heating_system()\n", - " fs.to_netcdf(data_file)\n", - "\n", - "# Load the district heating system (real data from Zeitreihen2020.csv)\n", - "flow_system = fx.FlowSystem.from_netcdf(data_file)\n", - "\n", - "timesteps = flow_system.timesteps\n", - "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 96:.0f} days at 15-min resolution)')\n", - "print(f'Components: {list(flow_system.components.keys())}')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "5", - "metadata": {}, - "outputs": [], - "source": [ - "# Visualize first two weeks of data\n", - "heat_demand = flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile\n", - "electricity_price = flow_system.components['GridBuy'].outputs[0].effects_per_flow_hour['costs']\n", - "\n", - "fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.1)\n", - "\n", - "fig.add_trace(go.Scatter(x=timesteps[:1344], y=heat_demand.values[:1344], name='Heat Demand'), row=1, col=1)\n", - "fig.add_trace(go.Scatter(x=timesteps[:1344], y=electricity_price.values[:1344], name='Electricity Price'), row=2, col=1)\n", - "\n", - "fig.update_layout(height=400, title='First Two Weeks of Data')\n", - "fig.update_yaxes(title_text='Heat Demand [MW]', row=1, col=1)\n", - "fig.update_yaxes(title_text='El. 
Price [€/MWh]', row=2, col=1)\n", - "fig.show()" - ] - }, - { - "cell_type": "markdown", - "id": "6", - "metadata": {}, - "source": [ - "## Part 1: Clustering (Inter-Period Aggregation)\n", - "\n", - "**Clustering** groups similar periods together to find representative \"typical\" periods.\n", - "\n", - "For example, with 31 days of data:\n", - "- Original: 31 days × 96 timesteps/day = 2,976 timesteps \n", - "- Clustered (8 typical days): 8 days × 96 timesteps/day = 768 representative timesteps\n", - "\n", - "The optimizer only solves for 8 unique days, but weights results by how often each typical day occurred.\n", - "\n", - "```python\n", - "fs.transform.cluster(\n", - " n_clusters=8, # Find 8 typical days\n", - " cluster_duration='1D', # Each cluster is 1 day\n", - ")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [ - "# Cluster with 8 typical days (from 31 days)\n", - "fs_clustering_demo = flow_system.copy()\n", - "fs_clustered_demo = fs_clustering_demo.transform.cluster(n_clusters=8, cluster_duration='1D')\n", - "\n", - "# Get the clustering object to access tsam results\n", - "clustering = fs_clustered_demo._clustering_info['clustering']\n", - "\n", - "print(f'Original: {len(flow_system.timesteps)} timesteps ({len(flow_system.timesteps) / 96:.0f} days)')\n", - "print(f'Clustered: {clustering.nr_of_periods} typical days')\n", - "print(f'Cluster assignments: {list(clustering.tsam.clusterOrder)}')\n", - "\n", - "# Plot original vs aggregated data\n", - "clustering.plot()" - ] - }, - { - "cell_type": "markdown", - "id": "8", - "metadata": {}, - "source": [ - "### Comparing Different Cluster Counts\n", - "\n", - "More clusters = better accuracy but less speedup. 
Let's compare:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9", - "metadata": {}, - "outputs": [], - "source": [ - "# Test different numbers of clusters\n", - "cluster_configs = [4, 8, 12, 16]\n", - "clustering_results = {}\n", - "\n", - "for n in cluster_configs:\n", - " fs_test = flow_system.copy()\n", - " fs_clustered = fs_test.transform.cluster(n_clusters=n, cluster_duration='1D')\n", - " clustering_results[n] = fs_clustered._clustering_info['clustering']\n", - "\n", - "# Use heat demand for comparison (most relevant for district heating)\n", - "heat_demand_col = [c for c in clustering_results[4].original_data.columns if 'Heat' in c or 'Q_th' in c][0]\n", - "print(f'Comparing: {heat_demand_col}')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "10", - "metadata": {}, - "outputs": [], - "source": [ - "# Compare the aggregated data for each configuration\n", - "fig = make_subplots(\n", - " rows=2,\n", - " cols=2,\n", - " subplot_titles=[f'{n} Typical Days' for n in cluster_configs],\n", - " shared_xaxes=True,\n", - " shared_yaxes=True,\n", - " vertical_spacing=0.12,\n", - " horizontal_spacing=0.08,\n", - ")\n", - "\n", - "for i, (_n, clustering) in enumerate(clustering_results.items()):\n", - " row, col = divmod(i, 2)\n", - " row += 1\n", - " col += 1\n", - "\n", - " original = clustering.original_data[heat_demand_col]\n", - " aggregated = clustering.aggregated_data[heat_demand_col]\n", - "\n", - " fig.add_trace(\n", - " go.Scatter(\n", - " x=list(range(len(original))),\n", - " y=original.values,\n", - " name='Original',\n", - " line=dict(color='lightgray'),\n", - " showlegend=(i == 0),\n", - " ),\n", - " row=row,\n", - " col=col,\n", - " )\n", - " fig.add_trace(\n", - " go.Scatter(\n", - " x=list(range(len(aggregated))),\n", - " y=aggregated.values,\n", - " name='Clustered',\n", - " line=dict(color='blue', width=2),\n", - " showlegend=(i == 0),\n", - " ),\n", - " row=row,\n", - " col=col,\n", - " )\n", - "\n", - "fig.update_layout(\n", - " title='Heat Demand: Original vs Clustered',\n", - " height=500,\n", - " legend=dict(orientation='h', yanchor='bottom', y=1.02),\n", - ")\n", - "fig.update_xaxes(title_text='Timestep', row=2)\n", - "fig.update_yaxes(title_text='Heat Demand [MW]', col=1)\n", - "fig.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "11", - "metadata": {}, - "outputs": [], - "source": [ - "# Calculate error metrics for each configuration\n", - "metrics = []\n", - "for n, clustering in clustering_results.items():\n", - " original = clustering.original_data[heat_demand_col].values\n", - " aggregated = clustering.aggregated_data[heat_demand_col].values\n", - "\n", - " rmse = np.sqrt(np.mean((original - aggregated) ** 2))\n", - " mae = np.mean(np.abs(original - aggregated))\n", - " max_error = np.max(np.abs(original - aggregated))\n", - " correlation = np.corrcoef(original, aggregated)[0, 1]\n", - "\n", - " metrics.append(\n", - " {\n", - " 'Typical Days': n,\n", - " 'RMSE': rmse,\n", - " 'MAE': mae,\n", - " 'Max Error': max_error,\n", - " 'Correlation': correlation,\n", - " }\n", - " )\n", - "\n", - "metrics_df = pd.DataFrame(metrics).set_index('Typical Days')\n", - "metrics_df.style.format(\n", - " {\n", - " 'RMSE': '{:.2f}',\n", - " 'MAE': '{:.2f}',\n", - " 'Max Error': '{:.2f}',\n", - " 'Correlation': '{:.4f}',\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "12", - "metadata": {}, - "source": [ - "## Part 2: Segmentation (Inner-Period Aggregation)\n", - "\n", - 
"**Segmentation** reduces the number of timesteps *within* each period by grouping similar consecutive timesteps.\n", - "\n", - "For example, with 15-minute resolution data:\n", - "- Original day: 96 timesteps (24h × 4 per hour)\n", - "- Segmented (12 segments): 12 representative timesteps per day (~2 hours each)\n", - "\n", - "This is useful when you have high-resolution data but don't need that granularity for your analysis.\n", - "\n", - "```python\n", - "fs.transform.cluster(\n", - " n_clusters=None, # Skip clustering (keep all periods)\n", - " cluster_duration='1D', # Segment within each day\n", - " n_segments=12, # Reduce to 12 segments per day\n", - ")\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "13", - "metadata": {}, - "outputs": [], - "source": [ - "# Segmentation only: reduce 96 timesteps/day to 12 segments/day\n", - "fs_segmentation_demo = flow_system.copy()\n", - "fs_segmented_demo = fs_segmentation_demo.transform.cluster(\n", - " n_clusters=None, # No clustering - keep all 31 days\n", - " cluster_duration='1D', # Segment within each day\n", - " n_segments=12, # 12 segments per day (~2 hours each)\n", - ")\n", - "\n", - "# Get the clustering object\n", - "segmentation = fs_segmented_demo._clustering_info['clustering']\n", - "\n", - "print('Original: 96 timesteps per day (15-min resolution)')\n", - "print(f'Segmented: {segmentation.n_segments} segments per day (~2 hours each)')\n", - "\n", - "# Plot original vs segmented data\n", - "segmentation.plot()" - ] - }, - { - "cell_type": "markdown", - "id": "14", - "metadata": {}, - "source": [ - "### Comparing Different Segment Counts\n", - "\n", - "More segments = better accuracy but less speedup:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "15", - "metadata": {}, - "outputs": [], - "source": [ - "# Test different numbers of segments\n", - "segment_configs = [6, 12, 24, 48]\n", - "segmentation_results = {}\n", - "\n", - "for n_seg in segment_configs:\n", - " fs_test = flow_system.copy()\n", - " fs_seg = fs_test.transform.cluster(n_clusters=None, cluster_duration='1D', n_segments=n_seg)\n", - " segmentation_results[n_seg] = fs_seg._clustering_info['clustering']\n", - "\n", - "# Use heat demand for comparison\n", - "heat_demand_col = [c for c in segmentation_results[6].original_data.columns if 'Heat' in c or 'Q_th' in c][0]\n", - "print(f'Comparing: {heat_demand_col}')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "16", - "metadata": {}, - "outputs": [], - "source": [ - "# Compare the segmented data for first day only (clearer visualization)\n", - "fig = make_subplots(\n", - " rows=2,\n", - " cols=2,\n", - " subplot_titles=[f'{n} Segments per Day' for n in segment_configs],\n", - " shared_xaxes=True,\n", - " shared_yaxes=True,\n", - " vertical_spacing=0.12,\n", - " horizontal_spacing=0.08,\n", - ")\n", - "\n", - "# Only show first day (96 timesteps) for clarity\n", - "day_length = 96\n", - "\n", - "for i, (_n_seg, seg_result) in enumerate(segmentation_results.items()):\n", - " row, col = divmod(i, 2)\n", - " row += 1\n", - " col += 1\n", - "\n", - " original = seg_result.original_data[heat_demand_col][:day_length]\n", - " aggregated = seg_result.aggregated_data[heat_demand_col][:day_length]\n", - "\n", - " fig.add_trace(\n", - " go.Scatter(\n", - " x=list(range(len(original))),\n", - " y=original.values,\n", - " name='Original',\n", - " line=dict(color='lightgray'),\n", - " showlegend=(i == 0),\n", - " ),\n", - " row=row,\n", - " col=col,\n", 
- " )\n", - " fig.add_trace(\n", - " go.Scatter(\n", - " x=list(range(len(aggregated))),\n", - " y=aggregated.values,\n", - " name='Segmented',\n", - " line=dict(color='green', width=2),\n", - " showlegend=(i == 0),\n", - " ),\n", - " row=row,\n", - " col=col,\n", - " )\n", - "\n", - "fig.update_layout(\n", - " title='Heat Demand (First Day): Original vs Segmented',\n", - " height=500,\n", - " legend=dict(orientation='h', yanchor='bottom', y=1.02),\n", - ")\n", - "fig.update_xaxes(title_text='Timestep', row=2)\n", - "fig.update_yaxes(title_text='Heat Demand [MW]', col=1)\n", - "fig.show()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "17", - "metadata": {}, - "outputs": [], - "source": [ - "# Calculate error metrics for segmentation\n", - "seg_metrics = []\n", - "for n_seg, seg_result in segmentation_results.items():\n", - " original = seg_result.original_data[heat_demand_col].values\n", - " aggregated = seg_result.aggregated_data[heat_demand_col].values\n", - "\n", - " rmse = np.sqrt(np.mean((original - aggregated) ** 2))\n", - " mae = np.mean(np.abs(original - aggregated))\n", - " max_error = np.max(np.abs(original - aggregated))\n", - " correlation = np.corrcoef(original, aggregated)[0, 1]\n", - "\n", - " seg_metrics.append(\n", - " {\n", - " 'Segments': n_seg,\n", - " 'RMSE': rmse,\n", - " 'MAE': mae,\n", - " 'Max Error': max_error,\n", - " 'Correlation': correlation,\n", - " }\n", - " )\n", - "\n", - "seg_metrics_df = pd.DataFrame(seg_metrics).set_index('Segments')\n", - "seg_metrics_df.style.format(\n", - " {\n", - " 'RMSE': '{:.2f}',\n", - " 'MAE': '{:.2f}',\n", - " 'Max Error': '{:.2f}',\n", - " 'Correlation': '{:.4f}',\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "18", - "metadata": {}, - "source": [ - "## Part 3: Combined Clustering + Segmentation\n", - "\n", - "For maximum speedup, combine both techniques:\n", - "\n", - "```python\n", - "fs.transform.cluster(\n", - " n_clusters=8, # 8 typical days (inter-period)\n", - " cluster_duration='1D',\n", - " n_segments=12, # 12 segments per day (inner-period)\n", - ")\n", - "```\n", - "\n", - "This reduces 2,976 timesteps to just 8 × 12 = 96 representative timesteps!" 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "19", - "metadata": {}, - "outputs": [], - "source": [ - "# Combined: 8 typical days × 12 segments each\n", - "fs_combined_demo = flow_system.copy()\n", - "fs_combined = fs_combined_demo.transform.cluster(\n", - " n_clusters=8,\n", - " cluster_duration='1D',\n", - " n_segments=12,\n", - ")\n", - "\n", - "combined_clustering = fs_combined._clustering_info['clustering']\n", - "\n", - "print(f'Original: {len(flow_system.timesteps)} timesteps')\n", - "print(\n", - " f'Combined: {combined_clustering.nr_of_periods} typical days × {combined_clustering.n_segments} segments = {combined_clustering.nr_of_periods * combined_clustering.n_segments} representative timesteps'\n", - ")\n", - "\n", - "# Plot the combined result\n", - "combined_clustering.plot()" - ] - }, - { - "cell_type": "markdown", - "id": "20", - "metadata": {}, - "source": [ - "## Performance Comparison\n", - "\n", - "Now let's compare the optimization performance of all approaches.\n", - "\n", - "### Baseline: Full Optimization (No Aggregation)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "21", - "metadata": { - "jupyter": { - "is_executing": true - } - }, - "outputs": [], - "source": [ - "solver = fx.solvers.HighsSolver(mip_gap=0.01)\n", - "\n", - "start = timeit.default_timer()\n", - "fs_full = flow_system.copy()\n", - "fs_full.optimize(solver)\n", - "time_full = timeit.default_timer() - start\n", - "\n", - "print(f'Full optimization: {time_full:.2f} seconds')\n", - "print(f'Cost: {fs_full.solution[\"costs\"].item():,.0f} €')\n", - "print('\\nOptimized sizes:')\n", - "for name, size in fs_full.statistics.sizes.items():\n", - " print(f' {name}: {float(size.item()):.1f}')" - ] - }, - { - "cell_type": "markdown", - "id": "22", - "metadata": {}, - "source": [ - "### Clustering Only (8 Typical Days)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "23", - "metadata": {}, - "outputs": [], - "source": [ - "start = timeit.default_timer()\n", - "\n", - "# Cluster into 8 typical days\n", - "fs_clustered = flow_system.transform.cluster(\n", - " n_clusters=8,\n", - " cluster_duration='1D',\n", - ")\n", - "\n", - "fs_clustered.optimize(solver)\n", - "time_clustered = timeit.default_timer() - start\n", - "\n", - "print(f'Clustered optimization: {time_clustered:.2f} seconds')\n", - "print(f'Cost: {fs_clustered.solution[\"costs\"].item():,.0f} €')\n", - "print(f'Speedup: {time_full / time_clustered:.1f}x')\n", - "print('\\nOptimized sizes:')\n", - "for name, size in fs_clustered.statistics.sizes.items():\n", - " print(f' {name}: {float(size.item()):.1f}')" - ] - }, - { - "cell_type": "markdown", - "id": "24", - "metadata": {}, - "source": [ - "### Segmentation Only (12 Segments per Day)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "25", - "metadata": {}, - "outputs": [], - "source": [ - "start = timeit.default_timer()\n", - "\n", - "# Segmentation only: reduce timesteps within each day\n", - "fs_segmented = flow_system.transform.cluster(\n", - " n_clusters=None, # No clustering\n", - " cluster_duration='1D',\n", - " n_segments=12, # 12 segments per day\n", - ")\n", - "\n", - "fs_segmented.optimize(solver)\n", - "time_segmented = timeit.default_timer() - start\n", - "\n", - "print(f'Segmentation optimization: {time_segmented:.2f} seconds')\n", - "print(f'Cost: {fs_segmented.solution[\"costs\"].item():,.0f} €')\n", - "print(f'Speedup: {time_full / time_segmented:.1f}x')\n", - "print('\\nOptimized 
sizes:')\n", - "for name, size in fs_segmented.statistics.sizes.items():\n", - " print(f' {name}: {float(size.item()):.1f}')" - ] - }, - { - "cell_type": "markdown", - "id": "26", - "metadata": {}, - "source": [ - "### Combined: Clustering + Segmentation" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "27", - "metadata": {}, - "outputs": [], - "source": [ - "start = timeit.default_timer()\n", - "\n", - "# Combined: 8 typical days × 12 segments each\n", - "fs_combined_opt = flow_system.transform.cluster(\n", - " n_clusters=8,\n", - " cluster_duration='1D',\n", - " n_segments=12,\n", - ")\n", - "\n", - "fs_combined_opt.optimize(solver)\n", - "time_combined = timeit.default_timer() - start\n", - "\n", - "print(f'Combined optimization: {time_combined:.2f} seconds')\n", - "print(f'Cost: {fs_combined_opt.solution[\"costs\"].item():,.0f} €')\n", - "print(f'Speedup: {time_full / time_combined:.1f}x')\n", - "print('\\nOptimized sizes:')\n", - "for name, size in fs_combined_opt.statistics.sizes.items():\n", - " print(f' {name}: {float(size.item()):.1f}')" - ] - }, - { - "cell_type": "markdown", - "id": "28", - "metadata": {}, - "source": [ - "## Compare Results" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "29", - "metadata": {}, - "outputs": [], - "source": [ - "results = {\n", - " 'Full (baseline)': {\n", - " 'Time [s]': time_full,\n", - " 'Cost [€]': fs_full.solution['costs'].item(),\n", - " 'CHP Size': fs_full.statistics.sizes['CHP(Q_th)'].item(),\n", - " 'Boiler Size': fs_full.statistics.sizes['Boiler(Q_th)'].item(),\n", - " 'Storage Size': fs_full.statistics.sizes['Storage'].item(),\n", - " },\n", - " 'Clustering (8 days)': {\n", - " 'Time [s]': time_clustered,\n", - " 'Cost [€]': fs_clustered.solution['costs'].item(),\n", - " 'CHP Size': fs_clustered.statistics.sizes['CHP(Q_th)'].item(),\n", - " 'Boiler Size': fs_clustered.statistics.sizes['Boiler(Q_th)'].item(),\n", - " 'Storage Size': fs_clustered.statistics.sizes['Storage'].item(),\n", - " },\n", - " 'Segmentation (12 seg)': {\n", - " 'Time [s]': time_segmented,\n", - " 'Cost [€]': fs_segmented.solution['costs'].item(),\n", - " 'CHP Size': fs_segmented.statistics.sizes['CHP(Q_th)'].item(),\n", - " 'Boiler Size': fs_segmented.statistics.sizes['Boiler(Q_th)'].item(),\n", - " 'Storage Size': fs_segmented.statistics.sizes['Storage'].item(),\n", - " },\n", - " 'Combined (8×12)': {\n", - " 'Time [s]': time_combined,\n", - " 'Cost [€]': fs_combined_opt.solution['costs'].item(),\n", - " 'CHP Size': fs_combined_opt.statistics.sizes['CHP(Q_th)'].item(),\n", - " 'Boiler Size': fs_combined_opt.statistics.sizes['Boiler(Q_th)'].item(),\n", - " 'Storage Size': fs_combined_opt.statistics.sizes['Storage'].item(),\n", - " },\n", - "}\n", - "\n", - "comparison = pd.DataFrame(results).T\n", - "baseline_cost = comparison.loc['Full (baseline)', 'Cost [€]']\n", - "baseline_time = comparison.loc['Full (baseline)', 'Time [s]']\n", - "comparison['Cost Gap [%]'] = ((comparison['Cost [€]'] - baseline_cost) / abs(baseline_cost) * 100).round(2)\n", - "comparison['Speedup'] = (baseline_time / comparison['Time [s]']).round(1)\n", - "\n", - "comparison.style.format(\n", - " {\n", - " 'Time [s]': '{:.2f}',\n", - " 'Cost [€]': '{:,.0f}',\n", - " 'CHP Size': '{:.1f}',\n", - " 'Boiler Size': '{:.1f}',\n", - " 'Storage Size': '{:.0f}',\n", - " 'Cost Gap [%]': '{:.2f}',\n", - " 'Speedup': '{:.1f}x',\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "id": "30", - "metadata": {}, - "source": [ - "## Multi-Period 
Clustering\n", - "\n", - "For multi-year investment studies, clustering is applied **independently per period** (year).\n", - "Each year gets its own set of typical days:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "31", - "metadata": {}, - "outputs": [], - "source": [ - "# Load raw data for multi-period example\n", - "data = pd.read_csv('../../examples/resources/Zeitreihen2020.csv', index_col=0, parse_dates=True).sort_index()\n", - "data_2w = data['2020-01-01':'2020-01-14 23:45:00'] # Two weeks\n", - "timesteps_2w = data_2w.index\n", - "\n", - "# Build system with periods\n", - "fs_mp = fx.FlowSystem(\n", - " timesteps_2w,\n", - " periods=pd.Index([2024, 2025, 2026], name='year'),\n", - ")\n", - "\n", - "# Scale demands by year (growing demand)\n", - "heat_demand_2w = data_2w['Q_Netz/MW'].to_numpy()\n", - "elec_demand_2w = data_2w['P_Netz/MW'].to_numpy()\n", - "elec_price_2w = data_2w['Strompr.€/MWh'].to_numpy()\n", - "gas_price_2w = data_2w['Gaspr.€/MWh'].to_numpy()\n", - "\n", - "# Create period-varying profiles (demand grows 5% per year)\n", - "heat_profile = fx.TimeSeriesData(\n", - " np.stack([heat_demand_2w * 1.0, heat_demand_2w * 1.05, heat_demand_2w * 1.10]),\n", - " dims=['period', 'time'],\n", - ")\n", - "elec_profile = fx.TimeSeriesData(\n", - " np.stack([elec_demand_2w * 1.0, elec_demand_2w * 1.05, elec_demand_2w * 1.10]),\n", - " dims=['period', 'time'],\n", - ")\n", - "\n", - "fs_mp.add_elements(\n", - " fx.Bus('Electricity'),\n", - " fx.Bus('Heat'),\n", - " fx.Bus('Gas'),\n", - " fx.Effect('costs', '€', is_standard=True, is_objective=True),\n", - " fx.linear_converters.Boiler(\n", - " 'Boiler',\n", - " thermal_efficiency=0.85,\n", - " thermal_flow=fx.Flow('Q_th', bus='Heat', size=350),\n", - " fuel_flow=fx.Flow('Q_fu', bus='Gas'),\n", - " ),\n", - " fx.Source(\n", - " 'GasGrid',\n", - " outputs=[fx.Flow('Q_Gas', bus='Gas', size=1000, effects_per_flow_hour={'costs': gas_price_2w})],\n", - " ),\n", - " fx.Source(\n", - " 'GridBuy',\n", - " outputs=[fx.Flow('P_el', bus='Electricity', size=1000, effects_per_flow_hour={'costs': elec_price_2w})],\n", - " ),\n", - " fx.Sink('HeatDemand', inputs=[fx.Flow('Q_th', bus='Heat', size=1, fixed_relative_profile=heat_profile)]),\n", - " fx.Sink('ElecDemand', inputs=[fx.Flow('P_el', bus='Electricity', size=1, fixed_relative_profile=elec_profile)]),\n", - ")\n", - "\n", - "print(f'Multi-period system: {len(fs_mp.timesteps)} timesteps × {len(fs_mp.periods)} periods')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "32", - "metadata": {}, - "outputs": [], - "source": [ - "# Cluster - each period gets clustered independently\n", - "fs_mp_clustered = fs_mp.transform.cluster(n_clusters=4, cluster_duration='1D')\n", - "\n", - "# Get clustering info\n", - "clustering_info = fs_mp_clustered._clustering_info\n", - "print(f'Clustering was applied to {len(clustering_info[\"clustering_results\"])} period(s):')\n", - "for (period, _scenario), _ in clustering_info['clustering_results'].items():\n", - " print(f' - period={period}')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "33", - "metadata": {}, - "outputs": [], - "source": [ - "# Optimize\n", - "fs_mp_clustered.optimize(solver)\n", - "print(f'Multi-period clustered cost: {fs_mp_clustered.solution[\"costs\"].sum().item():,.0f} €')" - ] - }, - { - "cell_type": "markdown", - "id": "34", - "metadata": {}, - "source": [ - "## API Reference\n", - "\n", - "### `transform.cluster()` Parameters\n", - "\n", - "| Parameter | Type | 
Description |\n", - "|-----------|------|-------------|\n", - "| `n_clusters` | `int \\| None` | Number of typical periods (e.g., 8 typical days). Set to `None` for segmentation-only. |\n", - "| `cluster_duration` | `str \\| float` | Duration per cluster ('1D', '24h', or hours as float) |\n", - "| `n_segments` | `int \\| None` | Segments within each period (inner-period aggregation). Default: `None` (no segmentation) |\n", - "| `aggregate_data` | `bool` | If True (default), aggregate time series data |\n", - "| `include_storage` | `bool` | Include storage in clustering constraints (default: True) |\n", - "| `flexibility_percent` | `float` | Allow binary variable deviations (default: 0) |\n", - "| `flexibility_penalty` | `float` | Penalty for deviations (default: 0) |\n", - "| `time_series_for_high_peaks` | `list` | Force inclusion of high-value periods |\n", - "| `time_series_for_low_peaks` | `list` | Force inclusion of low-value periods |\n", - "\n", - "### Common Patterns\n", - "\n", - "```python\n", - "# Clustering only: 8 typical days from a year\n", - "fs.transform.cluster(n_clusters=8, cluster_duration='1D')\n", - "\n", - "# Segmentation only: reduce to 12 segments per day\n", - "fs.transform.cluster(n_clusters=None, cluster_duration='1D', n_segments=12)\n", - "\n", - "# Combined: 8 typical days × 12 segments each\n", - "fs.transform.cluster(n_clusters=8, cluster_duration='1D', n_segments=12)\n", - "\n", - "# Force inclusion of peak demand periods\n", - "fs.transform.cluster(\n", - " n_clusters=8,\n", - " cluster_duration='1D',\n", - " time_series_for_high_peaks=[heat_demand_ts],\n", - ")\n", - "```" - ] - }, - { - "cell_type": "markdown", - "id": "35", - "metadata": {}, - "source": [ - "## Summary\n", - "\n", - "You learned how to:\n", - "\n", - "- Use **clustering** (`n_clusters`) to identify typical periods (inter-period aggregation)\n", - "- Use **segmentation** (`n_segments`) to reduce timesteps within periods (inner-period aggregation)\n", - "- **Combine both** techniques for maximum speedup\n", - "- Cluster **multi-period** FlowSystems (each period independently)\n", - "\n", - "### When to Use Each Technique\n", - "\n", - "| Technique | Use Case | Example |\n", - "|-----------|----------|---------|\n", - "| **Clustering** | Many similar periods (days, weeks) | 365 days → 12 typical days |\n", - "| **Segmentation** | High-resolution data not needed | 96 timesteps/day → 12 segments |\n", - "| **Combined** | Large problems with high resolution | 365 × 96 → 12 × 12 = 144 timesteps |\n", - "\n", - "### Accuracy vs. 
Speed Trade-off\n", - "\n", - "| Approach | Speedup | Accuracy | Best For |\n", - "|----------|---------|----------|----------|\n", - "| More clusters/segments | Lower | Higher | Final results |\n", - "| Fewer clusters/segments | Higher | Lower | Screening, exploration |\n", - "\n", - "### Next Steps\n", - "\n", - "- **[08a-Aggregation](08a-aggregation.ipynb)**: Other aggregation techniques (resampling, two-stage)\n", - "- **[08b-Rolling Horizon](08b-rolling-horizon.ipynb)**: Sequential optimization for long time series" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.11" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/docs/notebooks/08d-cluster-multiperiod.ipynb b/docs/notebooks/08d-cluster-multiperiod.ipynb new file mode 100644 index 000000000..4e8d6aa94 --- /dev/null +++ b/docs/notebooks/08d-cluster-multiperiod.ipynb @@ -0,0 +1,596 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0", + "metadata": {}, + "source": [ + "# Multi-Period Clustering with `cluster_reduce()`\n", + "\n", + "Combine time series clustering with multi-period investment optimization.\n", + "\n", + "This notebook demonstrates:\n", + "\n", + "- **Multi-period modeling**: Optimize investments across multiple planning periods (years)\n", + "- **Scenario analysis**: Handle demand uncertainty with weighted scenarios\n", + "- **Clustering per period**: Apply typical-period aggregation independently for each period/scenario\n", + "- **Scalability**: Reduce computational complexity for long-horizon planning\n", + "\n", + "!!! note \"Requirements\"\n", + " This notebook requires the `tsam` package: `pip install tsam`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "import timeit\n", + "from pathlib import Path\n", + "\n", + "import numpy as np\n", + "import pandas as pd\n", + "import plotly.graph_objects as go\n", + "from plotly.subplots import make_subplots\n", + "\n", + "import flixopt as fx\n", + "\n", + "fx.CONFIG.notebook()" + ] + }, + { + "cell_type": "markdown", + "id": "2", + "metadata": {}, + "source": [ + "## Load the Multi-Period System\n", + "\n", + "We use a pre-built multi-period heating system with:\n", + "- **3 planning periods** (years 2024, 2025, 2026)\n", + "- **2 scenarios** (high demand 30%, low demand 70%)\n", + "- **2 weeks** at hourly resolution (336 timesteps)\n", + "\n", + "This represents a capacity expansion problem where we optimize component sizes once,\n", + "but operations are simulated across multiple future years and demand scenarios." 
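+ "\n",
+ "For orientation, a minimal sketch of how such a system could be constructed from scratch (the `scenarios` and `scenario_weights` arguments are assumptions inferred from the loaded file, not a verified signature; the `periods` argument follows the documented API):\n",
+ "\n",
+ "```python\n",
+ "import pandas as pd\n",
+ "\n",
+ "import flixopt as fx\n",
+ "\n",
+ "# Two weeks at hourly resolution, matching the loaded example system\n",
+ "timesteps = pd.date_range('2024-01-01', periods=336, freq='h')\n",
+ "\n",
+ "fs_sketch = fx.FlowSystem(\n",
+ "    timesteps,\n",
+ "    periods=pd.Index([2024, 2025, 2026], name='year'),\n",
+ "    scenarios=pd.Index(['high_demand', 'low_demand'], name='scenario'),  # assumed argument\n",
+ "    scenario_weights=[0.3, 0.7],  # assumed argument: 30% high, 70% low\n",
+ ")\n",
+ "```"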
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "# Generate example data if not present\n", + "data_file = Path('data/multiperiod_system.nc4')\n", + "if not data_file.exists():\n", + " from data.generate_example_systems import create_multiperiod_system\n", + "\n", + " fs = create_multiperiod_system()\n", + " fs.to_netcdf(data_file)\n", + "\n", + "# Load the multi-period system\n", + "flow_system = fx.FlowSystem.from_netcdf(data_file)\n", + "\n", + "print(f'Timesteps: {len(flow_system.timesteps)} ({len(flow_system.timesteps) // 24} days)')\n", + "print(f'Periods: {list(flow_system.periods.values)}')\n", + "print(f'Scenarios: {list(flow_system.scenarios.values)}')\n", + "print(f'Scenario weights: {flow_system.scenario_weights.values}')\n", + "print(f'\\nComponents: {list(flow_system.components.keys())}')" + ] + }, + { + "cell_type": "markdown", + "id": "4", + "metadata": {}, + "source": [ + "## Selecting a Subset with `transform.isel()`\n", + "\n", + "For demonstration purposes, we'll use only the first week of data.\n", + "The `isel()` method (index select) lets you slice FlowSystems by time:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "# Select first week only (168 hours)\n", + "flow_system = flow_system.transform.isel(time=slice(0, 168))\n", + "\n", + "print(f'After isel: {len(flow_system.timesteps)} timesteps ({len(flow_system.timesteps) // 24} days)')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", + "metadata": {}, + "outputs": [], + "source": [ + "# Visualize demand scenarios\n", + "heat_demand = flow_system.components['Building'].inputs[0].fixed_relative_profile\n", + "\n", + "fig = go.Figure()\n", + "for scenario in flow_system.scenarios:\n", + " fig.add_trace(\n", + " go.Scatter(\n", + " x=flow_system.timesteps,\n", + " y=heat_demand.sel(scenario=scenario).values,\n", + " name=f'{scenario}',\n", + " line=dict(width=1),\n", + " )\n", + " )\n", + "\n", + "fig.update_layout(\n", + " height=350,\n", + " title='Heat Demand by Scenario (One Week)',\n", + " xaxis_title='Time',\n", + " yaxis_title='Heat Demand [kW]',\n", + ")\n", + "fig.show()" + ] + }, + { + "cell_type": "markdown", + "id": "7", + "metadata": {}, + "source": [ + "## Full Optimization (Baseline)\n", + "\n", + "First, solve the complete problem with all timesteps across all periods and scenarios:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8", + "metadata": {}, + "outputs": [], + "source": [ + "solver = fx.solvers.HighsSolver(mip_gap=0.01)\n", + "\n", + "start = timeit.default_timer()\n", + "fs_full = flow_system.copy()\n", + "fs_full.optimize(solver)\n", + "time_full = timeit.default_timer() - start\n", + "\n", + "print(f'Full optimization: {time_full:.2f} seconds')\n", + "print(f'Total cost: {fs_full.solution[\"costs\"].item():,.0f} €')\n", + "print('\\nOptimized sizes:')\n", + "for name, size in fs_full.statistics.sizes.items():\n", + " print(f' {name}: {float(size.item()):.1f}')" + ] + }, + { + "cell_type": "markdown", + "id": "9", + "metadata": {}, + "source": [ + "## Multi-Period Clustering with `cluster_reduce()`\n", + "\n", + "When applied to a multi-period system, `cluster_reduce()` clusters **each period/scenario combination independently**.\n", + "This is because demand patterns and optimal operations may differ across:\n", + "\n", + "- **Periods**: Different years may have different 
characteristics\n", + "- **Scenarios**: High vs low demand scenarios need different representative days\n", + "\n", + "The investment decisions (sizes) remain consistent across all periods and scenarios,\n", + "while the operational patterns are optimized for each cluster." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "start = timeit.default_timer()\n", + "\n", + "# Force inclusion of peak demand periods\n", + "peak_series = ['Building(Heat)|fixed_relative_profile']\n", + "\n", + "# Cluster to 3 typical days (from 7 days)\n", + "fs_clustered = flow_system.transform.cluster_reduce(\n", + " n_clusters=3,\n", + " cluster_duration='1D',\n", + " time_series_for_high_peaks=peak_series,\n", + ")\n", + "\n", + "time_clustering = timeit.default_timer() - start\n", + "\n", + "print(f'Clustering time: {time_clustering:.2f} seconds')\n", + "print(f'Reduced: {len(flow_system.timesteps)} → {len(fs_clustered.timesteps)} timesteps per period')\n", + "print('Total problem reduction: 7 days × 3 periods × 2 scenarios → 3 days × 3 × 2')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "# Optimize the reduced system\n", + "start = timeit.default_timer()\n", + "fs_clustered.optimize(solver)\n", + "time_clustered = timeit.default_timer() - start\n", + "\n", + "print(f'Clustered optimization: {time_clustered:.2f} seconds')\n", + "print(f'Total cost: {fs_clustered.solution[\"costs\"].item():,.0f} €')\n", + "print(f'\\nSpeedup vs full: {time_full / (time_clustering + time_clustered):.1f}x')\n", + "print('\\nOptimized sizes:')\n", + "for name, size in fs_clustered.statistics.sizes.items():\n", + " print(f' {name}: {float(size.item()):.1f}')" + ] + }, + { + "cell_type": "markdown", + "id": "12", + "metadata": {}, + "source": [ + "## Understand the Cluster Structure\n", + "\n", + "Let's inspect how days were grouped into clusters:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "info = fs_clustered._cluster_info\n", + "\n", + "print('Clustering Configuration:')\n", + "print(f' Typical periods (clusters): {info[\"n_clusters\"]}')\n", + "print(f' Timesteps per cluster: {info[\"timesteps_per_cluster\"]}')\n", + "print(f' Original periods: {info[\"n_original_periods\"]}')\n", + "\n", + "# The cluster_order shows which cluster each original day belongs to\n", + "cluster_order = info['cluster_order']\n", + "day_names = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']\n", + "\n", + "print('\\nCluster assignments per day:')\n", + "for i, cluster_id in enumerate(cluster_order):\n", + " print(f' {day_names[i]}: Cluster {cluster_id}')\n", + "\n", + "# Cluster occurrences (how many original days each cluster represents)\n", + "unique, counts = np.unique(cluster_order, return_counts=True)\n", + "print('\\nCluster weights (days represented):')\n", + "for cluster_id, count in zip(unique, counts, strict=False):\n", + " print(f' Cluster {cluster_id}: {count} days')" + ] + }, + { + "cell_type": "markdown", + "id": "14", + "metadata": {}, + "source": [ + "## Two-Stage Workflow for Multi-Period\n", + "\n", + "For investment optimization across multiple periods, the recommended workflow is:\n", + "\n", + "1. **Stage 1**: Fast sizing with clustering (reduced timesteps)\n", + "2. 
**Stage 2**: Fix sizes and run full-resolution dispatch\n", + "\n", + "This gives accurate investment decisions while maintaining computational tractability." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "# Stage 1 already done - apply safety margin\n", + "SAFETY_MARGIN = 1.10 # 10% buffer for multi-period uncertainty\n", + "\n", + "sizes_with_margin = {name: float(size.item()) * SAFETY_MARGIN for name, size in fs_clustered.statistics.sizes.items()}\n", + "\n", + "print('Stage 1: Sizing with clustering')\n", + "print(f' Time: {time_clustering + time_clustered:.2f} seconds')\n", + "print(f' Cost estimate: {fs_clustered.solution[\"costs\"].item():,.0f} €')\n", + "print(f'\\nSizes with {(SAFETY_MARGIN - 1) * 100:.0f}% safety margin:')\n", + "for name, size in sizes_with_margin.items():\n", + " original = fs_clustered.statistics.sizes[name].item()\n", + " print(f' {name}: {original:.1f} → {size:.1f}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "# Stage 2: Full resolution dispatch with fixed sizes\n", + "print('Stage 2: Full resolution dispatch')\n", + "start = timeit.default_timer()\n", + "\n", + "fs_dispatch = flow_system.transform.fix_sizes(sizes_with_margin)\n", + "fs_dispatch.optimize(solver)\n", + "\n", + "time_dispatch = timeit.default_timer() - start\n", + "\n", + "print(f' Time: {time_dispatch:.2f} seconds')\n", + "print(f' Actual cost: {fs_dispatch.solution[\"costs\"].item():,.0f} €')\n", + "\n", + "# Total comparison\n", + "total_two_stage = time_clustering + time_clustered + time_dispatch\n", + "print(f'\\nTotal two-stage time: {total_two_stage:.2f} seconds')\n", + "print(f'Speedup vs full: {time_full / total_two_stage:.1f}x')" + ] + }, + { + "cell_type": "markdown", + "id": "17", + "metadata": {}, + "source": [ + "## Compare Results Across Methods" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "results = {\n", + " 'Full (baseline)': {\n", + " 'Time [s]': time_full,\n", + " 'Cost [€]': fs_full.solution['costs'].item(),\n", + " 'Boiler': fs_full.statistics.sizes['Boiler(Heat)'].item(),\n", + " 'Storage': fs_full.statistics.sizes['ThermalStorage'].item(),\n", + " },\n", + " 'Clustered (3 days)': {\n", + " 'Time [s]': time_clustering + time_clustered,\n", + " 'Cost [€]': fs_clustered.solution['costs'].item(),\n", + " 'Boiler': fs_clustered.statistics.sizes['Boiler(Heat)'].item(),\n", + " 'Storage': fs_clustered.statistics.sizes['ThermalStorage'].item(),\n", + " },\n", + " 'Two-Stage': {\n", + " 'Time [s]': total_two_stage,\n", + " 'Cost [€]': fs_dispatch.solution['costs'].item(),\n", + " 'Boiler': sizes_with_margin['Boiler(Heat)'],\n", + " 'Storage': sizes_with_margin['ThermalStorage'],\n", + " },\n", + "}\n", + "\n", + "comparison = pd.DataFrame(results).T\n", + "baseline_cost = comparison.loc['Full (baseline)', 'Cost [€]']\n", + "baseline_time = comparison.loc['Full (baseline)', 'Time [s]']\n", + "comparison['Cost Gap [%]'] = ((comparison['Cost [€]'] - baseline_cost) / abs(baseline_cost) * 100).round(2)\n", + "comparison['Speedup'] = (baseline_time / comparison['Time [s]']).round(1)\n", + "\n", + "comparison.style.format(\n", + " {\n", + " 'Time [s]': '{:.2f}',\n", + " 'Cost [€]': '{:,.0f}',\n", + " 'Boiler': '{:.1f}',\n", + " 'Storage': '{:.0f}',\n", + " 'Cost Gap [%]': '{:.2f}',\n", + " 'Speedup': '{:.1f}x',\n", + " }\n", + ")" + ] + 
}, + { + "cell_type": "markdown", + "id": "19", + "metadata": {}, + "source": [ + "## Visualize Results by Period and Scenario" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": {}, + "outputs": [], + "source": [ + "# Plot heat balance for one period/scenario combination\n", + "period = 2024\n", + "scenario = 'high_demand'\n", + "\n", + "fig = make_subplots(\n", + " rows=2,\n", + " cols=1,\n", + " shared_xaxes=True,\n", + " subplot_titles=['Full Optimization', 'Clustered Optimization'],\n", + " vertical_spacing=0.12,\n", + ")\n", + "\n", + "for i, (fs, title) in enumerate([(fs_full, 'Full'), (fs_clustered, 'Clustered')], 1):\n", + " ts = fs.timesteps\n", + " data = fs.solution['Boiler(Heat)|flow_rate'].sel(period=period, scenario=scenario).values\n", + " fig.add_trace(\n", + " go.Scatter(\n", + " x=ts,\n", + " y=data,\n", + " name=f'Boiler ({title})',\n", + " line=dict(width=1),\n", + " ),\n", + " row=i,\n", + " col=1,\n", + " )\n", + "\n", + "fig.update_layout(\n", + " height=500,\n", + " title=f'Boiler Output: Period {period}, Scenario {scenario}',\n", + ")\n", + "fig.update_yaxes(title_text='kW', row=1, col=1)\n", + "fig.update_yaxes(title_text='kW', row=2, col=1)\n", + "fig.show()" + ] + }, + { + "cell_type": "markdown", + "id": "21", + "metadata": {}, + "source": [ + "## Expand Clustered Solution to Full Resolution\n", + "\n", + "Use `expand_solution()` to map the clustered results back to all original timesteps:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "22", + "metadata": {}, + "outputs": [], + "source": [ + "# Expand the clustered solution\n", + "fs_expanded = fs_clustered.transform.expand_solution()\n", + "\n", + "print(f'Expanded: {len(fs_clustered.timesteps)} → {len(fs_expanded.timesteps)} timesteps')\n", + "print(f'Cost: {fs_expanded.solution[\"costs\"].item():,.0f} €')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "23", + "metadata": {}, + "outputs": [], + "source": [ + "# Compare expanded vs full resolution\n", + "period = 2025\n", + "scenario = 'low_demand'\n", + "\n", + "fig = make_subplots(\n", + " rows=2,\n", + " cols=1,\n", + " shared_xaxes=True,\n", + " subplot_titles=['Full Optimization', 'Expanded from Clustering'],\n", + " vertical_spacing=0.12,\n", + ")\n", + "\n", + "for i, (fs, title) in enumerate([(fs_full, 'Full'), (fs_expanded, 'Expanded')], 1):\n", + " data = fs.solution['Boiler(Heat)|flow_rate'].sel(period=period, scenario=scenario).values\n", + " fig.add_trace(\n", + " go.Scatter(\n", + " x=fs.timesteps,\n", + " y=data,\n", + " name=title,\n", + " line=dict(width=1),\n", + " showlegend=True,\n", + " ),\n", + " row=i,\n", + " col=1,\n", + " )\n", + "\n", + "fig.update_layout(\n", + " height=450,\n", + " title=f'Boiler Output: Period {period}, Scenario {scenario}',\n", + ")\n", + "fig.update_yaxes(title_text='kW', row=1, col=1)\n", + "fig.update_yaxes(title_text='kW', row=2, col=1)\n", + "fig.show()" + ] + }, + { + "cell_type": "markdown", + "id": "24", + "metadata": {}, + "source": [ + "## Key Considerations for Multi-Period Clustering\n", + "\n", + "### 1. Independent Clustering per Period/Scenario\n", + "\n", + "Each period and scenario combination is clustered independently because:\n", + "- Demand patterns may differ across years (growth, seasonality)\n", + "- Scenarios represent different futures that shouldn't be mixed\n", + "- Investment decisions must be robust across all combinations\n", + "\n", + "### 2. 
Safety Margins\n",
+ "\n",
+ "Multi-period systems often warrant larger safety margins (10-15%) because:\n",
+ "- More uncertainty across multiple years\n",
+ "- Investments made once must work for all periods\n",
+ "- Scenario weights may not perfectly represent actual outcomes\n",
+ "\n",
+ "### 3. Computational Benefits\n",
+ "\n",
+ "Clustering becomes more valuable as problem size grows:\n",
+ "\n",
+ "| Problem | Full Timesteps | With Clustering (10 typical days) |\n",
+ "|---------|----------------|------------------------------------|\n",
+ "| 1 period, 1 scenario, 365 days | 8,760 | 240 |\n",
+ "| 3 periods, 2 scenarios, 365 days | 52,560 | 1,440 |\n",
+ "| 10 periods, 3 scenarios, 365 days | 262,800 | 7,200 |\n",
+ "\n",
+ "(10 typical days × 24 hourly timesteps = 240 per period/scenario combination.)\n",
+ "\n",
+ "The speedup factor increases with problem size."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "25",
+ "metadata": {},
+ "source": [
+ "## Summary\n",
+ "\n",
+ "You learned how to:\n",
+ "\n",
+ "- Load **multi-period systems** with periods and scenarios\n",
+ "- Use **`transform.isel()`** to select time subsets\n",
+ "- Apply **`cluster_reduce()`** to multi-dimensional FlowSystems\n",
+ "- Use the **two-stage workflow** for robust investment optimization\n",
+ "- **Expand solutions** back to full resolution with `expand_solution()`\n",
+ "\n",
+ "### Key Takeaways\n",
+ "\n",
+ "1. **Clustering is applied per period/scenario**: Each combination gets independent typical periods\n",
+ "2. **Investments are shared**: Component sizes are optimized once across all periods/scenarios\n",
+ "3. **Use larger safety margins**: Multi-period uncertainty warrants 10-15% buffers\n",
+ "4. **Two-stage is recommended**: Fast sizing with clustering, accurate dispatch at full resolution\n",
+ "\n",
+ "### API Reference\n",
+ "\n",
+ "```python\n",
+ "# Load multi-period system\n",
+ "fs = fx.FlowSystem.from_netcdf('multiperiod_system.nc4')\n",
+ "\n",
+ "# Select time subset (optional)\n",
+ "fs = fs.transform.isel(time=slice(0, 168))  # First 168 timesteps\n",
+ "\n",
+ "# Cluster reduce (applies per period/scenario)\n",
+ "fs_clustered = fs.transform.cluster_reduce(\n",
+ "    n_clusters=10,\n",
+ "    cluster_duration='1D',\n",
+ "    time_series_for_high_peaks=['Demand(Flow)|fixed_relative_profile'],\n",
+ ")\n",
+ "\n",
+ "# Two-stage workflow\n",
+ "fs_clustered.optimize(solver)\n",
+ "sizes = {k: v.item() * 1.10 for k, v in fs_clustered.statistics.sizes.items()}\n",
+ "fs_dispatch = fs.transform.fix_sizes(sizes)\n",
+ "fs_dispatch.optimize(solver)\n",
+ "```"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "name": "python",
+ "version": "3.11"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/docs/notebooks/08d-external-clustering.ipynb b/docs/notebooks/08d-external-clustering.ipynb
deleted file mode 100644
index da213fba2..000000000
--- a/docs/notebooks/08d-external-clustering.ipynb
+++ /dev/null
@@ -1,419 +0,0 @@
-{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# External Clustering\n",
- "\n",
- "This notebook demonstrates different ways to apply clustering to a FlowSystem:\n",
- "\n",
- "1. **Built-in clustering** - Let flixopt handle everything via `transform.cluster()`\n",
- "2. **External tsam** - Run tsam yourself on a data subset and pass results to flixopt\n",
- "3. 
**Custom indices** - Provide your own cluster assignments directly\n", - "\n", - "The latter two options are useful when:\n", - "- You want to cluster on a subset of time series (faster tsam computation)\n", - "- You have custom clustering algorithms\n", - "- You want to reuse clustering results across multiple FlowSystems" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pandas as pd\n", - "import xarray as xr\n", - "\n", - "import flixopt as fx\n", - "\n", - "fx.CONFIG.notebook()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Load a Pre-built FlowSystem\n", - "\n", - "We'll use the district heating system from the data directory." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pathlib import Path\n", - "\n", - "# Generate example data if not present\n", - "data_file = Path('data/district_heating_system.nc4')\n", - "if not data_file.exists():\n", - " from data.generate_example_systems import create_district_heating_system\n", - "\n", - " fs = create_district_heating_system()\n", - " fs.to_netcdf(data_file)\n", - "\n", - "# Load the FlowSystem\n", - "flow_system = fx.FlowSystem.from_netcdf(data_file)\n", - "print(f'Loaded FlowSystem: {len(flow_system.timesteps)} timesteps ({len(flow_system.timesteps) / 96:.0f} days)')\n", - "print(f'Components: {list(flow_system.components.keys())}')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Extract key time series from the FlowSystem for later use\n", - "heat_demand = flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile\n", - "elec_price = flow_system.components['GridBuy'].outputs[0].effects_per_flow_hour['costs']\n", - "\n", - "print(f'Heat demand shape: {heat_demand.shape}')\n", - "print(f'Electricity price shape: {elec_price.shape}')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Baseline: solve without clustering\n", - "solver = fx.solvers.HighsSolver(mip_gap=0.01, log_to_console=False)\n", - "fs_baseline = flow_system.copy()\n", - "fs_baseline.optimize(solver)\n", - "print(f'Baseline cost (no clustering): {fs_baseline.solution[\"costs\"].item():,.0f} €')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Option 1: Built-in Clustering\n", - "\n", - "The simplest approach - let flixopt handle clustering internally using tsam.\n", - "This extracts ALL time series from the FlowSystem and clusters on them." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Create clustered system using built-in method\n", - "fs_builtin = flow_system.transform.cluster(\n", - " n_clusters=8, # Find 8 typical days\n", - " cluster_duration='1D',\n", - ")\n", - "\n", - "fs_builtin.optimize(solver)\n", - "print(f'Built-in clustering cost: {fs_builtin.solution[\"costs\"].item():,.0f} €')\n", - "\n", - "# Access the clustering parameters\n", - "params = fs_builtin._clustering_info['parameters']\n", - "print(f'\\nCluster assignments: {params.cluster_order.values}')\n", - "print(f'Period length: {params.period_length} timesteps')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Option 2: External tsam on Data Subset\n", - "\n", - "Run tsam yourself on a **subset** of time series data, then pass results to flixopt.\n", - "\n", - "This is useful when:\n", - "- You only want to cluster based on the most important time series (faster tsam)\n", - "- You want more control over tsam parameters\n", - "- You want to reuse the same clustering for multiple FlowSystems" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import tsam.timeseriesaggregation as tsam\n", - "\n", - "# Create DataFrame with only the KEY time series\n", - "# (Much faster than letting flixopt extract ALL time series)\n", - "clustering_data = pd.DataFrame(\n", - " {\n", - " 'heat_demand': heat_demand.values,\n", - " 'elec_price': elec_price.values,\n", - " },\n", - " index=flow_system.timesteps,\n", - ")\n", - "\n", - "print(f'Clustering on {len(clustering_data.columns)} time series (subset of FlowSystem data)')\n", - "print(f'Columns: {list(clustering_data.columns)}')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Run tsam with custom parameters\n", - "aggregation = tsam.TimeSeriesAggregation(\n", - " clustering_data,\n", - " noTypicalPeriods=8,\n", - " hoursPerPeriod=24,\n", - " resolution=0.25, # 15-min resolution\n", - " clusterMethod='hierarchical',\n", - ")\n", - "aggregation.createTypicalPeriods()\n", - "\n", - "print(f'tsam cluster order: {aggregation.clusterOrder}')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Create ClusteringParameters with the external tsam aggregation\n", - "# This allows flixopt to use the tsam results to aggregate ALL FlowSystem data\n", - "params_external = fx.ClusteringParameters(\n", - " n_clusters=8,\n", - " cluster_duration='1D',\n", - " tsam_aggregation=aggregation, # Pass the tsam object for data aggregation\n", - ")\n", - "\n", - "print(f'Indices populated: {params_external.has_indices}')\n", - "print(f'Cluster order: {params_external.cluster_order.values}')\n", - "print(f'Period length: {params_external.period_length}')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Apply to FlowSystem using add_clustering()\n", - "fs_external = flow_system.transform.add_clustering(params_external)\n", - "\n", - "fs_external.optimize(solver)\n", - "print(f'External tsam clustering cost: {fs_external.solution[\"costs\"].item():,.0f} €')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Option 3: Custom Indices\n", - "\n", - "Provide your own cluster assignments directly - no tsam required.\n", - "\n", - "This is useful when:\n", - "- You 
have a custom clustering algorithm\n",
- "- You want to manually define typical periods (e.g., weekdays vs weekends)\n",
- "- You're loading clustering results from another source"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# Define custom cluster assignments without tsam\n",
- "# We have 31 days; assign them cyclically to 8 clusters\n",
- "n_days = len(flow_system.timesteps) // 96  # 96 timesteps per day (15-min)\n",
- "print(f'Number of days: {n_days}')\n",
- "\n",
- "# Cyclic pattern: day i goes to cluster i % 8, so every 8th day shares a cluster\n",
- "custom_cluster_order = [i % 8 for i in range(n_days)]\n",
- "\n",
- "# Note: With custom indices (no tsam object), we use aggregate_data=False\n",
- "# because we don't have a tsam to transform the data. This only equalizes\n",
- "# binary (on/off) decisions across similar periods.\n",
- "params_custom = fx.ClusteringParameters(\n",
- "    n_clusters=8,\n",
- "    cluster_duration='1D',\n",
- "    aggregate_data=False,  # No tsam available for data transformation\n",
- "    # Provide indices directly\n",
- "    cluster_order=xr.DataArray(custom_cluster_order, dims=['cluster_period'], name='cluster_order'),\n",
- "    period_length=96,  # 96 timesteps per day (15-min resolution)\n",
- ")\n",
- "\n",
- "print(f'Custom indices set: {params_custom.has_indices}')\n",
- "print(f'Cluster order: {params_custom.cluster_order.values}')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "# Apply to FlowSystem\n",
- "fs_custom = flow_system.transform.add_clustering(params_custom)\n",
- "\n",
- "fs_custom.optimize(solver)\n",
- "print(f'Custom clustering cost: {fs_custom.solution[\"costs\"].item():,.0f} €')"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Comparison"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": [
- "results = pd.DataFrame(\n",
- "    {\n",
- "        'Method': ['Baseline (no clustering)', 'Built-in clustering', 'External tsam (subset)', 'Custom indices'],\n",
- "        'Cost [€]': [\n",
- "            fs_baseline.solution['costs'].item(),\n",
- "            fs_builtin.solution['costs'].item(),\n",
- "            fs_external.solution['costs'].item(),\n",
- "            fs_custom.solution['costs'].item(),\n",
- "        ],\n",
- "    }\n",
- ").set_index('Method')\n",
- "\n",
- "results['Gap vs Baseline [%]'] = (results['Cost [€]'] / results.loc['Baseline (no clustering)', 'Cost [€]'] - 1) * 100\n",
- "results.style.format({'Cost [€]': '{:,.0f}', 'Gap vs Baseline [%]': '{:.2f}'})"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## IO: Save and Reload\n",
- "\n",
- "Clustering indices are automatically saved with the FlowSystem and restored on load."
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import tempfile\n", - "\n", - "# Save clustered FlowSystem\n", - "with tempfile.TemporaryDirectory() as tmpdir:\n", - " path = Path(tmpdir) / 'clustered_system.nc4'\n", - " fs_external.to_netcdf(path)\n", - " print(f'Saved to: {path}')\n", - "\n", - " # Reload\n", - " fs_loaded = fx.FlowSystem.from_netcdf(path)\n", - "\n", - " # Check clustering was restored\n", - " params_loaded = fs_loaded._clustering_info['parameters']\n", - " print('\\nRestored clustering:')\n", - " print(f' has_indices: {params_loaded.has_indices}')\n", - " print(f' cluster_order: {params_loaded.cluster_order.values}')\n", - " print(f' period_length: {params_loaded.period_length}')\n", - "\n", - " # Solve reloaded system\n", - " fs_loaded.optimize(solver)\n", - " print(f'\\nReloaded cost: {fs_loaded.solution[\"costs\"].item():,.0f} €')\n", - " print(f'Original cost: {fs_external.solution[\"costs\"].item():,.0f} €')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Advanced: Segmentation with External tsam\n", - "\n", - "You can also provide segment assignments for intra-period aggregation." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Run tsam with segmentation on the data subset\n", - "aggregation_seg = tsam.TimeSeriesAggregation(\n", - " clustering_data,\n", - " noTypicalPeriods=8,\n", - " hoursPerPeriod=24,\n", - " resolution=0.25,\n", - " segmentation=True,\n", - " noSegments=12, # 12 segments per day (~2 hours each)\n", - ")\n", - "aggregation_seg.createTypicalPeriods()\n", - "\n", - "# Create parameters with segmentation and tsam for data aggregation\n", - "params_seg = fx.ClusteringParameters(\n", - " n_clusters=8,\n", - " cluster_duration='1D',\n", - " n_segments=12,\n", - " tsam_aggregation=aggregation_seg, # Pass tsam for data aggregation\n", - ")\n", - "\n", - "print(f'Segment assignment shape: {params_seg.segment_assignment.shape}')\n", - "print(f'Segment assignment for cluster 0:\\n{params_seg.segment_assignment.sel(cluster=0).values}')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Apply segmented clustering\n", - "fs_segmented = flow_system.transform.add_clustering(params_seg)\n", - "fs_segmented.optimize(solver)\n", - "print(f'Segmented clustering cost: {fs_segmented.solution[\"costs\"].item():,.0f} €')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Summary\n", - "\n", - "| Method | Data Aggregation | When to Use |\n", - "|--------|------------------|-------------|\n", - "| `transform.cluster()` | Yes | Default - let flixopt handle everything |\n", - "| `tsam_aggregation=...` | Yes | External tsam on data subset, with data aggregation |\n", - "| Direct `cluster_order` | No | Custom algorithms or manual period grouping (binary only) |\n", - "\n", - "All methods use `ClusteringParameters` which stores:\n", - "- `cluster_order`: Which cluster each period belongs to\n", - "- `period_length`: Timesteps per period\n", - "- `segment_assignment`: (optional) Segment IDs within each cluster\n", - "- `tsam_aggregation`: (optional) tsam object for data transformation" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "name": "python", - "version": "3.11" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} 
diff --git a/docs/notebooks/08e-cluster-and-reduce.ipynb b/docs/notebooks/08e-cluster-and-reduce.ipynb deleted file mode 100644 index 8ab2418fd..000000000 --- a/docs/notebooks/08e-cluster-and-reduce.ipynb +++ /dev/null @@ -1,446 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Typical Periods Optimization with `cluster_reduce()`\n", - "\n", - "This notebook demonstrates the `cluster_reduce()` method for fast sizing optimization using typical periods.\n", - "\n", - "## Key Concept\n", - "\n", - "Unlike `cluster()` which uses equality constraints (same number of timesteps), `cluster_reduce()` **actually reduces** the number of timesteps:\n", - "\n", - "| Method | Timesteps | Mechanism | Use Case |\n", - "|--------|-----------|-----------|----------|\n", - "| `cluster()` | 2976 | Equality constraints | Accurate operational dispatch |\n", - "| `cluster_reduce()` | 768 (8×96) | Typical periods only | Fast initial sizing |\n", - "\n", - "## Features\n", - "\n", - "- **Actual timestep reduction**: Only solves for typical periods (e.g., 8 days × 96 timesteps = 768 instead of 2976)\n", - "- **Timestep weighting**: Operational costs are weighted by cluster occurrence\n", - "- **Inter-period storage linking**: SOC_boundary variables track storage state across original periods\n", - "- **Cyclic constraint**: Optional cyclic storage constraint for long-term balance\n", - "\n", - "!!! note \"Requirements\"\n", - " This notebook requires the `tsam` package: `pip install tsam`" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import timeit\n", - "\n", - "import numpy as np\n", - "import pandas as pd\n", - "import plotly.graph_objects as go\n", - "from plotly.subplots import make_subplots\n", - "\n", - "import flixopt as fx\n", - "\n", - "fx.CONFIG.notebook()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Load the FlowSystem\n", - "\n", - "We use a pre-built district heating system with real-world time series data (one month at 15-min resolution):" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pathlib import Path\n", - "\n", - "# Generate example data if not present (for local development)\n", - "data_file = Path('data/district_heating_system.nc4')\n", - "if not data_file.exists():\n", - " from data.generate_example_systems import create_district_heating_system\n", - "\n", - " fs = create_district_heating_system()\n", - " fs.to_netcdf(data_file)\n", - "\n", - "# Load the district heating system (real data from Zeitreihen2020.csv)\n", - "flow_system = fx.FlowSystem.from_netcdf(data_file)\n", - "\n", - "timesteps = flow_system.timesteps\n", - "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 96:.0f} days at 15-min resolution)')\n", - "print(f'Components: {list(flow_system.components.keys())}')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Visualize first two weeks of data\n", - "heat_demand = flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile\n", - "electricity_price = flow_system.components['GridBuy'].outputs[0].effects_per_flow_hour['costs']\n", - "\n", - "fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.1)\n", - "\n", - "fig.add_trace(go.Scatter(x=timesteps[:1344], y=heat_demand.values[:1344], name='Heat Demand'), row=1, col=1)\n", - 
"fig.add_trace(go.Scatter(x=timesteps[:1344], y=electricity_price.values[:1344], name='Electricity Price'), row=2, col=1)\n", - "\n", - "fig.update_layout(height=400, title='First Two Weeks of Data')\n", - "fig.update_yaxes(title_text='Heat Demand [MW]', row=1, col=1)\n", - "fig.update_yaxes(title_text='El. Price [€/MWh]', row=2, col=1)\n", - "fig.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Method 1: Full Optimization (Baseline)\n", - "\n", - "First, let's solve the full problem with all timesteps." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "solver = fx.solvers.HighsSolver(mip_gap=0.01)\n", - "\n", - "start = timeit.default_timer()\n", - "fs_full = flow_system.copy()\n", - "fs_full.optimize(solver)\n", - "time_full = timeit.default_timer() - start\n", - "\n", - "print(f'Full optimization: {time_full:.2f} seconds')\n", - "print(f'Total cost: {fs_full.solution[\"costs\"].item():,.0f} €')\n", - "print('\\nOptimized sizes:')\n", - "for name, size in fs_full.statistics.sizes.items():\n", - " print(f' {name}: {float(size.item()):.1f}')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Method 2: Typical Periods with `cluster_reduce()`\n", - "\n", - "Now let's use the `cluster_reduce()` method to solve with only 8 typical days (768 timesteps).\n", - "\n", - "**Important**: Use `time_series_for_high_peaks` to force inclusion of peak demand periods. Without this, the typical periods may miss extreme peaks, leading to undersized components that cause infeasibility in the full-resolution dispatch stage." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "start = timeit.default_timer()\n", - "\n", - "# IMPORTANT: Use time_series_for_high_peaks to force inclusion of peak demand periods!\n", - "# Without this, the typical periods may miss extreme peaks, leading to undersized components.\n", - "# The format is the column name in the internal dataframe: 'ComponentName(FlowName)|attribute'\n", - "peak_forcing_series = ['HeatDemand(Q_th)|fixed_relative_profile']\n", - "\n", - "# Create reduced FlowSystem with 8 typical days\n", - "fs_reduced = flow_system.transform.cluster_reduce(\n", - " n_clusters=8, # 8 typical days\n", - " cluster_duration='1D', # Daily periods (can also use hours, e.g., 24)\n", - " time_series_for_high_peaks=peak_forcing_series, # Force inclusion of peak demand day!\n", - " storage_inter_period_linking=True, # Link storage states between periods\n", - " storage_cyclic=True, # Cyclic constraint: SOC[0] = SOC[end]\n", - ")\n", - "\n", - "time_clustering = timeit.default_timer() - start\n", - "print(f'Clustering time: {time_clustering:.2f} seconds')\n", - "print(f'Reduced from {len(flow_system.timesteps)} to {len(fs_reduced.timesteps)} timesteps')\n", - "print(f'Timestep weights (cluster occurrences): {np.unique(fs_reduced._cluster_info[\"timestep_weights\"])}')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Optimize the reduced system\n", - "start = timeit.default_timer()\n", - "fs_reduced.optimize(solver)\n", - "time_reduced = timeit.default_timer() - start\n", - "\n", - "print(f'Reduced optimization: {time_reduced:.2f} seconds')\n", - "print(f'Total cost: {fs_reduced.solution[\"costs\"].item():,.0f} €')\n", - "print(f'Speedup vs full: {time_full / (time_clustering + time_reduced):.1f}x')\n", - "print('\\nOptimized 
sizes:')\n", - "for name, size in fs_reduced.statistics.sizes.items():\n", - " print(f' {name}: {float(size.item()):.1f}')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Method 3: Two-Stage Workflow\n", - "\n", - "The recommended workflow:\n", - "1. **Stage 1**: Fast sizing with `cluster_reduce()`\n", - "2. **Stage 2**: Fix sizes (with safety margin) and re-optimize for accurate dispatch\n", - "\n", - "**Note**: Typical periods aggregate similar days, so individual days within a cluster may have higher demand than the typical day. Adding a 5-10% safety margin to sizes helps ensure feasibility." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Stage 1: Fast sizing (already done above)\n", - "print('Stage 1: Sizing with typical periods')\n", - "print(f' Time: {time_clustering + time_reduced:.2f} seconds')\n", - "print(f' Cost estimate: {fs_reduced.solution[\"costs\"].item():,.0f} €')\n", - "\n", - "# Apply safety margin to sizes (5-10% buffer for demand variability)\n", - "SAFETY_MARGIN = 1.05 # 5% buffer\n", - "sizes_with_margin = {name: float(size.item()) * SAFETY_MARGIN for name, size in fs_reduced.statistics.sizes.items()}\n", - "print(f'\\nSizes with {(SAFETY_MARGIN - 1) * 100:.0f}% safety margin:')\n", - "for name, size in sizes_with_margin.items():\n", - " original = fs_reduced.statistics.sizes[name].item()\n", - " print(f' {name}: {original:.1f} -> {size:.1f}')\n", - "\n", - "# Stage 2: Fix sizes and re-optimize at full resolution\n", - "print('\\nStage 2: Dispatch at full resolution')\n", - "start = timeit.default_timer()\n", - "\n", - "fs_dispatch = flow_system.transform.fix_sizes(sizes_with_margin)\n", - "fs_dispatch.optimize(solver)\n", - "\n", - "time_dispatch = timeit.default_timer() - start\n", - "print(f' Time: {time_dispatch:.2f} seconds')\n", - "print(f' Actual cost: {fs_dispatch.solution[\"costs\"].item():,.0f} €')\n", - "\n", - "# Total time comparison\n", - "total_two_stage = time_clustering + time_reduced + time_dispatch\n", - "print(f'\\nTotal two-stage time: {total_two_stage:.2f} seconds')\n", - "print(f'Full optimization time: {time_full:.2f} seconds')\n", - "print(f'Two-stage speedup: {time_full / total_two_stage:.1f}x')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Compare Results" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "results = {\n", - " 'Full (baseline)': {\n", - " 'Time [s]': time_full,\n", - " 'Cost [€]': fs_full.solution['costs'].item(),\n", - " 'CHP Size': fs_full.statistics.sizes['CHP(Q_th)'].item(),\n", - " 'Boiler Size': fs_full.statistics.sizes['Boiler(Q_th)'].item(),\n", - " 'Storage Size': fs_full.statistics.sizes['Storage'].item(),\n", - " },\n", - " 'Typical Periods (sizing)': {\n", - " 'Time [s]': time_clustering + time_reduced,\n", - " 'Cost [€]': fs_reduced.solution['costs'].item(),\n", - " 'CHP Size': fs_reduced.statistics.sizes['CHP(Q_th)'].item(),\n", - " 'Boiler Size': fs_reduced.statistics.sizes['Boiler(Q_th)'].item(),\n", - " 'Storage Size': fs_reduced.statistics.sizes['Storage'].item(),\n", - " },\n", - " 'Two-Stage (with margin)': {\n", - " 'Time [s]': total_two_stage,\n", - " 'Cost [€]': fs_dispatch.solution['costs'].item(),\n", - " 'CHP Size': sizes_with_margin['CHP(Q_th)'],\n", - " 'Boiler Size': sizes_with_margin['Boiler(Q_th)'],\n", - " 'Storage Size': sizes_with_margin['Storage'],\n", - " },\n", - "}\n", - "\n", - "comparison = 
pd.DataFrame(results).T\n", - "baseline_cost = comparison.loc['Full (baseline)', 'Cost [€]']\n", - "baseline_time = comparison.loc['Full (baseline)', 'Time [s]']\n", - "comparison['Cost Gap [%]'] = ((comparison['Cost [€]'] - baseline_cost) / abs(baseline_cost) * 100).round(2)\n", - "comparison['Speedup'] = (baseline_time / comparison['Time [s]']).round(1)\n", - "\n", - "comparison.style.format(\n", - " {\n", - " 'Time [s]': '{:.2f}',\n", - " 'Cost [€]': '{:,.0f}',\n", - " 'CHP Size': '{:.1f}',\n", - " 'Boiler Size': '{:.1f}',\n", - " 'Storage Size': '{:.0f}',\n", - " 'Cost Gap [%]': '{:.2f}',\n", - " 'Speedup': '{:.1f}x',\n", - " }\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Inter-Period Storage Linking\n", - "\n", - "The `cluster_reduce()` method creates special constraints to track storage state across original periods:\n", - "\n", - "- **SOC_boundary[d]**: Storage state at the boundary of original period d\n", - "- **delta_SOC[c]**: Change in SOC during typical period c\n", - "- **Linking**: `SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]]`\n", - "- **Cyclic**: `SOC_boundary[0] = SOC_boundary[end]` (optional)\n", - "\n", - "This ensures long-term storage behavior is captured correctly even though we only solve for typical periods." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Show clustering info\n", - "info = fs_reduced._cluster_info\n", - "print('Typical Periods Configuration:')\n", - "print(f' Number of typical periods: {info[\"n_clusters\"]}')\n", - "print(f' Timesteps per period: {info[\"timesteps_per_cluster\"]}')\n", - "print(f' Total reduced timesteps: {info[\"n_clusters\"] * info[\"timesteps_per_cluster\"]}')\n", - "print(f' Cluster order (first 10): {info[\"cluster_order\"][:10]}...')\n", - "cluster_occurrences = info['cluster_occurrences'][(None, None)]\n", - "print(f' Cluster occurrences: {dict(cluster_occurrences)}')\n", - "print(f' Storage inter-period linking: {info[\"storage_inter_period_linking\"]}')\n", - "print(f' Storage cyclic: {info[\"storage_cyclic\"]}')" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## API Reference\n", - "\n", - "### `transform.cluster_reduce()` Parameters\n", - "\n", - "| Parameter | Type | Description |\n", - "|-----------|------|-------------|\n", - "| `n_clusters` | `int` | Number of typical periods to extract (e.g., 8) |\n", - "| `cluster_duration` | `str \\| float` | Duration of each period ('1D', '24h') or hours as float |\n", - "| `weights` | `dict[str, float]` | Optional weights for clustering each time series |\n", - "| `time_series_for_high_peaks` | `list[str]` | **IMPORTANT**: Force inclusion of high-value periods to capture peak demands |\n", - "| `time_series_for_low_peaks` | `list[str]` | Force inclusion of low-value periods |\n", - "| `storage_inter_period_linking` | `bool` | Link storage states between periods (default: True) |\n", - "| `storage_cyclic` | `bool` | Enforce cyclic storage constraint (default: True) |\n", - "\n", - "### Peak Forcing\n", - "\n", - "**Always use `time_series_for_high_peaks`** for demand time series to ensure extreme peaks are captured. 
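In practice you pass the fully qualified time series name. A minimal sketch (the flow name comes from this notebook's example system; adapt it to yours):\n",
    "\n",
    "```python\n",
    "# Illustrative only: force the peak heat-demand day into the typical periods\n",
    "fs_sizing = flow_system.transform.cluster_reduce(\n",
    "    n_clusters=8,\n",
    "    cluster_duration='1D',\n",
    "    time_series_for_high_peaks=['HeatDemand(Q_th)|fixed_relative_profile'],\n",
    ")\n",
    "```\n",
    "\n",
    "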
The format is:\n", - "```python\n", - "time_series_for_high_peaks=['ComponentName(FlowName)|fixed_relative_profile']\n", - "```\n", - "\n", - "Without peak forcing, the clustering algorithm may select typical periods that don't include the peak demand day, leading to undersized components and infeasibility in the dispatch stage.\n", - "\n", - "### Comparison with `cluster()`\n", - "\n", - "| Feature | `cluster()` | `cluster_reduce()` |\n", - "|---------|-------------|--------------------|\n", - "| Timesteps | Original (2976) | Reduced (e.g., 768) |\n", - "| Mechanism | Equality constraints | Typical periods only |\n", - "| Solve time | Moderate reduction | Dramatic reduction |\n", - "| Accuracy | Higher | Lower (sizing only) |\n", - "| Storage handling | Via constraints | SOC boundary linking |\n", - "| Use case | Final dispatch | Initial sizing |" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Summary\n", - "\n", - "The `cluster_reduce()` method provides:\n", - "\n", - "1. **Dramatic speedup** for sizing optimization by reducing timesteps\n", - "2. **Proper cost weighting** so operational costs reflect cluster occurrences\n", - "3. **Storage state tracking** across original periods via SOC_boundary variables\n", - "4. **Two-stage workflow** support via `fix_sizes()` for accurate dispatch\n", - "\n", - "### Recommended Workflow\n", - "\n", - "```python\n", - "# Stage 1: Fast sizing with typical periods\n", - "fs_sizing = flow_system.transform.cluster_reduce(\n", - " n_clusters=8,\n", - " cluster_duration='1D',\n", - " time_series_for_high_peaks=['DemandComponent(FlowName)|fixed_relative_profile'],\n", - ")\n", - "fs_sizing.optimize(solver)\n", - "\n", - "# Apply safety margin (typical periods aggregate, so individual days may exceed)\n", - "SAFETY_MARGIN = 1.05 # 5% buffer\n", - "sizes_with_margin = {\n", - " name: float(size.item()) * SAFETY_MARGIN\n", - " for name, size in fs_sizing.statistics.sizes.items()\n", - "}\n", - "\n", - "# Stage 2: Fix sizes and optimize dispatch at full resolution\n", - "fs_dispatch = flow_system.transform.fix_sizes(sizes_with_margin)\n", - "fs_dispatch.optimize(solver)\n", - "```\n", - "\n", - "### Key Considerations\n", - "\n", - "- **Peak forcing is essential**: Use `time_series_for_high_peaks` to capture peak demand days\n", - "- **Safety margin recommended**: Add 5-10% buffer to sizes since aggregation smooths peaks\n", - "- **Two-stage is recommended**: Use `cluster_reduce()` for fast sizing, then `fix_sizes()` for dispatch\n", - "- **Storage linking preserves long-term behavior**: SOC_boundary variables ensure correct storage cycling" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "fs_expanded = fs_reduced.transform.expand_solution()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "fs_reduced.statistics.plot.balance('Heat')" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "name": "python", - "version": "3.11.0" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/docs/notebooks/data/generate_example_systems.py b/docs/notebooks/data/generate_example_systems.py index 53070eeda..a30761dc3 100644 --- a/docs/notebooks/data/generate_example_systems.py +++ b/docs/notebooks/data/generate_example_systems.py @@ -457,10 +457,13 @@ def create_multiperiod_system() -> fx.FlowSystem: - 3 planning 
periods (years 2024, 2025, 2026)
     - 2 scenarios (high demand, low demand)
-    Each period: 48 hours (2 days representative)
+    Each period: 336 hours (2 weeks) - suitable for clustering demonstrations.
+    Use transform.isel() to select subsets if needed.
     """
-    timesteps = pd.date_range('2024-01-01', periods=48, freq='h')
-    hour_of_day = np.arange(48) % 24
+    n_hours = 336  # 2 weeks
+    timesteps = pd.date_range('2024-01-01', periods=n_hours, freq='h')
+    hour_of_day = np.arange(n_hours) % 24
+    day_of_week = (np.arange(n_hours) // 24) % 7
 
     # Period definitions (years)
     periods = pd.Index([2024, 2025, 2026], name='period')
@@ -469,19 +472,21 @@ def create_multiperiod_system() -> fx.FlowSystem:
     scenarios = pd.Index(['high_demand', 'low_demand'], name='scenario')
     scenario_weights = np.array([0.3, 0.7])
 
-    # Base demand pattern (hourly)
+    # Base demand pattern (hourly) with daily and weekly variation
     base_pattern = np.where((hour_of_day >= 7) & (hour_of_day <= 18), 80.0, 35.0)
+    weekend_factor = np.where(day_of_week >= 5, 0.6, 1.0)
+    base_pattern = base_pattern * weekend_factor
 
     # Scenario-specific scaling
     np.random.seed(42)
-    high_demand = base_pattern * 1.2 + np.random.normal(0, 5, 48)
-    low_demand = base_pattern * 0.85 + np.random.normal(0, 3, 48)
+    high_demand = base_pattern * 1.3 + np.random.normal(0, 8, n_hours)
+    low_demand = base_pattern * 0.8 + np.random.normal(0, 5, n_hours)
 
     # Create DataFrame with scenario columns
     heat_demand = pd.DataFrame(
         {
-            'high_demand': np.clip(high_demand, 20, 120),
-            'low_demand': np.clip(low_demand, 15, 90),
+            'high_demand': np.clip(high_demand, 20, 150),
+            'low_demand': np.clip(low_demand, 15, 100),
         },
         index=timesteps,
     )

From e8afe18c8da920d2511e0effbedd2aa4561e37f5 Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Thu, 18 Dec 2025 20:32:43 +0100
Subject: [PATCH 063/191] Update notebooks

---
 docs/notebooks/08a-aggregation.ipynb          |  2 +-
 ...ster-reduce.ipynb => 08c-clustering.ipynb} | 43 ++++++++++++-------
 ...ipynb => 08d-clustering-multiperiod.ipynb} | 20 +++++----
 3 files changed, 40 insertions(+), 25 deletions(-)
 rename docs/notebooks/{08c-cluster-reduce.ipynb => 08c-clustering.ipynb} (93%)
 rename docs/notebooks/{08d-cluster-multiperiod.ipynb => 08d-clustering-multiperiod.ipynb} (97%)

diff --git a/docs/notebooks/08a-aggregation.ipynb b/docs/notebooks/08a-aggregation.ipynb
index 6d0260539..b7ed85f7d 100644
--- a/docs/notebooks/08a-aggregation.ipynb
+++ b/docs/notebooks/08a-aggregation.ipynb
@@ -392,7 +392,7 @@
     "\n",
     "### Further Reading\n",
     "\n",
-    "- For clustering with typical periods, see `transform.cluster()` (requires `tsam` package)\n",
+    "- For clustering with typical periods, see `transform.aggregate()` (requires `tsam` package)\n",
     "- For time selection, see `transform.sel()` and `transform.isel()`"
   ]
  }
diff --git a/docs/notebooks/08c-cluster-reduce.ipynb b/docs/notebooks/08c-clustering.ipynb
similarity index 93%
rename from docs/notebooks/08c-cluster-reduce.ipynb
rename to docs/notebooks/08c-clustering.ipynb
index 2fc7beb85..a9a1ef302 100644
--- a/docs/notebooks/08c-cluster-reduce.ipynb
+++ b/docs/notebooks/08c-clustering.ipynb
@@ -5,7 +5,7 @@
    "id": "0",
    "metadata": {},
    "source": [
-    "# Time Series Clustering with `cluster_reduce()`\n",
+    "# Time Series Clustering with `aggregate()`\n",
     "\n",
     "Accelerate investment optimization using typical periods (clustering).\n",
     "\n",
@@ -131,9 +131,9 @@
    "id": "7",
    "metadata": {},
    "source": [
-    "## Method 2: Clustering with `cluster_reduce()`\n",
+    "## Method 2: Clustering with 
`aggregate()`\n", "\n", - "The `cluster_reduce()` method:\n", + "The `aggregate()` method:\n", "\n", "1. **Clusters similar days** using the TSAM (Time Series Aggregation Module) package\n", "2. **Reduces timesteps** to only typical periods (e.g., 8 typical days = 768 timesteps)\n", @@ -158,8 +158,9 @@ "peak_series = ['HeatDemand(Q_th)|fixed_relative_profile']\n", "\n", "# Create reduced FlowSystem with 8 typical days\n", - "fs_clustered = flow_system.transform.cluster_reduce(\n", - " n_clusters=8, # 8 typical days\n", + "fs_clustered = flow_system.transform.aggregate(\n", + " method='tsam',\n", + " n_representatives=8, # 8 typical days\n", " cluster_duration='1D', # Daily clustering\n", " time_series_for_high_peaks=peak_series, # Capture peak demand day\n", " storage_cyclic=True, # SOC[end] = SOC[start]\n", @@ -232,7 +233,7 @@ "\n", "The recommended approach for investment optimization:\n", "\n", - "1. **Stage 1**: Fast sizing with `cluster_reduce()` \n", + "1. **Stage 1**: Fast sizing with `aggregate()` \n", "2. **Stage 2**: Fix sizes (with safety margin) and dispatch at full resolution\n", "\n", "!!! tip \"Safety Margin\"\n", @@ -411,21 +412,32 @@ "metadata": {}, "outputs": [], "source": [ - "fs_clustered.statistics.plot.balance('Heat');" + "fs_clustered.statistics.plot.storage('Storage')" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "id": "22", "metadata": {}, + "outputs": [], + "source": [ + "fs_expanded.statistics.plot.storage('Storage')" + ] + }, + { + "cell_type": "markdown", + "id": "23", + "metadata": {}, "source": [ "## API Reference\n", "\n", - "### `transform.cluster_reduce()` Parameters\n", + "### `transform.aggregate()` Parameters\n", "\n", "| Parameter | Type | Description |\n", "|-----------|------|-------------|\n", - "| `n_clusters` | `int` | Number of typical periods (e.g., 8 typical days) |\n", + "| `method` | `str` | Aggregation backend: 'tsam' (default) or 'manual' |\n", + "| `n_representatives` | `int` | Number of typical periods (e.g., 8 typical days) |\n", "| `cluster_duration` | `str \\| float` | Duration per cluster ('1D', '24h') or hours |\n", "| `weights` | `dict[str, float]` | Optional weights for time series in clustering |\n", "| `time_series_for_high_peaks` | `list[str]` | **Essential**: Force inclusion of peak periods |\n", @@ -442,8 +454,9 @@ "\n", "```python\n", "# Stage 1: Fast sizing\n", - "fs_sizing = flow_system.transform.cluster_reduce(\n", - " n_clusters=8,\n", + "fs_sizing = flow_system.transform.aggregate(\n", + " method='tsam',\n", + " n_representatives=8,\n", " cluster_duration='1D',\n", " time_series_for_high_peaks=['Demand(Flow)|fixed_relative_profile'],\n", ")\n", @@ -460,14 +473,14 @@ }, { "cell_type": "markdown", - "id": "23", + "id": "24", "metadata": {}, "source": [ "## Summary\n", "\n", "You learned how to:\n", "\n", - "- Use **`cluster_reduce()`** to aggregate time series into typical periods\n", + "- Use **`aggregate()`** to aggregate time series into typical periods\n", "- Apply **peak forcing** to capture extreme demand days\n", "- Use **two-stage optimization** for fast yet accurate investment decisions\n", "- **Expand solutions** back to full resolution with `expand_solution()`\n", @@ -481,7 +494,7 @@ "\n", "### Next Steps\n", "\n", - "- **[08d-cluster-multiperiod](08d-cluster-multiperiod.ipynb)**: Clustering with multiple periods and scenarios" + "- **[08d-clustering-multiperiod](08d-clustering-multiperiod.ipynb)**: Clustering with multiple periods and scenarios" ] } ], diff --git 
a/docs/notebooks/08d-cluster-multiperiod.ipynb b/docs/notebooks/08d-clustering-multiperiod.ipynb similarity index 97% rename from docs/notebooks/08d-cluster-multiperiod.ipynb rename to docs/notebooks/08d-clustering-multiperiod.ipynb index 4e8d6aa94..5bf2d89b6 100644 --- a/docs/notebooks/08d-cluster-multiperiod.ipynb +++ b/docs/notebooks/08d-clustering-multiperiod.ipynb @@ -5,7 +5,7 @@ "id": "0", "metadata": {}, "source": [ - "# Multi-Period Clustering with `cluster_reduce()`\n", + "# Multi-Period Clustering with `aggregate()`\n", "\n", "Combine time series clustering with multi-period investment optimization.\n", "\n", @@ -171,9 +171,9 @@ "id": "9", "metadata": {}, "source": [ - "## Multi-Period Clustering with `cluster_reduce()`\n", + "## Multi-Period Clustering with `aggregate()`\n", "\n", - "When applied to a multi-period system, `cluster_reduce()` clusters **each period/scenario combination independently**.\n", + "When applied to a multi-period system, `aggregate()` clusters **each period/scenario combination independently**.\n", "This is because demand patterns and optimal operations may differ across:\n", "\n", "- **Periods**: Different years may have different characteristics\n", @@ -196,8 +196,9 @@ "peak_series = ['Building(Heat)|fixed_relative_profile']\n", "\n", "# Cluster to 3 typical days (from 7 days)\n", - "fs_clustered = flow_system.transform.cluster_reduce(\n", - " n_clusters=3,\n", + "fs_clustered = flow_system.transform.aggregate(\n", + " method='tsam',\n", + " n_representatives=3,\n", " cluster_duration='1D',\n", " time_series_for_high_peaks=peak_series,\n", ")\n", @@ -544,7 +545,7 @@ "\n", "- Load **multi-period systems** with periods and scenarios\n", "- Use **`transform.isel()`** to select time subsets\n", - "- Apply **`cluster_reduce()`** to multi-dimensional FlowSystems\n", + "- Apply **`aggregate()`** to multi-dimensional FlowSystems\n", "- Use the **two-stage workflow** for robust investment optimization\n", "- **Expand solutions** back to full resolution with `expand_solution()`\n", "\n", @@ -564,9 +565,10 @@ "# Select time subset (optional)\n", "fs = fs.transform.isel(time=slice(0, 168)) # First 168 timesteps\n", "\n", - "# Cluster reduce (applies per period/scenario)\n", - "fs_clustered = fs.transform.cluster_reduce(\n", - " n_clusters=10,\n", + "# Aggregate (applies per period/scenario)\n", + "fs_clustered = fs.transform.aggregate(\n", + " method='tsam',\n", + " n_representatives=10,\n", " cluster_duration='1D',\n", " time_series_for_high_peaks=['Demand(Flow)|fixed_relative_profile'],\n", ")\n", From 00e75083ff7e5e1966eed6a08c3c2f9a419e4046 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 20:53:13 +0100 Subject: [PATCH 064/191] Update notebooks and fix storage inital=equal --- .../08d-clustering-multiperiod.ipynb | 51 +++++++++---------- flixopt/transform_accessor.py | 4 +- 2 files changed, 27 insertions(+), 28 deletions(-) diff --git a/docs/notebooks/08d-clustering-multiperiod.ipynb b/docs/notebooks/08d-clustering-multiperiod.ipynb index 5bf2d89b6..7d13afd07 100644 --- a/docs/notebooks/08d-clustering-multiperiod.ipynb +++ b/docs/notebooks/08d-clustering-multiperiod.ipynb @@ -32,6 +32,7 @@ "\n", "import numpy as np\n", "import pandas as pd\n", + "import plotly.express as px\n", "import plotly.graph_objects as go\n", "from plotly.subplots import make_subplots\n", "\n", @@ -115,16 +116,9 @@ "# Visualize demand scenarios\n", "heat_demand = 
flow_system.components['Building'].inputs[0].fixed_relative_profile\n", "\n", - "fig = go.Figure()\n", - "for scenario in flow_system.scenarios:\n", - " fig.add_trace(\n", - " go.Scatter(\n", - " x=flow_system.timesteps,\n", - " y=heat_demand.sel(scenario=scenario).values,\n", - " name=f'{scenario}',\n", - " line=dict(width=1),\n", - " )\n", - " )\n", + "fig = px.line(\n", + " heat_demand.to_dataframe('value').reset_index(), x='time', y='value', facet_col='period', facet_row='scenario'\n", + ")\n", "\n", "fig.update_layout(\n", " height=350,\n", @@ -160,10 +154,10 @@ "time_full = timeit.default_timer() - start\n", "\n", "print(f'Full optimization: {time_full:.2f} seconds')\n", - "print(f'Total cost: {fs_full.solution[\"costs\"].item():,.0f} €')\n", + "print(f'Total cost (objective): {fs_full.solution[\"objective\"].item():,.0f} €')\n", "print('\\nOptimized sizes:')\n", "for name, size in fs_full.statistics.sizes.items():\n", - " print(f' {name}: {float(size.item()):.1f}')" + " print(f' {name}: {size.max().item():.1f}')" ] }, { @@ -223,11 +217,11 @@ "time_clustered = timeit.default_timer() - start\n", "\n", "print(f'Clustered optimization: {time_clustered:.2f} seconds')\n", - "print(f'Total cost: {fs_clustered.solution[\"costs\"].item():,.0f} €')\n", + "print(f'Total cost (objective): {fs_clustered.solution[\"objective\"].item():,.0f} €')\n", "print(f'\\nSpeedup vs full: {time_full / (time_clustering + time_clustered):.1f}x')\n", "print('\\nOptimized sizes:')\n", "for name, size in fs_clustered.statistics.sizes.items():\n", - " print(f' {name}: {float(size.item()):.1f}')" + " print(f' {name}: {size.max().item():.1f}')" ] }, { @@ -294,14 +288,14 @@ "# Stage 1 already done - apply safety margin\n", "SAFETY_MARGIN = 1.10 # 10% buffer for multi-period uncertainty\n", "\n", - "sizes_with_margin = {name: float(size.item()) * SAFETY_MARGIN for name, size in fs_clustered.statistics.sizes.items()}\n", + "sizes_with_margin = {name: size.max().item() * SAFETY_MARGIN for name, size in fs_clustered.statistics.sizes.items()}\n", "\n", "print('Stage 1: Sizing with clustering')\n", "print(f' Time: {time_clustering + time_clustered:.2f} seconds')\n", - "print(f' Cost estimate: {fs_clustered.solution[\"costs\"].item():,.0f} €')\n", + "print(f' Cost estimate: {fs_clustered.solution[\"objective\"].item():,.0f} €')\n", "print(f'\\nSizes with {(SAFETY_MARGIN - 1) * 100:.0f}% safety margin:')\n", "for name, size in sizes_with_margin.items():\n", - " original = fs_clustered.statistics.sizes[name].item()\n", + " original = fs_clustered.statistics.sizes[name].max().item()\n", " print(f' {name}: {original:.1f} → {size:.1f}')" ] }, @@ -322,7 +316,7 @@ "time_dispatch = timeit.default_timer() - start\n", "\n", "print(f' Time: {time_dispatch:.2f} seconds')\n", - "print(f' Actual cost: {fs_dispatch.solution[\"costs\"].item():,.0f} €')\n", + "print(f' Actual cost: {fs_dispatch.solution[\"objective\"].item():,.0f} €')\n", "\n", "# Total comparison\n", "total_two_stage = time_clustering + time_clustered + time_dispatch\n", @@ -348,19 +342,19 @@ "results = {\n", " 'Full (baseline)': {\n", " 'Time [s]': time_full,\n", - " 'Cost [€]': fs_full.solution['costs'].item(),\n", - " 'Boiler': fs_full.statistics.sizes['Boiler(Heat)'].item(),\n", - " 'Storage': fs_full.statistics.sizes['ThermalStorage'].item(),\n", + " 'Cost [€]': fs_full.solution['objective'].item(),\n", + " 'Boiler': fs_full.statistics.sizes['Boiler(Heat)'].max().item(),\n", + " 'Storage': fs_full.statistics.sizes['ThermalStorage'].max().item(),\n", " },\n", " 
'Clustered (3 days)': {\n", " 'Time [s]': time_clustering + time_clustered,\n", - " 'Cost [€]': fs_clustered.solution['costs'].item(),\n", - " 'Boiler': fs_clustered.statistics.sizes['Boiler(Heat)'].item(),\n", - " 'Storage': fs_clustered.statistics.sizes['ThermalStorage'].item(),\n", + " 'Cost [€]': fs_clustered.solution['objective'].item(),\n", + " 'Boiler': fs_clustered.statistics.sizes['Boiler(Heat)'].max().item(),\n", + " 'Storage': fs_clustered.statistics.sizes['ThermalStorage'].max().item(),\n", " },\n", " 'Two-Stage': {\n", " 'Time [s]': total_two_stage,\n", - " 'Cost [€]': fs_dispatch.solution['costs'].item(),\n", + " 'Cost [€]': fs_dispatch.solution['objective'].item(),\n", " 'Boiler': sizes_with_margin['Boiler(Heat)'],\n", " 'Storage': sizes_with_margin['ThermalStorage'],\n", " },\n", @@ -455,7 +449,7 @@ "fs_expanded = fs_clustered.transform.expand_solution()\n", "\n", "print(f'Expanded: {len(fs_clustered.timesteps)} → {len(fs_expanded.timesteps)} timesteps')\n", - "print(f'Cost: {fs_expanded.solution[\"costs\"].item():,.0f} €')" + "print(f'Cost (objective): {fs_expanded.solution[\"objective\"].item():,.0f} €')" ] }, { @@ -575,9 +569,12 @@ "\n", "# Two-stage workflow\n", "fs_clustered.optimize(solver)\n", - "sizes = {k: v.item() * 1.10 for k, v in fs_clustered.statistics.sizes.items()}\n", + "sizes = {k: v.max().item() * 1.10 for k, v in fs_clustered.statistics.sizes.items()}\n", "fs_dispatch = fs.transform.fix_sizes(sizes)\n", "fs_dispatch.optimize(solver)\n", + "\n", + "# Access results\n", + "print(fs_dispatch.solution['objective'].item()) # Total weighted cost\n", "```" ] } diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 8243d5bc3..a713d6ee0 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -785,7 +785,9 @@ def _build_weights_for_key(key: tuple) -> xr.DataArray: # Remove 'equals_final' from storages - doesn't make sense on reduced timesteps for storage in reduced_fs.storages.values(): - if storage.initial_charge_state == 'equals_final': + # Handle both scalar and xarray cases + ics = storage.initial_charge_state + if isinstance(ics, str) and ics == 'equals_final': storage.initial_charge_state = 0 reduced_fs._cluster_info = { From 0badd1460594512536bcb79afd60b736e0fcd3ee Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 20:53:36 +0100 Subject: [PATCH 065/191] Update notebooks and fix storage inital=equal --- docs/notebooks/08d-clustering-multiperiod.ipynb | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/notebooks/08d-clustering-multiperiod.ipynb b/docs/notebooks/08d-clustering-multiperiod.ipynb index 7d13afd07..82985ce86 100644 --- a/docs/notebooks/08d-clustering-multiperiod.ipynb +++ b/docs/notebooks/08d-clustering-multiperiod.ipynb @@ -246,7 +246,8 @@ "print('Clustering Configuration:')\n", "print(f' Typical periods (clusters): {info[\"n_clusters\"]}')\n", "print(f' Timesteps per cluster: {info[\"timesteps_per_cluster\"]}')\n", - "print(f' Original periods: {info[\"n_original_periods\"]}')\n", + "print(f' Has model periods: {info[\"has_periods\"]}')\n", + "print(f' Has scenarios: {info[\"has_scenarios\"]}')\n", "\n", "# The cluster_order shows which cluster each original day belongs to\n", "cluster_order = info['cluster_order']\n", From aa203915ef1a8a022675e8391ed6cf37a9abc532 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 21:36:46 +0100 Subject: [PATCH 066/191] Remove 
old clustering code --- flixopt/__init__.py | 5 +- flixopt/aggregation/__init__.py | 22 +- flixopt/aggregation/base.py | 5 + flixopt/aggregation/tsam_backend.py | 140 ++-- flixopt/clustering.py | 1086 --------------------------- flixopt/flow_system.py | 71 +- flixopt/optimization.py | 173 +---- flixopt/transform_accessor.py | 150 ++-- 8 files changed, 218 insertions(+), 1434 deletions(-) delete mode 100644 flixopt/clustering.py diff --git a/flixopt/__init__.py b/flixopt/__init__.py index 00555c7e0..e79af22ce 100644 --- a/flixopt/__init__.py +++ b/flixopt/__init__.py @@ -15,7 +15,6 @@ # Import commonly used classes and functions from . import aggregation, linear_converters, plotting, results, solvers from .carrier import Carrier, CarrierContainer -from .clustering import ClusteringParameters from .components import ( LinearConverter, Sink, @@ -30,7 +29,7 @@ from .elements import Bus, Flow from .flow_system import FlowSystem from .interface import InvestParameters, Piece, Piecewise, PiecewiseConversion, PiecewiseEffects, StatusParameters -from .optimization import ClusteredOptimization, Optimization, SegmentedOptimization +from .optimization import Optimization, SegmentedOptimization from .plot_result import PlotResult from .structure import TimeSeriesWeights @@ -51,7 +50,6 @@ 'Transmission', 'FlowSystem', 'Optimization', - 'ClusteredOptimization', 'SegmentedOptimization', 'InvestParameters', 'StatusParameters', @@ -59,7 +57,6 @@ 'Piecewise', 'PiecewiseConversion', 'PiecewiseEffects', - 'ClusteringParameters', 'PlotResult', 'TimeSeriesWeights', 'aggregation', diff --git a/flixopt/aggregation/__init__.py b/flixopt/aggregation/__init__.py index 02d7552a4..b0241b25a 100644 --- a/flixopt/aggregation/__init__.py +++ b/flixopt/aggregation/__init__.py @@ -50,17 +50,32 @@ create_manual_backend_from_selection, ) +# Lazy import for InterClusterLinking to avoid circular imports +# It depends on structure.Submodel which has complex import dependencies +InterClusterLinking = None + + +def _get_inter_cluster_linking(): + """Get InterClusterLinking class with lazy import.""" + global InterClusterLinking + if InterClusterLinking is None: + from .storage_linking import InterClusterLinking as _InterClusterLinking + + InterClusterLinking = _InterClusterLinking + return InterClusterLinking + + # Conditional imports based on package availability _BACKENDS = {'manual': ManualBackend} try: - from .tsam_backend import TSAMBackend, create_tsam_backend_from_clustering + from .tsam_backend import TSAMBackend, plot_aggregation _BACKENDS['tsam'] = TSAMBackend except ImportError: # tsam not installed - TSAMBackend not available TSAMBackend = None - create_tsam_backend_from_clustering = None + plot_aggregation = None def get_backend(name: str): @@ -103,14 +118,15 @@ def list_backends() -> list[str]: 'AggregationInfo', 'ClusterStructure', 'Aggregator', + 'InterClusterLinking', # Backends 'TSAMBackend', 'ManualBackend', # Utilities 'create_cluster_structure_from_mapping', - 'create_tsam_backend_from_clustering', 'create_manual_backend_from_labels', 'create_manual_backend_from_selection', + 'plot_aggregation', 'get_backend', 'list_backends', ] diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index 1bd638110..e66e6f742 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -270,17 +270,22 @@ class AggregationInfo: This is stored on the FlowSystem after aggregation to enable: - expand_solution() to map back to original timesteps - Statistics to properly weight results + - 
Inter-cluster storage linking - Serialization/deserialization of aggregated models Attributes: result: The AggregationResult from the aggregation backend. original_flow_system: Reference to the FlowSystem before aggregation. backend_name: Name of the aggregation backend used (e.g., 'tsam', 'manual'). + storage_inter_cluster_linking: Whether to add inter-cluster storage constraints. + storage_cyclic: Whether to enforce cyclic storage (SOC[start] = SOC[end]). """ result: AggregationResult original_flow_system: object # FlowSystem - avoid circular import backend_name: str = 'unknown' + storage_inter_cluster_linking: bool = True + storage_cyclic: bool = True def create_cluster_structure_from_mapping( diff --git a/flixopt/aggregation/tsam_backend.py b/flixopt/aggregation/tsam_backend.py index f7a4bd867..e43eededd 100644 --- a/flixopt/aggregation/tsam_backend.py +++ b/flixopt/aggregation/tsam_backend.py @@ -359,92 +359,88 @@ def _aggregate_multi_dimensional( ) -def create_tsam_backend_from_clustering( - clustering, # flixopt.clustering.Clustering -) -> tuple[TSAMBackend, AggregationResult]: - """Create TSAMBackend and AggregationResult from existing Clustering object. +def plot_aggregation( + result: AggregationResult, + colormap: str | None = None, + show: bool | None = None, +): + """Plot original vs aggregated data comparison. - This is a bridge function to help migrate from the old Clustering class - to the new aggregation abstraction. + Visualizes the original time series (dashed lines) overlaid with + the aggregated/clustered time series (solid lines) for comparison. Args: - clustering: Existing flixopt Clustering object (after calling cluster()). + result: AggregationResult containing original and aggregated data. + colormap: Colorscale name for the time series colors. + Defaults to CONFIG.Plotting.default_qualitative_colorscale. + show: Whether to display the figure. + Defaults to CONFIG.Plotting.default_show. Returns: - Tuple of (TSAMBackend, AggregationResult). + PlotResult containing the comparison figure and underlying data. + + Example: + >>> result = backend.aggregate(data, n_representatives=8) + >>> plot_aggregation(result) """ - if clustering.tsam is None: - raise ValueError('Clustering has not been executed. 
Call cluster() first.') + import plotly.express as px - tsam_agg = clustering.tsam + from ..color_processing import process_colors + from ..config import CONFIG + from ..plot_result import PlotResult - backend = TSAMBackend( - cluster_duration=clustering.hours_per_period, - n_segments=clustering.n_segments, - time_series_for_high_peaks=clustering.time_series_for_high_peaks, - time_series_for_low_peaks=clustering.time_series_for_low_peaks, - weights=clustering.weights, - ) + if result.original_data is None or result.aggregated_data is None: + raise ValueError('AggregationResult must contain both original_data and aggregated_data for plotting') - # Build AggregationResult from Clustering state - n_timesteps = clustering.nr_of_time_steps - timesteps_per_period = int(clustering.hours_per_period / clustering.hours_per_time_step) - cluster_order = tsam_agg.clusterOrder - n_clusters = len(tsam_agg.clusterPeriodNoOccur) - - # Build timestep mapping - timestep_mapping = np.zeros(n_timesteps, dtype=np.int32) - for period_idx, cluster_id in enumerate(cluster_order): - for pos in range(timesteps_per_period): - original_idx = period_idx * timesteps_per_period + pos - if original_idx < n_timesteps: - representative_idx = cluster_id * timesteps_per_period + pos - timestep_mapping[original_idx] = representative_idx - - # Build weights - n_representative_timesteps = n_clusters * timesteps_per_period - representative_weights = np.zeros(n_representative_timesteps, dtype=np.float64) - for cluster_id, count in tsam_agg.clusterPeriodNoOccur.items(): - for pos in range(timesteps_per_period): - rep_idx = cluster_id * timesteps_per_period + pos - if rep_idx < n_representative_timesteps: - representative_weights[rep_idx] = count - - # Create cluster structure - cluster_occurrences = xr.DataArray( - [tsam_agg.clusterPeriodNoOccur.get(c, 0) for c in range(n_clusters)], - dims=['cluster'], - name='cluster_occurrences', - ) + # Convert xarray to DataFrames + original_df = result.original_data.to_dataframe() + aggregated_df = result.aggregated_data.to_dataframe() - cluster_structure = ClusterStructure( - cluster_order=xr.DataArray(cluster_order, dims=['original_period'], name='cluster_order'), - cluster_occurrences=cluster_occurrences, - n_clusters=n_clusters, - timesteps_per_cluster=timesteps_per_period, - ) + # Expand aggregated data to original length using mapping + mapping = result.timestep_mapping.values + expanded_agg = aggregated_df.iloc[mapping].reset_index(drop=True) - # Build aggregated data as xarray Dataset - aggregated_df = clustering.aggregated_data - aggregated_ds = xr.Dataset( - {col: (['time'], aggregated_df[col].values[:n_representative_timesteps]) for col in aggregated_df.columns}, - coords={'time': np.arange(n_representative_timesteps)}, + # Rename for legend + original_df = original_df.rename(columns={col: f'Original - {col}' for col in original_df.columns}) + expanded_agg = expanded_agg.rename(columns={col: f'Aggregated - {col}' for col in expanded_agg.columns}) + + colors = list( + process_colors(colormap or CONFIG.Plotting.default_qualitative_colorscale, list(original_df.columns)).values() ) - # Original data as xarray Dataset - original_df = clustering.original_data - original_ds = xr.Dataset( - {col: (['time'], original_df[col].values) for col in original_df.columns}, - coords={'time': np.arange(n_timesteps)}, + # Create line plot for original data (dashed) + original_df = original_df.reset_index() + index_name = original_df.columns[0] + df_org_long = original_df.melt(id_vars=index_name, 
var_name='variable', value_name='value') + fig = px.line(df_org_long, x=index_name, y='value', color='variable', color_discrete_sequence=colors) + for trace in fig.data: + trace.update(line=dict(dash='dash')) + + # Add aggregated data (solid lines) + expanded_agg[index_name] = original_df[index_name] + df_agg_long = expanded_agg.melt(id_vars=index_name, var_name='variable', value_name='value') + fig2 = px.line(df_agg_long, x=index_name, y='value', color='variable', color_discrete_sequence=colors) + for trace in fig2.data: + fig.add_trace(trace) + + fig.update_layout( + title='Original vs Aggregated Data (original = ---)', + xaxis_title='Time', + yaxis_title='Value', ) - result = AggregationResult( - timestep_mapping=xr.DataArray(timestep_mapping, dims=['original_time'], name='timestep_mapping'), - n_representatives=n_representative_timesteps, - representative_weights=xr.DataArray(representative_weights, dims=['time'], name='representative_weights'), - aggregated_data=aggregated_ds, - cluster_structure=cluster_structure, - original_data=original_ds, + # Build xarray Dataset with both original and aggregated data + data = xr.Dataset( + { + 'original': result.original_data.to_array(dim='variable'), + 'aggregated': result.aggregated_data.to_array(dim='variable'), + } ) + plot_result = PlotResult(data=data, figure=fig) + + if show is None: + show = CONFIG.Plotting.default_show + if show: + plot_result.show() - return backend, result + return plot_result diff --git a/flixopt/clustering.py b/flixopt/clustering.py deleted file mode 100644 index db5153aa9..000000000 --- a/flixopt/clustering.py +++ /dev/null @@ -1,1086 +0,0 @@ -""" -This module contains the Clustering functionality for the flixopt framework. -Through this, clustering TimeSeriesData is possible. -""" - -from __future__ import annotations - -import copy -import logging -import timeit -from typing import TYPE_CHECKING - -import numpy as np - -try: - import tsam.timeseriesaggregation as tsam - - TSAM_AVAILABLE = True -except ImportError: - TSAM_AVAILABLE = False - -from .color_processing import process_colors -from .components import Storage -from .config import CONFIG -from .plot_result import PlotResult -from .structure import ( - FlowSystemModel, - Interface, - Submodel, - register_class_for_io, -) # Interface and register_class_for_io used by ClusteringParameters - -if TYPE_CHECKING: - import linopy - import pandas as pd - import xarray as xr - - from .core import Scalar, TimeSeriesData - from .elements import Component - from .flow_system import FlowSystem - -logger = logging.getLogger('flixopt') - - -class Clustering: - """ - Clustering organizing class for time series aggregation using tsam. - """ - - def __init__( - self, - original_data: pd.DataFrame, - hours_per_time_step: Scalar, - hours_per_period: Scalar, - nr_of_periods: int | None = 8, - n_segments: int | None = None, - weights: dict[str, float] | None = None, - time_series_for_high_peaks: list[str] | None = None, - time_series_for_low_peaks: list[str] | None = None, - ): - """ - Args: - original_data: The original data to aggregate. - hours_per_time_step: The duration of each timestep in hours. - hours_per_period: The duration of each period in hours. - nr_of_periods: The number of typical periods to use in the aggregation. - Set to None to skip period clustering and only do segmentation. - n_segments: Number of segments within each period (inner-period clustering). - If None, no inner-period segmentation is performed. - weights: The weights for aggregation. 
If None, all time series are equally weighted. - time_series_for_high_peaks: List of time series to use for explicitly selecting periods with high values. - time_series_for_low_peaks: List of time series to use for explicitly selecting periods with low values. - """ - if not TSAM_AVAILABLE: - raise ImportError( - "The 'tsam' package is required for clustering functionality. Install it with 'pip install tsam'." - ) - self.original_data = copy.deepcopy(original_data) - self.hours_per_time_step = hours_per_time_step - self.hours_per_period = hours_per_period - self.nr_of_periods = nr_of_periods - self.n_segments = n_segments - self.nr_of_time_steps = len(self.original_data.index) - self.weights = weights or {} - self.time_series_for_high_peaks = time_series_for_high_peaks or [] - self.time_series_for_low_peaks = time_series_for_low_peaks or [] - - self.aggregated_data: pd.DataFrame | None = None - self.clustering_duration_seconds = None - self.tsam: tsam.TimeSeriesAggregation | None = None - - def cluster(self) -> None: - """ - Perform time series clustering/aggregation. - """ - start_time = timeit.default_timer() - - # Determine number of periods for clustering - # If nr_of_periods is None, use segmentation only (no inter-period clustering) - total_periods = int(self.nr_of_time_steps * self.hours_per_time_step / self.hours_per_period) - n_typical_periods = self.nr_of_periods if self.nr_of_periods is not None else total_periods - - # Create aggregation object - self.tsam = tsam.TimeSeriesAggregation( - self.original_data, - noTypicalPeriods=n_typical_periods, - hoursPerPeriod=self.hours_per_period, - resolution=self.hours_per_time_step, - clusterMethod='k_means', - extremePeriodMethod='new_cluster_center' if self.use_extreme_periods else 'None', - weightDict={name: weight for name, weight in self.weights.items() if name in self.original_data.columns}, - addPeakMax=self.time_series_for_high_peaks, - addPeakMin=self.time_series_for_low_peaks, - # Inner-period segmentation parameters - segmentation=self.n_segments is not None, - noSegments=self.n_segments if self.n_segments is not None else 1, - ) - - self.tsam.createTypicalPeriods() - self.aggregated_data = self.tsam.predictOriginalData() - - self.clustering_duration_seconds = timeit.default_timer() - start_time - if logger.isEnabledFor(logging.INFO): - logger.info(self.describe_clusters()) - - def describe_clusters(self) -> str: - description = {} - for cluster in self.get_cluster_indices().keys(): - description[cluster] = [ - str(indexVector[0]) + '...' + str(indexVector[-1]) - for indexVector in self.get_cluster_indices()[cluster] - ] - - if self.use_extreme_periods: - # Zeitreihe rauslöschen: - extreme_periods = self.tsam.extremePeriods.copy() - for key in extreme_periods: - del extreme_periods[key]['profile'] - else: - extreme_periods = {} - - return ( - f'{"":#^80}\n' - f'{" Clustering ":#^80}\n' - f'periods_order:\n' - f'{self.tsam.clusterOrder}\n' - f'clusterPeriodNoOccur:\n' - f'{self.tsam.clusterPeriodNoOccur}\n' - f'index_vectors_of_clusters:\n' - f'{description}\n' - f'{"":#^80}\n' - f'extreme_periods:\n' - f'{extreme_periods}\n' - f'{"":#^80}' - ) - - @property - def use_extreme_periods(self): - return self.time_series_for_high_peaks or self.time_series_for_low_peaks - - def plot(self, colormap: str | None = None, show: bool | None = None) -> PlotResult: - """Plot original vs aggregated data comparison. 
- - Visualizes the original time series (dashed lines) overlaid with - the aggregated/clustered time series (solid lines) for comparison. - - Args: - colormap: Colorscale name for the time series colors. - Defaults to CONFIG.Plotting.default_qualitative_colorscale. - show: Whether to display the figure. - Defaults to CONFIG.Plotting.default_show. - - Returns: - PlotResult containing the comparison figure and underlying data. - - Examples: - >>> clustering.cluster() - >>> clustering.plot() - >>> clustering.plot(colormap='Set2', show=False).to_html('clustering.html') - """ - import plotly.express as px - import xarray as xr - - df_org = self.original_data.copy().rename( - columns={col: f'Original - {col}' for col in self.original_data.columns} - ) - df_agg = self.aggregated_data.copy().rename( - columns={col: f'Aggregated - {col}' for col in self.aggregated_data.columns} - ) - colors = list( - process_colors(colormap or CONFIG.Plotting.default_qualitative_colorscale, list(df_org.columns)).values() - ) - - # Create line plot for original data (dashed) - index_name = df_org.index.name or 'index' - df_org_long = df_org.reset_index().melt(id_vars=index_name, var_name='variable', value_name='value') - fig = px.line(df_org_long, x=index_name, y='value', color='variable', color_discrete_sequence=colors) - for trace in fig.data: - trace.update(line=dict(dash='dash')) - - # Add aggregated data (solid lines) - df_agg_long = df_agg.reset_index().melt(id_vars=index_name, var_name='variable', value_name='value') - fig2 = px.line(df_agg_long, x=index_name, y='value', color='variable', color_discrete_sequence=colors) - for trace in fig2.data: - fig.add_trace(trace) - - fig.update_layout( - title='Original vs Aggregated Data (original = ---)', - xaxis_title='Time in h', - yaxis_title='Value', - ) - - # Build xarray Dataset with both original and aggregated data - data = xr.Dataset( - { - 'original': self.original_data.to_xarray().to_array(dim='variable'), - 'aggregated': self.aggregated_data.to_xarray().to_array(dim='variable'), - } - ) - result = PlotResult(data=data, figure=fig) - - if show is None: - show = CONFIG.Plotting.default_show - if show: - result.show() - - return result - - def get_cluster_indices(self) -> dict[str, list[np.ndarray]]: - """ - Generates a dictionary that maps each cluster to a list of index vectors representing the time steps - assigned to that cluster for each period. - - Returns: - dict: {cluster_0: [index_vector_3, index_vector_7, ...], - cluster_1: [index_vector_1], - ...} - """ - clusters = self.tsam.clusterPeriodNoOccur.keys() - index_vectors = {cluster: [] for cluster in clusters} - - # Use actual timesteps per period, not segment count - period_length = int(self.hours_per_period / self.hours_per_time_step) - total_steps = len(self.tsam.timeSeries) - - for period, cluster_id in enumerate(self.tsam.clusterOrder): - start_idx = period * period_length - end_idx = np.min([start_idx + period_length, total_steps]) - index_vectors[cluster_id].append(np.arange(start_idx, end_idx)) - - return index_vectors - - def get_equation_indices(self, skip_first_index_of_period: bool = True) -> tuple[np.ndarray, np.ndarray]: - """ - Generates pairs of indices for the equations by comparing index vectors of the same cluster. - If `skip_first_index_of_period` is True, the first index of each period is skipped. - - Args: - skip_first_index_of_period (bool): Whether to include or skip the first index of each period. - - Returns: - tuple[np.ndarray, np.ndarray]: Two arrays of indices. 
- """ - idx_var1 = [] - idx_var2 = [] - - # Iterate through cluster index vectors - for index_vectors in self.get_cluster_indices().values(): - if len(index_vectors) <= 1: # Only proceed if cluster has more than one period - continue - - # Process the first vector, optionally skip first index - first_vector = index_vectors[0][1:] if skip_first_index_of_period else index_vectors[0] - - # Compare first vector to others in the cluster - for other_vector in index_vectors[1:]: - if skip_first_index_of_period: - other_vector = other_vector[1:] - - # Compare elements up to the minimum length of both vectors - min_len = min(len(first_vector), len(other_vector)) - idx_var1.extend(first_vector[:min_len]) - idx_var2.extend(other_vector[:min_len]) - - # Convert lists to numpy arrays - return np.array(idx_var1), np.array(idx_var2) - - def get_equation_groups(self, skip_first_index_of_period: bool = True) -> list[list[int]]: - """Get groups of timestep indices that should be equal (inter-cluster). - - Each group contains timesteps at the same position within periods of the same cluster. - E.g., if cluster 0 has periods [0-95] and [192-287], position 5 gives group [5, 197]. - - Args: - skip_first_index_of_period: Skip first timestep of each period (for storage continuity). - - Returns: - List of groups, where each group is a list of timestep indices to equate. - """ - groups = [] - - for index_vectors in self.get_cluster_indices().values(): - if len(index_vectors) <= 1: - continue - - # Determine the length and starting offset - start_offset = 1 if skip_first_index_of_period else 0 - min_len = min(len(v) for v in index_vectors) - start_offset - - # Create a group for each position across all periods in this cluster - for pos in range(min_len): - group = [int(v[pos + start_offset]) for v in index_vectors] - if len(group) > 1: - groups.append(group) - - return groups - - def get_segment_equation_groups(self) -> list[list[int]]: - """Get groups of timestep indices that should be equal (intra-segment). - - Each group contains all timesteps within the same segment. - - Returns: - List of groups, where each group is a list of timestep indices to equate. - """ - if self.n_segments is None: - return [] - - groups = [] - period_length = int(self.hours_per_period / self.hours_per_time_step) - segment_duration_dict = self.tsam.segmentDurationDict['Segment Duration'] - - for period_idx, cluster_id in enumerate(self.tsam.clusterOrder): - period_offset = period_idx * period_length - start_step = 0 - - for seg_idx in range(self.n_segments): - duration = segment_duration_dict[(cluster_id, seg_idx)] - if duration > 1: - # Group all timesteps in this segment - group = [period_offset + start_step + step for step in range(duration)] - groups.append(group) - start_step += duration - - return groups - - def get_segment_equation_indices(self) -> tuple[np.ndarray, np.ndarray]: - """ - Generates pairs of indices for intra-segment equalization. - - When segmentation is enabled, all timesteps within the same segment should have - equal values. This method returns index pairs where each timestep in a segment - is paired with the first timestep of that segment. - - Returns: - tuple[np.ndarray, np.ndarray]: Two arrays of indices. For each pair (i, j), - variable[i] should equal variable[j]. - - Note: - Only generates constraints when n_segments is set. Returns empty arrays otherwise. 
- """ - if self.n_segments is None: - return np.array([]), np.array([]) - - idx_var1 = [] - idx_var2 = [] - - period_length = int(self.hours_per_period / self.hours_per_time_step) - segment_duration_dict = self.tsam.segmentDurationDict['Segment Duration'] - - for period_idx, cluster_id in enumerate(self.tsam.clusterOrder): - period_offset = period_idx * period_length - start_step = 0 - - for seg_idx in range(self.n_segments): - # Get duration for this (cluster, segment) - duration = segment_duration_dict[(cluster_id, seg_idx)] - - # Equate all timesteps in this segment to the first timestep - first_ts = period_offset + start_step - for step in range(1, duration): - idx_var1.append(first_ts) - idx_var2.append(period_offset + start_step + step) - - start_step += duration - - return np.array(idx_var1), np.array(idx_var2) - - -def _parse_cluster_duration(duration: str | float) -> float: - """Convert cluster duration to hours. - - Args: - duration: Either a pandas-style duration string ('1D', '24h', '6h') - or a numeric value in hours. - - Returns: - Duration in hours. - - Examples: - >>> _parse_cluster_duration('1D') - 24.0 - >>> _parse_cluster_duration('6h') - 6.0 - >>> _parse_cluster_duration(24) - 24.0 - """ - import pandas as pd - - if isinstance(duration, (int, float)): - return float(duration) - - # Parse pandas-style duration strings - td = pd.Timedelta(duration) - return td.total_seconds() / 3600 - - -@register_class_for_io -class ClusteringParameters(Interface): - """Parameters for time series clustering. - - This class configures how time series data is clustered into representative - segments using the tsam (time series aggregation module) package. - - Note: - The term "cluster" here refers to clustering time segments (e.g., typical days), - not to be confused with the FlowSystem's "period" dimension (e.g., years). - - Args: - n_clusters: Number of clusters to create (e.g., 8 typical days). - Set to None to skip clustering and only do segmentation. - cluster_duration: Duration of each cluster segment. Can be a pandas-style - string ('1D', '24h', '6h') or a numeric value in hours. - n_segments: Number of segments to create within each cluster (inner-period - clustering). For example, n_segments=4 with cluster_duration='1D' will - reduce 24 hourly timesteps to 4 representative segments per day. - Default is None (no inner-period segmentation). - aggregate_data: If True, aggregate time series data and fix all time-dependent - variables. If False, only fix binary variables. Default is True. - include_storage: Whether to include storage flows in clustering constraints. - If other flows are fixed, fixing storage flows is usually not required. - Default is True. - flexibility_percent: Maximum percentage (0-100) of binary values that can - deviate from the clustered pattern. Default is 0 (no flexibility). - flexibility_penalty: Penalty added to objective for each deviation. - Only applies when flexibility_percent > 0. Default is 0. - time_series_for_high_peaks: List of TimeSeriesData to force inclusion of - segments with high values. - time_series_for_low_peaks: List of TimeSeriesData to force inclusion of - segments with low values. - cluster_order: Pre-computed cluster assignments. DataArray of shape (cluster_period,) - specifying which cluster each period belongs to. If provided, tsam clustering - is skipped. - period_length: Number of timesteps per clustering-period. Required if cluster_order - is provided. - segment_assignment: Pre-computed segment assignments. 
DataArray of shape (cluster, position) - specifying segment ID for each position. Optional. - skip_first_of_period: Whether to skip the first timestep of each period for storage - constraints (to maintain inter-period continuity). Default is True. - - Examples: - Basic usage (8 typical days): - - >>> clustered_fs = flow_system.transform.cluster( - ... n_clusters=8, - ... cluster_duration='1D', - ... ) - - With inner-period segmentation (8 typical days × 4 segments each = 32 timesteps): - - >>> clustered_fs = flow_system.transform.cluster( - ... n_clusters=8, - ... cluster_duration='1D', - ... n_segments=4, # Reduce 24h to 4 segments per day - ... ) - - With pre-computed cluster assignments (external clustering): - - >>> params = fx.ClusteringParameters( - ... n_clusters=8, - ... cluster_duration='1D', - ... cluster_order=xr.DataArray([0, 1, 2, 0, 1, ...], dims=['cluster_period']), - ... period_length=24, - ... ) - >>> clustered_fs = flow_system.transform.cluster(parameters=params) - """ - - def __init__( - self, - n_clusters: int | None, - cluster_duration: str | float, - n_segments: int | None = None, - aggregate_data: bool = True, - include_storage: bool = True, - flexibility_percent: float = 0, - flexibility_penalty: float = 0, - time_series_for_high_peaks: list[TimeSeriesData] | None = None, - time_series_for_low_peaks: list[TimeSeriesData] | None = None, - # Clustering indices (optional - computed from tsam if not provided) - cluster_order: xr.DataArray | None = None, - period_length: int | None = None, - segment_assignment: xr.DataArray | None = None, - skip_first_of_period: bool = True, - # External tsam aggregation for data transformation - tsam_aggregation: tsam.TimeSeriesAggregation | None = None, - ): - import xarray as xr - - self.n_clusters = n_clusters - self.cluster_duration = cluster_duration # Store original for serialization - self.cluster_duration_hours = _parse_cluster_duration(cluster_duration) - self.n_segments = n_segments - self.aggregate_data = aggregate_data - self.include_storage = include_storage - self.flexibility_percent = flexibility_percent - self.flexibility_penalty = flexibility_penalty - self.time_series_for_high_peaks: list[TimeSeriesData] = time_series_for_high_peaks or [] - self.time_series_for_low_peaks: list[TimeSeriesData] = time_series_for_low_peaks or [] - self.skip_first_of_period = skip_first_of_period - self.tsam_aggregation = tsam_aggregation # Not serialized - runtime only - - # Clustering indices - ensure DataArrays have names for IO - if cluster_order is not None: - if isinstance(cluster_order, xr.DataArray): - self.cluster_order = ( - cluster_order.rename('cluster_order') if cluster_order.name is None else cluster_order - ) - else: - self.cluster_order = xr.DataArray(cluster_order, dims=['cluster_period'], name='cluster_order') - else: - self.cluster_order = None - - self.period_length = int(period_length) if period_length is not None else None - - if segment_assignment is not None: - if isinstance(segment_assignment, xr.DataArray): - self.segment_assignment = ( - segment_assignment.rename('segment_assignment') - if segment_assignment.name is None - else segment_assignment - ) - else: - self.segment_assignment = xr.DataArray( - segment_assignment, dims=['cluster', 'position'], name='segment_assignment' - ) - else: - self.segment_assignment = None - - # Auto-populate indices from tsam if provided - if tsam_aggregation is not None and not self.has_indices: - self.populate_from_tsam(tsam_aggregation) - - @property - def has_indices(self) -> 
bool: - """Whether clustering indices have been computed/provided.""" - return self.cluster_order is not None and self.period_length is not None - - @property - def use_extreme_periods(self) -> bool: - """Whether extreme segment selection is enabled.""" - return bool(self.time_series_for_high_peaks or self.time_series_for_low_peaks) - - @property - def use_segmentation(self) -> bool: - """Whether inner-period segmentation is enabled.""" - return self.n_segments is not None - - @property - def labels_for_high_peaks(self) -> list[str]: - """Names of time series used for high peak selection.""" - return [ts.name for ts in self.time_series_for_high_peaks] - - @property - def labels_for_low_peaks(self) -> list[str]: - """Names of time series used for low peak selection.""" - return [ts.name for ts in self.time_series_for_low_peaks] - - def populate_from_tsam(self, aggregation: tsam.TimeSeriesAggregation) -> None: - """Populate clustering indices from a tsam TimeSeriesAggregation object. - - Args: - aggregation: tsam object after calling createTypicalPeriods(). - """ - import xarray as xr - - if not TSAM_AVAILABLE: - raise ImportError("The 'tsam' package is required. Install with 'pip install tsam'.") - - self.period_length = int(aggregation.hoursPerPeriod / aggregation.resolution) - self.cluster_order = xr.DataArray(aggregation.clusterOrder, dims=['cluster_period'], name='cluster_order') - - # Build segment assignment if segmentation is used - if aggregation.segmentation and hasattr(aggregation, 'segmentDurationDict'): - n_clusters = aggregation.noTypicalPeriods - segment_duration_dict = aggregation.segmentDurationDict['Segment Duration'] - - # Build (cluster, position) -> segment_id mapping - arr = np.zeros((n_clusters, self.period_length), dtype=np.int32) - for cluster_id in range(n_clusters): - pos = 0 - for seg_idx in range(aggregation.noSegments): - duration = segment_duration_dict[(cluster_id, seg_idx)] - arr[cluster_id, pos : pos + duration] = seg_idx - pos += duration - - self.segment_assignment = xr.DataArray(arr, dims=['cluster', 'position'], name='segment_assignment') - - def get_cluster_indices(self) -> tuple[np.ndarray, np.ndarray]: - """Get inter-cluster equation pairs (i, j) where var[i] == var[j]. - - Returns: - Tuple of (idx_i, idx_j) arrays of timestep indices to equate. - """ - if self.cluster_order is None or self.period_length is None: - raise ValueError('Clustering indices not set. Call populate_from_tsam() first or provide cluster_order.') - - cluster_to_periods: dict[int, list[int]] = {} - for period_idx, cluster_id in enumerate(self.cluster_order.values): - cluster_to_periods.setdefault(int(cluster_id), []).append(period_idx) - - idx_i, idx_j = [], [] - start_pos = 1 if self.skip_first_of_period else 0 - - for periods in cluster_to_periods.values(): - if len(periods) <= 1: - continue - first_period = periods[0] - for pos in range(start_pos, self.period_length): - first_ts = first_period * self.period_length + pos - for other_period in periods[1:]: - idx_i.append(first_ts) - idx_j.append(other_period * self.period_length + pos) - - return np.array(idx_i, dtype=np.int32), np.array(idx_j, dtype=np.int32) - - def get_segment_indices(self) -> tuple[np.ndarray, np.ndarray]: - """Get intra-segment equation pairs (i, j) where var[i] == var[j]. - - Returns: - Tuple of (idx_i, idx_j) arrays of timestep indices to equate. 
- """ - if self.segment_assignment is None: - return np.array([], dtype=np.int32), np.array([], dtype=np.int32) - - if self.cluster_order is None or self.period_length is None: - raise ValueError('Clustering indices not set. Call populate_from_tsam() first or provide cluster_order.') - - idx_i, idx_j = [], [] - seg_arr = self.segment_assignment.values # (cluster, position) - - for period_idx, cluster_id in enumerate(self.cluster_order.values): - period_offset = period_idx * self.period_length - segment_ids = seg_arr[int(cluster_id)] # (position,) - - # Group positions by segment - for seg_id in np.unique(segment_ids): - positions = np.where(segment_ids == seg_id)[0] - if len(positions) > 1: - first_ts = period_offset + positions[0] - for pos in positions[1:]: - idx_i.append(first_ts) - idx_j.append(period_offset + pos) - - return np.array(idx_i, dtype=np.int32), np.array(idx_j, dtype=np.int32) - - -class ClusteringModel(Submodel): - """Model that adds clustering constraints to equate variables across clustered time segments. - - Creates equations that equate variable values at corresponding time indices within the same cluster, - and optionally allows binary variables to deviate with a penalty. - """ - - def __init__( - self, - model: FlowSystemModel, - clustering_parameters: ClusteringParameters, - flow_system: FlowSystem, - components_to_clusterize: list[Component] | None = None, - period_selector: int | str | None = None, - scenario_selector: str | None = None, - ): - """ - Args: - model: The FlowSystemModel to add constraints to. - clustering_parameters: Parameters controlling clustering behavior (must have indices populated). - flow_system: The FlowSystem being optimized. - components_to_clusterize: Components to apply clustering to. If None, all components. - period_selector: If provided, only add constraints for this period (for multi-period FlowSystems). - scenario_selector: If provided, only add constraints for this scenario (for multi-scenario FlowSystems). - """ - # Include period/scenario in label for multi-dimensional cases - label_suffix = '' - if period_selector is not None: - label_suffix += f'|{period_selector}' - if scenario_selector is not None: - label_suffix += f'|{scenario_selector}' - - super().__init__(model, label_of_element='Clustering', label_of_model=f'Clustering{label_suffix}') - self.flow_system = flow_system - self.clustering_parameters = clustering_parameters - self.components_to_clusterize = components_to_clusterize - self.period_selector = period_selector - self.scenario_selector = scenario_selector - - def do_modeling(self): - """Create equality constraints for clustered time indices. - - Equalizes: - - flow_rate: continuous flow variables (batched into single constraint) - - status: binary on/off variables (individual constraints) - - inside_piece: piecewise segment binaries (individual constraints) - """ - if not self.clustering_parameters.has_indices: - raise ValueError( - 'ClusteringParameters must have indices populated. ' - 'Call populate_from_tsam() or provide cluster_order/period_length directly.' 
- ) - - components = self.components_to_clusterize or list(self.flow_system.components.values()) - - # Collect variables to equalize, grouped by type - continuous_vars: dict[str, linopy.Variable] = {} - binary_vars: dict[str, linopy.Variable] = {} - - for component in components: - if isinstance(component, Storage) and not self.clustering_parameters.include_storage: - continue - - for flow in component.inputs + component.outputs: - # Continuous: flow_rate (when aggregating data) - if self.clustering_parameters.aggregate_data: - name = f'{flow.label_full}|flow_rate' - if name in component.submodel.variables: - continuous_vars[name] = component.submodel.variables[name] - - # Binary: status - name = f'{flow.label_full}|status' - if name in component.submodel.variables: - binary_vars[name] = component.submodel.variables[name] - - # Binary: piecewise segment selection - piecewise = getattr(component.submodel, 'piecewise_conversion', None) - if piecewise is not None: - for piece in piecewise.pieces: - if piece.inside_piece is not None: - binary_vars[piece.inside_piece.name] = piece.inside_piece - - # Create constraints from clustering parameters - params = self.clustering_parameters - - for constraint_type, idx_pair in [ - ('cluster', params.get_cluster_indices()), - ('segment', params.get_segment_indices()), - ]: - if len(idx_pair[0]) == 0: - continue - - # Batch continuous variables into single constraint - if continuous_vars: - self._add_equality_constraint(continuous_vars, idx_pair, f'base_{constraint_type}') - - # Individual constraints for binaries (needed for flexibility correction vars) - for var in binary_vars.values(): - self._add_equality_constraint( - {var.name: var}, idx_pair, f'base_{constraint_type}|{var.name}', allow_flexibility=True - ) - - # Add penalty for flexibility deviations - self._add_flexibility_penalty() - - def _add_equality_constraint( - self, - variables: dict[str, linopy.Variable], - indices: tuple[np.ndarray, np.ndarray], - suffix: str, - allow_flexibility: bool = False, - ) -> None: - """Add equality constraint: var[idx_i] == var[idx_j] for all index pairs. - - Args: - variables: Variables to constrain (batched if multiple). - indices: Tuple of (idx_i, idx_j) arrays - timesteps to equate. - suffix: Constraint name suffix. - allow_flexibility: If True, add correction variables for binaries. 
- """ - import linopy - - idx_i, idx_j = indices - n_equations = len(idx_i) - - # Build constraint expression for each variable - expressions = [] - for name, var in variables.items(): - if 'time' not in var.dims: - continue - - # For multi-period/scenario, select only the relevant slice - # Each period/scenario has its own clustering indices - if self.period_selector is not None and 'period' in var.dims: - var = var.sel(period=self.period_selector) - if self.scenario_selector is not None and 'scenario' in var.dims: - var = var.sel(scenario=self.scenario_selector) - - # Compute difference: var[idx_i] - var[idx_j] - diff = var.isel(time=idx_i) - var.isel(time=idx_j) - - # Replace time dim with integer eq_idx (avoids duplicate datetime coords) - diff = diff.rename({'time': 'eq_idx'}).assign_coords(eq_idx=np.arange(n_equations)) - expressions.append(diff.expand_dims(variable=[name])) - - if not expressions: - return - - # Merge into single expression with 'variable' dimension - lhs = linopy.merge(*expressions, dim='variable') if len(expressions) > 1 else expressions[0] - - # Add flexibility for binaries - if allow_flexibility and self.clustering_parameters.flexibility_percent > 0: - var_name = next(iter(variables)) # Single variable for binary case - if var_name in self._model.variables.binaries: - lhs = self._add_binary_flexibility(lhs, n_equations, suffix, var_name) - - self.add_constraints(lhs == 0, short_name=f'equate_{suffix}') - - def _add_binary_flexibility(self, lhs, n_equations: int, suffix: str, var_name: str): - """Add correction variables to allow limited binary deviations.""" - coords = [np.arange(n_equations)] - dims = ['eq_idx'] - - k_up = self.add_variables(binary=True, coords=coords, dims=dims, short_name=f'k_up_{suffix}|{var_name}') - k_down = self.add_variables(binary=True, coords=coords, dims=dims, short_name=f'k_down_{suffix}|{var_name}') - - # Modified equation: diff + k_up - k_down == 0 - lhs = lhs + k_up - k_down - - # At most one correction per equation - self.add_constraints(k_up + k_down <= 1, short_name=f'lock_k_{suffix}|{var_name}') - - # Limit total corrections - max_corrections = int(self.clustering_parameters.flexibility_percent / 100 * n_equations) - self.add_constraints( - k_up.sum('eq_idx') + k_down.sum('eq_idx') <= max_corrections, - short_name=f'limit_k_{suffix}|{var_name}', - ) - - return lhs - - def _add_flexibility_penalty(self): - """Add penalty cost for flexibility correction variables.""" - penalty = self.clustering_parameters.flexibility_penalty - if self.clustering_parameters.flexibility_percent == 0 or penalty == 0: - return - - from .effects import PENALTY_EFFECT_LABEL - - for var in self.variables_direct.values(): - sum_dim = 'eq_idx' if 'eq_idx' in var.dims else 'time' - self._model.effects.add_share_to_effects( - name='Clustering', - expressions={PENALTY_EFFECT_LABEL: (var * penalty).sum(sum_dim)}, - target='periodic', - ) - - -class TypicalPeriodsModel(Submodel): - """Model that adds storage inter-period linking for typical periods optimization. - - When using cluster_reduce(), timesteps are reduced to only typical (representative) - periods. This model creates variables and constraints to track storage state - across the full original time horizon using boundary state variables. - - The approach: - 1. Create SOC_boundary[d] for each original period d (0 to n_original_periods) - 2. Compute delta_SOC[c] for each typical period c (change in SOC during period) - 3. Link: SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]] - 4. 
Optionally enforce cyclic: SOC_boundary[0] = SOC_boundary[n_original_periods] - - This allows the optimizer to properly value storage for long-term (seasonal) - patterns while only solving for the typical period timesteps. - """ - - def __init__( - self, - model: FlowSystemModel, - flow_system: FlowSystem, - cluster_order: np.ndarray | list, - cluster_occurrences: dict[int, int], - n_typical_periods: int, - timesteps_per_period: int, - storage_cyclic: bool = True, - ): - """ - Args: - model: The FlowSystemModel to add constraints to. - flow_system: The FlowSystem being optimized. - cluster_order: Array indicating which typical period (cluster) each original - period belongs to. Length = n_original_periods. - cluster_occurrences: Dict mapping cluster_id to number of original periods - it represents. - n_typical_periods: Number of typical (representative) periods. - timesteps_per_period: Number of timesteps in each period. - storage_cyclic: If True, enforce SOC_boundary[0] = SOC_boundary[end]. - """ - super().__init__(model, label_of_element='TypicalPeriods', label_of_model='TypicalPeriods') - self.flow_system = flow_system - self.cluster_order = np.array(cluster_order) - self.cluster_occurrences = cluster_occurrences - self.n_typical_periods = n_typical_periods - self.timesteps_per_period = timesteps_per_period - self.storage_cyclic = storage_cyclic - self.n_original_periods = len(self.cluster_order) - - @classmethod - def from_cluster_structure( - cls, - model: FlowSystemModel, - flow_system: FlowSystem, - cluster_structure, # aggregation.ClusterStructure - storage_cyclic: bool = True, - ) -> TypicalPeriodsModel: - """Create TypicalPeriodsModel from a ClusterStructure. - - This is the recommended way to create TypicalPeriodsModel when using - the new aggregation API, as it accepts the generic ClusterStructure - from any aggregation backend. - - Args: - model: The FlowSystemModel to add constraints to. - flow_system: The FlowSystem being optimized. - cluster_structure: ClusterStructure from flixopt.aggregation module. - storage_cyclic: If True, enforce SOC_boundary[0] = SOC_boundary[end]. - - Returns: - Configured TypicalPeriodsModel instance. - - Example: - >>> from flixopt.aggregation import ClusterStructure - >>> structure = ClusterStructure(...) - >>> model = TypicalPeriodsModel.from_cluster_structure(model, flow_system, structure) - """ - # Extract data from ClusterStructure - cluster_order = cluster_structure.cluster_order.values - n_clusters = ( - int(cluster_structure.n_clusters) - if isinstance(cluster_structure.n_clusters, (int, np.integer)) - else int(cluster_structure.n_clusters.values) - ) - - # Convert cluster_occurrences DataArray to dict - cluster_occurrences = {} - for c in range(n_clusters): - occ = cluster_structure.cluster_occurrences.sel(cluster=c) - cluster_occurrences[c] = int(occ.values) - - return cls( - model=model, - flow_system=flow_system, - cluster_order=cluster_order, - cluster_occurrences=cluster_occurrences, - n_typical_periods=n_clusters, - timesteps_per_period=cluster_structure.timesteps_per_cluster, - storage_cyclic=storage_cyclic, - ) - - def do_modeling(self): - """Create SOC boundary variables and inter-period linking constraints. 
- - For each storage: - - SOC_boundary[d]: State of charge at start of original period d - - delta_SOC[c]: Change in SOC during typical period c - - Linking: SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]] - """ - - storages = list(self.flow_system.storages.values()) - if not storages: - logger.info('No storages found - skipping inter-period linking') - return - - logger.info( - f'Adding inter-period storage linking for {len(storages)} storages ' - f'({self.n_original_periods} original periods, {self.n_typical_periods} typical)' - ) - - for storage in storages: - self._add_storage_linking(storage) - - def _add_storage_linking(self, storage) -> None: - """Add inter-period linking constraints for a single storage. - - Args: - storage: Storage component to add linking for. - """ - import xarray as xr - - label = storage.label - - # Get the charge state variable from the storage's submodel - charge_state_name = f'{label}|charge_state' - if charge_state_name not in storage.submodel.variables: - logger.warning(f'Storage {label} has no charge_state variable - skipping') - return - - charge_state = storage.submodel.variables[charge_state_name] - - # Get storage capacity bounds (may have period/scenario dimensions) - capacity = storage.capacity_in_flow_hours - if hasattr(capacity, 'fixed_size') and capacity.fixed_size is not None: - cap_value = capacity.fixed_size - elif hasattr(capacity, 'maximum') and capacity.maximum is not None: - cap_value = capacity.maximum - else: - cap_value = 1e9 # Large default - - # Create SOC_boundary variables for each original period boundary - # We need n_original_periods + 1 boundaries (start of first period through end of last) - n_boundaries = self.n_original_periods + 1 - boundary_coords = [np.arange(n_boundaries)] - boundary_dims = ['period_boundary'] - - # Build bounds - handle both scalar and multi-dimensional cap_value - # If cap_value has period/scenario dims, we need to include them - if isinstance(cap_value, xr.DataArray) and cap_value.dims: - # cap_value has dimensions (e.g., period, scenario) - need to broadcast - extra_dims = list(cap_value.dims) - extra_coords = {dim: cap_value.coords[dim].values for dim in extra_dims} - - # Add extra dims/coords to the variable - boundary_dims = ['period_boundary'] + extra_dims - boundary_coords = [np.arange(n_boundaries)] + [extra_coords[d] for d in extra_dims] - - # Build lb and ub with all dimensions - lb_coords = {'period_boundary': np.arange(n_boundaries), **extra_coords} - lb_shape = [n_boundaries] + [len(extra_coords[d]) for d in extra_dims] - lb = xr.DataArray( - np.zeros(lb_shape), - coords=lb_coords, - dims=boundary_dims, - ) - - # Broadcast cap_value across period_boundary dimension - ub = cap_value.expand_dims({'period_boundary': n_boundaries}, axis=0) - ub = ub.assign_coords(period_boundary=np.arange(n_boundaries)) - else: - # Scalar cap_value - simple case - if hasattr(cap_value, 'item'): - cap_value = float(cap_value.item()) - else: - cap_value = float(cap_value) - lb = xr.DataArray(0.0, coords={'period_boundary': np.arange(n_boundaries)}, dims=['period_boundary']) - ub = xr.DataArray(cap_value, coords={'period_boundary': np.arange(n_boundaries)}, dims=['period_boundary']) - - soc_boundary = self.add_variables( - lower=lb, - upper=ub, - coords=boundary_coords, - dims=boundary_dims, - short_name=f'SOC_boundary|{label}', - ) - - # Pre-compute delta_SOC for each typical period - # delta_SOC[c] = charge_state[c, end] - charge_state[c, start] - # We store these as a dict since linopy 
expressions can't be concat'd with xr.concat - delta_soc_dict = {} - for c in range(self.n_typical_periods): - # Get start and end timestep indices for this typical period - start_idx = c * self.timesteps_per_period - end_idx = (c + 1) * self.timesteps_per_period # charge_state has extra timestep at end - - # charge_state at end - charge_state at start of typical period c - # Note: charge_state is indexed by time with extra timestep - delta_soc_dict[c] = charge_state.isel(time=end_idx) - charge_state.isel(time=start_idx) - - # Create linking constraints: - # SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]] - for d in range(self.n_original_periods): - c = int(self.cluster_order[d]) # Which typical period this original period maps to - lhs = soc_boundary.isel(period_boundary=d + 1) - soc_boundary.isel(period_boundary=d) - delta_soc_dict[c] - self.add_constraints(lhs == 0, short_name=f'inter_period_link|{label}|{d}') - - # Cyclic constraint: SOC_boundary[0] = SOC_boundary[end] - if self.storage_cyclic: - lhs = soc_boundary.isel(period_boundary=0) - soc_boundary.isel(period_boundary=self.n_original_periods) - self.add_constraints(lhs == 0, short_name=f'cyclic|{label}') - - logger.debug(f'Added inter-period linking for storage {label}') diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index f93bda411..018e89ad5 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -38,6 +38,7 @@ import pyvis + from .aggregation import AggregationInfo from .solvers import _Solver from .structure import TimeSeriesWeights from .types import Effect_TPS, Numeric_S, Numeric_TPS, NumericOrBool @@ -231,8 +232,8 @@ def __init__( # Solution dataset - populated after optimization or loaded from file self._solution: xr.Dataset | None = None - # Typical periods info - populated by transform.cluster_reduce() - self._cluster_info: dict | None = None + # Aggregation info - populated by transform.cluster_reduce() or transform.aggregate() + self._aggregation_info: AggregationInfo | None = None # Statistics accessor cache - lazily initialized, invalidated on new solution self._statistics: StatisticsAccessor | None = None @@ -1292,68 +1293,40 @@ def build_model(self, normalize_weights: bool = True) -> FlowSystem: self.model.do_modeling() - # Add typical periods storage modeling if this is a reduced FlowSystem - if self._cluster_info is not None: - self._add_typical_periods_modeling() + # Add inter-cluster storage linking if this is an aggregated FlowSystem + if self._aggregation_info is not None: + self._add_inter_cluster_linking() return self - def _apply_timestep_weights(self) -> None: - """Apply timestep weights to the model for cluster_reduce() optimization. - - .. deprecated:: - This method is deprecated. Cluster weights are now stored directly on FlowSystem - as `cluster_weight` and accessed via `FlowSystemModel.cluster_weight` and - `FlowSystemModel.aggregation_weight`. - """ - warnings.warn( - '_apply_timestep_weights() is deprecated. 
Cluster weights are now stored directly ' - 'on FlowSystem as `cluster_weight` and accessed via FlowSystemModel.cluster_weight.', - DeprecationWarning, - stacklevel=2, - ) - info = self._cluster_info - if info is None: - return - - timestep_weights = info['timestep_weights'] - - # Store timestep weights on the model for backward compatibility - self.model.timestep_weights = xr.DataArray( - timestep_weights, - coords={'time': self.timesteps}, - dims=['time'], - name='timestep_weights', - ) - logger.info(f'Applied timestep weights for typical periods: sum={sum(timestep_weights)}') - - def _add_typical_periods_modeling(self) -> None: - """Add storage inter-period linking for typical periods optimization. + def _add_inter_cluster_linking(self) -> None: + """Add storage inter-cluster linking for aggregated optimization. Creates SOC_boundary variables that link storage states between sequential - periods in the original time series, using the delta SOC from typical periods. + periods in the original time series, using the delta SOC from representative periods. """ - from .clustering import TypicalPeriodsModel + from .aggregation.storage_linking import InterClusterLinking - info = self._cluster_info + info = self._aggregation_info if info is None: return - if not info.get('storage_inter_period_linking', True): - logger.info('Storage inter-period linking disabled') + if not info.storage_inter_cluster_linking: + logger.info('Storage inter-cluster linking disabled') + return + + if info.result.cluster_structure is None: + logger.warning('No cluster structure available for inter-cluster linking') return - # Create typical periods model for storage linking - typical_periods_model = TypicalPeriodsModel( + # Create inter-cluster linking model for storage + linking_model = InterClusterLinking( model=self.model, flow_system=self, - cluster_order=info['cluster_order'], - cluster_occurrences=info['cluster_occurrences'], - n_typical_periods=info['n_clusters'], - timesteps_per_period=info['timesteps_per_cluster'], - storage_cyclic=info.get('storage_cyclic', True), + cluster_structure=info.result.cluster_structure, + storage_cyclic=info.storage_cyclic, ) - typical_periods_model.do_modeling() + linking_model.do_modeling() def solve(self, solver: _Solver) -> FlowSystem: """ diff --git a/flixopt/optimization.py b/flixopt/optimization.py index a576477e0..0d643d1b0 100644 --- a/flixopt/optimization.py +++ b/flixopt/optimization.py @@ -1,11 +1,13 @@ """ This module contains the Optimization functionality for the flixopt framework. It is used to optimize a FlowSystemModel for a given FlowSystem through a solver. -There are three different Optimization types: + +There are two Optimization types: 1. Optimization: Optimizes the FlowSystemModel for the full FlowSystem - 2. ClusteredOptimization: Optimizes the FlowSystemModel for the full FlowSystem, but clusters the TimeSeriesData. - This simplifies the mathematical model and usually speeds up the solving process. - 3. SegmentedOptimization: Solves a FlowSystemModel for each individual Segment of the FlowSystem. + 2. SegmentedOptimization: Solves a FlowSystemModel for each individual Segment of the FlowSystem. + +For time series aggregation (clustering), use FlowSystem.transform.aggregate() or +FlowSystem.transform.cluster_reduce() instead. 
""" from __future__ import annotations @@ -16,27 +18,22 @@ import sys import timeit import warnings -from collections import Counter from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable -import numpy as np from tqdm import tqdm from . import io as fx_io -from .clustering import Clustering, ClusteringModel, ClusteringParameters from .components import Storage from .config import CONFIG, DEPRECATION_REMOVAL_VERSION, SUCCESS_LEVEL -from .core import DataConverter, TimeSeriesData, drop_constant_arrays from .effects import PENALTY_EFFECT_LABEL from .features import InvestmentModel -from .flow_system import FlowSystem from .results import Results, SegmentedResults if TYPE_CHECKING: import pandas as pd import xarray as xr - from .elements import Component + from .flow_system import FlowSystem from .solvers import _Solver from .structure import FlowSystemModel @@ -357,162 +354,6 @@ def modeled(self) -> bool: return True if self.model is not None else False -class ClusteredOptimization(Optimization): - """ - ClusteredOptimization reduces computational complexity by clustering time series into typical periods. - - This optimization approach clusters time series data using techniques from the tsam library to identify - representative time periods, significantly reducing computation time while maintaining solution accuracy. - - Note: - The quality of the solution depends on the choice of aggregation parameters. - The optimal parameters depend on the specific problem and the characteristics of the time series data. - For more information, refer to the [tsam documentation](https://tsam.readthedocs.io/en/latest/). - - Args: - name: Name of the optimization - flow_system: FlowSystem to be optimized - clustering_parameters: Parameters for clustering. See ClusteringParameters class documentation - components_to_clusterize: list of Components to perform aggregation on. If None, all components are aggregated. - This equalizes variables in the components according to the typical periods computed in the aggregation - folder: Folder where results should be saved. If None, current working directory is used - normalize_weights: Whether to automatically normalize the weights of scenarios to sum up to 1 when solving - - Attributes: - clustering (Clustering | None): Contains the clustered time series data - clustering_model (ClusteringModel | None): Contains Variables and Constraints that equalize clusters of the time series data - """ - - def __init__( - self, - name: str, - flow_system: FlowSystem, - clustering_parameters: ClusteringParameters, - components_to_clusterize: list[Component] | None = None, - folder: pathlib.Path | None = None, - normalize_weights: bool = True, - ): - warnings.warn( - f'ClusteredOptimization is deprecated and will be removed in v{DEPRECATION_REMOVAL_VERSION}. ' - 'Use FlowSystem.transform.cluster() followed by FlowSystem.optimize(solver) instead. 
' - 'Example: clustered_fs = flow_system.transform.cluster(n_clusters=8, cluster_duration="1D"); ' - 'clustered_fs.optimize(solver)', - DeprecationWarning, - stacklevel=2, - ) - # Note: Multi-period and multi-scenario are now supported via the new transform.cluster() API - # Skip parent deprecation warning by calling common init directly - _initialize_optimization_common( - self, - name=name, - flow_system=flow_system, - folder=folder, - normalize_weights=normalize_weights, - ) - self.clustering_parameters = clustering_parameters - self.components_to_clusterize = components_to_clusterize - self.clustering: Clustering | None = None - self.clustering_model: ClusteringModel | None = None - - def do_modeling(self) -> ClusteredOptimization: - t_start = timeit.default_timer() - self.flow_system.connect_and_transform() - self._perform_clustering() - - # Model the System - self.model = self.flow_system.create_model(self.normalize_weights) - self.model.do_modeling() - # Add Clustering Submodel after modeling the rest - # Populate clustering indices from tsam - self.clustering_parameters.populate_from_tsam(self.clustering.tsam) - self.clustering_model = ClusteringModel( - self.model, self.clustering_parameters, self.flow_system, self.components_to_clusterize - ) - self.clustering_model.do_modeling() - self.durations['modeling'] = round(timeit.default_timer() - t_start, 2) - return self - - def _perform_clustering(self): - from .clustering import Clustering - - t_start_agg = timeit.default_timer() - - # Validation - dt_min = float(self.flow_system.timestep_duration.min().item()) - dt_max = float(self.flow_system.timestep_duration.max().item()) - if not dt_min == dt_max: - raise ValueError( - f'Clustering failed due to inconsistent time step sizes:delta_t varies from {dt_min} to {dt_max} hours.' - ) - ratio = self.clustering_parameters.cluster_duration_hours / dt_max - if not np.isclose(ratio, round(ratio), atol=1e-9): - raise ValueError( - f'The selected cluster_duration={self.clustering_parameters.cluster_duration_hours}h does not match the time ' - f'step size of {dt_max} hours. It must be an integer multiple of {dt_max} hours.' 
- ) - - logger.info(f'{"":#^80}') - logger.info(f'{" Clustering TimeSeries Data ":#^80}') - - ds = self.flow_system.to_dataset() - - temporaly_changing_ds = drop_constant_arrays(ds, dim='time') - - # Clustering - creation of clustered timeseries: - self.clustering = Clustering( - original_data=temporaly_changing_ds.to_dataframe(), - hours_per_time_step=float(dt_min), - hours_per_period=self.clustering_parameters.cluster_duration_hours, - nr_of_periods=self.clustering_parameters.n_clusters, - weights=self.calculate_clustering_weights(temporaly_changing_ds), - time_series_for_high_peaks=self.clustering_parameters.labels_for_high_peaks, - time_series_for_low_peaks=self.clustering_parameters.labels_for_low_peaks, - ) - - self.clustering.cluster() - result = self.clustering.plot(show=CONFIG.Plotting.default_show) - result.to_html(self.folder / 'clustering.html') - if self.clustering_parameters.aggregate_data: - ds = self.flow_system.to_dataset() - for name, series in self.clustering.aggregated_data.items(): - da = ( - DataConverter.to_dataarray(series, self.flow_system.coords) - .rename(name) - .assign_attrs(ds[name].attrs) - ) - if TimeSeriesData.is_timeseries_data(da): - da = TimeSeriesData.from_dataarray(da) - - ds[name] = da - - self.flow_system = FlowSystem.from_dataset(ds) - self.flow_system.connect_and_transform() - self.durations['clustering'] = round(timeit.default_timer() - t_start_agg, 2) - - @classmethod - def calculate_clustering_weights(cls, ds: xr.Dataset) -> dict[str, float]: - """Calculate weights for all datavars in the dataset. Weights are pulled from the attrs of the datavars.""" - groups = [da.attrs.get('clustering_group') for da in ds.data_vars.values() if 'clustering_group' in da.attrs] - group_counts = Counter(groups) - - # Calculate weight for each group (1/count) - group_weights = {group: 1 / count for group, count in group_counts.items()} - - weights = {} - for name, da in ds.data_vars.items(): - clustering_group = da.attrs.get('clustering_group') - group_weight = group_weights.get(clustering_group) - if group_weight is not None: - weights[name] = group_weight - else: - weights[name] = da.attrs.get('clustering_weight', 1) - - if np.all(np.isclose(list(weights.values()), 1, atol=1e-6)): - logger.info('All Clustering weights were set to 1') - - return weights - - class SegmentedOptimization: """Solve large optimization problems by dividing time horizon into (overlapping) segments. diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index a713d6ee0..9b932d4b4 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -616,7 +616,7 @@ def cluster_reduce( Returns: A new FlowSystem with reduced timesteps (only typical clusters). - The FlowSystem has metadata stored in `_cluster_info` for weighting. + The FlowSystem has metadata stored in `_aggregation_info` for expansion. Raises: ValueError: If timestep sizes are inconsistent. 
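For orientation, the round trip this docstring describes (reduce, optimize, expand) can be sketched with the APIs used elsewhere in this module; this is a sketch only, and the solver settings are illustrative, not prescribed:

    >>> fs_reduced = flow_system.transform.cluster_reduce(n_clusters=8, cluster_duration='1D')
    >>> fs_reduced.optimize(fx.solvers.HighsSolver(mip_gap=0.01))
    >>> fs_expanded = fs_reduced.transform.expand_solution()  # back to the original timesteps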
@@ -648,7 +648,9 @@ def cluster_reduce( - A 5-10% safety margin on sizes is recommended for the dispatch stage - Storage linking adds SOC_boundary variables to track state between clusters """ - from .clustering import Clustering + import tsam.timeseriesaggregation as tsam + + from .aggregation import AggregationInfo, AggregationResult, ClusterStructure from .core import TimeSeriesData, drop_constant_arrays from .flow_system import FlowSystem @@ -682,10 +684,11 @@ def cluster_reduce( ds = self._fs.to_dataset(include_solution=False) - # Cluster each (period, scenario) combination - clustering_results: dict[tuple, Clustering] = {} + # Cluster each (period, scenario) combination using tsam directly + tsam_results: dict[tuple, tsam.TimeSeriesAggregation] = {} cluster_orders: dict[tuple, np.ndarray] = {} cluster_occurrences_all: dict[tuple, dict] = {} + use_extreme_periods = bool(time_series_for_high_peaks or time_series_for_low_peaks) for period_label in periods: for scenario_label in scenarios: @@ -693,30 +696,35 @@ def cluster_reduce( selector = {k: v for k, v in [('period', period_label), ('scenario', scenario_label)] if v is not None} ds_slice = ds.sel(**selector, drop=True) if selector else ds temporaly_changing_ds = drop_constant_arrays(ds_slice, dim='time') + df = temporaly_changing_ds.to_dataframe() if selector: logger.info(f'Clustering {", ".join(f"{k}={v}" for k, v in selector.items())}...') - clustering = Clustering( - original_data=temporaly_changing_ds.to_dataframe(), - hours_per_time_step=dt, - hours_per_period=hours_per_cluster, - nr_of_periods=n_clusters, - weights=weights or self._calculate_clustering_weights(temporaly_changing_ds), - time_series_for_high_peaks=time_series_for_high_peaks or [], - time_series_for_low_peaks=time_series_for_low_peaks or [], + # Use tsam directly + clustering_weights = weights or self._calculate_clustering_weights(temporaly_changing_ds) + tsam_agg = tsam.TimeSeriesAggregation( + df, + noTypicalPeriods=n_clusters, + hoursPerPeriod=hours_per_cluster, + resolution=dt, + clusterMethod='k_means', + extremePeriodMethod='new_cluster_center' if use_extreme_periods else 'None', + weightDict={name: w for name, w in clustering_weights.items() if name in df.columns}, + addPeakMax=time_series_for_high_peaks or [], + addPeakMin=time_series_for_low_peaks or [], ) - clustering.cluster() + tsam_agg.createTypicalPeriods() - clustering_results[key] = clustering - cluster_orders[key] = clustering.tsam.clusterOrder - cluster_occurrences_all[key] = clustering.tsam.clusterPeriodNoOccur + tsam_results[key] = tsam_agg + cluster_orders[key] = tsam_agg.clusterOrder + cluster_occurrences_all[key] = tsam_agg.clusterPeriodNoOccur - # Use first clustering for structure + # Use first result for structure first_key = (periods[0], scenarios[0]) - first_clustering = clustering_results[first_key] - n_reduced_timesteps = len(first_clustering.tsam.typicalPeriods) - actual_n_clusters = len(first_clustering.tsam.clusterPeriodNoOccur) + first_tsam = tsam_results[first_key] + n_reduced_timesteps = len(first_tsam.typicalPeriods) + actual_n_clusters = len(first_tsam.clusterPeriodNoOccur) # Create new time index (needed for weights and typical periods) new_time_index = pd.date_range( @@ -742,8 +750,8 @@ def _build_weights_for_key(key: tuple) -> xr.DataArray: # Build typical periods DataArrays keyed by (variable_name, (period, scenario)) typical_das: dict[str, dict[tuple, xr.DataArray]] = {} - for key, clustering in clustering_results.items(): - typical_df = clustering.tsam.typicalPeriods + 
for key, tsam_agg in tsam_results.items(): + typical_df = tsam_agg.typicalPeriods for col in typical_df.columns: typical_das.setdefault(col, {})[key] = xr.DataArray( typical_df[col].values, dims=['time'], coords={'time': new_time_index} @@ -790,21 +798,48 @@ def _build_weights_for_key(key: tuple) -> xr.DataArray: if isinstance(ics, str) and ics == 'equals_final': storage.initial_charge_state = 0 - reduced_fs._cluster_info = { - 'clustering_results': clustering_results, - 'cluster_orders': cluster_orders, - 'cluster_occurrences': cluster_occurrences_all, - 'timestep_weights': timestep_weights, - 'n_clusters': actual_n_clusters, - 'timesteps_per_cluster': timesteps_per_cluster, - 'storage_inter_period_linking': storage_inter_period_linking, - 'storage_cyclic': storage_cyclic, - 'original_fs': self._fs, - 'has_periods': has_periods, - 'has_scenarios': has_scenarios, - 'cluster_order': cluster_orders[first_key], - 'clustering': first_clustering, - } + # Build AggregationInfo for inter-cluster linking and solution expansion + n_original_timesteps = len(self._fs.timesteps) + + # Build timestep_mapping: maps each original timestep to its representative + timestep_mapping = np.zeros(n_original_timesteps, dtype=np.int32) + for period_idx, cluster_id in enumerate(cluster_orders[first_key]): + for pos in range(timesteps_per_cluster): + original_idx = period_idx * timesteps_per_cluster + pos + if original_idx < n_original_timesteps: + representative_idx = cluster_id * timesteps_per_cluster + pos + timestep_mapping[original_idx] = representative_idx + + # Build cluster_occurrences as DataArray + first_occurrences = cluster_occurrences_all[first_key] + cluster_occurrences_da = xr.DataArray( + [first_occurrences.get(c, 0) for c in range(actual_n_clusters)], + dims=['cluster'], + name='cluster_occurrences', + ) + + cluster_structure = ClusterStructure( + cluster_order=xr.DataArray(cluster_orders[first_key], dims=['original_period'], name='cluster_order'), + cluster_occurrences=cluster_occurrences_da, + n_clusters=actual_n_clusters, + timesteps_per_cluster=timesteps_per_cluster, + ) + + aggregation_result = AggregationResult( + timestep_mapping=xr.DataArray(timestep_mapping, dims=['original_time'], name='timestep_mapping'), + n_representatives=n_reduced_timesteps, + representative_weights=timestep_weights.rename('representative_weights'), + cluster_structure=cluster_structure, + original_data=ds, + ) + + reduced_fs._aggregation_info = AggregationInfo( + result=aggregation_result, + original_flow_system=self._fs, + backend_name='tsam', + storage_inter_cluster_linking=storage_inter_period_linking, + storage_cyclic=storage_cyclic, + ) return reduced_fs @@ -908,21 +943,29 @@ def expand_solution(self) -> FlowSystem: from .flow_system import FlowSystem # Validate - if not hasattr(self._fs, '_cluster_info') or self._fs._cluster_info is None: + if self._fs._aggregation_info is None: raise ValueError( - 'expand_solution() requires a FlowSystem created with cluster_reduce(). ' - 'This FlowSystem has no cluster info.' + 'expand_solution() requires a FlowSystem created with cluster_reduce() or aggregate(). ' + 'This FlowSystem has no aggregation info.' ) if self._fs.solution is None: raise ValueError('FlowSystem has no solution. 
Run optimize() or solve() first.') - info = self._fs._cluster_info - timesteps_per_cluster = info['timesteps_per_cluster'] - original_fs: FlowSystem = info['original_fs'] - n_clusters = info['n_clusters'] - has_periods = info.get('has_periods', False) - has_scenarios = info.get('has_scenarios', False) - cluster_orders = info.get('cluster_orders', {(None, None): info['cluster_order']}) + info = self._fs._aggregation_info + cluster_structure = info.result.cluster_structure + if cluster_structure is None: + raise ValueError('No cluster structure available for expansion.') + + timesteps_per_cluster = cluster_structure.timesteps_per_cluster + original_fs: FlowSystem = info.original_flow_system + n_clusters = ( + int(cluster_structure.n_clusters) + if isinstance(cluster_structure.n_clusters, (int, np.integer)) + else int(cluster_structure.n_clusters.values) + ) + has_periods = original_fs.periods is not None + has_scenarios = original_fs.scenarios is not None + cluster_order = cluster_structure.cluster_order.values periods = list(original_fs.periods) if has_periods else [None] scenarios = list(original_fs.scenarios) if has_scenarios else [None] @@ -930,13 +973,12 @@ def expand_solution(self) -> FlowSystem: original_timesteps = original_fs.timesteps n_original_timesteps = len(original_timesteps) n_reduced_timesteps = n_clusters * timesteps_per_cluster - first_key = (periods[0], scenarios[0]) - # Build expansion mappings per (period, scenario) - mappings = { - key: self._build_expansion_mapping(order, timesteps_per_cluster, n_original_timesteps) - for key, order in cluster_orders.items() - } + # Build expansion mapping (same for all period/scenario combinations) + base_mapping = self._build_expansion_mapping(cluster_order, timesteps_per_cluster, n_original_timesteps) + + # Create mappings dict for all (period, scenario) combinations using the same mapping + mappings = {(p, s): base_mapping for p in periods for s in scenarios} # Expand function for DataArrays def expand_da(da: xr.DataArray) -> xr.DataArray: @@ -977,7 +1019,7 @@ def expand_da(da: xr.DataArray) -> xr.DataArray: + ( f', {n_combinations} period/scenario combinations)' if n_combinations > 1 - else f' → {len(cluster_orders[first_key])} original segments)' + else f' → {len(cluster_order)} original segments)' ) ) From cf54be518b89d97cccd1608fcb214dd53383b47f Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 21:36:50 +0100 Subject: [PATCH 067/191] Remove old clustering code --- tests/test_cluster_reduce_expand.py | 43 +++++++++++++---------------- 1 file changed, 19 insertions(+), 24 deletions(-) diff --git a/tests/test_cluster_reduce_expand.py b/tests/test_cluster_reduce_expand.py index 54e83033c..526f0e052 100644 --- a/tests/test_cluster_reduce_expand.py +++ b/tests/test_cluster_reduce_expand.py @@ -55,8 +55,8 @@ def test_cluster_reduce_creates_reduced_timesteps(timesteps_8_days): # Should have 2 * 24 = 48 timesteps instead of 192 assert len(fs_reduced.timesteps) == 48 - assert hasattr(fs_reduced, '_cluster_info') - assert fs_reduced._cluster_info['n_clusters'] == 2 + assert hasattr(fs_reduced, '_aggregation_info') + assert fs_reduced._aggregation_info.result.cluster_structure.n_clusters == 2 def test_expand_solution_restores_full_timesteps(solver_fixture, timesteps_8_days): @@ -112,9 +112,9 @@ def test_expand_solution_maps_values_correctly(solver_fixture, timesteps_8_days) fs_reduced.optimize(solver_fixture) # Get cluster_order to know mapping - info = fs_reduced._cluster_info - 
cluster_order = info['cluster_order'] - timesteps_per_cluster = info['timesteps_per_cluster'] # 24 + info = fs_reduced._aggregation_info + cluster_order = info.result.cluster_structure.cluster_order.values + timesteps_per_cluster = info.result.cluster_structure.timesteps_per_cluster # 24 reduced_flow = fs_reduced.solution['Boiler(Q_th)|flow_rate'].values @@ -188,12 +188,12 @@ def test_expand_solution_statistics_match_clustered(solver_fixture, timesteps_8_ assert_allclose(reduced_flow_hours, expanded_flow_hours, rtol=1e-6) -def test_expand_solution_without_cluster_info_raises(solver_fixture, timesteps_2_days): +def test_expand_solution_without_aggregation_info_raises(solver_fixture, timesteps_2_days): """Test that expand_solution raises error if not a reduced FlowSystem.""" fs = create_simple_system(timesteps_2_days) fs.optimize(solver_fixture) - with pytest.raises(ValueError, match='cluster_reduce'): + with pytest.raises(ValueError, match='cluster_reduce|aggregate'): fs.transform.expand_solution() @@ -271,15 +271,13 @@ def test_cluster_reduce_with_scenarios(timesteps_8_days, scenarios_2): # Should have 2 * 24 = 48 timesteps assert len(fs_reduced.timesteps) == 48 - # Should have cluster_orders for each scenario - info = fs_reduced._cluster_info - assert 'cluster_orders' in info - assert info['has_scenarios'] is True - - # Each scenario should have its own cluster_order - for scenario in scenarios_2: - key = (None, scenario) - assert key in info['cluster_orders'] + # Should have aggregation info with cluster structure + info = fs_reduced._aggregation_info + assert info is not None + assert info.result.cluster_structure is not None + assert info.result.cluster_structure.n_clusters == 2 + # Original FlowSystem had scenarios + assert info.original_flow_system.scenarios is not None def test_cluster_reduce_and_expand_with_scenarios(solver_fixture, timesteps_8_days, scenarios_2): @@ -310,7 +308,7 @@ def test_cluster_reduce_and_expand_with_scenarios(solver_fixture, timesteps_8_da def test_expand_solution_maps_scenarios_independently(solver_fixture, timesteps_8_days, scenarios_2): - """Test that each scenario uses its own cluster_order in expand_solution.""" + """Test that expand_solution correctly maps scenarios in multi-scenario systems.""" fs = create_system_with_scenarios(timesteps_8_days, scenarios_2) fs_reduced = fs.transform.cluster_reduce( @@ -319,19 +317,16 @@ def test_expand_solution_maps_scenarios_independently(solver_fixture, timesteps_ ) fs_reduced.optimize(solver_fixture) - info = fs_reduced._cluster_info - cluster_orders = info['cluster_orders'] - timesteps_per_cluster = info['timesteps_per_cluster'] # 24 + info = fs_reduced._aggregation_info + cluster_order = info.result.cluster_structure.cluster_order.values + timesteps_per_cluster = info.result.cluster_structure.timesteps_per_cluster # 24 reduced_flow = fs_reduced.solution['Boiler(Q_th)|flow_rate'] fs_expanded = fs_reduced.transform.expand_solution() expanded_flow = fs_expanded.solution['Boiler(Q_th)|flow_rate'] - # Check mapping for each scenario independently + # Check mapping for each scenario (all use the same cluster_order in simplified implementation) for scenario in scenarios_2: - key = (None, scenario) - cluster_order = cluster_orders[key] - reduced_scenario = reduced_flow.sel(scenario=scenario).values expanded_scenario = expanded_flow.sel(scenario=scenario).values From 71d010a1c155257792fd349b515b3b6865863165 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 21:37:56 
+0100 Subject: [PATCH 068/191] Remove old clustering code --- flixopt/aggregation/storage_linking.py | 189 +++++++++++++++++++++++++ 1 file changed, 189 insertions(+) create mode 100644 flixopt/aggregation/storage_linking.py diff --git a/flixopt/aggregation/storage_linking.py b/flixopt/aggregation/storage_linking.py new file mode 100644 index 000000000..5cf9d9453 --- /dev/null +++ b/flixopt/aggregation/storage_linking.py @@ -0,0 +1,189 @@ +""" +Inter-cluster storage linking for aggregated optimization. + +When using time series aggregation (clustering), timesteps are reduced to only +representative (typical) periods. This module provides the `InterClusterLinking` +model that tracks storage state across the full original time horizon. +""" + +from __future__ import annotations + +import logging +from typing import TYPE_CHECKING + +import numpy as np + +from ..structure import Submodel + +if TYPE_CHECKING: + from ..flow_system import FlowSystem + from ..structure import FlowSystemModel + from .base import ClusterStructure + +logger = logging.getLogger('flixopt') + + +class InterClusterLinking(Submodel): + """Model that links storage state across representative periods. + + When using aggregation (clustering), timesteps are reduced to only representative + periods. This model creates variables and constraints to track storage state + across the full original time horizon using boundary state variables. + + The approach: + 1. Create SOC_boundary[d] for each original period d (0 to n_original_periods) + 2. Compute delta_SOC[c] for each representative period c (change in SOC during period) + 3. Link: SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]] + 4. Optionally enforce cyclic: SOC_boundary[0] = SOC_boundary[n_original_periods] + + This allows the optimizer to properly value storage for long-term (seasonal) + patterns while only solving for the representative timesteps. + + Example: + >>> from flixopt.aggregation import ClusterStructure, InterClusterLinking + >>> structure = ClusterStructure(...) + >>> model = InterClusterLinking( + ... model=flow_system.model, + ... flow_system=flow_system, + ... cluster_structure=structure, + ... ) + >>> model.do_modeling() + """ + + def __init__( + self, + model: FlowSystemModel, + flow_system: FlowSystem, + cluster_structure: ClusterStructure, + storage_cyclic: bool = True, + ): + """ + Args: + model: The FlowSystemModel to add constraints to. + flow_system: The FlowSystem being optimized. + cluster_structure: Clustering structure with cluster_order and occurrences. + storage_cyclic: If True, enforce SOC_boundary[0] = SOC_boundary[end]. + """ + super().__init__(model, label_of_element='InterClusterLinking', label_of_model='InterClusterLinking') + self.flow_system = flow_system + self.cluster_structure = cluster_structure + self.storage_cyclic = storage_cyclic + + # Extract commonly used values from cluster_structure + self._cluster_order = cluster_structure.cluster_order.values + self._n_clusters = ( + int(cluster_structure.n_clusters) + if isinstance(cluster_structure.n_clusters, (int, np.integer)) + else int(cluster_structure.n_clusters.values) + ) + self._timesteps_per_cluster = cluster_structure.timesteps_per_cluster + self._n_original_periods = len(self._cluster_order) + + def do_modeling(self): + """Create SOC boundary variables and inter-period linking constraints. 
+ + For each storage: + - SOC_boundary[d]: State of charge at start of original period d + - delta_SOC[c]: Change in SOC during representative period c + - Linking: SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]] + """ + storages = list(self.flow_system.storages.values()) + if not storages: + logger.info('No storages found - skipping inter-cluster linking') + return + + logger.info( + f'Adding inter-cluster storage linking for {len(storages)} storages ' + f'({self._n_original_periods} original periods, {self._n_clusters} clusters)' + ) + + for storage in storages: + self._add_storage_linking(storage) + + def _add_storage_linking(self, storage) -> None: + """Add inter-cluster linking constraints for a single storage. + + Args: + storage: Storage component to add linking for. + """ + import xarray as xr + + label = storage.label + + # Get the charge state variable from the storage's submodel + charge_state_name = f'{label}|charge_state' + if charge_state_name not in storage.submodel.variables: + logger.warning(f'Storage {label} has no charge_state variable - skipping') + return + + charge_state = storage.submodel.variables[charge_state_name] + + # Get storage capacity bounds (may have period/scenario dimensions) + capacity = storage.capacity_in_flow_hours + if hasattr(capacity, 'fixed_size') and capacity.fixed_size is not None: + cap_value = capacity.fixed_size + elif hasattr(capacity, 'maximum') and capacity.maximum is not None: + cap_value = capacity.maximum + else: + cap_value = 1e9 # Large default + + # Create SOC_boundary variables for each original period boundary + # We need n_original_periods + 1 boundaries (start of first through end of last) + n_boundaries = self._n_original_periods + 1 + boundary_coords = [np.arange(n_boundaries)] + boundary_dims = ['period_boundary'] + + # Build bounds - handle both scalar and multi-dimensional cap_value + if isinstance(cap_value, xr.DataArray) and cap_value.dims: + # cap_value has dimensions (e.g., period, scenario) - need to broadcast + extra_dims = list(cap_value.dims) + extra_coords = {dim: cap_value.coords[dim].values for dim in extra_dims} + + boundary_dims = ['period_boundary'] + extra_dims + boundary_coords = [np.arange(n_boundaries)] + [extra_coords[d] for d in extra_dims] + + lb_coords = {'period_boundary': np.arange(n_boundaries), **extra_coords} + lb_shape = [n_boundaries] + [len(extra_coords[d]) for d in extra_dims] + lb = xr.DataArray(np.zeros(lb_shape), coords=lb_coords, dims=boundary_dims) + + ub = cap_value.expand_dims({'period_boundary': n_boundaries}, axis=0) + ub = ub.assign_coords(period_boundary=np.arange(n_boundaries)) + else: + # Scalar cap_value + if hasattr(cap_value, 'item'): + cap_value = float(cap_value.item()) + else: + cap_value = float(cap_value) + lb = xr.DataArray(0.0, coords={'period_boundary': np.arange(n_boundaries)}, dims=['period_boundary']) + ub = xr.DataArray(cap_value, coords={'period_boundary': np.arange(n_boundaries)}, dims=['period_boundary']) + + soc_boundary = self.add_variables( + lower=lb, + upper=ub, + coords=boundary_coords, + dims=boundary_dims, + short_name=f'SOC_boundary|{label}', + ) + + # Pre-compute delta_SOC for each representative period + # delta_SOC[c] = charge_state[c, end] - charge_state[c, start] + delta_soc_dict = {} + for c in range(self._n_clusters): + start_idx = c * self._timesteps_per_cluster + end_idx = (c + 1) * self._timesteps_per_cluster # charge_state has extra timestep + + delta_soc_dict[c] = charge_state.isel(time=end_idx) - 
charge_state.isel(time=start_idx) + + # Create linking constraints: + # SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]] + for d in range(self._n_original_periods): + c = int(self._cluster_order[d]) + lhs = soc_boundary.isel(period_boundary=d + 1) - soc_boundary.isel(period_boundary=d) - delta_soc_dict[c] + self.add_constraints(lhs == 0, short_name=f'link|{label}|{d}') + + # Cyclic constraint: SOC_boundary[0] = SOC_boundary[end] + if self.storage_cyclic: + lhs = soc_boundary.isel(period_boundary=0) - soc_boundary.isel(period_boundary=self._n_original_periods) + self.add_constraints(lhs == 0, short_name=f'cyclic|{label}') + + logger.debug(f'Added inter-cluster linking for storage {label}') From bfc81d27b2a8f17b8671ab327ef500299c00fbd9 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 21:39:14 +0100 Subject: [PATCH 069/191] Update notebooks --- docs/notebooks/08c-clustering.ipynb | 16 +++++++++------- docs/notebooks/08d-clustering-multiperiod.ipynb | 11 +++++------ 2 files changed, 14 insertions(+), 13 deletions(-) diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb index a9a1ef302..34ecae48b 100644 --- a/docs/notebooks/08c-clustering.ipynb +++ b/docs/notebooks/08c-clustering.ipynb @@ -209,15 +209,16 @@ "outputs": [], "source": [ "# Show clustering info\n", - "info = fs_clustered._cluster_info\n", + "info = fs_clustered._aggregation_info\n", + "cs = info.result.cluster_structure\n", "print('Clustering Configuration:')\n", - "print(f' Number of typical periods: {info[\"n_clusters\"]}')\n", - "print(f' Timesteps per period: {info[\"timesteps_per_cluster\"]}')\n", - "print(f' Total reduced timesteps: {info[\"n_clusters\"] * info[\"timesteps_per_cluster\"]}')\n", - "print(f' Cluster order (first 10 days): {info[\"cluster_order\"][:10]}...')\n", + "print(f' Number of typical periods: {cs.n_clusters}')\n", + "print(f' Timesteps per period: {cs.timesteps_per_cluster}')\n", + "print(f' Total reduced timesteps: {cs.n_clusters * cs.timesteps_per_cluster}')\n", + "print(f' Cluster order (first 10 days): {cs.cluster_order.values[:10]}...')\n", "\n", "# Show how many times each cluster appears\n", - "cluster_order = info['cluster_order']\n", + "cluster_order = cs.cluster_order.values\n", "unique, counts = np.unique(cluster_order, return_counts=True)\n", "print('\\nCluster occurrences:')\n", "for cluster_id, count in zip(unique, counts, strict=False):\n", @@ -494,7 +495,8 @@ "\n", "### Next Steps\n", "\n", - "- **[08d-clustering-multiperiod](08d-clustering-multiperiod.ipynb)**: Clustering with multiple periods and scenarios" + "- **[08d-clustering-multiperiod](08d-clustering-multiperiod.ipynb)**: Clustering with multiple periods and scenarios\n", + "- **[08e-clustering-internals](08e-clustering-internals.ipynb)**: Deep dive into weights, TSAM, and cost scaling" ] } ], diff --git a/docs/notebooks/08d-clustering-multiperiod.ipynb b/docs/notebooks/08d-clustering-multiperiod.ipynb index 82985ce86..38f7794d8 100644 --- a/docs/notebooks/08d-clustering-multiperiod.ipynb +++ b/docs/notebooks/08d-clustering-multiperiod.ipynb @@ -241,16 +241,15 @@ "metadata": {}, "outputs": [], "source": [ - "info = fs_clustered._cluster_info\n", + "info = fs_clustered._aggregation_info\n", + "cs = info.result.cluster_structure\n", "\n", "print('Clustering Configuration:')\n", - "print(f' Typical periods (clusters): {info[\"n_clusters\"]}')\n", - "print(f' Timesteps per cluster: {info[\"timesteps_per_cluster\"]}')\n", - 
"print(f' Has model periods: {info[\"has_periods\"]}')\n", - "print(f' Has scenarios: {info[\"has_scenarios\"]}')\n", + "print(f' Typical periods (clusters): {cs.n_clusters}')\n", + "print(f' Timesteps per cluster: {cs.timesteps_per_cluster}')\n", "\n", "# The cluster_order shows which cluster each original day belongs to\n", - "cluster_order = info['cluster_order']\n", + "cluster_order = cs.cluster_order.values\n", "day_names = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']\n", "\n", "print('\\nCluster assignments per day:')\n", From a56d1f883ca207935b381d41824e57282d7da6cd Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 21:39:19 +0100 Subject: [PATCH 070/191] Update notebooks --- docs/notebooks/08e-clustering-internals.ipynb | 677 ++++++++++++++++++ 1 file changed, 677 insertions(+) create mode 100644 docs/notebooks/08e-clustering-internals.ipynb diff --git a/docs/notebooks/08e-clustering-internals.ipynb b/docs/notebooks/08e-clustering-internals.ipynb new file mode 100644 index 000000000..1efff22b0 --- /dev/null +++ b/docs/notebooks/08e-clustering-internals.ipynb @@ -0,0 +1,677 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0", + "metadata": {}, + "source": [ + "# Clustering Internals: Weights, TSAM, and Cost Scaling\n", + "\n", + "A deep dive into how time series clustering works under the hood.\n", + "\n", + "This notebook covers:\n", + "\n", + "- **Cluster weights**: How operational costs are scaled to represent the full time horizon\n", + "- **TSAM integration**: How the Time Series Aggregation Module performs clustering\n", + "- **Typical periods**: Visualizing representative vs original time series\n", + "- **Storage handling**: Inter-period linking and cyclic constraints\n", + "- **The `_aggregation_info` structure**: Internal data for expansion and analysis\n", + "\n", + "!!! note \"Prerequisites\"\n", + " This notebook assumes familiarity with [08c-clustering](08c-clustering.ipynb)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "from pathlib import Path\n", + "\n", + "import numpy as np\n", + "import pandas as pd\n", + "import plotly.express as px\n", + "import plotly.graph_objects as go\n", + "from plotly.subplots import make_subplots\n", + "\n", + "import flixopt as fx\n", + "\n", + "fx.CONFIG.notebook()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2", + "metadata": {}, + "outputs": [], + "source": [ + "# Load the district heating system\n", + "data_file = Path('data/district_heating_system.nc4')\n", + "if not data_file.exists():\n", + " from data.generate_example_systems import create_district_heating_system\n", + "\n", + " fs = create_district_heating_system()\n", + " fs.to_netcdf(data_file)\n", + "\n", + "flow_system = fx.FlowSystem.from_netcdf(data_file)\n", + "print(f'Loaded: {len(flow_system.timesteps)} timesteps ({len(flow_system.timesteps) / 96:.0f} days)')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "# Create a clustered system for analysis\n", + "fs_clustered = flow_system.transform.aggregate(\n", + " method='tsam',\n", + " n_representatives=8,\n", + " cluster_duration='1D',\n", + " time_series_for_high_peaks=['HeatDemand(Q_th)|fixed_relative_profile'],\n", + ")\n", + "\n", + "print(f'Clustered: {len(fs_clustered.timesteps)} timesteps')" + ] + }, + { + "cell_type": "markdown", + "id": "4", + "metadata": {}, + "source": [ + "## 1. The `_aggregation_info` Structure\n", + "\n", + "After clustering, the FlowSystem stores metadata in `_aggregation_info` that enables:\n", + "- Expanding solutions back to full resolution\n", + "- Understanding which original days map to which clusters\n", + "- Weighting costs correctly in the objective function" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5", + "metadata": {}, + "outputs": [], + "source": [ + "info = fs_clustered._aggregation_info\n", + "\n", + "print('AggregationInfo structure:')\n", + "print(f' backend_name: {info.backend_name}')\n", + "print(f' storage_inter_cluster_linking: {info.storage_inter_cluster_linking}')\n", + "print(f' storage_cyclic: {info.storage_cyclic}')\n", + "\n", + "cs = info.result.cluster_structure\n", + "print('\\nClusterStructure:')\n", + "print(f' n_clusters: {cs.n_clusters}')\n", + "print(f' timesteps_per_cluster: {cs.timesteps_per_cluster}')\n", + "print(f' cluster_order shape: {cs.cluster_order.shape}')\n", + "print(f' cluster_occurrences: {dict(cs.cluster_occurrences)}')" + ] + }, + { + "cell_type": "markdown", + "id": "6", + "metadata": {}, + "source": [ + "### Cluster Order: Mapping Days to Clusters\n", + "\n", + "The `cluster_order` array shows which cluster each original day belongs to:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "info = fs_clustered._aggregation_info\n", + "cs = info.result.cluster_structure\n", + "cluster_order = cs.cluster_order.values\n", + "n_original_days = len(cluster_order)\n", + "\n", + "# Create a DataFrame for visualization\n", + "days_df = pd.DataFrame(\n", + " {\n", + " 'Day': range(1, n_original_days + 1),\n", + " 'Cluster': cluster_order,\n", + " 'Date': pd.date_range('2020-01-01', periods=n_original_days, freq='D'),\n", + " }\n", + ")\n", + "days_df['Weekday'] = days_df['Date'].dt.day_name()\n", + "\n", + "print(f'Original days: 
{n_original_days}')\n",
+ "print(f'Number of clusters: {cs.n_clusters}')\n",
+ "print('\\nFirst 14 days:')\n",
+ "print(days_df.head(14).to_string(index=False))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Visualize cluster assignment as a colored bar strip (one bar per day)\n",
+ "fig = px.bar(\n",
+ "    days_df,\n",
+ "    x='Day',\n",
+ "    y=[1] * len(days_df),\n",
+ "    color='Cluster',\n",
+ "    color_continuous_scale='Viridis',\n",
+ "    title='Cluster Assignment by Day',\n",
+ "    labels={'y': ''},\n",
+ ")\n",
+ "fig.update_layout(height=250, yaxis_visible=False, coloraxis_colorbar_title='Cluster')\n",
+ "fig.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9",
+ "metadata": {},
+ "source": [
+ "## 2. Cluster Weights: Scaling Operational Costs\n",
+ "\n",
+ "When we optimize over 8 typical days instead of 31, the operational costs for each typical day\n",
+ "must be **scaled** by the number of original days it stands for.\n",
+ "\n",
+ "### The `cluster_weight` Property\n",
+ "\n",
+ "The clustered FlowSystem has a `cluster_weight` that stores the weight for each timestep:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "10",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# The cluster_weight is stored on the FlowSystem\n",
+ "print('cluster_weight structure:')\n",
+ "print(fs_clustered.cluster_weight)\n",
+ "print(f'\\nShape: {fs_clustered.cluster_weight.shape}')\n",
+ "print(f'Sum of weights: {fs_clustered.cluster_weight.sum().item():.0f}')\n",
+ "print(f'Expected (original timesteps): {len(flow_system.timesteps)}')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "11",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Cluster occurrences (how many original days each cluster represents)\n",
+ "info = fs_clustered._aggregation_info\n",
+ "cs = info.result.cluster_structure\n",
+ "cluster_occurrences = dict(cs.cluster_occurrences)\n",
+ "\n",
+ "print('Cluster occurrences (days represented by each typical day):')\n",
+ "for cluster_id, count in sorted(cluster_occurrences.items()):\n",
+ "    print(f'  Cluster {cluster_id}: {count} days (weight = {count})')\n",
+ "\n",
+ "print(f'\\nTotal: {sum(cluster_occurrences.values())} days')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "12",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Visualize weights across the reduced timesteps\n",
+ "info = fs_clustered._aggregation_info\n",
+ "cs = info.result.cluster_structure\n",
+ "weights = fs_clustered.cluster_weight.values\n",
+ "timesteps_per_day = cs.timesteps_per_cluster\n",
+ "\n",
+ "fig = go.Figure()\n",
+ "fig.add_trace(\n",
+ "    go.Scatter(\n",
+ "        x=list(range(len(weights))),\n",
+ "        y=weights,\n",
+ "        mode='lines',\n",
+ "        name='Cluster Weight',\n",
+ "        line=dict(width=1),\n",
+ "    )\n",
+ ")\n",
+ "\n",
+ "# Add vertical lines at day boundaries\n",
+ "for i in range(1, cs.n_clusters):\n",
+ "    fig.add_vline(x=i * timesteps_per_day, line_dash='dash', line_color='gray', opacity=0.5)\n",
+ "\n",
+ "fig.update_layout(\n",
+ "    height=300,\n",
+ "    title='Cluster Weight per Timestep (Each Typical Day Has Uniform Weight)',\n",
+ "    xaxis_title='Timestep Index',\n",
+ "    yaxis_title='Weight',\n",
+ ")\n",
+ "fig.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "13",
+ "metadata": {},
+ "source": [
+ "### How Weights Affect the Objective Function\n",
+ "\n",
+ "The objective function multiplies operational costs by the cluster weight:\n",
+ "\n",
"$$\\text{Objective} = \\sum_{t \\in \\text{typical}} w_t \\cdot c_t$$\n", + "\n", + "Where:\n", + "- $w_t$ = cluster weight for timestep $t$ (= number of original days this cluster represents)\n", + "- $c_t$ = operational cost at timestep $t$\n", + "\n", + "This ensures that a typical day representing 7 similar days contributes 7× more to the objective\n", + "than a typical day representing only 1 day (e.g., a peak day)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "# Demonstrate how weights are applied (conceptually)\n", + "solver = fx.solvers.HighsSolver(mip_gap=0.01, log_to_console=False)\n", + "fs_clustered.optimize(solver)\n", + "\n", + "# The 'costs' solution is already weighted\n", + "total_cost = fs_clustered.solution['costs'].item()\n", + "\n", + "# We can also access the per-timestep costs\n", + "costs_per_timestep = fs_clustered.solution['costs(temporal)|per_timestep']\n", + "\n", + "print(f'Total cost (weighted): {total_cost:,.0f} €')\n", + "print(f'\\nCosts per timestep shape: {costs_per_timestep.shape}')\n", + "print(f'Sum of weighted costs: {(costs_per_timestep * fs_clustered.cluster_weight).sum().item():,.0f} €')" + ] + }, + { + "cell_type": "markdown", + "id": "15", + "metadata": {}, + "source": [ + "## 3. TSAM Integration: The Clustering Algorithm\n", + "\n", + "flixopt uses the [TSAM](https://github.com/FZJ-IEK3-VSA/tsam) (Time Series Aggregation Module) \n", + "package for clustering. TSAM uses k-means clustering to group similar time periods.\n", + "\n", + "### The Clustering Object" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "# Access the TSAM clustering object\n", + "clustering = info['clustering']\n", + "\n", + "print(f'Clustering type: {type(clustering).__name__}')\n", + "print(f'\\nTSAM aggregation object: {type(clustering.tsam).__name__}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [ + "# The TSAM object contains the clustering results\n", + "tsam = clustering.tsam\n", + "\n", + "print('TSAM typical periods (centroids):')\n", + "print(tsam.typicalPeriods.head(10))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "# Cluster centers vs original data\n", + "print('\\nOriginal time series used for clustering:')\n", + "print(f'Shape: {tsam.normalizedPeriodlyProfiles.shape}')\n", + "print(f'Columns: {list(tsam.normalizedPeriodlyProfiles.columns)}')" + ] + }, + { + "cell_type": "markdown", + "id": "19", + "metadata": {}, + "source": [ + "### Visualizing Typical Periods vs Original Data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": {}, + "outputs": [], + "source": [ + "# Get heat demand from original and clustered systems\n", + "original_demand = flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile.values\n", + "clustered_demand = fs_clustered.components['HeatDemand'].inputs[0].fixed_relative_profile.values\n", + "\n", + "# Reshape original demand into days\n", + "timesteps_per_day = 96 # 15-minute resolution\n", + "n_days = len(original_demand) // timesteps_per_day\n", + "original_by_day = original_demand[: n_days * timesteps_per_day].reshape(n_days, timesteps_per_day)\n", + "\n", + "# Create subplots\n", + "fig = make_subplots(\n", + " rows=2,\n", + " cols=1,\n", + " 
+ "    subplot_titles=[f'Original: All {n_days} Days', f'Clustered: {cs.n_clusters} Typical Days'],\n",
+ "    vertical_spacing=0.15,\n",
+ ")\n",
+ "\n",
+ "# Plot all original days (faded)\n",
+ "hours = np.arange(timesteps_per_day) / 4  # Convert to hours\n",
+ "for day in range(n_days):\n",
+ "    fig.add_trace(\n",
+ "        go.Scatter(\n",
+ "            x=hours,\n",
+ "            y=original_by_day[day],\n",
+ "            mode='lines',\n",
+ "            line=dict(width=0.5, color='lightblue'),\n",
+ "            showlegend=False,\n",
+ "            hoverinfo='skip',\n",
+ "        ),\n",
+ "        row=1,\n",
+ "        col=1,\n",
+ "    )\n",
+ "\n",
+ "# Plot typical days (bold colors)\n",
+ "colors = px.colors.qualitative.Set1\n",
+ "n_clusters = cs.n_clusters\n",
+ "clustered_by_day = clustered_demand.reshape(n_clusters, timesteps_per_day)\n",
+ "\n",
+ "for cluster_id in range(n_clusters):\n",
+ "    weight = cluster_occurrences.get(cluster_id, cluster_occurrences.get(np.int32(cluster_id), 1))\n",
+ "    fig.add_trace(\n",
+ "        go.Scatter(\n",
+ "            x=hours,\n",
+ "            y=clustered_by_day[cluster_id],\n",
+ "            mode='lines',\n",
+ "            name=f'Cluster {cluster_id} (×{weight})',\n",
+ "            line=dict(width=2, color=colors[cluster_id % len(colors)]),\n",
+ "        ),\n",
+ "        row=2,\n",
+ "        col=1,\n",
+ "    )\n",
+ "\n",
+ "fig.update_layout(height=600, title='Heat Demand: Original vs Typical Days')\n",
+ "fig.update_xaxes(title_text='Hour of Day', row=2, col=1)\n",
+ "fig.update_yaxes(title_text='MW', row=1, col=1)\n",
+ "fig.update_yaxes(title_text='MW', row=2, col=1)\n",
+ "fig.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "21",
+ "metadata": {},
+ "source": [
+ "## 4. Storage Handling in Clustering\n",
+ "\n",
+ "Storage behavior across typical periods requires special handling:\n",
+ "\n",
+ "### Cyclic Constraint (`storage_cyclic=True`)\n",
+ "\n",
+ "When enabled (default), the storage state at the end of each typical period must equal\n",
+ "the state at the beginning. This prevents the optimizer from \"cheating\" by starting\n",
+ "with a full storage and ending empty.\n",
+ "\n",
+ "### Inter-Cluster Linking\n",
+ "\n",
+ "The `storage_inter_cluster_linking` option controls whether storage states are linked\n",
+ "across typical periods to simulate long-term storage behavior. The next cell runs a\n",
+ "quick numeric check of the cyclic property (a sketch; see its comments for the\n",
+ "indexing assumptions)."
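+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "21b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Sketch: numerically check the cyclic property on the solved system.\n",
+ "# Assumption: the reduced time axis simply concatenates the typical days in\n",
+ "# cluster order; adjust the indexing if your version stores an extra\n",
+ "# boundary timestep per day.\n",
+ "cs_chk = fs_clustered._aggregation_info.result.cluster_structure\n",
+ "charge = fs_clustered.solution['Storage|charge_state'].values\n",
+ "tpd = cs_chk.timesteps_per_cluster\n",
+ "for cluster_id in range(cs_chk.n_clusters):\n",
+ "    start = charge[cluster_id * tpd]\n",
+ "    end = charge[min((cluster_id + 1) * tpd, len(charge) - 1)]\n",
+ "    print(f'Cluster {cluster_id}: start={start:.2f} MWh, end={end:.2f} MWh, diff={end - start:+.3f}')"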
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "22",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "print('Storage settings:')\n",
+ "print(f'  storage_cyclic: {info.storage_cyclic}')\n",
+ "print(f'  storage_inter_cluster_linking: {info.storage_inter_cluster_linking}')\n",
+ "\n",
+ "# Show storage charge state in clustered solution\n",
+ "charge_state = fs_clustered.solution['Storage|charge_state']\n",
+ "print(f'\\nCharge state shape: {charge_state.shape}')\n",
+ "print(f'Initial charge: {charge_state.values[0]:.1f} MWh')\n",
+ "print(f'Final charge: {charge_state.values[-1]:.1f} MWh')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "23",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Visualize storage behavior across typical periods\n",
+ "fig = go.Figure()\n",
+ "\n",
+ "timesteps_per_day = cs.timesteps_per_cluster\n",
+ "charge_values = charge_state.values\n",
+ "\n",
+ "# Plot each typical day's storage trajectory\n",
+ "colors = px.colors.qualitative.Set1\n",
+ "for cluster_id in range(cs.n_clusters):\n",
+ "    start_idx = cluster_id * timesteps_per_day\n",
+ "    end_idx = start_idx + timesteps_per_day + 1  # Include endpoint\n",
+ "\n",
+ "    if end_idx <= len(charge_values):\n",
+ "        hours = np.arange(timesteps_per_day + 1) / 4\n",
+ "        weight = cluster_occurrences.get(cluster_id, cluster_occurrences.get(np.int32(cluster_id), 1))\n",
+ "\n",
+ "        fig.add_trace(\n",
+ "            go.Scatter(\n",
+ "                x=hours,\n",
+ "                y=charge_values[start_idx:end_idx],\n",
+ "                mode='lines',\n",
+ "                name=f'Cluster {cluster_id} (×{weight})',\n",
+ "                line=dict(width=2, color=colors[cluster_id % len(colors)]),\n",
+ "            )\n",
+ "        )\n",
+ "\n",
+ "fig.update_layout(\n",
+ "    height=400,\n",
+ "    title='Storage Charge State by Typical Period (Cyclic: Start = End)',\n",
+ "    xaxis_title='Hour of Day',\n",
+ "    yaxis_title='Charge State [MWh]',\n",
+ ")\n",
+ "fig.show()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "24",
+ "metadata": {},
+ "source": [
+ "## 5. The `weights` Property: Unified Access\n",
+ "\n",
+ "The FlowSystem provides a unified `weights` property that combines all weighting factors\n",
+ "(aggregation weights, scenario weights, period weights) into a single xarray structure:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "25",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# The weights property provides unified access\n",
+ "weights = fs_clustered.weights\n",
+ "\n",
+ "print('FlowSystem weights structure:')\n",
+ "print(f'  Type: {type(weights).__name__}')\n",
+ "print(f'  temporal: {weights.temporal}')\n",
+ "print(f'  aggregation_weight: {weights.aggregation_weight}')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "26",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Compare weights for original vs clustered systems\n",
+ "print('Original system weights:')\n",
+ "print(f'  temporal: {flow_system.weights.temporal}')\n",
+ "print(f'  aggregation_weight: {flow_system.weights.aggregation_weight}')\n",
+ "\n",
+ "print('\\nClustered system weights:')\n",
+ "print(f'  temporal: {fs_clustered.weights.temporal}')\n",
+ "print(f'  aggregation_weight (cluster_weight): sum = {fs_clustered.weights.aggregation_weight.sum().item():.0f}')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "27",
+ "metadata": {},
+ "source": [
+ "## 6. Time Series Weights in Clustering\n",
+ "\n",
+ "You can influence which time series are prioritized during clustering using the `weights` parameter.\n",
+ "By default, all time series are weighted equally, but you may want to:\n",
+ "\n",
+ "- Give higher weight to demand profiles (more important to capture accurately)\n",
+ "- Give lower weight to price signals (less critical for sizing)\n",
+ "\n",
+ "### Automatic Weight Calculation\n",
+ "\n",
+ "flixopt automatically calculates weights based on `clustering_group` attributes to avoid\n",
+ "double-counting correlated time series:"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "28",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Show the time series used for clustering and their weights\n",
+ "if hasattr(clustering, 'tsam') and hasattr(clustering.tsam, 'normalizedPeriodlyProfiles'):\n",
+ "    ts_names = list(clustering.tsam.normalizedPeriodlyProfiles.columns)\n",
+ "    print('Time series used for clustering:')\n",
+ "    for name in ts_names:\n",
+ "        print(f'  - {name}')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "29",
+ "metadata": {},
+ "source": [
+ "## 7. Peak Forcing: Ensuring Extreme Periods\n",
+ "\n",
+ "The `time_series_for_high_peaks` parameter forces inclusion of periods containing peak values.\n",
+ "This is critical for proper component sizing."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "30",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Find which cluster contains the peak demand day\n",
+ "original_demand = flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile.values\n",
+ "daily_max = original_demand.reshape(-1, 96).max(axis=1)\n",
+ "\n",
+ "peak_day = np.argmax(daily_max)\n",
+ "peak_cluster = cluster_order[peak_day]\n",
+ "peak_value = daily_max[peak_day]\n",
+ "\n",
+ "# Get weight for the peak cluster\n",
+ "peak_weight = cluster_occurrences.get(peak_cluster, cluster_occurrences.get(np.int32(peak_cluster), 1))\n",
+ "\n",
+ "print(f'Peak demand day: Day {peak_day + 1} (0-indexed: {peak_day})')\n",
+ "print(f'Peak value: {peak_value:.1f} MW')\n",
+ "print(f'Assigned to cluster: {peak_cluster}')\n",
+ "print(f'Cluster {peak_cluster} represents {peak_weight} day(s)')\n",
+ "\n",
+ "# The peak day should be in a cluster with weight 1 (unique)\n",
+ "if peak_weight == 1:\n",
+ "    print('\\n✓ Peak day is isolated in its own cluster (weight=1) - good!')\n",
+ "else:\n",
+ "    print(f'\\n⚠ Peak day shares cluster with {peak_weight - 1} other day(s)')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "31",
+ "metadata": {},
+ "source": [
+ "## Summary\n",
+ "\n",
+ "You learned about the internal mechanics of clustering:\n",
+ "\n",
+ "1. **`_aggregation_info`**: Contains all metadata for expansion and analysis\n",
+ "2. **Cluster weights**: Scale operational costs so each typical period represents its original days\n",
+ "3. **TSAM integration**: k-means clustering groups similar time periods\n",
+ "4. **Storage handling**: Cyclic constraints ensure realistic storage behavior\n",
+ "5. **Peak forcing**: Guarantees extreme periods are captured for proper sizing\n",
+ "\n",
+ "### Key Formulas\n",
+ "\n",
+ "**Weighted objective:**\n",
+ "$$\\text{Objective} = \\sum_{t \\in \\text{typical}} w_t \\cdot c_t$$\n",
+ "\n",
+ "**Weight conservation:**\n",
+ "$$\\sum_{t \\in \\text{typical}} w_t = |\\text{original timesteps}|$$\n",
+ "\n",
+ "### When to Customize\n",
+ "\n",
+ "| Scenario | Solution |\n",
+ "|----------|----------|\n",
+ "| Peak days not captured | Add `time_series_for_high_peaks` |\n",
+ "| Minimum periods important | Add `time_series_for_low_peaks` |\n",
+ "| Specific profiles more important | Use custom `weights` dict |\n",
+ "| Storage behaves unrealistically | Check `storage_cyclic` setting |"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "name": "python",
+ "version": "3.11"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
From 1faaaf09485af82cee476c4c1f20aad41ff9f7cd Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Thu, 18 Dec 2025 21:54:14 +0100
Subject: [PATCH 071/191] =?UTF-8?q?=20=20=E2=8E=BF=20=C2=A0=E2=98=92=20Ren?=
 =?UTF-8?q?ame=20cluster=5Freduce()=20to=20cluster()=20in=20transform=5Fac?=
 =?UTF-8?q?cessor.py=20=20=20=20=20=20=E2=98=92=20Remove=20aggregate(),=20?=
 =?UTF-8?q?set=5Faggregation(),=20=5Faggregate=5Ftsam()=20from=20transform?=
 =?UTF-8?q?=5Faccessor.py=20=20=20=20=20=20=E2=98=92=20Remove=20Aggregator?=
 =?UTF-8?q?=20protocol=20from=20base.py=20=20=20=20=20=20=E2=98=92=20Move?=
 =?UTF-8?q?=20plot=5Faggregation()=20to=20base.py=20=20=20=20=20=20?=
 =?UTF-8?q?=E2=98=92=20Delete=20tsam=5Fbackend.py=20=20=20=20=20=20?=
 =?UTF-8?q?=E2=98=92=20Delete=20manual.py?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 flixopt/aggregation/__init__.py     | 104 +------
 flixopt/aggregation/base.py         | 135 ++++++---
 flixopt/aggregation/manual.py       | 335 ---------------------
 flixopt/aggregation/tsam_backend.py | 446 ----------------------------
 flixopt/transform_accessor.py       | 222 ++------------
 5 files changed, 120 insertions(+), 1122 deletions(-)
 delete mode 100644 flixopt/aggregation/manual.py
 delete mode 100644 flixopt/aggregation/tsam_backend.py

diff --git a/flixopt/aggregation/__init__.py b/flixopt/aggregation/__init__.py
index b0241b25a..c7a03cad8 100644
--- a/flixopt/aggregation/__init__.py
+++ b/flixopt/aggregation/__init__.py
@@ -1,53 +1,36 @@
 """
 Time Series Aggregation Module for flixopt.
 
-This module provides an abstraction layer for time series aggregation that
-supports multiple backends while maintaining proper handling of multi-dimensional
-data (period, scenario dimensions).
-
-Available backends:
-- TSAMBackend: Uses tsam package for k-means clustering into typical periods
-- ManualBackend: Accepts user-provided mapping/weights for external aggregation
+This module provides data structures for time series clustering/aggregation.
Key classes: -- AggregationResult: Universal result container from any aggregation backend -- ClusterStructure: Hierarchical structure info for storage inter-period linking -- Aggregator: Protocol that all backends implement +- AggregationResult: Universal result container for clustering +- ClusterStructure: Hierarchical structure info for storage inter-cluster linking +- AggregationInfo: Stored on FlowSystem after clustering Example usage: - # Using TSAM backend - from flixopt.aggregation import TSAMBackend - - backend = TSAMBackend(cluster_duration='1D', n_segments=4) - result = backend.aggregate(data, n_representatives=8) - - # Using manual/external aggregation (PyPSA-style) - from flixopt.aggregation import ManualBackend - import xarray as xr - - backend = ManualBackend( - timestep_mapping=xr.DataArray(my_mapping, dims=['original_time']), - representative_weights=xr.DataArray(my_weights, dims=['time']), + # Cluster a FlowSystem to reduce timesteps + fs_clustered = flow_system.transform.cluster( + n_clusters=8, + cluster_duration='1D', + time_series_for_high_peaks=['Demand|fixed_relative_profile'], ) - result = backend.aggregate(data) - # Or via transform accessor - fs_aggregated = fs.transform.aggregate(method='tsam', n_representatives=8) - fs_aggregated = fs.transform.set_aggregation(my_mapping, my_weights) + # Access clustering metadata + info = fs_clustered._aggregation_info + print(f'Number of clusters: {info.result.cluster_structure.n_clusters}') + + # Expand solution back to full resolution + fs_expanded = fs_clustered.transform.expand_solution() """ from .base import ( AggregationInfo, AggregationResult, - Aggregator, ClusterStructure, create_cluster_structure_from_mapping, -) -from .manual import ( - ManualBackend, - create_manual_backend_from_labels, - create_manual_backend_from_selection, + plot_aggregation, ) # Lazy import for InterClusterLinking to avoid circular imports @@ -65,68 +48,13 @@ def _get_inter_cluster_linking(): return InterClusterLinking -# Conditional imports based on package availability -_BACKENDS = {'manual': ManualBackend} - -try: - from .tsam_backend import TSAMBackend, plot_aggregation - - _BACKENDS['tsam'] = TSAMBackend -except ImportError: - # tsam not installed - TSAMBackend not available - TSAMBackend = None - plot_aggregation = None - - -def get_backend(name: str): - """Get aggregation backend by name. - - Args: - name: Backend name ('tsam', 'manual'). - - Returns: - Backend class. - - Raises: - ValueError: If backend is not available. - """ - if name not in _BACKENDS: - available = list(_BACKENDS.keys()) - raise ValueError(f"Unknown backend '{name}'. Available: {available}") - - backend_class = _BACKENDS[name] - if backend_class is None: - raise ImportError( - f"Backend '{name}' is not available. Install required dependencies (e.g., 'pip install tsam' for TSAM)." - ) - - return backend_class - - -def list_backends() -> list[str]: - """List available aggregation backends. - - Returns: - List of backend names that are currently available. 
- """ - return [name for name, cls in _BACKENDS.items() if cls is not None] - - __all__ = [ # Core classes 'AggregationResult', 'AggregationInfo', 'ClusterStructure', - 'Aggregator', 'InterClusterLinking', - # Backends - 'TSAMBackend', - 'ManualBackend', # Utilities 'create_cluster_structure_from_mapping', - 'create_manual_backend_from_labels', - 'create_manual_backend_from_selection', 'plot_aggregation', - 'get_backend', - 'list_backends', ] diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index e66e6f742..d57ee6298 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -18,7 +18,6 @@ from __future__ import annotations from dataclasses import dataclass -from typing import Protocol, runtime_checkable import numpy as np import xarray as xr @@ -216,53 +215,6 @@ def validate(self) -> None: ) -@runtime_checkable -class Aggregator(Protocol): - """Protocol that any aggregation backend must implement. - - This protocol defines the interface for time series aggregation backends. - Implementations can use any clustering algorithm (TSAM, sklearn k-means, - hierarchical clustering, etc.) as long as they return an AggregationResult. - - Example implementation: - class MyAggregator: - def aggregate( - self, - data: xr.Dataset, - n_representatives: int, - **kwargs - ) -> AggregationResult: - # Custom clustering logic - ... - return AggregationResult( - timestep_mapping=mapping, - n_representatives=n_representatives, - representative_weights=weights, - ) - """ - - def aggregate( - self, - data: xr.Dataset, - n_representatives: int, - **kwargs, - ) -> AggregationResult: - """Perform time series aggregation (clustering). - - Args: - data: Input time series data as xarray Dataset. - Must have 'time' dimension. - n_representatives: Target number of representative timesteps - (n_clusters * timesteps_per_cluster). - **kwargs: Backend-specific options (e.g., cluster_duration). - - Returns: - AggregationResult containing mapping, weights, and optionally - aggregated data and cluster structure. - """ - ... - - @dataclass class AggregationInfo: """Information about an aggregation stored on a FlowSystem. @@ -336,3 +288,90 @@ def create_cluster_structure_from_mapping( n_clusters=n_clusters, timesteps_per_cluster=timesteps_per_cluster, ) + + +def plot_aggregation( + result: AggregationResult, + colormap: str | None = None, + show: bool | None = None, +): + """Plot original vs aggregated data comparison. + + Visualizes the original time series (dashed lines) overlaid with + the aggregated/clustered time series (solid lines) for comparison. + + Args: + result: AggregationResult containing original and aggregated data. + colormap: Colorscale name for the time series colors. + Defaults to CONFIG.Plotting.default_qualitative_colorscale. + show: Whether to display the figure. + Defaults to CONFIG.Plotting.default_show. + + Returns: + PlotResult containing the comparison figure and underlying data. 
+ + Example: + >>> fs_clustered = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D') + >>> plot_aggregation(fs_clustered._aggregation_info.result) + """ + import plotly.express as px + + from ..color_processing import process_colors + from ..config import CONFIG + from ..plot_result import PlotResult + + if result.original_data is None or result.aggregated_data is None: + raise ValueError('AggregationResult must contain both original_data and aggregated_data for plotting') + + # Convert xarray to DataFrames + original_df = result.original_data.to_dataframe() + aggregated_df = result.aggregated_data.to_dataframe() + + # Expand aggregated data to original length using mapping + mapping = result.timestep_mapping.values + expanded_agg = aggregated_df.iloc[mapping].reset_index(drop=True) + + # Rename for legend + original_df = original_df.rename(columns={col: f'Original - {col}' for col in original_df.columns}) + expanded_agg = expanded_agg.rename(columns={col: f'Aggregated - {col}' for col in expanded_agg.columns}) + + colors = list( + process_colors(colormap or CONFIG.Plotting.default_qualitative_colorscale, list(original_df.columns)).values() + ) + + # Create line plot for original data (dashed) + original_df = original_df.reset_index() + index_name = original_df.columns[0] + df_org_long = original_df.melt(id_vars=index_name, var_name='variable', value_name='value') + fig = px.line(df_org_long, x=index_name, y='value', color='variable', color_discrete_sequence=colors) + for trace in fig.data: + trace.update(line=dict(dash='dash')) + + # Add aggregated data (solid lines) + expanded_agg[index_name] = original_df[index_name] + df_agg_long = expanded_agg.melt(id_vars=index_name, var_name='variable', value_name='value') + fig2 = px.line(df_agg_long, x=index_name, y='value', color='variable', color_discrete_sequence=colors) + for trace in fig2.data: + fig.add_trace(trace) + + fig.update_layout( + title='Original vs Aggregated Data (original = ---)', + xaxis_title='Time', + yaxis_title='Value', + ) + + # Build xarray Dataset with both original and aggregated data + data = xr.Dataset( + { + 'original': result.original_data.to_array(dim='variable'), + 'aggregated': result.aggregated_data.to_array(dim='variable'), + } + ) + plot_result = PlotResult(data=data, figure=fig) + + if show is None: + show = CONFIG.Plotting.default_show + if show: + plot_result.show() + + return plot_result diff --git a/flixopt/aggregation/manual.py b/flixopt/aggregation/manual.py deleted file mode 100644 index 159fc9a7a..000000000 --- a/flixopt/aggregation/manual.py +++ /dev/null @@ -1,335 +0,0 @@ -""" -Manual aggregation backend for user-provided clustering results. - -This backend enables PyPSA-style workflows where users perform aggregation -externally (using sklearn, custom algorithms, etc.) and then provide the -mapping and weights to flixopt. -""" - -from __future__ import annotations - -import numpy as np -import xarray as xr - -from .base import AggregationResult, ClusterStructure, create_cluster_structure_from_mapping - - -class ManualBackend: - """Backend for user-provided aggregation results. - - This backend accepts pre-computed aggregation mapping and weights, - enabling users to use any external clustering tool (sklearn k-means, - hierarchical clustering, etc.) with flixopt. - - This is similar to PyPSA's approach where aggregation is done externally - and the framework just accepts the results. - - Args: - timestep_mapping: Mapping from original timesteps to representative indices. 
- DataArray with dims [original_time]. - Values should be integers in range [0, n_representatives). - representative_weights: Weight for each representative timestep. - DataArray with dims [time]. - Typically equals count of original timesteps each representative covers. - This becomes the cluster_weight in the FlowSystem. - cluster_structure: Optional cluster structure for storage inter-cluster linking. - If not provided and timesteps_per_cluster is given, will be inferred from mapping. - timesteps_per_cluster: Number of timesteps per cluster (e.g., 24 for daily clusters). - Required to infer cluster_structure if not explicitly provided. - - Example: - >>> # External clustering with sklearn - >>> from sklearn.cluster import KMeans - >>> kmeans = KMeans(n_clusters=8) - >>> labels = kmeans.fit_predict(my_data) - >>> - >>> # Create mapping (original timestep -> representative) - >>> mapping = ... # compute from labels - >>> weights = ... # count occurrences - >>> - >>> # Use with flixopt - >>> backend = ManualBackend( - ... timestep_mapping=xr.DataArray(mapping, dims=['original_time']), - ... representative_weights=xr.DataArray(weights, dims=['time']), - ... ) - >>> result = backend.aggregate(data, n_representatives=192) - """ - - def __init__( - self, - timestep_mapping: xr.DataArray, - representative_weights: xr.DataArray, - cluster_structure: ClusterStructure | None = None, - timesteps_per_cluster: int | None = None, - ): - # Validate and store mapping - if not isinstance(timestep_mapping, xr.DataArray): - timestep_mapping = xr.DataArray(timestep_mapping, dims=['original_time'], name='timestep_mapping') - self.timestep_mapping = timestep_mapping - - # Validate and store weights - if not isinstance(representative_weights, xr.DataArray): - representative_weights = xr.DataArray(representative_weights, dims=['time'], name='representative_weights') - self.representative_weights = representative_weights - - # Store or infer cluster structure - self.cluster_structure = cluster_structure - self.timesteps_per_cluster = timesteps_per_cluster - - # Validate - self._validate() - - def _validate(self) -> None: - """Validate input arrays.""" - # Check mapping has required dimension - if 'original_time' not in self.timestep_mapping.dims: - if 'time' in self.timestep_mapping.dims: - # Rename for clarity - self.timestep_mapping = self.timestep_mapping.rename({'time': 'original_time'}) - else: - raise ValueError("timestep_mapping must have 'original_time' or 'time' dimension") - - # Check weights has required dimension - if 'time' not in self.representative_weights.dims: - raise ValueError("representative_weights must have 'time' dimension") - - # Check mapping values are non-negative integers - min_val = int(self.timestep_mapping.min().values) - if min_val < 0: - raise ValueError(f'timestep_mapping contains negative value: {min_val}') - - # Check mapping values are within bounds - max_val = int(self.timestep_mapping.max().values) - n_weights = len(self.representative_weights.coords['time']) - if max_val >= n_weights: - raise ValueError( - f'timestep_mapping contains index {max_val} but representative_weights only has {n_weights} elements' - ) - - def aggregate( - self, - data: xr.Dataset, - n_representatives: int | None = None, - **kwargs, - ) -> AggregationResult: - """Create AggregationResult from stored mapping and weights. - - The data parameter is used to: - 1. Validate dimensions match the mapping - 2. 
Create aggregated data by indexing with the mapping - - Args: - data: Input time series data as xarray Dataset. - Used for validation and to create aggregated_data. - n_representatives: Number of representatives. If None, inferred from weights. - **kwargs: Ignored (for protocol compatibility). - - Returns: - AggregationResult with the stored mapping and weights. - """ - # Infer n_representatives if not provided - if n_representatives is None: - n_representatives = len(self.representative_weights.coords['time']) - - # Validate data dimensions match mapping - self._validate_data_dimensions(data) - - # Create aggregated data by indexing original data - aggregated_data = self._create_aggregated_data(data, n_representatives) - - # Infer cluster structure if needed - cluster_structure = self.cluster_structure - if cluster_structure is None and self.timesteps_per_cluster is not None: - cluster_structure = create_cluster_structure_from_mapping(self.timestep_mapping, self.timesteps_per_cluster) - - return AggregationResult( - timestep_mapping=self.timestep_mapping, - n_representatives=n_representatives, - representative_weights=self.representative_weights, - aggregated_data=aggregated_data, - cluster_structure=cluster_structure, - original_data=data, - ) - - def _validate_data_dimensions(self, data: xr.Dataset) -> None: - """Validate that data dimensions are compatible with mapping.""" - # Check time dimension length - if 'time' not in data.dims: - raise ValueError("Input data must have 'time' dimension") - - n_data_timesteps = len(data.coords['time']) - n_mapping_timesteps = len(self.timestep_mapping.coords['original_time']) - - if n_data_timesteps != n_mapping_timesteps: - raise ValueError(f'Data has {n_data_timesteps} timesteps but mapping expects {n_mapping_timesteps}') - - # Check period/scenario dimensions if present in mapping - for dim in ['period', 'scenario']: - if dim in self.timestep_mapping.dims: - if dim not in data.dims: - raise ValueError(f"Mapping has '{dim}' dimension but data does not") - mapping_coords = self.timestep_mapping.coords[dim].values - data_coords = data.coords[dim].values - if not np.array_equal(mapping_coords, data_coords): - raise ValueError(f"'{dim}' coordinates don't match between mapping and data") - - def _create_aggregated_data( - self, - data: xr.Dataset, - n_representatives: int, - ) -> xr.Dataset: - """Create aggregated data by extracting representative timesteps. - - For each representative timestep, we take the value from the first - original timestep that maps to it (simple selection, not averaging). 
- """ - # Find first original timestep for each representative - mapping_vals = self.timestep_mapping.values - if mapping_vals.ndim > 1: - # Multi-dimensional - use first slice - mapping_vals = mapping_vals[:, 0] if mapping_vals.ndim == 2 else mapping_vals[:, 0, 0] - - # For each representative, find the first original that maps to it - first_original = {} - for orig_idx, rep_idx in enumerate(mapping_vals): - if rep_idx not in first_original: - first_original[int(rep_idx)] = orig_idx - - # Build index array for selecting representative values - rep_indices = [first_original.get(i, 0) for i in range(n_representatives)] - - # Select from data - aggregated_vars = {} - for var_name, var_data in data.data_vars.items(): - if 'time' in var_data.dims: - # Select representative timesteps - selected = var_data.isel(time=rep_indices) - # Reassign time coordinate - selected = selected.assign_coords(time=np.arange(n_representatives)) - aggregated_vars[var_name] = selected - else: - # Non-time variable - keep as is - aggregated_vars[var_name] = var_data - - return xr.Dataset(aggregated_vars) - - -def create_manual_backend_from_labels( - labels: np.ndarray, - timesteps_per_cluster: int, - n_timesteps: int | None = None, -) -> ManualBackend: - """Create ManualBackend from cluster labels (e.g., from sklearn KMeans). - - This is a convenience function for creating a ManualBackend when you have - cluster labels from a standard clustering algorithm. - - Args: - labels: Cluster label for each timestep (from KMeans.fit_predict, etc.). - Shape: (n_timesteps,) with values in [0, n_clusters). - timesteps_per_cluster: Number of timesteps per cluster period. - n_timesteps: Total number of timesteps. If None, inferred from labels. - - Returns: - ManualBackend configured with the label-derived mapping. 
- - Example: - >>> from sklearn.cluster import KMeans - >>> kmeans = KMeans(n_clusters=8).fit(daily_profiles) - >>> labels = np.repeat(kmeans.labels_, 24) # Expand to hourly - >>> backend = create_manual_backend_from_labels(labels, timesteps_per_cluster=24) - """ - if n_timesteps is None: - n_timesteps = len(labels) - - # Get unique clusters and count occurrences - unique_clusters = np.unique(labels) - n_clusters = len(unique_clusters) - - # Remap labels to 0..n_clusters-1 if needed - if not np.array_equal(unique_clusters, np.arange(n_clusters)): - label_map = {old: new for new, old in enumerate(unique_clusters)} - labels = np.array([label_map[label] for label in labels]) - - # Build timestep mapping - # Each original timestep maps to: cluster_id * timesteps_per_cluster + position_in_period - n_original_periods = n_timesteps // timesteps_per_cluster - timestep_mapping = np.zeros(n_timesteps, dtype=np.int32) - - for period_idx in range(n_original_periods): - cluster_id = labels[period_idx * timesteps_per_cluster] # Label of first timestep in period - for pos in range(timesteps_per_cluster): - orig_idx = period_idx * timesteps_per_cluster + pos - if orig_idx < n_timesteps: - timestep_mapping[orig_idx] = cluster_id * timesteps_per_cluster + pos - - # Build weights (count of originals per representative) - n_representative_timesteps = n_clusters * timesteps_per_cluster - representative_weights = np.zeros(n_representative_timesteps, dtype=np.float64) - - # Count occurrences of each cluster - cluster_counts = {} - for period_idx in range(n_original_periods): - cluster_id = labels[period_idx * timesteps_per_cluster] - cluster_counts[cluster_id] = cluster_counts.get(cluster_id, 0) + 1 - - for cluster_id, count in cluster_counts.items(): - for pos in range(timesteps_per_cluster): - rep_idx = cluster_id * timesteps_per_cluster + pos - if rep_idx < n_representative_timesteps: - representative_weights[rep_idx] = count - - return ManualBackend( - timestep_mapping=xr.DataArray(timestep_mapping, dims=['original_time'], name='timestep_mapping'), - representative_weights=xr.DataArray(representative_weights, dims=['time'], name='representative_weights'), - timesteps_per_cluster=timesteps_per_cluster, - ) - - -def create_manual_backend_from_selection( - selected_indices: np.ndarray, - weights: np.ndarray, - n_original_timesteps: int, - timesteps_per_period: int | None = None, -) -> ManualBackend: - """Create ManualBackend from selected representative timesteps. - - This is useful when you have a simple selection-based aggregation - (e.g., select every Nth timestep, select specific representative days). - - Args: - selected_indices: Indices of selected representative timesteps. - These become the new time axis. - weights: Weight for each selected timestep (how many originals it represents). - n_original_timesteps: Total number of original timesteps. - timesteps_per_period: Optional, for creating cluster structure. - - Returns: - ManualBackend configured with the selection-based mapping. 
- - Example: - >>> # Select every 7th day as representative - >>> selected = np.arange(0, 365 * 24, 7 * 24) # Weekly representatives - >>> weights = np.ones(len(selected)) * 7 # Each represents 7 days - >>> backend = create_manual_backend_from_selection(selected, weights, n_original_timesteps=365 * 24) - """ - n_representatives = len(selected_indices) - - if len(weights) != n_representatives: - raise ValueError(f'weights has {len(weights)} elements but selected_indices has {n_representatives}') - - # Build mapping: each original maps to nearest selected - timestep_mapping = np.zeros(n_original_timesteps, dtype=np.int32) - - # Simple nearest-neighbor assignment - for orig_idx in range(n_original_timesteps): - # Find nearest selected index - distances = np.abs(selected_indices - orig_idx) - nearest_rep = np.argmin(distances) - timestep_mapping[orig_idx] = nearest_rep - - return ManualBackend( - timestep_mapping=xr.DataArray(timestep_mapping, dims=['original_time'], name='timestep_mapping'), - representative_weights=xr.DataArray(weights, dims=['time'], name='representative_weights'), - timesteps_per_cluster=timesteps_per_period, - ) diff --git a/flixopt/aggregation/tsam_backend.py b/flixopt/aggregation/tsam_backend.py deleted file mode 100644 index e43eededd..000000000 --- a/flixopt/aggregation/tsam_backend.py +++ /dev/null @@ -1,446 +0,0 @@ -""" -TSAM (Time Series Aggregation Module) backend for time series aggregation. - -This backend wraps the existing flixopt Clustering class which uses the -tsam package to perform k-means clustering of time series into typical periods. - -Terminology note: -- TSAM uses "typical periods" to mean representative time chunks (e.g., typical days) -- "cluster" = a group of similar time chunks (e.g., similar days) -- "cluster_duration" = length of each time chunk (e.g., 24h for daily clustering) -- "period" and "scenario" in method signatures refer to the MODEL's dimensions - (years/months and scenarios), NOT the clustering time chunks -""" - -from __future__ import annotations - -import logging - -import numpy as np -import xarray as xr - -from .base import AggregationResult, ClusterStructure - -logger = logging.getLogger('flixopt') - -# Check if tsam is available -try: - import tsam.timeseriesaggregation as tsam - - TSAM_AVAILABLE = True -except ImportError: - TSAM_AVAILABLE = False - - -def _parse_cluster_duration(duration: str | float) -> float: - """Convert cluster duration to hours. - - Args: - duration: Either a pandas-style duration string ('1D', '24h', '6h') - or a numeric value in hours. - - Returns: - Duration in hours. - """ - import pandas as pd - - if isinstance(duration, (int, float)): - return float(duration) - - # Parse pandas-style duration strings - td = pd.Timedelta(duration) - return td.total_seconds() / 3600 - - -class TSAMBackend: - """TSAM-based time series aggregation backend. - - This backend uses the tsam (Time Series Aggregation Module) package - to perform k-means clustering of time series into typical periods. - - Features: - - Inter-period clustering (typical days/weeks) - - Intra-period segmentation (reduce timesteps within periods) - - Extreme period preservation (high/low peaks) - - Custom weighting of time series for clustering - - Args: - cluster_duration: Duration of each cluster period. - Can be pandas-style string ('1D', '24h') or hours as float. - n_segments: Number of segments within each period for intra-period - clustering. None for no segmentation. 
- time_series_for_high_peaks: Column names to preserve high-value periods for. - time_series_for_low_peaks: Column names to preserve low-value periods for. - weights: Dict mapping column names to clustering weights. - - Example: - >>> backend = TSAMBackend(cluster_duration='1D', n_segments=4) - >>> result = backend.aggregate(data, n_representatives=8) - """ - - def __init__( - self, - cluster_duration: str | float = '1D', - n_segments: int | None = None, - time_series_for_high_peaks: list[str] | None = None, - time_series_for_low_peaks: list[str] | None = None, - weights: dict[str, float] | None = None, - ): - if not TSAM_AVAILABLE: - raise ImportError("The 'tsam' package is required for TSAMBackend. Install it with 'pip install tsam'.") - - self.cluster_duration = cluster_duration - self.cluster_duration_hours = _parse_cluster_duration(cluster_duration) - self.n_segments = n_segments - self.time_series_for_high_peaks = time_series_for_high_peaks or [] - self.time_series_for_low_peaks = time_series_for_low_peaks or [] - self.weights = weights or {} - - @property - def use_extreme_periods(self) -> bool: - """Whether extreme period selection is enabled.""" - return bool(self.time_series_for_high_peaks or self.time_series_for_low_peaks) - - def aggregate( - self, - data: xr.Dataset, - n_representatives: int, - hours_per_timestep: float | None = None, - **kwargs, - ) -> AggregationResult: - """Perform TSAM aggregation on the input data. - - For multi-dimensional data (period/scenario), aggregation is performed - independently for each (period, scenario) combination. - - Args: - data: Input time series data as xarray Dataset. - Must have 'time' dimension. - n_representatives: Target number of typical periods (clusters). - hours_per_timestep: Duration of each timestep in hours. - If None, inferred from time coordinates. - **kwargs: Additional options passed to tsam. - - Returns: - AggregationResult with mapping, weights, and aggregated data. 
- """ - # Convert Dataset to DataFrame for tsam - # Handle multi-dimensional case - has_period = 'period' in data.dims - has_scenario = 'scenario' in data.dims - - if has_period or has_scenario: - return self._aggregate_multi_dimensional(data, n_representatives, hours_per_timestep, **kwargs) - else: - return self._aggregate_single(data, n_representatives, hours_per_timestep, **kwargs) - - def _aggregate_single( - self, - data: xr.Dataset, - n_representatives: int, - hours_per_timestep: float | None = None, - **kwargs, - ) -> AggregationResult: - """Aggregate a single-dimensional time series.""" - import pandas as pd - - # Convert to DataFrame - df = data.to_dataframe() - if isinstance(df.index, pd.MultiIndex): - # Flatten multi-index (shouldn't happen for single-dim, but be safe) - df = df.reset_index(drop=True) - - n_timesteps = len(df) - - # Infer hours_per_timestep if not provided - if hours_per_timestep is None: - if 'time' in data.coords and hasattr(data.coords['time'], 'values'): - time_vals = pd.to_datetime(data.coords['time'].values) - if len(time_vals) > 1: - hours_per_timestep = (time_vals[1] - time_vals[0]).total_seconds() / 3600 - else: - hours_per_timestep = 1.0 - else: - hours_per_timestep = 1.0 - - # Calculate number of timesteps per period - timesteps_per_period = int(self.cluster_duration_hours / hours_per_timestep) - total_periods = n_timesteps // timesteps_per_period - - # Determine actual number of clusters - n_clusters = min(n_representatives, total_periods) - - # Create tsam aggregation - tsam_agg = tsam.TimeSeriesAggregation( - df, - noTypicalPeriods=n_clusters, - hoursPerPeriod=self.cluster_duration_hours, - resolution=hours_per_timestep, - clusterMethod='k_means', - extremePeriodMethod='new_cluster_center' if self.use_extreme_periods else 'None', - weightDict={name: w for name, w in self.weights.items() if name in df.columns}, - addPeakMax=self.time_series_for_high_peaks, - addPeakMin=self.time_series_for_low_peaks, - segmentation=self.n_segments is not None, - noSegments=self.n_segments if self.n_segments is not None else 1, - ) - - tsam_agg.createTypicalPeriods() - aggregated_df = tsam_agg.predictOriginalData() - - # Build timestep mapping - # For each original timestep, find which representative timestep it maps to - cluster_order = tsam_agg.clusterOrder - timestep_mapping = np.zeros(n_timesteps, dtype=np.int32) - - for period_idx, cluster_id in enumerate(cluster_order): - for pos in range(timesteps_per_period): - original_idx = period_idx * timesteps_per_period + pos - if original_idx < n_timesteps: - representative_idx = cluster_id * timesteps_per_period + pos - timestep_mapping[original_idx] = representative_idx - - # Build representative weights (how many originals each representative covers) - n_representative_timesteps = n_clusters * timesteps_per_period - representative_weights = np.zeros(n_representative_timesteps, dtype=np.float64) - - for cluster_id, count in tsam_agg.clusterPeriodNoOccur.items(): - for pos in range(timesteps_per_period): - rep_idx = cluster_id * timesteps_per_period + pos - if rep_idx < n_representative_timesteps: - representative_weights[rep_idx] = count - - # Create cluster structure for storage linking - cluster_occurrences = xr.DataArray( - [tsam_agg.clusterPeriodNoOccur.get(c, 0) for c in range(n_clusters)], - dims=['cluster'], - name='cluster_occurrences', - ) - - cluster_structure = ClusterStructure( - cluster_order=xr.DataArray(cluster_order, dims=['original_period'], name='cluster_order'), - 
cluster_occurrences=cluster_occurrences, - n_clusters=n_clusters, - timesteps_per_cluster=timesteps_per_period, - ) - - # Convert aggregated data to xarray Dataset - # Extract only the typical period timesteps - typical_timesteps = n_clusters * timesteps_per_period - aggregated_ds = xr.Dataset( - {col: (['time'], aggregated_df[col].values[:typical_timesteps]) for col in aggregated_df.columns}, - coords={'time': np.arange(typical_timesteps)}, - ) - - return AggregationResult( - timestep_mapping=xr.DataArray(timestep_mapping, dims=['original_time'], name='timestep_mapping'), - n_representatives=n_representative_timesteps, - representative_weights=xr.DataArray(representative_weights, dims=['time'], name='representative_weights'), - aggregated_data=aggregated_ds, - cluster_structure=cluster_structure, - original_data=data, - ) - - def _aggregate_multi_dimensional( - self, - data: xr.Dataset, - n_representatives: int, - hours_per_timestep: float | None = None, - **kwargs, - ) -> AggregationResult: - """Aggregate multi-dimensional data (with period/scenario dims). - - Performs independent aggregation for each (period, scenario) combination, - then combines results into multi-dimensional arrays. - """ - - has_period = 'period' in data.dims - has_scenario = 'scenario' in data.dims - - periods = data.coords['period'].values if has_period else [None] - scenarios = data.coords['scenario'].values if has_scenario else [None] - - # Collect results for each combination - results: dict[tuple, AggregationResult] = {} - - for period in periods: - for scenario in scenarios: - # Select slice - slice_data = data - if period is not None: - slice_data = slice_data.sel(period=period) - if scenario is not None: - slice_data = slice_data.sel(scenario=scenario) - - # Aggregate this slice - result = self._aggregate_single(slice_data, n_representatives, hours_per_timestep, **kwargs) - results[(period, scenario)] = result - - # Combine results into multi-dimensional arrays - # For now, assume all slices have same n_representatives (simplification) - first_result = next(iter(results.values())) - n_rep = first_result.n_representatives - n_original = first_result.n_original_timesteps - - # Build multi-dimensional timestep_mapping - if has_period and has_scenario: - mapping_data = np.zeros((n_original, len(periods), len(scenarios)), dtype=np.int32) - weights_data = np.zeros((n_rep, len(periods), len(scenarios)), dtype=np.float64) - for (p, s), res in results.items(): - pi = list(periods).index(p) - si = list(scenarios).index(s) - mapping_data[:, pi, si] = res.timestep_mapping.values - weights_data[:, pi, si] = res.representative_weights.values - - timestep_mapping = xr.DataArray( - mapping_data, - dims=['original_time', 'period', 'scenario'], - coords={'original_time': np.arange(n_original), 'period': periods, 'scenario': scenarios}, - name='timestep_mapping', - ) - representative_weights = xr.DataArray( - weights_data, - dims=['time', 'period', 'scenario'], - coords={'time': np.arange(n_rep), 'period': periods, 'scenario': scenarios}, - name='representative_weights', - ) - elif has_period: - mapping_data = np.zeros((n_original, len(periods)), dtype=np.int32) - weights_data = np.zeros((n_rep, len(periods)), dtype=np.float64) - for (p, _), res in results.items(): - pi = list(periods).index(p) - mapping_data[:, pi] = res.timestep_mapping.values - weights_data[:, pi] = res.representative_weights.values - - timestep_mapping = xr.DataArray( - mapping_data, - dims=['original_time', 'period'], - coords={'original_time': 
np.arange(n_original), 'period': periods}, - name='timestep_mapping', - ) - representative_weights = xr.DataArray( - weights_data, - dims=['time', 'period'], - coords={'time': np.arange(n_rep), 'period': periods}, - name='representative_weights', - ) - else: # has_scenario only - mapping_data = np.zeros((n_original, len(scenarios)), dtype=np.int32) - weights_data = np.zeros((n_rep, len(scenarios)), dtype=np.float64) - for (_, s), res in results.items(): - si = list(scenarios).index(s) - mapping_data[:, si] = res.timestep_mapping.values - weights_data[:, si] = res.representative_weights.values - - timestep_mapping = xr.DataArray( - mapping_data, - dims=['original_time', 'scenario'], - coords={'original_time': np.arange(n_original), 'scenario': scenarios}, - name='timestep_mapping', - ) - representative_weights = xr.DataArray( - weights_data, - dims=['time', 'scenario'], - coords={'time': np.arange(n_rep), 'scenario': scenarios}, - name='representative_weights', - ) - - # Use cluster structure from first result (for now - could be enhanced) - # In multi-dimensional case, cluster structure may vary by period/scenario - cluster_structure = first_result.cluster_structure - - return AggregationResult( - timestep_mapping=timestep_mapping, - n_representatives=n_rep, - representative_weights=representative_weights, - aggregated_data=first_result.aggregated_data, # Simplified - use first slice's data - cluster_structure=cluster_structure, - original_data=data, - ) - - -def plot_aggregation( - result: AggregationResult, - colormap: str | None = None, - show: bool | None = None, -): - """Plot original vs aggregated data comparison. - - Visualizes the original time series (dashed lines) overlaid with - the aggregated/clustered time series (solid lines) for comparison. - - Args: - result: AggregationResult containing original and aggregated data. - colormap: Colorscale name for the time series colors. - Defaults to CONFIG.Plotting.default_qualitative_colorscale. - show: Whether to display the figure. - Defaults to CONFIG.Plotting.default_show. - - Returns: - PlotResult containing the comparison figure and underlying data. 
- - Example: - >>> result = backend.aggregate(data, n_representatives=8) - >>> plot_aggregation(result) - """ - import plotly.express as px - - from ..color_processing import process_colors - from ..config import CONFIG - from ..plot_result import PlotResult - - if result.original_data is None or result.aggregated_data is None: - raise ValueError('AggregationResult must contain both original_data and aggregated_data for plotting') - - # Convert xarray to DataFrames - original_df = result.original_data.to_dataframe() - aggregated_df = result.aggregated_data.to_dataframe() - - # Expand aggregated data to original length using mapping - mapping = result.timestep_mapping.values - expanded_agg = aggregated_df.iloc[mapping].reset_index(drop=True) - - # Rename for legend - original_df = original_df.rename(columns={col: f'Original - {col}' for col in original_df.columns}) - expanded_agg = expanded_agg.rename(columns={col: f'Aggregated - {col}' for col in expanded_agg.columns}) - - colors = list( - process_colors(colormap or CONFIG.Plotting.default_qualitative_colorscale, list(original_df.columns)).values() - ) - - # Create line plot for original data (dashed) - original_df = original_df.reset_index() - index_name = original_df.columns[0] - df_org_long = original_df.melt(id_vars=index_name, var_name='variable', value_name='value') - fig = px.line(df_org_long, x=index_name, y='value', color='variable', color_discrete_sequence=colors) - for trace in fig.data: - trace.update(line=dict(dash='dash')) - - # Add aggregated data (solid lines) - expanded_agg[index_name] = original_df[index_name] - df_agg_long = expanded_agg.melt(id_vars=index_name, var_name='variable', value_name='value') - fig2 = px.line(df_agg_long, x=index_name, y='value', color='variable', color_discrete_sequence=colors) - for trace in fig2.data: - fig.add_trace(trace) - - fig.update_layout( - title='Original vs Aggregated Data (original = ---)', - xaxis_title='Time', - yaxis_title='Value', - ) - - # Build xarray Dataset with both original and aggregated data - data = xr.Dataset( - { - 'original': result.original_data.to_array(dim='variable'), - 'aggregated': result.aggregated_data.to_array(dim='variable'), - } - ) - plot_result = PlotResult(data=data, figure=fig) - - if show is None: - show = CONFIG.Plotting.default_show - if show: - plot_result.show() - - return plot_result diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 9b932d4b4..25170ab8e 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -574,14 +574,14 @@ def fix_sizes( return new_fs - def cluster_reduce( + def cluster( self, n_clusters: int, cluster_duration: str | float, weights: dict[str, float] | None = None, time_series_for_high_peaks: list[str] | None = None, time_series_for_low_peaks: list[str] | None = None, - storage_inter_period_linking: bool = True, + storage_inter_cluster_linking: bool = True, storage_cyclic: bool = True, ) -> FlowSystem: """ @@ -589,34 +589,33 @@ def cluster_reduce( This method creates a new FlowSystem optimized for sizing studies by reducing the number of timesteps to only the typical (representative) clusters identified - through time series aggregation. Unlike `cluster()` which uses equality constraints, - this method actually reduces the problem size for faster solving. + through time series aggregation using the tsam package. The method: - 1. Performs time series clustering using tsam + 1. Performs time series clustering using tsam (k-means) 2. 
Extracts only the typical clusters (not all original timesteps) 3. Applies timestep weighting for accurate cost representation 4. Optionally links storage states between clusters via boundary variables - Use this for initial sizing optimization, then use `fix_sizes()` to re-optimize + Use this for initial sizing optimization, then use ``fix_sizes()`` to re-optimize at full resolution for accurate dispatch results. Args: - n_clusters: Number of clusters (typical segments) to extract (e.g., 8 typical days). + n_clusters: Number of clusters (typical periods) to extract (e.g., 8 typical days). cluster_duration: Duration of each cluster. Can be a pandas-style string ('1D', '24h', '6h') or a numeric value in hours. weights: Optional clustering weights per time series. Keys are time series labels. time_series_for_high_peaks: Time series labels for explicitly selecting high-value clusters. **Recommended** for demand time series to capture peak demand days. time_series_for_low_peaks: Time series labels for explicitly selecting low-value clusters. - storage_inter_period_linking: If True, link storage states between clusters using + storage_inter_cluster_linking: If True, link storage states between clusters using boundary variables. This preserves long-term storage behavior. Default: True. storage_cyclic: If True, enforce SOC_boundary[0] = SOC_boundary[end] for storages. - Only used when storage_inter_period_linking=True. Default: True. + Only used when storage_inter_cluster_linking=True. Default: True. Returns: A new FlowSystem with reduced timesteps (only typical clusters). - The FlowSystem has metadata stored in `_aggregation_info` for expansion. + The FlowSystem has metadata stored in ``_aggregation_info`` for expansion. Raises: ValueError: If timestep sizes are inconsistent. @@ -626,7 +625,7 @@ def cluster_reduce( Two-stage sizing optimization: >>> # Stage 1: Size with reduced timesteps (fast) - >>> fs_sizing = flow_system.transform.cluster_reduce( + >>> fs_sizing = flow_system.transform.cluster( ... n_clusters=8, ... cluster_duration='1D', ... time_series_for_high_peaks=['HeatDemand(Q_th)|fixed_relative_profile'], @@ -644,7 +643,7 @@ def cluster_reduce( Note: - This is best suited for initial sizing, not final dispatch optimization - - Use `time_series_for_high_peaks` to ensure peak demand clusters are captured + - Use ``time_series_for_high_peaks`` to ensure peak demand clusters are captured - A 5-10% safety margin on sizes is recommended for the dispatch stage - Storage linking adds SOC_boundary variables to track state between clusters """ @@ -665,7 +664,7 @@ def cluster_reduce( dt = float(self._fs.timestep_duration.min().item()) if not np.isclose(dt, float(self._fs.timestep_duration.max().item())): raise ValueError( - f'cluster_reduce() requires uniform timestep sizes, got min={dt}h, ' + f'cluster() requires uniform timestep sizes, got min={dt}h, ' f'max={float(self._fs.timestep_duration.max().item())}h.' ) if not np.isclose(hours_per_cluster / dt, round(hours_per_cluster / dt), atol=1e-9): @@ -837,7 +836,7 @@ def _build_weights_for_key(key: tuple) -> xr.DataArray: result=aggregation_result, original_flow_system=self._fs, backend_name='tsam', - storage_inter_cluster_linking=storage_inter_period_linking, + storage_inter_cluster_linking=storage_inter_cluster_linking, storage_cyclic=storage_cyclic, ) @@ -892,7 +891,7 @@ def _combine_slices_to_dataarray( def expand_solution(self) -> FlowSystem: """Expand a reduced (clustered) FlowSystem back to full original timesteps. 
- After solving a FlowSystem created with ``cluster_reduce()``, this method + After solving a FlowSystem created with ``cluster()``, this method disaggregates the FlowSystem by: 1. Expanding all time series data from typical clusters to full timesteps 2. Expanding the solution by mapping each typical cluster back to all @@ -909,14 +908,14 @@ def expand_solution(self) -> FlowSystem: FlowSystem: A new FlowSystem with full timesteps and expanded solution. Raises: - ValueError: If the FlowSystem was not created with ``cluster_reduce()``. + ValueError: If the FlowSystem was not created with ``cluster()``. ValueError: If the FlowSystem has no solution. Examples: Two-stage optimization with solution expansion: >>> # Stage 1: Size with reduced timesteps - >>> fs_reduced = flow_system.transform.cluster_reduce( + >>> fs_reduced = flow_system.transform.cluster( ... n_clusters=8, ... cluster_duration='1D', ... ) @@ -945,7 +944,7 @@ def expand_solution(self) -> FlowSystem: # Validate if self._fs._aggregation_info is None: raise ValueError( - 'expand_solution() requires a FlowSystem created with cluster_reduce() or aggregate(). ' + 'expand_solution() requires a FlowSystem created with cluster(). ' 'This FlowSystem has no aggregation info.' ) if self._fs.solution is None: @@ -1104,190 +1103,3 @@ def _expand_dataarray( periods=periods, scenarios=scenarios, ) - - # ===================================================================== - # New Aggregation API (Phase 3 - Backend-agnostic interface) - # ===================================================================== - - def aggregate( - self, - method: str | Any = 'tsam', - n_representatives: int | None = None, - **kwargs, - ) -> FlowSystem: - """Unified aggregation method supporting multiple backends. - - This is the recommended API for time series aggregation. It supports - multiple backends (TSAM, manual, etc.) through a unified interface. - - For TSAM backend, this delegates to cluster_reduce(). - - Args: - method: Aggregation backend. Options: - - 'tsam': Use TSAM package for k-means clustering (default) - - 'manual': Use ManualBackend with pre-computed mapping - - Custom Aggregator instance - n_representatives: Target number of clusters (typical periods). - For 'tsam' with cluster_duration='1D', this is the number of - typical days. - **kwargs: Backend-specific options. For 'tsam': - - cluster_duration: Duration per cluster ('1D', '24h', etc.) - - time_series_for_high_peaks: Force high-value period inclusion - - time_series_for_low_peaks: Force low-value period inclusion - - weights: Custom clustering weights - - Returns: - New FlowSystem with reduced timesteps. - - Example: - >>> # TSAM clustering with 8 typical days - >>> fs_agg = fs.transform.aggregate( - ... method='tsam', - ... n_representatives=8, - ... cluster_duration='1D', - ... 
) - - >>> # Manual aggregation with external clustering - >>> fs_agg = fs.transform.set_aggregation(my_mapping, my_weights) - - See Also: - set_aggregation: For PyPSA-style manual aggregation - cluster_reduce: TSAM reduction-based clustering - """ - from .aggregation import Aggregator, get_backend - - # Handle string backend names - if isinstance(method, str): - backend_cls = get_backend(method) - if method == 'tsam': - # Delegate to existing TSAM method - return self._aggregate_tsam(n_representatives, **kwargs) - elif method == 'manual': - raise ValueError("Use set_aggregation() for manual aggregation, not aggregate(method='manual')") - else: - # Custom registered backend - _backend = backend_cls(**kwargs) # noqa: F841 - elif isinstance(method, Aggregator): - _backend = method # noqa: F841 - else: - raise TypeError(f'method must be str or Aggregator, got {type(method)}') - - # Use backend to aggregate - raise NotImplementedError( - "Generic backend aggregation not yet implemented. Use method='tsam' or set_aggregation() for now." - ) - - def _aggregate_tsam( - self, - n_representatives: int | None, - **kwargs, - ) -> FlowSystem: - """Internal: delegate to cluster_reduce().""" - # Extract TSAM-specific kwargs - cluster_duration = kwargs.pop('cluster_duration', '1D') - time_series_for_high_peaks = kwargs.pop('time_series_for_high_peaks', None) - time_series_for_low_peaks = kwargs.pop('time_series_for_low_peaks', None) - weights = kwargs.pop('weights', None) - - return self.cluster_reduce( - n_clusters=n_representatives, - cluster_duration=cluster_duration, - weights=weights, - time_series_for_high_peaks=time_series_for_high_peaks, - time_series_for_low_peaks=time_series_for_low_peaks, - storage_cyclic=kwargs.pop('storage_cyclic', True), - ) - - def set_aggregation( - self, - timestep_mapping: xr.DataArray, - weights: xr.DataArray, - cluster_structure: Any = None, - aggregated_data: xr.Dataset | None = None, - ) -> FlowSystem: - """Set aggregation from external tool (PyPSA-style workflow). - - This enables users to bring their own aggregation results from any tool - (sklearn, custom algorithms, hierarchical clustering, etc.) and apply - them to flixopt. - - This is similar to PyPSA's approach where aggregation is done externally - and the framework just accepts the results. - - Args: - timestep_mapping: Maps each original timestep to representative index. - DataArray with dims [original_time]. - Values should be integers in range [0, n_representatives). - weights: Weight for each representative timestep. - DataArray with dims [time]. - Typically equals count of original timesteps each representative covers. - This becomes the cluster_weight in the reduced FlowSystem. - cluster_structure: Optional ClusterStructure for storage inter-cluster linking. - Required for proper storage optimization. - aggregated_data: Optional pre-aggregated time series data. - If not provided, data will be extracted from mapping. - - Returns: - New FlowSystem with reduced timesteps. - - Example: - >>> # External clustering with sklearn - >>> from sklearn.cluster import KMeans - >>> import xarray as xr - >>> - >>> # ... perform clustering ... - >>> mapping = xr.DataArray(my_mapping, dims=['original_time']) - >>> weights = xr.DataArray(my_weights, dims=['time']) - >>> - >>> fs_agg = fs.transform.set_aggregation( - ... timestep_mapping=mapping, - ... weights=weights, - ... 
) - - See Also: - aggregate: Unified aggregation API with backend support - flixopt.aggregation.ManualBackend: Backend class for manual aggregation - flixopt.aggregation.create_manual_backend_from_labels: Helper for sklearn labels - """ - from .aggregation import ManualBackend - - # Create ManualBackend from provided data - backend = ManualBackend( - timestep_mapping=timestep_mapping, - representative_weights=weights, - cluster_structure=cluster_structure, - ) - - # Build aggregation result - # For now, we need to convert flow_system data to xr.Dataset for the backend - data = self._fs_data_to_dataset() - n_representatives = len(weights) - - _result = backend.aggregate(data, n_representatives) # noqa: F841 - - # Full implementation would create FlowSystem directly from result - raise NotImplementedError( - 'set_aggregation() is not yet fully implemented. Use cluster_reduce() for TSAM-based aggregation.' - ) - - def _fs_data_to_dataset(self) -> xr.Dataset: - """Convert FlowSystem time series data to xarray Dataset.""" - from .core import TimeSeriesData - - data_vars = {} - for element in self._fs.values(): - for attr_name, attr_value in element.__dict__.items(): - if isinstance(attr_value, TimeSeriesData) and attr_value.has_data: - name = f'{element.label_full}|{attr_name}' - data_vars[name] = (['time'], attr_value.data.values) - - return xr.Dataset( - data_vars, - coords={'time': self._fs.timesteps}, - ) - - # Future methods can be added here: - # - # def mga(self, alternatives: int = 5) -> FlowSystem: - # """Create a FlowSystem configured for Modeling to Generate Alternatives.""" - # ... From 3b8872291258c68e50864196fb9a5bca26128fdf Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 22:40:10 +0100 Subject: [PATCH 072/191] =?UTF-8?q?=E2=8F=BA=20Summary?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Successfully simplified the aggregation module to a single cluster() method: Removed - aggregation/tsam_backend.py - unused wrapper class - aggregation/manual.py - unclear user value - Aggregator protocol from base.py - aggregate(), set_aggregation(), _aggregate_tsam() methods from transform_accessor.py - test_manual_backend.py test file Renamed - cluster_reduce() → cluster() - storage_inter_period_linking param → storage_inter_cluster_linking Kept - Core dataclasses: AggregationResult, ClusterStructure, AggregationInfo - InterClusterLinking for storage state linking - plot_aggregation() (moved to base.py) - create_cluster_structure_from_mapping() utility New API # Simple, single method fs_clustered = flow_system.transform.cluster( n_clusters=8, cluster_duration='1D', time_series_for_high_peaks=['Demand|fixed_relative_profile'], ) Files Changed | File | Action | |-----------------------------------------------|-------------------------------------------------------| | transform_accessor.py | Renamed method, removed obsolete methods | | aggregation/base.py | Removed Aggregator protocol, added plot_aggregation() | | aggregation/tsam_backend.py | DELETED | | aggregation/manual.py | DELETED | | aggregation/__init__.py | Simplified exports | | tests/test_aggregation/test_manual_backend.py | DELETED | | Notebooks 08a,08c,08d,08e | Updated to use cluster() | All 41 tests pass. 
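Two-stage workflow (sketch)

The intended end-to-end use is sizing on the clustered system, then expansion back
to full resolution. Illustrative only: the solver object and the peak-series label
are placeholders, not fixed API; see the cluster() docstring for the documented
parameters.

```python
# Stage 1: size on the reduced (clustered) FlowSystem
fs_sizing = flow_system.transform.cluster(
    n_clusters=8,
    cluster_duration='1D',
    time_series_for_high_peaks=['Demand|fixed_relative_profile'],  # placeholder label
)
fs_sizing.optimize(solver)  # any configured flixopt solver

# Stage 2: expand the solution back to the original timesteps
# (or re-optimize dispatch at full resolution via fix_sizes())
fs_full = fs_sizing.transform.expand_solution()
```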
--- docs/notebooks/08a-aggregation.ipynb | 2 +- docs/notebooks/08c-clustering.ipynb | 14 +- .../08d-clustering-multiperiod.ipynb | 10 +- docs/notebooks/08e-clustering-internals.ipynb | 93 ++++++++----- flixopt/aggregation/base.py | 4 +- flixopt/flow_system.py | 8 +- flixopt/optimization.py | 3 +- flixopt/structure.py | 2 +- flixopt/transform_accessor.py | 2 +- tests/test_aggregation/test_integration.py | 82 +++-------- tests/test_aggregation/test_manual_backend.py | 131 ------------------ tests/test_cluster_reduce_expand.py | 36 ++--- 12 files changed, 118 insertions(+), 269 deletions(-) delete mode 100644 tests/test_aggregation/test_manual_backend.py diff --git a/docs/notebooks/08a-aggregation.ipynb b/docs/notebooks/08a-aggregation.ipynb index b7ed85f7d..6d0260539 100644 --- a/docs/notebooks/08a-aggregation.ipynb +++ b/docs/notebooks/08a-aggregation.ipynb @@ -392,7 +392,7 @@ "\n", "### Further Reading\n", "\n", - "- For clustering with typical periods, see `transform.aggregate()` (requires `tsam` package)\n", + "- For clustering with typical periods, see `transform.cluster()` (requires `tsam` package)\n", "- For time selection, see `transform.sel()` and `transform.isel()`" ] } diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb index 34ecae48b..55a2f4a18 100644 --- a/docs/notebooks/08c-clustering.ipynb +++ b/docs/notebooks/08c-clustering.ipynb @@ -158,9 +158,8 @@ "peak_series = ['HeatDemand(Q_th)|fixed_relative_profile']\n", "\n", "# Create reduced FlowSystem with 8 typical days\n", - "fs_clustered = flow_system.transform.aggregate(\n", - " method='tsam',\n", - " n_representatives=8, # 8 typical days\n", + "fs_clustered = flow_system.transform.cluster(\n", + " n_clusters=8, # 8 typical days\n", " cluster_duration='1D', # Daily clustering\n", " time_series_for_high_peaks=peak_series, # Capture peak demand day\n", " storage_cyclic=True, # SOC[end] = SOC[start]\n", @@ -433,12 +432,12 @@ "source": [ "## API Reference\n", "\n", - "### `transform.aggregate()` Parameters\n", + "### `transform.cluster()` Parameters\n", "\n", "| Parameter | Type | Description |\n", "|-----------|------|-------------|\n", "| `method` | `str` | Aggregation backend: 'tsam' (default) or 'manual' |\n", - "| `n_representatives` | `int` | Number of typical periods (e.g., 8 typical days) |\n", + "| `n_clusters` | `int` | Number of typical periods (e.g., 8 typical days) |\n", "| `cluster_duration` | `str \\| float` | Duration per cluster ('1D', '24h') or hours |\n", "| `weights` | `dict[str, float]` | Optional weights for time series in clustering |\n", "| `time_series_for_high_peaks` | `list[str]` | **Essential**: Force inclusion of peak periods |\n", @@ -455,9 +454,8 @@ "\n", "```python\n", "# Stage 1: Fast sizing\n", - "fs_sizing = flow_system.transform.aggregate(\n", - " method='tsam',\n", - " n_representatives=8,\n", + "fs_sizing = flow_system.transform.cluster(\n", + " n_clusters=8,\n", " cluster_duration='1D',\n", " time_series_for_high_peaks=['Demand(Flow)|fixed_relative_profile'],\n", ")\n", diff --git a/docs/notebooks/08d-clustering-multiperiod.ipynb b/docs/notebooks/08d-clustering-multiperiod.ipynb index 38f7794d8..c97053af1 100644 --- a/docs/notebooks/08d-clustering-multiperiod.ipynb +++ b/docs/notebooks/08d-clustering-multiperiod.ipynb @@ -190,9 +190,8 @@ "peak_series = ['Building(Heat)|fixed_relative_profile']\n", "\n", "# Cluster to 3 typical days (from 7 days)\n", - "fs_clustered = flow_system.transform.aggregate(\n", - " method='tsam',\n", - " n_representatives=3,\n", + 
"fs_clustered = flow_system.transform.cluster(\n", + " n_clusters=3,\n", " cluster_duration='1D',\n", " time_series_for_high_peaks=peak_series,\n", ")\n", @@ -560,9 +559,8 @@ "fs = fs.transform.isel(time=slice(0, 168)) # First 168 timesteps\n", "\n", "# Aggregate (applies per period/scenario)\n", - "fs_clustered = fs.transform.aggregate(\n", - " method='tsam',\n", - " n_representatives=10,\n", + "fs_clustered = fs.transform.cluster(\n", + " n_clusters=10,\n", " cluster_duration='1D',\n", " time_series_for_high_peaks=['Demand(Flow)|fixed_relative_profile'],\n", ")\n", diff --git a/docs/notebooks/08e-clustering-internals.ipynb b/docs/notebooks/08e-clustering-internals.ipynb index 1efff22b0..053a77a30 100644 --- a/docs/notebooks/08e-clustering-internals.ipynb +++ b/docs/notebooks/08e-clustering-internals.ipynb @@ -68,9 +68,8 @@ "outputs": [], "source": [ "# Create a clustered system for analysis\n", - "fs_clustered = flow_system.transform.aggregate(\n", - " method='tsam',\n", - " n_representatives=8,\n", + "fs_clustered = flow_system.transform.cluster(\n", + " n_clusters=8,\n", " cluster_duration='1D',\n", " time_series_for_high_peaks=['HeatDemand(Q_th)|fixed_relative_profile'],\n", ")\n", @@ -309,7 +308,7 @@ "flixopt uses the [TSAM](https://github.com/FZJ-IEK3-VSA/tsam) (Time Series Aggregation Module) \n", "package for clustering. TSAM uses k-means clustering to group similar time periods.\n", "\n", - "### The Clustering Object" + "### The AggregationResult Object" ] }, { @@ -319,11 +318,13 @@ "metadata": {}, "outputs": [], "source": [ - "# Access the TSAM clustering object\n", - "clustering = info['clustering']\n", + "# Access the AggregationResult which contains the TSAM clustering data\n", + "info = fs_clustered._aggregation_info\n", + "result = info.result\n", "\n", - "print(f'Clustering type: {type(clustering).__name__}')\n", - "print(f'\\nTSAM aggregation object: {type(clustering.tsam).__name__}')" + "print(f'AggregationResult type: {type(result).__name__}')\n", + "print(f'Timestep mapping shape: {result.timestep_mapping.shape}')\n", + "print(f'Representative weights shape: {result.representative_weights.shape}')" ] }, { @@ -333,11 +334,14 @@ "metadata": {}, "outputs": [], "source": [ - "# The TSAM object contains the clustering results\n", - "tsam = clustering.tsam\n", + "# The AggregationResult contains aggregated data\n", + "result = fs_clustered._aggregation_info.result\n", "\n", - "print('TSAM typical periods (centroids):')\n", - "print(tsam.typicalPeriods.head(10))" + "print('Aggregated data variables:')\n", + "if result.aggregated_data is not None:\n", + " for var_name in result.aggregated_data.data_vars:\n", + " shape = result.aggregated_data[var_name].shape\n", + " print(f' {var_name}: {shape}')" ] }, { @@ -347,10 +351,13 @@ "metadata": {}, "outputs": [], "source": [ - "# Cluster centers vs original data\n", - "print('\\nOriginal time series used for clustering:')\n", - "print(f'Shape: {tsam.normalizedPeriodlyProfiles.shape}')\n", - "print(f'Columns: {list(tsam.normalizedPeriodlyProfiles.columns)}')" + "# Show structure of original data (used for clustering)\n", + "result = fs_clustered._aggregation_info.result\n", + "\n", + "print('Original data used for clustering:')\n", + "if result.original_data is not None:\n", + " print(f'Shape: {dict(result.original_data.dims)}')\n", + " print(f'Variables: {list(result.original_data.data_vars)[:5]}...') # Show first 5" ] }, { @@ -372,6 +379,11 @@ "original_demand = 
flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile.values\n", "clustered_demand = fs_clustered.components['HeatDemand'].inputs[0].fixed_relative_profile.values\n", "\n", + "# Get cluster structure info\n", + "info = fs_clustered._aggregation_info\n", + "cs = info.result.cluster_structure\n", + "cluster_occurrences = dict(cs.cluster_occurrences)\n", + "\n", "# Reshape original demand into days\n", "timesteps_per_day = 96 # 15-minute resolution\n", "n_days = len(original_demand) // timesteps_per_day\n", @@ -381,7 +393,7 @@ "fig = make_subplots(\n", " rows=2,\n", " cols=1,\n", - " subplot_titles=['Original: All 31 Days', f'Clustered: {info[\"n_clusters\"]} Typical Days'],\n", + " subplot_titles=['Original: All 31 Days', f'Clustered: {cs.n_clusters} Typical Days'],\n", " vertical_spacing=0.15,\n", ")\n", "\n", @@ -403,7 +415,7 @@ "\n", "# Plot typical days (bold colors)\n", "colors = px.colors.qualitative.Set1\n", - "n_clusters = info['n_clusters']\n", + "n_clusters = cs.n_clusters\n", "clustered_by_day = clustered_demand.reshape(n_clusters, timesteps_per_day)\n", "\n", "for cluster_id in range(n_clusters):\n", @@ -455,9 +467,11 @@ "metadata": {}, "outputs": [], "source": [ + "info = fs_clustered._aggregation_info\n", + "\n", "print('Storage settings:')\n", - "print(f' storage_cyclic: {info[\"storage_cyclic\"]}')\n", - "print(f' storage_inter_period_linking: {info[\"storage_inter_period_linking\"]}')\n", + "print(f' storage_cyclic: {info.storage_cyclic}')\n", + "print(f' storage_inter_cluster_linking: {info.storage_inter_cluster_linking}')\n", "\n", "# Show storage charge state in clustered solution\n", "charge_state = fs_clustered.solution['Storage|charge_state']\n", @@ -474,14 +488,18 @@ "outputs": [], "source": [ "# Visualize storage behavior across typical periods\n", + "info = fs_clustered._aggregation_info\n", + "cs = info.result.cluster_structure\n", + "cluster_occurrences = dict(cs.cluster_occurrences)\n", + "\n", "fig = go.Figure()\n", "\n", - "timesteps_per_day = info['timesteps_per_cluster']\n", + "timesteps_per_day = cs.timesteps_per_cluster\n", "charge_values = charge_state.values\n", "\n", "# Plot each typical day's storage trajectory\n", "colors = px.colors.qualitative.Set1\n", - "for cluster_id in range(info['n_clusters']):\n", + "for cluster_id in range(cs.n_clusters):\n", " start_idx = cluster_id * timesteps_per_day\n", " end_idx = start_idx + timesteps_per_day + 1 # Include endpoint\n", "\n", @@ -531,8 +549,7 @@ "\n", "print('FlowSystem weights structure:')\n", "print(f' Type: {type(weights).__name__}')\n", - "print(f' temporal: {weights.temporal}')\n", - "print(f' aggregation_weight: {weights.aggregation_weight}')" + "print(f' temporal: {weights.temporal}')" ] }, { @@ -544,12 +561,11 @@ "source": [ "# Compare weights for original vs clustered systems\n", "print('Original system weights:')\n", - "print(f' temporal: {flow_system.weights.temporal}')\n", - "print(f' aggregation_weight: {flow_system.weights.aggregation_weight}')\n", + "print(f' temporal sum: {flow_system.weights.temporal.sum().item():.0f}')\n", "\n", "print('\\nClustered system weights:')\n", - "print(f' temporal: {fs_clustered.weights.temporal}')\n", - "print(f' aggregation_weight (cluster_weight): sum = {fs_clustered.weights.aggregation_weight.sum().item():.0f}')" + "print(f' temporal sum: {fs_clustered.weights.temporal.sum().item():.0f}')\n", + "print(f' cluster_weight sum: {fs_clustered.cluster_weight.sum().item():.0f}')" ] }, { @@ -578,10 +594,12 @@ "metadata": {}, "outputs": [], 
"source": [ - "# Show the time series used for clustering and their weights\n", - "if hasattr(clustering, 'tsam') and hasattr(clustering.tsam, 'normalizedPeriodlyProfiles'):\n", - " ts_names = list(clustering.tsam.normalizedPeriodlyProfiles.columns)\n", - " print('Time series used for clustering:')\n", + "# The time series used for clustering come from the FlowSystem's dataset\n", + "# The cluster() method extracts all time-varying data for clustering\n", + "info = fs_clustered._aggregation_info\n", + "if info.result.original_data is not None:\n", + " ts_names = list(info.result.original_data.data_vars)[:10] # Show first 10\n", + " print('Time series used for clustering (first 10):')\n", " for name in ts_names:\n", " print(f' - {name}')" ] @@ -605,6 +623,11 @@ "outputs": [], "source": [ "# Find which cluster contains the peak demand day\n", + "info = fs_clustered._aggregation_info\n", + "cs = info.result.cluster_structure\n", + "cluster_order = cs.cluster_order.values\n", + "cluster_occurrences = dict(cs.cluster_occurrences)\n", + "\n", "original_demand = flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile.values\n", "daily_max = original_demand.reshape(-1, 96).max(axis=1)\n", "\n", @@ -622,9 +645,9 @@ "\n", "# The peak day should be in a cluster with weight 1 (unique)\n", "if peak_weight == 1:\n", - " print('\\\\n✓ Peak day is isolated in its own cluster (weight=1) - good!')\n", + " print('\\n✓ Peak day is isolated in its own cluster (weight=1) - good!')\n", "else:\n", - " print(f'\\\\n⚠ Peak day shares cluster with {peak_weight - 1} other day(s)')" + " print(f'\\n⚠ Peak day shares cluster with {peak_weight - 1} other day(s)')" ] }, { @@ -636,7 +659,7 @@ "\n", "You learned about the internal mechanics of clustering:\n", "\n", - "1. **`_cluster_info`**: Contains all metadata for expansion and analysis\n", + "1. **`_aggregation_info`**: Contains all metadata for expansion and analysis\n", "2. **Cluster weights**: Scale operational costs so each typical period represents its original days\n", "3. **TSAM integration**: k-means clustering groups similar time periods\n", "4. **Storage handling**: Cyclic constraints ensure realistic storage behavior\n", diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index d57ee6298..ed01d56b8 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -29,7 +29,7 @@ class ClusterStructure: This class captures the hierarchical structure of time series clustering, which is needed for proper storage state-of-charge tracking across - typical periods when using cluster_reduce(). + typical periods when using cluster(). Note: "original_period" here refers to the original time chunks before clustering (e.g., 365 original days), NOT the model's "period" dimension @@ -128,7 +128,7 @@ class AggregationResult: aggregated_data: Time series data aggregated to representative timesteps. Optional - some backends may not aggregate data. cluster_structure: Hierarchical clustering structure for storage linking. - Optional - only needed when using cluster_reduce() mode. + Optional - only needed when using cluster() mode. original_data: Reference to original data before aggregation. Optional - useful for expand_solution(). 
diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 018e89ad5..7d61a9386 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -70,7 +70,7 @@ class FlowSystem(Interface, CompositeContainerMixin[Element]): Period weights are always computed internally from the period index (like timestep_duration for time). The final `weights` array (accessible via `flow_system.model.objective_weights`) is computed as period_weights × normalized_scenario_weights, with normalization applied to the scenario weights by default. cluster_weight: Weight for each timestep representing cluster representation count. - If None (default), all timesteps have weight 1.0. Used by cluster_reduce() to specify + If None (default), all timesteps have weight 1.0. Used by cluster() to specify how many original timesteps each cluster represents. Combined with timestep_duration via aggregation_weight for proper time aggregation in clustered models. scenario_independent_sizes: Controls whether investment sizes are equalized across scenarios. @@ -196,9 +196,9 @@ def __init__( self.timestep_duration = self.fit_to_model_coords('timestep_duration', timestep_duration) - # Cluster weight for cluster_reduce optimization (default 1.0) + # Cluster weight for cluster() optimization (default 1.0) # Represents how many original timesteps each cluster represents - # May have period/scenario dimensions if cluster_reduce was used with those + # May have period/scenario dimensions if cluster() was used with those self.cluster_weight = self.fit_to_model_coords( 'cluster_weight', np.ones(len(self.timesteps)) if cluster_weight is None else cluster_weight, @@ -232,7 +232,7 @@ def __init__( # Solution dataset - populated after optimization or loaded from file self._solution: xr.Dataset | None = None - # Aggregation info - populated by transform.cluster_reduce() or transform.aggregate() + # Aggregation info - populated by transform.cluster() self._aggregation_info: AggregationInfo | None = None # Statistics accessor cache - lazily initialized, invalidated on new solution diff --git a/flixopt/optimization.py b/flixopt/optimization.py index 0d643d1b0..6a1a87ce1 100644 --- a/flixopt/optimization.py +++ b/flixopt/optimization.py @@ -6,8 +6,7 @@ 1. Optimization: Optimizes the FlowSystemModel for the full FlowSystem 2. SegmentedOptimization: Solves a FlowSystemModel for each individual Segment of the FlowSystem. -For time series aggregation (clustering), use FlowSystem.transform.aggregate() or -FlowSystem.transform.cluster_reduce() instead. +For time series aggregation (clustering), use FlowSystem.transform.cluster() instead. """ from __future__ import annotations diff --git a/flixopt/structure.py b/flixopt/structure.py index eafee4e0c..d401451c1 100644 --- a/flixopt/structure.py +++ b/flixopt/structure.py @@ -305,7 +305,7 @@ def hours_of_previous_timesteps(self): @property def cluster_weight(self) -> xr.DataArray: - """Cluster weight for cluster_reduce optimization. + """Cluster weight for cluster() optimization. Represents how many original timesteps each cluster represents. Default is 1.0 for all timesteps. 
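Numerically, the weighting described here behaves as in the following sketch. This is
plain NumPy, not flixopt internals: the multiplicative combination of cluster_weight
and timestep_duration is an assumption based on the docstring above, and the
8-day/2-cluster numbers are illustrative.

```python
import numpy as np

# 8 days at hourly resolution (192 steps) reduced to 2 typical days (48 steps).
# cluster_weight counts how many original days each typical day represents.
cluster_weight = np.repeat([5.0, 3.0], 24)  # typical day 0 covers 5 days, day 1 covers 3
timestep_duration = np.ones(48)             # hours per reduced timestep

# Assumed combination into aggregation_weight, so that summed operational
# costs represent the full original horizon:
aggregation_weight = cluster_weight * timestep_duration
assert aggregation_weight.sum() == 8 * 24   # 192 represented hours
```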
diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 25170ab8e..2f8143aa9 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -31,7 +31,7 @@ class TransformAccessor: Examples: Time series aggregation (8 typical days): - >>> reduced_fs = flow_system.transform.cluster_reduce(n_clusters=8, cluster_duration='1D') + >>> reduced_fs = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D') >>> reduced_fs.optimize(solver) >>> expanded_fs = reduced_fs.transform.expand_solution() diff --git a/tests/test_aggregation/test_integration.py b/tests/test_aggregation/test_integration.py index c0f5f3054..664808b66 100644 --- a/tests/test_aggregation/test_integration.py +++ b/tests/test_aggregation/test_integration.py @@ -86,18 +86,18 @@ def test_weights_with_cluster_weight(self): np.testing.assert_array_almost_equal(weights.temporal.values, expected) -class TestAggregateMethod: - """Tests for FlowSystem.transform.aggregate method.""" +class TestClusterMethod: + """Tests for FlowSystem.transform.cluster method.""" - def test_aggregate_method_exists(self): - """Test that transform.aggregate method exists.""" + def test_cluster_method_exists(self): + """Test that transform.cluster method exists.""" fs = FlowSystem(timesteps=pd.date_range('2024-01-01', periods=48, freq='h')) - assert hasattr(fs.transform, 'aggregate') - assert callable(fs.transform.aggregate) + assert hasattr(fs.transform, 'cluster') + assert callable(fs.transform.cluster) - def test_aggregate_tsam_delegates_to_cluster_reduce(self): - """Test that aggregate with method='tsam' works.""" + def test_cluster_reduces_timesteps(self): + """Test that cluster reduces timesteps.""" # This test requires tsam to be installed pytest.importorskip('tsam') from flixopt import Bus, Flow, Sink, Source @@ -119,41 +119,15 @@ def test_aggregate_tsam_delegates_to_cluster_reduce(self): sink = Sink('demand', inputs=[demand_flow]) fs.add_elements(source, sink, bus) - # Should delegate to cluster_reduce - reduce 7 days to 2 representative days - fs_agg = fs.transform.aggregate( - method='tsam', - n_representatives=2, + # Reduce 7 days to 2 representative days + fs_clustered = fs.transform.cluster( + n_clusters=2, cluster_duration='1D', ) # Check that timesteps were reduced (from 168 hours to 48 hours = 2 days x 24 hours) - assert len(fs_agg.timesteps) < len(fs.timesteps) - assert len(fs_agg.timesteps) == 48 # 2 representative days x 24 hours - - -class TestSetAggregationMethod: - """Tests for FlowSystem.transform.set_aggregation method.""" - - def test_set_aggregation_method_exists(self): - """Test that transform.set_aggregation method exists.""" - fs = FlowSystem(timesteps=pd.date_range('2024-01-01', periods=24, freq='h')) - - assert hasattr(fs.transform, 'set_aggregation') - assert callable(fs.transform.set_aggregation) - - def test_set_aggregation_raises_not_implemented(self): - """Test that set_aggregation raises NotImplementedError for now.""" - fs = FlowSystem(timesteps=pd.date_range('2024-01-01', periods=24, freq='h')) - - mapping = xr.DataArray(np.arange(24) % 4, dims=['original_time']) - weights = xr.DataArray([6.0, 6.0, 6.0, 6.0], dims=['time']) - - # For now, should raise NotImplementedError - with pytest.raises(NotImplementedError): - fs.transform.set_aggregation( - timestep_mapping=mapping, - weights=weights, - ) + assert len(fs_clustered.timesteps) < len(fs.timesteps) + assert len(fs_clustered.timesteps) == 48 # 2 representative days x 24 hours class TestAggregationModuleImports: @@ 
-165,28 +139,16 @@ def test_import_from_flixopt(self): assert hasattr(aggregation, 'AggregationResult') assert hasattr(aggregation, 'ClusterStructure') - assert hasattr(aggregation, 'Aggregator') - assert hasattr(aggregation, 'TSAMBackend') - assert hasattr(aggregation, 'ManualBackend') - - def test_list_backends(self): - """Test list_backends function.""" - from flixopt.aggregation import list_backends - - backends = list_backends() - assert 'manual' in backends - # 'tsam' may or may not be available depending on installation + assert hasattr(aggregation, 'AggregationInfo') - def test_get_backend(self): - """Test get_backend function.""" - from flixopt.aggregation import ManualBackend, get_backend + def test_plot_aggregation_available(self): + """Test that plot_aggregation is available.""" + from flixopt.aggregation import plot_aggregation - backend_cls = get_backend('manual') - assert backend_cls is ManualBackend + assert callable(plot_aggregation) - def test_get_backend_invalid(self): - """Test get_backend raises for invalid backend.""" - from flixopt.aggregation import get_backend + def test_create_cluster_structure_from_mapping_available(self): + """Test that create_cluster_structure_from_mapping is available.""" + from flixopt.aggregation import create_cluster_structure_from_mapping - with pytest.raises(ValueError, match='Unknown backend'): - get_backend('nonexistent') + assert callable(create_cluster_structure_from_mapping) diff --git a/tests/test_aggregation/test_manual_backend.py b/tests/test_aggregation/test_manual_backend.py deleted file mode 100644 index 8ba8f3c79..000000000 --- a/tests/test_aggregation/test_manual_backend.py +++ /dev/null @@ -1,131 +0,0 @@ -"""Tests for flixopt.aggregation.manual module.""" - -import numpy as np -import pytest -import xarray as xr - -from flixopt.aggregation import ( - ManualBackend, - create_manual_backend_from_labels, - create_manual_backend_from_selection, -) - - -class TestManualBackend: - """Tests for ManualBackend class.""" - - def test_basic_creation(self): - """Test basic ManualBackend creation.""" - mapping = xr.DataArray([0, 1, 0, 1, 2, 2], dims=['original_time']) - weights = xr.DataArray([2.0, 2.0, 2.0], dims=['time']) - - backend = ManualBackend(timestep_mapping=mapping, representative_weights=weights) - - assert len(backend.timestep_mapping) == 6 - assert len(backend.representative_weights) == 3 - - def test_validation_dimension_mismatch(self): - """Test validation fails for mismatched dimensions.""" - mapping = xr.DataArray([0, 1, 5], dims=['original_time']) # 5 is out of range - weights = xr.DataArray([2.0, 2.0], dims=['time']) # Only 2 weights - - with pytest.raises(ValueError, match='timestep_mapping contains index'): - ManualBackend(timestep_mapping=mapping, representative_weights=weights) - - def test_aggregate_creates_result(self): - """Test aggregate method creates proper AggregationResult.""" - mapping = xr.DataArray([0, 1, 0, 1], dims=['original_time']) - weights = xr.DataArray([2.0, 2.0], dims=['time']) - - backend = ManualBackend(timestep_mapping=mapping, representative_weights=weights) - - # Create test data - data = xr.Dataset( - {'var1': (['time'], [1.0, 2.0, 3.0, 4.0])}, - coords={'time': range(4)}, - ) - - result = backend.aggregate(data) - - assert result.n_representatives == 2 - assert result.n_original_timesteps == 4 - assert result.aggregated_data is not None - - def test_aggregate_validates_data_dimensions(self): - """Test aggregate validates data dimensions match mapping.""" - mapping = xr.DataArray([0, 1, 
0], dims=['original_time']) # 3 timesteps - weights = xr.DataArray([2.0, 1.0], dims=['time']) - - backend = ManualBackend(timestep_mapping=mapping, representative_weights=weights) - - # Data has wrong number of timesteps - data = xr.Dataset( - {'var1': (['time'], [1.0, 2.0, 3.0, 4.0, 5.0])}, # 5 timesteps - coords={'time': range(5)}, - ) - - with pytest.raises(ValueError, match='timesteps'): - backend.aggregate(data) - - -class TestCreateManualBackendFromLabels: - """Tests for create_manual_backend_from_labels function.""" - - def test_basic_creation(self): - """Test creating ManualBackend from cluster labels.""" - # 3 periods of 4 timesteps each, labeled [0, 1, 0] - labels = np.array([0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0]) - - backend = create_manual_backend_from_labels(labels, timesteps_per_cluster=4) - - assert len(backend.representative_weights) == 8 # 2 clusters x 4 timesteps - # Cluster 0 appears 2 times, cluster 1 appears 1 time - assert float(backend.representative_weights.isel(time=0).values) == 2.0 - assert float(backend.representative_weights.isel(time=4).values) == 1.0 - - def test_non_consecutive_labels(self): - """Test handling of non-consecutive cluster labels.""" - # Labels are 0, 2, 0 (skipping 1) - labels = np.array([0, 0, 2, 2, 0, 0]) - - backend = create_manual_backend_from_labels(labels, timesteps_per_cluster=2) - - # Should remap to consecutive 0, 1 - assert len(backend.representative_weights) == 4 # 2 unique clusters x 2 timesteps - - -class TestCreateManualBackendFromSelection: - """Tests for create_manual_backend_from_selection function.""" - - def test_basic_creation(self): - """Test creating ManualBackend from selected indices.""" - # Select every 3rd timestep from 12 original timesteps - selected_indices = np.array([0, 3, 6, 9]) - weights = np.array([3.0, 3.0, 3.0, 3.0]) - - backend = create_manual_backend_from_selection( - selected_indices=selected_indices, - weights=weights, - n_original_timesteps=12, - ) - - assert len(backend.representative_weights) == 4 - # Check mapping assigns nearby timesteps to nearest representative - mapping = backend.timestep_mapping.values - assert mapping[0] == 0 # Timestep 0 -> representative 0 (at index 0) - assert mapping[1] == 0 # Timestep 1 -> representative 0 (nearest to 0) - # Timestep 5 is equidistant from indices 3 and 6, but argmin picks first - # Actually: distances from 5 to [0,3,6,9] = [5,2,1,4], so nearest is rep 2 (at index 6) - assert mapping[5] == 2 # Timestep 5 -> representative 2 (at index 6) - - def test_weights_length_mismatch(self): - """Test error when weights length doesn't match selected indices.""" - selected_indices = np.array([0, 3, 6]) - weights = np.array([3.0, 3.0]) # Wrong length - - with pytest.raises(ValueError, match='weights'): - create_manual_backend_from_selection( - selected_indices=selected_indices, - weights=weights, - n_original_timesteps=12, - ) diff --git a/tests/test_cluster_reduce_expand.py b/tests/test_cluster_reduce_expand.py index 526f0e052..23b1bf4c1 100644 --- a/tests/test_cluster_reduce_expand.py +++ b/tests/test_cluster_reduce_expand.py @@ -1,4 +1,4 @@ -"""Tests for cluster_reduce() and expand_solution() functionality.""" +"""Tests for cluster() and expand_solution() functionality.""" import numpy as np import pandas as pd @@ -43,12 +43,12 @@ def timesteps_8_days(): return pd.date_range('2020-01-01', periods=192, freq='h') -def test_cluster_reduce_creates_reduced_timesteps(timesteps_8_days): - """Test that cluster_reduce creates a FlowSystem with fewer timesteps.""" +def 
test_cluster_creates_reduced_timesteps(timesteps_8_days): + """Test that cluster creates a FlowSystem with fewer timesteps.""" fs = create_simple_system(timesteps_8_days) # Reduce to 2 typical clusters (days) - fs_reduced = fs.transform.cluster_reduce( + fs_reduced = fs.transform.cluster( n_clusters=2, cluster_duration='1D', ) @@ -64,7 +64,7 @@ def test_expand_solution_restores_full_timesteps(solver_fixture, timesteps_8_day fs = create_simple_system(timesteps_8_days) # Reduce to 2 typical clusters - fs_reduced = fs.transform.cluster_reduce( + fs_reduced = fs.transform.cluster( n_clusters=2, cluster_duration='1D', ) @@ -86,7 +86,7 @@ def test_expand_solution_preserves_solution_variables(solver_fixture, timesteps_ """Test that expand_solution keeps all solution variables.""" fs = create_simple_system(timesteps_8_days) - fs_reduced = fs.transform.cluster_reduce( + fs_reduced = fs.transform.cluster( n_clusters=2, cluster_duration='1D', ) @@ -105,7 +105,7 @@ def test_expand_solution_maps_values_correctly(solver_fixture, timesteps_8_days) """Test that expand_solution correctly maps typical cluster values to all segments.""" fs = create_simple_system(timesteps_8_days) - fs_reduced = fs.transform.cluster_reduce( + fs_reduced = fs.transform.cluster( n_clusters=2, cluster_duration='1D', ) @@ -142,7 +142,7 @@ def test_expand_solution_enables_statistics_accessor(solver_fixture, timesteps_8 """Test that statistics accessor works on expanded FlowSystem.""" fs = create_simple_system(timesteps_8_days) - fs_reduced = fs.transform.cluster_reduce( + fs_reduced = fs.transform.cluster( n_clusters=2, cluster_duration='1D', ) @@ -163,7 +163,7 @@ def test_expand_solution_statistics_match_clustered(solver_fixture, timesteps_8_ """Test that total_effects match between clustered and expanded FlowSystem.""" fs = create_simple_system(timesteps_8_days) - fs_reduced = fs.transform.cluster_reduce( + fs_reduced = fs.transform.cluster( n_clusters=2, cluster_duration='1D', ) @@ -193,7 +193,7 @@ def test_expand_solution_without_aggregation_info_raises(solver_fixture, timeste fs = create_simple_system(timesteps_2_days) fs.optimize(solver_fixture) - with pytest.raises(ValueError, match='cluster_reduce|aggregate'): + with pytest.raises(ValueError, match='cluster'): fs.transform.expand_solution() @@ -201,7 +201,7 @@ def test_expand_solution_without_solution_raises(timesteps_8_days): """Test that expand_solution raises error if no solution.""" fs = create_simple_system(timesteps_8_days) - fs_reduced = fs.transform.cluster_reduce( + fs_reduced = fs.transform.cluster( n_clusters=2, cluster_duration='1D', ) @@ -254,8 +254,8 @@ def scenarios_2(): return pd.Index(['base', 'high'], name='scenario') -def test_cluster_reduce_with_scenarios(timesteps_8_days, scenarios_2): - """Test that cluster_reduce handles scenarios correctly.""" +def test_cluster_with_scenarios(timesteps_8_days, scenarios_2): + """Test that cluster handles scenarios correctly.""" fs = create_system_with_scenarios(timesteps_8_days, scenarios_2) # Verify scenarios are set up correctly @@ -263,7 +263,7 @@ def test_cluster_reduce_with_scenarios(timesteps_8_days, scenarios_2): assert len(fs.scenarios) == 2 # Reduce to 2 typical clusters - fs_reduced = fs.transform.cluster_reduce( + fs_reduced = fs.transform.cluster( n_clusters=2, cluster_duration='1D', ) @@ -280,12 +280,12 @@ def test_cluster_reduce_with_scenarios(timesteps_8_days, scenarios_2): assert info.original_flow_system.scenarios is not None -def test_cluster_reduce_and_expand_with_scenarios(solver_fixture, 
timesteps_8_days, scenarios_2): - """Test full cluster_reduce -> optimize -> expand_solution cycle with scenarios.""" +def test_cluster_and_expand_with_scenarios(solver_fixture, timesteps_8_days, scenarios_2): + """Test full cluster -> optimize -> expand_solution cycle with scenarios.""" fs = create_system_with_scenarios(timesteps_8_days, scenarios_2) # Reduce - fs_reduced = fs.transform.cluster_reduce( + fs_reduced = fs.transform.cluster( n_clusters=2, cluster_duration='1D', ) @@ -311,7 +311,7 @@ def test_expand_solution_maps_scenarios_independently(solver_fixture, timesteps_ """Test that expand_solution correctly maps scenarios in multi-scenario systems.""" fs = create_system_with_scenarios(timesteps_8_days, scenarios_2) - fs_reduced = fs.transform.cluster_reduce( + fs_reduced = fs.transform.cluster( n_clusters=2, cluster_duration='1D', ) From f653269ac022ea8439169ec94edd7d843a0ede20 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 22:47:14 +0100 Subject: [PATCH 073/191] terminology unification is complete. All references have been renamed: MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - AggregationResult → ClusterResult - AggregationInfo → ClusterInfo - _aggregation_info → _cluster_info --- docs/notebooks/08c-clustering.ipynb | 2 +- .../08d-clustering-multiperiod.ipynb | 2 +- docs/notebooks/08e-clustering-internals.ipynb | 42 +++++++++---------- flixopt/aggregation/__init__.py | 14 +++---- flixopt/aggregation/base.py | 16 +++---- flixopt/flow_system.py | 8 ++-- flixopt/transform_accessor.py | 14 +++---- tests/test_aggregation/test_base.py | 32 +++++++------- tests/test_aggregation/test_integration.py | 4 +- tests/test_cluster_reduce_expand.py | 12 +++--- 10 files changed, 73 insertions(+), 73 deletions(-) diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb index 55a2f4a18..4d26363b1 100644 --- a/docs/notebooks/08c-clustering.ipynb +++ b/docs/notebooks/08c-clustering.ipynb @@ -208,7 +208,7 @@ "outputs": [], "source": [ "# Show clustering info\n", - "info = fs_clustered._aggregation_info\n", + "info = fs_clustered._cluster_info\n", "cs = info.result.cluster_structure\n", "print('Clustering Configuration:')\n", "print(f' Number of typical periods: {cs.n_clusters}')\n", diff --git a/docs/notebooks/08d-clustering-multiperiod.ipynb b/docs/notebooks/08d-clustering-multiperiod.ipynb index c97053af1..219fba33b 100644 --- a/docs/notebooks/08d-clustering-multiperiod.ipynb +++ b/docs/notebooks/08d-clustering-multiperiod.ipynb @@ -240,7 +240,7 @@ "metadata": {}, "outputs": [], "source": [ - "info = fs_clustered._aggregation_info\n", + "info = fs_clustered._cluster_info\n", "cs = info.result.cluster_structure\n", "\n", "print('Clustering Configuration:')\n", diff --git a/docs/notebooks/08e-clustering-internals.ipynb b/docs/notebooks/08e-clustering-internals.ipynb index 053a77a30..58d58a394 100644 --- a/docs/notebooks/08e-clustering-internals.ipynb +++ b/docs/notebooks/08e-clustering-internals.ipynb @@ -15,7 +15,7 @@ "- **TSAM integration**: How the Time Series Aggregation Module performs clustering\n", "- **Typical periods**: Visualizing representative vs original time series\n", "- **Storage handling**: Inter-period linking and cyclic constraints\n", - "- **The `_aggregation_info` structure**: Internal data for expansion and analysis\n", + "- **The `_cluster_info` structure**: Internal data for expansion and analysis\n", "\n", "!!! 
note \"Prerequisites\"\n", " This notebook assumes familiarity with [08c-clustering](08c-clustering.ipynb)." @@ -82,9 +82,9 @@ "id": "4", "metadata": {}, "source": [ - "## 1. The `_aggregation_info` Structure\n", + "## 1. The `_cluster_info` Structure\n", "\n", - "After clustering, the FlowSystem stores metadata in `_aggregation_info` that enables:\n", + "After clustering, the FlowSystem stores metadata in `_cluster_info` that enables:\n", "- Expanding solutions back to full resolution\n", "- Understanding which original days map to which clusters\n", "- Weighting costs correctly in the objective function" @@ -97,9 +97,9 @@ "metadata": {}, "outputs": [], "source": [ - "info = fs_clustered._aggregation_info\n", + "info = fs_clustered._cluster_info\n", "\n", - "print('AggregationInfo structure:')\n", + "print('ClusterInfo structure:')\n", "print(f' backend_name: {info.backend_name}')\n", "print(f' storage_inter_cluster_linking: {info.storage_inter_cluster_linking}')\n", "print(f' storage_cyclic: {info.storage_cyclic}')\n", @@ -129,7 +129,7 @@ "metadata": {}, "outputs": [], "source": [ - "info = fs_clustered._aggregation_info\n", + "info = fs_clustered._cluster_info\n", "cs = info.result.cluster_structure\n", "cluster_order = cs.cluster_order.values\n", "n_original_days = len(cluster_order)\n", @@ -209,7 +209,7 @@ "outputs": [], "source": [ "# Cluster occurrences (how many original days each cluster represents)\n", - "info = fs_clustered._aggregation_info\n", + "info = fs_clustered._cluster_info\n", "cs = info.result.cluster_structure\n", "cluster_occurrences = dict(cs.cluster_occurrences)\n", "\n", @@ -228,7 +228,7 @@ "outputs": [], "source": [ "# Visualize weights across the reduced timesteps\n", - "info = fs_clustered._aggregation_info\n", + "info = fs_clustered._cluster_info\n", "cs = info.result.cluster_structure\n", "weights = fs_clustered.cluster_weight.values\n", "timesteps_per_day = cs.timesteps_per_cluster\n", @@ -308,7 +308,7 @@ "flixopt uses the [TSAM](https://github.com/FZJ-IEK3-VSA/tsam) (Time Series Aggregation Module) \n", "package for clustering. 
TSAM uses k-means clustering to group similar time periods.\n", "\n", - "### The AggregationResult Object" + "### The ClusterResult Object" ] }, { @@ -318,11 +318,11 @@ "metadata": {}, "outputs": [], "source": [ - "# Access the AggregationResult which contains the TSAM clustering data\n", - "info = fs_clustered._aggregation_info\n", + "# Access the ClusterResult which contains the TSAM clustering data\n", + "info = fs_clustered._cluster_info\n", "result = info.result\n", "\n", - "print(f'AggregationResult type: {type(result).__name__}')\n", + "print(f'ClusterResult type: {type(result).__name__}')\n", "print(f'Timestep mapping shape: {result.timestep_mapping.shape}')\n", "print(f'Representative weights shape: {result.representative_weights.shape}')" ] @@ -334,8 +334,8 @@ "metadata": {}, "outputs": [], "source": [ - "# The AggregationResult contains aggregated data\n", - "result = fs_clustered._aggregation_info.result\n", + "# The ClusterResult contains aggregated data\n", + "result = fs_clustered._cluster_info.result\n", "\n", "print('Aggregated data variables:')\n", "if result.aggregated_data is not None:\n", @@ -352,7 +352,7 @@ "outputs": [], "source": [ "# Show structure of original data (used for clustering)\n", - "result = fs_clustered._aggregation_info.result\n", + "result = fs_clustered._cluster_info.result\n", "\n", "print('Original data used for clustering:')\n", "if result.original_data is not None:\n", @@ -380,7 +380,7 @@ "clustered_demand = fs_clustered.components['HeatDemand'].inputs[0].fixed_relative_profile.values\n", "\n", "# Get cluster structure info\n", - "info = fs_clustered._aggregation_info\n", + "info = fs_clustered._cluster_info\n", "cs = info.result.cluster_structure\n", "cluster_occurrences = dict(cs.cluster_occurrences)\n", "\n", @@ -467,7 +467,7 @@ "metadata": {}, "outputs": [], "source": [ - "info = fs_clustered._aggregation_info\n", + "info = fs_clustered._cluster_info\n", "\n", "print('Storage settings:')\n", "print(f' storage_cyclic: {info.storage_cyclic}')\n", @@ -488,7 +488,7 @@ "outputs": [], "source": [ "# Visualize storage behavior across typical periods\n", - "info = fs_clustered._aggregation_info\n", + "info = fs_clustered._cluster_info\n", "cs = info.result.cluster_structure\n", "cluster_occurrences = dict(cs.cluster_occurrences)\n", "\n", @@ -596,7 +596,7 @@ "source": [ "# The time series used for clustering come from the FlowSystem's dataset\n", "# The cluster() method extracts all time-varying data for clustering\n", - "info = fs_clustered._aggregation_info\n", + "info = fs_clustered._cluster_info\n", "if info.result.original_data is not None:\n", " ts_names = list(info.result.original_data.data_vars)[:10] # Show first 10\n", " print('Time series used for clustering (first 10):')\n", @@ -623,7 +623,7 @@ "outputs": [], "source": [ "# Find which cluster contains the peak demand day\n", - "info = fs_clustered._aggregation_info\n", + "info = fs_clustered._cluster_info\n", "cs = info.result.cluster_structure\n", "cluster_order = cs.cluster_order.values\n", "cluster_occurrences = dict(cs.cluster_occurrences)\n", @@ -659,7 +659,7 @@ "\n", "You learned about the internal mechanics of clustering:\n", "\n", - "1. **`_aggregation_info`**: Contains all metadata for expansion and analysis\n", + "1. **`_cluster_info`**: Contains all metadata for expansion and analysis\n", "2. **Cluster weights**: Scale operational costs so each typical period represents its original days\n", "3. **TSAM integration**: k-means clustering groups similar time periods\n", "4. 
**Storage handling**: Cyclic constraints ensure realistic storage behavior\n", diff --git a/flixopt/aggregation/__init__.py b/flixopt/aggregation/__init__.py index c7a03cad8..2ce828fae 100644 --- a/flixopt/aggregation/__init__.py +++ b/flixopt/aggregation/__init__.py @@ -4,9 +4,9 @@ This module provides data structures for time series clustering/aggregation. Key classes: -- AggregationResult: Universal result container for clustering +- ClusterResult: Universal result container for clustering - ClusterStructure: Hierarchical structure info for storage inter-cluster linking -- AggregationInfo: Stored on FlowSystem after clustering +- ClusterInfo: Stored on FlowSystem after clustering Example usage: @@ -18,7 +18,7 @@ ) # Access clustering metadata - info = fs_clustered._aggregation_info + info = fs_clustered._cluster_info print(f'Number of clusters: {info.result.cluster_structure.n_clusters}') # Expand solution back to full resolution @@ -26,8 +26,8 @@ """ from .base import ( - AggregationInfo, - AggregationResult, + ClusterInfo, + ClusterResult, ClusterStructure, create_cluster_structure_from_mapping, plot_aggregation, @@ -50,8 +50,8 @@ def _get_inter_cluster_linking(): __all__ = [ # Core classes - 'AggregationResult', - 'AggregationInfo', + 'ClusterResult', + 'ClusterInfo', 'ClusterStructure', 'InterClusterLinking', # Utilities diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index ed01d56b8..3484a4832 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -108,7 +108,7 @@ def get_cluster_weight_per_timestep(self) -> xr.DataArray: @dataclass -class AggregationResult: +class ClusterResult: """Universal result from any time series aggregation method. This dataclass captures all information needed to: @@ -216,7 +216,7 @@ def validate(self) -> None: @dataclass -class AggregationInfo: +class ClusterInfo: """Information about an aggregation stored on a FlowSystem. This is stored on the FlowSystem after aggregation to enable: @@ -226,14 +226,14 @@ class AggregationInfo: - Serialization/deserialization of aggregated models Attributes: - result: The AggregationResult from the aggregation backend. + result: The ClusterResult from the aggregation backend. original_flow_system: Reference to the FlowSystem before aggregation. backend_name: Name of the aggregation backend used (e.g., 'tsam', 'manual'). storage_inter_cluster_linking: Whether to add inter-cluster storage constraints. storage_cyclic: Whether to enforce cyclic storage (SOC[start] = SOC[end]). """ - result: AggregationResult + result: ClusterResult original_flow_system: object # FlowSystem - avoid circular import backend_name: str = 'unknown' storage_inter_cluster_linking: bool = True @@ -291,7 +291,7 @@ def create_cluster_structure_from_mapping( def plot_aggregation( - result: AggregationResult, + result: ClusterResult, colormap: str | None = None, show: bool | None = None, ): @@ -301,7 +301,7 @@ def plot_aggregation( the aggregated/clustered time series (solid lines) for comparison. Args: - result: AggregationResult containing original and aggregated data. + result: ClusterResult containing original and aggregated data. colormap: Colorscale name for the time series colors. Defaults to CONFIG.Plotting.default_qualitative_colorscale. show: Whether to display the figure. 
@@ -312,7 +312,7 @@ def plot_aggregation( Example: >>> fs_clustered = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D') - >>> plot_aggregation(fs_clustered._aggregation_info.result) + >>> plot_aggregation(fs_clustered._cluster_info.result) """ import plotly.express as px @@ -321,7 +321,7 @@ def plot_aggregation( from ..plot_result import PlotResult if result.original_data is None or result.aggregated_data is None: - raise ValueError('AggregationResult must contain both original_data and aggregated_data for plotting') + raise ValueError('ClusterResult must contain both original_data and aggregated_data for plotting') # Convert xarray to DataFrames original_df = result.original_data.to_dataframe() diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 7d61a9386..b83d57e5a 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -38,7 +38,7 @@ import pyvis - from .aggregation import AggregationInfo + from .aggregation import ClusterInfo from .solvers import _Solver from .structure import TimeSeriesWeights from .types import Effect_TPS, Numeric_S, Numeric_TPS, NumericOrBool @@ -233,7 +233,7 @@ def __init__( self._solution: xr.Dataset | None = None # Aggregation info - populated by transform.cluster() - self._aggregation_info: AggregationInfo | None = None + self._cluster_info: ClusterInfo | None = None # Statistics accessor cache - lazily initialized, invalidated on new solution self._statistics: StatisticsAccessor | None = None @@ -1294,7 +1294,7 @@ def build_model(self, normalize_weights: bool = True) -> FlowSystem: self.model.do_modeling() # Add inter-cluster storage linking if this is an aggregated FlowSystem - if self._aggregation_info is not None: + if self._cluster_info is not None: self._add_inter_cluster_linking() return self @@ -1307,7 +1307,7 @@ def _add_inter_cluster_linking(self) -> None: """ from .aggregation.storage_linking import InterClusterLinking - info = self._aggregation_info + info = self._cluster_info if info is None: return diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 2f8143aa9..45b50484b 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -615,7 +615,7 @@ def cluster( Returns: A new FlowSystem with reduced timesteps (only typical clusters). - The FlowSystem has metadata stored in ``_aggregation_info`` for expansion. + The FlowSystem has metadata stored in ``_cluster_info`` for expansion. Raises: ValueError: If timestep sizes are inconsistent. 
@@ -649,7 +649,7 @@ def cluster( """ import tsam.timeseriesaggregation as tsam - from .aggregation import AggregationInfo, AggregationResult, ClusterStructure + from .aggregation import ClusterInfo, ClusterResult, ClusterStructure from .core import TimeSeriesData, drop_constant_arrays from .flow_system import FlowSystem @@ -797,7 +797,7 @@ def _build_weights_for_key(key: tuple) -> xr.DataArray: if isinstance(ics, str) and ics == 'equals_final': storage.initial_charge_state = 0 - # Build AggregationInfo for inter-cluster linking and solution expansion + # Build ClusterInfo for inter-cluster linking and solution expansion n_original_timesteps = len(self._fs.timesteps) # Build timestep_mapping: maps each original timestep to its representative @@ -824,7 +824,7 @@ def _build_weights_for_key(key: tuple) -> xr.DataArray: timesteps_per_cluster=timesteps_per_cluster, ) - aggregation_result = AggregationResult( + aggregation_result = ClusterResult( timestep_mapping=xr.DataArray(timestep_mapping, dims=['original_time'], name='timestep_mapping'), n_representatives=n_reduced_timesteps, representative_weights=timestep_weights.rename('representative_weights'), @@ -832,7 +832,7 @@ def _build_weights_for_key(key: tuple) -> xr.DataArray: original_data=ds, ) - reduced_fs._aggregation_info = AggregationInfo( + reduced_fs._cluster_info = ClusterInfo( result=aggregation_result, original_flow_system=self._fs, backend_name='tsam', @@ -942,7 +942,7 @@ def expand_solution(self) -> FlowSystem: from .flow_system import FlowSystem # Validate - if self._fs._aggregation_info is None: + if self._fs._cluster_info is None: raise ValueError( 'expand_solution() requires a FlowSystem created with cluster(). ' 'This FlowSystem has no aggregation info.' @@ -950,7 +950,7 @@ def expand_solution(self) -> FlowSystem: if self._fs.solution is None: raise ValueError('FlowSystem has no solution. 
Run optimize() or solve() first.') - info = self._fs._aggregation_info + info = self._fs._cluster_info cluster_structure = info.result.cluster_structure if cluster_structure is None: raise ValueError('No cluster structure available for expansion.') diff --git a/tests/test_aggregation/test_base.py b/tests/test_aggregation/test_base.py index 7930efcca..75d8b0f7f 100644 --- a/tests/test_aggregation/test_base.py +++ b/tests/test_aggregation/test_base.py @@ -5,8 +5,8 @@ import xarray as xr from flixopt.aggregation import ( - AggregationInfo, - AggregationResult, + ClusterInfo, + ClusterResult, ClusterStructure, create_cluster_structure_from_mapping, ) @@ -62,12 +62,12 @@ def test_get_cluster_weight_per_timestep(self): assert float(weights.isel(time=4).values) == 1.0 -class TestAggregationResult: - """Tests for AggregationResult dataclass.""" +class TestClusterResult: + """Tests for ClusterResult dataclass.""" def test_basic_creation(self): - """Test basic AggregationResult creation.""" - result = AggregationResult( + """Test basic ClusterResult creation.""" + result = ClusterResult( timestep_mapping=xr.DataArray([0, 0, 1, 1, 2, 2], dims=['original_time']), n_representatives=3, representative_weights=xr.DataArray([2, 2, 2], dims=['time']), @@ -77,8 +77,8 @@ def test_basic_creation(self): assert result.n_original_timesteps == 6 def test_creation_from_numpy(self): - """Test AggregationResult creation from numpy arrays.""" - result = AggregationResult( + """Test ClusterResult creation from numpy arrays.""" + result = ClusterResult( timestep_mapping=np.array([0, 1, 0, 1]), n_representatives=2, representative_weights=np.array([2.0, 2.0]), @@ -89,7 +89,7 @@ def test_creation_from_numpy(self): def test_validation_success(self): """Test validation passes for valid result.""" - result = AggregationResult( + result = ClusterResult( timestep_mapping=xr.DataArray([0, 1, 0, 1], dims=['original_time']), n_representatives=2, representative_weights=xr.DataArray([2.0, 2.0], dims=['time']), @@ -100,7 +100,7 @@ def test_validation_success(self): def test_validation_invalid_mapping(self): """Test validation fails for out-of-range mapping.""" - result = AggregationResult( + result = ClusterResult( timestep_mapping=xr.DataArray([0, 5, 0, 1], dims=['original_time']), # 5 is out of range n_representatives=2, representative_weights=xr.DataArray([2.0, 2.0], dims=['time']), @@ -111,7 +111,7 @@ def test_validation_invalid_mapping(self): def test_get_expansion_mapping(self): """Test get_expansion_mapping returns named DataArray.""" - result = AggregationResult( + result = ClusterResult( timestep_mapping=xr.DataArray([0, 1, 0], dims=['original_time']), n_representatives=2, representative_weights=xr.DataArray([2.0, 1.0], dims=['time']), @@ -139,18 +139,18 @@ def test_basic_creation(self): assert structure.n_original_periods == 3 -class TestAggregationInfo: - """Tests for AggregationInfo dataclass.""" +class TestClusterInfo: + """Tests for ClusterInfo dataclass.""" def test_creation(self): - """Test AggregationInfo creation.""" - result = AggregationResult( + """Test ClusterInfo creation.""" + result = ClusterResult( timestep_mapping=xr.DataArray([0, 1], dims=['original_time']), n_representatives=2, representative_weights=xr.DataArray([1.0, 1.0], dims=['time']), ) - info = AggregationInfo( + info = ClusterInfo( result=result, original_flow_system=None, # Would be FlowSystem in practice backend_name='tsam', diff --git a/tests/test_aggregation/test_integration.py b/tests/test_aggregation/test_integration.py index 
664808b66..62e9fc8b4 100644 --- a/tests/test_aggregation/test_integration.py +++ b/tests/test_aggregation/test_integration.py @@ -137,9 +137,9 @@ def test_import_from_flixopt(self): """Test that aggregation module can be imported from flixopt.""" from flixopt import aggregation - assert hasattr(aggregation, 'AggregationResult') + assert hasattr(aggregation, 'ClusterResult') assert hasattr(aggregation, 'ClusterStructure') - assert hasattr(aggregation, 'AggregationInfo') + assert hasattr(aggregation, 'ClusterInfo') def test_plot_aggregation_available(self): """Test that plot_aggregation is available.""" diff --git a/tests/test_cluster_reduce_expand.py b/tests/test_cluster_reduce_expand.py index 23b1bf4c1..a6547131f 100644 --- a/tests/test_cluster_reduce_expand.py +++ b/tests/test_cluster_reduce_expand.py @@ -55,8 +55,8 @@ def test_cluster_creates_reduced_timesteps(timesteps_8_days): # Should have 2 * 24 = 48 timesteps instead of 192 assert len(fs_reduced.timesteps) == 48 - assert hasattr(fs_reduced, '_aggregation_info') - assert fs_reduced._aggregation_info.result.cluster_structure.n_clusters == 2 + assert hasattr(fs_reduced, '_cluster_info') + assert fs_reduced._cluster_info.result.cluster_structure.n_clusters == 2 def test_expand_solution_restores_full_timesteps(solver_fixture, timesteps_8_days): @@ -112,7 +112,7 @@ def test_expand_solution_maps_values_correctly(solver_fixture, timesteps_8_days) fs_reduced.optimize(solver_fixture) # Get cluster_order to know mapping - info = fs_reduced._aggregation_info + info = fs_reduced._cluster_info cluster_order = info.result.cluster_structure.cluster_order.values timesteps_per_cluster = info.result.cluster_structure.timesteps_per_cluster # 24 @@ -188,7 +188,7 @@ def test_expand_solution_statistics_match_clustered(solver_fixture, timesteps_8_ assert_allclose(reduced_flow_hours, expanded_flow_hours, rtol=1e-6) -def test_expand_solution_without_aggregation_info_raises(solver_fixture, timesteps_2_days): +def test_expand_solution_without_cluster_info_raises(solver_fixture, timesteps_2_days): """Test that expand_solution raises error if not a reduced FlowSystem.""" fs = create_simple_system(timesteps_2_days) fs.optimize(solver_fixture) @@ -272,7 +272,7 @@ def test_cluster_with_scenarios(timesteps_8_days, scenarios_2): assert len(fs_reduced.timesteps) == 48 # Should have aggregation info with cluster structure - info = fs_reduced._aggregation_info + info = fs_reduced._cluster_info assert info is not None assert info.result.cluster_structure is not None assert info.result.cluster_structure.n_clusters == 2 @@ -317,7 +317,7 @@ def test_expand_solution_maps_scenarios_independently(solver_fixture, timesteps_ ) fs_reduced.optimize(solver_fixture) - info = fs_reduced._aggregation_info + info = fs_reduced._cluster_info cluster_order = info.result.cluster_structure.cluster_order.values timesteps_per_cluster = info.result.cluster_structure.timesteps_per_cluster # 24 From 6cb06b1ef63510aef68949c873b71312974e5f95 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 23:04:51 +0100 Subject: [PATCH 074/191] Made cluster_order and timestep_mapping multi-dimensional to store per-(period, scenario) cluster assignments --- flixopt/aggregation/base.py | 74 ++++++++++++- flixopt/aggregation/storage_linking.py | 82 +++++++++++++- flixopt/transform_accessor.py | 145 ++++++++++++++++++++----- tests/test_cluster_reduce_expand.py | 11 +- 4 files changed, 272 insertions(+), 40 deletions(-) diff --git a/flixopt/aggregation/base.py 
b/flixopt/aggregation/base.py index 3484a4832..66f13942a 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -37,10 +37,11 @@ class ClusterStructure: Attributes: cluster_order: Maps each original time chunk index to its cluster ID. - dims: [original_period] where original_period indexes the time chunks - (e.g., days) before clustering. Values are cluster indices (0 to n_clusters-1). + dims: [original_period] for simple case, or + [original_period, period, scenario] for multi-period/scenario systems. + Values are cluster indices (0 to n_clusters-1). cluster_occurrences: Count of how many original time chunks each cluster represents. - dims: [cluster] + dims: [cluster] for simple case, or [cluster, period, scenario] for multi-dim. n_clusters: Number of distinct clusters (typical periods). timesteps_per_cluster: Number of timesteps in each cluster (e.g., 24 for daily). @@ -50,6 +51,10 @@ class ClusterStructure: - cluster_occurrences: shape (8,), e.g., [45, 46, 46, 46, 46, 45, 45, 46] - n_clusters: 8 - timesteps_per_cluster: 24 (for hourly data) + + For multi-scenario (e.g., 2 scenarios): + - cluster_order: shape (365, 2) with dims [original_period, scenario] + - cluster_occurrences: shape (8, 2) with dims [cluster, scenario] """ cluster_order: xr.DataArray @@ -78,6 +83,47 @@ def n_original_periods(self) -> int: """Number of original periods (before clustering).""" return len(self.cluster_order.coords['original_period']) + @property + def has_multi_dims(self) -> bool: + """Check if cluster_order has period/scenario dimensions.""" + return 'period' in self.cluster_order.dims or 'scenario' in self.cluster_order.dims + + def get_cluster_order_for_slice(self, period: str | None = None, scenario: str | None = None) -> np.ndarray: + """Get cluster_order for a specific (period, scenario) combination. + + Args: + period: Period label (None if no period dimension). + scenario: Scenario label (None if no scenario dimension). + + Returns: + 1D numpy array of cluster indices for the specified slice. + """ + order = self.cluster_order + if 'period' in order.dims and period is not None: + order = order.sel(period=period) + if 'scenario' in order.dims and scenario is not None: + order = order.sel(scenario=scenario) + return order.values.astype(int) + + def get_cluster_occurrences_for_slice( + self, period: str | None = None, scenario: str | None = None + ) -> dict[int, int]: + """Get cluster occurrence counts for a specific (period, scenario) combination. + + Args: + period: Period label (None if no period dimension). + scenario: Scenario label (None if no scenario dimension). + + Returns: + Dict mapping cluster ID to occurrence count. + """ + occurrences = self.cluster_occurrences + if 'period' in occurrences.dims and period is not None: + occurrences = occurrences.sel(period=period) + if 'scenario' in occurrences.dims and scenario is not None: + occurrences = occurrences.sel(scenario=scenario) + return {int(c): int(occurrences.sel(cluster=c).values) for c in occurrences.coords['cluster'].values} + def get_cluster_weight_per_timestep(self) -> xr.DataArray: """Get weight for each representative timestep. @@ -118,11 +164,12 @@ class ClusterResult: Attributes: timestep_mapping: Maps each original timestep to its representative index. - dims: [original_time] + dims: [original_time] for simple case, or + [original_time, period, scenario] for multi-period/scenario systems. Values are indices into the representative timesteps (0 to n_representatives-1). 
n_representatives: Number of representative timesteps after aggregation. representative_weights: Weight for each representative timestep. - dims: [time] + dims: [time] or [time, period, scenario] Typically equals the number of original timesteps each representative covers. Used as cluster_weight in the FlowSystem. aggregated_data: Time series data aggregated to representative timesteps. @@ -178,6 +225,23 @@ def get_expansion_mapping(self) -> xr.DataArray: """ return self.timestep_mapping.rename('expansion_mapping') + def get_timestep_mapping_for_slice(self, period: str | None = None, scenario: str | None = None) -> np.ndarray: + """Get timestep_mapping for a specific (period, scenario) combination. + + Args: + period: Period label (None if no period dimension). + scenario: Scenario label (None if no scenario dimension). + + Returns: + 1D numpy array of representative timestep indices for the specified slice. + """ + mapping = self.timestep_mapping + if 'period' in mapping.dims and period is not None: + mapping = mapping.sel(period=period) + if 'scenario' in mapping.dims and scenario is not None: + mapping = mapping.sel(scenario=scenario) + return mapping.values.astype(int) + def validate(self) -> None: """Validate that all fields are consistent. diff --git a/flixopt/aggregation/storage_linking.py b/flixopt/aggregation/storage_linking.py index 5cf9d9453..681f5def8 100644 --- a/flixopt/aggregation/storage_linking.py +++ b/flixopt/aggregation/storage_linking.py @@ -70,14 +70,14 @@ def __init__( self.storage_cyclic = storage_cyclic # Extract commonly used values from cluster_structure - self._cluster_order = cluster_structure.cluster_order.values self._n_clusters = ( int(cluster_structure.n_clusters) if isinstance(cluster_structure.n_clusters, (int, np.integer)) else int(cluster_structure.n_clusters.values) ) self._timesteps_per_cluster = cluster_structure.timesteps_per_cluster - self._n_original_periods = len(self._cluster_order) + self._n_original_periods = cluster_structure.n_original_periods + self._has_multi_dims = cluster_structure.has_multi_dims def do_modeling(self): """Create SOC boundary variables and inter-period linking constraints. 
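To make the new per-slice accessors on `ClusterStructure` concrete, a small sketch with a hypothetical two-scenario structure — the scenario names and cluster assignments are invented for illustration:

```python
import numpy as np
import xarray as xr

from flixopt.aggregation import ClusterStructure

# cluster_order with a scenario dimension: 4 original days, 2 scenarios.
structure = ClusterStructure(
    cluster_order=xr.DataArray(
        np.array([[0, 0], [1, 0], [0, 1], [1, 1]]),
        dims=['original_period', 'scenario'],
        coords={'scenario': ['low', 'high']},
    ),
    cluster_occurrences=xr.DataArray(
        np.array([[2, 2], [2, 2]]),
        dims=['cluster', 'scenario'],
        coords={'cluster': [0, 1], 'scenario': ['low', 'high']},
    ),
    n_clusters=2,
    timesteps_per_cluster=24,
)

assert structure.has_multi_dims
print(structure.get_cluster_order_for_slice(scenario='low'))        # [0 1 0 1]
print(structure.get_cluster_occurrences_for_slice(scenario='low'))  # {0: 2, 1: 2}
```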
@@ -176,10 +176,18 @@ def _add_storage_linking(self, storage) -> None: # Create linking constraints: # SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]] - for d in range(self._n_original_periods): - c = int(self._cluster_order[d]) - lhs = soc_boundary.isel(period_boundary=d + 1) - soc_boundary.isel(period_boundary=d) - delta_soc_dict[c] - self.add_constraints(lhs == 0, short_name=f'link|{label}|{d}') + if self._has_multi_dims: + # Multi-dimensional cluster_order: create constraints per (period, scenario) slice + self._add_linking_constraints_multi_dim(storage, soc_boundary, delta_soc_dict, label) + else: + # Simple case: single cluster_order for all slices + cluster_order = self.cluster_structure.get_cluster_order_for_slice() + for d in range(self._n_original_periods): + c = int(cluster_order[d]) + lhs = ( + soc_boundary.isel(period_boundary=d + 1) - soc_boundary.isel(period_boundary=d) - delta_soc_dict[c] + ) + self.add_constraints(lhs == 0, short_name=f'link|{label}|{d}') # Cyclic constraint: SOC_boundary[0] = SOC_boundary[end] if self.storage_cyclic: @@ -187,3 +195,65 @@ def _add_storage_linking(self, storage) -> None: self.add_constraints(lhs == 0, short_name=f'cyclic|{label}') logger.debug(f'Added inter-cluster linking for storage {label}') + + def _add_linking_constraints_multi_dim( + self, + storage, + soc_boundary, + delta_soc_dict: dict, + label: str, + ) -> None: + """Add linking constraints when cluster_order has period/scenario dimensions. + + When different (period, scenario) slices have different cluster assignments, + we need to create constraints that select the correct delta_SOC for each slice. + + Args: + storage: Storage component being linked. + soc_boundary: SOC boundary variable with dims [period_boundary, period?, scenario?]. + delta_soc_dict: Dict mapping cluster ID to delta_SOC expression. + label: Storage label for constraint naming. 
+ """ + # Determine which dimensions we're iterating over + periods = list(self.flow_system.periods) if self.flow_system.periods is not None else [None] + scenarios = list(self.flow_system.scenarios) if self.flow_system.scenarios is not None else [None] + has_periods = periods != [None] + has_scenarios = scenarios != [None] + + # For each (period, scenario) combination, create constraints using the slice's cluster_order + for p in periods: + for s in scenarios: + cluster_order = self.cluster_structure.get_cluster_order_for_slice(period=p, scenario=s) + + # Build selector for this slice + selector = {} + if has_periods and p is not None: + selector['period'] = p + if has_scenarios and s is not None: + selector['scenario'] = s + + # Select the slice of soc_boundary and delta_soc for this (period, scenario) + soc_boundary_slice = soc_boundary.sel(**selector) if selector else soc_boundary + + for d in range(self._n_original_periods): + c = int(cluster_order[d]) + delta_soc = delta_soc_dict[c] + if selector: + delta_soc = ( + delta_soc.sel(**selector) if any(dim in delta_soc.dims for dim in selector) else delta_soc + ) + + lhs = ( + soc_boundary_slice.isel(period_boundary=d + 1) + - soc_boundary_slice.isel(period_boundary=d) + - delta_soc + ) + + # Build constraint name with period/scenario info + slice_suffix = '' + if has_periods and p is not None: + slice_suffix += f'|p={p}' + if has_scenarios and s is not None: + slice_suffix += f'|s={s}' + + self.add_constraints(lhs == 0, short_name=f'link|{label}|{d}{slice_suffix}') diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 45b50484b..456635113 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -800,32 +800,75 @@ def _build_weights_for_key(key: tuple) -> xr.DataArray: # Build ClusterInfo for inter-cluster linking and solution expansion n_original_timesteps = len(self._fs.timesteps) - # Build timestep_mapping: maps each original timestep to its representative - timestep_mapping = np.zeros(n_original_timesteps, dtype=np.int32) - for period_idx, cluster_id in enumerate(cluster_orders[first_key]): - for pos in range(timesteps_per_cluster): - original_idx = period_idx * timesteps_per_cluster + pos - if original_idx < n_original_timesteps: - representative_idx = cluster_id * timesteps_per_cluster + pos - timestep_mapping[original_idx] = representative_idx - - # Build cluster_occurrences as DataArray - first_occurrences = cluster_occurrences_all[first_key] - cluster_occurrences_da = xr.DataArray( - [first_occurrences.get(c, 0) for c in range(actual_n_clusters)], - dims=['cluster'], - name='cluster_occurrences', - ) + # Build per-slice cluster_order and timestep_mapping as multi-dimensional DataArrays + # This is needed because each (period, scenario) combination may have different clustering + + def _build_timestep_mapping_for_key(key: tuple) -> np.ndarray: + """Build timestep_mapping for a single (period, scenario) slice.""" + mapping = np.zeros(n_original_timesteps, dtype=np.int32) + for period_idx, cluster_id in enumerate(cluster_orders[key]): + for pos in range(timesteps_per_cluster): + original_idx = period_idx * timesteps_per_cluster + pos + if original_idx < n_original_timesteps: + representative_idx = cluster_id * timesteps_per_cluster + pos + mapping[original_idx] = representative_idx + return mapping + + def _build_cluster_occurrences_for_key(key: tuple) -> np.ndarray: + """Build cluster_occurrences array for a single (period, scenario) slice.""" + occurrences = 
cluster_occurrences_all[key] + return np.array([occurrences.get(c, 0) for c in range(actual_n_clusters)]) + + # Build multi-dimensional arrays + if has_periods or has_scenarios: + # Multi-dimensional case: build arrays for each (period, scenario) combination + # cluster_order: dims [original_period, period?, scenario?] + cluster_order_slices = {} + timestep_mapping_slices = {} + cluster_occurrences_slices = {} + + for p in periods: + for s in scenarios: + key = (p, s) + cluster_order_slices[key] = xr.DataArray( + cluster_orders[key], dims=['original_period'], name='cluster_order' + ) + timestep_mapping_slices[key] = xr.DataArray( + _build_timestep_mapping_for_key(key), dims=['original_time'], name='timestep_mapping' + ) + cluster_occurrences_slices[key] = xr.DataArray( + _build_cluster_occurrences_for_key(key), dims=['cluster'], name='cluster_occurrences' + ) + + # Combine slices into multi-dimensional DataArrays + cluster_order_da = self._combine_slices_to_dataarray_generic( + cluster_order_slices, ['original_period'], periods, scenarios, 'cluster_order' + ) + timestep_mapping_da = self._combine_slices_to_dataarray_generic( + timestep_mapping_slices, ['original_time'], periods, scenarios, 'timestep_mapping' + ) + cluster_occurrences_da = self._combine_slices_to_dataarray_generic( + cluster_occurrences_slices, ['cluster'], periods, scenarios, 'cluster_occurrences' + ) + else: + # Simple case: single (None, None) slice + cluster_order_da = xr.DataArray(cluster_orders[first_key], dims=['original_period'], name='cluster_order') + timestep_mapping_da = xr.DataArray( + _build_timestep_mapping_for_key(first_key), dims=['original_time'], name='timestep_mapping' + ) + cluster_occurrences_da = xr.DataArray( + _build_cluster_occurrences_for_key(first_key), dims=['cluster'], name='cluster_occurrences' + ) cluster_structure = ClusterStructure( - cluster_order=xr.DataArray(cluster_orders[first_key], dims=['original_period'], name='cluster_order'), + cluster_order=cluster_order_da, cluster_occurrences=cluster_occurrences_da, n_clusters=actual_n_clusters, timesteps_per_cluster=timesteps_per_cluster, ) aggregation_result = ClusterResult( - timestep_mapping=xr.DataArray(timestep_mapping, dims=['original_time'], name='timestep_mapping'), + timestep_mapping=timestep_mapping_da, n_representatives=n_reduced_timesteps, representative_weights=timestep_weights.rename('representative_weights'), cluster_structure=cluster_structure, @@ -888,6 +931,54 @@ def _combine_slices_to_dataarray( return result.assign_attrs(original_da.attrs) + @staticmethod + def _combine_slices_to_dataarray_generic( + slices: dict[tuple, xr.DataArray], + base_dims: list[str], + periods: list, + scenarios: list, + name: str, + ) -> xr.DataArray: + """Combine per-(period, scenario) slices into a multi-dimensional DataArray. + + Generic version that works with any base dimension (not just 'time'). + + Args: + slices: Dict mapping (period, scenario) tuples to DataArrays. + base_dims: Base dimensions of each slice (e.g., ['original_period'] or ['original_time']). + periods: List of period labels ([None] if no periods dimension). + scenarios: List of scenario labels ([None] if no scenarios dimension). + name: Name for the resulting DataArray. + + Returns: + DataArray with dimensions [base_dims..., period?, scenario?]. 
+ """ + first_key = (periods[0], scenarios[0]) + has_periods = periods != [None] + has_scenarios = scenarios != [None] + + # Simple case: no period/scenario dimensions + if not has_periods and not has_scenarios: + return slices[first_key].rename(name) + + # Multi-dimensional: use xr.concat to stack along period/scenario dims + if has_periods and has_scenarios: + # Stack scenarios first, then periods + period_arrays = [] + for p in periods: + scenario_arrays = [slices[(p, s)] for s in scenarios] + period_arrays.append(xr.concat(scenario_arrays, dim=pd.Index(scenarios, name='scenario'))) + result = xr.concat(period_arrays, dim=pd.Index(periods, name='period')) + elif has_periods: + result = xr.concat([slices[(p, None)] for p in periods], dim=pd.Index(periods, name='period')) + else: + result = xr.concat([slices[(None, s)] for s in scenarios], dim=pd.Index(scenarios, name='scenario')) + + # Put base dimension first (standard order) + result = result.transpose(base_dims[0], ...) + + return result.rename(name) + def expand_solution(self) -> FlowSystem: """Expand a reduced (clustered) FlowSystem back to full original timesteps. @@ -964,7 +1055,6 @@ def expand_solution(self) -> FlowSystem: ) has_periods = original_fs.periods is not None has_scenarios = original_fs.scenarios is not None - cluster_order = cluster_structure.cluster_order.values periods = list(original_fs.periods) if has_periods else [None] scenarios = list(original_fs.scenarios) if has_scenarios else [None] @@ -973,11 +1063,15 @@ def expand_solution(self) -> FlowSystem: n_original_timesteps = len(original_timesteps) n_reduced_timesteps = n_clusters * timesteps_per_cluster - # Build expansion mapping (same for all period/scenario combinations) - base_mapping = self._build_expansion_mapping(cluster_order, timesteps_per_cluster, n_original_timesteps) - - # Create mappings dict for all (period, scenario) combinations using the same mapping - mappings = {(p, s): base_mapping for p in periods for s in scenarios} + # Build expansion mapping per (period, scenario) combination + # Each slice may have a different cluster assignment + mappings = {} + for p in periods: + for s in scenarios: + cluster_order = cluster_structure.get_cluster_order_for_slice(period=p, scenario=s) + mappings[(p, s)] = self._build_expansion_mapping( + cluster_order, timesteps_per_cluster, n_original_timesteps + ) # Expand function for DataArrays def expand_da(da: xr.DataArray) -> xr.DataArray: @@ -1012,13 +1106,14 @@ def expand_da(da: xr.DataArray) -> xr.DataArray: ) n_combinations = len(periods) * len(scenarios) + n_original_segments = cluster_structure.n_original_periods logger.info( f'Expanded FlowSystem from {n_reduced_timesteps} to {n_original_timesteps} timesteps ' f'({n_clusters} clusters' + ( f', {n_combinations} period/scenario combinations)' if n_combinations > 1 - else f' → {len(cluster_order)} original segments)' + else f' → {n_original_segments} original segments)' ) ) diff --git a/tests/test_cluster_reduce_expand.py b/tests/test_cluster_reduce_expand.py index a6547131f..b7a41ff1a 100644 --- a/tests/test_cluster_reduce_expand.py +++ b/tests/test_cluster_reduce_expand.py @@ -318,19 +318,22 @@ def test_expand_solution_maps_scenarios_independently(solver_fixture, timesteps_ fs_reduced.optimize(solver_fixture) info = fs_reduced._cluster_info - cluster_order = info.result.cluster_structure.cluster_order.values - timesteps_per_cluster = info.result.cluster_structure.timesteps_per_cluster # 24 + cluster_structure = info.result.cluster_structure + 
timesteps_per_cluster = cluster_structure.timesteps_per_cluster # 24 reduced_flow = fs_reduced.solution['Boiler(Q_th)|flow_rate'] fs_expanded = fs_reduced.transform.expand_solution() expanded_flow = fs_expanded.solution['Boiler(Q_th)|flow_rate'] - # Check mapping for each scenario (all use the same cluster_order in simplified implementation) + # Check mapping for each scenario using its own cluster_order for scenario in scenarios_2: + # Get the cluster_order for THIS scenario + cluster_order = cluster_structure.get_cluster_order_for_slice(scenario=scenario) + reduced_scenario = reduced_flow.sel(scenario=scenario).values expanded_scenario = expanded_flow.sel(scenario=scenario).values - # Verify mapping is correct for this scenario + # Verify mapping is correct for this scenario using its own cluster_order for orig_segment_idx, cluster_id in enumerate(cluster_order): orig_start = orig_segment_idx * timesteps_per_cluster orig_end = orig_start + timesteps_per_cluster From 3e55faa3883393986356fb16167f2249f7205077 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 23:10:07 +0100 Subject: [PATCH 075/191] fix --- flixopt/aggregation/storage_linking.py | 93 ++++++++++++++++---------- 1 file changed, 56 insertions(+), 37 deletions(-) diff --git a/flixopt/aggregation/storage_linking.py b/flixopt/aggregation/storage_linking.py index 681f5def8..66f948be9 100644 --- a/flixopt/aggregation/storage_linking.py +++ b/flixopt/aggregation/storage_linking.py @@ -130,32 +130,41 @@ def _add_storage_linking(self, storage) -> None: # Create SOC_boundary variables for each original period boundary # We need n_original_periods + 1 boundaries (start of first through end of last) n_boundaries = self._n_original_periods + 1 - boundary_coords = [np.arange(n_boundaries)] - boundary_dims = ['period_boundary'] - - # Build bounds - handle both scalar and multi-dimensional cap_value + boundary_coords = {'cluster_boundary': np.arange(n_boundaries)} + boundary_dims = ['cluster_boundary'] + + # Determine extra dimensions from FlowSystem (period, scenario) + # These are needed even if cap_value is scalar, because different periods/scenarios + # may have different cluster assignments + extra_dims = [] + if self.flow_system.periods is not None: + extra_dims.append('period') + boundary_coords['period'] = np.array(list(self.flow_system.periods)) + if self.flow_system.scenarios is not None: + extra_dims.append('scenario') + boundary_coords['scenario'] = np.array(list(self.flow_system.scenarios)) + + if extra_dims: + boundary_dims = ['cluster_boundary'] + extra_dims + + # Build bounds shape + lb_shape = [n_boundaries] + [len(boundary_coords[d]) for d in extra_dims] + lb = xr.DataArray(np.zeros(lb_shape), coords=boundary_coords, dims=boundary_dims) + + # Get upper bound from capacity if isinstance(cap_value, xr.DataArray) and cap_value.dims: - # cap_value has dimensions (e.g., period, scenario) - need to broadcast - extra_dims = list(cap_value.dims) - extra_coords = {dim: cap_value.coords[dim].values for dim in extra_dims} - - boundary_dims = ['period_boundary'] + extra_dims - boundary_coords = [np.arange(n_boundaries)] + [extra_coords[d] for d in extra_dims] - - lb_coords = {'period_boundary': np.arange(n_boundaries), **extra_coords} - lb_shape = [n_boundaries] + [len(extra_coords[d]) for d in extra_dims] - lb = xr.DataArray(np.zeros(lb_shape), coords=lb_coords, dims=boundary_dims) - - ub = cap_value.expand_dims({'period_boundary': n_boundaries}, axis=0) - ub = 
ub.assign_coords(period_boundary=np.arange(n_boundaries)) + # cap_value has dimensions - expand to include cluster_boundary + ub = cap_value.expand_dims({'cluster_boundary': n_boundaries}, axis=0) + ub = ub.assign_coords(cluster_boundary=np.arange(n_boundaries)) + # Ensure dims are in the right order + ub = ub.transpose('cluster_boundary', ...) else: - # Scalar cap_value + # Scalar cap_value - broadcast to all dims if hasattr(cap_value, 'item'): cap_value = float(cap_value.item()) else: cap_value = float(cap_value) - lb = xr.DataArray(0.0, coords={'period_boundary': np.arange(n_boundaries)}, dims=['period_boundary']) - ub = xr.DataArray(cap_value, coords={'period_boundary': np.arange(n_boundaries)}, dims=['period_boundary']) + ub = xr.DataArray(np.full(lb_shape, cap_value), coords=boundary_coords, dims=boundary_dims) soc_boundary = self.add_variables( lower=lb, @@ -185,13 +194,15 @@ def _add_storage_linking(self, storage) -> None: for d in range(self._n_original_periods): c = int(cluster_order[d]) lhs = ( - soc_boundary.isel(period_boundary=d + 1) - soc_boundary.isel(period_boundary=d) - delta_soc_dict[c] + soc_boundary.isel(cluster_boundary=d + 1) + - soc_boundary.isel(cluster_boundary=d) + - delta_soc_dict[c] ) self.add_constraints(lhs == 0, short_name=f'link|{label}|{d}') # Cyclic constraint: SOC_boundary[0] = SOC_boundary[end] if self.storage_cyclic: - lhs = soc_boundary.isel(period_boundary=0) - soc_boundary.isel(period_boundary=self._n_original_periods) + lhs = soc_boundary.isel(cluster_boundary=0) - soc_boundary.isel(cluster_boundary=self._n_original_periods) self.add_constraints(lhs == 0, short_name=f'cyclic|{label}') logger.debug(f'Added inter-cluster linking for storage {label}') @@ -210,7 +221,7 @@ def _add_linking_constraints_multi_dim( Args: storage: Storage component being linked. - soc_boundary: SOC boundary variable with dims [period_boundary, period?, scenario?]. + soc_boundary: SOC boundary variable with dims [cluster_boundary, period?, scenario?]. delta_soc_dict: Dict mapping cluster ID to delta_SOC expression. label: Storage label for constraint naming. 
""" @@ -220,32 +231,40 @@ def _add_linking_constraints_multi_dim( has_periods = periods != [None] has_scenarios = scenarios != [None] + # Check which dimensions soc_boundary actually has + soc_dims = set(soc_boundary.dims) + # For each (period, scenario) combination, create constraints using the slice's cluster_order for p in periods: for s in scenarios: cluster_order = self.cluster_structure.get_cluster_order_for_slice(period=p, scenario=s) - # Build selector for this slice - selector = {} - if has_periods and p is not None: - selector['period'] = p - if has_scenarios and s is not None: - selector['scenario'] = s + # Build selector for this slice - only include dims that exist in soc_boundary + soc_selector = {} + if has_periods and p is not None and 'period' in soc_dims: + soc_selector['period'] = p + if has_scenarios and s is not None and 'scenario' in soc_dims: + soc_selector['scenario'] = s - # Select the slice of soc_boundary and delta_soc for this (period, scenario) - soc_boundary_slice = soc_boundary.sel(**selector) if selector else soc_boundary + # Select the slice of soc_boundary for this (period, scenario) + soc_boundary_slice = soc_boundary.sel(**soc_selector) if soc_selector else soc_boundary for d in range(self._n_original_periods): c = int(cluster_order[d]) delta_soc = delta_soc_dict[c] - if selector: - delta_soc = ( - delta_soc.sel(**selector) if any(dim in delta_soc.dims for dim in selector) else delta_soc - ) + + # Build selector for delta_soc - check which dims it has + delta_selector = {} + if has_periods and p is not None and 'period' in delta_soc.dims: + delta_selector['period'] = p + if has_scenarios and s is not None and 'scenario' in delta_soc.dims: + delta_selector['scenario'] = s + if delta_selector: + delta_soc = delta_soc.sel(**delta_selector) lhs = ( - soc_boundary_slice.isel(period_boundary=d + 1) - - soc_boundary_slice.isel(period_boundary=d) + soc_boundary_slice.isel(cluster_boundary=d + 1) + - soc_boundary_slice.isel(cluster_boundary=d) - delta_soc ) From d0f37cf7ff707cd0da9bdbac55cde43b0f940c4a Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 23:15:34 +0100 Subject: [PATCH 076/191] Update notebooks --- docs/notebooks/08c-clustering.ipynb | 12 ++++++------ docs/notebooks/08d-clustering-multiperiod.ipynb | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb index 4d26363b1..14669dc49 100644 --- a/docs/notebooks/08c-clustering.ipynb +++ b/docs/notebooks/08c-clustering.ipynb @@ -5,7 +5,7 @@ "id": "0", "metadata": {}, "source": [ - "# Time Series Clustering with `aggregate()`\n", + "# Time Series Clustering with `cluster()`\n", "\n", "Accelerate investment optimization using typical periods (clustering).\n", "\n", @@ -131,9 +131,9 @@ "id": "7", "metadata": {}, "source": [ - "## Method 2: Clustering with `aggregate()`\n", + "## Method 2: Clustering with `cluster()`\n", "\n", - "The `aggregate()` method:\n", + "The `cluster()` method:\n", "\n", "1. **Clusters similar days** using the TSAM (Time Series Aggregation Module) package\n", "2. **Reduces timesteps** to only typical periods (e.g., 8 typical days = 768 timesteps)\n", @@ -233,7 +233,7 @@ "\n", "The recommended approach for investment optimization:\n", "\n", - "1. **Stage 1**: Fast sizing with `aggregate()` \n", + "1. **Stage 1**: Fast sizing with `cluster()` \n", "2. 
**Stage 2**: Fix sizes (with safety margin) and dispatch at full resolution\n", "\n", "!!! tip \"Safety Margin\"\n", @@ -436,12 +436,12 @@ "\n", "| Parameter | Type | Description |\n", "|-----------|------|-------------|\n", - "| `method` | `str` | Aggregation backend: 'tsam' (default) or 'manual' |\n", "| `n_clusters` | `int` | Number of typical periods (e.g., 8 typical days) |\n", "| `cluster_duration` | `str \\| float` | Duration per cluster ('1D', '24h') or hours |\n", "| `weights` | `dict[str, float]` | Optional weights for time series in clustering |\n", "| `time_series_for_high_peaks` | `list[str]` | **Essential**: Force inclusion of peak periods |\n", "| `time_series_for_low_peaks` | `list[str]` | Force inclusion of minimum periods |\n", + "| `storage_inter_cluster_linking` | `bool` | Add inter-cluster storage constraints (default: True) |\n", "| `storage_cyclic` | `bool` | Enforce SOC[end] = SOC[start] (default: True) |\n", "\n", "### Peak Forcing Format\n", @@ -479,7 +479,7 @@ "\n", "You learned how to:\n", "\n", - "- Use **`aggregate()`** to aggregate time series into typical periods\n", + "- Use **`cluster()`** to reduce time series into typical periods\n", "- Apply **peak forcing** to capture extreme demand days\n", "- Use **two-stage optimization** for fast yet accurate investment decisions\n", "- **Expand solutions** back to full resolution with `expand_solution()`\n", diff --git a/docs/notebooks/08d-clustering-multiperiod.ipynb b/docs/notebooks/08d-clustering-multiperiod.ipynb index 219fba33b..573e696d4 100644 --- a/docs/notebooks/08d-clustering-multiperiod.ipynb +++ b/docs/notebooks/08d-clustering-multiperiod.ipynb @@ -5,7 +5,7 @@ "id": "0", "metadata": {}, "source": [ - "# Multi-Period Clustering with `aggregate()`\n", + "# Multi-Period Clustering with `cluster()`\n", "\n", "Combine time series clustering with multi-period investment optimization.\n", "\n", @@ -13,7 +13,7 @@ "\n", "- **Multi-period modeling**: Optimize investments across multiple planning periods (years)\n", "- **Scenario analysis**: Handle demand uncertainty with weighted scenarios\n", - "- **Clustering per period**: Apply typical-period aggregation independently for each period/scenario\n", + "- **Clustering per period**: Apply typical-period clustering independently for each period/scenario\n", "- **Scalability**: Reduce computational complexity for long-horizon planning\n", "\n", "!!! 
note \"Requirements\"\n", @@ -165,9 +165,9 @@ "id": "9", "metadata": {}, "source": [ - "## Multi-Period Clustering with `aggregate()`\n", + "## Multi-Period Clustering with `cluster()`\n", "\n", - "When applied to a multi-period system, `aggregate()` clusters **each period/scenario combination independently**.\n", + "When applied to a multi-period system, `cluster()` clusters **each period/scenario combination independently**.\n", "This is because demand patterns and optimal operations may differ across:\n", "\n", "- **Periods**: Different years may have different characteristics\n", @@ -538,7 +538,7 @@ "\n", "- Load **multi-period systems** with periods and scenarios\n", "- Use **`transform.isel()`** to select time subsets\n", - "- Apply **`aggregate()`** to multi-dimensional FlowSystems\n", + "- Apply **`cluster()`** to multi-dimensional FlowSystems\n", "- Use the **two-stage workflow** for robust investment optimization\n", "- **Expand solutions** back to full resolution with `expand_solution()`\n", "\n", @@ -558,7 +558,7 @@ "# Select time subset (optional)\n", "fs = fs.transform.isel(time=slice(0, 168)) # First 168 timesteps\n", "\n", - "# Aggregate (applies per period/scenario)\n", + "# Cluster (applies per period/scenario)\n", "fs_clustered = fs.transform.cluster(\n", " n_clusters=10,\n", " cluster_duration='1D',\n", From d4bbcfe7fca6b7d8cfbadec652f2c2b37880ef26 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 23:16:09 +0100 Subject: [PATCH 077/191] simplified: Changes Made flixopt/transform_accessor.py: 1. expand_solution() - Removed the loop that rebuilt mappings. Now uses info.result.timestep_mapping directly (8 lines removed) 2. _expand_dataarray() - Changed signature from mappings: dict[tuple, np.ndarray] to timestep_mapping: xr.DataArray. Now uses .sel() to get per-slice mappings from the stored DataArray. 3. Removed _build_expansion_mapping() - No longer needed since we use the stored mapping (22 lines removed) What Was Kept - _combine_slices_to_dataarray_generic() - Serves a different purpose (metadata arrays like cluster_order with non-time dimensions) - _add_linking_constraints_multi_dim() - The separation provides clear structure --- flixopt/transform_accessor.py | 59 ++++++++++------------------------- 1 file changed, 17 insertions(+), 42 deletions(-) diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 456635113..76806705d 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -1063,21 +1063,14 @@ def expand_solution(self) -> FlowSystem: n_original_timesteps = len(original_timesteps) n_reduced_timesteps = n_clusters * timesteps_per_cluster - # Build expansion mapping per (period, scenario) combination - # Each slice may have a different cluster assignment - mappings = {} - for p in periods: - for s in scenarios: - cluster_order = cluster_structure.get_cluster_order_for_slice(period=p, scenario=s) - mappings[(p, s)] = self._build_expansion_mapping( - cluster_order, timesteps_per_cluster, n_original_timesteps - ) + # Use stored timestep_mapping directly (already multi-dimensional) + timestep_mapping = info.result.timestep_mapping # Expand function for DataArrays def expand_da(da: xr.DataArray) -> xr.DataArray: if 'time' not in da.dims: return da.copy() - return self._expand_dataarray(da, mappings, original_timesteps, periods, scenarios) + return self._expand_dataarray(da, timestep_mapping, original_timesteps, periods, scenarios) # 1. 
Expand FlowSystem data (with cluster_weight set to 1.0 for all timesteps) reduced_ds = self._fs.to_dataset(include_solution=False) @@ -1119,33 +1112,10 @@ def expand_da(da: xr.DataArray) -> xr.DataArray: return expanded_fs - @staticmethod - def _build_expansion_mapping( - cluster_order: np.ndarray, timesteps_per_cluster: int, n_original_timesteps: int - ) -> np.ndarray: - """Build mapping from original timesteps to reduced (typical) timesteps. - - Args: - cluster_order: Array mapping each original segment to its cluster ID. - timesteps_per_cluster: Number of timesteps per cluster. - n_original_timesteps: Total number of original timesteps. - - Returns: - Array where mapping[i] gives the reduced timestep index for original timestep i. - """ - n_reduced = len(set(cluster_order)) * timesteps_per_cluster - segment_indices = np.arange(n_original_timesteps) // timesteps_per_cluster - pos_in_segment = np.arange(n_original_timesteps) % timesteps_per_cluster - # Handle edge case where segment_indices exceed cluster_order length - safe_segment_indices = np.minimum(segment_indices, len(cluster_order) - 1) - cluster_ids = cluster_order[safe_segment_indices] - mapping = cluster_ids * timesteps_per_cluster + pos_in_segment - return np.minimum(mapping, n_reduced - 1).astype(np.int32) - @staticmethod def _expand_dataarray( da: xr.DataArray, - mappings: dict[tuple, np.ndarray], + timestep_mapping: xr.DataArray, original_timesteps: pd.DatetimeIndex, periods: list, scenarios: list, @@ -1154,7 +1124,8 @@ def _expand_dataarray( Args: da: DataArray with reduced time dimension. - mappings: Dict mapping (period, scenario) tuples to expansion index arrays. + timestep_mapping: DataArray mapping original timesteps to reduced indices. + dims: [original_time] or [original_time, period?, scenario?] original_timesteps: Original time coordinates. periods: List of period labels ([None] if no periods). scenarios: List of scenario labels ([None] if no scenarios). @@ -1162,13 +1133,12 @@ def _expand_dataarray( Returns: DataArray with expanded time dimension. 
""" - first_key = (periods[0], scenarios[0]) has_periods = periods != [None] has_scenarios = scenarios != [None] # Simple case: no period/scenario dimensions in the data if (not has_periods and not has_scenarios) or ('period' not in da.dims and 'scenario' not in da.dims): - mapping = mappings[first_key] + mapping = timestep_mapping.values expanded = da.isel(time=xr.DataArray(mapping, dims=['time'])) return expanded.assign_coords(time=original_timesteps).assign_attrs(da.attrs) @@ -1176,10 +1146,15 @@ def _expand_dataarray( expanded_slices: dict[tuple, xr.DataArray] = {} for p in periods: for s in scenarios: - key = (p, s) - mapping = mappings[key] - - # Select the slice for this (period, scenario) combination + # Get mapping for this (period, scenario) slice + mapping_slice = timestep_mapping + if p is not None and 'period' in timestep_mapping.dims: + mapping_slice = mapping_slice.sel(period=p) + if s is not None and 'scenario' in timestep_mapping.dims: + mapping_slice = mapping_slice.sel(scenario=s) + mapping = mapping_slice.values + + # Select the data slice for this (period, scenario) combination selector = {} if p is not None and 'period' in da.dims: selector['period'] = p @@ -1188,7 +1163,7 @@ def _expand_dataarray( slice_da = da.sel(**selector, drop=True) if selector else da expanded = slice_da.isel(time=xr.DataArray(mapping, dims=['time'])) - expanded_slices[key] = expanded.assign_coords(time=original_timesteps) + expanded_slices[(p, s)] = expanded.assign_coords(time=original_timesteps) # Recombine slices using _combine_slices_to_dataarray return TransformAccessor._combine_slices_to_dataarray( From 83a4603e909144d48db02938ea99a4ca533c84de Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 18 Dec 2025 23:23:52 +0100 Subject: [PATCH 078/191] Improve notebook --- docs/notebooks/08e-clustering-internals.ipynb | 715 ++++++++---------- 1 file changed, 330 insertions(+), 385 deletions(-) diff --git a/docs/notebooks/08e-clustering-internals.ipynb b/docs/notebooks/08e-clustering-internals.ipynb index 58d58a394..157b54f5a 100644 --- a/docs/notebooks/08e-clustering-internals.ipynb +++ b/docs/notebooks/08e-clustering-internals.ipynb @@ -5,17 +5,18 @@ "id": "0", "metadata": {}, "source": [ - "# Clustering Internals: Weights, TSAM, and Cost Scaling\n", + "# Clustering Internals: Architecture and Data Structures\n", "\n", "A deep dive into how time series clustering works under the hood.\n", "\n", "This notebook covers:\n", "\n", - "- **Cluster weights**: How operational costs are scaled to represent the full time horizon\n", - "- **TSAM integration**: How the Time Series Aggregation Module performs clustering\n", - "- **Typical periods**: Visualizing representative vs original time series\n", - "- **Storage handling**: Inter-period linking and cyclic constraints\n", - "- **The `_cluster_info` structure**: Internal data for expansion and analysis\n", + "- **Module overview**: The `flixopt.aggregation` module and its classes\n", + "- **Data flow**: From `cluster()` through optimization to `expand_solution()`\n", + "- **Core classes**: `ClusterStructure`, `ClusterResult`, `ClusterInfo`\n", + "- **Cluster weights**: How operational costs are scaled correctly\n", + "- **Storage linking**: Inter-cluster constraints for realistic storage behavior\n", + "- **Multi-dimensional support**: Handling periods and scenarios\n", "\n", "!!! note \"Prerequisites\"\n", " This notebook assumes familiarity with [08c-clustering](08c-clustering.ipynb)." 
@@ -60,104 +61,209 @@ "print(f'Loaded: {len(flow_system.timesteps)} timesteps ({len(flow_system.timesteps) / 96:.0f} days)')" ] }, + { + "cell_type": "markdown", + "id": "3", + "metadata": {}, + "source": [ + "## 1. Module Architecture Overview\n", + "\n", + "The clustering functionality lives in `flixopt.aggregation` with this structure:\n", + "\n", + "```\n", + "flixopt.aggregation/\n", + "├── base.py # Core dataclasses: ClusterStructure, ClusterResult, ClusterInfo\n", + "├── storage_linking.py # InterClusterLinking for storage constraints\n", + "└── __init__.py # Public exports\n", + "```\n", + "\n", + "### Key Classes\n", + "\n", + "| Class | Purpose |\n", + "|-------|--------|\n", + "| `ClusterStructure` | Hierarchical structure: which original periods map to which clusters |\n", + "| `ClusterResult` | Universal container: timestep mapping, weights, aggregated data |\n", + "| `ClusterInfo` | Stored on FlowSystem after clustering; enables `expand_solution()` |\n", + "| `InterClusterLinking` | Adds storage SOC constraints across the original time horizon |" + ] + }, { "cell_type": "code", "execution_count": null, - "id": "3", + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "# Import the aggregation module to explore its contents\n", + "from flixopt import aggregation\n", + "\n", + "print('Available in flixopt.aggregation:')\n", + "print([name for name in dir(aggregation) if not name.startswith('_')])" + ] + }, + { + "cell_type": "markdown", + "id": "5", + "metadata": {}, + "source": [ + "## 2. Data Flow: From `cluster()` to `expand_solution()`\n", + "\n", + "```\n", + "┌─────────────────────────────────────────────────────────────────┐\n", + "│ flow_system.transform.cluster(n_clusters=8, ...) │\n", + "└─────────────────────────────────────────────────────────────────┘\n", + " │\n", + " ▼\n", + "┌─────────────────────────────────────────────────────────────────┐\n", + "│ 1. Extract time series data from FlowSystem │\n", + "│ 2. Call tsam for clustering │\n", + "│ 3. Build ClusterStructure (cluster_order, occurrences) │\n", + "│ 4. Build ClusterResult (timestep_mapping, weights) │\n", + "│ 5. Create reduced FlowSystem with representative timesteps │\n", + "│ 6. 
Store ClusterInfo on reduced_fs._cluster_info │\n", + "└─────────────────────────────────────────────────────────────────┘\n", + " │\n", + " ▼\n", + "┌─────────────────────────────────────────────────────────────────┐\n", + "│ reduced_fs.optimize(solver) │\n", + "│ └─ InterClusterLinking adds storage constraints if enabled │\n", + "└─────────────────────────────────────────────────────────────────┘\n", + " │\n", + " ▼\n", + "┌─────────────────────────────────────────────────────────────────┐\n", + "│ reduced_fs.transform.expand_solution() │\n", + "│ └─ Uses stored timestep_mapping to expand back to full time │\n", + "└─────────────────────────────────────────────────────────────────┘\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6", "metadata": {}, "outputs": [], "source": [ - "# Create a clustered system for analysis\n", + "# Create a clustered system\n", "fs_clustered = flow_system.transform.cluster(\n", " n_clusters=8,\n", " cluster_duration='1D',\n", " time_series_for_high_peaks=['HeatDemand(Q_th)|fixed_relative_profile'],\n", ")\n", "\n", - "print(f'Clustered: {len(fs_clustered.timesteps)} timesteps')" + "print(f'Original timesteps: {len(flow_system.timesteps)}')\n", + "print(f'Clustered timesteps: {len(fs_clustered.timesteps)}')\n", + "print(f'Reduction: {len(flow_system.timesteps) / len(fs_clustered.timesteps):.1f}x')" ] }, { "cell_type": "markdown", - "id": "4", + "id": "7", "metadata": {}, "source": [ - "## 1. The `_cluster_info` Structure\n", + "## 3. The `ClusterInfo` Structure\n", "\n", - "After clustering, the FlowSystem stores metadata in `_cluster_info` that enables:\n", + "After clustering, metadata is stored in `fs._cluster_info`. This enables:\n", "- Expanding solutions back to full resolution\n", "- Understanding which original days map to which clusters\n", - "- Weighting costs correctly in the objective function" + "- Correct weighting in the objective function" ] }, { "cell_type": "code", "execution_count": null, - "id": "5", + "id": "8", "metadata": {}, "outputs": [], "source": [ "info = fs_clustered._cluster_info\n", "\n", - "print('ClusterInfo structure:')\n", - "print(f' backend_name: {info.backend_name}')\n", + "print('ClusterInfo attributes:')\n", + "print(f' backend_name: {info.backend_name}')\n", "print(f' storage_inter_cluster_linking: {info.storage_inter_cluster_linking}')\n", - "print(f' storage_cyclic: {info.storage_cyclic}')\n", - "\n", - "cs = info.result.cluster_structure\n", - "print('\\nClusterStructure:')\n", - "print(f' n_clusters: {cs.n_clusters}')\n", - "print(f' timesteps_per_cluster: {cs.timesteps_per_cluster}')\n", - "print(f' cluster_order shape: {cs.cluster_order.shape}')\n", - "print(f' cluster_occurrences: {dict(cs.cluster_occurrences)}')" + "print(f' storage_cyclic: {info.storage_cyclic}')\n", + "print(f' original_flow_system: {type(info.original_flow_system).__name__}')\n", + "print(f' result: {type(info.result).__name__}')" ] }, { "cell_type": "markdown", - "id": "6", + "id": "9", "metadata": {}, "source": [ - "### Cluster Order: Mapping Days to Clusters\n", + "## 4. 
The `ClusterStructure`: Hierarchical Mapping\n", + "\n", + "The `ClusterStructure` captures which original periods (days) belong to which clusters:\n", "\n", - "The `cluster_order` array shows which cluster each original day belongs to:" + "- **`cluster_order`**: Array mapping each original period index to its cluster ID\n", + "- **`cluster_occurrences`**: How many original periods each cluster represents\n", + "- **`n_clusters`**: Number of representative clusters\n", + "- **`timesteps_per_cluster`**: Timesteps in each cluster (e.g., 96 for daily with 15-min resolution)" ] }, { "cell_type": "code", "execution_count": null, - "id": "7", + "id": "10", "metadata": {}, "outputs": [], "source": [ - "info = fs_clustered._cluster_info\n", "cs = info.result.cluster_structure\n", + "\n", + "print('ClusterStructure:')\n", + "print(f' n_clusters: {cs.n_clusters}')\n", + "print(f' timesteps_per_cluster: {cs.timesteps_per_cluster}')\n", + "print(f' n_original_periods: {cs.n_original_periods}')\n", + "print(f' cluster_order dims: {cs.cluster_order.dims}')\n", + "print(f' cluster_order shape: {cs.cluster_order.shape}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "11", + "metadata": {}, + "outputs": [], + "source": [ + "# cluster_order shows which cluster each original day belongs to\n", "cluster_order = cs.cluster_order.values\n", - "n_original_days = len(cluster_order)\n", "\n", - "# Create a DataFrame for visualization\n", - "days_df = pd.DataFrame(\n", - " {\n", - " 'Day': range(1, n_original_days + 1),\n", - " 'Cluster': cluster_order,\n", - " 'Date': pd.date_range('2020-01-01', periods=n_original_days, freq='D'),\n", - " }\n", - ")\n", - "days_df['Weekday'] = days_df['Date'].dt.day_name()\n", + "print('Cluster assignments (first 14 days):')\n", + "for day in range(min(14, len(cluster_order))):\n", + " print(f' Day {day + 1:2d} → Cluster {cluster_order[day]}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "# cluster_occurrences shows how many original days each cluster represents\n", + "print('Cluster occurrences (days per cluster):')\n", + "for cluster_id in range(cs.n_clusters):\n", + " count = int(cs.cluster_occurrences.sel(cluster=cluster_id).values)\n", + " print(f' Cluster {cluster_id}: {count} day(s)')\n", "\n", - "print(f'Original days: {n_original_days}')\n", - "print(f'Number of clusters: {cs.n_clusters}')\n", - "print('\\nFirst 14 days:')\n", - "print(days_df.head(14).to_string(index=False))" + "print(f'\\nTotal: {int(cs.cluster_occurrences.sum().values)} days')" ] }, { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "13", "metadata": {}, "outputs": [], "source": [ - "# Visualize cluster assignment as a heatmap\n", + "# Visualize cluster assignment\n", + "days_df = pd.DataFrame(\n", + " {\n", + " 'Day': range(1, cs.n_original_periods + 1),\n", + " 'Cluster': cluster_order,\n", + " }\n", + ")\n", + "\n", "fig = px.bar(\n", " days_df,\n", " x='Day',\n", @@ -165,7 +271,6 @@ " color='Cluster',\n", " color_continuous_scale='Viridis',\n", " title='Cluster Assignment by Day',\n", - " labels={'y': ''},\n", ")\n", "fig.update_layout(height=250, yaxis_visible=False, coloraxis_colorbar_title='Cluster')\n", "fig.show()" @@ -173,65 +278,97 @@ }, { "cell_type": "markdown", - "id": "9", + "id": "14", "metadata": {}, "source": [ - "## 2. Cluster Weights: Scaling Operational Costs\n", + "## 5. 
The `ClusterResult`: Timestep Mapping and Weights\n", "\n", - "When we optimize over 8 typical days instead of 31, the operational costs for each typical day\n", - "must be **scaled** to represent all the days it represents.\n", + "The `ClusterResult` contains:\n", "\n", - "### The `cluster_weight` Property\n", - "\n", - "The clustered FlowSystem has a `cluster_weight` that stores the weight for each timestep:" + "- **`timestep_mapping`**: Maps each original timestep to its representative timestep index\n", + "- **`representative_weights`**: Weight for each representative timestep (used as `cluster_weight`)\n", + "- **`cluster_structure`**: Reference to the hierarchical structure\n", + "- **`original_data`**: The time series data used for clustering" ] }, { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "15", "metadata": {}, "outputs": [], "source": [ - "# The cluster_weight is stored on the FlowSystem\n", - "print('cluster_weight structure:')\n", - "print(fs_clustered.cluster_weight)\n", - "print(f'\\nShape: {fs_clustered.cluster_weight.shape}')\n", - "print(f'Sum of weights: {fs_clustered.cluster_weight.sum().item():.0f}')\n", - "print(f'Expected (original timesteps): {len(flow_system.timesteps)}')" + "result = info.result\n", + "\n", + "print('ClusterResult:')\n", + "print(f' n_representatives: {result.n_representatives}')\n", + "print(f' timestep_mapping dims: {result.timestep_mapping.dims}')\n", + "print(f' timestep_mapping shape: {result.timestep_mapping.shape}')\n", + "print(f' representative_weights: {result.representative_weights.shape}')" ] }, { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "16", "metadata": {}, "outputs": [], "source": [ - "# Cluster occurrences (how many original days each cluster represents)\n", - "info = fs_clustered._cluster_info\n", - "cs = info.result.cluster_structure\n", - "cluster_occurrences = dict(cs.cluster_occurrences)\n", + "# The timestep_mapping shows which representative timestep each original timestep maps to\n", + "mapping = result.timestep_mapping.values\n", + "\n", + "print('Timestep mapping (first 10 original timesteps):')\n", + "for t in range(10):\n", + " print(f' Original t={t} → Representative t={mapping[t]}')\n", + "\n", + "print(f'\\n... (total {len(mapping)} mappings)')" + ] + }, + { + "cell_type": "markdown", + "id": "17", + "metadata": {}, + "source": [ + "## 6. 
Cluster Weights: Scaling Operational Costs\n", + "\n", + "When optimizing over typical periods, operational costs must be **scaled** to represent the full time horizon.\n", + "\n", + "### The Weight Formula\n", + "\n", + "$$\\text{Objective} = \\sum_{t \\in \\text{typical}} w_t \\cdot c_t$$\n", + "\n", + "Where:\n", + "- $w_t$ = cluster weight for timestep $t$ (number of original days this cluster represents)\n", + "- $c_t$ = operational cost at timestep $t$\n", "\n", - "print('Cluster occurrences (days represented by each typical day):')\n", - "for cluster_id, count in sorted(cluster_occurrences.items()):\n", - " print(f' Cluster {cluster_id}: {count} days (weight = {count})')\n", + "### Weight Conservation\n", "\n", - "print(f'\\nTotal: {sum(cluster_occurrences.values())} days')" + "$$\\sum_{t \\in \\text{typical}} w_t = |\\text{original timesteps}|$$" ] }, { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "18", "metadata": {}, "outputs": [], "source": [ - "# Visualize weights across the reduced timesteps\n", - "info = fs_clustered._cluster_info\n", - "cs = info.result.cluster_structure\n", + "# The cluster_weight is stored on the FlowSystem\n", + "print('cluster_weight on FlowSystem:')\n", + "print(f' Shape: {fs_clustered.cluster_weight.shape}')\n", + "print(f' Sum: {fs_clustered.cluster_weight.sum().item():.0f}')\n", + "print(f' Expected (original timesteps): {len(flow_system.timesteps)}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "19", + "metadata": {}, + "outputs": [], + "source": [ + "# Visualize weights across timesteps\n", "weights = fs_clustered.cluster_weight.values\n", - "timesteps_per_day = cs.timesteps_per_cluster\n", "\n", "fig = go.Figure()\n", "fig.add_trace(\n", @@ -240,17 +377,16 @@ " y=weights,\n", " mode='lines',\n", " name='Cluster Weight',\n", - " line=dict(width=1),\n", " )\n", ")\n", "\n", - "# Add vertical lines at day boundaries\n", + "# Add vertical lines at cluster boundaries\n", "for i in range(1, cs.n_clusters):\n", - " fig.add_vline(x=i * timesteps_per_day, line_dash='dash', line_color='gray', opacity=0.5)\n", + " fig.add_vline(x=i * cs.timesteps_per_cluster, line_dash='dash', line_color='gray', opacity=0.5)\n", "\n", "fig.update_layout(\n", " height=300,\n", - " title='Cluster Weight per Timestep (Each Typical Day Has Uniform Weight)',\n", + " title='Cluster Weight per Timestep',\n", " xaxis_title='Timestep Index',\n", " yaxis_title='Weight',\n", ")\n", @@ -259,119 +395,152 @@ }, { "cell_type": "markdown", - "id": "13", + "id": "20", "metadata": {}, "source": [ - "### How Weights Affect the Objective Function\n", + "## 7. Storage Inter-Cluster Linking\n", "\n", - "The objective function multiplies operational costs by the cluster weight:\n", + "Storage behavior requires special handling in clustering. The `InterClusterLinking` class:\n", "\n", - "$$\\text{Objective} = \\sum_{t \\in \\text{typical}} w_t \\cdot c_t$$\n", + "1. Creates **SOC_boundary** variables for each original period boundary\n", + "2. Computes **delta_SOC** for each representative period (change in SOC)\n", + "3. Links them: `SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]]`\n", + "4. 
Optionally enforces cyclic constraint: `SOC_boundary[0] = SOC_boundary[end]`\n", "\n", - "Where:\n", - "- $w_t$ = cluster weight for timestep $t$ (= number of original days this cluster represents)\n", - "- $c_t$ = operational cost at timestep $t$\n", - "\n", - "This ensures that a typical day representing 7 similar days contributes 7× more to the objective\n", - "than a typical day representing only 1 day (e.g., a peak day)." + "This tracks storage state across the **full original time horizon** while only solving for representative periods." ] }, { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "21", + "metadata": {}, + "outputs": [], + "source": [ + "print('Storage settings in ClusterInfo:')\n", + "print(f' storage_inter_cluster_linking: {info.storage_inter_cluster_linking}')\n", + "print(f' storage_cyclic: {info.storage_cyclic}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "22", "metadata": {}, "outputs": [], "source": [ - "# Demonstrate how weights are applied (conceptually)\n", + "# Optimize and examine storage behavior\n", "solver = fx.solvers.HighsSolver(mip_gap=0.01, log_to_console=False)\n", "fs_clustered.optimize(solver)\n", "\n", - "# The 'costs' solution is already weighted\n", - "total_cost = fs_clustered.solution['costs'].item()\n", - "\n", - "# We can also access the per-timestep costs\n", - "costs_per_timestep = fs_clustered.solution['costs(temporal)|per_timestep']\n", - "\n", - "print(f'Total cost (weighted): {total_cost:,.0f} €')\n", - "print(f'\\nCosts per timestep shape: {costs_per_timestep.shape}')\n", - "print(f'Sum of weighted costs: {(costs_per_timestep * fs_clustered.cluster_weight).sum().item():,.0f} €')" + "# Check storage charge state\n", + "if 'Storage|charge_state' in fs_clustered.solution:\n", + " charge_state = fs_clustered.solution['Storage|charge_state']\n", + " print(f'Charge state shape: {charge_state.shape}')\n", + " print(f'Initial charge: {charge_state.values[0]:.1f} MWh')\n", + " print(f'Final charge: {charge_state.values[-1]:.1f} MWh')\n", + "else:\n", + " print('No storage in this system')" ] }, { "cell_type": "markdown", - "id": "15", + "id": "23", "metadata": {}, "source": [ - "## 3. TSAM Integration: The Clustering Algorithm\n", + "## 8. Multi-Dimensional Support (Periods/Scenarios)\n", "\n", - "flixopt uses the [TSAM](https://github.com/FZJ-IEK3-VSA/tsam) (Time Series Aggregation Module) \n", - "package for clustering. 
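A minimal sketch of how that tsam interface is typically driven, using synthetic data — the parameter and attribute names follow tsam's documented API, but the sizes and the demand profile are assumptions for illustration:

```python
# Sketch: driving tsam directly (synthetic data; sizes are assumptions)
import numpy as np
import pandas as pd
import tsam.timeseriesaggregation as tsam

idx = pd.date_range('2020-01-01', periods=31 * 24, freq='h')
profile = pd.DataFrame({'heat_demand': np.random.rand(len(idx))}, index=idx)

agg = tsam.TimeSeriesAggregation(
    timeSeries=profile,
    noTypicalPeriods=8,   # corresponds to n_clusters
    hoursPerPeriod=24,    # corresponds to cluster_duration='1D'
    clusterMethod='hierarchical',
)
typical = agg.createTypicalPeriods()    # DataFrame of the 8 typical days
order = agg.clusterOrder                # cluster id per original day
occurrences = agg.clusterPeriodNoOccur  # {cluster id: number of days}
```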
TSAM uses k-means clustering to group similar time periods.\n", + "When a FlowSystem has multiple **periods** (e.g., investment years) or **scenarios**, each (period, scenario) combination may have **different cluster assignments**.\n", "\n", - "### The ClusterResult Object" + "The data structures support this with multi-dimensional arrays:\n", + "\n", + "```python\n", + "# Simple case (no periods/scenarios)\n", + "cluster_order.dims = ['original_period']\n", + "timestep_mapping.dims = ['original_time']\n", + "\n", + "# Multi-scenario case\n", + "cluster_order.dims = ['original_period', 'scenario']\n", + "timestep_mapping.dims = ['original_time', 'scenario']\n", + "```\n", + "\n", + "Helper methods extract per-slice data:\n", + "```python\n", + "cluster_structure.get_cluster_order_for_slice(period='2025', scenario='high')\n", + "cluster_result.get_timestep_mapping_for_slice(scenario='base')\n", + "```" ] }, { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "24", "metadata": {}, "outputs": [], "source": [ - "# Access the ClusterResult which contains the TSAM clustering data\n", - "info = fs_clustered._cluster_info\n", - "result = info.result\n", + "# Check if our system has multi-dimensional clustering\n", + "print('Multi-dimensional check:')\n", + "print(f' cluster_order dims: {cs.cluster_order.dims}')\n", + "print(f' has_multi_dims: {cs.has_multi_dims}')\n", + "\n", + "# Get cluster order (works for both simple and multi-dim cases)\n", + "cluster_order = cs.get_cluster_order_for_slice()\n", + "print(f' cluster_order shape: {cluster_order.shape}')" + ] + }, + { + "cell_type": "markdown", + "id": "25", + "metadata": {}, + "source": [ + "## 9. Expanding Solutions Back to Full Resolution\n", "\n", - "print(f'ClusterResult type: {type(result).__name__}')\n", - "print(f'Timestep mapping shape: {result.timestep_mapping.shape}')\n", - "print(f'Representative weights shape: {result.representative_weights.shape}')" + "After optimization, `expand_solution()` uses the stored `timestep_mapping` to map typical period results back to the original time horizon:" ] }, { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "26", "metadata": {}, "outputs": [], "source": [ - "# The ClusterResult contains aggregated data\n", - "result = fs_clustered._cluster_info.result\n", - "\n", - "print('Aggregated data variables:')\n", - "if result.aggregated_data is not None:\n", - " for var_name in result.aggregated_data.data_vars:\n", - " shape = result.aggregated_data[var_name].shape\n", - " print(f' {var_name}: {shape}')" + "# Expand the solution\n", + "fs_expanded = fs_clustered.transform.expand_solution()\n", + "\n", + "print(f'Clustered timesteps: {len(fs_clustered.timesteps)}')\n", + "print(f'Expanded timesteps: {len(fs_expanded.timesteps)}')\n", + "print(f'Original timesteps: {len(flow_system.timesteps)}')" ] }, { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "27", "metadata": {}, "outputs": [], "source": [ - "# Show structure of original data (used for clustering)\n", - "result = fs_clustered._cluster_info.result\n", + "# The expanded solution has full time resolution\n", + "if 'Boiler(Q_th)|flow_rate' in fs_expanded.solution:\n", + " flow_clustered = fs_clustered.solution['Boiler(Q_th)|flow_rate']\n", + " flow_expanded = fs_expanded.solution['Boiler(Q_th)|flow_rate']\n", "\n", - "print('Original data used for clustering:')\n", - "if result.original_data is not None:\n", - " print(f'Shape: {dict(result.original_data.dims)}')\n", - " print(f'Variables: 
{list(result.original_data.data_vars)[:5]}...') # Show first 5" + " print(f'Clustered flow shape: {flow_clustered.shape}')\n", + " print(f'Expanded flow shape: {flow_expanded.shape}')" ] }, { "cell_type": "markdown", - "id": "19", + "id": "28", "metadata": {}, "source": [ - "### Visualizing Typical Periods vs Original Data" + "## 10. Visualizing Typical vs Original Data" ] }, { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "29", "metadata": {}, "outputs": [], "source": [ @@ -379,53 +548,42 @@ "original_demand = flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile.values\n", "clustered_demand = fs_clustered.components['HeatDemand'].inputs[0].fixed_relative_profile.values\n", "\n", - "# Get cluster structure info\n", - "info = fs_clustered._cluster_info\n", - "cs = info.result.cluster_structure\n", - "cluster_occurrences = dict(cs.cluster_occurrences)\n", - "\n", - "# Reshape original demand into days\n", - "timesteps_per_day = 96 # 15-minute resolution\n", + "# Reshape into days\n", + "timesteps_per_day = cs.timesteps_per_cluster\n", "n_days = len(original_demand) // timesteps_per_day\n", "original_by_day = original_demand[: n_days * timesteps_per_day].reshape(n_days, timesteps_per_day)\n", + "clustered_by_day = clustered_demand.reshape(cs.n_clusters, timesteps_per_day)\n", "\n", "# Create subplots\n", "fig = make_subplots(\n", " rows=2,\n", " cols=1,\n", - " subplot_titles=['Original: All 31 Days', f'Clustered: {cs.n_clusters} Typical Days'],\n", + " subplot_titles=[f'Original: All {n_days} Days', f'Clustered: {cs.n_clusters} Typical Days'],\n", " vertical_spacing=0.15,\n", ")\n", "\n", - "# Plot all original days (faded)\n", "hours = np.arange(timesteps_per_day) / 4 # Convert to hours\n", + "\n", + "# Plot all original days (faded)\n", "for day in range(n_days):\n", " fig.add_trace(\n", " go.Scatter(\n", - " x=hours,\n", - " y=original_by_day[day],\n", - " mode='lines',\n", - " line=dict(width=0.5, color='lightblue'),\n", - " showlegend=False,\n", - " hoverinfo='skip',\n", + " x=hours, y=original_by_day[day], mode='lines', line=dict(width=0.5, color='lightblue'), showlegend=False\n", " ),\n", " row=1,\n", " col=1,\n", " )\n", "\n", - "# Plot typical days (bold colors)\n", + "# Plot typical days\n", "colors = px.colors.qualitative.Set1\n", - "n_clusters = cs.n_clusters\n", - "clustered_by_day = clustered_demand.reshape(n_clusters, timesteps_per_day)\n", - "\n", - "for cluster_id in range(n_clusters):\n", - " weight = cluster_occurrences.get(cluster_id, cluster_occurrences.get(np.int32(cluster_id), 1))\n", + "for cluster_id in range(cs.n_clusters):\n", + " weight = int(cs.cluster_occurrences.sel(cluster=cluster_id).values)\n", " fig.add_trace(\n", " go.Scatter(\n", " x=hours,\n", " y=clustered_by_day[cluster_id],\n", " mode='lines',\n", - " name=f'Cluster {cluster_id} (×{weight})',\n", + " name=f'Cluster {cluster_id} (x{weight})',\n", " line=dict(width=2, color=colors[cluster_id % len(colors)]),\n", " ),\n", " row=2,\n", @@ -434,259 +592,46 @@ "\n", "fig.update_layout(height=600, title='Heat Demand: Original vs Typical Days')\n", "fig.update_xaxes(title_text='Hour of Day', row=2, col=1)\n", - "fig.update_yaxes(title_text='MW', row=1, col=1)\n", - "fig.update_yaxes(title_text='MW', row=2, col=1)\n", - "fig.show()" - ] - }, - { - "cell_type": "markdown", - "id": "21", - "metadata": {}, - "source": [ - "## 4. 
Storage Handling in Clustering\n", - "\n", - "Storage behavior across typical periods requires special handling:\n", - "\n", - "### Cyclic Constraint (`storage_cyclic=True`)\n", - "\n", - "When enabled (default), the storage state at the end of each typical period must equal \n", - "the state at the beginning. This prevents the optimizer from \"cheating\" by starting \n", - "with a full storage and ending empty.\n", - "\n", - "### Inter-Period Linking\n", - "\n", - "The `storage_inter_period_linking` option controls whether storage states are linked \n", - "across typical periods to simulate long-term storage behavior." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "22", - "metadata": {}, - "outputs": [], - "source": [ - "info = fs_clustered._cluster_info\n", - "\n", - "print('Storage settings:')\n", - "print(f' storage_cyclic: {info.storage_cyclic}')\n", - "print(f' storage_inter_cluster_linking: {info.storage_inter_cluster_linking}')\n", - "\n", - "# Show storage charge state in clustered solution\n", - "charge_state = fs_clustered.solution['Storage|charge_state']\n", - "print(f'\\nCharge state shape: {charge_state.shape}')\n", - "print(f'Initial charge: {charge_state.values[0]:.1f} MWh')\n", - "print(f'Final charge: {charge_state.values[-1]:.1f} MWh')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "23", - "metadata": {}, - "outputs": [], - "source": [ - "# Visualize storage behavior across typical periods\n", - "info = fs_clustered._cluster_info\n", - "cs = info.result.cluster_structure\n", - "cluster_occurrences = dict(cs.cluster_occurrences)\n", - "\n", - "fig = go.Figure()\n", - "\n", - "timesteps_per_day = cs.timesteps_per_cluster\n", - "charge_values = charge_state.values\n", - "\n", - "# Plot each typical day's storage trajectory\n", - "colors = px.colors.qualitative.Set1\n", - "for cluster_id in range(cs.n_clusters):\n", - " start_idx = cluster_id * timesteps_per_day\n", - " end_idx = start_idx + timesteps_per_day + 1 # Include endpoint\n", - "\n", - " if end_idx <= len(charge_values):\n", - " hours = np.arange(timesteps_per_day + 1) / 4\n", - " weight = cluster_occurrences.get(cluster_id, cluster_occurrences.get(np.int32(cluster_id), 1))\n", - "\n", - " fig.add_trace(\n", - " go.Scatter(\n", - " x=hours,\n", - " y=charge_values[start_idx:end_idx],\n", - " mode='lines',\n", - " name=f'Cluster {cluster_id} (×{weight})',\n", - " line=dict(width=2, color=colors[cluster_id % len(colors)]),\n", - " )\n", - " )\n", - "\n", - "fig.update_layout(\n", - " height=400,\n", - " title='Storage Charge State by Typical Period (Cyclic: Start = End)',\n", - " xaxis_title='Hour of Day',\n", - " yaxis_title='Charge State [MWh]',\n", - ")\n", + "fig.update_yaxes(title_text='MW')\n", "fig.show()" ] }, { "cell_type": "markdown", - "id": "24", - "metadata": {}, - "source": [ - "## 5. 
The `weights` Property: Unified Access\n", - "\n", - "The FlowSystem provides a unified `weights` property that combines all weighting factors\n", - "(aggregation weights, scenario weights, period weights) into a single xarray structure:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "25", - "metadata": {}, - "outputs": [], - "source": [ - "# The weights property provides unified access\n", - "weights = fs_clustered.weights\n", - "\n", - "print('FlowSystem weights structure:')\n", - "print(f' Type: {type(weights).__name__}')\n", - "print(f' temporal: {weights.temporal}')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "26", - "metadata": {}, - "outputs": [], - "source": [ - "# Compare weights for original vs clustered systems\n", - "print('Original system weights:')\n", - "print(f' temporal sum: {flow_system.weights.temporal.sum().item():.0f}')\n", - "\n", - "print('\\nClustered system weights:')\n", - "print(f' temporal sum: {fs_clustered.weights.temporal.sum().item():.0f}')\n", - "print(f' cluster_weight sum: {fs_clustered.cluster_weight.sum().item():.0f}')" - ] - }, - { - "cell_type": "markdown", - "id": "27", - "metadata": {}, - "source": [ - "## 6. Time Series Weights in Clustering\n", - "\n", - "You can influence which time series are prioritized during clustering using the `weights` parameter.\n", - "By default, all time series are weighted equally, but you may want to:\n", - "\n", - "- Give higher weight to demand profiles (more important to capture accurately)\n", - "- Give lower weight to price signals (less critical for sizing)\n", - "\n", - "### Automatic Weight Calculation\n", - "\n", - "flixopt automatically calculates weights based on `clustering_group` attributes to avoid\n", - "double-counting correlated time series:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "28", - "metadata": {}, - "outputs": [], - "source": [ - "# The time series used for clustering come from the FlowSystem's dataset\n", - "# The cluster() method extracts all time-varying data for clustering\n", - "info = fs_clustered._cluster_info\n", - "if info.result.original_data is not None:\n", - " ts_names = list(info.result.original_data.data_vars)[:10] # Show first 10\n", - " print('Time series used for clustering (first 10):')\n", - " for name in ts_names:\n", - " print(f' - {name}')" - ] - }, - { - "cell_type": "markdown", - "id": "29", - "metadata": {}, - "source": [ - "## 7. Peak Forcing: Ensuring Extreme Periods\n", - "\n", - "The `time_series_for_high_peaks` parameter forces inclusion of periods containing peak values.\n", - "This is critical for proper component sizing." 
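A hedged call-site sketch: the peak-forcing arguments are the ones used throughout this patch series; the exact profile names are taken from the notebooks above:

```python
# Sketch: forcing extreme days into the representative set
fs_peaky = flow_system.transform.cluster(
    n_clusters=8,
    cluster_duration='1D',
    # force the day with the highest heat demand into its own cluster
    time_series_for_high_peaks=['HeatDemand(Q_th)|fixed_relative_profile'],
    # likewise for the minimum-demand day
    time_series_for_low_peaks=['HeatDemand(Q_th)|fixed_relative_profile'],
)
```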
- ] - }, - { - "cell_type": "code", - "execution_count": null, "id": "30", "metadata": {}, - "outputs": [], - "source": [ - "# Find which cluster contains the peak demand day\n", - "info = fs_clustered._cluster_info\n", - "cs = info.result.cluster_structure\n", - "cluster_order = cs.cluster_order.values\n", - "cluster_occurrences = dict(cs.cluster_occurrences)\n", - "\n", - "original_demand = flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile.values\n", - "daily_max = original_demand.reshape(-1, 96).max(axis=1)\n", - "\n", - "peak_day = np.argmax(daily_max)\n", - "peak_cluster = cluster_order[peak_day]\n", - "peak_value = daily_max[peak_day]\n", - "\n", - "# Get weight for the peak cluster\n", - "peak_weight = cluster_occurrences.get(peak_cluster, cluster_occurrences.get(np.int32(peak_cluster), 1))\n", - "\n", - "print(f'Peak demand day: Day {peak_day + 1} (0-indexed: {peak_day})')\n", - "print(f'Peak value: {peak_value:.1f} MW')\n", - "print(f'Assigned to cluster: {peak_cluster}')\n", - "print(f'Cluster {peak_cluster} represents {peak_weight} day(s)')\n", - "\n", - "# The peak day should be in a cluster with weight 1 (unique)\n", - "if peak_weight == 1:\n", - " print('\\n✓ Peak day is isolated in its own cluster (weight=1) - good!')\n", - "else:\n", - " print(f'\\n⚠ Peak day shares cluster with {peak_weight - 1} other day(s)')" - ] - }, - { - "cell_type": "markdown", - "id": "31", - "metadata": {}, "source": [ "## Summary\n", "\n", - "You learned about the internal mechanics of clustering:\n", + "### Module Structure\n", "\n", - "1. **`_cluster_info`**: Contains all metadata for expansion and analysis\n", - "2. **Cluster weights**: Scale operational costs so each typical period represents its original days\n", - "3. **TSAM integration**: k-means clustering groups similar time periods\n", - "4. **Storage handling**: Cyclic constraints ensure realistic storage behavior\n", - "5. **Peak forcing**: Guarantees extreme periods are captured for proper sizing\n", + "```\n", + "flixopt.aggregation\n", + "├── ClusterStructure # Hierarchical: cluster_order, occurrences\n", + "├── ClusterResult # Container: timestep_mapping, weights\n", + "├── ClusterInfo # Stored on FlowSystem._cluster_info\n", + "└── InterClusterLinking # Storage SOC constraints\n", + "```\n", "\n", - "### Key Formulas\n", - "\n", - "**Weighted objective:**\n", - "$$\\text{Objective} = \\sum_{t \\in \\text{typical}} w_t \\cdot c_t$$\n", + "### Data Flow\n", "\n", - "**Weight conservation:**\n", - "$$\\sum_{t \\in \\text{typical}} w_t = |\\text{original timesteps}|$$\n", + "1. `cluster()` → Creates reduced FlowSystem + stores `ClusterInfo`\n", + "2. `optimize()` → `InterClusterLinking` adds storage constraints\n", + "3. 
`expand_solution()` → Uses `timestep_mapping` to restore full time\n", "\n", - "### When to Customize\n", + "### Key Formulas\n", "\n", - "| Scenario | Solution |\n", - "|----------|----------|\n", - "| Peak days not captured | Add `time_series_for_high_peaks` |\n", - "| Minimum periods important | Add `time_series_for_low_peaks` |\n", - "| Specific profiles more important | Use custom `weights` dict |\n", - "| Storage behaves unrealistically | Check `storage_cyclic` setting |" + "| Formula | Description |\n", + "|---------|-------------|\n", + "| $\\sum w_t \\cdot c_t$ | Weighted objective function |\n", + "| $\\sum w_t = N_{\\text{original}}$ | Weight conservation |\n", + "| $SOC_{d+1} = SOC_d + \\Delta SOC_{c[d]}$ | Inter-cluster storage linking |" ] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, From 04a8030dcd75e863c96a6ec599dffefba80450f8 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 09:18:40 +0100 Subject: [PATCH 079/191] Improve plotting --- flixopt/aggregation/base.py | 191 ++++++++++++++++++++++++++++++++++ flixopt/transform_accessor.py | 1 + 2 files changed, 192 insertions(+) diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index 66f13942a..144ebb405 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -152,6 +152,65 @@ def get_cluster_weight_per_timestep(self) -> xr.DataArray: name='cluster_weight', ) + def plot(self, show: bool | None = None): + """Plot cluster assignment visualization. + + Shows which cluster each original period belongs to, and the + number of occurrences per cluster. + + Args: + show: Whether to display the figure. Defaults to CONFIG.Plotting.default_show. + + Returns: + PlotResult containing the figure and underlying data. + """ + import plotly.express as px + + from ..config import CONFIG + from ..plot_result import PlotResult + + n_clusters = ( + int(self.n_clusters) if isinstance(self.n_clusters, (int, np.integer)) else int(self.n_clusters.values) + ) + + # Create DataFrame for plotting + import pandas as pd + + cluster_order = self.get_cluster_order_for_slice() + df = pd.DataFrame( + { + 'Original Period': range(1, len(cluster_order) + 1), + 'Cluster': cluster_order, + } + ) + + # Bar chart showing cluster assignment + fig = px.bar( + df, + x='Original Period', + y=[1] * len(df), + color='Cluster', + color_continuous_scale='Viridis', + title=f'Cluster Assignment ({self.n_original_periods} periods → {n_clusters} clusters)', + ) + fig.update_layout(yaxis_visible=False, coloraxis_colorbar_title='Cluster') + + # Build data for PlotResult + data = xr.Dataset( + { + 'cluster_order': self.cluster_order, + 'cluster_occurrences': self.cluster_occurrences, + } + ) + plot_result = PlotResult(data=data, figure=fig) + + if show is None: + show = CONFIG.Plotting.default_show + if show: + plot_result.show() + + return plot_result + @dataclass class ClusterResult: @@ -278,6 +337,102 @@ def validate(self) -> None: stacklevel=2, ) + def plot(self, colormap: str | None = None, show: bool | None = None): + """Plot original vs aggregated data comparison. + + Convenience method that calls plot_aggregation() on this result. + + Args: + colormap: Colorscale name for the time series colors. + show: Whether to display the figure. + + Returns: + PlotResult containing the comparison figure and underlying data. 
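Taken together, a usage sketch of the plotting helpers this patch introduces (the `_cluster_info` attribute is the name at this point in the series; it is made public in a later patch):

```python
# Usage sketch for the new plotting helpers (FlowSystem setup assumed)
fs_clustered = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D')
info = fs_clustered._cluster_info  # renamed to .cluster_info in a later patch

info.result.cluster_structure.plot()  # bar chart of cluster assignments
info.plot()                           # original vs aggregated comparison
info.plot_typical_periods()           # one profile per typical period
```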
+ """ + return plot_aggregation(self, colormap=colormap, show=show) + + def plot_typical_periods(self, variable: str | None = None, show: bool | None = None): + """Plot each cluster's typical period profile. + + Shows each cluster as a separate subplot with its occurrence count + in the title. Useful for understanding what each cluster represents. + + Args: + variable: Variable to plot. If None, plots the first available variable. + show: Whether to display the figure. Defaults to CONFIG.Plotting.default_show. + + Returns: + PlotResult containing the figure and underlying data. + """ + from plotly.subplots import make_subplots + + from ..config import CONFIG + from ..plot_result import PlotResult + + if self.aggregated_data is None or self.cluster_structure is None: + raise ValueError('ClusterResult must contain aggregated_data and cluster_structure for this plot') + + cs = self.cluster_structure + n_clusters = int(cs.n_clusters) if isinstance(cs.n_clusters, (int, np.integer)) else int(cs.n_clusters.values) + + # Select variable + variables = list(self.aggregated_data.data_vars) + if variable is None: + variable = variables[0] + elif variable not in variables: + raise ValueError(f'Variable {variable} not found. Available: {variables}') + + data = self.aggregated_data[variable].values + + # Reshape to [n_clusters, timesteps_per_cluster] + data_by_cluster = data.reshape(n_clusters, cs.timesteps_per_cluster) + + # Create subplots + n_cols = min(4, n_clusters) + n_rows = (n_clusters + n_cols - 1) // n_cols + fig = make_subplots( + rows=n_rows, + cols=n_cols, + subplot_titles=[ + f'Cluster {c} (×{int(cs.cluster_occurrences.sel(cluster=c).values)})' for c in range(n_clusters) + ], + ) + + x = np.arange(cs.timesteps_per_cluster) + for c in range(n_clusters): + row = c // n_cols + 1 + col = c % n_cols + 1 + fig.add_trace( + {'type': 'scatter', 'x': x, 'y': data_by_cluster[c], 'mode': 'lines', 'showlegend': False}, + row=row, + col=col, + ) + + fig.update_layout( + title=f'Typical Periods: {variable}', + height=200 * n_rows, + ) + + # Build data for PlotResult + result_data = xr.Dataset( + { + 'typical_periods': xr.DataArray( + data_by_cluster, + dims=['cluster', 'timestep'], + coords={'cluster': range(n_clusters), 'timestep': range(cs.timesteps_per_cluster)}, + ), + 'occurrences': cs.cluster_occurrences, + } + ) + plot_result = PlotResult(data=result_data, figure=fig) + + if show is None: + show = CONFIG.Plotting.default_show + if show: + plot_result.show() + + return plot_result + @dataclass class ClusterInfo: @@ -303,6 +458,42 @@ class ClusterInfo: storage_inter_cluster_linking: bool = True storage_cyclic: bool = True + def plot(self, colormap: str | None = None, show: bool | None = None): + """Plot original vs aggregated data comparison. + + Convenience method that calls plot_aggregation() on the result. + + Args: + colormap: Colorscale name for the time series colors. + show: Whether to display the figure. + + Returns: + PlotResult containing the comparison figure and underlying data. + + Example: + >>> fs_clustered = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D') + >>> fs_clustered._cluster_info.plot() + """ + return self.result.plot(colormap=colormap, show=show) + + def plot_typical_periods(self, variable: str | None = None, show: bool | None = None): + """Plot each cluster's typical period profile. + + Convenience method that calls plot_typical_periods() on the result. + + Args: + variable: Variable to plot. If None, plots the first available variable. 
+ show: Whether to display the figure. + + Returns: + PlotResult containing the figure and underlying data. + + Example: + >>> fs_clustered = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D') + >>> fs_clustered._cluster_info.plot_typical_periods() + """ + return self.result.plot_typical_periods(variable=variable, show=show) + def create_cluster_structure_from_mapping( timestep_mapping: xr.DataArray, diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 76806705d..f2978d034 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -873,6 +873,7 @@ def _build_cluster_occurrences_for_key(key: tuple) -> np.ndarray: representative_weights=timestep_weights.rename('representative_weights'), cluster_structure=cluster_structure, original_data=ds, + aggregated_data=ds_new, ) reduced_fs._cluster_info = ClusterInfo( From 486929a51d614e121d129af8e85b06b6e3b6fcdb Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 09:29:28 +0100 Subject: [PATCH 080/191] Improve plotting --- flixopt/aggregation/base.py | 162 ++++++++++++++++++++---------------- 1 file changed, 89 insertions(+), 73 deletions(-) diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index 144ebb405..3f360094b 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -340,16 +340,95 @@ def validate(self) -> None: def plot(self, colormap: str | None = None, show: bool | None = None): """Plot original vs aggregated data comparison. - Convenience method that calls plot_aggregation() on this result. + Visualizes the original time series (dashed lines) overlaid with + the aggregated/clustered time series (solid lines) for comparison. + Constants (time-invariant variables) are excluded from the plot. Args: colormap: Colorscale name for the time series colors. + Defaults to CONFIG.Plotting.default_qualitative_colorscale. show: Whether to display the figure. + Defaults to CONFIG.Plotting.default_show. Returns: PlotResult containing the comparison figure and underlying data. 
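The implementation that follows first filters out constant (time-invariant) series; a self-contained sketch of that idiom on toy data:

```python
# Sketch of the constant-filtering idiom used below (toy dataset)
import numpy as np
import xarray as xr

ds = xr.Dataset(
    {
        'demand': ('time', np.sin(np.linspace(0, 6, 24))),  # time-varying -> kept
        'price': ('time', np.full(24, 42.0)),               # constant -> dropped
    }
)
time_vars = [
    name
    for name in ds.data_vars
    if 'time' in ds[name].dims and not np.isclose(ds[name].min(), ds[name].max())
]
print(time_vars)  # ['demand']
```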
""" - return plot_aggregation(self, colormap=colormap, show=show) + import plotly.express as px + + from ..color_processing import process_colors + from ..config import CONFIG + from ..plot_result import PlotResult + + if self.original_data is None or self.aggregated_data is None: + raise ValueError('ClusterResult must contain both original_data and aggregated_data for plotting') + + # Filter to only time-varying variables (exclude constants) + time_vars = [ + name + for name in self.original_data.data_vars + if 'time' in self.original_data[name].dims + and not np.isclose(self.original_data[name].min(), self.original_data[name].max()) + ] + if not time_vars: + raise ValueError('No time-varying variables found in original_data') + + original_filtered = self.original_data[time_vars] + aggregated_filtered = self.aggregated_data[time_vars] + + # Convert xarray to DataFrames + original_df = original_filtered.to_dataframe() + aggregated_df = aggregated_filtered.to_dataframe() + + # Expand aggregated data to original length using mapping + mapping = self.timestep_mapping.values + expanded_agg = aggregated_df.iloc[mapping].reset_index(drop=True) + + # Rename for legend + original_df = original_df.rename(columns={col: f'Original - {col}' for col in original_df.columns}) + expanded_agg = expanded_agg.rename(columns={col: f'Aggregated - {col}' for col in expanded_agg.columns}) + + colors = list( + process_colors( + colormap or CONFIG.Plotting.default_qualitative_colorscale, list(original_df.columns) + ).values() + ) + + # Create line plot for original data (dashed) + original_df = original_df.reset_index() + index_name = original_df.columns[0] + df_org_long = original_df.melt(id_vars=index_name, var_name='variable', value_name='value') + fig = px.line(df_org_long, x=index_name, y='value', color='variable', color_discrete_sequence=colors) + for trace in fig.data: + trace.update(line=dict(dash='dash')) + + # Add aggregated data (solid lines) + expanded_agg[index_name] = original_df[index_name] + df_agg_long = expanded_agg.melt(id_vars=index_name, var_name='variable', value_name='value') + fig2 = px.line(df_agg_long, x=index_name, y='value', color='variable', color_discrete_sequence=colors) + for trace in fig2.data: + fig.add_trace(trace) + + fig.update_layout( + title='Original vs Aggregated Data (original = ---)', + xaxis_title='Time', + yaxis_title='Value', + ) + + # Build xarray Dataset with both original and aggregated data + data = xr.Dataset( + { + 'original': original_filtered.to_array(dim='variable'), + 'aggregated': aggregated_filtered.to_array(dim='variable'), + } + ) + plot_result = PlotResult(data=data, figure=fig) + + if show is None: + show = CONFIG.Plotting.default_show + if show: + plot_result.show() + + return plot_result def plot_typical_periods(self, variable: str | None = None, show: bool | None = None): """Plot each cluster's typical period profile. @@ -461,11 +540,13 @@ class ClusterInfo: def plot(self, colormap: str | None = None, show: bool | None = None): """Plot original vs aggregated data comparison. - Convenience method that calls plot_aggregation() on the result. + Convenience method that calls result.plot(). Args: colormap: Colorscale name for the time series colors. + Defaults to CONFIG.Plotting.default_qualitative_colorscale. show: Whether to display the figure. + Defaults to CONFIG.Plotting.default_show. Returns: PlotResult containing the comparison figure and underlying data. 
@@ -479,11 +560,12 @@ def plot(self, colormap: str | None = None, show: bool | None = None): def plot_typical_periods(self, variable: str | None = None, show: bool | None = None): """Plot each cluster's typical period profile. - Convenience method that calls plot_typical_periods() on the result. + Convenience method that calls result.plot_typical_periods(). Args: variable: Variable to plot. If None, plots the first available variable. show: Whether to display the figure. + Defaults to CONFIG.Plotting.default_show. Returns: PlotResult containing the figure and underlying data. @@ -552,81 +634,15 @@ def plot_aggregation( ): """Plot original vs aggregated data comparison. - Visualizes the original time series (dashed lines) overlaid with - the aggregated/clustered time series (solid lines) for comparison. + .. deprecated:: + Use ``result.plot()`` directly instead. Args: result: ClusterResult containing original and aggregated data. colormap: Colorscale name for the time series colors. - Defaults to CONFIG.Plotting.default_qualitative_colorscale. show: Whether to display the figure. - Defaults to CONFIG.Plotting.default_show. Returns: PlotResult containing the comparison figure and underlying data. - - Example: - >>> fs_clustered = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D') - >>> plot_aggregation(fs_clustered._cluster_info.result) """ - import plotly.express as px - - from ..color_processing import process_colors - from ..config import CONFIG - from ..plot_result import PlotResult - - if result.original_data is None or result.aggregated_data is None: - raise ValueError('ClusterResult must contain both original_data and aggregated_data for plotting') - - # Convert xarray to DataFrames - original_df = result.original_data.to_dataframe() - aggregated_df = result.aggregated_data.to_dataframe() - - # Expand aggregated data to original length using mapping - mapping = result.timestep_mapping.values - expanded_agg = aggregated_df.iloc[mapping].reset_index(drop=True) - - # Rename for legend - original_df = original_df.rename(columns={col: f'Original - {col}' for col in original_df.columns}) - expanded_agg = expanded_agg.rename(columns={col: f'Aggregated - {col}' for col in expanded_agg.columns}) - - colors = list( - process_colors(colormap or CONFIG.Plotting.default_qualitative_colorscale, list(original_df.columns)).values() - ) - - # Create line plot for original data (dashed) - original_df = original_df.reset_index() - index_name = original_df.columns[0] - df_org_long = original_df.melt(id_vars=index_name, var_name='variable', value_name='value') - fig = px.line(df_org_long, x=index_name, y='value', color='variable', color_discrete_sequence=colors) - for trace in fig.data: - trace.update(line=dict(dash='dash')) - - # Add aggregated data (solid lines) - expanded_agg[index_name] = original_df[index_name] - df_agg_long = expanded_agg.melt(id_vars=index_name, var_name='variable', value_name='value') - fig2 = px.line(df_agg_long, x=index_name, y='value', color='variable', color_discrete_sequence=colors) - for trace in fig2.data: - fig.add_trace(trace) - - fig.update_layout( - title='Original vs Aggregated Data (original = ---)', - xaxis_title='Time', - yaxis_title='Value', - ) - - # Build xarray Dataset with both original and aggregated data - data = xr.Dataset( - { - 'original': result.original_data.to_array(dim='variable'), - 'aggregated': result.aggregated_data.to_array(dim='variable'), - } - ) - plot_result = PlotResult(data=data, figure=fig) - - if show is None: - show = 
CONFIG.Plotting.default_show - if show: - plot_result.show() - - return plot_result + return result.plot(colormap=colormap, show=show) From 4d6dcc032e67615059b3bdb5ae6ad2e96fd851c2 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 09:33:39 +0100 Subject: [PATCH 081/191] Improve repr --- flixopt/aggregation/base.py | 46 +++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index 3f360094b..2daa0e1ec 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -78,6 +78,19 @@ def __post_init__(self): elif self.cluster_occurrences.name is None: self.cluster_occurrences = self.cluster_occurrences.rename('cluster_occurrences') + def __repr__(self) -> str: + n_clusters = ( + int(self.n_clusters) if isinstance(self.n_clusters, (int, np.integer)) else int(self.n_clusters.values) + ) + occ = [int(self.cluster_occurrences.sel(cluster=c).values) for c in range(n_clusters)] + return ( + f'ClusterStructure(\n' + f' {self.n_original_periods} original periods → {n_clusters} clusters\n' + f' timesteps_per_cluster={self.timesteps_per_cluster}\n' + f' occurrences={occ}\n' + f')' + ) + @property def n_original_periods(self) -> int: """Number of original periods (before clustering).""" @@ -268,6 +281,22 @@ def __post_init__(self): elif self.representative_weights.name is None: self.representative_weights = self.representative_weights.rename('representative_weights') + def __repr__(self) -> str: + n_rep = ( + int(self.n_representatives) + if isinstance(self.n_representatives, (int, np.integer)) + else int(self.n_representatives.values) + ) + has_structure = self.cluster_structure is not None + has_data = self.original_data is not None and self.aggregated_data is not None + return ( + f'ClusterResult(\n' + f' {self.n_original_timesteps} original → {n_rep} representative timesteps\n' + f' weights sum={float(self.representative_weights.sum().values):.0f}\n' + f' cluster_structure={has_structure}, data={has_data}\n' + f')' + ) + @property def n_original_timesteps(self) -> int: """Number of original timesteps (before aggregation).""" @@ -537,6 +566,23 @@ class ClusterInfo: storage_inter_cluster_linking: bool = True storage_cyclic: bool = True + def __repr__(self) -> str: + cs = self.result.cluster_structure + if cs is not None: + n_clusters = ( + int(cs.n_clusters) if isinstance(cs.n_clusters, (int, np.integer)) else int(cs.n_clusters.values) + ) + structure_info = f'{cs.n_original_periods} periods → {n_clusters} clusters' + else: + structure_info = 'no structure' + return ( + f'ClusterInfo(\n' + f' backend={self.backend_name!r}\n' + f' {structure_info}\n' + f' storage_linking={self.storage_inter_cluster_linking}, cyclic={self.storage_cyclic}\n' + f')' + ) + def plot(self, colormap: str | None = None, show: bool | None = None): """Plot original vs aggregated data comparison. 
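With illustrative numbers (not captured output), the `ClusterStructure.__repr__` defined above renders roughly as:

```
ClusterStructure(
  31 original periods → 8 clusters
  timesteps_per_cluster=96
  occurrences=[7, 6, 5, 4, 4, 3, 1, 1]
)
```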
From 33dd89bd4f8adb2978acdd631ce331830e1d4b5d Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 09:40:13 +0100 Subject: [PATCH 082/191] Simplify notebook --- docs/notebooks/08e-clustering-internals.ipynb | 533 ++---------------- 1 file changed, 56 insertions(+), 477 deletions(-) diff --git a/docs/notebooks/08e-clustering-internals.ipynb b/docs/notebooks/08e-clustering-internals.ipynb index 157b54f5a..274927b17 100644 --- a/docs/notebooks/08e-clustering-internals.ipynb +++ b/docs/notebooks/08e-clustering-internals.ipynb @@ -5,18 +5,9 @@ "id": "0", "metadata": {}, "source": [ - "# Clustering Internals: Architecture and Data Structures\n", + "# Clustering Internals\n", "\n", - "A deep dive into how time series clustering works under the hood.\n", - "\n", - "This notebook covers:\n", - "\n", - "- **Module overview**: The `flixopt.aggregation` module and its classes\n", - "- **Data flow**: From `cluster()` through optimization to `expand_solution()`\n", - "- **Core classes**: `ClusterStructure`, `ClusterResult`, `ClusterInfo`\n", - "- **Cluster weights**: How operational costs are scaled correctly\n", - "- **Storage linking**: Inter-cluster constraints for realistic storage behavior\n", - "- **Multi-dimensional support**: Handling periods and scenarios\n", + "Understanding the data structures behind time series clustering.\n", "\n", "!!! note \"Prerequisites\"\n", " This notebook assumes familiarity with [08c-clustering](08c-clustering.ipynb)." @@ -31,24 +22,10 @@ "source": [ "from pathlib import Path\n", "\n", - "import numpy as np\n", - "import pandas as pd\n", - "import plotly.express as px\n", - "import plotly.graph_objects as go\n", - "from plotly.subplots import make_subplots\n", - "\n", "import flixopt as fx\n", "\n", - "fx.CONFIG.notebook()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "2", - "metadata": {}, - "outputs": [], - "source": [ + "fx.CONFIG.notebook()\n", + "\n", "# Load the district heating system\n", "data_file = Path('data/district_heating_system.nc4')\n", "if not data_file.exists():\n", @@ -57,581 +34,183 @@ " fs = create_district_heating_system()\n", " fs.to_netcdf(data_file)\n", "\n", - "flow_system = fx.FlowSystem.from_netcdf(data_file)\n", - "print(f'Loaded: {len(flow_system.timesteps)} timesteps ({len(flow_system.timesteps) / 96:.0f} days)')" + "flow_system = fx.FlowSystem.from_netcdf(data_file)" ] }, { "cell_type": "markdown", - "id": "3", - "metadata": {}, - "source": [ - "## 1. 
Module Architecture Overview\n", - "\n", - "The clustering functionality lives in `flixopt.aggregation` with this structure:\n", - "\n", - "```\n", - "flixopt.aggregation/\n", - "├── base.py # Core dataclasses: ClusterStructure, ClusterResult, ClusterInfo\n", - "├── storage_linking.py # InterClusterLinking for storage constraints\n", - "└── __init__.py # Public exports\n", - "```\n", - "\n", - "### Key Classes\n", - "\n", - "| Class | Purpose |\n", - "|-------|--------|\n", - "| `ClusterStructure` | Hierarchical structure: which original periods map to which clusters |\n", - "| `ClusterResult` | Universal container: timestep mapping, weights, aggregated data |\n", - "| `ClusterInfo` | Stored on FlowSystem after clustering; enables `expand_solution()` |\n", - "| `InterClusterLinking` | Adds storage SOC constraints across the original time horizon |" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "4", - "metadata": {}, - "outputs": [], - "source": [ - "# Import the aggregation module to explore its contents\n", - "from flixopt import aggregation\n", - "\n", - "print('Available in flixopt.aggregation:')\n", - "print([name for name in dir(aggregation) if not name.startswith('_')])" - ] - }, - { - "cell_type": "markdown", - "id": "5", + "id": "2", "metadata": {}, "source": [ - "## 2. Data Flow: From `cluster()` to `expand_solution()`\n", + "## Clustering and ClusterInfo\n", "\n", - "```\n", - "┌─────────────────────────────────────────────────────────────────┐\n", - "│ flow_system.transform.cluster(n_clusters=8, ...) │\n", - "└─────────────────────────────────────────────────────────────────┘\n", - " │\n", - " ▼\n", - "┌─────────────────────────────────────────────────────────────────┐\n", - "│ 1. Extract time series data from FlowSystem │\n", - "│ 2. Call tsam for clustering │\n", - "│ 3. Build ClusterStructure (cluster_order, occurrences) │\n", - "│ 4. Build ClusterResult (timestep_mapping, weights) │\n", - "│ 5. Create reduced FlowSystem with representative timesteps │\n", - "│ 6. Store ClusterInfo on reduced_fs._cluster_info │\n", - "└─────────────────────────────────────────────────────────────────┘\n", - " │\n", - " ▼\n", - "┌─────────────────────────────────────────────────────────────────┐\n", - "│ reduced_fs.optimize(solver) │\n", - "│ └─ InterClusterLinking adds storage constraints if enabled │\n", - "└─────────────────────────────────────────────────────────────────┘\n", - " │\n", - " ▼\n", - "┌─────────────────────────────────────────────────────────────────┐\n", - "│ reduced_fs.transform.expand_solution() │\n", - "│ └─ Uses stored timestep_mapping to expand back to full time │\n", - "└─────────────────────────────────────────────────────────────────┘\n", - "```" + "After calling `cluster()`, metadata is stored in `fs._cluster_info`:" ] }, { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "3", "metadata": {}, "outputs": [], "source": [ - "# Create a clustered system\n", "fs_clustered = flow_system.transform.cluster(\n", " n_clusters=8,\n", " cluster_duration='1D',\n", " time_series_for_high_peaks=['HeatDemand(Q_th)|fixed_relative_profile'],\n", ")\n", "\n", - "print(f'Original timesteps: {len(flow_system.timesteps)}')\n", - "print(f'Clustered timesteps: {len(fs_clustered.timesteps)}')\n", - "print(f'Reduction: {len(flow_system.timesteps) / len(fs_clustered.timesteps):.1f}x')" - ] - }, - { - "cell_type": "markdown", - "id": "7", - "metadata": {}, - "source": [ - "## 3. 
The `ClusterInfo` Structure\n", - "\n", - "After clustering, metadata is stored in `fs._cluster_info`. This enables:\n", - "- Expanding solutions back to full resolution\n", - "- Understanding which original days map to which clusters\n", - "- Correct weighting in the objective function" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [ - "info = fs_clustered._cluster_info\n", - "\n", - "print('ClusterInfo attributes:')\n", - "print(f' backend_name: {info.backend_name}')\n", - "print(f' storage_inter_cluster_linking: {info.storage_inter_cluster_linking}')\n", - "print(f' storage_cyclic: {info.storage_cyclic}')\n", - "print(f' original_flow_system: {type(info.original_flow_system).__name__}')\n", - "print(f' result: {type(info.result).__name__}')" + "fs_clustered._cluster_info" ] }, { "cell_type": "markdown", - "id": "9", + "id": "4", "metadata": {}, "source": [ - "## 4. The `ClusterStructure`: Hierarchical Mapping\n", - "\n", - "The `ClusterStructure` captures which original periods (days) belong to which clusters:\n", - "\n", - "- **`cluster_order`**: Array mapping each original period index to its cluster ID\n", - "- **`cluster_occurrences`**: How many original periods each cluster represents\n", - "- **`n_clusters`**: Number of representative clusters\n", - "- **`timesteps_per_cluster`**: Timesteps in each cluster (e.g., 96 for daily with 15-min resolution)" + "The `ClusterInfo` contains:\n", + "- **`result`**: A `ClusterResult` with timestep mapping and weights\n", + "- **`result.cluster_structure`**: A `ClusterStructure` with cluster assignments" ] }, { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "5", "metadata": {}, "outputs": [], "source": [ - "cs = info.result.cluster_structure\n", - "\n", - "print('ClusterStructure:')\n", - "print(f' n_clusters: {cs.n_clusters}')\n", - "print(f' timesteps_per_cluster: {cs.timesteps_per_cluster}')\n", - "print(f' n_original_periods: {cs.n_original_periods}')\n", - "print(f' cluster_order dims: {cs.cluster_order.dims}')\n", - "print(f' cluster_order shape: {cs.cluster_order.shape}')" + "fs_clustered._cluster_info.result" ] }, { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "6", "metadata": {}, "outputs": [], "source": [ - "# cluster_order shows which cluster each original day belongs to\n", - "cluster_order = cs.cluster_order.values\n", - "\n", - "print('Cluster assignments (first 14 days):')\n", - "for day in range(min(14, len(cluster_order))):\n", - " print(f' Day {day + 1:2d} → Cluster {cluster_order[day]}')" + "fs_clustered._cluster_info.result.cluster_structure" ] }, { - "cell_type": "code", - "execution_count": null, - "id": "12", + "cell_type": "markdown", + "id": "7", "metadata": {}, - "outputs": [], "source": [ - "# cluster_occurrences shows how many original days each cluster represents\n", - "print('Cluster occurrences (days per cluster):')\n", - "for cluster_id in range(cs.n_clusters):\n", - " count = int(cs.cluster_occurrences.sel(cluster=cluster_id).values)\n", - " print(f' Cluster {cluster_id}: {count} day(s)')\n", + "## Visualizing Clustering\n", "\n", - "print(f'\\nTotal: {int(cs.cluster_occurrences.sum().values)} days')" + "Built-in plot methods show how original periods map to clusters:" ] }, { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "8", "metadata": {}, "outputs": [], "source": [ - "# Visualize cluster assignment\n", - "days_df = pd.DataFrame(\n", - " {\n", - " 'Day': range(1, 
cs.n_original_periods + 1),\n", - " 'Cluster': cluster_order,\n", - " }\n", - ")\n", - "\n", - "fig = px.bar(\n", - " days_df,\n", - " x='Day',\n", - " y=[1] * len(days_df),\n", - " color='Cluster',\n", - " color_continuous_scale='Viridis',\n", - " title='Cluster Assignment by Day',\n", - ")\n", - "fig.update_layout(height=250, yaxis_visible=False, coloraxis_colorbar_title='Cluster')\n", - "fig.show()" - ] - }, - { - "cell_type": "markdown", - "id": "14", - "metadata": {}, - "source": [ - "## 5. The `ClusterResult`: Timestep Mapping and Weights\n", - "\n", - "The `ClusterResult` contains:\n", - "\n", - "- **`timestep_mapping`**: Maps each original timestep to its representative timestep index\n", - "- **`representative_weights`**: Weight for each representative timestep (used as `cluster_weight`)\n", - "- **`cluster_structure`**: Reference to the hierarchical structure\n", - "- **`original_data`**: The time series data used for clustering" + "# Which original period belongs to which cluster?\n", + "fs_clustered._cluster_info.result.cluster_structure.plot()" ] }, { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "9", "metadata": {}, "outputs": [], "source": [ - "result = info.result\n", - "\n", - "print('ClusterResult:')\n", - "print(f' n_representatives: {result.n_representatives}')\n", - "print(f' timestep_mapping dims: {result.timestep_mapping.dims}')\n", - "print(f' timestep_mapping shape: {result.timestep_mapping.shape}')\n", - "print(f' representative_weights: {result.representative_weights.shape}')" + "# What does each cluster's typical profile look like?\n", + "fs_clustered._cluster_info.plot_typical_periods('HeatDemand(Q_th)|fixed_relative_profile')" ] }, { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "10", "metadata": {}, "outputs": [], "source": [ - "# The timestep_mapping shows which representative timestep each original timestep maps to\n", - "mapping = result.timestep_mapping.values\n", - "\n", - "print('Timestep mapping (first 10 original timesteps):')\n", - "for t in range(10):\n", - " print(f' Original t={t} → Representative t={mapping[t]}')\n", - "\n", - "print(f'\\n... (total {len(mapping)} mappings)')" + "# How well does the aggregated data match the original?\n", + "fs_clustered._cluster_info.plot()" ] }, { "cell_type": "markdown", - "id": "17", + "id": "11", "metadata": {}, "source": [ - "## 6. 
Cluster Weights: Scaling Operational Costs\n", + "## Cluster Weights\n", "\n", - "When optimizing over typical periods, operational costs must be **scaled** to represent the full time horizon.\n", - "\n", - "### The Weight Formula\n", + "Each representative timestep has a weight equal to the number of original periods it represents.\n", + "This ensures operational costs scale correctly:\n", "\n", "$$\\text{Objective} = \\sum_{t \\in \\text{typical}} w_t \\cdot c_t$$\n", "\n", - "Where:\n", - "- $w_t$ = cluster weight for timestep $t$ (number of original days this cluster represents)\n", - "- $c_t$ = operational cost at timestep $t$\n", - "\n", - "### Weight Conservation\n", - "\n", - "$$\\sum_{t \\in \\text{typical}} w_t = |\\text{original timesteps}|$$" + "The weights sum to the original timestep count:" ] }, { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "12", "metadata": {}, "outputs": [], "source": [ - "# The cluster_weight is stored on the FlowSystem\n", - "print('cluster_weight on FlowSystem:')\n", - "print(f' Shape: {fs_clustered.cluster_weight.shape}')\n", - "print(f' Sum: {fs_clustered.cluster_weight.sum().item():.0f}')\n", - "print(f' Expected (original timesteps): {len(flow_system.timesteps)}')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "19", - "metadata": {}, - "outputs": [], - "source": [ - "# Visualize weights across timesteps\n", - "weights = fs_clustered.cluster_weight.values\n", - "\n", - "fig = go.Figure()\n", - "fig.add_trace(\n", - " go.Scatter(\n", - " x=list(range(len(weights))),\n", - " y=weights,\n", - " mode='lines',\n", - " name='Cluster Weight',\n", - " )\n", - ")\n", - "\n", - "# Add vertical lines at cluster boundaries\n", - "for i in range(1, cs.n_clusters):\n", - " fig.add_vline(x=i * cs.timesteps_per_cluster, line_dash='dash', line_color='gray', opacity=0.5)\n", - "\n", - "fig.update_layout(\n", - " height=300,\n", - " title='Cluster Weight per Timestep',\n", - " xaxis_title='Timestep Index',\n", - " yaxis_title='Weight',\n", - ")\n", - "fig.show()" + "print(f'Sum of weights: {fs_clustered.cluster_weight.sum().item():.0f}')\n", + "print(f'Original timesteps: {len(flow_system.timesteps)}')" ] }, { "cell_type": "markdown", - "id": "20", + "id": "13", "metadata": {}, "source": [ - "## 7. Storage Inter-Cluster Linking\n", - "\n", - "Storage behavior requires special handling in clustering. The `InterClusterLinking` class:\n", + "## Solution Expansion\n", "\n", - "1. Creates **SOC_boundary** variables for each original period boundary\n", - "2. Computes **delta_SOC** for each representative period (change in SOC)\n", - "3. Links them: `SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]]`\n", - "4. Optionally enforces cyclic constraint: `SOC_boundary[0] = SOC_boundary[end]`\n", - "\n", - "This tracks storage state across the **full original time horizon** while only solving for representative periods." 
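A numeric sketch of that recurrence, `SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]]`, with assumed values:

```python
# Sketch: inter-cluster SOC bookkeeping (all values assumed)
import numpy as np

cluster_order = np.array([0, 1, 1, 0, 2])  # cluster of each original day
delta_soc = {0: +4.0, 1: -1.5, 2: -1.0}    # net SOC change per representative day

soc_boundary = [0.0]
for d, c in enumerate(cluster_order):
    soc_boundary.append(soc_boundary[d] + delta_soc[c])

print(soc_boundary)  # [0.0, 4.0, 2.5, 1.0, 5.0, 4.0]
# a cyclic variant would additionally require soc_boundary[0] == soc_boundary[-1]
```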
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "21", - "metadata": {}, - "outputs": [], - "source": [ - "print('Storage settings in ClusterInfo:')\n", - "print(f' storage_inter_cluster_linking: {info.storage_inter_cluster_linking}')\n", - "print(f' storage_cyclic: {info.storage_cyclic}')" + "After optimization, `expand_solution()` maps results back to full resolution:" ] }, { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "14", "metadata": {}, "outputs": [], "source": [ - "# Optimize and examine storage behavior\n", "solver = fx.solvers.HighsSolver(mip_gap=0.01, log_to_console=False)\n", "fs_clustered.optimize(solver)\n", "\n", - "# Check storage charge state\n", - "if 'Storage|charge_state' in fs_clustered.solution:\n", - " charge_state = fs_clustered.solution['Storage|charge_state']\n", - " print(f'Charge state shape: {charge_state.shape}')\n", - " print(f'Initial charge: {charge_state.values[0]:.1f} MWh')\n", - " print(f'Final charge: {charge_state.values[-1]:.1f} MWh')\n", - "else:\n", - " print('No storage in this system')" - ] - }, - { - "cell_type": "markdown", - "id": "23", - "metadata": {}, - "source": [ - "## 8. Multi-Dimensional Support (Periods/Scenarios)\n", - "\n", - "When a FlowSystem has multiple **periods** (e.g., investment years) or **scenarios**, each (period, scenario) combination may have **different cluster assignments**.\n", - "\n", - "The data structures support this with multi-dimensional arrays:\n", - "\n", - "```python\n", - "# Simple case (no periods/scenarios)\n", - "cluster_order.dims = ['original_period']\n", - "timestep_mapping.dims = ['original_time']\n", - "\n", - "# Multi-scenario case\n", - "cluster_order.dims = ['original_period', 'scenario']\n", - "timestep_mapping.dims = ['original_time', 'scenario']\n", - "```\n", - "\n", - "Helper methods extract per-slice data:\n", - "```python\n", - "cluster_structure.get_cluster_order_for_slice(period='2025', scenario='high')\n", - "cluster_result.get_timestep_mapping_for_slice(scenario='base')\n", - "```" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "24", - "metadata": {}, - "outputs": [], - "source": [ - "# Check if our system has multi-dimensional clustering\n", - "print('Multi-dimensional check:')\n", - "print(f' cluster_order dims: {cs.cluster_order.dims}')\n", - "print(f' has_multi_dims: {cs.has_multi_dims}')\n", - "\n", - "# Get cluster order (works for both simple and multi-dim cases)\n", - "cluster_order = cs.get_cluster_order_for_slice()\n", - "print(f' cluster_order shape: {cluster_order.shape}')" - ] - }, - { - "cell_type": "markdown", - "id": "25", - "metadata": {}, - "source": [ - "## 9. 
Expanding Solutions Back to Full Resolution\n", - "\n", - "After optimization, `expand_solution()` uses the stored `timestep_mapping` to map typical period results back to the original time horizon:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "26", - "metadata": {}, - "outputs": [], - "source": [ - "# Expand the solution\n", "fs_expanded = fs_clustered.transform.expand_solution()\n", "\n", - "print(f'Clustered timesteps: {len(fs_clustered.timesteps)}')\n", - "print(f'Expanded timesteps: {len(fs_expanded.timesteps)}')\n", - "print(f'Original timesteps: {len(flow_system.timesteps)}')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "27", - "metadata": {}, - "outputs": [], - "source": [ - "# The expanded solution has full time resolution\n", - "if 'Boiler(Q_th)|flow_rate' in fs_expanded.solution:\n", - " flow_clustered = fs_clustered.solution['Boiler(Q_th)|flow_rate']\n", - " flow_expanded = fs_expanded.solution['Boiler(Q_th)|flow_rate']\n", - "\n", - " print(f'Clustered flow shape: {flow_clustered.shape}')\n", - " print(f'Expanded flow shape: {flow_expanded.shape}')" + "print(f'Clustered: {len(fs_clustered.timesteps)} timesteps')\n", + "print(f'Expanded: {len(fs_expanded.timesteps)} timesteps')" ] }, { "cell_type": "markdown", - "id": "28", - "metadata": {}, - "source": [ - "## 10. Visualizing Typical vs Original Data" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "29", - "metadata": {}, - "outputs": [], - "source": [ - "# Get heat demand from original and clustered systems\n", - "original_demand = flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile.values\n", - "clustered_demand = fs_clustered.components['HeatDemand'].inputs[0].fixed_relative_profile.values\n", - "\n", - "# Reshape into days\n", - "timesteps_per_day = cs.timesteps_per_cluster\n", - "n_days = len(original_demand) // timesteps_per_day\n", - "original_by_day = original_demand[: n_days * timesteps_per_day].reshape(n_days, timesteps_per_day)\n", - "clustered_by_day = clustered_demand.reshape(cs.n_clusters, timesteps_per_day)\n", - "\n", - "# Create subplots\n", - "fig = make_subplots(\n", - " rows=2,\n", - " cols=1,\n", - " subplot_titles=[f'Original: All {n_days} Days', f'Clustered: {cs.n_clusters} Typical Days'],\n", - " vertical_spacing=0.15,\n", - ")\n", - "\n", - "hours = np.arange(timesteps_per_day) / 4 # Convert to hours\n", - "\n", - "# Plot all original days (faded)\n", - "for day in range(n_days):\n", - " fig.add_trace(\n", - " go.Scatter(\n", - " x=hours, y=original_by_day[day], mode='lines', line=dict(width=0.5, color='lightblue'), showlegend=False\n", - " ),\n", - " row=1,\n", - " col=1,\n", - " )\n", - "\n", - "# Plot typical days\n", - "colors = px.colors.qualitative.Set1\n", - "for cluster_id in range(cs.n_clusters):\n", - " weight = int(cs.cluster_occurrences.sel(cluster=cluster_id).values)\n", - " fig.add_trace(\n", - " go.Scatter(\n", - " x=hours,\n", - " y=clustered_by_day[cluster_id],\n", - " mode='lines',\n", - " name=f'Cluster {cluster_id} (x{weight})',\n", - " line=dict(width=2, color=colors[cluster_id % len(colors)]),\n", - " ),\n", - " row=2,\n", - " col=1,\n", - " )\n", - "\n", - "fig.update_layout(height=600, title='Heat Demand: Original vs Typical Days')\n", - "fig.update_xaxes(title_text='Hour of Day', row=2, col=1)\n", - "fig.update_yaxes(title_text='MW')\n", - "fig.show()" - ] - }, - { - "cell_type": "markdown", - "id": "30", + "id": "15", "metadata": {}, "source": [ "## Summary\n", "\n", - "### Module 
Structure\n", - "\n", - "```\n", - "flixopt.aggregation\n", - "├── ClusterStructure # Hierarchical: cluster_order, occurrences\n", - "├── ClusterResult # Container: timestep_mapping, weights\n", - "├── ClusterInfo # Stored on FlowSystem._cluster_info\n", - "└── InterClusterLinking # Storage SOC constraints\n", - "```\n", - "\n", - "### Data Flow\n", - "\n", - "1. `cluster()` → Creates reduced FlowSystem + stores `ClusterInfo`\n", - "2. `optimize()` → `InterClusterLinking` adds storage constraints\n", - "3. `expand_solution()` → Uses `timestep_mapping` to restore full time\n", - "\n", - "### Key Formulas\n", + "| Class | Purpose |\n", + "|-------|--------|\n", + "| `ClusterInfo` | Stored on `fs._cluster_info` after `cluster()` |\n", + "| `ClusterResult` | Contains timestep mapping and weights |\n", + "| `ClusterStructure` | Maps original periods to clusters |\n", "\n", - "| Formula | Description |\n", - "|---------|-------------|\n", - "| $\\sum w_t \\cdot c_t$ | Weighted objective function |\n", - "| $\\sum w_t = N_{\\text{original}}$ | Weight conservation |\n", - "| $SOC_{d+1} = SOC_d + \\Delta SOC_{c[d]}$ | Inter-cluster storage linking |" + "**Key methods:**\n", + "- `cluster_structure.plot()` - visualize cluster assignments\n", + "- `cluster_info.plot()` - compare original vs aggregated data\n", + "- `cluster_info.plot_typical_periods()` - view each cluster's profile" ] } ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": "Python 3", "language": "python", "name": "python3" }, From 8177c2e0d66c8ad99eff14d30fa9a3df0be99f7c Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 09:42:51 +0100 Subject: [PATCH 083/191] Update API --- docs/notebooks/08c-clustering.ipynb | 2 +- docs/notebooks/08d-clustering-multiperiod.ipynb | 2 +- docs/notebooks/08e-clustering-internals.ipynb | 16 ++++++++-------- flixopt/aggregation/__init__.py | 2 +- flixopt/aggregation/base.py | 4 ++-- flixopt/flow_system.py | 6 +++--- flixopt/transform_accessor.py | 8 ++++---- tests/test_cluster_reduce_expand.py | 12 ++++++------ 8 files changed, 26 insertions(+), 26 deletions(-) diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb index 14669dc49..ff9fca727 100644 --- a/docs/notebooks/08c-clustering.ipynb +++ b/docs/notebooks/08c-clustering.ipynb @@ -208,7 +208,7 @@ "outputs": [], "source": [ "# Show clustering info\n", - "info = fs_clustered._cluster_info\n", + "info = fs_clustered.cluster_info\n", "cs = info.result.cluster_structure\n", "print('Clustering Configuration:')\n", "print(f' Number of typical periods: {cs.n_clusters}')\n", diff --git a/docs/notebooks/08d-clustering-multiperiod.ipynb b/docs/notebooks/08d-clustering-multiperiod.ipynb index 573e696d4..c66e02384 100644 --- a/docs/notebooks/08d-clustering-multiperiod.ipynb +++ b/docs/notebooks/08d-clustering-multiperiod.ipynb @@ -240,7 +240,7 @@ "metadata": {}, "outputs": [], "source": [ - "info = fs_clustered._cluster_info\n", + "info = fs_clustered.cluster_info\n", "cs = info.result.cluster_structure\n", "\n", "print('Clustering Configuration:')\n", diff --git a/docs/notebooks/08e-clustering-internals.ipynb b/docs/notebooks/08e-clustering-internals.ipynb index 274927b17..1e287a96d 100644 --- a/docs/notebooks/08e-clustering-internals.ipynb +++ b/docs/notebooks/08e-clustering-internals.ipynb @@ -44,7 +44,7 @@ "source": [ "## Clustering and ClusterInfo\n", "\n", - "After calling `cluster()`, metadata is stored in `fs._cluster_info`:" + 
"After calling `cluster()`, metadata is stored in `fs.cluster_info`:" ] }, { @@ -60,7 +60,7 @@ " time_series_for_high_peaks=['HeatDemand(Q_th)|fixed_relative_profile'],\n", ")\n", "\n", - "fs_clustered._cluster_info" + "fs_clustered.cluster_info" ] }, { @@ -80,7 +80,7 @@ "metadata": {}, "outputs": [], "source": [ - "fs_clustered._cluster_info.result" + "fs_clustered.cluster_info.result" ] }, { @@ -90,7 +90,7 @@ "metadata": {}, "outputs": [], "source": [ - "fs_clustered._cluster_info.result.cluster_structure" + "fs_clustered.cluster_info.result.cluster_structure" ] }, { @@ -111,7 +111,7 @@ "outputs": [], "source": [ "# Which original period belongs to which cluster?\n", - "fs_clustered._cluster_info.result.cluster_structure.plot()" + "fs_clustered.cluster_info.result.cluster_structure.plot()" ] }, { @@ -122,7 +122,7 @@ "outputs": [], "source": [ "# What does each cluster's typical profile look like?\n", - "fs_clustered._cluster_info.plot_typical_periods('HeatDemand(Q_th)|fixed_relative_profile')" + "fs_clustered.cluster_info.plot_typical_periods('HeatDemand(Q_th)|fixed_relative_profile')" ] }, { @@ -133,7 +133,7 @@ "outputs": [], "source": [ "# How well does the aggregated data match the original?\n", - "fs_clustered._cluster_info.plot()" + "fs_clustered.cluster_info.plot()" ] }, { @@ -197,7 +197,7 @@ "\n", "| Class | Purpose |\n", "|-------|--------|\n", - "| `ClusterInfo` | Stored on `fs._cluster_info` after `cluster()` |\n", + "| `ClusterInfo` | Stored on `fs.cluster_info` after `cluster()` |\n", "| `ClusterResult` | Contains timestep mapping and weights |\n", "| `ClusterStructure` | Maps original periods to clusters |\n", "\n", diff --git a/flixopt/aggregation/__init__.py b/flixopt/aggregation/__init__.py index 2ce828fae..908ed94d8 100644 --- a/flixopt/aggregation/__init__.py +++ b/flixopt/aggregation/__init__.py @@ -18,7 +18,7 @@ ) # Access clustering metadata - info = fs_clustered._cluster_info + info = fs_clustered.cluster_info print(f'Number of clusters: {info.result.cluster_structure.n_clusters}') # Expand solution back to full resolution diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index 2daa0e1ec..3d4ea7ad6 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -599,7 +599,7 @@ def plot(self, colormap: str | None = None, show: bool | None = None): Example: >>> fs_clustered = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D') - >>> fs_clustered._cluster_info.plot() + >>> fs_clustered.cluster_info.plot() """ return self.result.plot(colormap=colormap, show=show) @@ -618,7 +618,7 @@ def plot_typical_periods(self, variable: str | None = None, show: bool | None = Example: >>> fs_clustered = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D') - >>> fs_clustered._cluster_info.plot_typical_periods() + >>> fs_clustered.cluster_info.plot_typical_periods() """ return self.result.plot_typical_periods(variable=variable, show=show) diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index b83d57e5a..a7791f79e 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -233,7 +233,7 @@ def __init__( self._solution: xr.Dataset | None = None # Aggregation info - populated by transform.cluster() - self._cluster_info: ClusterInfo | None = None + self.cluster_info: ClusterInfo | None = None # Statistics accessor cache - lazily initialized, invalidated on new solution self._statistics: StatisticsAccessor | None = None @@ -1294,7 +1294,7 @@ def build_model(self, normalize_weights: bool = True) -> 
FlowSystem: self.model.do_modeling() # Add inter-cluster storage linking if this is an aggregated FlowSystem - if self._cluster_info is not None: + if self.cluster_info is not None: self._add_inter_cluster_linking() return self @@ -1307,7 +1307,7 @@ def _add_inter_cluster_linking(self) -> None: """ from .aggregation.storage_linking import InterClusterLinking - info = self._cluster_info + info = self.cluster_info if info is None: return diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index f2978d034..fcafff398 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -615,7 +615,7 @@ def cluster( Returns: A new FlowSystem with reduced timesteps (only typical clusters). - The FlowSystem has metadata stored in ``_cluster_info`` for expansion. + The FlowSystem has metadata stored in ``cluster_info`` for expansion. Raises: ValueError: If timestep sizes are inconsistent. @@ -876,7 +876,7 @@ def _build_cluster_occurrences_for_key(key: tuple) -> np.ndarray: aggregated_data=ds_new, ) - reduced_fs._cluster_info = ClusterInfo( + reduced_fs.cluster_info = ClusterInfo( result=aggregation_result, original_flow_system=self._fs, backend_name='tsam', @@ -1034,7 +1034,7 @@ def expand_solution(self) -> FlowSystem: from .flow_system import FlowSystem # Validate - if self._fs._cluster_info is None: + if self._fs.cluster_info is None: raise ValueError( 'expand_solution() requires a FlowSystem created with cluster(). ' 'This FlowSystem has no aggregation info.' @@ -1042,7 +1042,7 @@ def expand_solution(self) -> FlowSystem: if self._fs.solution is None: raise ValueError('FlowSystem has no solution. Run optimize() or solve() first.') - info = self._fs._cluster_info + info = self._fs.cluster_info cluster_structure = info.result.cluster_structure if cluster_structure is None: raise ValueError('No cluster structure available for expansion.') diff --git a/tests/test_cluster_reduce_expand.py b/tests/test_cluster_reduce_expand.py index b7a41ff1a..c8de5ddf1 100644 --- a/tests/test_cluster_reduce_expand.py +++ b/tests/test_cluster_reduce_expand.py @@ -55,8 +55,8 @@ def test_cluster_creates_reduced_timesteps(timesteps_8_days): # Should have 2 * 24 = 48 timesteps instead of 192 assert len(fs_reduced.timesteps) == 48 - assert hasattr(fs_reduced, '_cluster_info') - assert fs_reduced._cluster_info.result.cluster_structure.n_clusters == 2 + assert hasattr(fs_reduced, 'cluster_info') + assert fs_reduced.cluster_info.result.cluster_structure.n_clusters == 2 def test_expand_solution_restores_full_timesteps(solver_fixture, timesteps_8_days): @@ -112,7 +112,7 @@ def test_expand_solution_maps_values_correctly(solver_fixture, timesteps_8_days) fs_reduced.optimize(solver_fixture) # Get cluster_order to know mapping - info = fs_reduced._cluster_info + info = fs_reduced.cluster_info cluster_order = info.result.cluster_structure.cluster_order.values timesteps_per_cluster = info.result.cluster_structure.timesteps_per_cluster # 24 @@ -188,7 +188,7 @@ def test_expand_solution_statistics_match_clustered(solver_fixture, timesteps_8_ assert_allclose(reduced_flow_hours, expanded_flow_hours, rtol=1e-6) -def test_expand_solution_without_cluster_info_raises(solver_fixture, timesteps_2_days): +def test_expand_solution_withoutcluster_info_raises(solver_fixture, timesteps_2_days): """Test that expand_solution raises error if not a reduced FlowSystem.""" fs = create_simple_system(timesteps_2_days) fs.optimize(solver_fixture) @@ -272,7 +272,7 @@ def test_cluster_with_scenarios(timesteps_8_days, 
scenarios_2): assert len(fs_reduced.timesteps) == 48 # Should have aggregation info with cluster structure - info = fs_reduced._cluster_info + info = fs_reduced.cluster_info assert info is not None assert info.result.cluster_structure is not None assert info.result.cluster_structure.n_clusters == 2 @@ -317,7 +317,7 @@ def test_expand_solution_maps_scenarios_independently(solver_fixture, timesteps_ ) fs_reduced.optimize(solver_fixture) - info = fs_reduced._cluster_info + info = fs_reduced.cluster_info cluster_structure = info.result.cluster_structure timesteps_per_cluster = cluster_structure.timesteps_per_cluster # 24 From 126cbf44420e60d35d8f519fe9f5b31e9a10536a Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 09:48:24 +0100 Subject: [PATCH 084/191] Update API --- docs/notebooks/08c-clustering.ipynb | 2 +- .../08d-clustering-multiperiod.ipynb | 2 +- docs/notebooks/08e-clustering-internals.ipynb | 24 +++---- flixopt/aggregation/__init__.py | 8 +-- flixopt/aggregation/base.py | 71 +++++++++++++++++-- flixopt/flow_system.py | 8 +-- flixopt/transform_accessor.py | 12 ++-- tests/test_aggregation/test_base.py | 10 +-- tests/test_aggregation/test_integration.py | 2 +- tests/test_cluster_reduce_expand.py | 12 ++-- 10 files changed, 107 insertions(+), 44 deletions(-) diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb index ff9fca727..3e9316fb8 100644 --- a/docs/notebooks/08c-clustering.ipynb +++ b/docs/notebooks/08c-clustering.ipynb @@ -208,7 +208,7 @@ "outputs": [], "source": [ "# Show clustering info\n", - "info = fs_clustered.cluster_info\n", + "info = fs_clustered.clustering\n", "cs = info.result.cluster_structure\n", "print('Clustering Configuration:')\n", "print(f' Number of typical periods: {cs.n_clusters}')\n", diff --git a/docs/notebooks/08d-clustering-multiperiod.ipynb b/docs/notebooks/08d-clustering-multiperiod.ipynb index c66e02384..ff972164a 100644 --- a/docs/notebooks/08d-clustering-multiperiod.ipynb +++ b/docs/notebooks/08d-clustering-multiperiod.ipynb @@ -240,7 +240,7 @@ "metadata": {}, "outputs": [], "source": [ - "info = fs_clustered.cluster_info\n", + "info = fs_clustered.clustering\n", "cs = info.result.cluster_structure\n", "\n", "print('Clustering Configuration:')\n", diff --git a/docs/notebooks/08e-clustering-internals.ipynb b/docs/notebooks/08e-clustering-internals.ipynb index 1e287a96d..3e4d25ecb 100644 --- a/docs/notebooks/08e-clustering-internals.ipynb +++ b/docs/notebooks/08e-clustering-internals.ipynb @@ -42,9 +42,9 @@ "id": "2", "metadata": {}, "source": [ - "## Clustering and ClusterInfo\n", + "## Clustering and Clustering\n", "\n", - "After calling `cluster()`, metadata is stored in `fs.cluster_info`:" + "After calling `cluster()`, metadata is stored in `fs.clustering`:" ] }, { @@ -60,7 +60,7 @@ " time_series_for_high_peaks=['HeatDemand(Q_th)|fixed_relative_profile'],\n", ")\n", "\n", - "fs_clustered.cluster_info" + "fs_clustered.clustering" ] }, { @@ -68,7 +68,7 @@ "id": "4", "metadata": {}, "source": [ - "The `ClusterInfo` contains:\n", + "The `Clustering` contains:\n", "- **`result`**: A `ClusterResult` with timestep mapping and weights\n", "- **`result.cluster_structure`**: A `ClusterStructure` with cluster assignments" ] @@ -80,7 +80,7 @@ "metadata": {}, "outputs": [], "source": [ - "fs_clustered.cluster_info.result" + "fs_clustered.clustering.result" ] }, { @@ -90,7 +90,7 @@ "metadata": {}, "outputs": [], "source": [ - 
"fs_clustered.cluster_info.result.cluster_structure" + "fs_clustered.clustering.result.cluster_structure" ] }, { @@ -111,7 +111,7 @@ "outputs": [], "source": [ "# Which original period belongs to which cluster?\n", - "fs_clustered.cluster_info.result.cluster_structure.plot()" + "fs_clustered.clustering.result.cluster_structure.plot()" ] }, { @@ -122,7 +122,7 @@ "outputs": [], "source": [ "# What does each cluster's typical profile look like?\n", - "fs_clustered.cluster_info.plot_typical_periods('HeatDemand(Q_th)|fixed_relative_profile')" + "fs_clustered.clustering.plot_typical_periods('HeatDemand(Q_th)|fixed_relative_profile')" ] }, { @@ -133,7 +133,7 @@ "outputs": [], "source": [ "# How well does the aggregated data match the original?\n", - "fs_clustered.cluster_info.plot()" + "fs_clustered.clustering.plot()" ] }, { @@ -197,14 +197,14 @@ "\n", "| Class | Purpose |\n", "|-------|--------|\n", - "| `ClusterInfo` | Stored on `fs.cluster_info` after `cluster()` |\n", + "| `Clustering` | Stored on `fs.clustering` after `cluster()` |\n", "| `ClusterResult` | Contains timestep mapping and weights |\n", "| `ClusterStructure` | Maps original periods to clusters |\n", "\n", "**Key methods:**\n", "- `cluster_structure.plot()` - visualize cluster assignments\n", - "- `cluster_info.plot()` - compare original vs aggregated data\n", - "- `cluster_info.plot_typical_periods()` - view each cluster's profile" + "- `clustering.plot()` - compare original vs aggregated data\n", + "- `clustering.plot_typical_periods()` - view each cluster's profile" ] } ], diff --git a/flixopt/aggregation/__init__.py b/flixopt/aggregation/__init__.py index 908ed94d8..93d81541a 100644 --- a/flixopt/aggregation/__init__.py +++ b/flixopt/aggregation/__init__.py @@ -6,7 +6,7 @@ Key classes: - ClusterResult: Universal result container for clustering - ClusterStructure: Hierarchical structure info for storage inter-cluster linking -- ClusterInfo: Stored on FlowSystem after clustering +- Clustering: Stored on FlowSystem after clustering Example usage: @@ -18,7 +18,7 @@ ) # Access clustering metadata - info = fs_clustered.cluster_info + info = fs_clustered.clustering print(f'Number of clusters: {info.result.cluster_structure.n_clusters}') # Expand solution back to full resolution @@ -26,7 +26,7 @@ """ from .base import ( - ClusterInfo, + Clustering, ClusterResult, ClusterStructure, create_cluster_structure_from_mapping, @@ -51,7 +51,7 @@ def _get_inter_cluster_linking(): __all__ = [ # Core classes 'ClusterResult', - 'ClusterInfo', + 'Clustering', 'ClusterStructure', 'InterClusterLinking', # Utilities diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index 3d4ea7ad6..1a430ba84 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -543,7 +543,7 @@ def plot_typical_periods(self, variable: str | None = None, show: bool | None = @dataclass -class ClusterInfo: +class Clustering: """Information about an aggregation stored on a FlowSystem. 
This is stored on the FlowSystem after aggregation to enable: @@ -576,7 +576,7 @@ def __repr__(self) -> str: else: structure_info = 'no structure' return ( - f'ClusterInfo(\n' + f'Clustering(\n' f' backend={self.backend_name!r}\n' f' {structure_info}\n' f' storage_linking={self.storage_inter_cluster_linking}, cyclic={self.storage_cyclic}\n' @@ -599,7 +599,7 @@ def plot(self, colormap: str | None = None, show: bool | None = None): Example: >>> fs_clustered = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D') - >>> fs_clustered.cluster_info.plot() + >>> fs_clustered.clustering.plot() """ return self.result.plot(colormap=colormap, show=show) @@ -618,10 +618,73 @@ def plot_typical_periods(self, variable: str | None = None, show: bool | None = Example: >>> fs_clustered = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D') - >>> fs_clustered.cluster_info.plot_typical_periods() + >>> fs_clustered.clustering.plot_typical_periods() """ return self.result.plot_typical_periods(variable=variable, show=show) + def plot_structure(self, show: bool | None = None): + """Plot cluster assignment visualization. + + Shows which original period belongs to which cluster. + + Args: + show: Whether to display the figure. + Defaults to CONFIG.Plotting.default_show. + + Returns: + PlotResult containing the figure and underlying data. + + Example: + >>> fs_clustered = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D') + >>> fs_clustered.clustering.plot_structure() + """ + if self.result.cluster_structure is None: + raise ValueError('No cluster_structure available') + return self.result.cluster_structure.plot(show=show) + + # Convenience properties delegating to nested objects + + @property + def cluster_order(self) -> xr.DataArray: + """Which cluster each original period belongs to.""" + if self.result.cluster_structure is None: + raise ValueError('No cluster_structure available') + return self.result.cluster_structure.cluster_order + + @property + def occurrences(self) -> xr.DataArray: + """How many original periods each cluster represents.""" + if self.result.cluster_structure is None: + raise ValueError('No cluster_structure available') + return self.result.cluster_structure.cluster_occurrences + + @property + def n_clusters(self) -> int: + """Number of clusters.""" + if self.result.cluster_structure is None: + raise ValueError('No cluster_structure available') + n = self.result.cluster_structure.n_clusters + return int(n) if isinstance(n, (int, np.integer)) else int(n.values) + + @property + def n_original_periods(self) -> int: + """Number of original periods (before clustering).""" + if self.result.cluster_structure is None: + raise ValueError('No cluster_structure available') + return self.result.cluster_structure.n_original_periods + + @property + def timesteps_per_period(self) -> int: + """Number of timesteps in each period/cluster.""" + if self.result.cluster_structure is None: + raise ValueError('No cluster_structure available') + return self.result.cluster_structure.timesteps_per_cluster + + @property + def timestep_mapping(self) -> xr.DataArray: + """Mapping from original timesteps to representative timestep indices.""" + return self.result.timestep_mapping + def create_cluster_structure_from_mapping( timestep_mapping: xr.DataArray, diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index a7791f79e..e2cde262b 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -38,7 +38,7 @@ import pyvis - from .aggregation import ClusterInfo + from 
.aggregation import Clustering from .solvers import _Solver from .structure import TimeSeriesWeights from .types import Effect_TPS, Numeric_S, Numeric_TPS, NumericOrBool @@ -233,7 +233,7 @@ def __init__( self._solution: xr.Dataset | None = None # Aggregation info - populated by transform.cluster() - self.cluster_info: ClusterInfo | None = None + self.clustering: Clustering | None = None # Statistics accessor cache - lazily initialized, invalidated on new solution self._statistics: StatisticsAccessor | None = None @@ -1294,7 +1294,7 @@ def build_model(self, normalize_weights: bool = True) -> FlowSystem: self.model.do_modeling() # Add inter-cluster storage linking if this is an aggregated FlowSystem - if self.cluster_info is not None: + if self.clustering is not None: self._add_inter_cluster_linking() return self @@ -1307,7 +1307,7 @@ def _add_inter_cluster_linking(self) -> None: """ from .aggregation.storage_linking import InterClusterLinking - info = self.cluster_info + info = self.clustering if info is None: return diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index fcafff398..ec6f1319b 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -615,7 +615,7 @@ def cluster( Returns: A new FlowSystem with reduced timesteps (only typical clusters). - The FlowSystem has metadata stored in ``cluster_info`` for expansion. + The FlowSystem has metadata stored in ``clustering`` for expansion. Raises: ValueError: If timestep sizes are inconsistent. @@ -649,7 +649,7 @@ def cluster( """ import tsam.timeseriesaggregation as tsam - from .aggregation import ClusterInfo, ClusterResult, ClusterStructure + from .aggregation import Clustering, ClusterResult, ClusterStructure from .core import TimeSeriesData, drop_constant_arrays from .flow_system import FlowSystem @@ -797,7 +797,7 @@ def _build_weights_for_key(key: tuple) -> xr.DataArray: if isinstance(ics, str) and ics == 'equals_final': storage.initial_charge_state = 0 - # Build ClusterInfo for inter-cluster linking and solution expansion + # Build Clustering for inter-cluster linking and solution expansion n_original_timesteps = len(self._fs.timesteps) # Build per-slice cluster_order and timestep_mapping as multi-dimensional DataArrays @@ -876,7 +876,7 @@ def _build_cluster_occurrences_for_key(key: tuple) -> np.ndarray: aggregated_data=ds_new, ) - reduced_fs.cluster_info = ClusterInfo( + reduced_fs.clustering = Clustering( result=aggregation_result, original_flow_system=self._fs, backend_name='tsam', @@ -1034,7 +1034,7 @@ def expand_solution(self) -> FlowSystem: from .flow_system import FlowSystem # Validate - if self._fs.cluster_info is None: + if self._fs.clustering is None: raise ValueError( 'expand_solution() requires a FlowSystem created with cluster(). ' 'This FlowSystem has no aggregation info.' @@ -1042,7 +1042,7 @@ def expand_solution(self) -> FlowSystem: if self._fs.solution is None: raise ValueError('FlowSystem has no solution. 
Run optimize() or solve() first.') - info = self._fs.cluster_info + info = self._fs.clustering cluster_structure = info.result.cluster_structure if cluster_structure is None: raise ValueError('No cluster structure available for expansion.') diff --git a/tests/test_aggregation/test_base.py b/tests/test_aggregation/test_base.py index 75d8b0f7f..3b5afda10 100644 --- a/tests/test_aggregation/test_base.py +++ b/tests/test_aggregation/test_base.py @@ -5,7 +5,7 @@ import xarray as xr from flixopt.aggregation import ( - ClusterInfo, + Clustering, ClusterResult, ClusterStructure, create_cluster_structure_from_mapping, @@ -139,18 +139,18 @@ def test_basic_creation(self): assert structure.n_original_periods == 3 -class TestClusterInfo: - """Tests for ClusterInfo dataclass.""" +class TestClustering: + """Tests for Clustering dataclass.""" def test_creation(self): - """Test ClusterInfo creation.""" + """Test Clustering creation.""" result = ClusterResult( timestep_mapping=xr.DataArray([0, 1], dims=['original_time']), n_representatives=2, representative_weights=xr.DataArray([1.0, 1.0], dims=['time']), ) - info = ClusterInfo( + info = Clustering( result=result, original_flow_system=None, # Would be FlowSystem in practice backend_name='tsam', diff --git a/tests/test_aggregation/test_integration.py b/tests/test_aggregation/test_integration.py index 62e9fc8b4..2ef0a0640 100644 --- a/tests/test_aggregation/test_integration.py +++ b/tests/test_aggregation/test_integration.py @@ -139,7 +139,7 @@ def test_import_from_flixopt(self): assert hasattr(aggregation, 'ClusterResult') assert hasattr(aggregation, 'ClusterStructure') - assert hasattr(aggregation, 'ClusterInfo') + assert hasattr(aggregation, 'Clustering') def test_plot_aggregation_available(self): """Test that plot_aggregation is available.""" diff --git a/tests/test_cluster_reduce_expand.py b/tests/test_cluster_reduce_expand.py index c8de5ddf1..9b8095422 100644 --- a/tests/test_cluster_reduce_expand.py +++ b/tests/test_cluster_reduce_expand.py @@ -55,8 +55,8 @@ def test_cluster_creates_reduced_timesteps(timesteps_8_days): # Should have 2 * 24 = 48 timesteps instead of 192 assert len(fs_reduced.timesteps) == 48 - assert hasattr(fs_reduced, 'cluster_info') - assert fs_reduced.cluster_info.result.cluster_structure.n_clusters == 2 + assert hasattr(fs_reduced, 'clustering') + assert fs_reduced.clustering.result.cluster_structure.n_clusters == 2 def test_expand_solution_restores_full_timesteps(solver_fixture, timesteps_8_days): @@ -112,7 +112,7 @@ def test_expand_solution_maps_values_correctly(solver_fixture, timesteps_8_days) fs_reduced.optimize(solver_fixture) # Get cluster_order to know mapping - info = fs_reduced.cluster_info + info = fs_reduced.clustering cluster_order = info.result.cluster_structure.cluster_order.values timesteps_per_cluster = info.result.cluster_structure.timesteps_per_cluster # 24 @@ -188,7 +188,7 @@ def test_expand_solution_statistics_match_clustered(solver_fixture, timesteps_8_ assert_allclose(reduced_flow_hours, expanded_flow_hours, rtol=1e-6) -def test_expand_solution_withoutcluster_info_raises(solver_fixture, timesteps_2_days): +def test_expand_solution_withoutclustering_raises(solver_fixture, timesteps_2_days): """Test that expand_solution raises error if not a reduced FlowSystem.""" fs = create_simple_system(timesteps_2_days) fs.optimize(solver_fixture) @@ -272,7 +272,7 @@ def test_cluster_with_scenarios(timesteps_8_days, scenarios_2): assert len(fs_reduced.timesteps) == 48 # Should have aggregation info with cluster structure - 
info = fs_reduced.cluster_info + info = fs_reduced.clustering assert info is not None assert info.result.cluster_structure is not None assert info.result.cluster_structure.n_clusters == 2 @@ -317,7 +317,7 @@ def test_expand_solution_maps_scenarios_independently(solver_fixture, timesteps_ ) fs_reduced.optimize(solver_fixture) - info = fs_reduced.cluster_info + info = fs_reduced.clustering cluster_structure = info.result.cluster_structure timesteps_per_cluster = cluster_structure.timesteps_per_cluster # 24 From 88248dbac8b9b868e2977ef409b906cd92217590 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 10:00:34 +0100 Subject: [PATCH 085/191] Update API --- flixopt/aggregation/base.py | 380 +++++++++++++++++++++++++++++++----- 1 file changed, 327 insertions(+), 53 deletions(-) diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index 1a430ba84..1a8050ea9 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -542,6 +542,317 @@ def plot_typical_periods(self, variable: str | None = None, show: bool | None = return plot_result +class ClusteringPlotAccessor: + """Plot accessor for Clustering objects. + + Provides visualization methods for comparing original vs aggregated data + and understanding the clustering structure. + + Example: + >>> fs_clustered = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D') + >>> fs_clustered.clustering.plot.compare() # timeseries comparison + >>> fs_clustered.clustering.plot.compare(kind='duration_curve') # duration curve + >>> fs_clustered.clustering.plot.heatmap() # structure visualization + >>> fs_clustered.clustering.plot.typical_periods() # cluster profiles + """ + + def __init__(self, clustering: Clustering): + self._clustering = clustering + + def compare( + self, + kind: str = 'timeseries', + variable: str | None = None, + colormap: str | None = None, + show: bool | None = None, + ): + """Compare original vs aggregated data. + + Args: + kind: Type of comparison plot. + - 'timeseries': Time series comparison (default) + - 'duration_curve': Sorted duration curve comparison + variable: Variable to plot. If None, plots first available variable. + colormap: Colorscale name for the colors. + Defaults to CONFIG.Plotting.default_qualitative_colorscale. + show: Whether to display the figure. + Defaults to CONFIG.Plotting.default_show. + + Returns: + PlotResult containing the comparison figure and underlying data. + """ + if kind == 'timeseries': + return self._compare_timeseries(variable=variable, colormap=colormap, show=show) + elif kind == 'duration_curve': + return self._compare_duration_curve(variable=variable, colormap=colormap, show=show) + else: + raise ValueError(f"Unknown kind '{kind}'. 
Use 'timeseries' or 'duration_curve'.") + + def _compare_timeseries( + self, + variable: str | None = None, + colormap: str | None = None, + show: bool | None = None, + ): + """Compare original vs aggregated as time series.""" + import plotly.graph_objects as go + + from ..config import CONFIG + from ..plot_result import PlotResult + + result = self._clustering.result + if result.original_data is None or result.aggregated_data is None: + raise ValueError('No original/aggregated data available for comparison') + + # Filter to time-varying variables + time_vars = [ + name + for name in result.original_data.data_vars + if 'time' in result.original_data[name].dims + and not np.isclose(result.original_data[name].min(), result.original_data[name].max()) + ] + if not time_vars: + raise ValueError('No time-varying variables found') + + if variable is None: + variable = time_vars[0] + elif variable not in time_vars: + raise ValueError(f"Variable '{variable}' not found. Available: {time_vars}") + + original = result.original_data[variable] + aggregated = result.aggregated_data[variable] + + # Expand aggregated to original length + mapping = result.timestep_mapping.values + expanded = aggregated.values[mapping] + + fig = go.Figure() + fig.add_trace( + go.Scatter( + x=original.coords['time'].values, + y=original.values, + name='Original', + line=dict(dash='dash'), + ) + ) + fig.add_trace( + go.Scatter( + x=original.coords['time'].values, + y=expanded, + name='Aggregated', + ) + ) + fig.update_layout( + title=f'Original vs Aggregated: {variable}', + xaxis_title='Time', + yaxis_title='Value', + ) + + data = xr.Dataset({'original': original, 'aggregated': xr.DataArray(expanded, dims=['time'])}) + plot_result = PlotResult(data=data, figure=fig) + + if show is None: + show = CONFIG.Plotting.default_show + if show: + plot_result.show() + + return plot_result + + def _compare_duration_curve( + self, + variable: str | None = None, + colormap: str | None = None, + show: bool | None = None, + ): + """Compare original vs aggregated as duration curves.""" + import plotly.graph_objects as go + + from ..config import CONFIG + from ..plot_result import PlotResult + + result = self._clustering.result + if result.original_data is None or result.aggregated_data is None: + raise ValueError('No original/aggregated data available for comparison') + + # Filter to time-varying variables + time_vars = [ + name + for name in result.original_data.data_vars + if 'time' in result.original_data[name].dims + and not np.isclose(result.original_data[name].min(), result.original_data[name].max()) + ] + if not time_vars: + raise ValueError('No time-varying variables found') + + if variable is None: + variable = time_vars[0] + elif variable not in time_vars: + raise ValueError(f"Variable '{variable}' not found. 
Available: {time_vars}") + + original = result.original_data[variable].values + aggregated = result.aggregated_data[variable].values + + # Expand aggregated to original length + mapping = result.timestep_mapping.values + expanded = aggregated[mapping] + + # Sort both for duration curve + original_sorted = np.sort(original)[::-1] + expanded_sorted = np.sort(expanded)[::-1] + x = np.arange(len(original_sorted)) + + fig = go.Figure() + fig.add_trace( + go.Scatter( + x=x, + y=original_sorted, + name='Original', + line=dict(dash='dash'), + ) + ) + fig.add_trace( + go.Scatter( + x=x, + y=expanded_sorted, + name='Aggregated', + ) + ) + fig.update_layout( + title=f'Duration Curve: {variable}', + xaxis_title='Hours (sorted)', + yaxis_title='Value', + ) + + data = xr.Dataset( + { + 'original_sorted': xr.DataArray(original_sorted, dims=['rank']), + 'aggregated_sorted': xr.DataArray(expanded_sorted, dims=['rank']), + } + ) + plot_result = PlotResult(data=data, figure=fig) + + if show is None: + show = CONFIG.Plotting.default_show + if show: + plot_result.show() + + return plot_result + + def heatmap( + self, + variable: str | None = None, + show: bool | None = None, + ): + """Plot clustering structure as a heatmap of periods vs timesteps. + + Shows the original data organized by periods (rows) and timesteps within + each period (columns), with color indicating the value. Periods are + grouped by their cluster assignment. + + Args: + variable: Variable to plot. If None, plots first available variable. + show: Whether to display the figure. + Defaults to CONFIG.Plotting.default_show. + + Returns: + PlotResult containing the heatmap figure and underlying data. + """ + import plotly.graph_objects as go + + from ..config import CONFIG + from ..plot_result import PlotResult + + result = self._clustering.result + cs = result.cluster_structure + if result.original_data is None or cs is None: + raise ValueError('No original data or cluster structure available') + + # Filter to time-varying variables + time_vars = [ + name + for name in result.original_data.data_vars + if 'time' in result.original_data[name].dims + and not np.isclose(result.original_data[name].min(), result.original_data[name].max()) + ] + if not time_vars: + raise ValueError('No time-varying variables found') + + if variable is None: + variable = time_vars[0] + elif variable not in time_vars: + raise ValueError(f"Variable '{variable}' not found. 
Available: {time_vars}") + + original = result.original_data[variable].values + n_periods = cs.n_original_periods + timesteps_per_period = cs.timesteps_per_cluster + cluster_order = cs.cluster_order.values + + # Reshape to [periods, timesteps_per_period] + data_matrix = original[: n_periods * timesteps_per_period].reshape(n_periods, timesteps_per_period) + + # Sort periods by cluster for better visualization + sorted_indices = np.argsort(cluster_order) + data_sorted = data_matrix[sorted_indices] + clusters_sorted = cluster_order[sorted_indices] + + # Create labels showing period and cluster + y_labels = [f'P{sorted_indices[i] + 1} (C{clusters_sorted[i]})' for i in range(n_periods)] + + fig = go.Figure( + data=go.Heatmap( + z=data_sorted, + x=list(range(timesteps_per_period)), + y=y_labels, + colorscale='Viridis', + colorbar=dict(title='Value'), + ) + ) + fig.update_layout( + title=f'Clustering Structure: {variable}', + xaxis_title='Timestep within period', + yaxis_title='Period (sorted by cluster)', + ) + + data = xr.Dataset( + { + 'heatmap': xr.DataArray( + data_sorted, + dims=['period', 'timestep'], + coords={'period': sorted_indices, 'timestep': range(timesteps_per_period)}, + ), + 'cluster': xr.DataArray(clusters_sorted, dims=['period']), + } + ) + plot_result = PlotResult(data=data, figure=fig) + + if show is None: + show = CONFIG.Plotting.default_show + if show: + plot_result.show() + + return plot_result + + def typical_periods( + self, + variable: str | None = None, + show: bool | None = None, + ): + """Plot each cluster's typical period profile. + + Shows each cluster as a separate subplot with its occurrence count + in the title. Useful for understanding what each cluster represents. + + Args: + variable: Variable to plot. If None, plots the first available variable. + show: Whether to display the figure. + Defaults to CONFIG.Plotting.default_show. + + Returns: + PlotResult containing the figure and underlying data. + """ + return self._clustering.result.plot_typical_periods(variable=variable, show=show) + + @dataclass class Clustering: """Information about an aggregation stored on a FlowSystem. @@ -558,6 +869,13 @@ class Clustering: backend_name: Name of the aggregation backend used (e.g., 'tsam', 'manual'). storage_inter_cluster_linking: Whether to add inter-cluster storage constraints. storage_cyclic: Whether to enforce cyclic storage (SOC[start] = SOC[end]). + + Example: + >>> fs_clustered = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D') + >>> fs_clustered.clustering.n_clusters + 8 + >>> fs_clustered.clustering.plot.compare() + >>> fs_clustered.clustering.plot.heatmap() """ result: ClusterResult @@ -583,64 +901,20 @@ def __repr__(self) -> str: f')' ) - def plot(self, colormap: str | None = None, show: bool | None = None): - """Plot original vs aggregated data comparison. - - Convenience method that calls result.plot(). - - Args: - colormap: Colorscale name for the time series colors. - Defaults to CONFIG.Plotting.default_qualitative_colorscale. - show: Whether to display the figure. - Defaults to CONFIG.Plotting.default_show. - - Returns: - PlotResult containing the comparison figure and underlying data. - - Example: - >>> fs_clustered = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D') - >>> fs_clustered.clustering.plot() - """ - return self.result.plot(colormap=colormap, show=show) - - def plot_typical_periods(self, variable: str | None = None, show: bool | None = None): - """Plot each cluster's typical period profile. 
- - Convenience method that calls result.plot_typical_periods(). - - Args: - variable: Variable to plot. If None, plots the first available variable. - show: Whether to display the figure. - Defaults to CONFIG.Plotting.default_show. - - Returns: - PlotResult containing the figure and underlying data. - - Example: - >>> fs_clustered = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D') - >>> fs_clustered.clustering.plot_typical_periods() - """ - return self.result.plot_typical_periods(variable=variable, show=show) - - def plot_structure(self, show: bool | None = None): - """Plot cluster assignment visualization. - - Shows which original period belongs to which cluster. - - Args: - show: Whether to display the figure. - Defaults to CONFIG.Plotting.default_show. + @property + def plot(self) -> ClusteringPlotAccessor: + """Access plotting methods for clustering visualization. Returns: - PlotResult containing the figure and underlying data. + ClusteringPlotAccessor with compare(), heatmap(), and typical_periods() methods. Example: - >>> fs_clustered = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D') - >>> fs_clustered.clustering.plot_structure() + >>> fs.clustering.plot.compare() # timeseries comparison + >>> fs.clustering.plot.compare(kind='duration_curve') # duration curve + >>> fs.clustering.plot.heatmap() # structure visualization + >>> fs.clustering.plot.typical_periods() # cluster profiles """ - if self.result.cluster_structure is None: - raise ValueError('No cluster_structure available') - return self.result.cluster_structure.plot(show=show) + return ClusteringPlotAccessor(self) # Convenience properties delegating to nested objects From e0f28a8f2bb1fa07a0ffbc445b3967ddcaf8e339 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 10:04:33 +0100 Subject: [PATCH 086/191] Update API --- flixopt/aggregation/base.py | 222 ++++++++++++++++++++++++------------ 1 file changed, 146 insertions(+), 76 deletions(-) diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index 1a8050ea9..d6a5a2591 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -588,6 +588,18 @@ def compare( else: raise ValueError(f"Unknown kind '{kind}'. 
Use 'timeseries' or 'duration_curve'.") + def _get_time_varying_variables(self) -> list[str]: + """Get list of time-varying variables from original data.""" + result = self._clustering.result + if result.original_data is None: + return [] + return [ + name + for name in result.original_data.data_vars + if 'time' in result.original_data[name].dims + and not np.isclose(result.original_data[name].min(), result.original_data[name].max()) + ] + def _compare_timeseries( self, variable: str | None = None, @@ -595,7 +607,8 @@ def _compare_timeseries( show: bool | None = None, ): """Compare original vs aggregated as time series.""" - import plotly.graph_objects as go + import pandas as pd + import plotly.express as px from ..config import CONFIG from ..plot_result import PlotResult @@ -604,13 +617,7 @@ def _compare_timeseries( if result.original_data is None or result.aggregated_data is None: raise ValueError('No original/aggregated data available for comparison') - # Filter to time-varying variables - time_vars = [ - name - for name in result.original_data.data_vars - if 'time' in result.original_data[name].dims - and not np.isclose(result.original_data[name].min(), result.original_data[name].max()) - ] + time_vars = self._get_time_varying_variables() if not time_vars: raise ValueError('No time-varying variables found') @@ -626,27 +633,26 @@ def _compare_timeseries( mapping = result.timestep_mapping.values expanded = aggregated.values[mapping] - fig = go.Figure() - fig.add_trace( - go.Scatter( - x=original.coords['time'].values, - y=original.values, - name='Original', - line=dict(dash='dash'), - ) - ) - fig.add_trace( - go.Scatter( - x=original.coords['time'].values, - y=expanded, - name='Aggregated', - ) + # Build long-form DataFrame for px.line + time_values = original.coords['time'].values + df = pd.DataFrame( + { + 'time': np.tile(time_values, 2), + 'value': np.concatenate([original.values, expanded]), + 'series': ['Original'] * len(time_values) + ['Aggregated'] * len(time_values), + } ) - fig.update_layout( + + colormap = colormap or CONFIG.Plotting.default_qualitative_colorscale + fig = px.line( + df, + x='time', + y='value', + color='series', title=f'Original vs Aggregated: {variable}', - xaxis_title='Time', - yaxis_title='Value', + color_discrete_sequence=px.colors.qualitative.__dict__.get(colormap, px.colors.qualitative.Plotly), ) + fig.update_traces(selector=dict(name='Original'), line_dash='dash') data = xr.Dataset({'original': original, 'aggregated': xr.DataArray(expanded, dims=['time'])}) plot_result = PlotResult(data=data, figure=fig) @@ -665,7 +671,8 @@ def _compare_duration_curve( show: bool | None = None, ): """Compare original vs aggregated as duration curves.""" - import plotly.graph_objects as go + import pandas as pd + import plotly.express as px from ..config import CONFIG from ..plot_result import PlotResult @@ -674,13 +681,7 @@ def _compare_duration_curve( if result.original_data is None or result.aggregated_data is None: raise ValueError('No original/aggregated data available for comparison') - # Filter to time-varying variables - time_vars = [ - name - for name in result.original_data.data_vars - if 'time' in result.original_data[name].dims - and not np.isclose(result.original_data[name].min(), result.original_data[name].max()) - ] + time_vars = self._get_time_varying_variables() if not time_vars: raise ValueError('No time-varying variables found') @@ -699,29 +700,28 @@ def _compare_duration_curve( # Sort both for duration curve original_sorted = np.sort(original)[::-1] 
expanded_sorted = np.sort(expanded)[::-1] - x = np.arange(len(original_sorted)) - - fig = go.Figure() - fig.add_trace( - go.Scatter( - x=x, - y=original_sorted, - name='Original', - line=dict(dash='dash'), - ) - ) - fig.add_trace( - go.Scatter( - x=x, - y=expanded_sorted, - name='Aggregated', - ) + n = len(original_sorted) + + # Build long-form DataFrame for px.line + df = pd.DataFrame( + { + 'rank': np.tile(np.arange(n), 2), + 'value': np.concatenate([original_sorted, expanded_sorted]), + 'series': ['Original'] * n + ['Aggregated'] * n, + } ) - fig.update_layout( + + colormap = colormap or CONFIG.Plotting.default_qualitative_colorscale + fig = px.line( + df, + x='rank', + y='value', + color='series', title=f'Duration Curve: {variable}', - xaxis_title='Hours (sorted)', - yaxis_title='Value', + labels={'rank': 'Hours (sorted)', 'value': 'Value'}, + color_discrete_sequence=px.colors.qualitative.__dict__.get(colormap, px.colors.qualitative.Plotly), ) + fig.update_traces(selector=dict(name='Original'), line_dash='dash') data = xr.Dataset( { @@ -741,6 +741,7 @@ def _compare_duration_curve( def heatmap( self, variable: str | None = None, + colorscale: str | None = None, show: bool | None = None, ): """Plot clustering structure as a heatmap of periods vs timesteps. @@ -751,13 +752,15 @@ def heatmap( Args: variable: Variable to plot. If None, plots first available variable. + colorscale: Colorscale for heatmap. + Defaults to CONFIG.Plotting.default_sequential_colorscale. show: Whether to display the figure. Defaults to CONFIG.Plotting.default_show. Returns: PlotResult containing the heatmap figure and underlying data. """ - import plotly.graph_objects as go + import plotly.express as px from ..config import CONFIG from ..plot_result import PlotResult @@ -767,13 +770,7 @@ def heatmap( if result.original_data is None or cs is None: raise ValueError('No original data or cluster structure available') - # Filter to time-varying variables - time_vars = [ - name - for name in result.original_data.data_vars - if 'time' in result.original_data[name].dims - and not np.isclose(result.original_data[name].min(), result.original_data[name].max()) - ] + time_vars = self._get_time_varying_variables() if not time_vars: raise ValueError('No time-varying variables found') @@ -798,19 +795,19 @@ def heatmap( # Create labels showing period and cluster y_labels = [f'P{sorted_indices[i] + 1} (C{clusters_sorted[i]})' for i in range(n_periods)] - fig = go.Figure( - data=go.Heatmap( - z=data_sorted, - x=list(range(timesteps_per_period)), - y=y_labels, - colorscale='Viridis', - colorbar=dict(title='Value'), - ) + # Build DataArray for px.imshow + heatmap_da = xr.DataArray( + data_sorted, + dims=['period', 'timestep'], + coords={'period': y_labels, 'timestep': range(timesteps_per_period)}, ) - fig.update_layout( + + colorscale = colorscale or CONFIG.Plotting.default_sequential_colorscale + fig = px.imshow( + heatmap_da, + color_continuous_scale=colorscale, title=f'Clustering Structure: {variable}', - xaxis_title='Timestep within period', - yaxis_title='Period (sorted by cluster)', + labels={'timestep': 'Timestep within period', 'period': 'Period (sorted by cluster)'}, ) data = xr.Dataset( @@ -835,22 +832,95 @@ def heatmap( def typical_periods( self, variable: str | None = None, + colormap: str | None = None, + facet_col_wrap: int | None = None, show: bool | None = None, ): """Plot each cluster's typical period profile. - Shows each cluster as a separate subplot with its occurrence count - in the title. 
Useful for understanding what each cluster represents. + Shows each cluster as a separate faceted subplot. Useful for + understanding what each cluster represents. Args: variable: Variable to plot. If None, plots the first available variable. + colormap: Colorscale for cluster colors. + Defaults to CONFIG.Plotting.default_qualitative_colorscale. + facet_col_wrap: Max columns before wrapping facets. + Defaults to CONFIG.Plotting.default_facet_cols. show: Whether to display the figure. Defaults to CONFIG.Plotting.default_show. Returns: PlotResult containing the figure and underlying data. """ - return self._clustering.result.plot_typical_periods(variable=variable, show=show) + import pandas as pd + import plotly.express as px + + from ..config import CONFIG + from ..plot_result import PlotResult + + result = self._clustering.result + cs = result.cluster_structure + if result.aggregated_data is None or cs is None: + raise ValueError('No aggregated data or cluster structure available') + + time_vars = self._get_time_varying_variables() + if not time_vars: + raise ValueError('No time-varying variables found') + + if variable is None: + variable = time_vars[0] + elif variable not in time_vars: + raise ValueError(f"Variable '{variable}' not found. Available: {time_vars}") + + n_clusters = int(cs.n_clusters) if isinstance(cs.n_clusters, (int, np.integer)) else int(cs.n_clusters.values) + timesteps_per_cluster = cs.timesteps_per_cluster + data = result.aggregated_data[variable].values + + # Reshape to [n_clusters, timesteps_per_cluster] + data_by_cluster = data.reshape(n_clusters, timesteps_per_cluster) + + # Build long-form DataFrame with cluster labels including occurrence counts + rows = [] + for c in range(n_clusters): + occurrence = int(cs.cluster_occurrences.sel(cluster=c).values) + label = f'Cluster {c} (×{occurrence})' + for t in range(timesteps_per_cluster): + rows.append({'cluster': label, 'timestep': t, 'value': data_by_cluster[c, t]}) + df = pd.DataFrame(rows) + + colormap = colormap or CONFIG.Plotting.default_qualitative_colorscale + facet_col_wrap = facet_col_wrap or CONFIG.Plotting.default_facet_cols + + fig = px.line( + df, + x='timestep', + y='value', + facet_col='cluster', + facet_col_wrap=facet_col_wrap, + title=f'Typical Periods: {variable}', + color_discrete_sequence=px.colors.qualitative.__dict__.get(colormap, px.colors.qualitative.Plotly), + ) + fig.update_layout(showlegend=False) + + result_data = xr.Dataset( + { + 'typical_periods': xr.DataArray( + data_by_cluster, + dims=['cluster', 'timestep'], + coords={'cluster': range(n_clusters), 'timestep': range(timesteps_per_cluster)}, + ), + 'occurrences': cs.cluster_occurrences, + } + ) + plot_result = PlotResult(data=result_data, figure=fig) + + if show is None: + show = CONFIG.Plotting.default_show + if show: + plot_result.show() + + return plot_result @dataclass From 56b1ae24e988a4286301d2e6629397bbb1c48f4e Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 10:28:09 +0100 Subject: [PATCH 087/191] Update API --- flixopt/aggregation/base.py | 412 +++++++++++++++++++++++----------- flixopt/transform_accessor.py | 69 +----- 2 files changed, 285 insertions(+), 196 deletions(-) diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index d6a5a2591..4e12c0072 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -330,6 +330,91 @@ def get_timestep_mapping_for_slice(self, period: str | None = None, scenario: st mapping = 
mapping.sel(scenario=scenario) return mapping.values.astype(int) + def expand_data(self, aggregated: xr.DataArray, original_time: xr.DataArray | None = None) -> xr.DataArray: + """Expand aggregated data back to original timesteps. + + Uses the stored timestep_mapping to map each original timestep to its + representative value from the aggregated data. Handles multi-dimensional + data with period/scenario dimensions. + + Args: + aggregated: DataArray with aggregated (reduced) time dimension. + original_time: Original time coordinates. If None, uses coords from + original_data if available. + + Returns: + DataArray expanded to original timesteps. + + Example: + >>> result = fs_clustered.clustering.result + >>> aggregated_values = result.aggregated_data['Demand|profile'] + >>> expanded = result.expand_data(aggregated_values) + >>> len(expanded.time) == len(original_timesteps) # True + """ + import pandas as pd + + if original_time is None: + if self.original_data is None: + raise ValueError('original_time required when original_data is not available') + original_time = self.original_data.coords['time'] + + timestep_mapping = self.timestep_mapping + has_periods = 'period' in timestep_mapping.dims + has_scenarios = 'scenario' in timestep_mapping.dims + + # Simple case: no period/scenario dimensions + if not has_periods and not has_scenarios: + mapping = timestep_mapping.values + expanded_values = aggregated.values[mapping] + return xr.DataArray( + expanded_values, + coords={'time': original_time}, + dims=['time'], + attrs=aggregated.attrs, + ) + + # Multi-dimensional: expand each (period, scenario) slice and recombine + periods = list(timestep_mapping.coords['period'].values) if has_periods else [None] + scenarios = list(timestep_mapping.coords['scenario'].values) if has_scenarios else [None] + + expanded_slices: dict[tuple, xr.DataArray] = {} + for p in periods: + for s in scenarios: + # Get mapping for this slice + mapping_slice = timestep_mapping + if p is not None: + mapping_slice = mapping_slice.sel(period=p) + if s is not None: + mapping_slice = mapping_slice.sel(scenario=s) + mapping = mapping_slice.values + + # Select the data slice + selector = {} + if p is not None and 'period' in aggregated.dims: + selector['period'] = p + if s is not None and 'scenario' in aggregated.dims: + selector['scenario'] = s + + slice_da = aggregated.sel(**selector, drop=True) if selector else aggregated + expanded = slice_da.isel(time=xr.DataArray(mapping, dims=['time'])) + expanded_slices[(p, s)] = expanded.assign_coords(time=original_time) + + # Recombine slices using xr.concat + if has_periods and has_scenarios: + period_arrays = [] + for p in periods: + scenario_arrays = [expanded_slices[(p, s)] for s in scenarios] + period_arrays.append(xr.concat(scenario_arrays, dim=pd.Index(scenarios, name='scenario'))) + result = xr.concat(period_arrays, dim=pd.Index(periods, name='period')) + elif has_periods: + result = xr.concat([expanded_slices[(p, None)] for p in periods], dim=pd.Index(periods, name='period')) + else: + result = xr.concat( + [expanded_slices[(None, s)] for s in scenarios], dim=pd.Index(scenarios, name='scenario') + ) + + return result.transpose('time', ...).assign_attrs(aggregated.attrs) + def validate(self) -> None: """Validate that all fields are consistent. 
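For the simple case without period/scenario dimensions, `expand_data` reduces to integer indexing with the stored mapping. A minimal sketch with made-up numbers (two representative timesteps standing in for four original ones):

import numpy as np
import xarray as xr

aggregated = xr.DataArray([10.0, 20.0], dims=['time'])  # reduced (representative) series
mapping = np.array([0, 0, 1, 1])  # original timestep -> representative index

expanded = aggregated.values[mapping]
print(expanded)  # [10. 10. 20. 20.] -> one value per original timestep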
@@ -562,7 +647,9 @@ def __init__(self, clustering: Clustering): def compare( self, kind: str = 'timeseries', - variable: str | None = None, + variable: str | list[str] | None = None, + facet_col: str | None = 'scenario', + facet_row: str | None = 'period', colormap: str | None = None, show: bool | None = None, ): @@ -572,7 +659,10 @@ def compare( kind: Type of comparison plot. - 'timeseries': Time series comparison (default) - 'duration_curve': Sorted duration curve comparison - variable: Variable to plot. If None, plots first available variable. + variable: Variable(s) to plot. Can be a string, list of strings, + or None to plot all time-varying variables. + facet_col: Dimension for subplot columns (default: 'scenario' if present). + facet_row: Dimension for subplot rows (default: 'period' if present). colormap: Colorscale name for the colors. Defaults to CONFIG.Plotting.default_qualitative_colorscale. show: Whether to display the figure. @@ -582,9 +672,13 @@ def compare( PlotResult containing the comparison figure and underlying data. """ if kind == 'timeseries': - return self._compare_timeseries(variable=variable, colormap=colormap, show=show) + return self._compare_timeseries( + variable=variable, facet_col=facet_col, facet_row=facet_row, colormap=colormap, show=show + ) elif kind == 'duration_curve': - return self._compare_duration_curve(variable=variable, colormap=colormap, show=show) + return self._compare_duration_curve( + variable=variable, facet_col=facet_col, facet_row=facet_row, colormap=colormap, show=show + ) else: raise ValueError(f"Unknown kind '{kind}'. Use 'timeseries' or 'duration_curve'.") @@ -600,14 +694,41 @@ def _get_time_varying_variables(self) -> list[str]: and not np.isclose(result.original_data[name].min(), result.original_data[name].max()) ] + def _resolve_variables(self, variable: str | list[str] | None) -> list[str]: + """Resolve variable parameter to a list of valid variable names.""" + time_vars = self._get_time_varying_variables() + if not time_vars: + raise ValueError('No time-varying variables found') + + if variable is None: + return time_vars + elif isinstance(variable, str): + if variable not in time_vars: + raise ValueError(f"Variable '{variable}' not found. Available: {time_vars}") + return [variable] + else: + invalid = [v for v in variable if v not in time_vars] + if invalid: + raise ValueError(f'Variables {invalid} not found. 
Available: {time_vars}') + return list(variable) + + def _resolve_facets( + self, ds: xr.Dataset, facet_col: str | None, facet_row: str | None + ) -> tuple[str | None, str | None]: + """Resolve facet dimensions, returning None if not present in data.""" + actual_col = facet_col if facet_col and facet_col in ds.dims else None + actual_row = facet_row if facet_row and facet_row in ds.dims else None + return actual_col, actual_row + def _compare_timeseries( self, - variable: str | None = None, + variable: str | list[str] | None = None, + facet_col: str | None = None, + facet_row: str | None = None, colormap: str | None = None, show: bool | None = None, ): """Compare original vs aggregated as time series.""" - import pandas as pd import plotly.express as px from ..config import CONFIG @@ -617,45 +738,48 @@ def _compare_timeseries( if result.original_data is None or result.aggregated_data is None: raise ValueError('No original/aggregated data available for comparison') - time_vars = self._get_time_varying_variables() - if not time_vars: - raise ValueError('No time-varying variables found') + variables = self._resolve_variables(variable) - if variable is None: - variable = time_vars[0] - elif variable not in time_vars: - raise ValueError(f"Variable '{variable}' not found. Available: {time_vars}") + # Build Dataset with Original/Aggregated for each variable + data_vars = {} + for var in variables: + original = result.original_data[var] + aggregated = result.aggregated_data[var] + expanded = result.expand_data(aggregated) + data_vars[f'{var} (Original)'] = original + data_vars[f'{var} (Aggregated)'] = expanded + ds = xr.Dataset(data_vars) - original = result.original_data[variable] - aggregated = result.aggregated_data[variable] + # Resolve facets + actual_facet_col, actual_facet_row = self._resolve_facets(ds, facet_col, facet_row) - # Expand aggregated to original length - mapping = result.timestep_mapping.values - expanded = aggregated.values[mapping] - - # Build long-form DataFrame for px.line - time_values = original.coords['time'].values - df = pd.DataFrame( - { - 'time': np.tile(time_values, 2), - 'value': np.concatenate([original.values, expanded]), - 'series': ['Original'] * len(time_values) + ['Aggregated'] * len(time_values), - } - ) + # Convert to long-form DataFrame (like _dataset_to_long_df) + df = ds.to_dataframe().reset_index() + coord_cols = [c for c in ds.coords.keys() if c in df.columns] + df = df.melt(id_vars=coord_cols, var_name='series', value_name='value') colormap = colormap or CONFIG.Plotting.default_qualitative_colorscale + title = 'Original vs Aggregated' if len(variables) > 1 else f'Original vs Aggregated: {variables[0]}' + fig = px.line( df, x='time', y='value', color='series', - title=f'Original vs Aggregated: {variable}', + facet_col=actual_facet_col, + facet_row=actual_facet_row, + title=title, color_discrete_sequence=px.colors.qualitative.__dict__.get(colormap, px.colors.qualitative.Plotly), ) - fig.update_traces(selector=dict(name='Original'), line_dash='dash') + # Dash lines for Original series + for trace in fig.data: + if 'Original' in trace.name: + trace.line.dash = 'dash' + if actual_facet_row or actual_facet_col: + fig.update_yaxes(matches=None) + fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1])) - data = xr.Dataset({'original': original, 'aggregated': xr.DataArray(expanded, dims=['time'])}) - plot_result = PlotResult(data=data, figure=fig) + plot_result = PlotResult(data=ds, figure=fig) if show is None: show = 
CONFIG.Plotting.default_show @@ -666,12 +790,13 @@ def _compare_timeseries( def _compare_duration_curve( self, - variable: str | None = None, + variable: str | list[str] | None = None, + facet_col: str | None = None, + facet_row: str | None = None, colormap: str | None = None, show: bool | None = None, ): """Compare original vs aggregated as duration curves.""" - import pandas as pd import plotly.express as px from ..config import CONFIG @@ -681,55 +806,44 @@ def _compare_duration_curve( if result.original_data is None or result.aggregated_data is None: raise ValueError('No original/aggregated data available for comparison') - time_vars = self._get_time_varying_variables() - if not time_vars: - raise ValueError('No time-varying variables found') - - if variable is None: - variable = time_vars[0] - elif variable not in time_vars: - raise ValueError(f"Variable '{variable}' not found. Available: {time_vars}") - - original = result.original_data[variable].values - aggregated = result.aggregated_data[variable].values - - # Expand aggregated to original length - mapping = result.timestep_mapping.values - expanded = aggregated[mapping] - - # Sort both for duration curve - original_sorted = np.sort(original)[::-1] - expanded_sorted = np.sort(expanded)[::-1] - n = len(original_sorted) - - # Build long-form DataFrame for px.line - df = pd.DataFrame( - { - 'rank': np.tile(np.arange(n), 2), - 'value': np.concatenate([original_sorted, expanded_sorted]), - 'series': ['Original'] * n + ['Aggregated'] * n, - } - ) + variables = self._resolve_variables(variable) + + # Build Dataset with sorted values for each variable + data_vars = {} + for var in variables: + original = result.original_data[var] + aggregated = result.aggregated_data[var] + expanded = result.expand_data(aggregated) + # Sort values for duration curve + original_sorted = np.sort(original.values.flatten())[::-1] + expanded_sorted = np.sort(expanded.values.flatten())[::-1] + n = len(original_sorted) + data_vars[f'{var} (Original)'] = xr.DataArray(original_sorted, dims=['rank'], coords={'rank': range(n)}) + data_vars[f'{var} (Aggregated)'] = xr.DataArray(expanded_sorted, dims=['rank'], coords={'rank': range(n)}) + ds = xr.Dataset(data_vars) + + # Convert to long-form DataFrame + df = ds.to_dataframe().reset_index() + coord_cols = [c for c in ds.coords.keys() if c in df.columns] + df = df.melt(id_vars=coord_cols, var_name='series', value_name='value') colormap = colormap or CONFIG.Plotting.default_qualitative_colorscale + title = 'Duration Curve' if len(variables) > 1 else f'Duration Curve: {variables[0]}' + fig = px.line( df, x='rank', y='value', color='series', - title=f'Duration Curve: {variable}', + title=title, labels={'rank': 'Hours (sorted)', 'value': 'Value'}, color_discrete_sequence=px.colors.qualitative.__dict__.get(colormap, px.colors.qualitative.Plotly), ) - fig.update_traces(selector=dict(name='Original'), line_dash='dash') + for trace in fig.data: + if 'Original' in trace.name: + trace.line.dash = 'dash' - data = xr.Dataset( - { - 'original_sorted': xr.DataArray(original_sorted, dims=['rank']), - 'aggregated_sorted': xr.DataArray(expanded_sorted, dims=['rank']), - } - ) - plot_result = PlotResult(data=data, figure=fig) + plot_result = PlotResult(data=ds, figure=fig) if show is None: show = CONFIG.Plotting.default_show @@ -740,8 +854,9 @@ def _compare_duration_curve( def heatmap( self, - variable: str | None = None, + variable: str | list[str] | None = None, colorscale: str | None = None, + facet_col_wrap: int | None = None, show: 
bool | None = None, ): """Plot clustering structure as a heatmap of periods vs timesteps. @@ -751,9 +866,12 @@ def heatmap( grouped by their cluster assignment. Args: - variable: Variable to plot. If None, plots first available variable. + variable: Variable(s) to plot. Can be a string, list of strings, + or None to plot all time-varying variables. colorscale: Colorscale for heatmap. Defaults to CONFIG.Plotting.default_sequential_colorscale. + facet_col_wrap: Max columns before wrapping facets. + Defaults to CONFIG.Plotting.default_facet_cols. show: Whether to display the figure. Defaults to CONFIG.Plotting.default_show. @@ -774,52 +892,77 @@ def heatmap( if not time_vars: raise ValueError('No time-varying variables found') + # Normalize variable to list if variable is None: - variable = time_vars[0] - elif variable not in time_vars: - raise ValueError(f"Variable '{variable}' not found. Available: {time_vars}") + variables = time_vars + elif isinstance(variable, str): + if variable not in time_vars: + raise ValueError(f"Variable '{variable}' not found. Available: {time_vars}") + variables = [variable] + else: + invalid = [v for v in variable if v not in time_vars] + if invalid: + raise ValueError(f'Variables {invalid} not found. Available: {time_vars}') + variables = list(variable) - original = result.original_data[variable].values n_periods = cs.n_original_periods timesteps_per_period = cs.timesteps_per_cluster cluster_order = cs.cluster_order.values - - # Reshape to [periods, timesteps_per_period] - data_matrix = original[: n_periods * timesteps_per_period].reshape(n_periods, timesteps_per_period) - - # Sort periods by cluster for better visualization sorted_indices = np.argsort(cluster_order) - data_sorted = data_matrix[sorted_indices] clusters_sorted = cluster_order[sorted_indices] - - # Create labels showing period and cluster y_labels = [f'P{sorted_indices[i] + 1} (C{clusters_sorted[i]})' for i in range(n_periods)] - # Build DataArray for px.imshow - heatmap_da = xr.DataArray( - data_sorted, - dims=['period', 'timestep'], - coords={'period': y_labels, 'timestep': range(timesteps_per_period)}, - ) + # Build DataArray with variable dimension if multiple + data_vars = {} + if len(variables) == 1: + original = result.original_data[variables[0]].values + data_matrix = original[: n_periods * timesteps_per_period].reshape(n_periods, timesteps_per_period) + data_sorted = data_matrix[sorted_indices] + heatmap_da = xr.DataArray( + data_sorted, + dims=['period', 'timestep'], + coords={'period': y_labels, 'timestep': range(timesteps_per_period)}, + ) + data_vars['heatmap'] = xr.DataArray( + data_sorted, + dims=['period', 'timestep'], + coords={'period': sorted_indices, 'timestep': range(timesteps_per_period)}, + ) + title = f'Clustering Structure: {variables[0]}' + else: + arrays = [] + for var in variables: + original = result.original_data[var].values + data_matrix = original[: n_periods * timesteps_per_period].reshape(n_periods, timesteps_per_period) + data_sorted = data_matrix[sorted_indices] + arrays.append(data_sorted) + data_vars[var] = xr.DataArray( + data_sorted, + dims=['period', 'timestep'], + coords={'period': sorted_indices, 'timestep': range(timesteps_per_period)}, + ) + heatmap_da = xr.DataArray( + np.stack(arrays, axis=0), + dims=['variable', 'period', 'timestep'], + coords={'variable': variables, 'period': y_labels, 'timestep': range(timesteps_per_period)}, + ) + title = 'Clustering Structure' colorscale = colorscale or CONFIG.Plotting.default_sequential_colorscale + 
facet_col_wrap = facet_col_wrap or CONFIG.Plotting.default_facet_cols fig = px.imshow( heatmap_da, color_continuous_scale=colorscale, - title=f'Clustering Structure: {variable}', + facet_col='variable' if len(variables) > 1 else None, + facet_col_wrap=facet_col_wrap if len(variables) > 1 else None, + title=title, labels={'timestep': 'Timestep within period', 'period': 'Period (sorted by cluster)'}, ) + if len(variables) > 1: + fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1])) - data = xr.Dataset( - { - 'heatmap': xr.DataArray( - data_sorted, - dims=['period', 'timestep'], - coords={'period': sorted_indices, 'timestep': range(timesteps_per_period)}, - ), - 'cluster': xr.DataArray(clusters_sorted, dims=['period']), - } - ) + data_vars['cluster'] = xr.DataArray(clusters_sorted, dims=['period']) + data = xr.Dataset(data_vars) plot_result = PlotResult(data=data, figure=fig) if show is None: @@ -831,7 +974,7 @@ def heatmap( def typical_periods( self, - variable: str | None = None, + variable: str | list[str] | None = None, colormap: str | None = None, facet_col_wrap: int | None = None, show: bool | None = None, @@ -842,7 +985,8 @@ def typical_periods( understanding what each cluster represents. Args: - variable: Variable to plot. If None, plots the first available variable. + variable: Variable(s) to plot. Can be a string, list of strings, + or None to plot all time-varying variables. colormap: Colorscale for cluster colors. Defaults to CONFIG.Plotting.default_qualitative_colorscale. facet_col_wrap: Max columns before wrapping facets. @@ -868,51 +1012,61 @@ def typical_periods( if not time_vars: raise ValueError('No time-varying variables found') + # Normalize variable to list if variable is None: - variable = time_vars[0] - elif variable not in time_vars: - raise ValueError(f"Variable '{variable}' not found. Available: {time_vars}") + variables = time_vars + elif isinstance(variable, str): + if variable not in time_vars: + raise ValueError(f"Variable '{variable}' not found. Available: {time_vars}") + variables = [variable] + else: + invalid = [v for v in variable if v not in time_vars] + if invalid: + raise ValueError(f'Variables {invalid} not found. 
Available: {time_vars}') + variables = list(variable) n_clusters = int(cs.n_clusters) if isinstance(cs.n_clusters, (int, np.integer)) else int(cs.n_clusters.values) timesteps_per_cluster = cs.timesteps_per_cluster - data = result.aggregated_data[variable].values - - # Reshape to [n_clusters, timesteps_per_cluster] - data_by_cluster = data.reshape(n_clusters, timesteps_per_cluster) # Build long-form DataFrame with cluster labels including occurrence counts rows = [] - for c in range(n_clusters): - occurrence = int(cs.cluster_occurrences.sel(cluster=c).values) - label = f'Cluster {c} (×{occurrence})' - for t in range(timesteps_per_cluster): - rows.append({'cluster': label, 'timestep': t, 'value': data_by_cluster[c, t]}) + data_vars = {} + for var in variables: + data = result.aggregated_data[var].values + data_by_cluster = data.reshape(n_clusters, timesteps_per_cluster) + data_vars[var] = xr.DataArray( + data_by_cluster, + dims=['cluster', 'timestep'], + coords={'cluster': range(n_clusters), 'timestep': range(timesteps_per_cluster)}, + ) + for c in range(n_clusters): + occurrence = int(cs.cluster_occurrences.sel(cluster=c).values) + label = f'Cluster {c} (×{occurrence})' + for t in range(timesteps_per_cluster): + rows.append({'cluster': label, 'timestep': t, 'value': data_by_cluster[c, t], 'variable': var}) df = pd.DataFrame(rows) colormap = colormap or CONFIG.Plotting.default_qualitative_colorscale facet_col_wrap = facet_col_wrap or CONFIG.Plotting.default_facet_cols + title = 'Typical Periods' if len(variables) > 1 else f'Typical Periods: {variables[0]}' fig = px.line( df, x='timestep', y='value', facet_col='cluster', - facet_col_wrap=facet_col_wrap, - title=f'Typical Periods: {variable}', + facet_row='variable' if len(variables) > 1 else None, + facet_col_wrap=facet_col_wrap if len(variables) == 1 else None, + title=title, color_discrete_sequence=px.colors.qualitative.__dict__.get(colormap, px.colors.qualitative.Plotly), ) fig.update_layout(showlegend=False) + if len(variables) > 1: + fig.update_yaxes(matches=None) + fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1])) - result_data = xr.Dataset( - { - 'typical_periods': xr.DataArray( - data_by_cluster, - dims=['cluster', 'timestep'], - coords={'cluster': range(n_clusters), 'timestep': range(timesteps_per_cluster)}, - ), - 'occurrences': cs.cluster_occurrences, - } - ) + data_vars['occurrences'] = cs.cluster_occurrences + result_data = xr.Dataset(data_vars) plot_result = PlotResult(data=result_data, figure=fig) if show is None: diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index ec6f1319b..00eabe093 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -1064,14 +1064,11 @@ def expand_solution(self) -> FlowSystem: n_original_timesteps = len(original_timesteps) n_reduced_timesteps = n_clusters * timesteps_per_cluster - # Use stored timestep_mapping directly (already multi-dimensional) - timestep_mapping = info.result.timestep_mapping - - # Expand function for DataArrays + # Expand function using ClusterResult.expand_data() - handles multi-dimensional cases def expand_da(da: xr.DataArray) -> xr.DataArray: if 'time' not in da.dims: return da.copy() - return self._expand_dataarray(da, timestep_mapping, original_timesteps, periods, scenarios) + return info.result.expand_data(da, original_time=original_timesteps) # 1. 
Expand FlowSystem data (with cluster_weight set to 1.0 for all timesteps) reduced_ds = self._fs.to_dataset(include_solution=False) @@ -1112,65 +1109,3 @@ def expand_da(da: xr.DataArray) -> xr.DataArray: ) return expanded_fs - - @staticmethod - def _expand_dataarray( - da: xr.DataArray, - timestep_mapping: xr.DataArray, - original_timesteps: pd.DatetimeIndex, - periods: list, - scenarios: list, - ) -> xr.DataArray: - """Expand a DataArray from reduced to original timesteps using cluster mappings. - - Args: - da: DataArray with reduced time dimension. - timestep_mapping: DataArray mapping original timesteps to reduced indices. - dims: [original_time] or [original_time, period?, scenario?] - original_timesteps: Original time coordinates. - periods: List of period labels ([None] if no periods). - scenarios: List of scenario labels ([None] if no scenarios). - - Returns: - DataArray with expanded time dimension. - """ - has_periods = periods != [None] - has_scenarios = scenarios != [None] - - # Simple case: no period/scenario dimensions in the data - if (not has_periods and not has_scenarios) or ('period' not in da.dims and 'scenario' not in da.dims): - mapping = timestep_mapping.values - expanded = da.isel(time=xr.DataArray(mapping, dims=['time'])) - return expanded.assign_coords(time=original_timesteps).assign_attrs(da.attrs) - - # Multi-dimensional: expand each (period, scenario) slice and recombine - expanded_slices: dict[tuple, xr.DataArray] = {} - for p in periods: - for s in scenarios: - # Get mapping for this (period, scenario) slice - mapping_slice = timestep_mapping - if p is not None and 'period' in timestep_mapping.dims: - mapping_slice = mapping_slice.sel(period=p) - if s is not None and 'scenario' in timestep_mapping.dims: - mapping_slice = mapping_slice.sel(scenario=s) - mapping = mapping_slice.values - - # Select the data slice for this (period, scenario) combination - selector = {} - if p is not None and 'period' in da.dims: - selector['period'] = p - if s is not None and 'scenario' in da.dims: - selector['scenario'] = s - - slice_da = da.sel(**selector, drop=True) if selector else da - expanded = slice_da.isel(time=xr.DataArray(mapping, dims=['time'])) - expanded_slices[(p, s)] = expanded.assign_coords(time=original_timesteps) - - # Recombine slices using _combine_slices_to_dataarray - return TransformAccessor._combine_slices_to_dataarray( - slices=expanded_slices, - original_da=da, - new_time_index=original_timesteps, - periods=periods, - scenarios=scenarios, - ) From 5319077e939ea858c4a1202a010d44075ece0e6a Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 10:49:40 +0100 Subject: [PATCH 088/191] Update notebooks --- .../08d-clustering-multiperiod.ipynb | 158 ++++++++---------- docs/notebooks/08e-clustering-internals.ipynb | 140 +++++++++++++--- 2 files changed, 195 insertions(+), 103 deletions(-) diff --git a/docs/notebooks/08d-clustering-multiperiod.ipynb b/docs/notebooks/08d-clustering-multiperiod.ipynb index ff972164a..7d6b5dfaa 100644 --- a/docs/notebooks/08d-clustering-multiperiod.ipynb +++ b/docs/notebooks/08d-clustering-multiperiod.ipynb @@ -33,8 +33,6 @@ "import numpy as np\n", "import pandas as pd\n", "import plotly.express as px\n", - "import plotly.graph_objects as go\n", - "from plotly.subplots import make_subplots\n", "\n", "import flixopt as fx\n", "\n", @@ -227,6 +225,52 @@ "cell_type": "markdown", "id": "12", "metadata": {}, + "source": [ + "## Visualize Clustering Quality\n", + "\n", + "The `.plot` 
accessor provides built-in visualizations with automatic faceting by period and scenario:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "# Compare original vs aggregated data - automatically faceted by period and scenario\n", + "fs_clustered.clustering.plot.compare(variable='Building(Heat)|fixed_relative_profile')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "# Duration curves show how well the distribution is preserved per period/scenario\n", + "fs_clustered.clustering.plot.compare(\n", + " variable='Building(Heat)|fixed_relative_profile',\n", + " kind='duration_curve',\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "# Heatmap shows cluster assignments - faceted by period and scenario\n", + "fs_clustered.clustering.plot.heatmap()" + ] + }, + { + "cell_type": "markdown", + "id": "16", + "metadata": {}, "source": [ "## Understand the Cluster Structure\n", "\n", @@ -236,7 +280,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -264,7 +308,7 @@ }, { "cell_type": "markdown", - "id": "14", + "id": "18", "metadata": {}, "source": [ "## Two-Stage Workflow for Multi-Period\n", @@ -280,7 +324,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -301,7 +345,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -325,7 +369,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "21", "metadata": {}, "source": [ "## Compare Results Across Methods" @@ -334,7 +378,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -379,57 +423,28 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "23", "metadata": {}, "source": [ - "## Visualize Results by Period and Scenario" + "## Visualize Optimization Results\n", + "\n", + "Use the built-in statistics plotting to compare results across periods and scenarios:" ] }, { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "24", "metadata": {}, "outputs": [], "source": [ - "# Plot heat balance for one period/scenario combination\n", - "period = 2024\n", - "scenario = 'high_demand'\n", - "\n", - "fig = make_subplots(\n", - " rows=2,\n", - " cols=1,\n", - " shared_xaxes=True,\n", - " subplot_titles=['Full Optimization', 'Clustered Optimization'],\n", - " vertical_spacing=0.12,\n", - ")\n", - "\n", - "for i, (fs, title) in enumerate([(fs_full, 'Full'), (fs_clustered, 'Clustered')], 1):\n", - " ts = fs.timesteps\n", - " data = fs.solution['Boiler(Heat)|flow_rate'].sel(period=period, scenario=scenario).values\n", - " fig.add_trace(\n", - " go.Scatter(\n", - " x=ts,\n", - " y=data,\n", - " name=f'Boiler ({title})',\n", - " line=dict(width=1),\n", - " ),\n", - " row=i,\n", - " col=1,\n", - " )\n", - "\n", - "fig.update_layout(\n", - " height=500,\n", - " title=f'Boiler Output: Period {period}, Scenario {scenario}',\n", - ")\n", - "fig.update_yaxes(title_text='kW', row=1, col=1)\n", - "fig.update_yaxes(title_text='kW', row=2, col=1)\n", - "fig.show()" + "# Plot flow rates with automatic faceting by period and scenario\n", + "fs_full.statistics.plot.flows('Boiler(Heat)')" ] }, { "cell_type": "markdown", - "id": 
"21", + "id": "25", "metadata": {}, "source": [ "## Expand Clustered Solution to Full Resolution\n", @@ -440,7 +455,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "26", "metadata": {}, "outputs": [], "source": [ @@ -454,48 +469,17 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "27", "metadata": {}, "outputs": [], "source": [ - "# Compare expanded vs full resolution\n", - "period = 2025\n", - "scenario = 'low_demand'\n", - "\n", - "fig = make_subplots(\n", - " rows=2,\n", - " cols=1,\n", - " shared_xaxes=True,\n", - " subplot_titles=['Full Optimization', 'Expanded from Clustering'],\n", - " vertical_spacing=0.12,\n", - ")\n", - "\n", - "for i, (fs, title) in enumerate([(fs_full, 'Full'), (fs_expanded, 'Expanded')], 1):\n", - " data = fs.solution['Boiler(Heat)|flow_rate'].sel(period=period, scenario=scenario).values\n", - " fig.add_trace(\n", - " go.Scatter(\n", - " x=fs.timesteps,\n", - " y=data,\n", - " name=title,\n", - " line=dict(width=1),\n", - " showlegend=True,\n", - " ),\n", - " row=i,\n", - " col=1,\n", - " )\n", - "\n", - "fig.update_layout(\n", - " height=450,\n", - " title=f'Boiler Output: Period {period}, Scenario {scenario}',\n", - ")\n", - "fig.update_yaxes(title_text='kW', row=1, col=1)\n", - "fig.update_yaxes(title_text='kW', row=2, col=1)\n", - "fig.show()" + "# Compare expanded solution - shows the repeated cluster patterns\n", + "fs_expanded.statistics.plot.flows('Boiler(Heat)')" ] }, { "cell_type": "markdown", - "id": "24", + "id": "28", "metadata": {}, "source": [ "## Key Considerations for Multi-Period Clustering\n", @@ -529,7 +513,7 @@ }, { "cell_type": "markdown", - "id": "25", + "id": "29", "metadata": {}, "source": [ "## Summary\n", @@ -539,6 +523,7 @@ "- Load **multi-period systems** with periods and scenarios\n", "- Use **`transform.isel()`** to select time subsets\n", "- Apply **`cluster()`** to multi-dimensional FlowSystems\n", + "- **Visualize clustering** with the `.plot` accessor (compare, duration curves, heatmaps)\n", "- Use the **two-stage workflow** for robust investment optimization\n", "- **Expand solutions** back to full resolution with `expand_solution()`\n", "\n", @@ -548,6 +533,7 @@ "2. **Investments are shared**: Component sizes are optimized once across all periods/scenarios\n", "3. **Use larger safety margins**: Multi-period uncertainty warrants 10-15% buffers\n", "4. **Two-stage is recommended**: Fast sizing with clustering, accurate dispatch at full resolution\n", + "5. 
**Built-in plotting**: Use `.plot` accessor for automatic faceting by period/scenario\n", "\n", "### API Reference\n", "\n", @@ -565,14 +551,18 @@ " time_series_for_high_peaks=['Demand(Flow)|fixed_relative_profile'],\n", ")\n", "\n", + "# Visualize clustering quality\n", + "fs_clustered.clustering.plot.compare(variable='Demand(Flow)|profile')\n", + "fs_clustered.clustering.plot.heatmap()\n", + "\n", "# Two-stage workflow\n", "fs_clustered.optimize(solver)\n", "sizes = {k: v.max().item() * 1.10 for k, v in fs_clustered.statistics.sizes.items()}\n", "fs_dispatch = fs.transform.fix_sizes(sizes)\n", "fs_dispatch.optimize(solver)\n", "\n", - "# Access results\n", - "print(fs_dispatch.solution['objective'].item()) # Total weighted cost\n", + "# Visualize results\n", + "fs_dispatch.statistics.plot.flows('Boiler(Heat)')\n", "```" ] } diff --git a/docs/notebooks/08e-clustering-internals.ipynb b/docs/notebooks/08e-clustering-internals.ipynb index 3e4d25ecb..ab8e27b2b 100644 --- a/docs/notebooks/08e-clustering-internals.ipynb +++ b/docs/notebooks/08e-clustering-internals.ipynb @@ -7,7 +7,13 @@ "source": [ "# Clustering Internals\n", "\n", - "Understanding the data structures behind time series clustering.\n", + "Understanding the data structures and visualization tools behind time series clustering.\n", + "\n", + "This notebook demonstrates:\n", + "\n", + "- **Data structures**: `Clustering`, `ClusterResult`, and `ClusterStructure`\n", + "- **Plot accessor**: Built-in visualizations via `.plot`\n", + "- **Data expansion**: Using `expand_data()` to map aggregated data back to original timesteps\n", "\n", "!!! note \"Prerequisites\"\n", " This notebook assumes familiarity with [08c-clustering](08c-clustering.ipynb)." @@ -42,7 +48,7 @@ "id": "2", "metadata": {}, "source": [ - "## Clustering and Clustering\n", + "## Clustering Metadata\n", "\n", "After calling `cluster()`, metadata is stored in `fs.clustering`:" ] @@ -100,7 +106,7 @@ "source": [ "## Visualizing Clustering\n", "\n", - "Built-in plot methods show how original periods map to clusters:" + "The `.plot` accessor provides built-in visualizations for understanding clustering results." 
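For readers following along outside the notebook: the long-form reshaping that powers these comparison plots (xarray Dataset → tidy DataFrame → `px.line`) looks roughly like this; the series names and values are illustrative only, mirroring the pattern in the `_compare_timeseries` implementation:

```python
import pandas as pd
import plotly.express as px
import xarray as xr

# Two aligned series on a shared time axis, as compare() assembles them.
ds = xr.Dataset(
    {'Original': ('time', [1.0, 2.0, 3.0]), 'Aggregated': ('time', [1.0, 1.5, 3.0])},
    coords={'time': pd.date_range('2020-01-01', periods=3, freq='h')},
)
# Melt to long form so plotly can color by series and facet by coords.
df = ds.to_dataframe().reset_index().melt(id_vars='time', var_name='series', value_name='value')
fig = px.line(df, x='time', y='value', color='series')
```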
] }, { @@ -110,8 +116,9 @@ "metadata": {}, "outputs": [], "source": [ - "# Which original period belongs to which cluster?\n", - "fs_clustered.clustering.result.cluster_structure.plot()" + "# Compare original vs aggregated data as timeseries\n", + "# By default, plots all time-varying variables\n", + "fs_clustered.clustering.plot.compare()" ] }, { @@ -121,8 +128,8 @@ "metadata": {}, "outputs": [], "source": [ - "# What does each cluster's typical profile look like?\n", - "fs_clustered.clustering.plot_typical_periods('HeatDemand(Q_th)|fixed_relative_profile')" + "# Compare specific variables only\n", + "fs_clustered.clustering.plot.compare(variable='HeatDemand(Q_th)|fixed_relative_profile')" ] }, { @@ -132,14 +139,89 @@ "metadata": {}, "outputs": [], "source": [ - "# How well does the aggregated data match the original?\n", - "fs_clustered.clustering.plot()" + "# Duration curves show how well the aggregated data preserves the distribution\n", + "fs_clustered.clustering.plot.compare(kind='duration_curve').data" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "id": "11", "metadata": {}, + "outputs": [], + "source": [ + "# View typical period profiles for each cluster\n", + "# Each line represents a cluster's representative day\n", + "fs_clustered.clustering.plot.typical_periods(variable='HeatDemand(Q_th)|fixed_relative_profile')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "# Heatmap shows which original period belongs to which cluster\n", + "# Rows are original periods (days), columns show cluster assignment\n", + "fs_clustered.clustering.plot.heatmap()" + ] + }, + { + "cell_type": "markdown", + "id": "13", + "metadata": {}, + "source": [ + "## Expanding Aggregated Data\n", + "\n", + "The `ClusterResult.expand_data()` method maps aggregated data back to original timesteps.\n", + "This is useful for comparing clustering results before optimization:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "import plotly.express as px\n", + "\n", + "# Get original and aggregated data\n", + "result = fs_clustered.clustering.result\n", + "original = result.original_data['HeatDemand(Q_th)|fixed_relative_profile']\n", + "aggregated = result.aggregated_data['HeatDemand(Q_th)|fixed_relative_profile']\n", + "\n", + "# Expand aggregated data back to original timesteps\n", + "expanded = result.expand_data(aggregated)\n", + "\n", + "print(f'Original: {len(original.time)} timesteps')\n", + "print(f'Aggregated: {len(aggregated.time)} timesteps')\n", + "print(f'Expanded: {len(expanded.time)} timesteps')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "# Plot original vs expanded (reconstructed from clusters)\n", + "import xarray as xr\n", + "\n", + "ds = xr.Dataset({'Original': original, 'Expanded': expanded})\n", + "df = ds.to_dataframe().reset_index().melt(id_vars='time', var_name='series', value_name='value')\n", + "\n", + "fig = px.line(df, x='time', y='value', color='series', title='Original vs Expanded Heat Demand')\n", + "fig.update_layout(height=350)\n", + "fig.show()" + ] + }, + { + "cell_type": "markdown", + "id": "16", + "metadata": {}, "source": [ "## Cluster Weights\n", "\n", @@ -154,7 +236,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -164,7 
+246,7 @@ }, { "cell_type": "markdown", - "id": "13", + "id": "18", "metadata": {}, "source": [ "## Solution Expansion\n", @@ -175,7 +257,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -190,7 +272,7 @@ }, { "cell_type": "markdown", - "id": "15", + "id": "20", "metadata": {}, "source": [ "## Summary\n", @@ -198,13 +280,33 @@ "| Class | Purpose |\n", "|-------|--------|\n", "| `Clustering` | Stored on `fs.clustering` after `cluster()` |\n", - "| `ClusterResult` | Contains timestep mapping and weights |\n", + "| `ClusterResult` | Contains timestep mapping, weights, and `expand_data()` method |\n", "| `ClusterStructure` | Maps original periods to clusters |\n", "\n", - "**Key methods:**\n", - "- `cluster_structure.plot()` - visualize cluster assignments\n", - "- `clustering.plot()` - compare original vs aggregated data\n", - "- `clustering.plot_typical_periods()` - view each cluster's profile" + "### Plot Accessor Methods\n", + "\n", + "| Method | Description |\n", + "|--------|-------------|\n", + "| `plot.compare()` | Compare original vs aggregated data (timeseries) |\n", + "| `plot.compare(kind='duration_curve')` | Compare as duration curves |\n", + "| `plot.typical_periods()` | View each cluster's profile |\n", + "| `plot.heatmap()` | Visualize clustering structure |\n", + "\n", + "### Key Parameters\n", + "\n", + "```python\n", + "# Compare with options\n", + "clustering.plot.compare(\n", + " variable='Demand|profile', # Single variable, list, or None (all)\n", + " kind='timeseries', # 'timeseries' or 'duration_curve'\n", + " facet_col='scenario', # Facet by scenario if present\n", + " facet_row='period', # Facet by period if present\n", + ")\n", + "\n", + "# Expand aggregated data to original timesteps\n", + "result = clustering.result\n", + "expanded = result.expand_data(aggregated_data)\n", + "```" ] } ], From 259e02baf5ca07723c2d9b031747ce2dcbb9745c Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 11:25:41 +0100 Subject: [PATCH 089/191] Update plotting --- .../08d-clustering-multiperiod.ipynb | 6 +- docs/notebooks/08e-clustering-internals.ipynb | 88 ++++------ flixopt/aggregation/base.py | 151 +++++++++--------- 3 files changed, 114 insertions(+), 131 deletions(-) diff --git a/docs/notebooks/08d-clustering-multiperiod.ipynb b/docs/notebooks/08d-clustering-multiperiod.ipynb index 7d6b5dfaa..e467522eb 100644 --- a/docs/notebooks/08d-clustering-multiperiod.ipynb +++ b/docs/notebooks/08d-clustering-multiperiod.ipynb @@ -439,7 +439,7 @@ "outputs": [], "source": [ "# Plot flow rates with automatic faceting by period and scenario\n", - "fs_full.statistics.plot.flows('Boiler(Heat)')" + "fs_full.statistics.plot.flows(component='Boiler')" ] }, { @@ -474,7 +474,7 @@ "outputs": [], "source": [ "# Compare expanded solution - shows the repeated cluster patterns\n", - "fs_expanded.statistics.plot.flows('Boiler(Heat)')" + "fs_expanded.statistics.plot.flows(component='Boiler')" ] }, { @@ -562,7 +562,7 @@ "fs_dispatch.optimize(solver)\n", "\n", "# Visualize results\n", - "fs_dispatch.statistics.plot.flows('Boiler(Heat)')\n", + "fs_dispatch.statistics.plot.flows(component='Boiler')\n", "```" ] } diff --git a/docs/notebooks/08e-clustering-internals.ipynb b/docs/notebooks/08e-clustering-internals.ipynb index ab8e27b2b..263e84b23 100644 --- a/docs/notebooks/08e-clustering-internals.ipynb +++ b/docs/notebooks/08e-clustering-internals.ipynb @@ -162,8 +162,7 @@ "metadata": 
{}, "outputs": [], "source": [ - "# Heatmap shows which original period belongs to which cluster\n", - "# Rows are original periods (days), columns show cluster assignment\n", + "# Heatmap shows cluster assignments for each original period\n", "fs_clustered.clustering.plot.heatmap()" ] }, @@ -185,8 +184,6 @@ "metadata": {}, "outputs": [], "source": [ - "import plotly.express as px\n", - "\n", "# Get original and aggregated data\n", "result = fs_clustered.clustering.result\n", "original = result.original_data['HeatDemand(Q_th)|fixed_relative_profile']\n", @@ -201,21 +198,45 @@ ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "id": "15", "metadata": {}, - "outputs": [], "source": [ - "# Plot original vs expanded (reconstructed from clusters)\n", - "import xarray as xr\n", + "## Summary\n", + "\n", + "| Class | Purpose |\n", + "|-------|--------|\n", + "| `Clustering` | Stored on `fs.clustering` after `cluster()` |\n", + "| `ClusterResult` | Contains timestep mapping, weights, and `expand_data()` method |\n", + "| `ClusterStructure` | Maps original periods to clusters |\n", + "\n", + "### Plot Accessor Methods\n", + "\n", + "| Method | Description |\n", + "|--------|-------------|\n", + "| `plot.compare()` | Compare original vs aggregated data (timeseries) |\n", + "| `plot.compare(kind='duration_curve')` | Compare as duration curves |\n", + "| `plot.typical_periods()` | View each cluster's profile |\n", + "| `plot.heatmap()` | Visualize cluster assignments |\n", + "\n", + "### Key Parameters\n", + "\n", + "```python\n", + "# Compare with options\n", + "clustering.plot.compare(\n", + " variable='Demand|profile', # Single variable, list, or None (all)\n", + " kind='timeseries', # 'timeseries' or 'duration_curve'\n", + " facet_col='scenario', # Facet by scenario if present\n", + " facet_row='period', # Facet by period if present\n", + ")\n", "\n", - "ds = xr.Dataset({'Original': original, 'Expanded': expanded})\n", - "df = ds.to_dataframe().reset_index().melt(id_vars='time', var_name='series', value_name='value')\n", + "# Heatmap shows cluster assignments (no variable needed)\n", + "clustering.plot.heatmap()\n", "\n", - "fig = px.line(df, x='time', y='value', color='series', title='Original vs Expanded Heat Demand')\n", - "fig.update_layout(height=350)\n", - "fig.show()" + "# Expand aggregated data to original timesteps\n", + "result = clustering.result\n", + "expanded = result.expand_data(aggregated_data)\n", + "```" ] }, { @@ -269,45 +290,6 @@ "print(f'Clustered: {len(fs_clustered.timesteps)} timesteps')\n", "print(f'Expanded: {len(fs_expanded.timesteps)} timesteps')" ] - }, - { - "cell_type": "markdown", - "id": "20", - "metadata": {}, - "source": [ - "## Summary\n", - "\n", - "| Class | Purpose |\n", - "|-------|--------|\n", - "| `Clustering` | Stored on `fs.clustering` after `cluster()` |\n", - "| `ClusterResult` | Contains timestep mapping, weights, and `expand_data()` method |\n", - "| `ClusterStructure` | Maps original periods to clusters |\n", - "\n", - "### Plot Accessor Methods\n", - "\n", - "| Method | Description |\n", - "|--------|-------------|\n", - "| `plot.compare()` | Compare original vs aggregated data (timeseries) |\n", - "| `plot.compare(kind='duration_curve')` | Compare as duration curves |\n", - "| `plot.typical_periods()` | View each cluster's profile |\n", - "| `plot.heatmap()` | Visualize clustering structure |\n", - "\n", - "### Key Parameters\n", - "\n", - "```python\n", - "# Compare with options\n", - "clustering.plot.compare(\n", 
- " variable='Demand|profile', # Single variable, list, or None (all)\n", - " kind='timeseries', # 'timeseries' or 'duration_curve'\n", - " facet_col='scenario', # Facet by scenario if present\n", - " facet_row='period', # Facet by period if present\n", - ")\n", - "\n", - "# Expand aggregated data to original timesteps\n", - "result = clustering.result\n", - "expanded = result.expand_data(aggregated_data)\n", - "```" - ] } ], "metadata": { diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index 4e12c0072..c2eeaac4c 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -854,30 +854,30 @@ def _compare_duration_curve( def heatmap( self, - variable: str | list[str] | None = None, colorscale: str | None = None, - facet_col_wrap: int | None = None, + facet_col: str | None = 'scenario', + facet_row: str | None = 'period', show: bool | None = None, ): - """Plot clustering structure as a heatmap of periods vs timesteps. + """Plot cluster assignments as a heatmap. + + Shows which cluster each original period belongs to. Rows are original + periods, color indicates cluster assignment. - Shows the original data organized by periods (rows) and timesteps within - each period (columns), with color indicating the value. Periods are - grouped by their cluster assignment. + For multi-period/scenario data, creates faceted subplots. Args: - variable: Variable(s) to plot. Can be a string, list of strings, - or None to plot all time-varying variables. colorscale: Colorscale for heatmap. Defaults to CONFIG.Plotting.default_sequential_colorscale. - facet_col_wrap: Max columns before wrapping facets. - Defaults to CONFIG.Plotting.default_facet_cols. + facet_col: Dimension to facet on columns ('scenario', 'period', or None). + facet_row: Dimension to facet on rows ('period', 'scenario', or None). show: Whether to display the figure. Defaults to CONFIG.Plotting.default_show. Returns: PlotResult containing the heatmap figure and underlying data. """ + import pandas as pd import plotly.express as px from ..config import CONFIG @@ -885,85 +885,86 @@ def heatmap( result = self._clustering.result cs = result.cluster_structure - if result.original_data is None or cs is None: - raise ValueError('No original data or cluster structure available') + if cs is None: + raise ValueError('No cluster structure available') - time_vars = self._get_time_varying_variables() - if not time_vars: - raise ValueError('No time-varying variables found') + cluster_order_da = cs.cluster_order - # Normalize variable to list - if variable is None: - variables = time_vars - elif isinstance(variable, str): - if variable not in time_vars: - raise ValueError(f"Variable '{variable}' not found. Available: {time_vars}") - variables = [variable] - else: - invalid = [v for v in variable if v not in time_vars] - if invalid: - raise ValueError(f'Variables {invalid} not found. 
Available: {time_vars}') - variables = list(variable) + # Check for multi-dimensional data + has_periods = 'period' in cluster_order_da.dims + has_scenarios = 'scenario' in cluster_order_da.dims - n_periods = cs.n_original_periods - timesteps_per_period = cs.timesteps_per_cluster - cluster_order = cs.cluster_order.values - sorted_indices = np.argsort(cluster_order) - clusters_sorted = cluster_order[sorted_indices] - y_labels = [f'P{sorted_indices[i] + 1} (C{clusters_sorted[i]})' for i in range(n_periods)] + # Resolve facets - only apply if dimension exists + actual_facet_col = facet_col if facet_col and has_scenarios and facet_col == 'scenario' else None + actual_facet_row = facet_row if facet_row and has_periods and facet_row == 'period' else None - # Build DataArray with variable dimension if multiple - data_vars = {} - if len(variables) == 1: - original = result.original_data[variables[0]].values - data_matrix = original[: n_periods * timesteps_per_period].reshape(n_periods, timesteps_per_period) - data_sorted = data_matrix[sorted_indices] - heatmap_da = xr.DataArray( - data_sorted, - dims=['period', 'timestep'], - coords={'period': y_labels, 'timestep': range(timesteps_per_period)}, + # Get dimension values + periods = list(cluster_order_da.coords['period'].values) if has_periods else [None] + scenarios = list(cluster_order_da.coords['scenario'].values) if has_scenarios else [None] + + # Build heatmap DataArray for each (period, scenario) slice + # Each slice is a 2D array with shape (n_original_periods, 1) showing cluster assignment + heatmap_slices: dict[tuple, xr.DataArray] = {} + for p in periods: + for s in scenarios: + cluster_order = cs.get_cluster_order_for_slice(period=p, scenario=s) + n_original_periods = len(cluster_order) + + # Create 2D array for heatmap (periods x 1 column for cluster) + heatmap_slices[(p, s)] = xr.DataArray( + cluster_order.reshape(-1, 1), + dims=['original_period', 'x'], + coords={'original_period': [f'P{i + 1}' for i in range(n_original_periods)], 'x': ['Cluster']}, + ) + + colorscale = colorscale or CONFIG.Plotting.default_sequential_colorscale + + # Combine slices into multi-dimensional DataArray if needed + if has_periods and has_scenarios: + # Create a combined facet dimension for px.imshow (only supports facet_col) + combined_slices = [] + facet_labels = [] + for p in periods: + for s in scenarios: + combined_slices.append(heatmap_slices[(p, s)]) + facet_labels.append(f'{p} / {s}') + heatmap_da = xr.concat(combined_slices, dim=pd.Index(facet_labels, name='facet')) + facet_dim = 'facet' + elif has_periods: + heatmap_da = xr.concat( + [heatmap_slices[(p, None)] for p in periods], + dim=pd.Index(periods, name='period'), ) - data_vars['heatmap'] = xr.DataArray( - data_sorted, - dims=['period', 'timestep'], - coords={'period': sorted_indices, 'timestep': range(timesteps_per_period)}, + facet_dim = 'period' if actual_facet_row or actual_facet_col else None + elif has_scenarios: + heatmap_da = xr.concat( + [heatmap_slices[(None, s)] for s in scenarios], + dim=pd.Index(scenarios, name='scenario'), ) - title = f'Clustering Structure: {variables[0]}' + facet_dim = 'scenario' if actual_facet_col else None else: - arrays = [] - for var in variables: - original = result.original_data[var].values - data_matrix = original[: n_periods * timesteps_per_period].reshape(n_periods, timesteps_per_period) - data_sorted = data_matrix[sorted_indices] - arrays.append(data_sorted) - data_vars[var] = xr.DataArray( - data_sorted, - dims=['period', 'timestep'], - 
coords={'period': sorted_indices, 'timestep': range(timesteps_per_period)}, - ) - heatmap_da = xr.DataArray( - np.stack(arrays, axis=0), - dims=['variable', 'period', 'timestep'], - coords={'variable': variables, 'period': y_labels, 'timestep': range(timesteps_per_period)}, - ) - title = 'Clustering Structure' + heatmap_da = heatmap_slices[(None, None)] + facet_dim = None - colorscale = colorscale or CONFIG.Plotting.default_sequential_colorscale - facet_col_wrap = facet_col_wrap or CONFIG.Plotting.default_facet_cols + # Use px.imshow with xr.DataArray fig = px.imshow( heatmap_da, color_continuous_scale=colorscale, - facet_col='variable' if len(variables) > 1 else None, - facet_col_wrap=facet_col_wrap if len(variables) > 1 else None, - title=title, - labels={'timestep': 'Timestep within period', 'period': 'Period (sorted by cluster)'}, + facet_col=facet_dim, + title='Cluster Assignments', + labels={'x': '', 'original_period': 'Original Period', 'color': 'Cluster'}, + aspect='auto', ) - if len(variables) > 1: + + # Clean up facet labels + if facet_dim: fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1])) - data_vars['cluster'] = xr.DataArray(clusters_sorted, dims=['period']) - data = xr.Dataset(data_vars) - plot_result = PlotResult(data=data, figure=fig) + # Hide x-axis since it's just a single "Cluster" column + fig.update_xaxes(showticklabels=False) + + # Build data for PlotResult + plot_result = PlotResult(data=xr.Dataset({'cluster_assignments': heatmap_da}), figure=fig) if show is None: show = CONFIG.Plotting.default_show From cf512bc434ea0828b14b12885170fdf1c05eca02 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 11:34:27 +0100 Subject: [PATCH 090/191] Parameter Naming (Aligned with StatisticsPlotAccessor) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit | Old | New | |---------------------|-----------| | variable | variables | | colormap/colorscale | colors | New Parameters Added All methods now have: - select: SelectType | None = None - xarray-style selection - **plotly_kwargs: Any - passthrough to Plotly Facet Defaults Aligned - facet_col: 'scenario' → 'period' - facet_row: 'period' → 'scenario' Color Type Enhanced - From: str | None - To: ColorType (str | list[str] | dict[str, str] | None) Method Renamed - typical_periods() → clusters() Methods Updated 1. compare() - new signature with variables, select, colors, **plotly_kwargs 2. heatmap() - new signature with select, colors, **plotly_kwargs 3. clusters() (renamed) - new signature with variables, select, colors, **plotly_kwargs Internal Helpers Updated - _resolve_variables() - parameter renamed - _compare_timeseries() - full API update with process_colors and _apply_selection - _compare_duration_curve() - full API update Note: The ClusterResult.plot_typical_periods() method (a different class) was left unchanged as it's a lower-level API. 
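An illustrative before/after for the renames listed above, assuming a clustered FlowSystem named `fs_clustered` as in the notebooks; `'Demand|profile'` and `'Base Case'` are placeholder labels taken from the docstrings:

```python
# Old API (pre-rename):
fs_clustered.clustering.plot.compare(variable='Demand|profile', colormap='Plotly')
fs_clustered.clustering.plot.typical_periods(variable='Demand|profile')

# New API: 'variables' takes str | list[str] | None, 'colors' takes a
# colorscale name, color list, or label->color dict, and the method is
# renamed to clusters(); select= and **plotly_kwargs are newly available.
fs_clustered.clustering.plot.compare(
    variables=['Demand|profile'],
    colors='Plotly',
    select={'scenario': 'Base Case'},
)
fs_clustered.clustering.plot.clusters(variables='Demand|profile')
```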
--- flixopt/aggregation/base.py | 252 +++++++++++++++++++++++++----------- 1 file changed, 178 insertions(+), 74 deletions(-) diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index c2eeaac4c..642596594 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -18,10 +18,15 @@ from __future__ import annotations from dataclasses import dataclass +from typing import TYPE_CHECKING, Any import numpy as np import xarray as xr +if TYPE_CHECKING: + from ..color_processing import ColorType + from ..statistics_accessor import SelectType + @dataclass class ClusterStructure: @@ -638,7 +643,7 @@ class ClusteringPlotAccessor: >>> fs_clustered.clustering.plot.compare() # timeseries comparison >>> fs_clustered.clustering.plot.compare(kind='duration_curve') # duration curve >>> fs_clustered.clustering.plot.heatmap() # structure visualization - >>> fs_clustered.clustering.plot.typical_periods() # cluster profiles + >>> fs_clustered.clustering.plot.clusters() # cluster profiles """ def __init__(self, clustering: Clustering): @@ -647,11 +652,14 @@ def __init__(self, clustering: Clustering): def compare( self, kind: str = 'timeseries', - variable: str | list[str] | None = None, - facet_col: str | None = 'scenario', - facet_row: str | None = 'period', - colormap: str | None = None, + variables: str | list[str] | None = None, + *, + select: SelectType | None = None, + colors: ColorType | None = None, + facet_col: str | None = 'period', + facet_row: str | None = 'scenario', show: bool | None = None, + **plotly_kwargs: Any, ): """Compare original vs aggregated data. @@ -659,25 +667,38 @@ def compare( kind: Type of comparison plot. - 'timeseries': Time series comparison (default) - 'duration_curve': Sorted duration curve comparison - variable: Variable(s) to plot. Can be a string, list of strings, + variables: Variable(s) to plot. Can be a string, list of strings, or None to plot all time-varying variables. - facet_col: Dimension for subplot columns (default: 'scenario' if present). - facet_row: Dimension for subplot rows (default: 'period' if present). - colormap: Colorscale name for the colors. - Defaults to CONFIG.Plotting.default_qualitative_colorscale. + select: xarray-style selection dict, e.g. {'scenario': 'Base Case'}. + colors: Color specification (colorscale name, color list, or label-to-color dict). + facet_col: Dimension for subplot columns (default: 'period'). + facet_row: Dimension for subplot rows (default: 'scenario'). show: Whether to display the figure. Defaults to CONFIG.Plotting.default_show. + **plotly_kwargs: Additional arguments passed to plotly. Returns: PlotResult containing the comparison figure and underlying data. """ if kind == 'timeseries': return self._compare_timeseries( - variable=variable, facet_col=facet_col, facet_row=facet_row, colormap=colormap, show=show + variables=variables, + select=select, + colors=colors, + facet_col=facet_col, + facet_row=facet_row, + show=show, + **plotly_kwargs, ) elif kind == 'duration_curve': return self._compare_duration_curve( - variable=variable, facet_col=facet_col, facet_row=facet_row, colormap=colormap, show=show + variables=variables, + select=select, + colors=colors, + facet_col=facet_col, + facet_row=facet_row, + show=show, + **plotly_kwargs, ) else: raise ValueError(f"Unknown kind '{kind}'. 
Use 'timeseries' or 'duration_curve'.") @@ -694,23 +715,23 @@ def _get_time_varying_variables(self) -> list[str]: and not np.isclose(result.original_data[name].min(), result.original_data[name].max()) ] - def _resolve_variables(self, variable: str | list[str] | None) -> list[str]: - """Resolve variable parameter to a list of valid variable names.""" + def _resolve_variables(self, variables: str | list[str] | None) -> list[str]: + """Resolve variables parameter to a list of valid variable names.""" time_vars = self._get_time_varying_variables() if not time_vars: raise ValueError('No time-varying variables found') - if variable is None: + if variables is None: return time_vars - elif isinstance(variable, str): - if variable not in time_vars: - raise ValueError(f"Variable '{variable}' not found. Available: {time_vars}") - return [variable] + elif isinstance(variables, str): + if variables not in time_vars: + raise ValueError(f"Variable '{variables}' not found. Available: {time_vars}") + return [variables] else: - invalid = [v for v in variable if v not in time_vars] + invalid = [v for v in variables if v not in time_vars] if invalid: raise ValueError(f'Variables {invalid} not found. Available: {time_vars}') - return list(variable) + return list(variables) def _resolve_facets( self, ds: xr.Dataset, facet_col: str | None, facet_row: str | None @@ -722,27 +743,32 @@ def _resolve_facets( def _compare_timeseries( self, - variable: str | list[str] | None = None, + variables: str | list[str] | None = None, + *, + select: SelectType | None = None, + colors: ColorType | None = None, facet_col: str | None = None, facet_row: str | None = None, - colormap: str | None = None, show: bool | None = None, + **plotly_kwargs: Any, ): """Compare original vs aggregated as time series.""" import plotly.express as px + from ..color_processing import process_colors from ..config import CONFIG from ..plot_result import PlotResult + from ..statistics_accessor import _apply_selection result = self._clustering.result if result.original_data is None or result.aggregated_data is None: raise ValueError('No original/aggregated data available for comparison') - variables = self._resolve_variables(variable) + resolved_variables = self._resolve_variables(variables) # Build Dataset with Original/Aggregated for each variable data_vars = {} - for var in variables: + for var in resolved_variables: original = result.original_data[var] aggregated = result.aggregated_data[var] expanded = result.expand_data(aggregated) @@ -750,6 +776,9 @@ def _compare_timeseries( data_vars[f'{var} (Aggregated)'] = expanded ds = xr.Dataset(data_vars) + # Apply selection + ds = _apply_selection(ds, select) + # Resolve facets actual_facet_col, actual_facet_row = self._resolve_facets(ds, facet_col, facet_row) @@ -758,8 +787,13 @@ def _compare_timeseries( coord_cols = [c for c in ds.coords.keys() if c in df.columns] df = df.melt(id_vars=coord_cols, var_name='series', value_name='value') - colormap = colormap or CONFIG.Plotting.default_qualitative_colorscale - title = 'Original vs Aggregated' if len(variables) > 1 else f'Original vs Aggregated: {variables[0]}' + series_labels = df['series'].unique().tolist() + color_map = process_colors(colors, series_labels, CONFIG.Plotting.default_qualitative_colorscale) + title = ( + 'Original vs Aggregated' + if len(resolved_variables) > 1 + else f'Original vs Aggregated: {resolved_variables[0]}' + ) fig = px.line( df, @@ -769,7 +803,8 @@ def _compare_timeseries( facet_col=actual_facet_col, facet_row=actual_facet_row, 
title=title, - color_discrete_sequence=px.colors.qualitative.__dict__.get(colormap, px.colors.qualitative.Plotly), + color_discrete_map=color_map, + **plotly_kwargs, ) # Dash lines for Original series for trace in fig.data: @@ -790,29 +825,38 @@ def _compare_timeseries( def _compare_duration_curve( self, - variable: str | list[str] | None = None, + variables: str | list[str] | None = None, + *, + select: SelectType | None = None, + colors: ColorType | None = None, facet_col: str | None = None, facet_row: str | None = None, - colormap: str | None = None, show: bool | None = None, + **plotly_kwargs: Any, ): """Compare original vs aggregated as duration curves.""" import plotly.express as px + from ..color_processing import process_colors from ..config import CONFIG from ..plot_result import PlotResult + from ..statistics_accessor import _apply_selection result = self._clustering.result if result.original_data is None or result.aggregated_data is None: raise ValueError('No original/aggregated data available for comparison') - variables = self._resolve_variables(variable) + # Apply selection to original data before resolving variables + original_data = _apply_selection(result.original_data, select) + aggregated_data = _apply_selection(result.aggregated_data, select) + + resolved_variables = self._resolve_variables(variables) # Build Dataset with sorted values for each variable data_vars = {} - for var in variables: - original = result.original_data[var] - aggregated = result.aggregated_data[var] + for var in resolved_variables: + original = original_data[var] + aggregated = aggregated_data[var] expanded = result.expand_data(aggregated) # Sort values for duration curve original_sorted = np.sort(original.values.flatten())[::-1] @@ -827,8 +871,9 @@ def _compare_duration_curve( coord_cols = [c for c in ds.coords.keys() if c in df.columns] df = df.melt(id_vars=coord_cols, var_name='series', value_name='value') - colormap = colormap or CONFIG.Plotting.default_qualitative_colorscale - title = 'Duration Curve' if len(variables) > 1 else f'Duration Curve: {variables[0]}' + series_labels = df['series'].unique().tolist() + color_map = process_colors(colors, series_labels, CONFIG.Plotting.default_qualitative_colorscale) + title = 'Duration Curve' if len(resolved_variables) > 1 else f'Duration Curve: {resolved_variables[0]}' fig = px.line( df, @@ -837,7 +882,8 @@ def _compare_duration_curve( color='series', title=title, labels={'rank': 'Hours (sorted)', 'value': 'Value'}, - color_discrete_sequence=px.colors.qualitative.__dict__.get(colormap, px.colors.qualitative.Plotly), + color_discrete_map=color_map, + **plotly_kwargs, ) for trace in fig.data: if 'Original' in trace.name: @@ -854,10 +900,13 @@ def _compare_duration_curve( def heatmap( self, - colorscale: str | None = None, - facet_col: str | None = 'scenario', - facet_row: str | None = 'period', + *, + select: SelectType | None = None, + colors: str | list[str] | None = None, + facet_col: str | None = 'period', + facet_row: str | None = 'scenario', show: bool | None = None, + **plotly_kwargs: Any, ): """Plot cluster assignments as a heatmap. @@ -867,12 +916,15 @@ def heatmap( For multi-period/scenario data, creates faceted subplots. Args: - colorscale: Colorscale for heatmap. + select: xarray-style selection dict, e.g. {'scenario': 'Base Case'}. + colors: Colorscale name (str) or list of colors for heatmap coloring. + Dicts are not supported for heatmaps. Defaults to CONFIG.Plotting.default_sequential_colorscale. 
- facet_col: Dimension to facet on columns ('scenario', 'period', or None). - facet_row: Dimension to facet on rows ('period', 'scenario', or None). + facet_col: Dimension to facet on columns (default: 'period'). + facet_row: Dimension to facet on rows (default: 'scenario'). show: Whether to display the figure. Defaults to CONFIG.Plotting.default_show. + **plotly_kwargs: Additional arguments passed to plotly. Returns: PlotResult containing the heatmap figure and underlying data. @@ -882,6 +934,7 @@ def heatmap( from ..config import CONFIG from ..plot_result import PlotResult + from ..statistics_accessor import _apply_selection result = self._clustering.result cs = result.cluster_structure @@ -890,6 +943,10 @@ def heatmap( cluster_order_da = cs.cluster_order + # Apply selection if provided + if select: + cluster_order_da = _apply_selection(cluster_order_da.to_dataset(name='cluster'), select)['cluster'] + # Check for multi-dimensional data has_periods = 'period' in cluster_order_da.dims has_scenarios = 'scenario' in cluster_order_da.dims @@ -917,7 +974,7 @@ def heatmap( coords={'original_period': [f'P{i + 1}' for i in range(n_original_periods)], 'x': ['Cluster']}, ) - colorscale = colorscale or CONFIG.Plotting.default_sequential_colorscale + colorscale = colors or CONFIG.Plotting.default_sequential_colorscale # Combine slices into multi-dimensional DataArray if needed if has_periods and has_scenarios: @@ -954,6 +1011,7 @@ def heatmap( title='Cluster Assignments', labels={'x': '', 'original_period': 'Original Period', 'color': 'Cluster'}, aspect='auto', + **plotly_kwargs, ) # Clean up facet labels @@ -963,8 +1021,53 @@ def heatmap( # Hide x-axis since it's just a single "Cluster" column fig.update_xaxes(showticklabels=False) - # Build data for PlotResult - plot_result = PlotResult(data=xr.Dataset({'cluster_assignments': heatmap_da}), figure=fig) + # Build data for PlotResult - map clusters to original timesteps + # This is more useful as it aligns with the time dimension of the data + timesteps_per_period = cs.timesteps_per_cluster + original_time = result.original_data.coords['time'] if result.original_data is not None else None + + if has_periods or has_scenarios: + # Multi-dimensional: build cluster assignment per timestep for each slice + cluster_slices = {} + for p in periods: + for s in scenarios: + cluster_order = cs.get_cluster_order_for_slice(period=p, scenario=s) + # Expand cluster_order to timesteps: each period's cluster repeated timesteps_per_period times + cluster_per_timestep = np.repeat(cluster_order, timesteps_per_period) + cluster_slices[(p, s)] = xr.DataArray( + cluster_per_timestep, + dims=['time'], + coords={'time': original_time} if original_time is not None else None, + ) + # Combine slices + if has_periods and has_scenarios: + period_arrays = [] + for p in periods: + scenario_arrays = [cluster_slices[(p, s)] for s in scenarios] + period_arrays.append(xr.concat(scenario_arrays, dim=pd.Index(scenarios, name='scenario'))) + cluster_da = xr.concat(period_arrays, dim=pd.Index(periods, name='period')) + elif has_periods: + cluster_da = xr.concat( + [cluster_slices[(p, None)] for p in periods], dim=pd.Index(periods, name='period') + ) + else: + cluster_da = xr.concat( + [cluster_slices[(None, s)] for s in scenarios], dim=pd.Index(scenarios, name='scenario') + ) + cluster_da = cluster_da.transpose('time', ...) 
+ else: + # Simple case: single cluster assignment array + cluster_order = cs.cluster_order.values + cluster_per_timestep = np.repeat(cluster_order, timesteps_per_period) + cluster_da = xr.DataArray( + cluster_per_timestep, + dims=['time'], + coords={'time': original_time} if original_time is not None else None, + name='cluster', + ) + + data = xr.Dataset({'cluster': cluster_da}) + plot_result = PlotResult(data=data, figure=fig) if show is None: show = CONFIG.Plotting.default_show @@ -973,12 +1076,15 @@ def heatmap( return plot_result - def typical_periods( + def clusters( self, - variable: str | list[str] | None = None, - colormap: str | None = None, + variables: str | list[str] | None = None, + *, + select: SelectType | None = None, + colors: ColorType | None = None, facet_col_wrap: int | None = None, show: bool | None = None, + **plotly_kwargs: Any, ): """Plot each cluster's typical period profile. @@ -986,14 +1092,15 @@ def typical_periods( understanding what each cluster represents. Args: - variable: Variable(s) to plot. Can be a string, list of strings, + variables: Variable(s) to plot. Can be a string, list of strings, or None to plot all time-varying variables. - colormap: Colorscale for cluster colors. - Defaults to CONFIG.Plotting.default_qualitative_colorscale. + select: xarray-style selection dict, e.g. {'scenario': 'Base Case'}. + colors: Color specification (colorscale name, color list, or label-to-color dict). facet_col_wrap: Max columns before wrapping facets. Defaults to CONFIG.Plotting.default_facet_cols. show: Whether to display the figure. Defaults to CONFIG.Plotting.default_show. + **plotly_kwargs: Additional arguments passed to plotly. Returns: PlotResult containing the figure and underlying data. @@ -1001,30 +1108,25 @@ def typical_periods( import pandas as pd import plotly.express as px + from ..color_processing import process_colors from ..config import CONFIG from ..plot_result import PlotResult + from ..statistics_accessor import _apply_selection result = self._clustering.result cs = result.cluster_structure if result.aggregated_data is None or cs is None: raise ValueError('No aggregated data or cluster structure available') + # Apply selection to aggregated data + aggregated_data = _apply_selection(result.aggregated_data, select) + time_vars = self._get_time_varying_variables() if not time_vars: raise ValueError('No time-varying variables found') - # Normalize variable to list - if variable is None: - variables = time_vars - elif isinstance(variable, str): - if variable not in time_vars: - raise ValueError(f"Variable '{variable}' not found. Available: {time_vars}") - variables = [variable] - else: - invalid = [v for v in variable if v not in time_vars] - if invalid: - raise ValueError(f'Variables {invalid} not found. 
Available: {time_vars}') - variables = list(variable) + # Resolve variables + resolved_variables = self._resolve_variables(variables) n_clusters = int(cs.n_clusters) if isinstance(cs.n_clusters, (int, np.integer)) else int(cs.n_clusters.values) timesteps_per_cluster = cs.timesteps_per_cluster @@ -1032,8 +1134,8 @@ def typical_periods( # Build long-form DataFrame with cluster labels including occurrence counts rows = [] data_vars = {} - for var in variables: - data = result.aggregated_data[var].values + for var in resolved_variables: + data = aggregated_data[var].values data_by_cluster = data.reshape(n_clusters, timesteps_per_cluster) data_vars[var] = xr.DataArray( data_by_cluster, @@ -1047,22 +1149,24 @@ def typical_periods( rows.append({'cluster': label, 'timestep': t, 'value': data_by_cluster[c, t], 'variable': var}) df = pd.DataFrame(rows) - colormap = colormap or CONFIG.Plotting.default_qualitative_colorscale + cluster_labels = df['cluster'].unique().tolist() + color_map = process_colors(colors, cluster_labels, CONFIG.Plotting.default_qualitative_colorscale) facet_col_wrap = facet_col_wrap or CONFIG.Plotting.default_facet_cols - title = 'Typical Periods' if len(variables) > 1 else f'Typical Periods: {variables[0]}' + title = 'Clusters' if len(resolved_variables) > 1 else f'Clusters: {resolved_variables[0]}' fig = px.line( df, x='timestep', y='value', facet_col='cluster', - facet_row='variable' if len(variables) > 1 else None, - facet_col_wrap=facet_col_wrap if len(variables) == 1 else None, + facet_row='variable' if len(resolved_variables) > 1 else None, + facet_col_wrap=facet_col_wrap if len(resolved_variables) == 1 else None, title=title, - color_discrete_sequence=px.colors.qualitative.__dict__.get(colormap, px.colors.qualitative.Plotly), + color_discrete_map=color_map, + **plotly_kwargs, ) fig.update_layout(showlegend=False) - if len(variables) > 1: + if len(resolved_variables) > 1: fig.update_yaxes(matches=None) fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1])) @@ -1131,13 +1235,13 @@ def plot(self) -> ClusteringPlotAccessor: """Access plotting methods for clustering visualization. Returns: - ClusteringPlotAccessor with compare(), heatmap(), and typical_periods() methods. + ClusteringPlotAccessor with compare(), heatmap(), and clusters() methods. Example: >>> fs.clustering.plot.compare() # timeseries comparison >>> fs.clustering.plot.compare(kind='duration_curve') # duration curve >>> fs.clustering.plot.heatmap() # structure visualization - >>> fs.clustering.plot.typical_periods() # cluster profiles + >>> fs.clustering.plot.clusters() # cluster profiles """ return ClusteringPlotAccessor(self) From 94fba48f563d420d27a41ab6358ecd5f1fa60ed6 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 11:40:25 +0100 Subject: [PATCH 091/191] Update low-level API method --- flixopt/aggregation/base.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index 642596594..bd92262ce 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -549,7 +549,7 @@ def plot(self, colormap: str | None = None, show: bool | None = None): return plot_result - def plot_typical_periods(self, variable: str | None = None, show: bool | None = None): + def plot_clusters(self, variable: str | None = None, show: bool | None = None): """Plot each cluster's typical period profile. 

Shows each cluster as a separate subplot with its occurrence count @@ -607,14 +607,14 @@ def plot_typical_periods(self, variable: str | None = None, show: bool | None = ) fig.update_layout( - title=f'Typical Periods: {variable}', + title=f'Clusters: {variable}', height=200 * n_rows, ) # Build data for PlotResult result_data = xr.Dataset( { - 'typical_periods': xr.DataArray( + 'clusters': xr.DataArray( data_by_cluster, dims=['cluster', 'timestep'], coords={'cluster': range(n_clusters), 'timestep': range(cs.timesteps_per_cluster)}, From d7e7fafde7f904994f8aea619b08af2fc4b183a1 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 11:43:07 +0100 Subject: [PATCH 092/191] Update heatmap to accept animation frame --- flixopt/aggregation/base.py | 138 +++++++++++++----------------- 1 file changed, 48 insertions(+), 90 deletions(-) diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index bd92262ce..cf09f9dca 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -904,16 +904,18 @@ def heatmap( select: SelectType | None = None, colors: str | list[str] | None = None, facet_col: str | None = 'period', - facet_row: str | None = 'scenario', + animation_frame: str | None = 'scenario', show: bool | None = None, **plotly_kwargs: Any, ): - """Plot cluster assignments as a heatmap. + """Plot cluster assignments over time as a heatmap timeline. - Shows which cluster each original period belongs to. Rows are original - periods, color indicates cluster assignment. + Shows which cluster each timestep belongs to as a horizontal color bar. + The x-axis is time, color indicates cluster assignment. This visualization + aligns with time series data, making it easy to correlate cluster + assignments with other plots. - For multi-period/scenario data, creates faceted subplots. + For multi-period/scenario data, uses faceting and/or animation. Args: select: xarray-style selection dict, e.g. {'scenario': 'Base Case'}. colors: Colorscale name (str) or list of colors for heatmap coloring. Dicts are not supported for heatmaps. Defaults to CONFIG.Plotting.default_sequential_colorscale. facet_col: Dimension to facet on columns (default: 'period'). - facet_row: Dimension to facet on rows (default: 'scenario'). + animation_frame: Dimension for animation slider (default: 'scenario'). show: Whether to display the figure. Defaults to CONFIG.Plotting.default_show. **plotly_kwargs: Additional arguments passed to plotly. Returns: - PlotResult containing the heatmap figure and underlying data. + PlotResult containing the heatmap figure and cluster assignment data. + The data has 'cluster' variable with time dimension, matching original timesteps. 

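For orientation, a minimal usage sketch of the reworked call (hedged: fs_clustered is an assumed clustered FlowSystem, not defined in this patch; the accessor path and parameters follow the docstring above):

    # assumed object: fs_clustered (a FlowSystem returned by transform.cluster)
    res = fs_clustered.clustering.plot.heatmap(
        facet_col='period',    # facet across periods, if that dimension exists
        animation_frame=None,  # disable the default scenario slider
        show=False,
    )
    res.data['cluster']  # cluster index per original timestep, aligned with the time axis
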
""" import pandas as pd import plotly.express as px @@ -942,6 +945,8 @@ def heatmap( raise ValueError('No cluster structure available') cluster_order_da = cs.cluster_order + timesteps_per_period = cs.timesteps_per_cluster + original_time = result.original_data.coords['time'] if result.original_data is not None else None # Apply selection if provided if select: @@ -951,121 +956,74 @@ def heatmap( has_periods = 'period' in cluster_order_da.dims has_scenarios = 'scenario' in cluster_order_da.dims - # Resolve facets - only apply if dimension exists - actual_facet_col = facet_col if facet_col and has_scenarios and facet_col == 'scenario' else None - actual_facet_row = facet_row if facet_row and has_periods and facet_row == 'period' else None - # Get dimension values periods = list(cluster_order_da.coords['period'].values) if has_periods else [None] scenarios = list(cluster_order_da.coords['scenario'].values) if has_scenarios else [None] - # Build heatmap DataArray for each (period, scenario) slice - # Each slice is a 2D array with shape (n_original_periods, 1) showing cluster assignment - heatmap_slices: dict[tuple, xr.DataArray] = {} + # Build cluster assignment per timestep for each (period, scenario) slice + cluster_slices: dict[tuple, xr.DataArray] = {} for p in periods: for s in scenarios: cluster_order = cs.get_cluster_order_for_slice(period=p, scenario=s) - n_original_periods = len(cluster_order) - - # Create 2D array for heatmap (periods x 1 column for cluster) - heatmap_slices[(p, s)] = xr.DataArray( - cluster_order.reshape(-1, 1), - dims=['original_period', 'x'], - coords={'original_period': [f'P{i + 1}' for i in range(n_original_periods)], 'x': ['Cluster']}, + # Expand: each cluster repeated timesteps_per_period times + cluster_per_timestep = np.repeat(cluster_order, timesteps_per_period) + cluster_slices[(p, s)] = xr.DataArray( + cluster_per_timestep, + dims=['time'], + coords={'time': original_time} if original_time is not None else None, ) - colorscale = colors or CONFIG.Plotting.default_sequential_colorscale - - # Combine slices into multi-dimensional DataArray if needed + # Combine slices into multi-dimensional DataArray if has_periods and has_scenarios: - # Create a combined facet dimension for px.imshow (only supports facet_col) - combined_slices = [] - facet_labels = [] + period_arrays = [] for p in periods: - for s in scenarios: - combined_slices.append(heatmap_slices[(p, s)]) - facet_labels.append(f'{p} / {s}') - heatmap_da = xr.concat(combined_slices, dim=pd.Index(facet_labels, name='facet')) - facet_dim = 'facet' + scenario_arrays = [cluster_slices[(p, s)] for s in scenarios] + period_arrays.append(xr.concat(scenario_arrays, dim=pd.Index(scenarios, name='scenario'))) + cluster_da = xr.concat(period_arrays, dim=pd.Index(periods, name='period')) elif has_periods: - heatmap_da = xr.concat( - [heatmap_slices[(p, None)] for p in periods], + cluster_da = xr.concat( + [cluster_slices[(p, None)] for p in periods], dim=pd.Index(periods, name='period'), ) - facet_dim = 'period' if actual_facet_row or actual_facet_col else None elif has_scenarios: - heatmap_da = xr.concat( - [heatmap_slices[(None, s)] for s in scenarios], + cluster_da = xr.concat( + [cluster_slices[(None, s)] for s in scenarios], dim=pd.Index(scenarios, name='scenario'), ) - facet_dim = 'scenario' if actual_facet_col else None else: - heatmap_da = heatmap_slices[(None, None)] - facet_dim = None + cluster_da = cluster_slices[(None, None)] + + # Resolve facet_col and animation_frame - only use if dimension exists + 
actual_facet_col = facet_col if facet_col and facet_col in cluster_da.dims else None + actual_animation = animation_frame if animation_frame and animation_frame in cluster_da.dims else None + + # Add dummy y dimension for heatmap visualization (single row) + heatmap_da = cluster_da.expand_dims('y', axis=-1) + heatmap_da = heatmap_da.assign_coords(y=['Cluster']) + + colorscale = colors or CONFIG.Plotting.default_sequential_colorscale # Use px.imshow with xr.DataArray fig = px.imshow( heatmap_da, color_continuous_scale=colorscale, - facet_col=facet_dim, + facet_col=actual_facet_col, + animation_frame=actual_animation, title='Cluster Assignments', - labels={'x': '', 'original_period': 'Original Period', 'color': 'Cluster'}, + labels={'time': 'Time', 'color': 'Cluster'}, aspect='auto', **plotly_kwargs, ) # Clean up facet labels - if facet_dim: + if actual_facet_col: fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1])) - # Hide x-axis since it's just a single "Cluster" column - fig.update_xaxes(showticklabels=False) - - # Build data for PlotResult - map clusters to original timesteps - # This is more useful as it aligns with the time dimension of the data - timesteps_per_period = cs.timesteps_per_cluster - original_time = result.original_data.coords['time'] if result.original_data is not None else None - - if has_periods or has_scenarios: - # Multi-dimensional: build cluster assignment per timestep for each slice - cluster_slices = {} - for p in periods: - for s in scenarios: - cluster_order = cs.get_cluster_order_for_slice(period=p, scenario=s) - # Expand cluster_order to timesteps: each period's cluster repeated timesteps_per_period times - cluster_per_timestep = np.repeat(cluster_order, timesteps_per_period) - cluster_slices[(p, s)] = xr.DataArray( - cluster_per_timestep, - dims=['time'], - coords={'time': original_time} if original_time is not None else None, - ) - # Combine slices - if has_periods and has_scenarios: - period_arrays = [] - for p in periods: - scenario_arrays = [cluster_slices[(p, s)] for s in scenarios] - period_arrays.append(xr.concat(scenario_arrays, dim=pd.Index(scenarios, name='scenario'))) - cluster_da = xr.concat(period_arrays, dim=pd.Index(periods, name='period')) - elif has_periods: - cluster_da = xr.concat( - [cluster_slices[(p, None)] for p in periods], dim=pd.Index(periods, name='period') - ) - else: - cluster_da = xr.concat( - [cluster_slices[(None, s)] for s in scenarios], dim=pd.Index(scenarios, name='scenario') - ) - cluster_da = cluster_da.transpose('time', ...) 
- else: - # Simple case: single cluster assignment array - cluster_order = cs.cluster_order.values - cluster_per_timestep = np.repeat(cluster_order, timesteps_per_period) - cluster_da = xr.DataArray( - cluster_per_timestep, - dims=['time'], - coords={'time': original_time} if original_time is not None else None, - name='cluster', - ) + # Hide y-axis since it's just a single row + fig.update_yaxes(showticklabels=False) + # Data is exactly what we plotted (without dummy y dimension) + cluster_da.name = 'cluster' data = xr.Dataset({'cluster': cluster_da}) plot_result = PlotResult(data=data, figure=fig) From 11a63047f51f20c587265b6635736f5a393fe29b Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 11:51:05 +0100 Subject: [PATCH 093/191] Simplify compare method in clustering --- docs/notebooks/08d-clustering-multiperiod.ipynb | 3 +-- docs/notebooks/08e-clustering-internals.ipynb | 14 ++++++++------ 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/docs/notebooks/08d-clustering-multiperiod.ipynb b/docs/notebooks/08d-clustering-multiperiod.ipynb index e467522eb..84ff468ea 100644 --- a/docs/notebooks/08d-clustering-multiperiod.ipynb +++ b/docs/notebooks/08d-clustering-multiperiod.ipynb @@ -239,7 +239,7 @@ "outputs": [], "source": [ "# Compare original vs aggregated data - automatically faceted by period and scenario\n", - "fs_clustered.clustering.plot.compare(variable='Building(Heat)|fixed_relative_profile')" + "fs_clustered.clustering.plot.compare(variables='Building(Heat)|fixed_relative_profile')" ] }, { @@ -251,7 +251,6 @@ "source": [ "# Duration curves show how well the distribution is preserved per period/scenario\n", "fs_clustered.clustering.plot.compare(\n", - " variable='Building(Heat)|fixed_relative_profile',\n", " kind='duration_curve',\n", ")" ] diff --git a/docs/notebooks/08e-clustering-internals.ipynb b/docs/notebooks/08e-clustering-internals.ipynb index 263e84b23..a0ac80ca7 100644 --- a/docs/notebooks/08e-clustering-internals.ipynb +++ b/docs/notebooks/08e-clustering-internals.ipynb @@ -129,7 +129,7 @@ "outputs": [], "source": [ "# Compare specific variables only\n", - "fs_clustered.clustering.plot.compare(variable='HeatDemand(Q_th)|fixed_relative_profile')" + "fs_clustered.clustering.plot.compare(variables='HeatDemand(Q_th)|fixed_relative_profile')" ] }, { @@ -152,7 +152,7 @@ "source": [ "# View typical period profiles for each cluster\n", "# Each line represents a cluster's representative day\n", - "fs_clustered.clustering.plot.typical_periods(variable='HeatDemand(Q_th)|fixed_relative_profile')" + "fs_clustered.clustering.plot.clusters(variables='HeatDemand(Q_th)|fixed_relative_profile')" ] }, { @@ -216,7 +216,7 @@ "|--------|-------------|\n", "| `plot.compare()` | Compare original vs aggregated data (timeseries) |\n", "| `plot.compare(kind='duration_curve')` | Compare as duration curves |\n", - "| `plot.typical_periods()` | View each cluster's profile |\n", + "| `plot.clusters()` | View each cluster's profile |\n", "| `plot.heatmap()` | Visualize cluster assignments |\n", "\n", "### Key Parameters\n", @@ -224,10 +224,12 @@ "```python\n", "# Compare with options\n", "clustering.plot.compare(\n", - " variable='Demand|profile', # Single variable, list, or None (all)\n", + " variables='Demand|profile', # Single variable, list, or None (all)\n", " kind='timeseries', # 'timeseries' or 'duration_curve'\n", - " facet_col='scenario', # Facet by scenario if present\n", - " facet_row='period', # Facet by period if 
present\n", + " select={'scenario': 'Base'}, # xarray-style selection\n", + " colors='viridis', # Colorscale name, list, or dict\n", + " facet_col='period', # Facet by period if present\n", + " facet_row='scenario', # Facet by scenario if present\n", ")\n", "\n", "# Heatmap shows cluster assignments (no variable needed)\n", From a810878339a90df52c6365970ef2475352f1f71b Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 11:51:09 +0100 Subject: [PATCH 094/191] Simplify compare method in clustering --- flixopt/aggregation/base.py | 239 +++++++++++------------------------- 1 file changed, 72 insertions(+), 167 deletions(-) diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index cf09f9dca..52c3cbcb9 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -680,79 +680,7 @@ def compare( Returns: PlotResult containing the comparison figure and underlying data. """ - if kind == 'timeseries': - return self._compare_timeseries( - variables=variables, - select=select, - colors=colors, - facet_col=facet_col, - facet_row=facet_row, - show=show, - **plotly_kwargs, - ) - elif kind == 'duration_curve': - return self._compare_duration_curve( - variables=variables, - select=select, - colors=colors, - facet_col=facet_col, - facet_row=facet_row, - show=show, - **plotly_kwargs, - ) - else: - raise ValueError(f"Unknown kind '{kind}'. Use 'timeseries' or 'duration_curve'.") - - def _get_time_varying_variables(self) -> list[str]: - """Get list of time-varying variables from original data.""" - result = self._clustering.result - if result.original_data is None: - return [] - return [ - name - for name in result.original_data.data_vars - if 'time' in result.original_data[name].dims - and not np.isclose(result.original_data[name].min(), result.original_data[name].max()) - ] - - def _resolve_variables(self, variables: str | list[str] | None) -> list[str]: - """Resolve variables parameter to a list of valid variable names.""" - time_vars = self._get_time_varying_variables() - if not time_vars: - raise ValueError('No time-varying variables found') - - if variables is None: - return time_vars - elif isinstance(variables, str): - if variables not in time_vars: - raise ValueError(f"Variable '{variables}' not found. Available: {time_vars}") - return [variables] - else: - invalid = [v for v in variables if v not in time_vars] - if invalid: - raise ValueError(f'Variables {invalid} not found. 
Available: {time_vars}') - return list(variables) - - def _resolve_facets( - self, ds: xr.Dataset, facet_col: str | None, facet_row: str | None - ) -> tuple[str | None, str | None]: - """Resolve facet dimensions, returning None if not present in data.""" - actual_col = facet_col if facet_col and facet_col in ds.dims else None - actual_row = facet_row if facet_row and facet_row in ds.dims else None - return actual_col, actual_row - - def _compare_timeseries( - self, - variables: str | list[str] | None = None, - *, - select: SelectType | None = None, - colors: ColorType | None = None, - facet_col: str | None = None, - facet_row: str | None = None, - show: bool | None = None, - **plotly_kwargs: Any, - ): - """Compare original vs aggregated as time series.""" + import pandas as pd import plotly.express as px from ..color_processing import process_colors @@ -760,56 +688,78 @@ def _compare_timeseries( from ..plot_result import PlotResult from ..statistics_accessor import _apply_selection + if kind not in ('timeseries', 'duration_curve'): + raise ValueError(f"Unknown kind '{kind}'. Use 'timeseries' or 'duration_curve'.") + result = self._clustering.result if result.original_data is None or result.aggregated_data is None: raise ValueError('No original/aggregated data available for comparison') resolved_variables = self._resolve_variables(variables) - # Build Dataset with Original/Aggregated for each variable + # Build Dataset with 'source' dimension for Original/Aggregated data_vars = {} for var in resolved_variables: original = result.original_data[var] aggregated = result.aggregated_data[var] expanded = result.expand_data(aggregated) - data_vars[f'{var} (Original)'] = original - data_vars[f'{var} (Aggregated)'] = expanded + + if kind == 'duration_curve': + # Sort values for duration curve (flatten, then sort descending) + original_sorted = np.sort(original.values.flatten())[::-1] + expanded_sorted = np.sort(expanded.values.flatten())[::-1] + n = len(original_sorted) + original = xr.DataArray(original_sorted, dims=['rank'], coords={'rank': range(n)}) + expanded = xr.DataArray(expanded_sorted, dims=['rank'], coords={'rank': range(n)}) + + # Concat along 'source' dimension + combined = xr.concat([original, expanded], dim=pd.Index(['Original', 'Aggregated'], name='source')) + data_vars[var] = combined ds = xr.Dataset(data_vars) - # Apply selection - ds = _apply_selection(ds, select) + # Apply selection (only for timeseries - duration curve already flattened) + if kind == 'timeseries': + ds = _apply_selection(ds, select) - # Resolve facets - actual_facet_col, actual_facet_row = self._resolve_facets(ds, facet_col, facet_row) + # Resolve facets (only for timeseries) + actual_facet_col = facet_col if kind == 'timeseries' and facet_col in ds.dims else None + actual_facet_row = facet_row if kind == 'timeseries' and facet_row in ds.dims else None - # Convert to long-form DataFrame (like _dataset_to_long_df) + # Convert to long-form DataFrame df = ds.to_dataframe().reset_index() coord_cols = [c for c in ds.coords.keys() if c in df.columns] - df = df.melt(id_vars=coord_cols, var_name='series', value_name='value') - - series_labels = df['series'].unique().tolist() - color_map = process_colors(colors, series_labels, CONFIG.Plotting.default_qualitative_colorscale) - title = ( - 'Original vs Aggregated' - if len(resolved_variables) > 1 - else f'Original vs Aggregated: {resolved_variables[0]}' - ) + df = df.melt(id_vars=coord_cols, var_name='variable', value_name='value') + + variable_labels = 
df['variable'].unique().tolist() + color_map = process_colors(colors, variable_labels, CONFIG.Plotting.default_qualitative_colorscale) + + # Set x-axis and title based on kind + if kind == 'timeseries': + x_col = 'time' + title = ( + 'Original vs Aggregated' + if len(resolved_variables) > 1 + else f'Original vs Aggregated: {resolved_variables[0]}' + ) + labels = {} + else: + x_col = 'rank' + title = 'Duration Curve' if len(resolved_variables) > 1 else f'Duration Curve: {resolved_variables[0]}' + labels = {'rank': 'Hours (sorted)', 'value': 'Value'} fig = px.line( df, - x='time', + x=x_col, y='value', - color='series', + color='variable', + line_dash='source', facet_col=actual_facet_col, facet_row=actual_facet_row, title=title, + labels=labels, color_discrete_map=color_map, **plotly_kwargs, ) - # Dash lines for Original series - for trace in fig.data: - if 'Original' in trace.name: - trace.line.dash = 'dash' if actual_facet_row or actual_facet_col: fig.update_yaxes(matches=None) fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1])) @@ -823,80 +773,35 @@ def _compare_timeseries( return plot_result - def _compare_duration_curve( - self, - variables: str | list[str] | None = None, - *, - select: SelectType | None = None, - colors: ColorType | None = None, - facet_col: str | None = None, - facet_row: str | None = None, - show: bool | None = None, - **plotly_kwargs: Any, - ): - """Compare original vs aggregated as duration curves.""" - import plotly.express as px - - from ..color_processing import process_colors - from ..config import CONFIG - from ..plot_result import PlotResult - from ..statistics_accessor import _apply_selection - + def _get_time_varying_variables(self) -> list[str]: + """Get list of time-varying variables from original data.""" result = self._clustering.result - if result.original_data is None or result.aggregated_data is None: - raise ValueError('No original/aggregated data available for comparison') - - # Apply selection to original data before resolving variables - original_data = _apply_selection(result.original_data, select) - aggregated_data = _apply_selection(result.aggregated_data, select) - - resolved_variables = self._resolve_variables(variables) - - # Build Dataset with sorted values for each variable - data_vars = {} - for var in resolved_variables: - original = original_data[var] - aggregated = aggregated_data[var] - expanded = result.expand_data(aggregated) - # Sort values for duration curve - original_sorted = np.sort(original.values.flatten())[::-1] - expanded_sorted = np.sort(expanded.values.flatten())[::-1] - n = len(original_sorted) - data_vars[f'{var} (Original)'] = xr.DataArray(original_sorted, dims=['rank'], coords={'rank': range(n)}) - data_vars[f'{var} (Aggregated)'] = xr.DataArray(expanded_sorted, dims=['rank'], coords={'rank': range(n)}) - ds = xr.Dataset(data_vars) - - # Convert to long-form DataFrame - df = ds.to_dataframe().reset_index() - coord_cols = [c for c in ds.coords.keys() if c in df.columns] - df = df.melt(id_vars=coord_cols, var_name='series', value_name='value') - - series_labels = df['series'].unique().tolist() - color_map = process_colors(colors, series_labels, CONFIG.Plotting.default_qualitative_colorscale) - title = 'Duration Curve' if len(resolved_variables) > 1 else f'Duration Curve: {resolved_variables[0]}' - - fig = px.line( - df, - x='rank', - y='value', - color='series', - title=title, - labels={'rank': 'Hours (sorted)', 'value': 'Value'}, - color_discrete_map=color_map, - **plotly_kwargs, - ) - for trace in 
fig.data: - if 'Original' in trace.name: - trace.line.dash = 'dash' - - plot_result = PlotResult(data=ds, figure=fig) + if result.original_data is None: + return [] + return [ + name + for name in result.original_data.data_vars + if 'time' in result.original_data[name].dims + and not np.isclose(result.original_data[name].min(), result.original_data[name].max()) + ] - if show is None: - show = CONFIG.Plotting.default_show - if show: - plot_result.show() + def _resolve_variables(self, variables: str | list[str] | None) -> list[str]: + """Resolve variables parameter to a list of valid variable names.""" + time_vars = self._get_time_varying_variables() + if not time_vars: + raise ValueError('No time-varying variables found') - return plot_result + if variables is None: + return time_vars + elif isinstance(variables, str): + if variables not in time_vars: + raise ValueError(f"Variable '{variables}' not found. Available: {time_vars}") + return [variables] + else: + invalid = [v for v in variables if v not in time_vars] + if invalid: + raise ValueError(f'Variables {invalid} not found. Available: {time_vars}') + return list(variables) def heatmap( self, From 6dc197900668f19b00726e7748226497af377d90 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 11:57:11 +0100 Subject: [PATCH 095/191] Simplify compare method in clustering --- flixopt/aggregation/base.py | 40 +++++++++++++++++++++---------------- 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index 52c3cbcb9..a9843f56d 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -701,25 +701,32 @@ def compare( data_vars = {} for var in resolved_variables: original = result.original_data[var] - aggregated = result.aggregated_data[var] - expanded = result.expand_data(aggregated) - - if kind == 'duration_curve': - # Sort values for duration curve (flatten, then sort descending) - original_sorted = np.sort(original.values.flatten())[::-1] - expanded_sorted = np.sort(expanded.values.flatten())[::-1] - n = len(original_sorted) - original = xr.DataArray(original_sorted, dims=['rank'], coords={'rank': range(n)}) - expanded = xr.DataArray(expanded_sorted, dims=['rank'], coords={'rank': range(n)}) - - # Concat along 'source' dimension + expanded = result.expand_data(result.aggregated_data[var]) combined = xr.concat([original, expanded], dim=pd.Index(['Original', 'Aggregated'], name='source')) data_vars[var] = combined ds = xr.Dataset(data_vars) - # Apply selection (only for timeseries - duration curve already flattened) - if kind == 'timeseries': - ds = _apply_selection(ds, select) + # Apply selection + ds = _apply_selection(ds, select) + + # For duration curve: flatten and sort values + if kind == 'duration_curve': + sorted_vars = {} + for var in ds.data_vars: + for source in ds.coords['source'].values: + values = np.sort(ds[var].sel(source=source).values.flatten())[::-1] + sorted_vars[(var, source)] = values + n = len(values) + ds = xr.Dataset( + { + var: xr.DataArray( + [sorted_vars[(var, s)] for s in ['Original', 'Aggregated']], + dims=['source', 'rank'], + coords={'source': ['Original', 'Aggregated'], 'rank': range(n)}, + ) + for var in resolved_variables + } + ) # Resolve facets (only for timeseries) actual_facet_col = facet_col if kind == 'timeseries' and facet_col in ds.dims else None @@ -734,8 +741,8 @@ def compare( color_map = process_colors(colors, variable_labels, 
CONFIG.Plotting.default_qualitative_colorscale) # Set x-axis and title based on kind + x_col = 'time' if kind == 'timeseries' else 'rank' if kind == 'timeseries': - x_col = 'time' title = ( 'Original vs Aggregated' if len(resolved_variables) > 1 @@ -743,7 +750,6 @@ def compare( ) labels = {} else: - x_col = 'rank' title = 'Duration Curve' if len(resolved_variables) > 1 else f'Duration Curve: {resolved_variables[0]}' labels = {'rank': 'Hours (sorted)', 'value': 'Value'} From f1ceac5a907848985082a18eb10d9251052a7751 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 12:00:38 +0100 Subject: [PATCH 096/191] Simplify compare method in clustering --- flixopt/aggregation/base.py | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index a9843f56d..ff4cad46a 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -504,7 +504,7 @@ def plot(self, colormap: str | None = None, show: bool | None = None): # Rename for legend original_df = original_df.rename(columns={col: f'Original - {col}' for col in original_df.columns}) - expanded_agg = expanded_agg.rename(columns={col: f'Aggregated - {col}' for col in expanded_agg.columns}) + expanded_agg = expanded_agg.rename(columns={col: f'Clustered - {col}' for col in expanded_agg.columns}) colors = list( process_colors( @@ -528,16 +528,16 @@ def plot(self, colormap: str | None = None, show: bool | None = None): fig.add_trace(trace) fig.update_layout( - title='Original vs Aggregated Data (original = ---)', + title='Original vs Clustered Data (original = ---)', xaxis_title='Time', yaxis_title='Value', ) - # Build xarray Dataset with both original and aggregated data + # Build xarray Dataset with both original and clustered data data = xr.Dataset( { 'original': original_filtered.to_array(dim='variable'), - 'aggregated': aggregated_filtered.to_array(dim='variable'), + 'clustered': aggregated_filtered.to_array(dim='variable'), } ) plot_result = PlotResult(data=data, figure=fig) @@ -697,12 +697,12 @@ def compare( resolved_variables = self._resolve_variables(variables) - # Build Dataset with 'source' dimension for Original/Aggregated + # Build Dataset with 'representation' dimension for Original/Clustered data_vars = {} for var in resolved_variables: original = result.original_data[var] - expanded = result.expand_data(result.aggregated_data[var]) - combined = xr.concat([original, expanded], dim=pd.Index(['Original', 'Aggregated'], name='source')) + clustered = result.expand_data(result.aggregated_data[var]) + combined = xr.concat([original, clustered], dim=pd.Index(['Original', 'Clustered'], name='representation')) data_vars[var] = combined ds = xr.Dataset(data_vars) @@ -713,16 +713,16 @@ def compare( if kind == 'duration_curve': sorted_vars = {} for var in ds.data_vars: - for source in ds.coords['source'].values: - values = np.sort(ds[var].sel(source=source).values.flatten())[::-1] - sorted_vars[(var, source)] = values + for rep in ds.coords['representation'].values: + values = np.sort(ds[var].sel(representation=rep).values.flatten())[::-1] + sorted_vars[(var, rep)] = values n = len(values) ds = xr.Dataset( { var: xr.DataArray( - [sorted_vars[(var, s)] for s in ['Original', 'Aggregated']], - dims=['source', 'rank'], - coords={'source': ['Original', 'Aggregated'], 'rank': range(n)}, + [sorted_vars[(var, r)] for r in ['Original', 'Clustered']], + dims=['representation', 'rank'], + 
coords={'representation': ['Original', 'Clustered'], 'rank': range(n)}, ) for var in resolved_variables } @@ -744,9 +744,9 @@ def compare( x_col = 'time' if kind == 'timeseries' else 'rank' if kind == 'timeseries': title = ( - 'Original vs Aggregated' + 'Original vs Clustered' if len(resolved_variables) > 1 - else f'Original vs Aggregated: {resolved_variables[0]}' + else f'Original vs Clustered: {resolved_variables[0]}' ) labels = {} else: @@ -758,7 +758,7 @@ def compare( x=x_col, y='value', color='variable', - line_dash='source', + line_dash='representation', facet_col=actual_facet_col, facet_row=actual_facet_row, title=title, From be103b81114063f86cc3182b04fb0927553eb0d8 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 14:44:16 +0100 Subject: [PATCH 097/191] improve API --- flixopt/aggregation/__init__.py | 2 - flixopt/aggregation/base.py | 203 +-------------------- flixopt/plot_result.py | 2 +- tests/test_aggregation/test_integration.py | 6 - 4 files changed, 5 insertions(+), 208 deletions(-) diff --git a/flixopt/aggregation/__init__.py b/flixopt/aggregation/__init__.py index 93d81541a..ab7a09cf7 100644 --- a/flixopt/aggregation/__init__.py +++ b/flixopt/aggregation/__init__.py @@ -30,7 +30,6 @@ ClusterResult, ClusterStructure, create_cluster_structure_from_mapping, - plot_aggregation, ) # Lazy import for InterClusterLinking to avoid circular imports @@ -56,5 +55,4 @@ def _get_inter_cluster_linking(): 'InterClusterLinking', # Utilities 'create_cluster_structure_from_mapping', - 'plot_aggregation', ] diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index ff4cad46a..d748b0ed9 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -25,6 +25,7 @@ if TYPE_CHECKING: from ..color_processing import ColorType + from ..plot_result import PlotResult from ..statistics_accessor import SelectType @@ -456,181 +457,6 @@ def validate(self) -> None: stacklevel=2, ) - def plot(self, colormap: str | None = None, show: bool | None = None): - """Plot original vs aggregated data comparison. - - Visualizes the original time series (dashed lines) overlaid with - the aggregated/clustered time series (solid lines) for comparison. - Constants (time-invariant variables) are excluded from the plot. - - Args: - colormap: Colorscale name for the time series colors. - Defaults to CONFIG.Plotting.default_qualitative_colorscale. - show: Whether to display the figure. - Defaults to CONFIG.Plotting.default_show. - - Returns: - PlotResult containing the comparison figure and underlying data. 
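This patch pushes all plotting onto the accessor; a hedged migration sketch (the commented calls are the methods being deleted here, fs is an assumed clustered FlowSystem):

    # before (removed in this patch):
    # result.plot()           # ClusterResult.plot
    # result.plot_clusters()  # ClusterResult.plot_clusters
    # after (accessor API kept by this patch):
    fs.clustering.plot.compare()                       # original vs clustered time series
    fs.clustering.plot.compare(kind='duration_curve')  # distribution check
    fs.clustering.plot.clusters()                      # per-cluster profiles
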
- """ - import plotly.express as px - - from ..color_processing import process_colors - from ..config import CONFIG - from ..plot_result import PlotResult - - if self.original_data is None or self.aggregated_data is None: - raise ValueError('ClusterResult must contain both original_data and aggregated_data for plotting') - - # Filter to only time-varying variables (exclude constants) - time_vars = [ - name - for name in self.original_data.data_vars - if 'time' in self.original_data[name].dims - and not np.isclose(self.original_data[name].min(), self.original_data[name].max()) - ] - if not time_vars: - raise ValueError('No time-varying variables found in original_data') - - original_filtered = self.original_data[time_vars] - aggregated_filtered = self.aggregated_data[time_vars] - - # Convert xarray to DataFrames - original_df = original_filtered.to_dataframe() - aggregated_df = aggregated_filtered.to_dataframe() - - # Expand aggregated data to original length using mapping - mapping = self.timestep_mapping.values - expanded_agg = aggregated_df.iloc[mapping].reset_index(drop=True) - - # Rename for legend - original_df = original_df.rename(columns={col: f'Original - {col}' for col in original_df.columns}) - expanded_agg = expanded_agg.rename(columns={col: f'Clustered - {col}' for col in expanded_agg.columns}) - - colors = list( - process_colors( - colormap or CONFIG.Plotting.default_qualitative_colorscale, list(original_df.columns) - ).values() - ) - - # Create line plot for original data (dashed) - original_df = original_df.reset_index() - index_name = original_df.columns[0] - df_org_long = original_df.melt(id_vars=index_name, var_name='variable', value_name='value') - fig = px.line(df_org_long, x=index_name, y='value', color='variable', color_discrete_sequence=colors) - for trace in fig.data: - trace.update(line=dict(dash='dash')) - - # Add aggregated data (solid lines) - expanded_agg[index_name] = original_df[index_name] - df_agg_long = expanded_agg.melt(id_vars=index_name, var_name='variable', value_name='value') - fig2 = px.line(df_agg_long, x=index_name, y='value', color='variable', color_discrete_sequence=colors) - for trace in fig2.data: - fig.add_trace(trace) - - fig.update_layout( - title='Original vs Clustered Data (original = ---)', - xaxis_title='Time', - yaxis_title='Value', - ) - - # Build xarray Dataset with both original and clustered data - data = xr.Dataset( - { - 'original': original_filtered.to_array(dim='variable'), - 'clustered': aggregated_filtered.to_array(dim='variable'), - } - ) - plot_result = PlotResult(data=data, figure=fig) - - if show is None: - show = CONFIG.Plotting.default_show - if show: - plot_result.show() - - return plot_result - - def plot_clusters(self, variable: str | None = None, show: bool | None = None): - """Plot each cluster's typical period profile. - - Shows each cluster as a separate subplot with its occurrence count - in the title. Useful for understanding what each cluster represents. - - Args: - variable: Variable to plot. If None, plots the first available variable. - show: Whether to display the figure. Defaults to CONFIG.Plotting.default_show. - - Returns: - PlotResult containing the figure and underlying data. 
- """ - from plotly.subplots import make_subplots - - from ..config import CONFIG - from ..plot_result import PlotResult - - if self.aggregated_data is None or self.cluster_structure is None: - raise ValueError('ClusterResult must contain aggregated_data and cluster_structure for this plot') - - cs = self.cluster_structure - n_clusters = int(cs.n_clusters) if isinstance(cs.n_clusters, (int, np.integer)) else int(cs.n_clusters.values) - - # Select variable - variables = list(self.aggregated_data.data_vars) - if variable is None: - variable = variables[0] - elif variable not in variables: - raise ValueError(f'Variable {variable} not found. Available: {variables}') - - data = self.aggregated_data[variable].values - - # Reshape to [n_clusters, timesteps_per_cluster] - data_by_cluster = data.reshape(n_clusters, cs.timesteps_per_cluster) - - # Create subplots - n_cols = min(4, n_clusters) - n_rows = (n_clusters + n_cols - 1) // n_cols - fig = make_subplots( - rows=n_rows, - cols=n_cols, - subplot_titles=[ - f'Cluster {c} (×{int(cs.cluster_occurrences.sel(cluster=c).values)})' for c in range(n_clusters) - ], - ) - - x = np.arange(cs.timesteps_per_cluster) - for c in range(n_clusters): - row = c // n_cols + 1 - col = c % n_cols + 1 - fig.add_trace( - {'type': 'scatter', 'x': x, 'y': data_by_cluster[c], 'mode': 'lines', 'showlegend': False}, - row=row, - col=col, - ) - - fig.update_layout( - title=f'Clusters: {variable}', - height=200 * n_rows, - ) - - # Build data for PlotResult - result_data = xr.Dataset( - { - 'clusters': xr.DataArray( - data_by_cluster, - dims=['cluster', 'timestep'], - coords={'cluster': range(n_clusters), 'timestep': range(cs.timesteps_per_cluster)}, - ), - 'occurrences': cs.cluster_occurrences, - } - ) - plot_result = PlotResult(data=result_data, figure=fig) - - if show is None: - show = CONFIG.Plotting.default_show - if show: - plot_result.show() - - return plot_result - class ClusteringPlotAccessor: """Plot accessor for Clustering objects. @@ -660,7 +486,7 @@ def compare( facet_row: str | None = 'scenario', show: bool | None = None, **plotly_kwargs: Any, - ): + ) -> PlotResult: """Compare original vs aggregated data. Args: @@ -818,7 +644,7 @@ def heatmap( animation_frame: str | None = 'scenario', show: bool | None = None, **plotly_kwargs: Any, - ): + ) -> PlotResult: """Plot cluster assignments over time as a heatmap timeline. Shows which cluster each timestep belongs to as a horizontal color bar. @@ -954,7 +780,7 @@ def clusters( facet_col_wrap: int | None = None, show: bool | None = None, **plotly_kwargs: Any, - ): + ) -> PlotResult: """Plot each cluster's typical period profile. Shows each cluster as a separate faceted subplot. Useful for @@ -1206,24 +1032,3 @@ def create_cluster_structure_from_mapping( n_clusters=n_clusters, timesteps_per_cluster=timesteps_per_cluster, ) - - -def plot_aggregation( - result: ClusterResult, - colormap: str | None = None, - show: bool | None = None, -): - """Plot original vs aggregated data comparison. - - .. deprecated:: - Use ``result.plot()`` directly instead. - - Args: - result: ClusterResult containing original and aggregated data. - colormap: Colorscale name for the time series colors. - show: Whether to display the figure. - - Returns: - PlotResult containing the comparison figure and underlying data. 
- """ - return result.plot(colormap=colormap, show=show) diff --git a/flixopt/plot_result.py b/flixopt/plot_result.py index 683fbcf3e..85e692602 100644 --- a/flixopt/plot_result.py +++ b/flixopt/plot_result.py @@ -41,7 +41,7 @@ class PlotResult: Customizing the figure: - >>> result = clustering.plot() + >>> result = clustering.plot.compare() >>> result.update(title='My Custom Title').show() """ diff --git a/tests/test_aggregation/test_integration.py b/tests/test_aggregation/test_integration.py index 2ef0a0640..b256f4d1e 100644 --- a/tests/test_aggregation/test_integration.py +++ b/tests/test_aggregation/test_integration.py @@ -141,12 +141,6 @@ def test_import_from_flixopt(self): assert hasattr(aggregation, 'ClusterStructure') assert hasattr(aggregation, 'Clustering') - def test_plot_aggregation_available(self): - """Test that plot_aggregation is available.""" - from flixopt.aggregation import plot_aggregation - - assert callable(plot_aggregation) - def test_create_cluster_structure_from_mapping_available(self): """Test that create_cluster_structure_from_mapping is available.""" from flixopt.aggregation import create_cluster_structure_from_mapping From ccec025fd23918afe4da657d5c42f8cbadf5e34c Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 14:56:25 +0100 Subject: [PATCH 098/191] Add intra_cluster_mask to clustering for storage linking --- flixopt/aggregation/base.py | 70 ++++++++++++++++++++++++++++++++++++- flixopt/components.py | 18 +++++++--- 2 files changed, 82 insertions(+), 6 deletions(-) diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index d748b0ed9..d27a58064 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -17,7 +17,7 @@ from __future__ import annotations -from dataclasses import dataclass +from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any import numpy as np @@ -907,6 +907,7 @@ class Clustering: backend_name: str = 'unknown' storage_inter_cluster_linking: bool = True storage_cyclic: bool = True + _intra_cluster_mask: xr.DataArray | None = field(default=None, repr=False) def __repr__(self) -> str: cs = self.result.cluster_structure @@ -983,6 +984,73 @@ def timestep_mapping(self) -> xr.DataArray: """Mapping from original timesteps to representative timestep indices.""" return self.result.timestep_mapping + def get_intra_cluster_mask(self, time_coords: np.ndarray | xr.DataArray) -> xr.DataArray: + """Get mask for intra-cluster timestep transitions (cached). + + Returns a boolean mask that is True for transitions within a cluster + and False for transitions at cluster boundaries. Used to skip + inter-cluster balance constraints in storage models. + + Args: + time_coords: Time coordinates for the mask. Should match the + constraint's time dimension (typically flow_system.timesteps). + + Returns: + DataArray with dims ['time'] or ['time', 'period', 'scenario'], + True for intra-cluster transitions. 
+ + Example: + For 2 clusters with 24 timesteps each (48 total): + - Positions 0-22: True (within cluster 0) + - Position 23: False (boundary between cluster 0 and 1) + - Positions 24-47: True (within cluster 1) + """ + if self._intra_cluster_mask is not None: + return self._intra_cluster_mask + + if self.result.cluster_structure is None: + raise ValueError('No cluster_structure available') + + self._intra_cluster_mask = self._compute_intra_cluster_mask(time_coords) + return self._intra_cluster_mask + + def _compute_intra_cluster_mask(self, time_coords: np.ndarray | xr.DataArray) -> xr.DataArray: + """Compute the intra-cluster mask. + + Boundary positions are uniform across all period/scenario slices since + n_clusters and timesteps_per_cluster are uniform. + """ + n_clusters = self.n_clusters + steps_per_cluster = self.timesteps_per_period + n_timesteps = n_clusters * steps_per_cluster + + # Boundary positions: T-1, 2T-1, ..., (n_clusters-1)*T - 1 + # Position k links charge_state[k+1] to charge_state[k] + # Boundary at k means k is last timestep of cluster, k+1 is first of next + boundary_positions = [(c * steps_per_cluster) - 1 for c in range(1, n_clusters)] + + mask_values = np.ones(n_timesteps, dtype=bool) + mask_values[boundary_positions] = False + + if isinstance(time_coords, xr.DataArray): + time_coords = time_coords.values + + mask = xr.DataArray( + mask_values, + dims=['time'], + coords={'time': time_coords}, + name='intra_cluster_mask', + ) + + # Expand to include period/scenario dimensions if present (for broadcasting) + original_fs = self.original_flow_system + if hasattr(original_fs, 'periods') and original_fs.periods is not None: + mask = mask.expand_dims(period=list(original_fs.periods)) + if hasattr(original_fs, 'scenarios') and original_fs.scenarios is not None: + mask = mask.expand_dims(scenario=list(original_fs.scenarios)) + + return mask + def create_cluster_structure_from_mapping( timestep_mapping: xr.DataArray, diff --git a/flixopt/components.py b/flixopt/components.py index 6f982066c..97c1bb087 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -907,14 +907,22 @@ def _do_modeling(self): eff_charge = self.element.eta_charge eff_discharge = self.element.eta_discharge - self.add_constraints( + # Build balance expression + lhs = ( charge_state.isel(time=slice(1, None)) - == charge_state.isel(time=slice(None, -1)) * ((1 - rel_loss) ** timestep_duration) - + charge_rate * eff_charge * timestep_duration - - discharge_rate * timestep_duration / eff_discharge, - short_name='charge_state', + - charge_state.isel(time=slice(None, -1)) * ((1 - rel_loss) ** timestep_duration) + - charge_rate * eff_charge * timestep_duration + + discharge_rate * timestep_duration / eff_discharge ) + # Apply intra-cluster mask if clustered (skip inter-cluster boundaries) + clustering = self._model.flow_system.clustering + if clustering is not None: + mask = clustering.get_intra_cluster_mask(self._model.flow_system.timesteps) + lhs = lhs.where(mask) + + self.add_constraints(lhs == 0, short_name='charge_state') + # Create InvestmentModel and bounding constraints for investment if isinstance(self.element.capacity_in_flow_hours, InvestParameters): self.add_submodels( From f56f23eb82300518110570d58a4151e6e3ba21da Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 15:02:17 +0100 Subject: [PATCH 099/191] Add intra_cluster_mask to clustering for storage linking --- flixopt/aggregation/base.py | 7 ++++--- 1 file changed, 4 insertions(+), 
3 deletions(-) diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index d27a58064..89067dc51 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -25,6 +25,7 @@ if TYPE_CHECKING: from ..color_processing import ColorType + from ..flow_system import FlowSystem from ..plot_result import PlotResult from ..statistics_accessor import SelectType @@ -903,7 +904,7 @@ class Clustering: """ result: ClusterResult - original_flow_system: object # FlowSystem - avoid circular import + original_flow_system: FlowSystem # FlowSystem - avoid circular import backend_name: str = 'unknown' storage_inter_cluster_linking: bool = True storage_cyclic: bool = True @@ -1044,9 +1045,9 @@ def _compute_intra_cluster_mask(self, time_coords: np.ndarray | xr.DataArray) -> # Expand to include period/scenario dimensions if present (for broadcasting) original_fs = self.original_flow_system - if hasattr(original_fs, 'periods') and original_fs.periods is not None: + if original_fs.periods is not None: mask = mask.expand_dims(period=list(original_fs.periods)) - if hasattr(original_fs, 'scenarios') and original_fs.scenarios is not None: + if original_fs.scenarios is not None: mask = mask.expand_dims(scenario=list(original_fs.scenarios)) return mask From e49ed1b678976d26ecbdcc2e914a9a3db9255eb2 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 15:02:30 +0100 Subject: [PATCH 100/191] Temp --- flixopt/components.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/flixopt/components.py b/flixopt/components.py index 97c1bb087..4fa28eae6 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -918,8 +918,14 @@ def _do_modeling(self): # Apply intra-cluster mask if clustered (skip inter-cluster boundaries) clustering = self._model.flow_system.clustering if clustering is not None: - mask = clustering.get_intra_cluster_mask(self._model.flow_system.timesteps) - lhs = lhs.where(mask) + # Get indices to keep (all except cluster boundaries) + n_clusters = clustering.n_clusters + steps_per_cluster = clustering.timesteps_per_period + n_timesteps = n_clusters * steps_per_cluster + # Boundary indices: T-1, 2T-1, ..., (n_clusters-1)*T - 1 + boundary_indices = {(c * steps_per_cluster) - 1 for c in range(1, n_clusters)} + valid_indices = [i for i in range(n_timesteps) if i not in boundary_indices] + lhs = lhs.isel(time=valid_indices) self.add_constraints(lhs == 0, short_name='charge_state') From 2b084b3a71f0fd5642e9f2079f98317b2bf35120 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 15:02:48 +0100 Subject: [PATCH 101/191] Revert "Temp" This reverts commit e49ed1b678976d26ecbdcc2e914a9a3db9255eb2. 
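
Note on the revert: both formulations drop the same inter-cluster rows; the
boolean-mask variant was kept because `where()` preserves the length of the
time dimension, while `isel()` shrinks it. A minimal sketch of the equivalence
(hypothetical sizes, plain numpy/xarray only, not the flixopt API):

    import numpy as np
    import xarray as xr

    n_clusters, steps = 2, 4
    n = n_clusters * steps
    rows = xr.DataArray(np.arange(n, dtype=float), dims=['time'])

    # Boundary rows T-1, 2T-1, ... would link one cluster's end to the next's start.
    boundary = [c * steps - 1 for c in range(1, n_clusters)]

    # Kept variant: boolean mask; time dimension keeps its length (NaN at boundaries).
    keep = np.ones(n, dtype=bool)
    keep[boundary] = False
    masked = rows.where(xr.DataArray(keep, dims=['time']))

    # Reverted variant: integer selection; time dimension shrinks.
    selected = rows.isel(time=[i for i in range(n) if i not in boundary])

    assert int(masked.count()) == selected.sizes['time']  # same rows survive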
--- flixopt/components.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/flixopt/components.py b/flixopt/components.py index 4fa28eae6..97c1bb087 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -918,14 +918,8 @@ def _do_modeling(self): # Apply intra-cluster mask if clustered (skip inter-cluster boundaries) clustering = self._model.flow_system.clustering if clustering is not None: - # Get indices to keep (all except cluster boundaries) - n_clusters = clustering.n_clusters - steps_per_cluster = clustering.timesteps_per_period - n_timesteps = n_clusters * steps_per_cluster - # Boundary indices: T-1, 2T-1, ..., (n_clusters-1)*T - 1 - boundary_indices = {(c * steps_per_cluster) - 1 for c in range(1, n_clusters)} - valid_indices = [i for i in range(n_timesteps) if i not in boundary_indices] - lhs = lhs.isel(time=valid_indices) + mask = clustering.get_intra_cluster_mask(self._model.flow_system.timesteps) + lhs = lhs.where(mask) self.add_constraints(lhs == 0, short_name='charge_state') From 8fae0f085c634377be2839bf47b4c0bf1eabbf58 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 18:01:31 +0100 Subject: [PATCH 102/191] fix intra_cluster_mask --- flixopt/aggregation/base.py | 31 +++++++++++++------------------ flixopt/components.py | 4 +++- 2 files changed, 16 insertions(+), 19 deletions(-) diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index 89067dc51..7c44205df 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -985,16 +985,16 @@ def timestep_mapping(self) -> xr.DataArray: """Mapping from original timesteps to representative timestep indices.""" return self.result.timestep_mapping - def get_intra_cluster_mask(self, time_coords: np.ndarray | xr.DataArray) -> xr.DataArray: - """Get mask for intra-cluster timestep transitions (cached). + @property + def intra_cluster_mask(self) -> xr.DataArray: + """Boolean mask for intra-cluster timestep transitions. - Returns a boolean mask that is True for transitions within a cluster + Returns a mask that is True for transitions within a cluster and False for transitions at cluster boundaries. Used to skip inter-cluster balance constraints in storage models. - Args: - time_coords: Time coordinates for the mask. Should match the - constraint's time dimension (typically flow_system.timesteps). + The mask uses the original FlowSystem's timesteps as coordinates. + Use `.assign_coords(time=new_coords)` if different coordinates are needed. Returns: DataArray with dims ['time'] or ['time', 'period', 'scenario'], @@ -1012,15 +1012,6 @@ def get_intra_cluster_mask(self, time_coords: np.ndarray | xr.DataArray) -> xr.D if self.result.cluster_structure is None: raise ValueError('No cluster_structure available') - self._intra_cluster_mask = self._compute_intra_cluster_mask(time_coords) - return self._intra_cluster_mask - - def _compute_intra_cluster_mask(self, time_coords: np.ndarray | xr.DataArray) -> xr.DataArray: - """Compute the intra-cluster mask. - - Boundary positions are uniform across all period/scenario slices since - n_clusters and timesteps_per_cluster are uniform. 
- """ n_clusters = self.n_clusters steps_per_cluster = self.timesteps_per_period n_timesteps = n_clusters * steps_per_cluster @@ -1033,8 +1024,11 @@ def _compute_intra_cluster_mask(self, time_coords: np.ndarray | xr.DataArray) -> mask_values = np.ones(n_timesteps, dtype=bool) mask_values[boundary_positions] = False - if isinstance(time_coords, xr.DataArray): - time_coords = time_coords.values + # Use clustered timesteps from aggregated_data or representative_weights + if self.result.aggregated_data is not None and 'time' in self.result.aggregated_data.coords: + time_coords = self.result.aggregated_data.coords['time'].values + else: + time_coords = self.result.representative_weights.coords['time'].values mask = xr.DataArray( mask_values, @@ -1050,7 +1044,8 @@ def _compute_intra_cluster_mask(self, time_coords: np.ndarray | xr.DataArray) -> if original_fs.scenarios is not None: mask = mask.expand_dims(scenario=list(original_fs.scenarios)) - return mask + self._intra_cluster_mask = mask + return self._intra_cluster_mask def create_cluster_structure_from_mapping( diff --git a/flixopt/components.py b/flixopt/components.py index 97c1bb087..c26285bd6 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -918,7 +918,9 @@ def _do_modeling(self): # Apply intra-cluster mask if clustered (skip inter-cluster boundaries) clustering = self._model.flow_system.clustering if clustering is not None: - mask = clustering.get_intra_cluster_mask(self._model.flow_system.timesteps) + # Shift mask coords to match lhs (which uses charge_state[1:], i.e., timesteps_extra[1:]) + shifted_time_coords = self._model.flow_system.timesteps_extra[1:] + mask = clustering.intra_cluster_mask.assign_coords(time=shifted_time_coords) lhs = lhs.where(mask) self.add_constraints(lhs == 0, short_name='charge_state') From d202b18d0ef3b9ed4dba99f12aea8f0429f03090 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 18:12:17 +0100 Subject: [PATCH 103/191] Change to cluster_start_mask --- flixopt/aggregation/base.py | 41 ++++++++++++++++--------------------- 1 file changed, 18 insertions(+), 23 deletions(-) diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index 7c44205df..70e25806e 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -908,7 +908,7 @@ class Clustering: backend_name: str = 'unknown' storage_inter_cluster_linking: bool = True storage_cyclic: bool = True - _intra_cluster_mask: xr.DataArray | None = field(default=None, repr=False) + _cluster_start_mask: xr.DataArray | None = field(default=None, repr=False) def __repr__(self) -> str: cs = self.result.cluster_structure @@ -986,28 +986,28 @@ def timestep_mapping(self) -> xr.DataArray: return self.result.timestep_mapping @property - def intra_cluster_mask(self) -> xr.DataArray: - """Boolean mask for intra-cluster timestep transitions. + def cluster_start_mask(self) -> xr.DataArray: + """Boolean mask True for the first timestep of each cluster. - Returns a mask that is True for transitions within a cluster - and False for transitions at cluster boundaries. Used to skip - inter-cluster balance constraints in storage models. + This provides a simple, intuitive way to identify cluster boundaries. + Invert (~) to get a mask for non-start timesteps. - The mask uses the original FlowSystem's timesteps as coordinates. + The mask uses clustered FlowSystem's timesteps as coordinates. Use `.assign_coords(time=new_coords)` if different coordinates are needed. 
Returns: DataArray with dims ['time'] or ['time', 'period', 'scenario'], - True for intra-cluster transitions. + True for first timestep of each cluster. Example: For 2 clusters with 24 timesteps each (48 total): - - Positions 0-22: True (within cluster 0) - - Position 23: False (boundary between cluster 0 and 1) - - Positions 24-47: True (within cluster 1) + - Position 0: True (start of cluster 0) + - Positions 1-23: False + - Position 24: True (start of cluster 1) + - Positions 25-47: False """ - if self._intra_cluster_mask is not None: - return self._intra_cluster_mask + if self._cluster_start_mask is not None: + return self._cluster_start_mask if self.result.cluster_structure is None: raise ValueError('No cluster_structure available') @@ -1016,13 +1016,8 @@ def intra_cluster_mask(self) -> xr.DataArray: steps_per_cluster = self.timesteps_per_period n_timesteps = n_clusters * steps_per_cluster - # Boundary positions: T-1, 2T-1, ..., (n_clusters-1)*T - 1 - # Position k links charge_state[k+1] to charge_state[k] - # Boundary at k means k is last timestep of cluster, k+1 is first of next - boundary_positions = [(c * steps_per_cluster) - 1 for c in range(1, n_clusters)] - - mask_values = np.ones(n_timesteps, dtype=bool) - mask_values[boundary_positions] = False + # First timestep of each cluster: 0, T, 2T, ... + mask_values = (np.arange(n_timesteps) % steps_per_cluster) == 0 # Use clustered timesteps from aggregated_data or representative_weights if self.result.aggregated_data is not None and 'time' in self.result.aggregated_data.coords: @@ -1034,7 +1029,7 @@ def intra_cluster_mask(self) -> xr.DataArray: mask_values, dims=['time'], coords={'time': time_coords}, - name='intra_cluster_mask', + name='cluster_start_mask', ) # Expand to include period/scenario dimensions if present (for broadcasting) @@ -1044,8 +1039,8 @@ def intra_cluster_mask(self) -> xr.DataArray: if original_fs.scenarios is not None: mask = mask.expand_dims(scenario=list(original_fs.scenarios)) - self._intra_cluster_mask = mask - return self._intra_cluster_mask + self._cluster_start_mask = mask + return self._cluster_start_mask def create_cluster_structure_from_mapping( From 9d95ae037181802583ae3fcef0bab0fbfea0b920 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 18:16:13 +0100 Subject: [PATCH 104/191] Fix storage cluster decoupling --- flixopt/components.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/flixopt/components.py b/flixopt/components.py index c26285bd6..d923072d7 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -918,9 +918,22 @@ def _do_modeling(self): # Apply intra-cluster mask if clustered (skip inter-cluster boundaries) clustering = self._model.flow_system.clustering if clustering is not None: - # Shift mask coords to match lhs (which uses charge_state[1:], i.e., timesteps_extra[1:]) + # Skip transition j→j+1 if j+1 is a cluster start (entering a new cluster) + # cluster_start_mask[1:] gives starts at positions 1..n, which correspond to + # constraint positions 0..n-1 (shifted by 1). We invert to get "keep" mask. 
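+            # Hypothetical worked example: 2 clusters x 24 steps -> cluster_start_mask
+            # is True at positions 0 and 24; after the shift below, only constraint
+            # position 23 (the cluster-0 -> cluster-1 transition) is masked out.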
+ cluster_start = clustering.cluster_start_mask + intra_mask_values = np.ones(len(cluster_start), dtype=bool) + intra_mask_values[:-1] = ~cluster_start.values[1:] # Skip where next is a start + shifted_time_coords = self._model.flow_system.timesteps_extra[1:] - mask = clustering.intra_cluster_mask.assign_coords(time=shifted_time_coords) + mask = xr.DataArray(intra_mask_values, dims=['time'], coords={'time': shifted_time_coords}) + + # Expand dims to match lhs if cluster_start has period/scenario dims + if 'period' in cluster_start.dims: + mask = mask.expand_dims(period=cluster_start.coords['period'].values) + if 'scenario' in cluster_start.dims: + mask = mask.expand_dims(scenario=cluster_start.coords['scenario'].values) + lhs = lhs.where(mask) self.add_constraints(lhs == 0, short_name='charge_state') From 6b030c714843dedec1efe365eff0dbf29c047600 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 18:22:09 +0100 Subject: [PATCH 105/191] Switch cluster start indexing --- flixopt/aggregation/base.py | 59 +++++++------------------------------ flixopt/components.py | 20 +++++-------- 2 files changed, 18 insertions(+), 61 deletions(-) diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py index 70e25806e..a4fb7e828 100644 --- a/flixopt/aggregation/base.py +++ b/flixopt/aggregation/base.py @@ -17,7 +17,7 @@ from __future__ import annotations -from dataclasses import dataclass, field +from dataclasses import dataclass from typing import TYPE_CHECKING, Any import numpy as np @@ -908,7 +908,6 @@ class Clustering: backend_name: str = 'unknown' storage_inter_cluster_linking: bool = True storage_cyclic: bool = True - _cluster_start_mask: xr.DataArray | None = field(default=None, repr=False) def __repr__(self) -> str: cs = self.result.cluster_structure @@ -986,61 +985,25 @@ def timestep_mapping(self) -> xr.DataArray: return self.result.timestep_mapping @property - def cluster_start_mask(self) -> xr.DataArray: - """Boolean mask True for the first timestep of each cluster. + def cluster_start_positions(self) -> np.ndarray: + """Integer positions where clusters start. - This provides a simple, intuitive way to identify cluster boundaries. - Invert (~) to get a mask for non-start timesteps. - - The mask uses clustered FlowSystem's timesteps as coordinates. - Use `.assign_coords(time=new_coords)` if different coordinates are needed. + Returns the indices of the first timestep of each cluster. + Use these positions to build masks for specific use cases. Returns: - DataArray with dims ['time'] or ['time', 'period', 'scenario'], - True for first timestep of each cluster. + 1D numpy array of positions: [0, T, 2T, ...] where T = timesteps_per_period. Example: - For 2 clusters with 24 timesteps each (48 total): - - Position 0: True (start of cluster 0) - - Positions 1-23: False - - Position 24: True (start of cluster 1) - - Positions 25-47: False + For 2 clusters with 24 timesteps each: + >>> clustering.cluster_start_positions + array([0, 24]) """ - if self._cluster_start_mask is not None: - return self._cluster_start_mask - if self.result.cluster_structure is None: raise ValueError('No cluster_structure available') - n_clusters = self.n_clusters - steps_per_cluster = self.timesteps_per_period - n_timesteps = n_clusters * steps_per_cluster - - # First timestep of each cluster: 0, T, 2T, ... 
- mask_values = (np.arange(n_timesteps) % steps_per_cluster) == 0 - - # Use clustered timesteps from aggregated_data or representative_weights - if self.result.aggregated_data is not None and 'time' in self.result.aggregated_data.coords: - time_coords = self.result.aggregated_data.coords['time'].values - else: - time_coords = self.result.representative_weights.coords['time'].values - - mask = xr.DataArray( - mask_values, - dims=['time'], - coords={'time': time_coords}, - name='cluster_start_mask', - ) - - # Expand to include period/scenario dimensions if present (for broadcasting) - original_fs = self.original_flow_system - if original_fs.periods is not None: - mask = mask.expand_dims(period=list(original_fs.periods)) - if original_fs.scenarios is not None: - mask = mask.expand_dims(scenario=list(original_fs.scenarios)) - - self._cluster_start_mask = mask - return self._cluster_start_mask + n_timesteps = self.n_clusters * self.timesteps_per_period + return np.arange(0, n_timesteps, self.timesteps_per_period) def create_cluster_structure_from_mapping( diff --git a/flixopt/components.py b/flixopt/components.py index d923072d7..74915b1ee 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -918,21 +918,15 @@ def _do_modeling(self): # Apply intra-cluster mask if clustered (skip inter-cluster boundaries) clustering = self._model.flow_system.clustering if clustering is not None: - # Skip transition j→j+1 if j+1 is a cluster start (entering a new cluster) - # cluster_start_mask[1:] gives starts at positions 1..n, which correspond to - # constraint positions 0..n-1 (shifted by 1). We invert to get "keep" mask. - cluster_start = clustering.cluster_start_mask - intra_mask_values = np.ones(len(cluster_start), dtype=bool) - intra_mask_values[:-1] = ~cluster_start.values[1:] # Skip where next is a start + # Skip constraint at position (start - 1) for each cluster start after the first. + # This removes the link between end of cluster N and start of cluster N+1. + starts = clustering.cluster_start_positions + n_timesteps = len(self._model.flow_system.timesteps) + mask_values = np.ones(n_timesteps, dtype=bool) + mask_values[starts[1:] - 1] = False # Skip positions before each new cluster shifted_time_coords = self._model.flow_system.timesteps_extra[1:] - mask = xr.DataArray(intra_mask_values, dims=['time'], coords={'time': shifted_time_coords}) - - # Expand dims to match lhs if cluster_start has period/scenario dims - if 'period' in cluster_start.dims: - mask = mask.expand_dims(period=cluster_start.coords['period'].values) - if 'scenario' in cluster_start.dims: - mask = mask.expand_dims(scenario=cluster_start.coords['scenario'].values) + mask = xr.DataArray(mask_values, dims=['time'], coords={'time': shifted_time_coords}) lhs = lhs.where(mask) From b4dd4286edd39477308e027242f76f678c23f01e Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 19 Dec 2025 18:28:38 +0100 Subject: [PATCH 106/191] Improve cluster indexing in Storage --- flixopt/components.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/flixopt/components.py b/flixopt/components.py index 74915b1ee..e0eafd1b5 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -920,15 +920,10 @@ def _do_modeling(self): if clustering is not None: # Skip constraint at position (start - 1) for each cluster start after the first. # This removes the link between end of cluster N and start of cluster N+1. 
-            starts = clustering.cluster_start_positions
-            n_timesteps = len(self._model.flow_system.timesteps)
-            mask_values = np.ones(n_timesteps, dtype=bool)
-            mask_values[starts[1:] - 1] = False  # Skip positions before each new cluster
+            mask = np.ones(lhs.sizes['time'], dtype=bool)
+            mask[clustering.cluster_start_positions] = False
-            shifted_time_coords = self._model.flow_system.timesteps_extra[1:]
-            mask = xr.DataArray(mask_values, dims=['time'], coords={'time': shifted_time_coords})
-            lhs = lhs.where(mask)
+            lhs = lhs.where(xr.DataArray(mask, coords={'time': lhs.coords['time']}))
 
         self.add_constraints(lhs == 0, short_name='charge_state')
 
From ecb2c9d3fa37b107ba135d8814d91391ce041ae8 Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Fri, 19 Dec 2025 18:36:08 +0100
Subject: [PATCH 107/191] Improve cluster indexing in Storage

---
 flixopt/components.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/flixopt/components.py b/flixopt/components.py
index e0eafd1b5..f329fa746 100644
--- a/flixopt/components.py
+++ b/flixopt/components.py
@@ -922,10 +922,11 @@ def _do_modeling(self):
             # This removes the link between end of cluster N and start of cluster N+1.
             mask = np.ones(lhs.sizes['time'], dtype=bool)
             mask[clustering.cluster_start_positions] = False
+            mask = xr.DataArray(mask, coords={'time': lhs.coords['time']})
+        else:
+            mask = None
 
-        lhs = lhs.where(xr.DataArray(mask, coords={'time': lhs.coords['time']}))
-
-        self.add_constraints(lhs == 0, short_name='charge_state')
+        self.add_constraints(lhs == 0, short_name='charge_state', mask=mask)
 
         # Create InvestmentModel and bounding constraints for investment
         if isinstance(self.element.capacity_in_flow_hours, InvestParameters):
             self.add_submodels(
From 355a205a36af4cb80a6d5ca90c7fb7df45bb30b3 Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Fri, 19 Dec 2025 18:59:33 +0100
Subject: [PATCH 108/191] Add more storage options when clustering

---
 flixopt/aggregation/base.py   | 20 ++++++++-------
 flixopt/flow_system.py        |  8 +++++---
 flixopt/transform_accessor.py | 28 ++++++++++++++++----------
 3 files changed, 31 insertions(+), 25 deletions(-)

diff --git a/flixopt/aggregation/base.py b/flixopt/aggregation/base.py
index a4fb7e828..59facd379 100644
--- a/flixopt/aggregation/base.py
+++ b/flixopt/aggregation/base.py
@@ -18,7 +18,7 @@
 from __future__ import annotations
 
 from dataclasses import dataclass
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING, Any, Literal
 
 import numpy as np
 import xarray as xr
@@ -892,8 +892,11 @@ class Clustering:
         result: The ClusterResult from the aggregation backend.
         original_flow_system: Reference to the FlowSystem before aggregation.
         backend_name: Name of the aggregation backend used (e.g., 'tsam', 'manual').
-        storage_inter_cluster_linking: Whether to add inter-cluster storage constraints.
-        storage_cyclic: Whether to enforce cyclic storage (SOC[start] = SOC[end]).
+ storage_mode: How storages are treated during clustering: + - 'independent': Clusters fully decoupled, no constraints between clusters + - 'cyclic': Each cluster's start equals its end (self-contained periods) + - 'intercluster': Link storage state across original timeline (seasonal storage) + - 'intercluster_cyclic': Like 'intercluster' but overall timeline is cyclic Example: >>> fs_clustered = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D') @@ -906,8 +909,7 @@ class Clustering: result: ClusterResult original_flow_system: FlowSystem # FlowSystem - avoid circular import backend_name: str = 'unknown' - storage_inter_cluster_linking: bool = True - storage_cyclic: bool = True + storage_mode: Literal['independent', 'cyclic', 'intercluster', 'intercluster_cyclic'] = 'intercluster_cyclic' def __repr__(self) -> str: cs = self.result.cluster_structure @@ -918,13 +920,7 @@ def __repr__(self) -> str: structure_info = f'{cs.n_original_periods} periods → {n_clusters} clusters' else: structure_info = 'no structure' - return ( - f'Clustering(\n' - f' backend={self.backend_name!r}\n' - f' {structure_info}\n' - f' storage_linking={self.storage_inter_cluster_linking}, cyclic={self.storage_cyclic}\n' - f')' - ) + return f'Clustering(\n backend={self.backend_name!r}\n {structure_info}\n storage={self.storage_mode!r}\n)' @property def plot(self) -> ClusteringPlotAccessor: diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index e2cde262b..57fd62c14 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -1311,8 +1311,9 @@ def _add_inter_cluster_linking(self) -> None: if info is None: return - if not info.storage_inter_cluster_linking: - logger.info('Storage inter-cluster linking disabled') + # Only add inter-cluster linking for 'intercluster' and 'intercluster_cyclic' modes + if info.storage_mode not in ('intercluster', 'intercluster_cyclic'): + logger.info(f"Storage mode '{info.storage_mode}' - skipping inter-cluster linking") return if info.result.cluster_structure is None: @@ -1320,11 +1321,12 @@ def _add_inter_cluster_linking(self) -> None: return # Create inter-cluster linking model for storage + storage_cyclic = info.storage_mode == 'intercluster_cyclic' linking_model = InterClusterLinking( model=self.model, flow_system=self, cluster_structure=info.result.cluster_structure, - storage_cyclic=info.storage_cyclic, + storage_cyclic=storage_cyclic, ) linking_model.do_modeling() diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 00eabe093..b857cbaa2 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -581,8 +581,7 @@ def cluster( weights: dict[str, float] | None = None, time_series_for_high_peaks: list[str] | None = None, time_series_for_low_peaks: list[str] | None = None, - storage_inter_cluster_linking: bool = True, - storage_cyclic: bool = True, + storage: Literal['independent', 'cyclic', 'intercluster', 'intercluster_cyclic'] = 'intercluster_cyclic', ) -> FlowSystem: """ Create a FlowSystem with reduced timesteps using typical clusters. @@ -595,7 +594,7 @@ def cluster( 1. Performs time series clustering using tsam (k-means) 2. Extracts only the typical clusters (not all original timesteps) 3. Applies timestep weighting for accurate cost representation - 4. Optionally links storage states between clusters via boundary variables + 4. 
Handles storage states between clusters based on the ``storage`` mode
 
     Use this for initial sizing optimization, then use ``fix_sizes()`` to
     re-optimize at full resolution for accurate dispatch results.
 
@@ -608,10 +607,19 @@ def cluster(
             time_series_for_high_peaks: Time series labels for explicitly selecting high-value clusters.
                 **Recommended** for demand time series to capture peak demand days.
             time_series_for_low_peaks: Time series labels for explicitly selecting low-value clusters.
-            storage_inter_cluster_linking: If True, link storage states between clusters using
-                boundary variables. This preserves long-term storage behavior. Default: True.
-            storage_cyclic: If True, enforce SOC_boundary[0] = SOC_boundary[end] for storages.
-                Only used when storage_inter_cluster_linking=True. Default: True.
+            storage: How storages are treated during clustering. Options:
+
+                - ``'independent'``: Clusters are fully decoupled. No constraints between
+                  clusters, each cluster has free start/end SOC. Fast but ignores
+                  seasonal storage value.
+                - ``'cyclic'``: Each cluster is self-contained. The SOC at the start of
+                  each cluster equals its end (cluster returns to initial state).
+                  Good for "average day" modeling.
+                - ``'intercluster'``: Link storage state across the original timeline using
+                  SOC boundary variables (Kotzur et al. approach). Properly values
+                  seasonal storage patterns. Overall SOC can drift.
+                - ``'intercluster_cyclic'`` (default): Like 'intercluster' but also enforces
+                  that overall SOC returns to initial state (yearly cyclic).
 
         Returns:
             A new FlowSystem with reduced timesteps (only typical clusters).
@@ -645,7 +653,8 @@
             - This is best suited for initial sizing, not final dispatch optimization
             - Use ``time_series_for_high_peaks`` to ensure peak demand clusters are captured
             - A 5-10% safety margin on sizes is recommended for the dispatch stage
-            - Storage linking adds SOC_boundary variables to track state between clusters
+            - For seasonal storage (e.g., hydrogen, thermal storage), use 'intercluster' or
+              'intercluster_cyclic' to properly value long-term storage
         """
         import tsam.timeseriesaggregation as tsam
 
@@ -880,8 +889,7 @@ def _build_cluster_occurrences_for_key(key: tuple) -> np.ndarray:
             result=aggregation_result,
             original_flow_system=self._fs,
             backend_name='tsam',
-            storage_inter_cluster_linking=storage_inter_cluster_linking,
-            storage_cyclic=storage_cyclic,
+            storage_mode=storage,
         )
 
         return reduced_fs
From 3c0203a735486b712d502a03edf2a507fb5bf16d Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Fri, 19 Dec 2025 19:02:05 +0100
Subject: [PATCH 109/191] Add more storage options when clustering

---
 flixopt/components.py | 25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)

diff --git a/flixopt/components.py b/flixopt/components.py
index f329fa746..7c06dc2ed 100644
--- a/flixopt/components.py
+++ b/flixopt/components.py
@@ -915,19 +915,32 @@ def _do_modeling(self):
             + discharge_rate * timestep_duration / eff_discharge
         )
 
-        # Apply intra-cluster mask if clustered (skip inter-cluster boundaries)
+        # Handle clustering modes for storage
         clustering = self._model.flow_system.clustering
-        if clustering is not None:
-            # Skip constraint at position (start - 1) for each cluster start after the first.
-            # This removes the link between end of cluster N and start of N+1.
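+        # mask stays None (the constraint applies at every timestep) unless a
+        # clustering mode below asks for the inter-cluster boundary rows to be dropped.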
+        mask = None
+
+        if clustering is not None and clustering.storage_mode in ('independent', 'intercluster', 'intercluster_cyclic'):
+            # Skip inter-cluster boundaries: removes link between end of cluster N and start of N+1
             mask = np.ones(lhs.sizes['time'], dtype=bool)
             mask[clustering.cluster_start_positions] = False
             mask = xr.DataArray(mask, coords={'time': lhs.coords['time']})
 
         self.add_constraints(lhs == 0, short_name='charge_state', mask=mask)
 
+        # For 'cyclic' mode: each cluster's start equals its end
+        if clustering is not None and clustering.storage_mode == 'cyclic':
+            starts = clustering.cluster_start_positions
+            for i, start_pos in enumerate(starts):
+                # End of cluster i is at (start of cluster i+1) - 1, or last timestep for final cluster
+                if i < len(starts) - 1:
+                    end_pos = starts[i + 1]  # In timesteps_extra, this is the end of cluster i
+                else:
+                    end_pos = len(self._model.flow_system.timesteps)  # Last position in timesteps_extra
+                self.add_constraints(
+                    charge_state.isel(time=start_pos) == charge_state.isel(time=end_pos),
+                    short_name=f'cluster_cyclic_{i}',
+                )
+
         # Create InvestmentModel and bounding constraints for investment
         if isinstance(self.element.capacity_in_flow_hours, InvestParameters):
             self.add_submodels(
From 1612599382dd81152b5af155701eeb81b95100ff Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Fri, 19 Dec 2025 19:05:20 +0100
Subject: [PATCH 110/191] Add more storage options when clustering

---
 flixopt/components.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/flixopt/components.py b/flixopt/components.py
index 7c06dc2ed..6069b947a 100644
--- a/flixopt/components.py
+++ b/flixopt/components.py
@@ -919,8 +919,8 @@ def _do_modeling(self):
         clustering = self._model.flow_system.clustering
         mask = None
 
-        if clustering is not None and clustering.storage_mode in ('independent', 'intercluster', 'intercluster_cyclic'):
-            # Skip inter-cluster boundaries: removes link between end of cluster N and start of N+1
+        if clustering is not None:
+            # All modes skip inter-cluster boundaries: removes naive link between end of cluster N and start of N+1
             mask = np.ones(lhs.sizes['time'], dtype=bool)
             mask[clustering.cluster_start_positions] = False
             mask = xr.DataArray(mask, coords={'time': lhs.coords['time']})
From d6b82b5f52dffbd7a798edc7bacca6b1e1eb5190 Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Fri, 19 Dec 2025 19:11:18 +0100
Subject: [PATCH 111/191] Add more storage options when clustering

---
 flixopt/transform_accessor.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py
index b857cbaa2..804c63ef2 100644
--- a/flixopt/transform_accessor.py
+++ b/flixopt/transform_accessor.py
@@ -581,7 +581,7 @@ def cluster(
         weights: dict[str, float] | None = None,
         time_series_for_high_peaks: list[str] | None = None,
         time_series_for_low_peaks: list[str] | None = None,
-        storage: Literal['independent', 'cyclic', 'intercluster', 'intercluster_cyclic'] = 'intercluster_cyclic',
+        storage_mode: Literal['independent', 'cyclic', 'intercluster', 'intercluster_cyclic'] = 'intercluster_cyclic',
     ) -> FlowSystem:
         """
         Create a FlowSystem with reduced timesteps using typical clusters.
@@ -607,7 +607,7 @@ def cluster(
             time_series_for_high_peaks: Time series labels for explicitly selecting high-value clusters.
                 **Recommended** for demand time series to capture peak demand days.
            time_series_for_low_peaks: Time series labels for explicitly selecting low-value clusters.
-            storage: How storages are treated during clustering. Options:
+            storage_mode: How storages are treated during clustering. Options:
 
                 - ``'independent'``: Clusters are fully decoupled. No constraints between
@@ -889,7 +889,7 @@ def _build_cluster_occurrences_for_key(key: tuple) -> np.ndarray:
             result=aggregation_result,
             original_flow_system=self._fs,
             backend_name='tsam',
-            storage_mode=storage,
+            storage_mode=storage_mode,
         )
 
         return reduced_fs
From 97ba2438c6a85942aaf8910d1a1df24b97030373 Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Fri, 19 Dec 2025 19:18:29 +0100
Subject: [PATCH 112/191] Add more storage options when clustering

---
 flixopt/components.py         | 10 +++++-----
 flixopt/transform_accessor.py |  4 ++--
 2 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/flixopt/components.py b/flixopt/components.py
index 6069b947a..acc5e8216 100644
--- a/flixopt/components.py
+++ b/flixopt/components.py
@@ -270,7 +270,7 @@ class Storage(Component):
             maximum_size (or fixed_size) must be explicitly set for proper model scaling.
         relative_minimum_charge_state: Minimum charge state (0-1). Default: 0.
         relative_maximum_charge_state: Maximum charge state (0-1). Default: 1.
-        initial_charge_state: Charge at start. Numeric or 'equals_final'. Default: 0.
+        initial_charge_state: Charge at start. Numeric, 'equals_final', or None (free). Default: 0.
         minimal_final_charge_state: Minimum absolute charge required at end (optional).
         maximal_final_charge_state: Maximum absolute charge allowed at end (optional).
         relative_minimum_final_charge_state: Minimum relative charge at end.
@@ -388,7 +388,7 @@ def __init__(
         capacity_in_flow_hours: Numeric_PS | InvestParameters | None = None,
         relative_minimum_charge_state: Numeric_TPS = 0,
         relative_maximum_charge_state: Numeric_TPS = 1,
-        initial_charge_state: Numeric_PS | Literal['equals_final'] = 0,
+        initial_charge_state: Numeric_PS | Literal['equals_final'] | None = 0,
         minimal_final_charge_state: Numeric_PS | None = None,
         maximal_final_charge_state: Numeric_PS | None = None,
         relative_minimum_final_charge_state: Numeric_PS | None = None,
@@ -452,7 +452,7 @@ def transform_data(self) -> None:
         self.relative_loss_per_hour = self._fit_coords(
             f'{self.prefix}|relative_loss_per_hour', self.relative_loss_per_hour
         )
-        if not isinstance(self.initial_charge_state, str):
+        if self.initial_charge_state is not None and not isinstance(self.initial_charge_state, str):
             self.initial_charge_state = self._fit_coords(
                 f'{self.prefix}|initial_charge_state', self.initial_charge_state, dims=['period', 'scenario']
             )
@@ -531,8 +531,8 @@ def _plausibility_checks(self) -> None:
         min_initial_at_max_capacity = maximum_capacity * self.relative_minimum_charge_state.isel(time=0)
         max_initial_at_min_capacity = minimum_capacity * self.relative_maximum_charge_state.isel(time=0)
 
-        # Only perform numeric comparisons if not using 'equals_final'
-        if not initial_equals_final:
+        # Only perform numeric comparisons if using a numeric initial_charge_state
+        if not initial_equals_final and self.initial_charge_state is not None:
             if (self.initial_charge_state > max_initial_at_min_capacity).any():
                 raise PlausibilityError(
                     f'{self.label_full}: {self.initial_charge_state=} '
diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py
index 804c63ef2..86797c861 100644
--- a/flixopt/transform_accessor.py
+++ b/flixopt/transform_accessor.py
@@ -800,11 +800,11 @@ def _build_weights_for_key(key: tuple) -> xr.DataArray:
         )
 
         # Remove 'equals_final' from storages - doesn't make sense on reduced timesteps
+        # Set to None so initial SOC is free (handled by storage_mode constraints)
         for storage in reduced_fs.storages.values():
-            # Handle both scalar and xarray cases
             ics = storage.initial_charge_state
             if isinstance(ics, str) and ics == 'equals_final':
-                storage.initial_charge_state = 0
+                storage.initial_charge_state = None
 
         # Build Clustering for inter-cluster linking and solution expansion
         n_original_timesteps = len(self._fs.timesteps)
From 1b62c2c81b7db83a8a0c78b9d50a4298bf0d8b0a Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Fri, 19 Dec 2025 21:59:40 +0100
Subject: [PATCH 113/191] Fix storage masking

---
 flixopt/components.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/flixopt/components.py b/flixopt/components.py
index acc5e8216..f9d36a19d 100644
--- a/flixopt/components.py
+++ b/flixopt/components.py
@@ -922,7 +922,7 @@ def _do_modeling(self):
         if clustering is not None:
             # All modes skip inter-cluster boundaries: removes naive link between end of cluster N and start of N+1
             mask = np.ones(lhs.sizes['time'], dtype=bool)
-            mask[clustering.cluster_start_positions] = False
+            mask[clustering.cluster_start_positions[1:] - 1] = False
             mask = xr.DataArray(mask, coords={'time': lhs.coords['time']})
 
         self.add_constraints(lhs == 0, short_name='charge_state', mask=mask)
From 8d026b52ff501bea689f924d5d2e614c29597a8a Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Sat, 20 Dec 2025 16:39:39 +0100
Subject: [PATCH 114/191] Fix semantics: aggregation -> clustering

---
 flixopt/__init__.py 
| 4 ++-- flixopt/{aggregation => clustering}/__init__.py | 0 flixopt/{aggregation => clustering}/base.py | 0 .../storage_linking.py | 2 +- flixopt/flow_system.py | 4 ++-- flixopt/transform_accessor.py | 2 +- mkdocs.yml | 2 +- .../__init__.py | 0 .../test_base.py | 4 ++-- .../test_integration.py | 16 ++++++++-------- 10 files changed, 17 insertions(+), 17 deletions(-) rename flixopt/{aggregation => clustering}/__init__.py (100%) rename flixopt/{aggregation => clustering}/base.py (100%) rename flixopt/{aggregation => clustering}/storage_linking.py (99%) rename tests/{test_aggregation => test_clustering}/__init__.py (100%) rename tests/{test_aggregation => test_clustering}/test_base.py (98%) rename tests/{test_aggregation => test_clustering}/test_integration.py (92%) diff --git a/flixopt/__init__.py b/flixopt/__init__.py index e79af22ce..73784f2cd 100644 --- a/flixopt/__init__.py +++ b/flixopt/__init__.py @@ -13,7 +13,7 @@ __version__ = '0.0.0.dev0' # Import commonly used classes and functions -from . import aggregation, linear_converters, plotting, results, solvers +from . import clustering, linear_converters, plotting, results, solvers from .carrier import Carrier, CarrierContainer from .components import ( LinearConverter, @@ -59,7 +59,7 @@ 'PiecewiseEffects', 'PlotResult', 'TimeSeriesWeights', - 'aggregation', + 'clustering', 'plotting', 'results', 'linear_converters', diff --git a/flixopt/aggregation/__init__.py b/flixopt/clustering/__init__.py similarity index 100% rename from flixopt/aggregation/__init__.py rename to flixopt/clustering/__init__.py diff --git a/flixopt/aggregation/base.py b/flixopt/clustering/base.py similarity index 100% rename from flixopt/aggregation/base.py rename to flixopt/clustering/base.py diff --git a/flixopt/aggregation/storage_linking.py b/flixopt/clustering/storage_linking.py similarity index 99% rename from flixopt/aggregation/storage_linking.py rename to flixopt/clustering/storage_linking.py index 66f948be9..af4631507 100644 --- a/flixopt/aggregation/storage_linking.py +++ b/flixopt/clustering/storage_linking.py @@ -40,7 +40,7 @@ class InterClusterLinking(Submodel): patterns while only solving for the representative timesteps. Example: - >>> from flixopt.aggregation import ClusterStructure, InterClusterLinking + >>> from flixopt.clustering import ClusterStructure, InterClusterLinking >>> structure = ClusterStructure(...) >>> model = InterClusterLinking( ... model=flow_system.model, diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 57fd62c14..438cdcf07 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -38,7 +38,7 @@ import pyvis - from .aggregation import Clustering + from .clustering import Clustering from .solvers import _Solver from .structure import TimeSeriesWeights from .types import Effect_TPS, Numeric_S, Numeric_TPS, NumericOrBool @@ -1305,7 +1305,7 @@ def _add_inter_cluster_linking(self) -> None: Creates SOC_boundary variables that link storage states between sequential periods in the original time series, using the delta SOC from representative periods. 
""" - from .aggregation.storage_linking import InterClusterLinking + from .clustering.storage_linking import InterClusterLinking info = self.clustering if info is None: diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 86797c861..3883fb3a2 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -658,7 +658,7 @@ def cluster( """ import tsam.timeseriesaggregation as tsam - from .aggregation import Clustering, ClusterResult, ClusterStructure + from .clustering import Clustering, ClusterResult, ClusterStructure from .core import TimeSeriesData, drop_constant_arrays from .flow_system import FlowSystem diff --git a/mkdocs.yml b/mkdocs.yml index 551fac523..493937983 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -69,7 +69,7 @@ nav: - Piecewise Effects: notebooks/06c-piecewise-effects.ipynb - Scaling: - Scenarios: notebooks/07-scenarios-and-periods.ipynb - - Aggregation: notebooks/08a-aggregation.ipynb + - Clustering: notebooks/08a-aggregation.ipynb - Rolling Horizon: notebooks/08b-rolling-horizon.ipynb - Results: - Plotting: notebooks/09-plotting-and-data-access.ipynb diff --git a/tests/test_aggregation/__init__.py b/tests/test_clustering/__init__.py similarity index 100% rename from tests/test_aggregation/__init__.py rename to tests/test_clustering/__init__.py diff --git a/tests/test_aggregation/test_base.py b/tests/test_clustering/test_base.py similarity index 98% rename from tests/test_aggregation/test_base.py rename to tests/test_clustering/test_base.py index 3b5afda10..a6c4d8cc7 100644 --- a/tests/test_aggregation/test_base.py +++ b/tests/test_clustering/test_base.py @@ -1,10 +1,10 @@ -"""Tests for flixopt.aggregation.base module.""" +"""Tests for flixopt.clustering.base module.""" import numpy as np import pytest import xarray as xr -from flixopt.aggregation import ( +from flixopt.clustering import ( Clustering, ClusterResult, ClusterStructure, diff --git a/tests/test_aggregation/test_integration.py b/tests/test_clustering/test_integration.py similarity index 92% rename from tests/test_aggregation/test_integration.py rename to tests/test_clustering/test_integration.py index b256f4d1e..e3c6083a0 100644 --- a/tests/test_aggregation/test_integration.py +++ b/tests/test_clustering/test_integration.py @@ -130,19 +130,19 @@ def test_cluster_reduces_timesteps(self): assert len(fs_clustered.timesteps) == 48 # 2 representative days x 24 hours -class TestAggregationModuleImports: - """Tests for flixopt.aggregation module imports.""" +class TestClusteringModuleImports: + """Tests for flixopt.clustering module imports.""" def test_import_from_flixopt(self): - """Test that aggregation module can be imported from flixopt.""" - from flixopt import aggregation + """Test that clustering module can be imported from flixopt.""" + from flixopt import clustering - assert hasattr(aggregation, 'ClusterResult') - assert hasattr(aggregation, 'ClusterStructure') - assert hasattr(aggregation, 'Clustering') + assert hasattr(clustering, 'ClusterResult') + assert hasattr(clustering, 'ClusterStructure') + assert hasattr(clustering, 'Clustering') def test_create_cluster_structure_from_mapping_available(self): """Test that create_cluster_structure_from_mapping is available.""" - from flixopt.aggregation import create_cluster_structure_from_mapping + from flixopt.clustering import create_cluster_structure_from_mapping assert callable(create_cluster_structure_from_mapping) From ac6e58a82ce263bf8eac4af5da55d1ea76c29c97 Mon Sep 17 00:00:00 2001 From: FBumann 
<117816358+FBumann@users.noreply.github.com> Date: Sat, 20 Dec 2025 16:47:44 +0100 Subject: [PATCH 115/191] Add seasonal storage example --- .../data/generate_example_systems.py | 141 ++++++++++++++++++ 1 file changed, 141 insertions(+) diff --git a/docs/notebooks/data/generate_example_systems.py b/docs/notebooks/data/generate_example_systems.py index a30761dc3..c8e81167f 100644 --- a/docs/notebooks/data/generate_example_systems.py +++ b/docs/notebooks/data/generate_example_systems.py @@ -6,6 +6,7 @@ 3. multiperiod_system - System with periods and scenarios 4. district_heating_system - Real-world district heating data with investments (1 month) 5. operational_system - Real-world district heating for operational planning (2 weeks, no investments) +6. seasonal_storage_system - Solar thermal + seasonal pit storage (full year, 8760h) Run this script to regenerate the example data files. """ @@ -450,6 +451,145 @@ def create_operational_system() -> fx.FlowSystem: return fs +def create_seasonal_storage_system() -> fx.FlowSystem: + """Create a district heating system with solar thermal and seasonal storage. + + Demonstrates seasonal storage value with: + - Full year at hourly resolution (8760 timesteps) + - Solar thermal: high in summer, low in winter + - Heat demand: high in winter, low in summer + - Large seasonal pit storage (bridges seasons) + - Gas boiler backup + + This system clearly shows the value of inter-cluster storage linking: + - Summer: excess solar heat stored in pit + - Winter: stored heat reduces gas consumption + + Used by: 08c-clustering, 08c2-clustering-storage-modes notebooks + """ + # Full year, hourly + timesteps = pd.date_range('2024-01-01', periods=8760, freq='h') + hours = np.arange(8760) + hour_of_day = hours % 24 + day_of_year = hours // 24 + + np.random.seed(42) + + # --- Solar irradiance profile --- + # Seasonal variation: peaks in summer (day ~180), low in winter + seasonal_solar = 0.5 + 0.5 * np.cos(2 * np.pi * (day_of_year - 172) / 365) # Peak around June 21 + + # Daily variation: peaks at noon + daily_solar = np.maximum(0, np.cos(2 * np.pi * (hour_of_day - 12) / 24)) + + # Combine and scale (MW of solar thermal potential per MW installed) + solar_profile = seasonal_solar * daily_solar + solar_profile = solar_profile * (0.8 + 0.2 * np.random.random(8760)) # Add some variation + solar_profile = np.clip(solar_profile, 0, 1) + + # --- Heat demand profile --- + # Seasonal: high in winter, low in summer + seasonal_demand = 0.6 + 0.4 * np.cos(2 * np.pi * day_of_year / 365) # Peak Jan 1 + + # Daily: higher during day, lower at night + daily_demand = 0.7 + 0.3 * np.sin(2 * np.pi * (hour_of_day - 6) / 24) + + # Combine and scale to ~5 MW peak + heat_demand = 5 * seasonal_demand * daily_demand + heat_demand = heat_demand * (0.9 + 0.2 * np.random.random(8760)) # Add variation + heat_demand = np.clip(heat_demand, 0.5, 6) # MW + + # --- Gas price (slight seasonal variation) --- + gas_price = 40 + 10 * np.cos(2 * np.pi * day_of_year / 365) # €/MWh, higher in winter + + fs = fx.FlowSystem(timesteps) + fs.add_carriers( + fx.Carrier('gas', '#3498db', 'MW'), + fx.Carrier('heat', '#e74c3c', 'MW'), + ) + fs.add_elements( + # Buses + fx.Bus('Gas', carrier='gas'), + fx.Bus('Heat', carrier='heat'), + # Effects + fx.Effect('costs', '€', 'Total Costs', is_standard=True, is_objective=True), + fx.Effect('CO2', 'kg', 'CO2 Emissions'), + # Solar thermal collector (investment) - profile includes 70% collector efficiency + # Costs annualized for single-year analysis + fx.Source( + 
'SolarThermal', + outputs=[ + fx.Flow( + 'Q_th', + bus='Heat', + size=fx.InvestParameters( + minimum_size=0, + maximum_size=20, # MW peak + effects_of_investment_per_size={'costs': 15000}, # €/MW (annualized) + ), + fixed_relative_profile=solar_profile * 0.7, # 70% collector efficiency + ) + ], + ), + # Gas boiler (backup) + fx.linear_converters.Boiler( + 'GasBoiler', + thermal_efficiency=0.90, + thermal_flow=fx.Flow( + 'Q_th', + bus='Heat', + size=fx.InvestParameters( + minimum_size=0, + maximum_size=8, # MW + effects_of_investment_per_size={'costs': 20000}, # €/MW (annualized) + ), + ), + fuel_flow=fx.Flow('Q_fu', bus='Gas'), + ), + # Gas supply (higher price makes solar+storage more attractive) + fx.Source( + 'GasGrid', + outputs=[ + fx.Flow( + 'Q_gas', + bus='Gas', + size=20, + effects_per_flow_hour={'costs': gas_price * 1.5, 'CO2': 0.2}, # €/MWh + ) + ], + ), + # Seasonal pit storage (large capacity for seasonal shifting) + fx.Storage( + 'SeasonalStorage', + capacity_in_flow_hours=fx.InvestParameters( + minimum_size=0, + maximum_size=5000, # MWh - large for seasonal storage + effects_of_investment_per_size={'costs': 20}, # €/MWh (pit storage is cheap) + ), + initial_charge_state='equals_final', # Yearly cyclic + eta_charge=0.95, + eta_discharge=0.95, + relative_loss_per_hour=0.0001, # Very low losses for pit storage + charging=fx.Flow( + 'Charge', + bus='Heat', + size=fx.InvestParameters(maximum_size=10, effects_of_investment_per_size={'costs': 5000}), + ), + discharging=fx.Flow( + 'Discharge', + bus='Heat', + size=fx.InvestParameters(maximum_size=10, effects_of_investment_per_size={'costs': 5000}), + ), + ), + # Heat demand + fx.Sink( + 'HeatDemand', + inputs=[fx.Flow('Q_th', bus='Heat', size=1, fixed_relative_profile=heat_demand)], + ), + ) + return fs + + def create_multiperiod_system() -> fx.FlowSystem: """Create a system with multiple periods and scenarios. @@ -550,6 +690,7 @@ def main(): ('multiperiod_system', create_multiperiod_system), ('district_heating_system', create_district_heating_system), ('operational_system', create_operational_system), + ('seasonal_storage_system', create_seasonal_storage_system), ] for name, create_func in systems: From 44e1832113b6433385c8e7fb6d08b18613841a18 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 20 Dec 2025 17:00:44 +0100 Subject: [PATCH 116/191] Update notebook to show off storage modes --- docs/notebooks/08c-clustering.ipynb | 25 +- .../08c2-clustering-storage-modes.ipynb | 400 ++++++++++++++++++ 2 files changed, 417 insertions(+), 8 deletions(-) create mode 100644 docs/notebooks/08c2-clustering-storage-modes.ipynb diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb index 3e9316fb8..7919326d0 100644 --- a/docs/notebooks/08c-clustering.ipynb +++ b/docs/notebooks/08c-clustering.ipynb @@ -13,7 +13,6 @@ "\n", "- **Typical periods**: Cluster similar time segments (e.g., days) and solve only representative ones\n", "- **Weighted costs**: Automatically weight operational costs by cluster occurrence\n", - "- **Storage linking**: Track storage state across original periods\n", "- **Two-stage workflow**: Fast sizing with clustering, accurate dispatch at full resolution\n", "\n", "!!! note \"Requirements\"\n", @@ -138,7 +137,7 @@ "1. **Clusters similar days** using the TSAM (Time Series Aggregation Module) package\n", "2. **Reduces timesteps** to only typical periods (e.g., 8 typical days = 768 timesteps)\n", "3. 
**Weights costs** by how many original days each typical day represents\n", - "4. **Links storage states** across original periods for correct long-term behavior\n", + "4. **Handles storage** with configurable behavior via `storage_mode`\n", "\n", "!!! warning \"Peak Forcing\"\n", " Always use `time_series_for_high_peaks` to ensure extreme demand days are captured.\n", @@ -162,7 +161,7 @@ " n_clusters=8, # 8 typical days\n", " cluster_duration='1D', # Daily clustering\n", " time_series_for_high_peaks=peak_series, # Capture peak demand day\n", - " storage_cyclic=True, # SOC[end] = SOC[start]\n", + " storage_mode='intercluster_cyclic', # Link storage across clusters + yearly cyclic\n", ")\n", "\n", "time_clustering = timeit.default_timer() - start\n", @@ -441,8 +440,18 @@ "| `weights` | `dict[str, float]` | Optional weights for time series in clustering |\n", "| `time_series_for_high_peaks` | `list[str]` | **Essential**: Force inclusion of peak periods |\n", "| `time_series_for_low_peaks` | `list[str]` | Force inclusion of minimum periods |\n", - "| `storage_inter_cluster_linking` | `bool` | Add inter-cluster storage constraints (default: True) |\n", - "| `storage_cyclic` | `bool` | Enforce SOC[end] = SOC[start] (default: True) |\n", + "| `storage_mode` | `str` | Storage handling mode (see below) |\n", + "\n", + "### Storage Modes\n", + "\n", + "| Mode | Description |\n", + "|------|-------------|\n", + "| `'intercluster_cyclic'` | Links storage across clusters + yearly cyclic **(default)** |\n", + "| `'intercluster'` | Links storage across clusters, free start/end |\n", + "| `'cyclic'` | Each cluster is independent but cyclic (start = end) |\n", + "| `'independent'` | Each cluster is independent, free start/end |\n", + "\n", + "For a detailed comparison of storage modes, see [08c2-clustering-storage-modes](08c2-clustering-storage-modes.ipynb).\n", "\n", "### Peak Forcing Format\n", "\n", @@ -489,12 +498,12 @@ "1. **Always use peak forcing** (`time_series_for_high_peaks`) for demand time series\n", "2. **Add safety margin** (5-10%) when fixing sizes from clustering\n", "3. **Two-stage is recommended**: clustering for sizing, full resolution for dispatch\n", - "4. **Storage linking** ensures correct long-term storage behavior\n", + "4. 
**Storage handling** is configurable via `storage_mode`\n", "\n", "### Next Steps\n", "\n", - "- **[08d-clustering-multiperiod](08d-clustering-multiperiod.ipynb)**: Clustering with multiple periods and scenarios\n", - "- **[08e-clustering-internals](08e-clustering-internals.ipynb)**: Deep dive into weights, TSAM, and cost scaling" + "- **[08c2-clustering-storage-modes](08c2-clustering-storage-modes.ipynb)**: Compare storage modes using a seasonal storage system\n", + "- **[08d-clustering-multiperiod](08d-clustering-multiperiod.ipynb)**: Clustering with multiple periods and scenarios" ] } ], diff --git a/docs/notebooks/08c2-clustering-storage-modes.ipynb b/docs/notebooks/08c2-clustering-storage-modes.ipynb new file mode 100644 index 000000000..854af7ea9 --- /dev/null +++ b/docs/notebooks/08c2-clustering-storage-modes.ipynb @@ -0,0 +1,400 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "0", + "metadata": {}, + "source": [ + "# Clustering Storage Modes\n", + "\n", + "Compare different storage handling modes when clustering time series.\n", + "\n", + "This notebook demonstrates:\n", + "\n", + "- **Four storage modes**: `independent`, `cyclic`, `intercluster`, `intercluster_cyclic`\n", + "- **Seasonal storage**: Why inter-cluster linking matters for long-term storage\n", + "- **When to use each mode**: Choosing the right mode for your application\n", + "\n", + "!!! note \"Prerequisites\"\n", + " Read [08c-clustering](08c-clustering.ipynb) first for clustering basics." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1", + "metadata": {}, + "outputs": [], + "source": [ + "import timeit\n", + "from pathlib import Path\n", + "\n", + "import numpy as np\n", + "import pandas as pd\n", + "import plotly.graph_objects as go\n", + "from plotly.subplots import make_subplots\n", + "\n", + "import flixopt as fx\n", + "\n", + "fx.CONFIG.notebook()" + ] + }, + { + "cell_type": "markdown", + "id": "2", + "metadata": {}, + "source": [ + "## Load the Seasonal Storage System\n", + "\n", + "We use a solar thermal + seasonal pit storage system with a full year of data.\n", + "This is ideal for demonstrating storage modes because:\n", + "\n", + "- **Solar peaks in summer** when heat demand is low\n", + "- **Heat demand peaks in winter** when solar is minimal\n", + "- **Seasonal storage** bridges this gap by storing summer heat for winter" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3", + "metadata": {}, + "outputs": [], + "source": [ + "# Generate example data if not present\n", + "data_file = Path('data/seasonal_storage_system.nc4')\n", + "if not data_file.exists():\n", + " from data.generate_example_systems import create_seasonal_storage_system\n", + "\n", + " fs = create_seasonal_storage_system()\n", + " fs.to_netcdf(data_file)\n", + "\n", + "# Load the seasonal storage system\n", + "flow_system = fx.FlowSystem.from_netcdf(data_file)\n", + "\n", + "timesteps = flow_system.timesteps\n", + "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 24:.0f} days)')\n", + "print(f'Components: {list(flow_system.components.keys())}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4", + "metadata": {}, + "outputs": [], + "source": [ + "# Visualize the seasonal patterns\n", + "solar_profile = flow_system.components['SolarThermal'].outputs[0].fixed_relative_profile\n", + "heat_demand = flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile\n", + "\n", + "# Daily average for clearer visualization\n", + 
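+    "# (reshape(-1, 24) assumes hourly data covering whole days)\n",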
"solar_daily = solar_profile.values.reshape(-1, 24).mean(axis=1)\n", + "demand_daily = heat_demand.values.reshape(-1, 24).mean(axis=1)\n", + "days = np.arange(len(solar_daily))\n", + "\n", + "fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.1)\n", + "fig.add_trace(go.Scatter(x=days, y=solar_daily, name='Solar (daily avg)', fill='tozeroy'), row=1, col=1)\n", + "fig.add_trace(go.Scatter(x=days, y=demand_daily, name='Heat Demand (daily avg)', fill='tozeroy'), row=2, col=1)\n", + "fig.update_layout(height=400, title='Seasonal Mismatch: Solar vs Heat Demand')\n", + "fig.update_xaxes(title_text='Day of Year', row=2, col=1)\n", + "fig.update_yaxes(title_text='Solar Profile', row=1, col=1)\n", + "fig.update_yaxes(title_text='Heat Demand [MW]', row=2, col=1)\n", + "fig.show()" + ] + }, + { + "cell_type": "markdown", + "id": "5", + "metadata": {}, + "source": [ + "## Understanding Storage Modes\n", + "\n", + "When clustering reduces a full year to typical periods (e.g., 12 typical days), we need to\n", + "decide how storage behaves across these periods. There are four options:\n", + "\n", + "| Mode | Description | Use Case |\n", + "|------|-------------|----------|\n", + "| `'intercluster_cyclic'` | Links storage across clusters + yearly cyclic | **Default**. Seasonal storage, yearly optimization |\n", + "| `'intercluster'` | Links storage across clusters, free start/end | Multi-year optimization, flexible boundaries |\n", + "| `'cyclic'` | Each cluster independent, but cyclic (start = end) | Daily storage only, no seasonal effects |\n", + "| `'independent'` | Each cluster independent, free start/end | Fastest solve, ignores long-term storage |\n", + "\n", + "Let's compare them!" + ] + }, + { + "cell_type": "markdown", + "id": "6", + "metadata": {}, + "source": [ + "## Baseline: Full Year Optimization\n", + "\n", + "First, optimize the full system to establish a baseline:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "solver = fx.solvers.HighsSolver(mip_gap=0.02)\n", + "\n", + "start = timeit.default_timer()\n", + "fs_full = flow_system.copy()\n", + "fs_full.optimize(solver)\n", + "time_full = timeit.default_timer() - start\n", + "\n", + "print(f'Full optimization: {time_full:.1f} seconds')\n", + "print(f'Total cost: {fs_full.solution[\"costs\"].item():,.0f} EUR')\n", + "print('\\nOptimized sizes:')\n", + "for name, size in fs_full.statistics.sizes.items():\n", + " print(f' {name}: {float(size.item()):.2f}')" + ] + }, + { + "cell_type": "markdown", + "id": "8", + "metadata": {}, + "source": [ + "## Compare Storage Modes\n", + "\n", + "Now let's cluster with each storage mode and compare results:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "# Clustering parameters\n", + "N_CLUSTERS = 12 # 12 typical days for a full year\n", + "CLUSTER_DURATION = '1D'\n", + "PEAK_SERIES = ['HeatDemand(Q_th)|fixed_relative_profile']\n", + "\n", + "# Storage modes to compare\n", + "storage_modes = ['independent', 'cyclic', 'intercluster', 'intercluster_cyclic']\n", + "\n", + "results = {}\n", + "clustered_systems = {}\n", + "\n", + "for mode in storage_modes:\n", + " print(f'\\n--- Mode: {mode} ---')\n", + " start = timeit.default_timer()\n", + "\n", + " fs_clustered = flow_system.transform.cluster(\n", + " n_clusters=N_CLUSTERS,\n", + " cluster_duration=CLUSTER_DURATION,\n", + " time_series_for_high_peaks=PEAK_SERIES,\n", + " 
storage_mode=mode,\n", + " )\n", + " time_cluster = timeit.default_timer() - start\n", + "\n", + " start = timeit.default_timer()\n", + " fs_clustered.optimize(solver)\n", + " time_solve = timeit.default_timer() - start\n", + "\n", + " clustered_systems[mode] = fs_clustered\n", + "\n", + " results[mode] = {\n", + " 'Time [s]': time_cluster + time_solve,\n", + " 'Cost [EUR]': fs_clustered.solution['costs'].item(),\n", + " 'Solar [MW]': fs_clustered.statistics.sizes.get('SolarThermal(Q_th)', 0),\n", + " 'Boiler [MW]': fs_clustered.statistics.sizes.get('GasBoiler(Q_th)', 0),\n", + " 'Storage [MWh]': fs_clustered.statistics.sizes.get('SeasonalStorage', 0),\n", + " }\n", + "\n", + " # Handle xarray types\n", + " for key in ['Solar [MW]', 'Boiler [MW]', 'Storage [MWh]']:\n", + " val = results[mode][key]\n", + " results[mode][key] = float(val.item()) if hasattr(val, 'item') else float(val)\n", + "\n", + " print(f' Time: {results[mode][\"Time [s]\"]:.1f}s')\n", + " print(f' Cost: {results[mode][\"Cost [EUR]\"]:,.0f} EUR')\n", + " print(f' Storage: {results[mode][\"Storage [MWh]\"]:.0f} MWh')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "# Add full optimization result for comparison\n", + "results['Full (baseline)'] = {\n", + " 'Time [s]': time_full,\n", + " 'Cost [EUR]': fs_full.solution['costs'].item(),\n", + " 'Solar [MW]': float(fs_full.statistics.sizes.get('SolarThermal(Q_th)', 0).item()),\n", + " 'Boiler [MW]': float(fs_full.statistics.sizes.get('GasBoiler(Q_th)', 0).item()),\n", + " 'Storage [MWh]': float(fs_full.statistics.sizes.get('SeasonalStorage', 0).item()),\n", + "}\n", + "\n", + "# Create comparison DataFrame\n", + "comparison = pd.DataFrame(results).T\n", + "baseline_cost = comparison.loc['Full (baseline)', 'Cost [EUR]']\n", + "baseline_time = comparison.loc['Full (baseline)', 'Time [s]']\n", + "comparison['Cost Gap [%]'] = (comparison['Cost [EUR]'] - baseline_cost) / abs(baseline_cost) * 100\n", + "comparison['Speedup'] = baseline_time / comparison['Time [s]']\n", + "\n", + "comparison.style.format(\n", + " {\n", + " 'Time [s]': '{:.1f}',\n", + " 'Cost [EUR]': '{:,.0f}',\n", + " 'Solar [MW]': '{:.1f}',\n", + " 'Boiler [MW]': '{:.1f}',\n", + " 'Storage [MWh]': '{:.0f}',\n", + " 'Cost Gap [%]': '{:+.1f}',\n", + " 'Speedup': '{:.1f}x',\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "11", + "metadata": {}, + "source": [ + "## Visualize Storage Behavior\n", + "\n", + "The key difference between modes is how storage is utilized across the year.\n", + "Let's expand each solution back to full resolution and compare:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], + "source": [ + "# Expand clustered solutions to full resolution\n", + "expanded_systems = {}\n", + "for mode in storage_modes:\n", + " expanded_systems[mode] = clustered_systems[mode].transform.expand_solution()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "# Plot storage charge state for each mode\n", + "fig = make_subplots(\n", + " rows=len(storage_modes) + 1,\n", + " cols=1,\n", + " shared_xaxes=True,\n", + " vertical_spacing=0.05,\n", + " subplot_titles=['Full Optimization'] + [f'Mode: {m}' for m in storage_modes],\n", + ")\n", + "\n", + "# Full optimization\n", + "soc_full = fs_full.solution['SeasonalStorage|charge_state']\n", + "fig.add_trace(go.Scatter(x=fs_full.timesteps, 
y=soc_full.values, name='Full', line=dict(width=0.8)), row=1, col=1)\n", + "\n", + "# Expanded clustered solutions\n", + "for i, mode in enumerate(storage_modes, start=2):\n", + " fs_exp = expanded_systems[mode]\n", + " soc = fs_exp.solution['SeasonalStorage|charge_state']\n", + " fig.add_trace(go.Scatter(x=fs_exp.timesteps, y=soc.values, name=mode, line=dict(width=0.8)), row=i, col=1)\n", + "\n", + "fig.update_layout(height=800, title='Storage Charge State by Mode', showlegend=False)\n", + "for i in range(1, len(storage_modes) + 2):\n", + " fig.update_yaxes(title_text='SOC [MWh]', row=i, col=1)\n", + "fig.show()" + ] + }, + { + "cell_type": "markdown", + "id": "14", + "metadata": {}, + "source": [ + "## Interpretation\n", + "\n", + "### `'independent'` Mode\n", + "- Each typical period is solved independently\n", + "- Storage starts and ends at arbitrary states within each cluster\n", + "- **No seasonal storage benefit captured** - storage is only used for daily fluctuations\n", + "- Fastest to solve but least accurate for seasonal systems\n", + "\n", + "### `'cyclic'` Mode \n", + "- Each cluster is independent but enforces start = end state\n", + "- Better than independent but still **no cross-season linking**\n", + "- Good for systems where storage only balances within-day variations\n", + "\n", + "### `'intercluster'` Mode\n", + "- Links storage state across the original time series via typical periods\n", + "- **Captures seasonal storage behavior** - summer charging, winter discharging\n", + "- Free start and end states (useful for multi-year optimization)\n", + "\n", + "### `'intercluster_cyclic'` Mode (Default)\n", + "- Inter-cluster linking **plus** yearly cyclic constraint (end = start)\n", + "- **Best for yearly investment optimization** with seasonal storage\n", + "- Ensures the storage cycle is sustainable year after year" + ] + }, + { + "cell_type": "markdown", + "id": "15", + "metadata": {}, + "source": [ + "## When to Use Each Mode\n", + "\n", + "| Your System Has... | Recommended Mode |\n", + "|-------------------|------------------|\n", + "| Seasonal storage (pit, underground) | `'intercluster_cyclic'` |\n", + "| Only daily storage (batteries, hot water tanks) | `'cyclic'` |\n", + "| Multi-year optimization with inter-annual storage | `'intercluster'` |\n", + "| Quick sizing estimate, storage not critical | `'independent'` |\n", + "\n", + "!!! tip \"Rule of Thumb\"\n", + " Use `'intercluster_cyclic'` (default) unless you have a specific reason not to.\n", + " It provides the most accurate representation of storage behavior in clustered systems." + ] + }, + { + "cell_type": "markdown", + "id": "16", + "metadata": {}, + "source": [ + "## Summary\n", + "\n", + "You learned how to:\n", + "\n", + "- Use **`storage_mode`** parameter to control storage behavior in clustering\n", + "- Understand the **difference between modes** and their impact on results\n", + "- Choose the **right mode** for your optimization problem\n", + "\n", + "### Key Takeaways\n", + "\n", + "1. **Seasonal storage requires inter-cluster linking** to capture charging/discharging across seasons\n", + "2. **`'intercluster_cyclic'`** is the default and best for yearly investment optimization\n", + "3. **`'independent'` and `'cyclic'`** are faster but miss long-term storage value\n", + "4. 
**Expand solutions** with `expand_solution()` to visualize storage behavior across the year\n", + "\n", + "### Next Steps\n", + "\n", + "- **[08d-clustering-multiperiod](08d-clustering-multiperiod.ipynb)**: Clustering with multiple periods and scenarios" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.12.0" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From a59ef62b026b7249644a26063e5b3f021dc05973 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 20 Dec 2025 17:05:11 +0100 Subject: [PATCH 117/191] =?UTF-8?q?=E2=8F=BA=20All=20changes=20complete.?= =?UTF-8?q?=20Here's=20a=20summary:?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Changes Made 1. flixopt/components.py - Storage class - Added cluster_storage_mode parameter with default 'intercluster_cyclic' - Updated docstring with full documentation - Updated StorageModel._do_modeling to use self.element.cluster_storage_mode 2. flixopt/clustering/base.py - Clustering dataclass - Removed storage_mode attribute - Updated __repr__ to exclude storage mode 3. flixopt/transform_accessor.py - cluster() method - Removed storage_mode parameter - Updated docstrings 4. flixopt/flow_system.py - _add_inter_cluster_linking() - Now filters storages by their cluster_storage_mode - Only links storages with 'intercluster' or 'intercluster_cyclic' 5. flixopt/clustering/storage_linking.py - InterClusterLinking - Changed __init__ to accept storages list instead of storage_cyclic flag - Each storage's cyclic constraint is determined by its own cluster_storage_mode New Usage # Per-storage modes - different storages can have different behaviors battery = Storage('Battery', ..., cluster_storage_mode='cyclic') # short-term hydrogen = Storage('H2_Store', ..., cluster_storage_mode='intercluster_cyclic') # seasonal # Cluster without storage_mode parameter fs_clustered = fs.transform.cluster(n_clusters=4, cluster_duration='1D') # Re-optimize with different storage modes by changing the attribute battery.cluster_storage_mode = 'independent' fs_clustered.optimize(solver) # Now battery uses 'independent' mode --- flixopt/clustering/base.py | 10 ++-------- flixopt/clustering/storage_linking.py | 28 ++++++++++++++++----------- flixopt/components.py | 21 +++++++++++++++++++- flixopt/flow_system.py | 19 ++++++++++-------- flixopt/transform_accessor.py | 21 +++----------------- 5 files changed, 53 insertions(+), 46 deletions(-) diff --git a/flixopt/clustering/base.py b/flixopt/clustering/base.py index 59facd379..81c2b0bfc 100644 --- a/flixopt/clustering/base.py +++ b/flixopt/clustering/base.py @@ -18,7 +18,7 @@ from __future__ import annotations from dataclasses import dataclass -from typing import TYPE_CHECKING, Any, Literal +from typing import TYPE_CHECKING, Any import numpy as np import xarray as xr @@ -892,11 +892,6 @@ class Clustering: result: The ClusterResult from the aggregation backend. original_flow_system: Reference to the FlowSystem before aggregation. backend_name: Name of the aggregation backend used (e.g., 'tsam', 'manual'). 
- storage_mode: How storages are treated during clustering: - - 'independent': Clusters fully decoupled, no constraints between clusters - - 'cyclic': Each cluster's start equals its end (self-contained periods) - - 'intercluster': Link storage state across original timeline (seasonal storage) - - 'intercluster_cyclic': Like 'intercluster' but overall timeline is cyclic Example: >>> fs_clustered = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D') @@ -909,7 +904,6 @@ class Clustering: result: ClusterResult original_flow_system: FlowSystem # FlowSystem - avoid circular import backend_name: str = 'unknown' - storage_mode: Literal['independent', 'cyclic', 'intercluster', 'intercluster_cyclic'] = 'intercluster_cyclic' def __repr__(self) -> str: cs = self.result.cluster_structure @@ -920,7 +914,7 @@ def __repr__(self) -> str: structure_info = f'{cs.n_original_periods} periods → {n_clusters} clusters' else: structure_info = 'no structure' - return f'Clustering(\n backend={self.backend_name!r}\n {structure_info}\n storage={self.storage_mode!r}\n)' + return f'Clustering(\n backend={self.backend_name!r}\n {structure_info}\n)' @property def plot(self) -> ClusteringPlotAccessor: diff --git a/flixopt/clustering/storage_linking.py b/flixopt/clustering/storage_linking.py index af4631507..5c5ee5d6d 100644 --- a/flixopt/clustering/storage_linking.py +++ b/flixopt/clustering/storage_linking.py @@ -42,10 +42,14 @@ class InterClusterLinking(Submodel): Example: >>> from flixopt.clustering import ClusterStructure, InterClusterLinking >>> structure = ClusterStructure(...) + >>> storages = [ + ... s for s in fs.storages.values() if s.cluster_storage_mode in ('intercluster', 'intercluster_cyclic') + ... ] >>> model = InterClusterLinking( ... model=flow_system.model, ... flow_system=flow_system, ... cluster_structure=structure, + ... storages=storages, ... ) >>> model.do_modeling() """ @@ -55,19 +59,20 @@ def __init__( model: FlowSystemModel, flow_system: FlowSystem, cluster_structure: ClusterStructure, - storage_cyclic: bool = True, + storages: list, ): """ Args: model: The FlowSystemModel to add constraints to. flow_system: The FlowSystem being optimized. cluster_structure: Clustering structure with cluster_order and occurrences. - storage_cyclic: If True, enforce SOC_boundary[0] = SOC_boundary[end]. + storages: List of Storage components to add inter-cluster linking for. + Each storage's cluster_storage_mode determines if cyclic constraint is added. 
""" super().__init__(model, label_of_element='InterClusterLinking', label_of_model='InterClusterLinking') self.flow_system = flow_system self.cluster_structure = cluster_structure - self.storage_cyclic = storage_cyclic + self.storages = storages # Extract commonly used values from cluster_structure self._n_clusters = ( @@ -87,24 +92,25 @@ def do_modeling(self): - delta_SOC[c]: Change in SOC during representative period c - Linking: SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]] """ - storages = list(self.flow_system.storages.values()) - if not storages: - logger.info('No storages found - skipping inter-cluster linking') + if not self.storages: + logger.info('No storages to link - skipping inter-cluster linking') return logger.info( - f'Adding inter-cluster storage linking for {len(storages)} storages ' + f'Adding inter-cluster storage linking for {len(self.storages)} storages ' f'({self._n_original_periods} original periods, {self._n_clusters} clusters)' ) - for storage in storages: - self._add_storage_linking(storage) + for storage in self.storages: + storage_cyclic = storage.cluster_storage_mode == 'intercluster_cyclic' + self._add_storage_linking(storage, storage_cyclic) - def _add_storage_linking(self, storage) -> None: + def _add_storage_linking(self, storage, storage_cyclic: bool) -> None: """Add inter-cluster linking constraints for a single storage. Args: storage: Storage component to add linking for. + storage_cyclic: If True, enforce SOC_boundary[0] = SOC_boundary[end]. """ import xarray as xr @@ -201,7 +207,7 @@ def _add_storage_linking(self, storage) -> None: self.add_constraints(lhs == 0, short_name=f'link|{label}|{d}') # Cyclic constraint: SOC_boundary[0] = SOC_boundary[end] - if self.storage_cyclic: + if storage_cyclic: lhs = soc_boundary.isel(cluster_boundary=0) - soc_boundary.isel(cluster_boundary=self._n_original_periods) self.add_constraints(lhs == 0, short_name=f'cyclic|{label}') diff --git a/flixopt/components.py b/flixopt/components.py index f9d36a19d..9bb36f85c 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -282,6 +282,21 @@ class Storage(Component): relative_loss_per_hour: Self-discharge per hour (0-0.1). Default: 0. prevent_simultaneous_charge_and_discharge: Prevent charging and discharging simultaneously. Adds binary variables. Default: True. + cluster_storage_mode: How this storage is treated during clustering optimization. + Only relevant when using ``transform.cluster()``. Options: + + - ``'independent'``: Clusters are fully decoupled. No constraints between + clusters, each cluster has free start/end SOC. Fast but ignores + seasonal storage value. + - ``'cyclic'``: Each cluster is self-contained. The SOC at the start of + each cluster equals its end (cluster returns to initial state). + Good for "average day" modeling. + - ``'intercluster'``: Link storage state across the original timeline using + SOC boundary variables (Kotzur et al. approach). Properly values + seasonal storage patterns. Overall SOC can drift. + - ``'intercluster_cyclic'`` (default): Like 'intercluster' but also enforces + that overall SOC returns to initial state (yearly cyclic). + meta_data: Additional information stored in results. Python native types only. 
Examples: @@ -398,6 +413,9 @@ def __init__( relative_loss_per_hour: Numeric_TPS = 0, prevent_simultaneous_charge_and_discharge: bool = True, balanced: bool = False, + cluster_storage_mode: Literal[ + 'independent', 'cyclic', 'intercluster', 'intercluster_cyclic' + ] = 'intercluster_cyclic', meta_data: dict | None = None, ): # TODO: fixed_relative_chargeState implementieren @@ -427,6 +445,7 @@ def __init__( self.relative_loss_per_hour: Numeric_TPS = relative_loss_per_hour self.prevent_simultaneous_charge_and_discharge = prevent_simultaneous_charge_and_discharge self.balanced = balanced + self.cluster_storage_mode = cluster_storage_mode def create_model(self, model: FlowSystemModel) -> StorageModel: self._plausibility_checks() @@ -928,7 +947,7 @@ def _do_modeling(self): self.add_constraints(lhs == 0, short_name='charge_state', mask=mask) # For 'cyclic' mode: each cluster's start equals its end - if clustering is not None and clustering.storage_mode == 'cyclic': + if clustering is not None and self.element.cluster_storage_mode == 'cyclic': starts = clustering.cluster_start_positions for i, start_pos in enumerate(starts): # End of cluster i is at (start of cluster i+1) - 1, or last timestep for final cluster diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 438cdcf07..153045e44 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -1304,6 +1304,7 @@ def _add_inter_cluster_linking(self) -> None: Creates SOC_boundary variables that link storage states between sequential periods in the original time series, using the delta SOC from representative periods. + Only storages with cluster_storage_mode='intercluster' or 'intercluster_cyclic' are linked. """ from .clustering.storage_linking import InterClusterLinking @@ -1311,22 +1312,24 @@ def _add_inter_cluster_linking(self) -> None: if info is None: return - # Only add inter-cluster linking for 'intercluster' and 'intercluster_cyclic' modes - if info.storage_mode not in ('intercluster', 'intercluster_cyclic'): - logger.info(f"Storage mode '{info.storage_mode}' - skipping inter-cluster linking") - return - if info.result.cluster_structure is None: logger.warning('No cluster structure available for inter-cluster linking') return - # Create inter-cluster linking model for storage - storage_cyclic = info.storage_mode == 'intercluster_cyclic' + # Filter storages that need inter-cluster linking + storages_for_linking = [ + s for s in self.storages.values() if s.cluster_storage_mode in ('intercluster', 'intercluster_cyclic') + ] + + if not storages_for_linking: + logger.info('No storages with intercluster mode - skipping inter-cluster linking') + return + linking_model = InterClusterLinking( model=self.model, flow_system=self, cluster_structure=info.result.cluster_structure, - storage_cyclic=storage_cyclic, + storages=storages_for_linking, ) linking_model.do_modeling() diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 3883fb3a2..ac615a601 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -581,7 +581,6 @@ def cluster( weights: dict[str, float] | None = None, time_series_for_high_peaks: list[str] | None = None, time_series_for_low_peaks: list[str] | None = None, - storage_mode: Literal['independent', 'cyclic', 'intercluster', 'intercluster_cyclic'] = 'intercluster_cyclic', ) -> FlowSystem: """ Create a FlowSystem with reduced timesteps using typical clusters. @@ -594,7 +593,7 @@ def cluster( 1. Performs time series clustering using tsam (k-means) 2. 
Extracts only the typical clusters (not all original timesteps) 3. Applies timestep weighting for accurate cost representation - 4. Handles storage states between clusters based on the ``storage`` mode + 4. Handles storage states between clusters based on each Storage's ``cluster_storage_mode`` Use this for initial sizing optimization, then use ``fix_sizes()`` to re-optimize at full resolution for accurate dispatch results. @@ -607,19 +606,6 @@ def cluster( time_series_for_high_peaks: Time series labels for explicitly selecting high-value clusters. **Recommended** for demand time series to capture peak demand days. time_series_for_low_peaks: Time series labels for explicitly selecting low-value clusters. - storage_mode: How storages are treated during clustering. Options: - - - ``'independent'``: Clusters are fully decoupled. No constraints between - clusters, each cluster has free start/end SOC. Fast but ignores - seasonal storage value. - - ``'cyclic'``: Each cluster is self-contained. The SOC at the start of - each cluster equals its end (cluster returns to initial state). - Good for "average day" modeling. - - ``'intercluster'``: Link storage state across the original timeline using - SOC boundary variables (Kotzur et al. approach). Properly values - seasonal storage patterns. Overall SOC can drift. - - ``'intercluster_cyclic'`` (default): Like 'intercluster' but also enforces - that overall SOC returns to initial state (yearly cyclic). Returns: A new FlowSystem with reduced timesteps (only typical clusters). @@ -653,8 +639,8 @@ def cluster( - This is best suited for initial sizing, not final dispatch optimization - Use ``time_series_for_high_peaks`` to ensure peak demand clusters are captured - A 5-10% safety margin on sizes is recommended for the dispatch stage - - For seasonal storage (e.g., hydrogen, thermal storage), use 'intercluster' or - 'intercluster_cyclic' to properly value long-term storage + - For seasonal storage (e.g., hydrogen, thermal storage), set + ``Storage.cluster_storage_mode='intercluster'`` or ``'intercluster_cyclic'`` """ import tsam.timeseriesaggregation as tsam @@ -889,7 +875,6 @@ def _build_cluster_occurrences_for_key(key: tuple) -> np.ndarray: result=aggregation_result, original_flow_system=self._fs, backend_name='tsam', - storage_mode=storage_mode, ) return reduced_fs From 1c0e67803d1ff092dcd109f5a6b90a46df85f55f Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 20 Dec 2025 17:07:52 +0100 Subject: [PATCH 118/191] Update the notebooks --- docs/notebooks/08c-clustering.ipynb | 6 ++-- .../08c2-clustering-storage-modes.ipynb | 33 +++++++++++++++---- 2 files changed, 30 insertions(+), 9 deletions(-) diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb index 7919326d0..cf5b53b53 100644 --- a/docs/notebooks/08c-clustering.ipynb +++ b/docs/notebooks/08c-clustering.ipynb @@ -161,7 +161,6 @@ " n_clusters=8, # 8 typical days\n", " cluster_duration='1D', # Daily clustering\n", " time_series_for_high_peaks=peak_series, # Capture peak demand day\n", - " storage_mode='intercluster_cyclic', # Link storage across clusters + yearly cyclic\n", ")\n", "\n", "time_clustering = timeit.default_timer() - start\n", @@ -440,9 +439,10 @@ "| `weights` | `dict[str, float]` | Optional weights for time series in clustering |\n", "| `time_series_for_high_peaks` | `list[str]` | **Essential**: Force inclusion of peak periods |\n", "| `time_series_for_low_peaks` | `list[str]` | Force inclusion of minimum periods 
|\n", - "| `storage_mode` | `str` | Storage handling mode (see below) |\n", "\n", - "### Storage Modes\n", + "### Storage Behavior\n", + "\n", + "Each `Storage` component has a `cluster_storage_mode` parameter that controls how it behaves during clustering:\n", "\n", "| Mode | Description |\n", "|------|-------------|\n", diff --git a/docs/notebooks/08c2-clustering-storage-modes.ipynb b/docs/notebooks/08c2-clustering-storage-modes.ipynb index 854af7ea9..4e540c770 100644 --- a/docs/notebooks/08c2-clustering-storage-modes.ipynb +++ b/docs/notebooks/08c2-clustering-storage-modes.ipynb @@ -111,7 +111,8 @@ "## Understanding Storage Modes\n", "\n", "When clustering reduces a full year to typical periods (e.g., 12 typical days), we need to\n", - "decide how storage behaves across these periods. There are four options:\n", + "decide how storage behaves across these periods. Each `Storage` component has a \n", + "`cluster_storage_mode` parameter with four options:\n", "\n", "| Mode | Description | Use Case |\n", "|------|-------------|----------|\n", @@ -161,7 +162,8 @@ "source": [ "## Compare Storage Modes\n", "\n", - "Now let's cluster with each storage mode and compare results:" + "Now let's cluster with each storage mode and compare results.\n", + "We set `cluster_storage_mode` on the Storage component before calling `cluster()`:" ] }, { @@ -184,13 +186,16 @@ "\n", "for mode in storage_modes:\n", " print(f'\\n--- Mode: {mode} ---')\n", - " start = timeit.default_timer()\n", "\n", - " fs_clustered = flow_system.transform.cluster(\n", + " # Create a copy and set the storage mode\n", + " fs_copy = flow_system.copy()\n", + " fs_copy.components['SeasonalStorage'].cluster_storage_mode = mode\n", + "\n", + " start = timeit.default_timer()\n", + " fs_clustered = fs_copy.transform.cluster(\n", " n_clusters=N_CLUSTERS,\n", " cluster_duration=CLUSTER_DURATION,\n", " time_series_for_high_peaks=PEAK_SERIES,\n", - " storage_mode=mode,\n", " )\n", " time_cluster = timeit.default_timer() - start\n", "\n", @@ -353,6 +358,22 @@ "| Multi-year optimization with inter-annual storage | `'intercluster'` |\n", "| Quick sizing estimate, storage not critical | `'independent'` |\n", "\n", + "### Setting the Mode\n", + "\n", + "```python\n", + "# Option 1: Set when creating the Storage\n", + "storage = fx.Storage(\n", + " 'SeasonalStorage',\n", + " capacity_in_flow_hours=5000,\n", + " cluster_storage_mode='intercluster_cyclic', # default\n", + " ...\n", + ")\n", + "\n", + "# Option 2: Modify before clustering\n", + "flow_system.components['SeasonalStorage'].cluster_storage_mode = 'cyclic'\n", + "fs_clustered = flow_system.transform.cluster(...)\n", + "```\n", + "\n", "!!! tip \"Rule of Thumb\"\n", " Use `'intercluster_cyclic'` (default) unless you have a specific reason not to.\n", " It provides the most accurate representation of storage behavior in clustered systems." 
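To make the inter-cluster linking concrete: the `'intercluster'` modes track an absolute SOC boundary per original day and chain it through the representative clusters. A toy numpy sketch of that recurrence (made-up numbers, not the library's implementation) is:

```python
# Toy sketch of SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]]
# (illustrative values only; the real constraints are built by the model)
import numpy as np

cluster_order = np.array([0, 0, 1, 0, 2])  # assumed: representative cluster of each original day
delta_soc = np.array([4.0, -2.5, -1.0])    # assumed: net SOC change of each cluster

soc_boundary = np.zeros(len(cluster_order) + 1)
for d, c in enumerate(cluster_order):
    soc_boundary[d + 1] = soc_boundary[d] + delta_soc[c]

print(soc_boundary)  # [0.  4.  8.  5.5 9.5 8.5]
# 'intercluster_cyclic' additionally requires soc_boundary[0] == soc_boundary[-1]
```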
@@ -367,7 +388,7 @@ "\n", "You learned how to:\n", "\n", - "- Use **`storage_mode`** parameter to control storage behavior in clustering\n", + "- Use **`cluster_storage_mode`** on Storage components to control behavior in clustering\n", "- Understand the **difference between modes** and their impact on results\n", "- Choose the **right mode** for your optimization problem\n", "\n", From a6e5bd4d4cbde50862f0a23161cca16cc21d6d49 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 20 Dec 2025 17:10:17 +0100 Subject: [PATCH 119/191] Update parameter name --- docs/notebooks/08c2-clustering-storage-modes.ipynb | 6 +++++- flixopt/clustering/storage_linking.py | 8 +++----- flixopt/components.py | 10 ++++------ flixopt/flow_system.py | 4 ++-- flixopt/transform_accessor.py | 4 ++-- 5 files changed, 16 insertions(+), 16 deletions(-) diff --git a/docs/notebooks/08c2-clustering-storage-modes.ipynb b/docs/notebooks/08c2-clustering-storage-modes.ipynb index 4e540c770..d6201382d 100644 --- a/docs/notebooks/08c2-clustering-storage-modes.ipynb +++ b/docs/notebooks/08c2-clustering-storage-modes.ipynb @@ -138,7 +138,11 @@ "cell_type": "code", "execution_count": null, "id": "7", - "metadata": {}, + "metadata": { + "jupyter": { + "is_executing": true + } + }, "outputs": [], "source": [ "solver = fx.solvers.HighsSolver(mip_gap=0.02)\n", diff --git a/flixopt/clustering/storage_linking.py b/flixopt/clustering/storage_linking.py index 5c5ee5d6d..ff4dd24e9 100644 --- a/flixopt/clustering/storage_linking.py +++ b/flixopt/clustering/storage_linking.py @@ -42,9 +42,7 @@ class InterClusterLinking(Submodel): Example: >>> from flixopt.clustering import ClusterStructure, InterClusterLinking >>> structure = ClusterStructure(...) - >>> storages = [ - ... s for s in fs.storages.values() if s.cluster_storage_mode in ('intercluster', 'intercluster_cyclic') - ... ] + >>> storages = [s for s in fs.storages.values() if s.cluster_mode in ('intercluster', 'intercluster_cyclic')] >>> model = InterClusterLinking( ... model=flow_system.model, ... flow_system=flow_system, @@ -67,7 +65,7 @@ def __init__( flow_system: The FlowSystem being optimized. cluster_structure: Clustering structure with cluster_order and occurrences. storages: List of Storage components to add inter-cluster linking for. - Each storage's cluster_storage_mode determines if cyclic constraint is added. + Each storage's cluster_mode determines if cyclic constraint is added. """ super().__init__(model, label_of_element='InterClusterLinking', label_of_model='InterClusterLinking') self.flow_system = flow_system @@ -102,7 +100,7 @@ def do_modeling(self): ) for storage in self.storages: - storage_cyclic = storage.cluster_storage_mode == 'intercluster_cyclic' + storage_cyclic = storage.cluster_mode == 'intercluster_cyclic' self._add_storage_linking(storage, storage_cyclic) def _add_storage_linking(self, storage, storage_cyclic: bool) -> None: diff --git a/flixopt/components.py b/flixopt/components.py index 9bb36f85c..80dfb4229 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -282,7 +282,7 @@ class Storage(Component): relative_loss_per_hour: Self-discharge per hour (0-0.1). Default: 0. prevent_simultaneous_charge_and_discharge: Prevent charging and discharging simultaneously. Adds binary variables. Default: True. - cluster_storage_mode: How this storage is treated during clustering optimization. + cluster_mode: How this storage is treated during clustering optimization. 
Only relevant when using ``transform.cluster()``. Options: - ``'independent'``: Clusters are fully decoupled. No constraints between @@ -413,9 +413,7 @@ def __init__( relative_loss_per_hour: Numeric_TPS = 0, prevent_simultaneous_charge_and_discharge: bool = True, balanced: bool = False, - cluster_storage_mode: Literal[ - 'independent', 'cyclic', 'intercluster', 'intercluster_cyclic' - ] = 'intercluster_cyclic', + cluster_mode: Literal['independent', 'cyclic', 'intercluster', 'intercluster_cyclic'] = 'intercluster_cyclic', meta_data: dict | None = None, ): # TODO: fixed_relative_chargeState implementieren @@ -445,7 +443,7 @@ def __init__( self.relative_loss_per_hour: Numeric_TPS = relative_loss_per_hour self.prevent_simultaneous_charge_and_discharge = prevent_simultaneous_charge_and_discharge self.balanced = balanced - self.cluster_storage_mode = cluster_storage_mode + self.cluster_mode = cluster_mode def create_model(self, model: FlowSystemModel) -> StorageModel: self._plausibility_checks() @@ -947,7 +945,7 @@ def _do_modeling(self): self.add_constraints(lhs == 0, short_name='charge_state', mask=mask) # For 'cyclic' mode: each cluster's start equals its end - if clustering is not None and self.element.cluster_storage_mode == 'cyclic': + if clustering is not None and self.element.cluster_mode == 'cyclic': starts = clustering.cluster_start_positions for i, start_pos in enumerate(starts): # End of cluster i is at (start of cluster i+1) - 1, or last timestep for final cluster diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 153045e44..0e24a0ddb 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -1304,7 +1304,7 @@ def _add_inter_cluster_linking(self) -> None: Creates SOC_boundary variables that link storage states between sequential periods in the original time series, using the delta SOC from representative periods. - Only storages with cluster_storage_mode='intercluster' or 'intercluster_cyclic' are linked. + Only storages with cluster_mode='intercluster' or 'intercluster_cyclic' are linked. """ from .clustering.storage_linking import InterClusterLinking @@ -1318,7 +1318,7 @@ def _add_inter_cluster_linking(self) -> None: # Filter storages that need inter-cluster linking storages_for_linking = [ - s for s in self.storages.values() if s.cluster_storage_mode in ('intercluster', 'intercluster_cyclic') + s for s in self.storages.values() if s.cluster_mode in ('intercluster', 'intercluster_cyclic') ] if not storages_for_linking: diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index ac615a601..a209ce4ab 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -593,7 +593,7 @@ def cluster( 1. Performs time series clustering using tsam (k-means) 2. Extracts only the typical clusters (not all original timesteps) 3. Applies timestep weighting for accurate cost representation - 4. Handles storage states between clusters based on each Storage's ``cluster_storage_mode`` + 4. Handles storage states between clusters based on each Storage's ``cluster_mode`` Use this for initial sizing optimization, then use ``fix_sizes()`` to re-optimize at full resolution for accurate dispatch results. 
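The two-stage workflow this docstring describes can be sketched as follows (a minimal sketch: `fx`, `flow_system`, and the solver call follow the notebooks above, while the exact ``fix_sizes()`` signature is not shown in this patch, so that call is a placeholder):

```python
solver = fx.solvers.HighsSolver(mip_gap=0.02)

# Stage 1: sizing on the reduced (clustered) system
fs_sized = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D')
fs_sized.optimize(solver)

# Stage 2: fix the resulting sizes (plus a 5-10% safety margin) and
# re-optimize dispatch at full resolution
fs_dispatch = flow_system.transform.fix_sizes(...)  # placeholder arguments
fs_dispatch.optimize(solver)
```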
@@ -640,7 +640,7 @@
         - Use ``time_series_for_high_peaks`` to ensure peak demand clusters are captured
         - A 5-10% safety margin on sizes is recommended for the dispatch stage
         - For seasonal storage (e.g., hydrogen, thermal storage), set
-          ``Storage.cluster_storage_mode='intercluster'`` or ``'intercluster_cyclic'``
+          ``Storage.cluster_mode='intercluster'`` or ``'intercluster_cyclic'``
     """
     import tsam.timeseriesaggregation as tsam
 
From a84543b8f915c07df65a82e63b72dcd40fd54437 Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Sat, 20 Dec 2025 18:52:04 +0100
Subject: [PATCH 120/191] Fix clustered Storage

---
 flixopt/clustering/storage_linking.py | 17 ++++++++++
 flixopt/components.py                 | 45 +++++++++++++++++++++------
 2 files changed, 52 insertions(+), 10 deletions(-)

diff --git a/flixopt/clustering/storage_linking.py b/flixopt/clustering/storage_linking.py
index ff4dd24e9..809c49a74 100644
--- a/flixopt/clustering/storage_linking.py
+++ b/flixopt/clustering/storage_linking.py
@@ -106,6 +106,12 @@ def do_modeling(self):
     def _add_storage_linking(self, storage, storage_cyclic: bool) -> None:
         """Add inter-cluster linking constraints for a single storage.
 
+        Following the S-N model from Blanke et al. (2022), this method:
+        1. Constrains charge_state at each cluster start to 0 (ΔE_0 = 0)
+        2. Creates SOC_boundary variables to track absolute SOC across original periods
+        3. Links via: SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]]
+        4. Adds bounds: 0 ≤ SOC_boundary[d] + charge_state[t] ≤ capacity
+
         Args:
             storage: Storage component to add linking for.
             storage_cyclic: If True, enforce SOC_boundary[0] = SOC_boundary[end].
@@ -122,6 +128,17 @@ def _add_storage_linking(self, storage, storage_cyclic: bool) -> None:
 
         charge_state = storage.submodel.variables[charge_state_name]
 
+        # === CRITICAL FIX: Constrain each cluster's start charge_state to 0 ===
+        # This makes charge_state relative to cluster start (like ΔE in S-N model)
+        # Without this, cluster starts are free variables allowing "free energy"
+        for c in range(self._n_clusters):
+            start_idx = c * self._timesteps_per_cluster
+            self.add_constraints(
+                charge_state.isel(time=start_idx) == 0,
+                short_name=f'cluster_start|{label}|{c}',
+            )
+        logger.debug(f'Added {self._n_clusters} cluster start constraints for {label}')
+
         # Get storage capacity bounds (may have period/scenario dimensions)
         capacity = storage.capacity_in_flow_hours
         if hasattr(capacity, 'fixed_size') and capacity.fixed_size is not None:
diff --git a/flixopt/components.py b/flixopt/components.py
index 80dfb4229..70d154284 100644
--- a/flixopt/components.py
+++ b/flixopt/components.py
@@ -1015,19 +1015,44 @@ def _initial_and_final_charge_state(self):
     @property
     def _absolute_charge_state_bounds(self) -> tuple[xr.DataArray, xr.DataArray]:
         relative_lower_bound, relative_upper_bound = self._relative_charge_state_bounds
+
+        # For inter-cluster modes, charge_state represents relative change from cluster start (ΔE)
+        # which can be negative (discharge) or positive (charge). The actual SOC is SOC_boundary + ΔE.
+        # We set lower bound to -capacity to allow the full range.
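+        # Illustrative numbers (assumed, not taken from the model): with capacity = 100
+        # and a cluster whose net effect is a discharge of 30, charge_state (ΔE) can
+        # reach -30 within that cluster; a boundary state SOC_boundary = 60 then gives
+        # an absolute SOC of 60 - 30 = 30, which stays inside [0, capacity].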
+ clustering = self._model.flow_system.clustering + is_intercluster = clustering is not None and self.element.cluster_mode in ( + 'intercluster', + 'intercluster_cyclic', + ) + if self.element.capacity_in_flow_hours is None: - # Unbounded storage: lower bound is 0, upper bound is infinite - return (0, np.inf) + # Unbounded storage: lower bound is 0 (or -inf for intercluster), upper bound is infinite + return (-np.inf if is_intercluster else 0, np.inf) elif isinstance(self.element.capacity_in_flow_hours, InvestParameters): - return ( - relative_lower_bound * self.element.capacity_in_flow_hours.minimum_or_fixed_size, - relative_upper_bound * self.element.capacity_in_flow_hours.maximum_or_fixed_size, - ) + cap_min = self.element.capacity_in_flow_hours.minimum_or_fixed_size + cap_max = self.element.capacity_in_flow_hours.maximum_or_fixed_size + if is_intercluster: + # For inter-cluster, charge_state is relative to cluster start (ΔE in S-N model) + # ΔE can be negative (discharge) or positive (charge), so allow full range. + # Create bounds with proper time dimension using the shape from relative bounds. + ones = xr.ones_like(relative_upper_bound) + return (-ones * cap_max, ones * cap_max) + else: + return ( + relative_lower_bound * cap_min, + relative_upper_bound * cap_max, + ) else: - return ( - relative_lower_bound * self.element.capacity_in_flow_hours, - relative_upper_bound * self.element.capacity_in_flow_hours, - ) + cap = self.element.capacity_in_flow_hours + if is_intercluster: + # Same as above: create bounds with time dimension + ones = xr.ones_like(relative_upper_bound) + return (-ones * cap, ones * cap) + else: + return ( + relative_lower_bound * cap, + relative_upper_bound * cap, + ) @property def _relative_charge_state_bounds(self) -> tuple[xr.DataArray, xr.DataArray]: From a8db4d7fb4dde9b21e05850e8c84c176be3c03cd Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 20 Dec 2025 19:21:21 +0100 Subject: [PATCH 121/191] FIx clustered Storage --- flixopt/clustering/storage_linking.py | 83 ++++++++++++++++++++++++++- 1 file changed, 81 insertions(+), 2 deletions(-) diff --git a/flixopt/clustering/storage_linking.py b/flixopt/clustering/storage_linking.py index 809c49a74..dd8ff39f6 100644 --- a/flixopt/clustering/storage_linking.py +++ b/flixopt/clustering/storage_linking.py @@ -141,10 +141,14 @@ def _add_storage_linking(self, storage, storage_cyclic: bool) -> None: # Get storage capacity bounds (may have period/scenario dimensions) capacity = storage.capacity_in_flow_hours + has_investment = hasattr(capacity, 'maximum_size') # InvestParameters + if hasattr(capacity, 'fixed_size') and capacity.fixed_size is not None: cap_value = capacity.fixed_size - elif hasattr(capacity, 'maximum') and capacity.maximum is not None: - cap_value = capacity.maximum + elif hasattr(capacity, 'maximum_size') and capacity.maximum_size is not None: + cap_value = capacity.maximum_size + elif isinstance(capacity, (int, float)): + cap_value = capacity else: cap_value = 1e9 # Large default @@ -195,6 +199,15 @@ def _add_storage_linking(self, storage, storage_cyclic: bool) -> None: short_name=f'SOC_boundary|{label}', ) + # For investment-based storage, add bounding constraint: SOC_boundary <= investment.size + # This ensures SOC_boundary is scaled by the actual capacity investment + if has_investment and storage.submodel.investment is not None: + investment_size = storage.submodel.investment.size + self.add_constraints( + soc_boundary <= investment_size, + 
short_name=f'SOC_boundary_ub|{label}', + ) + # Pre-compute delta_SOC for each representative period # delta_SOC[c] = charge_state[c, end] - charge_state[c, start] delta_soc_dict = {} @@ -226,8 +239,74 @@ def _add_storage_linking(self, storage, storage_cyclic: bool) -> None: lhs = soc_boundary.isel(cluster_boundary=0) - soc_boundary.isel(cluster_boundary=self._n_original_periods) self.add_constraints(lhs == 0, short_name=f'cyclic|{label}') + # Add combined bound constraints: 0 <= SOC_boundary[d] + charge_state[t] <= capacity + # This ensures the actual SOC (boundary + relative) stays within physical bounds + self._add_combined_bound_constraints(storage, soc_boundary, charge_state, has_investment, label) + logger.debug(f'Added inter-cluster linking for storage {label}') + def _add_combined_bound_constraints( + self, + storage, + soc_boundary, + charge_state, + has_investment: bool, + label: str, + ) -> None: + """Add combined bound constraints: 0 <= SOC_boundary[d] + charge_state[t] <= capacity. + + Following the S-N model from Blanke et al. (2022), the actual SOC at any time t + is SOC_boundary[d] + charge_state[t] where d is the original period containing t. + This must be within [0, capacity] for physical validity. + + For efficiency, we add constraints only at cluster boundaries (first and last timestep + of each cluster) since these are the extremes due to the monotonic nature of charge_state + within a cluster. + + Args: + storage: Storage component. + soc_boundary: SOC boundary variable with cluster_boundary dimension. + charge_state: Charge state variable with time dimension. + has_investment: Whether storage has investment decision. + label: Storage label for constraint naming. + """ + cluster_order = self.cluster_structure.get_cluster_order_for_slice() + investment_size = storage.submodel.investment.size if has_investment else None + + # For each original period, ensure combined SOC is within bounds + # We sample at representative points within each cluster to limit constraint count + # Key insight: charge_state starts at 0 and evolves within the cluster + # The extremes typically occur at the end of charge/discharge cycles + + for d in range(self._n_original_periods): + c = int(cluster_order[d]) + cluster_start = c * self._timesteps_per_cluster + cluster_end = (c + 1) * self._timesteps_per_cluster # charge_state has extra timestep + + soc_d = soc_boundary.isel(cluster_boundary=d) + + # Check at representative timesteps within the cluster + # We check at: start, middle, and end to capture key points + check_indices = [ + cluster_start, # Start (should be 0) + cluster_start + self._timesteps_per_cluster // 2, # Middle + cluster_end, # End (delta_SOC) + ] + + for idx in check_indices: + if idx >= len(charge_state.coords['time']): + continue + + cs_t = charge_state.isel(time=idx) + combined = soc_d + cs_t + + # Lower bound: combined >= 0 + self.add_constraints(combined >= 0, short_name=f'soc_lb|{label}|{d}|{idx}') + + # Upper bound: combined <= capacity + if investment_size is not None: + self.add_constraints(combined <= investment_size, short_name=f'soc_ub|{label}|{d}|{idx}') + def _add_linking_constraints_multi_dim( self, storage, From c0ddab3dc3d46e55914cbf3c91aa4e7a7a2e0369 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 20 Dec 2025 19:37:42 +0100 Subject: [PATCH 122/191] FIx clustered Storage --- flixopt/components.py | 221 ++++++++++++++++++++++++++++++++++++++++- flixopt/flow_system.py | 38 ------- 2 files changed, 219 
insertions(+), 40 deletions(-) diff --git a/flixopt/components.py b/flixopt/components.py index 70d154284..ce96b11b2 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -977,8 +977,18 @@ def _do_modeling(self): relative_bounds=self._relative_charge_state_bounds, ) - # Initial charge state - self._initial_and_final_charge_state() + # Initial charge state (only for non-intercluster modes) + clustering = self._model.flow_system.clustering + is_intercluster = clustering is not None and self.element.cluster_mode in ( + 'intercluster', + 'intercluster_cyclic', + ) + if not is_intercluster: + self._initial_and_final_charge_state() + + # Add inter-cluster linking for intercluster modes + if is_intercluster: + self._add_intercluster_linking() # Balanced sizes if self.element.balanced: @@ -1012,6 +1022,213 @@ def _initial_and_final_charge_state(self): short_name='final_charge_min', ) + def _add_intercluster_linking(self) -> None: + """Add inter-cluster storage linking for aggregated optimization. + + Following the S-N model from Blanke et al. (2022), this method: + 1. Constrains charge_state at each cluster start to 0 (ΔE_0 = 0) + 2. Creates SOC_boundary variables to track absolute SOC across original periods + 3. Links via: SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]] + 4. Adds bounds: 0 ≤ SOC_boundary[d] + charge_state[t] ≤ capacity + 5. Optionally enforces cyclic: SOC_boundary[0] = SOC_boundary[end] + """ + clustering = self._model.flow_system.clustering + if clustering is None or clustering.result.cluster_structure is None: + return + + cluster_structure = clustering.result.cluster_structure + n_clusters = ( + int(cluster_structure.n_clusters) + if isinstance(cluster_structure.n_clusters, (int, np.integer)) + else int(cluster_structure.n_clusters.values) + ) + timesteps_per_cluster = cluster_structure.timesteps_per_cluster + n_original_periods = cluster_structure.n_original_periods + has_multi_dims = cluster_structure.has_multi_dims + storage_cyclic = self.element.cluster_mode == 'intercluster_cyclic' + + charge_state = self.charge_state + + # Constrain each cluster's start charge_state to 0 (ΔE_0 = 0 in S-N model) + for c in range(n_clusters): + start_idx = c * timesteps_per_cluster + self.add_constraints( + charge_state.isel(time=start_idx) == 0, + short_name=f'cluster_start_{c}', + ) + + # Get storage capacity bounds + capacity = self.element.capacity_in_flow_hours + has_investment = isinstance(capacity, InvestParameters) + + if hasattr(capacity, 'fixed_size') and capacity.fixed_size is not None: + cap_value = capacity.fixed_size + elif hasattr(capacity, 'maximum_size') and capacity.maximum_size is not None: + cap_value = capacity.maximum_size + elif isinstance(capacity, (int, float)): + cap_value = capacity + else: + cap_value = 1e9 + + # Create SOC_boundary variables + n_boundaries = n_original_periods + 1 + boundary_coords = {'cluster_boundary': np.arange(n_boundaries)} + boundary_dims = ['cluster_boundary'] + + extra_dims = [] + if self._model.flow_system.periods is not None: + extra_dims.append('period') + boundary_coords['period'] = np.array(list(self._model.flow_system.periods)) + if self._model.flow_system.scenarios is not None: + extra_dims.append('scenario') + boundary_coords['scenario'] = np.array(list(self._model.flow_system.scenarios)) + + if extra_dims: + boundary_dims = ['cluster_boundary'] + extra_dims + + lb_shape = [n_boundaries] + [len(boundary_coords[d]) for d in extra_dims] + lb = xr.DataArray(np.zeros(lb_shape), coords=boundary_coords, 
dims=boundary_dims) + + if isinstance(cap_value, xr.DataArray) and cap_value.dims: + ub = cap_value.expand_dims({'cluster_boundary': n_boundaries}, axis=0) + ub = ub.assign_coords(cluster_boundary=np.arange(n_boundaries)) + ub = ub.transpose('cluster_boundary', ...) + else: + if hasattr(cap_value, 'item'): + cap_value = float(cap_value.item()) + else: + cap_value = float(cap_value) + ub = xr.DataArray(np.full(lb_shape, cap_value), coords=boundary_coords, dims=boundary_dims) + + soc_boundary = self.add_variables( + lower=lb, + upper=ub, + coords=boundary_coords, + dims=boundary_dims, + short_name='SOC_boundary', + ) + + # Add SOC_boundary <= investment.size for investment-based storage + if has_investment and self.investment is not None: + self.add_constraints( + soc_boundary <= self.investment.size, + short_name='SOC_boundary_ub', + ) + + # Pre-compute delta_SOC for each cluster + delta_soc_dict = {} + for c in range(n_clusters): + start_idx = c * timesteps_per_cluster + end_idx = (c + 1) * timesteps_per_cluster + delta_soc_dict[c] = charge_state.isel(time=end_idx) - charge_state.isel(time=start_idx) + + # Create linking constraints + if has_multi_dims: + self._add_linking_constraints_multi_dim(cluster_structure, soc_boundary, delta_soc_dict, n_original_periods) + else: + cluster_order = cluster_structure.get_cluster_order_for_slice() + for d in range(n_original_periods): + c = int(cluster_order[d]) + lhs = ( + soc_boundary.isel(cluster_boundary=d + 1) + - soc_boundary.isel(cluster_boundary=d) + - delta_soc_dict[c] + ) + self.add_constraints(lhs == 0, short_name=f'link_{d}') + + # Cyclic constraint + if storage_cyclic: + self.add_constraints( + soc_boundary.isel(cluster_boundary=0) == soc_boundary.isel(cluster_boundary=n_original_periods), + short_name='cyclic', + ) + + # Combined bound constraints + self._add_combined_bound_constraints( + cluster_structure, soc_boundary, charge_state, has_investment, n_original_periods, timesteps_per_cluster + ) + + def _add_linking_constraints_multi_dim( + self, + cluster_structure, + soc_boundary, + delta_soc_dict: dict, + n_original_periods: int, + ) -> None: + """Add linking constraints when cluster_order has period/scenario dimensions.""" + periods = list(self._model.flow_system.periods) if self._model.flow_system.periods else [None] + scenarios = list(self._model.flow_system.scenarios) if self._model.flow_system.scenarios else [None] + has_periods = periods != [None] + has_scenarios = scenarios != [None] + soc_dims = set(soc_boundary.dims) + + for p in periods: + for s in scenarios: + cluster_order = cluster_structure.get_cluster_order_for_slice(period=p, scenario=s) + + soc_selector = {} + if has_periods and p is not None and 'period' in soc_dims: + soc_selector['period'] = p + if has_scenarios and s is not None and 'scenario' in soc_dims: + soc_selector['scenario'] = s + + soc_slice = soc_boundary.sel(**soc_selector) if soc_selector else soc_boundary + + for d in range(n_original_periods): + c = int(cluster_order[d]) + delta_soc = delta_soc_dict[c] + + delta_selector = {} + if has_periods and p is not None and 'period' in delta_soc.dims: + delta_selector['period'] = p + if has_scenarios and s is not None and 'scenario' in delta_soc.dims: + delta_selector['scenario'] = s + if delta_selector: + delta_soc = delta_soc.sel(**delta_selector) + + lhs = soc_slice.isel(cluster_boundary=d + 1) - soc_slice.isel(cluster_boundary=d) - delta_soc + + suffix = '' + if has_periods and p is not None: + suffix += f'_p{p}' + if has_scenarios and s is not None: + 
suffix += f'_s{s}' + self.add_constraints(lhs == 0, short_name=f'link_{d}{suffix}') + + def _add_combined_bound_constraints( + self, + cluster_structure, + soc_boundary, + charge_state, + has_investment: bool, + n_original_periods: int, + timesteps_per_cluster: int, + ) -> None: + """Add combined bound constraints: 0 <= SOC_boundary[d] + charge_state[t] <= capacity.""" + cluster_order = cluster_structure.get_cluster_order_for_slice() + investment_size = self.investment.size if has_investment and self.investment else None + + for d in range(n_original_periods): + c = int(cluster_order[d]) + cluster_start = c * timesteps_per_cluster + cluster_end = (c + 1) * timesteps_per_cluster + + soc_d = soc_boundary.isel(cluster_boundary=d) + + check_indices = [cluster_start, cluster_start + timesteps_per_cluster // 2, cluster_end] + + for idx in check_indices: + if idx >= len(charge_state.coords['time']): + continue + + cs_t = charge_state.isel(time=idx) + combined = soc_d + cs_t + + self.add_constraints(combined >= 0, short_name=f'soc_lb_{d}_{idx}') + + if investment_size is not None: + self.add_constraints(combined <= investment_size, short_name=f'soc_ub_{d}_{idx}') + @property def _absolute_charge_state_bounds(self) -> tuple[xr.DataArray, xr.DataArray]: relative_lower_bound, relative_upper_bound = self._relative_charge_state_bounds diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 0e24a0ddb..40ab95b48 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -1293,46 +1293,8 @@ def build_model(self, normalize_weights: bool = True) -> FlowSystem: self.model.do_modeling() - # Add inter-cluster storage linking if this is an aggregated FlowSystem - if self.clustering is not None: - self._add_inter_cluster_linking() - return self - def _add_inter_cluster_linking(self) -> None: - """Add storage inter-cluster linking for aggregated optimization. - - Creates SOC_boundary variables that link storage states between sequential - periods in the original time series, using the delta SOC from representative periods. - Only storages with cluster_mode='intercluster' or 'intercluster_cyclic' are linked. - """ - from .clustering.storage_linking import InterClusterLinking - - info = self.clustering - if info is None: - return - - if info.result.cluster_structure is None: - logger.warning('No cluster structure available for inter-cluster linking') - return - - # Filter storages that need inter-cluster linking - storages_for_linking = [ - s for s in self.storages.values() if s.cluster_mode in ('intercluster', 'intercluster_cyclic') - ] - - if not storages_for_linking: - logger.info('No storages with intercluster mode - skipping inter-cluster linking') - return - - linking_model = InterClusterLinking( - model=self.model, - flow_system=self, - cluster_structure=info.result.cluster_structure, - storages=storages_for_linking, - ) - linking_model.do_modeling() - def solve(self, solver: _Solver) -> FlowSystem: """ Solve the optimization model and populate the solution. From 325534e1b8ff2a583f46ecd8f72f7f0feb80263c Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 20 Dec 2025 20:11:29 +0100 Subject: [PATCH 123/191] The refactoring is complete. Here's a summary of what was done: MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Summary New Files Created 1. 
flixopt/clustering/intercluster_helpers.py - New helper module with: - SliceContext - dataclass for iteration context with period/scenario/cluster_order - MultiDimIterator - unified iterator over (period, scenario) combinations - CapacityBounds - dataclass for capacity bounds - extract_capacity_bounds() - extract bounds from various capacity parameter types - build_boundary_coords() - build coordinates/dims for SOC_boundary variables Refactored Methods in StorageModel (components.py:1025-1211) The monolithic 200+ line method was split into focused helper methods: - _add_intercluster_linking() - Main orchestrator (now ~70 lines) - _add_cluster_start_constraints() - Adds ΔE_0 = 0 constraints - _compute_delta_soc() - Pre-computes delta_SOC for each cluster - _add_linking_constraints() - Links SOC boundaries using MultiDimIterator - _add_combined_bound_constraints() - Adds combined bounds, now correctly handles multi-dim cases Removed Files - flixopt/clustering/storage_linking.py - The old InterClusterLinking class is no longer needed Updated Exports - flixopt/clustering/__init__.py - Removed InterClusterLinking from exports Key Bug Fix The _add_combined_bound_constraints() method now uses MultiDimIterator to correctly iterate over (period, scenario) combinations, fixing the bug where it always used the default cluster_order slice regardless of dimensions. All 125 clustering and storage tests pass. --- flixopt/clustering/__init__.py | 16 - flixopt/clustering/intercluster_helpers.py | 197 +++++++++++ flixopt/clustering/storage_linking.py | 378 --------------------- flixopt/components.py | 245 ++++++------- 4 files changed, 308 insertions(+), 528 deletions(-) create mode 100644 flixopt/clustering/intercluster_helpers.py delete mode 100644 flixopt/clustering/storage_linking.py diff --git a/flixopt/clustering/__init__.py b/flixopt/clustering/__init__.py index ab7a09cf7..a5446a524 100644 --- a/flixopt/clustering/__init__.py +++ b/flixopt/clustering/__init__.py @@ -32,27 +32,11 @@ create_cluster_structure_from_mapping, ) -# Lazy import for InterClusterLinking to avoid circular imports -# It depends on structure.Submodel which has complex import dependencies -InterClusterLinking = None - - -def _get_inter_cluster_linking(): - """Get InterClusterLinking class with lazy import.""" - global InterClusterLinking - if InterClusterLinking is None: - from .storage_linking import InterClusterLinking as _InterClusterLinking - - InterClusterLinking = _InterClusterLinking - return InterClusterLinking - - __all__ = [ # Core classes 'ClusterResult', 'Clustering', 'ClusterStructure', - 'InterClusterLinking', # Utilities 'create_cluster_structure_from_mapping', ] diff --git a/flixopt/clustering/intercluster_helpers.py b/flixopt/clustering/intercluster_helpers.py new file mode 100644 index 000000000..c95dd2b2d --- /dev/null +++ b/flixopt/clustering/intercluster_helpers.py @@ -0,0 +1,197 @@ +"""Helper utilities for inter-cluster storage linking. + +This module provides reusable utilities for building inter-cluster storage linking +constraints following the S-N model from Blanke et al. (2022). 
+""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import TYPE_CHECKING + +import numpy as np +import xarray as xr + +if TYPE_CHECKING: + from collections.abc import Iterator + + from ..flow_system import FlowSystem + from ..interface import InvestParameters + from .base import ClusterStructure + + +@dataclass +class SliceContext: + """Context for a (period, scenario) slice during constraint generation. + + Provides the current iteration state when iterating over multi-dimensional + cluster orders, along with helper methods for constraint naming. + """ + + period: str | int | None + scenario: str | None + cluster_order: np.ndarray + + @property + def suffix(self) -> str: + """Generate constraint name suffix like '_p2020_shigh'.""" + parts = [] + if self.period is not None: + parts.append(f'p{self.period}') + if self.scenario is not None: + parts.append(f's{self.scenario}') + return '_' + '_'.join(parts) if parts else '' + + +class MultiDimIterator: + """Unified iterator over (period, scenario) combinations. + + Provides a clean interface for iterating over multi-dimensional slices + with automatic handling of None cases and selector building. + + Example: + iterator = MultiDimIterator(flow_system, cluster_structure) + for ctx in iterator: + # ctx.period, ctx.scenario, ctx.cluster_order available + selector = iterator.build_selector(ctx, available_dims) + data_slice = data.sel(**selector) if selector else data + """ + + def __init__(self, flow_system: FlowSystem, cluster_structure: ClusterStructure): + """Initialize the iterator. + + Args: + flow_system: The FlowSystem containing period/scenario dimensions. + cluster_structure: The ClusterStructure with cluster ordering info. + """ + self.periods = list(flow_system.periods) if flow_system.periods is not None else [None] + self.scenarios = list(flow_system.scenarios) if flow_system.scenarios is not None else [None] + self.cluster_structure = cluster_structure + + @property + def has_periods(self) -> bool: + """Check if there are period dimensions.""" + return self.periods != [None] + + @property + def has_scenarios(self) -> bool: + """Check if there are scenario dimensions.""" + return self.scenarios != [None] + + @property + def is_multi_dim(self) -> bool: + """Check if there are any extra dimensions beyond time.""" + return self.has_periods or self.has_scenarios + + def __iter__(self) -> Iterator[SliceContext]: + """Iterate over all (period, scenario) combinations.""" + for p in self.periods: + for s in self.scenarios: + cluster_order = self.cluster_structure.get_cluster_order_for_slice(period=p, scenario=s) + yield SliceContext(period=p, scenario=s, cluster_order=cluster_order) + + def build_selector(self, ctx: SliceContext, available_dims: set[str]) -> dict: + """Build xarray selector dict for the given context. + + Args: + ctx: The current slice context. + available_dims: Set of dimension names available in the target data. + + Returns: + Dict suitable for xr.DataArray.sel(**selector). 
+ """ + selector = {} + if self.has_periods and ctx.period is not None and 'period' in available_dims: + selector['period'] = ctx.period + if self.has_scenarios and ctx.scenario is not None and 'scenario' in available_dims: + selector['scenario'] = ctx.scenario + return selector + + +@dataclass +class CapacityBounds: + """Extracted capacity bounds for storage SOC_boundary variables.""" + + lower: xr.DataArray + upper: xr.DataArray + has_investment: bool + + +def extract_capacity_bounds( + capacity_param: InvestParameters | int | float, + boundary_coords: dict, + boundary_dims: list[str], +) -> CapacityBounds: + """Extract capacity bounds from storage parameters. + + Handles: + - Fixed numeric values + - InvestParameters with fixed_size or maximum_size + - xr.DataArray with dimensions + + Args: + capacity_param: The capacity parameter (InvestParameters or scalar). + boundary_coords: Coordinates for SOC_boundary variable. + boundary_dims: Dimension names for SOC_boundary variable. + + Returns: + CapacityBounds with lower/upper bounds and investment flag. + """ + n_boundaries = len(boundary_coords['cluster_boundary']) + lb_shape = [n_boundaries] + [len(boundary_coords[d]) for d in boundary_dims[1:]] + + lb = xr.DataArray(np.zeros(lb_shape), coords=boundary_coords, dims=boundary_dims) + + # Determine has_investment and cap_value + has_investment = hasattr(capacity_param, 'maximum_size') + + if hasattr(capacity_param, 'fixed_size') and capacity_param.fixed_size is not None: + cap_value = capacity_param.fixed_size + elif hasattr(capacity_param, 'maximum_size') and capacity_param.maximum_size is not None: + cap_value = capacity_param.maximum_size + elif isinstance(capacity_param, (int, float)): + cap_value = capacity_param + else: + cap_value = 1e9 # Large default for unbounded case + + # Build upper bound + if isinstance(cap_value, xr.DataArray) and cap_value.dims: + ub = cap_value.expand_dims({'cluster_boundary': n_boundaries}, axis=0) + ub = ub.assign_coords(cluster_boundary=np.arange(n_boundaries)) + ub = ub.transpose('cluster_boundary', ...) + else: + if hasattr(cap_value, 'item'): + cap_value = float(cap_value.item()) + else: + cap_value = float(cap_value) + ub = xr.DataArray(np.full(lb_shape, cap_value), coords=boundary_coords, dims=boundary_dims) + + return CapacityBounds(lower=lb, upper=ub, has_investment=has_investment) + + +def build_boundary_coords( + n_original_periods: int, + flow_system: FlowSystem, +) -> tuple[dict, list[str]]: + """Build coordinates and dimensions for SOC_boundary variables. + + Args: + n_original_periods: Number of original (non-aggregated) periods. + flow_system: The FlowSystem containing period/scenario dimensions. + + Returns: + Tuple of (coords dict, dims list) ready for variable creation. + """ + n_boundaries = n_original_periods + 1 + coords = {'cluster_boundary': np.arange(n_boundaries)} + dims = ['cluster_boundary'] + + if flow_system.periods is not None: + dims.append('period') + coords['period'] = np.array(list(flow_system.periods)) + + if flow_system.scenarios is not None: + dims.append('scenario') + coords['scenario'] = np.array(list(flow_system.scenarios)) + + return coords, dims diff --git a/flixopt/clustering/storage_linking.py b/flixopt/clustering/storage_linking.py deleted file mode 100644 index dd8ff39f6..000000000 --- a/flixopt/clustering/storage_linking.py +++ /dev/null @@ -1,378 +0,0 @@ -""" -Inter-cluster storage linking for aggregated optimization. 
- -When using time series aggregation (clustering), timesteps are reduced to only -representative (typical) periods. This module provides the `InterClusterLinking` -model that tracks storage state across the full original time horizon. -""" - -from __future__ import annotations - -import logging -from typing import TYPE_CHECKING - -import numpy as np - -from ..structure import Submodel - -if TYPE_CHECKING: - from ..flow_system import FlowSystem - from ..structure import FlowSystemModel - from .base import ClusterStructure - -logger = logging.getLogger('flixopt') - - -class InterClusterLinking(Submodel): - """Model that links storage state across representative periods. - - When using aggregation (clustering), timesteps are reduced to only representative - periods. This model creates variables and constraints to track storage state - across the full original time horizon using boundary state variables. - - The approach: - 1. Create SOC_boundary[d] for each original period d (0 to n_original_periods) - 2. Compute delta_SOC[c] for each representative period c (change in SOC during period) - 3. Link: SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]] - 4. Optionally enforce cyclic: SOC_boundary[0] = SOC_boundary[n_original_periods] - - This allows the optimizer to properly value storage for long-term (seasonal) - patterns while only solving for the representative timesteps. - - Example: - >>> from flixopt.clustering import ClusterStructure, InterClusterLinking - >>> structure = ClusterStructure(...) - >>> storages = [s for s in fs.storages.values() if s.cluster_mode in ('intercluster', 'intercluster_cyclic')] - >>> model = InterClusterLinking( - ... model=flow_system.model, - ... flow_system=flow_system, - ... cluster_structure=structure, - ... storages=storages, - ... ) - >>> model.do_modeling() - """ - - def __init__( - self, - model: FlowSystemModel, - flow_system: FlowSystem, - cluster_structure: ClusterStructure, - storages: list, - ): - """ - Args: - model: The FlowSystemModel to add constraints to. - flow_system: The FlowSystem being optimized. - cluster_structure: Clustering structure with cluster_order and occurrences. - storages: List of Storage components to add inter-cluster linking for. - Each storage's cluster_mode determines if cyclic constraint is added. - """ - super().__init__(model, label_of_element='InterClusterLinking', label_of_model='InterClusterLinking') - self.flow_system = flow_system - self.cluster_structure = cluster_structure - self.storages = storages - - # Extract commonly used values from cluster_structure - self._n_clusters = ( - int(cluster_structure.n_clusters) - if isinstance(cluster_structure.n_clusters, (int, np.integer)) - else int(cluster_structure.n_clusters.values) - ) - self._timesteps_per_cluster = cluster_structure.timesteps_per_cluster - self._n_original_periods = cluster_structure.n_original_periods - self._has_multi_dims = cluster_structure.has_multi_dims - - def do_modeling(self): - """Create SOC boundary variables and inter-period linking constraints. 
- - For each storage: - - SOC_boundary[d]: State of charge at start of original period d - - delta_SOC[c]: Change in SOC during representative period c - - Linking: SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]] - """ - if not self.storages: - logger.info('No storages to link - skipping inter-cluster linking') - return - - logger.info( - f'Adding inter-cluster storage linking for {len(self.storages)} storages ' - f'({self._n_original_periods} original periods, {self._n_clusters} clusters)' - ) - - for storage in self.storages: - storage_cyclic = storage.cluster_mode == 'intercluster_cyclic' - self._add_storage_linking(storage, storage_cyclic) - - def _add_storage_linking(self, storage, storage_cyclic: bool) -> None: - """Add inter-cluster linking constraints for a single storage. - - Following the S-N model from Blanke et al. (2022), this method: - 1. Constrains charge_state at each cluster start to 0 (ΔE_0 = 0) - 2. Creates SOC_boundary variables to track absolute SOC across original periods - 3. Links via: SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]] - 4. Adds bounds: 0 ≤ SOC_boundary[d] + charge_state[t] ≤ capacity - - Args: - storage: Storage component to add linking for. - storage_cyclic: If True, enforce SOC_boundary[0] = SOC_boundary[end]. - """ - import xarray as xr - - label = storage.label - - # Get the charge state variable from the storage's submodel - charge_state_name = f'{label}|charge_state' - if charge_state_name not in storage.submodel.variables: - logger.warning(f'Storage {label} has no charge_state variable - skipping') - return - - charge_state = storage.submodel.variables[charge_state_name] - - # === CRITICAL FIX: Constrain each cluster's start charge_state to 0 === - # This makes charge_state relative to cluster start (like ΔE in S-N model) - # Without this, cluster starts are free variables allowing "free energy" - for c in range(self._n_clusters): - start_idx = c * self._timesteps_per_cluster - self.add_constraints( - charge_state.isel(time=start_idx) == 0, - short_name=f'cluster_start|{label}|{c}', - ) - logger.debug(f'Added {self._n_clusters} cluster start constraints for {label}') - - # Get storage capacity bounds (may have period/scenario dimensions) - capacity = storage.capacity_in_flow_hours - has_investment = hasattr(capacity, 'maximum_size') # InvestParameters - - if hasattr(capacity, 'fixed_size') and capacity.fixed_size is not None: - cap_value = capacity.fixed_size - elif hasattr(capacity, 'maximum_size') and capacity.maximum_size is not None: - cap_value = capacity.maximum_size - elif isinstance(capacity, (int, float)): - cap_value = capacity - else: - cap_value = 1e9 # Large default - - # Create SOC_boundary variables for each original period boundary - # We need n_original_periods + 1 boundaries (start of first through end of last) - n_boundaries = self._n_original_periods + 1 - boundary_coords = {'cluster_boundary': np.arange(n_boundaries)} - boundary_dims = ['cluster_boundary'] - - # Determine extra dimensions from FlowSystem (period, scenario) - # These are needed even if cap_value is scalar, because different periods/scenarios - # may have different cluster assignments - extra_dims = [] - if self.flow_system.periods is not None: - extra_dims.append('period') - boundary_coords['period'] = np.array(list(self.flow_system.periods)) - if self.flow_system.scenarios is not None: - extra_dims.append('scenario') - boundary_coords['scenario'] = np.array(list(self.flow_system.scenarios)) - - if extra_dims: - 
boundary_dims = ['cluster_boundary'] + extra_dims - - # Build bounds shape - lb_shape = [n_boundaries] + [len(boundary_coords[d]) for d in extra_dims] - lb = xr.DataArray(np.zeros(lb_shape), coords=boundary_coords, dims=boundary_dims) - - # Get upper bound from capacity - if isinstance(cap_value, xr.DataArray) and cap_value.dims: - # cap_value has dimensions - expand to include cluster_boundary - ub = cap_value.expand_dims({'cluster_boundary': n_boundaries}, axis=0) - ub = ub.assign_coords(cluster_boundary=np.arange(n_boundaries)) - # Ensure dims are in the right order - ub = ub.transpose('cluster_boundary', ...) - else: - # Scalar cap_value - broadcast to all dims - if hasattr(cap_value, 'item'): - cap_value = float(cap_value.item()) - else: - cap_value = float(cap_value) - ub = xr.DataArray(np.full(lb_shape, cap_value), coords=boundary_coords, dims=boundary_dims) - - soc_boundary = self.add_variables( - lower=lb, - upper=ub, - coords=boundary_coords, - dims=boundary_dims, - short_name=f'SOC_boundary|{label}', - ) - - # For investment-based storage, add bounding constraint: SOC_boundary <= investment.size - # This ensures SOC_boundary is scaled by the actual capacity investment - if has_investment and storage.submodel.investment is not None: - investment_size = storage.submodel.investment.size - self.add_constraints( - soc_boundary <= investment_size, - short_name=f'SOC_boundary_ub|{label}', - ) - - # Pre-compute delta_SOC for each representative period - # delta_SOC[c] = charge_state[c, end] - charge_state[c, start] - delta_soc_dict = {} - for c in range(self._n_clusters): - start_idx = c * self._timesteps_per_cluster - end_idx = (c + 1) * self._timesteps_per_cluster # charge_state has extra timestep - - delta_soc_dict[c] = charge_state.isel(time=end_idx) - charge_state.isel(time=start_idx) - - # Create linking constraints: - # SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]] - if self._has_multi_dims: - # Multi-dimensional cluster_order: create constraints per (period, scenario) slice - self._add_linking_constraints_multi_dim(storage, soc_boundary, delta_soc_dict, label) - else: - # Simple case: single cluster_order for all slices - cluster_order = self.cluster_structure.get_cluster_order_for_slice() - for d in range(self._n_original_periods): - c = int(cluster_order[d]) - lhs = ( - soc_boundary.isel(cluster_boundary=d + 1) - - soc_boundary.isel(cluster_boundary=d) - - delta_soc_dict[c] - ) - self.add_constraints(lhs == 0, short_name=f'link|{label}|{d}') - - # Cyclic constraint: SOC_boundary[0] = SOC_boundary[end] - if storage_cyclic: - lhs = soc_boundary.isel(cluster_boundary=0) - soc_boundary.isel(cluster_boundary=self._n_original_periods) - self.add_constraints(lhs == 0, short_name=f'cyclic|{label}') - - # Add combined bound constraints: 0 <= SOC_boundary[d] + charge_state[t] <= capacity - # This ensures the actual SOC (boundary + relative) stays within physical bounds - self._add_combined_bound_constraints(storage, soc_boundary, charge_state, has_investment, label) - - logger.debug(f'Added inter-cluster linking for storage {label}') - - def _add_combined_bound_constraints( - self, - storage, - soc_boundary, - charge_state, - has_investment: bool, - label: str, - ) -> None: - """Add combined bound constraints: 0 <= SOC_boundary[d] + charge_state[t] <= capacity. - - Following the S-N model from Blanke et al. (2022), the actual SOC at any time t - is SOC_boundary[d] + charge_state[t] where d is the original period containing t. 
- This must be within [0, capacity] for physical validity. - - For efficiency, we add constraints only at cluster boundaries (first and last timestep - of each cluster) since these are the extremes due to the monotonic nature of charge_state - within a cluster. - - Args: - storage: Storage component. - soc_boundary: SOC boundary variable with cluster_boundary dimension. - charge_state: Charge state variable with time dimension. - has_investment: Whether storage has investment decision. - label: Storage label for constraint naming. - """ - cluster_order = self.cluster_structure.get_cluster_order_for_slice() - investment_size = storage.submodel.investment.size if has_investment else None - - # For each original period, ensure combined SOC is within bounds - # We sample at representative points within each cluster to limit constraint count - # Key insight: charge_state starts at 0 and evolves within the cluster - # The extremes typically occur at the end of charge/discharge cycles - - for d in range(self._n_original_periods): - c = int(cluster_order[d]) - cluster_start = c * self._timesteps_per_cluster - cluster_end = (c + 1) * self._timesteps_per_cluster # charge_state has extra timestep - - soc_d = soc_boundary.isel(cluster_boundary=d) - - # Check at representative timesteps within the cluster - # We check at: start, middle, and end to capture key points - check_indices = [ - cluster_start, # Start (should be 0) - cluster_start + self._timesteps_per_cluster // 2, # Middle - cluster_end, # End (delta_SOC) - ] - - for idx in check_indices: - if idx >= len(charge_state.coords['time']): - continue - - cs_t = charge_state.isel(time=idx) - combined = soc_d + cs_t - - # Lower bound: combined >= 0 - self.add_constraints(combined >= 0, short_name=f'soc_lb|{label}|{d}|{idx}') - - # Upper bound: combined <= capacity - if investment_size is not None: - self.add_constraints(combined <= investment_size, short_name=f'soc_ub|{label}|{d}|{idx}') - - def _add_linking_constraints_multi_dim( - self, - storage, - soc_boundary, - delta_soc_dict: dict, - label: str, - ) -> None: - """Add linking constraints when cluster_order has period/scenario dimensions. - - When different (period, scenario) slices have different cluster assignments, - we need to create constraints that select the correct delta_SOC for each slice. - - Args: - storage: Storage component being linked. - soc_boundary: SOC boundary variable with dims [cluster_boundary, period?, scenario?]. - delta_soc_dict: Dict mapping cluster ID to delta_SOC expression. - label: Storage label for constraint naming. 
- """ - # Determine which dimensions we're iterating over - periods = list(self.flow_system.periods) if self.flow_system.periods is not None else [None] - scenarios = list(self.flow_system.scenarios) if self.flow_system.scenarios is not None else [None] - has_periods = periods != [None] - has_scenarios = scenarios != [None] - - # Check which dimensions soc_boundary actually has - soc_dims = set(soc_boundary.dims) - - # For each (period, scenario) combination, create constraints using the slice's cluster_order - for p in periods: - for s in scenarios: - cluster_order = self.cluster_structure.get_cluster_order_for_slice(period=p, scenario=s) - - # Build selector for this slice - only include dims that exist in soc_boundary - soc_selector = {} - if has_periods and p is not None and 'period' in soc_dims: - soc_selector['period'] = p - if has_scenarios and s is not None and 'scenario' in soc_dims: - soc_selector['scenario'] = s - - # Select the slice of soc_boundary for this (period, scenario) - soc_boundary_slice = soc_boundary.sel(**soc_selector) if soc_selector else soc_boundary - - for d in range(self._n_original_periods): - c = int(cluster_order[d]) - delta_soc = delta_soc_dict[c] - - # Build selector for delta_soc - check which dims it has - delta_selector = {} - if has_periods and p is not None and 'period' in delta_soc.dims: - delta_selector['period'] = p - if has_scenarios and s is not None and 'scenario' in delta_soc.dims: - delta_selector['scenario'] = s - if delta_selector: - delta_soc = delta_soc.sel(**delta_selector) - - lhs = ( - soc_boundary_slice.isel(cluster_boundary=d + 1) - - soc_boundary_slice.isel(cluster_boundary=d) - - delta_soc - ) - - # Build constraint name with period/scenario info - slice_suffix = '' - if has_periods and p is not None: - slice_suffix += f'|p={p}' - if has_scenarios and s is not None: - slice_suffix += f'|s={s}' - - self.add_constraints(lhs == 0, short_name=f'link|{label}|{d}{slice_suffix}') diff --git a/flixopt/components.py b/flixopt/components.py index ce96b11b2..a84b2e228 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -1032,6 +1032,12 @@ def _add_intercluster_linking(self) -> None: 4. Adds bounds: 0 ≤ SOC_boundary[d] + charge_state[t] ≤ capacity 5. Optionally enforces cyclic: SOC_boundary[0] = SOC_boundary[end] """ + from .clustering.intercluster_helpers import ( + MultiDimIterator, + build_boundary_coords, + extract_capacity_bounds, + ) + clustering = self._model.flow_system.clustering if clustering is None or clustering.result.cluster_structure is None: return @@ -1044,190 +1050,161 @@ def _add_intercluster_linking(self) -> None: ) timesteps_per_cluster = cluster_structure.timesteps_per_cluster n_original_periods = cluster_structure.n_original_periods - has_multi_dims = cluster_structure.has_multi_dims - storage_cyclic = self.element.cluster_mode == 'intercluster_cyclic' - - charge_state = self.charge_state - - # Constrain each cluster's start charge_state to 0 (ΔE_0 = 0 in S-N model) - for c in range(n_clusters): - start_idx = c * timesteps_per_cluster - self.add_constraints( - charge_state.isel(time=start_idx) == 0, - short_name=f'cluster_start_{c}', - ) - # Get storage capacity bounds - capacity = self.element.capacity_in_flow_hours - has_investment = isinstance(capacity, InvestParameters) + # 1. 
Add cluster start constraints (ΔE_0 = 0) + self._add_cluster_start_constraints(n_clusters, timesteps_per_cluster) - if hasattr(capacity, 'fixed_size') and capacity.fixed_size is not None: - cap_value = capacity.fixed_size - elif hasattr(capacity, 'maximum_size') and capacity.maximum_size is not None: - cap_value = capacity.maximum_size - elif isinstance(capacity, (int, float)): - cap_value = capacity - else: - cap_value = 1e9 - - # Create SOC_boundary variables - n_boundaries = n_original_periods + 1 - boundary_coords = {'cluster_boundary': np.arange(n_boundaries)} - boundary_dims = ['cluster_boundary'] - - extra_dims = [] - if self._model.flow_system.periods is not None: - extra_dims.append('period') - boundary_coords['period'] = np.array(list(self._model.flow_system.periods)) - if self._model.flow_system.scenarios is not None: - extra_dims.append('scenario') - boundary_coords['scenario'] = np.array(list(self._model.flow_system.scenarios)) - - if extra_dims: - boundary_dims = ['cluster_boundary'] + extra_dims - - lb_shape = [n_boundaries] + [len(boundary_coords[d]) for d in extra_dims] - lb = xr.DataArray(np.zeros(lb_shape), coords=boundary_coords, dims=boundary_dims) - - if isinstance(cap_value, xr.DataArray) and cap_value.dims: - ub = cap_value.expand_dims({'cluster_boundary': n_boundaries}, axis=0) - ub = ub.assign_coords(cluster_boundary=np.arange(n_boundaries)) - ub = ub.transpose('cluster_boundary', ...) - else: - if hasattr(cap_value, 'item'): - cap_value = float(cap_value.item()) - else: - cap_value = float(cap_value) - ub = xr.DataArray(np.full(lb_shape, cap_value), coords=boundary_coords, dims=boundary_dims) + # 2. Create SOC_boundary variable + flow_system = self._model.flow_system + boundary_coords, boundary_dims = build_boundary_coords(n_original_periods, flow_system) + capacity_bounds = extract_capacity_bounds(self.element.capacity_in_flow_hours, boundary_coords, boundary_dims) soc_boundary = self.add_variables( - lower=lb, - upper=ub, + lower=capacity_bounds.lower, + upper=capacity_bounds.upper, coords=boundary_coords, dims=boundary_dims, short_name='SOC_boundary', ) - # Add SOC_boundary <= investment.size for investment-based storage - if has_investment and self.investment is not None: + # 3. Add SOC_boundary <= investment.size for investment-based storage + if capacity_bounds.has_investment and self.investment is not None: self.add_constraints( soc_boundary <= self.investment.size, short_name='SOC_boundary_ub', ) - # Pre-compute delta_SOC for each cluster - delta_soc_dict = {} - for c in range(n_clusters): - start_idx = c * timesteps_per_cluster - end_idx = (c + 1) * timesteps_per_cluster - delta_soc_dict[c] = charge_state.isel(time=end_idx) - charge_state.isel(time=start_idx) + # 4. Pre-compute delta_SOC for each cluster + delta_soc_dict = self._compute_delta_soc(n_clusters, timesteps_per_cluster) - # Create linking constraints - if has_multi_dims: - self._add_linking_constraints_multi_dim(cluster_structure, soc_boundary, delta_soc_dict, n_original_periods) - else: - cluster_order = cluster_structure.get_cluster_order_for_slice() - for d in range(n_original_periods): - c = int(cluster_order[d]) - lhs = ( - soc_boundary.isel(cluster_boundary=d + 1) - - soc_boundary.isel(cluster_boundary=d) - - delta_soc_dict[c] - ) - self.add_constraints(lhs == 0, short_name=f'link_{d}') + # 5. 
Add linking constraints using unified multi-dim iterator + iterator = MultiDimIterator(flow_system, cluster_structure) + self._add_linking_constraints(iterator, soc_boundary, delta_soc_dict, n_original_periods) - # Cyclic constraint - if storage_cyclic: + # 6. Add cyclic constraint if requested + if self.element.cluster_mode == 'intercluster_cyclic': self.add_constraints( soc_boundary.isel(cluster_boundary=0) == soc_boundary.isel(cluster_boundary=n_original_periods), short_name='cyclic', ) - # Combined bound constraints + # 7. Add combined bound constraints self._add_combined_bound_constraints( - cluster_structure, soc_boundary, charge_state, has_investment, n_original_periods, timesteps_per_cluster + iterator, + cluster_structure, + soc_boundary, + capacity_bounds.has_investment, + n_original_periods, + timesteps_per_cluster, ) - def _add_linking_constraints_multi_dim( + def _add_cluster_start_constraints(self, n_clusters: int, timesteps_per_cluster: int) -> None: + """Constrain charge_state at each cluster start to 0 (ΔE_0 = 0).""" + charge_state = self.charge_state + for c in range(n_clusters): + start_idx = c * timesteps_per_cluster + self.add_constraints( + charge_state.isel(time=start_idx) == 0, + short_name=f'cluster_start_{c}', + ) + + def _compute_delta_soc(self, n_clusters: int, timesteps_per_cluster: int) -> dict[int, xr.DataArray]: + """Pre-compute delta_SOC for each representative cluster.""" + charge_state = self.charge_state + delta_soc_dict = {} + for c in range(n_clusters): + start_idx = c * timesteps_per_cluster + end_idx = (c + 1) * timesteps_per_cluster + delta_soc_dict[c] = charge_state.isel(time=end_idx) - charge_state.isel(time=start_idx) + return delta_soc_dict + + def _add_linking_constraints( self, - cluster_structure, + iterator, soc_boundary, - delta_soc_dict: dict, + delta_soc_dict: dict[int, xr.DataArray], n_original_periods: int, ) -> None: - """Add linking constraints when cluster_order has period/scenario dimensions.""" - periods = list(self._model.flow_system.periods) if self._model.flow_system.periods else [None] - scenarios = list(self._model.flow_system.scenarios) if self._model.flow_system.scenarios else [None] - has_periods = periods != [None] - has_scenarios = scenarios != [None] + """Add linking constraints: SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]].""" soc_dims = set(soc_boundary.dims) - for p in periods: - for s in scenarios: - cluster_order = cluster_structure.get_cluster_order_for_slice(period=p, scenario=s) - - soc_selector = {} - if has_periods and p is not None and 'period' in soc_dims: - soc_selector['period'] = p - if has_scenarios and s is not None and 'scenario' in soc_dims: - soc_selector['scenario'] = s + for ctx in iterator: + soc_selector = iterator.build_selector(ctx, soc_dims) + soc_slice = soc_boundary.sel(**soc_selector) if soc_selector else soc_boundary - soc_slice = soc_boundary.sel(**soc_selector) if soc_selector else soc_boundary - - for d in range(n_original_periods): - c = int(cluster_order[d]) - delta_soc = delta_soc_dict[c] - - delta_selector = {} - if has_periods and p is not None and 'period' in delta_soc.dims: - delta_selector['period'] = p - if has_scenarios and s is not None and 'scenario' in delta_soc.dims: - delta_selector['scenario'] = s - if delta_selector: - delta_soc = delta_soc.sel(**delta_selector) + for d in range(n_original_periods): + c = int(ctx.cluster_order[d]) + delta_soc = delta_soc_dict[c] - lhs = soc_slice.isel(cluster_boundary=d + 1) - soc_slice.isel(cluster_boundary=d) - 
delta_soc + # Select delta_soc for this slice if it has the dimensions + delta_selector = iterator.build_selector(ctx, set(delta_soc.dims)) + if delta_selector: + delta_soc = delta_soc.sel(**delta_selector) - suffix = '' - if has_periods and p is not None: - suffix += f'_p{p}' - if has_scenarios and s is not None: - suffix += f'_s{s}' - self.add_constraints(lhs == 0, short_name=f'link_{d}{suffix}') + lhs = soc_slice.isel(cluster_boundary=d + 1) - soc_slice.isel(cluster_boundary=d) - delta_soc + self.add_constraints(lhs == 0, short_name=f'link_{d}{ctx.suffix}') def _add_combined_bound_constraints( self, + iterator, cluster_structure, soc_boundary, - charge_state, has_investment: bool, n_original_periods: int, timesteps_per_cluster: int, ) -> None: - """Add combined bound constraints: 0 <= SOC_boundary[d] + charge_state[t] <= capacity.""" - cluster_order = cluster_structure.get_cluster_order_for_slice() + """Add combined bound constraints: 0 <= SOC_boundary[d] + charge_state[t] <= capacity. + + This method correctly handles multi-dimensional cases by iterating + over (period, scenario) combinations using the unified iterator. + """ + charge_state = self.charge_state investment_size = self.investment.size if has_investment and self.investment else None - for d in range(n_original_periods): - c = int(cluster_order[d]) - cluster_start = c * timesteps_per_cluster - cluster_end = (c + 1) * timesteps_per_cluster + soc_dims = set(soc_boundary.dims) + charge_dims = set(charge_state.dims) + inv_dims = set(investment_size.dims) if investment_size is not None else set() - soc_d = soc_boundary.isel(cluster_boundary=d) + for ctx in iterator: + soc_selector = iterator.build_selector(ctx, soc_dims) + charge_selector = iterator.build_selector(ctx, charge_dims) - check_indices = [cluster_start, cluster_start + timesteps_per_cluster // 2, cluster_end] + soc_slice = soc_boundary.sel(**soc_selector) if soc_selector else soc_boundary + charge_slice = charge_state.sel(**charge_selector) if charge_selector else charge_state - for idx in check_indices: - if idx >= len(charge_state.coords['time']): - continue + for d in range(n_original_periods): + c = int(ctx.cluster_order[d]) + cluster_start = c * timesteps_per_cluster + cluster_end = (c + 1) * timesteps_per_cluster + + soc_d = soc_slice.isel(cluster_boundary=d) + + # Sample at key timesteps (start, middle, end) + check_indices = [ + cluster_start, + cluster_start + timesteps_per_cluster // 2, + cluster_end, + ] - cs_t = charge_state.isel(time=idx) - combined = soc_d + cs_t + for idx in check_indices: + if idx >= len(charge_state.coords['time']): + continue - self.add_constraints(combined >= 0, short_name=f'soc_lb_{d}_{idx}') + cs_t = charge_slice.isel(time=idx) + combined = soc_d + cs_t - if investment_size is not None: - self.add_constraints(combined <= investment_size, short_name=f'soc_ub_{d}_{idx}') + self.add_constraints( + combined >= 0, + short_name=f'soc_lb_{d}_{idx}{ctx.suffix}', + ) + + if investment_size is not None: + inv_selector = iterator.build_selector(ctx, inv_dims) + inv_slice = investment_size.sel(**inv_selector) if inv_selector else investment_size + self.add_constraints( + combined <= inv_slice, + short_name=f'soc_ub_{d}_{idx}{ctx.suffix}', + ) @property def _absolute_charge_state_bounds(self) -> tuple[xr.DataArray, xr.DataArray]: From 2a36a643ff71c3b07058d139c25939ea26b85aa1 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 20 Dec 2025 20:59:58 +0100 Subject: [PATCH 124/191] The inter-cluster 
linking in StorageModel has been refactored to be cleaner and more vectorized. Here's what changed: MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Before (overly complex) - 365+ individual constraints for linking and bounds (one per original period) - Dictionary-based delta_soc storage with integer keys - MultiDimIterator machinery with SliceContext and build_selector methods - Nested loops over periods, original_periods, and sample points After (simplified) - ~10 constraint groups total using vectorized operations - DataArray-based delta_soc with proper cluster dimension - No MultiDimIterator needed - xarray broadcasting handles dimensions naturally - Vectorized constraints for cluster starts, linking, and bounds Key changes in components.py: | Method | Before | After | |---------------------------------|-----------------------------------------------|------------------------------------------------------------| | _add_cluster_start_constraints | Loop over n_clusters | Single vectorized isel(time=cluster_starts) | | _compute_delta_soc | Returns dict[int, DataArray] | Returns DataArray with cluster dim | | _add_linking_constraints | Loop over n_original_periods with selectors | Single vectorized constraint using advanced indexing | | _add_combined_bound_constraints | Nested loops (periods × original_periods × 3) | Loop only over 3 sample points, vectorize original_periods | Removed from intercluster_helpers.py: - SliceContext dataclass - MultiDimIterator class (~70 lines of code) The code now follows the same pattern as the regular storage constraints (lines 928-945), using isel() with slices/arrays instead of Python loops. --- flixopt/clustering/intercluster_helpers.py | 91 ------------ flixopt/components.py | 158 ++++++++++----------- 2 files changed, 73 insertions(+), 176 deletions(-) diff --git a/flixopt/clustering/intercluster_helpers.py b/flixopt/clustering/intercluster_helpers.py index c95dd2b2d..fd0e41cce 100644 --- a/flixopt/clustering/intercluster_helpers.py +++ b/flixopt/clustering/intercluster_helpers.py @@ -13,99 +13,8 @@ import xarray as xr if TYPE_CHECKING: - from collections.abc import Iterator - from ..flow_system import FlowSystem from ..interface import InvestParameters - from .base import ClusterStructure - - -@dataclass -class SliceContext: - """Context for a (period, scenario) slice during constraint generation. - - Provides the current iteration state when iterating over multi-dimensional - cluster orders, along with helper methods for constraint naming. - """ - - period: str | int | None - scenario: str | None - cluster_order: np.ndarray - - @property - def suffix(self) -> str: - """Generate constraint name suffix like '_p2020_shigh'.""" - parts = [] - if self.period is not None: - parts.append(f'p{self.period}') - if self.scenario is not None: - parts.append(f's{self.scenario}') - return '_' + '_'.join(parts) if parts else '' - - -class MultiDimIterator: - """Unified iterator over (period, scenario) combinations. - - Provides a clean interface for iterating over multi-dimensional slices - with automatic handling of None cases and selector building. 
- - Example: - iterator = MultiDimIterator(flow_system, cluster_structure) - for ctx in iterator: - # ctx.period, ctx.scenario, ctx.cluster_order available - selector = iterator.build_selector(ctx, available_dims) - data_slice = data.sel(**selector) if selector else data - """ - - def __init__(self, flow_system: FlowSystem, cluster_structure: ClusterStructure): - """Initialize the iterator. - - Args: - flow_system: The FlowSystem containing period/scenario dimensions. - cluster_structure: The ClusterStructure with cluster ordering info. - """ - self.periods = list(flow_system.periods) if flow_system.periods is not None else [None] - self.scenarios = list(flow_system.scenarios) if flow_system.scenarios is not None else [None] - self.cluster_structure = cluster_structure - - @property - def has_periods(self) -> bool: - """Check if there are period dimensions.""" - return self.periods != [None] - - @property - def has_scenarios(self) -> bool: - """Check if there are scenario dimensions.""" - return self.scenarios != [None] - - @property - def is_multi_dim(self) -> bool: - """Check if there are any extra dimensions beyond time.""" - return self.has_periods or self.has_scenarios - - def __iter__(self) -> Iterator[SliceContext]: - """Iterate over all (period, scenario) combinations.""" - for p in self.periods: - for s in self.scenarios: - cluster_order = self.cluster_structure.get_cluster_order_for_slice(period=p, scenario=s) - yield SliceContext(period=p, scenario=s, cluster_order=cluster_order) - - def build_selector(self, ctx: SliceContext, available_dims: set[str]) -> dict: - """Build xarray selector dict for the given context. - - Args: - ctx: The current slice context. - available_dims: Set of dimension names available in the target data. - - Returns: - Dict suitable for xr.DataArray.sel(**selector). - """ - selector = {} - if self.has_periods and ctx.period is not None and 'period' in available_dims: - selector['period'] = ctx.period - if self.has_scenarios and ctx.scenario is not None and 'scenario' in available_dims: - selector['scenario'] = ctx.scenario - return selector @dataclass diff --git a/flixopt/components.py b/flixopt/components.py index a84b2e228..2175d4c0d 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -1033,7 +1033,6 @@ def _add_intercluster_linking(self) -> None: 5. Optionally enforces cyclic: SOC_boundary[0] = SOC_boundary[end] """ from .clustering.intercluster_helpers import ( - MultiDimIterator, build_boundary_coords, extract_capacity_bounds, ) @@ -1050,8 +1049,9 @@ def _add_intercluster_linking(self) -> None: ) timesteps_per_cluster = cluster_structure.timesteps_per_cluster n_original_periods = cluster_structure.n_original_periods + cluster_order = cluster_structure.cluster_order - # 1. Add cluster start constraints (ΔE_0 = 0) + # 1. Add cluster start constraints (ΔE_0 = 0) - vectorized self._add_cluster_start_constraints(n_clusters, timesteps_per_cluster) # 2. Create SOC_boundary variable @@ -1074,12 +1074,11 @@ def _add_intercluster_linking(self) -> None: short_name='SOC_boundary_ub', ) - # 4. Pre-compute delta_SOC for each cluster - delta_soc_dict = self._compute_delta_soc(n_clusters, timesteps_per_cluster) + # 4. Compute delta_SOC as DataArray with 'cluster' dimension + delta_soc = self._compute_delta_soc(n_clusters, timesteps_per_cluster) - # 5. 
Add linking constraints using unified multi-dim iterator - iterator = MultiDimIterator(flow_system, cluster_structure) - self._add_linking_constraints(iterator, soc_boundary, delta_soc_dict, n_original_periods) + # 5. Add linking constraints - vectorized + self._add_linking_constraints(soc_boundary, delta_soc, cluster_order, n_original_periods) # 6. Add cyclic constraint if requested if self.element.cluster_mode == 'intercluster_cyclic': @@ -1088,11 +1087,10 @@ def _add_intercluster_linking(self) -> None: short_name='cyclic', ) - # 7. Add combined bound constraints + # 7. Add combined bound constraints - vectorized self._add_combined_bound_constraints( - iterator, - cluster_structure, soc_boundary, + cluster_order, capacity_bounds.has_investment, n_original_periods, timesteps_per_cluster, @@ -1100,111 +1098,101 @@ def _add_intercluster_linking(self) -> None: def _add_cluster_start_constraints(self, n_clusters: int, timesteps_per_cluster: int) -> None: """Constrain charge_state at each cluster start to 0 (ΔE_0 = 0).""" - charge_state = self.charge_state - for c in range(n_clusters): - start_idx = c * timesteps_per_cluster - self.add_constraints( - charge_state.isel(time=start_idx) == 0, - short_name=f'cluster_start_{c}', - ) + cluster_starts = np.arange(0, n_clusters * timesteps_per_cluster, timesteps_per_cluster) + self.add_constraints( + self.charge_state.isel(time=cluster_starts) == 0, + short_name='cluster_start', + ) - def _compute_delta_soc(self, n_clusters: int, timesteps_per_cluster: int) -> dict[int, xr.DataArray]: - """Pre-compute delta_SOC for each representative cluster.""" - charge_state = self.charge_state - delta_soc_dict = {} - for c in range(n_clusters): - start_idx = c * timesteps_per_cluster - end_idx = (c + 1) * timesteps_per_cluster - delta_soc_dict[c] = charge_state.isel(time=end_idx) - charge_state.isel(time=start_idx) - return delta_soc_dict + def _compute_delta_soc(self, n_clusters: int, timesteps_per_cluster: int) -> xr.DataArray: + """Compute delta_SOC for each representative cluster as a DataArray. + + Returns DataArray with 'cluster' dimension containing the net charge state + change (end - start) for each cluster. + """ + starts = np.arange(0, n_clusters * timesteps_per_cluster, timesteps_per_cluster) + ends = starts + timesteps_per_cluster + # Compute delta for all clusters at once + delta_soc = self.charge_state.isel(time=ends) - self.charge_state.isel(time=starts) + # Replace 'time' dim with 'cluster' dim + return delta_soc.assign_coords(time=np.arange(n_clusters)).rename({'time': 'cluster'}) def _add_linking_constraints( self, - iterator, - soc_boundary, - delta_soc_dict: dict[int, xr.DataArray], + soc_boundary: xr.DataArray, + delta_soc: xr.DataArray, + cluster_order: xr.DataArray, n_original_periods: int, ) -> None: - """Add linking constraints: SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]].""" - soc_dims = set(soc_boundary.dims) + """Add linking constraints: SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]]. - for ctx in iterator: - soc_selector = iterator.build_selector(ctx, soc_dims) - soc_slice = soc_boundary.sel(**soc_selector) if soc_selector else soc_boundary + Uses vectorized xarray operations instead of loops. 
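+
+        The key step is xarray's vectorized indexing: indexing the 'cluster'
+        dimension of delta_soc with an indexer that itself carries an
+        'original_period' dimension yields one delta per original period.
+        Minimal sketch with plain xarray (hypothetical sizes):
+
+            >>> delta = xr.DataArray(np.arange(3), dims='cluster')
+            >>> order = xr.DataArray([0, 2, 2, 1], dims='original_period')
+            >>> delta.isel(cluster=order).dims
+            ('original_period',)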
+ """ + # SOC at boundary d+1 (after original period d completes) + soc_after = soc_boundary.isel(cluster_boundary=slice(1, None)) + # SOC at boundary d (before original period d starts) + soc_before = soc_boundary.isel(cluster_boundary=slice(None, -1)) - for d in range(n_original_periods): - c = int(ctx.cluster_order[d]) - delta_soc = delta_soc_dict[c] + # Rename cluster_boundary -> original_period for alignment + soc_after = soc_after.rename({'cluster_boundary': 'original_period'}) + soc_after = soc_after.assign_coords(original_period=np.arange(n_original_periods)) + soc_before = soc_before.rename({'cluster_boundary': 'original_period'}) + soc_before = soc_before.assign_coords(original_period=np.arange(n_original_periods)) - # Select delta_soc for this slice if it has the dimensions - delta_selector = iterator.build_selector(ctx, set(delta_soc.dims)) - if delta_selector: - delta_soc = delta_soc.sel(**delta_selector) + # Get delta_soc for each original period using cluster_order as advanced index + # cluster_order has dim 'original_period', delta_soc has dim 'cluster' + delta_soc_ordered = delta_soc.isel(cluster=cluster_order) - lhs = soc_slice.isel(cluster_boundary=d + 1) - soc_slice.isel(cluster_boundary=d) - delta_soc - self.add_constraints(lhs == 0, short_name=f'link_{d}{ctx.suffix}') + # Single vectorized constraint for all original periods + lhs = soc_after - soc_before - delta_soc_ordered + self.add_constraints(lhs == 0, short_name='link') def _add_combined_bound_constraints( self, - iterator, - cluster_structure, - soc_boundary, + soc_boundary: xr.DataArray, + cluster_order: xr.DataArray, has_investment: bool, n_original_periods: int, timesteps_per_cluster: int, ) -> None: """Add combined bound constraints: 0 <= SOC_boundary[d] + charge_state[t] <= capacity. - This method correctly handles multi-dimensional cases by iterating - over (period, scenario) combinations using the unified iterator. + Vectorizes over original_period dimension, loops over sample points. 
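+
+        Sampling sketch (illustrative; assumes timesteps_per_cluster == 24):
+        each original period d maps to cluster c = cluster_order[d], and the
+        combined SOC is checked at three offsets into that cluster:
+
+            sample_offsets = [0, 12, 24]             # start, mid, end
+            time_indices = cluster_starts + offset   # one index per original period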
""" charge_state = self.charge_state - investment_size = self.investment.size if has_investment and self.investment else None - soc_dims = set(soc_boundary.dims) - charge_dims = set(charge_state.dims) - inv_dims = set(investment_size.dims) if investment_size is not None else set() + # Get soc_boundary for each original period (boundary d, before period d starts) + soc_d = soc_boundary.isel(cluster_boundary=slice(None, -1)) # excludes final boundary + soc_d = soc_d.rename({'cluster_boundary': 'original_period'}) + soc_d = soc_d.assign_coords(original_period=np.arange(n_original_periods)) - for ctx in iterator: - soc_selector = iterator.build_selector(ctx, soc_dims) - charge_selector = iterator.build_selector(ctx, charge_dims) + # Sample offsets within each cluster (start, middle, end) + sample_offsets = [0, timesteps_per_cluster // 2, timesteps_per_cluster] + max_time_idx = len(charge_state.coords['time']) - 1 - soc_slice = soc_boundary.sel(**soc_selector) if soc_selector else soc_boundary - charge_slice = charge_state.sel(**charge_selector) if charge_selector else charge_state + # Convert cluster_order to numpy array for indexing + cluster_order_vals = cluster_order.values.astype(int) + cluster_starts = cluster_order_vals * timesteps_per_cluster - for d in range(n_original_periods): - c = int(ctx.cluster_order[d]) - cluster_start = c * timesteps_per_cluster - cluster_end = (c + 1) * timesteps_per_cluster + for sample_name, offset in zip(['start', 'mid', 'end'], sample_offsets, strict=False): + time_indices = np.clip(cluster_starts + offset, 0, max_time_idx) - soc_d = soc_slice.isel(cluster_boundary=d) + # Get charge_state at these time indices using numpy array indexer + cs_t = charge_state.isel(time=time_indices) - # Sample at key timesteps (start, middle, end) - check_indices = [ - cluster_start, - cluster_start + timesteps_per_cluster // 2, - cluster_end, - ] + # Rename 'time' dim to 'original_period' to align with soc_d + cs_t = cs_t.rename({'time': 'original_period'}) + cs_t = cs_t.assign_coords(original_period=np.arange(n_original_periods)) - for idx in check_indices: - if idx >= len(charge_state.coords['time']): - continue + # Combined SOC = soc_boundary[d] + charge_state[t] + combined = soc_d + cs_t - cs_t = charge_slice.isel(time=idx) - combined = soc_d + cs_t + # Lower bound constraint: combined >= 0 + self.add_constraints(combined >= 0, short_name=f'soc_lb_{sample_name}') - self.add_constraints( - combined >= 0, - short_name=f'soc_lb_{d}_{idx}{ctx.suffix}', - ) - - if investment_size is not None: - inv_selector = iterator.build_selector(ctx, inv_dims) - inv_slice = investment_size.sel(**inv_selector) if inv_selector else investment_size - self.add_constraints( - combined <= inv_slice, - short_name=f'soc_ub_{d}_{idx}{ctx.suffix}', - ) + # Upper bound constraint: combined <= capacity (for investment case) + if has_investment and self.investment is not None: + self.add_constraints(combined <= self.investment.size, short_name=f'soc_ub_{sample_name}') @property def _absolute_charge_state_bounds(self) -> tuple[xr.DataArray, xr.DataArray]: From 0142272cb6676c5869943a999f18c74c178a51b1 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 20 Dec 2025 21:09:02 +0100 Subject: [PATCH 125/191] Temp --- flixopt/components.py | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/flixopt/components.py b/flixopt/components.py index 2175d4c0d..3c2160b24 100644 --- a/flixopt/components.py +++ 
b/flixopt/components.py @@ -958,6 +958,13 @@ def _do_modeling(self): short_name=f'cluster_cyclic_{i}', ) + # Determine intercluster mode early (needed for investment bounding) + clustering = self._model.flow_system.clustering + is_intercluster = clustering is not None and self.element.cluster_mode in ( + 'intercluster', + 'intercluster_cyclic', + ) + # Create InvestmentModel and bounding constraints for investment if isinstance(self.element.capacity_in_flow_hours, InvestParameters): self.add_submodels( @@ -970,19 +977,18 @@ def _do_modeling(self): short_name='investment', ) - BoundingPatterns.scaled_bounds( - self, - variable=self.charge_state, - scaling_variable=self.investment.size, - relative_bounds=self._relative_charge_state_bounds, - ) + # For intercluster modes, charge_state represents delta from SOC_boundary (can be negative), + # and the combined bound constraints handle the relationship to investment.size. + # For non-intercluster modes, charge_state is absolute SOC and needs scaled bounds. + if not is_intercluster: + BoundingPatterns.scaled_bounds( + self, + variable=self.charge_state, + scaling_variable=self.investment.size, + relative_bounds=self._relative_charge_state_bounds, + ) # Initial charge state (only for non-intercluster modes) - clustering = self._model.flow_system.clustering - is_intercluster = clustering is not None and self.element.cluster_mode in ( - 'intercluster', - 'intercluster_cyclic', - ) if not is_intercluster: self._initial_and_final_charge_state() From b3346990397cdee9a39889a72bfe3190d0754b7d Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 20 Dec 2025 21:20:44 +0100 Subject: [PATCH 126/191] Temp --- flixopt/components.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/flixopt/components.py b/flixopt/components.py index 3c2160b24..019b6cc72 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -977,10 +977,20 @@ def _do_modeling(self): short_name='investment', ) - # For intercluster modes, charge_state represents delta from SOC_boundary (can be negative), - # and the combined bound constraints handle the relationship to investment.size. - # For non-intercluster modes, charge_state is absolute SOC and needs scaled bounds. - if not is_intercluster: + # For intercluster modes, charge_state represents delta from SOC_boundary (can be negative). + # The bound should be [-size, +size] (can discharge or charge by full capacity). + # For non-intercluster modes, charge_state is absolute SOC and needs [0, size] bounds. + if is_intercluster: + # Symmetric bounds: -size <= charge_state <= size + self.add_constraints( + self.charge_state >= -self.investment.size, + short_name='charge_state|lb', + ) + self.add_constraints( + self.charge_state <= self.investment.size, + short_name='charge_state|ub', + ) + else: BoundingPatterns.scaled_bounds( self, variable=self.charge_state, @@ -1117,7 +1127,8 @@ def _compute_delta_soc(self, n_clusters: int, timesteps_per_cluster: int) -> xr. change (end - start) for each cluster. 
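+
+        Worked instance (illustrative): with timesteps_per_cluster=24 and
+        n_clusters=2, starts = [0, 24] and ends = [23, 47], i.e. the last
+        timestep inside each cluster rather than the first of the next one.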
""" starts = np.arange(0, n_clusters * timesteps_per_cluster, timesteps_per_cluster) - ends = starts + timesteps_per_cluster + # Last timestep of each cluster (not first of next cluster) + ends = starts + timesteps_per_cluster - 1 # Compute delta for all clusters at once delta_soc = self.charge_state.isel(time=ends) - self.charge_state.isel(time=starts) # Replace 'time' dim with 'cluster' dim From db6e89a533c4f2cd5443d824ab1de1b9a0ea2993 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 20 Dec 2025 22:55:18 +0100 Subject: [PATCH 127/191] Set start SOC in intercluster model when non cyclic --- flixopt/components.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/flixopt/components.py b/flixopt/components.py index 019b6cc72..2cf9cbd5c 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -1096,12 +1096,28 @@ def _add_intercluster_linking(self) -> None: # 5. Add linking constraints - vectorized self._add_linking_constraints(soc_boundary, delta_soc, cluster_order, n_original_periods) - # 6. Add cyclic constraint if requested + # 6. Add cyclic or initial SOC constraint if self.element.cluster_mode == 'intercluster_cyclic': self.add_constraints( soc_boundary.isel(cluster_boundary=0) == soc_boundary.isel(cluster_boundary=n_original_periods), short_name='cyclic', ) + else: + # For non-cyclic intercluster mode, apply initial_charge_state to SOC_boundary[0] + initial = self.element.initial_charge_state + if initial is not None: + if isinstance(initial, str): + # 'equals_final' means SOC_boundary should be cyclic + self.add_constraints( + soc_boundary.isel(cluster_boundary=0) == soc_boundary.isel(cluster_boundary=n_original_periods), + short_name='initial_SOC_boundary', + ) + else: + # Numeric initial charge state + self.add_constraints( + soc_boundary.isel(cluster_boundary=0) == initial, + short_name='initial_SOC_boundary', + ) # 7. 
Add combined bound constraints - vectorized self._add_combined_bound_constraints( From 55fb607fb2e5f76b1f310d5bcee8fa153e1a933e Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 20 Dec 2025 23:01:45 +0100 Subject: [PATCH 128/191] Simplify cyclic constraint --- flixopt/components.py | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/flixopt/components.py b/flixopt/components.py index 2cf9cbd5c..21cccf7d6 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -946,17 +946,11 @@ def _do_modeling(self): # For 'cyclic' mode: each cluster's start equals its end if clustering is not None and self.element.cluster_mode == 'cyclic': - starts = clustering.cluster_start_positions - for i, start_pos in enumerate(starts): - # End of cluster i is at (start of cluster i+1) - 1, or last timestep for final cluster - if i < len(starts) - 1: - end_pos = starts[i + 1] # In timesteps_extra, this is the end of cluster i - else: - end_pos = len(self._model.flow_system.timesteps) # Last position in timesteps_extra - self.add_constraints( - charge_state.isel(time=start_pos) == charge_state.isel(time=end_pos), - short_name=f'cluster_cyclic_{i}', - ) + self.add_constraints( + charge_state.isel(time=clustering.cluster_start_positions) + == charge_state.isel(time=clustering.cluster_start_positions + clustering.timesteps_per_period - 1), + short_name='cluster_cyclic', + ) # Determine intercluster mode early (needed for investment bounding) clustering = self._model.flow_system.clustering From d610da7fdbba409cafe1b6c9a12c452347f31a2a Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 22 Dec 2025 13:56:34 +0100 Subject: [PATCH 129/191] Improve Storage Model --- flixopt/clustering/intercluster_helpers.py | 100 +++- flixopt/components.py | 539 ++++++++++++++------- 2 files changed, 452 insertions(+), 187 deletions(-) diff --git a/flixopt/clustering/intercluster_helpers.py b/flixopt/clustering/intercluster_helpers.py index fd0e41cce..d2a5eb9d3 100644 --- a/flixopt/clustering/intercluster_helpers.py +++ b/flixopt/clustering/intercluster_helpers.py @@ -1,7 +1,33 @@ """Helper utilities for inter-cluster storage linking. -This module provides reusable utilities for building inter-cluster storage linking +This module provides utilities for building inter-cluster storage linking constraints following the S-N model from Blanke et al. (2022). + +Background +---------- +When time series are clustered (aggregated into representative periods), storage +behavior needs special handling. The S-N linking model introduces: + +- **SOC_boundary**: Absolute state-of-charge at the boundary between original periods. + With N original periods, there are N+1 boundary points. + +- **Linking**: SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]] + Each boundary is connected to the next via the net charge change of the + representative cluster for that period. + +These utilities help construct the coordinates and bounds for SOC_boundary variables. + +References +---------- +- Blanke, T., et al. (2022). "Inter-Cluster Storage Linking for Time Series + Aggregation in Energy System Optimization Models." +- Kotzur, L., et al. (2018). "Time series aggregation for energy system design: + Modeling seasonal storage." + +See Also +-------- +:class:`flixopt.components.InterclusterStorageModel` + The storage model that uses these utilities. 
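+
+Worked instance (illustrative): with cluster_order = [0, 1, 1, 0] and
+delta_SOC = [+5, -2], the boundary states evolve as
+
+    SOC_boundary = [B0, B0 + 5, B0 + 3, B0 + 1, B0 + 6]
+
+so seasonal drift across four original periods is captured while only two
+representative clusters are actually optimized.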
""" from __future__ import annotations @@ -19,7 +45,16 @@ @dataclass class CapacityBounds: - """Extracted capacity bounds for storage SOC_boundary variables.""" + """Bounds for SOC_boundary variable creation. + + This dataclass holds the lower and upper bounds for the SOC_boundary variable, + along with a flag indicating whether investment sizing is used. + + Attributes: + lower: Lower bound DataArray (typically zeros). + upper: Upper bound DataArray (capacity or maximum investment size). + has_investment: True if the storage uses InvestParameters for sizing. + """ lower: xr.DataArray upper: xr.DataArray @@ -27,24 +62,42 @@ class CapacityBounds: def extract_capacity_bounds( - capacity_param: InvestParameters | int | float, + capacity_param: InvestParameters | int | float | None, boundary_coords: dict, boundary_dims: list[str], ) -> CapacityBounds: - """Extract capacity bounds from storage parameters. + """Extract capacity bounds from storage parameters for SOC_boundary variable. + + This function determines the appropriate bounds for the SOC_boundary variable + based on the storage's capacity parameter: + + - **Fixed capacity** (numeric): Upper bound is the fixed value. + - **InvestParameters**: Upper bound is maximum_size (or fixed_size if set). + The actual bound is enforced via separate constraints linked to investment.size. + - **None/Unbounded**: Upper bound is set to a large value (1e9). - Handles: - - Fixed numeric values - - InvestParameters with fixed_size or maximum_size - - xr.DataArray with dimensions + The lower bound is always zero (SOC cannot be negative). Args: - capacity_param: The capacity parameter (InvestParameters or scalar). - boundary_coords: Coordinates for SOC_boundary variable. + capacity_param: Storage capacity specification. Can be: + - Numeric (int/float): Fixed capacity + - InvestParameters: Investment-based sizing with min/max + - None: Unbounded storage + boundary_coords: Coordinate dictionary for SOC_boundary variable. + Must contain 'cluster_boundary' key. boundary_dims: Dimension names for SOC_boundary variable. + First dimension must be 'cluster_boundary'. Returns: CapacityBounds with lower/upper bounds and investment flag. + + Example: + >>> coords, dims = build_boundary_coords(14, flow_system) + >>> bounds = extract_capacity_bounds(InvestParameters(maximum_size=10000), coords, dims) + >>> bounds.has_investment + True + >>> bounds.upper.max() + 10000.0 """ n_boundaries = len(boundary_coords['cluster_boundary']) lb_shape = [n_boundaries] + [len(boundary_coords[d]) for d in boundary_dims[1:]] @@ -82,14 +135,33 @@ def build_boundary_coords( n_original_periods: int, flow_system: FlowSystem, ) -> tuple[dict, list[str]]: - """Build coordinates and dimensions for SOC_boundary variables. + """Build coordinates and dimensions for SOC_boundary variable. + + Creates the coordinate dictionary and dimension list needed to create the + SOC_boundary variable. The primary dimension is 'cluster_boundary' with + N+1 values (one for each boundary between N original periods). + + Additional dimensions (period, scenario) are included if present in the + FlowSystem, ensuring the SOC_boundary variable has the correct shape for + multi-period or stochastic optimizations. Args: - n_original_periods: Number of original (non-aggregated) periods. - flow_system: The FlowSystem containing period/scenario dimensions. + n_original_periods: Number of original (non-aggregated) time periods. 
+ For example, if a year is clustered into 8 typical days but originally + had 365 days, this would be 365. + flow_system: The FlowSystem containing optional period/scenario dimensions. Returns: - Tuple of (coords dict, dims list) ready for variable creation. + Tuple of (coords, dims) where: + - coords: Dictionary mapping dimension names to coordinate arrays + - dims: List of dimension names in order + + Example: + >>> coords, dims = build_boundary_coords(14, flow_system) + >>> dims + ['cluster_boundary'] # or ['cluster_boundary', 'period'] if periods exist + >>> coords['cluster_boundary'] + array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]) """ n_boundaries = n_original_periods + 1 coords = {'cluster_boundary': np.arange(n_boundaries)} diff --git a/flixopt/components.py b/flixopt/components.py index 21cccf7d6..cb5aa63fe 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -446,8 +446,32 @@ def __init__( self.cluster_mode = cluster_mode def create_model(self, model: FlowSystemModel) -> StorageModel: + """Create the appropriate storage model based on cluster_mode and flow system state. + + For intercluster modes ('intercluster', 'intercluster_cyclic'), uses + :class:`InterclusterStorageModel` which implements S-N linking. + For other modes, uses the base :class:`StorageModel`. + + Args: + model: The FlowSystemModel to add constraints to. + + Returns: + StorageModel or InterclusterStorageModel instance. + """ self._plausibility_checks() - self.submodel = StorageModel(model, self) + + # Use InterclusterStorageModel for intercluster modes when clustering is active + clustering = model.flow_system.clustering + is_intercluster = clustering is not None and self.cluster_mode in ( + 'intercluster', + 'intercluster_cyclic', + ) + + if is_intercluster: + self.submodel = InterclusterStorageModel(model, self) + else: + self.submodel = StorageModel(model, self) + return self.submodel def link_to_flow_system(self, flow_system, prefix: str = '') -> None: @@ -952,13 +976,6 @@ def _do_modeling(self): short_name='cluster_cyclic', ) - # Determine intercluster mode early (needed for investment bounding) - clustering = self._model.flow_system.clustering - is_intercluster = clustering is not None and self.element.cluster_mode in ( - 'intercluster', - 'intercluster_cyclic', - ) - # Create InvestmentModel and bounding constraints for investment if isinstance(self.element.capacity_in_flow_hours, InvestParameters): self.add_submodels( @@ -971,34 +988,15 @@ def _do_modeling(self): short_name='investment', ) - # For intercluster modes, charge_state represents delta from SOC_boundary (can be negative). - # The bound should be [-size, +size] (can discharge or charge by full capacity). - # For non-intercluster modes, charge_state is absolute SOC and needs [0, size] bounds. 
- if is_intercluster: - # Symmetric bounds: -size <= charge_state <= size - self.add_constraints( - self.charge_state >= -self.investment.size, - short_name='charge_state|lb', - ) - self.add_constraints( - self.charge_state <= self.investment.size, - short_name='charge_state|ub', - ) - else: - BoundingPatterns.scaled_bounds( - self, - variable=self.charge_state, - scaling_variable=self.investment.size, - relative_bounds=self._relative_charge_state_bounds, - ) - - # Initial charge state (only for non-intercluster modes) - if not is_intercluster: - self._initial_and_final_charge_state() + BoundingPatterns.scaled_bounds( + self, + variable=self.charge_state, + scaling_variable=self.investment.size, + relative_bounds=self._relative_charge_state_bounds, + ) - # Add inter-cluster linking for intercluster modes - if is_intercluster: - self._add_intercluster_linking() + # Initial and final charge state constraints + self._initial_and_final_charge_state() # Balanced sizes if self.element.balanced: @@ -1032,15 +1030,280 @@ def _initial_and_final_charge_state(self): short_name='final_charge_min', ) + @property + def _absolute_charge_state_bounds(self) -> tuple[xr.DataArray, xr.DataArray]: + """Get absolute bounds for charge_state variable. + + For base StorageModel, charge_state represents absolute SOC with bounds + derived from relative bounds scaled by capacity. + + Note: + InterclusterStorageModel overrides this to provide symmetric bounds + since charge_state represents ΔE (relative change from cluster start). + """ + relative_lower_bound, relative_upper_bound = self._relative_charge_state_bounds + + if self.element.capacity_in_flow_hours is None: + return (0, np.inf) + elif isinstance(self.element.capacity_in_flow_hours, InvestParameters): + cap_min = self.element.capacity_in_flow_hours.minimum_or_fixed_size + cap_max = self.element.capacity_in_flow_hours.maximum_or_fixed_size + return ( + relative_lower_bound * cap_min, + relative_upper_bound * cap_max, + ) + else: + cap = self.element.capacity_in_flow_hours + return ( + relative_lower_bound * cap, + relative_upper_bound * cap, + ) + + @property + def _relative_charge_state_bounds(self) -> tuple[xr.DataArray, xr.DataArray]: + """ + Get relative charge state bounds with final timestep values. 
+ + Returns: + Tuple of (minimum_bounds, maximum_bounds) DataArrays extending to final timestep + """ + final_coords = {'time': [self._model.flow_system.timesteps_extra[-1]]} + + # Get final minimum charge state + if self.element.relative_minimum_final_charge_state is None: + min_final = self.element.relative_minimum_charge_state.isel(time=-1, drop=True) + else: + min_final = self.element.relative_minimum_final_charge_state + min_final = min_final.expand_dims('time').assign_coords(time=final_coords['time']) + + # Get final maximum charge state + if self.element.relative_maximum_final_charge_state is None: + max_final = self.element.relative_maximum_charge_state.isel(time=-1, drop=True) + else: + max_final = self.element.relative_maximum_final_charge_state + max_final = max_final.expand_dims('time').assign_coords(time=final_coords['time']) + # Concatenate with original bounds + min_bounds = xr.concat([self.element.relative_minimum_charge_state, min_final], dim='time') + max_bounds = xr.concat([self.element.relative_maximum_charge_state, max_final], dim='time') + + return min_bounds, max_bounds + + @property + def _investment(self) -> InvestmentModel | None: + """Deprecated alias for investment""" + return self.investment + + @property + def investment(self) -> InvestmentModel | None: + """Investment feature""" + if 'investment' not in self.submodels: + return None + return self.submodels['investment'] + + @property + def charge_state(self) -> linopy.Variable: + """Charge state variable""" + return self['charge_state'] + + @property + def netto_discharge(self) -> linopy.Variable: + """Netto discharge variable""" + return self['netto_discharge'] + + +class InterclusterStorageModel(StorageModel): + """Storage model with inter-cluster linking for clustered optimization. + + This class extends :class:`StorageModel` to support inter-cluster storage linking + when using time series aggregation (clustering). It implements the S-N linking model + from Blanke et al. (2022) to properly value seasonal storage in clustered optimizations. + + The Problem with Naive Clustering + --------------------------------- + When time series are clustered (e.g., 365 days → 8 typical days), storage behavior + is fundamentally misrepresented if each cluster operates independently: + + - **Seasonal patterns are lost**: A battery might charge in summer and discharge in + winter, but with independent clusters, each "typical summer day" cannot transfer + energy to the "typical winter day". + - **Storage value is underestimated**: Without inter-cluster linking, storage can only + provide intra-day flexibility, not seasonal arbitrage. + + The S-N Linking Model + --------------------- + This model introduces two key concepts: + + 1. **SOC_boundary**: Absolute state-of-charge at the boundary between original periods. + With N original periods, there are N+1 boundary points (including start and end). + + 2. **charge_state (ΔE)**: Relative change in SOC within each representative cluster, + measured from the cluster start (where ΔE = 0). + + The actual SOC at any timestep t within original period d is:: + + SOC(t) = SOC_boundary[d] + ΔE(t) + + Key Constraints + --------------- + 1. **Cluster start constraint**: ``ΔE(cluster_start) = 0`` + Each representative cluster starts with zero relative charge. + + 2. 
**Linking constraint**: ``SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]]`` + The boundary SOC after period d equals the boundary before plus the net + charge/discharge of the representative cluster for that period. + + 3. **Combined bounds**: ``0 ≤ SOC_boundary[d] + ΔE(t) ≤ capacity`` + The actual SOC must stay within physical bounds. + + 4. **Cyclic constraint** (for ``intercluster_cyclic`` mode): + ``SOC_boundary[0] = SOC_boundary[N]`` + The storage returns to its initial state over the full time horizon. + + Variables Created + ----------------- + - ``SOC_boundary``: Absolute SOC at each original period boundary. + Shape: (n_original_periods + 1,) plus any period/scenario dimensions. + + Constraints Created + ------------------- + - ``cluster_start``: Forces ΔE = 0 at start of each representative cluster. + - ``link``: Links consecutive SOC_boundary values via delta_SOC. + - ``cyclic`` or ``initial_SOC_boundary``: Initial/final boundary condition. + - ``soc_lb_start/mid/end``: Lower bound on combined SOC at sample points. + - ``soc_ub_start/mid/end``: Upper bound on combined SOC (if investment). + - ``SOC_boundary_ub``: Links SOC_boundary to investment size (if investment). + - ``charge_state|lb/ub``: Symmetric bounds on ΔE for intercluster modes. + + References + ---------- + - Blanke, T., et al. (2022). "Inter-Cluster Storage Linking for Time Series + Aggregation in Energy System Optimization Models." + - Kotzur, L., et al. (2018). "Time series aggregation for energy system design: + Modeling seasonal storage." + + See Also + -------- + :class:`StorageModel` : Base storage model without inter-cluster linking. + :class:`Storage` : The element class that creates this model. + + Example + ------- + The model is automatically used when a Storage has ``cluster_mode='intercluster'`` + or ``cluster_mode='intercluster_cyclic'`` and the FlowSystem has been clustered:: + + storage = Storage( + label='seasonal_storage', + charging=charge_flow, + discharging=discharge_flow, + capacity_in_flow_hours=InvestParameters(maximum_size=10000), + cluster_mode='intercluster_cyclic', # Enable inter-cluster linking + ) + + # Cluster the flow system + fs_clustered = flow_system.transform.cluster(n_clusters=8) + fs_clustered.optimize(solver) + + # Access the SOC_boundary in results + soc_boundary = fs_clustered.solution['seasonal_storage|SOC_boundary'] + """ + + def _do_modeling(self): + """Create storage model with inter-cluster linking constraints. + + Extends the base StorageModel by: + 1. Skipping initial/final charge state constraints (handled via SOC_boundary) + 2. Using symmetric bounds on charge_state (ΔE can be negative) + 3. 
Adding SOC_boundary variable and linking constraints + """ + # Call grandparent's _do_modeling (ComponentModel), not parent's + # We need to rebuild because intercluster mode changes bounds and constraints + ComponentModel._do_modeling(self) + + # Create charge_state with symmetric bounds for ΔE + lb, ub = self._absolute_charge_state_bounds + self.add_variables( + lower=lb, + upper=ub, + coords=self._model.get_coords(extra_timestep=True), + short_name='charge_state', + ) + + self.add_variables(coords=self._model.get_coords(), short_name='netto_discharge') + + # Create netto_discharge constraint + self.add_constraints( + self.netto_discharge + == self.element.discharging.submodel.flow_rate - self.element.charging.submodel.flow_rate, + short_name='netto_discharge', + ) + + # Build energy balance (same as base class, but with cluster boundary masking) + charge_state = self.charge_state + rel_loss = self.element.relative_loss_per_hour + timestep_duration = self._model.timestep_duration + charge_rate = self.element.charging.submodel.flow_rate + discharge_rate = self.element.discharging.submodel.flow_rate + eff_charge = self.element.eta_charge + eff_discharge = self.element.eta_discharge + + lhs = ( + charge_state.isel(time=slice(1, None)) + - charge_state.isel(time=slice(None, -1)) * ((1 - rel_loss) ** timestep_duration) + - charge_rate * eff_charge * timestep_duration + + discharge_rate * timestep_duration / eff_discharge + ) + + # Mask out inter-cluster boundaries + clustering = self._model.flow_system.clustering + mask = np.ones(lhs.sizes['time'], dtype=bool) + mask[clustering.cluster_start_positions[1:] - 1] = False + mask = xr.DataArray(mask, coords={'time': lhs.coords['time']}) + + self.add_constraints(lhs == 0, short_name='charge_state', mask=mask) + + # Create InvestmentModel if needed + if isinstance(self.element.capacity_in_flow_hours, InvestParameters): + self.add_submodels( + InvestmentModel( + model=self._model, + label_of_element=self.label_of_element, + label_of_model=self.label_of_element, + parameters=self.element.capacity_in_flow_hours, + ), + short_name='investment', + ) + + # Symmetric bounds: -size <= charge_state <= size + self.add_constraints( + self.charge_state >= -self.investment.size, + short_name='charge_state|lb', + ) + self.add_constraints( + self.charge_state <= self.investment.size, + short_name='charge_state|ub', + ) + + # Add inter-cluster linking (the main contribution of this class) + self._add_intercluster_linking() + + # Balanced sizes + if self.element.balanced: + self.add_constraints( + self.element.charging.submodel._investment.size * 1 + == self.element.discharging.submodel._investment.size * 1, + short_name='balanced_sizes', + ) + def _add_intercluster_linking(self) -> None: - """Add inter-cluster storage linking for aggregated optimization. - - Following the S-N model from Blanke et al. (2022), this method: - 1. Constrains charge_state at each cluster start to 0 (ΔE_0 = 0) - 2. Creates SOC_boundary variables to track absolute SOC across original periods - 3. Links via: SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]] - 4. Adds bounds: 0 ≤ SOC_boundary[d] + charge_state[t] ≤ capacity - 5. Optionally enforces cyclic: SOC_boundary[0] = SOC_boundary[end] + """Add inter-cluster storage linking following the S-N model. + + This method implements the core inter-cluster linking logic: + + 1. Constrains charge_state (ΔE) at each cluster start to 0 + 2. Creates SOC_boundary variables to track absolute SOC + 3. 
Links boundaries via: SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC + 4. Adds combined bounds: 0 ≤ SOC_boundary + ΔE ≤ capacity + 5. Enforces initial/cyclic constraint on SOC_boundary """ from .clustering.intercluster_helpers import ( build_boundary_coords, @@ -1061,7 +1324,7 @@ def _add_intercluster_linking(self) -> None: n_original_periods = cluster_structure.n_original_periods cluster_order = cluster_structure.cluster_order - # 1. Add cluster start constraints (ΔE_0 = 0) - vectorized + # 1. Constrain ΔE = 0 at cluster starts self._add_cluster_start_constraints(n_clusters, timesteps_per_cluster) # 2. Create SOC_boundary variable @@ -1077,43 +1340,42 @@ def _add_intercluster_linking(self) -> None: short_name='SOC_boundary', ) - # 3. Add SOC_boundary <= investment.size for investment-based storage + # 3. Link SOC_boundary to investment size if capacity_bounds.has_investment and self.investment is not None: self.add_constraints( soc_boundary <= self.investment.size, short_name='SOC_boundary_ub', ) - # 4. Compute delta_SOC as DataArray with 'cluster' dimension + # 4. Compute delta_SOC for each cluster delta_soc = self._compute_delta_soc(n_clusters, timesteps_per_cluster) - # 5. Add linking constraints - vectorized + # 5. Add linking constraints self._add_linking_constraints(soc_boundary, delta_soc, cluster_order, n_original_periods) - # 6. Add cyclic or initial SOC constraint + # 6. Add cyclic or initial constraint if self.element.cluster_mode == 'intercluster_cyclic': self.add_constraints( soc_boundary.isel(cluster_boundary=0) == soc_boundary.isel(cluster_boundary=n_original_periods), short_name='cyclic', ) else: - # For non-cyclic intercluster mode, apply initial_charge_state to SOC_boundary[0] + # Apply initial_charge_state to SOC_boundary[0] initial = self.element.initial_charge_state if initial is not None: if isinstance(initial, str): - # 'equals_final' means SOC_boundary should be cyclic + # 'equals_final' means cyclic self.add_constraints( soc_boundary.isel(cluster_boundary=0) == soc_boundary.isel(cluster_boundary=n_original_periods), short_name='initial_SOC_boundary', ) else: - # Numeric initial charge state self.add_constraints( soc_boundary.isel(cluster_boundary=0) == initial, short_name='initial_SOC_boundary', ) - # 7. Add combined bound constraints - vectorized + # 7. Add combined bound constraints self._add_combined_bound_constraints( soc_boundary, cluster_order, @@ -1123,7 +1385,15 @@ def _add_intercluster_linking(self) -> None: ) def _add_cluster_start_constraints(self, n_clusters: int, timesteps_per_cluster: int) -> None: - """Constrain charge_state at each cluster start to 0 (ΔE_0 = 0).""" + """Constrain ΔE = 0 at the start of each representative cluster. + + This ensures that the relative charge state is measured from a known + reference point (the cluster start). + + Args: + n_clusters: Number of representative clusters. + timesteps_per_cluster: Timesteps in each cluster. + """ cluster_starts = np.arange(0, n_clusters * timesteps_per_cluster, timesteps_per_cluster) self.add_constraints( self.charge_state.isel(time=cluster_starts) == 0, @@ -1131,17 +1401,24 @@ def _add_cluster_start_constraints(self, n_clusters: int, timesteps_per_cluster: ) def _compute_delta_soc(self, n_clusters: int, timesteps_per_cluster: int) -> xr.DataArray: - """Compute delta_SOC for each representative cluster as a DataArray. + """Compute net SOC change (delta_SOC) for each representative cluster. 
+ + The delta_SOC is the difference between the charge_state at the end + and start of each cluster: delta_SOC[c] = ΔE(end_c) - ΔE(start_c). - Returns DataArray with 'cluster' dimension containing the net charge state - change (end - start) for each cluster. + Since ΔE(start) = 0 by constraint, this simplifies to delta_SOC[c] = ΔE(end_c). + + Args: + n_clusters: Number of representative clusters. + timesteps_per_cluster: Timesteps in each cluster. + + Returns: + DataArray with 'cluster' dimension containing delta_SOC for each cluster. """ starts = np.arange(0, n_clusters * timesteps_per_cluster, timesteps_per_cluster) - # Last timestep of each cluster (not first of next cluster) ends = starts + timesteps_per_cluster - 1 - # Compute delta for all clusters at once + delta_soc = self.charge_state.isel(time=ends) - self.charge_state.isel(time=starts) - # Replace 'time' dim with 'cluster' dim return delta_soc.assign_coords(time=np.arange(n_clusters)).rename({'time': 'cluster'}) def _add_linking_constraints( @@ -1151,26 +1428,32 @@ def _add_linking_constraints( cluster_order: xr.DataArray, n_original_periods: int, ) -> None: - """Add linking constraints: SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]]. + """Add constraints linking consecutive SOC_boundary values. + + Implements: SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]] - Uses vectorized xarray operations instead of loops. + This connects the SOC at the end of original period d to the SOC at the + start of period d+1, using the net charge change from the representative + cluster that was mapped to period d. + + Args: + soc_boundary: SOC_boundary variable. + delta_soc: Net SOC change per cluster. + cluster_order: Mapping from original periods to representative clusters. + n_original_periods: Number of original (non-clustered) periods. """ - # SOC at boundary d+1 (after original period d completes) soc_after = soc_boundary.isel(cluster_boundary=slice(1, None)) - # SOC at boundary d (before original period d starts) soc_before = soc_boundary.isel(cluster_boundary=slice(None, -1)) - # Rename cluster_boundary -> original_period for alignment + # Rename for alignment soc_after = soc_after.rename({'cluster_boundary': 'original_period'}) soc_after = soc_after.assign_coords(original_period=np.arange(n_original_periods)) soc_before = soc_before.rename({'cluster_boundary': 'original_period'}) soc_before = soc_before.assign_coords(original_period=np.arange(n_original_periods)) - # Get delta_soc for each original period using cluster_order as advanced index - # cluster_order has dim 'original_period', delta_soc has dim 'cluster' + # Get delta_soc for each original period using cluster_order delta_soc_ordered = delta_soc.isel(cluster=cluster_order) - # Single vectorized constraint for all original periods lhs = soc_after - soc_before - delta_soc_ordered self.add_constraints(lhs == 0, short_name='link') @@ -1182,138 +1465,48 @@ def _add_combined_bound_constraints( n_original_periods: int, timesteps_per_cluster: int, ) -> None: - """Add combined bound constraints: 0 <= SOC_boundary[d] + charge_state[t] <= capacity. + """Add constraints ensuring actual SOC stays within bounds. - Vectorizes over original_period dimension, loops over sample points. + The actual SOC is: SOC(t) = SOC_boundary[d] + ΔE(t) + + This must satisfy: 0 ≤ SOC(t) ≤ capacity + + Since checking every timestep is expensive, we sample at the start, + middle, and end of each cluster. + + Args: + soc_boundary: SOC_boundary variable. 
+ cluster_order: Mapping from original periods to clusters. + has_investment: Whether the storage has investment sizing. + n_original_periods: Number of original periods. + timesteps_per_cluster: Timesteps in each cluster. """ charge_state = self.charge_state - # Get soc_boundary for each original period (boundary d, before period d starts) - soc_d = soc_boundary.isel(cluster_boundary=slice(None, -1)) # excludes final boundary + soc_d = soc_boundary.isel(cluster_boundary=slice(None, -1)) soc_d = soc_d.rename({'cluster_boundary': 'original_period'}) soc_d = soc_d.assign_coords(original_period=np.arange(n_original_periods)) - # Sample offsets within each cluster (start, middle, end) - sample_offsets = [0, timesteps_per_cluster // 2, timesteps_per_cluster] + sample_offsets = [0, timesteps_per_cluster // 2, timesteps_per_cluster - 1] max_time_idx = len(charge_state.coords['time']) - 1 - # Convert cluster_order to numpy array for indexing cluster_order_vals = cluster_order.values.astype(int) cluster_starts = cluster_order_vals * timesteps_per_cluster for sample_name, offset in zip(['start', 'mid', 'end'], sample_offsets, strict=False): time_indices = np.clip(cluster_starts + offset, 0, max_time_idx) - # Get charge_state at these time indices using numpy array indexer cs_t = charge_state.isel(time=time_indices) - - # Rename 'time' dim to 'original_period' to align with soc_d cs_t = cs_t.rename({'time': 'original_period'}) cs_t = cs_t.assign_coords(original_period=np.arange(n_original_periods)) - # Combined SOC = soc_boundary[d] + charge_state[t] combined = soc_d + cs_t - # Lower bound constraint: combined >= 0 self.add_constraints(combined >= 0, short_name=f'soc_lb_{sample_name}') - # Upper bound constraint: combined <= capacity (for investment case) if has_investment and self.investment is not None: self.add_constraints(combined <= self.investment.size, short_name=f'soc_ub_{sample_name}') - @property - def _absolute_charge_state_bounds(self) -> tuple[xr.DataArray, xr.DataArray]: - relative_lower_bound, relative_upper_bound = self._relative_charge_state_bounds - - # For inter-cluster modes, charge_state represents relative change from cluster start (ΔE) - # which can be negative (discharge) or positive (charge). The actual SOC is SOC_boundary + ΔE. - # We set lower bound to -capacity to allow the full range. - clustering = self._model.flow_system.clustering - is_intercluster = clustering is not None and self.element.cluster_mode in ( - 'intercluster', - 'intercluster_cyclic', - ) - - if self.element.capacity_in_flow_hours is None: - # Unbounded storage: lower bound is 0 (or -inf for intercluster), upper bound is infinite - return (-np.inf if is_intercluster else 0, np.inf) - elif isinstance(self.element.capacity_in_flow_hours, InvestParameters): - cap_min = self.element.capacity_in_flow_hours.minimum_or_fixed_size - cap_max = self.element.capacity_in_flow_hours.maximum_or_fixed_size - if is_intercluster: - # For inter-cluster, charge_state is relative to cluster start (ΔE in S-N model) - # ΔE can be negative (discharge) or positive (charge), so allow full range. - # Create bounds with proper time dimension using the shape from relative bounds. 
- ones = xr.ones_like(relative_upper_bound) - return (-ones * cap_max, ones * cap_max) - else: - return ( - relative_lower_bound * cap_min, - relative_upper_bound * cap_max, - ) - else: - cap = self.element.capacity_in_flow_hours - if is_intercluster: - # Same as above: create bounds with time dimension - ones = xr.ones_like(relative_upper_bound) - return (-ones * cap, ones * cap) - else: - return ( - relative_lower_bound * cap, - relative_upper_bound * cap, - ) - - @property - def _relative_charge_state_bounds(self) -> tuple[xr.DataArray, xr.DataArray]: - """ - Get relative charge state bounds with final timestep values. - - Returns: - Tuple of (minimum_bounds, maximum_bounds) DataArrays extending to final timestep - """ - final_coords = {'time': [self._model.flow_system.timesteps_extra[-1]]} - - # Get final minimum charge state - if self.element.relative_minimum_final_charge_state is None: - min_final = self.element.relative_minimum_charge_state.isel(time=-1, drop=True) - else: - min_final = self.element.relative_minimum_final_charge_state - min_final = min_final.expand_dims('time').assign_coords(time=final_coords['time']) - - # Get final maximum charge state - if self.element.relative_maximum_final_charge_state is None: - max_final = self.element.relative_maximum_charge_state.isel(time=-1, drop=True) - else: - max_final = self.element.relative_maximum_final_charge_state - max_final = max_final.expand_dims('time').assign_coords(time=final_coords['time']) - # Concatenate with original bounds - min_bounds = xr.concat([self.element.relative_minimum_charge_state, min_final], dim='time') - max_bounds = xr.concat([self.element.relative_maximum_charge_state, max_final], dim='time') - - return min_bounds, max_bounds - - @property - def _investment(self) -> InvestmentModel | None: - """Deprecated alias for investment""" - return self.investment - - @property - def investment(self) -> InvestmentModel | None: - """Investment feature""" - if 'investment' not in self.submodels: - return None - return self.submodels['investment'] - - @property - def charge_state(self) -> linopy.Variable: - """Charge state variable""" - return self['charge_state'] - - @property - def netto_discharge(self) -> linopy.Variable: - """Netto discharge variable""" - return self['netto_discharge'] - @register_class_for_io class SourceAndSink(Component): From 5a2f08e2934f9e4633ce50f2eb97812d2ab4c3e6 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 25 Dec 2025 21:29:13 +0100 Subject: [PATCH 130/191] Add plan --- docs/design/cluster_architecture.md | 652 ++++++++++++++++++++++++++++ 1 file changed, 652 insertions(+) create mode 100644 docs/design/cluster_architecture.md diff --git a/docs/design/cluster_architecture.md b/docs/design/cluster_architecture.md new file mode 100644 index 000000000..843b9e8ab --- /dev/null +++ b/docs/design/cluster_architecture.md @@ -0,0 +1,652 @@ +The enhanced# Design Document: Cluster Architecture for flixopt + +## Executive Summary + +This document explores architectural options for improving cluster representation in flixopt, addressing: +1. Enhanced cluster helpers for the current flat time structure +2. Impact on StatusModel and other Features +3. Improved UX for cluster visualization and plotting +4. 
Future support for variable segmentation per cluster/period/scenario + +--- + +## Part 1: Current Architecture Analysis + +### 1.1 Time Dimension Structure + +**Current Implementation:** +``` +time: (n_clusters × timesteps_per_cluster,) # Flat, e.g., (864,) for 9 clusters × 96 timesteps +``` + +**Key Properties:** +- `cluster_weight`: Shape `(time,)` with repeated values per cluster +- `timestep_duration`: Shape `(time,)` or scalar +- `aggregation_weight = timestep_duration × cluster_weight` + +**Cluster Tracking:** +- `cluster_start_positions`: Array of indices where each cluster begins +- `ClusterStructure`: Stores cluster_order, occurrences, n_clusters, timesteps_per_cluster + +### 1.2 Features Affected by Time Structure + +| Feature | Time Usage | Clustering Impact | +|---------|-----------|-------------------| +| **StatusModel** | `aggregation_weight` for active hours, `timestep_duration` for effects | Must sum correctly across clusters | +| **InvestmentModel** | Periodic (no time dim) | Unaffected by time structure | +| **PiecewiseModel** | Per-timestep lambda variables | Must preserve cluster structure | +| **ShareAllocationModel** | Uses `cluster_weight` explicitly | Directly depends on weight structure | +| **StorageModel** | Charge balance across time | Needs cluster boundary handling | +| **InterclusterStorageModel** | SOC_boundary linking | Uses cluster indices extensively | + +### 1.3 Current Plotting Structure + +**StatisticsPlotAccessor Methods:** +- `balance()`: Node flow visualization +- `storage()`: Dual-axis charge/discharge + SOC +- `heatmap()`: 2D time reshaping (days × hours) +- `duration_curve()`: Sorted load profiles +- `effects()`: Cost/emission breakdown + +**Clustering in Plots:** +- `ClusterStructure.plot()`: Shows cluster assignments +- Cluster weight applied when aggregating (`cluster_weight.sum('time')`) +- No visual separation between clusters in time series plots + +--- + +## Part 2: Architectural Options + +### 2.1 Option A: Enhanced Flat with Cluster Helpers (Recommended) + +Keep flat `time` dimension but add rich helper infrastructure: + +```python +class Clustering: + # Core properties + cluster_labels: xr.DataArray # (time,) or (time, period, scenario) + timesteps_per_cluster: xr.DataArray # (cluster,) or (cluster, period, scenario) + + # Index helpers (period/scenario-aware) + def cluster_start_indices(self, period=None, scenario=None) -> np.ndarray + def cluster_end_indices(self, period=None, scenario=None) -> np.ndarray + def cluster_slices(self, period=None, scenario=None) -> dict[int, slice] + + # Data access helpers + def get_cluster_data(self, data, cluster_id, period=None, scenario=None) + def iter_clusters(self, data, period=None, scenario=None) + def get_cluster_boundaries(self, data, period=None, scenario=None) + def compute_delta_per_cluster(self, data, period=None, scenario=None) + + # Boundary variability + boundaries_vary: bool + boundaries_vary_by_period: bool + boundaries_vary_by_scenario: bool +``` + +**Pros:** +- Supports variable-length clusters +- Supports different boundaries per period/scenario +- Minimal breaking changes +- linopy-compatible + +**Cons:** +- Less intuitive than true `(cluster, time)` shape +- Requires helper methods for clean code + +### 2.2 Option B: True (cluster, time) Dimensions + +Reshape time to 2D when clustering is active: + +```python +# Clustered mode +data.dims = ('cluster', 'time', 'period', 'scenario') +data.shape = (9, 96, ...) 
# 9 clusters × 96 timesteps each +``` + +**Pros:** +- Clean, intuitive structure +- Natural indexing: `data[:, -1] - data[:, 0]` for delta +- No boundary masking needed + +**Cons:** +- Requires uniform cluster lengths +- Different boundaries per period/scenario very complex +- Major refactoring across codebase + +### 2.3 Option C: Padded Rectangular with Masks + +Use `(cluster, max_time)` with NaN padding for shorter clusters: + +```python +data.shape = (9, 96, ...) # Pad shorter clusters +valid_mask.shape = (9, 96) # True where data is valid +``` + +**Pros:** +- Clean cluster dimension +- Supports variable lengths + +**Cons:** +- Wasted memory/computation +- Complex masking in all operations +- linopy constraints need `.where(mask)` + +### 2.4 Recommendation: Option A (Enhanced Flat) + +Given the requirements for: +- Variable-length clusters (future segmentation) +- Different boundaries per period/scenario +- Minimal breaking changes + +**Option A is the most practical choice.** + +--- + +## Part 3: Impact on Features + +### 3.1 StatusModel Impact + +**Current Code (features.py:200-211):** +```python +# Active hours tracking +tracked_expression=(self.status * self._model.aggregation_weight).sum('time') +``` + +**With Enhanced Helpers:** +No changes needed - `aggregation_weight` already handles clustering correctly. + +**Potential Enhancement:** +Could add per-cluster status summaries for visualization: +```python +@property +def status_per_cluster(self) -> xr.DataArray: + """Active hours per cluster.""" + clustering = self.flow_system.clustering + if clustering is None: + return None + # Use helpers to compute per-cluster active time + return clustering.aggregate_per_cluster( + self.status * self._model.timestep_duration + ) +``` + +### 3.2 StorageModel Impact + +**Current Code (components.py):** +- Uses `cluster_start_positions` for boundary masking +- InterclusterStorageModel has complex index calculations + +**With Enhanced Helpers:** +```python +# Before: Manual index calculation +start_positions = clustering.cluster_start_positions +end_positions = start_positions[1:] - 1 + +# After: Clean helper usage +clustering = self.flow_system.clustering +delta_soc = clustering.compute_delta_per_cluster(self.charge_state) +``` + +### 3.3 ShareAllocationModel Impact + +**Current Code (features.py:624):** +```python +self._eq_total.lhs -= (self.total_per_timestep * self._model.cluster_weight).sum(dim='time') +``` + +**With Enhanced Helpers:** +No changes needed - `cluster_weight` structure preserved. + +### 3.4 PiecewiseModel Impact + +**Current Code:** Creates lambda variables per timestep. + +**With Enhanced Helpers:** +No changes needed - operates on flat time dimension. + +--- + +## Part 4: Plotting Improvements + +### 4.1 Current UX Issues + +1. **No visual cluster separation**: Time series plots show continuous lines +2. **Cluster identity hidden**: Hard to see which timesteps belong to which cluster +3. 
**SOC continuity misleading**: Storage plots suggest continuous operation
+
+### 4.2 Proposed Improvements
+
+#### 4.2.1 Cluster-Separated Time Series
+
+Add visual separators between clusters:
+
+```python
+def plot_with_cluster_separation(self, data, **kwargs):
+    """Plot time series with vertical lines between clusters."""
+    fig = self._create_base_plot(data, **kwargs)
+
+    if self._fs.is_clustered:
+        for start_idx in self._fs.clustering.cluster_start_indices()[1:]:
+            fig.add_vline(x=data.time[start_idx], line_dash='dash', opacity=0.3)
+
+    return fig
+```
+
+#### 4.2.2 Faceted Cluster View
+
+Display each cluster as a separate subplot:
+
+```python
+def storage_by_cluster(self, storage_label, **kwargs):
+    """Plot storage operation with one subplot per cluster."""
+    data = self._get_storage_data(storage_label)
+
+    if not self._fs.is_clustered:
+        return self.storage(storage_label, **kwargs)
+
+    # Reshape to (cluster, within_cluster_time)
+    clustering = self._fs.clustering
+    facet_data = []
+    for cluster_id, cluster_slice in clustering.cluster_slices().items():
+        cluster_data = data.isel(time=cluster_slice)
+        cluster_data = cluster_data.assign_coords(
+            cluster=cluster_id,
+            within_time=range(cluster_data.sizes['time'])
+        )
+        facet_data.append(cluster_data)
+
+    combined = xr.concat(facet_data, dim='cluster')
+    return self._plot_faceted(combined, facet_col='cluster', **kwargs)
+```
+
+#### 4.2.3 Cluster Summary Statistics
+
+Add aggregate views per cluster:
+
+```python
+def cluster_summary(self, variable, statistic='mean'):
+    """Show per-cluster statistics as bar chart."""
+    data = self._get_variable(variable)
+    clustering = self._fs.clustering
+
+    summaries = []
+    for cluster_id, cluster_slice in clustering.cluster_slices().items():
+        cluster_data = data.isel(time=cluster_slice)
+        if statistic == 'mean':
+            val = cluster_data.mean('time')
+        elif statistic == 'max':
+            val = cluster_data.max('time')
+        elif statistic == 'min':
+            val = cluster_data.min('time')
+        else:
+            raise ValueError(f'Unknown statistic: {statistic}')
+        summaries.append(val.assign_coords(cluster=cluster_id))
+
+    return self._plot_bar(xr.concat(summaries, dim='cluster'))
+```
+
+#### 4.2.4 Inter-Cluster SOC Visualization
+
+Show SOC_boundary values for intercluster storage:
+
+```python
+def intercluster_soc(self, storage_label):
+    """Plot SOC boundaries across original timeline."""
+    storage = self._get_component(storage_label)
+    if not hasattr(storage.submodel, 'SOC_boundary'):
+        raise ValueError("Storage not in intercluster mode")
+
+    soc_boundary = storage.submodel.SOC_boundary.solution
+    cluster_order = self._fs.clustering.cluster_order
+
+    # Plot SOC at each original period boundary
+    fig = go.Figure()
+    fig.add_trace(go.Scatter(
+        x=range(len(soc_boundary)),
+        y=soc_boundary.values,
+        mode='lines+markers',
+        name='SOC Boundary'
+    ))
+    fig.update_layout(
+        xaxis_title='Original Period',
+        yaxis_title='State of Charge',
+        title=f'{storage_label} Inter-Cluster SOC'
+    )
+    return PlotResult(fig)
+```
+
+### 4.3 Heatmap Enhancements
+
+Current heatmap reshapes time to (days, hours).
For clustered data: + +```python +def cluster_heatmap(self, variable): + """Heatmap with clusters on y-axis, within-cluster time on x-axis.""" + data = self._get_variable(variable) + clustering = self._fs.clustering + + # Reshape: (total_time,) -> (n_clusters, timesteps_per_cluster) + reshaped = data.values.reshape( + clustering.n_clusters, + clustering.timesteps_per_cluster + ) + + return self._plot_heatmap( + reshaped, + x_label='Within-Cluster Time', + y_label='Cluster', + colorbar_title=variable + ) +``` + +--- + +## Part 5: Variable Segmentation Architecture + +### 5.1 Segmentation Types + +| Type | Description | Complexity | +|------|-------------|------------| +| **Uniform segments** | All clusters have same structure | Current implementation | +| **Variable per cluster** | Cluster 1: 24 steps, Cluster 2: 48 steps | Medium | +| **Variable per period** | Period 1 clusters differ from Period 2 | High | +| **Variable per scenario** | Scenario A differs from Scenario B | High | +| **Full variability** | Different per (cluster, period, scenario) | Very High | + +### 5.2 TSAM Segmentation Features + +TSAM supports intra-period segmentation: +```python +tsam.TimeSeriesAggregation( + segmentation=True, # Enable subdivision + noSegments=6, # Segments per typical period + segmentRepresentationMethod='meanRepresentation' +) +``` + +**What TSAM provides:** +- Uniform segment count across all typical periods +- Various representation methods (mean, medoid, distribution) +- Segment duration = `timesteps_per_cluster / noSegments` + +**What TSAM does NOT provide:** +- Variable segment lengths within a period +- Different segment counts per cluster + +### 5.3 Implementing Variable Segmentation + +#### 5.3.1 Data Structures + +```python +@dataclass +class SegmentStructure: + """Structure for variable-length segments within clusters.""" + + # Shape: (cluster,) - number of segments in each cluster + n_segments_per_cluster: xr.DataArray + + # Shape: (cluster, max_segments) - duration of each segment (NaN if not used) + segment_durations: xr.DataArray + + # Shape: (cluster, max_segments) - start index within cluster + segment_start_indices: xr.DataArray + + # For period/scenario variation, add those dims to all arrays + + def get_segment_slice(self, cluster, segment, period=None, scenario=None) -> slice: + """Get time slice for a specific segment.""" + ... 
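+
+    # Illustrative sketch only (assumed semantics, not a committed API):
+    # if segment_start_indices and segment_durations store integer timestep
+    # counts, the lookup could reduce to
+    #
+    #     start = int(self.segment_start_indices.sel(cluster=cluster, segment=segment))
+    #     length = int(self.segment_durations.sel(cluster=cluster, segment=segment))
+    #     return slice(start, start + length)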
+``` + +#### 5.3.2 Enhanced ClusterStructure + +```python +class ClusterStructure: + # Existing + cluster_order: xr.DataArray + cluster_occurrences: xr.DataArray + n_clusters: int + timesteps_per_cluster: int | xr.DataArray # Allow variable + + # New for segmentation + segmentation: SegmentStructure | None + + # Period/scenario awareness + _boundaries_by_slice: dict[tuple, BoundaryInfo] # (period, scenario) -> info + + @property + def has_variable_boundaries(self) -> bool: + """True if boundaries differ across periods/scenarios.""" + return len(self._boundaries_by_slice) > 1 + + def get_boundaries(self, period=None, scenario=None) -> BoundaryInfo: + """Get cluster boundaries for specific period/scenario.""" + key = (period, scenario) + if key in self._boundaries_by_slice: + return self._boundaries_by_slice[key] + return self._default_boundaries +``` + +#### 5.3.3 Integration with FlowSystem + +```python +class FlowSystem: + @property + def is_clustered(self) -> bool: + return self.clustering is not None + + @property + def has_segmentation(self) -> bool: + return self.is_clustered and self.clustering.segmentation is not None + + @property + def has_variable_cluster_lengths(self) -> bool: + """True if clusters have different numbers of timesteps.""" + if not self.is_clustered: + return False + tpc = self.clustering.timesteps_per_cluster + if isinstance(tpc, int): + return False + return len(np.unique(tpc)) > 1 +``` + +### 5.4 Constraint Generation with Variable Segments + +When segment lengths vary, constraint generation must loop or use advanced indexing: + +```python +def _add_charge_state_constraints(self): + clustering = self.flow_system.clustering + + if not clustering.has_variable_boundaries: + # Vectorized path - all clusters have same structure + self._add_charge_state_vectorized() + else: + # Loop path - boundaries vary + for period in self.flow_system.periods or [None]: + for scenario in self.flow_system.scenarios or [None]: + self._add_charge_state_for_slice(period, scenario) + +def _add_charge_state_for_slice(self, period, scenario): + """Add constraints for specific period/scenario slice.""" + boundaries = self.clustering.get_boundaries(period, scenario) + + for cluster_id in range(boundaries.n_clusters): + slc = boundaries.cluster_slices[cluster_id] + cs_cluster = self.charge_state.isel(time=slc) + + if period is not None: + cs_cluster = cs_cluster.sel(period=period) + if scenario is not None: + cs_cluster = cs_cluster.sel(scenario=scenario) + + # Add constraints for this cluster + self._add_balance_for_cluster(cs_cluster, cluster_id, period, scenario) +``` + +--- + +## Part 6: Implementation Roadmap + +### Phase 1: Core Helpers (Minimal Change) + +**Goal:** Add cluster helpers without changing existing behavior. + +**Tasks:** +1. Add `is_clustered`, `n_clusters` to FlowSystem +2. Add `cluster_labels`, `cluster_slices`, index methods to Clustering +3. Add `boundaries_vary` flag infrastructure +4. Refactor InterclusterStorageModel to use helpers + +**Files:** +- `flixopt/clustering/base.py` +- `flixopt/flow_system.py` +- `flixopt/components.py` + +### Phase 2: Plotting Improvements + +**Goal:** Better cluster visualization UX. + +**Tasks:** +1. Add cluster separator lines to time series plots +2. Implement `storage_by_cluster()` faceted view +3. Add `cluster_summary()` statistics +4. Implement `cluster_heatmap()` +5. 
Add `intercluster_soc()` for inter-cluster storage + +**Files:** +- `flixopt/statistics_accessor.py` +- `flixopt/plotting.py` + +### Phase 3: Period/Scenario-Aware Helpers + +**Goal:** Support different cluster boundaries per period/scenario. + +**Tasks:** +1. Extend helper methods with period/scenario parameters +2. Add `_get_boundaries(period, scenario)` dispatch +3. Update constraint generation to loop when needed +4. Update tests for varying boundaries + +**Files:** +- `flixopt/clustering/base.py` +- `flixopt/components.py` +- `flixopt/features.py` (if needed) + +### Phase 4: Segmentation Infrastructure + +**Goal:** Prepare for tsam segmentation support. + +**Tasks:** +1. Define `SegmentStructure` dataclass +2. Integrate with `ClusterStructure` +3. Update `transform_accessor.cluster()` to accept segmentation params +4. Update constraint generation for segments + +**Files:** +- `flixopt/clustering/base.py` +- `flixopt/transform_accessor.py` +- `flixopt/components.py` + +--- + +## Part 7: Testing Strategy + +### 7.1 Unit Tests + +```python +# Test cluster helpers +def test_cluster_labels_uniform(): + """Verify cluster_labels for uniform cluster lengths.""" + +def test_cluster_slices_variable(): + """Verify cluster_slices for variable cluster lengths.""" + +def test_boundaries_vary_by_period(): + """Verify boundary dispatch for different periods.""" +``` + +### 7.2 Integration Tests + +```python +# Test storage with different cluster modes +def test_storage_intercluster_with_helpers(): + """Verify intercluster storage using new helpers.""" + +def test_storage_variable_boundaries(): + """Verify storage with period-varying boundaries.""" +``` + +### 7.3 Plotting Tests + +```python +# Test new plot methods +def test_storage_by_cluster_facets(): + """Verify faceted cluster view.""" + +def test_cluster_heatmap(): + """Verify cluster heatmap rendering.""" +``` + +--- + +## Part 8: Open Questions + +1. **Naming**: Should the coordinate be `cluster` or `cluster_idx`? +2. **Default behavior**: When boundaries vary, should helpers require period/scenario or auto-detect? +3. **Segmentation granularity**: Support arbitrary segments or only tsam's uniform segments? +4. **Backwards compatibility**: Keep old `cluster_start_positions` property or deprecate? 
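+
+For question 4, one option is a thin deprecation shim on `Clustering`, so
+existing code keeps working while users migrate to the new helpers. This is
+an illustrative sketch of that option, not a committed decision
+(`cluster_start_indices()` as proposed in Option A above):
+
+```python
+import warnings
+
+@property
+def cluster_start_positions(self) -> np.ndarray:
+    """Deprecated: use cluster_start_indices() instead."""
+    warnings.warn(
+        'cluster_start_positions is deprecated; use cluster_start_indices()',
+        DeprecationWarning,
+        stacklevel=2,
+    )
+    return self.cluster_start_indices()
+```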
+ +--- + +## Appendix A: File Reference + +| File | Purpose | +|------|---------| +| `flixopt/clustering/base.py` | ClusterStructure, Clustering classes | +| `flixopt/clustering/intercluster_helpers.py` | SOC boundary utilities | +| `flixopt/flow_system.py` | FlowSystem with is_clustered property | +| `flixopt/transform_accessor.py` | cluster() method, solution expansion | +| `flixopt/components.py` | StorageModel, InterclusterStorageModel | +| `flixopt/features.py` | StatusModel, ShareAllocationModel | +| `flixopt/statistics_accessor.py` | Plotting methods | +| `flixopt/plotting.py` | Plot utilities | + +## Appendix B: Code Examples + +### B.1 Using Enhanced Helpers + +```python +# Get cluster boundaries +clustering = flow_system.clustering +starts, ends = clustering.get_cluster_boundaries(charge_state) + +# Compute delta SOC per cluster +delta_soc = clustering.compute_delta_per_cluster(charge_state) + +# Iterate over clusters +for cluster_id, cluster_data in clustering.iter_clusters(flow_rate): + process(cluster_data) +``` + +### B.2 Faceted Storage Plot + +```python +# Plot storage with cluster facets +fs.statistics.plot.storage_by_cluster('Battery') + +# Plot cluster summary +fs.statistics.plot.cluster_summary('HeatDemand|Q_th', statistic='max') +``` + +### B.3 Variable Boundaries + +```python +# Check if boundaries vary +if clustering.boundaries_vary: + for period in fs.periods: + slices = clustering.cluster_slices(period=period) + # Process per-period +else: + slices = clustering.cluster_slices() + # Single set of slices +``` From c099b0b51c86d594708104a967fff093f0d624d1 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 25 Dec 2025 21:38:47 +0100 Subject: [PATCH 131/191] Update plan --- docs/design/cluster_architecture.md | 339 ++++++++++++++++++++-------- 1 file changed, 249 insertions(+), 90 deletions(-) diff --git a/docs/design/cluster_architecture.md b/docs/design/cluster_architecture.md index 843b9e8ab..7e0725efc 100644 --- a/docs/design/cluster_architecture.md +++ b/docs/design/cluster_architecture.md @@ -1,4 +1,4 @@ -The enhanced# Design Document: Cluster Architecture for flixopt +# Design Document: Cluster Architecture for flixopt ## Executive Summary @@ -57,42 +57,135 @@ time: (n_clusters × timesteps_per_cluster,) # Flat, e.g., (864,) for 9 cluster ## Part 2: Architectural Options -### 2.1 Option A: Enhanced Flat with Cluster Helpers (Recommended) +### 2.1 Option A: Enhanced Flat with xarray-based Indexers (Recommended) -Keep flat `time` dimension but add rich helper infrastructure: +Keep flat `time` dimension but add **xarray-based indexer properties** that work seamlessly with `.isel()`: ```python class Clustering: - # Core properties - cluster_labels: xr.DataArray # (time,) or (time, period, scenario) - timesteps_per_cluster: xr.DataArray # (cluster,) or (cluster, period, scenario) - - # Index helpers (period/scenario-aware) - def cluster_start_indices(self, period=None, scenario=None) -> np.ndarray - def cluster_end_indices(self, period=None, scenario=None) -> np.ndarray - def cluster_slices(self, period=None, scenario=None) -> dict[int, slice] - - # Data access helpers - def get_cluster_data(self, data, cluster_id, period=None, scenario=None) - def iter_clusters(self, data, period=None, scenario=None) - def get_cluster_boundaries(self, data, period=None, scenario=None) - def compute_delta_per_cluster(self, data, period=None, scenario=None) - - # Boundary variability - boundaries_vary: bool - boundaries_vary_by_period: bool - 
boundaries_vary_by_scenario: bool + # ═══════════════════════════════════════════════════════════════ + # CORE INDEXER PROPERTIES (xarray DataArrays) + # ═══════════════════════════════════════════════════════════════ + + @property + def cluster_start(self) -> xr.DataArray: + """Time indices of cluster starts. + + Shape: (cluster,) + Values: [0, 96, 192, ...] for 96 timesteps per cluster + + Usage: + # Select start of each cluster (broadcasts across period/scenario) + data.isel(time=clustering.cluster_start) + + # Shift by 1 for "second timestep of each cluster" + data.isel(time=clustering.cluster_start + 1) + """ + + @property + def cluster_end(self) -> xr.DataArray: + """Time indices of cluster ends (last timestep, inclusive). + + Shape: (cluster,) + Values: [95, 191, 287, ...] for 96 timesteps per cluster + + Usage: + # Select end of each cluster + data.isel(time=clustering.cluster_end) + + # Compute delta (end - start) for each cluster + delta = data.isel(time=clustering.cluster_end) - data.isel(time=clustering.cluster_start) + """ + + @property + def within_cluster_time(self) -> xr.DataArray: + """Within-cluster time index for each timestep. + + Shape: (time,) + Values: [0, 1, 2, ..., 95, 0, 1, 2, ..., 95, ...] # repeating pattern + + Usage: + # Select all timesteps at position 12 within their cluster + mask = clustering.within_cluster_time == 12 + data.where(mask, drop=True) + """ + + @property + def cluster(self) -> xr.DataArray: + """Cluster ID for each timestep. + + Shape: (time,) + Values: [0, 0, ..., 0, 1, 1, ..., 1, ...] # cluster assignment + + Usage: + # Group by cluster + data.groupby(clustering.cluster).mean() + """ + + # ═══════════════════════════════════════════════════════════════ + # CONVENIENCE PROPERTIES + # ═══════════════════════════════════════════════════════════════ + + @property + def n_clusters(self) -> int: + """Number of clusters.""" + + @property + def timesteps_per_cluster(self) -> int: + """Timesteps in each cluster (uniform).""" + + @property + def cluster_coords(self) -> xr.DataArray: + """Cluster coordinate values: [0, 1, 2, ..., n_clusters-1]""" +``` + +**Key Design Principle: Indexers are xarray DataArrays** + +This enables powerful, dimension-preserving operations: + +```python +# ═══════════════════════════════════════════════════════════════ +# EXAMPLE: Select start of each cluster (works across all dims!) +# ═══════════════════════════════════════════════════════════════ +charge_state = ... # shape: (time, period, scenario) e.g., (864, 2, 3) + +# Get cluster starts - returns shape (cluster, period, scenario) +cs_at_starts = charge_state.isel(time=clustering.cluster_start) +# Result shape: (9, 2, 3) for 9 clusters + +# ═══════════════════════════════════════════════════════════════ +# EXAMPLE: Compute delta per cluster +# ═══════════════════════════════════════════════════════════════ +delta = ( + charge_state.isel(time=clustering.cluster_end) - + charge_state.isel(time=clustering.cluster_start) +) +# Result shape: (cluster, period, scenario) = (9, 2, 3) + +# ═══════════════════════════════════════════════════════════════ +# EXAMPLE: Shift indexer for charge_state (has extra timestep!) 
+# ═══════════════════════════════════════════════════════════════ +# charge_state has shape (time+1,) due to extra boundary timestep +# Need to shift indices by cluster position +cs_at_ends = charge_state.isel(time=clustering.cluster_end + 1) # +1 for boundary + +# ═══════════════════════════════════════════════════════════════ +# EXAMPLE: Select specific within-cluster position +# ═══════════════════════════════════════════════════════════════ +# Get all values at hour 12 within each cluster +hour_12_mask = clustering.within_cluster_time == 12 +peak_values = data.where(hour_12_mask, drop=True) ``` **Pros:** -- Supports variable-length clusters -- Supports different boundaries per period/scenario -- Minimal breaking changes -- linopy-compatible +- Pure xarray - no numpy/dict gymnastics +- Dimension-preserving: indexers broadcast across period/scenario automatically +- Easy adjustments: `cluster_start + 1`, `cluster_end - 1` +- Works with linopy variables directly +- Clean, intuitive API **Cons:** -- Less intuitive than true `(cluster, time)` shape -- Requires helper methods for clean code +- tsam uniform segments only (sufficient per user requirement) ### 2.2 Option B: True (cluster, time) Dimensions @@ -485,67 +578,104 @@ def _add_charge_state_for_slice(self, period, scenario): --- -## Part 6: Implementation Roadmap +## Part 6: Implementation Roadmap (Focused) -### Phase 1: Core Helpers (Minimal Change) +### Phase 1: xarray-based Indexers (PRIORITY) -**Goal:** Add cluster helpers without changing existing behavior. +**Goal:** Add xarray-based cluster indexer properties to `Clustering`. **Tasks:** -1. Add `is_clustered`, `n_clusters` to FlowSystem -2. Add `cluster_labels`, `cluster_slices`, index methods to Clustering -3. Add `boundaries_vary` flag infrastructure -4. Refactor InterclusterStorageModel to use helpers +1. Add `cluster_start` property → `xr.DataArray` with dims `(cluster,)` +2. Add `cluster_end` property → `xr.DataArray` with dims `(cluster,)` +3. Add `cluster` property → `xr.DataArray` with dims `(time,)` for cluster labels +4. Add `within_cluster_time` property → `xr.DataArray` with dims `(time,)` +5. Add convenience: `n_clusters`, `timesteps_per_cluster`, `cluster_coords` +6. Add `is_clustered` property to `FlowSystem` **Files:** -- `flixopt/clustering/base.py` -- `flixopt/flow_system.py` -- `flixopt/components.py` +- `flixopt/clustering/base.py` - Add indexer properties to `Clustering` +- `flixopt/flow_system.py` - Add `is_clustered` convenience property -### Phase 2: Plotting Improvements +**Example Implementation:** +```python +@property +def cluster_start(self) -> xr.DataArray: + """Time indices where each cluster starts.""" + indices = np.arange(0, self.n_clusters * self.timesteps_per_cluster, self.timesteps_per_cluster) + return xr.DataArray(indices, dims=['cluster'], coords={'cluster': np.arange(self.n_clusters)}) + +@property +def cluster_end(self) -> xr.DataArray: + """Time indices where each cluster ends (inclusive).""" + return self.cluster_start + self.timesteps_per_cluster - 1 +``` + +### Phase 2: Refactor InterclusterStorageModel -**Goal:** Better cluster visualization UX. +**Goal:** Use new xarray indexers in `InterclusterStorageModel`. **Tasks:** -1. Add cluster separator lines to time series plots -2. Implement `storage_by_cluster()` faceted view -3. Add `cluster_summary()` statistics -4. Implement `cluster_heatmap()` -5. Add `intercluster_soc()` for inter-cluster storage +1. 
Replace manual index calculations with `clustering.cluster_start`, `clustering.cluster_end` +2. Simplify `_compute_delta_soc()` using indexer arithmetic +3. Simplify `_add_cluster_start_constraints()` using indexers +4. Handle charge_state offset (extra timestep) cleanly **Files:** -- `flixopt/statistics_accessor.py` -- `flixopt/plotting.py` +- `flixopt/components.py` - Refactor `InterclusterStorageModel` -### Phase 3: Period/Scenario-Aware Helpers +**Before/After Example:** +```python +# BEFORE: Manual calculation +start_positions = clustering.cluster_start_positions +end_positions = start_positions[1:] - 1 +delta = charge_state.isel(time=end_indices) - charge_state.isel(time=start_indices) + +# AFTER: xarray indexers +# Note: charge_state has +1 timesteps, so shift accordingly +delta = ( + self.charge_state.isel(time=clustering.cluster_end + 1) - + self.charge_state.isel(time=clustering.cluster_start) +) +``` + +### Phase 3: expand_solution() with Offset Handling -**Goal:** Support different cluster boundaries per period/scenario. +**Goal:** Proper solution expansion for variables with different time structures. **Tasks:** -1. Extend helper methods with period/scenario parameters -2. Add `_get_boundaries(period, scenario)` dispatch -3. Update constraint generation to loop when needed -4. Update tests for varying boundaries +1. Update `expand_solution()` to detect variable type (regular vs charge_state) +2. Add offset handling for intercluster charge_state expansion +3. Map SOC_boundary values to original timeline correctly +4. Test with all storage cluster_modes **Files:** -- `flixopt/clustering/base.py` -- `flixopt/components.py` -- `flixopt/features.py` (if needed) +- `flixopt/transform_accessor.py` - Update `expand_solution()` +- `flixopt/clustering/base.py` - Add expansion helpers if needed -### Phase 4: Segmentation Infrastructure +**Key Insight:** +```python +def expand_solution(): + for var_name, var_data in solution.items(): + if 'charge_state' in var_name and is_intercluster: + # Special handling: map SOC_boundary to original period boundaries + expanded = _expand_intercluster_soc(var_data) + else: + # Normal expansion using timestep_mapping + expanded = result.expand_data(var_data) +``` + +### Phase 4: Cluster Plotting -**Goal:** Prepare for tsam segmentation support. +**Goal:** Individual cluster visualization. **Tasks:** -1. Define `SegmentStructure` dataclass -2. Integrate with `ClusterStructure` -3. Update `transform_accessor.cluster()` to accept segmentation params -4. Update constraint generation for segments +1. Add `storage_by_cluster()` - faceted view of each cluster +2. Add `cluster_heatmap()` - clusters on y-axis, within-cluster time on x-axis +3. Add cluster separator lines to existing time series plots +4. Add `intercluster_soc()` for SOC_boundary visualization **Files:** -- `flixopt/clustering/base.py` -- `flixopt/transform_accessor.py` -- `flixopt/components.py` +- `flixopt/statistics_accessor.py` - Add plot methods --- @@ -589,12 +719,14 @@ def test_cluster_heatmap(): --- -## Part 8: Open Questions +## Part 8: Decisions (Resolved) -1. **Naming**: Should the coordinate be `cluster` or `cluster_idx`? -2. **Default behavior**: When boundaries vary, should helpers require period/scenario or auto-detect? -3. **Segmentation granularity**: Support arbitrary segments or only tsam's uniform segments? -4. **Backwards compatibility**: Keep old `cluster_start_positions` property or deprecate? 
+| Question | Decision | +|----------|----------| +| **Naming** | Use `cluster` as the dimension/coordinate name | +| **Indexer return type** | Always return proper multi-dimensional xarray DataArrays | +| **Segmentation** | tsam uniform segments only (sufficient for current needs) | +| **Backwards compatibility** | Not a concern - this is not released yet | --- @@ -613,40 +745,67 @@ def test_cluster_heatmap(): ## Appendix B: Code Examples -### B.1 Using Enhanced Helpers +### B.1 Using xarray Indexers ```python -# Get cluster boundaries clustering = flow_system.clustering -starts, ends = clustering.get_cluster_boundaries(charge_state) -# Compute delta SOC per cluster -delta_soc = clustering.compute_delta_per_cluster(charge_state) - -# Iterate over clusters -for cluster_id, cluster_data in clustering.iter_clusters(flow_rate): - process(cluster_data) +# ═══════════════════════════════════════════════════════════════ +# Select values at cluster boundaries +# ═══════════════════════════════════════════════════════════════ +flow_at_starts = flow_rate.isel(time=clustering.cluster_start) +flow_at_ends = flow_rate.isel(time=clustering.cluster_end) + +# ═══════════════════════════════════════════════════════════════ +# Compute delta per cluster (e.g., for storage charge change) +# ═══════════════════════════════════════════════════════════════ +delta = data.isel(time=clustering.cluster_end) - data.isel(time=clustering.cluster_start) +# Result has dims: (cluster, period, scenario) if those exist + +# ═══════════════════════════════════════════════════════════════ +# Handle charge_state (has extra timestep at end of each cluster) +# ═══════════════════════════════════════════════════════════════ +# charge_state shape: (time + n_clusters,) due to boundary timesteps +cs_at_cluster_start = charge_state.isel(time=clustering.cluster_start) +cs_at_cluster_end = charge_state.isel(time=clustering.cluster_end + 1) # +1 for boundary + +# ═══════════════════════════════════════════════════════════════ +# Group operations by cluster +# ═══════════════════════════════════════════════════════════════ +mean_per_cluster = data.groupby(clustering.cluster).mean() +max_per_cluster = data.groupby(clustering.cluster).max() + +# ═══════════════════════════════════════════════════════════════ +# Select specific within-cluster timestep +# ═══════════════════════════════════════════════════════════════ +# Get all peak hours (e.g., hour 18) from each cluster +peak_mask = clustering.within_cluster_time == 18 +peak_values = data.where(peak_mask, drop=True) ``` ### B.2 Faceted Storage Plot ```python -# Plot storage with cluster facets +# Plot storage with each cluster as separate subplot fs.statistics.plot.storage_by_cluster('Battery') -# Plot cluster summary -fs.statistics.plot.cluster_summary('HeatDemand|Q_th', statistic='max') +# Heatmap: clusters on y-axis, within-cluster time on x-axis +fs.statistics.plot.cluster_heatmap('HeatDemand|Q_th') + +# Inter-cluster SOC trajectory +fs.statistics.plot.intercluster_soc('Battery') ``` -### B.3 Variable Boundaries +### B.3 Check Clustering Status ```python -# Check if boundaries vary -if clustering.boundaries_vary: - for period in fs.periods: - slices = clustering.cluster_slices(period=period) - # Process per-period +if flow_system.is_clustered: + clustering = flow_system.clustering + print(f"Clustered: {clustering.n_clusters} clusters × {clustering.timesteps_per_cluster} timesteps") + + # Access indexers + print(f"Cluster starts: {clustering.cluster_start.values}") + print(f"Cluster 
ends: {clustering.cluster_end.values}") else: - slices = clustering.cluster_slices() - # Single set of slices + print("Not clustered - full time resolution") ``` From c048496cc6a66f1f04e955c8eec2233f2af365e1 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 25 Dec 2025 21:40:32 +0100 Subject: [PATCH 132/191] Update plan --- docs/design/cluster_architecture.md | 64 ++++++++++++++++++++++------- 1 file changed, 50 insertions(+), 14 deletions(-) diff --git a/docs/design/cluster_architecture.md b/docs/design/cluster_architecture.md index 7e0725efc..5738487b4 100644 --- a/docs/design/cluster_architecture.md +++ b/docs/design/cluster_architecture.md @@ -664,18 +664,46 @@ def expand_solution(): expanded = result.expand_data(var_data) ``` -### Phase 4: Cluster Plotting +### Phase 4: Cluster-Aware Plotting (Minimal Code) -**Goal:** Individual cluster visualization. +**Goal:** Leverage existing plot infrastructure - no new methods needed! + +**Key Insight:** Add `cluster` as a coordinate to solution data, then existing faceting works: + +```python +# EXISTING API - no new methods needed! +fs.statistics.plot.storage('Battery', facet_col='cluster') # faceted by cluster +fs.statistics.plot.balance('Heat', facet_col='cluster') # works automatically +fs.statistics.plot.flows(..., facet_col='cluster') # same pattern +``` **Tasks:** -1. Add `storage_by_cluster()` - faceted view of each cluster -2. Add `cluster_heatmap()` - clusters on y-axis, within-cluster time on x-axis -3. Add cluster separator lines to existing time series plots -4. Add `intercluster_soc()` for SOC_boundary visualization +1. Add `cluster` coordinate to solution Dataset when clustered +2. Auto-add cluster separator lines in time series plots (if clustered) +3. Ensure `facet_col='cluster'` works with existing plot methods + +**Implementation:** +```python +# In expand_solution() or statistics accessor: +if flow_system.is_clustered: + # Add cluster coordinate to all time-dimensioned variables + solution = solution.assign_coords(cluster=('time', clustering.cluster.values)) + +# In plot methods (minimal change): +def _create_base_plot(self, data, **kwargs): + fig = ... # existing logic + + # Auto-add cluster separators if clustered + if self._fs.is_clustered and 'time' in data.dims: + for idx in self._fs.clustering.cluster_start.values[1:]: + fig.add_vline(x=idx, line_dash='dot', opacity=0.3) + + return fig +``` **Files:** -- `flixopt/statistics_accessor.py` - Add plot methods +- `flixopt/transform_accessor.py` - Add cluster coord in expand_solution() +- `flixopt/statistics_accessor.py` - Add separator lines (small change to base plot) --- @@ -783,17 +811,25 @@ peak_mask = clustering.within_cluster_time == 18 peak_values = data.where(peak_mask, drop=True) ``` -### B.2 Faceted Storage Plot +### B.2 Cluster Plotting (Uses Existing API!) 
```python -# Plot storage with each cluster as separate subplot -fs.statistics.plot.storage_by_cluster('Battery') +# ═══════════════════════════════════════════════════════════════ +# Facet by cluster - uses existing facet_col parameter +# ═══════════════════════════════════════════════════════════════ +fs.statistics.plot.storage('Battery', facet_col='cluster') +fs.statistics.plot.balance('Heat', facet_col='cluster') +fs.statistics.plot.flows(..., facet_col='cluster') -# Heatmap: clusters on y-axis, within-cluster time on x-axis -fs.statistics.plot.cluster_heatmap('HeatDemand|Q_th') +# ═══════════════════════════════════════════════════════════════ +# Regular plots auto-add cluster separator lines when clustered +# ═══════════════════════════════════════════════════════════════ +fs.statistics.plot.storage('Battery') # separators added automatically -# Inter-cluster SOC trajectory -fs.statistics.plot.intercluster_soc('Battery') +# ═══════════════════════════════════════════════════════════════ +# Combine with other facets +# ═══════════════════════════════════════════════════════════════ +fs.statistics.plot.balance('Heat', facet_col='cluster', facet_row='scenario') ``` ### B.3 Check Clustering Status From d12651157cdc8b0cacbbc04ff00f08fb776ad5c9 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 25 Dec 2025 21:46:26 +0100 Subject: [PATCH 133/191] Update plan --- docs/design/cluster_architecture.md | 115 +++++++++++++++++++++++----- 1 file changed, 95 insertions(+), 20 deletions(-) diff --git a/docs/design/cluster_architecture.md b/docs/design/cluster_architecture.md index 5738487b4..9b696b6bb 100644 --- a/docs/design/cluster_architecture.md +++ b/docs/design/cluster_architecture.md @@ -238,32 +238,65 @@ Given the requirements for: ## Part 3: Impact on Features -### 3.1 StatusModel Impact +### 3.1 StatusModel Impact - CRITICAL ISSUE -**Current Code (features.py:200-211):** -```python -# Active hours tracking -tracked_expression=(self.status * self._model.aggregation_weight).sum('time') +**Problem:** StatusModel has temporal constraints that span timesteps: + +| Constraint | Current Behavior | Problem with Clustering | +|------------|------------------|------------------------| +| `min_uptime=4` | Must stay on 4 consecutive hours | Spans cluster boundaries incorrectly | +| `min_downtime=2` | Must stay off 2 consecutive hours | Same issue | +| `initial_status` | Status before t=0 | Undefined at each cluster start | +| `effects_per_startup` | Cost per on→off transition | Counted per cluster, not per original period | + +**Example of the bug:** ``` +Cluster 0: [t=0...t=95] - component turns ON at t=90 +Cluster 1: [t=96...t=191] - different typical day! -**With Enhanced Helpers:** -No changes needed - `aggregation_weight` already handles clustering correctly. 
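+(on the flat time axis the clusters sit back-to-back, so t=95 and t=96 are adjacent steps)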
+With min_uptime=8: +- Current: Constraint forces component to stay on t=90→t=97 (spans into cluster 1) +- Reality: Cluster 1 is a DIFFERENT day, constraint makes no sense +``` + +**Options for StatusModel with Clustering:** -**Potential Enhancement:** -Could add per-cluster status summaries for visualization: +| Mode | Description | Use Case | +|------|-------------|----------| +| **independent** | Each cluster has independent status constraints | Most common - typical days are independent | +| **cyclic** | Status at cluster end = status at cluster start | Repeating patterns | +| **ignore_temporal** | Disable min_uptime/downtime when clustered | Simple approximation | + +**Recommended Approach:** ```python -@property -def status_per_cluster(self) -> xr.DataArray: - """Active hours per cluster.""" - clustering = self.flow_system.clustering - if clustering is None: - return None - # Use helpers to compute per-cluster active time - return clustering.aggregate_per_cluster( - self.status * self._model.timestep_duration - ) +class StatusParameters: + # Existing + min_uptime: float | None = None + min_downtime: float | None = None + initial_status: bool | None = None + + # NEW: How to handle with clustering + cluster_mode: Literal['independent', 'cyclic', 'ignore_temporal'] = 'independent' +``` + +**Implementation for `cluster_mode='independent'`:** +```python +# In StatusModel, when adding consecutive duration constraints: +if clustering is not None and self.cluster_mode == 'independent': + # Mask out constraints at cluster boundaries + # Each cluster is treated independently + for constraint in [uptime_constraint, downtime_constraint]: + # Don't enforce across cluster boundaries + mask = np.ones(n_timesteps, dtype=bool) + mask[clustering.cluster_start.values[1:]] = False # Break at cluster starts + constraint = constraint.where(mask) ``` +**What works correctly already:** +- `active_hours` tracking uses `aggregation_weight` → ✅ correct +- `effects_per_active_hour` uses `timestep_duration` → ✅ correct +- Total startup count (if properly weighted) → needs `cluster_weight` + ### 3.2 StorageModel Impact **Current Code (components.py):** @@ -298,6 +331,20 @@ No changes needed - `cluster_weight` structure preserved. **With Enhanced Helpers:** No changes needed - operates on flat time dimension. +### 3.5 Summary: Models Requiring Cluster-Awareness + +| Model | Has Cross-Timestep Constraints | Clustering Impact | Action Needed | +|-------|-------------------------------|-------------------|---------------| +| **StorageModel** | charge_state[t] depends on charge_state[t-1] | ✅ Already handled | InterclusterStorageModel exists | +| **StatusModel** | min_uptime, min_downtime, initial_status | ❌ **BUG** | Add cluster_mode parameter | +| **consecutive_duration_tracking** | State machine for uptime/downtime | ❌ **BUG** | Break at cluster boundaries | +| **state_transition_bounds** | activate[t] depends on status[t-1] | ⚠️ Partial | May span boundaries incorrectly | +| **PiecewiseModel** | Per-timestep only | ✅ OK | No changes needed | +| **ShareAllocationModel** | Uses cluster_weight | ✅ OK | No changes needed | +| **InvestmentModel** | No time dimension | ✅ OK | No changes needed | + +**Key Insight:** Any constraint of the form `x[t] - x[t-1]` or `x[t:t+n]` needs cluster boundary handling. 
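+
+A minimal sketch of such boundary handling on the flat time axis, assuming the
+xarray `cluster_start` indexer proposed in Phase 1; `x`, `model`, `n_timesteps` and
+`ramp_limit` are illustrative placeholders, and the `mask=` keyword mirrors linopy's
+masking support rather than any existing flixopt helper:
+
+```python
+import numpy as np
+
+# Sketch: valid[t] is True where timestep t-1 belongs to the same cluster as t
+valid = np.ones(n_timesteps, dtype=bool)
+valid[0] = False                                    # t=0 has no predecessor
+valid[clustering.cluster_start.values[1:]] = False  # first step of each cluster
+
+# x[t] - x[t-1], enforced only where both timesteps share a cluster
+diff = x.isel(time=slice(1, None)) - x.isel(time=slice(None, -1))
+model.add_constraints(diff <= ramp_limit, mask=valid[1:], name='diff_within_cluster')
+```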
+ --- ## Part 4: Plotting Improvements @@ -664,7 +711,35 @@ def expand_solution(): expanded = result.expand_data(var_data) ``` -### Phase 4: Cluster-Aware Plotting (Minimal Code) +### Phase 4: Fix StatusModel & Temporal Constraints + +**Goal:** Handle cross-timestep constraints correctly with clustering. + +**Tasks:** +1. Add `cluster_mode` parameter to `StatusParameters` (default: `'independent'`) +2. Update `consecutive_duration_tracking` to break at cluster boundaries +3. Update `state_transition_bounds` to handle cluster boundaries +4. Add warning/error if incompatible constraints used with clustering + +**Implementation:** +```python +# In modeling.py consecutive_duration_tracking: +def consecutive_duration_tracking(..., clustering=None): + ... + if clustering is not None: + # Don't track duration across cluster boundaries + # Reset tracking at start of each cluster + reset_mask = np.zeros(n_timesteps, dtype=bool) + reset_mask[clustering.cluster_start.values] = True + # Modify constraints to reset at cluster starts +``` + +**Files:** +- `flixopt/interface.py` - Add `cluster_mode` to `StatusParameters` +- `flixopt/modeling.py` - Update `consecutive_duration_tracking`, `state_transition_bounds` +- `flixopt/features.py` - Update `StatusModel` to use cluster_mode + +### Phase 5: Cluster-Aware Plotting (Minimal Code) **Goal:** Leverage existing plot infrastructure - no new methods needed! From 8f9bae85dba0f6c7091368bb7246be93d0d29168 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 25 Dec 2025 22:02:24 +0100 Subject: [PATCH 134/191] Update plan --- docs/design/cluster_architecture.md | 1057 +++++++++++++-------------- 1 file changed, 501 insertions(+), 556 deletions(-) diff --git a/docs/design/cluster_architecture.md b/docs/design/cluster_architecture.md index 9b696b6bb..ae124f99b 100644 --- a/docs/design/cluster_architecture.md +++ b/docs/design/cluster_architecture.md @@ -2,11 +2,31 @@ ## Executive Summary -This document explores architectural options for improving cluster representation in flixopt, addressing: -1. Enhanced cluster helpers for the current flat time structure -2. Impact on StatusModel and other Features -3. Improved UX for cluster visualization and plotting -4. Future support for variable segmentation per cluster/period/scenario +This document defines the architecture for cluster representation in flixopt using **true `(cluster, time)` dimensions**. + +### Key Decision: True Dimensions (Option B) + +**Chosen Approach:** +```python +# Clustered data structure: +data.dims = ('cluster', 'time', 'period', 'scenario') +data.shape = (9, 24, ...) # 9 clusters × 24 timesteps each +``` + +**Why True Dimensions?** +1. **Temporal constraints just work** - `x[:, 1:] - x[:, :-1]` naturally stays within clusters +2. **No boundary masking** - StorageModel, StatusModel constraints are clean and vectorized +3. **Variable segment durations supported** - `timestep_duration[cluster, time]` handles different segment lengths +4. **Plotting trivial** - existing `facet_col='cluster'` works automatically + +### Document Scope + +1. Current architecture analysis (Part 1) +2. Architectural options and recommendation (Part 2) +3. Impact on Features - StatusModel, StorageModel, etc. (Part 3) +4. Plotting improvements (Part 4) +5. Variable segment durations (Part 5) +6. 
Implementation roadmap (Part 6) --- @@ -57,263 +77,188 @@ time: (n_clusters × timesteps_per_cluster,) # Flat, e.g., (864,) for 9 cluster ## Part 2: Architectural Options -### 2.1 Option A: Enhanced Flat with xarray-based Indexers (Recommended) - -Keep flat `time` dimension but add **xarray-based indexer properties** that work seamlessly with `.isel()`: - -```python -class Clustering: - # ═══════════════════════════════════════════════════════════════ - # CORE INDEXER PROPERTIES (xarray DataArrays) - # ═══════════════════════════════════════════════════════════════ - - @property - def cluster_start(self) -> xr.DataArray: - """Time indices of cluster starts. +### 2.1 Option A: Enhanced Flat with Indexers - Shape: (cluster,) - Values: [0, 96, 192, ...] for 96 timesteps per cluster +Keep flat `time` dimension, add xarray indexer properties. - Usage: - # Select start of each cluster (broadcasts across period/scenario) - data.isel(time=clustering.cluster_start) +**Pros:** Supports variable-length clusters +**Cons:** Every temporal constraint needs explicit boundary masking - # Shift by 1 for "second timestep of each cluster" - data.isel(time=clustering.cluster_start + 1) - """ +**NOT RECOMMENDED** - see Option B. - @property - def cluster_end(self) -> xr.DataArray: - """Time indices of cluster ends (last timestep, inclusive). +### 2.2 Option B: True (cluster, time) Dimensions (RECOMMENDED) - Shape: (cluster,) - Values: [95, 191, 287, ...] for 96 timesteps per cluster - - Usage: - # Select end of each cluster - data.isel(time=clustering.cluster_end) - - # Compute delta (end - start) for each cluster - delta = data.isel(time=clustering.cluster_end) - data.isel(time=clustering.cluster_start) - """ - - @property - def within_cluster_time(self) -> xr.DataArray: - """Within-cluster time index for each timestep. - - Shape: (time,) - Values: [0, 1, 2, ..., 95, 0, 1, 2, ..., 95, ...] # repeating pattern - - Usage: - # Select all timesteps at position 12 within their cluster - mask = clustering.within_cluster_time == 12 - data.where(mask, drop=True) - """ - - @property - def cluster(self) -> xr.DataArray: - """Cluster ID for each timestep. - - Shape: (time,) - Values: [0, 0, ..., 0, 1, 1, ..., 1, ...] # cluster assignment - - Usage: - # Group by cluster - data.groupby(clustering.cluster).mean() - """ +Reshape time to 2D when clustering is active: - # ═══════════════════════════════════════════════════════════════ - # CONVENIENCE PROPERTIES - # ═══════════════════════════════════════════════════════════════ +```python +# ═══════════════════════════════════════════════════════════════ +# DIMENSION STRUCTURE +# ═══════════════════════════════════════════════════════════════ - @property - def n_clusters(self) -> int: - """Number of clusters.""" +# Non-clustered: +data.dims = ('time', 'period', 'scenario') +data.shape = (8760, ...) # Full year hourly - @property - def timesteps_per_cluster(self) -> int: - """Timesteps in each cluster (uniform).""" +# Clustered: +data.dims = ('cluster', 'time', 'period', 'scenario') +data.shape = (9, 24, ...) 
# 9 clusters × 24 timesteps each - @property - def cluster_coords(self) -> xr.DataArray: - """Cluster coordinate values: [0, 1, 2, ..., n_clusters-1]""" +# Varying segment durations supported: +timestep_duration.dims = ('cluster', 'time') +timestep_duration.shape = (9, 24) # Different durations per segment per cluster ``` -**Key Design Principle: Indexers are xarray DataArrays** - -This enables powerful, dimension-preserving operations: +**Key Benefits - Temporal Constraints Just Work!** ```python # ═══════════════════════════════════════════════════════════════ -# EXAMPLE: Select start of each cluster (works across all dims!) +# STORAGE: Charge balance naturally within clusters # ═══════════════════════════════════════════════════════════════ -charge_state = ... # shape: (time, period, scenario) e.g., (864, 2, 3) +# charge_state shape: (cluster, time+1, period, scenario) - extra timestep for boundaries +charge_state = ... # (9, 25, ...) -# Get cluster starts - returns shape (cluster, period, scenario) -cs_at_starts = charge_state.isel(time=clustering.cluster_start) -# Result shape: (9, 2, 3) for 9 clusters +# Balance constraint - NO MASKING NEEDED! +lhs = charge_state[:, 1:] - charge_state[:, :-1] * (1 - loss) - charge + discharge +# Shape: (cluster, time, period, scenario) = (9, 24, ...) -# ═══════════════════════════════════════════════════════════════ -# EXAMPLE: Compute delta per cluster -# ═══════════════════════════════════════════════════════════════ -delta = ( - charge_state.isel(time=clustering.cluster_end) - - charge_state.isel(time=clustering.cluster_start) -) -# Result shape: (cluster, period, scenario) = (9, 2, 3) +# Delta per cluster (for inter-cluster linking): +delta_soc = charge_state[:, -1] - charge_state[:, 0] # Shape: (cluster, ...) # ═══════════════════════════════════════════════════════════════ -# EXAMPLE: Shift indexer for charge_state (has extra timestep!) +# STATUS: Uptime/downtime constraints stay within clusters # ═══════════════════════════════════════════════════════════════ -# charge_state has shape (time+1,) due to extra boundary timestep -# Need to shift indices by cluster position -cs_at_ends = charge_state.isel(time=clustering.cluster_end + 1) # +1 for boundary - -# ═══════════════════════════════════════════════════════════════ -# EXAMPLE: Select specific within-cluster position -# ═══════════════════════════════════════════════════════════════ -# Get all values at hour 12 within each cluster -hour_12_mask = clustering.within_cluster_time == 12 -peak_values = data.where(hour_12_mask, drop=True) -``` - -**Pros:** -- Pure xarray - no numpy/dict gymnastics -- Dimension-preserving: indexers broadcast across period/scenario automatically -- Easy adjustments: `cluster_start + 1`, `cluster_end - 1` -- Works with linopy variables directly -- Clean, intuitive API +status = ... # (cluster, time, ...) -**Cons:** -- tsam uniform segments only (sufficient per user requirement) +# State transitions - naturally per cluster! +activate = status[:, 1:] - status[:, :-1] # No boundary issues! 
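
# Total startups over the original timeline (sketch - assumes data-level
# arrays and a cluster_weight with dims ('cluster',), see Part 5):
n_startups = (activate.clip(min=0).sum('time') * cluster_weight).sum('cluster')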
-### 2.2 Option B: True (cluster, time) Dimensions +# min_uptime constraint - works correctly, can't span clusters -Reshape time to 2D when clustering is active: +# ═══════════════════════════════════════════════════════════════ +# INTER-CLUSTER OPERATIONS +# ═══════════════════════════════════════════════════════════════ +# Select first/last timestep of each cluster: +at_start = data.isel(time=0) # Shape: (cluster, period, scenario) +at_end = data.isel(time=-1) # Shape: (cluster, period, scenario) -```python -# Clustered mode -data.dims = ('cluster', 'time', 'period', 'scenario') -data.shape = (9, 96, ...) # 9 clusters × 96 timesteps each +# Compute per-cluster statistics: +mean_per_cluster = data.mean(dim='time') +max_per_cluster = data.max(dim='time') ``` -**Pros:** -- Clean, intuitive structure -- Natural indexing: `data[:, -1] - data[:, 0]` for delta -- No boundary masking needed - -**Cons:** -- Requires uniform cluster lengths -- Different boundaries per period/scenario very complex -- Major refactoring across codebase - -### 2.3 Option C: Padded Rectangular with Masks - -Use `(cluster, max_time)` with NaN padding for shorter clusters: +**Varying Segment Durations (Future Segmentation):** ```python -data.shape = (9, 96, ...) # Pad shorter clusters -valid_mask.shape = (9, 96) # True where data is valid +# Same NUMBER of segments per cluster, different DURATIONS: +timestep_duration = xr.DataArray( + [ + [2, 2, 1, 1, 2, 4], # Cluster 0: segments sum to 12h + [1, 3, 2, 2, 2, 2], # Cluster 1: segments sum to 12h + ... + ], + dims=['cluster', 'time'], + coords={'cluster': range(9), 'time': range(6)} +) + +# aggregation_weight still works: +aggregation_weight = timestep_duration * cluster_weight # (cluster, time) * (cluster,) ``` **Pros:** -- Clean cluster dimension -- Supports variable lengths +- Temporal constraints naturally stay within clusters - NO MASKING! 
- StatusModel uptime/downtime just works
- Storage balance is clean
- Much less code, fewer bugs
- Supports varying segment durations (same count, different lengths)

**Cons:**
- More upfront refactoring
- All code paths need to handle `(cluster, time)` vs `(time,)` based on `is_clustered`

### 2.3 Recommendation: Option B (True Dimensions)

Given:
- Uniform timestep COUNT per cluster (tsam default)
- Variable segment DURATIONS supported via `timestep_duration[cluster, time]`
- Much cleaner constraint handling

**Option B is the recommended choice.**

---

## Part 3: Impact on Features

### 3.1 StatusModel Impact - SOLVED BY TRUE DIMENSIONS

**With `(cluster, time)` dimensions, temporal constraints naturally stay within clusters!**

```python
status.dims = ('cluster', 'time', 'period', 'scenario')
status.shape = (9, 24, ...)

# State transitions - per cluster, no boundary issues!
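# (slicing axis 1 - the within-cluster time axis - can never cross a cluster edge)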
+activate = status[:, 1:] - status[:, :-1] -With min_uptime=8: -- Current: Constraint forces component to stay on t=90→t=97 (spans into cluster 1) -- Reality: Cluster 1 is a DIFFERENT day, constraint makes no sense +# min_uptime constraint operates within each cluster's time dimension +# Cannot accidentally span cluster boundaries ``` -**Options for StatusModel with Clustering:** +**What works automatically:** +- ✅ `min_uptime`, `min_downtime` - constraints stay within clusters +- ✅ `initial_status` - applies to each cluster's first timestep +- ✅ State transitions - naturally per cluster +- ✅ `active_hours` - uses `aggregation_weight` correctly +- ✅ `effects_per_startup` - counted per cluster, weighted by `cluster_weight` -| Mode | Description | Use Case | -|------|-------------|----------| -| **independent** | Each cluster has independent status constraints | Most common - typical days are independent | -| **cyclic** | Status at cluster end = status at cluster start | Repeating patterns | -| **ignore_temporal** | Disable min_uptime/downtime when clustered | Simple approximation | - -**Recommended Approach:** +**Optional Enhancement - cluster_mode for special cases:** ```python class StatusParameters: - # Existing - min_uptime: float | None = None - min_downtime: float | None = None - initial_status: bool | None = None - - # NEW: How to handle with clustering - cluster_mode: Literal['independent', 'cyclic', 'ignore_temporal'] = 'independent' -``` - -**Implementation for `cluster_mode='independent'`:** -```python -# In StatusModel, when adding consecutive duration constraints: -if clustering is not None and self.cluster_mode == 'independent': - # Mask out constraints at cluster boundaries - # Each cluster is treated independently - for constraint in [uptime_constraint, downtime_constraint]: - # Don't enforce across cluster boundaries - mask = np.ones(n_timesteps, dtype=bool) - mask[clustering.cluster_start.values[1:]] = False # Break at cluster starts - constraint = constraint.where(mask) + # NEW: How to handle cluster boundaries (default: independent) + cluster_mode: Literal['independent', 'cyclic'] = 'independent' ``` -**What works correctly already:** -- `active_hours` tracking uses `aggregation_weight` → ✅ correct -- `effects_per_active_hour` uses `timestep_duration` → ✅ correct -- Total startup count (if properly weighted) → needs `cluster_weight` +| Mode | Behavior | +|------|----------| +| `independent` | Each cluster starts fresh (default, most common) | +| `cyclic` | `status[:, 0] == status[:, -1]` - status returns to start | -### 3.2 StorageModel Impact +### 3.2 StorageModel Impact - SIMPLIFIED -**Current Code (components.py):** -- Uses `cluster_start_positions` for boundary masking -- InterclusterStorageModel has complex index calculations +**With `(cluster, time)` dimensions, storage constraints become trivial:** -**With Enhanced Helpers:** ```python -# Before: Manual index calculation -start_positions = clustering.cluster_start_positions -end_positions = start_positions[1:] - 1 +charge_state.dims = ('cluster', 'time_extra', 'period', 'scenario') +charge_state.shape = (9, 25, ...) # 24 timesteps + 1 boundary per cluster + +# ═══════════════════════════════════════════════════════════════ +# Charge balance - NO MASKING! +# ═══════════════════════════════════════════════════════════════ +lhs = ( + charge_state[:, 1:] - + charge_state[:, :-1] * (1 - loss_rate) - + charge * eta_charge + + discharge / eta_discharge +) +self.add_constraints(lhs == 0, name='charge_balance') # Clean! 
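
# Energy accounting maps back to the original timeline via the aggregation
# weight (sketch - aggregation_weight = timestep_duration * cluster_weight, Part 5):
total_charged = (charge * aggregation_weight).sum(['cluster', 'time'])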
+ +# ═══════════════════════════════════════════════════════════════ +# Delta SOC per cluster (for inter-cluster linking) +# ═══════════════════════════════════════════════════════════════ +delta_soc = charge_state[:, -1] - charge_state[:, 0] # Shape: (cluster, ...) + +# ═══════════════════════════════════════════════════════════════ +# Cluster start constraint (relative SOC starts at 0) +# ═══════════════════════════════════════════════════════════════ +self.add_constraints(charge_state[:, 0] == 0, name='cluster_start') -# After: Clean helper usage -clustering = self.flow_system.clustering -delta_soc = clustering.compute_delta_per_cluster(self.charge_state) +# ═══════════════════════════════════════════════════════════════ +# Cyclic constraint (optional) +# ═══════════════════════════════════════════════════════════════ +self.add_constraints(charge_state[:, 0] == charge_state[:, -1], name='cyclic') ``` +**InterclusterStorageModel also simplified** - SOC_boundary linking uses clean slicing. + ### 3.3 ShareAllocationModel Impact **Current Code (features.py:624):** @@ -331,166 +276,119 @@ No changes needed - `cluster_weight` structure preserved. **With Enhanced Helpers:** No changes needed - operates on flat time dimension. -### 3.5 Summary: Models Requiring Cluster-Awareness +### 3.5 Summary: Models with True (cluster, time) Dimensions -| Model | Has Cross-Timestep Constraints | Clustering Impact | Action Needed | -|-------|-------------------------------|-------------------|---------------| -| **StorageModel** | charge_state[t] depends on charge_state[t-1] | ✅ Already handled | InterclusterStorageModel exists | -| **StatusModel** | min_uptime, min_downtime, initial_status | ❌ **BUG** | Add cluster_mode parameter | -| **consecutive_duration_tracking** | State machine for uptime/downtime | ❌ **BUG** | Break at cluster boundaries | -| **state_transition_bounds** | activate[t] depends on status[t-1] | ⚠️ Partial | May span boundaries incorrectly | -| **PiecewiseModel** | Per-timestep only | ✅ OK | No changes needed | -| **ShareAllocationModel** | Uses cluster_weight | ✅ OK | No changes needed | -| **InvestmentModel** | No time dimension | ✅ OK | No changes needed | +| Model | Cross-Timestep Constraints | With True Dims | Action Needed | +|-------|---------------------------|----------------|---------------| +| **StorageModel** | `cs[t] - cs[t-1]` | ✅ Just works | Simplify code | +| **StatusModel** | min_uptime, min_downtime | ✅ Just works | Optional cluster_mode | +| **consecutive_duration_tracking** | State machine | ✅ Just works | No changes | +| **state_transition_bounds** | `activate[t] - status[t-1]` | ✅ Just works | No changes | +| **PiecewiseModel** | Per-timestep only | ✅ Just works | No changes | +| **ShareAllocationModel** | Sum with cluster_weight | ✅ Just works | No changes | +| **InvestmentModel** | No time dimension | ✅ Just works | No changes | -**Key Insight:** Any constraint of the form `x[t] - x[t-1]` or `x[t:t+n]` needs cluster boundary handling. +**Key Insight:** With true `(cluster, time)` dimensions, `x[:, 1:] - x[:, :-1]` naturally stays within clusters! --- ## Part 4: Plotting Improvements -### 4.1 Current UX Issues +### 4.1 Key Benefit of True Dimensions: Minimal Plotting Changes -1. **No visual cluster separation**: Time series plots show continuous lines -2. **Cluster identity hidden**: Hard to see which timesteps belong to which cluster -3. 
**SOC continuity misleading**: Storage plots suggest continuous operation +With true `(cluster, time)` dimensions, plotting becomes trivial because: +1. Data already has the right shape - no reshaping needed +2. Existing `facet_col='cluster'` parameter just works +3. Only minimal changes needed: auto-add cluster separators in combined views -### 4.2 Proposed Improvements +### 4.2 Proposed Approach: Leverage Existing Infrastructure -#### 4.2.1 Cluster-Separated Time Series +#### 4.2.1 Use Existing facet_col Parameter -Add visual separators between clusters: +**No new plot methods needed!** The existing infrastructure handles `cluster` dimension: ```python -def plot_with_cluster_separation(self, data, **kwargs): - """Plot time series with vertical lines between clusters.""" - fig = self._create_base_plot(data, **kwargs) - - if self._fs.is_clustered: - for start_idx in self._fs.clustering.cluster_start_indices()[1:]: - fig.add_vline(x=data.time[start_idx], line_dash='dash', opacity=0.3) +# ═══════════════════════════════════════════════════════════════ +# EXISTING API - works automatically with (cluster, time) dims! +# ═══════════════════════════════════════════════════════════════ +fs.statistics.plot.storage('Battery', facet_col='cluster') # One subplot per cluster +fs.statistics.plot.balance('Heat', facet_col='cluster') # One subplot per cluster +fs.statistics.plot.flows(..., facet_col='cluster') # Same pattern - return fig +# Combine with other dimensions +fs.statistics.plot.balance('Heat', facet_col='cluster', facet_row='scenario') ``` -#### 4.2.2 Faceted Cluster View +#### 4.2.2 Auto-Add Cluster Separators (Small Change) -Display each cluster as a separate subplot: +For combined views (no faceting), add visual separators: ```python -def storage_by_cluster(self, storage_label, **kwargs): - """Plot storage operation with one subplot per cluster.""" - data = self._get_storage_data(storage_label) - - if not self._fs.is_clustered: - return self.storage(storage_label, **kwargs) - - # Reshape to (cluster, within_cluster_time) - clustering = self._fs.clustering - facet_data = [] - for cluster_id, cluster_slice in clustering.cluster_slices().items(): - cluster_data = data.isel(time=cluster_slice) - cluster_data = cluster_data.assign_coords( - cluster=cluster_id, - within_time=range(len(cluster_slice)) - ) - facet_data.append(cluster_data) - - combined = xr.concat(facet_data, dim='cluster') - return self._plot_faceted(combined, facet_col='cluster', **kwargs) -``` - -#### 4.2.3 Cluster Summary Statistics +def _create_base_plot(self, data, **kwargs): + """Base plot creation - add cluster separators if combined view.""" + fig = ... 
# existing logic -Add aggregate views per cluster: + # Auto-add cluster separators if clustered and showing combined time + if self._fs.is_clustered and 'cluster' not in kwargs.get('facet_col', ''): + # Add subtle vertical lines between clusters + for cluster_idx in range(1, self._fs.clustering.n_clusters): + x_pos = cluster_idx * self._fs.clustering.timesteps_per_cluster + fig.add_vline(x=x_pos, line_dash='dot', opacity=0.3, line_color='gray') -```python -def cluster_summary(self, variable, statistic='mean'): - """Show per-cluster statistics as bar chart.""" - data = self._get_variable(variable) - clustering = self._fs.clustering - - summaries = [] - for cluster_id, cluster_slice in clustering.cluster_slices().items(): - cluster_data = data.isel(time=cluster_slice) - if statistic == 'mean': - val = cluster_data.mean('time') - elif statistic == 'max': - val = cluster_data.max('time') - elif statistic == 'min': - val = cluster_data.min('time') - summaries.append(val.assign_coords(cluster=cluster_id)) - - return self._plot_bar(xr.concat(summaries, dim='cluster')) + return fig ``` -#### 4.2.4 Inter-Cluster SOC Visualization +#### 4.2.3 Per-Cluster Statistics (Natural with True Dims) -Show SOC_boundary values for intercluster storage: +With `(cluster, time)` dimensions, aggregation is trivial: ```python -def intercluster_soc(self, storage_label): - """Plot SOC boundaries across original timeline.""" - storage = self._get_component(storage_label) - if not hasattr(storage.submodel, 'SOC_boundary'): - raise ValueError("Storage not in intercluster mode") - - soc_boundary = storage.submodel.SOC_boundary.solution - cluster_order = self._fs.clustering.cluster_order - - # Plot SOC at each original period boundary - fig = go.Figure() - fig.add_trace(go.Scatter( - x=range(len(soc_boundary)), - y=soc_boundary.values, - mode='lines+markers', - name='SOC Boundary' - )) - fig.update_layout( - xaxis_title='Original Period', - yaxis_title='State of Charge', - title=f'{storage_label} Inter-Cluster SOC' - ) - return PlotResult(fig) +# Mean per cluster - just use xarray +mean_per_cluster = data.mean(dim='time') # Shape: (cluster, ...) +max_per_cluster = data.max(dim='time') + +# Can plot directly +fs.statistics.plot.bar(data.mean('time'), x='cluster', title='Mean by Cluster') ``` -### 4.3 Heatmap Enhancements +#### 4.2.4 Heatmap (Already Correct Shape) -Current heatmap reshapes time to (days, hours). For clustered data: +With true dimensions, heatmaps work directly: ```python +# Data already has (cluster, time) shape - heatmap just works! def cluster_heatmap(self, variable): - """Heatmap with clusters on y-axis, within-cluster time on x-axis.""" data = self._get_variable(variable) - clustering = self._fs.clustering - - # Reshape: (total_time,) -> (n_clusters, timesteps_per_cluster) - reshaped = data.values.reshape( - clustering.n_clusters, - clustering.timesteps_per_cluster - ) + # With (cluster, time) dims, no reshaping needed! return self._plot_heatmap( - reshaped, - x_label='Within-Cluster Time', - y_label='Cluster', + data, # Already (cluster, time, ...) 
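        # (one heatmap row per cluster, one column per within-cluster timestep)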
+ x='time', + y='cluster', colorbar_title=variable ) ``` +### 4.3 Summary: Plotting Changes Required + +| Change | Scope | Complexity | +|--------|-------|------------| +| Auto cluster separators in base plot | ~10 lines in `_create_base_plot` | Low | +| Ensure facet_col='cluster' works | Should work already | None | +| Heatmap with cluster dim | Works automatically | None | +| No new plot methods needed | - | - | + --- -## Part 5: Variable Segmentation Architecture +## Part 5: Variable Segment Durations (Future) + +### 5.1 Clarification: Variable Durations, NOT Variable Counts -### 5.1 Segmentation Types +With true `(cluster, time)` dimensions: +- **Same number** of timesteps per cluster (required for rectangular array) +- **Different durations** per timestep within each cluster (via `timestep_duration`) -| Type | Description | Complexity | -|------|-------------|------------| -| **Uniform segments** | All clusters have same structure | Current implementation | -| **Variable per cluster** | Cluster 1: 24 steps, Cluster 2: 48 steps | Medium | -| **Variable per period** | Period 1 clusters differ from Period 2 | High | -| **Variable per scenario** | Scenario A differs from Scenario B | High | -| **Full variability** | Different per (cluster, period, scenario) | Very High | +This is exactly what tsam segmentation provides and what we need. ### 5.2 TSAM Segmentation Features @@ -504,281 +402,256 @@ tsam.TimeSeriesAggregation( ``` **What TSAM provides:** -- Uniform segment count across all typical periods +- Uniform segment count across all typical periods ✅ - Various representation methods (mean, medoid, distribution) -- Segment duration = `timesteps_per_cluster / noSegments` - -**What TSAM does NOT provide:** -- Variable segment lengths within a period -- Different segment counts per cluster +- Different segment durations per cluster ✅ -### 5.3 Implementing Variable Segmentation +### 5.3 Implementation with True Dimensions -#### 5.3.1 Data Structures +With `(cluster, time)` dimensions, variable segment durations are trivial: ```python -@dataclass -class SegmentStructure: - """Structure for variable-length segments within clusters.""" - - # Shape: (cluster,) - number of segments in each cluster - n_segments_per_cluster: xr.DataArray - - # Shape: (cluster, max_segments) - duration of each segment (NaN if not used) - segment_durations: xr.DataArray - - # Shape: (cluster, max_segments) - start index within cluster - segment_start_indices: xr.DataArray - - # For period/scenario variation, add those dims to all arrays +# ═══════════════════════════════════════════════════════════════ +# DIMENSION STRUCTURE +# ═══════════════════════════════════════════════════════════════ +data.dims = ('cluster', 'time', 'period', 'scenario') +data.shape = (9, 6, ...) # 9 clusters × 6 segments each - def get_segment_slice(self, cluster, segment, period=None, scenario=None) -> slice: - """Get time slice for a specific segment.""" +# ═══════════════════════════════════════════════════════════════ +# VARIABLE SEGMENT DURATIONS - just a 2D array! +# ═══════════════════════════════════════════════════════════════ +timestep_duration = xr.DataArray( + [ + [2, 2, 4, 4, 6, 6], # Cluster 0: short-short-long pattern + [1, 1, 4, 8, 4, 6], # Cluster 1: different pattern + [3, 3, 3, 3, 6, 6], # Cluster 2: uniform start, longer end ... 
-``` + ], + dims=['cluster', 'time'], + coords={'cluster': range(9), 'time': range(6)} +) -#### 5.3.2 Enhanced ClusterStructure +# ═══════════════════════════════════════════════════════════════ +# AGGREGATION WEIGHT - combines duration and cluster weight +# ═══════════════════════════════════════════════════════════════ +# cluster_weight shape: (cluster,) - how many days each cluster represents +# timestep_duration shape: (cluster, time) - duration of each segment +aggregation_weight = timestep_duration * cluster_weight # Broadcasting! -```python -class ClusterStructure: - # Existing - cluster_order: xr.DataArray - cluster_occurrences: xr.DataArray - n_clusters: int - timesteps_per_cluster: int | xr.DataArray # Allow variable - - # New for segmentation - segmentation: SegmentStructure | None - - # Period/scenario awareness - _boundaries_by_slice: dict[tuple, BoundaryInfo] # (period, scenario) -> info - - @property - def has_variable_boundaries(self) -> bool: - """True if boundaries differ across periods/scenarios.""" - return len(self._boundaries_by_slice) > 1 - - def get_boundaries(self, period=None, scenario=None) -> BoundaryInfo: - """Get cluster boundaries for specific period/scenario.""" - key = (period, scenario) - if key in self._boundaries_by_slice: - return self._boundaries_by_slice[key] - return self._default_boundaries +# ═══════════════════════════════════════════════════════════════ +# ALL EXISTING CONSTRAINTS JUST WORK +# ═══════════════════════════════════════════════════════════════ +# Storage balance: uses aggregation_weight correctly +# StatusModel: active_hours weighted by aggregation_weight +# Cost calculations: weighted by aggregation_weight ``` -#### 5.3.3 Integration with FlowSystem +### 5.4 No Complex Infrastructure Needed -```python -class FlowSystem: - @property - def is_clustered(self) -> bool: - return self.clustering is not None - - @property - def has_segmentation(self) -> bool: - return self.is_clustered and self.clustering.segmentation is not None - - @property - def has_variable_cluster_lengths(self) -> bool: - """True if clusters have different numbers of timesteps.""" - if not self.is_clustered: - return False - tpc = self.clustering.timesteps_per_cluster - if isinstance(tpc, int): - return False - return len(np.unique(tpc)) > 1 -``` - -### 5.4 Constraint Generation with Variable Segments +With true dimensions, segmentation requires **no special infrastructure**: -When segment lengths vary, constraint generation must loop or use advanced indexing: +| Aspect | With True Dims | +|--------|----------------| +| Different segment durations | Just set `timestep_duration[cluster, time]` | +| Constraint generation | No changes - already works | +| Cost calculations | No changes - uses `aggregation_weight` | +| Plotting | No changes - `cluster` dim exists | -```python -def _add_charge_state_constraints(self): - clustering = self.flow_system.clustering - - if not clustering.has_variable_boundaries: - # Vectorized path - all clusters have same structure - self._add_charge_state_vectorized() - else: - # Loop path - boundaries vary - for period in self.flow_system.periods or [None]: - for scenario in self.flow_system.scenarios or [None]: - self._add_charge_state_for_slice(period, scenario) - -def _add_charge_state_for_slice(self, period, scenario): - """Add constraints for specific period/scenario slice.""" - boundaries = self.clustering.get_boundaries(period, scenario) - - for cluster_id in range(boundaries.n_clusters): - slc = boundaries.cluster_slices[cluster_id] 
- cs_cluster = self.charge_state.isel(time=slc) - - if period is not None: - cs_cluster = cs_cluster.sel(period=period) - if scenario is not None: - cs_cluster = cs_cluster.sel(scenario=scenario) - - # Add constraints for this cluster - self._add_balance_for_cluster(cs_cluster, cluster_id, period, scenario) -``` +**The only addition needed:** Update `transform_accessor.cluster()` to accept tsam segmentation parameters and construct the 2D `timestep_duration` array. --- -## Part 6: Implementation Roadmap (Focused) +## Part 6: Implementation Roadmap -### Phase 1: xarray-based Indexers (PRIORITY) +### Phase 1: Core Dimension Refactoring (PRIORITY) -**Goal:** Add xarray-based cluster indexer properties to `Clustering`. +**Goal:** Introduce true `(cluster, time)` dimensions throughout the codebase. **Tasks:** -1. Add `cluster_start` property → `xr.DataArray` with dims `(cluster,)` -2. Add `cluster_end` property → `xr.DataArray` with dims `(cluster,)` -3. Add `cluster` property → `xr.DataArray` with dims `(time,)` for cluster labels -4. Add `within_cluster_time` property → `xr.DataArray` with dims `(time,)` -5. Add convenience: `n_clusters`, `timesteps_per_cluster`, `cluster_coords` -6. Add `is_clustered` property to `FlowSystem` +1. Update `FlowSystem` to support `(cluster, time)` dimension structure when clustered +2. Add `is_clustered` property to `FlowSystem` +3. Update `Clustering` class with: + - `n_clusters: int` property + - `timesteps_per_cluster: int` property + - Coordinate accessors for cluster dimension +4. Update `cluster_weight` to have shape `(cluster,)` instead of `(time,)` +5. Update `timestep_duration` to have shape `(cluster, time)` when clustered +6. Update `aggregation_weight` computation to broadcast correctly **Files:** -- `flixopt/clustering/base.py` - Add indexer properties to `Clustering` -- `flixopt/flow_system.py` - Add `is_clustered` convenience property +- `flixopt/flow_system.py` - Core dimension handling +- `flixopt/clustering/base.py` - Updated Clustering class -**Example Implementation:** +**Key Changes:** ```python +# FlowSystem property updates: @property -def cluster_start(self) -> xr.DataArray: - """Time indices where each cluster starts.""" - indices = np.arange(0, self.n_clusters * self.timesteps_per_cluster, self.timesteps_per_cluster) - return xr.DataArray(indices, dims=['cluster'], coords={'cluster': np.arange(self.n_clusters)}) +def is_clustered(self) -> bool: + return self.clustering is not None @property -def cluster_end(self) -> xr.DataArray: - """Time indices where each cluster ends (inclusive).""" - return self.cluster_start + self.timesteps_per_cluster - 1 +def cluster_weight(self) -> xr.DataArray: + if not self.is_clustered: + return xr.DataArray(1.0) + # Shape: (cluster,) - one weight per cluster + return xr.DataArray( + self.clustering.cluster_occurrences, + dims=['cluster'], + coords={'cluster': range(self.clustering.n_clusters)} + ) + +@property +def timestep_duration(self) -> xr.DataArray: + if not self.is_clustered: + return self._timestep_duration # Shape: (time,) or scalar + # Shape: (cluster, time) when clustered + return self._timestep_duration # Already 2D from clustering + +@property +def aggregation_weight(self) -> xr.DataArray: + return self.timestep_duration * self.cluster_weight # Broadcasting handles shapes ``` -### Phase 2: Refactor InterclusterStorageModel +### Phase 2: Update Variable/Constraint Creation -**Goal:** Use new xarray indexers in `InterclusterStorageModel`. 
+**Goal:** All variables and constraints use `(cluster, time)` dimensions when clustered. **Tasks:** -1. Replace manual index calculations with `clustering.cluster_start`, `clustering.cluster_end` -2. Simplify `_compute_delta_soc()` using indexer arithmetic -3. Simplify `_add_cluster_start_constraints()` using indexers -4. Handle charge_state offset (extra timestep) cleanly +1. Update `create_variable` to use `(cluster, time, period, scenario)` dims when clustered +2. Update constraint generation in all models +3. Verify linopy handles multi-dimensional constraint arrays correctly +4. Add tests for both clustered and non-clustered paths **Files:** -- `flixopt/components.py` - Refactor `InterclusterStorageModel` +- `flixopt/core.py` - Variable creation +- `flixopt/components.py` - StorageModel, other component models +- `flixopt/features.py` - StatusModel, other feature models -**Before/After Example:** +**Key Pattern:** ```python -# BEFORE: Manual calculation -start_positions = clustering.cluster_start_positions -end_positions = start_positions[1:] - 1 -delta = charge_state.isel(time=end_indices) - charge_state.isel(time=start_indices) +# Dimension-aware variable creation: +def _get_time_dims(self) -> list[str]: + if self.flow_system.is_clustered: + return ['cluster', 'time'] + return ['time'] -# AFTER: xarray indexers -# Note: charge_state has +1 timesteps, so shift accordingly -delta = ( - self.charge_state.isel(time=clustering.cluster_end + 1) - - self.charge_state.isel(time=clustering.cluster_start) -) +def _get_time_coords(self) -> dict: + if self.flow_system.is_clustered: + return { + 'cluster': range(self.flow_system.clustering.n_clusters), + 'time': range(self.flow_system.clustering.timesteps_per_cluster) + } + return {'time': self.flow_system.time_coords} ``` -### Phase 3: expand_solution() with Offset Handling +### Phase 3: Simplify StorageModel and InterclusterStorageModel -**Goal:** Proper solution expansion for variables with different time structures. +**Goal:** Leverage true dimensions for clean constraint generation. **Tasks:** -1. Update `expand_solution()` to detect variable type (regular vs charge_state) -2. Add offset handling for intercluster charge_state expansion -3. Map SOC_boundary values to original timeline correctly -4. Test with all storage cluster_modes +1. Simplify `StorageModel.charge_balance` - no boundary masking needed +2. Simplify delta SOC calculation: `charge_state[:, -1] - charge_state[:, 0]` +3. Simplify `InterclusterStorageModel` linking constraints +4. Update `intercluster_helpers.py` utilities **Files:** -- `flixopt/transform_accessor.py` - Update `expand_solution()` -- `flixopt/clustering/base.py` - Add expansion helpers if needed +- `flixopt/components.py` - StorageModel, InterclusterStorageModel +- `flixopt/clustering/intercluster_helpers.py` - Simplified helpers -**Key Insight:** +**Before/After:** ```python -def expand_solution(): - for var_name, var_data in solution.items(): - if 'charge_state' in var_name and is_intercluster: - # Special handling: map SOC_boundary to original period boundaries - expanded = _expand_intercluster_soc(var_data) - else: - # Normal expansion using timestep_mapping - expanded = result.expand_data(var_data) +# BEFORE (flat time with masking): +start_positions = clustering.cluster_start_positions +end_positions = start_positions[1:] - 1 +mask = _build_boundary_mask(...) +balance = charge_state.isel(time=slice(1, None)).where(~mask) - ... + +# AFTER (true dimensions): +# charge_state shape: (cluster, time+1, ...) 
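# (the extra boundary entry makes both slices below the same length: 1..T vs 0..T-1)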
+balance = ( + charge_state[:, 1:] - + charge_state[:, :-1] * (1 - loss_rate) - + charge * eta_charge + + discharge / eta_discharge +) +# No masking needed - constraints naturally stay within clusters! ``` -### Phase 4: Fix StatusModel & Temporal Constraints +### Phase 4: Update transform_accessor.cluster() -**Goal:** Handle cross-timestep constraints correctly with clustering. +**Goal:** Produce true `(cluster, time)` shaped data. **Tasks:** -1. Add `cluster_mode` parameter to `StatusParameters` (default: `'independent'`) -2. Update `consecutive_duration_tracking` to break at cluster boundaries -3. Update `state_transition_bounds` to handle cluster boundaries -4. Add warning/error if incompatible constraints used with clustering - -**Implementation:** -```python -# In modeling.py consecutive_duration_tracking: -def consecutive_duration_tracking(..., clustering=None): - ... - if clustering is not None: - # Don't track duration across cluster boundaries - # Reset tracking at start of each cluster - reset_mask = np.zeros(n_timesteps, dtype=bool) - reset_mask[clustering.cluster_start.values] = True - # Modify constraints to reset at cluster starts -``` +1. Update `cluster()` to reshape time series to `(cluster, time)` +2. Generate proper coordinates for cluster dimension +3. Update `expand_solution()` to handle reverse transformation +4. Handle SOC_boundary expansion for inter-cluster storage **Files:** -- `flixopt/interface.py` - Add `cluster_mode` to `StatusParameters` -- `flixopt/modeling.py` - Update `consecutive_duration_tracking`, `state_transition_bounds` -- `flixopt/features.py` - Update `StatusModel` to use cluster_mode +- `flixopt/transform_accessor.py` - cluster() and expand_solution() -### Phase 5: Cluster-Aware Plotting (Minimal Code) +**Key Implementation:** +```python +def cluster(self, n_clusters, cluster_duration, ...): + """Create clustered FlowSystem with (cluster, time) dimensions.""" + ... + # Reshape all time series: (flat_time,) → (cluster, time) + for key, ts in time_series.items(): + reshaped = ts.values.reshape(n_clusters, timesteps_per_cluster) + new_ts = xr.DataArray( + reshaped, + dims=['cluster', 'time'], + coords={'cluster': range(n_clusters), 'time': range(timesteps_per_cluster)} + ) + clustered_time_series[key] = new_ts + ... -**Goal:** Leverage existing plot infrastructure - no new methods needed! +def expand_solution(self): + """Expand clustered solution back to original timeline.""" + expanded = {} + for var_name, var_data in self.solution.items(): + if 'cluster' in var_data.dims: + # Expand using cluster_order to map back to original periods + expanded[var_name] = self._expand_clustered_data(var_data) + else: + expanded[var_name] = var_data + return xr.Dataset(expanded) +``` -**Key Insight:** Add `cluster` as a coordinate to solution data, then existing faceting works: +### Phase 5: Plotting Integration -```python -# EXISTING API - no new methods needed! -fs.statistics.plot.storage('Battery', facet_col='cluster') # faceted by cluster -fs.statistics.plot.balance('Heat', facet_col='cluster') # works automatically -fs.statistics.plot.flows(..., facet_col='cluster') # same pattern -``` +**Goal:** Minimal changes - leverage existing infrastructure. **Tasks:** -1. Add `cluster` coordinate to solution Dataset when clustered -2. Auto-add cluster separator lines in time series plots (if clustered) -3. Ensure `facet_col='cluster'` works with existing plot methods +1. Ensure `facet_col='cluster'` works with existing plot methods +2. 
Add auto cluster separators in combined time series views +3. Test heatmaps with `(cluster, time)` data + +**Files:** +- `flixopt/statistics_accessor.py` - Minor update to base plot method **Implementation:** ```python -# In expand_solution() or statistics accessor: -if flow_system.is_clustered: - # Add cluster coordinate to all time-dimensioned variables - solution = solution.assign_coords(cluster=('time', clustering.cluster.values)) - -# In plot methods (minimal change): -def _create_base_plot(self, data, **kwargs): - fig = ... # existing logic +# In _create_base_plot or similar: +def _add_cluster_separators(self, fig): + """Add subtle separators between clusters in combined view.""" + if self._fs.is_clustered: + for cluster_idx in range(1, self._fs.clustering.n_clusters): + x_pos = cluster_idx * self._fs.clustering.timesteps_per_cluster + fig.add_vline(x=x_pos, line_dash='dot', opacity=0.3) +``` - # Auto-add cluster separators if clustered - if self._fs.is_clustered and 'time' in data.dims: - for idx in self._fs.clustering.cluster_start.values[1:]: - fig.add_vline(x=idx, line_dash='dot', opacity=0.3) +### Phase Summary - return fig -``` +| Phase | Goal | Complexity | StatusModel Fix? | +|-------|------|------------|------------------| +| 1 | Core dimension refactoring | High | N/A (prep work) | +| 2 | Variable/constraint creation | Medium | ✅ Automatic | +| 3 | StorageModel simplification | Medium | N/A | +| 4 | transform_accessor updates | Medium | N/A | +| 5 | Plotting integration | Low | N/A | -**Files:** -- `flixopt/transform_accessor.py` - Add cluster coord in expand_solution() -- `flixopt/statistics_accessor.py` - Add separator lines (small change to base plot) +**Key Insight:** With true `(cluster, time)` dimensions, StatusModel and other temporal constraints **just work** without any special handling. The dimension structure naturally prevents constraints from spanning cluster boundaries. --- @@ -848,75 +721,147 @@ def test_cluster_heatmap(): ## Appendix B: Code Examples -### B.1 Using xarray Indexers +### B.1 Working with True (cluster, time) Dimensions ```python -clustering = flow_system.clustering +# ═══════════════════════════════════════════════════════════════ +# DIMENSION STRUCTURE +# ═══════════════════════════════════════════════════════════════ +# Non-clustered: +flow_rate.dims # ('time', 'period', 'scenario') +flow_rate.shape # (8760, ...) + +# Clustered: +flow_rate.dims # ('cluster', 'time', 'period', 'scenario') +flow_rate.shape # (9, 24, ...) # 9 clusters × 24 timesteps # ═══════════════════════════════════════════════════════════════ -# Select values at cluster boundaries +# NATURAL CLUSTER BOUNDARY OPERATIONS # ═══════════════════════════════════════════════════════════════ -flow_at_starts = flow_rate.isel(time=clustering.cluster_start) -flow_at_ends = flow_rate.isel(time=clustering.cluster_end) +# First/last timestep of each cluster - just use isel! +flow_at_start = flow_rate.isel(time=0) # Shape: (cluster, period, scenario) +flow_at_end = flow_rate.isel(time=-1) # Shape: (cluster, period, scenario) + +# Delta per cluster - trivial! +delta_per_cluster = flow_rate.isel(time=-1) - flow_rate.isel(time=0) # ═══════════════════════════════════════════════════════════════ -# Compute delta per cluster (e.g., for storage charge change) +# TEMPORAL CONSTRAINTS - JUST WORK! 
# ═══════════════════════════════════════════════════════════════ -delta = data.isel(time=clustering.cluster_end) - data.isel(time=clustering.cluster_start) -# Result has dims: (cluster, period, scenario) if those exist +# Storage balance - naturally stays within clusters +balance = charge_state[:, 1:] - charge_state[:, :-1] # No masking needed! + +# Status transitions - naturally per cluster +activate = status[:, 1:] - status[:, :-1] # No boundary issues! # ═══════════════════════════════════════════════════════════════ -# Handle charge_state (has extra timestep at end of each cluster) +# PER-CLUSTER AGGREGATION - use xarray directly # ═══════════════════════════════════════════════════════════════ -# charge_state shape: (time + n_clusters,) due to boundary timesteps -cs_at_cluster_start = charge_state.isel(time=clustering.cluster_start) -cs_at_cluster_end = charge_state.isel(time=clustering.cluster_end + 1) # +1 for boundary +mean_per_cluster = flow_rate.mean(dim='time') # Shape: (cluster, ...) +max_per_cluster = flow_rate.max(dim='time') +total_per_cluster = (flow_rate * timestep_duration).sum(dim='time') # ═══════════════════════════════════════════════════════════════ -# Group operations by cluster +# SELECT SPECIFIC WITHIN-CLUSTER TIMESTEP # ═══════════════════════════════════════════════════════════════ -mean_per_cluster = data.groupby(clustering.cluster).mean() -max_per_cluster = data.groupby(clustering.cluster).max() +# Peak hour (hour 18) from each cluster +peak_values = flow_rate.isel(time=18) # Shape: (cluster, ...) + +# Multiple timesteps +morning_values = flow_rate.isel(time=slice(6, 12)) # Hours 6-11 from each cluster +``` +### B.2 Storage Constraints with True Dimensions + +```python # ═══════════════════════════════════════════════════════════════ -# Select specific within-cluster timestep +# charge_state has one extra timestep per cluster for boundaries # ═══════════════════════════════════════════════════════════════ -# Get all peak hours (e.g., hour 18) from each cluster -peak_mask = clustering.within_cluster_time == 18 -peak_values = data.where(peak_mask, drop=True) +# charge_state.dims = ('cluster', 'time_cs', 'period', 'scenario') +# charge_state.shape = (9, 25, ...) # 24 timesteps + 1 boundary + +# Charge balance - vectorized, no loops! +lhs = ( + charge_state[:, 1:] - # SOC at end of timestep + charge_state[:, :-1] * (1 - loss_rate) - # SOC at start, with loss + charge * eta_charge + # Charging adds energy + discharge / eta_discharge # Discharging removes energy +) +model.add_constraints(lhs == 0, name='charge_balance') + +# Delta SOC per cluster (for inter-cluster linking) +delta_soc = charge_state[:, -1] - charge_state[:, 0] # Shape: (cluster, ...) + +# Cluster start constraint (relative SOC starts at 0 within each cluster) +model.add_constraints(charge_state[:, 0] == 0, name='cluster_start') + +# Cyclic constraint (optional) +model.add_constraints( + charge_state[:, 0] == charge_state[:, -1], + name='cyclic' +) ``` -### B.2 Cluster Plotting (Uses Existing API!) +### B.3 Cluster Plotting (Uses Existing API!) 
```python # ═══════════════════════════════════════════════════════════════ -# Facet by cluster - uses existing facet_col parameter +# FACET BY CLUSTER - uses existing facet_col parameter # ═══════════════════════════════════════════════════════════════ fs.statistics.plot.storage('Battery', facet_col='cluster') fs.statistics.plot.balance('Heat', facet_col='cluster') fs.statistics.plot.flows(..., facet_col='cluster') # ═══════════════════════════════════════════════════════════════ -# Regular plots auto-add cluster separator lines when clustered +# REGULAR PLOTS - auto-add cluster separators when clustered # ═══════════════════════════════════════════════════════════════ fs.statistics.plot.storage('Battery') # separators added automatically # ═══════════════════════════════════════════════════════════════ -# Combine with other facets +# COMBINE WITH OTHER FACETS # ═══════════════════════════════════════════════════════════════ fs.statistics.plot.balance('Heat', facet_col='cluster', facet_row='scenario') ``` -### B.3 Check Clustering Status +### B.4 Check Clustering Status and Access Properties ```python if flow_system.is_clustered: clustering = flow_system.clustering print(f"Clustered: {clustering.n_clusters} clusters × {clustering.timesteps_per_cluster} timesteps") - # Access indexers - print(f"Cluster starts: {clustering.cluster_start.values}") - print(f"Cluster ends: {clustering.cluster_end.values}") + # Dimension information + print(f"Data shape: (cluster={clustering.n_clusters}, time={clustering.timesteps_per_cluster})") + + # Cluster weights (how many original periods each cluster represents) + print(f"Cluster weights: {flow_system.cluster_weight.values}") + + # Aggregation weight (cluster_weight × timestep_duration) + print(f"Aggregation weight shape: {flow_system.aggregation_weight.shape}") else: print("Not clustered - full time resolution") ``` + +### B.5 Variable Segment Durations (Future) + +```python +# ═══════════════════════════════════════════════════════════════ +# timestep_duration varies per (cluster, time) +# ═══════════════════════════════════════════════════════════════ +timestep_duration = xr.DataArray( + [ + [2, 2, 4, 4, 6, 6], # Cluster 0: short-short-long pattern + [1, 1, 4, 8, 4, 6], # Cluster 1: different pattern + ... + ], + dims=['cluster', 'time'], + coords={'cluster': range(9), 'time': range(6)} +) + +# aggregation_weight combines duration and cluster importance +aggregation_weight = timestep_duration * cluster_weight +# Shape: (cluster, time) × (cluster,) → (cluster, time) via broadcasting + +# All constraints use aggregation_weight for proper weighting +total_cost = (cost_per_timestep * aggregation_weight).sum(dim=['cluster', 'time']) +``` From aa9653ba732679ba489436d953b3c0c13bba10f9 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 25 Dec 2025 22:06:03 +0100 Subject: [PATCH 135/191] Update plan --- docs/design/cluster_architecture.md | 110 ++-------------------------- 1 file changed, 8 insertions(+), 102 deletions(-) diff --git a/docs/design/cluster_architecture.md b/docs/design/cluster_architecture.md index ae124f99b..e90aeccff 100644 --- a/docs/design/cluster_architecture.md +++ b/docs/design/cluster_architecture.md @@ -16,8 +16,7 @@ data.shape = (9, 24, ...) # 9 clusters × 24 timesteps each **Why True Dimensions?** 1. **Temporal constraints just work** - `x[:, 1:] - x[:, :-1]` naturally stays within clusters 2. **No boundary masking** - StorageModel, StatusModel constraints are clean and vectorized -3. 
**Variable segment durations supported** - `timestep_duration[cluster, time]` handles different segment lengths -4. **Plotting trivial** - existing `facet_col='cluster'` works automatically +3. **Plotting trivial** - existing `facet_col='cluster'` works automatically ### Document Scope @@ -25,7 +24,7 @@ data.shape = (9, 24, ...) # 9 clusters × 24 timesteps each 2. Architectural options and recommendation (Part 2) 3. Impact on Features - StatusModel, StorageModel, etc. (Part 3) 4. Plotting improvements (Part 4) -5. Variable segment durations (Part 5) +5. Future considerations (Part 5) 6. Implementation roadmap (Part 6) --- @@ -380,84 +379,15 @@ def cluster_heatmap(self, variable): --- -## Part 5: Variable Segment Durations (Future) +## Part 5: Future Considerations -### 5.1 Clarification: Variable Durations, NOT Variable Counts +### 5.1 Variable Segment Durations (Out of Scope) -With true `(cluster, time)` dimensions: -- **Same number** of timesteps per cluster (required for rectangular array) -- **Different durations** per timestep within each cluster (via `timestep_duration`) +tsam supports intra-period segmentation with variable segment durations. This could be supported in the future via: +- Integer-based `time` index (0, 1, 2, ...) instead of timestamps +- `timestep_duration[cluster, time]` array for variable durations per segment -This is exactly what tsam segmentation provides and what we need. - -### 5.2 TSAM Segmentation Features - -TSAM supports intra-period segmentation: -```python -tsam.TimeSeriesAggregation( - segmentation=True, # Enable subdivision - noSegments=6, # Segments per typical period - segmentRepresentationMethod='meanRepresentation' -) -``` - -**What TSAM provides:** -- Uniform segment count across all typical periods ✅ -- Various representation methods (mean, medoid, distribution) -- Different segment durations per cluster ✅ - -### 5.3 Implementation with True Dimensions - -With `(cluster, time)` dimensions, variable segment durations are trivial: - -```python -# ═══════════════════════════════════════════════════════════════ -# DIMENSION STRUCTURE -# ═══════════════════════════════════════════════════════════════ -data.dims = ('cluster', 'time', 'period', 'scenario') -data.shape = (9, 6, ...) # 9 clusters × 6 segments each - -# ═══════════════════════════════════════════════════════════════ -# VARIABLE SEGMENT DURATIONS - just a 2D array! -# ═══════════════════════════════════════════════════════════════ -timestep_duration = xr.DataArray( - [ - [2, 2, 4, 4, 6, 6], # Cluster 0: short-short-long pattern - [1, 1, 4, 8, 4, 6], # Cluster 1: different pattern - [3, 3, 3, 3, 6, 6], # Cluster 2: uniform start, longer end - ... - ], - dims=['cluster', 'time'], - coords={'cluster': range(9), 'time': range(6)} -) - -# ═══════════════════════════════════════════════════════════════ -# AGGREGATION WEIGHT - combines duration and cluster weight -# ═══════════════════════════════════════════════════════════════ -# cluster_weight shape: (cluster,) - how many days each cluster represents -# timestep_duration shape: (cluster, time) - duration of each segment -aggregation_weight = timestep_duration * cluster_weight # Broadcasting! 
- -# ═══════════════════════════════════════════════════════════════ -# ALL EXISTING CONSTRAINTS JUST WORK -# ═══════════════════════════════════════════════════════════════ -# Storage balance: uses aggregation_weight correctly -# StatusModel: active_hours weighted by aggregation_weight -# Cost calculations: weighted by aggregation_weight -``` - -### 5.4 No Complex Infrastructure Needed - -With true dimensions, segmentation requires **no special infrastructure**: - -| Aspect | With True Dims | -|--------|----------------| -| Different segment durations | Just set `timestep_duration[cluster, time]` | -| Constraint generation | No changes - already works | -| Cost calculations | No changes - uses `aggregation_weight` | -| Plotting | No changes - `cluster` dim exists | - -**The only addition needed:** Update `transform_accessor.cluster()` to accept tsam segmentation parameters and construct the 2D `timestep_duration` array. +**Not implemented in initial version** - the architecture supports it, but it's not a priority. --- @@ -841,27 +771,3 @@ if flow_system.is_clustered: else: print("Not clustered - full time resolution") ``` - -### B.5 Variable Segment Durations (Future) - -```python -# ═══════════════════════════════════════════════════════════════ -# timestep_duration varies per (cluster, time) -# ═══════════════════════════════════════════════════════════════ -timestep_duration = xr.DataArray( - [ - [2, 2, 4, 4, 6, 6], # Cluster 0: short-short-long pattern - [1, 1, 4, 8, 4, 6], # Cluster 1: different pattern - ... - ], - dims=['cluster', 'time'], - coords={'cluster': range(9), 'time': range(6)} -) - -# aggregation_weight combines duration and cluster importance -aggregation_weight = timestep_duration * cluster_weight -# Shape: (cluster, time) × (cluster,) → (cluster, time) via broadcasting - -# All constraints use aggregation_weight for proper weighting -total_cost = (cost_per_timestep * aggregation_weight).sum(dim=['cluster', 'time']) -``` From 06ad42b93a139f1fc37fb919669dbabf137428fa Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 25 Dec 2025 22:31:41 +0100 Subject: [PATCH 136/191] Update plan I've completed the infrastructure for true (cluster, time) dimensions: Completed 1. Added is_clustered property to FlowSystem - checks if clustering is set 2. Added timesteps_per_cluster alias to Clustering class 3. Added n_timesteps property to FlowSystem 4. Updated coords property - conditionally returns (cluster, time) structure when _use_true_cluster_dims is True 5. Updated get_coords in structure.py - handles extra_timestep for clustered mode 6. Added _combine_slices_to_dataarray_2d helper in transform_accessor What Remains The main blocker for full implementation is FlowSystem.from_dataset(): - Currently expects a flat DatetimeIndex for time - With true 2D dimensions, we have integer (cluster, time) coordinates - This requires changes to how the FlowSystem is constructed from clustered data Options to complete: 1. Bypass from_dataset - Create clustered FlowSystem directly without the dataset round-trip 2. Extend from_dataset - Add support for clustered datasets with 2D time structure 3. 
Keep synthetic flat time - Store flat DatetimeIndex in timesteps, but use 2D for variables/constraints --- flixopt/clustering/base.py | 10 +++++- flixopt/flow_system.py | 57 ++++++++++++++++++++++++++++++++++- flixopt/structure.py | 10 ++++-- flixopt/transform_accessor.py | 49 ++++++++++++++++++++++++++++++ 4 files changed, 122 insertions(+), 4 deletions(-) diff --git a/flixopt/clustering/base.py b/flixopt/clustering/base.py index 81c2b0bfc..fcc47b772 100644 --- a/flixopt/clustering/base.py +++ b/flixopt/clustering/base.py @@ -964,7 +964,15 @@ def n_original_periods(self) -> int: @property def timesteps_per_period(self) -> int: - """Number of timesteps in each period/cluster.""" + """Number of timesteps in each period/cluster. + + Alias for :attr:`timesteps_per_cluster`. + """ + return self.timesteps_per_cluster + + @property + def timesteps_per_cluster(self) -> int: + """Number of timesteps in each cluster.""" if self.result.cluster_structure is None: raise ValueError('No cluster_structure available') return self.result.cluster_structure.timesteps_per_cluster diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 40ab95b48..8178e6c49 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -1839,13 +1839,51 @@ def storages(self) -> ElementContainer[Storage]: @property def coords(self) -> dict[FlowSystemDimensions, pd.Index]: - active_coords = {'time': self.timesteps} + """Active coordinates for variable creation. + + Returns a dict of dimension names to coordinate arrays. When clustered + with true dimensions enabled, includes 'cluster' dimension before 'time'. + + Returns: + Dict mapping dimension names to coordinate arrays. + """ + if self.is_clustered and self._use_true_cluster_dims: + # True (cluster, time) dimensions + active_coords = { + 'cluster': pd.Index(range(self.clustering.n_clusters), name='cluster'), + 'time': pd.Index(range(self.clustering.timesteps_per_cluster), name='time'), + } + else: + active_coords = {'time': self.timesteps} + if self.periods is not None: active_coords['period'] = self.periods if self.scenarios is not None: active_coords['scenario'] = self.scenarios return active_coords + @property + def _use_true_cluster_dims(self) -> bool: + """Check if true (cluster, time) dimensions should be used. + + This enables the new 2D cluster structure. Currently checks if the + clustered data is stored with (cluster, time) dimensions. + """ + if not self.is_clustered: + return False + # Check if the clustered data has 2D structure + # This is indicated by 'cluster' being a dimension in the dataset + if hasattr(self, '_clustered_data') and self._clustered_data is not None: + return 'cluster' in self._clustered_data.dims + return False + + @property + def n_timesteps(self) -> int: + """Number of timesteps (within each cluster if clustered).""" + if self.is_clustered: + return self.clustering.timesteps_per_cluster + return len(self.timesteps) + @property def used_in_calculation(self) -> bool: return self._used_in_optimization @@ -1925,6 +1963,23 @@ def aggregation_weight(self) -> xr.DataArray: """ return self.timestep_duration * self.cluster_weight + @property + def is_clustered(self) -> bool: + """Check if this FlowSystem uses time series clustering. + + Returns: + True if the FlowSystem was created with transform.cluster(), + False otherwise. 
+ + Example: + >>> fs_clustered = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D') + >>> fs_clustered.is_clustered + True + >>> flow_system.is_clustered + False + """ + return getattr(self, 'clustering', None) is not None + def _validate_scenario_parameter(self, value: bool | list[str], param_name: str, element_type: str) -> None: """ Validate scenario parameter value. diff --git a/flixopt/structure.py b/flixopt/structure.py index d401451c1..89e75d2bd 100644 --- a/flixopt/structure.py +++ b/flixopt/structure.py @@ -367,7 +367,8 @@ def get_coords( Args: dims: The dimensions to include in the coordinates. If None, includes all dimensions - extra_timestep: If True, uses extra timesteps instead of regular timesteps + extra_timestep: If True, uses extra timesteps instead of regular timesteps. + For clustered FlowSystems, extends time by 1 (for charge_state boundaries). Returns: The coordinates of the model, or None if no coordinates are available @@ -384,7 +385,12 @@ def get_coords( coords = {k: v for k, v in self.flow_system.coords.items() if k in dims} if extra_timestep and coords: - coords['time'] = self.flow_system.timesteps_extra + if self.flow_system.is_clustered: + # For clustered: extend time by 1 within each cluster (for charge_state) + n_time = self.flow_system.clustering.timesteps_per_cluster + 1 + coords['time'] = pd.Index(range(n_time), name='time') + else: + coords['time'] = self.flow_system.timesteps_extra return xr.Coordinates(coords) if coords else None diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index a209ce4ab..687db5d18 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -721,6 +721,7 @@ def cluster( actual_n_clusters = len(first_tsam.clusterPeriodNoOccur) # Create new time index (needed for weights and typical periods) + # Currently using flat time dimension - true (cluster, time) dims to be implemented new_time_index = pd.date_range( start=self._fs.timesteps[0], periods=n_reduced_timesteps, freq=pd.Timedelta(hours=dt) ) @@ -973,6 +974,54 @@ def _combine_slices_to_dataarray_generic( return result.rename(name) + @staticmethod + def _combine_slices_to_dataarray_2d( + slices: dict[tuple, xr.DataArray], + original_da: xr.DataArray, + cluster_coords: np.ndarray, + time_coords: np.ndarray, + periods: list, + scenarios: list, + ) -> xr.DataArray: + """Combine per-(period, scenario) slices into a multi-dimensional DataArray with (cluster, time) dims. + + Args: + slices: Dict mapping (period, scenario) tuples to DataArrays with (cluster, time) dims. + original_da: Original DataArray to get attrs from. + cluster_coords: Cluster coordinate values. + time_coords: Within-cluster time coordinate values. + periods: List of period labels ([None] if no periods dimension). + scenarios: List of scenario labels ([None] if no scenarios dimension). + + Returns: + DataArray with dimensions (cluster, time, period?, scenario?). 
+ """ + first_key = (periods[0], scenarios[0]) + has_periods = periods != [None] + has_scenarios = scenarios != [None] + + # Simple case: no period/scenario dimensions + if not has_periods and not has_scenarios: + return slices[first_key].assign_attrs(original_da.attrs) + + # Multi-dimensional: use xr.concat to stack along period/scenario dims + if has_periods and has_scenarios: + # Stack scenarios first, then periods + period_arrays = [] + for p in periods: + scenario_arrays = [slices[(p, s)] for s in scenarios] + period_arrays.append(xr.concat(scenario_arrays, dim=pd.Index(scenarios, name='scenario'))) + result = xr.concat(period_arrays, dim=pd.Index(periods, name='period')) + elif has_periods: + result = xr.concat([slices[(p, None)] for p in periods], dim=pd.Index(periods, name='period')) + else: + result = xr.concat([slices[(None, s)] for s in scenarios], dim=pd.Index(scenarios, name='scenario')) + + # Put cluster and time first (standard order for clustered data) + result = result.transpose('cluster', 'time', ...) + + return result.assign_attrs(original_da.attrs) + def expand_solution(self) -> FlowSystem: """Expand a reduced (clustered) FlowSystem back to full original timesteps. From 934c97dfdb065e10dfe31b7862b97e0d1e6b272f Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 25 Dec 2025 22:45:58 +0100 Subject: [PATCH 137/191] Temp --- flixopt/core.py | 4 +- flixopt/flow_system.py | 100 +++++++++++++++++++++++++++++----- flixopt/structure.py | 4 +- flixopt/transform_accessor.py | 98 ++++++++++++++++++++++----------- 4 files changed, 158 insertions(+), 48 deletions(-) diff --git a/flixopt/core.py b/flixopt/core.py index a14aa6654..fdcab029b 100644 --- a/flixopt/core.py +++ b/flixopt/core.py @@ -522,7 +522,9 @@ def _validate_and_prepare_target_coordinates( coord_index = coord_index.rename(dim_name) # Special validation for time dimensions (common pattern) - if dim_name == 'time' and not isinstance(coord_index, pd.DatetimeIndex): + # Allow integer indices when 'cluster' dimension is present (clustered mode) + has_cluster_dim = 'cluster' in coords + if dim_name == 'time' and not isinstance(coord_index, pd.DatetimeIndex) and not has_cluster_dim: raise ConversionError( f'Dimension named "time" should use DatetimeIndex for proper ' f'time-series functionality, got {type(coord_index).__name__}' diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 8178e6c49..5a1619154 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -658,6 +658,10 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: the solution will be restored to the FlowSystem. Solution time coordinates are renamed back from 'solution_time' to 'time'. + Supports clustered datasets with (cluster, time) dimensions. When detected, + creates a synthetic DatetimeIndex for compatibility and stores the clustered + data structure for later use. 
+ Args: ds: Dataset containing the FlowSystem data @@ -682,9 +686,39 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: # Create arrays dictionary from config variables only arrays_dict = config_vars + # Detect clustered dataset with (cluster, time) dimensions + is_clustered_dataset = 'cluster' in ds.dims and reference_structure.get('is_clustered', False) + + if is_clustered_dataset: + # Clustered dataset: create synthetic DatetimeIndex + n_clusters = ds.sizes['cluster'] + timesteps_per_cluster = ds.sizes['time'] + n_total_timesteps = n_clusters * timesteps_per_cluster + timestep_duration_hours = reference_structure.get('timestep_duration', 1.0) + + # Create synthetic DatetimeIndex for compatibility + synthetic_timesteps = pd.date_range( + start='2000-01-01', + periods=n_total_timesteps, + freq=pd.Timedelta(hours=timestep_duration_hours), + name='time', + ) + + # cluster_weight for clustered mode is (cluster,) shaped - don't pass to constructor + # It will be set separately after FlowSystem creation + cluster_weight_for_constructor = None + else: + # Regular dataset: use time index directly + synthetic_timesteps = ds.indexes['time'] + cluster_weight_for_constructor = ( + cls._resolve_dataarray_reference(reference_structure['cluster_weight'], arrays_dict) + if 'cluster_weight' in reference_structure + else None + ) + # Create FlowSystem instance with constructor parameters flow_system = cls( - timesteps=ds.indexes['time'], + timesteps=synthetic_timesteps, periods=ds.indexes.get('period'), scenarios=ds.indexes.get('scenario'), hours_of_last_timestep=reference_structure.get('hours_of_last_timestep'), @@ -693,14 +727,30 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: scenario_weights=cls._resolve_dataarray_reference(reference_structure['scenario_weights'], arrays_dict) if 'scenario_weights' in reference_structure else None, - cluster_weight=cls._resolve_dataarray_reference(reference_structure['cluster_weight'], arrays_dict) - if 'cluster_weight' in reference_structure - else None, + cluster_weight=cluster_weight_for_constructor, scenario_independent_sizes=reference_structure.get('scenario_independent_sizes', True), scenario_independent_flow_rates=reference_structure.get('scenario_independent_flow_rates', False), name=reference_structure.get('name'), ) + # For clustered datasets, store the 2D data and set up cluster structure + if is_clustered_dataset: + flow_system._clustered_data = ds + flow_system._cluster_info = { + 'n_clusters': n_clusters, + 'timesteps_per_cluster': timesteps_per_cluster, + 'timestep_duration': timestep_duration_hours, + } + # Override timestep_duration to have correct shape for 2D cluster structure + # Shape: (time,) = (timesteps_per_cluster,) - broadcasts with (cluster, time) + flow_system.timestep_duration = xr.DataArray( + np.full(timesteps_per_cluster, timestep_duration_hours), + dims=['time'], + coords={'time': np.arange(timesteps_per_cluster)}, + name='timestep_duration', + ) + # cluster_weight will be set after Clustering object is attached + # Restore components components_structure = reference_structure.get('components', {}) for comp_label, comp_data in components_structure.items(): @@ -1847,11 +1897,13 @@ def coords(self) -> dict[FlowSystemDimensions, pd.Index]: Returns: Dict mapping dimension names to coordinate arrays. 
""" - if self.is_clustered and self._use_true_cluster_dims: + if self._use_true_cluster_dims: # True (cluster, time) dimensions + n_clusters = self._cluster_n_clusters + timesteps_per_cluster = self._cluster_timesteps_per_cluster active_coords = { - 'cluster': pd.Index(range(self.clustering.n_clusters), name='cluster'), - 'time': pd.Index(range(self.clustering.timesteps_per_cluster), name='time'), + 'cluster': pd.Index(range(n_clusters), name='cluster'), + 'time': pd.Index(range(timesteps_per_cluster), name='time'), } else: active_coords = {'time': self.timesteps} @@ -1866,17 +1918,39 @@ def coords(self) -> dict[FlowSystemDimensions, pd.Index]: def _use_true_cluster_dims(self) -> bool: """Check if true (cluster, time) dimensions should be used. - This enables the new 2D cluster structure. Currently checks if the - clustered data is stored with (cluster, time) dimensions. + This enables the new 2D cluster structure. Returns True if: + 1. The FlowSystem has _clustered_data with 'cluster' dimension, OR + 2. The FlowSystem has _cluster_info set (from from_dataset) + + Note: This can be True even before clustering is fully set up, + to allow variable creation with correct dimensions. """ - if not self.is_clustered: - return False - # Check if the clustered data has 2D structure - # This is indicated by 'cluster' being a dimension in the dataset + # Check for 2D clustered data structure if hasattr(self, '_clustered_data') and self._clustered_data is not None: return 'cluster' in self._clustered_data.dims + # Check for cluster info from from_dataset + if hasattr(self, '_cluster_info') and self._cluster_info is not None: + return True return False + @property + def _cluster_n_clusters(self) -> int | None: + """Get number of clusters from cluster info or clustering object.""" + if hasattr(self, '_cluster_info') and self._cluster_info is not None: + return self._cluster_info['n_clusters'] + if self.is_clustered: + return self.clustering.n_clusters + return None + + @property + def _cluster_timesteps_per_cluster(self) -> int | None: + """Get timesteps per cluster from cluster info or clustering object.""" + if hasattr(self, '_cluster_info') and self._cluster_info is not None: + return self._cluster_info['timesteps_per_cluster'] + if self.is_clustered: + return self.clustering.timesteps_per_cluster + return None + @property def n_timesteps(self) -> int: """Number of timesteps (within each cluster if clustered).""" diff --git a/flixopt/structure.py b/flixopt/structure.py index 89e75d2bd..52aecc920 100644 --- a/flixopt/structure.py +++ b/flixopt/structure.py @@ -385,9 +385,9 @@ def get_coords( coords = {k: v for k, v in self.flow_system.coords.items() if k in dims} if extra_timestep and coords: - if self.flow_system.is_clustered: + if self.flow_system._use_true_cluster_dims: # For clustered: extend time by 1 within each cluster (for charge_state) - n_time = self.flow_system.clustering.timesteps_per_cluster + 1 + n_time = self.flow_system._cluster_timesteps_per_cluster + 1 coords['time'] = pd.Index(range(n_time), name='time') else: coords['time'] = self.flow_system.timesteps_extra diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 687db5d18..c43565e78 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -720,55 +720,77 @@ def cluster( n_reduced_timesteps = len(first_tsam.typicalPeriods) actual_n_clusters = len(first_tsam.clusterPeriodNoOccur) - # Create new time index (needed for weights and typical periods) - # Currently using flat time 
dimension - true (cluster, time) dims to be implemented - new_time_index = pd.date_range( - start=self._fs.timesteps[0], periods=n_reduced_timesteps, freq=pd.Timedelta(hours=dt) - ) - - # Create timestep weights from cluster occurrences (per period/scenario) - def _build_weights_for_key(key: tuple) -> xr.DataArray: + # ═══════════════════════════════════════════════════════════════════════ + # TRUE (cluster, time) DIMENSIONS + # ═══════════════════════════════════════════════════════════════════════ + # Create coordinates for the 2D cluster structure + cluster_coords = np.arange(actual_n_clusters) + time_coords = np.arange(timesteps_per_cluster) # Integer indices within cluster + + # Create cluster_weight: shape (cluster,) - one weight per cluster + # This is the number of original periods each cluster represents + def _build_cluster_weight_for_key(key: tuple) -> xr.DataArray: occurrences = cluster_occurrences_all[key] - weights = np.repeat([occurrences.get(c, 1) for c in range(actual_n_clusters)], timesteps_per_cluster) - return xr.DataArray(weights, dims=['time'], coords={'time': new_time_index}) - - # Build weights - use _combine_slices_to_dataarray for consistent multi-dim handling - weights_slices = {key: _build_weights_for_key(key) for key in cluster_occurrences_all} - # Create a dummy 1D DataArray as template for _combine_slices_to_dataarray - dummy_template = xr.DataArray(np.zeros(n_reduced_timesteps), dims=['time']) - timestep_weights = self._combine_slices_to_dataarray( - weights_slices, dummy_template, new_time_index, periods, scenarios + weights = np.array([occurrences.get(c, 1) for c in range(actual_n_clusters)]) + return xr.DataArray(weights, dims=['cluster'], coords={'cluster': cluster_coords}) + + # Build cluster_weight - use _combine_slices_to_dataarray_generic for multi-dim handling + weight_slices = {key: _build_cluster_weight_for_key(key) for key in cluster_occurrences_all} + cluster_weight = self._combine_slices_to_dataarray_generic( + weight_slices, ['cluster'], periods, scenarios, 'cluster_weight' ) - logger.info(f'Reduced from {len(self._fs.timesteps)} to {n_reduced_timesteps} timesteps') + logger.info( + f'Reduced from {len(self._fs.timesteps)} to {actual_n_clusters} clusters × {timesteps_per_cluster} timesteps' + ) logger.info(f'Clusters: {actual_n_clusters} (requested: {n_clusters})') - # Build typical periods DataArrays keyed by (variable_name, (period, scenario)) + # Build typical periods DataArrays with (cluster, time) shape typical_das: dict[str, dict[tuple, xr.DataArray]] = {} for key, tsam_agg in tsam_results.items(): typical_df = tsam_agg.typicalPeriods for col in typical_df.columns: + # Reshape flat data to (cluster, time) + flat_data = typical_df[col].values + reshaped = flat_data.reshape(actual_n_clusters, timesteps_per_cluster) typical_das.setdefault(col, {})[key] = xr.DataArray( - typical_df[col].values, dims=['time'], coords={'time': new_time_index} + reshaped, + dims=['cluster', 'time'], + coords={'cluster': cluster_coords, 'time': time_coords}, ) - # Build reduced dataset + # Build reduced dataset with (cluster, time) dimensions all_keys = {(p, s) for p in periods for s in scenarios} ds_new_vars = {} for name, original_da in ds.data_vars.items(): if 'time' not in original_da.dims: ds_new_vars[name] = original_da.copy() elif name not in typical_das or set(typical_das[name].keys()) != all_keys: - # Time-dependent but constant (or not present in all clustering results): slice to new time length - ds_new_vars[name] = original_da.isel(time=slice(0, 
n_reduced_timesteps)).assign_coords( - time=new_time_index + # Time-dependent but constant: reshape to (cluster, time, ...) + sliced = original_da.isel(time=slice(0, n_reduced_timesteps)) + # Get the shape - time is first, other dims follow + other_dims = [d for d in sliced.dims if d != 'time'] + other_shape = [sliced.sizes[d] for d in other_dims] + # Reshape: (n_reduced_timesteps, ...) -> (n_clusters, timesteps_per_cluster, ...) + new_shape = [actual_n_clusters, timesteps_per_cluster] + other_shape + reshaped = sliced.values.reshape(new_shape) + # Build coords + new_coords = {'cluster': cluster_coords, 'time': time_coords} + for dim in other_dims: + new_coords[dim] = sliced.coords[dim].values + ds_new_vars[name] = xr.DataArray( + reshaped, + dims=['cluster', 'time'] + other_dims, + coords=new_coords, + attrs=original_da.attrs, ) else: - # Time-varying: combine per-(period, scenario) slices - da = self._combine_slices_to_dataarray( + # Time-varying: combine per-(period, scenario) slices with (cluster, time) dims + da = self._combine_slices_to_dataarray_2d( slices=typical_das[name], original_da=original_da, - new_time_index=new_time_index, + cluster_coords=cluster_coords, + time_coords=time_coords, periods=periods, scenarios=scenarios, ) @@ -779,12 +801,12 @@ def _build_weights_for_key(key: tuple) -> xr.DataArray: ds_new = xr.Dataset(ds_new_vars, attrs=ds.attrs) ds_new.attrs['timesteps_per_cluster'] = timesteps_per_cluster ds_new.attrs['timestep_duration'] = dt + ds_new.attrs['n_clusters'] = actual_n_clusters + ds_new.attrs['is_clustered'] = True reduced_fs = FlowSystem.from_dataset(ds_new) - # Set cluster_weight - might have period/scenario dimensions - reduced_fs.cluster_weight = reduced_fs.fit_to_model_coords( - 'cluster_weight', timestep_weights, dims=['scenario', 'period', 'time'] - ) + # Set cluster_weight - shape (cluster,) possibly with period/scenario dimensions + reduced_fs.cluster_weight = cluster_weight # Remove 'equals_final' from storages - doesn't make sense on reduced timesteps # Set to None so initial SOC is free (handled by storage_mode constraints) @@ -863,10 +885,22 @@ def _build_cluster_occurrences_for_key(key: tuple) -> np.ndarray: timesteps_per_cluster=timesteps_per_cluster, ) + # Create representative_weights in flat format for ClusterResult compatibility + # This repeats each cluster's weight for all timesteps within that cluster + def _build_flat_weights_for_key(key: tuple) -> xr.DataArray: + occurrences = cluster_occurrences_all[key] + weights = np.repeat([occurrences.get(c, 1) for c in range(actual_n_clusters)], timesteps_per_cluster) + return xr.DataArray(weights, dims=['time'], name='representative_weights') + + flat_weights_slices = {key: _build_flat_weights_for_key(key) for key in cluster_occurrences_all} + representative_weights = self._combine_slices_to_dataarray_generic( + flat_weights_slices, ['time'], periods, scenarios, 'representative_weights' + ) + aggregation_result = ClusterResult( timestep_mapping=timestep_mapping_da, n_representatives=n_reduced_timesteps, - representative_weights=timestep_weights.rename('representative_weights'), + representative_weights=representative_weights, cluster_structure=cluster_structure, original_data=ds, aggregated_data=ds_new, From 949c903c7c24e06dc694b726aed4cc525d045947 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 25 Dec 2025 22:51:57 +0100 Subject: [PATCH 138/191] Temp --- flixopt/elements.py | 13 ++++++++++--- flixopt/features.py | 14 +++++++++++--- 2 files changed, 
21 insertions(+), 6 deletions(-) diff --git a/flixopt/elements.py b/flixopt/elements.py index 608b6ac70..83a159089 100644 --- a/flixopt/elements.py +++ b/flixopt/elements.py @@ -677,10 +677,13 @@ def _do_modeling(self): self._constraint_flow_rate() # Total flow hours tracking (per period) + # Sum over all temporal dimensions (time, and cluster if present) + weighted_flow = self.flow_rate * self._model.aggregation_weight + temporal_dims = [d for d in weighted_flow.dims if d not in ('period', 'scenario')] ModelingPrimitives.expression_tracking_variable( model=self, name=f'{self.label_full}|total_flow_hours', - tracked_expression=(self.flow_rate * self._model.aggregation_weight).sum('time'), + tracked_expression=weighted_flow.sum(temporal_dims), bounds=( self.element.flow_hours_min if self.element.flow_hours_min is not None else 0, self.element.flow_hours_max if self.element.flow_hours_max is not None else None, @@ -837,9 +840,13 @@ def _create_bounds_for_load_factor(self): # Get the size (either from element or investment) size = self.investment.size if self.with_investment else self.element.size + # Sum over all temporal dimensions (time, and cluster if present) + temporal_dims = [d for d in self._model.aggregation_weight.dims if d not in ('period', 'scenario')] + total_hours = self._model.aggregation_weight.sum(temporal_dims) + # Maximum load factor constraint if self.element.load_factor_max is not None: - flow_hours_per_size_max = self._model.aggregation_weight.sum('time') * self.element.load_factor_max + flow_hours_per_size_max = total_hours * self.element.load_factor_max self.add_constraints( self.total_flow_hours <= size * flow_hours_per_size_max, short_name='load_factor_max', @@ -847,7 +854,7 @@ def _create_bounds_for_load_factor(self): # Minimum load factor constraint if self.element.load_factor_min is not None: - flow_hours_per_size_min = self._model.aggregation_weight.sum('time') * self.element.load_factor_min + flow_hours_per_size_min = total_hours * self.element.load_factor_min self.add_constraints( self.total_flow_hours >= size * flow_hours_per_size_min, short_name='load_factor_min', diff --git a/flixopt/features.py b/flixopt/features.py index 75cb3d92c..da98821ec 100644 --- a/flixopt/features.py +++ b/flixopt/features.py @@ -197,14 +197,20 @@ def _do_modeling(self): self.add_constraints(self.status + inactive == 1, short_name='complementary') # 3. 
Total duration tracking using existing pattern + # Sum over all temporal dimensions (time, and cluster if present) + weighted_status = self.status * self._model.aggregation_weight + temporal_dims = [d for d in weighted_status.dims if d not in ('period', 'scenario')] + agg_weight_sum = self._model.aggregation_weight.sum(temporal_dims) ModelingPrimitives.expression_tracking_variable( self, - tracked_expression=(self.status * self._model.aggregation_weight).sum('time'), + tracked_expression=weighted_status.sum(temporal_dims), bounds=( self.parameters.active_hours_min if self.parameters.active_hours_min is not None else 0, self.parameters.active_hours_max if self.parameters.active_hours_max is not None - else self._model.aggregation_weight.sum('time').max().item(), + else agg_weight_sum.max().item() + if hasattr(agg_weight_sum, 'max') + else agg_weight_sum, ), short_name='active_hours', coords=['period', 'scenario'], @@ -232,7 +238,9 @@ def _do_modeling(self): coords=self._model.get_coords(('period', 'scenario')), short_name='startup_count', ) - self.add_constraints(count == self.startup.sum('time'), short_name='startup_count') + # Sum over all temporal dimensions (time, and cluster if present) + startup_temporal_dims = [d for d in self.startup.dims if d not in ('period', 'scenario')] + self.add_constraints(count == self.startup.sum(startup_temporal_dims), short_name='startup_count') # 5. Consecutive active duration (uptime) using existing pattern if self.parameters.use_uptime_tracking: From 923cd4747d6499d995fb0df050b8b891b085ec40 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 25 Dec 2025 22:54:20 +0100 Subject: [PATCH 139/191] Temp --- flixopt/features.py | 5 ++++- flixopt/structure.py | 7 ++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/flixopt/features.py b/flixopt/features.py index da98821ec..b7a457f63 100644 --- a/flixopt/features.py +++ b/flixopt/features.py @@ -629,7 +629,10 @@ def _do_modeling(self): self._eq_total_per_timestep = self.add_constraints(self.total_per_timestep == 0, short_name='per_timestep') # Add it to the total (cluster_weight handles cluster representation, defaults to 1.0) - self._eq_total.lhs -= (self.total_per_timestep * self._model.cluster_weight).sum(dim='time') + # Sum over all temporal dimensions (time, and cluster if present) + weighted_per_timestep = self.total_per_timestep * self._model.cluster_weight + temporal_dims = [d for d in weighted_per_timestep.dims if d not in ('period', 'scenario')] + self._eq_total.lhs -= weighted_per_timestep.sum(dim=temporal_dims) def add_share( self, diff --git a/flixopt/structure.py b/flixopt/structure.py index 52aecc920..9f74d5665 100644 --- a/flixopt/structure.py +++ b/flixopt/structure.py @@ -382,7 +382,12 @@ def get_coords( if dims is None: coords = dict(self.flow_system.coords) else: - coords = {k: v for k, v in self.flow_system.coords.items() if k in dims} + # In clustered systems, 'time' is always paired with 'cluster' + # So when 'time' is requested, also include 'cluster' if available + effective_dims = set(dims) + if 'time' in dims and self.flow_system._use_true_cluster_dims: + effective_dims.add('cluster') + coords = {k: v for k, v in self.flow_system.coords.items() if k in effective_dims} if extra_timestep and coords: if self.flow_system._use_true_cluster_dims: From c18a126a93e8e1cff9584bd442926fb6d97ec02b Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 25 Dec 2025 23:03:24 +0100 Subject: [PATCH 
140/191] Temp --- flixopt/clustering/base.py | 23 ++++++++++++++++-- flixopt/statistics_accessor.py | 5 +++- flixopt/structure.py | 6 +++-- tests/test_cluster_reduce_expand.py | 37 ++++++++++++++++++----------- 4 files changed, 52 insertions(+), 19 deletions(-) diff --git a/flixopt/clustering/base.py b/flixopt/clustering/base.py index fcc47b772..1470f0634 100644 --- a/flixopt/clustering/base.py +++ b/flixopt/clustering/base.py @@ -368,11 +368,20 @@ def expand_data(self, aggregated: xr.DataArray, original_time: xr.DataArray | No timestep_mapping = self.timestep_mapping has_periods = 'period' in timestep_mapping.dims has_scenarios = 'scenario' in timestep_mapping.dims + has_cluster_dim = 'cluster' in aggregated.dims # Simple case: no period/scenario dimensions if not has_periods and not has_scenarios: mapping = timestep_mapping.values - expanded_values = aggregated.values[mapping] + if has_cluster_dim: + # 2D cluster structure: convert flat indices to (cluster, time_within) + # n_clusters = aggregated.sizes['cluster'] + timesteps_per_cluster = aggregated.sizes['time'] + cluster_ids = mapping // timesteps_per_cluster + time_within = mapping % timesteps_per_cluster + expanded_values = aggregated.values[cluster_ids, time_within] + else: + expanded_values = aggregated.values[mapping] return xr.DataArray( expanded_values, coords={'time': original_time}, @@ -403,7 +412,17 @@ def expand_data(self, aggregated: xr.DataArray, original_time: xr.DataArray | No selector['scenario'] = s slice_da = aggregated.sel(**selector, drop=True) if selector else aggregated - expanded = slice_da.isel(time=xr.DataArray(mapping, dims=['time'])) + + if has_cluster_dim: + # 2D cluster structure: convert flat indices to (cluster, time_within) + _n_clusters = slice_da.sizes['cluster'] + timesteps_per_cluster = slice_da.sizes['time'] + cluster_ids = mapping // timesteps_per_cluster + time_within = mapping % timesteps_per_cluster + expanded_values = slice_da.values[cluster_ids, time_within] + expanded = xr.DataArray(expanded_values, dims=['time']) + else: + expanded = slice_da.isel(time=xr.DataArray(mapping, dims=['time'])) expanded_slices[(p, s)] = expanded.assign_coords(time=original_time) # Recombine slices using xr.concat diff --git a/flixopt/statistics_accessor.py b/flixopt/statistics_accessor.py index 572363be8..17ca6cb42 100644 --- a/flixopt/statistics_accessor.py +++ b/flixopt/statistics_accessor.py @@ -785,8 +785,11 @@ def get_contributor_type(contributor: str) -> str: if label in solution: da = solution[label] * factor # For total mode, sum temporal over time (apply cluster_weight for proper weighting) + # Sum over all temporal dimensions (time, and cluster if present) if mode == 'total' and current_mode == 'temporal' and 'time' in da.dims: - da = (da * self._fs.cluster_weight).sum('time') + weighted = da * self._fs.cluster_weight + temporal_dims = [d for d in weighted.dims if d not in ('period', 'scenario')] + da = weighted.sum(temporal_dims) if share_total is None: share_total = da else: diff --git a/flixopt/structure.py b/flixopt/structure.py index 9f74d5665..bcd23e3fd 100644 --- a/flixopt/structure.py +++ b/flixopt/structure.py @@ -290,8 +290,10 @@ def solution(self): } # Ensure solution is always indexed by timesteps_extra for consistency. # Variables without extra timestep data will have NaN at the final timestep. 
- if 'time' in solution.coords and not solution.indexes['time'].equals(self.flow_system.timesteps_extra): - solution = solution.reindex(time=self.flow_system.timesteps_extra) + # Skip reindexing for clustered systems which use integer time indices + if 'time' in solution.coords and not self.flow_system._use_true_cluster_dims: + if not solution.indexes['time'].equals(self.flow_system.timesteps_extra): + solution = solution.reindex(time=self.flow_system.timesteps_extra) return solution @property diff --git a/tests/test_cluster_reduce_expand.py b/tests/test_cluster_reduce_expand.py index 9b8095422..806d545a9 100644 --- a/tests/test_cluster_reduce_expand.py +++ b/tests/test_cluster_reduce_expand.py @@ -127,12 +127,17 @@ def test_expand_solution_maps_values_correctly(solver_fixture, timesteps_8_days) orig_start = orig_segment_idx * timesteps_per_cluster orig_end = orig_start + timesteps_per_cluster - typical_start = cluster_id * timesteps_per_cluster - typical_end = typical_start + timesteps_per_cluster - # Values in the expanded solution for this original segment # should match the reduced solution for the corresponding typical cluster - expected = reduced_flow[typical_start:typical_end] + # With 2D cluster structure, use cluster_id to index the cluster dimension + if reduced_flow.ndim == 2: + # 2D structure: (cluster, time) + expected = reduced_flow[cluster_id, :] + else: + # Flat structure: (time,) + typical_start = cluster_id * timesteps_per_cluster + typical_end = typical_start + timesteps_per_cluster + expected = reduced_flow[typical_start:typical_end] actual = expanded_flow[orig_start:orig_end] assert_allclose(actual, expected, rtol=1e-10) @@ -178,12 +183,11 @@ def test_expand_solution_statistics_match_clustered(solver_fixture, timesteps_8_ assert_allclose(reduced_total, expanded_total, rtol=1e-6) # Flow hours should also match (need to sum over time with proper weighting) - reduced_flow_hours = ( - (fs_reduced.statistics.flow_hours['Boiler(Q_th)'] * fs_reduced.cluster_weight).sum('time').item() - ) - expanded_flow_hours = ( - (fs_expanded.statistics.flow_hours['Boiler(Q_th)'] * fs_expanded.cluster_weight).sum('time').item() - ) + # With 2D cluster structure, sum over both cluster and time dimensions + reduced_fh = fs_reduced.statistics.flow_hours['Boiler(Q_th)'] * fs_reduced.cluster_weight + reduced_flow_hours = reduced_fh.sum().item() # Sum over all dimensions + expanded_fh = fs_expanded.statistics.flow_hours['Boiler(Q_th)'] * fs_expanded.cluster_weight + expanded_flow_hours = expanded_fh.sum().item() assert_allclose(reduced_flow_hours, expanded_flow_hours, rtol=1e-6) @@ -338,10 +342,15 @@ def test_expand_solution_maps_scenarios_independently(solver_fixture, timesteps_ orig_start = orig_segment_idx * timesteps_per_cluster orig_end = orig_start + timesteps_per_cluster - typical_start = cluster_id * timesteps_per_cluster - typical_end = typical_start + timesteps_per_cluster - - expected = reduced_scenario[typical_start:typical_end] + # With 2D cluster structure, use cluster_id to index the cluster dimension + if reduced_scenario.ndim == 2: + # 2D structure: (cluster, time) + expected = reduced_scenario[cluster_id, :] + else: + # Flat structure: (time,) + typical_start = cluster_id * timesteps_per_cluster + typical_end = typical_start + timesteps_per_cluster + expected = reduced_scenario[typical_start:typical_end] actual = expanded_scenario[orig_start:orig_end] assert_allclose(actual, expected, rtol=1e-10, err_msg=f'Mismatch for scenario {scenario}') From 
11d90e97eef2485d5e6422d1d3f4ba4c007defe9 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 25 Dec 2025 23:08:24 +0100 Subject: [PATCH 141/191] Temp --- flixopt/elements.py | 3 ++- flixopt/features.py | 6 ++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/flixopt/elements.py b/flixopt/elements.py index 83a159089..ba2b72f80 100644 --- a/flixopt/elements.py +++ b/flixopt/elements.py @@ -679,7 +679,8 @@ def _do_modeling(self): # Total flow hours tracking (per period) # Sum over all temporal dimensions (time, and cluster if present) weighted_flow = self.flow_rate * self._model.aggregation_weight - temporal_dims = [d for d in weighted_flow.dims if d not in ('period', 'scenario')] + # Get temporal_dims from aggregation_weight (not weighted_flow which has linopy's _term dim) + temporal_dims = [d for d in self._model.aggregation_weight.dims if d not in ('period', 'scenario')] ModelingPrimitives.expression_tracking_variable( model=self, name=f'{self.label_full}|total_flow_hours', diff --git a/flixopt/features.py b/flixopt/features.py index b7a457f63..e0a018a7f 100644 --- a/flixopt/features.py +++ b/flixopt/features.py @@ -199,7 +199,8 @@ def _do_modeling(self): # 3. Total duration tracking using existing pattern # Sum over all temporal dimensions (time, and cluster if present) weighted_status = self.status * self._model.aggregation_weight - temporal_dims = [d for d in weighted_status.dims if d not in ('period', 'scenario')] + # Get temporal_dims from aggregation_weight (not weighted_status which has linopy's _term dim) + temporal_dims = [d for d in self._model.aggregation_weight.dims if d not in ('period', 'scenario')] agg_weight_sum = self._model.aggregation_weight.sum(temporal_dims) ModelingPrimitives.expression_tracking_variable( self, @@ -631,7 +632,8 @@ def _do_modeling(self): # Add it to the total (cluster_weight handles cluster representation, defaults to 1.0) # Sum over all temporal dimensions (time, and cluster if present) weighted_per_timestep = self.total_per_timestep * self._model.cluster_weight - temporal_dims = [d for d in weighted_per_timestep.dims if d not in ('period', 'scenario')] + # Get temporal_dims from total_per_timestep (linopy Variable) - its coords are the actual dims + temporal_dims = [d for d in self.total_per_timestep.dims if d not in ('period', 'scenario')] self._eq_total.lhs -= weighted_per_timestep.sum(dim=temporal_dims) def add_share( From 4c7aeca598a7123687f98b7116463ccd17ef448a Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 25 Dec 2025 23:26:37 +0100 Subject: [PATCH 142/191] Temp --- flixopt/components.py | 11 ++++++- flixopt/flow_system.py | 54 +++++++++++++++++++++++++++++++++-- flixopt/transform_accessor.py | 8 +++++- 3 files changed, 68 insertions(+), 5 deletions(-) diff --git a/flixopt/components.py b/flixopt/components.py index cb5aa63fe..8fe04e23b 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -8,6 +8,7 @@ from typing import TYPE_CHECKING, Literal import numpy as np +import pandas as pd import xarray as xr from . 
import io as fx_io @@ -1067,7 +1068,15 @@ def _relative_charge_state_bounds(self) -> tuple[xr.DataArray, xr.DataArray]: Returns: Tuple of (minimum_bounds, maximum_bounds) DataArrays extending to final timestep """ - final_coords = {'time': [self._model.flow_system.timesteps_extra[-1]]} + # Get the final time coordinate (DatetimeIndex for both clustered and non-clustered) + if self._model.flow_system._use_true_cluster_dims: + # For clustered systems, add one timestep to the DatetimeIndex + time_coords = self._model.flow_system._cluster_time_coords + dt = self._model.flow_system._cluster_info.get('timestep_duration', 1.0) + final_time = time_coords[-1] + pd.Timedelta(hours=dt) + final_coords = {'time': [final_time]} + else: + final_coords = {'time': [self._model.flow_system.timesteps_extra[-1]]} # Get final minimum charge state if self.element.relative_minimum_final_charge_state is None: diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 5a1619154..4e29385a9 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -743,10 +743,17 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: } # Override timestep_duration to have correct shape for 2D cluster structure # Shape: (time,) = (timesteps_per_cluster,) - broadcasts with (cluster, time) + # Use DatetimeIndex consistent with _cluster_time_coords + time_coords_2d = pd.date_range( + start='2000-01-01', + periods=timesteps_per_cluster, + freq=pd.Timedelta(hours=timestep_duration_hours), + name='time', + ) flow_system.timestep_duration = xr.DataArray( np.full(timesteps_per_cluster, timestep_duration_hours), dims=['time'], - coords={'time': np.arange(timesteps_per_cluster)}, + coords={'time': time_coords_2d}, name='timestep_duration', ) # cluster_weight will be set after Clustering object is attached @@ -1900,10 +1907,10 @@ def coords(self) -> dict[FlowSystemDimensions, pd.Index]: if self._use_true_cluster_dims: # True (cluster, time) dimensions n_clusters = self._cluster_n_clusters - timesteps_per_cluster = self._cluster_timesteps_per_cluster + time_coords = self._cluster_time_coords active_coords = { 'cluster': pd.Index(range(n_clusters), name='cluster'), - 'time': pd.Index(range(timesteps_per_cluster), name='time'), + 'time': time_coords, } else: active_coords = {'time': self.timesteps} @@ -1951,6 +1958,47 @@ def _cluster_timesteps_per_cluster(self) -> int | None: return self.clustering.timesteps_per_cluster return None + @property + def _cluster_time_coords(self) -> pd.DatetimeIndex | None: + """Get time coordinates for clustered system. + + Returns DatetimeIndex for time within cluster (e.g., 00:00-23:00 for daily clustering). 
+ """ + # Try to get from _clustered_data first (has the actual coords) + if hasattr(self, '_clustered_data') and self._clustered_data is not None: + if 'time' in self._clustered_data.coords: + time_coord = self._clustered_data.coords['time'].values + if isinstance(time_coord, np.ndarray) and np.issubdtype(time_coord.dtype, np.datetime64): + return pd.DatetimeIndex(time_coord, name='time') + + # Fall back to generating from _cluster_info + if hasattr(self, '_cluster_info') and self._cluster_info is not None: + timesteps_per_cluster = self._cluster_info['timesteps_per_cluster'] + dt = self._cluster_info.get('timestep_duration', 1.0) + return pd.date_range( + start='2000-01-01', + periods=timesteps_per_cluster, + freq=pd.Timedelta(hours=dt), + name='time', + ) + + # Fall back to clustering object + if self.is_clustered: + timesteps_per_cluster = self.clustering.timesteps_per_cluster + # Try to get dt from timestep_duration + if hasattr(self, 'timestep_duration') and self.timestep_duration is not None: + dt = float(self.timestep_duration.mean()) + else: + dt = 1.0 + return pd.date_range( + start='2000-01-01', + periods=timesteps_per_cluster, + freq=pd.Timedelta(hours=dt), + name='time', + ) + + return None + @property def n_timesteps(self) -> int: """Number of timesteps (within each cluster if clustered).""" diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index c43565e78..af07fd01e 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -725,7 +725,13 @@ def cluster( # ═══════════════════════════════════════════════════════════════════════ # Create coordinates for the 2D cluster structure cluster_coords = np.arange(actual_n_clusters) - time_coords = np.arange(timesteps_per_cluster) # Integer indices within cluster + # Use DatetimeIndex for time within cluster (e.g., 00:00-23:00 for daily clustering) + time_coords = pd.date_range( + start='2000-01-01', + periods=timesteps_per_cluster, + freq=pd.Timedelta(hours=dt), + name='time', + ) # Create cluster_weight: shape (cluster,) - one weight per cluster # This is the number of original periods each cluster represents From 322d3c185ae58f1e8b1062d337383509c9a5870f Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 25 Dec 2025 23:27:35 +0100 Subject: [PATCH 143/191] Temp --- flixopt/structure.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/flixopt/structure.py b/flixopt/structure.py index bcd23e3fd..5ce151385 100644 --- a/flixopt/structure.py +++ b/flixopt/structure.py @@ -394,8 +394,10 @@ def get_coords( if extra_timestep and coords: if self.flow_system._use_true_cluster_dims: # For clustered: extend time by 1 within each cluster (for charge_state) - n_time = self.flow_system._cluster_timesteps_per_cluster + 1 - coords['time'] = pd.Index(range(n_time), name='time') + time_coords = self.flow_system._cluster_time_coords + dt = self.flow_system._cluster_info.get('timestep_duration', 1.0) + final_time = time_coords[-1] + pd.Timedelta(hours=dt) + coords['time'] = time_coords.append(pd.DatetimeIndex([final_time])) else: coords['time'] = self.flow_system.timesteps_extra From 611c752f8d7879d6dbd5888fc0cb3dc26f86f23f Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 25 Dec 2025 23:32:14 +0100 Subject: [PATCH 144/191] Temp --- flixopt/flow_system.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 4e29385a9..16eaf8cc1 100644 
--- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -643,6 +643,18 @@ def to_dataset(self, include_solution: bool = True) -> xr.Dataset: carriers_structure[name] = carrier_ref ds.attrs['carriers'] = json.dumps(carriers_structure) + # Include cluster info for 2D clustered FlowSystems + if self._use_true_cluster_dims: + ds.attrs['is_clustered'] = True + ds.attrs['n_clusters'] = self._cluster_n_clusters + ds.attrs['timesteps_per_cluster'] = self._cluster_timesteps_per_cluster + if hasattr(self, '_cluster_info') and self._cluster_info is not None: + ds.attrs['timestep_duration'] = self._cluster_info.get('timestep_duration', 1.0) + elif hasattr(self, 'timestep_duration') and self.timestep_duration is not None: + ds.attrs['timestep_duration'] = float(self.timestep_duration.mean()) + else: + ds.attrs['timestep_duration'] = 1.0 + # Add version info ds.attrs['flixopt_version'] = __version__ From f232e0bdcc3dc4b985623f2c3fedd3e7b95c939d Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 25 Dec 2025 23:37:23 +0100 Subject: [PATCH 145/191] nitpicks and fixes --- CHANGELOG.md | 62 ++++++++----------- .../example_optimization_modes.py | 10 ++- tests/deprecated/test_integration.py | 28 ++------- tests/test_clustering/__init__.py | 2 +- 4 files changed, 39 insertions(+), 63 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f423f4609..db39c86ef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -57,28 +57,25 @@ Until here --> ### ✨ Added -**New Aggregation Module** (`flixopt.aggregation`): A backend-agnostic abstraction for time series aggregation: +**New Clustering Module** (`flixopt.clustering`): Data structures for time series clustering: ```python -from flixopt import aggregation +from flixopt import clustering -# Available backends -aggregation.list_backends() # ['tsam', 'manual'] - -# Core data structures for any aggregation method -aggregation.AggregationResult # Universal result format -aggregation.ClusterStructure # For storage inter-cluster linking -aggregation.Aggregator # Protocol for custom backends +# Core data structures for clustering +clustering.ClusterResult # Universal result format +clustering.ClusterStructure # For storage inter-cluster linking +clustering.Clustering # Stored on FlowSystem after clustering ``` -**Unified Aggregation API**: New `transform.aggregate()` method supporting multiple backends: +**Unified Clustering API**: New `transform.cluster()` method for time series reduction: ```python # TSAM clustering (default) - clusters 365 days into 8 typical days -fs_reduced = flow_system.transform.aggregate( - method='tsam', - n_representatives=8, +fs_reduced = flow_system.transform.cluster( + n_clusters=8, cluster_duration='1D', + time_series_for_high_peaks=['Demand|fixed_relative_profile'], ) fs_reduced.optimize(solver) @@ -100,20 +97,17 @@ weights.effective_objective # For objective function (with optional override) total_energy = weights.sum_over_time(flow_rates) ``` -**Manual Aggregation Backend**: Enables PyPSA-style workflow with external clustering tools: +**Manual Clustering Support**: Helper function for creating cluster structures from external tools: ```python -from flixopt.aggregation import ManualBackend, create_manual_backend_from_labels +from flixopt.clustering import create_cluster_structure_from_mapping # Use sklearn or any clustering algorithm from sklearn.cluster import KMeans # ... perform clustering, get labels ... 
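# For instance (hypothetical sketch, not part of the shipped example): cluster
# daily profiles with KMeans to obtain one integer label per day
#     profiles = data.values.reshape(-1, 24)   # one row per day, 24 hourly values
#     labels = KMeans(n_clusters=8).fit_predict(profiles)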
-# Create backend from cluster labels -backend = create_manual_backend_from_labels(labels, timesteps_per_cluster=24) - -# Or directly with mapping and weights -backend = ManualBackend( +# Create cluster structure from mapping +cluster_structure = create_cluster_structure_from_mapping( timestep_mapping=my_mapping, # xr.DataArray: original → representative representative_weights=my_weights, # xr.DataArray: weight per representative ) @@ -131,50 +125,46 @@ fs_agg = flow_system.transform.set_aggregation( ### 💥 Breaking Changes -**Removed `transform.cluster()` method**: The constraint-based clustering approach has been removed. Use `cluster_reduce()` instead: +**Simplified `transform.cluster()` API**: The constraint-based clustering approach has been replaced with timestep reduction: ```python -# Old (removed): -clustered_fs = flow_system.transform.cluster( - n_clusters=8, - cluster_duration='1D', -) - -# New (use cluster_reduce instead): -reduced_fs = flow_system.transform.cluster_reduce( +# New API - reduces timesteps via TSAM clustering +reduced_fs = flow_system.transform.cluster( n_clusters=8, cluster_duration='1D', + time_series_for_high_peaks=['Demand|fixed_relative_profile'], ) +reduced_fs.optimize(solver) ``` **Removed constraint-based clustering infrastructure**: -- `transform.cluster()` - removed (use `cluster_reduce()`) +- `ClusteredOptimization` class - removed (use `transform.cluster()` + `Optimization`) +- `ClusteringParameters` class - removed (parameters passed directly to `transform.cluster()`) - `transform.add_clustering()` - removed -- `FlowSystem._clustering_info` - removed (only `_cluster_info` for `cluster_reduce` remains) - `FlowSystem._add_clustering_constraints()` - removed ### ♻️ Changed -**Terminology clarification** in aggregation module: +**Terminology clarification** in clustering module: - "cluster" = a group of similar time chunks (e.g., similar days grouped together) - "typical period" = a representative time chunk for a cluster (TSAM terminology) - "cluster duration" = the length of each time chunk (e.g., 24h for daily clustering) Note: This is separate from the model's "period" dimension (years/months) and "scenario" dimension. -**xarray-native data structures**: All aggregation interfaces use `xr.DataArray` and `xr.Dataset` for proper coordinate handling. +**xarray-native data structures**: All clustering interfaces use `xr.DataArray` and `xr.Dataset` for proper coordinate handling. 
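For orientation, a minimal sketch of what these xarray-native inputs look like (illustrative shapes and values, not taken from the shipped examples):

```python
import numpy as np
import xarray as xr

# Hypothetical case: 48 hourly timesteps grouped into 2 daily clusters,
# where both original days map to representative cluster 0.
timestep_mapping = xr.DataArray(np.zeros(48, dtype=int), dims='time')
representative_weights = xr.DataArray([2.0], dims='cluster')  # cluster 0 stands for 2 days
```

These match the two arguments of `create_cluster_structure_from_mapping` shown above, so any external tool that can produce an integer label per timestep can drive the clustering.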
### 🔥 Removed -- `transform.cluster()` method (constraint-based clustering) +- `ClusteredOptimization` class (use `transform.cluster()` + `Optimization`) +- `ClusteringParameters` class (parameters passed directly to `transform.cluster()`) - `transform.add_clustering()` method - `ClusteringModel` constraint generation (internal) -- `_clustering_info` storage on FlowSystem ### 📝 Docs - Improved terminology: clarified distinction between clustering "typical periods" and model "period" dimension -- Added aggregation module documentation with backend examples +- Added clustering module documentation with examples --- diff --git a/tests/deprecated/examples/03_Optimization_modes/example_optimization_modes.py b/tests/deprecated/examples/03_Optimization_modes/example_optimization_modes.py index 91bbaeaaa..02e167c40 100644 --- a/tests/deprecated/examples/03_Optimization_modes/example_optimization_modes.py +++ b/tests/deprecated/examples/03_Optimization_modes/example_optimization_modes.py @@ -191,13 +191,17 @@ def get_solutions(optimizations: list, variable: str) -> xr.Dataset: if aggregated: # Use the new transform.cluster() API - time_series_for_high_peaks = [TS_heat_demand] if keep_extreme_periods else None - time_series_for_low_peaks = [TS_electricity_demand, TS_heat_demand] if keep_extreme_periods else None + # Note: time_series_for_high_peaks/low_peaks expect string labels matching dataset variables + time_series_for_high_peaks = ['Wärmelast(Q_th_Last)|fixed_relative_profile'] if keep_extreme_periods else None + time_series_for_low_peaks = ( + ['Stromlast(P_el_Last)|fixed_relative_profile', 'Wärmelast(Q_th_Last)|fixed_relative_profile'] + if keep_extreme_periods + else None + ) clustered_fs = flow_system.copy().transform.cluster( n_clusters=n_clusters, cluster_duration=cluster_duration, - include_storage=include_storage, time_series_for_high_peaks=time_series_for_high_peaks, time_series_for_low_peaks=time_series_for_low_peaks, ) diff --git a/tests/deprecated/test_integration.py b/tests/deprecated/test_integration.py index 9b05a5c10..8ec23265e 100644 --- a/tests/deprecated/test_integration.py +++ b/tests/deprecated/test_integration.py @@ -258,15 +258,15 @@ def test_piecewise_conversion(self, flow_system_piecewise_conversion, highs_solv @pytest.mark.slow class TestModelingTypes: - @pytest.fixture(params=['full', 'segmented', 'aggregated']) + # Note: 'aggregated' case removed - ClusteredOptimization has been replaced by + # FlowSystem.transform.cluster(). See tests/test_clustering/ for new clustering tests. 
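+    # A rough sketch of the replacement workflow (hypothetical, for orientation only;
+    # the real coverage lives in tests/test_clustering/):
+    #     clustered_fs = flow_system.transform.cluster(n_clusters=4, cluster_duration='6h')
+    #     clustered_fs.optimize(highs_solver)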
+ @pytest.fixture(params=['full', 'segmented']) def modeling_calculation(self, request, flow_system_long, highs_solver): """ Fixture to run optimizations with different modeling types """ # Extract flow system and data from the fixture flow_system = flow_system_long[0] - thermal_load_ts = flow_system_long[1]['thermal_load_ts'] - electrical_load_ts = flow_system_long[1]['electrical_load_ts'] # Create calculation based on modeling type modeling_type = request.param @@ -277,23 +277,6 @@ def modeling_calculation(self, request, flow_system_long, highs_solver): elif modeling_type == 'segmented': calc = fx.SegmentedOptimization('segModel', flow_system, timesteps_per_segment=96, overlap_timesteps=1) calc.do_modeling_and_solve(highs_solver) - elif modeling_type == 'aggregated': - calc = fx.ClusteredOptimization( - 'aggModel', - flow_system, - fx.ClusteringParameters( - n_clusters=4, - cluster_duration='6h', - include_storage=False, - aggregate_data=True, - flexibility_percent=0, - flexibility_penalty=0, - time_series_for_low_peaks=[electrical_load_ts, thermal_load_ts], - time_series_for_high_peaks=[thermal_load_ts], - ), - ) - calc.do_modeling() - calc.solve(highs_solver) return calc, modeling_type @@ -306,16 +289,15 @@ def test_modeling_types_costs(self, modeling_calculation): expected_costs = { 'full': 343613, 'segmented': 343613, # Approximate value - 'aggregated': 342967.0, } - if modeling_type in ['full', 'aggregated']: + if modeling_type == 'full': assert_almost_equal_numeric( calc.results.model['costs'].solution.item(), expected_costs[modeling_type], f'costs do not match for {modeling_type} modeling type', ) - else: + elif modeling_type == 'segmented': assert_almost_equal_numeric( calc.results.solution_without_overlap('costs(temporal)|per_timestep').sum(), expected_costs[modeling_type], diff --git a/tests/test_clustering/__init__.py b/tests/test_clustering/__init__.py index 4a026052c..3d546645c 100644 --- a/tests/test_clustering/__init__.py +++ b/tests/test_clustering/__init__.py @@ -1 +1 @@ -"""Tests for the flixopt.aggregation module.""" +"""Tests for the flixopt.clustering module.""" From e200441df33a1afc19f384a81c09f50b7ae19dfe Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Thu, 25 Dec 2025 23:44:39 +0100 Subject: [PATCH 146/191] Temp --- flixopt/components.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/flixopt/components.py b/flixopt/components.py index 8fe04e23b..b1ee14778 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -1515,6 +1515,11 @@ def _add_combined_bound_constraints( if has_investment and self.investment is not None: self.add_constraints(combined <= self.investment.size, short_name=f'soc_ub_{sample_name}') + elif not has_investment and isinstance(self.element.capacity_in_flow_hours, (int, float)): + # Fixed-capacity storage: upper bound is the fixed capacity + self.add_constraints( + combined <= self.element.capacity_in_flow_hours, short_name=f'soc_ub_{sample_name}' + ) @register_class_for_io From 19f149f048db53e74f8dd0066d9b50f2414c1c4c Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 26 Dec 2025 00:02:47 +0100 Subject: [PATCH 147/191] Temp --- flixopt/components.py | 130 +++++++++++++++++++----------------------- 1 file changed, 59 insertions(+), 71 deletions(-) diff --git a/flixopt/components.py b/flixopt/components.py index b1ee14778..8c836555d 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -941,39 +941,17 @@ def _do_modeling(self): 
short_name='netto_discharge', ) - charge_state = self.charge_state - rel_loss = self.element.relative_loss_per_hour - timestep_duration = self._model.timestep_duration - charge_rate = self.element.charging.submodel.flow_rate - discharge_rate = self.element.discharging.submodel.flow_rate - eff_charge = self.element.eta_charge - eff_discharge = self.element.eta_discharge - - # Build balance expression - lhs = ( - charge_state.isel(time=slice(1, None)) - - charge_state.isel(time=slice(None, -1)) * ((1 - rel_loss) ** timestep_duration) - - charge_rate * eff_charge * timestep_duration - + discharge_rate * timestep_duration / eff_discharge - ) - - # Handle clustering modes for storage - clustering = self._model.flow_system.clustering - mask = None + # Build and add energy balance constraint + lhs = self._build_energy_balance_lhs() - if clustering is not None: - # All modes skip inter-cluster boundaries: removes naive link between end of cluster N and start of N+1 - mask = np.ones(lhs.sizes['time'], dtype=bool) - mask[clustering.cluster_start_positions[1:] - 1] = False - mask = xr.DataArray(mask, coords={'time': lhs.coords['time']}) - - self.add_constraints(lhs == 0, short_name='charge_state', mask=mask) + # With 2D (cluster, time) structure, no masking needed - constraint applies within each cluster + self.add_constraints(lhs == 0, short_name='charge_state') # For 'cyclic' mode: each cluster's start equals its end - if clustering is not None and self.element.cluster_mode == 'cyclic': + if self._model.flow_system._use_true_cluster_dims and self.element.cluster_mode == 'cyclic': + # 2D structure: time=0 is start, time=-2 is last regular timestep (before extra timestep) self.add_constraints( - charge_state.isel(time=clustering.cluster_start_positions) - == charge_state.isel(time=clustering.cluster_start_positions + clustering.timesteps_per_period - 1), + self.charge_state.isel(time=0) == self.charge_state.isel(time=-2), short_name='cluster_cyclic', ) @@ -1007,6 +985,32 @@ def _do_modeling(self): short_name='balanced_sizes', ) + def _build_energy_balance_lhs(self): + """Build the left-hand side of the energy balance constraint. + + The energy balance equation is: + charge_state[t+1] = charge_state[t] * (1 - loss)^dt + + charge_rate * eta_charge * dt + - discharge_rate / eta_discharge * dt + + Returns: + The LHS expression (should equal 0). 
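+
+        Example (illustrative numbers):
+            With rel_loss=0.01, dt=1 h, eta_charge=eta_discharge=0.9,
+            charge_rate=10, discharge_rate=0 and charge_state[t]=100:
+            charge_state[t+1] = 100 * 0.99 + 10 * 0.9 * 1 = 108.0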
+ """ + charge_state = self.charge_state + rel_loss = self.element.relative_loss_per_hour + timestep_duration = self._model.timestep_duration + charge_rate = self.element.charging.submodel.flow_rate + discharge_rate = self.element.discharging.submodel.flow_rate + eff_charge = self.element.eta_charge + eff_discharge = self.element.eta_discharge + + return ( + charge_state.isel(time=slice(1, None)) + - charge_state.isel(time=slice(None, -1)) * ((1 - rel_loss) ** timestep_duration) + - charge_rate * eff_charge * timestep_duration + + discharge_rate * timestep_duration / eff_discharge + ) + def _initial_and_final_charge_state(self): if self.element.initial_charge_state is not None: if isinstance(self.element.initial_charge_state, str): @@ -1246,29 +1250,10 @@ def _do_modeling(self): short_name='netto_discharge', ) - # Build energy balance (same as base class, but with cluster boundary masking) - charge_state = self.charge_state - rel_loss = self.element.relative_loss_per_hour - timestep_duration = self._model.timestep_duration - charge_rate = self.element.charging.submodel.flow_rate - discharge_rate = self.element.discharging.submodel.flow_rate - eff_charge = self.element.eta_charge - eff_discharge = self.element.eta_discharge - - lhs = ( - charge_state.isel(time=slice(1, None)) - - charge_state.isel(time=slice(None, -1)) * ((1 - rel_loss) ** timestep_duration) - - charge_rate * eff_charge * timestep_duration - + discharge_rate * timestep_duration / eff_discharge - ) - - # Mask out inter-cluster boundaries - clustering = self._model.flow_system.clustering - mask = np.ones(lhs.sizes['time'], dtype=bool) - mask[clustering.cluster_start_positions[1:] - 1] = False - mask = xr.DataArray(mask, coords={'time': lhs.coords['time']}) - - self.add_constraints(lhs == 0, short_name='charge_state', mask=mask) + # Build energy balance using shared helper method + # With 2D (cluster, time) structure, no masking needed - constraint applies within each cluster + lhs = self._build_energy_balance_lhs() + self.add_constraints(lhs == 0, short_name='charge_state') # Create InvestmentModel if needed if isinstance(self.element.capacity_in_flow_hours, InvestParameters): @@ -1399,13 +1384,16 @@ def _add_cluster_start_constraints(self, n_clusters: int, timesteps_per_cluster: This ensures that the relative charge state is measured from a known reference point (the cluster start). + With 2D (cluster, time) structure, time=0 is the start of every cluster, + so we simply select isel(time=0) which broadcasts across the cluster dimension. + Args: - n_clusters: Number of representative clusters. - timesteps_per_cluster: Timesteps in each cluster. + n_clusters: Number of representative clusters (unused with 2D structure). + timesteps_per_cluster: Timesteps in each cluster (unused with 2D structure). """ - cluster_starts = np.arange(0, n_clusters * timesteps_per_cluster, timesteps_per_cluster) + # With 2D structure: time=0 is start of every cluster self.add_constraints( - self.charge_state.isel(time=cluster_starts) == 0, + self.charge_state.isel(time=0) == 0, short_name='cluster_start', ) @@ -1417,18 +1405,18 @@ def _compute_delta_soc(self, n_clusters: int, timesteps_per_cluster: int) -> xr. Since ΔE(start) = 0 by constraint, this simplifies to delta_SOC[c] = ΔE(end_c). + With 2D (cluster, time) structure, we can simply select isel(time=-1) and isel(time=0), + which already have the 'cluster' dimension. + Args: - n_clusters: Number of representative clusters. - timesteps_per_cluster: Timesteps in each cluster. 
+ n_clusters: Number of representative clusters (unused with 2D structure). + timesteps_per_cluster: Timesteps in each cluster (unused with 2D structure). Returns: DataArray with 'cluster' dimension containing delta_SOC for each cluster. """ - starts = np.arange(0, n_clusters * timesteps_per_cluster, timesteps_per_cluster) - ends = starts + timesteps_per_cluster - 1 - - delta_soc = self.charge_state.isel(time=ends) - self.charge_state.isel(time=starts) - return delta_soc.assign_coords(time=np.arange(n_clusters)).rename({'time': 'cluster'}) + # With 2D structure: result already has cluster dimension + return self.charge_state.isel(time=-1) - self.charge_state.isel(time=0) def _add_linking_constraints( self, @@ -1483,6 +1471,9 @@ def _add_combined_bound_constraints( Since checking every timestep is expensive, we sample at the start, middle, and end of each cluster. + With 2D (cluster, time) structure, we simply select charge_state at a + given time offset, then reorder by cluster_order to get original_period order. + Args: soc_boundary: SOC_boundary variable. cluster_order: Mapping from original periods to clusters. @@ -1492,21 +1483,18 @@ def _add_combined_bound_constraints( """ charge_state = self.charge_state + # soc_d: SOC at start of each original period soc_d = soc_boundary.isel(cluster_boundary=slice(None, -1)) soc_d = soc_d.rename({'cluster_boundary': 'original_period'}) soc_d = soc_d.assign_coords(original_period=np.arange(n_original_periods)) sample_offsets = [0, timesteps_per_cluster // 2, timesteps_per_cluster - 1] - max_time_idx = len(charge_state.coords['time']) - 1 - - cluster_order_vals = cluster_order.values.astype(int) - cluster_starts = cluster_order_vals * timesteps_per_cluster for sample_name, offset in zip(['start', 'mid', 'end'], sample_offsets, strict=False): - time_indices = np.clip(cluster_starts + offset, 0, max_time_idx) - - cs_t = charge_state.isel(time=time_indices) - cs_t = cs_t.rename({'time': 'original_period'}) + # With 2D structure: select time offset, then reorder by cluster_order + cs_at_offset = charge_state.isel(time=offset) # Shape: (cluster, ...) + cs_t = cs_at_offset.isel(cluster=cluster_order) # Reorder to original_period order + cs_t = cs_t.rename({'cluster': 'original_period'}) cs_t = cs_t.assign_coords(original_period=np.arange(n_original_periods)) combined = soc_d + cs_t From 82ac4c291abb549712ca4b08145c19a93889c74a Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 26 Dec 2025 00:12:14 +0100 Subject: [PATCH 148/191] Temp --- flixopt/components.py | 164 +++++++++++++++++++++--------------------- 1 file changed, 82 insertions(+), 82 deletions(-) diff --git a/flixopt/components.py b/flixopt/components.py index 8c836555d..ef5c3c4e5 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -910,6 +910,10 @@ class StorageModel(ComponentModel): Mathematical Formulation: See + + Note: + This class uses a template method pattern. Subclasses (e.g., InterclusterStorageModel) + can override individual methods to customize behavior without duplicating code. 
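+
+        A minimal override sketch (hypothetical subclass, for illustration only)::
+
+            class NonCyclicStorageModel(StorageModel):
+                def _add_cluster_cyclic_constraint(self):
+                    pass  # opt out of the cyclic coupling; all other steps unchanged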
""" element: Storage @@ -918,10 +922,18 @@ def __init__(self, model: FlowSystemModel, element: Storage): super().__init__(model, element) def _do_modeling(self): - """Create charge state variables, energy balance equations, and optional investment submodels""" + """Create charge state variables, energy balance equations, and optional investment submodels.""" super()._do_modeling() - - # Create variables + self._create_storage_variables() + self._add_netto_discharge_constraint() + self._add_energy_balance_constraint() + self._add_cluster_cyclic_constraint() + self._add_investment_model() + self._add_initial_final_constraints() + self._add_balanced_sizes_constraint() + + def _create_storage_variables(self): + """Create charge_state and netto_discharge variables.""" lb, ub = self._absolute_charge_state_bounds self.add_variables( lower=lb, @@ -929,33 +941,31 @@ def _do_modeling(self): coords=self._model.get_coords(extra_timestep=True), short_name='charge_state', ) - self.add_variables(coords=self._model.get_coords(), short_name='netto_discharge') - # Create constraints (can now access flow.submodel.flow_rate) - # netto_discharge: - # eq: nettoFlow(t) - discharging(t) + charging(t) = 0 + def _add_netto_discharge_constraint(self): + """Add constraint: netto_discharge = discharging - charging.""" self.add_constraints( self.netto_discharge == self.element.discharging.submodel.flow_rate - self.element.charging.submodel.flow_rate, short_name='netto_discharge', ) - # Build and add energy balance constraint + def _add_energy_balance_constraint(self): + """Add energy balance constraint linking charge states across timesteps.""" lhs = self._build_energy_balance_lhs() - - # With 2D (cluster, time) structure, no masking needed - constraint applies within each cluster self.add_constraints(lhs == 0, short_name='charge_state') - # For 'cyclic' mode: each cluster's start equals its end + def _add_cluster_cyclic_constraint(self): + """For 'cyclic' cluster mode: each cluster's start equals its end.""" if self._model.flow_system._use_true_cluster_dims and self.element.cluster_mode == 'cyclic': - # 2D structure: time=0 is start, time=-2 is last regular timestep (before extra timestep) self.add_constraints( self.charge_state.isel(time=0) == self.charge_state.isel(time=-2), short_name='cluster_cyclic', ) - # Create InvestmentModel and bounding constraints for investment + def _add_investment_model(self): + """Create InvestmentModel and add capacity-scaled bounds if using investment sizing.""" if isinstance(self.element.capacity_in_flow_hours, InvestParameters): self.add_submodels( InvestmentModel( @@ -966,7 +976,6 @@ def _do_modeling(self): ), short_name='investment', ) - BoundingPatterns.scaled_bounds( self, variable=self.charge_state, @@ -974,10 +983,34 @@ def _do_modeling(self): relative_bounds=self._relative_charge_state_bounds, ) - # Initial and final charge state constraints - self._initial_and_final_charge_state() + def _add_initial_final_constraints(self): + """Add initial and final charge state constraints.""" + if self.element.initial_charge_state is not None: + if isinstance(self.element.initial_charge_state, str): + self.add_constraints( + self.charge_state.isel(time=0) == self.charge_state.isel(time=-1), + short_name='initial_charge_state', + ) + else: + self.add_constraints( + self.charge_state.isel(time=0) == self.element.initial_charge_state, + short_name='initial_charge_state', + ) - # Balanced sizes + if self.element.maximal_final_charge_state is not None: + self.add_constraints( + 
self.charge_state.isel(time=-1) <= self.element.maximal_final_charge_state, + short_name='final_charge_max', + ) + + if self.element.minimal_final_charge_state is not None: + self.add_constraints( + self.charge_state.isel(time=-1) >= self.element.minimal_final_charge_state, + short_name='final_charge_min', + ) + + def _add_balanced_sizes_constraint(self): + """Add constraint ensuring charging and discharging capacities are equal.""" if self.element.balanced: self.add_constraints( self.element.charging.submodel._investment.size * 1 @@ -1011,30 +1044,6 @@ def _build_energy_balance_lhs(self): + discharge_rate * timestep_duration / eff_discharge ) - def _initial_and_final_charge_state(self): - if self.element.initial_charge_state is not None: - if isinstance(self.element.initial_charge_state, str): - self.add_constraints( - self.charge_state.isel(time=0) == self.charge_state.isel(time=-1), short_name='initial_charge_state' - ) - else: - self.add_constraints( - self.charge_state.isel(time=0) == self.element.initial_charge_state, - short_name='initial_charge_state', - ) - - if self.element.maximal_final_charge_state is not None: - self.add_constraints( - self.charge_state.isel(time=-1) <= self.element.maximal_final_charge_state, - short_name='final_charge_max', - ) - - if self.element.minimal_final_charge_state is not None: - self.add_constraints( - self.charge_state.isel(time=-1) >= self.element.minimal_final_charge_state, - short_name='final_charge_min', - ) - @property def _absolute_charge_state_bounds(self) -> tuple[xr.DataArray, xr.DataArray]: """Get absolute bounds for charge_state variable. @@ -1220,42 +1229,41 @@ class InterclusterStorageModel(StorageModel): soc_boundary = fs_clustered.solution['seasonal_storage|SOC_boundary'] """ - def _do_modeling(self): - """Create storage model with inter-cluster linking constraints. + @property + def _absolute_charge_state_bounds(self) -> tuple[xr.DataArray, xr.DataArray]: + """Get symmetric bounds for charge_state (ΔE) variable. - Extends the base StorageModel by: - 1. Skipping initial/final charge state constraints (handled via SOC_boundary) - 2. Using symmetric bounds on charge_state (ΔE can be negative) - 3. Adding SOC_boundary variable and linking constraints - """ - # Call grandparent's _do_modeling (ComponentModel), not parent's - # We need to rebuild because intercluster mode changes bounds and constraints - ComponentModel._do_modeling(self) + For InterclusterStorageModel, charge_state represents ΔE (relative change + from cluster start), which can be negative. Therefore, we need symmetric + bounds: -capacity <= ΔE <= capacity. - # Create charge_state with symmetric bounds for ΔE - lb, ub = self._absolute_charge_state_bounds - self.add_variables( - lower=lb, - upper=ub, - coords=self._model.get_coords(extra_timestep=True), - short_name='charge_state', - ) + Note that for investment-based sizing, additional constraints are added + in _add_investment_model to link bounds to the actual investment size. + """ + if self.element.capacity_in_flow_hours is None: + return (-np.inf, np.inf) + elif isinstance(self.element.capacity_in_flow_hours, InvestParameters): + cap_max = self.element.capacity_in_flow_hours.maximum_or_fixed_size + return (-cap_max, cap_max) + else: + cap = self.element.capacity_in_flow_hours + return (-cap, cap) - self.add_variables(coords=self._model.get_coords(), short_name='netto_discharge') + def _do_modeling(self): + """Create storage model with inter-cluster linking constraints. 
- # Create netto_discharge constraint - self.add_constraints( - self.netto_discharge - == self.element.discharging.submodel.flow_rate - self.element.charging.submodel.flow_rate, - short_name='netto_discharge', - ) + Uses template method pattern: calls parent's _do_modeling, then adds + inter-cluster linking. Overrides specific methods to customize behavior. + """ + super()._do_modeling() + self._add_intercluster_linking() - # Build energy balance using shared helper method - # With 2D (cluster, time) structure, no masking needed - constraint applies within each cluster - lhs = self._build_energy_balance_lhs() - self.add_constraints(lhs == 0, short_name='charge_state') + def _add_cluster_cyclic_constraint(self): + """Skip cluster cyclic constraint - handled by inter-cluster linking.""" + pass - # Create InvestmentModel if needed + def _add_investment_model(self): + """Create InvestmentModel with symmetric bounds for ΔE.""" if isinstance(self.element.capacity_in_flow_hours, InvestParameters): self.add_submodels( InvestmentModel( @@ -1266,7 +1274,6 @@ def _do_modeling(self): ), short_name='investment', ) - # Symmetric bounds: -size <= charge_state <= size self.add_constraints( self.charge_state >= -self.investment.size, @@ -1277,16 +1284,9 @@ def _do_modeling(self): short_name='charge_state|ub', ) - # Add inter-cluster linking (the main contribution of this class) - self._add_intercluster_linking() - - # Balanced sizes - if self.element.balanced: - self.add_constraints( - self.element.charging.submodel._investment.size * 1 - == self.element.discharging.submodel._investment.size * 1, - short_name='balanced_sizes', - ) + def _add_initial_final_constraints(self): + """Skip initial/final constraints - handled by SOC_boundary in inter-cluster linking.""" + pass def _add_intercluster_linking(self) -> None: """Add inter-cluster storage linking following the S-N model. From d1baccb6dba401a5b5405c527876d8ffa0e9f03e Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 26 Dec 2025 00:36:02 +0100 Subject: [PATCH 149/191] Temp --- flixopt/components.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/flixopt/components.py b/flixopt/components.py index ef5c3c4e5..bf4d7fa59 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -1058,7 +1058,7 @@ def _absolute_charge_state_bounds(self) -> tuple[xr.DataArray, xr.DataArray]: relative_lower_bound, relative_upper_bound = self._relative_charge_state_bounds if self.element.capacity_in_flow_hours is None: - return (0, np.inf) + return 0, np.inf elif isinstance(self.element.capacity_in_flow_hours, InvestParameters): cap_min = self.element.capacity_in_flow_hours.minimum_or_fixed_size cap_max = self.element.capacity_in_flow_hours.maximum_or_fixed_size @@ -1241,13 +1241,15 @@ def _absolute_charge_state_bounds(self) -> tuple[xr.DataArray, xr.DataArray]: in _add_investment_model to link bounds to the actual investment size. 
""" if self.element.capacity_in_flow_hours is None: - return (-np.inf, np.inf) + return -np.inf, np.inf elif isinstance(self.element.capacity_in_flow_hours, InvestParameters): - cap_max = self.element.capacity_in_flow_hours.maximum_or_fixed_size - return (-cap_max, cap_max) + cap_max = ( + self.element.capacity_in_flow_hours.maximum_or_fixed_size * self.element.relative_maximum_charge_state + ) + return -cap_max, cap_max else: - cap = self.element.capacity_in_flow_hours - return (-cap, cap) + cap = self.element.capacity_in_flow_hours * self.element.relative_maximum_charge_state + return -cap, cap def _do_modeling(self): """Create storage model with inter-cluster linking constraints. From 78ab16c5250861f77e48955a9bf8d2c3db112f06 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 26 Dec 2025 00:47:48 +0100 Subject: [PATCH 150/191] Temp --- flixopt/components.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/flixopt/components.py b/flixopt/components.py index bf4d7fa59..58025f6df 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -953,8 +953,7 @@ def _add_netto_discharge_constraint(self): def _add_energy_balance_constraint(self): """Add energy balance constraint linking charge states across timesteps.""" - lhs = self._build_energy_balance_lhs() - self.add_constraints(lhs == 0, short_name='charge_state') + self.add_constraints(self._build_energy_balance_lhs(), short_name='charge_state') def _add_cluster_cyclic_constraint(self): """For 'cyclic' cluster mode: each cluster's start equals its end.""" @@ -1039,9 +1038,9 @@ def _build_energy_balance_lhs(self): return ( charge_state.isel(time=slice(1, None)) - - charge_state.isel(time=slice(None, -1)) * ((1 - rel_loss) ** timestep_duration) - - charge_rate * eff_charge * timestep_duration - + discharge_rate * timestep_duration / eff_discharge + == charge_state.isel(time=slice(None, -1)) * ((1 - rel_loss) ** timestep_duration) + + charge_rate * eff_charge * timestep_duration + - discharge_rate * timestep_duration / eff_discharge ) @property From 97a344061b3e199ca25256f126351afa281d376a Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 26 Dec 2025 01:12:09 +0100 Subject: [PATCH 151/191] Temp --- flixopt/components.py | 29 ++++++++++------------------- 1 file changed, 10 insertions(+), 19 deletions(-) diff --git a/flixopt/components.py b/flixopt/components.py index 58025f6df..303ce1716 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -8,7 +8,6 @@ from typing import TYPE_CHECKING, Literal import numpy as np -import pandas as pd import xarray as xr from . 
import io as fx_io @@ -1037,11 +1036,11 @@ def _build_energy_balance_lhs(self): eff_discharge = self.element.eta_discharge return ( - charge_state.isel(time=slice(1, None)) - == charge_state.isel(time=slice(None, -1)) * ((1 - rel_loss) ** timestep_duration) - + charge_rate * eff_charge * timestep_duration - - discharge_rate * timestep_duration / eff_discharge - ) + charge_state.shift(time=-1) + - charge_state.isel(time=slice(None, -1)) * ((1 - rel_loss) ** timestep_duration) + - charge_rate * eff_charge * timestep_duration + + discharge_rate * timestep_duration / eff_discharge + ).isel(time=slice(None, -1)) == 0 @property def _absolute_charge_state_bounds(self) -> tuple[xr.DataArray, xr.DataArray]: @@ -1080,15 +1079,7 @@ def _relative_charge_state_bounds(self) -> tuple[xr.DataArray, xr.DataArray]: Returns: Tuple of (minimum_bounds, maximum_bounds) DataArrays extending to final timestep """ - # Get the final time coordinate (DatetimeIndex for both clustered and non-clustered) - if self._model.flow_system._use_true_cluster_dims: - # For clustered systems, add one timestep to the DatetimeIndex - time_coords = self._model.flow_system._cluster_time_coords - dt = self._model.flow_system._cluster_info.get('timestep_duration', 1.0) - final_time = time_coords[-1] + pd.Timedelta(hours=dt) - final_coords = {'time': [final_time]} - else: - final_coords = {'time': [self._model.flow_system.timesteps_extra[-1]]} + final_coords = {'time': [self._model.flow_system.timesteps_extra[-1]]} # Get final minimum charge state if self.element.relative_minimum_final_charge_state is None: @@ -1239,15 +1230,15 @@ def _absolute_charge_state_bounds(self) -> tuple[xr.DataArray, xr.DataArray]: Note that for investment-based sizing, additional constraints are added in _add_investment_model to link bounds to the actual investment size. 
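
        Example (illustrative): with a fixed capacity of 100 and a relative
        upper bound of 0.8, the resulting ΔE bounds are (-80, 80), i.e. the
        in-cluster deviation can swing by the full usable capacity in either
        direction.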
""" + _, relative_upper_bound = self._relative_charge_state_bounds + if self.element.capacity_in_flow_hours is None: return -np.inf, np.inf elif isinstance(self.element.capacity_in_flow_hours, InvestParameters): - cap_max = ( - self.element.capacity_in_flow_hours.maximum_or_fixed_size * self.element.relative_maximum_charge_state - ) + cap_max = self.element.capacity_in_flow_hours.maximum_or_fixed_size * relative_upper_bound return -cap_max, cap_max else: - cap = self.element.capacity_in_flow_hours * self.element.relative_maximum_charge_state + cap = self.element.capacity_in_flow_hours * relative_upper_bound return -cap, cap def _do_modeling(self): From 5e4a856427857ff42cfe21b11569b4bbc3a8e302 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 26 Dec 2025 01:12:24 +0100 Subject: [PATCH 152/191] Temp --- flixopt/structure.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/flixopt/structure.py b/flixopt/structure.py index 5ce151385..0c07a2602 100644 --- a/flixopt/structure.py +++ b/flixopt/structure.py @@ -392,14 +392,7 @@ def get_coords( coords = {k: v for k, v in self.flow_system.coords.items() if k in effective_dims} if extra_timestep and coords: - if self.flow_system._use_true_cluster_dims: - # For clustered: extend time by 1 within each cluster (for charge_state) - time_coords = self.flow_system._cluster_time_coords - dt = self.flow_system._cluster_info.get('timestep_duration', 1.0) - final_time = time_coords[-1] + pd.Timedelta(hours=dt) - coords['time'] = time_coords.append(pd.DatetimeIndex([final_time])) - else: - coords['time'] = self.flow_system.timesteps_extra + coords['time'] = self.flow_system.timesteps_extra return xr.Coordinates(coords) if coords else None From d91c7e4365be9be7fb9f3b7396e8573ba749253b Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 26 Dec 2025 11:20:11 +0100 Subject: [PATCH 153/191] Better plotting defaults --- flixopt/config.py | 15 ++ flixopt/statistics_accessor.py | 302 ++++++++++++++++++++++++--------- 2 files changed, 236 insertions(+), 81 deletions(-) diff --git a/flixopt/config.py b/flixopt/config.py index a29027d65..4b1de189b 100644 --- a/flixopt/config.py +++ b/flixopt/config.py @@ -163,6 +163,9 @@ def format(self, record): 'default_facet_cols': 3, 'default_sequential_colorscale': 'turbo', 'default_qualitative_colorscale': 'plotly', + 'facet_col_priority': ('cluster', 'period', 'scenario'), + 'facet_row_priority': ('period', 'scenario'), + 'animation_frame_priority': ('scenario',), } ), 'solving': MappingProxyType( @@ -558,6 +561,9 @@ class Plotting: default_facet_cols: Default number of columns for faceted plots. default_sequential_colorscale: Default colorscale for heatmaps and continuous data. default_qualitative_colorscale: Default colormap for categorical plots (bar/line/area charts). + facet_col_priority: Priority order for auto-resolving facet_col dimension. + facet_row_priority: Priority order for auto-resolving facet_row dimension. + animation_frame_priority: Priority order for auto-resolving animation_frame dimension. 
Examples: ```python @@ -565,6 +571,9 @@ class Plotting: CONFIG.Plotting.default_dpi = 600 CONFIG.Plotting.default_sequential_colorscale = 'plasma' CONFIG.Plotting.default_qualitative_colorscale = 'Dark24' + + # Customize auto-faceting priority + CONFIG.Plotting.facet_col_priority = ('period', 'cluster', 'scenario') ``` """ @@ -574,6 +583,9 @@ class Plotting: default_facet_cols: int = _DEFAULTS['plotting']['default_facet_cols'] default_sequential_colorscale: str = _DEFAULTS['plotting']['default_sequential_colorscale'] default_qualitative_colorscale: str = _DEFAULTS['plotting']['default_qualitative_colorscale'] + facet_col_priority: tuple[str, ...] = _DEFAULTS['plotting']['facet_col_priority'] + facet_row_priority: tuple[str, ...] = _DEFAULTS['plotting']['facet_row_priority'] + animation_frame_priority: tuple[str, ...] = _DEFAULTS['plotting']['animation_frame_priority'] class Carriers: """Default carrier definitions for common energy types. @@ -674,6 +686,9 @@ def to_dict(cls) -> dict: 'default_facet_cols': cls.Plotting.default_facet_cols, 'default_sequential_colorscale': cls.Plotting.default_sequential_colorscale, 'default_qualitative_colorscale': cls.Plotting.default_qualitative_colorscale, + 'facet_col_priority': cls.Plotting.facet_col_priority, + 'facet_row_priority': cls.Plotting.facet_row_priority, + 'animation_frame_priority': cls.Plotting.animation_frame_priority, }, } diff --git a/flixopt/statistics_accessor.py b/flixopt/statistics_accessor.py index 17ca6cb42..1c835e9d2 100644 --- a/flixopt/statistics_accessor.py +++ b/flixopt/statistics_accessor.py @@ -228,15 +228,76 @@ def _filter_by_carrier(ds: xr.Dataset, carrier: str | list[str] | None) -> xr.Da return ds[matching_vars] if matching_vars else xr.Dataset() +def _resolve_auto_facets( + ds: xr.Dataset, + facet_col: str | Literal['auto'] | None, + facet_row: str | Literal['auto'] | None, + animation_frame: str | Literal['auto'] | None = None, +) -> tuple[str | None, str | None, str | None]: + """Resolve 'auto' facet/animation dimensions based on available data dimensions. + + When 'auto' is specified, dimensions are assigned based on priority: + - facet_col: cluster → period → scenario (first available with size > 1) + - facet_row: period → scenario (after facet_col is assigned) + - animation_frame: scenario (after others are assigned) + + Priority order is configurable via CONFIG.Plotting.facet_col_priority, etc. + + Args: + ds: Dataset to check for available dimensions. + facet_col: Dimension name, 'auto', or None. + facet_row: Dimension name, 'auto', or None. + animation_frame: Dimension name, 'auto', or None. + + Returns: + Tuple of (resolved_facet_col, resolved_facet_row, resolved_animation_frame). + Each is either a valid dimension name or None. 
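+
+    Example (illustrative):
+        For a dataset with dims (time, cluster, scenario), each of size > 1,
+        _resolve_auto_facets(ds, 'auto', 'auto', 'auto') yields
+        ('cluster', 'scenario', None) under the default priorities: 'cluster'
+        wins facet_col, 'scenario' then wins facet_row, and nothing remains
+        for animation_frame because each dimension is used at most once.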
+ """ + # Get available dimensions with size > 1 + available = {d for d in ds.dims if ds.sizes[d] > 1} + used: set[str] = set() + + def resolve_one( + value: str | Literal['auto'] | None, + priority: tuple[str, ...], + ) -> str | None: + if value is None: + return None + if value != 'auto': + # Explicit dimension - use if available, else None + return value if value in available and value not in used else None + + # Auto mode: pick first available from priority list + for dim in priority: + if dim in available and dim not in used: + used.add(dim) + return dim + return None + + resolved_col = resolve_one(facet_col, CONFIG.Plotting.facet_col_priority) + if resolved_col: + used.add(resolved_col) + + resolved_row = resolve_one(facet_row, CONFIG.Plotting.facet_row_priority) + if resolved_row: + used.add(resolved_row) + + resolved_anim = resolve_one(animation_frame, CONFIG.Plotting.animation_frame_priority) + + return resolved_col, resolved_row, resolved_anim + + def _resolve_facets( ds: xr.Dataset, - facet_col: str | None, - facet_row: str | None, + facet_col: str | Literal['auto'] | None, + facet_row: str | Literal['auto'] | None, ) -> tuple[str | None, str | None]: - """Resolve facet dimensions, returning None if not present in data.""" - actual_facet_col = facet_col if facet_col and facet_col in ds.dims else None - actual_facet_row = facet_row if facet_row and facet_row in ds.dims else None - return actual_facet_col, actual_facet_row + """Resolve facet dimensions, returning None if not present in data. + + Legacy wrapper for _resolve_auto_facets for backward compatibility. + """ + resolved_col, resolved_row, _ = _resolve_auto_facets(ds, facet_col, facet_row, None) + return resolved_col, resolved_row def _dataset_to_long_df(ds: xr.Dataset, value_name: str = 'value', var_name: str = 'variable') -> pd.DataFrame: @@ -258,6 +319,7 @@ def _create_stacked_bar( title: str, facet_col: str | None, facet_row: str | None, + animation_frame: str | None = None, **plotly_kwargs: Any, ) -> go.Figure: """Create a stacked bar chart from xarray Dataset.""" @@ -274,6 +336,7 @@ def _create_stacked_bar( color='variable', facet_col=facet_col, facet_row=facet_row, + animation_frame=animation_frame, color_discrete_map=color_map, title=title, **plotly_kwargs, @@ -289,6 +352,7 @@ def _create_line( title: str, facet_col: str | None, facet_row: str | None, + animation_frame: str | None = None, **plotly_kwargs: Any, ) -> go.Figure: """Create a line chart from xarray Dataset.""" @@ -305,6 +369,7 @@ def _create_line( color='variable', facet_col=facet_col, facet_row=facet_row, + animation_frame=animation_frame, color_discrete_map=color_map, title=title, **plotly_kwargs, @@ -1377,8 +1442,9 @@ def balance( exclude: FilterType | None = None, unit: Literal['flow_rate', 'flow_hours'] = 'flow_rate', colors: ColorType | None = None, - facet_col: str | None = 'period', - facet_row: str | None = 'scenario', + facet_col: str | Literal['auto'] | None = 'auto', + facet_row: str | Literal['auto'] | None = None, + animation_frame: str | Literal['auto'] | None = None, show: bool | None = None, **plotly_kwargs: Any, ) -> PlotResult: @@ -1391,8 +1457,11 @@ def balance( exclude: Exclude flows containing these substrings. unit: 'flow_rate' (power) or 'flow_hours' (energy). colors: Color specification (colorscale name, color list, or label-to-color dict). - facet_col: Dimension for column facets. - facet_row: Dimension for row facets. + facet_col: Dimension for column facets. 'auto' uses first available of + cluster/period/scenario. 
None disables. + facet_row: Dimension for row facets. 'auto' uses first available after facet_col. + animation_frame: Dimension for animation slider. 'auto' uses first available + after facets. show: Whether to display the plot. Returns: @@ -1429,7 +1498,9 @@ def balance( ds[label] = -ds[label] ds = _apply_selection(ds, select) - actual_facet_col, actual_facet_row = _resolve_facets(ds, facet_col, facet_row) + actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( + ds, facet_col, facet_row, animation_frame + ) # Build color map from Element.color attributes if no colors specified if colors is None: @@ -1447,6 +1518,7 @@ def balance( title=f'{node} [{unit_label}]' if unit_label else node, facet_col=actual_facet_col, facet_row=actual_facet_row, + animation_frame=actual_anim, **plotly_kwargs, ) @@ -1466,8 +1538,9 @@ def carrier_balance( exclude: FilterType | None = None, unit: Literal['flow_rate', 'flow_hours'] = 'flow_rate', colors: ColorType | None = None, - facet_col: str | None = 'period', - facet_row: str | None = 'scenario', + facet_col: str | Literal['auto'] | None = 'auto', + facet_row: str | Literal['auto'] | None = None, + animation_frame: str | Literal['auto'] | None = None, show: bool | None = None, **plotly_kwargs: Any, ) -> PlotResult: @@ -1483,8 +1556,10 @@ def carrier_balance( exclude: Exclude flows containing these substrings. unit: 'flow_rate' (power) or 'flow_hours' (energy). colors: Color specification (colorscale name, color list, or label-to-color dict). - facet_col: Dimension for column facets. - facet_row: Dimension for row facets. + facet_col: Dimension for column facets. 'auto' uses first available of + cluster/period/scenario. + facet_row: Dimension for row facets. 'auto' uses first available after facet_col. + animation_frame: Dimension for animation slider. show: Whether to display the plot. Returns: @@ -1535,7 +1610,9 @@ def carrier_balance( ds[label] = -ds[label] ds = _apply_selection(ds, select) - actual_facet_col, actual_facet_row = _resolve_facets(ds, facet_col, facet_row) + actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( + ds, facet_col, facet_row, animation_frame + ) # Use cached component colors for flows if colors is None: @@ -1566,6 +1643,7 @@ def carrier_balance( title=f'{carrier.capitalize()} Balance [{unit_label}]' if unit_label else f'{carrier.capitalize()} Balance', facet_col=actual_facet_col, facet_row=actual_facet_row, + animation_frame=actual_anim, **plotly_kwargs, ) @@ -1581,18 +1659,20 @@ def heatmap( variables: str | list[str], *, select: SelectType | None = None, - reshape: tuple[str, str] | None = ('D', 'h'), + reshape: tuple[str, str] | Literal['auto'] | None = 'auto', colors: str | list[str] | None = None, - facet_col: str | None = 'period', - animation_frame: str | None = 'scenario', + facet_col: str | Literal['auto'] | None = 'auto', + animation_frame: str | Literal['auto'] | None = None, show: bool | None = None, **plotly_kwargs: Any, ) -> PlotResult: """Plot heatmap of time series data. - Time is reshaped into 2D (e.g., days × hours) when possible. Multiple variables - are shown as facets. If too many dimensions exist to display without data loss, - reshaping is skipped and variables are shown on the y-axis with time on x-axis. + Time is reshaped into 2D (e.g., days × hours) when possible. For clustered data, + the natural (cluster, time) shape is used directly without reshaping. + + Multiple variables are shown as facets. 
If too many dimensions exist to display + without data loss, reshaping is skipped and variables are shown on the y-axis. Args: variables: Flow label(s) or variable name(s). Flow labels like 'Boiler(Q_th)' @@ -1600,12 +1680,13 @@ def heatmap( names like 'Storage|charge_state' are used as-is. select: xarray-style selection, e.g. {'scenario': 'Base Case'}. reshape: Time reshape frequencies as (outer, inner), e.g. ('D', 'h') for - days × hours. Set to None to disable reshaping. + days × hours. 'auto' uses (cluster, time) for clustered data or + ('D', 'h') otherwise. None disables reshaping. colors: Colorscale name (str) or list of colors for heatmap coloring. Dicts are not supported for heatmaps (use str or list[str]). - facet_col: Dimension for subplot columns (default: 'period'). - With multiple variables, 'variable' is used instead. - animation_frame: Dimension for animation slider (default: 'scenario'). + facet_col: Dimension for subplot columns. 'auto' uses first available of + cluster/period/scenario. With multiple variables, 'variable' is used. + animation_frame: Dimension for animation slider. 'auto' uses first available. show: Whether to display the figure. **plotly_kwargs: Additional arguments passed to px.imshow. @@ -1628,38 +1709,48 @@ def heatmap( dataarrays = [ds[var] for var in variable_names] da = xr.concat(dataarrays, dim=pd.Index(variable_names, name='variable')) + # Check if data is clustered (has cluster dimension with size > 1) + is_clustered = 'cluster' in da.dims and da.sizes['cluster'] > 1 + # Determine facet and animation from available dims has_multiple_vars = 'variable' in da.dims and da.sizes['variable'] > 1 if has_multiple_vars: actual_facet = 'variable' - actual_animation = ( - animation_frame - if animation_frame in da.dims - else (facet_col if facet_col in da.dims and da.sizes.get(facet_col, 1) > 1 else None) - ) + # Resolve animation using auto logic, excluding 'variable' which is used for facet + _, _, actual_animation = _resolve_auto_facets(da.to_dataset(name='value'), None, None, animation_frame) + if actual_animation == 'variable': + actual_animation = None else: - actual_facet = facet_col if facet_col in da.dims and da.sizes.get(facet_col, 0) > 1 else None - actual_animation = ( - animation_frame if animation_frame in da.dims and da.sizes.get(animation_frame, 0) > 1 else None + # Resolve facet and animation using auto logic + actual_facet, _, actual_animation = _resolve_auto_facets( + da.to_dataset(name='value'), facet_col, None, animation_frame ) - # Count non-time dims with size > 1 (these need facet/animation slots) - extra_dims = [d for d in da.dims if d != 'time' and da.sizes[d] > 1] + # Count non-time/non-cluster dims with size > 1 (these need facet/animation slots) + heatmap_core_dims = {'time', 'cluster'} if is_clustered else {'time'} + extra_dims = [d for d in da.dims if d not in heatmap_core_dims and da.sizes[d] > 1] used_slots = len([d for d in [actual_facet, actual_animation] if d]) would_drop = len(extra_dims) > used_slots - # Reshape time only if we wouldn't lose data (all extra dims fit in facet + animation) - if reshape and 'time' in da.dims and not would_drop: + # Determine heatmap dimensions based on data structure + if is_clustered and (reshape == 'auto' or reshape is None): + # Clustered data: use (time, cluster) as natural 2D heatmap axes + heatmap_dims = ['time', 'cluster'] + elif reshape and reshape != 'auto' and 'time' in da.dims and not would_drop: + # Non-clustered with explicit reshape: reshape time to (day, hour) etc. 
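+            # (e.g. reshape=('D', 'h') pivots an hourly year into 365 day-rows x 24 hour-columns)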
da = _reshape_time_for_heatmap(da, reshape) heatmap_dims = ['timestep', 'timeframe'] + elif reshape == 'auto' and 'time' in da.dims and not would_drop and not is_clustered: + # Auto mode for non-clustered: use default ('D', 'h') reshape + da = _reshape_time_for_heatmap(da, ('D', 'h')) + heatmap_dims = ['timestep', 'timeframe'] elif has_multiple_vars: # Can't reshape but have multiple vars: use variable + time as heatmap axes heatmap_dims = ['variable', 'time'] # variable is now a heatmap dim, use period/scenario for facet/animation - actual_facet = facet_col if facet_col in da.dims and da.sizes.get(facet_col, 0) > 1 else None - actual_animation = ( - animation_frame if animation_frame in da.dims and da.sizes.get(animation_frame, 0) > 1 else None + actual_facet, _, actual_animation = _resolve_auto_facets( + da.to_dataset(name='value'), facet_col, None, animation_frame ) else: heatmap_dims = ['time'] if 'time' in da.dims else list(da.dims)[:1] @@ -1702,8 +1793,9 @@ def flows( select: SelectType | None = None, unit: Literal['flow_rate', 'flow_hours'] = 'flow_rate', colors: ColorType | None = None, - facet_col: str | None = 'period', - facet_row: str | None = 'scenario', + facet_col: str | Literal['auto'] | None = 'auto', + facet_row: str | Literal['auto'] | None = None, + animation_frame: str | Literal['auto'] | None = None, show: bool | None = None, **plotly_kwargs: Any, ) -> PlotResult: @@ -1716,8 +1808,9 @@ def flows( select: xarray-style selection. unit: 'flow_rate' or 'flow_hours'. colors: Color specification (colorscale name, color list, or label-to-color dict). - facet_col: Dimension for column facets. + facet_col: Dimension for column facets. 'auto' uses first available. facet_row: Dimension for row facets. + animation_frame: Dimension for animation slider. show: Whether to display. Returns: @@ -1760,7 +1853,9 @@ def flows( ds = ds[[lbl for lbl in matching_labels if lbl in ds]] ds = _apply_selection(ds, select) - actual_facet_col, actual_facet_row = _resolve_facets(ds, facet_col, facet_row) + actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( + ds, facet_col, facet_row, animation_frame + ) # Get unit label from first data variable's attributes unit_label = '' @@ -1774,6 +1869,7 @@ def flows( title=f'Flows [{unit_label}]' if unit_label else 'Flows', facet_col=actual_facet_col, facet_row=actual_facet_row, + animation_frame=actual_anim, **plotly_kwargs, ) @@ -1790,8 +1886,9 @@ def sizes( max_size: float | None = 1e6, select: SelectType | None = None, colors: ColorType | None = None, - facet_col: str | None = 'period', - facet_row: str | None = 'scenario', + facet_col: str | Literal['auto'] | None = 'auto', + facet_row: str | Literal['auto'] | None = None, + animation_frame: str | Literal['auto'] | None = None, show: bool | None = None, **plotly_kwargs: Any, ) -> PlotResult: @@ -1801,8 +1898,9 @@ def sizes( max_size: Maximum size to include (filters defaults). select: xarray-style selection. colors: Color specification (colorscale name, color list, or label-to-color dict). - facet_col: Dimension for column facets. + facet_col: Dimension for column facets. 'auto' uses first available. facet_row: Dimension for row facets. + animation_frame: Dimension for animation slider. show: Whether to display. 
Returns: @@ -1817,7 +1915,9 @@ def sizes( valid_labels = [lbl for lbl in ds.data_vars if float(ds[lbl].max()) < max_size] ds = ds[valid_labels] - actual_facet_col, actual_facet_row = _resolve_facets(ds, facet_col, facet_row) + actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( + ds, facet_col, facet_row, animation_frame + ) df = _dataset_to_long_df(ds) if df.empty: @@ -1832,6 +1932,7 @@ def sizes( color='variable', facet_col=actual_facet_col, facet_row=actual_facet_row, + animation_frame=actual_anim, color_discrete_map=color_map, title='Investment Sizes', labels={'variable': 'Flow', 'value': 'Size'}, @@ -1852,8 +1953,9 @@ def duration_curve( select: SelectType | None = None, normalize: bool = False, colors: ColorType | None = None, - facet_col: str | None = 'period', - facet_row: str | None = 'scenario', + facet_col: str | Literal['auto'] | None = 'auto', + facet_row: str | Literal['auto'] | None = None, + animation_frame: str | Literal['auto'] | None = None, show: bool | None = None, **plotly_kwargs: Any, ) -> PlotResult: @@ -1867,8 +1969,9 @@ def duration_curve( select: xarray-style selection. normalize: If True, normalize x-axis to 0-100%. colors: Color specification (colorscale name, color list, or label-to-color dict). - facet_col: Dimension for column facets. + facet_col: Dimension for column facets. 'auto' uses first available. facet_row: Dimension for row facets. + animation_frame: Dimension for animation slider. show: Whether to display. Returns: @@ -1927,7 +2030,9 @@ def sort_descending(arr: np.ndarray) -> np.ndarray: duration_coord = np.linspace(0, 100, n_timesteps) if normalize else np.arange(n_timesteps) result_ds = result_ds.assign_coords({duration_name: duration_coord}) - actual_facet_col, actual_facet_row = _resolve_facets(result_ds, facet_col, facet_row) + actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( + result_ds, facet_col, facet_row, animation_frame + ) # Get unit label from first data variable's attributes unit_label = '' @@ -1941,6 +2046,7 @@ def sort_descending(arr: np.ndarray) -> np.ndarray: title=f'Duration Curve [{unit_label}]' if unit_label else 'Duration Curve', facet_col=actual_facet_col, facet_row=actual_facet_row, + animation_frame=actual_anim, **plotly_kwargs, ) @@ -1962,8 +2068,9 @@ def effects( by: Literal['component', 'contributor', 'time'] | None = None, select: SelectType | None = None, colors: ColorType | None = None, - facet_col: str | None = 'period', - facet_row: str | None = 'scenario', + facet_col: str | Literal['auto'] | None = 'auto', + facet_row: str | Literal['auto'] | None = None, + animation_frame: str | Literal['auto'] | None = None, show: bool | None = None, **plotly_kwargs: Any, ) -> PlotResult: @@ -1977,8 +2084,9 @@ def effects( or None to show aggregated totals per effect. select: xarray-style selection. colors: Color specification (colorscale name, color list, or label-to-color dict). - facet_col: Dimension for column facets (ignored if not in data). - facet_row: Dimension for row facets (ignored if not in data). + facet_col: Dimension for column facets. 'auto' uses first available. + facet_row: Dimension for row facets. + animation_frame: Dimension for animation slider. show: Whether to display. 
Returns: @@ -2068,7 +2176,9 @@ def effects( raise ValueError(f"'by' must be one of 'component', 'contributor', 'time', or None, got {by!r}") # Resolve facets - actual_facet_col, actual_facet_row = _resolve_facets(combined.to_dataset(name='value'), facet_col, facet_row) + actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( + combined.to_dataset(name='value'), facet_col, facet_row, animation_frame + ) # Convert to DataFrame for plotly express df = combined.to_dataframe(name='value').reset_index() @@ -2097,6 +2207,7 @@ def effects( color_discrete_map=color_map, facet_col=actual_facet_col, facet_row=actual_facet_row, + animation_frame=actual_anim, title=title, **plotly_kwargs, ) @@ -2116,8 +2227,9 @@ def charge_states( *, select: SelectType | None = None, colors: ColorType | None = None, - facet_col: str | None = 'period', - facet_row: str | None = 'scenario', + facet_col: str | Literal['auto'] | None = 'auto', + facet_row: str | Literal['auto'] | None = None, + animation_frame: str | Literal['auto'] | None = None, show: bool | None = None, **plotly_kwargs: Any, ) -> PlotResult: @@ -2127,8 +2239,9 @@ def charge_states( storages: Storage label(s) to plot. If None, plots all storages. select: xarray-style selection. colors: Color specification (colorscale name, color list, or label-to-color dict). - facet_col: Dimension for column facets. + facet_col: Dimension for column facets. 'auto' uses first available. facet_row: Dimension for row facets. + animation_frame: Dimension for animation slider. show: Whether to display. Returns: @@ -2143,7 +2256,9 @@ def charge_states( ds = ds[[s for s in storages if s in ds]] ds = _apply_selection(ds, select) - actual_facet_col, actual_facet_row = _resolve_facets(ds, facet_col, facet_row) + actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( + ds, facet_col, facet_row, animation_frame + ) fig = _create_line( ds, @@ -2151,6 +2266,7 @@ def charge_states( title='Storage Charge States', facet_col=actual_facet_col, facet_row=actual_facet_row, + animation_frame=actual_anim, **plotly_kwargs, ) fig.update_yaxes(title_text='Charge State') @@ -2170,8 +2286,9 @@ def storage( unit: Literal['flow_rate', 'flow_hours'] = 'flow_rate', colors: ColorType | None = None, charge_state_color: str = 'black', - facet_col: str | None = 'period', - facet_row: str | None = 'scenario', + facet_col: str | Literal['auto'] | None = 'auto', + facet_row: str | Literal['auto'] | None = None, + animation_frame: str | Literal['auto'] | None = None, show: bool | None = None, **plotly_kwargs: Any, ) -> PlotResult: @@ -2187,8 +2304,9 @@ def storage( unit: 'flow_rate' (power) or 'flow_hours' (energy). colors: Color specification for flow bars. charge_state_color: Color for the charge state line overlay. - facet_col: Dimension for column facets. + facet_col: Dimension for column facets. 'auto' uses first available. facet_row: Dimension for row facets. + animation_frame: Dimension for animation slider. show: Whether to display. 
    Returns:
@@ -2232,7 +2350,9 @@ def storage(
         # Apply selection
         ds = _apply_selection(ds, select)

-        actual_facet_col, actual_facet_row = _resolve_facets(ds, facet_col, facet_row)
+        actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets(
+            ds, facet_col, facet_row, animation_frame
+        )

         # Build color map
         flow_labels = [lbl for lbl in ds.data_vars if lbl != 'charge_state']
@@ -2244,7 +2364,7 @@ def storage(
         # Convert to long-form DataFrame
         df = _dataset_to_long_df(ds)

-        # Create figure with facets using px.bar for flows, then add charge_state line
+        # Create figure with facets using px.bar for flows
         flow_df = df[df['variable'] != 'charge_state']
         charge_df = df[df['variable'] == 'charge_state']

@@ -2255,6 +2375,7 @@ def storage(
             color='variable',
             facet_col=actual_facet_col,
             facet_row=actual_facet_row,
+            animation_frame=actual_anim,
             color_discrete_map=color_map,
             title=f'{storage} Operation ({unit})',
             **plotly_kwargs,
@@ -2262,32 +2383,51 @@ def storage(
         fig.update_layout(bargap=0, bargroupgap=0)
         fig.update_traces(marker_line_width=0)

-        # Add charge state as line on secondary y-axis using px.line, then merge traces
+        # Add charge state as line on secondary y-axis
         if not charge_df.empty:
+            # Create line figure with same facets to get matching trace structure
             line_fig = px.line(
                 charge_df,
                 x='time',
                 y='value',
                 facet_col=actual_facet_col,
                 facet_row=actual_facet_row,
+                animation_frame=actual_anim,
             )
-            # Update line traces and add to main figure
-            for trace in line_fig.data:
-                trace.name = 'charge_state'
-                trace.line = dict(color=charge_state_color, width=2)
-                trace.yaxis = 'y2'
-                trace.showlegend = True
-                fig.add_trace(trace)
-            # Add secondary y-axis
-            fig.update_layout(
-                yaxis2=dict(
-                    title='Charge State',
-                    overlaying='y',
+            # Get the primary y-axes from the bar figure to create matching secondary axes
+            # px creates axes named: yaxis, yaxis2, yaxis3, etc.
+            primary_yaxes = [key for key in fig.layout if key.startswith('yaxis')]
+
+            # For each primary y-axis, create a secondary y-axis
+            for i, primary_key in enumerate(sorted(primary_yaxes, key=lambda x: int(x[5:]) if x[5:] else 0)):
+                # Determine the secondary axis name; a plain shift (y -> y2, y2 -> y3)
+                # would collide with the primary axes px already created for the facets.
+                # Instead use a consistent +100 offset: yaxis -> yaxis101, yaxis2 -> yaxis102, etc.
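# (A hedged note on plotly's axis naming, for orientation: the layout stores
#  y-axes under keys 'yaxis', 'yaxis2', ..., while traces reference them via
#  the short ids 'y', 'y2', .... px only creates one y-axis per facet, so any
#  offset larger than the facet count avoids collisions; +100 is a generous margin.)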
+ primary_num = primary_key[5:] if primary_key[5:] else '1' + secondary_num = int(primary_num) + 100 # Use high offset to avoid conflicts + secondary_key = f'yaxis{secondary_num}' + secondary_anchor = f'x{primary_num}' if primary_num != '1' else 'x' + + fig.layout[secondary_key] = dict( + overlaying=f'y{primary_num}' if primary_num != '1' else 'y', side='right', showgrid=False, + title='Charge State' if i == len(primary_yaxes) - 1 else None, + anchor=secondary_anchor, ) - ) + + # Add line traces with correct axis assignments + for i, trace in enumerate(line_fig.data): + # Map trace index to secondary y-axis + primary_num = i + 1 if i > 0 else 1 + secondary_yaxis = f'y{primary_num + 100}' + + trace.name = 'charge_state' + trace.line = dict(color=charge_state_color, width=2) + trace.yaxis = secondary_yaxis + trace.showlegend = i == 0 # Only show legend for first trace + trace.legendgroup = 'charge_state' + fig.add_trace(trace) if show is None: show = CONFIG.Plotting.default_show From 3e07bca86e22b36f9942df4cb160d2d73bc4d600 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 26 Dec 2025 15:04:23 +0100 Subject: [PATCH 154/191] timesteps (96) and timesteps_extra (97) correctly match the 2D (cluster, time) data structure. --- flixopt/flow_system.py | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 16eaf8cc1..6ca79dce1 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -702,16 +702,16 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: is_clustered_dataset = 'cluster' in ds.dims and reference_structure.get('is_clustered', False) if is_clustered_dataset: - # Clustered dataset: create synthetic DatetimeIndex + # Clustered dataset: use intra-cluster time coordinate n_clusters = ds.sizes['cluster'] timesteps_per_cluster = ds.sizes['time'] - n_total_timesteps = n_clusters * timesteps_per_cluster timestep_duration_hours = reference_structure.get('timestep_duration', 1.0) - # Create synthetic DatetimeIndex for compatibility - synthetic_timesteps = pd.date_range( + # Use the actual intra-cluster time coordinate (e.g., 96 elements for daily clustering) + # This matches coords['time'] and the 2D (cluster, time) data structure + cluster_timesteps = pd.date_range( start='2000-01-01', - periods=n_total_timesteps, + periods=timesteps_per_cluster, freq=pd.Timedelta(hours=timestep_duration_hours), name='time', ) @@ -730,7 +730,7 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: # Create FlowSystem instance with constructor parameters flow_system = cls( - timesteps=synthetic_timesteps, + timesteps=cluster_timesteps if is_clustered_dataset else synthetic_timesteps, periods=ds.indexes.get('period'), scenarios=ds.indexes.get('scenario'), hours_of_last_timestep=reference_structure.get('hours_of_last_timestep'), @@ -755,17 +755,11 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: } # Override timestep_duration to have correct shape for 2D cluster structure # Shape: (time,) = (timesteps_per_cluster,) - broadcasts with (cluster, time) - # Use DatetimeIndex consistent with _cluster_time_coords - time_coords_2d = pd.date_range( - start='2000-01-01', - periods=timesteps_per_cluster, - freq=pd.Timedelta(hours=timestep_duration_hours), - name='time', - ) + # Use flow_system.timesteps which is now the intra-cluster time coordinate flow_system.timestep_duration = xr.DataArray( np.full(timesteps_per_cluster, timestep_duration_hours), dims=['time'], - 
coords={'time': time_coords_2d},
+            coords={'time': flow_system.timesteps},
                 name='timestep_duration',
             )
             # cluster_weight will be set after Clustering object is attached

From e5c3cf3f2da9840ad1847d69d91cbb54413f20d8 Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Fri, 26 Dec 2025 15:04:38 +0100
Subject: [PATCH 155/191] Revert storage energy balance reorder

---
 flixopt/components.py | 11 ++++++++---
 1 file changed, 8 insertions(+), 3 deletions(-)

diff --git a/flixopt/components.py b/flixopt/components.py
index 303ce1716..a308050f6 100644
--- a/flixopt/components.py
+++ b/flixopt/components.py
@@ -952,7 +952,7 @@ def _add_netto_discharge_constraint(self):

     def _add_energy_balance_constraint(self):
         """Add energy balance constraint linking charge states across timesteps."""
-        self.add_constraints(self._build_energy_balance_lhs(), short_name='charge_state')
+        self.add_constraints(self._build_energy_balance_lhs() == 0, short_name='charge_state')

     def _add_cluster_cyclic_constraint(self):
         """For 'cyclic' cluster mode: each cluster's start equals its end."""
@@ -1024,6 +1024,11 @@ def _build_energy_balance_lhs(self):
             + charge_rate * eta_charge * dt
             - discharge_rate / eta_discharge * dt

+        Rearranged as LHS = 0:
+            charge_state[t+1] - charge_state[t] * (1 - loss)^dt
+            - charge_rate * eta_charge * dt
+            + discharge_rate / eta_discharge * dt = 0
+
         Returns:
             The LHS expression (should equal 0).
         """
@@ -1036,11 +1041,11 @@ def _build_energy_balance_lhs(self):
         eff_discharge = self.element.eta_discharge

         return (
-            charge_state.shift(time=-1)
+            charge_state.isel(time=slice(1, None))
             - charge_state.isel(time=slice(None, -1)) * ((1 - rel_loss) ** timestep_duration)
             - charge_rate * eff_charge * timestep_duration
             + discharge_rate * timestep_duration / eff_discharge
-        ).isel(time=slice(None, -1)) == 0
+        )

     @property
     def _absolute_charge_state_bounds(self) -> tuple[xr.DataArray, xr.DataArray]:

From d78179bbbcef5d2495363803866b235e5f92ce45 Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Fri, 26 Dec 2025 15:48:01 +0100
Subject: [PATCH 156/191] ⏺ Both clustered and non-clustered FlowSystems work
 correctly.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Summary of simplification:
1. Added clusters: pd.Index | None = None parameter to constructor (like periods, scenarios)
2. Simplified coords property - just adds cluster if self.clusters is not None
3. Simplified _use_true_cluster_dims → return self.clusters is not None
4. Simplified _cluster_n_clusters → return len(self.clusters)
5. Simplified _cluster_timesteps_per_cluster → return len(self.timesteps)
6. Simplified _cluster_time_coords → return self.timesteps
7. Simplified from_dataset - just uses ds.indexes.get('cluster') and ds.indexes['time']
8. Simplified to_dataset - uses self.clusters directly
9. Removed all _cluster_info and _clustered_data handling

The cluster dimension is now treated just like period and scenario - simply
another dimension in the coordinate system. The code is much cleaner and more
consistent.
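A minimal sketch of the simplified dimension handling (assuming only pandas;
the names mirror the summary above and are illustrative, not taken from the
diff below):

import pandas as pd

# Coordinates are composed the way the simplified `coords` property does it:
# `cluster` is just another optional index, exactly like `period` and `scenario`.
timesteps = pd.date_range('2020-01-01', periods=24, freq='h', name='time')
clusters = pd.Index(range(4), name='cluster')  # e.g. 4 typical days
periods = None                                 # not used in this sketch

coords = {}
if clusters is not None:
    coords['cluster'] = clusters               # yields the 2D (cluster, time) structure
coords['time'] = timesteps
if periods is not None:
    coords['period'] = periods

print(list(coords))  # ['cluster', 'time']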
--- flixopt/flow_system.py | 157 ++++++++--------------------------------- 1 file changed, 28 insertions(+), 129 deletions(-) diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index 6ca79dce1..c10a1defb 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -172,6 +172,7 @@ def __init__( timesteps: pd.DatetimeIndex, periods: pd.Index | None = None, scenarios: pd.Index | None = None, + clusters: pd.Index | None = None, hours_of_last_timestep: int | float | None = None, hours_of_previous_timesteps: int | float | np.ndarray | None = None, weight_of_last_period: int | float | None = None, @@ -193,6 +194,7 @@ def __init__( self.periods = None if periods is None else self._validate_periods(periods) self.scenarios = None if scenarios is None else self._validate_scenarios(scenarios) + self.clusters = clusters # Cluster dimension for clustered FlowSystems self.timestep_duration = self.fit_to_model_coords('timestep_duration', timestep_duration) @@ -643,17 +645,12 @@ def to_dataset(self, include_solution: bool = True) -> xr.Dataset: carriers_structure[name] = carrier_ref ds.attrs['carriers'] = json.dumps(carriers_structure) - # Include cluster info for 2D clustered FlowSystems - if self._use_true_cluster_dims: + # Include cluster info for clustered FlowSystems + if self.clusters is not None: ds.attrs['is_clustered'] = True - ds.attrs['n_clusters'] = self._cluster_n_clusters - ds.attrs['timesteps_per_cluster'] = self._cluster_timesteps_per_cluster - if hasattr(self, '_cluster_info') and self._cluster_info is not None: - ds.attrs['timestep_duration'] = self._cluster_info.get('timestep_duration', 1.0) - elif hasattr(self, 'timestep_duration') and self.timestep_duration is not None: - ds.attrs['timestep_duration'] = float(self.timestep_duration.mean()) - else: - ds.attrs['timestep_duration'] = 1.0 + ds.attrs['n_clusters'] = len(self.clusters) + ds.attrs['timesteps_per_cluster'] = len(self.timesteps) + ds.attrs['timestep_duration'] = float(self.timestep_duration.mean()) # Add version info ds.attrs['flixopt_version'] = __version__ @@ -698,30 +695,13 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: # Create arrays dictionary from config variables only arrays_dict = config_vars - # Detect clustered dataset with (cluster, time) dimensions - is_clustered_dataset = 'cluster' in ds.dims and reference_structure.get('is_clustered', False) - - if is_clustered_dataset: - # Clustered dataset: use intra-cluster time coordinate - n_clusters = ds.sizes['cluster'] - timesteps_per_cluster = ds.sizes['time'] - timestep_duration_hours = reference_structure.get('timestep_duration', 1.0) - - # Use the actual intra-cluster time coordinate (e.g., 96 elements for daily clustering) - # This matches coords['time'] and the 2D (cluster, time) data structure - cluster_timesteps = pd.date_range( - start='2000-01-01', - periods=timesteps_per_cluster, - freq=pd.Timedelta(hours=timestep_duration_hours), - name='time', - ) + # Extract cluster index if present (clustered FlowSystem) + clusters = ds.indexes.get('cluster') - # cluster_weight for clustered mode is (cluster,) shaped - don't pass to constructor - # It will be set separately after FlowSystem creation + # For clustered datasets, cluster_weight is (cluster,) shaped - set separately + if clusters is not None: cluster_weight_for_constructor = None else: - # Regular dataset: use time index directly - synthetic_timesteps = ds.indexes['time'] cluster_weight_for_constructor = ( cls._resolve_dataarray_reference(reference_structure['cluster_weight'], 
arrays_dict) if 'cluster_weight' in reference_structure @@ -730,9 +710,10 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: # Create FlowSystem instance with constructor parameters flow_system = cls( - timesteps=cluster_timesteps if is_clustered_dataset else synthetic_timesteps, + timesteps=ds.indexes['time'], periods=ds.indexes.get('period'), scenarios=ds.indexes.get('scenario'), + clusters=clusters, hours_of_last_timestep=reference_structure.get('hours_of_last_timestep'), hours_of_previous_timesteps=reference_structure.get('hours_of_previous_timesteps'), weight_of_last_period=reference_structure.get('weight_of_last_period'), @@ -745,25 +726,6 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: name=reference_structure.get('name'), ) - # For clustered datasets, store the 2D data and set up cluster structure - if is_clustered_dataset: - flow_system._clustered_data = ds - flow_system._cluster_info = { - 'n_clusters': n_clusters, - 'timesteps_per_cluster': timesteps_per_cluster, - 'timestep_duration': timestep_duration_hours, - } - # Override timestep_duration to have correct shape for 2D cluster structure - # Shape: (time,) = (timesteps_per_cluster,) - broadcasts with (cluster, time) - # Use flow_system.timesteps which is now the intra-cluster time coordinate - flow_system.timestep_duration = xr.DataArray( - np.full(timesteps_per_cluster, timestep_duration_hours), - dims=['time'], - coords={'time': flow_system.timesteps}, - name='timestep_duration', - ) - # cluster_weight will be set after Clustering object is attached - # Restore components components_structure = reference_structure.get('components', {}) for comp_label, comp_data in components_structure.items(): @@ -1904,22 +1866,17 @@ def storages(self) -> ElementContainer[Storage]: def coords(self) -> dict[FlowSystemDimensions, pd.Index]: """Active coordinates for variable creation. - Returns a dict of dimension names to coordinate arrays. When clustered - with true dimensions enabled, includes 'cluster' dimension before 'time'. + Returns a dict of dimension names to coordinate arrays. When clustered, + includes 'cluster' dimension before 'time'. Returns: Dict mapping dimension names to coordinate arrays. """ - if self._use_true_cluster_dims: - # True (cluster, time) dimensions - n_clusters = self._cluster_n_clusters - time_coords = self._cluster_time_coords - active_coords = { - 'cluster': pd.Index(range(n_clusters), name='cluster'), - 'time': time_coords, - } - else: - active_coords = {'time': self.timesteps} + active_coords: dict[str, pd.Index] = {} + + if self.clusters is not None: + active_coords['cluster'] = self.clusters + active_coords['time'] = self.timesteps if self.periods is not None: active_coords['period'] = self.periods @@ -1929,81 +1886,23 @@ def coords(self) -> dict[FlowSystemDimensions, pd.Index]: @property def _use_true_cluster_dims(self) -> bool: - """Check if true (cluster, time) dimensions should be used. - - This enables the new 2D cluster structure. Returns True if: - 1. The FlowSystem has _clustered_data with 'cluster' dimension, OR - 2. The FlowSystem has _cluster_info set (from from_dataset) - - Note: This can be True even before clustering is fully set up, - to allow variable creation with correct dimensions. 
- """ - # Check for 2D clustered data structure - if hasattr(self, '_clustered_data') and self._clustered_data is not None: - return 'cluster' in self._clustered_data.dims - # Check for cluster info from from_dataset - if hasattr(self, '_cluster_info') and self._cluster_info is not None: - return True - return False + """Check if true (cluster, time) dimensions should be used.""" + return self.clusters is not None @property def _cluster_n_clusters(self) -> int | None: - """Get number of clusters from cluster info or clustering object.""" - if hasattr(self, '_cluster_info') and self._cluster_info is not None: - return self._cluster_info['n_clusters'] - if self.is_clustered: - return self.clustering.n_clusters - return None + """Get number of clusters.""" + return len(self.clusters) if self.clusters is not None else None @property def _cluster_timesteps_per_cluster(self) -> int | None: - """Get timesteps per cluster from cluster info or clustering object.""" - if hasattr(self, '_cluster_info') and self._cluster_info is not None: - return self._cluster_info['timesteps_per_cluster'] - if self.is_clustered: - return self.clustering.timesteps_per_cluster - return None + """Get timesteps per cluster (same as len(timesteps) for clustered systems).""" + return len(self.timesteps) if self.clusters is not None else None @property def _cluster_time_coords(self) -> pd.DatetimeIndex | None: - """Get time coordinates for clustered system. - - Returns DatetimeIndex for time within cluster (e.g., 00:00-23:00 for daily clustering). - """ - # Try to get from _clustered_data first (has the actual coords) - if hasattr(self, '_clustered_data') and self._clustered_data is not None: - if 'time' in self._clustered_data.coords: - time_coord = self._clustered_data.coords['time'].values - if isinstance(time_coord, np.ndarray) and np.issubdtype(time_coord.dtype, np.datetime64): - return pd.DatetimeIndex(time_coord, name='time') - - # Fall back to generating from _cluster_info - if hasattr(self, '_cluster_info') and self._cluster_info is not None: - timesteps_per_cluster = self._cluster_info['timesteps_per_cluster'] - dt = self._cluster_info.get('timestep_duration', 1.0) - return pd.date_range( - start='2000-01-01', - periods=timesteps_per_cluster, - freq=pd.Timedelta(hours=dt), - name='time', - ) - - # Fall back to clustering object - if self.is_clustered: - timesteps_per_cluster = self.clustering.timesteps_per_cluster - # Try to get dt from timestep_duration - if hasattr(self, 'timestep_duration') and self.timestep_duration is not None: - dt = float(self.timestep_duration.mean()) - else: - dt = 1.0 - return pd.date_range( - start='2000-01-01', - periods=timesteps_per_cluster, - freq=pd.Timedelta(hours=dt), - name='time', - ) - - return None + """Get time coordinates for clustered system (same as timesteps).""" + return self.timesteps if self.clusters is not None else None @property def n_timesteps(self) -> int: From 8815eaac3f5f298cf30c227077d3161307a565a6 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Fri, 26 Dec 2025 18:40:31 +0100 Subject: [PATCH 157/191] minor simplifications --- flixopt/components.py | 2 +- flixopt/structure.py | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/flixopt/components.py b/flixopt/components.py index a308050f6..d0a55104a 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -956,7 +956,7 @@ def _add_energy_balance_constraint(self): def _add_cluster_cyclic_constraint(self): """For 'cyclic' cluster mode: each 
cluster's start equals its end.""" - if self._model.flow_system._use_true_cluster_dims and self.element.cluster_mode == 'cyclic': + if self._model.flow_system.clusters is not None and self.element.cluster_mode == 'cyclic': self.add_constraints( self.charge_state.isel(time=0) == self.charge_state.isel(time=-2), short_name='cluster_cyclic', diff --git a/flixopt/structure.py b/flixopt/structure.py index 0c07a2602..69925ae77 100644 --- a/flixopt/structure.py +++ b/flixopt/structure.py @@ -290,8 +290,7 @@ def solution(self): } # Ensure solution is always indexed by timesteps_extra for consistency. # Variables without extra timestep data will have NaN at the final timestep. - # Skip reindexing for clustered systems which use integer time indices - if 'time' in solution.coords and not self.flow_system._use_true_cluster_dims: + if 'time' in solution.coords: if not solution.indexes['time'].equals(self.flow_system.timesteps_extra): solution = solution.reindex(time=self.flow_system.timesteps_extra) return solution @@ -387,7 +386,7 @@ def get_coords( # In clustered systems, 'time' is always paired with 'cluster' # So when 'time' is requested, also include 'cluster' if available effective_dims = set(dims) - if 'time' in dims and self.flow_system._use_true_cluster_dims: + if 'time' in dims and 'cluster' in self.flow_system.coords: effective_dims.add('cluster') coords = {k: v for k, v in self.flow_system.coords.items() if k in effective_dims} From 5df515f5635d49d3090e92d16a795fad405d2f1f Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 27 Dec 2025 00:36:44 +0100 Subject: [PATCH 158/191] Add options for states in cluster mode --- flixopt/elements.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/flixopt/elements.py b/flixopt/elements.py index ba2b72f80..4ca88184b 100644 --- a/flixopt/elements.py +++ b/flixopt/elements.py @@ -5,7 +5,7 @@ from __future__ import annotations import logging -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Literal import numpy as np import xarray as xr @@ -372,6 +372,13 @@ class Flow(Element): fixed_relative_profile: Predetermined pattern as fraction of size. Flow rate = size × fixed_relative_profile(t). previous_flow_rate: Initial flow state for active/inactive status at model start. Default: None (inactive). + cluster_mode: How inter-timestep constraints are handled at cluster boundaries. + Only relevant when using ``transform.cluster()``. Options: + + - ``'independent'``: Each cluster uses ``previous_flow_rate`` as initial state. + Clusters are optimized independently. (default) + - ``'cyclic'``: Each cluster's final state equals its initial state. + Ensures consistent behavior within each representative period. meta_data: Additional info stored in results. Python native types only. 
Examples: @@ -485,6 +492,7 @@ def __init__( load_factor_min: Numeric_PS | None = None, load_factor_max: Numeric_PS | None = None, previous_flow_rate: Scalar | list[Scalar] | None = None, + cluster_mode: Literal['independent', 'cyclic'] = 'independent', meta_data: dict | None = None, ): super().__init__(label, meta_data=meta_data) @@ -505,6 +513,7 @@ def __init__( self.status_parameters = status_parameters self.previous_flow_rate = previous_flow_rate + self.cluster_mode = cluster_mode self.component: str = 'UnknownComponent' self.is_input_in_component: bool | None = None @@ -742,6 +751,15 @@ def _create_status_model(self): ), short_name='status', ) + self._add_cluster_cyclic_constraint() + + def _add_cluster_cyclic_constraint(self): + """For 'cyclic' cluster mode: each cluster's start status equals its end status.""" + if self._model.flow_system.clusters is not None and self.element.cluster_mode == 'cyclic': + self.add_constraints( + self.status.status.isel(time=0) == self.status.status.isel(time=-1), + short_name='cluster_cyclic', + ) def _create_investment_model(self): self.add_submodels( From fcf22698fd1e03cf9e55f222dd28cdf5367518d9 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 27 Dec 2025 01:29:21 +0100 Subject: [PATCH 159/191] Revert "Add options for states in cluster mode" This reverts commit 5df515f5635d49d3090e92d16a795fad405d2f1f. --- flixopt/elements.py | 20 +------------------- 1 file changed, 1 insertion(+), 19 deletions(-) diff --git a/flixopt/elements.py b/flixopt/elements.py index 4ca88184b..ba2b72f80 100644 --- a/flixopt/elements.py +++ b/flixopt/elements.py @@ -5,7 +5,7 @@ from __future__ import annotations import logging -from typing import TYPE_CHECKING, Literal +from typing import TYPE_CHECKING import numpy as np import xarray as xr @@ -372,13 +372,6 @@ class Flow(Element): fixed_relative_profile: Predetermined pattern as fraction of size. Flow rate = size × fixed_relative_profile(t). previous_flow_rate: Initial flow state for active/inactive status at model start. Default: None (inactive). - cluster_mode: How inter-timestep constraints are handled at cluster boundaries. - Only relevant when using ``transform.cluster()``. Options: - - - ``'independent'``: Each cluster uses ``previous_flow_rate`` as initial state. - Clusters are optimized independently. (default) - - ``'cyclic'``: Each cluster's final state equals its initial state. - Ensures consistent behavior within each representative period. meta_data: Additional info stored in results. Python native types only. 
Examples: @@ -492,7 +485,6 @@ def __init__( load_factor_min: Numeric_PS | None = None, load_factor_max: Numeric_PS | None = None, previous_flow_rate: Scalar | list[Scalar] | None = None, - cluster_mode: Literal['independent', 'cyclic'] = 'independent', meta_data: dict | None = None, ): super().__init__(label, meta_data=meta_data) @@ -513,7 +505,6 @@ def __init__( self.status_parameters = status_parameters self.previous_flow_rate = previous_flow_rate - self.cluster_mode = cluster_mode self.component: str = 'UnknownComponent' self.is_input_in_component: bool | None = None @@ -751,15 +742,6 @@ def _create_status_model(self): ), short_name='status', ) - self._add_cluster_cyclic_constraint() - - def _add_cluster_cyclic_constraint(self): - """For 'cyclic' cluster mode: each cluster's start status equals its end status.""" - if self._model.flow_system.clusters is not None and self.element.cluster_mode == 'cyclic': - self.add_constraints( - self.status.status.isel(time=0) == self.status.status.isel(time=-1), - short_name='cluster_cyclic', - ) def _create_investment_model(self): self.add_submodels( From 820fe6a020953bd93b466841efe2f458a6295872 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 27 Dec 2025 01:34:49 +0100 Subject: [PATCH 160/191] Update tests --- tests/test_cluster_reduce_expand.py | 23 +++++++++++++++++------ tests/test_clustering/test_integration.py | 10 +++++++--- 2 files changed, 24 insertions(+), 9 deletions(-) diff --git a/tests/test_cluster_reduce_expand.py b/tests/test_cluster_reduce_expand.py index 806d545a9..e7bb602e2 100644 --- a/tests/test_cluster_reduce_expand.py +++ b/tests/test_cluster_reduce_expand.py @@ -53,8 +53,13 @@ def test_cluster_creates_reduced_timesteps(timesteps_8_days): cluster_duration='1D', ) - # Should have 2 * 24 = 48 timesteps instead of 192 - assert len(fs_reduced.timesteps) == 48 + # Clustered FlowSystem has 2D structure: (cluster, time) + # - timesteps: within-cluster time (24 hours) + # - clusters: cluster indices (2 clusters) + # Total effective timesteps = 2 * 24 = 48 + assert len(fs_reduced.timesteps) == 24 # Within-cluster time + assert len(fs_reduced.clusters) == 2 # Number of clusters + assert len(fs_reduced.timesteps) * len(fs_reduced.clusters) == 48 # Total assert hasattr(fs_reduced, 'clustering') assert fs_reduced.clustering.result.cluster_structure.n_clusters == 2 @@ -72,13 +77,16 @@ def test_expand_solution_restores_full_timesteps(solver_fixture, timesteps_8_day # Optimize fs_reduced.optimize(solver_fixture) assert fs_reduced.solution is not None - assert len(fs_reduced.timesteps) == 48 + # Clustered: 24 within-cluster timesteps, 2 clusters + assert len(fs_reduced.timesteps) == 24 + assert len(fs_reduced.clusters) == 2 # Expand back to full fs_expanded = fs_reduced.transform.expand_solution() - # Should have original timestep count + # Should have original timestep count (flat, no clusters) assert len(fs_expanded.timesteps) == 192 + assert fs_expanded.clusters is None # Expanded FlowSystem has no cluster dimension assert fs_expanded.solution is not None @@ -272,8 +280,11 @@ def test_cluster_with_scenarios(timesteps_8_days, scenarios_2): cluster_duration='1D', ) - # Should have 2 * 24 = 48 timesteps - assert len(fs_reduced.timesteps) == 48 + # Clustered: 24 within-cluster timesteps, 2 clusters + # Total effective timesteps = 2 * 24 = 48 + assert len(fs_reduced.timesteps) == 24 + assert len(fs_reduced.clusters) == 2 + assert len(fs_reduced.timesteps) * len(fs_reduced.clusters) == 48 # Should have 
aggregation info with cluster structure info = fs_reduced.clustering diff --git a/tests/test_clustering/test_integration.py b/tests/test_clustering/test_integration.py index e3c6083a0..587e39160 100644 --- a/tests/test_clustering/test_integration.py +++ b/tests/test_clustering/test_integration.py @@ -125,9 +125,13 @@ def test_cluster_reduces_timesteps(self): cluster_duration='1D', ) - # Check that timesteps were reduced (from 168 hours to 48 hours = 2 days x 24 hours) - assert len(fs_clustered.timesteps) < len(fs.timesteps) - assert len(fs_clustered.timesteps) == 48 # 2 representative days x 24 hours + # Clustered FlowSystem has 2D structure: (cluster, time) + # - timesteps: within-cluster time (24 hours) + # - clusters: cluster indices (2 clusters) + # Total effective timesteps = 2 * 24 = 48 + assert len(fs_clustered.timesteps) == 24 # Within-cluster time + assert len(fs_clustered.clusters) == 2 # Number of clusters + assert len(fs_clustered.timesteps) * len(fs_clustered.clusters) == 48 class TestClusteringModuleImports: From 12950f66d908733eba8ee6580ad7f073abda0baf Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 27 Dec 2025 01:44:41 +0100 Subject: [PATCH 161/191] Fix expand_solution() --- flixopt/clustering/base.py | 10 ++++++---- tests/test_cluster_reduce_expand.py | 10 ++++++---- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/flixopt/clustering/base.py b/flixopt/clustering/base.py index 1470f0634..2c442e3d5 100644 --- a/flixopt/clustering/base.py +++ b/flixopt/clustering/base.py @@ -375,8 +375,9 @@ def expand_data(self, aggregated: xr.DataArray, original_time: xr.DataArray | No mapping = timestep_mapping.values if has_cluster_dim: # 2D cluster structure: convert flat indices to (cluster, time_within) - # n_clusters = aggregated.sizes['cluster'] - timesteps_per_cluster = aggregated.sizes['time'] + # Use cluster_structure's timesteps_per_cluster, not aggregated.sizes['time'] + # because the solution may include extra timesteps (timesteps_extra) + timesteps_per_cluster = self.cluster_structure.timesteps_per_cluster cluster_ids = mapping // timesteps_per_cluster time_within = mapping % timesteps_per_cluster expanded_values = aggregated.values[cluster_ids, time_within] @@ -415,8 +416,9 @@ def expand_data(self, aggregated: xr.DataArray, original_time: xr.DataArray | No if has_cluster_dim: # 2D cluster structure: convert flat indices to (cluster, time_within) - _n_clusters = slice_da.sizes['cluster'] - timesteps_per_cluster = slice_da.sizes['time'] + # Use cluster_structure's timesteps_per_cluster, not slice_da.sizes['time'] + # because the solution may include extra timesteps (timesteps_extra) + timesteps_per_cluster = self.cluster_structure.timesteps_per_cluster cluster_ids = mapping // timesteps_per_cluster time_within = mapping % timesteps_per_cluster expanded_values = slice_da.values[cluster_ids, time_within] diff --git a/tests/test_cluster_reduce_expand.py b/tests/test_cluster_reduce_expand.py index e7bb602e2..a25fd9eaa 100644 --- a/tests/test_cluster_reduce_expand.py +++ b/tests/test_cluster_reduce_expand.py @@ -138,9 +138,10 @@ def test_expand_solution_maps_values_correctly(solver_fixture, timesteps_8_days) # Values in the expanded solution for this original segment # should match the reduced solution for the corresponding typical cluster # With 2D cluster structure, use cluster_id to index the cluster dimension + # Note: solution may have extra timesteps (timesteps_extra), so slice to timesteps_per_cluster if 
reduced_flow.ndim == 2: - # 2D structure: (cluster, time) - expected = reduced_flow[cluster_id, :] + # 2D structure: (cluster, time) - exclude extra timestep if present + expected = reduced_flow[cluster_id, :timesteps_per_cluster] else: # Flat structure: (time,) typical_start = cluster_id * timesteps_per_cluster @@ -354,9 +355,10 @@ def test_expand_solution_maps_scenarios_independently(solver_fixture, timesteps_ orig_end = orig_start + timesteps_per_cluster # With 2D cluster structure, use cluster_id to index the cluster dimension + # Note: solution may have extra timesteps (timesteps_extra), so slice to timesteps_per_cluster if reduced_scenario.ndim == 2: - # 2D structure: (cluster, time) - expected = reduced_scenario[cluster_id, :] + # 2D structure: (cluster, time) - exclude extra timestep if present + expected = reduced_scenario[cluster_id, :timesteps_per_cluster] else: # Flat structure: (time,) typical_start = cluster_id * timesteps_per_cluster From f4ef7411169dcb45afc74fc9f3168f45a7eb7cb8 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 27 Dec 2025 01:45:14 +0100 Subject: [PATCH 162/191] Fix Storage --- flixopt/components.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/flixopt/components.py b/flixopt/components.py index d0a55104a..5056bb694 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -982,7 +982,22 @@ def _add_investment_model(self): ) def _add_initial_final_constraints(self): - """Add initial and final charge state constraints.""" + """Add initial and final charge state constraints. + + For clustered systems with 'independent' or 'cyclic' mode, these constraints + are skipped because: + - 'independent': Each cluster has free start/end SOC + - 'cyclic': Start == end is handled by _add_cluster_cyclic_constraint, + but no specific initial value is enforced + """ + # Skip initial/final constraints for clustered systems with independent/cyclic mode + # These modes should have free or cyclic SOC, not a fixed initial value per cluster + if self._model.flow_system.clusters is not None and self.element.cluster_mode in ( + 'independent', + 'cyclic', + ): + return + if self.element.initial_charge_state is not None: if isinstance(self.element.initial_charge_state, str): self.add_constraints( From 937f4e2734b509502a254a4767bf8942a4811cfe Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 27 Dec 2025 02:16:17 +0100 Subject: [PATCH 163/191] Recombine charge state in intercluster mode --- flixopt/transform_accessor.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index af07fd01e..7dc1089b2 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -1178,6 +1178,33 @@ def expand_da(da: xr.DataArray) -> xr.DataArray: attrs=reduced_solution.attrs, ) + # 3. Combine charge_state with SOC_boundary for InterclusterStorageModel storages + # For intercluster storages, charge_state is relative (ΔE) and can be negative. + # The actual SOC is: SOC_boundary[d] + charge_state(t), where d is the original period. 
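# (A hedged numeric sketch of the formula above, with assumed values: two
#  original periods of 4 timesteps mapped to clusters [0, 1];
#  soc_boundary = [10.0, 6.0] is the absolute SOC at each period start and
#  charge_state(t) = [-1, -2, -3, -4] is the relative ΔE within a period,
#  giving SOC(t) = [9, 8, 7, 6] for period 0 and [5, 4, 3, 2] for period 1.)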
+ soc_boundary_vars = [name for name in reduced_solution.data_vars if name.endswith('|SOC_boundary')] + for soc_boundary_name in soc_boundary_vars: + storage_name = soc_boundary_name.rsplit('|', 1)[0] + charge_state_name = f'{storage_name}|charge_state' + if charge_state_name not in expanded_fs._solution: + continue + + soc_boundary = reduced_solution[soc_boundary_name] + expanded_charge_state = expanded_fs._solution[charge_state_name] + + # Map each original timestep to its original period index + original_period_indices = np.arange(n_original_timesteps) // timesteps_per_cluster + + # Select SOC_boundary for each timestep (boundary[d] for period d) + # SOC_boundary has dim 'cluster_boundary', we select indices 0..n_original_periods-1 + soc_boundary_per_timestep = soc_boundary.isel( + cluster_boundary=xr.DataArray(original_period_indices, dims=['time']) + ) + soc_boundary_per_timestep = soc_boundary_per_timestep.assign_coords(time=original_timesteps) + + # Combine: actual_SOC = SOC_boundary + charge_state + combined_charge_state = expanded_charge_state + soc_boundary_per_timestep + expanded_fs._solution[charge_state_name] = combined_charge_state.assign_attrs(expanded_charge_state.attrs) + n_combinations = len(periods) * len(scenarios) n_original_segments = cluster_structure.n_original_periods logger.info( From 116415a9457f8cd86eced1c7ca55f467e57cf494 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 27 Dec 2025 02:39:11 +0100 Subject: [PATCH 164/191] Fix storage model --- flixopt/components.py | 39 ++++++++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 11 deletions(-) diff --git a/flixopt/components.py b/flixopt/components.py index 5056bb694..47db39441 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -1301,14 +1301,14 @@ def _add_initial_final_constraints(self): pass def _add_intercluster_linking(self) -> None: - """Add inter-cluster storage linking following the S-N model. + """Add inter-cluster storage linking following the S-K model from Blanke et al. (2022). This method implements the core inter-cluster linking logic: 1. Constrains charge_state (ΔE) at each cluster start to 0 - 2. Creates SOC_boundary variables to track absolute SOC - 3. Links boundaries via: SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC - 4. Adds combined bounds: 0 ≤ SOC_boundary + ΔE ≤ capacity + 2. Creates SOC_boundary variables to track absolute SOC at period boundaries + 3. Links boundaries via Eq. 5: SOC_boundary[d+1] = SOC_boundary[d] * (1-loss)^N + delta_SOC + 4. Adds combined bounds per Eq. 9: 0 ≤ SOC_boundary * (1-loss)^t + ΔE ≤ capacity 5. Enforces initial/cyclic constraint on SOC_boundary """ from .clustering.intercluster_helpers import ( @@ -1357,7 +1357,7 @@ def _add_intercluster_linking(self) -> None: delta_soc = self._compute_delta_soc(n_clusters, timesteps_per_cluster) # 5. Add linking constraints - self._add_linking_constraints(soc_boundary, delta_soc, cluster_order, n_original_periods) + self._add_linking_constraints(soc_boundary, delta_soc, cluster_order, n_original_periods, timesteps_per_cluster) # 6. Add cyclic or initial constraint if self.element.cluster_mode == 'intercluster_cyclic': @@ -1436,20 +1436,24 @@ def _add_linking_constraints( delta_soc: xr.DataArray, cluster_order: xr.DataArray, n_original_periods: int, + timesteps_per_cluster: int, ) -> None: """Add constraints linking consecutive SOC_boundary values. - Implements: SOC_boundary[d+1] = SOC_boundary[d] + delta_SOC[cluster_order[d]] + Per Blanke et al. 
(2022) Eq. 5, implements: + SOC_boundary[d+1] = SOC_boundary[d] * (1-loss)^N + delta_SOC[cluster_order[d]] + + where N is timesteps_per_cluster and loss is self-discharge rate per timestep. This connects the SOC at the end of original period d to the SOC at the - start of period d+1, using the net charge change from the representative - cluster that was mapped to period d. + start of period d+1, accounting for self-discharge decay over the period. Args: soc_boundary: SOC_boundary variable. delta_soc: Net SOC change per cluster. cluster_order: Mapping from original periods to representative clusters. n_original_periods: Number of original (non-clustered) periods. + timesteps_per_cluster: Number of timesteps in each cluster period. """ soc_after = soc_boundary.isel(cluster_boundary=slice(1, None)) soc_before = soc_boundary.isel(cluster_boundary=slice(None, -1)) @@ -1463,7 +1467,13 @@ def _add_linking_constraints( # Get delta_soc for each original period using cluster_order delta_soc_ordered = delta_soc.isel(cluster=cluster_order) - lhs = soc_after - soc_before - delta_soc_ordered + # Apply self-discharge decay factor (1-loss)^N to soc_before per Eq. 5 + # Use mean over time (linking operates at period level, not timestep) + # Keep as DataArray to respect per-period/scenario values + rel_loss = self.element.relative_loss_per_hour.mean('time') + decay_n = (1 - rel_loss) ** timesteps_per_cluster + + lhs = soc_after - soc_before * decay_n - delta_soc_ordered self.add_constraints(lhs == 0, short_name='link') def _add_combined_bound_constraints( @@ -1476,7 +1486,8 @@ def _add_combined_bound_constraints( ) -> None: """Add constraints ensuring actual SOC stays within bounds. - The actual SOC is: SOC(t) = SOC_boundary[d] + ΔE(t) + Per Blanke et al. (2022) Eq. 9, the actual SOC at time t in period d is: + SOC(t) = SOC_boundary[d] * (1-loss)^t + ΔE(t) This must satisfy: 0 ≤ SOC(t) ≤ capacity @@ -1500,6 +1511,10 @@ def _add_combined_bound_constraints( soc_d = soc_d.rename({'cluster_boundary': 'original_period'}) soc_d = soc_d.assign_coords(original_period=np.arange(n_original_periods)) + # Get self-discharge rate for decay calculation + # Keep as DataArray to respect per-period/scenario values + rel_loss = self.element.relative_loss_per_hour.mean('time') + sample_offsets = [0, timesteps_per_cluster // 2, timesteps_per_cluster - 1] for sample_name, offset in zip(['start', 'mid', 'end'], sample_offsets, strict=False): @@ -1509,7 +1524,9 @@ def _add_combined_bound_constraints( cs_t = cs_t.rename({'cluster': 'original_period'}) cs_t = cs_t.assign_coords(original_period=np.arange(n_original_periods)) - combined = soc_d + cs_t + # Apply decay factor (1-loss)^t to SOC_boundary per Eq. 9 + decay_t = (1 - rel_loss) ** offset + combined = soc_d * decay_t + cs_t self.add_constraints(combined >= 0, short_name=f'soc_lb_{sample_name}') From 358af3c58f3d694c4f9078a5e77a4e560e502dc4 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 27 Dec 2025 11:05:04 +0100 Subject: [PATCH 165/191] Summary of Changes 1. flixopt/components.py - InterclusterStorageModel fixes _add_linking_constraints (line 1470-1476): - Added decay factor (1-loss)^N to link SOC between periods per Blanke et al. (2022) Eq. 5 - Changed from scalar float() conversion to keeping rel_loss.mean('time') as DataArray to respect period/scenario dimensions _add_combined_bound_constraints (line 1514-1528): - Added decay factor (1-loss)^t to the combined bounds check per Blanke et al. (2022) Eq. 
9 - Changed from scalar float() conversion to keeping rel_loss.mean('time') as DataArray 2. flixopt/transform_accessor.py - expand_solution fix Lines 1206-1220: - Fixed self-discharge decay calculation to use xarray DataArrays properly - Created time_within_period_da as a DataArray with 'time' dimension - Compute decay as (1 - loss_value) ** time_within_period_da to properly broadcast across period/scenario dimensions Key Technical Details - All parameters use xr.DataArray to respect per-period/scenario values - Decay factors use mean loss rate over time (period-level linking doesn't use per-timestep loss) - xarray broadcasting handles alignment across dimensions automatically --- flixopt/transform_accessor.py | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 7dc1089b2..9a4d6e804 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -1180,7 +1180,9 @@ def expand_da(da: xr.DataArray) -> xr.DataArray: # 3. Combine charge_state with SOC_boundary for InterclusterStorageModel storages # For intercluster storages, charge_state is relative (ΔE) and can be negative. - # The actual SOC is: SOC_boundary[d] + charge_state(t), where d is the original period. + # Per Blanke et al. (2022) Eq. 9, actual SOC at time t in period d is: + # SOC(t) = SOC_boundary[d] * (1 - loss)^t_within_period + charge_state(t) + # where t_within_period is hours from period start (accounts for self-discharge decay). soc_boundary_vars = [name for name in reduced_solution.data_vars if name.endswith('|SOC_boundary')] for soc_boundary_name in soc_boundary_vars: storage_name = soc_boundary_name.rsplit('|', 1)[0] @@ -1201,8 +1203,26 @@ def expand_da(da: xr.DataArray) -> xr.DataArray: ) soc_boundary_per_timestep = soc_boundary_per_timestep.assign_coords(time=original_timesteps) - # Combine: actual_SOC = SOC_boundary + charge_state - combined_charge_state = expanded_charge_state + soc_boundary_per_timestep + # Apply self-discharge decay to SOC_boundary based on time within period + # Get the storage's relative_loss_per_hour from original flow system + storage = original_fs.storages[storage_name] + if storage is not None: + # Time within period for each timestep (0, 1, 2, ..., timesteps_per_cluster-1, 0, 1, ...) 
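# (A hedged mini-example with assumed sizes: for timesteps_per_cluster = 4 and
#  n_original_timesteps = 8, np.arange(8) % 4 yields [0, 1, 2, 3, 0, 1, 2, 3],
#  so the decay factor (1 - loss) ** t restarts at each original period boundary.)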
+                time_within_period = np.arange(n_original_timesteps) % timesteps_per_cluster
+                time_within_period_da = xr.DataArray(
+                    time_within_period, dims=['time'], coords={'time': original_timesteps}
+                )
+                # Decay factor: (1 - loss)^t, using mean loss over time
+                # Keep as DataArray to respect per-period/scenario values
+                loss_value = storage.relative_loss_per_hour.mean('time')
+                if (loss_value > 0).any():
+                    decay_da = (1 - loss_value) ** time_within_period_da
+                    soc_boundary_per_timestep = soc_boundary_per_timestep * decay_da
+
+            # Combine: actual_SOC = SOC_boundary * decay + charge_state
+            # Clip to non-negative since actual SOC cannot be negative
+            # (small negative values may occur due to constraint approximations in the model)
+            combined_charge_state = (expanded_charge_state + soc_boundary_per_timestep).clip(min=0)
             expanded_fs._solution[charge_state_name] = combined_charge_state.assign_attrs(expanded_charge_state.attrs)

         n_combinations = len(periods) * len(scenarios)

From b8540ed7424516ee9a92313203ffc1f37cdc488f Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Sun, 28 Dec 2025 12:09:48 +0100
Subject: [PATCH 166/191] Feature/better data for notebooks (#542)

* Add realistic data to notebooks
* Add local catch warnings to solution of model
* Add local catch warnings to solution of model
* Remove solving from generate_example_systems.py script
* Remove new system
* Remove new system
---
 .pre-commit-config.yaml | 2 +-
 docs/notebooks/data/__init__.py | 1 +
 .../data/generate_example_systems.py | 238 +-
 .../data/generate_realistic_profiles.py | 259 +
 docs/notebooks/data/raw/README.md | 31 +
 .../data/raw/electricity_prices_de_2020.csv | 6574 +++++++++++++
 docs/notebooks/data/raw/tmy_dresden.csv | 8761 +++++++++++++++++
 flixopt/structure.py | 11 +-
 pyproject.toml | 4 +
 9 files changed, 15763 insertions(+), 118 deletions(-)
 create mode 100644 docs/notebooks/data/__init__.py
 create mode 100644 docs/notebooks/data/generate_realistic_profiles.py
 create mode 100644 docs/notebooks/data/raw/README.md
 create mode 100644 docs/notebooks/data/raw/electricity_prices_de_2020.csv
 create mode 100644 docs/notebooks/data/raw/tmy_dresden.csv

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f30c2b5cb..18b1eb4be 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -7,7 +7,7 @@ repos:
       - id: check-yaml
         exclude: ^mkdocs\.yml$ # Skip mkdocs.yml
       - id: check-added-large-files
-        exclude: .*Zeitreihen2020\.csv$
+        exclude: (.*Zeitreihen2020\.csv$|docs/notebooks/data/raw/.*)

   - repo: https://github.com/astral-sh/ruff-pre-commit
     rev: v0.12.4
diff --git a/docs/notebooks/data/__init__.py b/docs/notebooks/data/__init__.py
new file mode 100644
index 000000000..fd6d62d1d
--- /dev/null
+++ b/docs/notebooks/data/__init__.py
@@ -0,0 +1 @@
+# Data generation utilities for flixopt documentation examples
diff --git a/docs/notebooks/data/generate_example_systems.py b/docs/notebooks/data/generate_example_systems.py
index c8e81167f..15b6b13a7 100644
--- a/docs/notebooks/data/generate_example_systems.py
+++ b/docs/notebooks/data/generate_example_systems.py
@@ -15,6 +15,13 @@
 import numpy as np
 import pandas as pd
+from generate_realistic_profiles import (
+    ElectricityLoadGenerator,
+    GasPriceGenerator,
+    ThermalLoadGenerator,
+    load_electricity_prices,
+    load_weather,
+)

 import flixopt as fx

@@ -27,6 +34,11 @@
 OUTPUT_DIR = Path('docs/notebooks/data')
 DATA_DIR = Path('docs/notebooks/data')

+# Load shared data
+_weather = load_weather()
+_elec_prices =
load_electricity_prices() +_elec_prices.index = _elec_prices.index.tz_localize(None) # Remove timezone for compatibility + def create_simple_system() -> fx.FlowSystem: """Create a simple heat system with boiler, storage, and demand. @@ -34,27 +46,22 @@ def create_simple_system() -> fx.FlowSystem: Components: - Gas boiler (150 kW) - Thermal storage (500 kWh) - - Office heat demand + - Office heat demand (BDEW profile) - One week, hourly resolution. + One week (January 2020), hourly resolution. + Uses realistic BDEW heat demand and seasonal gas prices. """ - # One week, hourly - timesteps = pd.date_range('2024-01-15', periods=168, freq='h') - - # Create demand pattern - hours = np.arange(168) - hour_of_day = hours % 24 - day_of_week = (hours // 24) % 7 - - base_demand = np.where((hour_of_day >= 7) & (hour_of_day <= 18), 80, 30) - weekend_factor = np.where(day_of_week >= 5, 0.5, 1.0) + # One week, hourly (January 2020 for realistic data) + timesteps = pd.date_range('2020-01-15', periods=168, freq='h') + temp = _weather.loc[timesteps, 'temperature_C'].values - np.random.seed(42) - heat_demand = base_demand * weekend_factor + np.random.normal(0, 5, len(hours)) - heat_demand = np.clip(heat_demand, 20, 100) + # BDEW office heat demand profile (scaled to fit 150 kW boiler) + thermal_gen = ThermalLoadGenerator() + heat_demand = thermal_gen.generate(timesteps, temp, 'office', annual_demand_kwh=15_000) - # Time-varying gas price - gas_price = np.where((hour_of_day >= 6) & (hour_of_day <= 22), 0.08, 0.05) + # Seasonal gas price + gas_gen = GasPriceGenerator() + gas_price = gas_gen.generate(timesteps) / 1000 # EUR/kWh fs = fx.FlowSystem(timesteps) fs.add_carriers( @@ -98,30 +105,32 @@ def create_complex_system() -> fx.FlowSystem: - Heat pump - Gas boiler (backup) - Thermal storage - - Heat demand + - Heat demand (BDEW retail profile) + - Electricity demand (BDEW commercial profile) Effects: costs (objective), CO2 - Three days, hourly resolution. + Three days (June 2020), hourly resolution. + Uses realistic BDEW profiles and OPSD electricity prices. 
""" - timesteps = pd.date_range('2024-06-01', periods=72, freq='h') - hours = np.arange(72) - hour_of_day = hours % 24 + timesteps = pd.date_range('2020-06-01', periods=72, freq='h') + temp = _weather.loc[timesteps, 'temperature_C'].values - # Demand profiles - np.random.seed(123) - heat_demand = 50 + 30 * np.sin(2 * np.pi * hour_of_day / 24 - np.pi / 2) + np.random.normal(0, 5, 72) - heat_demand = np.clip(heat_demand, 20, 100) + # BDEW demand profiles (scaled to fit component sizes) + thermal_gen = ThermalLoadGenerator() + heat_demand = thermal_gen.generate(timesteps, temp, 'retail', annual_demand_kwh=2_000) - electricity_demand = 20 + 15 * np.sin(2 * np.pi * hour_of_day / 24) + np.random.normal(0, 3, 72) - electricity_demand = np.clip(electricity_demand, 10, 50) + elec_gen = ElectricityLoadGenerator() + electricity_demand = elec_gen.generate(timesteps, 'commercial', annual_demand_kwh=50_000) - # Price profiles - electricity_price = np.where((hour_of_day >= 8) & (hour_of_day <= 20), 0.25, 0.12) - gas_price = 0.06 + # Real electricity prices (OPSD) and seasonal gas prices + electricity_price = _elec_prices.reindex(timesteps, method='ffill').values / 1000 # EUR/kWh + gas_gen = GasPriceGenerator() + gas_price = gas_gen.generate(timesteps) / 1000 # EUR/kWh - # CO2 factors (kg/kWh) - electricity_co2 = np.where((hour_of_day >= 8) & (hour_of_day <= 20), 0.4, 0.3) # Higher during peak + # CO2 factors (kg/kWh) - higher during peak hours + hour_of_day = timesteps.hour.values + electricity_co2 = np.where((hour_of_day >= 8) & (hour_of_day <= 20), 0.4, 0.3) gas_co2 = 0.2 fs = fx.FlowSystem(timesteps) @@ -235,26 +244,34 @@ def create_complex_system() -> fx.FlowSystem: def create_district_heating_system() -> fx.FlowSystem: - """Create a district heating system using real-world data. - - Based on Zeitreihen2020.csv data: - - One month of data at 15-minute resolution + """Create a district heating system with BDEW profiles. 
+ + Uses realistic German data: + - One month (January 2020), hourly resolution + - BDEW industrial heat profile + - BDEW commercial electricity profile + - OPSD electricity prices + - Seasonal gas prices - CHP, boiler, storage, and grid connections - Investment optimization for sizing - Used by: 08a-aggregation, 08b-rolling-horizon, 08c-clustering notebooks + Used by: 08a-aggregation, 08c-clustering, 08e-clustering-internals notebooks """ - # Load real data - data_path = DATA_DIR / 'Zeitreihen2020.csv' - data = pd.read_csv(data_path, index_col=0, parse_dates=True).sort_index() - data = data['2020-01-01':'2020-01-31 23:45:00'] # One month - data.index.name = 'time' - - timesteps = data.index - electricity_demand = data['P_Netz/MW'].to_numpy() - heat_demand = data['Q_Netz/MW'].to_numpy() - electricity_price = data['Strompr.€/MWh'].to_numpy() - gas_price = data['Gaspr.€/MWh'].to_numpy() + # One month, hourly + timesteps = pd.date_range('2020-01-01', '2020-01-31 23:00:00', freq='h') + temp = _weather.loc[timesteps, 'temperature_C'].values + + # BDEW profiles (MW scale for district heating) + thermal_gen = ThermalLoadGenerator() + heat_demand = thermal_gen.generate(timesteps, temp, 'industrial', annual_demand_kwh=15_000_000) / 1000 # MW + + elec_gen = ElectricityLoadGenerator() + electricity_demand = elec_gen.generate(timesteps, 'commercial', annual_demand_kwh=5_000_000) / 1000 # MW + + # Prices + electricity_price = _elec_prices.reindex(timesteps, method='ffill').values # EUR/MWh + gas_gen = GasPriceGenerator() + gas_price = gas_gen.generate(timesteps) # EUR/MWh fs = fx.FlowSystem(timesteps) fs.add_elements( @@ -354,7 +371,11 @@ def create_district_heating_system() -> fx.FlowSystem: def create_operational_system() -> fx.FlowSystem: """Create an operational district heating system (no investments). 
- Based on Zeitreihen2020.csv data (two weeks): + Uses realistic German data (two weeks, January 2020): + - BDEW industrial heat profile + - BDEW commercial electricity profile + - OPSD electricity prices + - Seasonal gas prices - CHP with startup costs - Boiler with startup costs - Storage with fixed capacity @@ -362,17 +383,21 @@ def create_operational_system() -> fx.FlowSystem: Used by: 08b-rolling-horizon notebook """ - # Load real data - data_path = DATA_DIR / 'Zeitreihen2020.csv' - data = pd.read_csv(data_path, index_col=0, parse_dates=True).sort_index() - data = data['2020-01-01':'2020-01-14 23:45:00'] # Two weeks - data.index.name = 'time' - - timesteps = data.index - electricity_demand = data['P_Netz/MW'].to_numpy() - heat_demand = data['Q_Netz/MW'].to_numpy() - electricity_price = data['Strompr.€/MWh'].to_numpy() - gas_price = data['Gaspr.€/MWh'].to_numpy() + # Two weeks, hourly + timesteps = pd.date_range('2020-01-01', '2020-01-14 23:00:00', freq='h') + temp = _weather.loc[timesteps, 'temperature_C'].values + + # BDEW profiles (MW scale) + thermal_gen = ThermalLoadGenerator() + heat_demand = thermal_gen.generate(timesteps, temp, 'industrial', annual_demand_kwh=15_000_000) / 1000 # MW + + elec_gen = ElectricityLoadGenerator() + electricity_demand = elec_gen.generate(timesteps, 'commercial', annual_demand_kwh=5_000_000) / 1000 # MW + + # Prices + electricity_price = _elec_prices.reindex(timesteps, method='ffill').values # EUR/MWh + gas_gen = GasPriceGenerator() + gas_price = gas_gen.generate(timesteps) # EUR/MWh fs = fx.FlowSystem(timesteps) fs.add_elements( @@ -456,8 +481,8 @@ def create_seasonal_storage_system() -> fx.FlowSystem: Demonstrates seasonal storage value with: - Full year at hourly resolution (8760 timesteps) - - Solar thermal: high in summer, low in winter - - Heat demand: high in winter, low in summer + - Solar thermal from PVGIS irradiance data + - Heat demand from BDEW industrial profile - Large seasonal pit storage (bridges seasons) - Gas boiler backup @@ -465,42 +490,30 @@ def create_seasonal_storage_system() -> fx.FlowSystem: - Summer: excess solar heat stored in pit - Winter: stored heat reduces gas consumption + Uses realistic PVGIS solar irradiance and BDEW heat profiles. 
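+
+    A minimal usage sketch (illustrative; assumes the transform.cluster() API
+    introduced in this patch, with demonstration values for its arguments):
+
+    >>> fs = create_seasonal_storage_system()
+    >>> clustered = fs.transform.cluster(n_clusters=8, cluster_duration='1D')
+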
     Used by: 08c-clustering, 08c2-clustering-storage-modes notebooks
     """
-    # Full year, hourly
-    timesteps = pd.date_range('2024-01-01', periods=8760, freq='h')
-    hours = np.arange(8760)
-    hour_of_day = hours % 24
-    day_of_year = hours // 24
-
-    np.random.seed(42)
-
-    # --- Solar irradiance profile ---
-    # Seasonal variation: peaks in summer (day ~180), low in winter
-    seasonal_solar = 0.5 + 0.5 * np.cos(2 * np.pi * (day_of_year - 172) / 365)  # Peak around June 21
-
-    # Daily variation: peaks at noon
-    daily_solar = np.maximum(0, np.cos(2 * np.pi * (hour_of_day - 12) / 24))
-
-    # Combine and scale (MW of solar thermal potential per MW installed)
-    solar_profile = seasonal_solar * daily_solar
-    solar_profile = solar_profile * (0.8 + 0.2 * np.random.random(8760))  # Add some variation
+    # Full year, hourly (use a non-leap year to match the TMY data, which has 8760 hours)
+    timesteps = pd.date_range('2019-01-01', periods=8760, freq='h')
+    # TMY weather covers a typical non-leap year (8760 hours, no Feb 29), so it aligns 1:1
+    temp = _weather['temperature_C'].values
+    ghi = _weather['ghi_W_m2'].values
+
+    # --- Solar thermal profile from PVGIS irradiance ---
+    # Normalize GHI to a 0-1 availability profile; no explicit collector efficiency is applied
+    solar_profile = ghi / 1000  # Normalized (1000 W/m² = 1.0)
     solar_profile = np.clip(solar_profile, 0, 1)
 
-    # --- Heat demand profile ---
-    # Seasonal: high in winter, low in summer
-    seasonal_demand = 0.6 + 0.4 * np.cos(2 * np.pi * day_of_year / 365)  # Peak Jan 1
-
-    # Daily: higher during day, lower at night
-    daily_demand = 0.7 + 0.3 * np.sin(2 * np.pi * (hour_of_day - 6) / 24)
+    # --- Heat demand from BDEW industrial profile ---
+    # Generate in kW, then rescale to MW (district heating scale)
+    # Use the year 2019 for demandlib (non-leap year)
+    thermal_gen = ThermalLoadGenerator(year=2019)
+    heat_demand_kw = thermal_gen.generate(timesteps, temp, 'industrial', annual_demand_kwh=20_000_000)
+    heat_demand = heat_demand_kw / 1000  # Convert to MW
 
-    # Combine and scale to ~5 MW peak
-    heat_demand = 5 * seasonal_demand * daily_demand
-    heat_demand = heat_demand * (0.9 + 0.2 * np.random.random(8760))  # Add variation
-    heat_demand = np.clip(heat_demand, 0.5, 6)  # MW
-
-    # --- Gas price (slight seasonal variation) ---
-    gas_price = 40 + 10 * np.cos(2 * np.pi * day_of_year / 365)  # €/MWh, higher in winter
+    # --- Gas price with seasonal variation ---
+    gas_gen = GasPriceGenerator()
+    gas_price = gas_gen.generate(timesteps)  # EUR/MWh
 
     fs = fx.FlowSystem(timesteps)
     fs.add_carriers(
@@ -599,11 +612,12 @@ def create_multiperiod_system() -> fx.FlowSystem:
 
     Each period: 336 hours (2 weeks) - suitable for clustering demonstrations.
    Use transform.sisel() to select subsets if needed.
+
+    Uses BDEW residential heat profile as base, scaled for scenarios.
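+
+    A minimal usage sketch (illustrative; the keyword passed to sisel() is a
+    hypothetical placeholder, not a verified signature):
+
+    >>> fs = create_multiperiod_system()
+    >>> fs_2024 = fs.transform.sisel(periods=[2024])  # hypothetical kwarg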
""" n_hours = 336 # 2 weeks - timesteps = pd.date_range('2024-01-01', periods=n_hours, freq='h') - hour_of_day = np.arange(n_hours) % 24 - day_of_week = (np.arange(n_hours) // 24) % 7 + timesteps = pd.date_range('2020-01-01', periods=n_hours, freq='h') + temp = _weather.loc[timesteps, 'temperature_C'].values # Period definitions (years) periods = pd.Index([2024, 2025, 2026], name='period') @@ -612,27 +626,27 @@ def create_multiperiod_system() -> fx.FlowSystem: scenarios = pd.Index(['high_demand', 'low_demand'], name='scenario') scenario_weights = np.array([0.3, 0.7]) - # Base demand pattern (hourly) with daily and weekly variation - base_pattern = np.where((hour_of_day >= 7) & (hour_of_day <= 18), 80.0, 35.0) - weekend_factor = np.where(day_of_week >= 5, 0.6, 1.0) - base_pattern = base_pattern * weekend_factor + # BDEW residential heat profile as base (scaled to fit 250 kW boiler with scenarios) + thermal_gen = ThermalLoadGenerator() + base_demand = thermal_gen.generate(timesteps, temp, 'residential', annual_demand_kwh=30_000) # Scenario-specific scaling - np.random.seed(42) - high_demand = base_pattern * 1.3 + np.random.normal(0, 8, n_hours) - low_demand = base_pattern * 0.8 + np.random.normal(0, 5, n_hours) + high_demand = base_demand * 1.3 + low_demand = base_demand * 0.7 # Create DataFrame with scenario columns heat_demand = pd.DataFrame( { - 'high_demand': np.clip(high_demand, 20, 150), - 'low_demand': np.clip(low_demand, 15, 100), + 'high_demand': high_demand, + 'low_demand': low_demand, }, index=timesteps, ) - # Gas price varies by period (rising costs) - gas_prices = np.array([0.06, 0.08, 0.10]) # Per period + # Gas price varies by period (rising costs, based on seasonal price) + gas_gen = GasPriceGenerator() + base_gas = gas_gen.generate(timesteps).mean() / 1000 # Average EUR/kWh + gas_prices = np.array([base_gas, base_gas * 1.2, base_gas * 1.5]) # Rising costs per period fs = fx.FlowSystem( timesteps, @@ -682,8 +696,6 @@ def create_multiperiod_system() -> fx.FlowSystem: def main(): """Generate all example systems and save to netCDF.""" - solver = fx.solvers.HighsSolver(log_to_console=False) - systems = [ ('simple_system', create_simple_system), ('complex_system', create_complex_system), @@ -697,16 +709,10 @@ def main(): print(f'Creating {name}...') fs = create_func() - print(' Optimizing...') - fs.optimize(solver) - output_path = OUTPUT_DIR / f'{name}.nc4' print(f' Saving to {output_path}...') fs.to_netcdf(output_path, overwrite=True) - print(f' Done. Objective: {fs.solution["objective"].item():.2f}') - print() - print('All systems generated successfully!') diff --git a/docs/notebooks/data/generate_realistic_profiles.py b/docs/notebooks/data/generate_realistic_profiles.py new file mode 100644 index 000000000..0a326362c --- /dev/null +++ b/docs/notebooks/data/generate_realistic_profiles.py @@ -0,0 +1,259 @@ +"""Generate realistic German energy profiles for flixOpt examples. 
+ +This module provides functions to create realistic time series data for: +- Thermal load profiles (BDEW standard load profiles via demandlib) +- Electricity load profiles (BDEW standard load profiles via demandlib) +- Solar generation profiles (via pvlib) +- Energy prices (bundled OPSD data) +- Weather data (bundled PVGIS TMY data for Dresden) + +Example: + >>> from generate_realistic_profiles import load_weather, ThermalLoadGenerator + >>> weather = load_weather() + >>> thermal = ThermalLoadGenerator() + >>> heat_demand = thermal.generate(weather.index, weather['temperature_C'], 'residential', 50000) +""" + +from __future__ import annotations + +from pathlib import Path + +import holidays +import numpy as np +import pandas as pd +import pvlib +from demandlib import bdew + +# Data directory +DATA_DIR = Path(__file__).parent / 'raw' + + +# === Data Loading === + + +def load_weather() -> pd.DataFrame: + """Load PVGIS TMY weather data for Dresden. + + Returns + ------- + pd.DataFrame + Hourly weather data with columns: + - temperature_C: Ambient temperature (°C) + - ghi_W_m2: Global horizontal irradiance (W/m²) + - dni_W_m2: Direct normal irradiance (W/m²) + - dhi_W_m2: Diffuse horizontal irradiance (W/m²) + - wind_speed_m_s: Wind speed at 10m (m/s) + """ + return pd.read_csv(DATA_DIR / 'tmy_dresden.csv', parse_dates=['time'], index_col='time') + + +def load_electricity_prices() -> pd.Series: + """Load German day-ahead electricity prices (2020). + + Returns + ------- + pd.Series + Hourly electricity prices in EUR/MWh + """ + df = pd.read_csv(DATA_DIR / 'electricity_prices_de_2020.csv', parse_dates=['time'], index_col='time') + return df['price_eur_mwh'] + + +# === Profile Generators === + + +class ThermalLoadGenerator: + """Generate thermal load profiles using BDEW standard load profiles. + + Uses demandlib to create realistic heat demand profiles based on + German BDEW (Bundesverband der Energie- und Wasserwirtschaft) standards. + """ + + BUILDING_TYPES = { + 'residential': {'shlp_type': 'EFH', 'building_class': 5}, # Single-family house + 'residential_multi': {'shlp_type': 'MFH', 'building_class': 5}, # Multi-family + 'office': {'shlp_type': 'GKO', 'building_class': 0}, # Commercial office + 'retail': {'shlp_type': 'GHA', 'building_class': 0}, # Retail/shops + 'industrial': {'shlp_type': 'GMK', 'building_class': 0}, # Industrial + } + + def __init__(self, year: int = 2020): + self.year = year + self.holidays = holidays.Germany(years=year) + + def generate( + self, + timesteps: pd.DatetimeIndex, + temperature: np.ndarray | pd.Series, + building_type: str = 'residential', + annual_demand_kwh: float = 20000, + ) -> np.ndarray: + """Generate thermal load profile. 
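+
+        A short sketch (illustrative; assumes the bundled Dresden TMY data is
+        available so load_weather() succeeds, and the building type and annual
+        demand are arbitrary demonstration values):
+
+        >>> weather = load_weather()
+        >>> gen = ThermalLoadGenerator(year=2020)
+        >>> demand_kw = gen.generate(
+        ...     weather.index, weather['temperature_C'], 'office', 100_000
+        ... )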
+ + Parameters + ---------- + timesteps + Time index for the profile + temperature + Ambient temperature in Celsius (same length as timesteps) + building_type + One of: 'residential', 'residential_multi', 'office', 'retail', 'industrial' + annual_demand_kwh + Total annual heat demand in kWh + + Returns + ------- + np.ndarray + Heat demand profile in kW + """ + params = self.BUILDING_TYPES[building_type] + temp_series = pd.Series(temperature, index=timesteps) + + profile = bdew.HeatBuilding( + timesteps, + holidays=self.holidays, + temperature=temp_series, + shlp_type=params['shlp_type'], + building_class=params['building_class'], + wind_class=0, + annual_heat_demand=annual_demand_kwh, + name=building_type, + ) + return profile.get_bdew_profile().values + + +class ElectricityLoadGenerator: + """Generate electricity load profiles using BDEW standard load profiles.""" + + CONSUMER_TYPES = { + 'household': 'h0', + 'commercial': 'g0', + 'commercial_office': 'g1', + 'commercial_retail': 'g4', + 'agricultural': 'l0', + } + + def __init__(self, year: int = 2020): + self.year = year + self.holidays = holidays.Germany(years=year) + + def generate( + self, + timesteps: pd.DatetimeIndex, + consumer_type: str = 'household', + annual_demand_kwh: float = 4000, + ) -> np.ndarray: + """Generate electricity load profile. + + Parameters + ---------- + timesteps + Time index for the profile + consumer_type + One of: 'household', 'commercial', 'commercial_office', 'commercial_retail', 'agricultural' + annual_demand_kwh + Total annual electricity demand in kWh + + Returns + ------- + np.ndarray + Electricity demand profile in kW + """ + slp_type = self.CONSUMER_TYPES[consumer_type] + e_slp = bdew.ElecSlp(self.year, holidays=self.holidays) + profile = e_slp.get_scaled_power_profiles({slp_type: annual_demand_kwh}) + # Resample to hourly and align with requested timesteps + profile_hourly = profile[slp_type].resample('h').mean() + return profile_hourly.reindex(timesteps, method='ffill').values + + +class SolarGenerator: + """Generate solar irradiance and PV generation profiles using pvlib. + + Uses Dresden location (51.05°N, 13.74°E) as default. + """ + + def __init__(self, latitude: float = 51.05, longitude: float = 13.74): + self.location = pvlib.location.Location(latitude, longitude, 'Europe/Berlin', 120, 'Dresden') + + def generate_pv_profile( + self, + timesteps: pd.DatetimeIndex, + weather: pd.DataFrame, + surface_tilt: float = 35, + surface_azimuth: float = 180, # South-facing + capacity_kw: float = 1.0, + ) -> np.ndarray: + """Generate PV power output profile. 
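+
+        A short sketch (illustrative; the tilt and capacity values are
+        arbitrary demonstration choices):
+
+        >>> weather = load_weather()
+        >>> solar = SolarGenerator()
+        >>> pv_kw = solar.generate_pv_profile(
+        ...     weather.index, weather, surface_tilt=30, capacity_kw=100
+        ... )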
+ + Parameters + ---------- + timesteps + Time index for the profile + weather + Weather data with 'ghi_W_m2', 'dni_W_m2', 'dhi_W_m2', 'temperature_C' + surface_tilt + Panel tilt angle in degrees (0=horizontal, 90=vertical) + surface_azimuth + Panel azimuth in degrees (180=south, 90=east, 270=west) + capacity_kw + Installed PV capacity in kW + + Returns + ------- + np.ndarray + PV power output in kW + """ + # Ensure weather is aligned with timesteps + weather = weather.reindex(timesteps, method='ffill') + + # Get solar position + solar_position = self.location.get_solarposition(timesteps) + + # Calculate plane-of-array irradiance + poa = pvlib.irradiance.get_total_irradiance( + surface_tilt=surface_tilt, + surface_azimuth=surface_azimuth, + solar_zenith=solar_position['apparent_zenith'], + solar_azimuth=solar_position['azimuth'], + dni=weather['dni_W_m2'], + ghi=weather['ghi_W_m2'], + dhi=weather['dhi_W_m2'], + ) + + # Simple efficiency model: ~15% module efficiency, ~85% system efficiency + system_efficiency = 0.15 * 0.85 + pv_output = poa['poa_global'] * system_efficiency * capacity_kw / 1000 + + return np.clip(pv_output.fillna(0).values, 0, capacity_kw) + + +class GasPriceGenerator: + """Generate synthetic gas price profiles with seasonal variation.""" + + def generate( + self, + timesteps: pd.DatetimeIndex, + base_price: float = 35, + winter_premium: float = 10, + ) -> np.ndarray: + """Generate gas price profile. + + Parameters + ---------- + timesteps + Time index for the profile + base_price + Base gas price in EUR/MWh + winter_premium + Additional winter price in EUR/MWh + + Returns + ------- + np.ndarray + Gas prices in EUR/MWh + """ + day_of_year = timesteps.dayofyear.values + # Peak in mid-January (day 15), trough in mid-July + seasonal = winter_premium * np.cos(2 * np.pi * (day_of_year - 15) / 365) + return base_price + seasonal diff --git a/docs/notebooks/data/raw/README.md b/docs/notebooks/data/raw/README.md new file mode 100644 index 000000000..37c83b1e5 --- /dev/null +++ b/docs/notebooks/data/raw/README.md @@ -0,0 +1,31 @@ +# Bundled Data Sources + +## Weather Data (TMY) + +**File:** `tmy_dresden.csv` +**Location:** Dresden, Germany (51.05°N, 13.74°E) +**Source:** PVGIS - Photovoltaic Geographical Information System +**Provider:** European Commission Joint Research Centre +**License:** Free for any use +**URL:** https://re.jrc.ec.europa.eu/pvg_tools/en/ + +**Columns:** +- `temperature_C`: 2m air temperature (°C) +- `ghi_W_m2`: Global horizontal irradiance (W/m²) +- `dni_W_m2`: Direct normal irradiance (W/m²) +- `dhi_W_m2`: Diffuse horizontal irradiance (W/m²) +- `wind_speed_m_s`: Wind speed at 10m (m/s) +- `relative_humidity_percent`: Relative humidity (%) + +## Electricity Prices + +**File:** `electricity_prices_de_2020.csv` +**Coverage:** Germany, Jan-Sep 2020, hourly +**Source:** Open Power System Data +**License:** Open Database License (ODbL) +**URL:** https://data.open-power-system-data.org/time_series/ + +**Attribution required:** "Data from Open Power System Data. 
https://open-power-system-data.org" + +**Columns:** +- `price_eur_mwh`: Day-ahead electricity price (EUR/MWh) diff --git a/docs/notebooks/data/raw/electricity_prices_de_2020.csv b/docs/notebooks/data/raw/electricity_prices_de_2020.csv new file mode 100644 index 000000000..25a0f2e24 --- /dev/null +++ b/docs/notebooks/data/raw/electricity_prices_de_2020.csv @@ -0,0 +1,6574 @@ +time,price_eur_mwh +2020-01-01 00:00:00+00:00,38.6 +2020-01-01 01:00:00+00:00,36.55 +2020-01-01 02:00:00+00:00,32.32 +2020-01-01 03:00:00+00:00,30.85 +2020-01-01 04:00:00+00:00,30.14 +2020-01-01 05:00:00+00:00,30.17 +2020-01-01 06:00:00+00:00,30.0 +2020-01-01 07:00:00+00:00,30.65 +2020-01-01 08:00:00+00:00,30.65 +2020-01-01 09:00:00+00:00,30.27 +2020-01-01 10:00:00+00:00,30.34 +2020-01-01 11:00:00+00:00,30.99 +2020-01-01 12:00:00+00:00,30.04 +2020-01-01 13:00:00+00:00,30.75 +2020-01-01 14:00:00+00:00,32.11 +2020-01-01 15:00:00+00:00,35.98 +2020-01-01 16:00:00+00:00,40.4 +2020-01-01 17:00:00+00:00,44.05 +2020-01-01 18:00:00+00:00,43.15 +2020-01-01 19:00:00+00:00,43.45 +2020-01-01 20:00:00+00:00,40.68 +2020-01-01 21:00:00+00:00,40.27 +2020-01-01 22:00:00+00:00,34.85 +2020-01-01 23:00:00+00:00,35.4 +2020-01-02 00:00:00+00:00,31.98 +2020-01-02 01:00:00+00:00,30.5 +2020-01-02 02:00:00+00:00,28.79 +2020-01-02 03:00:00+00:00,28.42 +2020-01-02 04:00:00+00:00,28.75 +2020-01-02 05:00:00+00:00,34.16 +2020-01-02 06:00:00+00:00,42.07 +2020-01-02 07:00:00+00:00,44.89 +2020-01-02 08:00:00+00:00,45.26 +2020-01-02 09:00:00+00:00,45.57 +2020-01-02 10:00:00+00:00,45.09 +2020-01-02 11:00:00+00:00,45.16 +2020-01-02 12:00:00+00:00,44.9 +2020-01-02 13:00:00+00:00,44.06 +2020-01-02 14:00:00+00:00,44.84 +2020-01-02 15:00:00+00:00,44.4 +2020-01-02 16:00:00+00:00,46.05 +2020-01-02 17:00:00+00:00,46.72 +2020-01-02 18:00:00+00:00,45.26 +2020-01-02 19:00:00+00:00,39.32 +2020-01-02 20:00:00+00:00,34.06 +2020-01-02 21:00:00+00:00,32.22 +2020-01-02 22:00:00+00:00,24.99 +2020-01-02 23:00:00+00:00,21.47 +2020-01-03 00:00:00+00:00,13.04 +2020-01-03 01:00:00+00:00,1.53 +2020-01-03 02:00:00+00:00,0.14 +2020-01-03 03:00:00+00:00,0.85 +2020-01-03 04:00:00+00:00,9.92 +2020-01-03 05:00:00+00:00,24.48 +2020-01-03 06:00:00+00:00,26.68 +2020-01-03 07:00:00+00:00,28.81 +2020-01-03 08:00:00+00:00,29.28 +2020-01-03 09:00:00+00:00,28.85 +2020-01-03 10:00:00+00:00,31.8 +2020-01-03 11:00:00+00:00,37.94 +2020-01-03 12:00:00+00:00,37.9 +2020-01-03 13:00:00+00:00,38.11 +2020-01-03 14:00:00+00:00,37.91 +2020-01-03 15:00:00+00:00,38.44 +2020-01-03 16:00:00+00:00,40.47 +2020-01-03 17:00:00+00:00,41.35 +2020-01-03 18:00:00+00:00,33.37 +2020-01-03 19:00:00+00:00,28.89 +2020-01-03 20:00:00+00:00,27.7 +2020-01-03 21:00:00+00:00,25.7 +2020-01-03 22:00:00+00:00,22.04 +2020-01-03 23:00:00+00:00,22.9 +2020-01-04 00:00:00+00:00,15.95 +2020-01-04 01:00:00+00:00,16.63 +2020-01-04 02:00:00+00:00,6.45 +2020-01-04 03:00:00+00:00,3.83 +2020-01-04 04:00:00+00:00,0.12 +2020-01-04 05:00:00+00:00,0.07 +2020-01-04 06:00:00+00:00,19.07 +2020-01-04 07:00:00+00:00,17.49 +2020-01-04 08:00:00+00:00,23.98 +2020-01-04 09:00:00+00:00,8.8 +2020-01-04 10:00:00+00:00,17.95 +2020-01-04 11:00:00+00:00,19.5 +2020-01-04 12:00:00+00:00,13.74 +2020-01-04 13:00:00+00:00,17.42 +2020-01-04 14:00:00+00:00,20.38 +2020-01-04 15:00:00+00:00,25.08 +2020-01-04 16:00:00+00:00,28.88 +2020-01-04 17:00:00+00:00,32.02 +2020-01-04 18:00:00+00:00,35.35 +2020-01-04 19:00:00+00:00,29.98 +2020-01-04 20:00:00+00:00,34.46 +2020-01-04 21:00:00+00:00,39.75 +2020-01-04 22:00:00+00:00,37.95 +2020-01-04 23:00:00+00:00,33.1 +2020-01-05 
00:00:00+00:00,32.28 +2020-01-05 01:00:00+00:00,31.18 +2020-01-05 02:00:00+00:00,30.1 +2020-01-05 03:00:00+00:00,29.96 +2020-01-05 04:00:00+00:00,29.88 +2020-01-05 05:00:00+00:00,30.38 +2020-01-05 06:00:00+00:00,31.15 +2020-01-05 07:00:00+00:00,32.09 +2020-01-05 08:00:00+00:00,34.27 +2020-01-05 09:00:00+00:00,37.53 +2020-01-05 10:00:00+00:00,38.99 +2020-01-05 11:00:00+00:00,38.15 +2020-01-05 12:00:00+00:00,35.37 +2020-01-05 13:00:00+00:00,34.44 +2020-01-05 14:00:00+00:00,36.1 +2020-01-05 15:00:00+00:00,40.59 +2020-01-05 16:00:00+00:00,44.68 +2020-01-05 17:00:00+00:00,46.16 +2020-01-05 18:00:00+00:00,44.62 +2020-01-05 19:00:00+00:00,39.5 +2020-01-05 20:00:00+00:00,35.76 +2020-01-05 21:00:00+00:00,36.49 +2020-01-05 22:00:00+00:00,30.49 +2020-01-05 23:00:00+00:00,29.16 +2020-01-06 00:00:00+00:00,29.0 +2020-01-06 01:00:00+00:00,29.08 +2020-01-06 02:00:00+00:00,27.72 +2020-01-06 03:00:00+00:00,27.03 +2020-01-06 04:00:00+00:00,28.98 +2020-01-06 05:00:00+00:00,33.18 +2020-01-06 06:00:00+00:00,43.13 +2020-01-06 07:00:00+00:00,44.52 +2020-01-06 08:00:00+00:00,44.96 +2020-01-06 09:00:00+00:00,44.0 +2020-01-06 10:00:00+00:00,42.46 +2020-01-06 11:00:00+00:00,41.3 +2020-01-06 12:00:00+00:00,40.51 +2020-01-06 13:00:00+00:00,41.22 +2020-01-06 14:00:00+00:00,43.28 +2020-01-06 15:00:00+00:00,43.68 +2020-01-06 16:00:00+00:00,47.9 +2020-01-06 17:00:00+00:00,48.91 +2020-01-06 18:00:00+00:00,45.04 +2020-01-06 19:00:00+00:00,40.28 +2020-01-06 20:00:00+00:00,33.89 +2020-01-06 21:00:00+00:00,33.58 +2020-01-06 22:00:00+00:00,32.41 +2020-01-06 23:00:00+00:00,30.75 +2020-01-07 00:00:00+00:00,31.03 +2020-01-07 01:00:00+00:00,29.88 +2020-01-07 02:00:00+00:00,29.0 +2020-01-07 03:00:00+00:00,29.65 +2020-01-07 04:00:00+00:00,31.78 +2020-01-07 05:00:00+00:00,40.87 +2020-01-07 06:00:00+00:00,49.01 +2020-01-07 07:00:00+00:00,51.09 +2020-01-07 08:00:00+00:00,51.12 +2020-01-07 09:00:00+00:00,49.83 +2020-01-07 10:00:00+00:00,49.16 +2020-01-07 11:00:00+00:00,48.43 +2020-01-07 12:00:00+00:00,47.99 +2020-01-07 13:00:00+00:00,47.41 +2020-01-07 14:00:00+00:00,45.87 +2020-01-07 15:00:00+00:00,45.9 +2020-01-07 16:00:00+00:00,47.96 +2020-01-07 17:00:00+00:00,48.11 +2020-01-07 18:00:00+00:00,43.63 +2020-01-07 19:00:00+00:00,33.6 +2020-01-07 20:00:00+00:00,32.93 +2020-01-07 21:00:00+00:00,31.29 +2020-01-07 22:00:00+00:00,26.28 +2020-01-07 23:00:00+00:00,18.95 +2020-01-08 00:00:00+00:00,4.96 +2020-01-08 01:00:00+00:00,0.1 +2020-01-08 02:00:00+00:00,0.11 +2020-01-08 03:00:00+00:00,1.75 +2020-01-08 04:00:00+00:00,20.74 +2020-01-08 05:00:00+00:00,25.57 +2020-01-08 06:00:00+00:00,32.47 +2020-01-08 07:00:00+00:00,33.07 +2020-01-08 08:00:00+00:00,33.05 +2020-01-08 09:00:00+00:00,34.18 +2020-01-08 10:00:00+00:00,39.63 +2020-01-08 11:00:00+00:00,41.35 +2020-01-08 12:00:00+00:00,44.83 +2020-01-08 13:00:00+00:00,46.04 +2020-01-08 14:00:00+00:00,46.33 +2020-01-08 15:00:00+00:00,47.9 +2020-01-08 16:00:00+00:00,51.21 +2020-01-08 17:00:00+00:00,55.92 +2020-01-08 18:00:00+00:00,53.69 +2020-01-08 19:00:00+00:00,48.1 +2020-01-08 20:00:00+00:00,44.92 +2020-01-08 21:00:00+00:00,41.67 +2020-01-08 22:00:00+00:00,39.41 +2020-01-08 23:00:00+00:00,34.08 +2020-01-09 00:00:00+00:00,32.2 +2020-01-09 01:00:00+00:00,32.56 +2020-01-09 02:00:00+00:00,32.35 +2020-01-09 03:00:00+00:00,29.0 +2020-01-09 04:00:00+00:00,30.86 +2020-01-09 05:00:00+00:00,38.95 +2020-01-09 06:00:00+00:00,46.86 +2020-01-09 07:00:00+00:00,47.92 +2020-01-09 08:00:00+00:00,45.68 +2020-01-09 09:00:00+00:00,43.61 +2020-01-09 10:00:00+00:00,40.0 +2020-01-09 11:00:00+00:00,37.06 +2020-01-09 
12:00:00+00:00,33.45 +2020-01-09 13:00:00+00:00,32.2 +2020-01-09 14:00:00+00:00,31.86 +2020-01-09 15:00:00+00:00,32.59 +2020-01-09 16:00:00+00:00,42.85 +2020-01-09 17:00:00+00:00,41.73 +2020-01-09 18:00:00+00:00,40.31 +2020-01-09 19:00:00+00:00,32.96 +2020-01-09 20:00:00+00:00,30.72 +2020-01-09 21:00:00+00:00,31.02 +2020-01-09 22:00:00+00:00,29.14 +2020-01-09 23:00:00+00:00,26.94 +2020-01-10 00:00:00+00:00,26.59 +2020-01-10 01:00:00+00:00,25.81 +2020-01-10 02:00:00+00:00,25.89 +2020-01-10 03:00:00+00:00,26.2 +2020-01-10 04:00:00+00:00,26.95 +2020-01-10 05:00:00+00:00,28.95 +2020-01-10 06:00:00+00:00,43.29 +2020-01-10 07:00:00+00:00,47.4 +2020-01-10 08:00:00+00:00,40.63 +2020-01-10 09:00:00+00:00,36.26 +2020-01-10 10:00:00+00:00,32.05 +2020-01-10 11:00:00+00:00,28.64 +2020-01-10 12:00:00+00:00,28.45 +2020-01-10 13:00:00+00:00,28.23 +2020-01-10 14:00:00+00:00,29.56 +2020-01-10 15:00:00+00:00,36.43 +2020-01-10 16:00:00+00:00,45.0 +2020-01-10 17:00:00+00:00,46.73 +2020-01-10 18:00:00+00:00,45.94 +2020-01-10 19:00:00+00:00,45.02 +2020-01-10 20:00:00+00:00,41.29 +2020-01-10 21:00:00+00:00,39.9 +2020-01-10 22:00:00+00:00,33.04 +2020-01-10 23:00:00+00:00,35.01 +2020-01-11 00:00:00+00:00,34.0 +2020-01-11 01:00:00+00:00,31.43 +2020-01-11 02:00:00+00:00,29.14 +2020-01-11 03:00:00+00:00,28.86 +2020-01-11 04:00:00+00:00,28.43 +2020-01-11 05:00:00+00:00,29.22 +2020-01-11 06:00:00+00:00,31.22 +2020-01-11 07:00:00+00:00,35.68 +2020-01-11 08:00:00+00:00,40.0 +2020-01-11 09:00:00+00:00,38.01 +2020-01-11 10:00:00+00:00,37.9 +2020-01-11 11:00:00+00:00,36.08 +2020-01-11 12:00:00+00:00,32.96 +2020-01-11 13:00:00+00:00,31.1 +2020-01-11 14:00:00+00:00,32.25 +2020-01-11 15:00:00+00:00,32.55 +2020-01-11 16:00:00+00:00,40.56 +2020-01-11 17:00:00+00:00,34.46 +2020-01-11 18:00:00+00:00,30.01 +2020-01-11 19:00:00+00:00,25.74 +2020-01-11 20:00:00+00:00,23.73 +2020-01-11 21:00:00+00:00,25.24 +2020-01-11 22:00:00+00:00,20.96 +2020-01-11 23:00:00+00:00,22.82 +2020-01-12 00:00:00+00:00,19.37 +2020-01-12 01:00:00+00:00,18.36 +2020-01-12 02:00:00+00:00,18.34 +2020-01-12 03:00:00+00:00,18.16 +2020-01-12 04:00:00+00:00,18.66 +2020-01-12 05:00:00+00:00,17.39 +2020-01-12 06:00:00+00:00,18.22 +2020-01-12 07:00:00+00:00,22.1 +2020-01-12 08:00:00+00:00,23.93 +2020-01-12 09:00:00+00:00,24.23 +2020-01-12 10:00:00+00:00,24.84 +2020-01-12 11:00:00+00:00,24.45 +2020-01-12 12:00:00+00:00,22.46 +2020-01-12 13:00:00+00:00,20.05 +2020-01-12 14:00:00+00:00,21.48 +2020-01-12 15:00:00+00:00,24.71 +2020-01-12 16:00:00+00:00,26.5 +2020-01-12 17:00:00+00:00,26.68 +2020-01-12 18:00:00+00:00,26.16 +2020-01-12 19:00:00+00:00,25.62 +2020-01-12 20:00:00+00:00,25.53 +2020-01-12 21:00:00+00:00,27.1 +2020-01-12 22:00:00+00:00,26.14 +2020-01-12 23:00:00+00:00,21.82 +2020-01-13 00:00:00+00:00,23.98 +2020-01-13 01:00:00+00:00,25.23 +2020-01-13 02:00:00+00:00,24.85 +2020-01-13 03:00:00+00:00,25.01 +2020-01-13 04:00:00+00:00,27.32 +2020-01-13 05:00:00+00:00,38.4 +2020-01-13 06:00:00+00:00,48.64 +2020-01-13 07:00:00+00:00,52.93 +2020-01-13 08:00:00+00:00,49.89 +2020-01-13 09:00:00+00:00,48.89 +2020-01-13 10:00:00+00:00,47.2 +2020-01-13 11:00:00+00:00,47.0 +2020-01-13 12:00:00+00:00,45.95 +2020-01-13 13:00:00+00:00,45.0 +2020-01-13 14:00:00+00:00,45.96 +2020-01-13 15:00:00+00:00,43.68 +2020-01-13 16:00:00+00:00,46.81 +2020-01-13 17:00:00+00:00,45.27 +2020-01-13 18:00:00+00:00,42.55 +2020-01-13 19:00:00+00:00,31.9 +2020-01-13 20:00:00+00:00,26.68 +2020-01-13 21:00:00+00:00,25.96 +2020-01-13 22:00:00+00:00,23.4 +2020-01-13 23:00:00+00:00,22.59 +2020-01-14 
00:00:00+00:00,16.5 +2020-01-14 01:00:00+00:00,8.89 +2020-01-14 02:00:00+00:00,1.52 +2020-01-14 03:00:00+00:00,1.58 +2020-01-14 04:00:00+00:00,14.13 +2020-01-14 05:00:00+00:00,25.23 +2020-01-14 06:00:00+00:00,27.13 +2020-01-14 07:00:00+00:00,28.55 +2020-01-14 08:00:00+00:00,27.68 +2020-01-14 09:00:00+00:00,27.42 +2020-01-14 10:00:00+00:00,27.43 +2020-01-14 11:00:00+00:00,27.68 +2020-01-14 12:00:00+00:00,29.56 +2020-01-14 13:00:00+00:00,32.0 +2020-01-14 14:00:00+00:00,32.74 +2020-01-14 15:00:00+00:00,30.38 +2020-01-14 16:00:00+00:00,38.0 +2020-01-14 17:00:00+00:00,30.32 +2020-01-14 18:00:00+00:00,26.94 +2020-01-14 19:00:00+00:00,25.74 +2020-01-14 20:00:00+00:00,23.82 +2020-01-14 21:00:00+00:00,22.3 +2020-01-14 22:00:00+00:00,12.4 +2020-01-14 23:00:00+00:00,16.14 +2020-01-15 00:00:00+00:00,5.06 +2020-01-15 01:00:00+00:00,0.11 +2020-01-15 02:00:00+00:00,1.77 +2020-01-15 03:00:00+00:00,7.13 +2020-01-15 04:00:00+00:00,17.86 +2020-01-15 05:00:00+00:00,25.18 +2020-01-15 06:00:00+00:00,35.52 +2020-01-15 07:00:00+00:00,36.56 +2020-01-15 08:00:00+00:00,33.33 +2020-01-15 09:00:00+00:00,25.34 +2020-01-15 10:00:00+00:00,24.98 +2020-01-15 11:00:00+00:00,25.05 +2020-01-15 12:00:00+00:00,25.12 +2020-01-15 13:00:00+00:00,25.24 +2020-01-15 14:00:00+00:00,30.17 +2020-01-15 15:00:00+00:00,29.98 +2020-01-15 16:00:00+00:00,36.49 +2020-01-15 17:00:00+00:00,35.46 +2020-01-15 18:00:00+00:00,35.28 +2020-01-15 19:00:00+00:00,33.65 +2020-01-15 20:00:00+00:00,29.57 +2020-01-15 21:00:00+00:00,33.59 +2020-01-15 22:00:00+00:00,30.46 +2020-01-15 23:00:00+00:00,28.38 +2020-01-16 00:00:00+00:00,30.26 +2020-01-16 01:00:00+00:00,29.92 +2020-01-16 02:00:00+00:00,29.39 +2020-01-16 03:00:00+00:00,29.64 +2020-01-16 04:00:00+00:00,31.1 +2020-01-16 05:00:00+00:00,39.04 +2020-01-16 06:00:00+00:00,45.42 +2020-01-16 07:00:00+00:00,52.4 +2020-01-16 08:00:00+00:00,47.0 +2020-01-16 09:00:00+00:00,43.51 +2020-01-16 10:00:00+00:00,42.1 +2020-01-16 11:00:00+00:00,40.36 +2020-01-16 12:00:00+00:00,41.16 +2020-01-16 13:00:00+00:00,42.91 +2020-01-16 14:00:00+00:00,46.0 +2020-01-16 15:00:00+00:00,46.45 +2020-01-16 16:00:00+00:00,44.61 +2020-01-16 17:00:00+00:00,43.15 +2020-01-16 18:00:00+00:00,38.86 +2020-01-16 19:00:00+00:00,33.42 +2020-01-16 20:00:00+00:00,30.8 +2020-01-16 21:00:00+00:00,31.56 +2020-01-16 22:00:00+00:00,28.64 +2020-01-16 23:00:00+00:00,27.36 +2020-01-17 00:00:00+00:00,27.16 +2020-01-17 01:00:00+00:00,26.58 +2020-01-17 02:00:00+00:00,25.71 +2020-01-17 03:00:00+00:00,26.01 +2020-01-17 04:00:00+00:00,27.97 +2020-01-17 05:00:00+00:00,31.09 +2020-01-17 06:00:00+00:00,40.2 +2020-01-17 07:00:00+00:00,43.8 +2020-01-17 08:00:00+00:00,42.94 +2020-01-17 09:00:00+00:00,42.34 +2020-01-17 10:00:00+00:00,40.79 +2020-01-17 11:00:00+00:00,41.04 +2020-01-17 12:00:00+00:00,39.94 +2020-01-17 13:00:00+00:00,39.02 +2020-01-17 14:00:00+00:00,43.01 +2020-01-17 15:00:00+00:00,42.8 +2020-01-17 16:00:00+00:00,45.79 +2020-01-17 17:00:00+00:00,46.0 +2020-01-17 18:00:00+00:00,44.66 +2020-01-17 19:00:00+00:00,40.99 +2020-01-17 20:00:00+00:00,33.9 +2020-01-17 21:00:00+00:00,33.38 +2020-01-17 22:00:00+00:00,33.0 +2020-01-17 23:00:00+00:00,22.15 +2020-01-18 00:00:00+00:00,24.86 +2020-01-18 01:00:00+00:00,22.24 +2020-01-18 02:00:00+00:00,25.36 +2020-01-18 03:00:00+00:00,25.39 +2020-01-18 04:00:00+00:00,25.96 +2020-01-18 05:00:00+00:00,22.99 +2020-01-18 06:00:00+00:00,28.46 +2020-01-18 07:00:00+00:00,28.1 +2020-01-18 08:00:00+00:00,37.73 +2020-01-18 09:00:00+00:00,35.21 +2020-01-18 10:00:00+00:00,33.34 +2020-01-18 11:00:00+00:00,29.2 +2020-01-18 
12:00:00+00:00,31.55 +2020-01-18 13:00:00+00:00,34.78 +2020-01-18 14:00:00+00:00,35.09 +2020-01-18 15:00:00+00:00,37.27 +2020-01-18 16:00:00+00:00,41.3 +2020-01-18 17:00:00+00:00,43.04 +2020-01-18 18:00:00+00:00,41.91 +2020-01-18 19:00:00+00:00,38.59 +2020-01-18 20:00:00+00:00,35.13 +2020-01-18 21:00:00+00:00,33.48 +2020-01-18 22:00:00+00:00,31.22 +2020-01-18 23:00:00+00:00,33.1 +2020-01-19 00:00:00+00:00,30.67 +2020-01-19 01:00:00+00:00,29.47 +2020-01-19 02:00:00+00:00,28.79 +2020-01-19 03:00:00+00:00,27.71 +2020-01-19 04:00:00+00:00,27.26 +2020-01-19 05:00:00+00:00,27.82 +2020-01-19 06:00:00+00:00,30.01 +2020-01-19 07:00:00+00:00,31.47 +2020-01-19 08:00:00+00:00,35.2 +2020-01-19 09:00:00+00:00,35.2 +2020-01-19 10:00:00+00:00,34.47 +2020-01-19 11:00:00+00:00,33.36 +2020-01-19 12:00:00+00:00,29.9 +2020-01-19 13:00:00+00:00,29.92 +2020-01-19 14:00:00+00:00,30.86 +2020-01-19 15:00:00+00:00,33.07 +2020-01-19 16:00:00+00:00,42.98 +2020-01-19 17:00:00+00:00,44.24 +2020-01-19 18:00:00+00:00,43.89 +2020-01-19 19:00:00+00:00,41.14 +2020-01-19 20:00:00+00:00,36.74 +2020-01-19 21:00:00+00:00,40.0 +2020-01-19 22:00:00+00:00,36.15 +2020-01-19 23:00:00+00:00,34.24 +2020-01-20 00:00:00+00:00,34.01 +2020-01-20 01:00:00+00:00,33.08 +2020-01-20 02:00:00+00:00,32.1 +2020-01-20 03:00:00+00:00,31.5 +2020-01-20 04:00:00+00:00,34.23 +2020-01-20 05:00:00+00:00,45.63 +2020-01-20 06:00:00+00:00,55.97 +2020-01-20 07:00:00+00:00,60.0 +2020-01-20 08:00:00+00:00,57.01 +2020-01-20 09:00:00+00:00,50.63 +2020-01-20 10:00:00+00:00,48.17 +2020-01-20 11:00:00+00:00,43.96 +2020-01-20 12:00:00+00:00,43.02 +2020-01-20 13:00:00+00:00,43.11 +2020-01-20 14:00:00+00:00,44.95 +2020-01-20 15:00:00+00:00,46.0 +2020-01-20 16:00:00+00:00,55.91 +2020-01-20 17:00:00+00:00,57.05 +2020-01-20 18:00:00+00:00,54.02 +2020-01-20 19:00:00+00:00,48.68 +2020-01-20 20:00:00+00:00,41.51 +2020-01-20 21:00:00+00:00,40.0 +2020-01-20 22:00:00+00:00,34.03 +2020-01-20 23:00:00+00:00,33.17 +2020-01-21 00:00:00+00:00,33.01 +2020-01-21 01:00:00+00:00,32.28 +2020-01-21 02:00:00+00:00,32.42 +2020-01-21 03:00:00+00:00,32.9 +2020-01-21 04:00:00+00:00,33.0 +2020-01-21 05:00:00+00:00,37.98 +2020-01-21 06:00:00+00:00,49.77 +2020-01-21 07:00:00+00:00,52.36 +2020-01-21 08:00:00+00:00,48.07 +2020-01-21 09:00:00+00:00,42.1 +2020-01-21 10:00:00+00:00,39.11 +2020-01-21 11:00:00+00:00,37.29 +2020-01-21 12:00:00+00:00,38.1 +2020-01-21 13:00:00+00:00,42.6 +2020-01-21 14:00:00+00:00,47.71 +2020-01-21 15:00:00+00:00,49.97 +2020-01-21 16:00:00+00:00,55.81 +2020-01-21 17:00:00+00:00,55.29 +2020-01-21 18:00:00+00:00,50.7 +2020-01-21 19:00:00+00:00,43.75 +2020-01-21 20:00:00+00:00,40.42 +2020-01-21 21:00:00+00:00,37.52 +2020-01-21 22:00:00+00:00,33.01 +2020-01-21 23:00:00+00:00,34.7 +2020-01-22 00:00:00+00:00,33.87 +2020-01-22 01:00:00+00:00,32.6 +2020-01-22 02:00:00+00:00,32.42 +2020-01-22 03:00:00+00:00,32.64 +2020-01-22 04:00:00+00:00,33.0 +2020-01-22 05:00:00+00:00,39.12 +2020-01-22 06:00:00+00:00,49.26 +2020-01-22 07:00:00+00:00,56.53 +2020-01-22 08:00:00+00:00,50.5 +2020-01-22 09:00:00+00:00,46.27 +2020-01-22 10:00:00+00:00,45.02 +2020-01-22 11:00:00+00:00,42.82 +2020-01-22 12:00:00+00:00,43.45 +2020-01-22 13:00:00+00:00,49.96 +2020-01-22 14:00:00+00:00,52.4 +2020-01-22 15:00:00+00:00,55.63 +2020-01-22 16:00:00+00:00,62.07 +2020-01-22 17:00:00+00:00,65.09 +2020-01-22 18:00:00+00:00,59.58 +2020-01-22 19:00:00+00:00,52.52 +2020-01-22 20:00:00+00:00,47.01 +2020-01-22 21:00:00+00:00,43.99 +2020-01-22 22:00:00+00:00,41.2 +2020-01-22 23:00:00+00:00,40.1 +2020-01-23 
00:00:00+00:00,39.23 +2020-01-23 01:00:00+00:00,38.34 +2020-01-23 02:00:00+00:00,36.32 +2020-01-23 03:00:00+00:00,36.6 +2020-01-23 04:00:00+00:00,41.97 +2020-01-23 05:00:00+00:00,47.0 +2020-01-23 06:00:00+00:00,58.74 +2020-01-23 07:00:00+00:00,66.62 +2020-01-23 08:00:00+00:00,64.86 +2020-01-23 09:00:00+00:00,61.68 +2020-01-23 10:00:00+00:00,57.42 +2020-01-23 11:00:00+00:00,55.36 +2020-01-23 12:00:00+00:00,53.68 +2020-01-23 13:00:00+00:00,51.2 +2020-01-23 14:00:00+00:00,53.54 +2020-01-23 15:00:00+00:00,54.75 +2020-01-23 16:00:00+00:00,62.35 +2020-01-23 17:00:00+00:00,68.64 +2020-01-23 18:00:00+00:00,60.4 +2020-01-23 19:00:00+00:00,55.53 +2020-01-23 20:00:00+00:00,47.16 +2020-01-23 21:00:00+00:00,43.9 +2020-01-23 22:00:00+00:00,41.51 +2020-01-23 23:00:00+00:00,36.23 +2020-01-24 00:00:00+00:00,37.6 +2020-01-24 01:00:00+00:00,36.76 +2020-01-24 02:00:00+00:00,35.1 +2020-01-24 03:00:00+00:00,36.47 +2020-01-24 04:00:00+00:00,36.66 +2020-01-24 05:00:00+00:00,44.54 +2020-01-24 06:00:00+00:00,58.29 +2020-01-24 07:00:00+00:00,66.74 +2020-01-24 08:00:00+00:00,65.0 +2020-01-24 09:00:00+00:00,61.12 +2020-01-24 10:00:00+00:00,58.84 +2020-01-24 11:00:00+00:00,56.61 +2020-01-24 12:00:00+00:00,53.29 +2020-01-24 13:00:00+00:00,52.0 +2020-01-24 14:00:00+00:00,53.06 +2020-01-24 15:00:00+00:00,54.01 +2020-01-24 16:00:00+00:00,60.79 +2020-01-24 17:00:00+00:00,65.1 +2020-01-24 18:00:00+00:00,59.79 +2020-01-24 19:00:00+00:00,49.17 +2020-01-24 20:00:00+00:00,44.32 +2020-01-24 21:00:00+00:00,43.7 +2020-01-24 22:00:00+00:00,40.67 +2020-01-24 23:00:00+00:00,36.19 +2020-01-25 00:00:00+00:00,35.09 +2020-01-25 01:00:00+00:00,37.01 +2020-01-25 02:00:00+00:00,35.14 +2020-01-25 03:00:00+00:00,33.11 +2020-01-25 04:00:00+00:00,33.01 +2020-01-25 05:00:00+00:00,34.12 +2020-01-25 06:00:00+00:00,37.1 +2020-01-25 07:00:00+00:00,42.42 +2020-01-25 08:00:00+00:00,44.97 +2020-01-25 09:00:00+00:00,45.96 +2020-01-25 10:00:00+00:00,44.89 +2020-01-25 11:00:00+00:00,43.67 +2020-01-25 12:00:00+00:00,41.14 +2020-01-25 13:00:00+00:00,39.61 +2020-01-25 14:00:00+00:00,41.51 +2020-01-25 15:00:00+00:00,43.53 +2020-01-25 16:00:00+00:00,46.25 +2020-01-25 17:00:00+00:00,49.99 +2020-01-25 18:00:00+00:00,45.84 +2020-01-25 19:00:00+00:00,43.01 +2020-01-25 20:00:00+00:00,38.25 +2020-01-25 21:00:00+00:00,40.57 +2020-01-25 22:00:00+00:00,36.02 +2020-01-25 23:00:00+00:00,34.39 +2020-01-26 00:00:00+00:00,33.64 +2020-01-26 01:00:00+00:00,32.4 +2020-01-26 02:00:00+00:00,29.84 +2020-01-26 03:00:00+00:00,29.33 +2020-01-26 04:00:00+00:00,29.04 +2020-01-26 05:00:00+00:00,30.02 +2020-01-26 06:00:00+00:00,29.49 +2020-01-26 07:00:00+00:00,31.17 +2020-01-26 08:00:00+00:00,34.61 +2020-01-26 09:00:00+00:00,38.92 +2020-01-26 10:00:00+00:00,41.16 +2020-01-26 11:00:00+00:00,41.86 +2020-01-26 12:00:00+00:00,38.23 +2020-01-26 13:00:00+00:00,36.03 +2020-01-26 14:00:00+00:00,34.56 +2020-01-26 15:00:00+00:00,35.81 +2020-01-26 16:00:00+00:00,39.99 +2020-01-26 17:00:00+00:00,42.0 +2020-01-26 18:00:00+00:00,35.47 +2020-01-26 19:00:00+00:00,31.27 +2020-01-26 20:00:00+00:00,27.53 +2020-01-26 21:00:00+00:00,29.09 +2020-01-26 22:00:00+00:00,27.5 +2020-01-26 23:00:00+00:00,28.09 +2020-01-27 00:00:00+00:00,27.73 +2020-01-27 01:00:00+00:00,26.13 +2020-01-27 02:00:00+00:00,23.4 +2020-01-27 03:00:00+00:00,21.98 +2020-01-27 04:00:00+00:00,26.17 +2020-01-27 05:00:00+00:00,28.83 +2020-01-27 06:00:00+00:00,42.2 +2020-01-27 07:00:00+00:00,41.56 +2020-01-27 08:00:00+00:00,40.29 +2020-01-27 09:00:00+00:00,42.71 +2020-01-27 10:00:00+00:00,39.95 +2020-01-27 11:00:00+00:00,38.02 +2020-01-27 
12:00:00+00:00,40.72 +2020-01-27 13:00:00+00:00,43.93 +2020-01-27 14:00:00+00:00,45.22 +2020-01-27 15:00:00+00:00,43.17 +2020-01-27 16:00:00+00:00,47.08 +2020-01-27 17:00:00+00:00,51.21 +2020-01-27 18:00:00+00:00,47.12 +2020-01-27 19:00:00+00:00,43.86 +2020-01-27 20:00:00+00:00,40.6 +2020-01-27 21:00:00+00:00,36.61 +2020-01-27 22:00:00+00:00,33.14 +2020-01-27 23:00:00+00:00,27.54 +2020-01-28 00:00:00+00:00,26.84 +2020-01-28 01:00:00+00:00,25.64 +2020-01-28 02:00:00+00:00,24.99 +2020-01-28 03:00:00+00:00,25.17 +2020-01-28 04:00:00+00:00,26.32 +2020-01-28 05:00:00+00:00,31.76 +2020-01-28 06:00:00+00:00,41.94 +2020-01-28 07:00:00+00:00,44.94 +2020-01-28 08:00:00+00:00,44.33 +2020-01-28 09:00:00+00:00,42.9 +2020-01-28 10:00:00+00:00,39.93 +2020-01-28 11:00:00+00:00,33.98 +2020-01-28 12:00:00+00:00,30.0 +2020-01-28 13:00:00+00:00,28.0 +2020-01-28 14:00:00+00:00,27.91 +2020-01-28 15:00:00+00:00,29.68 +2020-01-28 16:00:00+00:00,39.83 +2020-01-28 17:00:00+00:00,39.98 +2020-01-28 18:00:00+00:00,30.62 +2020-01-28 19:00:00+00:00,25.8 +2020-01-28 20:00:00+00:00,25.42 +2020-01-28 21:00:00+00:00,25.04 +2020-01-28 22:00:00+00:00,20.08 +2020-01-28 23:00:00+00:00,20.44 +2020-01-29 00:00:00+00:00,19.58 +2020-01-29 01:00:00+00:00,19.16 +2020-01-29 02:00:00+00:00,19.61 +2020-01-29 03:00:00+00:00,20.46 +2020-01-29 04:00:00+00:00,25.16 +2020-01-29 05:00:00+00:00,29.03 +2020-01-29 06:00:00+00:00,41.12 +2020-01-29 07:00:00+00:00,44.41 +2020-01-29 08:00:00+00:00,41.85 +2020-01-29 09:00:00+00:00,40.82 +2020-01-29 10:00:00+00:00,36.86 +2020-01-29 11:00:00+00:00,31.34 +2020-01-29 12:00:00+00:00,32.4 +2020-01-29 13:00:00+00:00,34.24 +2020-01-29 14:00:00+00:00,37.92 +2020-01-29 15:00:00+00:00,39.02 +2020-01-29 16:00:00+00:00,42.89 +2020-01-29 17:00:00+00:00,45.01 +2020-01-29 18:00:00+00:00,45.1 +2020-01-29 19:00:00+00:00,40.98 +2020-01-29 20:00:00+00:00,33.02 +2020-01-29 21:00:00+00:00,29.63 +2020-01-29 22:00:00+00:00,27.33 +2020-01-29 23:00:00+00:00,28.97 +2020-01-30 00:00:00+00:00,27.43 +2020-01-30 01:00:00+00:00,25.15 +2020-01-30 02:00:00+00:00,20.24 +2020-01-30 03:00:00+00:00,19.91 +2020-01-30 04:00:00+00:00,24.96 +2020-01-30 05:00:00+00:00,29.7 +2020-01-30 06:00:00+00:00,39.46 +2020-01-30 07:00:00+00:00,40.4 +2020-01-30 08:00:00+00:00,37.28 +2020-01-30 09:00:00+00:00,35.9 +2020-01-30 10:00:00+00:00,35.15 +2020-01-30 11:00:00+00:00,36.69 +2020-01-30 12:00:00+00:00,39.79 +2020-01-30 13:00:00+00:00,37.53 +2020-01-30 14:00:00+00:00,35.51 +2020-01-30 15:00:00+00:00,37.85 +2020-01-30 16:00:00+00:00,44.12 +2020-01-30 17:00:00+00:00,43.04 +2020-01-30 18:00:00+00:00,41.1 +2020-01-30 19:00:00+00:00,31.13 +2020-01-30 20:00:00+00:00,29.04 +2020-01-30 21:00:00+00:00,27.3 +2020-01-30 22:00:00+00:00,20.04 +2020-01-30 23:00:00+00:00,-0.04 +2020-01-31 00:00:00+00:00,0.02 +2020-01-31 01:00:00+00:00,-8.77 +2020-01-31 02:00:00+00:00,-3.89 +2020-01-31 03:00:00+00:00,0.01 +2020-01-31 04:00:00+00:00,13.04 +2020-01-31 05:00:00+00:00,24.01 +2020-01-31 06:00:00+00:00,36.96 +2020-01-31 07:00:00+00:00,39.59 +2020-01-31 08:00:00+00:00,37.39 +2020-01-31 09:00:00+00:00,34.9 +2020-01-31 10:00:00+00:00,35.27 +2020-01-31 11:00:00+00:00,27.97 +2020-01-31 12:00:00+00:00,28.84 +2020-01-31 13:00:00+00:00,29.0 +2020-01-31 14:00:00+00:00,30.07 +2020-01-31 15:00:00+00:00,28.1 +2020-01-31 16:00:00+00:00,33.78 +2020-01-31 17:00:00+00:00,37.05 +2020-01-31 18:00:00+00:00,32.58 +2020-01-31 19:00:00+00:00,25.1 +2020-01-31 20:00:00+00:00,21.24 +2020-01-31 21:00:00+00:00,20.29 +2020-01-31 22:00:00+00:00,17.09 +2020-01-31 23:00:00+00:00,0.07 +2020-02-01 
00:00:00+00:00,0.02 +2020-02-01 01:00:00+00:00,-0.7 +2020-02-01 02:00:00+00:00,-1.94 +2020-02-01 03:00:00+00:00,-1.67 +2020-02-01 04:00:00+00:00,-2.4 +2020-02-01 05:00:00+00:00,0.04 +2020-02-01 06:00:00+00:00,8.52 +2020-02-01 07:00:00+00:00,14.1 +2020-02-01 08:00:00+00:00,14.7 +2020-02-01 09:00:00+00:00,14.74 +2020-02-01 10:00:00+00:00,14.47 +2020-02-01 11:00:00+00:00,14.07 +2020-02-01 12:00:00+00:00,14.45 +2020-02-01 13:00:00+00:00,13.18 +2020-02-01 14:00:00+00:00,11.11 +2020-02-01 15:00:00+00:00,13.07 +2020-02-01 16:00:00+00:00,16.79 +2020-02-01 17:00:00+00:00,16.97 +2020-02-01 18:00:00+00:00,15.99 +2020-02-01 19:00:00+00:00,0.42 +2020-02-01 20:00:00+00:00,-0.8 +2020-02-01 21:00:00+00:00,0.0 +2020-02-01 22:00:00+00:00,-11.16 +2020-02-01 23:00:00+00:00,-4.97 +2020-02-02 00:00:00+00:00,-10.1 +2020-02-02 01:00:00+00:00,-16.95 +2020-02-02 02:00:00+00:00,-11.7 +2020-02-02 03:00:00+00:00,-5.98 +2020-02-02 04:00:00+00:00,-5.21 +2020-02-02 05:00:00+00:00,-4.98 +2020-02-02 06:00:00+00:00,0.09 +2020-02-02 07:00:00+00:00,13.06 +2020-02-02 08:00:00+00:00,24.5 +2020-02-02 09:00:00+00:00,27.12 +2020-02-02 10:00:00+00:00,29.43 +2020-02-02 11:00:00+00:00,35.0 +2020-02-02 12:00:00+00:00,33.36 +2020-02-02 13:00:00+00:00,33.75 +2020-02-02 14:00:00+00:00,34.2 +2020-02-02 15:00:00+00:00,28.87 +2020-02-02 16:00:00+00:00,40.27 +2020-02-02 17:00:00+00:00,40.73 +2020-02-02 18:00:00+00:00,36.93 +2020-02-02 19:00:00+00:00,27.05 +2020-02-02 20:00:00+00:00,21.99 +2020-02-02 21:00:00+00:00,23.87 +2020-02-02 22:00:00+00:00,17.38 +2020-02-02 23:00:00+00:00,15.92 +2020-02-03 00:00:00+00:00,15.55 +2020-02-03 01:00:00+00:00,14.38 +2020-02-03 02:00:00+00:00,9.32 +2020-02-03 03:00:00+00:00,13.26 +2020-02-03 04:00:00+00:00,14.03 +2020-02-03 05:00:00+00:00,27.06 +2020-02-03 06:00:00+00:00,36.49 +2020-02-03 07:00:00+00:00,39.97 +2020-02-03 08:00:00+00:00,39.26 +2020-02-03 09:00:00+00:00,35.52 +2020-02-03 10:00:00+00:00,32.97 +2020-02-03 11:00:00+00:00,29.08 +2020-02-03 12:00:00+00:00,28.38 +2020-02-03 13:00:00+00:00,28.73 +2020-02-03 14:00:00+00:00,28.29 +2020-02-03 15:00:00+00:00,33.02 +2020-02-03 16:00:00+00:00,37.95 +2020-02-03 17:00:00+00:00,37.99 +2020-02-03 18:00:00+00:00,36.57 +2020-02-03 19:00:00+00:00,31.0 +2020-02-03 20:00:00+00:00,27.16 +2020-02-03 21:00:00+00:00,27.13 +2020-02-03 22:00:00+00:00,24.76 +2020-02-03 23:00:00+00:00,20.79 +2020-02-04 00:00:00+00:00,17.41 +2020-02-04 01:00:00+00:00,16.24 +2020-02-04 02:00:00+00:00,12.96 +2020-02-04 03:00:00+00:00,13.42 +2020-02-04 04:00:00+00:00,15.88 +2020-02-04 05:00:00+00:00,24.88 +2020-02-04 06:00:00+00:00,29.7 +2020-02-04 07:00:00+00:00,35.01 +2020-02-04 08:00:00+00:00,33.48 +2020-02-04 09:00:00+00:00,29.9 +2020-02-04 10:00:00+00:00,29.03 +2020-02-04 11:00:00+00:00,27.07 +2020-02-04 12:00:00+00:00,26.43 +2020-02-04 13:00:00+00:00,27.02 +2020-02-04 14:00:00+00:00,29.05 +2020-02-04 15:00:00+00:00,31.42 +2020-02-04 16:00:00+00:00,39.92 +2020-02-04 17:00:00+00:00,41.3 +2020-02-04 18:00:00+00:00,40.92 +2020-02-04 19:00:00+00:00,39.75 +2020-02-04 20:00:00+00:00,30.13 +2020-02-04 21:00:00+00:00,30.36 +2020-02-04 22:00:00+00:00,26.94 +2020-02-04 23:00:00+00:00,25.44 +2020-02-05 00:00:00+00:00,25.0 +2020-02-05 01:00:00+00:00,24.43 +2020-02-05 02:00:00+00:00,23.63 +2020-02-05 03:00:00+00:00,24.83 +2020-02-05 04:00:00+00:00,26.62 +2020-02-05 05:00:00+00:00,37.54 +2020-02-05 06:00:00+00:00,44.91 +2020-02-05 07:00:00+00:00,49.16 +2020-02-05 08:00:00+00:00,44.78 +2020-02-05 09:00:00+00:00,41.37 +2020-02-05 10:00:00+00:00,40.0 +2020-02-05 11:00:00+00:00,37.07 +2020-02-05 
12:00:00+00:00,35.16 +2020-02-05 13:00:00+00:00,35.17 +2020-02-05 14:00:00+00:00,37.13 +2020-02-05 15:00:00+00:00,40.25 +2020-02-05 16:00:00+00:00,45.45 +2020-02-05 17:00:00+00:00,45.83 +2020-02-05 18:00:00+00:00,45.66 +2020-02-05 19:00:00+00:00,41.74 +2020-02-05 20:00:00+00:00,35.92 +2020-02-05 21:00:00+00:00,31.99 +2020-02-05 22:00:00+00:00,29.7 +2020-02-05 23:00:00+00:00,26.51 +2020-02-06 00:00:00+00:00,25.35 +2020-02-06 01:00:00+00:00,24.47 +2020-02-06 02:00:00+00:00,24.44 +2020-02-06 03:00:00+00:00,25.25 +2020-02-06 04:00:00+00:00,27.97 +2020-02-06 05:00:00+00:00,32.96 +2020-02-06 06:00:00+00:00,43.15 +2020-02-06 07:00:00+00:00,44.88 +2020-02-06 08:00:00+00:00,42.9 +2020-02-06 09:00:00+00:00,42.19 +2020-02-06 10:00:00+00:00,40.81 +2020-02-06 11:00:00+00:00,39.25 +2020-02-06 12:00:00+00:00,38.01 +2020-02-06 13:00:00+00:00,37.94 +2020-02-06 14:00:00+00:00,38.06 +2020-02-06 15:00:00+00:00,39.09 +2020-02-06 16:00:00+00:00,45.95 +2020-02-06 17:00:00+00:00,46.83 +2020-02-06 18:00:00+00:00,47.21 +2020-02-06 19:00:00+00:00,43.63 +2020-02-06 20:00:00+00:00,37.98 +2020-02-06 21:00:00+00:00,37.66 +2020-02-06 22:00:00+00:00,33.58 +2020-02-06 23:00:00+00:00,33.32 +2020-02-07 00:00:00+00:00,32.18 +2020-02-07 01:00:00+00:00,32.03 +2020-02-07 02:00:00+00:00,31.44 +2020-02-07 03:00:00+00:00,32.17 +2020-02-07 04:00:00+00:00,33.71 +2020-02-07 05:00:00+00:00,40.68 +2020-02-07 06:00:00+00:00,47.87 +2020-02-07 07:00:00+00:00,56.96 +2020-02-07 08:00:00+00:00,47.0 +2020-02-07 09:00:00+00:00,41.81 +2020-02-07 10:00:00+00:00,38.52 +2020-02-07 11:00:00+00:00,37.78 +2020-02-07 12:00:00+00:00,34.68 +2020-02-07 13:00:00+00:00,33.09 +2020-02-07 14:00:00+00:00,38.29 +2020-02-07 15:00:00+00:00,39.08 +2020-02-07 16:00:00+00:00,40.96 +2020-02-07 17:00:00+00:00,39.93 +2020-02-07 18:00:00+00:00,35.47 +2020-02-07 19:00:00+00:00,32.23 +2020-02-07 20:00:00+00:00,27.93 +2020-02-07 21:00:00+00:00,27.35 +2020-02-07 22:00:00+00:00,26.0 +2020-02-07 23:00:00+00:00,25.21 +2020-02-08 00:00:00+00:00,24.28 +2020-02-08 01:00:00+00:00,23.98 +2020-02-08 02:00:00+00:00,22.9 +2020-02-08 03:00:00+00:00,21.75 +2020-02-08 04:00:00+00:00,20.12 +2020-02-08 05:00:00+00:00,22.34 +2020-02-08 06:00:00+00:00,25.54 +2020-02-08 07:00:00+00:00,28.05 +2020-02-08 08:00:00+00:00,29.07 +2020-02-08 09:00:00+00:00,26.87 +2020-02-08 10:00:00+00:00,26.74 +2020-02-08 11:00:00+00:00,25.0 +2020-02-08 12:00:00+00:00,25.61 +2020-02-08 13:00:00+00:00,26.8 +2020-02-08 14:00:00+00:00,29.92 +2020-02-08 15:00:00+00:00,37.6 +2020-02-08 16:00:00+00:00,39.5 +2020-02-08 17:00:00+00:00,41.3 +2020-02-08 18:00:00+00:00,38.71 +2020-02-08 19:00:00+00:00,31.29 +2020-02-08 20:00:00+00:00,26.84 +2020-02-08 21:00:00+00:00,27.39 +2020-02-08 22:00:00+00:00,26.4 +2020-02-08 23:00:00+00:00,23.06 +2020-02-09 00:00:00+00:00,14.93 +2020-02-09 01:00:00+00:00,12.8 +2020-02-09 02:00:00+00:00,9.18 +2020-02-09 03:00:00+00:00,7.54 +2020-02-09 04:00:00+00:00,4.24 +2020-02-09 05:00:00+00:00,3.85 +2020-02-09 06:00:00+00:00,5.06 +2020-02-09 07:00:00+00:00,1.72 +2020-02-09 08:00:00+00:00,-0.07 +2020-02-09 09:00:00+00:00,-4.94 +2020-02-09 10:00:00+00:00,-3.81 +2020-02-09 11:00:00+00:00,-8.8 +2020-02-09 12:00:00+00:00,-16.95 +2020-02-09 13:00:00+00:00,-13.64 +2020-02-09 14:00:00+00:00,-2.96 +2020-02-09 15:00:00+00:00,-0.1 +2020-02-09 16:00:00+00:00,0.52 +2020-02-09 17:00:00+00:00,11.97 +2020-02-09 18:00:00+00:00,5.22 +2020-02-09 19:00:00+00:00,-4.01 +2020-02-09 20:00:00+00:00,-3.04 +2020-02-09 21:00:00+00:00,-0.08 +2020-02-09 22:00:00+00:00,-4.1 +2020-02-09 23:00:00+00:00,-4.97 +2020-02-10 
00:00:00+00:00,-15.89 +2020-02-10 01:00:00+00:00,-15.11 +2020-02-10 02:00:00+00:00,-16.16 +2020-02-10 03:00:00+00:00,-14.93 +2020-02-10 04:00:00+00:00,-2.0 +2020-02-10 05:00:00+00:00,0.41 +2020-02-10 06:00:00+00:00,14.11 +2020-02-10 07:00:00+00:00,21.38 +2020-02-10 08:00:00+00:00,13.2 +2020-02-10 09:00:00+00:00,12.54 +2020-02-10 10:00:00+00:00,12.57 +2020-02-10 11:00:00+00:00,12.01 +2020-02-10 12:00:00+00:00,11.99 +2020-02-10 13:00:00+00:00,11.98 +2020-02-10 14:00:00+00:00,12.24 +2020-02-10 15:00:00+00:00,12.77 +2020-02-10 16:00:00+00:00,23.14 +2020-02-10 17:00:00+00:00,24.96 +2020-02-10 18:00:00+00:00,25.69 +2020-02-10 19:00:00+00:00,19.93 +2020-02-10 20:00:00+00:00,12.47 +2020-02-10 21:00:00+00:00,11.97 +2020-02-10 22:00:00+00:00,4.86 +2020-02-10 23:00:00+00:00,10.96 +2020-02-11 00:00:00+00:00,4.83 +2020-02-11 01:00:00+00:00,0.11 +2020-02-11 02:00:00+00:00,-0.08 +2020-02-11 03:00:00+00:00,-1.86 +2020-02-11 04:00:00+00:00,1.39 +2020-02-11 05:00:00+00:00,11.68 +2020-02-11 06:00:00+00:00,14.95 +2020-02-11 07:00:00+00:00,14.95 +2020-02-11 08:00:00+00:00,13.24 +2020-02-11 09:00:00+00:00,13.11 +2020-02-11 10:00:00+00:00,13.0 +2020-02-11 11:00:00+00:00,12.85 +2020-02-11 12:00:00+00:00,12.77 +2020-02-11 13:00:00+00:00,13.0 +2020-02-11 14:00:00+00:00,12.94 +2020-02-11 15:00:00+00:00,13.67 +2020-02-11 16:00:00+00:00,23.98 +2020-02-11 17:00:00+00:00,24.55 +2020-02-11 18:00:00+00:00,24.54 +2020-02-11 19:00:00+00:00,12.94 +2020-02-11 20:00:00+00:00,12.22 +2020-02-11 21:00:00+00:00,12.04 +2020-02-11 22:00:00+00:00,7.1 +2020-02-11 23:00:00+00:00,3.12 +2020-02-12 00:00:00+00:00,0.88 +2020-02-12 01:00:00+00:00,0.07 +2020-02-12 02:00:00+00:00,0.08 +2020-02-12 03:00:00+00:00,5.29 +2020-02-12 04:00:00+00:00,12.94 +2020-02-12 05:00:00+00:00,20.93 +2020-02-12 06:00:00+00:00,34.24 +2020-02-12 07:00:00+00:00,32.01 +2020-02-12 08:00:00+00:00,24.17 +2020-02-12 09:00:00+00:00,22.31 +2020-02-12 10:00:00+00:00,17.9 +2020-02-12 11:00:00+00:00,13.69 +2020-02-12 12:00:00+00:00,14.02 +2020-02-12 13:00:00+00:00,13.88 +2020-02-12 14:00:00+00:00,22.98 +2020-02-12 15:00:00+00:00,27.87 +2020-02-12 16:00:00+00:00,31.2 +2020-02-12 17:00:00+00:00,37.45 +2020-02-12 18:00:00+00:00,33.19 +2020-02-12 19:00:00+00:00,31.23 +2020-02-12 20:00:00+00:00,26.53 +2020-02-12 21:00:00+00:00,24.16 +2020-02-12 22:00:00+00:00,16.93 +2020-02-12 23:00:00+00:00,25.53 +2020-02-13 00:00:00+00:00,25.91 +2020-02-13 01:00:00+00:00,24.37 +2020-02-13 02:00:00+00:00,24.31 +2020-02-13 03:00:00+00:00,22.8 +2020-02-13 04:00:00+00:00,26.41 +2020-02-13 05:00:00+00:00,39.37 +2020-02-13 06:00:00+00:00,58.88 +2020-02-13 07:00:00+00:00,53.48 +2020-02-13 08:00:00+00:00,48.17 +2020-02-13 09:00:00+00:00,40.5 +2020-02-13 10:00:00+00:00,38.13 +2020-02-13 11:00:00+00:00,36.94 +2020-02-13 12:00:00+00:00,35.97 +2020-02-13 13:00:00+00:00,35.73 +2020-02-13 14:00:00+00:00,36.02 +2020-02-13 15:00:00+00:00,37.76 +2020-02-13 16:00:00+00:00,43.0 +2020-02-13 17:00:00+00:00,53.03 +2020-02-13 18:00:00+00:00,44.83 +2020-02-13 19:00:00+00:00,41.96 +2020-02-13 20:00:00+00:00,39.53 +2020-02-13 21:00:00+00:00,37.93 +2020-02-13 22:00:00+00:00,31.57 +2020-02-13 23:00:00+00:00,27.36 +2020-02-14 00:00:00+00:00,27.09 +2020-02-14 01:00:00+00:00,27.0 +2020-02-14 02:00:00+00:00,25.03 +2020-02-14 03:00:00+00:00,26.2 +2020-02-14 04:00:00+00:00,27.04 +2020-02-14 05:00:00+00:00,38.3 +2020-02-14 06:00:00+00:00,44.86 +2020-02-14 07:00:00+00:00,56.74 +2020-02-14 08:00:00+00:00,49.39 +2020-02-14 09:00:00+00:00,43.93 +2020-02-14 10:00:00+00:00,41.44 +2020-02-14 11:00:00+00:00,39.28 +2020-02-14 
12:00:00+00:00,37.19
+2020-02-14 13:00:00+00:00,36.98
+2020-02-14 14:00:00+00:00,37.88
+2020-02-14 15:00:00+00:00,39.25
[hourly timestamp,value CSV rows (UTC) continue one per line through 2020-05-19]
+2020-05-19
10:00:00+00:00,19.31 +2020-05-19 11:00:00+00:00,18.19 +2020-05-19 12:00:00+00:00,17.71 +2020-05-19 13:00:00+00:00,17.82 +2020-05-19 14:00:00+00:00,19.03 +2020-05-19 15:00:00+00:00,22.14 +2020-05-19 16:00:00+00:00,28.67 +2020-05-19 17:00:00+00:00,42.65 +2020-05-19 18:00:00+00:00,49.98 +2020-05-19 19:00:00+00:00,40.48 +2020-05-19 20:00:00+00:00,29.07 +2020-05-19 21:00:00+00:00,23.18 +2020-05-19 22:00:00+00:00,21.24 +2020-05-19 23:00:00+00:00,22.35 +2020-05-20 00:00:00+00:00,21.58 +2020-05-20 01:00:00+00:00,21.56 +2020-05-20 02:00:00+00:00,20.67 +2020-05-20 03:00:00+00:00,23.25 +2020-05-20 04:00:00+00:00,44.83 +2020-05-20 05:00:00+00:00,56.69 +2020-05-20 06:00:00+00:00,57.0 +2020-05-20 07:00:00+00:00,34.31 +2020-05-20 08:00:00+00:00,28.72 +2020-05-20 09:00:00+00:00,30.55 +2020-05-20 10:00:00+00:00,28.09 +2020-05-20 11:00:00+00:00,23.43 +2020-05-20 12:00:00+00:00,22.93 +2020-05-20 13:00:00+00:00,23.06 +2020-05-20 14:00:00+00:00,23.24 +2020-05-20 15:00:00+00:00,25.63 +2020-05-20 16:00:00+00:00,33.97 +2020-05-20 17:00:00+00:00,44.58 +2020-05-20 18:00:00+00:00,56.3 +2020-05-20 19:00:00+00:00,46.9 +2020-05-20 20:00:00+00:00,35.42 +2020-05-20 21:00:00+00:00,22.94 +2020-05-20 22:00:00+00:00,21.37 +2020-05-20 23:00:00+00:00,20.01 +2020-05-21 00:00:00+00:00,17.29 +2020-05-21 01:00:00+00:00,16.74 +2020-05-21 02:00:00+00:00,16.84 +2020-05-21 03:00:00+00:00,16.18 +2020-05-21 04:00:00+00:00,17.85 +2020-05-21 05:00:00+00:00,19.77 +2020-05-21 06:00:00+00:00,19.56 +2020-05-21 07:00:00+00:00,16.12 +2020-05-21 08:00:00+00:00,14.82 +2020-05-21 09:00:00+00:00,14.92 +2020-05-21 10:00:00+00:00,14.81 +2020-05-21 11:00:00+00:00,13.29 +2020-05-21 12:00:00+00:00,12.14 +2020-05-21 13:00:00+00:00,13.9 +2020-05-21 14:00:00+00:00,15.47 +2020-05-21 15:00:00+00:00,18.0 +2020-05-21 16:00:00+00:00,22.09 +2020-05-21 17:00:00+00:00,29.66 +2020-05-21 18:00:00+00:00,42.66 +2020-05-21 19:00:00+00:00,29.52 +2020-05-21 20:00:00+00:00,24.12 +2020-05-21 21:00:00+00:00,22.78 +2020-05-21 22:00:00+00:00,20.0 +2020-05-21 23:00:00+00:00,17.03 +2020-05-22 00:00:00+00:00,15.5 +2020-05-22 01:00:00+00:00,14.02 +2020-05-22 02:00:00+00:00,14.15 +2020-05-22 03:00:00+00:00,15.75 +2020-05-22 04:00:00+00:00,20.83 +2020-05-22 05:00:00+00:00,21.94 +2020-05-22 06:00:00+00:00,21.92 +2020-05-22 07:00:00+00:00,21.2 +2020-05-22 08:00:00+00:00,16.49 +2020-05-22 09:00:00+00:00,14.59 +2020-05-22 10:00:00+00:00,14.69 +2020-05-22 11:00:00+00:00,14.57 +2020-05-22 12:00:00+00:00,16.09 +2020-05-22 13:00:00+00:00,17.27 +2020-05-22 14:00:00+00:00,18.35 +2020-05-22 15:00:00+00:00,22.07 +2020-05-22 16:00:00+00:00,22.98 +2020-05-22 17:00:00+00:00,23.71 +2020-05-22 18:00:00+00:00,23.35 +2020-05-22 19:00:00+00:00,22.2 +2020-05-22 20:00:00+00:00,22.81 +2020-05-22 21:00:00+00:00,20.47 +2020-05-22 22:00:00+00:00,14.01 +2020-05-22 23:00:00+00:00,11.48 +2020-05-23 00:00:00+00:00,10.87 +2020-05-23 01:00:00+00:00,8.72 +2020-05-23 02:00:00+00:00,9.37 +2020-05-23 03:00:00+00:00,9.5 +2020-05-23 04:00:00+00:00,12.37 +2020-05-23 05:00:00+00:00,15.07 +2020-05-23 06:00:00+00:00,16.57 +2020-05-23 07:00:00+00:00,16.72 +2020-05-23 08:00:00+00:00,15.53 +2020-05-23 09:00:00+00:00,12.63 +2020-05-23 10:00:00+00:00,12.7 +2020-05-23 11:00:00+00:00,4.64 +2020-05-23 12:00:00+00:00,0.01 +2020-05-23 13:00:00+00:00,-0.94 +2020-05-23 14:00:00+00:00,-0.38 +2020-05-23 15:00:00+00:00,0.75 +2020-05-23 16:00:00+00:00,8.01 +2020-05-23 17:00:00+00:00,12.2 +2020-05-23 18:00:00+00:00,14.04 +2020-05-23 19:00:00+00:00,13.94 +2020-05-23 20:00:00+00:00,14.56 +2020-05-23 21:00:00+00:00,13.99 +2020-05-23 
22:00:00+00:00,0.03 +2020-05-23 23:00:00+00:00,-2.44 +2020-05-24 00:00:00+00:00,-8.77 +2020-05-24 01:00:00+00:00,-20.01 +2020-05-24 02:00:00+00:00,-20.37 +2020-05-24 03:00:00+00:00,-24.65 +2020-05-24 04:00:00+00:00,-30.98 +2020-05-24 05:00:00+00:00,-25.0 +2020-05-24 06:00:00+00:00,-26.97 +2020-05-24 07:00:00+00:00,-39.46 +2020-05-24 08:00:00+00:00,-63.04 +2020-05-24 09:00:00+00:00,-63.06 +2020-05-24 10:00:00+00:00,-70.04 +2020-05-24 11:00:00+00:00,-74.97 +2020-05-24 12:00:00+00:00,-74.97 +2020-05-24 13:00:00+00:00,-69.99 +2020-05-24 14:00:00+00:00,-57.74 +2020-05-24 15:00:00+00:00,-16.98 +2020-05-24 16:00:00+00:00,1.54 +2020-05-24 17:00:00+00:00,8.03 +2020-05-24 18:00:00+00:00,14.0 +2020-05-24 19:00:00+00:00,14.39 +2020-05-24 20:00:00+00:00,16.31 +2020-05-24 21:00:00+00:00,8.0 +2020-05-24 22:00:00+00:00,13.01 +2020-05-24 23:00:00+00:00,12.03 +2020-05-25 00:00:00+00:00,8.03 +2020-05-25 01:00:00+00:00,7.59 +2020-05-25 02:00:00+00:00,7.73 +2020-05-25 03:00:00+00:00,9.99 +2020-05-25 04:00:00+00:00,16.02 +2020-05-25 05:00:00+00:00,21.09 +2020-05-25 06:00:00+00:00,22.06 +2020-05-25 07:00:00+00:00,20.92 +2020-05-25 08:00:00+00:00,20.0 +2020-05-25 09:00:00+00:00,20.94 +2020-05-25 10:00:00+00:00,17.5 +2020-05-25 11:00:00+00:00,15.47 +2020-05-25 12:00:00+00:00,14.38 +2020-05-25 13:00:00+00:00,14.0 +2020-05-25 14:00:00+00:00,15.75 +2020-05-25 15:00:00+00:00,18.9 +2020-05-25 16:00:00+00:00,23.53 +2020-05-25 17:00:00+00:00,26.08 +2020-05-25 18:00:00+00:00,38.32 +2020-05-25 19:00:00+00:00,34.35 +2020-05-25 20:00:00+00:00,26.25 +2020-05-25 21:00:00+00:00,23.19 +2020-05-25 22:00:00+00:00,21.1 +2020-05-25 23:00:00+00:00,22.63 +2020-05-26 00:00:00+00:00,21.41 +2020-05-26 01:00:00+00:00,21.49 +2020-05-26 02:00:00+00:00,22.65 +2020-05-26 03:00:00+00:00,22.72 +2020-05-26 04:00:00+00:00,34.94 +2020-05-26 05:00:00+00:00,40.05 +2020-05-26 06:00:00+00:00,34.65 +2020-05-26 07:00:00+00:00,24.21 +2020-05-26 08:00:00+00:00,22.59 +2020-05-26 09:00:00+00:00,21.97 +2020-05-26 10:00:00+00:00,21.0 +2020-05-26 11:00:00+00:00,19.75 +2020-05-26 12:00:00+00:00,19.98 +2020-05-26 13:00:00+00:00,20.91 +2020-05-26 14:00:00+00:00,21.0 +2020-05-26 15:00:00+00:00,24.2 +2020-05-26 16:00:00+00:00,31.29 +2020-05-26 17:00:00+00:00,47.9 +2020-05-26 18:00:00+00:00,50.15 +2020-05-26 19:00:00+00:00,35.95 +2020-05-26 20:00:00+00:00,31.51 +2020-05-26 21:00:00+00:00,24.15 +2020-05-26 22:00:00+00:00,23.15 +2020-05-26 23:00:00+00:00,21.68 +2020-05-27 00:00:00+00:00,21.4 +2020-05-27 01:00:00+00:00,21.92 +2020-05-27 02:00:00+00:00,21.99 +2020-05-27 03:00:00+00:00,22.54 +2020-05-27 04:00:00+00:00,26.05 +2020-05-27 05:00:00+00:00,31.94 +2020-05-27 06:00:00+00:00,26.85 +2020-05-27 07:00:00+00:00,24.1 +2020-05-27 08:00:00+00:00,21.16 +2020-05-27 09:00:00+00:00,23.08 +2020-05-27 10:00:00+00:00,22.04 +2020-05-27 11:00:00+00:00,20.71 +2020-05-27 12:00:00+00:00,20.0 +2020-05-27 13:00:00+00:00,19.99 +2020-05-27 14:00:00+00:00,19.65 +2020-05-27 15:00:00+00:00,21.4 +2020-05-27 16:00:00+00:00,22.76 +2020-05-27 17:00:00+00:00,24.92 +2020-05-27 18:00:00+00:00,25.54 +2020-05-27 19:00:00+00:00,23.99 +2020-05-27 20:00:00+00:00,22.65 +2020-05-27 21:00:00+00:00,20.02 +2020-05-27 22:00:00+00:00,18.09 +2020-05-27 23:00:00+00:00,16.31 +2020-05-28 00:00:00+00:00,16.2 +2020-05-28 01:00:00+00:00,15.92 +2020-05-28 02:00:00+00:00,16.16 +2020-05-28 03:00:00+00:00,18.03 +2020-05-28 04:00:00+00:00,22.94 +2020-05-28 05:00:00+00:00,25.55 +2020-05-28 06:00:00+00:00,25.16 +2020-05-28 07:00:00+00:00,22.99 +2020-05-28 08:00:00+00:00,20.15 +2020-05-28 09:00:00+00:00,18.95 +2020-05-28 
10:00:00+00:00,18.0 +2020-05-28 11:00:00+00:00,17.03 +2020-05-28 12:00:00+00:00,16.9 +2020-05-28 13:00:00+00:00,16.19 +2020-05-28 14:00:00+00:00,15.48 +2020-05-28 15:00:00+00:00,19.99 +2020-05-28 16:00:00+00:00,22.99 +2020-05-28 17:00:00+00:00,26.15 +2020-05-28 18:00:00+00:00,26.11 +2020-05-28 19:00:00+00:00,27.16 +2020-05-28 20:00:00+00:00,25.05 +2020-05-28 21:00:00+00:00,22.39 +2020-05-28 22:00:00+00:00,19.42 +2020-05-28 23:00:00+00:00,18.88 +2020-05-29 00:00:00+00:00,18.22 +2020-05-29 01:00:00+00:00,19.07 +2020-05-29 02:00:00+00:00,19.43 +2020-05-29 03:00:00+00:00,22.36 +2020-05-29 04:00:00+00:00,27.92 +2020-05-29 05:00:00+00:00,37.2 +2020-05-29 06:00:00+00:00,30.92 +2020-05-29 07:00:00+00:00,24.4 +2020-05-29 08:00:00+00:00,22.35 +2020-05-29 09:00:00+00:00,20.68 +2020-05-29 10:00:00+00:00,19.91 +2020-05-29 11:00:00+00:00,19.04 +2020-05-29 12:00:00+00:00,16.15 +2020-05-29 13:00:00+00:00,17.3 +2020-05-29 14:00:00+00:00,18.1 +2020-05-29 15:00:00+00:00,22.85 +2020-05-29 16:00:00+00:00,25.68 +2020-05-29 17:00:00+00:00,26.49 +2020-05-29 18:00:00+00:00,25.79 +2020-05-29 19:00:00+00:00,25.97 +2020-05-29 20:00:00+00:00,24.16 +2020-05-29 21:00:00+00:00,22.32 +2020-05-29 22:00:00+00:00,18.27 +2020-05-29 23:00:00+00:00,15.55 +2020-05-30 00:00:00+00:00,15.06 +2020-05-30 01:00:00+00:00,15.0 +2020-05-30 02:00:00+00:00,14.55 +2020-05-30 03:00:00+00:00,15.0 +2020-05-30 04:00:00+00:00,14.4 +2020-05-30 05:00:00+00:00,15.04 +2020-05-30 06:00:00+00:00,13.0 +2020-05-30 07:00:00+00:00,12.0 +2020-05-30 08:00:00+00:00,9.33 +2020-05-30 09:00:00+00:00,7.17 +2020-05-30 10:00:00+00:00,9.44 +2020-05-30 11:00:00+00:00,4.8 +2020-05-30 12:00:00+00:00,2.61 +2020-05-30 13:00:00+00:00,2.17 +2020-05-30 14:00:00+00:00,4.61 +2020-05-30 15:00:00+00:00,9.06 +2020-05-30 16:00:00+00:00,15.33 +2020-05-30 17:00:00+00:00,17.58 +2020-05-30 18:00:00+00:00,17.46 +2020-05-30 19:00:00+00:00,18.28 +2020-05-30 20:00:00+00:00,17.91 +2020-05-30 21:00:00+00:00,17.26 +2020-05-30 22:00:00+00:00,11.88 +2020-05-30 23:00:00+00:00,11.6 +2020-05-31 00:00:00+00:00,11.76 +2020-05-31 01:00:00+00:00,11.09 +2020-05-31 02:00:00+00:00,8.83 +2020-05-31 03:00:00+00:00,7.15 +2020-05-31 04:00:00+00:00,5.56 +2020-05-31 05:00:00+00:00,6.12 +2020-05-31 06:00:00+00:00,2.58 +2020-05-31 07:00:00+00:00,2.7 +2020-05-31 08:00:00+00:00,0.1 +2020-05-31 09:00:00+00:00,1.05 +2020-05-31 10:00:00+00:00,0.85 +2020-05-31 11:00:00+00:00,-7.68 +2020-05-31 12:00:00+00:00,-35.51 +2020-05-31 13:00:00+00:00,-45.05 +2020-05-31 14:00:00+00:00,-20.22 +2020-05-31 15:00:00+00:00,-0.1 +2020-05-31 16:00:00+00:00,4.98 +2020-05-31 17:00:00+00:00,12.54 +2020-05-31 18:00:00+00:00,12.57 +2020-05-31 19:00:00+00:00,13.38 +2020-05-31 20:00:00+00:00,12.59 +2020-05-31 21:00:00+00:00,11.1 +2020-05-31 22:00:00+00:00,8.15 +2020-05-31 23:00:00+00:00,8.93 +2020-06-01 00:00:00+00:00,9.8 +2020-06-01 01:00:00+00:00,6.07 +2020-06-01 02:00:00+00:00,4.07 +2020-06-01 03:00:00+00:00,5.77 +2020-06-01 04:00:00+00:00,7.31 +2020-06-01 05:00:00+00:00,10.05 +2020-06-01 06:00:00+00:00,10.96 +2020-06-01 07:00:00+00:00,9.17 +2020-06-01 08:00:00+00:00,4.01 +2020-06-01 09:00:00+00:00,1.71 +2020-06-01 10:00:00+00:00,3.04 +2020-06-01 11:00:00+00:00,-20.51 +2020-06-01 12:00:00+00:00,-48.17 +2020-06-01 13:00:00+00:00,-15.47 +2020-06-01 14:00:00+00:00,0.47 +2020-06-01 15:00:00+00:00,11.53 +2020-06-01 16:00:00+00:00,16.57 +2020-06-01 17:00:00+00:00,22.0 +2020-06-01 18:00:00+00:00,23.05 +2020-06-01 19:00:00+00:00,22.91 +2020-06-01 20:00:00+00:00,23.37 +2020-06-01 21:00:00+00:00,20.9 +2020-06-01 22:00:00+00:00,18.08 +2020-06-01 
23:00:00+00:00,16.33 +2020-06-02 00:00:00+00:00,14.99 +2020-06-02 01:00:00+00:00,14.65 +2020-06-02 02:00:00+00:00,15.92 +2020-06-02 03:00:00+00:00,19.96 +2020-06-02 04:00:00+00:00,29.37 +2020-06-02 05:00:00+00:00,47.66 +2020-06-02 06:00:00+00:00,42.29 +2020-06-02 07:00:00+00:00,24.1 +2020-06-02 08:00:00+00:00,21.86 +2020-06-02 09:00:00+00:00,21.45 +2020-06-02 10:00:00+00:00,20.91 +2020-06-02 11:00:00+00:00,19.54 +2020-06-02 12:00:00+00:00,19.02 +2020-06-02 13:00:00+00:00,18.06 +2020-06-02 14:00:00+00:00,20.09 +2020-06-02 15:00:00+00:00,26.02 +2020-06-02 16:00:00+00:00,41.7 +2020-06-02 17:00:00+00:00,53.65 +2020-06-02 18:00:00+00:00,60.75 +2020-06-02 19:00:00+00:00,49.95 +2020-06-02 20:00:00+00:00,37.92 +2020-06-02 21:00:00+00:00,25.92 +2020-06-02 22:00:00+00:00,25.54 +2020-06-02 23:00:00+00:00,24.0 +2020-06-03 00:00:00+00:00,22.72 +2020-06-03 01:00:00+00:00,20.87 +2020-06-03 02:00:00+00:00,20.24 +2020-06-03 03:00:00+00:00,21.17 +2020-06-03 04:00:00+00:00,31.82 +2020-06-03 05:00:00+00:00,51.41 +2020-06-03 06:00:00+00:00,36.06 +2020-06-03 07:00:00+00:00,31.07 +2020-06-03 08:00:00+00:00,28.72 +2020-06-03 09:00:00+00:00,28.0 +2020-06-03 10:00:00+00:00,27.23 +2020-06-03 11:00:00+00:00,25.27 +2020-06-03 12:00:00+00:00,25.58 +2020-06-03 13:00:00+00:00,26.04 +2020-06-03 14:00:00+00:00,25.78 +2020-06-03 15:00:00+00:00,26.18 +2020-06-03 16:00:00+00:00,29.68 +2020-06-03 17:00:00+00:00,40.17 +2020-06-03 18:00:00+00:00,40.1 +2020-06-03 19:00:00+00:00,32.6 +2020-06-03 20:00:00+00:00,27.91 +2020-06-03 21:00:00+00:00,23.99 +2020-06-03 22:00:00+00:00,22.17 +2020-06-03 23:00:00+00:00,20.98 +2020-06-04 00:00:00+00:00,19.61 +2020-06-04 01:00:00+00:00,20.15 +2020-06-04 02:00:00+00:00,20.36 +2020-06-04 03:00:00+00:00,23.83 +2020-06-04 04:00:00+00:00,28.13 +2020-06-04 05:00:00+00:00,33.64 +2020-06-04 06:00:00+00:00,43.97 +2020-06-04 07:00:00+00:00,33.25 +2020-06-04 08:00:00+00:00,31.82 +2020-06-04 09:00:00+00:00,31.77 +2020-06-04 10:00:00+00:00,30.5 +2020-06-04 11:00:00+00:00,28.93 +2020-06-04 12:00:00+00:00,26.13 +2020-06-04 13:00:00+00:00,26.0 +2020-06-04 14:00:00+00:00,25.7 +2020-06-04 15:00:00+00:00,26.38 +2020-06-04 16:00:00+00:00,28.2 +2020-06-04 17:00:00+00:00,26.07 +2020-06-04 18:00:00+00:00,25.0 +2020-06-04 19:00:00+00:00,23.94 +2020-06-04 20:00:00+00:00,22.69 +2020-06-04 21:00:00+00:00,18.04 +2020-06-04 22:00:00+00:00,17.25 +2020-06-04 23:00:00+00:00,16.8 +2020-06-05 00:00:00+00:00,17.06 +2020-06-05 01:00:00+00:00,15.99 +2020-06-05 02:00:00+00:00,15.61 +2020-06-05 03:00:00+00:00,17.09 +2020-06-05 04:00:00+00:00,24.41 +2020-06-05 05:00:00+00:00,28.96 +2020-06-05 06:00:00+00:00,37.07 +2020-06-05 07:00:00+00:00,33.07 +2020-06-05 08:00:00+00:00,28.4 +2020-06-05 09:00:00+00:00,31.42 +2020-06-05 10:00:00+00:00,26.43 +2020-06-05 11:00:00+00:00,24.0 +2020-06-05 12:00:00+00:00,22.5 +2020-06-05 13:00:00+00:00,23.02 +2020-06-05 14:00:00+00:00,23.6 +2020-06-05 15:00:00+00:00,25.65 +2020-06-05 16:00:00+00:00,27.91 +2020-06-05 17:00:00+00:00,27.99 +2020-06-05 18:00:00+00:00,27.03 +2020-06-05 19:00:00+00:00,25.89 +2020-06-05 20:00:00+00:00,25.89 +2020-06-05 21:00:00+00:00,20.16 +2020-06-05 22:00:00+00:00,7.69 +2020-06-05 23:00:00+00:00,1.46 +2020-06-06 00:00:00+00:00,-0.09 +2020-06-06 01:00:00+00:00,0.03 +2020-06-06 02:00:00+00:00,1.44 +2020-06-06 03:00:00+00:00,1.35 +2020-06-06 04:00:00+00:00,0.08 +2020-06-06 05:00:00+00:00,2.81 +2020-06-06 06:00:00+00:00,5.79 +2020-06-06 07:00:00+00:00,5.65 +2020-06-06 08:00:00+00:00,4.22 +2020-06-06 09:00:00+00:00,2.42 +2020-06-06 10:00:00+00:00,0.06 +2020-06-06 
11:00:00+00:00,-4.9 +2020-06-06 12:00:00+00:00,-3.46 +2020-06-06 13:00:00+00:00,-1.71 +2020-06-06 14:00:00+00:00,0.05 +2020-06-06 15:00:00+00:00,9.78 +2020-06-06 16:00:00+00:00,16.42 +2020-06-06 17:00:00+00:00,21.23 +2020-06-06 18:00:00+00:00,23.27 +2020-06-06 19:00:00+00:00,22.98 +2020-06-06 20:00:00+00:00,23.91 +2020-06-06 21:00:00+00:00,19.96 +2020-06-06 22:00:00+00:00,21.04 +2020-06-06 23:00:00+00:00,17.21 +2020-06-07 00:00:00+00:00,14.93 +2020-06-07 01:00:00+00:00,12.63 +2020-06-07 02:00:00+00:00,10.92 +2020-06-07 03:00:00+00:00,10.96 +2020-06-07 04:00:00+00:00,10.88 +2020-06-07 05:00:00+00:00,14.04 +2020-06-07 06:00:00+00:00,15.36 +2020-06-07 07:00:00+00:00,16.0 +2020-06-07 08:00:00+00:00,15.7 +2020-06-07 09:00:00+00:00,16.06 +2020-06-07 10:00:00+00:00,16.94 +2020-06-07 11:00:00+00:00,13.37 +2020-06-07 12:00:00+00:00,10.76 +2020-06-07 13:00:00+00:00,10.64 +2020-06-07 14:00:00+00:00,13.64 +2020-06-07 15:00:00+00:00,17.0 +2020-06-07 16:00:00+00:00,21.06 +2020-06-07 17:00:00+00:00,25.36 +2020-06-07 18:00:00+00:00,28.56 +2020-06-07 19:00:00+00:00,29.14 +2020-06-07 20:00:00+00:00,32.33 +2020-06-07 21:00:00+00:00,28.91 +2020-06-07 22:00:00+00:00,26.0 +2020-06-07 23:00:00+00:00,22.7 +2020-06-08 00:00:00+00:00,22.01 +2020-06-08 01:00:00+00:00,21.36 +2020-06-08 02:00:00+00:00,21.25 +2020-06-08 03:00:00+00:00,23.03 +2020-06-08 04:00:00+00:00,33.45 +2020-06-08 05:00:00+00:00,41.92 +2020-06-08 06:00:00+00:00,40.25 +2020-06-08 07:00:00+00:00,34.76 +2020-06-08 08:00:00+00:00,33.95 +2020-06-08 09:00:00+00:00,35.0 +2020-06-08 10:00:00+00:00,33.0 +2020-06-08 11:00:00+00:00,31.93 +2020-06-08 12:00:00+00:00,30.2 +2020-06-08 13:00:00+00:00,29.28 +2020-06-08 14:00:00+00:00,29.08 +2020-06-08 15:00:00+00:00,33.31 +2020-06-08 16:00:00+00:00,38.85 +2020-06-08 17:00:00+00:00,38.65 +2020-06-08 18:00:00+00:00,35.23 +2020-06-08 19:00:00+00:00,34.6 +2020-06-08 20:00:00+00:00,33.05 +2020-06-08 21:00:00+00:00,30.0 +2020-06-08 22:00:00+00:00,28.39 +2020-06-08 23:00:00+00:00,25.79 +2020-06-09 00:00:00+00:00,24.08 +2020-06-09 01:00:00+00:00,23.67 +2020-06-09 02:00:00+00:00,23.48 +2020-06-09 03:00:00+00:00,25.27 +2020-06-09 04:00:00+00:00,33.43 +2020-06-09 05:00:00+00:00,43.85 +2020-06-09 06:00:00+00:00,52.31 +2020-06-09 07:00:00+00:00,42.46 +2020-06-09 08:00:00+00:00,46.62 +2020-06-09 09:00:00+00:00,46.68 +2020-06-09 10:00:00+00:00,44.12 +2020-06-09 11:00:00+00:00,39.11 +2020-06-09 12:00:00+00:00,36.39 +2020-06-09 13:00:00+00:00,33.49 +2020-06-09 14:00:00+00:00,33.76 +2020-06-09 15:00:00+00:00,41.61 +2020-06-09 16:00:00+00:00,45.21 +2020-06-09 17:00:00+00:00,48.07 +2020-06-09 18:00:00+00:00,43.09 +2020-06-09 19:00:00+00:00,37.47 +2020-06-09 20:00:00+00:00,32.83 +2020-06-09 21:00:00+00:00,29.06 +2020-06-09 22:00:00+00:00,26.09 +2020-06-09 23:00:00+00:00,24.2 +2020-06-10 00:00:00+00:00,22.61 +2020-06-10 01:00:00+00:00,21.49 +2020-06-10 02:00:00+00:00,22.58 +2020-06-10 03:00:00+00:00,24.15 +2020-06-10 04:00:00+00:00,29.41 +2020-06-10 05:00:00+00:00,35.5 +2020-06-10 06:00:00+00:00,42.15 +2020-06-10 07:00:00+00:00,41.06 +2020-06-10 08:00:00+00:00,40.4 +2020-06-10 09:00:00+00:00,35.81 +2020-06-10 10:00:00+00:00,34.9 +2020-06-10 11:00:00+00:00,34.14 +2020-06-10 12:00:00+00:00,32.4 +2020-06-10 13:00:00+00:00,31.3 +2020-06-10 14:00:00+00:00,30.24 +2020-06-10 15:00:00+00:00,31.87 +2020-06-10 16:00:00+00:00,33.77 +2020-06-10 17:00:00+00:00,34.91 +2020-06-10 18:00:00+00:00,33.05 +2020-06-10 19:00:00+00:00,31.5 +2020-06-10 20:00:00+00:00,31.45 +2020-06-10 21:00:00+00:00,27.77 +2020-06-10 22:00:00+00:00,26.06 +2020-06-10 
23:00:00+00:00,22.75 +2020-06-11 00:00:00+00:00,21.07 +2020-06-11 01:00:00+00:00,19.43 +2020-06-11 02:00:00+00:00,18.96 +2020-06-11 03:00:00+00:00,18.96 +2020-06-11 04:00:00+00:00,21.56 +2020-06-11 05:00:00+00:00,26.06 +2020-06-11 06:00:00+00:00,30.0 +2020-06-11 07:00:00+00:00,28.96 +2020-06-11 08:00:00+00:00,26.06 +2020-06-11 09:00:00+00:00,25.51 +2020-06-11 10:00:00+00:00,23.89 +2020-06-11 11:00:00+00:00,21.99 +2020-06-11 12:00:00+00:00,20.5 +2020-06-11 13:00:00+00:00,19.1 +2020-06-11 14:00:00+00:00,19.28 +2020-06-11 15:00:00+00:00,23.07 +2020-06-11 16:00:00+00:00,26.08 +2020-06-11 17:00:00+00:00,30.65 +2020-06-11 18:00:00+00:00,30.17 +2020-06-11 19:00:00+00:00,27.09 +2020-06-11 20:00:00+00:00,26.51 +2020-06-11 21:00:00+00:00,23.81 +2020-06-11 22:00:00+00:00,20.72 +2020-06-11 23:00:00+00:00,17.96 +2020-06-12 00:00:00+00:00,17.09 +2020-06-12 01:00:00+00:00,16.01 +2020-06-12 02:00:00+00:00,16.0 +2020-06-12 03:00:00+00:00,18.08 +2020-06-12 04:00:00+00:00,22.92 +2020-06-12 05:00:00+00:00,25.1 +2020-06-12 06:00:00+00:00,26.01 +2020-06-12 07:00:00+00:00,23.23 +2020-06-12 08:00:00+00:00,21.07 +2020-06-12 09:00:00+00:00,20.5 +2020-06-12 10:00:00+00:00,19.45 +2020-06-12 11:00:00+00:00,15.2 +2020-06-12 12:00:00+00:00,16.19 +2020-06-12 13:00:00+00:00,16.14 +2020-06-12 14:00:00+00:00,17.4 +2020-06-12 15:00:00+00:00,20.06 +2020-06-12 16:00:00+00:00,23.53 +2020-06-12 17:00:00+00:00,27.51 +2020-06-12 18:00:00+00:00,26.22 +2020-06-12 19:00:00+00:00,24.7 +2020-06-12 20:00:00+00:00,25.46 +2020-06-12 21:00:00+00:00,20.78 +2020-06-12 22:00:00+00:00,20.98 +2020-06-12 23:00:00+00:00,19.44 +2020-06-13 00:00:00+00:00,18.7 +2020-06-13 01:00:00+00:00,18.8 +2020-06-13 02:00:00+00:00,19.0 +2020-06-13 03:00:00+00:00,18.9 +2020-06-13 04:00:00+00:00,19.33 +2020-06-13 05:00:00+00:00,19.86 +2020-06-13 06:00:00+00:00,20.0 +2020-06-13 07:00:00+00:00,19.1 +2020-06-13 08:00:00+00:00,18.5 +2020-06-13 09:00:00+00:00,18.11 +2020-06-13 10:00:00+00:00,16.47 +2020-06-13 11:00:00+00:00,16.25 +2020-06-13 12:00:00+00:00,15.89 +2020-06-13 13:00:00+00:00,15.84 +2020-06-13 14:00:00+00:00,17.5 +2020-06-13 15:00:00+00:00,18.5 +2020-06-13 16:00:00+00:00,21.93 +2020-06-13 17:00:00+00:00,24.87 +2020-06-13 18:00:00+00:00,25.96 +2020-06-13 19:00:00+00:00,24.42 +2020-06-13 20:00:00+00:00,24.85 +2020-06-13 21:00:00+00:00,23.06 +2020-06-13 22:00:00+00:00,18.53 +2020-06-13 23:00:00+00:00,16.8 +2020-06-14 00:00:00+00:00,14.91 +2020-06-14 01:00:00+00:00,13.82 +2020-06-14 02:00:00+00:00,14.84 +2020-06-14 03:00:00+00:00,14.04 +2020-06-14 04:00:00+00:00,14.94 +2020-06-14 05:00:00+00:00,15.3 +2020-06-14 06:00:00+00:00,15.0 +2020-06-14 07:00:00+00:00,18.54 +2020-06-14 08:00:00+00:00,19.07 +2020-06-14 09:00:00+00:00,20.74 +2020-06-14 10:00:00+00:00,19.73 +2020-06-14 11:00:00+00:00,17.97 +2020-06-14 12:00:00+00:00,15.14 +2020-06-14 13:00:00+00:00,14.49 +2020-06-14 14:00:00+00:00,14.8 +2020-06-14 15:00:00+00:00,17.7 +2020-06-14 16:00:00+00:00,21.08 +2020-06-14 17:00:00+00:00,23.8 +2020-06-14 18:00:00+00:00,25.97 +2020-06-14 19:00:00+00:00,25.87 +2020-06-14 20:00:00+00:00,27.91 +2020-06-14 21:00:00+00:00,23.17 +2020-06-14 22:00:00+00:00,23.07 +2020-06-14 23:00:00+00:00,20.65 +2020-06-15 00:00:00+00:00,19.06 +2020-06-15 01:00:00+00:00,18.02 +2020-06-15 02:00:00+00:00,17.4 +2020-06-15 03:00:00+00:00,21.36 +2020-06-15 04:00:00+00:00,29.93 +2020-06-15 05:00:00+00:00,35.28 +2020-06-15 06:00:00+00:00,36.05 +2020-06-15 07:00:00+00:00,35.89 +2020-06-15 08:00:00+00:00,34.09 +2020-06-15 09:00:00+00:00,34.0 +2020-06-15 10:00:00+00:00,34.1 +2020-06-15 
11:00:00+00:00,33.15 +2020-06-15 12:00:00+00:00,31.9 +2020-06-15 13:00:00+00:00,31.0 +2020-06-15 14:00:00+00:00,30.52 +2020-06-15 15:00:00+00:00,33.0 +2020-06-15 16:00:00+00:00,38.0 +2020-06-15 17:00:00+00:00,43.52 +2020-06-15 18:00:00+00:00,41.07 +2020-06-15 19:00:00+00:00,35.91 +2020-06-15 20:00:00+00:00,36.39 +2020-06-15 21:00:00+00:00,31.92 +2020-06-15 22:00:00+00:00,29.86 +2020-06-15 23:00:00+00:00,26.59 +2020-06-16 00:00:00+00:00,25.17 +2020-06-16 01:00:00+00:00,24.79 +2020-06-16 02:00:00+00:00,24.34 +2020-06-16 03:00:00+00:00,25.1 +2020-06-16 04:00:00+00:00,31.13 +2020-06-16 05:00:00+00:00,38.39 +2020-06-16 06:00:00+00:00,45.9 +2020-06-16 07:00:00+00:00,41.39 +2020-06-16 08:00:00+00:00,39.32 +2020-06-16 09:00:00+00:00,40.31 +2020-06-16 10:00:00+00:00,38.75 +2020-06-16 11:00:00+00:00,34.97 +2020-06-16 12:00:00+00:00,32.21 +2020-06-16 13:00:00+00:00,30.86 +2020-06-16 14:00:00+00:00,30.66 +2020-06-16 15:00:00+00:00,33.73 +2020-06-16 16:00:00+00:00,38.32 +2020-06-16 17:00:00+00:00,42.91 +2020-06-16 18:00:00+00:00,40.66 +2020-06-16 19:00:00+00:00,37.66 +2020-06-16 20:00:00+00:00,34.92 +2020-06-16 21:00:00+00:00,30.94 +2020-06-16 22:00:00+00:00,32.05 +2020-06-16 23:00:00+00:00,30.38 +2020-06-17 00:00:00+00:00,28.1 +2020-06-17 01:00:00+00:00,26.92 +2020-06-17 02:00:00+00:00,26.53 +2020-06-17 03:00:00+00:00,28.76 +2020-06-17 04:00:00+00:00,35.96 +2020-06-17 05:00:00+00:00,51.93 +2020-06-17 06:00:00+00:00,56.52 +2020-06-17 07:00:00+00:00,42.14 +2020-06-17 08:00:00+00:00,38.99 +2020-06-17 09:00:00+00:00,38.39 +2020-06-17 10:00:00+00:00,36.03 +2020-06-17 11:00:00+00:00,36.01 +2020-06-17 12:00:00+00:00,35.05 +2020-06-17 13:00:00+00:00,33.59 +2020-06-17 14:00:00+00:00,33.0 +2020-06-17 15:00:00+00:00,40.06 +2020-06-17 16:00:00+00:00,45.58 +2020-06-17 17:00:00+00:00,47.33 +2020-06-17 18:00:00+00:00,42.32 +2020-06-17 19:00:00+00:00,39.71 +2020-06-17 20:00:00+00:00,36.59 +2020-06-17 21:00:00+00:00,31.7 +2020-06-17 22:00:00+00:00,27.74 +2020-06-17 23:00:00+00:00,25.26 +2020-06-18 00:00:00+00:00,23.51 +2020-06-18 01:00:00+00:00,22.82 +2020-06-18 02:00:00+00:00,22.92 +2020-06-18 03:00:00+00:00,24.31 +2020-06-18 04:00:00+00:00,28.86 +2020-06-18 05:00:00+00:00,34.59 +2020-06-18 06:00:00+00:00,36.59 +2020-06-18 07:00:00+00:00,36.93 +2020-06-18 08:00:00+00:00,36.73 +2020-06-18 09:00:00+00:00,36.87 +2020-06-18 10:00:00+00:00,36.21 +2020-06-18 11:00:00+00:00,34.0 +2020-06-18 12:00:00+00:00,32.17 +2020-06-18 13:00:00+00:00,31.81 +2020-06-18 14:00:00+00:00,30.72 +2020-06-18 15:00:00+00:00,33.1 +2020-06-18 16:00:00+00:00,35.0 +2020-06-18 17:00:00+00:00,36.99 +2020-06-18 18:00:00+00:00,36.3 +2020-06-18 19:00:00+00:00,35.57 +2020-06-18 20:00:00+00:00,34.3 +2020-06-18 21:00:00+00:00,29.98 +2020-06-18 22:00:00+00:00,28.1 +2020-06-18 23:00:00+00:00,24.45 +2020-06-19 00:00:00+00:00,23.25 +2020-06-19 01:00:00+00:00,22.28 +2020-06-19 02:00:00+00:00,22.2 +2020-06-19 03:00:00+00:00,23.07 +2020-06-19 04:00:00+00:00,30.06 +2020-06-19 05:00:00+00:00,35.33 +2020-06-19 06:00:00+00:00,37.8 +2020-06-19 07:00:00+00:00,36.08 +2020-06-19 08:00:00+00:00,34.56 +2020-06-19 09:00:00+00:00,33.33 +2020-06-19 10:00:00+00:00,31.22 +2020-06-19 11:00:00+00:00,28.4 +2020-06-19 12:00:00+00:00,26.89 +2020-06-19 13:00:00+00:00,25.29 +2020-06-19 14:00:00+00:00,24.07 +2020-06-19 15:00:00+00:00,28.64 +2020-06-19 16:00:00+00:00,32.61 +2020-06-19 17:00:00+00:00,34.0 +2020-06-19 18:00:00+00:00,34.22 +2020-06-19 19:00:00+00:00,34.13 +2020-06-19 20:00:00+00:00,35.0 +2020-06-19 21:00:00+00:00,31.5 +2020-06-19 22:00:00+00:00,26.7 +2020-06-19 
23:00:00+00:00,24.73 +2020-06-20 00:00:00+00:00,22.8 +2020-06-20 01:00:00+00:00,21.63 +2020-06-20 02:00:00+00:00,20.48 +2020-06-20 03:00:00+00:00,20.04 +2020-06-20 04:00:00+00:00,21.85 +2020-06-20 05:00:00+00:00,22.94 +2020-06-20 06:00:00+00:00,24.17 +2020-06-20 07:00:00+00:00,23.67 +2020-06-20 08:00:00+00:00,22.06 +2020-06-20 09:00:00+00:00,21.5 +2020-06-20 10:00:00+00:00,20.5 +2020-06-20 11:00:00+00:00,17.9 +2020-06-20 12:00:00+00:00,15.27 +2020-06-20 13:00:00+00:00,15.68 +2020-06-20 14:00:00+00:00,18.98 +2020-06-20 15:00:00+00:00,23.67 +2020-06-20 16:00:00+00:00,28.3 +2020-06-20 17:00:00+00:00,30.87 +2020-06-20 18:00:00+00:00,33.0 +2020-06-20 19:00:00+00:00,34.77 +2020-06-20 20:00:00+00:00,34.94 +2020-06-20 21:00:00+00:00,31.58 +2020-06-20 22:00:00+00:00,30.11 +2020-06-20 23:00:00+00:00,25.07 +2020-06-21 00:00:00+00:00,22.15 +2020-06-21 01:00:00+00:00,20.26 +2020-06-21 02:00:00+00:00,16.2 +2020-06-21 03:00:00+00:00,12.53 +2020-06-21 04:00:00+00:00,10.98 +2020-06-21 05:00:00+00:00,11.25 +2020-06-21 06:00:00+00:00,14.0 +2020-06-21 07:00:00+00:00,13.68 +2020-06-21 08:00:00+00:00,15.6 +2020-06-21 09:00:00+00:00,17.0 +2020-06-21 10:00:00+00:00,18.62 +2020-06-21 11:00:00+00:00,13.08 +2020-06-21 12:00:00+00:00,8.26 +2020-06-21 13:00:00+00:00,8.4 +2020-06-21 14:00:00+00:00,11.65 +2020-06-21 15:00:00+00:00,20.84 +2020-06-21 16:00:00+00:00,24.83 +2020-06-21 17:00:00+00:00,29.61 +2020-06-21 18:00:00+00:00,30.2 +2020-06-21 19:00:00+00:00,30.84 +2020-06-21 20:00:00+00:00,32.93 +2020-06-21 21:00:00+00:00,29.9 +2020-06-21 22:00:00+00:00,23.57 +2020-06-21 23:00:00+00:00,21.88 +2020-06-22 00:00:00+00:00,20.61 +2020-06-22 01:00:00+00:00,21.04 +2020-06-22 02:00:00+00:00,21.5 +2020-06-22 03:00:00+00:00,23.04 +2020-06-22 04:00:00+00:00,30.69 +2020-06-22 05:00:00+00:00,35.41 +2020-06-22 06:00:00+00:00,38.58 +2020-06-22 07:00:00+00:00,34.5 +2020-06-22 08:00:00+00:00,33.08 +2020-06-22 09:00:00+00:00,31.66 +2020-06-22 10:00:00+00:00,30.04 +2020-06-22 11:00:00+00:00,24.67 +2020-06-22 12:00:00+00:00,24.49 +2020-06-22 13:00:00+00:00,23.62 +2020-06-22 14:00:00+00:00,24.59 +2020-06-22 15:00:00+00:00,31.41 +2020-06-22 16:00:00+00:00,34.39 +2020-06-22 17:00:00+00:00,36.74 +2020-06-22 18:00:00+00:00,37.63 +2020-06-22 19:00:00+00:00,37.39 +2020-06-22 20:00:00+00:00,36.38 +2020-06-22 21:00:00+00:00,33.01 +2020-06-22 22:00:00+00:00,30.0 +2020-06-22 23:00:00+00:00,27.12 +2020-06-23 00:00:00+00:00,25.88 +2020-06-23 01:00:00+00:00,24.4 +2020-06-23 02:00:00+00:00,24.43 +2020-06-23 03:00:00+00:00,27.04 +2020-06-23 04:00:00+00:00,35.03 +2020-06-23 05:00:00+00:00,37.44 +2020-06-23 06:00:00+00:00,37.16 +2020-06-23 07:00:00+00:00,34.9 +2020-06-23 08:00:00+00:00,31.92 +2020-06-23 09:00:00+00:00,30.24 +2020-06-23 10:00:00+00:00,28.79 +2020-06-23 11:00:00+00:00,28.23 +2020-06-23 12:00:00+00:00,27.82 +2020-06-23 13:00:00+00:00,28.65 +2020-06-23 14:00:00+00:00,30.0 +2020-06-23 15:00:00+00:00,33.0 +2020-06-23 16:00:00+00:00,36.59 +2020-06-23 17:00:00+00:00,44.51 +2020-06-23 18:00:00+00:00,48.65 +2020-06-23 19:00:00+00:00,41.29 +2020-06-23 20:00:00+00:00,38.41 +2020-06-23 21:00:00+00:00,34.3 +2020-06-23 22:00:00+00:00,33.02 +2020-06-23 23:00:00+00:00,31.37 +2020-06-24 00:00:00+00:00,27.83 +2020-06-24 01:00:00+00:00,26.76 +2020-06-24 02:00:00+00:00,26.86 +2020-06-24 03:00:00+00:00,28.53 +2020-06-24 04:00:00+00:00,37.57 +2020-06-24 05:00:00+00:00,41.71 +2020-06-24 06:00:00+00:00,39.03 +2020-06-24 07:00:00+00:00,37.66 +2020-06-24 08:00:00+00:00,36.12 +2020-06-24 09:00:00+00:00,35.21 +2020-06-24 10:00:00+00:00,34.84 +2020-06-24 
11:00:00+00:00,33.69 +2020-06-24 12:00:00+00:00,32.77 +2020-06-24 13:00:00+00:00,32.72 +2020-06-24 14:00:00+00:00,33.98 +2020-06-24 15:00:00+00:00,37.16 +2020-06-24 16:00:00+00:00,40.93 +2020-06-24 17:00:00+00:00,47.11 +2020-06-24 18:00:00+00:00,45.65 +2020-06-24 19:00:00+00:00,40.24 +2020-06-24 20:00:00+00:00,38.95 +2020-06-24 21:00:00+00:00,33.98 +2020-06-24 22:00:00+00:00,30.32 +2020-06-24 23:00:00+00:00,27.54 +2020-06-25 00:00:00+00:00,26.51 +2020-06-25 01:00:00+00:00,25.75 +2020-06-25 02:00:00+00:00,25.43 +2020-06-25 03:00:00+00:00,27.1 +2020-06-25 04:00:00+00:00,34.49 +2020-06-25 05:00:00+00:00,40.73 +2020-06-25 06:00:00+00:00,42.09 +2020-06-25 07:00:00+00:00,40.98 +2020-06-25 08:00:00+00:00,38.83 +2020-06-25 09:00:00+00:00,36.5 +2020-06-25 10:00:00+00:00,33.31 +2020-06-25 11:00:00+00:00,31.38 +2020-06-25 12:00:00+00:00,29.94 +2020-06-25 13:00:00+00:00,29.87 +2020-06-25 14:00:00+00:00,32.4 +2020-06-25 15:00:00+00:00,37.06 +2020-06-25 16:00:00+00:00,39.55 +2020-06-25 17:00:00+00:00,44.13 +2020-06-25 18:00:00+00:00,41.23 +2020-06-25 19:00:00+00:00,40.24 +2020-06-25 20:00:00+00:00,38.0 +2020-06-25 21:00:00+00:00,33.43 +2020-06-25 22:00:00+00:00,32.47 +2020-06-25 23:00:00+00:00,28.12 +2020-06-26 00:00:00+00:00,25.96 +2020-06-26 01:00:00+00:00,25.59 +2020-06-26 02:00:00+00:00,25.63 +2020-06-26 03:00:00+00:00,26.8 +2020-06-26 04:00:00+00:00,34.05 +2020-06-26 05:00:00+00:00,38.94 +2020-06-26 06:00:00+00:00,40.13 +2020-06-26 07:00:00+00:00,39.19 +2020-06-26 08:00:00+00:00,36.93 +2020-06-26 09:00:00+00:00,34.64 +2020-06-26 10:00:00+00:00,33.12 +2020-06-26 11:00:00+00:00,32.79 +2020-06-26 12:00:00+00:00,32.42 +2020-06-26 13:00:00+00:00,32.18 +2020-06-26 14:00:00+00:00,33.28 +2020-06-26 15:00:00+00:00,36.45 +2020-06-26 16:00:00+00:00,40.43 +2020-06-26 17:00:00+00:00,46.8 +2020-06-26 18:00:00+00:00,41.96 +2020-06-26 19:00:00+00:00,39.9 +2020-06-26 20:00:00+00:00,40.1 +2020-06-26 21:00:00+00:00,37.4 +2020-06-26 22:00:00+00:00,35.92 +2020-06-26 23:00:00+00:00,30.68 +2020-06-27 00:00:00+00:00,28.3 +2020-06-27 01:00:00+00:00,27.48 +2020-06-27 02:00:00+00:00,27.14 +2020-06-27 03:00:00+00:00,26.82 +2020-06-27 04:00:00+00:00,25.34 +2020-06-27 05:00:00+00:00,27.72 +2020-06-27 06:00:00+00:00,28.44 +2020-06-27 07:00:00+00:00,28.22 +2020-06-27 08:00:00+00:00,27.01 +2020-06-27 09:00:00+00:00,25.0 +2020-06-27 10:00:00+00:00,23.09 +2020-06-27 11:00:00+00:00,21.56 +2020-06-27 12:00:00+00:00,18.29 +2020-06-27 13:00:00+00:00,17.52 +2020-06-27 14:00:00+00:00,18.52 +2020-06-27 15:00:00+00:00,23.05 +2020-06-27 16:00:00+00:00,27.08 +2020-06-27 17:00:00+00:00,30.52 +2020-06-27 18:00:00+00:00,32.28 +2020-06-27 19:00:00+00:00,32.96 +2020-06-27 20:00:00+00:00,32.91 +2020-06-27 21:00:00+00:00,28.09 +2020-06-27 22:00:00+00:00,23.93 +2020-06-27 23:00:00+00:00,21.03 +2020-06-28 00:00:00+00:00,16.9 +2020-06-28 01:00:00+00:00,14.26 +2020-06-28 02:00:00+00:00,15.48 +2020-06-28 03:00:00+00:00,13.02 +2020-06-28 04:00:00+00:00,12.45 +2020-06-28 05:00:00+00:00,12.6 +2020-06-28 06:00:00+00:00,13.36 +2020-06-28 07:00:00+00:00,12.48 +2020-06-28 08:00:00+00:00,14.17 +2020-06-28 09:00:00+00:00,15.9 +2020-06-28 10:00:00+00:00,13.71 +2020-06-28 11:00:00+00:00,1.42 +2020-06-28 12:00:00+00:00,0.08 +2020-06-28 13:00:00+00:00,0.54 +2020-06-28 14:00:00+00:00,2.22 +2020-06-28 15:00:00+00:00,16.9 +2020-06-28 16:00:00+00:00,25.04 +2020-06-28 17:00:00+00:00,29.83 +2020-06-28 18:00:00+00:00,33.04 +2020-06-28 19:00:00+00:00,35.07 +2020-06-28 20:00:00+00:00,36.0 +2020-06-28 21:00:00+00:00,31.14 +2020-06-28 22:00:00+00:00,24.02 +2020-06-28 
23:00:00+00:00,19.77 +2020-06-29 00:00:00+00:00,17.76 +2020-06-29 01:00:00+00:00,17.4 +2020-06-29 02:00:00+00:00,17.6 +2020-06-29 03:00:00+00:00,19.88 +2020-06-29 04:00:00+00:00,26.52 +2020-06-29 05:00:00+00:00,35.54 +2020-06-29 06:00:00+00:00,40.45 +2020-06-29 07:00:00+00:00,38.81 +2020-06-29 08:00:00+00:00,35.4 +2020-06-29 09:00:00+00:00,34.03 +2020-06-29 10:00:00+00:00,29.35 +2020-06-29 11:00:00+00:00,22.63 +2020-06-29 12:00:00+00:00,21.98 +2020-06-29 13:00:00+00:00,21.02 +2020-06-29 14:00:00+00:00,19.85 +2020-06-29 15:00:00+00:00,22.84 +2020-06-29 16:00:00+00:00,32.96 +2020-06-29 17:00:00+00:00,34.95 +2020-06-29 18:00:00+00:00,34.9 +2020-06-29 19:00:00+00:00,34.63 +2020-06-29 20:00:00+00:00,33.97 +2020-06-29 21:00:00+00:00,23.28 +2020-06-29 22:00:00+00:00,22.49 +2020-06-29 23:00:00+00:00,21.02 +2020-06-30 00:00:00+00:00,17.53 +2020-06-30 01:00:00+00:00,15.82 +2020-06-30 02:00:00+00:00,16.44 +2020-06-30 03:00:00+00:00,19.1 +2020-06-30 04:00:00+00:00,25.79 +2020-06-30 05:00:00+00:00,31.95 +2020-06-30 06:00:00+00:00,26.14 +2020-06-30 07:00:00+00:00,17.1 +2020-06-30 08:00:00+00:00,2.58 +2020-06-30 09:00:00+00:00,0.39 +2020-06-30 10:00:00+00:00,0.92 +2020-06-30 11:00:00+00:00,-0.08 +2020-06-30 12:00:00+00:00,0.06 +2020-06-30 13:00:00+00:00,1.32 +2020-06-30 14:00:00+00:00,1.44 +2020-06-30 15:00:00+00:00,21.2 +2020-06-30 16:00:00+00:00,27.73 +2020-06-30 17:00:00+00:00,33.38 +2020-06-30 18:00:00+00:00,36.1 +2020-06-30 19:00:00+00:00,35.54 +2020-06-30 20:00:00+00:00,34.94 +2020-06-30 21:00:00+00:00,31.6 +2020-06-30 22:00:00+00:00,23.92 +2020-06-30 23:00:00+00:00,25.04 +2020-07-01 00:00:00+00:00,25.59 +2020-07-01 01:00:00+00:00,25.03 +2020-07-01 02:00:00+00:00,24.78 +2020-07-01 03:00:00+00:00,25.89 +2020-07-01 04:00:00+00:00,33.75 +2020-07-01 05:00:00+00:00,37.97 +2020-07-01 06:00:00+00:00,39.99 +2020-07-01 07:00:00+00:00,38.29 +2020-07-01 08:00:00+00:00,37.06 +2020-07-01 09:00:00+00:00,36.14 +2020-07-01 10:00:00+00:00,28.4 +2020-07-01 11:00:00+00:00,24.16 +2020-07-01 12:00:00+00:00,25.0 +2020-07-01 13:00:00+00:00,26.41 +2020-07-01 14:00:00+00:00,27.17 +2020-07-01 15:00:00+00:00,32.91 +2020-07-01 16:00:00+00:00,37.37 +2020-07-01 17:00:00+00:00,43.29 +2020-07-01 18:00:00+00:00,40.8 +2020-07-01 19:00:00+00:00,40.12 +2020-07-01 20:00:00+00:00,40.25 +2020-07-01 21:00:00+00:00,37.91 +2020-07-01 22:00:00+00:00,34.53 +2020-07-01 23:00:00+00:00,30.91 +2020-07-02 00:00:00+00:00,28.38 +2020-07-02 01:00:00+00:00,27.2 +2020-07-02 02:00:00+00:00,27.9 +2020-07-02 03:00:00+00:00,30.19 +2020-07-02 04:00:00+00:00,38.96 +2020-07-02 05:00:00+00:00,42.84 +2020-07-02 06:00:00+00:00,47.3 +2020-07-02 07:00:00+00:00,41.69 +2020-07-02 08:00:00+00:00,42.14 +2020-07-02 09:00:00+00:00,41.59 +2020-07-02 10:00:00+00:00,39.47 +2020-07-02 11:00:00+00:00,38.33 +2020-07-02 12:00:00+00:00,35.94 +2020-07-02 13:00:00+00:00,35.06 +2020-07-02 14:00:00+00:00,33.92 +2020-07-02 15:00:00+00:00,38.87 +2020-07-02 16:00:00+00:00,46.0 +2020-07-02 17:00:00+00:00,52.65 +2020-07-02 18:00:00+00:00,47.91 +2020-07-02 19:00:00+00:00,45.33 +2020-07-02 20:00:00+00:00,46.0 +2020-07-02 21:00:00+00:00,40.82 +2020-07-02 22:00:00+00:00,40.46 +2020-07-02 23:00:00+00:00,35.0 +2020-07-03 00:00:00+00:00,32.57 +2020-07-03 01:00:00+00:00,31.27 +2020-07-03 02:00:00+00:00,29.9 +2020-07-03 03:00:00+00:00,30.87 +2020-07-03 04:00:00+00:00,39.99 +2020-07-03 05:00:00+00:00,47.1 +2020-07-03 06:00:00+00:00,49.95 +2020-07-03 07:00:00+00:00,43.44 +2020-07-03 08:00:00+00:00,38.95 +2020-07-03 09:00:00+00:00,37.32 +2020-07-03 10:00:00+00:00,34.05 +2020-07-03 
11:00:00+00:00,28.88 +2020-07-03 12:00:00+00:00,25.73 +2020-07-03 13:00:00+00:00,24.95 +2020-07-03 14:00:00+00:00,24.22 +2020-07-03 15:00:00+00:00,31.22 +2020-07-03 16:00:00+00:00,37.24 +2020-07-03 17:00:00+00:00,38.82 +2020-07-03 18:00:00+00:00,36.97 +2020-07-03 19:00:00+00:00,35.25 +2020-07-03 20:00:00+00:00,35.02 +2020-07-03 21:00:00+00:00,29.18 +2020-07-03 22:00:00+00:00,27.19 +2020-07-03 23:00:00+00:00,24.28 +2020-07-04 00:00:00+00:00,21.07 +2020-07-04 01:00:00+00:00,14.86 +2020-07-04 02:00:00+00:00,16.15 +2020-07-04 03:00:00+00:00,15.52 +2020-07-04 04:00:00+00:00,14.46 +2020-07-04 05:00:00+00:00,18.56 +2020-07-04 06:00:00+00:00,19.2 +2020-07-04 07:00:00+00:00,2.41 +2020-07-04 08:00:00+00:00,1.47 +2020-07-04 09:00:00+00:00,0.38 +2020-07-04 10:00:00+00:00,0.02 +2020-07-04 11:00:00+00:00,-4.71 +2020-07-04 12:00:00+00:00,0.91 +2020-07-04 13:00:00+00:00,0.29 +2020-07-04 14:00:00+00:00,13.61 +2020-07-04 15:00:00+00:00,21.95 +2020-07-04 16:00:00+00:00,26.81 +2020-07-04 17:00:00+00:00,28.03 +2020-07-04 18:00:00+00:00,27.58 +2020-07-04 19:00:00+00:00,26.73 +2020-07-04 20:00:00+00:00,28.0 +2020-07-04 21:00:00+00:00,24.91 +2020-07-04 22:00:00+00:00,11.21 +2020-07-04 23:00:00+00:00,2.19 +2020-07-05 00:00:00+00:00,0.08 +2020-07-05 01:00:00+00:00,-0.05 +2020-07-05 02:00:00+00:00,-3.82 +2020-07-05 03:00:00+00:00,-13.5 +2020-07-05 04:00:00+00:00,-14.91 +2020-07-05 05:00:00+00:00,-13.45 +2020-07-05 06:00:00+00:00,-13.87 +2020-07-05 07:00:00+00:00,-14.54 +2020-07-05 08:00:00+00:00,-17.01 +2020-07-05 09:00:00+00:00,-26.93 +2020-07-05 10:00:00+00:00,-63.02 +2020-07-05 11:00:00+00:00,-64.55 +2020-07-05 12:00:00+00:00,-64.99 +2020-07-05 13:00:00+00:00,-64.96 +2020-07-05 14:00:00+00:00,-64.59 +2020-07-05 15:00:00+00:00,-36.97 +2020-07-05 16:00:00+00:00,-4.44 +2020-07-05 17:00:00+00:00,1.49 +2020-07-05 18:00:00+00:00,19.17 +2020-07-05 19:00:00+00:00,25.94 +2020-07-05 20:00:00+00:00,30.24 +2020-07-05 21:00:00+00:00,23.19 +2020-07-05 22:00:00+00:00,19.9 +2020-07-05 23:00:00+00:00,7.71 +2020-07-06 00:00:00+00:00,6.45 +2020-07-06 01:00:00+00:00,3.37 +2020-07-06 02:00:00+00:00,3.23 +2020-07-06 03:00:00+00:00,5.61 +2020-07-06 04:00:00+00:00,20.42 +2020-07-06 05:00:00+00:00,28.27 +2020-07-06 06:00:00+00:00,27.97 +2020-07-06 07:00:00+00:00,21.89 +2020-07-06 08:00:00+00:00,1.49 +2020-07-06 09:00:00+00:00,1.1 +2020-07-06 10:00:00+00:00,1.25 +2020-07-06 11:00:00+00:00,0.05 +2020-07-06 12:00:00+00:00,-3.05 +2020-07-06 13:00:00+00:00,-2.97 +2020-07-06 14:00:00+00:00,-0.02 +2020-07-06 15:00:00+00:00,1.23 +2020-07-06 16:00:00+00:00,27.96 +2020-07-06 17:00:00+00:00,34.22 +2020-07-06 18:00:00+00:00,34.51 +2020-07-06 19:00:00+00:00,35.31 +2020-07-06 20:00:00+00:00,35.94 +2020-07-06 21:00:00+00:00,30.94 +2020-07-06 22:00:00+00:00,29.56 +2020-07-06 23:00:00+00:00,28.54 +2020-07-07 00:00:00+00:00,27.96 +2020-07-07 01:00:00+00:00,27.3 +2020-07-07 02:00:00+00:00,27.15 +2020-07-07 03:00:00+00:00,27.9 +2020-07-07 04:00:00+00:00,33.83 +2020-07-07 05:00:00+00:00,37.65 +2020-07-07 06:00:00+00:00,37.2 +2020-07-07 07:00:00+00:00,34.87 +2020-07-07 08:00:00+00:00,30.82 +2020-07-07 09:00:00+00:00,27.99 +2020-07-07 10:00:00+00:00,27.73 +2020-07-07 11:00:00+00:00,24.98 +2020-07-07 12:00:00+00:00,24.36 +2020-07-07 13:00:00+00:00,24.8 +2020-07-07 14:00:00+00:00,25.18 +2020-07-07 15:00:00+00:00,30.48 +2020-07-07 16:00:00+00:00,38.48 +2020-07-07 17:00:00+00:00,40.78 +2020-07-07 18:00:00+00:00,40.84 +2020-07-07 19:00:00+00:00,40.1 +2020-07-07 20:00:00+00:00,40.15 +2020-07-07 21:00:00+00:00,37.0 +2020-07-07 22:00:00+00:00,35.93 +2020-07-07 
23:00:00+00:00,31.05 +2020-07-08 00:00:00+00:00,30.41 +2020-07-08 01:00:00+00:00,29.92 +2020-07-08 02:00:00+00:00,29.55 +2020-07-08 03:00:00+00:00,30.3 +2020-07-08 04:00:00+00:00,36.98 +2020-07-08 05:00:00+00:00,43.0 +2020-07-08 06:00:00+00:00,47.0 +2020-07-08 07:00:00+00:00,46.2 +2020-07-08 08:00:00+00:00,44.44 +2020-07-08 09:00:00+00:00,44.71 +2020-07-08 10:00:00+00:00,42.02 +2020-07-08 11:00:00+00:00,41.09 +2020-07-08 12:00:00+00:00,40.2 +2020-07-08 13:00:00+00:00,39.99 +2020-07-08 14:00:00+00:00,40.02 +2020-07-08 15:00:00+00:00,45.05 +2020-07-08 16:00:00+00:00,52.13 +2020-07-08 17:00:00+00:00,55.0 +2020-07-08 18:00:00+00:00,52.92 +2020-07-08 19:00:00+00:00,45.93 +2020-07-08 20:00:00+00:00,45.49 +2020-07-08 21:00:00+00:00,39.8 +2020-07-08 22:00:00+00:00,39.94 +2020-07-08 23:00:00+00:00,35.41 +2020-07-09 00:00:00+00:00,32.66 +2020-07-09 01:00:00+00:00,30.21 +2020-07-09 02:00:00+00:00,30.99 +2020-07-09 03:00:00+00:00,34.37 +2020-07-09 04:00:00+00:00,42.9 +2020-07-09 05:00:00+00:00,51.96 +2020-07-09 06:00:00+00:00,51.18 +2020-07-09 07:00:00+00:00,47.94 +2020-07-09 08:00:00+00:00,47.08 +2020-07-09 09:00:00+00:00,47.56 +2020-07-09 10:00:00+00:00,43.23 +2020-07-09 11:00:00+00:00,42.53 +2020-07-09 12:00:00+00:00,40.11 +2020-07-09 13:00:00+00:00,39.84 +2020-07-09 14:00:00+00:00,39.9 +2020-07-09 15:00:00+00:00,41.78 +2020-07-09 16:00:00+00:00,46.35 +2020-07-09 17:00:00+00:00,51.7 +2020-07-09 18:00:00+00:00,47.39 +2020-07-09 19:00:00+00:00,46.22 +2020-07-09 20:00:00+00:00,45.0 +2020-07-09 21:00:00+00:00,41.91 +2020-07-09 22:00:00+00:00,39.0 +2020-07-09 23:00:00+00:00,34.62 +2020-07-10 00:00:00+00:00,32.94 +2020-07-10 01:00:00+00:00,31.1 +2020-07-10 02:00:00+00:00,30.52 +2020-07-10 03:00:00+00:00,32.05 +2020-07-10 04:00:00+00:00,38.06 +2020-07-10 05:00:00+00:00,41.01 +2020-07-10 06:00:00+00:00,42.94 +2020-07-10 07:00:00+00:00,42.54 +2020-07-10 08:00:00+00:00,41.76 +2020-07-10 09:00:00+00:00,39.95 +2020-07-10 10:00:00+00:00,38.43 +2020-07-10 11:00:00+00:00,35.41 +2020-07-10 12:00:00+00:00,29.54 +2020-07-10 13:00:00+00:00,29.17 +2020-07-10 14:00:00+00:00,30.5 +2020-07-10 15:00:00+00:00,35.34 +2020-07-10 16:00:00+00:00,38.98 +2020-07-10 17:00:00+00:00,40.79 +2020-07-10 18:00:00+00:00,39.0 +2020-07-10 19:00:00+00:00,39.0 +2020-07-10 20:00:00+00:00,39.4 +2020-07-10 21:00:00+00:00,36.8 +2020-07-10 22:00:00+00:00,34.57 +2020-07-10 23:00:00+00:00,29.9 +2020-07-11 00:00:00+00:00,28.37 +2020-07-11 01:00:00+00:00,27.13 +2020-07-11 02:00:00+00:00,26.1 +2020-07-11 03:00:00+00:00,26.1 +2020-07-11 04:00:00+00:00,26.09 +2020-07-11 05:00:00+00:00,27.58 +2020-07-11 06:00:00+00:00,29.09 +2020-07-11 07:00:00+00:00,28.34 +2020-07-11 08:00:00+00:00,26.53 +2020-07-11 09:00:00+00:00,25.62 +2020-07-11 10:00:00+00:00,23.29 +2020-07-11 11:00:00+00:00,22.86 +2020-07-11 12:00:00+00:00,22.42 +2020-07-11 13:00:00+00:00,22.45 +2020-07-11 14:00:00+00:00,24.36 +2020-07-11 15:00:00+00:00,26.5 +2020-07-11 16:00:00+00:00,30.42 +2020-07-11 17:00:00+00:00,34.0 +2020-07-11 18:00:00+00:00,36.15 +2020-07-11 19:00:00+00:00,35.91 +2020-07-11 20:00:00+00:00,37.73 +2020-07-11 21:00:00+00:00,34.92 +2020-07-11 22:00:00+00:00,32.97 +2020-07-11 23:00:00+00:00,28.82 +2020-07-12 00:00:00+00:00,26.19 +2020-07-12 01:00:00+00:00,25.03 +2020-07-12 02:00:00+00:00,24.7 +2020-07-12 03:00:00+00:00,24.1 +2020-07-12 04:00:00+00:00,21.68 +2020-07-12 05:00:00+00:00,22.69 +2020-07-12 06:00:00+00:00,18.41 +2020-07-12 07:00:00+00:00,18.04 +2020-07-12 08:00:00+00:00,16.97 +2020-07-12 09:00:00+00:00,16.07 +2020-07-12 10:00:00+00:00,18.62 +2020-07-12 
11:00:00+00:00,16.04 +2020-07-12 12:00:00+00:00,14.76 +2020-07-12 13:00:00+00:00,15.8 +2020-07-12 14:00:00+00:00,18.11 +2020-07-12 15:00:00+00:00,23.97 +2020-07-12 16:00:00+00:00,29.05 +2020-07-12 17:00:00+00:00,35.12 +2020-07-12 18:00:00+00:00,37.33 +2020-07-12 19:00:00+00:00,37.74 +2020-07-12 20:00:00+00:00,37.08 +2020-07-12 21:00:00+00:00,35.23 +2020-07-12 22:00:00+00:00,29.48 +2020-07-12 23:00:00+00:00,26.03 +2020-07-13 00:00:00+00:00,25.07 +2020-07-13 01:00:00+00:00,25.07 +2020-07-13 02:00:00+00:00,25.1 +2020-07-13 03:00:00+00:00,26.51 +2020-07-13 04:00:00+00:00,34.64 +2020-07-13 05:00:00+00:00,39.52 +2020-07-13 06:00:00+00:00,38.12 +2020-07-13 07:00:00+00:00,37.68 +2020-07-13 08:00:00+00:00,34.05 +2020-07-13 09:00:00+00:00,32.6 +2020-07-13 10:00:00+00:00,31.13 +2020-07-13 11:00:00+00:00,31.18 +2020-07-13 12:00:00+00:00,32.08 +2020-07-13 13:00:00+00:00,31.96 +2020-07-13 14:00:00+00:00,34.0 +2020-07-13 15:00:00+00:00,37.41 +2020-07-13 16:00:00+00:00,40.8 +2020-07-13 17:00:00+00:00,45.56 +2020-07-13 18:00:00+00:00,44.76 +2020-07-13 19:00:00+00:00,41.01 +2020-07-13 20:00:00+00:00,40.05 +2020-07-13 21:00:00+00:00,36.4 +2020-07-13 22:00:00+00:00,34.47 +2020-07-13 23:00:00+00:00,29.79 +2020-07-14 00:00:00+00:00,28.16 +2020-07-14 01:00:00+00:00,27.74 +2020-07-14 02:00:00+00:00,27.45 +2020-07-14 03:00:00+00:00,28.52 +2020-07-14 04:00:00+00:00,35.85 +2020-07-14 05:00:00+00:00,42.91 +2020-07-14 06:00:00+00:00,38.93 +2020-07-14 07:00:00+00:00,36.63 +2020-07-14 08:00:00+00:00,36.15 +2020-07-14 09:00:00+00:00,36.52 +2020-07-14 10:00:00+00:00,36.98 +2020-07-14 11:00:00+00:00,36.97 +2020-07-14 12:00:00+00:00,36.57 +2020-07-14 13:00:00+00:00,36.57 +2020-07-14 14:00:00+00:00,36.2 +2020-07-14 15:00:00+00:00,38.69 +2020-07-14 16:00:00+00:00,44.74 +2020-07-14 17:00:00+00:00,45.97 +2020-07-14 18:00:00+00:00,46.08 +2020-07-14 19:00:00+00:00,41.98 +2020-07-14 20:00:00+00:00,41.43 +2020-07-14 21:00:00+00:00,37.02 +2020-07-14 22:00:00+00:00,35.55 +2020-07-14 23:00:00+00:00,30.22 +2020-07-15 00:00:00+00:00,30.0 +2020-07-15 01:00:00+00:00,28.73 +2020-07-15 02:00:00+00:00,29.84 +2020-07-15 03:00:00+00:00,34.29 +2020-07-15 04:00:00+00:00,41.05 +2020-07-15 05:00:00+00:00,48.91 +2020-07-15 06:00:00+00:00,57.38 +2020-07-15 07:00:00+00:00,47.9 +2020-07-15 08:00:00+00:00,46.07 +2020-07-15 09:00:00+00:00,50.95 +2020-07-15 10:00:00+00:00,46.57 +2020-07-15 11:00:00+00:00,43.78 +2020-07-15 12:00:00+00:00,41.86 +2020-07-15 13:00:00+00:00,42.1 +2020-07-15 14:00:00+00:00,39.4 +2020-07-15 15:00:00+00:00,41.71 +2020-07-15 16:00:00+00:00,44.52 +2020-07-15 17:00:00+00:00,47.3 +2020-07-15 18:00:00+00:00,44.93 +2020-07-15 19:00:00+00:00,47.91 +2020-07-15 20:00:00+00:00,42.09 +2020-07-15 21:00:00+00:00,37.97 +2020-07-15 22:00:00+00:00,36.19 +2020-07-15 23:00:00+00:00,32.5 +2020-07-16 00:00:00+00:00,29.84 +2020-07-16 01:00:00+00:00,28.97 +2020-07-16 02:00:00+00:00,29.35 +2020-07-16 03:00:00+00:00,32.29 +2020-07-16 04:00:00+00:00,43.11 +2020-07-16 05:00:00+00:00,50.9 +2020-07-16 06:00:00+00:00,56.18 +2020-07-16 07:00:00+00:00,58.04 +2020-07-16 08:00:00+00:00,58.7 +2020-07-16 09:00:00+00:00,53.87 +2020-07-16 10:00:00+00:00,52.62 +2020-07-16 11:00:00+00:00,44.41 +2020-07-16 12:00:00+00:00,43.93 +2020-07-16 13:00:00+00:00,44.03 +2020-07-16 14:00:00+00:00,44.01 +2020-07-16 15:00:00+00:00,48.92 +2020-07-16 16:00:00+00:00,45.38 +2020-07-16 17:00:00+00:00,48.39 +2020-07-16 18:00:00+00:00,54.56 +2020-07-16 19:00:00+00:00,48.43 +2020-07-16 20:00:00+00:00,43.1 +2020-07-16 21:00:00+00:00,38.98 +2020-07-16 22:00:00+00:00,36.48 +2020-07-16 
23:00:00+00:00,31.85
[... hourly timestamp,value rows from 2020-07-17 00:00 through 2020-09-30 22:00 elided ...]
diff --git a/docs/notebooks/data/raw/tmy_dresden.csv b/docs/notebooks/data/raw/tmy_dresden.csv
new file mode 100644
index 000000000..176ef3468
--- /dev/null
+++ b/docs/notebooks/data/raw/tmy_dresden.csv
@@ -0,0 +1,8761 @@
+time,temperature_C,ghi_W_m2,dni_W_m2,dhi_W_m2,wind_speed_m_s,relative_humidity_percent
+2020-01-01 00:00:00,-1.77,0.0,-0.0,0.0,2.87,88.15
+2020-01-01 01:00:00,-2.63,0.0,-0.0,0.0,2.78,88.43
+2020-01-01 02:00:00,-3.49,0.0,-0.0,0.0,2.68,88.71
[... hourly TMY rows for 2020-01-01 03:00 through 2020-01-12 14:00 elided ...]
+2020-01-12
15:00:00,-4.02,0.0,0.0,0.0,3.93,63.7 +2020-01-12 16:00:00,-5.23,0.0,-0.0,0.0,4.14,68.85 +2020-01-12 17:00:00,-5.83,0.0,-0.0,0.0,4.34,71.65 +2020-01-12 18:00:00,-5.89,0.0,-0.0,0.0,4.48,71.65 +2020-01-12 19:00:00,-7.14,0.0,-0.0,0.0,4.83,71.5 +2020-01-12 20:00:00,-7.11,0.0,-0.0,0.0,4.83,71.5 +2020-01-12 21:00:00,-7.13,0.0,-0.0,0.0,4.83,74.55 +2020-01-12 22:00:00,-7.23,0.0,-0.0,0.0,4.83,74.55 +2020-01-12 23:00:00,-7.27,0.0,-0.0,0.0,4.69,74.45 +2020-01-13 00:00:00,-7.35,0.0,-0.0,0.0,4.48,74.45 +2020-01-13 01:00:00,-7.25,0.0,-0.0,0.0,4.34,74.45 +2020-01-13 02:00:00,-7.19,0.0,-0.0,0.0,4.28,71.5 +2020-01-13 03:00:00,-7.06,0.0,-0.0,0.0,4.41,71.5 +2020-01-13 04:00:00,-6.91,0.0,-0.0,0.0,4.34,71.5 +2020-01-13 05:00:00,-6.91,0.0,-0.0,0.0,4.34,71.5 +2020-01-13 06:00:00,-6.81,0.0,-0.0,0.0,4.28,71.5 +2020-01-13 07:00:00,-6.52,0.0,0.0,0.0,4.41,71.55 +2020-01-13 08:00:00,-6.13,72.0,177.04,50.0,4.41,71.65 +2020-01-13 09:00:00,-4.8,130.0,137.28,100.0,4.21,68.95 +2020-01-13 10:00:00,-3.01,229.0,427.32,110.0,4.21,63.9 +2020-01-13 11:00:00,-1.4,243.0,393.28,125.0,3.72,66.8 +2020-01-13 12:00:00,-0.25,222.0,365.58,119.0,3.86,64.35 +2020-01-13 13:00:00,0.12,181.0,435.89,83.0,3.59,64.45 +2020-01-13 14:00:00,-0.44,69.0,105.12,55.0,3.45,67.0 +2020-01-13 15:00:00,-1.76,0.0,0.0,0.0,3.38,72.3 +2020-01-13 16:00:00,-3.26,0.0,-0.0,0.0,3.38,72.1 +2020-01-13 17:00:00,-4.31,0.0,-0.0,0.0,3.38,74.95 +2020-01-13 18:00:00,-5.04,0.0,-0.0,0.0,3.66,74.9 +2020-01-13 19:00:00,-6.43,0.0,-0.0,0.0,3.86,77.75 +2020-01-13 20:00:00,-6.54,0.0,-0.0,0.0,4.14,77.75 +2020-01-13 21:00:00,-6.6,0.0,-0.0,0.0,4.07,77.75 +2020-01-13 22:00:00,-6.89,0.0,-0.0,0.0,3.59,81.0 +2020-01-13 23:00:00,-7.36,0.0,-0.0,0.0,3.38,80.9 +2020-01-14 00:00:00,-7.93,0.0,-0.0,0.0,3.38,80.85 +2020-01-14 01:00:00,-8.43,0.0,-0.0,0.0,3.38,80.8 +2020-01-14 02:00:00,-8.78,0.0,-0.0,0.0,3.38,84.15 +2020-01-14 03:00:00,-9.06,0.0,-0.0,0.0,3.31,80.7 +2020-01-14 04:00:00,-9.3,0.0,-0.0,0.0,3.17,84.1 +2020-01-14 05:00:00,-9.64,0.0,-0.0,0.0,3.03,80.65 +2020-01-14 06:00:00,-10.03,0.0,-0.0,0.0,2.9,80.55 +2020-01-14 07:00:00,-8.75,0.0,0.0,0.0,2.9,84.15 +2020-01-14 08:00:00,-8.49,65.0,102.95,52.0,2.69,80.8 +2020-01-14 09:00:00,-7.14,99.0,36.23,91.0,2.34,77.7 +2020-01-14 10:00:00,-5.38,137.0,46.25,124.0,2.21,74.8 +2020-01-14 11:00:00,-3.6,105.0,3.3,104.0,1.93,72.1 +2020-01-14 12:00:00,-2.09,105.0,7.02,103.0,1.72,72.3 +2020-01-14 13:00:00,-1.34,98.0,30.69,91.0,1.59,75.35 +2020-01-14 14:00:00,-1.21,55.0,36.62,50.0,1.45,78.45 +2020-01-14 15:00:00,-2.03,0.0,0.0,0.0,1.72,81.55 +2020-01-14 16:00:00,-2.98,0.0,-0.0,0.0,1.86,81.5 +2020-01-14 17:00:00,-3.57,0.0,-0.0,0.0,1.93,81.45 +2020-01-14 18:00:00,-3.8,0.0,-0.0,0.0,2.0,81.4 +2020-01-14 19:00:00,-3.83,0.0,-0.0,0.0,2.07,84.7 +2020-01-14 20:00:00,-3.81,0.0,-0.0,0.0,2.14,88.2 +2020-01-14 21:00:00,-3.32,0.0,-0.0,0.0,2.14,88.25 +2020-01-14 22:00:00,-2.55,0.0,-0.0,0.0,2.07,91.85 +2020-01-14 23:00:00,-1.48,0.0,-0.0,0.0,2.07,91.9 +2020-01-15 00:00:00,-0.63,0.0,-0.0,0.0,2.14,91.95 +2020-01-15 01:00:00,-0.19,0.0,-0.0,0.0,2.14,95.6 +2020-01-15 02:00:00,0.06,0.0,-0.0,0.0,2.21,95.6 +2020-01-15 03:00:00,0.21,0.0,-0.0,0.0,2.28,95.6 +2020-01-15 04:00:00,0.3,0.0,-0.0,0.0,2.28,99.4 +2020-01-15 05:00:00,0.37,0.0,-0.0,0.0,2.14,95.65 +2020-01-15 06:00:00,0.41,0.0,-0.0,0.0,2.07,95.65 +2020-01-15 07:00:00,0.36,0.0,0.0,0.0,2.07,95.65 +2020-01-15 08:00:00,0.49,49.0,31.15,45.0,1.93,95.65 +2020-01-15 09:00:00,0.74,63.0,0.0,63.0,1.72,99.4 +2020-01-15 10:00:00,1.02,53.0,0.0,53.0,1.93,95.65 +2020-01-15 11:00:00,1.17,67.0,0.0,67.0,1.79,99.35 +2020-01-15 
12:00:00,1.28,65.0,0.0,65.0,1.86,99.35 +2020-01-15 13:00:00,1.33,73.0,4.32,72.0,1.86,99.35 +2020-01-15 14:00:00,1.14,35.0,0.0,35.0,1.72,99.35 +2020-01-15 15:00:00,0.87,0.0,0.0,0.0,1.52,99.35 +2020-01-15 16:00:00,0.7,0.0,-0.0,0.0,1.38,99.4 +2020-01-15 17:00:00,0.51,0.0,-0.0,0.0,1.24,99.4 +2020-01-15 18:00:00,0.33,0.0,-0.0,0.0,1.24,100.0 +2020-01-15 19:00:00,0.23,0.0,-0.0,0.0,1.31,99.4 +2020-01-15 20:00:00,-0.15,0.0,-0.0,0.0,1.24,95.6 +2020-01-15 21:00:00,-0.34,0.0,-0.0,0.0,1.1,99.4 +2020-01-15 22:00:00,-0.65,0.0,-0.0,0.0,1.03,95.6 +2020-01-15 23:00:00,-0.77,0.0,-0.0,0.0,0.9,99.4 +2020-01-16 00:00:00,-0.93,0.0,-0.0,0.0,0.9,99.4 +2020-01-16 01:00:00,-0.69,0.0,-0.0,0.0,0.69,99.4 +2020-01-16 02:00:00,-0.19,0.0,-0.0,0.0,0.48,99.4 +2020-01-16 03:00:00,-0.28,0.0,-0.0,0.0,0.48,95.6 +2020-01-16 04:00:00,-0.98,0.0,-0.0,0.0,0.9,95.6 +2020-01-16 05:00:00,-1.37,0.0,-0.0,0.0,1.1,99.4 +2020-01-16 06:00:00,-1.77,0.0,-0.0,0.0,1.24,99.4 +2020-01-16 07:00:00,-1.22,0.0,0.0,0.0,0.97,99.4 +2020-01-16 08:00:00,-1.46,66.0,99.5,53.0,0.97,99.4 +2020-01-16 09:00:00,-1.22,92.0,22.14,87.0,1.45,99.4 +2020-01-16 10:00:00,-0.53,142.0,52.33,127.0,1.66,95.6 +2020-01-16 11:00:00,0.0,229.0,288.03,140.0,1.79,92.0 +2020-01-16 12:00:00,0.14,222.0,329.51,126.0,1.79,92.0 +2020-01-16 13:00:00,0.31,166.0,272.48,102.0,2.0,88.45 +2020-01-16 14:00:00,0.28,76.0,118.49,59.0,2.14,88.45 +2020-01-16 15:00:00,-0.24,0.0,0.0,0.0,1.86,91.95 +2020-01-16 16:00:00,-0.59,0.0,-0.0,0.0,2.14,88.45 +2020-01-16 17:00:00,-0.85,0.0,-0.0,0.0,2.41,91.95 +2020-01-16 18:00:00,-1.07,0.0,-0.0,0.0,2.69,88.4 +2020-01-16 19:00:00,-1.06,0.0,-0.0,0.0,2.62,91.95 +2020-01-16 20:00:00,-1.11,0.0,-0.0,0.0,2.83,91.95 +2020-01-16 21:00:00,-1.54,0.0,-0.0,0.0,3.03,91.9 +2020-01-16 22:00:00,-1.36,0.0,-0.0,0.0,3.17,91.9 +2020-01-16 23:00:00,-1.71,0.0,-0.0,0.0,3.03,95.55 +2020-01-17 00:00:00,-1.88,0.0,-0.0,0.0,2.97,91.85 +2020-01-17 01:00:00,-2.05,0.0,-0.0,0.0,2.9,91.85 +2020-01-17 02:00:00,-2.15,0.0,-0.0,0.0,2.83,91.85 +2020-01-17 03:00:00,-2.22,0.0,-0.0,0.0,2.83,95.55 +2020-01-17 04:00:00,-2.27,0.0,-0.0,0.0,2.83,95.55 +2020-01-17 05:00:00,-2.23,0.0,-0.0,0.0,2.83,95.55 +2020-01-17 06:00:00,-2.24,0.0,-0.0,0.0,2.9,95.55 +2020-01-17 07:00:00,-1.73,0.0,0.0,0.0,2.83,91.85 +2020-01-17 08:00:00,-1.85,28.0,0.0,28.0,2.62,91.85 +2020-01-17 09:00:00,-1.18,108.0,52.52,96.0,2.41,88.35 +2020-01-17 10:00:00,-0.12,182.0,162.29,135.0,2.21,81.8 +2020-01-17 11:00:00,0.96,236.0,323.51,135.0,2.55,78.8 +2020-01-17 12:00:00,1.66,65.0,0.0,65.0,2.48,82.0 +2020-01-17 13:00:00,1.92,28.0,0.0,28.0,2.21,82.05 +2020-01-17 14:00:00,1.9,68.0,67.99,58.0,2.07,82.05 +2020-01-17 15:00:00,1.63,0.0,0.0,0.0,2.14,85.25 +2020-01-17 16:00:00,1.53,0.0,-0.0,0.0,2.21,85.25 +2020-01-17 17:00:00,1.54,0.0,-0.0,0.0,2.34,85.25 +2020-01-17 18:00:00,1.57,0.0,-0.0,0.0,2.41,85.25 +2020-01-17 19:00:00,1.86,0.0,-0.0,0.0,2.69,85.3 +2020-01-17 20:00:00,1.9,0.0,-0.0,0.0,2.69,85.3 +2020-01-17 21:00:00,2.04,0.0,-0.0,0.0,2.55,88.65 +2020-01-17 22:00:00,2.12,0.0,-0.0,0.0,2.55,88.65 +2020-01-17 23:00:00,1.93,0.0,-0.0,0.0,2.41,88.65 +2020-01-18 00:00:00,1.72,0.0,-0.0,0.0,2.34,88.6 +2020-01-18 01:00:00,1.96,0.0,-0.0,0.0,2.62,88.65 +2020-01-18 02:00:00,2.23,0.0,-0.0,0.0,2.69,88.65 +2020-01-18 03:00:00,2.15,0.0,-0.0,0.0,2.41,88.65 +2020-01-18 04:00:00,1.77,0.0,-0.0,0.0,2.07,92.05 +2020-01-18 05:00:00,1.34,0.0,-0.0,0.0,2.21,95.65 +2020-01-18 06:00:00,1.01,0.0,-0.0,0.0,2.41,92.05 +2020-01-18 07:00:00,0.93,0.0,0.0,0.0,2.48,92.05 +2020-01-18 08:00:00,1.22,59.0,51.66,52.0,2.9,92.05 +2020-01-18 09:00:00,1.83,97.0,25.94,91.0,3.31,92.05 +2020-01-18 
10:00:00,2.73,257.0,574.05,89.0,3.52,88.65 +2020-01-18 11:00:00,3.31,263.0,462.75,117.0,3.79,88.7 +2020-01-18 12:00:00,3.74,242.0,432.53,113.0,4.07,85.45 +2020-01-18 13:00:00,3.83,209.0,574.25,70.0,4.07,85.45 +2020-01-18 14:00:00,3.72,56.0,19.9,53.0,4.0,85.45 +2020-01-18 15:00:00,3.26,6.0,0.0,6.0,3.93,88.7 +2020-01-18 16:00:00,3.01,0.0,-0.0,0.0,3.93,85.4 +2020-01-18 17:00:00,3.19,0.0,-0.0,0.0,4.14,85.4 +2020-01-18 18:00:00,3.58,0.0,-0.0,0.0,4.76,82.25 +2020-01-18 19:00:00,2.97,0.0,-0.0,0.0,4.21,85.4 +2020-01-18 20:00:00,3.0,0.0,-0.0,0.0,4.34,85.4 +2020-01-18 21:00:00,2.97,0.0,-0.0,0.0,4.69,85.4 +2020-01-18 22:00:00,2.95,0.0,-0.0,0.0,5.24,85.4 +2020-01-18 23:00:00,2.84,0.0,-0.0,0.0,5.38,88.65 +2020-01-19 00:00:00,2.54,0.0,-0.0,0.0,5.38,88.65 +2020-01-19 01:00:00,2.26,0.0,-0.0,0.0,5.31,88.65 +2020-01-19 02:00:00,2.01,0.0,-0.0,0.0,4.97,85.3 +2020-01-19 03:00:00,1.67,0.0,-0.0,0.0,4.55,85.25 +2020-01-19 04:00:00,1.09,0.0,-0.0,0.0,4.14,85.2 +2020-01-19 05:00:00,0.72,0.0,-0.0,0.0,3.59,88.5 +2020-01-19 06:00:00,0.34,0.0,-0.0,0.0,2.97,85.15 +2020-01-19 07:00:00,0.36,0.0,0.0,0.0,2.34,88.5 +2020-01-19 08:00:00,0.16,102.0,405.52,46.0,2.21,88.45 +2020-01-19 09:00:00,1.05,167.0,281.87,101.0,2.55,88.55 +2020-01-19 10:00:00,2.27,281.0,720.02,68.0,3.31,85.3 +2020-01-19 11:00:00,2.67,295.0,649.08,88.0,4.34,79.0 +2020-01-19 12:00:00,2.86,192.0,162.34,143.0,4.62,79.0 +2020-01-19 13:00:00,2.97,110.0,36.62,101.0,5.17,76.1 +2020-01-19 14:00:00,2.7,41.0,0.0,41.0,5.1,79.0 +2020-01-19 15:00:00,2.13,1.0,0.0,1.0,5.31,85.3 +2020-01-19 16:00:00,2.19,0.0,-0.0,0.0,5.72,85.3 +2020-01-19 17:00:00,2.54,0.0,-0.0,0.0,6.14,82.15 +2020-01-19 18:00:00,2.7,0.0,-0.0,0.0,5.86,79.0 +2020-01-19 19:00:00,2.93,0.0,-0.0,0.0,4.9,76.1 +2020-01-19 20:00:00,3.08,0.0,-0.0,0.0,4.55,76.1 +2020-01-19 21:00:00,2.65,0.0,-0.0,0.0,4.41,79.0 +2020-01-19 22:00:00,2.75,0.0,-0.0,0.0,4.14,79.0 +2020-01-19 23:00:00,3.05,0.0,-0.0,0.0,3.86,79.1 +2020-01-20 00:00:00,2.83,0.0,-0.0,0.0,3.72,82.15 +2020-01-20 01:00:00,2.65,0.0,-0.0,0.0,3.66,85.35 +2020-01-20 02:00:00,2.61,0.0,-0.0,0.0,3.66,85.35 +2020-01-20 03:00:00,2.79,0.0,-0.0,0.0,3.59,85.35 +2020-01-20 04:00:00,2.81,0.0,-0.0,0.0,3.31,85.35 +2020-01-20 05:00:00,2.77,0.0,-0.0,0.0,2.9,85.35 +2020-01-20 06:00:00,2.79,0.0,-0.0,0.0,2.55,88.65 +2020-01-20 07:00:00,2.97,0.0,0.0,0.0,2.34,88.7 +2020-01-20 08:00:00,2.84,51.0,21.31,48.0,2.07,95.7 +2020-01-20 09:00:00,2.97,73.0,0.0,73.0,1.86,92.15 +2020-01-20 10:00:00,3.55,25.0,0.0,25.0,1.66,92.15 +2020-01-20 11:00:00,4.79,210.0,179.89,152.0,1.52,92.25 +2020-01-20 12:00:00,4.81,165.0,81.82,140.0,1.45,92.25 +2020-01-20 13:00:00,4.78,123.0,60.1,108.0,1.31,92.25 +2020-01-20 14:00:00,4.95,62.0,25.26,58.0,1.31,88.85 +2020-01-20 15:00:00,3.84,10.0,0.0,10.0,1.38,95.7 +2020-01-20 16:00:00,1.55,0.0,-0.0,0.0,1.86,95.65 +2020-01-20 17:00:00,0.03,0.0,-0.0,0.0,1.93,95.6 +2020-01-20 18:00:00,-0.2,0.0,-0.0,0.0,1.52,99.4 +2020-01-20 19:00:00,-1.86,0.0,-0.0,0.0,1.72,99.4 +2020-01-20 20:00:00,-1.73,0.0,-0.0,0.0,1.72,99.4 +2020-01-20 21:00:00,-1.45,0.0,-0.0,0.0,1.59,99.4 +2020-01-20 22:00:00,-1.29,0.0,-0.0,0.0,1.52,99.4 +2020-01-20 23:00:00,-1.29,0.0,-0.0,0.0,1.38,99.4 +2020-01-21 00:00:00,-1.4,0.0,-0.0,0.0,1.24,99.4 +2020-01-21 01:00:00,-1.53,0.0,-0.0,0.0,1.17,95.6 +2020-01-21 02:00:00,-1.61,0.0,-0.0,0.0,1.1,95.6 +2020-01-21 03:00:00,-1.68,0.0,-0.0,0.0,1.03,99.4 +2020-01-21 04:00:00,-1.43,0.0,-0.0,0.0,0.83,99.4 +2020-01-21 05:00:00,-1.6,0.0,-0.0,0.0,0.83,95.6 +2020-01-21 06:00:00,-1.12,0.0,-0.0,0.0,0.62,95.6 +2020-01-21 07:00:00,0.08,0.0,0.0,0.0,0.55,95.6 +2020-01-21 
08:00:00,0.36,72.0,97.48,58.0,0.55,95.65 +2020-01-21 09:00:00,0.43,63.0,0.0,63.0,0.97,99.4 +2020-01-21 10:00:00,1.21,132.0,26.45,124.0,1.17,99.35 +2020-01-21 11:00:00,1.36,102.0,0.0,102.0,1.31,95.65 +2020-01-21 12:00:00,1.66,81.0,0.0,81.0,1.38,99.4 +2020-01-21 13:00:00,1.54,77.0,0.0,77.0,1.45,99.4 +2020-01-21 14:00:00,1.4,40.0,0.0,40.0,1.38,99.4 +2020-01-21 15:00:00,1.14,9.0,0.0,9.0,1.38,99.35 +2020-01-21 16:00:00,0.98,0.0,-0.0,0.0,1.59,99.35 +2020-01-21 17:00:00,0.88,0.0,-0.0,0.0,1.66,99.35 +2020-01-21 18:00:00,0.82,0.0,-0.0,0.0,1.86,100.0 +2020-01-21 19:00:00,0.93,0.0,-0.0,0.0,1.79,95.65 +2020-01-21 20:00:00,0.81,0.0,-0.0,0.0,1.79,99.4 +2020-01-21 21:00:00,0.76,0.0,-0.0,0.0,1.86,99.4 +2020-01-21 22:00:00,0.66,0.0,-0.0,0.0,1.93,95.65 +2020-01-21 23:00:00,0.41,0.0,-0.0,0.0,2.07,95.65 +2020-01-22 00:00:00,0.33,0.0,-0.0,0.0,2.07,99.4 +2020-01-22 01:00:00,-0.07,0.0,-0.0,0.0,2.21,95.6 +2020-01-22 02:00:00,-0.09,0.0,-0.0,0.0,2.28,95.6 +2020-01-22 03:00:00,-0.23,0.0,-0.0,0.0,2.21,95.6 +2020-01-22 04:00:00,-0.26,0.0,-0.0,0.0,2.14,95.6 +2020-01-22 05:00:00,-0.07,0.0,-0.0,0.0,2.14,92.0 +2020-01-22 06:00:00,-0.34,0.0,-0.0,0.0,2.0,95.6 +2020-01-22 07:00:00,-0.89,0.0,0.0,0.0,1.59,95.6 +2020-01-22 08:00:00,-1.32,113.0,470.81,44.0,1.45,95.6 +2020-01-22 09:00:00,-0.54,170.0,254.64,108.0,1.31,91.95 +2020-01-22 10:00:00,1.46,303.0,794.21,60.0,1.86,85.25 +2020-01-22 11:00:00,2.75,329.0,806.7,63.0,2.0,79.0 +2020-01-22 12:00:00,3.42,303.0,747.15,69.0,2.28,76.15 +2020-01-22 13:00:00,2.83,242.0,722.49,56.0,2.55,82.15 +2020-01-22 14:00:00,2.37,123.0,384.85,59.0,2.48,82.15 +2020-01-22 15:00:00,1.29,22.0,239.32,11.0,2.76,88.55 +2020-01-22 16:00:00,0.17,0.0,-0.0,0.0,3.1,92.0 +2020-01-22 17:00:00,-0.15,0.0,-0.0,0.0,3.24,88.45 +2020-01-22 18:00:00,-0.28,0.0,-0.0,0.0,3.45,91.95 +2020-01-22 19:00:00,0.27,0.0,-0.0,0.0,3.45,88.45 +2020-01-22 20:00:00,0.37,0.0,-0.0,0.0,3.72,85.15 +2020-01-22 21:00:00,0.06,0.0,-0.0,0.0,3.86,88.45 +2020-01-22 22:00:00,-0.23,0.0,-0.0,0.0,4.0,88.45 +2020-01-22 23:00:00,-0.35,0.0,-0.0,0.0,4.0,88.45 +2020-01-23 00:00:00,-0.38,0.0,-0.0,0.0,4.0,88.45 +2020-01-23 01:00:00,-1.02,0.0,-0.0,0.0,4.07,88.4 +2020-01-23 02:00:00,-0.88,0.0,-0.0,0.0,4.07,91.95 +2020-01-23 03:00:00,-0.94,0.0,-0.0,0.0,4.0,91.95 +2020-01-23 04:00:00,-1.05,0.0,-0.0,0.0,4.0,91.95 +2020-01-23 05:00:00,-1.35,0.0,-0.0,0.0,4.21,91.9 +2020-01-23 06:00:00,-1.49,0.0,-0.0,0.0,4.41,91.9 +2020-01-23 07:00:00,-1.66,0.0,0.0,0.0,4.69,88.35 +2020-01-23 08:00:00,-1.4,126.0,601.62,36.0,4.69,91.9 +2020-01-23 09:00:00,-0.92,225.0,668.53,60.0,5.24,88.4 +2020-01-23 10:00:00,-0.24,174.0,93.68,145.0,5.66,88.45 +2020-01-23 11:00:00,0.52,179.0,71.96,155.0,5.72,85.15 +2020-01-23 12:00:00,1.15,162.0,59.91,143.0,5.93,85.2 +2020-01-23 13:00:00,1.7,55.0,0.0,55.0,5.86,85.25 +2020-01-23 14:00:00,1.95,52.0,5.87,51.0,6.07,82.05 +2020-01-23 15:00:00,2.07,14.0,20.01,13.0,5.79,82.05 +2020-01-23 16:00:00,1.97,0.0,-0.0,0.0,5.72,85.3 +2020-01-23 17:00:00,2.06,0.0,-0.0,0.0,5.93,88.65 +2020-01-23 18:00:00,2.3,0.0,-0.0,0.0,5.79,88.65 +2020-01-23 19:00:00,1.91,0.0,-0.0,0.0,5.24,85.3 +2020-01-23 20:00:00,2.08,0.0,-0.0,0.0,4.41,85.3 +2020-01-23 21:00:00,2.54,0.0,-0.0,0.0,3.79,85.35 +2020-01-23 22:00:00,3.39,0.0,-0.0,0.0,3.93,82.25 +2020-01-23 23:00:00,3.71,0.0,-0.0,0.0,4.48,82.25 +2020-01-24 00:00:00,3.46,0.0,-0.0,0.0,4.69,79.15 +2020-01-24 01:00:00,3.74,0.0,-0.0,0.0,4.76,82.25 +2020-01-24 02:00:00,3.44,0.0,-0.0,0.0,4.9,82.25 +2020-01-24 03:00:00,3.23,0.0,-0.0,0.0,4.97,85.4 +2020-01-24 04:00:00,3.08,0.0,-0.0,0.0,4.97,85.4 +2020-01-24 05:00:00,2.84,0.0,-0.0,0.0,4.9,85.35 
+2020-01-24 06:00:00,2.53,0.0,-0.0,0.0,4.76,82.15 +2020-01-24 07:00:00,2.48,6.0,0.0,6.0,3.79,85.35 +2020-01-24 08:00:00,2.52,59.0,26.19,55.0,3.45,85.35 +2020-01-24 09:00:00,2.84,75.0,0.0,75.0,3.31,85.35 +2020-01-24 10:00:00,3.35,137.0,25.54,129.0,3.72,82.2 +2020-01-24 11:00:00,3.53,327.0,752.74,73.0,3.38,76.15 +2020-01-24 12:00:00,3.94,312.0,769.0,65.0,3.45,70.6 +2020-01-24 13:00:00,3.89,214.0,440.47,97.0,2.83,67.9 +2020-01-24 14:00:00,3.73,143.0,555.84,46.0,2.0,70.5 +2020-01-24 15:00:00,3.11,22.0,111.0,16.0,1.45,76.1 +2020-01-24 16:00:00,2.12,0.0,-0.0,0.0,1.52,78.95 +2020-01-24 17:00:00,1.09,0.0,-0.0,0.0,1.45,85.2 +2020-01-24 18:00:00,0.78,0.0,-0.0,0.0,1.24,88.5 +2020-01-24 19:00:00,0.59,0.0,-0.0,0.0,1.31,85.15 +2020-01-24 20:00:00,-0.27,0.0,-0.0,0.0,1.45,88.45 +2020-01-24 21:00:00,-0.85,0.0,-0.0,0.0,1.45,88.4 +2020-01-24 22:00:00,-1.28,0.0,-0.0,0.0,1.52,88.35 +2020-01-24 23:00:00,-1.34,0.0,-0.0,0.0,1.59,88.35 +2020-01-25 00:00:00,-1.58,0.0,-0.0,0.0,1.72,88.35 +2020-01-25 01:00:00,-1.69,0.0,-0.0,0.0,1.79,91.85 +2020-01-25 02:00:00,-1.39,0.0,-0.0,0.0,1.72,88.35 +2020-01-25 03:00:00,-1.05,0.0,-0.0,0.0,1.66,91.95 +2020-01-25 04:00:00,-0.42,0.0,-0.0,0.0,1.52,91.95 +2020-01-25 05:00:00,0.0,0.0,-0.0,0.0,1.38,95.6 +2020-01-25 06:00:00,0.19,0.0,-0.0,0.0,1.24,95.6 +2020-01-25 07:00:00,0.25,4.0,0.0,4.0,1.31,95.6 +2020-01-25 08:00:00,0.95,45.0,0.0,45.0,1.1,95.65 +2020-01-25 09:00:00,1.79,149.0,126.1,117.0,1.45,95.65 +2020-01-25 10:00:00,2.33,170.0,72.55,147.0,1.45,92.1 +2020-01-25 11:00:00,2.39,189.0,82.01,161.0,1.1,88.65 +2020-01-25 12:00:00,2.84,183.0,92.21,153.0,0.76,88.65 +2020-01-25 13:00:00,2.99,165.0,148.24,125.0,0.76,85.4 +2020-01-25 14:00:00,2.97,118.0,251.79,73.0,0.97,85.4 +2020-01-25 15:00:00,2.41,34.0,343.68,14.0,1.31,88.65 +2020-01-25 16:00:00,1.31,0.0,-0.0,0.0,1.59,95.65 +2020-01-25 17:00:00,0.59,0.0,-0.0,0.0,1.59,95.65 +2020-01-25 18:00:00,-0.24,0.0,-0.0,0.0,1.66,99.4 +2020-01-25 19:00:00,-0.68,0.0,-0.0,0.0,2.0,99.4 +2020-01-25 20:00:00,-1.2,0.0,-0.0,0.0,2.0,99.4 +2020-01-25 21:00:00,-1.72,0.0,-0.0,0.0,2.0,95.55 +2020-01-25 22:00:00,-2.03,0.0,-0.0,0.0,1.93,95.55 +2020-01-25 23:00:00,-2.11,0.0,-0.0,0.0,1.86,91.85 +2020-01-26 00:00:00,-2.03,0.0,-0.0,0.0,1.79,91.85 +2020-01-26 01:00:00,-2.36,0.0,-0.0,0.0,1.79,95.55 +2020-01-26 02:00:00,-2.57,0.0,-0.0,0.0,1.86,95.55 +2020-01-26 03:00:00,-2.4,0.0,-0.0,0.0,1.79,95.55 +2020-01-26 04:00:00,-2.41,0.0,-0.0,0.0,1.86,95.55 +2020-01-26 05:00:00,-2.36,0.0,-0.0,0.0,1.93,95.55 +2020-01-26 06:00:00,-2.6,0.0,-0.0,0.0,1.93,95.55 +2020-01-26 07:00:00,-2.9,7.0,0.0,7.0,1.93,91.85 +2020-01-26 08:00:00,-2.05,124.0,464.32,50.0,1.66,91.85 +2020-01-26 09:00:00,0.46,195.0,345.75,106.0,1.59,85.15 +2020-01-26 10:00:00,1.94,317.0,788.42,64.0,2.0,78.95 +2020-01-26 11:00:00,2.79,335.0,746.75,77.0,2.14,76.0 +2020-01-26 12:00:00,3.14,309.0,676.7,86.0,2.28,76.1 +2020-01-26 13:00:00,3.29,255.0,700.44,63.0,2.34,76.1 +2020-01-26 14:00:00,2.96,127.0,306.02,71.0,1.93,79.1 +2020-01-26 15:00:00,2.17,30.0,176.3,19.0,1.93,85.3 +2020-01-26 16:00:00,1.17,0.0,-0.0,0.0,1.93,92.05 +2020-01-26 17:00:00,0.47,0.0,-0.0,0.0,1.79,92.0 +2020-01-26 18:00:00,-0.36,0.0,-0.0,0.0,1.79,95.6 +2020-01-26 19:00:00,-0.86,0.0,-0.0,0.0,1.45,95.6 +2020-01-26 20:00:00,-1.47,0.0,-0.0,0.0,1.45,95.6 +2020-01-26 21:00:00,-1.5,0.0,-0.0,0.0,1.38,95.6 +2020-01-26 22:00:00,-1.58,0.0,-0.0,0.0,1.31,95.6 +2020-01-26 23:00:00,-1.26,0.0,-0.0,0.0,1.17,95.6 +2020-01-27 00:00:00,-1.07,0.0,-0.0,0.0,1.03,91.95 +2020-01-27 01:00:00,-1.0,0.0,-0.0,0.0,0.97,91.95 +2020-01-27 02:00:00,-0.94,0.0,-0.0,0.0,0.9,91.95 +2020-01-27 
03:00:00,-0.92,0.0,-0.0,0.0,0.83,95.6 +2020-01-27 04:00:00,-1.02,0.0,-0.0,0.0,0.9,95.6 +2020-01-27 05:00:00,-0.93,0.0,-0.0,0.0,0.9,95.6 +2020-01-27 06:00:00,-0.89,0.0,-0.0,0.0,0.97,95.6 +2020-01-27 07:00:00,-1.24,8.0,0.0,8.0,0.69,95.6 +2020-01-27 08:00:00,-1.05,88.0,116.68,69.0,0.69,91.95 +2020-01-27 09:00:00,-0.01,61.0,0.0,61.0,0.9,88.45 +2020-01-27 10:00:00,0.82,134.0,15.39,129.0,1.1,88.5 +2020-01-27 11:00:00,1.34,170.0,42.9,155.0,1.59,88.55 +2020-01-27 12:00:00,1.54,110.0,3.0,109.0,2.14,85.25 +2020-01-27 13:00:00,1.37,77.0,0.0,77.0,2.28,85.25 +2020-01-27 14:00:00,1.27,49.0,0.0,49.0,2.21,92.05 +2020-01-27 15:00:00,0.89,15.0,0.0,15.0,1.93,92.05 +2020-01-27 16:00:00,0.35,0.0,-0.0,0.0,1.72,95.65 +2020-01-27 17:00:00,0.0,0.0,-0.0,0.0,2.07,95.6 +2020-01-27 18:00:00,-0.3,0.0,-0.0,0.0,2.28,99.4 +2020-01-27 19:00:00,-0.78,0.0,-0.0,0.0,2.62,99.4 +2020-01-27 20:00:00,-1.13,0.0,-0.0,0.0,2.76,91.95 +2020-01-27 21:00:00,-1.35,0.0,-0.0,0.0,2.41,95.6 +2020-01-27 22:00:00,-1.49,0.0,-0.0,0.0,2.0,91.9 +2020-01-27 23:00:00,-1.56,0.0,-0.0,0.0,2.07,91.9 +2020-01-28 00:00:00,-1.68,0.0,-0.0,0.0,2.14,95.55 +2020-01-28 01:00:00,-1.87,0.0,-0.0,0.0,2.14,91.85 +2020-01-28 02:00:00,-1.92,0.0,-0.0,0.0,1.93,91.85 +2020-01-28 03:00:00,-1.91,0.0,-0.0,0.0,1.86,91.85 +2020-01-28 04:00:00,-1.93,0.0,-0.0,0.0,1.86,91.85 +2020-01-28 05:00:00,-1.89,0.0,-0.0,0.0,1.79,91.85 +2020-01-28 06:00:00,-1.9,0.0,-0.0,0.0,1.66,91.85 +2020-01-28 07:00:00,-1.71,10.0,0.0,10.0,1.79,95.55 +2020-01-28 08:00:00,-1.46,66.0,24.04,62.0,1.86,91.9 +2020-01-28 09:00:00,-1.11,151.0,105.67,123.0,2.14,88.4 +2020-01-28 10:00:00,-0.55,274.0,434.74,131.0,2.34,88.45 +2020-01-28 11:00:00,0.07,69.0,0.0,69.0,2.48,85.1 +2020-01-28 12:00:00,0.49,126.0,5.91,124.0,2.48,78.75 +2020-01-28 13:00:00,0.72,176.0,152.0,133.0,2.41,78.75 +2020-01-28 14:00:00,0.72,109.0,130.39,84.0,2.28,75.7 +2020-01-28 15:00:00,0.52,42.0,310.04,20.0,2.14,75.7 +2020-01-28 16:00:00,-0.49,0.0,-0.0,0.0,1.86,81.75 +2020-01-28 17:00:00,-1.5,0.0,-0.0,0.0,1.79,88.35 +2020-01-28 18:00:00,-2.45,0.0,-0.0,0.0,1.45,95.55 +2020-01-28 19:00:00,-3.38,0.0,-0.0,0.0,1.86,95.55 +2020-01-28 20:00:00,-3.29,0.0,-0.0,0.0,2.0,99.4 +2020-01-28 21:00:00,-2.96,0.0,-0.0,0.0,2.21,95.55 +2020-01-28 22:00:00,-2.65,0.0,-0.0,0.0,2.48,95.55 +2020-01-28 23:00:00,-2.52,0.0,-0.0,0.0,2.41,95.55 +2020-01-29 00:00:00,-2.61,0.0,-0.0,0.0,2.48,95.55 +2020-01-29 01:00:00,-2.8,0.0,-0.0,0.0,2.21,91.85 +2020-01-29 02:00:00,-2.73,0.0,-0.0,0.0,1.86,95.55 +2020-01-29 03:00:00,-2.56,0.0,-0.0,0.0,1.79,95.55 +2020-01-29 04:00:00,-2.43,0.0,-0.0,0.0,1.93,95.55 +2020-01-29 05:00:00,-2.28,0.0,-0.0,0.0,2.07,95.55 +2020-01-29 06:00:00,-2.17,0.0,-0.0,0.0,2.0,95.55 +2020-01-29 07:00:00,-2.07,5.0,0.0,5.0,2.48,95.55 +2020-01-29 08:00:00,-1.84,69.0,29.4,64.0,3.31,91.85 +2020-01-29 09:00:00,-1.65,59.0,0.0,59.0,3.66,84.95 +2020-01-29 10:00:00,-1.24,61.0,0.0,61.0,3.17,84.95 +2020-01-29 11:00:00,-0.58,65.0,0.0,65.0,2.97,78.6 +2020-01-29 12:00:00,-0.33,56.0,0.0,56.0,2.76,78.6 +2020-01-29 13:00:00,-0.26,48.0,0.0,48.0,2.62,78.6 +2020-01-29 14:00:00,-0.33,33.0,0.0,33.0,2.55,81.75 +2020-01-29 15:00:00,-0.71,16.0,0.0,16.0,2.14,85.0 +2020-01-29 16:00:00,-1.22,0.0,-0.0,0.0,2.0,88.35 +2020-01-29 17:00:00,-1.5,0.0,-0.0,0.0,2.07,88.35 +2020-01-29 18:00:00,-1.69,0.0,-0.0,0.0,2.07,91.85 +2020-01-29 19:00:00,-2.23,0.0,-0.0,0.0,2.83,91.85 +2020-01-29 20:00:00,-2.27,0.0,-0.0,0.0,2.55,91.85 +2020-01-29 21:00:00,-2.37,0.0,-0.0,0.0,2.28,91.85 +2020-01-29 22:00:00,-2.53,0.0,-0.0,0.0,2.21,91.85 +2020-01-29 23:00:00,-2.62,0.0,-0.0,0.0,1.86,88.25 +2020-01-30 
00:00:00,-2.69,0.0,-0.0,0.0,2.21,88.25 +2020-01-30 01:00:00,-2.84,0.0,-0.0,0.0,2.14,88.25 +2020-01-30 02:00:00,-3.02,0.0,-0.0,0.0,1.86,88.25 +2020-01-30 03:00:00,-3.01,0.0,-0.0,0.0,2.0,84.85 +2020-01-30 04:00:00,-3.12,0.0,-0.0,0.0,2.14,88.25 +2020-01-30 05:00:00,-3.3,0.0,-0.0,0.0,2.28,88.25 +2020-01-30 06:00:00,-3.14,0.0,-0.0,0.0,2.41,84.85 +2020-01-30 07:00:00,-3.02,6.0,0.0,6.0,2.69,84.85 +2020-01-30 08:00:00,-2.9,71.0,28.76,66.0,2.62,81.5 +2020-01-30 09:00:00,-2.71,44.0,0.0,44.0,2.48,81.5 +2020-01-30 10:00:00,-2.3,77.0,0.0,77.0,2.48,81.5 +2020-01-30 11:00:00,-2.08,88.0,0.0,88.0,2.41,75.3 +2020-01-30 12:00:00,-1.72,199.0,95.06,166.0,2.34,78.35 +2020-01-30 13:00:00,-1.53,56.0,0.0,56.0,2.28,75.35 +2020-01-30 14:00:00,-1.55,44.0,0.0,44.0,2.21,75.35 +2020-01-30 15:00:00,-1.62,20.0,0.0,20.0,2.14,78.45 +2020-01-30 16:00:00,-1.86,0.0,-0.0,0.0,1.93,81.55 +2020-01-30 17:00:00,-2.03,0.0,-0.0,0.0,1.72,84.85 +2020-01-30 18:00:00,-2.28,0.0,-0.0,0.0,1.52,88.25 +2020-01-30 19:00:00,-2.33,0.0,-0.0,0.0,2.28,88.25 +2020-01-30 20:00:00,-2.32,0.0,-0.0,0.0,2.0,91.85 +2020-01-30 21:00:00,-2.24,0.0,-0.0,0.0,1.24,95.55 +2020-01-30 22:00:00,-2.16,0.0,-0.0,0.0,0.97,91.85 +2020-01-30 23:00:00,-2.04,0.0,-0.0,0.0,1.03,91.85 +2020-01-31 00:00:00,-1.91,0.0,-0.0,0.0,1.38,95.55 +2020-01-31 01:00:00,-1.13,0.0,-0.0,0.0,1.59,95.6 +2020-01-31 02:00:00,-0.91,0.0,-0.0,0.0,1.79,95.6 +2020-01-31 03:00:00,-0.79,0.0,-0.0,0.0,1.93,95.6 +2020-01-31 04:00:00,-0.72,0.0,-0.0,0.0,1.93,99.4 +2020-01-31 05:00:00,-0.7,0.0,-0.0,0.0,2.28,95.6 +2020-01-31 06:00:00,-0.74,0.0,-0.0,0.0,2.41,95.6 +2020-01-31 07:00:00,-0.7,9.0,0.0,9.0,2.0,91.95 +2020-01-31 08:00:00,-0.53,83.0,56.26,73.0,3.03,85.05 +2020-01-31 09:00:00,-0.54,87.0,0.0,87.0,3.45,88.45 +2020-01-31 10:00:00,-0.16,64.0,0.0,64.0,3.52,81.75 +2020-01-31 11:00:00,-0.03,72.0,0.0,72.0,3.52,78.65 +2020-01-31 12:00:00,0.13,68.0,0.0,68.0,3.31,78.65 +2020-01-31 13:00:00,0.28,153.0,64.07,134.0,3.31,78.65 +2020-01-31 14:00:00,0.24,53.0,0.0,53.0,3.31,78.65 +2020-01-31 15:00:00,-0.3,29.0,23.75,27.0,2.55,81.75 +2020-01-31 16:00:00,-3.77,0.0,-0.0,0.0,2.26,87.1 +2020-01-31 17:00:00,-3.11,0.0,-0.0,0.0,2.32,87.42 +2020-01-31 18:00:00,-2.46,0.0,-0.0,0.0,2.39,87.74 +2020-01-31 19:00:00,-1.8,0.0,-0.0,0.0,2.46,88.05 +2020-01-31 20:00:00,-1.14,0.0,-0.0,0.0,2.52,88.37 +2020-01-31 21:00:00,-0.49,0.0,-0.0,0.0,2.59,88.68 +2020-01-31 22:00:00,0.17,0.0,-0.0,0.0,2.66,89.0 +2020-01-31 23:00:00,0.82,0.0,-0.0,0.0,2.73,89.32 +2020-02-01 00:00:00,1.48,0.0,-0.0,0.0,2.79,89.63 +2020-02-01 01:00:00,2.13,0.0,-0.0,0.0,2.86,89.95 +2020-02-01 02:00:00,2.79,0.0,-0.0,0.0,2.93,90.27 +2020-02-01 03:00:00,3.44,0.0,-0.0,0.0,2.99,90.58 +2020-02-01 04:00:00,4.1,0.0,-0.0,0.0,3.06,90.9 +2020-02-01 05:00:00,4.76,0.0,-0.0,0.0,3.13,91.21 +2020-02-01 06:00:00,5.41,0.0,-0.0,0.0,3.19,91.53 +2020-02-01 07:00:00,6.07,3.0,0.0,3.0,3.26,91.85 +2020-02-01 08:00:00,3.68,111.0,192.62,76.0,1.66,95.7 +2020-02-01 09:00:00,3.96,120.0,24.89,113.0,2.14,92.2 +2020-02-01 10:00:00,4.43,151.0,23.12,143.0,2.34,85.55 +2020-02-01 11:00:00,4.72,177.0,40.36,162.0,2.34,79.3 +2020-02-01 12:00:00,5.17,161.0,30.87,150.0,2.41,68.1 +2020-02-01 13:00:00,5.37,97.0,3.32,96.0,2.28,65.55 +2020-02-01 14:00:00,5.39,63.0,4.76,62.0,2.0,63.05 +2020-02-01 15:00:00,5.11,31.0,33.82,28.0,1.66,63.05 +2020-02-01 16:00:00,4.29,0.0,-0.0,0.0,1.86,67.9 +2020-02-01 17:00:00,3.41,0.0,-0.0,0.0,2.14,70.5 +2020-02-01 18:00:00,2.96,0.0,-0.0,0.0,2.34,73.2 +2020-02-01 19:00:00,2.13,0.0,-0.0,0.0,2.28,78.95 +2020-02-01 20:00:00,1.48,0.0,-0.0,0.0,2.48,82.0 +2020-02-01 
21:00:00,0.97,0.0,-0.0,0.0,2.76,81.95 +2020-02-01 22:00:00,0.54,0.0,-0.0,0.0,2.83,81.9 +2020-02-01 23:00:00,0.23,0.0,-0.0,0.0,2.83,81.8 +2020-02-02 00:00:00,0.02,0.0,-0.0,0.0,2.9,81.8 +2020-02-02 01:00:00,0.08,0.0,-0.0,0.0,2.9,81.8 +2020-02-02 02:00:00,0.07,0.0,-0.0,0.0,2.83,85.1 +2020-02-02 03:00:00,-0.08,0.0,-0.0,0.0,2.69,85.1 +2020-02-02 04:00:00,-0.35,0.0,-0.0,0.0,2.55,88.45 +2020-02-02 05:00:00,-0.73,0.0,-0.0,0.0,2.41,88.4 +2020-02-02 06:00:00,-1.12,0.0,-0.0,0.0,2.41,88.4 +2020-02-02 07:00:00,-1.26,31.0,241.46,17.0,2.55,88.35 +2020-02-02 08:00:00,0.05,152.0,532.92,53.0,2.62,85.1 +2020-02-02 09:00:00,1.92,259.0,637.34,77.0,3.17,78.95 +2020-02-02 10:00:00,3.24,330.0,676.07,93.0,3.59,70.4 +2020-02-02 11:00:00,4.3,357.0,677.62,102.0,3.72,60.4 +2020-02-02 12:00:00,4.89,337.0,648.02,103.0,3.93,55.95 +2020-02-02 13:00:00,5.5,151.0,52.3,135.0,4.48,46.1 +2020-02-02 14:00:00,5.45,42.0,0.0,42.0,3.86,48.0 +2020-02-02 15:00:00,5.0,10.0,0.0,10.0,3.1,51.8 +2020-02-02 16:00:00,4.41,0.0,-0.0,0.0,2.76,53.8 +2020-02-02 17:00:00,4.06,0.0,-0.0,0.0,2.55,58.05 +2020-02-02 18:00:00,3.52,0.0,-0.0,0.0,2.41,62.7 +2020-02-02 19:00:00,2.19,0.0,-0.0,0.0,2.14,73.0 +2020-02-02 20:00:00,1.33,0.0,-0.0,0.0,2.41,78.8 +2020-02-02 21:00:00,0.82,0.0,-0.0,0.0,2.55,85.15 +2020-02-02 22:00:00,0.71,0.0,-0.0,0.0,2.62,88.5 +2020-02-02 23:00:00,1.03,0.0,-0.0,0.0,2.69,85.2 +2020-02-03 00:00:00,1.38,0.0,-0.0,0.0,2.55,85.25 +2020-02-03 01:00:00,1.34,0.0,-0.0,0.0,2.41,88.55 +2020-02-03 02:00:00,1.28,0.0,-0.0,0.0,2.41,92.05 +2020-02-03 03:00:00,1.32,0.0,-0.0,0.0,2.55,92.05 +2020-02-03 04:00:00,1.3,0.0,-0.0,0.0,2.62,92.05 +2020-02-03 05:00:00,1.33,0.0,-0.0,0.0,2.76,92.05 +2020-02-03 06:00:00,1.37,0.0,-0.0,0.0,2.76,88.6 +2020-02-03 07:00:00,1.48,16.0,16.16,15.0,2.62,92.05 +2020-02-03 08:00:00,1.39,84.0,42.12,76.0,2.28,92.05 +2020-02-03 09:00:00,1.68,144.0,55.18,128.0,2.76,95.65 +2020-02-03 10:00:00,1.91,252.0,242.17,166.0,3.1,88.65 +2020-02-03 11:00:00,2.58,57.0,0.0,57.0,3.59,76.0 +2020-02-03 12:00:00,3.08,127.0,2.73,126.0,3.31,70.4 +2020-02-03 13:00:00,3.19,72.0,0.0,72.0,3.03,70.4 +2020-02-03 14:00:00,2.87,41.0,0.0,41.0,2.9,76.0 +2020-02-03 15:00:00,1.99,18.0,0.0,18.0,2.21,82.05 +2020-02-03 16:00:00,1.05,0.0,-0.0,0.0,1.79,88.55 +2020-02-03 17:00:00,0.06,0.0,-0.0,0.0,2.0,92.0 +2020-02-03 18:00:00,-0.56,0.0,-0.0,0.0,1.79,91.95 +2020-02-03 19:00:00,-1.51,0.0,-0.0,0.0,1.59,95.6 +2020-02-03 20:00:00,-2.04,0.0,-0.0,0.0,1.38,95.55 +2020-02-03 21:00:00,-2.11,0.0,-0.0,0.0,1.31,95.55 +2020-02-03 22:00:00,-2.18,0.0,-0.0,0.0,1.24,95.55 +2020-02-03 23:00:00,-1.7,0.0,-0.0,0.0,1.03,99.4 +2020-02-04 00:00:00,-1.09,0.0,-0.0,0.0,0.83,91.95 +2020-02-04 01:00:00,-0.93,0.0,-0.0,0.0,0.69,91.95 +2020-02-04 02:00:00,-0.63,0.0,-0.0,0.0,0.55,88.45 +2020-02-04 03:00:00,-0.76,0.0,-0.0,0.0,0.62,91.95 +2020-02-04 04:00:00,-1.41,0.0,-0.0,0.0,0.9,91.9 +2020-02-04 05:00:00,-1.38,0.0,-0.0,0.0,0.9,95.6 +2020-02-04 06:00:00,-1.19,0.0,-0.0,0.0,0.9,95.6 +2020-02-04 07:00:00,-0.89,20.0,15.18,19.0,1.79,99.4 +2020-02-04 08:00:00,-0.42,39.0,0.0,39.0,2.07,91.95 +2020-02-04 09:00:00,0.0,68.0,0.0,68.0,2.28,88.45 +2020-02-04 10:00:00,0.56,58.0,0.0,58.0,2.21,85.15 +2020-02-04 11:00:00,0.88,87.0,0.0,87.0,2.21,81.95 +2020-02-04 12:00:00,1.32,48.0,0.0,48.0,2.07,78.8 +2020-02-04 13:00:00,1.78,56.0,0.0,56.0,2.0,72.95 +2020-02-04 14:00:00,1.66,40.0,0.0,40.0,2.34,70.1 +2020-02-04 15:00:00,0.97,30.0,9.76,29.0,2.21,70.05 +2020-02-04 16:00:00,0.19,0.0,-0.0,0.0,2.28,75.6 +2020-02-04 17:00:00,-0.64,0.0,-0.0,0.0,2.0,75.55 +2020-02-04 18:00:00,-1.2,0.0,-0.0,0.0,1.66,81.65 +2020-02-04 
19:00:00,-1.66,0.0,-0.0,0.0,1.52,88.3 +2020-02-04 20:00:00,-1.88,0.0,-0.0,0.0,1.24,88.3 +2020-02-04 21:00:00,-1.76,0.0,-0.0,0.0,1.24,88.3 +2020-02-04 22:00:00,-1.79,0.0,-0.0,0.0,1.24,88.3 +2020-02-04 23:00:00,-1.49,0.0,-0.0,0.0,1.31,88.35 +2020-02-05 00:00:00,-1.4,0.0,-0.0,0.0,1.31,88.35 +2020-02-05 01:00:00,-1.25,0.0,-0.0,0.0,1.31,91.9 +2020-02-05 02:00:00,-1.09,0.0,-0.0,0.0,1.86,91.95 +2020-02-05 03:00:00,-1.35,0.0,-0.0,0.0,1.52,99.4 +2020-02-05 04:00:00,-1.45,0.0,-0.0,0.0,1.72,99.4 +2020-02-05 05:00:00,-1.79,0.0,-0.0,0.0,1.86,99.4 +2020-02-05 06:00:00,-2.21,0.0,-0.0,0.0,2.07,95.55 +2020-02-05 07:00:00,-2.57,22.0,28.58,20.0,1.52,91.85 +2020-02-05 08:00:00,-2.19,149.0,372.76,75.0,2.41,91.85 +2020-02-05 09:00:00,-1.26,212.0,250.86,137.0,2.55,81.65 +2020-02-05 10:00:00,-0.23,278.0,310.02,165.0,2.83,69.75 +2020-02-05 11:00:00,0.22,117.0,0.0,117.0,2.9,67.1 +2020-02-05 12:00:00,0.4,184.0,42.6,168.0,2.83,64.55 +2020-02-05 13:00:00,0.49,135.0,18.72,129.0,2.76,62.0 +2020-02-05 14:00:00,0.08,142.0,174.77,102.0,2.55,67.1 +2020-02-05 15:00:00,-0.36,35.0,18.66,33.0,2.34,69.75 +2020-02-05 16:00:00,-1.22,0.0,-0.0,0.0,2.07,75.35 +2020-02-05 17:00:00,-2.21,0.0,-0.0,0.0,2.07,78.35 +2020-02-05 18:00:00,-2.88,0.0,-0.0,0.0,2.14,78.3 +2020-02-05 19:00:00,-3.18,0.0,-0.0,0.0,2.07,81.45 +2020-02-05 20:00:00,-3.19,0.0,-0.0,0.0,2.28,72.1 +2020-02-05 21:00:00,-3.48,0.0,-0.0,0.0,2.21,69.25 +2020-02-05 22:00:00,-3.72,0.0,-0.0,0.0,2.07,69.15 +2020-02-05 23:00:00,-4.0,0.0,-0.0,0.0,2.0,66.35 +2020-02-06 00:00:00,-4.24,0.0,-0.0,0.0,1.79,69.05 +2020-02-06 01:00:00,-4.95,0.0,-0.0,0.0,1.66,68.95 +2020-02-06 02:00:00,-5.4,0.0,-0.0,0.0,1.59,71.75 +2020-02-06 03:00:00,-6.05,0.0,-0.0,0.0,1.52,74.7 +2020-02-06 04:00:00,-6.44,0.0,-0.0,0.0,1.45,77.75 +2020-02-06 05:00:00,-6.64,0.0,-0.0,0.0,1.52,77.75 +2020-02-06 06:00:00,-6.86,0.0,-0.0,0.0,1.59,81.0 +2020-02-06 07:00:00,-5.73,38.0,175.24,25.0,1.59,74.7 +2020-02-06 08:00:00,-4.23,161.0,443.47,71.0,1.66,69.05 +2020-02-06 09:00:00,-3.1,269.0,563.24,98.0,2.28,61.35 +2020-02-06 10:00:00,-1.92,342.0,617.4,114.0,2.83,58.95 +2020-02-06 11:00:00,-1.07,378.0,669.97,113.0,2.9,59.2 +2020-02-06 12:00:00,-0.34,330.0,486.13,145.0,2.9,56.95 +2020-02-06 13:00:00,0.11,290.0,565.52,106.0,2.83,57.1 +2020-02-06 14:00:00,0.24,194.0,496.31,78.0,2.76,57.1 +2020-02-06 15:00:00,-0.27,75.0,348.58,36.0,2.21,59.35 +2020-02-06 16:00:00,-1.44,0.0,-0.0,0.0,2.07,64.1 +2020-02-06 17:00:00,-2.36,0.0,-0.0,0.0,2.07,66.65 +2020-02-06 18:00:00,-2.9,0.0,-0.0,0.0,2.14,69.35 +2020-02-06 19:00:00,-3.14,0.0,-0.0,0.0,2.55,69.35 +2020-02-06 20:00:00,-3.12,0.0,-0.0,0.0,2.76,69.35 +2020-02-06 21:00:00,-3.16,0.0,-0.0,0.0,2.83,69.35 +2020-02-06 22:00:00,-3.08,0.0,-0.0,0.0,2.76,69.35 +2020-02-06 23:00:00,-3.2,0.0,-0.0,0.0,2.55,75.1 +2020-02-07 00:00:00,-3.31,0.0,-0.0,0.0,2.14,78.2 +2020-02-07 01:00:00,-3.52,0.0,-0.0,0.0,2.14,78.2 +2020-02-07 02:00:00,-3.14,0.0,-0.0,0.0,2.14,78.3 +2020-02-07 03:00:00,-3.1,0.0,-0.0,0.0,2.07,78.3 +2020-02-07 04:00:00,-2.97,0.0,-0.0,0.0,2.0,81.5 +2020-02-07 05:00:00,-2.85,0.0,-0.0,0.0,1.93,84.85 +2020-02-07 06:00:00,-2.55,0.0,-0.0,0.0,1.79,84.85 +2020-02-07 07:00:00,-3.53,36.0,114.7,27.0,1.72,84.8 +2020-02-07 08:00:00,-2.59,133.0,212.09,89.0,1.45,84.85 +2020-02-07 09:00:00,-1.09,237.0,353.54,128.0,1.45,81.7 +2020-02-07 10:00:00,0.05,307.0,416.94,151.0,1.79,72.65 +2020-02-07 11:00:00,1.18,373.0,639.19,117.0,1.31,67.3 +2020-02-07 12:00:00,1.52,346.0,570.6,126.0,1.03,67.4 +2020-02-07 13:00:00,1.59,286.0,526.73,112.0,1.66,70.1 +2020-02-07 14:00:00,1.44,156.0,217.93,104.0,2.0,70.1 +2020-02-07 
15:00:00,0.93,48.0,51.45,42.0,2.0,72.85 +2020-02-07 16:00:00,-0.25,0.0,-0.0,0.0,1.93,81.75 +2020-02-07 17:00:00,-1.05,0.0,-0.0,0.0,1.79,85.0 +2020-02-07 18:00:00,-1.56,0.0,-0.0,0.0,1.59,84.95 +2020-02-07 19:00:00,-2.24,0.0,-0.0,0.0,1.45,81.55 +2020-02-07 20:00:00,-2.8,0.0,-0.0,0.0,1.31,81.5 +2020-02-07 21:00:00,-3.41,0.0,-0.0,0.0,1.17,84.8 +2020-02-07 22:00:00,-3.83,0.0,-0.0,0.0,1.1,88.2 +2020-02-07 23:00:00,-3.93,0.0,-0.0,0.0,1.17,88.2 +2020-02-08 00:00:00,-3.56,0.0,-0.0,0.0,1.31,84.8 +2020-02-08 01:00:00,-3.62,0.0,-0.0,0.0,1.38,84.8 +2020-02-08 02:00:00,-3.64,0.0,-0.0,0.0,1.52,84.8 +2020-02-08 03:00:00,-3.5,0.0,-0.0,0.0,1.66,88.25 +2020-02-08 04:00:00,-3.54,0.0,-0.0,0.0,1.79,88.25 +2020-02-08 05:00:00,-3.43,0.0,-0.0,0.0,1.79,88.25 +2020-02-08 06:00:00,-3.36,0.0,-0.0,0.0,1.59,88.25 +2020-02-08 07:00:00,-3.52,5.0,0.0,5.0,1.38,88.25 +2020-02-08 08:00:00,-3.25,81.0,18.86,77.0,1.45,88.25 +2020-02-08 09:00:00,-2.81,106.0,3.19,105.0,1.52,81.5 +2020-02-08 10:00:00,-2.41,108.0,0.0,108.0,1.45,78.35 +2020-02-08 11:00:00,-1.85,88.0,0.0,88.0,1.38,78.35 +2020-02-08 12:00:00,-1.44,78.0,0.0,78.0,1.31,75.35 +2020-02-08 13:00:00,-1.44,98.0,0.0,98.0,1.45,72.4 +2020-02-08 14:00:00,-1.46,68.0,0.0,68.0,1.31,72.4 +2020-02-08 15:00:00,-1.76,28.0,0.0,28.0,1.45,75.3 +2020-02-08 16:00:00,-2.18,0.0,-0.0,0.0,1.38,75.3 +2020-02-08 17:00:00,-2.46,0.0,-0.0,0.0,1.31,78.35 +2020-02-08 18:00:00,-2.68,0.0,-0.0,0.0,1.03,81.5 +2020-02-08 19:00:00,-2.83,0.0,-0.0,0.0,0.76,81.5 +2020-02-08 20:00:00,-2.97,0.0,-0.0,0.0,0.48,81.5 +2020-02-08 21:00:00,-2.97,0.0,-0.0,0.0,0.55,81.5 +2020-02-08 22:00:00,-2.92,0.0,-0.0,0.0,0.55,81.5 +2020-02-08 23:00:00,-3.02,0.0,-0.0,0.0,0.62,81.5 +2020-02-09 00:00:00,-3.1,0.0,-0.0,0.0,0.83,81.5 +2020-02-09 01:00:00,-2.92,0.0,-0.0,0.0,1.1,84.85 +2020-02-09 02:00:00,-2.65,0.0,-0.0,0.0,1.17,78.35 +2020-02-09 03:00:00,-3.21,0.0,-0.0,0.0,1.31,81.45 +2020-02-09 04:00:00,-3.59,0.0,-0.0,0.0,1.45,81.45 +2020-02-09 05:00:00,-3.4,0.0,-0.0,0.0,1.59,81.45 +2020-02-09 06:00:00,-3.11,0.0,-0.0,0.0,1.72,78.3 +2020-02-09 07:00:00,-2.59,36.0,68.72,30.0,1.59,88.3 +2020-02-09 08:00:00,-1.65,162.0,369.12,82.0,1.59,81.65 +2020-02-09 09:00:00,-0.56,294.0,669.89,81.0,1.86,75.55 +2020-02-09 10:00:00,0.84,365.0,689.95,100.0,1.93,69.95 +2020-02-09 11:00:00,2.09,402.0,747.67,95.0,2.07,62.35 +2020-02-09 12:00:00,2.73,386.0,753.07,88.0,1.93,57.7 +2020-02-09 13:00:00,2.77,317.0,672.7,88.0,1.72,57.7 +2020-02-09 14:00:00,2.88,219.0,627.78,63.0,1.59,55.55 +2020-02-09 15:00:00,2.05,87.0,372.41,40.0,1.66,64.9 +2020-02-09 16:00:00,0.66,0.0,-0.0,0.0,2.21,72.75 +2020-02-09 17:00:00,-0.44,0.0,-0.0,0.0,2.28,78.6 +2020-02-09 18:00:00,-0.75,0.0,-0.0,0.0,2.28,81.7 +2020-02-09 19:00:00,-1.7,0.0,-0.0,0.0,2.07,88.3 +2020-02-09 20:00:00,-1.8,0.0,-0.0,0.0,2.0,88.3 +2020-02-09 21:00:00,-1.92,0.0,-0.0,0.0,1.86,88.3 +2020-02-09 22:00:00,-2.1,0.0,-0.0,0.0,1.66,88.3 +2020-02-09 23:00:00,-2.21,0.0,-0.0,0.0,1.52,88.3 +2020-02-10 00:00:00,-2.5,0.0,-0.0,0.0,1.45,88.3 +2020-02-10 01:00:00,-2.53,0.0,-0.0,0.0,1.31,88.3 +2020-02-10 02:00:00,-2.27,0.0,-0.0,0.0,1.17,91.85 +2020-02-10 03:00:00,-2.63,0.0,-0.0,0.0,1.24,88.3 +2020-02-10 04:00:00,-2.43,0.0,-0.0,0.0,1.17,91.85 +2020-02-10 05:00:00,-2.59,0.0,-0.0,0.0,1.24,88.3 +2020-02-10 06:00:00,-2.33,0.0,-0.0,0.0,1.24,91.85 +2020-02-10 07:00:00,-2.23,15.0,0.0,15.0,0.97,91.85 +2020-02-10 08:00:00,-0.81,51.0,0.0,51.0,0.83,88.4 +2020-02-10 09:00:00,0.6,96.0,0.0,96.0,1.03,75.7 +2020-02-10 10:00:00,1.44,135.0,2.57,134.0,1.31,67.4 +2020-02-10 11:00:00,2.25,148.0,4.81,146.0,1.59,62.35 +2020-02-10 
12:00:00,2.29,173.0,19.96,165.0,1.59,62.35 +2020-02-10 13:00:00,2.71,101.0,0.0,101.0,1.52,60.05 +2020-02-10 14:00:00,2.71,100.0,19.73,95.0,1.24,60.05 +2020-02-10 15:00:00,2.42,50.0,30.53,46.0,0.48,62.5 +2020-02-10 16:00:00,1.98,0.0,-0.0,0.0,0.34,70.2 +2020-02-10 17:00:00,0.9,0.0,-0.0,0.0,0.9,70.05 +2020-02-10 18:00:00,-0.47,0.0,-0.0,0.0,1.45,78.6 +2020-02-10 19:00:00,-0.75,0.0,-0.0,0.0,1.38,85.0 +2020-02-10 20:00:00,-0.71,0.0,-0.0,0.0,1.52,85.0 +2020-02-10 21:00:00,-0.87,0.0,-0.0,0.0,1.66,85.0 +2020-02-10 22:00:00,-1.25,0.0,-0.0,0.0,1.72,88.35 +2020-02-10 23:00:00,-1.55,0.0,-0.0,0.0,1.79,88.35 +2020-02-11 00:00:00,-2.08,0.0,-0.0,0.0,1.86,88.3 +2020-02-11 01:00:00,-2.06,0.0,-0.0,0.0,1.93,91.85 +2020-02-11 02:00:00,-1.8,0.0,-0.0,0.0,2.0,91.85 +2020-02-11 03:00:00,-1.79,0.0,-0.0,0.0,2.07,91.85 +2020-02-11 04:00:00,-2.02,0.0,-0.0,0.0,2.21,88.3 +2020-02-11 05:00:00,-2.14,0.0,-0.0,0.0,2.28,88.3 +2020-02-11 06:00:00,-1.94,0.0,-0.0,0.0,2.41,88.3 +2020-02-11 07:00:00,-1.06,56.0,248.73,32.0,2.97,85.0 +2020-02-11 08:00:00,0.27,197.0,622.98,56.0,2.83,85.1 +2020-02-11 09:00:00,1.9,250.0,341.57,138.0,3.79,73.0 +2020-02-11 10:00:00,2.74,260.0,177.54,190.0,4.34,65.0 +2020-02-11 11:00:00,3.52,405.0,715.09,104.0,4.83,57.95 +2020-02-11 12:00:00,3.69,186.0,29.55,174.0,4.76,57.95 +2020-02-11 13:00:00,3.65,158.0,31.37,147.0,4.07,62.7 +2020-02-11 14:00:00,3.24,45.0,0.0,45.0,3.86,70.4 +2020-02-11 15:00:00,2.84,45.0,14.72,43.0,3.72,73.1 +2020-02-11 16:00:00,2.51,0.0,-0.0,0.0,3.31,76.0 +2020-02-11 17:00:00,2.32,0.0,-0.0,0.0,3.17,78.95 +2020-02-11 18:00:00,2.18,0.0,-0.0,0.0,3.03,78.95 +2020-02-11 19:00:00,2.02,0.0,-0.0,0.0,2.83,78.95 +2020-02-11 20:00:00,1.79,0.0,-0.0,0.0,2.9,82.0 +2020-02-11 21:00:00,1.79,0.0,-0.0,0.0,2.97,82.0 +2020-02-11 22:00:00,1.61,0.0,-0.0,0.0,2.9,85.25 +2020-02-11 23:00:00,1.53,0.0,-0.0,0.0,2.97,85.25 +2020-02-12 00:00:00,1.29,0.0,-0.0,0.0,3.1,88.55 +2020-02-12 01:00:00,1.35,0.0,-0.0,0.0,3.03,92.05 +2020-02-12 02:00:00,0.92,0.0,-0.0,0.0,2.83,88.55 +2020-02-12 03:00:00,0.49,0.0,-0.0,0.0,2.62,88.5 +2020-02-12 04:00:00,0.01,0.0,-0.0,0.0,2.69,88.45 +2020-02-12 05:00:00,-0.06,0.0,-0.0,0.0,2.76,88.45 +2020-02-12 06:00:00,0.01,0.0,-0.0,0.0,2.9,88.45 +2020-02-12 07:00:00,0.24,62.0,266.78,35.0,2.69,92.0 +2020-02-12 08:00:00,1.3,205.0,640.02,57.0,2.76,88.55 +2020-02-12 09:00:00,2.28,192.0,108.12,156.0,4.07,78.95 +2020-02-12 10:00:00,2.55,233.0,102.64,192.0,3.79,79.0 +2020-02-12 11:00:00,3.06,272.0,147.83,209.0,3.72,76.1 +2020-02-12 12:00:00,3.51,134.0,2.43,133.0,3.59,70.5 +2020-02-12 13:00:00,3.6,122.0,2.81,121.0,3.66,67.8 +2020-02-12 14:00:00,3.13,52.0,0.0,52.0,3.72,65.1 +2020-02-12 15:00:00,2.83,12.0,0.0,12.0,2.9,67.6 +2020-02-12 16:00:00,1.8,0.0,-0.0,0.0,2.07,70.1 +2020-02-12 17:00:00,0.43,0.0,-0.0,0.0,2.07,72.75 +2020-02-12 18:00:00,-0.57,0.0,-0.0,0.0,2.28,75.55 +2020-02-12 19:00:00,-0.84,0.0,-0.0,0.0,2.69,85.0 +2020-02-12 20:00:00,-1.02,0.0,-0.0,0.0,2.48,85.0 +2020-02-12 21:00:00,-1.4,0.0,-0.0,0.0,2.41,88.35 +2020-02-12 22:00:00,-1.57,0.0,-0.0,0.0,2.48,84.95 +2020-02-12 23:00:00,-1.78,0.0,-0.0,0.0,2.41,88.3 +2020-02-13 00:00:00,-2.05,0.0,-0.0,0.0,2.34,88.3 +2020-02-13 01:00:00,-2.33,0.0,-0.0,0.0,2.21,84.85 +2020-02-13 02:00:00,-2.81,0.0,-0.0,0.0,2.0,88.25 +2020-02-13 03:00:00,-3.46,0.0,-0.0,0.0,1.93,88.25 +2020-02-13 04:00:00,-3.67,0.0,-0.0,0.0,1.86,84.8 +2020-02-13 05:00:00,-3.93,0.0,-0.0,0.0,1.86,88.2 +2020-02-13 06:00:00,-4.06,0.0,-0.0,0.0,1.86,84.7 +2020-02-13 07:00:00,-2.84,67.0,292.44,36.0,1.59,84.85 +2020-02-13 08:00:00,-0.71,206.0,596.88,65.0,1.52,85.0 +2020-02-13 
09:00:00,1.36,325.0,727.6,79.0,1.93,67.3 +2020-02-13 10:00:00,2.26,405.0,788.27,86.0,1.45,62.35 +2020-02-13 11:00:00,2.93,385.0,523.83,159.0,1.1,53.4 +2020-02-13 12:00:00,3.53,197.0,33.61,183.0,0.83,49.4 +2020-02-13 13:00:00,3.52,253.0,227.15,171.0,0.41,47.45 +2020-02-13 14:00:00,3.32,207.0,368.56,108.0,0.34,49.25 +2020-02-13 15:00:00,2.9,112.0,487.39,41.0,0.9,53.4 +2020-02-13 16:00:00,1.8,0.0,0.0,0.0,1.52,62.25 +2020-02-13 17:00:00,0.23,0.0,-0.0,0.0,1.59,69.85 +2020-02-13 18:00:00,-1.42,0.0,-0.0,0.0,1.72,75.35 +2020-02-13 19:00:00,-2.18,0.0,-0.0,0.0,1.66,75.3 +2020-02-13 20:00:00,-2.51,0.0,-0.0,0.0,1.66,75.3 +2020-02-13 21:00:00,-2.7,0.0,-0.0,0.0,1.59,78.3 +2020-02-13 22:00:00,-2.67,0.0,-0.0,0.0,1.52,75.3 +2020-02-13 23:00:00,-2.38,0.0,-0.0,0.0,1.38,75.3 +2020-02-14 00:00:00,-1.92,0.0,-0.0,0.0,1.17,78.35 +2020-02-14 01:00:00,-1.54,0.0,-0.0,0.0,1.1,78.45 +2020-02-14 02:00:00,-1.71,0.0,-0.0,0.0,1.1,81.55 +2020-02-14 03:00:00,-1.84,0.0,-0.0,0.0,1.1,81.55 +2020-02-14 04:00:00,-2.25,0.0,-0.0,0.0,1.17,78.35 +2020-02-14 05:00:00,-2.59,0.0,-0.0,0.0,1.24,78.35 +2020-02-14 06:00:00,-2.94,0.0,-0.0,0.0,1.24,81.5 +2020-02-14 07:00:00,-2.16,74.0,333.69,37.0,0.97,88.3 +2020-02-14 08:00:00,-1.74,213.0,609.24,66.0,0.69,88.3 +2020-02-14 09:00:00,0.39,333.0,736.97,80.0,1.45,75.7 +2020-02-14 10:00:00,1.65,408.0,768.35,93.0,1.79,67.4 +2020-02-14 11:00:00,2.63,428.0,716.64,115.0,2.07,57.7 +2020-02-14 12:00:00,3.06,309.0,248.91,204.0,2.07,51.3 +2020-02-14 13:00:00,3.2,265.0,251.21,173.0,2.0,47.3 +2020-02-14 14:00:00,3.15,229.0,485.92,96.0,2.0,45.4 +2020-02-14 15:00:00,2.37,124.0,577.71,37.0,1.79,51.15 +2020-02-14 16:00:00,1.03,0.0,0.0,0.0,1.93,59.7 +2020-02-14 17:00:00,-0.46,0.0,-0.0,0.0,1.86,69.75 +2020-02-14 18:00:00,-2.08,0.0,-0.0,0.0,1.86,78.35 +2020-02-14 19:00:00,-3.17,0.0,-0.0,0.0,1.79,81.5 +2020-02-14 20:00:00,-3.24,0.0,-0.0,0.0,1.66,84.8 +2020-02-14 21:00:00,-3.52,0.0,-0.0,0.0,1.72,84.8 +2020-02-14 22:00:00,-4.08,0.0,-0.0,0.0,1.86,88.2 +2020-02-14 23:00:00,-4.42,0.0,-0.0,0.0,2.0,88.15 +2020-02-15 00:00:00,-4.63,0.0,-0.0,0.0,2.0,88.15 +2020-02-15 01:00:00,-4.89,0.0,-0.0,0.0,2.0,88.1 +2020-02-15 02:00:00,-4.94,0.0,-0.0,0.0,2.07,88.1 +2020-02-15 03:00:00,-4.92,0.0,-0.0,0.0,2.07,88.1 +2020-02-15 04:00:00,-4.82,0.0,-0.0,0.0,2.28,88.1 +2020-02-15 05:00:00,-4.52,0.0,-0.0,0.0,2.48,84.65 +2020-02-15 06:00:00,-4.02,0.0,-0.0,0.0,2.62,84.7 +2020-02-15 07:00:00,-3.21,43.0,25.9,40.0,2.55,88.25 +2020-02-15 08:00:00,-1.54,138.0,109.57,111.0,2.69,84.95 +2020-02-15 09:00:00,0.01,228.0,177.88,166.0,3.86,72.65 +2020-02-15 10:00:00,1.06,296.0,235.97,198.0,4.48,64.65 +2020-02-15 11:00:00,1.9,303.0,194.52,217.0,4.69,57.6 +2020-02-15 12:00:00,2.41,306.0,236.44,205.0,4.76,51.15 +2020-02-15 13:00:00,2.72,257.0,218.05,176.0,4.69,51.15 +2020-02-15 14:00:00,2.71,125.0,35.87,115.0,4.55,51.15 +2020-02-15 15:00:00,2.27,64.0,38.58,58.0,4.14,53.15 +2020-02-15 16:00:00,1.72,0.0,0.0,0.0,4.21,55.2 +2020-02-15 17:00:00,1.48,0.0,-0.0,0.0,4.21,55.2 +2020-02-15 18:00:00,1.41,0.0,-0.0,0.0,4.21,55.2 +2020-02-15 19:00:00,1.22,0.0,-0.0,0.0,4.21,70.05 +2020-02-15 20:00:00,1.29,0.0,-0.0,0.0,4.07,70.05 +2020-02-15 21:00:00,0.96,0.0,-0.0,0.0,3.59,70.05 +2020-02-15 22:00:00,0.96,0.0,-0.0,0.0,3.38,72.85 +2020-02-15 23:00:00,1.17,0.0,-0.0,0.0,2.97,78.8 +2020-02-16 00:00:00,1.06,0.0,-0.0,0.0,2.55,85.2 +2020-02-16 01:00:00,1.47,0.0,-0.0,0.0,2.21,92.05 +2020-02-16 02:00:00,1.76,0.0,-0.0,0.0,2.07,95.65 +2020-02-16 03:00:00,1.93,0.0,-0.0,0.0,2.48,95.65 +2020-02-16 04:00:00,1.88,0.0,-0.0,0.0,2.48,95.65 +2020-02-16 05:00:00,1.67,0.0,-0.0,0.0,2.14,99.4 
+2020-02-16 06:00:00,1.4,0.0,-0.0,0.0,2.0,99.4 +2020-02-16 07:00:00,0.55,86.0,421.96,35.0,2.41,99.4 +2020-02-16 08:00:00,1.67,222.0,624.0,65.0,2.9,95.65 +2020-02-16 09:00:00,2.62,338.0,731.91,79.0,3.66,88.65 +2020-02-16 10:00:00,3.97,416.0,777.29,89.0,3.24,82.3 +2020-02-16 11:00:00,5.15,437.0,735.15,108.0,3.1,76.4 +2020-02-16 12:00:00,6.1,420.0,725.95,106.0,3.24,68.3 +2020-02-16 13:00:00,6.54,354.0,679.51,98.0,3.59,63.35 +2020-02-16 14:00:00,6.27,251.0,605.73,79.0,3.52,63.25 +2020-02-16 15:00:00,5.46,127.0,523.4,43.0,2.76,68.2 +2020-02-16 16:00:00,3.78,0.0,0.0,0.0,2.62,76.15 +2020-02-16 17:00:00,2.37,0.0,-0.0,0.0,2.62,82.15 +2020-02-16 18:00:00,1.42,0.0,-0.0,0.0,2.62,88.6 +2020-02-16 19:00:00,1.19,0.0,-0.0,0.0,2.55,92.05 +2020-02-16 20:00:00,0.57,0.0,-0.0,0.0,2.41,92.0 +2020-02-16 21:00:00,0.14,0.0,-0.0,0.0,2.21,92.0 +2020-02-16 22:00:00,-0.67,0.0,-0.0,0.0,2.07,95.6 +2020-02-16 23:00:00,-1.47,0.0,-0.0,0.0,2.0,91.9 +2020-02-17 00:00:00,-2.21,0.0,-0.0,0.0,2.0,91.85 +2020-02-17 01:00:00,-2.42,0.0,-0.0,0.0,1.79,88.3 +2020-02-17 02:00:00,-2.36,0.0,-0.0,0.0,1.59,88.3 +2020-02-17 03:00:00,-2.53,0.0,-0.0,0.0,1.52,88.3 +2020-02-17 04:00:00,-2.47,0.0,-0.0,0.0,1.38,91.85 +2020-02-17 05:00:00,-2.55,0.0,-0.0,0.0,1.31,91.85 +2020-02-17 06:00:00,-2.18,0.0,-0.0,0.0,1.17,91.85 +2020-02-17 07:00:00,-0.7,44.0,23.82,41.0,0.9,91.95 +2020-02-17 08:00:00,0.24,151.0,136.26,116.0,0.69,95.6 +2020-02-17 09:00:00,2.33,224.0,144.75,172.0,0.9,82.05 +2020-02-17 10:00:00,3.1,285.0,180.7,208.0,1.45,76.1 +2020-02-17 11:00:00,3.65,290.0,145.71,224.0,1.79,65.2 +2020-02-17 12:00:00,3.99,276.0,139.29,215.0,1.86,62.8 +2020-02-17 13:00:00,3.93,215.0,89.0,181.0,2.14,60.4 +2020-02-17 14:00:00,3.61,165.0,110.69,133.0,2.21,62.7 +2020-02-17 15:00:00,3.1,76.0,60.44,66.0,1.86,70.4 +2020-02-17 16:00:00,2.16,0.0,0.0,0.0,1.93,75.95 +2020-02-17 17:00:00,1.39,0.0,-0.0,0.0,1.86,82.0 +2020-02-17 18:00:00,0.85,0.0,-0.0,0.0,1.86,88.55 +2020-02-17 19:00:00,0.03,0.0,-0.0,0.0,1.59,92.0 +2020-02-17 20:00:00,-0.76,0.0,-0.0,0.0,1.45,95.6 +2020-02-17 21:00:00,-0.7,0.0,-0.0,0.0,1.24,99.4 +2020-02-17 22:00:00,-0.61,0.0,-0.0,0.0,1.17,95.6 +2020-02-17 23:00:00,-0.45,0.0,-0.0,0.0,1.1,99.4 +2020-02-18 00:00:00,-0.29,0.0,-0.0,0.0,1.03,99.4 +2020-02-18 01:00:00,-0.23,0.0,-0.0,0.0,1.03,100.0 +2020-02-18 02:00:00,-0.04,0.0,-0.0,0.0,1.17,99.4 +2020-02-18 03:00:00,-0.01,0.0,-0.0,0.0,1.17,99.4 +2020-02-18 04:00:00,-0.21,0.0,-0.0,0.0,1.03,100.0 +2020-02-18 05:00:00,-0.62,0.0,-0.0,0.0,0.9,99.4 +2020-02-18 06:00:00,-0.32,0.0,-0.0,0.0,0.69,95.6 +2020-02-18 07:00:00,-0.42,57.0,53.38,50.0,0.83,95.6 +2020-02-18 08:00:00,0.41,139.0,83.91,117.0,0.97,85.15 +2020-02-18 09:00:00,1.46,164.0,27.42,154.0,1.38,75.85 +2020-02-18 10:00:00,2.01,85.0,0.0,85.0,1.38,67.5 +2020-02-18 11:00:00,2.41,82.0,0.0,82.0,1.1,65.0 +2020-02-18 12:00:00,2.52,89.0,0.0,89.0,0.9,65.0 +2020-02-18 13:00:00,2.54,94.0,0.0,94.0,0.76,65.0 +2020-02-18 14:00:00,2.62,69.0,0.0,69.0,0.62,65.0 +2020-02-18 15:00:00,2.56,68.0,29.34,63.0,0.62,62.5 +2020-02-18 16:00:00,2.07,0.0,0.0,0.0,1.1,67.5 +2020-02-18 17:00:00,0.8,0.0,-0.0,0.0,1.38,78.75 +2020-02-18 18:00:00,-0.71,0.0,-0.0,0.0,1.52,85.0 +2020-02-18 19:00:00,-1.22,0.0,-0.0,0.0,1.66,88.35 +2020-02-18 20:00:00,-1.48,0.0,-0.0,0.0,1.72,84.95 +2020-02-18 21:00:00,-1.95,0.0,-0.0,0.0,1.72,88.3 +2020-02-18 22:00:00,-2.41,0.0,-0.0,0.0,1.72,84.85 +2020-02-18 23:00:00,-2.64,0.0,-0.0,0.0,1.79,84.85 +2020-02-19 00:00:00,-2.79,0.0,-0.0,0.0,1.93,84.85 +2020-02-19 01:00:00,-2.9,0.0,-0.0,0.0,2.0,84.85 +2020-02-19 02:00:00,-3.1,0.0,-0.0,0.0,1.93,81.5 +2020-02-19 
[... data-file hunk continues: hourly CSV rows from 2020-02-19 through 2020-04-20 (timestamp plus six numeric columns per row), omitted here for brevity ...]
01:00:00,7.24,0.0,-0.0,0.0,2.14,92.35 +2020-04-20 02:00:00,6.45,0.0,-0.0,0.0,2.07,92.3 +2020-04-20 03:00:00,5.84,0.0,-0.0,0.0,2.07,88.95 +2020-04-20 04:00:00,5.39,0.0,0.0,0.0,2.14,88.9 +2020-04-20 05:00:00,5.86,117.0,322.51,61.0,1.93,88.95 +2020-04-20 06:00:00,9.55,277.0,517.5,105.0,1.52,85.95 +2020-04-20 07:00:00,13.84,447.0,678.43,122.0,1.31,72.2 +2020-04-20 08:00:00,16.05,594.0,758.7,136.0,1.17,60.75 +2020-04-20 09:00:00,17.56,710.0,814.05,142.0,0.83,56.75 +2020-04-20 10:00:00,18.58,787.0,859.76,138.0,0.97,51.15 +2020-04-20 11:00:00,19.28,812.0,877.93,135.0,1.1,49.45 +2020-04-20 12:00:00,19.81,776.0,855.86,138.0,1.24,47.8 +2020-04-20 13:00:00,20.31,695.0,827.01,133.0,1.17,44.6 +2020-04-20 14:00:00,20.56,582.0,808.04,115.0,1.1,41.55 +2020-04-20 15:00:00,20.37,423.0,706.08,107.0,1.24,43.0 +2020-04-20 16:00:00,19.9,256.0,585.4,82.0,1.52,46.1 +2020-04-20 17:00:00,18.6,89.0,327.85,44.0,1.66,56.95 +2020-04-20 18:00:00,16.36,0.0,-0.0,0.0,2.07,62.95 +2020-04-20 19:00:00,14.86,0.0,-0.0,0.0,1.52,72.4 +2020-04-20 20:00:00,12.58,0.0,-0.0,0.0,1.93,74.7 +2020-04-20 21:00:00,10.74,0.0,-0.0,0.0,2.07,83.0 +2020-04-20 22:00:00,9.49,0.0,-0.0,0.0,1.93,82.9 +2020-04-20 23:00:00,8.61,0.0,-0.0,0.0,1.86,85.9 +2020-04-21 00:00:00,7.72,0.0,-0.0,0.0,1.86,89.0 +2020-04-21 01:00:00,6.95,0.0,-0.0,0.0,1.86,89.0 +2020-04-21 02:00:00,6.4,0.0,-0.0,0.0,1.86,88.95 +2020-04-21 03:00:00,5.99,0.0,-0.0,0.0,1.79,85.7 +2020-04-21 04:00:00,5.76,0.0,0.0,0.0,1.72,88.9 +2020-04-21 05:00:00,6.88,125.0,358.07,61.0,1.38,85.75 +2020-04-21 06:00:00,11.39,292.0,581.17,96.0,0.55,77.3 +2020-04-21 07:00:00,15.46,455.0,686.4,123.0,0.28,62.85 +2020-04-21 08:00:00,17.64,599.0,753.23,141.0,0.48,52.75 +2020-04-21 09:00:00,18.97,718.0,820.64,142.0,1.24,47.65 +2020-04-21 10:00:00,19.99,795.0,865.85,138.0,1.72,43.0 +2020-04-21 11:00:00,20.58,807.0,849.15,149.0,1.79,40.1 +2020-04-21 12:00:00,20.92,762.0,798.31,164.0,2.0,40.1 +2020-04-21 13:00:00,20.87,664.0,701.18,185.0,2.28,40.1 +2020-04-21 14:00:00,20.81,567.0,739.43,137.0,2.21,40.1 +2020-04-21 15:00:00,20.7,420.0,675.98,115.0,2.14,40.1 +2020-04-21 16:00:00,20.03,257.0,571.4,85.0,2.34,44.6 +2020-04-21 17:00:00,18.27,95.0,354.04,45.0,2.41,52.9 +2020-04-21 18:00:00,16.5,0.0,-0.0,0.0,2.41,52.65 +2020-04-21 19:00:00,15.39,0.0,-0.0,0.0,2.34,50.5 +2020-04-21 20:00:00,13.47,0.0,-0.0,0.0,2.34,55.9 +2020-04-21 21:00:00,11.9,0.0,-0.0,0.0,2.41,62.1 +2020-04-21 22:00:00,10.72,0.0,-0.0,0.0,2.41,66.55 +2020-04-21 23:00:00,9.79,0.0,-0.0,0.0,2.34,71.5 +2020-04-22 00:00:00,9.09,0.0,-0.0,0.0,2.28,74.15 +2020-04-22 01:00:00,8.48,0.0,-0.0,0.0,2.14,76.85 +2020-04-22 02:00:00,7.95,0.0,-0.0,0.0,2.07,79.7 +2020-04-22 03:00:00,7.45,0.0,-0.0,0.0,1.93,82.65 +2020-04-22 04:00:00,6.99,0.0,0.0,0.0,1.86,82.6 +2020-04-22 05:00:00,7.69,137.0,429.9,58.0,1.59,85.8 +2020-04-22 06:00:00,11.45,296.0,581.78,97.0,1.1,80.15 +2020-04-22 07:00:00,16.13,458.0,686.14,123.0,0.9,65.25 +2020-04-22 08:00:00,18.29,605.0,764.25,137.0,1.1,58.9 +2020-04-22 09:00:00,19.88,721.0,818.74,143.0,1.38,55.2 +2020-04-22 10:00:00,21.2,794.0,852.28,144.0,1.79,49.95 +2020-04-22 11:00:00,22.08,804.0,836.13,153.0,2.14,40.5 +2020-04-22 12:00:00,22.54,769.0,811.78,158.0,2.34,36.4 +2020-04-22 13:00:00,22.75,693.0,798.07,145.0,2.62,35.1 +2020-04-22 14:00:00,22.74,579.0,770.85,128.0,2.83,35.1 +2020-04-22 15:00:00,22.48,427.0,692.59,112.0,2.97,35.1 +2020-04-22 16:00:00,21.89,263.0,587.33,84.0,2.55,40.35 +2020-04-22 17:00:00,20.22,101.0,378.88,46.0,2.14,47.9 +2020-04-22 18:00:00,18.04,0.0,-0.0,0.0,2.41,52.9 +2020-04-22 19:00:00,16.51,0.0,-0.0,0.0,2.34,56.6 +2020-04-22 
20:00:00,14.7,0.0,-0.0,0.0,2.48,64.95 +2020-04-22 21:00:00,13.35,0.0,-0.0,0.0,2.48,72.1 +2020-04-22 22:00:00,12.24,0.0,-0.0,0.0,2.48,74.6 +2020-04-22 23:00:00,11.28,0.0,-0.0,0.0,2.48,77.2 +2020-04-23 00:00:00,10.36,0.0,-0.0,0.0,2.41,77.15 +2020-04-23 01:00:00,9.62,0.0,-0.0,0.0,2.34,79.9 +2020-04-23 02:00:00,9.08,0.0,-0.0,0.0,2.41,79.85 +2020-04-23 03:00:00,8.6,0.0,-0.0,0.0,2.34,82.75 +2020-04-23 04:00:00,8.18,3.0,0.0,3.0,2.28,82.7 +2020-04-23 05:00:00,8.76,145.0,471.6,56.0,2.0,82.75 +2020-04-23 06:00:00,12.2,310.0,631.52,91.0,1.52,80.2 +2020-04-23 07:00:00,16.26,470.0,716.43,117.0,1.93,62.95 +2020-04-23 08:00:00,18.11,612.0,773.6,135.0,2.28,56.85 +2020-04-23 09:00:00,19.71,727.0,828.22,139.0,2.76,53.25 +2020-04-23 10:00:00,21.04,766.0,754.13,188.0,3.52,46.5 +2020-04-23 11:00:00,21.51,802.0,820.74,160.0,4.07,40.35 +2020-04-23 12:00:00,21.71,701.0,584.51,259.0,4.34,40.35 +2020-04-23 13:00:00,21.78,688.0,769.41,157.0,4.69,40.35 +2020-04-23 14:00:00,21.61,572.0,737.39,138.0,4.9,38.9 +2020-04-23 15:00:00,20.89,425.0,674.09,116.0,4.69,41.55 +2020-04-23 16:00:00,20.25,260.0,554.31,89.0,4.21,44.6 +2020-04-23 17:00:00,19.16,97.0,308.56,51.0,4.0,45.95 +2020-04-23 18:00:00,17.55,0.0,-0.0,0.0,3.72,47.3 +2020-04-23 19:00:00,16.65,0.0,-0.0,0.0,3.66,43.8 +2020-04-23 20:00:00,15.61,0.0,-0.0,0.0,3.45,45.15 +2020-04-23 21:00:00,14.63,0.0,-0.0,0.0,3.31,44.9 +2020-04-23 22:00:00,13.68,0.0,-0.0,0.0,3.17,46.35 +2020-04-23 23:00:00,12.65,0.0,-0.0,0.0,2.9,49.7 +2020-04-24 00:00:00,11.53,0.0,-0.0,0.0,2.55,55.4 +2020-04-24 01:00:00,10.46,0.0,-0.0,0.0,2.41,61.75 +2020-04-24 02:00:00,9.54,0.0,-0.0,0.0,2.34,66.35 +2020-04-24 03:00:00,8.78,0.0,-0.0,0.0,2.41,74.05 +2020-04-24 04:00:00,8.11,2.0,0.0,2.0,2.34,76.8 +2020-04-24 05:00:00,8.3,152.0,490.71,57.0,2.21,79.7 +2020-04-24 06:00:00,10.48,314.0,623.16,95.0,2.34,71.7 +2020-04-24 07:00:00,12.72,477.0,720.14,119.0,2.83,66.95 +2020-04-24 08:00:00,14.52,619.0,776.48,137.0,3.03,60.4 +2020-04-24 09:00:00,16.16,731.0,818.06,147.0,3.38,54.45 +2020-04-24 10:00:00,17.47,807.0,863.44,142.0,3.79,47.3 +2020-04-24 11:00:00,18.35,818.0,845.02,154.0,3.86,42.45 +2020-04-24 12:00:00,18.87,777.0,805.63,165.0,3.66,39.55 +2020-04-24 13:00:00,19.14,702.0,795.89,150.0,3.72,36.8 +2020-04-24 14:00:00,19.14,568.0,692.54,158.0,3.79,35.45 +2020-04-24 15:00:00,18.8,431.0,675.42,119.0,3.93,35.35 +2020-04-24 16:00:00,18.31,269.0,579.79,88.0,3.79,36.55 +2020-04-24 17:00:00,17.33,109.0,398.78,48.0,3.03,40.6 +2020-04-24 18:00:00,15.67,0.0,-0.0,0.0,2.48,41.9 +2020-04-24 19:00:00,13.89,0.0,-0.0,0.0,1.93,50.1 +2020-04-24 20:00:00,12.33,0.0,-0.0,0.0,1.59,57.65 +2020-04-24 21:00:00,12.33,0.0,-0.0,0.0,1.38,51.5 +2020-04-24 22:00:00,13.22,0.0,-0.0,0.0,1.1,46.2 +2020-04-24 23:00:00,12.92,0.0,-0.0,0.0,0.69,44.5 +2020-04-25 00:00:00,12.38,0.0,-0.0,0.0,0.41,46.1 +2020-04-25 01:00:00,11.56,0.0,-0.0,0.0,0.69,49.45 +2020-04-25 02:00:00,10.79,0.0,-0.0,0.0,0.97,53.1 +2020-04-25 03:00:00,9.34,0.0,-0.0,0.0,1.24,57.05 +2020-04-25 04:00:00,8.21,4.0,0.0,4.0,1.31,66.05 +2020-04-25 05:00:00,8.62,87.0,55.44,76.0,1.03,66.15 +2020-04-25 06:00:00,9.77,299.0,528.08,111.0,1.03,74.2 +2020-04-25 07:00:00,10.68,372.0,299.13,222.0,2.28,80.05 +2020-04-25 08:00:00,11.82,325.0,57.62,289.0,3.52,74.55 +2020-04-25 09:00:00,12.12,339.0,30.65,317.0,3.72,69.35 +2020-04-25 10:00:00,12.55,136.0,0.0,136.0,3.66,69.45 +2020-04-25 11:00:00,12.35,231.0,0.0,231.0,3.24,71.95 +2020-04-25 12:00:00,13.04,113.0,0.0,113.0,3.1,69.55 +2020-04-25 13:00:00,13.74,257.0,4.3,254.0,3.24,67.15 +2020-04-25 14:00:00,13.98,153.0,0.0,153.0,3.31,64.85 +2020-04-25 
15:00:00,14.23,125.0,0.0,125.0,3.72,62.55 +2020-04-25 16:00:00,14.14,160.0,79.15,135.0,3.45,60.3 +2020-04-25 17:00:00,13.75,100.0,280.57,56.0,2.69,62.45 +2020-04-25 18:00:00,12.9,0.0,-0.0,0.0,2.0,64.65 +2020-04-25 19:00:00,11.26,0.0,-0.0,0.0,1.52,74.45 +2020-04-25 20:00:00,9.89,0.0,-0.0,0.0,1.31,79.95 +2020-04-25 21:00:00,9.36,0.0,-0.0,0.0,1.31,79.9 +2020-04-25 22:00:00,8.74,0.0,-0.0,0.0,1.38,85.9 +2020-04-25 23:00:00,7.9,0.0,-0.0,0.0,1.45,85.85 +2020-04-26 00:00:00,8.59,0.0,-0.0,0.0,1.17,85.9 +2020-04-26 01:00:00,9.28,0.0,-0.0,0.0,0.76,82.85 +2020-04-26 02:00:00,9.11,0.0,-0.0,0.0,0.76,82.85 +2020-04-26 03:00:00,8.23,0.0,-0.0,0.0,1.1,89.05 +2020-04-26 04:00:00,7.82,2.0,0.0,2.0,1.1,85.85 +2020-04-26 05:00:00,7.53,46.0,0.0,46.0,0.97,89.0 +2020-04-26 06:00:00,9.63,143.0,19.42,136.0,0.69,89.15 +2020-04-26 07:00:00,10.94,312.0,144.35,239.0,1.1,86.1 +2020-04-26 08:00:00,11.98,212.0,3.18,210.0,1.72,83.15 +2020-04-26 09:00:00,12.84,316.0,19.4,302.0,2.0,86.25 +2020-04-26 10:00:00,13.87,346.0,20.58,330.0,2.48,80.4 +2020-04-26 11:00:00,14.41,170.0,0.0,170.0,2.97,77.75 +2020-04-26 12:00:00,13.12,193.0,0.0,193.0,3.72,77.5 +2020-04-26 13:00:00,12.25,200.0,0.0,200.0,2.9,86.2 +2020-04-26 14:00:00,12.6,340.0,91.85,285.0,3.79,77.45 +2020-04-26 15:00:00,13.14,374.0,407.32,183.0,2.62,74.75 +2020-04-26 16:00:00,13.86,272.0,572.82,89.0,2.69,72.2 +2020-04-26 17:00:00,13.48,107.0,336.13,53.0,2.14,74.85 +2020-04-26 18:00:00,12.1,0.0,0.0,0.0,2.07,83.15 +2020-04-26 19:00:00,11.01,0.0,-0.0,0.0,1.72,86.1 +2020-04-26 20:00:00,10.03,0.0,-0.0,0.0,1.72,92.5 +2020-04-26 21:00:00,9.38,0.0,-0.0,0.0,2.0,92.45 +2020-04-26 22:00:00,8.91,0.0,-0.0,0.0,2.07,95.85 +2020-04-26 23:00:00,8.61,0.0,-0.0,0.0,1.86,95.85 +2020-04-27 00:00:00,8.12,0.0,-0.0,0.0,1.72,95.8 +2020-04-27 01:00:00,7.29,0.0,-0.0,0.0,1.79,99.4 +2020-04-27 02:00:00,6.87,0.0,-0.0,0.0,1.66,95.8 +2020-04-27 03:00:00,6.35,0.0,-0.0,0.0,1.66,95.8 +2020-04-27 04:00:00,6.02,2.0,0.0,2.0,1.59,95.8 +2020-04-27 05:00:00,7.04,86.0,43.32,77.0,1.31,99.4 +2020-04-27 06:00:00,9.32,177.0,60.29,155.0,1.1,95.85 +2020-04-27 07:00:00,11.74,177.0,5.88,174.0,1.45,89.3 +2020-04-27 08:00:00,13.2,160.0,0.0,160.0,1.86,80.35 +2020-04-27 09:00:00,14.33,287.0,9.65,280.0,2.0,72.3 +2020-04-27 10:00:00,15.06,261.0,1.28,260.0,1.86,67.45 +2020-04-27 11:00:00,16.09,585.0,249.97,386.0,1.93,65.25 +2020-04-27 12:00:00,16.7,558.0,240.35,373.0,2.14,65.35 +2020-04-27 13:00:00,17.4,480.0,193.31,344.0,2.28,65.35 +2020-04-27 14:00:00,17.35,305.0,53.15,273.0,2.55,65.35 +2020-04-27 15:00:00,16.98,193.0,19.05,184.0,2.34,65.35 +2020-04-27 16:00:00,15.55,106.0,6.19,104.0,1.59,77.85 +2020-04-27 17:00:00,14.63,40.0,0.0,40.0,1.31,80.55 +2020-04-27 18:00:00,13.73,0.0,0.0,0.0,1.72,83.35 +2020-04-27 19:00:00,11.64,0.0,-0.0,0.0,1.03,92.55 +2020-04-27 20:00:00,10.83,0.0,-0.0,0.0,1.1,99.4 +2020-04-27 21:00:00,10.18,0.0,-0.0,0.0,0.9,95.9 +2020-04-27 22:00:00,9.19,0.0,-0.0,0.0,1.31,99.4 +2020-04-27 23:00:00,8.7,0.0,-0.0,0.0,1.31,95.85 +2020-04-28 00:00:00,9.75,0.0,-0.0,0.0,0.97,92.45 +2020-04-28 01:00:00,10.72,0.0,-0.0,0.0,0.69,89.25 +2020-04-28 02:00:00,10.68,0.0,-0.0,0.0,0.69,89.25 +2020-04-28 03:00:00,10.43,0.0,-0.0,0.0,0.69,89.25 +2020-04-28 04:00:00,10.23,12.0,18.54,11.0,0.83,92.5 +2020-04-28 05:00:00,10.24,52.0,0.0,52.0,0.9,92.5 +2020-04-28 06:00:00,11.6,79.0,0.0,79.0,0.55,92.55 +2020-04-28 07:00:00,14.59,186.0,7.78,182.0,1.24,77.75 +2020-04-28 08:00:00,15.68,577.0,590.64,201.0,1.31,75.15 +2020-04-28 09:00:00,16.62,617.0,444.55,293.0,1.52,72.7 +2020-04-28 10:00:00,17.49,719.0,577.54,266.0,1.86,70.3 +2020-04-28 
11:00:00,18.45,688.0,459.09,321.0,2.14,63.4 +2020-04-28 12:00:00,19.38,678.0,495.51,295.0,2.55,57.1 +2020-04-28 13:00:00,19.94,437.0,128.75,346.0,2.9,53.35 +2020-04-28 14:00:00,20.25,285.0,36.34,263.0,3.38,53.35 +2020-04-28 15:00:00,19.92,285.0,134.53,221.0,3.59,55.2 +2020-04-28 16:00:00,17.62,231.0,306.16,131.0,2.07,70.3 +2020-04-28 17:00:00,17.58,87.0,130.78,65.0,1.93,67.85 +2020-04-28 18:00:00,16.38,0.0,0.0,0.0,1.93,70.1 +2020-04-28 19:00:00,14.74,0.0,-0.0,0.0,2.07,75.0 +2020-04-28 20:00:00,12.98,0.0,-0.0,0.0,2.0,83.25 +2020-04-28 21:00:00,11.48,0.0,-0.0,0.0,2.07,86.15 +2020-04-28 22:00:00,10.29,0.0,-0.0,0.0,2.07,89.2 +2020-04-28 23:00:00,9.43,0.0,-0.0,0.0,2.21,85.95 +2020-04-29 00:00:00,8.95,0.0,-0.0,0.0,2.34,82.85 +2020-04-29 01:00:00,8.47,0.0,-0.0,0.0,2.41,82.75 +2020-04-29 02:00:00,8.04,0.0,-0.0,0.0,2.34,82.7 +2020-04-29 03:00:00,7.56,0.0,-0.0,0.0,2.34,82.65 +2020-04-29 04:00:00,7.23,12.0,0.0,12.0,2.34,82.6 +2020-04-29 05:00:00,8.16,144.0,281.35,83.0,2.14,82.7 +2020-04-29 06:00:00,12.12,287.0,382.89,144.0,2.0,71.95 +2020-04-29 07:00:00,14.8,416.0,401.59,208.0,2.55,69.85 +2020-04-29 08:00:00,16.84,589.0,610.6,198.0,3.31,63.05 +2020-04-29 09:00:00,18.58,721.0,745.52,175.0,4.14,56.95 +2020-04-29 10:00:00,19.9,782.0,750.25,191.0,4.76,51.4 +2020-04-29 11:00:00,20.69,628.0,315.2,375.0,5.38,43.1 +2020-04-29 12:00:00,20.67,183.0,0.0,183.0,5.31,41.55 +2020-04-29 13:00:00,20.61,116.0,0.0,116.0,4.97,41.55 +2020-04-29 14:00:00,20.32,224.0,6.57,220.0,5.1,44.6 +2020-04-29 15:00:00,19.72,303.0,166.98,223.0,4.69,46.1 +2020-04-29 16:00:00,18.65,25.0,0.0,25.0,3.24,51.15 +2020-04-29 17:00:00,18.03,89.0,127.94,67.0,3.17,51.0 +2020-04-29 18:00:00,16.73,0.0,0.0,0.0,2.83,52.65 +2020-04-29 19:00:00,15.23,0.0,-0.0,0.0,2.9,50.35 +2020-04-29 20:00:00,14.33,0.0,-0.0,0.0,3.1,53.95 +2020-04-29 21:00:00,13.6,0.0,-0.0,0.0,2.76,55.9 +2020-04-29 22:00:00,12.61,0.0,-0.0,0.0,2.62,59.95 +2020-04-29 23:00:00,11.66,0.0,-0.0,0.0,2.55,64.35 +2020-04-30 00:00:00,10.86,0.0,-0.0,0.0,2.62,66.65 +2020-04-30 01:00:00,10.27,0.0,-0.0,0.0,2.62,71.6 +2020-04-30 02:00:00,9.74,0.0,-0.0,0.0,2.69,77.0 +2020-04-30 03:00:00,9.26,0.0,-0.0,0.0,2.76,79.85 +2020-04-30 04:00:00,8.76,9.0,0.0,9.0,2.69,82.75 +2020-04-30 05:00:00,9.18,150.0,302.88,83.0,2.62,82.85 +2020-04-30 06:00:00,10.61,301.0,431.63,138.0,2.97,77.15 +2020-04-30 07:00:00,12.1,439.0,471.39,193.0,3.38,77.35 +2020-04-30 08:00:00,13.44,625.0,726.67,157.0,3.45,72.2 +2020-04-30 09:00:00,14.86,631.0,452.54,298.0,3.72,67.35 +2020-04-30 10:00:00,16.17,615.0,305.93,373.0,4.07,60.75 +2020-04-30 11:00:00,17.19,796.0,729.67,208.0,4.28,58.7 +2020-04-30 12:00:00,18.08,809.0,858.52,140.0,4.34,52.9 +2020-04-30 13:00:00,18.37,732.0,842.83,131.0,4.34,51.0 +2020-04-30 14:00:00,18.42,603.0,773.09,130.0,5.1,45.85 +2020-04-30 15:00:00,18.06,436.0,619.8,137.0,4.76,47.4 +2020-04-30 16:00:00,14.48,280.0,539.5,100.0,3.6,46.83 +2020-04-30 17:00:00,14.35,105.0,216.33,67.0,3.44,50.03 +2020-04-30 18:00:00,14.22,0.0,0.0,0.0,3.28,53.23 +2020-04-30 19:00:00,14.09,0.0,-0.0,0.0,3.11,56.43 +2020-04-30 20:00:00,13.95,0.0,-0.0,0.0,2.95,59.62 +2020-04-30 21:00:00,13.82,0.0,-0.0,0.0,2.79,62.82 +2020-04-30 22:00:00,13.69,0.0,-0.0,0.0,2.63,66.02 +2020-04-30 23:00:00,13.56,0.0,-0.0,0.0,2.47,69.22 +2020-05-01 00:00:00,13.43,0.0,-0.0,0.0,2.31,72.42 +2020-05-01 01:00:00,13.3,0.0,-0.0,0.0,2.15,75.62 +2020-05-01 02:00:00,13.17,0.0,-0.0,0.0,1.99,78.81 +2020-05-01 03:00:00,13.03,0.0,-0.0,0.0,1.83,82.01 +2020-05-01 04:00:00,12.9,22.0,27.7,20.0,1.67,85.21 +2020-05-01 05:00:00,12.77,152.0,278.57,88.0,1.51,88.41 +2020-05-01 
06:00:00,12.64,318.0,497.8,126.0,1.35,91.61 +2020-05-01 07:00:00,12.51,484.0,644.08,143.0,1.19,94.81 +2020-05-01 08:00:00,20.64,624.0,715.65,158.0,1.93,61.6 +2020-05-01 09:00:00,22.35,741.0,794.55,151.0,2.07,55.8 +2020-05-01 10:00:00,23.76,802.0,802.58,162.0,2.0,50.55 +2020-05-01 11:00:00,24.82,828.0,832.5,152.0,1.93,47.4 +2020-05-01 12:00:00,25.43,770.0,738.55,190.0,1.72,42.75 +2020-05-01 13:00:00,25.53,661.0,604.86,226.0,1.38,41.25 +2020-05-01 14:00:00,25.69,598.0,749.09,135.0,1.31,39.95 +2020-05-01 15:00:00,25.53,448.0,668.81,121.0,1.17,42.75 +2020-05-01 16:00:00,25.23,285.0,549.16,98.0,0.55,50.95 +2020-05-01 17:00:00,24.46,125.0,344.33,62.0,1.24,54.35 +2020-05-01 18:00:00,22.2,0.0,0.0,0.0,1.38,66.35 +2020-05-01 19:00:00,19.13,0.0,-0.0,0.0,1.93,70.55 +2020-05-01 20:00:00,17.48,0.0,-0.0,0.0,2.0,78.1 +2020-05-01 21:00:00,16.12,0.0,-0.0,0.0,2.14,83.6 +2020-05-01 22:00:00,15.16,0.0,-0.0,0.0,1.86,89.55 +2020-05-01 23:00:00,14.51,0.0,-0.0,0.0,1.79,89.5 +2020-05-02 00:00:00,13.81,0.0,-0.0,0.0,1.59,95.95 +2020-05-02 01:00:00,13.44,0.0,-0.0,0.0,1.45,92.65 +2020-05-02 02:00:00,13.3,0.0,-0.0,0.0,1.38,95.95 +2020-05-02 03:00:00,13.47,0.0,-0.0,0.0,1.31,92.65 +2020-05-02 04:00:00,12.85,25.0,39.19,22.0,1.52,92.6 +2020-05-02 05:00:00,13.39,151.0,256.54,91.0,1.31,95.95 +2020-05-02 06:00:00,17.24,317.0,479.98,130.0,1.03,80.85 +2020-05-02 07:00:00,19.75,486.0,643.41,143.0,1.1,73.15 +2020-05-02 08:00:00,21.6,633.0,742.45,147.0,1.45,61.85 +2020-05-02 09:00:00,23.22,743.0,793.78,151.0,1.79,58.0 +2020-05-02 10:00:00,24.69,804.0,800.73,163.0,2.14,52.6 +2020-05-02 11:00:00,25.88,825.0,819.63,157.0,2.41,49.3 +2020-05-02 12:00:00,26.67,776.0,752.28,183.0,2.69,43.15 +2020-05-02 13:00:00,25.97,695.0,713.15,180.0,3.31,46.0 +2020-05-02 14:00:00,25.05,581.0,671.35,164.0,3.1,52.75 +2020-05-02 15:00:00,25.1,232.0,42.67,211.0,3.52,54.55 +2020-05-02 16:00:00,24.24,161.0,55.25,142.0,3.31,60.25 +2020-05-02 17:00:00,22.92,58.0,10.72,56.0,2.83,66.45 +2020-05-02 18:00:00,21.56,5.0,0.0,5.0,2.48,71.0 +2020-05-02 19:00:00,20.07,0.0,-0.0,0.0,2.41,68.3 +2020-05-02 20:00:00,18.77,0.0,-0.0,0.0,2.21,75.55 +2020-05-02 21:00:00,17.6,0.0,-0.0,0.0,2.21,80.85 +2020-05-02 22:00:00,16.56,0.0,-0.0,0.0,1.72,80.8 +2020-05-02 23:00:00,14.99,0.0,-0.0,0.0,1.79,86.5 +2020-05-03 00:00:00,14.56,0.0,-0.0,0.0,2.41,89.5 +2020-05-03 01:00:00,14.24,0.0,-0.0,0.0,2.41,92.7 +2020-05-03 02:00:00,13.85,0.0,-0.0,0.0,2.34,95.95 +2020-05-03 03:00:00,13.61,0.0,-0.0,0.0,2.48,95.95 +2020-05-03 04:00:00,13.42,5.0,0.0,5.0,2.48,92.65 +2020-05-03 05:00:00,14.0,40.0,0.0,40.0,2.97,92.7 +2020-05-03 06:00:00,14.94,128.0,5.08,126.0,3.45,89.55 +2020-05-03 07:00:00,15.77,137.0,0.0,137.0,3.24,89.6 +2020-05-03 08:00:00,16.7,291.0,24.32,275.0,2.83,89.65 +2020-05-03 09:00:00,16.67,477.0,148.2,366.0,3.52,89.65 +2020-05-03 10:00:00,17.14,226.0,0.0,226.0,3.86,86.65 +2020-05-03 11:00:00,17.27,427.0,55.02,382.0,3.72,86.65 +2020-05-03 12:00:00,17.01,126.0,0.0,126.0,3.86,83.7 +2020-05-03 13:00:00,17.2,319.0,22.07,303.0,3.93,80.85 +2020-05-03 14:00:00,17.03,288.0,33.65,267.0,3.59,78.1 +2020-05-03 15:00:00,17.28,315.0,177.68,227.0,3.17,78.1 +2020-05-03 16:00:00,17.44,53.0,0.0,53.0,3.17,75.4 +2020-05-03 17:00:00,17.31,12.0,0.0,12.0,2.41,75.4 +2020-05-03 18:00:00,16.68,1.0,0.0,1.0,2.07,75.3 +2020-05-03 19:00:00,15.29,0.0,-0.0,0.0,1.72,86.5 +2020-05-03 20:00:00,14.58,0.0,-0.0,0.0,1.72,86.45 +2020-05-03 21:00:00,14.0,0.0,-0.0,0.0,1.86,89.5 +2020-05-03 22:00:00,13.48,0.0,-0.0,0.0,2.28,89.45 +2020-05-03 23:00:00,13.32,0.0,-0.0,0.0,2.48,86.3 +2020-05-04 00:00:00,12.97,0.0,-0.0,0.0,2.55,83.25 
+2020-05-04 01:00:00,12.66,0.0,-0.0,0.0,2.62,83.25 +2020-05-04 02:00:00,12.16,0.0,-0.0,0.0,2.62,83.15 +2020-05-04 03:00:00,11.59,0.0,-0.0,0.0,2.41,83.1 +2020-05-04 04:00:00,11.05,45.0,200.01,28.0,2.21,83.05 +2020-05-04 05:00:00,11.35,115.0,70.28,98.0,1.93,86.1 +2020-05-04 06:00:00,13.31,349.0,599.29,111.0,2.14,77.5 +2020-05-04 07:00:00,14.95,510.0,701.64,131.0,2.07,72.45 +2020-05-04 08:00:00,16.38,650.0,760.72,147.0,2.28,60.75 +2020-05-04 09:00:00,17.36,593.0,332.42,343.0,2.69,50.85 +2020-05-04 10:00:00,18.07,752.0,602.62,266.0,2.9,47.4 +2020-05-04 11:00:00,18.68,781.0,644.51,252.0,3.66,44.2 +2020-05-04 12:00:00,19.12,658.0,394.21,345.0,3.59,41.15 +2020-05-04 13:00:00,19.35,567.0,320.08,334.0,3.38,41.15 +2020-05-04 14:00:00,19.35,513.0,405.03,259.0,2.97,41.15 +2020-05-04 15:00:00,19.37,452.0,626.01,140.0,2.55,42.7 +2020-05-04 16:00:00,19.19,293.0,530.59,107.0,1.93,44.3 +2020-05-04 17:00:00,18.37,140.0,407.93,61.0,0.83,63.3 +2020-05-04 18:00:00,18.32,9.0,0.0,9.0,0.28,49.15 +2020-05-04 19:00:00,14.22,0.0,-0.0,0.0,1.86,72.3 +2020-05-04 20:00:00,12.34,0.0,-0.0,0.0,2.21,74.6 +2020-05-04 21:00:00,11.26,0.0,-0.0,0.0,2.28,77.2 +2020-05-04 22:00:00,10.4,0.0,-0.0,0.0,2.21,77.15 +2020-05-04 23:00:00,9.67,0.0,-0.0,0.0,2.14,82.9 +2020-05-05 00:00:00,9.12,0.0,-0.0,0.0,2.21,82.85 +2020-05-05 01:00:00,8.65,0.0,-0.0,0.0,2.28,82.75 +2020-05-05 02:00:00,8.37,0.0,-0.0,0.0,2.28,82.75 +2020-05-05 03:00:00,8.08,0.0,-0.0,0.0,2.21,82.7 +2020-05-05 04:00:00,7.68,38.0,89.8,30.0,2.07,85.8 +2020-05-05 05:00:00,9.05,178.0,374.36,86.0,1.59,85.95 +2020-05-05 06:00:00,13.38,335.0,516.51,128.0,0.97,74.75 +2020-05-05 07:00:00,16.09,499.0,651.26,145.0,0.76,65.25 +2020-05-05 08:00:00,17.99,628.0,680.31,176.0,0.55,58.9 +2020-05-05 09:00:00,19.64,760.0,810.53,148.0,0.83,53.25 +2020-05-05 10:00:00,20.64,422.0,53.13,379.0,0.83,48.05 +2020-05-05 11:00:00,20.69,276.0,1.21,275.0,0.97,49.8 +2020-05-05 12:00:00,18.63,390.0,37.65,360.0,2.9,65.65 +2020-05-05 13:00:00,16.7,65.0,0.0,65.0,2.34,78.0 +2020-05-05 14:00:00,16.68,59.0,0.0,59.0,1.31,75.3 +2020-05-05 15:00:00,16.13,76.0,0.0,76.0,1.52,77.95 +2020-05-05 16:00:00,15.29,34.0,0.0,34.0,0.9,83.5 +2020-05-05 17:00:00,14.71,26.0,0.0,26.0,0.34,86.45 +2020-05-05 18:00:00,13.95,10.0,23.73,9.0,0.34,89.5 +2020-05-05 19:00:00,12.78,0.0,-0.0,0.0,1.24,95.95 +2020-05-05 20:00:00,12.24,0.0,-0.0,0.0,1.31,99.4 +2020-05-05 21:00:00,11.65,0.0,-0.0,0.0,1.1,99.4 +2020-05-05 22:00:00,11.27,0.0,-0.0,0.0,1.24,100.0 +2020-05-05 23:00:00,10.91,0.0,-0.0,0.0,1.79,99.35 +2020-05-06 00:00:00,10.05,0.0,-0.0,0.0,2.07,99.4 +2020-05-06 01:00:00,8.23,0.0,-0.0,0.0,2.14,99.4 +2020-05-06 02:00:00,6.8,0.0,-0.0,0.0,2.34,95.8 +2020-05-06 03:00:00,6.15,0.0,-0.0,0.0,2.0,92.3 +2020-05-06 04:00:00,5.75,18.0,0.0,18.0,1.86,95.75 +2020-05-06 05:00:00,5.61,30.0,0.0,30.0,1.59,92.3 +2020-05-06 06:00:00,5.7,59.0,0.0,59.0,1.31,92.3 +2020-05-06 07:00:00,5.89,125.0,0.0,125.0,1.66,88.95 +2020-05-06 08:00:00,6.38,223.0,1.5,222.0,1.38,85.75 +2020-05-06 09:00:00,6.98,251.0,1.32,250.0,1.45,89.0 +2020-05-06 10:00:00,7.85,209.0,0.0,209.0,1.38,85.8 +2020-05-06 11:00:00,8.52,324.0,7.26,318.0,1.24,79.75 +2020-05-06 12:00:00,8.86,293.0,5.0,289.0,0.83,82.75 +2020-05-06 13:00:00,9.36,129.0,0.0,129.0,1.17,85.95 +2020-05-06 14:00:00,9.46,55.0,0.0,55.0,1.52,82.9 +2020-05-06 15:00:00,9.7,78.0,0.0,78.0,1.93,82.9 +2020-05-06 16:00:00,9.55,60.0,0.0,60.0,2.34,82.9 +2020-05-06 17:00:00,9.37,53.0,0.0,53.0,2.28,85.95 +2020-05-06 18:00:00,9.06,5.0,0.0,5.0,1.86,85.95 +2020-05-06 19:00:00,7.3,0.0,-0.0,0.0,1.66,95.8 +2020-05-06 20:00:00,7.17,0.0,-0.0,0.0,1.59,95.8 
+2020-05-06 21:00:00,7.05,0.0,-0.0,0.0,1.59,95.8 +2020-05-06 22:00:00,6.87,0.0,-0.0,0.0,1.59,92.35 +2020-05-06 23:00:00,6.73,0.0,-0.0,0.0,1.31,92.35 +2020-05-07 00:00:00,6.58,0.0,-0.0,0.0,1.1,92.35 +2020-05-07 01:00:00,6.65,0.0,-0.0,0.0,0.97,92.35 +2020-05-07 02:00:00,6.63,0.0,-0.0,0.0,0.9,92.35 +2020-05-07 03:00:00,6.15,0.0,-0.0,0.0,1.03,95.8 +2020-05-07 04:00:00,6.12,10.0,0.0,10.0,0.97,95.8 +2020-05-07 05:00:00,6.92,128.0,90.83,105.0,1.79,92.35 +2020-05-07 06:00:00,8.05,114.0,0.0,114.0,1.66,89.05 +2020-05-07 07:00:00,8.71,107.0,0.0,107.0,0.9,89.1 +2020-05-07 08:00:00,9.56,226.0,1.49,225.0,0.9,82.9 +2020-05-07 09:00:00,10.83,376.0,39.43,346.0,0.9,80.05 +2020-05-07 10:00:00,11.98,275.0,1.23,274.0,1.1,71.95 +2020-05-07 11:00:00,12.9,434.0,51.87,391.0,1.52,66.95 +2020-05-07 12:00:00,13.55,438.0,66.07,385.0,1.72,64.75 +2020-05-07 13:00:00,14.2,574.0,321.9,337.0,1.72,60.3 +2020-05-07 14:00:00,14.29,423.0,182.46,307.0,1.86,60.3 +2020-05-07 15:00:00,14.21,323.0,165.52,239.0,1.79,60.3 +2020-05-07 16:00:00,13.94,285.0,435.82,128.0,1.93,60.3 +2020-05-07 17:00:00,13.33,106.0,117.64,82.0,2.14,64.65 +2020-05-07 18:00:00,12.57,18.0,101.07,13.0,2.14,66.95 +2020-05-07 19:00:00,11.41,0.0,-0.0,0.0,2.0,71.85 +2020-05-07 20:00:00,10.57,0.0,-0.0,0.0,1.86,77.15 +2020-05-07 21:00:00,9.63,0.0,-0.0,0.0,1.86,82.9 +2020-05-07 22:00:00,9.04,0.0,-0.0,0.0,1.93,85.95 +2020-05-07 23:00:00,8.97,0.0,-0.0,0.0,2.0,82.85 +2020-05-08 00:00:00,8.96,0.0,-0.0,0.0,2.0,82.85 +2020-05-08 01:00:00,8.83,0.0,-0.0,0.0,2.07,89.1 +2020-05-08 02:00:00,8.71,0.0,-0.0,0.0,2.07,89.1 +2020-05-08 03:00:00,8.44,0.0,-0.0,0.0,2.14,89.1 +2020-05-08 04:00:00,8.08,8.0,0.0,8.0,2.21,89.05 +2020-05-08 05:00:00,8.81,52.0,0.0,52.0,2.28,89.1 +2020-05-08 06:00:00,10.43,255.0,165.4,187.0,2.55,80.05 +2020-05-08 07:00:00,12.8,465.0,468.15,206.0,2.48,83.25 +2020-05-08 08:00:00,14.58,408.0,118.79,328.0,2.76,69.85 +2020-05-08 09:00:00,15.76,533.0,206.91,375.0,2.62,70.0 +2020-05-08 10:00:00,16.52,370.0,22.02,352.0,2.55,60.85 +2020-05-08 11:00:00,17.18,554.0,163.54,418.0,2.34,54.7 +2020-05-08 12:00:00,18.13,270.0,1.24,269.0,2.48,51.0 +2020-05-08 13:00:00,18.93,232.0,0.0,232.0,2.34,47.55 +2020-05-08 14:00:00,19.47,314.0,43.85,286.0,2.07,44.3 +2020-05-08 15:00:00,19.24,426.0,470.18,186.0,1.72,45.95 +2020-05-08 16:00:00,19.0,309.0,558.63,106.0,1.79,49.45 +2020-05-08 17:00:00,18.29,159.0,467.72,62.0,1.66,58.9 +2020-05-08 18:00:00,16.88,19.0,75.37,15.0,2.0,60.85 +2020-05-08 19:00:00,15.31,0.0,-0.0,0.0,2.76,60.5 +2020-05-08 20:00:00,14.52,0.0,-0.0,0.0,2.9,58.25 +2020-05-08 21:00:00,13.93,0.0,-0.0,0.0,3.1,60.3 +2020-05-08 22:00:00,13.47,0.0,-0.0,0.0,3.17,62.45 +2020-05-08 23:00:00,13.02,0.0,-0.0,0.0,3.24,64.65 +2020-05-09 00:00:00,12.67,0.0,-0.0,0.0,3.38,66.95 +2020-05-09 01:00:00,12.44,0.0,-0.0,0.0,3.52,69.45 +2020-05-09 02:00:00,12.16,0.0,-0.0,0.0,3.59,69.35 +2020-05-09 03:00:00,11.88,0.0,-0.0,0.0,3.59,71.85 +2020-05-09 04:00:00,11.64,9.0,0.0,9.0,3.45,71.85 +2020-05-09 05:00:00,12.29,79.0,3.84,78.0,3.17,71.95 +2020-05-09 06:00:00,13.91,250.0,152.03,187.0,2.83,69.65 +2020-05-09 07:00:00,15.79,363.0,183.36,261.0,2.83,70.0 +2020-05-09 08:00:00,17.19,217.0,1.48,216.0,2.62,65.45 +2020-05-09 09:00:00,18.61,244.0,1.3,243.0,2.28,63.4 +2020-05-09 10:00:00,20.14,384.0,29.26,360.0,2.48,59.35 +2020-05-09 11:00:00,21.14,396.0,31.17,370.0,3.24,55.55 +2020-05-09 12:00:00,21.53,562.0,206.86,395.0,3.59,53.75 +2020-05-09 13:00:00,21.76,462.0,133.51,363.0,3.52,53.75 +2020-05-09 14:00:00,21.6,277.0,21.83,263.0,3.03,51.9 +2020-05-09 15:00:00,21.72,42.0,0.0,42.0,3.17,51.9 +2020-05-09 
16:00:00,21.3,222.0,163.71,162.0,2.21,57.55 +2020-05-09 17:00:00,20.51,111.0,128.13,84.0,1.45,63.8 +2020-05-09 18:00:00,19.22,18.0,52.97,15.0,1.66,70.55 +2020-05-09 19:00:00,16.82,0.0,-0.0,0.0,2.0,83.65 +2020-05-09 20:00:00,15.53,0.0,-0.0,0.0,2.07,86.5 +2020-05-09 21:00:00,14.81,0.0,-0.0,0.0,2.07,89.5 +2020-05-09 22:00:00,14.12,0.0,-0.0,0.0,2.07,89.5 +2020-05-09 23:00:00,13.5,0.0,-0.0,0.0,2.07,89.45 +2020-05-10 00:00:00,13.51,0.0,-0.0,0.0,2.14,89.45 +2020-05-10 01:00:00,13.27,0.0,-0.0,0.0,2.21,92.65 +2020-05-10 02:00:00,12.52,0.0,-0.0,0.0,2.07,92.6 +2020-05-10 03:00:00,11.79,0.0,-0.0,0.0,2.07,95.9 +2020-05-10 04:00:00,11.41,63.0,249.64,36.0,2.0,95.9 +2020-05-10 05:00:00,12.85,204.0,466.42,81.0,1.66,95.95 +2020-05-10 06:00:00,16.55,295.0,287.38,175.0,1.52,83.65 +2020-05-10 07:00:00,19.12,514.0,668.75,140.0,1.86,78.3 +2020-05-10 08:00:00,20.77,652.0,739.22,150.0,2.21,68.4 +2020-05-10 09:00:00,22.14,752.0,760.78,167.0,2.55,57.75 +2020-05-10 10:00:00,22.86,759.0,610.21,257.0,2.55,55.9 +2020-05-10 11:00:00,23.88,726.0,487.69,318.0,2.97,50.55 +2020-05-10 12:00:00,24.8,564.0,211.16,393.0,3.79,44.15 +2020-05-10 13:00:00,24.96,449.0,118.27,361.0,3.72,44.15 +2020-05-10 14:00:00,24.71,477.0,293.5,288.0,3.1,45.75 +2020-05-10 15:00:00,24.59,304.0,125.92,239.0,2.48,49.05 +2020-05-10 16:00:00,24.21,108.0,2.71,107.0,2.0,54.35 +2020-05-10 17:00:00,23.26,107.0,102.8,85.0,1.72,62.15 +2020-05-10 18:00:00,21.92,21.0,66.5,17.0,2.14,64.0 +2020-05-10 19:00:00,20.12,0.0,-0.0,0.0,2.34,68.3 +2020-05-10 20:00:00,18.97,0.0,-0.0,0.0,2.48,72.95 +2020-05-10 21:00:00,18.2,0.0,-0.0,0.0,2.62,75.45 +2020-05-10 22:00:00,17.75,0.0,-0.0,0.0,2.76,78.1 +2020-05-10 23:00:00,17.62,0.0,-0.0,0.0,2.9,78.1 +2020-05-11 00:00:00,17.77,0.0,-0.0,0.0,3.03,78.1 +2020-05-11 01:00:00,17.89,0.0,-0.0,0.0,3.17,75.4 +2020-05-11 02:00:00,18.0,0.0,-0.0,0.0,3.1,72.9 +2020-05-11 03:00:00,17.84,0.0,-0.0,0.0,3.1,75.4 +2020-05-11 04:00:00,17.77,9.0,0.0,9.0,3.03,75.4 +2020-05-11 05:00:00,18.39,145.0,127.33,111.0,2.9,75.45 +2020-05-11 06:00:00,20.21,128.0,2.38,127.0,2.48,68.3 +2020-05-11 07:00:00,22.18,197.0,5.34,194.0,2.76,66.35 +2020-05-11 08:00:00,24.05,439.0,167.21,325.0,3.1,60.25 +2020-05-11 09:00:00,25.6,544.0,230.72,366.0,3.1,54.7 +2020-05-11 10:00:00,26.8,648.0,345.41,363.0,3.17,53.1 +2020-05-11 11:00:00,27.54,821.0,761.61,182.0,3.17,48.0 +2020-05-11 12:00:00,28.08,803.0,794.11,158.0,3.1,43.4 +2020-05-11 13:00:00,28.29,738.0,800.96,140.0,2.97,43.4 +2020-05-11 14:00:00,28.55,313.0,44.85,284.0,3.17,41.95 +2020-05-11 15:00:00,28.46,178.0,5.78,175.0,3.38,43.4 +2020-05-11 16:00:00,26.7,28.0,0.0,28.0,2.28,53.1 +2020-05-11 17:00:00,25.8,14.0,0.0,14.0,2.55,56.6 +2020-05-11 18:00:00,24.36,2.0,0.0,2.0,2.48,60.25 +2020-05-11 19:00:00,21.48,0.0,-0.0,0.0,2.21,78.55 +2020-05-11 20:00:00,19.82,0.0,-0.0,0.0,0.76,89.85 +2020-05-11 21:00:00,19.06,0.0,-0.0,0.0,2.34,86.8 +2020-05-11 22:00:00,18.11,0.0,-0.0,0.0,4.0,96.05 +2020-05-11 23:00:00,16.43,0.0,-0.0,0.0,4.14,96.05 +2020-05-12 00:00:00,15.19,0.0,-0.0,0.0,3.66,92.75 +2020-05-12 01:00:00,14.13,0.0,-0.0,0.0,3.38,96.0 +2020-05-12 02:00:00,13.28,0.0,-0.0,0.0,3.79,92.65 +2020-05-12 03:00:00,12.51,0.0,-0.0,0.0,3.59,92.6 +2020-05-12 04:00:00,11.8,6.0,0.0,6.0,3.72,92.55 +2020-05-12 05:00:00,11.36,29.0,0.0,29.0,3.59,89.3 +2020-05-12 06:00:00,10.94,63.0,0.0,63.0,4.14,80.1 +2020-05-12 07:00:00,10.79,100.0,0.0,100.0,3.79,86.05 +2020-05-12 08:00:00,10.42,156.0,0.0,156.0,4.21,77.15 +2020-05-12 09:00:00,10.21,360.0,28.43,338.0,4.41,71.6 +2020-05-12 10:00:00,10.72,167.0,0.0,167.0,4.48,66.55 +2020-05-12 
11:00:00,11.72,673.0,349.43,379.0,4.97,57.55 +2020-05-12 12:00:00,12.65,506.0,125.21,404.0,5.24,51.65 +2020-05-12 13:00:00,13.26,512.0,196.24,365.0,5.31,48.0 +2020-05-12 14:00:00,13.58,465.0,249.54,303.0,5.31,42.95 +2020-05-12 15:00:00,13.33,418.0,412.04,203.0,5.24,41.2 +2020-05-12 16:00:00,12.73,213.0,122.48,167.0,5.17,42.7 +2020-05-12 17:00:00,11.88,105.0,77.12,88.0,4.41,47.6 +2020-05-12 18:00:00,11.12,26.0,89.5,20.0,3.72,49.3 +2020-05-12 19:00:00,10.02,0.0,-0.0,0.0,3.72,55.05 +2020-05-12 20:00:00,9.52,0.0,-0.0,0.0,3.59,57.05 +2020-05-12 21:00:00,8.69,0.0,-0.0,0.0,3.38,61.3 +2020-05-12 22:00:00,8.15,0.0,-0.0,0.0,3.24,66.05 +2020-05-12 23:00:00,7.52,0.0,-0.0,0.0,3.24,68.5 +2020-05-13 00:00:00,6.91,0.0,-0.0,0.0,3.24,68.4 +2020-05-13 01:00:00,6.51,0.0,-0.0,0.0,3.17,68.4 +2020-05-13 02:00:00,5.99,0.0,-0.0,0.0,3.1,70.95 +2020-05-13 03:00:00,5.56,0.0,-0.0,0.0,3.03,73.65 +2020-05-13 04:00:00,5.27,64.0,168.83,44.0,2.97,76.4 +2020-05-13 05:00:00,5.39,110.0,25.61,103.0,3.45,73.65 +2020-05-13 06:00:00,6.04,140.0,2.34,139.0,4.28,68.3 +2020-05-13 07:00:00,6.64,170.0,0.0,170.0,3.93,71.05 +2020-05-13 08:00:00,7.36,300.0,18.93,287.0,3.72,68.5 +2020-05-13 09:00:00,7.88,264.0,1.29,263.0,3.72,66.05 +2020-05-13 10:00:00,8.99,224.0,0.0,224.0,3.86,61.45 +2020-05-13 11:00:00,9.66,224.0,0.0,224.0,3.86,59.25 +2020-05-13 12:00:00,10.12,211.0,0.0,211.0,3.79,55.05 +2020-05-13 13:00:00,10.53,229.0,0.0,229.0,3.66,55.15 +2020-05-13 14:00:00,10.88,305.0,30.69,285.0,3.38,53.1 +2020-05-13 15:00:00,11.03,256.0,45.76,232.0,3.03,51.25 +2020-05-13 16:00:00,10.69,151.0,18.49,144.0,2.62,53.1 +2020-05-13 17:00:00,10.52,79.0,13.42,76.0,2.41,53.1 +2020-05-13 18:00:00,10.04,18.0,14.2,17.0,1.86,55.05 +2020-05-13 19:00:00,8.65,0.0,-0.0,0.0,1.31,66.15 +2020-05-13 20:00:00,7.49,0.0,-0.0,0.0,0.97,76.7 +2020-05-13 21:00:00,7.67,0.0,-0.0,0.0,0.76,68.5 +2020-05-13 22:00:00,7.53,0.0,-0.0,0.0,0.55,68.5 +2020-05-13 23:00:00,7.14,0.0,-0.0,0.0,0.69,71.05 +2020-05-14 00:00:00,6.77,0.0,-0.0,0.0,0.76,71.05 +2020-05-14 01:00:00,6.51,0.0,-0.0,0.0,0.83,71.05 +2020-05-14 02:00:00,6.18,0.0,-0.0,0.0,0.83,73.7 +2020-05-14 03:00:00,5.63,0.0,-0.0,0.0,0.9,76.5 +2020-05-14 04:00:00,4.31,80.0,345.11,38.0,1.17,82.3 +2020-05-14 05:00:00,5.53,239.0,597.02,74.0,0.76,82.5 +2020-05-14 06:00:00,8.38,407.0,715.03,100.0,0.9,71.35 +2020-05-14 07:00:00,9.45,570.0,792.69,118.0,0.83,68.9 +2020-05-14 08:00:00,11.04,710.0,839.92,131.0,1.31,59.6 +2020-05-14 09:00:00,12.11,763.0,699.92,218.0,1.52,55.5 +2020-05-14 10:00:00,12.63,442.0,52.88,398.0,1.52,51.65 +2020-05-14 11:00:00,13.2,617.0,226.98,425.0,1.66,49.85 +2020-05-14 12:00:00,13.29,487.0,93.99,410.0,1.72,49.85 +2020-05-14 13:00:00,13.81,377.0,41.12,346.0,1.52,48.15 +2020-05-14 14:00:00,13.93,289.0,21.4,275.0,1.59,46.5 +2020-05-14 15:00:00,14.47,373.0,240.9,246.0,1.79,44.9 +2020-05-14 16:00:00,14.22,304.0,422.15,143.0,1.72,46.5 +2020-05-14 17:00:00,13.79,120.0,110.27,95.0,1.24,53.85 +2020-05-14 18:00:00,12.88,38.0,217.01,22.0,1.24,62.2 +2020-05-14 19:00:00,10.71,0.0,-0.0,0.0,1.93,66.55 +2020-05-14 20:00:00,8.78,0.0,-0.0,0.0,1.93,76.85 +2020-05-14 21:00:00,7.52,0.0,-0.0,0.0,1.93,79.65 +2020-05-14 22:00:00,6.88,0.0,-0.0,0.0,1.93,79.55 +2020-05-14 23:00:00,6.46,0.0,-0.0,0.0,1.93,79.55 +2020-05-15 00:00:00,5.74,0.0,-0.0,0.0,2.0,82.5 +2020-05-15 01:00:00,5.13,0.0,-0.0,0.0,2.0,85.6 +2020-05-15 02:00:00,5.04,0.0,-0.0,0.0,2.0,85.6 +2020-05-15 03:00:00,4.81,0.0,-0.0,0.0,2.07,88.85 +2020-05-15 04:00:00,4.76,15.0,0.0,15.0,2.0,88.85 +2020-05-15 05:00:00,6.94,38.0,0.0,38.0,1.66,85.75 +2020-05-15 
06:00:00,10.78,91.0,0.0,91.0,1.59,71.7 +2020-05-15 07:00:00,12.67,311.0,75.08,268.0,1.86,64.55 +2020-05-15 08:00:00,13.97,240.0,2.89,238.0,2.97,48.25 +2020-05-15 09:00:00,14.71,172.0,0.0,172.0,3.17,44.9 +2020-05-15 10:00:00,15.05,481.0,82.71,412.0,3.03,43.4 +2020-05-15 11:00:00,15.2,449.0,53.06,404.0,3.17,43.4 +2020-05-15 12:00:00,15.24,601.0,236.18,407.0,2.97,45.05 +2020-05-15 13:00:00,15.24,703.0,596.45,252.0,3.1,45.05 +2020-05-15 14:00:00,15.63,632.0,698.96,173.0,3.1,45.15 +2020-05-15 15:00:00,15.89,418.0,368.04,223.0,2.97,43.5 +2020-05-15 16:00:00,16.15,107.0,0.0,107.0,2.9,40.5 +2020-05-15 17:00:00,14.65,80.0,13.06,77.0,2.62,50.25 +2020-05-15 18:00:00,12.92,25.0,38.96,22.0,2.21,62.3 +2020-05-15 19:00:00,10.87,0.0,-0.0,0.0,2.07,80.05 +2020-05-15 20:00:00,10.08,0.0,-0.0,0.0,2.28,79.95 +2020-05-15 21:00:00,9.36,0.0,-0.0,0.0,2.41,85.95 +2020-05-15 22:00:00,9.15,0.0,-0.0,0.0,2.62,85.95 +2020-05-15 23:00:00,8.91,0.0,-0.0,0.0,2.69,82.85 +2020-05-16 00:00:00,8.48,0.0,-0.0,0.0,2.76,85.9 +2020-05-16 01:00:00,7.89,0.0,-0.0,0.0,3.1,85.85 +2020-05-16 02:00:00,7.38,0.0,-0.0,0.0,3.31,85.8 +2020-05-16 03:00:00,6.87,0.0,-0.0,0.0,3.17,85.75 +2020-05-16 04:00:00,6.71,7.0,0.0,7.0,3.24,85.75 +2020-05-16 05:00:00,6.96,80.0,0.0,80.0,3.66,89.0 +2020-05-16 06:00:00,7.36,74.0,0.0,74.0,3.72,89.0 +2020-05-16 07:00:00,7.7,202.0,3.48,200.0,4.48,82.65 +2020-05-16 08:00:00,8.32,191.0,0.0,191.0,4.76,76.8 +2020-05-16 09:00:00,9.08,208.0,0.0,208.0,5.31,66.25 +2020-05-16 10:00:00,9.95,173.0,0.0,173.0,5.45,61.65 +2020-05-16 11:00:00,10.14,229.0,0.0,229.0,5.17,59.35 +2020-05-16 12:00:00,10.91,206.0,0.0,206.0,5.17,55.3 +2020-05-16 13:00:00,11.61,370.0,35.6,343.0,5.45,53.35 +2020-05-16 14:00:00,11.85,480.0,253.38,313.0,5.1,55.4 +2020-05-16 15:00:00,12.06,431.0,403.82,216.0,5.24,51.5 +2020-05-16 16:00:00,11.79,355.0,671.82,95.0,5.31,51.35 +2020-05-16 17:00:00,10.82,155.0,270.65,92.0,4.97,59.5 +2020-05-16 18:00:00,9.97,14.0,0.0,14.0,3.86,61.65 +2020-05-16 19:00:00,8.79,0.0,-0.0,0.0,4.07,63.7 +2020-05-16 20:00:00,8.04,0.0,-0.0,0.0,4.07,68.6 +2020-05-16 21:00:00,7.46,0.0,-0.0,0.0,3.86,71.15 +2020-05-16 22:00:00,7.15,0.0,-0.0,0.0,3.72,76.65 +2020-05-16 23:00:00,6.69,0.0,-0.0,0.0,3.79,76.65 +2020-05-17 00:00:00,6.01,0.0,-0.0,0.0,3.66,76.55 +2020-05-17 01:00:00,5.6,0.0,-0.0,0.0,3.52,79.4 +2020-05-17 02:00:00,5.01,0.0,-0.0,0.0,3.31,79.35 +2020-05-17 03:00:00,4.64,0.0,-0.0,0.0,3.24,82.35 +2020-05-17 04:00:00,4.49,79.0,244.6,47.0,3.31,82.35 +2020-05-17 05:00:00,4.99,236.0,519.57,88.0,3.24,79.35 +2020-05-17 06:00:00,5.66,333.0,331.65,188.0,4.14,73.65 +2020-05-17 07:00:00,6.35,464.0,372.29,249.0,4.69,70.95 +2020-05-17 08:00:00,7.28,572.0,379.21,308.0,4.97,61.0 +2020-05-17 09:00:00,8.35,641.0,357.89,360.0,4.69,54.55 +2020-05-17 10:00:00,8.68,622.0,242.14,419.0,4.41,52.6 +2020-05-17 11:00:00,9.6,573.0,159.59,437.0,4.55,47.05 +2020-05-17 12:00:00,10.33,620.0,255.56,409.0,4.55,47.2 +2020-05-17 13:00:00,10.57,563.0,248.49,374.0,4.14,45.55 +2020-05-17 14:00:00,10.92,502.0,288.76,311.0,3.93,43.95 +2020-05-17 15:00:00,11.01,423.0,360.78,230.0,3.45,45.7 +2020-05-17 16:00:00,10.97,223.0,115.46,178.0,2.9,45.7 +2020-05-17 17:00:00,10.85,118.0,84.84,98.0,2.28,49.2 +2020-05-17 18:00:00,10.15,41.0,155.92,28.0,1.38,57.15 +2020-05-17 19:00:00,8.44,0.0,-0.0,0.0,1.17,68.7 +2020-05-17 20:00:00,9.33,0.0,-0.0,0.0,0.21,54.8 +2020-05-17 21:00:00,7.9,0.0,-0.0,0.0,1.1,56.7 +2020-05-17 22:00:00,4.35,0.0,-0.0,0.0,2.0,73.45 +2020-05-17 23:00:00,3.21,0.0,-0.0,0.0,2.14,79.1 +2020-05-18 00:00:00,2.73,0.0,-0.0,0.0,2.21,82.15 +2020-05-18 
01:00:00,2.54,0.0,-0.0,0.0,2.34,79.0 +2020-05-18 02:00:00,2.65,0.0,-0.0,0.0,2.55,79.0 +2020-05-18 03:00:00,2.77,0.0,-0.0,0.0,2.62,79.0 +2020-05-18 04:00:00,3.37,75.0,187.03,50.0,2.69,73.3 +2020-05-18 05:00:00,5.74,213.0,358.28,110.0,2.9,68.2 +2020-05-18 06:00:00,8.01,355.0,420.8,170.0,3.38,63.6 +2020-05-18 07:00:00,9.01,447.0,324.28,259.0,3.79,61.45 +2020-05-18 08:00:00,10.35,606.0,472.58,276.0,4.21,55.05 +2020-05-18 09:00:00,11.41,539.0,182.94,395.0,4.34,49.45 +2020-05-18 10:00:00,12.53,578.0,180.88,426.0,4.28,46.1 +2020-05-18 11:00:00,13.29,687.0,336.01,400.0,4.34,44.5 +2020-05-18 12:00:00,14.04,572.0,184.85,419.0,4.55,41.5 +2020-05-18 13:00:00,14.55,430.0,76.04,372.0,4.48,41.65 +2020-05-18 14:00:00,14.58,441.0,171.75,327.0,4.07,41.65 +2020-05-18 15:00:00,14.27,353.0,180.48,256.0,3.93,44.75 +2020-05-18 16:00:00,14.27,369.0,713.47,89.0,3.45,46.5 +2020-05-18 17:00:00,13.87,147.0,201.14,99.0,2.62,50.0 +2020-05-18 18:00:00,12.74,41.0,150.32,28.0,2.41,51.65 +2020-05-18 19:00:00,11.35,0.0,-0.0,0.0,2.48,57.4 +2020-05-18 20:00:00,10.28,0.0,-0.0,0.0,2.62,59.35 +2020-05-18 21:00:00,9.7,0.0,-0.0,0.0,2.76,59.25 +2020-05-18 22:00:00,9.33,0.0,-0.0,0.0,2.9,59.15 +2020-05-18 23:00:00,9.06,0.0,-0.0,0.0,3.03,59.15 +2020-05-19 00:00:00,8.79,0.0,-0.0,0.0,3.03,63.7 +2020-05-19 01:00:00,8.81,0.0,-0.0,0.0,2.97,66.15 +2020-05-19 02:00:00,8.51,0.0,-0.0,0.0,2.9,66.15 +2020-05-19 03:00:00,8.25,0.0,-0.0,0.0,2.9,68.6 +2020-05-19 04:00:00,8.07,78.0,212.59,49.0,2.76,71.25 +2020-05-19 05:00:00,9.51,219.0,399.97,103.0,2.55,68.9 +2020-05-19 06:00:00,11.84,382.0,563.38,133.0,2.48,66.75 +2020-05-19 07:00:00,14.47,555.0,725.19,133.0,2.62,58.25 +2020-05-19 08:00:00,15.96,692.0,779.65,146.0,2.69,54.45 +2020-05-19 09:00:00,17.79,750.0,665.31,225.0,2.69,52.75 +2020-05-19 10:00:00,19.21,869.0,856.07,148.0,2.76,47.65 +2020-05-19 11:00:00,20.22,895.0,884.29,138.0,2.83,44.6 +2020-05-19 12:00:00,20.96,849.0,840.1,152.0,2.76,43.1 +2020-05-19 13:00:00,21.5,767.0,804.09,152.0,2.62,40.2 +2020-05-19 14:00:00,21.78,654.0,774.77,138.0,2.48,38.9 +2020-05-19 15:00:00,21.77,517.0,740.89,117.0,2.48,38.9 +2020-05-19 16:00:00,21.43,340.0,574.54,113.0,2.48,41.7 +2020-05-19 17:00:00,20.44,190.0,488.64,72.0,2.07,49.7 +2020-05-19 18:00:00,18.59,41.0,122.86,30.0,2.34,54.95 +2020-05-19 19:00:00,17.19,0.0,-0.0,0.0,2.76,54.7 +2020-05-19 20:00:00,15.93,0.0,-0.0,0.0,2.97,56.35 +2020-05-19 21:00:00,15.08,0.0,-0.0,0.0,3.03,54.25 +2020-05-19 22:00:00,14.36,0.0,-0.0,0.0,3.03,56.0 +2020-05-19 23:00:00,13.71,0.0,-0.0,0.0,3.03,58.0 +2020-05-20 00:00:00,13.12,0.0,-0.0,0.0,3.03,60.05 +2020-05-20 01:00:00,12.43,0.0,-0.0,0.0,3.03,62.2 +2020-05-20 02:00:00,11.92,0.0,-0.0,0.0,3.03,62.1 +2020-05-20 03:00:00,11.54,0.0,-0.0,0.0,3.1,64.35 +2020-05-20 04:00:00,11.38,89.0,309.24,46.0,3.03,69.15 +2020-05-20 05:00:00,12.54,240.0,526.58,86.0,2.97,66.95 +2020-05-20 06:00:00,14.67,404.0,655.08,113.0,2.97,64.95 +2020-05-20 07:00:00,17.0,568.0,756.86,126.0,3.59,56.75 +2020-05-20 08:00:00,18.52,707.0,811.66,137.0,3.86,54.95 +2020-05-20 09:00:00,20.02,818.0,858.45,139.0,4.0,51.5 +2020-05-20 10:00:00,21.43,882.0,877.91,141.0,4.0,48.2 +2020-05-20 11:00:00,22.64,908.0,905.69,131.0,4.0,43.65 +2020-05-20 12:00:00,23.62,880.0,907.91,125.0,3.93,42.35 +2020-05-20 13:00:00,24.28,796.0,871.06,128.0,3.93,41.0 +2020-05-20 14:00:00,24.65,676.0,827.59,123.0,3.93,39.7 +2020-05-20 15:00:00,24.68,527.0,761.6,114.0,3.93,39.7 +2020-05-20 16:00:00,24.22,350.0,611.03,107.0,3.38,44.05 +2020-05-20 17:00:00,23.13,122.0,85.96,101.0,3.1,48.7 +2020-05-20 18:00:00,21.38,19.0,0.0,19.0,2.97,53.6 +2020-05-20 
19:00:00,19.84,0.0,-0.0,0.0,3.1,53.25 +2020-05-20 20:00:00,18.67,0.0,-0.0,0.0,3.1,54.95 +2020-05-20 21:00:00,17.87,0.0,-0.0,0.0,3.1,56.75 +2020-05-20 22:00:00,17.37,0.0,-0.0,0.0,3.24,54.7 +2020-05-20 23:00:00,17.06,0.0,-0.0,0.0,3.38,54.7 +2020-05-21 00:00:00,16.94,0.0,-0.0,0.0,3.52,56.6 +2020-05-21 01:00:00,16.8,0.0,-0.0,0.0,3.59,56.6 +2020-05-21 02:00:00,16.45,0.0,-0.0,0.0,3.66,58.6 +2020-05-21 03:00:00,16.11,0.0,-0.0,0.0,3.72,60.75 +2020-05-21 04:00:00,15.85,64.0,84.75,52.0,3.72,65.15 +2020-05-21 05:00:00,16.61,207.0,318.87,113.0,3.86,65.35 +2020-05-21 06:00:00,18.14,260.0,127.7,203.0,3.59,65.55 +2020-05-21 07:00:00,19.84,239.0,17.07,229.0,4.76,61.4 +2020-05-21 08:00:00,21.67,272.0,8.52,266.0,4.97,53.75 +2020-05-21 09:00:00,23.6,304.0,7.57,298.0,5.03,48.8 +2020-05-21 10:00:00,25.23,136.0,0.0,136.0,5.66,42.75 +2020-05-21 11:00:00,25.92,127.0,0.0,127.0,6.41,39.95 +2020-05-21 12:00:00,24.77,250.0,0.0,250.0,6.21,45.75 +2020-05-21 13:00:00,26.07,385.0,46.82,349.0,6.97,37.35 +2020-05-21 14:00:00,26.0,371.0,83.54,315.0,6.55,35.9 +2020-05-21 15:00:00,25.8,188.0,5.51,185.0,6.21,35.9 +2020-05-21 16:00:00,25.2,114.0,0.0,114.0,5.38,38.45 +2020-05-21 17:00:00,24.12,86.0,16.19,82.0,4.83,41.0 +2020-05-21 18:00:00,22.79,20.0,0.0,20.0,4.55,43.65 +2020-05-21 19:00:00,21.81,0.0,-0.0,0.0,4.41,44.95 +2020-05-21 20:00:00,21.01,0.0,-0.0,0.0,4.41,46.5 +2020-05-21 21:00:00,20.04,0.0,-0.0,0.0,4.21,51.5 +2020-05-21 22:00:00,18.83,0.0,-0.0,0.0,3.72,61.2 +2020-05-21 23:00:00,17.97,0.0,-0.0,0.0,3.38,65.55 +2020-05-22 00:00:00,17.48,0.0,-0.0,0.0,3.38,67.85 +2020-05-22 01:00:00,17.16,0.0,-0.0,0.0,3.24,70.3 +2020-05-22 02:00:00,16.9,0.0,-0.0,0.0,3.1,72.7 +2020-05-22 03:00:00,16.59,0.0,-0.0,0.0,3.03,72.7 +2020-05-22 04:00:00,16.48,23.0,0.0,23.0,2.9,72.7 +2020-05-22 05:00:00,17.36,142.0,67.33,122.0,2.76,72.8 +2020-05-22 06:00:00,19.12,372.0,508.45,144.0,2.55,70.55 +2020-05-22 07:00:00,21.34,520.0,586.85,175.0,3.38,61.7 +2020-05-22 08:00:00,22.9,664.0,691.3,176.0,3.59,57.9 +2020-05-22 09:00:00,24.18,753.0,685.99,208.0,3.59,50.7 +2020-05-22 10:00:00,24.96,820.0,724.48,206.0,3.59,47.4 +2020-05-22 11:00:00,25.45,315.0,4.64,311.0,3.52,44.3 +2020-05-22 12:00:00,25.55,599.0,239.45,399.0,3.52,42.9 +2020-05-22 13:00:00,25.97,280.0,5.19,276.0,3.66,42.9 +2020-05-22 14:00:00,25.7,540.0,398.54,272.0,3.59,42.9 +2020-05-22 15:00:00,25.53,507.0,700.29,124.0,3.45,45.9 +2020-05-22 16:00:00,25.1,341.0,566.14,113.0,2.28,49.2 +2020-05-22 17:00:00,24.63,90.0,20.02,85.0,1.79,54.45 +2020-05-22 18:00:00,23.25,7.0,0.0,7.0,2.9,60.05 +2020-05-22 19:00:00,22.14,0.0,-0.0,0.0,1.79,57.75 +2020-05-22 20:00:00,20.19,0.0,-0.0,0.0,1.72,68.3 +2020-05-22 21:00:00,19.18,0.0,-0.0,0.0,1.66,70.55 +2020-05-22 22:00:00,19.17,0.0,-0.0,0.0,1.45,68.1 +2020-05-22 23:00:00,18.52,0.0,-0.0,0.0,1.52,70.45 +2020-05-23 00:00:00,17.37,0.0,-0.0,0.0,1.72,75.4 +2020-05-23 01:00:00,16.36,0.0,-0.0,0.0,1.86,77.95 +2020-05-23 02:00:00,15.58,0.0,-0.0,0.0,1.79,80.65 +2020-05-23 03:00:00,14.93,0.0,0.0,0.0,1.79,83.45 +2020-05-23 04:00:00,14.45,85.0,211.8,54.0,1.72,83.45 +2020-05-23 05:00:00,16.2,239.0,491.35,92.0,1.38,83.6 +2020-05-23 06:00:00,20.05,405.0,648.33,113.0,1.38,65.95 +2020-05-23 07:00:00,21.91,568.0,751.23,125.0,1.86,68.6 +2020-05-23 08:00:00,23.4,709.0,816.82,131.0,2.48,62.15 +2020-05-23 09:00:00,24.78,819.0,865.46,130.0,3.17,50.8 +2020-05-23 10:00:00,25.51,645.0,289.71,399.0,3.66,42.75 +2020-05-23 11:00:00,25.76,862.0,786.71,183.0,3.66,39.95 +2020-05-23 12:00:00,26.07,779.0,620.06,260.0,3.59,38.7 +2020-05-23 13:00:00,26.49,641.0,418.02,318.0,3.59,40.1 +2020-05-23 
14:00:00,26.66,651.0,745.72,148.0,3.52,38.85 +2020-05-23 15:00:00,26.62,506.0,671.94,137.0,3.52,38.85 +2020-05-23 16:00:00,26.21,343.0,552.87,119.0,3.72,40.1 +2020-05-23 17:00:00,24.68,188.0,416.09,83.0,3.66,47.4 +2020-05-23 18:00:00,23.25,27.0,9.89,26.0,3.38,54.1 +2020-05-23 19:00:00,22.2,0.0,-0.0,0.0,2.48,52.0 +2020-05-23 20:00:00,20.82,0.0,-0.0,0.0,2.28,59.5 +2020-05-23 21:00:00,19.51,0.0,-0.0,0.0,2.28,65.85 +2020-05-23 22:00:00,18.64,0.0,-0.0,0.0,2.55,72.95 +2020-05-23 23:00:00,18.05,0.0,-0.0,0.0,2.55,75.45 +2020-05-24 00:00:00,16.86,0.0,-0.0,0.0,1.86,83.65 +2020-05-24 01:00:00,15.88,0.0,-0.0,0.0,1.86,86.5 +2020-05-24 02:00:00,15.12,0.0,-0.0,0.0,2.14,89.55 +2020-05-24 03:00:00,14.81,0.0,0.0,0.0,2.28,92.7 +2020-05-24 04:00:00,14.93,80.0,161.51,56.0,2.48,92.7 +2020-05-24 05:00:00,15.67,208.0,288.83,121.0,2.55,80.65 +2020-05-24 06:00:00,16.53,399.0,603.64,126.0,3.1,67.75 +2020-05-24 07:00:00,17.2,496.0,464.97,221.0,3.17,75.4 +2020-05-24 08:00:00,18.37,653.0,613.33,218.0,3.38,72.9 +2020-05-24 09:00:00,19.68,687.0,467.61,314.0,3.66,63.6 +2020-05-24 10:00:00,20.73,749.0,499.59,324.0,3.93,57.45 +2020-05-24 11:00:00,21.54,847.0,725.11,220.0,4.14,48.3 +2020-05-24 12:00:00,22.04,862.0,852.49,147.0,4.34,41.95 +2020-05-24 13:00:00,22.42,776.0,806.94,151.0,4.55,40.5 +2020-05-24 14:00:00,22.53,668.0,787.86,135.0,4.62,37.75 +2020-05-24 15:00:00,22.4,500.0,629.38,153.0,4.69,39.05 +2020-05-24 16:00:00,21.9,324.0,436.76,146.0,4.62,38.9 +2020-05-24 17:00:00,21.16,175.0,305.97,97.0,3.93,41.7 +2020-05-24 18:00:00,19.96,39.0,48.13,34.0,3.79,42.85 +2020-05-24 19:00:00,18.69,0.0,-0.0,0.0,3.24,42.6 +2020-05-24 20:00:00,17.49,0.0,-0.0,0.0,3.17,43.9 +2020-05-24 21:00:00,16.36,0.0,-0.0,0.0,2.83,43.65 +2020-05-24 22:00:00,15.22,0.0,-0.0,0.0,2.55,45.05 +2020-05-24 23:00:00,13.99,0.0,-0.0,0.0,2.28,48.25 +2020-05-25 00:00:00,12.44,0.0,-0.0,0.0,2.0,55.65 +2020-05-25 01:00:00,11.09,0.0,-0.0,0.0,1.93,61.85 +2020-05-25 02:00:00,9.98,0.0,-0.0,0.0,1.93,66.45 +2020-05-25 03:00:00,9.12,0.0,0.0,0.0,2.0,71.4 +2020-05-25 04:00:00,9.01,52.0,19.9,49.0,1.86,76.95 +2020-05-25 05:00:00,11.21,163.0,102.26,132.0,1.72,71.75 +2020-05-25 06:00:00,13.02,428.0,709.19,106.0,2.34,62.3 +2020-05-25 07:00:00,14.1,589.0,792.48,119.0,2.62,62.55 +2020-05-25 08:00:00,15.55,730.0,846.94,128.0,2.83,58.45 +2020-05-25 09:00:00,16.95,833.0,870.9,137.0,3.24,56.6 +2020-05-25 10:00:00,18.02,898.0,889.47,140.0,3.52,51.0 +2020-05-25 11:00:00,18.81,925.0,917.75,130.0,3.59,47.55 +2020-05-25 12:00:00,19.43,879.0,872.24,146.0,3.52,44.3 +2020-05-25 13:00:00,19.99,796.0,837.29,146.0,3.59,41.45 +2020-05-25 14:00:00,20.31,683.0,809.19,134.0,3.66,39.95 +2020-05-25 15:00:00,20.39,539.0,758.84,119.0,3.72,38.5 +2020-05-25 16:00:00,20.14,371.0,648.98,105.0,3.66,37.1 +2020-05-25 17:00:00,19.63,211.0,536.05,73.0,2.97,39.8 +2020-05-25 18:00:00,18.59,58.0,197.06,37.0,2.41,44.2 +2020-05-25 19:00:00,16.96,0.0,-0.0,0.0,2.0,43.9 +2020-05-25 20:00:00,15.36,0.0,-0.0,0.0,2.07,50.35 +2020-05-25 21:00:00,13.65,0.0,-0.0,0.0,2.0,60.2 +2020-05-25 22:00:00,12.65,0.0,-0.0,0.0,1.79,64.55 +2020-05-25 23:00:00,11.7,0.0,-0.0,0.0,1.86,64.35 +2020-05-26 00:00:00,11.03,0.0,-0.0,0.0,1.86,64.25 +2020-05-26 01:00:00,10.83,0.0,-0.0,0.0,1.72,66.55 +2020-05-26 02:00:00,10.75,0.0,-0.0,0.0,1.52,64.1 +2020-05-26 03:00:00,10.57,0.0,0.0,0.0,1.45,64.1 +2020-05-26 04:00:00,10.08,104.0,353.5,50.0,1.45,69.0 +2020-05-26 05:00:00,12.08,255.0,554.08,86.0,0.83,66.85 +2020-05-26 06:00:00,15.14,421.0,680.24,111.0,0.9,56.25 +2020-05-26 07:00:00,16.79,570.0,728.19,137.0,1.52,56.6 +2020-05-26 
+[... continuation of the added hourly CSV data file: one `+`-prefixed row per hour from 2020-05-26 08:00:00 through 2020-07-20 21:00:00, each row a timestamp plus five numeric series (apparently temperature, two irradiance-related values, a demand-like value, wind speed, and relative humidity, per the file header earlier in the patch). The rows were flattened during extraction and are elided here; the underlying data in the patch is unchanged. ...]
22:00:00,15.68,0.0,-0.0,0.0,2.0,75.15 +2020-07-20 23:00:00,15.29,0.0,-0.0,0.0,2.0,77.8 +2020-07-21 00:00:00,14.64,0.0,-0.0,0.0,2.14,80.55 +2020-07-21 01:00:00,14.0,0.0,-0.0,0.0,2.07,83.4 +2020-07-21 02:00:00,13.73,0.0,-0.0,0.0,1.93,86.35 +2020-07-21 03:00:00,13.19,0.0,-0.0,0.0,1.93,86.3 +2020-07-21 04:00:00,12.7,33.0,8.39,32.0,1.86,89.4 +2020-07-21 05:00:00,14.24,63.0,0.0,63.0,1.52,86.4 +2020-07-21 06:00:00,16.62,170.0,21.23,161.0,1.38,78.0 +2020-07-21 07:00:00,18.73,261.0,37.1,240.0,1.59,75.55 +2020-07-21 08:00:00,20.07,388.0,91.55,325.0,1.79,68.3 +2020-07-21 09:00:00,21.11,270.0,2.56,268.0,2.83,59.6 +2020-07-21 10:00:00,21.74,535.0,146.27,412.0,3.24,53.75 +2020-07-21 11:00:00,22.16,609.0,224.11,416.0,2.76,52.0 +2020-07-21 12:00:00,23.06,0.0,0.0,0.0,2.62,50.3 +2020-07-21 13:00:00,23.86,0.0,0.0,0.0,2.28,47.15 +2020-07-21 14:00:00,24.37,0.0,0.0,0.0,1.93,47.25 +2020-07-21 15:00:00,24.86,182.0,1.76,181.0,2.07,47.4 +2020-07-21 16:00:00,24.67,246.0,129.24,191.0,2.0,49.05 +2020-07-21 17:00:00,23.4,185.0,285.51,107.0,0.9,71.25 +2020-07-21 18:00:00,23.3,74.0,264.97,42.0,0.48,68.85 +2020-07-21 19:00:00,21.72,0.0,-0.0,0.0,1.93,71.0 +2020-07-21 20:00:00,19.6,0.0,-0.0,0.0,2.07,78.35 +2020-07-21 21:00:00,18.65,0.0,-0.0,0.0,2.28,83.8 +2020-07-21 22:00:00,18.24,0.0,-0.0,0.0,2.41,81.0 +2020-07-21 23:00:00,18.18,0.0,-0.0,0.0,2.55,81.0 +2020-07-22 00:00:00,18.35,0.0,-0.0,0.0,2.55,83.8 +2020-07-22 01:00:00,18.25,0.0,-0.0,0.0,2.48,81.0 +2020-07-22 02:00:00,18.19,0.0,-0.0,0.0,2.07,83.8 +2020-07-22 03:00:00,17.99,0.0,-0.0,0.0,2.34,89.75 +2020-07-22 04:00:00,18.5,24.0,0.0,24.0,2.9,86.75 +2020-07-22 05:00:00,19.07,22.0,0.0,22.0,3.03,86.8 +2020-07-22 06:00:00,20.73,32.0,0.0,32.0,2.97,84.05 +2020-07-22 07:00:00,21.33,103.0,0.0,103.0,1.59,81.3 +2020-07-22 08:00:00,22.73,327.0,42.27,298.0,1.31,76.15 +2020-07-22 09:00:00,25.28,350.0,26.92,329.0,2.48,66.95 +2020-07-22 10:00:00,27.15,594.0,237.15,395.0,2.62,60.9 +2020-07-22 11:00:00,27.11,271.0,1.16,270.0,3.17,60.9 +2020-07-22 12:00:00,24.55,254.0,0.0,254.0,2.97,73.85 +2020-07-22 13:00:00,23.91,105.0,0.0,105.0,2.21,76.3 +2020-07-22 14:00:00,24.75,225.0,2.91,223.0,1.59,73.95 +2020-07-22 15:00:00,26.0,421.0,328.91,235.0,2.07,67.05 +2020-07-22 16:00:00,26.19,105.0,0.0,105.0,1.86,62.8 +2020-07-22 17:00:00,25.39,171.0,228.91,109.0,1.1,69.25 +2020-07-22 18:00:00,24.26,67.0,203.01,43.0,1.24,76.35 +2020-07-22 19:00:00,22.57,0.0,-0.0,0.0,2.0,84.25 +2020-07-22 20:00:00,21.34,0.0,-0.0,0.0,2.21,89.95 +2020-07-22 21:00:00,20.54,0.0,-0.0,0.0,2.28,86.9 +2020-07-22 22:00:00,20.07,0.0,-0.0,0.0,2.34,86.85 +2020-07-22 23:00:00,19.76,0.0,-0.0,0.0,2.41,86.85 +2020-07-23 00:00:00,19.54,0.0,-0.0,0.0,2.48,81.1 +2020-07-23 01:00:00,19.12,0.0,-0.0,0.0,2.55,81.05 +2020-07-23 02:00:00,18.94,0.0,-0.0,0.0,2.69,81.0 +2020-07-23 03:00:00,19.08,0.0,-0.0,0.0,2.83,78.3 +2020-07-23 04:00:00,19.56,52.0,96.9,41.0,2.97,75.7 +2020-07-23 05:00:00,20.77,51.0,0.0,51.0,2.9,75.85 +2020-07-23 06:00:00,22.44,222.0,93.06,183.0,2.48,73.55 +2020-07-23 07:00:00,24.29,433.0,377.51,221.0,3.1,69.05 +2020-07-23 08:00:00,26.16,340.0,51.17,305.0,3.31,62.8 +2020-07-23 09:00:00,27.85,112.0,0.0,112.0,3.31,57.05 +2020-07-23 10:00:00,28.78,129.0,0.0,129.0,3.52,53.55 +2020-07-23 11:00:00,29.46,453.0,62.96,399.0,3.38,55.4 +2020-07-23 12:00:00,29.68,645.0,328.23,370.0,3.03,55.5 +2020-07-23 13:00:00,28.82,366.0,34.65,339.0,1.1,63.3 +2020-07-23 14:00:00,30.81,411.0,126.91,324.0,2.28,53.9 +2020-07-23 15:00:00,27.31,46.0,0.0,46.0,3.24,63.0 +2020-07-23 16:00:00,26.24,265.0,189.95,185.0,2.41,67.15 +2020-07-23 
17:00:00,26.09,165.0,208.64,109.0,3.72,64.8 +2020-07-23 18:00:00,24.32,18.0,0.0,18.0,4.62,64.5 +2020-07-23 19:00:00,23.57,0.0,-0.0,0.0,3.72,68.95 +2020-07-23 20:00:00,21.07,0.0,-0.0,0.0,3.86,81.3 +2020-07-23 21:00:00,20.21,0.0,-0.0,0.0,3.59,83.95 +2020-07-23 22:00:00,19.58,0.0,-0.0,0.0,3.45,86.85 +2020-07-23 23:00:00,19.09,0.0,-0.0,0.0,3.38,86.8 +2020-07-24 00:00:00,18.79,0.0,-0.0,0.0,3.45,86.75 +2020-07-24 01:00:00,18.27,0.0,-0.0,0.0,3.38,83.8 +2020-07-24 02:00:00,17.9,0.0,-0.0,0.0,3.38,86.7 +2020-07-24 03:00:00,17.61,0.0,-0.0,0.0,3.31,86.7 +2020-07-24 04:00:00,17.61,38.0,27.12,35.0,3.52,83.75 +2020-07-24 05:00:00,17.5,142.0,125.21,109.0,3.72,86.65 +2020-07-24 06:00:00,17.29,326.0,427.28,148.0,3.79,89.7 +2020-07-24 07:00:00,17.94,348.0,159.14,259.0,3.79,86.7 +2020-07-24 08:00:00,19.13,506.0,297.71,303.0,3.45,83.85 +2020-07-24 09:00:00,20.29,555.0,247.41,363.0,3.24,81.15 +2020-07-24 10:00:00,21.06,670.0,374.68,357.0,3.24,75.95 +2020-07-24 11:00:00,22.09,332.0,8.18,325.0,3.45,71.05 +2020-07-24 12:00:00,22.43,529.0,144.74,408.0,3.93,68.7 +2020-07-24 13:00:00,21.87,251.0,1.29,250.0,3.86,66.25 +2020-07-24 14:00:00,21.96,219.0,1.46,218.0,4.21,59.7 +2020-07-24 15:00:00,22.11,334.0,131.85,260.0,4.07,52.0 +2020-07-24 16:00:00,21.91,285.0,250.71,180.0,3.03,55.65 +2020-07-24 17:00:00,21.57,181.0,300.91,101.0,2.62,55.65 +2020-07-24 18:00:00,20.78,62.0,186.12,41.0,2.69,57.45 +2020-07-24 19:00:00,18.48,0.0,-0.0,0.0,2.83,70.45 +2020-07-24 20:00:00,17.63,0.0,-0.0,0.0,2.21,70.35 +2020-07-24 21:00:00,17.29,0.0,-0.0,0.0,2.07,72.8 +2020-07-24 22:00:00,16.93,0.0,-0.0,0.0,2.07,75.3 +2020-07-24 23:00:00,16.57,0.0,-0.0,0.0,2.21,75.3 +2020-07-25 00:00:00,16.08,0.0,-0.0,0.0,2.21,77.95 +2020-07-25 01:00:00,15.7,0.0,-0.0,0.0,2.0,83.55 +2020-07-25 02:00:00,14.99,0.0,-0.0,0.0,1.93,86.5 +2020-07-25 03:00:00,14.48,0.0,-0.0,0.0,2.07,86.45 +2020-07-25 04:00:00,14.62,6.0,0.0,6.0,2.41,86.45 +2020-07-25 05:00:00,15.37,21.0,0.0,21.0,2.76,86.5 +2020-07-25 06:00:00,15.13,93.0,0.0,93.0,3.59,83.5 +2020-07-25 07:00:00,14.1,226.0,16.16,217.0,4.41,89.5 +2020-07-25 08:00:00,14.15,382.0,89.75,321.0,4.41,89.5 +2020-07-25 09:00:00,15.31,637.0,409.56,320.0,3.86,86.5 +2020-07-25 10:00:00,17.61,391.0,31.2,365.0,4.69,67.95 +2020-07-25 11:00:00,18.64,590.0,200.26,419.0,5.17,61.2 +2020-07-25 12:00:00,18.83,591.0,224.21,404.0,5.52,61.2 +2020-07-25 13:00:00,18.78,767.0,783.02,160.0,5.79,59.05 +2020-07-25 14:00:00,18.59,59.0,0.0,59.0,5.93,56.95 +2020-07-25 15:00:00,18.33,295.0,73.35,254.0,5.59,54.95 +2020-07-25 16:00:00,18.21,73.0,0.0,73.0,5.24,54.95 +2020-07-25 17:00:00,18.0,154.0,163.37,111.0,4.9,56.85 +2020-07-25 18:00:00,17.07,69.0,281.87,38.0,4.48,63.2 +2020-07-25 19:00:00,15.85,0.0,-0.0,0.0,4.21,70.0 +2020-07-25 20:00:00,15.56,0.0,-0.0,0.0,3.93,72.55 +2020-07-25 21:00:00,15.2,0.0,-0.0,0.0,3.79,75.1 +2020-07-25 22:00:00,14.97,0.0,-0.0,0.0,3.86,77.75 +2020-07-25 23:00:00,14.64,0.0,-0.0,0.0,3.79,77.75 +2020-07-26 00:00:00,14.39,0.0,-0.0,0.0,3.59,80.45 +2020-07-26 01:00:00,14.17,0.0,-0.0,0.0,3.38,80.45 +2020-07-26 02:00:00,13.76,0.0,-0.0,0.0,3.17,80.4 +2020-07-26 03:00:00,13.38,0.0,-0.0,0.0,3.1,83.25 +2020-07-26 04:00:00,12.94,31.0,9.56,30.0,3.03,86.25 +2020-07-26 05:00:00,13.29,124.0,69.77,106.0,2.76,86.3 +2020-07-26 06:00:00,14.1,262.0,184.71,186.0,3.79,80.45 +2020-07-26 07:00:00,14.36,327.0,119.02,261.0,3.72,86.4 +2020-07-26 08:00:00,15.59,582.0,484.16,254.0,3.79,72.55 +2020-07-26 09:00:00,16.7,582.0,286.31,361.0,3.79,63.05 +2020-07-26 10:00:00,17.74,710.0,447.43,338.0,4.62,54.85 +2020-07-26 11:00:00,18.11,581.0,184.29,424.0,4.48,51.15 
+2020-07-26 12:00:00,18.25,746.0,533.62,302.0,4.48,51.15 +2020-07-26 13:00:00,18.25,753.0,725.62,192.0,3.79,51.15 +2020-07-26 14:00:00,19.11,639.0,664.01,188.0,4.0,49.45 +2020-07-26 15:00:00,19.08,464.0,458.1,209.0,3.45,49.45 +2020-07-26 16:00:00,19.11,361.0,582.37,120.0,3.17,49.45 +2020-07-26 17:00:00,18.77,133.0,92.14,109.0,2.28,54.95 +2020-07-26 18:00:00,17.54,66.0,270.95,37.0,1.24,67.95 +2020-07-26 19:00:00,18.43,0.0,-0.0,0.0,0.62,54.95 +2020-07-26 20:00:00,16.84,0.0,-0.0,0.0,1.03,63.05 +2020-07-26 21:00:00,13.2,0.0,-0.0,0.0,1.93,80.35 +2020-07-26 22:00:00,12.41,0.0,-0.0,0.0,2.0,86.2 +2020-07-26 23:00:00,12.32,0.0,-0.0,0.0,2.0,86.2 +2020-07-27 00:00:00,12.53,0.0,-0.0,0.0,2.07,83.25 +2020-07-27 01:00:00,12.57,0.0,-0.0,0.0,2.14,83.25 +2020-07-27 02:00:00,12.29,0.0,-0.0,0.0,2.28,83.15 +2020-07-27 03:00:00,12.18,0.0,-0.0,0.0,2.34,83.15 +2020-07-27 04:00:00,12.22,59.0,236.24,35.0,2.41,80.2 +2020-07-27 05:00:00,13.66,203.0,493.81,77.0,2.28,77.6 +2020-07-27 06:00:00,16.11,366.0,638.44,105.0,2.14,70.1 +2020-07-27 07:00:00,18.35,507.0,661.15,142.0,2.55,65.65 +2020-07-27 08:00:00,20.0,557.0,426.55,269.0,3.03,61.4 +2020-07-27 09:00:00,21.36,762.0,783.36,159.0,3.31,57.55 +2020-07-27 10:00:00,22.45,842.0,828.36,155.0,3.72,53.85 +2020-07-27 11:00:00,23.25,897.0,913.06,121.0,3.86,48.7 +2020-07-27 12:00:00,23.92,873.0,906.04,121.0,3.86,48.8 +2020-07-27 13:00:00,24.44,801.0,881.98,121.0,3.86,48.95 +2020-07-27 14:00:00,24.8,684.0,831.69,121.0,3.72,47.4 +2020-07-27 15:00:00,24.99,536.0,756.03,117.0,3.52,49.05 +2020-07-27 16:00:00,24.9,370.0,642.03,106.0,3.1,49.05 +2020-07-27 17:00:00,24.17,205.0,485.19,80.0,2.41,56.25 +2020-07-27 18:00:00,22.64,61.0,240.43,36.0,2.41,62.05 +2020-07-27 19:00:00,20.95,0.0,-0.0,0.0,2.62,61.6 +2020-07-27 20:00:00,19.75,0.0,-0.0,0.0,2.76,63.6 +2020-07-27 21:00:00,18.99,0.0,-0.0,0.0,2.9,65.65 +2020-07-27 22:00:00,18.4,0.0,-0.0,0.0,2.83,65.65 +2020-07-27 23:00:00,17.84,0.0,-0.0,0.0,2.41,67.95 +2020-07-28 00:00:00,16.8,0.0,-0.0,0.0,2.07,75.3 +2020-07-28 01:00:00,16.64,0.0,-0.0,0.0,2.28,75.3 +2020-07-28 02:00:00,17.12,0.0,-0.0,0.0,2.83,75.4 +2020-07-28 03:00:00,17.02,0.0,-0.0,0.0,2.97,80.85 +2020-07-28 04:00:00,16.76,5.0,0.0,5.0,2.83,86.6 +2020-07-28 05:00:00,16.74,57.0,0.0,57.0,2.9,89.65 +2020-07-28 06:00:00,16.74,36.0,0.0,36.0,3.1,89.65 +2020-07-28 07:00:00,17.6,62.0,0.0,62.0,3.38,80.9 +2020-07-28 08:00:00,17.52,106.0,0.0,106.0,3.31,80.9 +2020-07-28 09:00:00,17.8,529.0,208.45,369.0,3.24,78.15 +2020-07-28 10:00:00,18.59,145.0,0.0,145.0,3.24,70.45 +2020-07-28 11:00:00,19.87,625.0,265.4,400.0,3.59,63.6 +2020-07-28 12:00:00,20.51,724.0,508.54,303.0,3.72,59.35 +2020-07-28 13:00:00,20.91,522.0,196.42,371.0,3.72,55.45 +2020-07-28 14:00:00,20.82,414.0,134.9,323.0,3.38,53.5 +2020-07-28 15:00:00,20.75,225.0,18.13,215.0,3.1,53.5 +2020-07-28 16:00:00,20.61,294.0,306.02,169.0,2.9,53.5 +2020-07-28 17:00:00,20.1,189.0,400.48,87.0,2.34,53.35 +2020-07-28 18:00:00,19.06,56.0,198.35,36.0,1.66,61.3 +2020-07-28 19:00:00,18.1,0.0,-0.0,0.0,1.1,68.05 +2020-07-28 20:00:00,18.89,0.0,-0.0,0.0,0.48,59.05 +2020-07-28 21:00:00,16.69,0.0,-0.0,0.0,1.31,65.35 +2020-07-28 22:00:00,14.2,0.0,-0.0,0.0,1.79,77.65 +2020-07-28 23:00:00,13.18,0.0,-0.0,0.0,1.86,83.25 +2020-07-29 00:00:00,12.55,0.0,-0.0,0.0,1.86,86.25 +2020-07-29 01:00:00,12.26,0.0,-0.0,0.0,1.86,89.35 +2020-07-29 02:00:00,11.81,0.0,-0.0,0.0,1.86,89.3 +2020-07-29 03:00:00,11.44,0.0,-0.0,0.0,1.93,92.55 +2020-07-29 04:00:00,11.24,38.0,62.94,32.0,1.86,92.55 +2020-07-29 05:00:00,13.19,190.0,433.17,82.0,1.52,89.4 +2020-07-29 
06:00:00,17.42,346.0,565.2,118.0,1.24,78.1 +2020-07-29 07:00:00,19.65,503.0,661.73,141.0,1.38,70.65 +2020-07-29 08:00:00,21.13,639.0,706.94,165.0,1.93,57.55 +2020-07-29 09:00:00,21.93,716.0,641.57,225.0,2.21,53.75 +2020-07-29 10:00:00,22.62,769.0,621.78,256.0,2.28,50.3 +2020-07-29 11:00:00,23.29,860.0,826.6,161.0,2.21,47.0 +2020-07-29 12:00:00,23.55,827.0,795.73,170.0,2.28,47.0 +2020-07-29 13:00:00,24.25,741.0,716.26,192.0,2.34,44.05 +2020-07-29 14:00:00,24.4,612.0,599.58,209.0,2.34,44.05 +2020-07-29 15:00:00,24.59,518.0,708.47,129.0,2.48,42.65 +2020-07-29 16:00:00,24.39,350.0,569.46,119.0,2.34,45.6 +2020-07-29 17:00:00,23.61,177.0,321.87,96.0,1.86,54.2 +2020-07-29 18:00:00,22.09,43.0,92.23,34.0,2.07,59.8 +2020-07-29 19:00:00,20.41,0.0,-0.0,0.0,2.48,63.7 +2020-07-29 20:00:00,19.32,0.0,-0.0,0.0,2.55,68.1 +2020-07-29 21:00:00,18.57,0.0,-0.0,0.0,2.55,68.05 +2020-07-29 22:00:00,17.94,0.0,-0.0,0.0,2.62,70.35 +2020-07-29 23:00:00,17.44,0.0,-0.0,0.0,2.83,67.85 +2020-07-30 00:00:00,17.05,0.0,-0.0,0.0,2.97,65.45 +2020-07-30 01:00:00,16.73,0.0,-0.0,0.0,3.17,67.75 +2020-07-30 02:00:00,16.6,0.0,-0.0,0.0,3.38,67.75 +2020-07-30 03:00:00,16.45,0.0,-0.0,0.0,3.52,72.65 +2020-07-30 04:00:00,16.25,47.0,151.95,33.0,3.45,75.25 +2020-07-30 05:00:00,16.81,190.0,454.64,78.0,3.45,75.3 +2020-07-30 06:00:00,18.43,310.0,401.86,149.0,3.24,72.95 +2020-07-30 07:00:00,20.74,194.0,5.51,191.0,3.31,68.4 +2020-07-30 08:00:00,22.49,322.0,40.41,295.0,3.17,66.35 +2020-07-30 09:00:00,24.09,729.0,695.92,198.0,3.86,62.35 +2020-07-30 10:00:00,24.5,442.0,64.41,389.0,4.97,56.25 +2020-07-30 11:00:00,24.43,729.0,474.27,329.0,5.31,52.5 +2020-07-30 12:00:00,24.31,692.0,429.93,338.0,5.1,50.7 +2020-07-30 13:00:00,24.34,582.0,304.93,349.0,4.97,48.95 +2020-07-30 14:00:00,24.13,491.0,274.78,307.0,4.62,47.25 +2020-07-30 15:00:00,23.47,322.0,120.8,256.0,4.14,50.45 +2020-07-30 16:00:00,22.76,195.0,57.11,172.0,3.72,50.3 +2020-07-30 17:00:00,21.61,66.0,0.0,66.0,3.52,51.9 +2020-07-30 18:00:00,20.42,31.0,21.22,29.0,3.1,55.3 +2020-07-30 19:00:00,19.72,0.0,-0.0,0.0,2.41,53.25 +2020-07-30 20:00:00,18.37,0.0,-0.0,0.0,1.93,59.05 +2020-07-30 21:00:00,16.54,0.0,-0.0,0.0,1.93,65.35 +2020-07-30 22:00:00,15.46,0.0,-0.0,0.0,1.86,72.45 +2020-07-30 23:00:00,14.48,0.0,-0.0,0.0,1.79,75.0 +2020-07-31 00:00:00,13.61,0.0,-0.0,0.0,1.86,77.6 +2020-07-31 01:00:00,13.0,0.0,-0.0,0.0,1.79,80.35 +2020-07-31 02:00:00,12.42,0.0,-0.0,0.0,1.93,83.15 +2020-07-31 03:00:00,12.64,0.0,-0.0,0.0,2.21,80.3 +2020-07-31 04:00:00,12.96,9.0,0.0,9.0,2.34,77.5 +2020-07-31 05:00:00,13.86,100.0,36.99,91.0,2.0,80.4 +2020-07-31 06:00:00,15.68,118.0,2.51,117.0,2.0,70.0 +2020-07-31 07:00:00,16.31,228.0,22.15,216.0,2.14,72.65 +2020-07-31 08:00:00,17.28,439.0,178.78,320.0,2.07,67.85 +2020-07-31 09:00:00,18.2,712.0,641.54,224.0,2.14,61.2 +2020-07-31 10:00:00,18.97,710.0,474.08,321.0,2.07,59.05 +2020-07-31 11:00:00,19.26,686.0,379.25,367.0,1.86,55.05 +2020-07-31 12:00:00,20.0,737.0,539.56,294.0,1.79,53.25 +2020-07-31 13:00:00,20.33,615.0,375.5,329.0,1.86,49.7 +2020-07-31 14:00:00,20.53,391.0,106.44,320.0,1.66,47.9 +2020-07-31 15:00:00,20.37,322.0,121.42,256.0,1.79,47.9 +2020-07-31 16:00:00,16.45,306.0,375.26,156.0,1.59,57.83 +2020-07-31 17:00:00,16.68,180.0,375.06,88.0,1.64,59.33 +2020-07-31 18:00:00,16.9,35.0,55.07,30.0,1.69,60.82 +2020-07-31 19:00:00,17.13,0.0,-0.0,0.0,1.74,62.31 +2020-07-31 20:00:00,17.36,0.0,-0.0,0.0,1.8,63.8 +2020-07-31 21:00:00,17.58,0.0,-0.0,0.0,1.85,65.3 +2020-07-31 22:00:00,17.81,0.0,-0.0,0.0,1.9,66.79 +2020-07-31 23:00:00,18.04,0.0,-0.0,0.0,1.95,68.28 +2020-08-01 
00:00:00,18.26,0.0,-0.0,0.0,2.0,69.78 +2020-08-01 01:00:00,18.49,0.0,-0.0,0.0,2.06,71.27 +2020-08-01 02:00:00,18.71,0.0,-0.0,0.0,2.11,72.76 +2020-08-01 03:00:00,18.94,0.0,-0.0,0.0,2.16,74.26 +2020-08-01 04:00:00,19.17,30.0,48.62,26.0,2.21,75.75 +2020-08-01 05:00:00,19.39,158.0,299.38,87.0,2.26,77.24 +2020-08-01 06:00:00,19.62,325.0,540.7,113.0,2.32,78.74 +2020-08-01 07:00:00,19.84,484.0,665.41,127.0,2.37,80.23 +2020-08-01 08:00:00,25.97,632.0,756.92,132.0,2.28,56.6 +2020-08-01 09:00:00,27.51,741.0,797.79,138.0,2.34,51.45 +2020-08-01 10:00:00,28.73,816.0,832.32,137.0,2.41,46.65 +2020-08-01 11:00:00,29.53,840.0,840.54,137.0,2.28,43.65 +2020-08-01 12:00:00,30.09,812.0,820.92,142.0,2.0,38.3 +2020-08-01 13:00:00,30.61,73.0,0.0,73.0,1.86,35.85 +2020-08-01 14:00:00,30.64,70.0,0.0,70.0,1.52,35.85 +2020-08-01 15:00:00,29.67,195.0,7.44,191.0,2.41,38.15 +2020-08-01 16:00:00,27.19,285.0,330.43,155.0,3.24,46.4 +2020-08-01 17:00:00,25.82,132.0,142.53,98.0,2.14,58.55 +2020-08-01 18:00:00,24.87,35.0,83.72,28.0,1.66,66.85 +2020-08-01 19:00:00,23.38,0.0,-0.0,0.0,2.55,73.7 +2020-08-01 20:00:00,21.48,0.0,-0.0,0.0,2.0,86.95 +2020-08-01 21:00:00,20.66,0.0,-0.0,0.0,1.45,89.9 +2020-08-01 22:00:00,20.39,0.0,-0.0,0.0,1.66,92.95 +2020-08-01 23:00:00,20.02,0.0,-0.0,0.0,1.79,89.85 +2020-08-02 00:00:00,19.8,0.0,-0.0,0.0,1.93,89.85 +2020-08-02 01:00:00,19.63,0.0,-0.0,0.0,2.34,89.85 +2020-08-02 02:00:00,19.26,0.0,-0.0,0.0,2.48,92.9 +2020-08-02 03:00:00,18.63,0.0,-0.0,0.0,2.07,92.85 +2020-08-02 04:00:00,18.38,9.0,0.0,9.0,1.93,96.05 +2020-08-02 05:00:00,18.43,27.0,0.0,27.0,1.93,99.4 +2020-08-02 06:00:00,18.84,32.0,0.0,32.0,2.9,96.1 +2020-08-02 07:00:00,18.72,96.0,0.0,96.0,1.93,96.1 +2020-08-02 08:00:00,18.6,135.0,0.0,135.0,1.79,96.1 +2020-08-02 09:00:00,18.53,241.0,1.33,240.0,2.21,96.1 +2020-08-02 10:00:00,18.67,345.0,18.44,330.0,2.0,96.1 +2020-08-02 11:00:00,19.01,196.0,0.0,196.0,1.93,92.9 +2020-08-02 12:00:00,20.44,247.0,0.0,247.0,1.24,92.95 +2020-08-02 13:00:00,20.69,162.0,0.0,162.0,1.45,89.9 +2020-08-02 14:00:00,20.86,282.0,22.77,267.0,1.52,86.9 +2020-08-02 15:00:00,21.02,305.0,114.09,244.0,1.17,84.05 +2020-08-02 16:00:00,21.05,271.0,287.08,159.0,0.9,81.3 +2020-08-02 17:00:00,20.97,137.0,178.7,95.0,0.55,78.5 +2020-08-02 18:00:00,20.68,38.0,137.7,27.0,0.76,78.5 +2020-08-02 19:00:00,19.23,0.0,-0.0,0.0,0.83,89.8 +2020-08-02 20:00:00,17.97,0.0,-0.0,0.0,1.31,92.85 +2020-08-02 21:00:00,17.18,0.0,-0.0,0.0,1.59,92.85 +2020-08-02 22:00:00,16.55,0.0,-0.0,0.0,1.79,92.8 +2020-08-02 23:00:00,16.58,0.0,-0.0,0.0,1.86,92.8 +2020-08-03 00:00:00,16.61,0.0,-0.0,0.0,1.93,92.8 +2020-08-03 01:00:00,17.17,0.0,-0.0,0.0,2.0,92.85 +2020-08-03 02:00:00,17.44,0.0,-0.0,0.0,2.0,92.85 +2020-08-03 03:00:00,17.94,0.0,-0.0,0.0,2.07,92.85 +2020-08-03 04:00:00,17.88,33.0,105.98,25.0,2.14,86.65 +2020-08-03 05:00:00,18.49,114.0,90.98,93.0,2.34,78.25 +2020-08-03 06:00:00,19.69,185.0,59.56,162.0,2.69,73.15 +2020-08-03 07:00:00,20.49,497.0,726.95,111.0,3.59,70.8 +2020-08-03 08:00:00,21.39,435.0,193.79,308.0,4.14,70.9 +2020-08-03 09:00:00,22.36,359.0,38.63,330.0,4.21,66.35 +2020-08-03 10:00:00,23.22,568.0,223.23,387.0,4.34,60.05 +2020-08-03 11:00:00,23.81,323.0,8.42,316.0,4.55,56.15 +2020-08-03 12:00:00,24.2,506.0,136.87,395.0,4.83,52.5 +2020-08-03 13:00:00,24.49,126.0,0.0,126.0,4.9,52.5 +2020-08-03 14:00:00,24.57,210.0,1.52,209.0,4.48,49.05 +2020-08-03 15:00:00,24.19,276.0,71.5,238.0,3.93,50.7 +2020-08-03 16:00:00,24.1,113.0,2.59,112.0,3.72,50.7 +2020-08-03 17:00:00,23.44,129.0,146.92,95.0,3.03,56.0 +2020-08-03 18:00:00,22.4,34.0,105.19,26.0,2.34,59.8 
+2020-08-03 19:00:00,21.31,0.0,-0.0,0.0,1.86,63.9 +2020-08-03 20:00:00,19.59,0.0,-0.0,0.0,2.0,73.15 +2020-08-03 21:00:00,18.25,0.0,-0.0,0.0,2.07,78.15 +2020-08-03 22:00:00,17.51,0.0,-0.0,0.0,2.14,80.85 +2020-08-03 23:00:00,17.03,0.0,-0.0,0.0,2.21,80.85 +2020-08-04 00:00:00,16.88,0.0,-0.0,0.0,2.34,86.6 +2020-08-04 01:00:00,17.34,0.0,-0.0,0.0,2.55,83.7 +2020-08-04 02:00:00,17.56,0.0,-0.0,0.0,2.9,83.7 +2020-08-04 03:00:00,18.16,0.0,-0.0,0.0,3.31,80.9 +2020-08-04 04:00:00,18.72,4.0,0.0,4.0,3.72,78.25 +2020-08-04 05:00:00,19.77,25.0,0.0,25.0,4.34,75.7 +2020-08-04 06:00:00,21.15,121.0,2.61,120.0,3.93,70.9 +2020-08-04 07:00:00,21.98,81.0,0.0,81.0,3.79,73.45 +2020-08-04 08:00:00,20.87,332.0,56.69,295.0,5.59,73.3 +2020-08-04 09:00:00,19.71,130.0,0.0,130.0,4.21,86.85 +2020-08-04 10:00:00,18.22,444.0,74.23,384.0,4.48,89.75 +2020-08-04 11:00:00,18.59,695.0,438.0,332.0,4.21,78.25 +2020-08-04 12:00:00,20.82,554.0,200.42,392.0,5.59,63.8 +2020-08-04 13:00:00,22.09,674.0,584.02,237.0,6.28,53.85 +2020-08-04 14:00:00,22.06,522.0,393.72,265.0,5.59,52.0 +2020-08-04 15:00:00,21.71,474.0,628.48,142.0,5.45,53.75 +2020-08-04 16:00:00,21.12,334.0,615.72,98.0,4.69,55.55 +2020-08-04 17:00:00,20.54,160.0,338.14,83.0,4.34,57.45 +2020-08-04 18:00:00,20.05,34.0,138.63,24.0,4.28,53.35 +2020-08-04 19:00:00,19.31,0.0,-0.0,0.0,3.24,53.15 +2020-08-04 20:00:00,18.42,0.0,-0.0,0.0,2.76,54.85 +2020-08-04 21:00:00,17.52,0.0,-0.0,0.0,2.76,56.75 +2020-08-04 22:00:00,16.8,0.0,-0.0,0.0,2.9,58.7 +2020-08-04 23:00:00,16.39,0.0,-0.0,0.0,3.17,60.75 +2020-08-05 00:00:00,16.12,0.0,-0.0,0.0,3.38,62.95 +2020-08-05 01:00:00,15.68,0.0,-0.0,0.0,3.45,67.55 +2020-08-05 02:00:00,15.41,0.0,-0.0,0.0,3.52,69.9 +2020-08-05 03:00:00,15.07,0.0,-0.0,0.0,3.79,69.9 +2020-08-05 04:00:00,14.88,36.0,204.23,22.0,4.0,72.4 +2020-08-05 05:00:00,15.14,176.0,485.83,67.0,4.34,72.45 +2020-08-05 06:00:00,16.09,333.0,618.18,98.0,4.28,70.1 +2020-08-05 07:00:00,17.52,493.0,711.96,119.0,6.41,70.3 +2020-08-05 08:00:00,18.15,610.0,681.59,167.0,6.48,65.55 +2020-08-05 09:00:00,18.71,653.0,513.7,270.0,6.55,63.4 +2020-08-05 10:00:00,19.39,465.0,90.61,392.0,6.69,61.3 +2020-08-05 11:00:00,19.86,741.0,543.51,292.0,6.55,57.2 +2020-08-05 12:00:00,20.81,618.0,305.39,372.0,6.76,53.5 +2020-08-05 13:00:00,21.34,278.0,5.37,274.0,6.55,49.95 +2020-08-05 14:00:00,21.43,566.0,521.83,227.0,6.0,48.2 +2020-08-05 15:00:00,21.41,348.0,196.22,245.0,5.38,48.2 +2020-08-05 16:00:00,21.16,230.0,155.38,171.0,4.55,48.2 +2020-08-05 17:00:00,20.74,148.0,267.96,88.0,3.52,49.8 +2020-08-05 18:00:00,19.84,31.0,117.43,23.0,2.83,51.4 +2020-08-05 19:00:00,18.72,0.0,-0.0,0.0,2.83,54.95 +2020-08-05 20:00:00,17.75,0.0,-0.0,0.0,2.34,60.95 +2020-08-05 21:00:00,16.64,0.0,-0.0,0.0,2.21,63.05 +2020-08-05 22:00:00,15.74,0.0,-0.0,0.0,2.21,67.55 +2020-08-05 23:00:00,14.83,0.0,-0.0,0.0,2.07,72.4 +2020-08-06 00:00:00,13.82,0.0,-0.0,0.0,1.93,80.4 +2020-08-06 01:00:00,13.09,0.0,-0.0,0.0,1.86,80.35 +2020-08-06 02:00:00,12.51,0.0,-0.0,0.0,1.72,83.25 +2020-08-06 03:00:00,12.26,0.0,-0.0,0.0,1.66,86.2 +2020-08-06 04:00:00,11.93,3.0,0.0,3.0,1.59,86.2 +2020-08-06 05:00:00,12.89,148.0,298.55,82.0,1.24,89.4 +2020-08-06 06:00:00,16.31,296.0,437.58,131.0,0.9,77.95 +2020-08-06 07:00:00,18.59,263.0,61.25,231.0,1.17,70.45 +2020-08-06 08:00:00,20.17,242.0,7.73,237.0,0.97,61.5 +2020-08-06 09:00:00,21.22,555.0,290.76,339.0,0.48,55.55 +2020-08-06 10:00:00,22.16,746.0,636.38,235.0,0.28,52.0 +2020-08-06 11:00:00,22.81,829.0,829.51,146.0,0.55,48.55 +2020-08-06 12:00:00,23.93,813.0,844.64,135.0,0.34,45.5 +2020-08-06 
13:00:00,24.07,746.0,833.83,127.0,0.28,44.05 +2020-08-06 14:00:00,24.06,621.0,750.26,136.0,1.24,45.6 +2020-08-06 15:00:00,24.07,462.0,602.09,148.0,1.86,45.6 +2020-08-06 16:00:00,23.79,313.0,531.82,113.0,2.21,48.8 +2020-08-06 17:00:00,23.14,150.0,309.05,82.0,1.93,54.1 +2020-08-06 18:00:00,21.83,24.0,62.48,20.0,2.07,59.7 +2020-08-06 19:00:00,20.92,0.0,-0.0,0.0,2.48,59.5 +2020-08-06 20:00:00,19.68,0.0,-0.0,0.0,2.41,63.6 +2020-08-06 21:00:00,18.65,0.0,-0.0,0.0,2.34,65.65 +2020-08-06 22:00:00,18.06,0.0,-0.0,0.0,2.34,67.95 +2020-08-06 23:00:00,17.57,0.0,-0.0,0.0,2.48,67.85 +2020-08-07 00:00:00,17.16,0.0,-0.0,0.0,2.55,65.45 +2020-08-07 01:00:00,16.87,0.0,-0.0,0.0,2.69,65.35 +2020-08-07 02:00:00,16.79,0.0,-0.0,0.0,2.76,63.05 +2020-08-07 03:00:00,16.72,0.0,-0.0,0.0,2.9,63.05 +2020-08-07 04:00:00,16.71,30.0,178.96,19.0,2.97,63.05 +2020-08-07 05:00:00,17.23,167.0,468.44,65.0,2.83,65.45 +2020-08-07 06:00:00,19.14,333.0,647.12,91.0,2.69,65.75 +2020-08-07 07:00:00,21.3,486.0,712.25,116.0,3.1,61.7 +2020-08-07 08:00:00,23.09,638.0,806.98,118.0,3.24,56.0 +2020-08-07 09:00:00,24.71,736.0,805.26,140.0,3.45,49.05 +2020-08-07 10:00:00,26.16,802.0,817.25,148.0,3.72,44.55 +2020-08-07 11:00:00,27.34,849.0,882.29,125.0,3.79,40.35 +2020-08-07 12:00:00,28.36,829.0,886.47,120.0,3.93,39.1 +2020-08-07 13:00:00,29.07,121.0,0.0,121.0,4.0,36.7 +2020-08-07 14:00:00,29.51,645.0,831.81,110.0,4.07,35.45 +2020-08-07 15:00:00,29.67,484.0,704.62,119.0,4.0,35.6 +2020-08-07 16:00:00,29.32,319.0,580.14,103.0,3.31,39.35 +2020-08-07 17:00:00,28.2,139.0,245.32,86.0,2.76,43.4 +2020-08-07 18:00:00,26.22,25.0,100.29,19.0,2.69,47.75 +2020-08-07 19:00:00,24.79,0.0,-0.0,0.0,3.17,49.05 +2020-08-07 20:00:00,24.13,0.0,-0.0,0.0,3.38,50.7 +2020-08-07 21:00:00,23.76,0.0,-0.0,0.0,3.17,56.15 +2020-08-07 22:00:00,23.0,0.0,-0.0,0.0,2.76,62.05 +2020-08-07 23:00:00,22.04,0.0,-0.0,0.0,2.21,66.35 +2020-08-08 00:00:00,21.19,0.0,-0.0,0.0,1.93,73.35 +2020-08-08 01:00:00,20.32,0.0,-0.0,0.0,1.93,78.45 +2020-08-08 02:00:00,19.16,0.0,-0.0,0.0,1.86,86.8 +2020-08-08 03:00:00,18.67,0.0,-0.0,0.0,1.52,89.75 +2020-08-08 04:00:00,18.34,2.0,0.0,2.0,1.38,92.85 +2020-08-08 05:00:00,18.27,35.0,0.0,35.0,1.31,96.05 +2020-08-08 06:00:00,19.52,256.0,283.16,151.0,1.66,86.85 +2020-08-08 07:00:00,19.47,453.0,606.0,140.0,2.34,86.8 +2020-08-08 08:00:00,20.27,588.0,665.6,161.0,3.31,81.15 +2020-08-08 09:00:00,21.33,640.0,524.85,253.0,3.38,75.95 +2020-08-08 10:00:00,22.55,641.0,387.48,332.0,3.1,68.75 +2020-08-08 11:00:00,23.62,363.0,23.23,344.0,3.45,60.15 +2020-08-08 12:00:00,24.24,722.0,599.88,244.0,3.79,54.35 +2020-08-08 13:00:00,24.49,225.0,0.0,225.0,4.14,50.7 +2020-08-08 14:00:00,23.73,379.0,120.35,302.0,4.34,48.8 +2020-08-08 15:00:00,22.93,484.0,734.81,106.0,4.83,52.1 +2020-08-08 16:00:00,21.94,299.0,499.34,115.0,4.07,57.65 +2020-08-08 17:00:00,21.24,113.0,127.37,86.0,3.31,61.7 +2020-08-08 18:00:00,20.46,20.0,54.01,17.0,2.9,68.3 +2020-08-08 19:00:00,19.8,0.0,-0.0,0.0,3.59,61.4 +2020-08-08 20:00:00,19.23,0.0,-0.0,0.0,3.59,63.5 +2020-08-08 21:00:00,18.57,0.0,-0.0,0.0,3.38,65.65 +2020-08-08 22:00:00,18.12,0.0,-0.0,0.0,3.45,67.95 +2020-08-08 23:00:00,17.71,0.0,-0.0,0.0,3.59,70.3 +2020-08-09 00:00:00,17.31,0.0,-0.0,0.0,3.52,70.3 +2020-08-09 01:00:00,16.78,0.0,-0.0,0.0,3.72,75.3 +2020-08-09 02:00:00,16.22,0.0,-0.0,0.0,3.72,77.95 +2020-08-09 03:00:00,15.35,0.0,-0.0,0.0,4.28,86.5 +2020-08-09 04:00:00,14.97,20.0,55.3,17.0,4.07,86.5 +2020-08-09 05:00:00,14.54,30.0,0.0,30.0,3.86,86.45 +2020-08-09 06:00:00,14.53,169.0,46.24,152.0,3.93,83.45 +2020-08-09 
07:00:00,14.82,221.0,25.32,208.0,4.14,86.45 +2020-08-09 08:00:00,15.47,412.0,164.41,307.0,4.48,77.85 +2020-08-09 09:00:00,16.07,452.0,126.61,359.0,4.41,72.65 +2020-08-09 10:00:00,17.22,123.0,0.0,123.0,4.69,60.95 +2020-08-09 11:00:00,17.98,110.0,0.0,110.0,4.97,54.85 +2020-08-09 12:00:00,18.61,203.0,0.0,203.0,5.17,49.3 +2020-08-09 13:00:00,18.91,142.0,0.0,142.0,5.03,47.55 +2020-08-09 14:00:00,19.19,495.0,345.71,275.0,4.83,44.3 +2020-08-09 15:00:00,18.98,372.0,287.82,225.0,4.21,42.7 +2020-08-09 16:00:00,18.95,284.0,411.45,134.0,4.07,44.2 +2020-08-09 17:00:00,18.58,136.0,264.66,81.0,3.38,44.2 +2020-08-09 18:00:00,17.95,19.0,58.62,16.0,2.55,49.05 +2020-08-09 19:00:00,16.65,0.0,-0.0,0.0,1.1,60.85 +2020-08-09 20:00:00,15.31,0.0,-0.0,0.0,1.17,69.9 +2020-08-09 21:00:00,14.04,0.0,-0.0,0.0,1.52,72.3 +2020-08-09 22:00:00,12.75,0.0,-0.0,0.0,1.66,80.3 +2020-08-09 23:00:00,12.27,0.0,-0.0,0.0,1.72,83.15 +2020-08-10 00:00:00,11.87,0.0,-0.0,0.0,1.79,83.1 +2020-08-10 01:00:00,11.35,0.0,-0.0,0.0,1.93,86.1 +2020-08-10 02:00:00,11.33,0.0,-0.0,0.0,2.07,86.1 +2020-08-10 03:00:00,11.25,0.0,-0.0,0.0,2.21,83.05 +2020-08-10 04:00:00,11.28,8.0,0.0,8.0,2.21,83.05 +2020-08-10 05:00:00,12.58,58.0,4.82,57.0,2.21,80.3 +2020-08-10 06:00:00,15.62,144.0,21.95,136.0,2.14,70.0 +2020-08-10 07:00:00,18.2,345.0,229.21,228.0,2.83,70.35 +2020-08-10 08:00:00,19.79,508.0,402.71,252.0,3.17,68.2 +2020-08-10 09:00:00,21.23,619.0,475.65,271.0,3.59,59.6 +2020-08-10 10:00:00,21.93,351.0,22.74,333.0,3.45,55.65 +2020-08-10 11:00:00,22.62,311.0,7.39,305.0,2.9,52.1 +2020-08-10 12:00:00,23.94,272.0,2.53,270.0,3.1,48.8 +2020-08-10 13:00:00,23.88,224.0,0.0,224.0,2.41,50.55 +2020-08-10 14:00:00,23.65,170.0,0.0,170.0,1.72,54.2 +2020-08-10 15:00:00,23.07,160.0,1.97,159.0,1.79,58.0 +2020-08-10 16:00:00,22.07,132.0,13.87,127.0,2.41,59.8 +2020-08-10 17:00:00,21.2,83.0,39.3,75.0,1.45,66.15 +2020-08-10 18:00:00,20.97,12.0,21.4,11.0,0.28,68.4 +2020-08-10 19:00:00,18.63,0.0,-0.0,0.0,1.45,86.75 +2020-08-10 20:00:00,17.89,0.0,-0.0,0.0,1.66,89.7 +2020-08-10 21:00:00,17.19,0.0,-0.0,0.0,1.79,86.65 +2020-08-10 22:00:00,17.29,0.0,-0.0,0.0,1.72,83.7 +2020-08-10 23:00:00,17.75,0.0,-0.0,0.0,2.0,83.7 +2020-08-11 00:00:00,17.34,0.0,-0.0,0.0,1.79,86.65 +2020-08-11 01:00:00,17.08,0.0,-0.0,0.0,1.86,86.65 +2020-08-11 02:00:00,16.74,0.0,-0.0,0.0,1.93,86.6 +2020-08-11 03:00:00,16.26,0.0,-0.0,0.0,2.07,89.65 +2020-08-11 04:00:00,16.3,2.0,0.0,2.0,2.14,89.65 +2020-08-11 05:00:00,16.64,113.0,156.77,81.0,2.07,86.6 +2020-08-11 06:00:00,17.31,199.0,116.29,157.0,1.93,86.65 +2020-08-11 07:00:00,19.57,385.0,362.66,201.0,1.72,78.35 +2020-08-11 08:00:00,21.67,582.0,674.87,155.0,2.28,68.6 +2020-08-11 09:00:00,23.16,631.0,522.86,250.0,2.97,62.15 +2020-08-11 10:00:00,24.17,392.0,45.64,356.0,3.1,58.2 +2020-08-11 11:00:00,25.13,615.0,311.57,363.0,3.03,52.75 +2020-08-11 12:00:00,24.99,231.0,0.0,231.0,2.83,52.6 +2020-08-11 13:00:00,24.8,517.0,247.85,337.0,2.28,54.45 +2020-08-11 14:00:00,25.21,577.0,662.69,160.0,1.66,52.75 +2020-08-11 15:00:00,25.15,200.0,15.9,192.0,1.24,54.55 +2020-08-11 16:00:00,24.86,292.0,521.81,106.0,0.69,56.35 +2020-08-11 17:00:00,23.11,141.0,376.44,66.0,0.55,66.55 +2020-08-11 18:00:00,21.76,15.0,47.42,13.0,0.97,76.0 +2020-08-11 19:00:00,21.68,0.0,-0.0,0.0,1.72,64.0 +2020-08-11 20:00:00,20.46,0.0,-0.0,0.0,1.72,73.2 +2020-08-11 21:00:00,19.22,0.0,-0.0,0.0,1.17,83.85 +2020-08-11 22:00:00,18.61,0.0,-0.0,0.0,0.97,86.75 +2020-08-11 23:00:00,18.19,0.0,-0.0,0.0,0.83,89.75 +2020-08-12 00:00:00,18.42,0.0,-0.0,0.0,0.34,89.75 +2020-08-12 01:00:00,17.25,0.0,-0.0,0.0,0.83,92.85 
+2020-08-12 02:00:00,17.22,0.0,-0.0,0.0,1.72,92.85 +2020-08-12 03:00:00,17.42,0.0,-0.0,0.0,2.14,92.85 +2020-08-12 04:00:00,17.25,1.0,0.0,1.0,2.41,89.7 +2020-08-12 05:00:00,17.6,21.0,0.0,21.0,2.48,92.85 +2020-08-12 06:00:00,19.06,251.0,310.14,140.0,2.41,89.8 +2020-08-12 07:00:00,19.67,408.0,468.03,172.0,3.17,83.9 +2020-08-12 08:00:00,21.32,509.0,436.72,234.0,3.93,75.95 +2020-08-12 09:00:00,23.08,528.0,278.35,326.0,4.83,64.3 +2020-08-12 10:00:00,24.33,258.0,1.27,257.0,5.1,58.2 +2020-08-12 11:00:00,25.45,133.0,0.0,133.0,4.69,54.55 +2020-08-12 12:00:00,24.8,140.0,0.0,140.0,3.86,60.35 +2020-08-12 13:00:00,23.81,117.0,0.0,117.0,2.62,71.35 +2020-08-12 14:00:00,23.59,65.0,0.0,65.0,2.14,73.75 +2020-08-12 15:00:00,23.67,83.0,0.0,83.0,3.03,71.35 +2020-08-12 16:00:00,22.61,54.0,0.0,54.0,2.9,76.15 +2020-08-12 17:00:00,21.8,12.0,0.0,12.0,3.52,78.65 +2020-08-12 18:00:00,21.27,10.0,0.0,10.0,4.07,75.95 +2020-08-12 19:00:00,20.22,0.0,-0.0,0.0,2.55,81.15 +2020-08-12 20:00:00,19.75,0.0,-0.0,0.0,1.93,81.1 +2020-08-12 21:00:00,19.45,0.0,-0.0,0.0,1.86,83.85 +2020-08-12 22:00:00,19.24,0.0,-0.0,0.0,2.76,78.3 +2020-08-12 23:00:00,18.6,0.0,-0.0,0.0,3.31,78.25 +2020-08-13 00:00:00,17.63,0.0,-0.0,0.0,3.1,83.7 +2020-08-13 01:00:00,17.34,0.0,-0.0,0.0,2.76,83.7 +2020-08-13 02:00:00,17.04,0.0,-0.0,0.0,2.97,83.7 +2020-08-13 03:00:00,16.77,0.0,-0.0,0.0,3.31,80.8 +2020-08-13 04:00:00,16.6,12.0,0.0,12.0,3.52,78.0 +2020-08-13 05:00:00,16.52,13.0,0.0,13.0,3.66,75.3 +2020-08-13 06:00:00,16.82,138.0,19.74,131.0,3.59,78.0 +2020-08-13 07:00:00,17.79,275.0,91.8,229.0,3.38,78.1 +2020-08-13 08:00:00,19.62,629.0,836.22,105.0,4.21,65.85 +2020-08-13 09:00:00,20.83,540.0,290.59,330.0,4.69,55.45 +2020-08-13 10:00:00,21.51,704.0,563.45,263.0,4.83,50.05 +2020-08-13 11:00:00,21.89,566.0,220.55,389.0,5.17,48.3 +2020-08-13 12:00:00,22.63,767.0,767.06,168.0,5.24,45.25 +2020-08-13 13:00:00,22.7,594.0,421.26,291.0,4.9,43.65 +2020-08-13 14:00:00,23.19,532.0,496.94,223.0,5.17,40.75 +2020-08-13 15:00:00,23.22,420.0,512.95,166.0,4.62,39.3 +2020-08-13 16:00:00,22.99,299.0,589.09,94.0,3.79,42.1 +2020-08-13 17:00:00,22.35,133.0,346.81,67.0,3.1,45.1 +2020-08-13 18:00:00,20.79,9.0,0.0,9.0,2.28,48.05 +2020-08-13 19:00:00,19.24,0.0,-0.0,0.0,2.0,59.15 +2020-08-13 20:00:00,17.95,0.0,-0.0,0.0,2.0,65.45 +2020-08-13 21:00:00,16.76,0.0,-0.0,0.0,2.14,67.75 +2020-08-13 22:00:00,15.86,0.0,-0.0,0.0,2.28,72.55 +2020-08-13 23:00:00,15.01,0.0,-0.0,0.0,2.21,75.1 +2020-08-14 00:00:00,14.38,0.0,-0.0,0.0,2.34,77.65 +2020-08-14 01:00:00,14.05,0.0,-0.0,0.0,2.48,77.65 +2020-08-14 02:00:00,13.87,0.0,-0.0,0.0,2.55,77.6 +2020-08-14 03:00:00,13.67,0.0,-0.0,0.0,2.55,77.6 +2020-08-14 04:00:00,13.44,9.0,0.0,9.0,2.48,77.6 +2020-08-14 05:00:00,14.03,132.0,330.59,68.0,2.41,77.65 +2020-08-14 06:00:00,16.74,114.0,5.69,112.0,2.21,70.2 +2020-08-14 07:00:00,18.81,272.0,90.38,227.0,2.83,70.45 +2020-08-14 08:00:00,20.06,588.0,697.65,153.0,2.9,61.5 +2020-08-14 09:00:00,21.13,556.0,325.18,322.0,2.83,55.55 +2020-08-14 10:00:00,22.13,566.0,248.86,372.0,2.83,50.2 +2020-08-14 11:00:00,22.9,667.0,421.62,330.0,3.24,46.85 +2020-08-14 12:00:00,23.73,626.0,370.41,338.0,3.59,42.35 +2020-08-14 13:00:00,24.11,654.0,598.05,226.0,3.03,41.0 +2020-08-14 14:00:00,23.66,422.0,210.37,292.0,2.21,43.9 +2020-08-14 15:00:00,23.45,284.0,112.0,229.0,1.66,47.0 +2020-08-14 16:00:00,23.01,93.0,0.0,93.0,1.03,54.1 +2020-08-14 17:00:00,22.26,77.0,43.08,69.0,0.69,66.35 +2020-08-14 18:00:00,21.27,8.0,0.0,8.0,1.24,63.9 +2020-08-14 19:00:00,20.03,0.0,-0.0,0.0,1.59,61.5 +2020-08-14 20:00:00,18.88,0.0,-0.0,0.0,1.59,68.05 +2020-08-14 
21:00:00,17.93,0.0,-0.0,0.0,1.59,70.3 +2020-08-14 22:00:00,17.25,0.0,-0.0,0.0,1.72,67.85 +2020-08-14 23:00:00,16.98,0.0,-0.0,0.0,1.79,67.85 +2020-08-15 00:00:00,16.57,0.0,-0.0,0.0,1.59,72.7 +2020-08-15 01:00:00,16.0,0.0,-0.0,0.0,1.66,77.95 +2020-08-15 02:00:00,15.35,0.0,-0.0,0.0,2.21,86.5 +2020-08-15 03:00:00,14.71,0.0,-0.0,0.0,2.83,89.5 +2020-08-15 04:00:00,14.24,1.0,0.0,1.0,3.17,89.5 +2020-08-15 05:00:00,14.04,17.0,0.0,17.0,3.17,89.5 +2020-08-15 06:00:00,13.93,32.0,0.0,32.0,3.17,89.5 +2020-08-15 07:00:00,14.19,62.0,0.0,62.0,3.24,92.7 +2020-08-15 08:00:00,14.27,68.0,0.0,68.0,3.93,92.7 +2020-08-15 09:00:00,14.71,102.0,0.0,102.0,4.34,92.7 +2020-08-15 10:00:00,14.32,96.0,0.0,96.0,4.9,92.7 +2020-08-15 11:00:00,14.24,108.0,0.0,108.0,4.34,92.7 +2020-08-15 12:00:00,14.12,91.0,0.0,91.0,4.28,92.7 +2020-08-15 13:00:00,13.98,87.0,0.0,87.0,4.41,92.7 +2020-08-15 14:00:00,13.74,88.0,0.0,88.0,4.55,92.65 +2020-08-15 15:00:00,13.65,50.0,0.0,50.0,4.62,92.65 +2020-08-15 16:00:00,13.56,41.0,0.0,41.0,4.76,89.45 +2020-08-15 17:00:00,13.32,25.0,0.0,25.0,4.28,92.65 +2020-08-15 18:00:00,13.05,2.0,0.0,2.0,3.59,89.4 +2020-08-15 19:00:00,12.69,0.0,-0.0,0.0,3.59,92.6 +2020-08-15 20:00:00,12.48,0.0,-0.0,0.0,3.24,92.6 +2020-08-15 21:00:00,12.25,0.0,-0.0,0.0,3.03,92.6 +2020-08-15 22:00:00,12.1,0.0,-0.0,0.0,2.76,92.6 +2020-08-15 23:00:00,11.9,0.0,-0.0,0.0,2.69,92.6 +2020-08-16 00:00:00,11.64,0.0,-0.0,0.0,2.62,95.9 +2020-08-16 01:00:00,11.57,0.0,-0.0,0.0,2.34,99.4 +2020-08-16 02:00:00,11.49,0.0,-0.0,0.0,2.07,99.4 +2020-08-16 03:00:00,11.48,0.0,-0.0,0.0,2.21,99.4 +2020-08-16 04:00:00,11.63,1.0,0.0,1.0,2.41,99.4 +2020-08-16 05:00:00,11.87,71.0,32.18,65.0,2.9,99.4 +2020-08-16 06:00:00,12.26,108.0,2.9,107.0,3.52,99.4 +2020-08-16 07:00:00,12.71,89.0,0.0,89.0,2.55,99.4 +2020-08-16 08:00:00,13.15,95.0,0.0,95.0,2.69,99.4 +2020-08-16 09:00:00,13.56,138.0,0.0,138.0,3.79,95.95 +2020-08-16 10:00:00,14.09,162.0,0.0,162.0,3.31,92.7 +2020-08-16 11:00:00,14.46,91.0,0.0,91.0,3.17,89.5 +2020-08-16 12:00:00,14.42,69.0,0.0,69.0,2.97,92.7 +2020-08-16 13:00:00,14.63,86.0,0.0,86.0,2.9,92.7 +2020-08-16 14:00:00,14.81,97.0,0.0,97.0,2.76,89.5 +2020-08-16 15:00:00,14.97,109.0,0.0,109.0,2.83,86.5 +2020-08-16 16:00:00,14.87,168.0,74.7,143.0,2.69,89.5 +2020-08-16 17:00:00,14.67,95.0,141.81,70.0,2.41,89.5 +2020-08-16 18:00:00,14.43,0.0,0.0,0.0,2.21,89.5 +2020-08-16 19:00:00,13.93,0.0,-0.0,0.0,2.48,89.5 +2020-08-16 20:00:00,13.74,0.0,-0.0,0.0,2.28,92.65 +2020-08-16 21:00:00,13.45,0.0,-0.0,0.0,2.21,92.65 +2020-08-16 22:00:00,13.28,0.0,-0.0,0.0,2.21,95.95 +2020-08-16 23:00:00,13.05,0.0,-0.0,0.0,2.21,92.65 +2020-08-17 00:00:00,12.33,0.0,-0.0,0.0,2.28,92.6 +2020-08-17 01:00:00,12.06,0.0,-0.0,0.0,2.28,89.35 +2020-08-17 02:00:00,11.71,0.0,-0.0,0.0,2.41,92.55 +2020-08-17 03:00:00,11.42,0.0,-0.0,0.0,2.41,89.3 +2020-08-17 04:00:00,11.13,1.0,0.0,1.0,2.34,92.55 +2020-08-17 05:00:00,11.67,65.0,21.88,61.0,2.28,89.3 +2020-08-17 06:00:00,13.39,189.0,114.36,150.0,2.0,89.4 +2020-08-17 07:00:00,15.46,455.0,702.73,112.0,2.83,83.55 +2020-08-17 08:00:00,16.97,599.0,770.41,126.0,3.24,75.4 +2020-08-17 09:00:00,18.33,696.0,764.72,153.0,3.72,65.55 +2020-08-17 10:00:00,19.28,757.0,757.3,174.0,3.66,57.1 +2020-08-17 11:00:00,19.79,784.0,779.24,169.0,3.31,53.25 +2020-08-17 12:00:00,20.3,719.0,640.24,228.0,3.03,51.5 +2020-08-17 13:00:00,20.71,672.0,692.8,184.0,2.97,48.05 +2020-08-17 14:00:00,20.91,563.0,656.83,165.0,2.9,46.35 +2020-08-17 15:00:00,20.71,403.0,495.54,166.0,2.48,46.35 +2020-08-17 16:00:00,20.42,265.0,463.53,112.0,1.79,51.5 +2020-08-17 
17:00:00,19.56,64.0,23.33,60.0,1.24,63.6 +2020-08-17 18:00:00,18.95,0.0,0.0,0.0,0.83,63.4 +2020-08-17 19:00:00,17.79,0.0,-0.0,0.0,1.24,67.85 +2020-08-17 20:00:00,15.83,0.0,-0.0,0.0,1.86,75.15 +2020-08-17 21:00:00,14.66,0.0,-0.0,0.0,1.86,80.55 +2020-08-17 22:00:00,13.85,0.0,-0.0,0.0,1.93,83.35 +2020-08-17 23:00:00,13.42,0.0,-0.0,0.0,2.0,83.35 +2020-08-18 00:00:00,12.79,0.0,-0.0,0.0,2.14,86.25 +2020-08-18 01:00:00,12.5,0.0,-0.0,0.0,2.14,86.25 +2020-08-18 02:00:00,12.47,0.0,-0.0,0.0,2.28,83.25 +2020-08-18 03:00:00,12.42,0.0,-0.0,0.0,2.34,83.25 +2020-08-18 04:00:00,12.04,0.0,0.0,0.0,2.48,83.15 +2020-08-18 05:00:00,12.57,91.0,117.21,70.0,2.41,80.3 +2020-08-18 06:00:00,15.58,210.0,189.59,146.0,2.21,75.15 +2020-08-18 07:00:00,18.49,389.0,433.2,179.0,3.03,70.45 +2020-08-18 08:00:00,20.15,498.0,429.02,236.0,3.17,68.3 +2020-08-18 09:00:00,21.64,629.0,561.7,232.0,3.17,61.85 +2020-08-18 10:00:00,22.46,716.0,652.32,216.0,4.0,57.75 +2020-08-18 11:00:00,22.92,562.0,237.99,375.0,4.14,54.0 +2020-08-18 12:00:00,23.3,642.0,448.1,300.0,4.0,52.25 +2020-08-18 13:00:00,23.69,264.0,7.14,259.0,3.86,48.8 +2020-08-18 14:00:00,24.0,575.0,742.8,128.0,3.45,50.55 +2020-08-18 15:00:00,23.65,451.0,744.97,98.0,2.48,54.2 +2020-08-18 16:00:00,23.55,272.0,556.24,91.0,2.28,56.15 +2020-08-18 17:00:00,22.62,51.0,12.01,49.0,1.79,59.95 +2020-08-18 18:00:00,20.99,0.0,0.0,0.0,1.86,66.15 +2020-08-18 19:00:00,19.11,0.0,-0.0,0.0,2.0,73.05 +2020-08-18 20:00:00,17.75,0.0,-0.0,0.0,2.07,80.85 +2020-08-18 21:00:00,16.76,0.0,-0.0,0.0,2.14,80.8 +2020-08-18 22:00:00,16.36,0.0,-0.0,0.0,2.14,83.6 +2020-08-18 23:00:00,15.8,0.0,-0.0,0.0,2.28,83.55 +2020-08-19 00:00:00,15.6,0.0,-0.0,0.0,2.34,83.55 +2020-08-19 01:00:00,15.61,0.0,-0.0,0.0,2.41,80.65 +2020-08-19 02:00:00,15.57,0.0,-0.0,0.0,2.48,80.65 +2020-08-19 03:00:00,15.65,0.0,-0.0,0.0,2.62,77.85 +2020-08-19 04:00:00,15.89,0.0,0.0,0.0,2.69,77.85 +2020-08-19 05:00:00,16.37,38.0,0.0,38.0,2.69,77.95 +2020-08-19 06:00:00,18.24,98.0,2.99,97.0,2.55,75.45 +2020-08-19 07:00:00,20.88,420.0,608.65,127.0,2.9,70.8 +2020-08-19 08:00:00,22.84,461.0,339.16,255.0,3.38,64.2 +2020-08-19 09:00:00,24.33,451.0,163.48,336.0,3.59,60.25 +2020-08-19 10:00:00,25.61,659.0,509.77,270.0,3.72,56.6 +2020-08-19 11:00:00,26.56,647.0,432.12,309.0,3.79,54.95 +2020-08-19 12:00:00,27.34,610.0,389.74,314.0,3.66,51.45 +2020-08-19 13:00:00,27.53,139.0,0.0,139.0,3.24,53.2 +2020-08-19 14:00:00,27.53,68.0,0.0,68.0,2.83,53.2 +2020-08-19 15:00:00,27.11,205.0,31.96,190.0,2.48,53.2 +2020-08-19 16:00:00,24.97,274.0,592.57,84.0,1.45,71.5 +2020-08-19 17:00:00,23.84,24.0,0.0,24.0,0.62,76.3 +2020-08-19 18:00:00,21.91,0.0,0.0,0.0,1.66,84.1 +2020-08-19 19:00:00,20.33,0.0,-0.0,0.0,1.52,92.95 +2020-08-19 20:00:00,19.35,0.0,-0.0,0.0,0.97,96.1 +2020-08-19 21:00:00,18.75,0.0,-0.0,0.0,0.48,99.4 +2020-08-19 22:00:00,18.55,0.0,-0.0,0.0,0.28,96.1 +2020-08-19 23:00:00,17.36,0.0,-0.0,0.0,1.03,96.05 +2020-08-20 00:00:00,17.84,0.0,-0.0,0.0,1.72,99.4 +2020-08-20 01:00:00,18.06,0.0,-0.0,0.0,2.55,92.85 +2020-08-20 02:00:00,17.46,0.0,-0.0,0.0,2.97,89.7 +2020-08-20 03:00:00,16.74,0.0,-0.0,0.0,2.97,89.65 +2020-08-20 04:00:00,16.06,0.0,0.0,0.0,2.9,89.65 +2020-08-20 05:00:00,15.75,24.0,0.0,24.0,2.97,92.75 +2020-08-20 06:00:00,16.46,267.0,502.17,101.0,2.62,86.6 +2020-08-20 07:00:00,17.55,423.0,612.98,130.0,3.17,83.7 +2020-08-20 08:00:00,19.07,560.0,677.12,151.0,3.79,73.05 +2020-08-20 09:00:00,20.25,579.0,432.8,276.0,4.62,63.7 +2020-08-20 10:00:00,20.98,513.0,190.88,368.0,4.69,59.5 +2020-08-20 11:00:00,21.49,597.0,313.39,353.0,4.83,55.65 +2020-08-20 
12:00:00,21.66,102.0,0.0,102.0,4.9,53.75 +2020-08-20 13:00:00,21.55,198.0,0.0,198.0,4.69,53.75 +2020-08-20 14:00:00,21.51,217.0,6.74,213.0,4.34,53.75 +2020-08-20 15:00:00,21.33,418.0,647.66,117.0,3.93,55.55 +2020-08-20 16:00:00,21.0,196.0,186.83,137.0,3.59,55.55 +2020-08-20 17:00:00,20.36,92.0,198.07,61.0,2.97,59.35 +2020-08-20 18:00:00,19.45,0.0,-0.0,0.0,2.83,61.3 +2020-08-20 19:00:00,18.51,0.0,-0.0,0.0,2.28,63.4 +2020-08-20 20:00:00,17.58,0.0,-0.0,0.0,2.41,67.85 +2020-08-20 21:00:00,17.18,0.0,-0.0,0.0,2.55,67.85 +2020-08-20 22:00:00,16.91,0.0,-0.0,0.0,2.76,70.2 +2020-08-20 23:00:00,16.81,0.0,-0.0,0.0,2.97,70.2 +2020-08-21 00:00:00,16.35,0.0,-0.0,0.0,2.9,72.65 +2020-08-21 01:00:00,16.09,0.0,-0.0,0.0,2.9,72.65 +2020-08-21 02:00:00,15.59,0.0,-0.0,0.0,2.83,75.15 +2020-08-21 03:00:00,15.31,0.0,-0.0,0.0,2.69,77.8 +2020-08-21 04:00:00,14.87,0.0,0.0,0.0,2.41,80.55 +2020-08-21 05:00:00,14.75,115.0,351.02,56.0,2.21,83.45 +2020-08-21 06:00:00,15.85,282.0,599.36,86.0,2.07,80.65 +2020-08-21 07:00:00,17.1,431.0,659.57,118.0,2.83,75.4 +2020-08-21 08:00:00,18.31,572.0,727.56,135.0,3.1,70.35 +2020-08-21 09:00:00,19.36,589.0,457.89,270.0,3.17,63.5 +2020-08-21 10:00:00,20.21,660.0,503.89,279.0,3.1,59.35 +2020-08-21 11:00:00,20.7,741.0,690.4,206.0,3.03,55.45 +2020-08-21 12:00:00,21.19,633.0,441.61,301.0,2.62,53.6 +2020-08-21 13:00:00,21.61,659.0,711.88,169.0,2.62,51.9 +2020-08-21 14:00:00,21.77,526.0,579.12,185.0,2.55,50.05 +2020-08-21 15:00:00,21.65,368.0,417.31,176.0,2.0,51.9 +2020-08-21 16:00:00,21.39,144.0,51.47,128.0,1.45,57.55 +2020-08-21 17:00:00,20.42,82.0,145.33,60.0,0.83,75.75 +2020-08-21 18:00:00,20.2,0.0,-0.0,0.0,0.76,65.95 +2020-08-21 19:00:00,20.32,0.0,-0.0,0.0,0.62,59.35 +2020-08-21 20:00:00,17.62,0.0,-0.0,0.0,1.31,75.4 +2020-08-21 21:00:00,15.98,0.0,-0.0,0.0,1.59,77.95 +2020-08-21 22:00:00,14.69,0.0,-0.0,0.0,1.72,86.45 +2020-08-21 23:00:00,13.78,0.0,-0.0,0.0,1.86,89.45 +2020-08-22 00:00:00,13.18,0.0,-0.0,0.0,1.93,89.4 +2020-08-22 01:00:00,13.03,0.0,-0.0,0.0,1.93,86.3 +2020-08-22 02:00:00,12.75,0.0,-0.0,0.0,2.0,86.25 +2020-08-22 03:00:00,12.51,0.0,-0.0,0.0,1.93,86.25 +2020-08-22 04:00:00,12.17,0.0,0.0,0.0,1.86,86.2 +2020-08-22 05:00:00,12.27,52.0,12.17,50.0,1.66,89.35 +2020-08-22 06:00:00,15.52,258.0,479.22,103.0,1.17,83.55 +2020-08-22 07:00:00,18.6,383.0,458.52,167.0,0.97,75.55 +2020-08-22 08:00:00,20.12,378.0,157.4,284.0,1.03,65.95 +2020-08-22 09:00:00,21.33,609.0,536.64,237.0,1.45,63.9 +2020-08-22 10:00:00,22.27,648.0,487.68,281.0,1.72,57.75 +2020-08-22 11:00:00,22.91,759.0,775.44,161.0,1.66,55.9 +2020-08-22 12:00:00,23.4,713.0,696.67,192.0,1.72,52.25 +2020-08-22 13:00:00,23.89,660.0,744.02,151.0,2.14,48.8 +2020-08-22 14:00:00,23.97,505.0,527.07,197.0,2.28,47.15 +2020-08-22 15:00:00,23.95,287.0,171.3,209.0,2.28,47.15 +2020-08-22 16:00:00,23.39,52.0,0.0,52.0,1.66,54.1 +2020-08-22 17:00:00,22.43,64.0,61.57,55.0,1.66,61.95 +2020-08-22 18:00:00,20.95,0.0,-0.0,0.0,1.79,66.05 +2020-08-22 19:00:00,19.59,0.0,-0.0,0.0,1.93,73.15 +2020-08-22 20:00:00,18.2,0.0,-0.0,0.0,1.93,80.9 +2020-08-22 21:00:00,17.23,0.0,-0.0,0.0,1.86,86.65 +2020-08-22 22:00:00,16.89,0.0,-0.0,0.0,1.59,89.65 +2020-08-22 23:00:00,16.56,0.0,-0.0,0.0,1.31,89.65 +2020-08-23 00:00:00,16.49,0.0,-0.0,0.0,1.66,89.65 +2020-08-23 01:00:00,16.53,0.0,-0.0,0.0,1.93,86.6 +2020-08-23 02:00:00,16.45,0.0,-0.0,0.0,2.48,86.6 +2020-08-23 03:00:00,15.89,0.0,-0.0,0.0,3.03,92.75 +2020-08-23 04:00:00,15.62,0.0,0.0,0.0,3.31,89.6 +2020-08-23 05:00:00,15.5,35.0,0.0,35.0,4.07,89.6 +2020-08-23 06:00:00,15.09,87.0,0.0,87.0,4.62,89.55 +2020-08-23 
07:00:00,14.78,376.0,434.15,173.0,5.1,86.45 +2020-08-23 08:00:00,15.09,351.0,116.22,282.0,4.9,83.5 +2020-08-23 09:00:00,15.78,641.0,648.12,194.0,4.97,80.65 +2020-08-23 10:00:00,16.95,610.0,391.23,317.0,4.97,67.85 +2020-08-23 11:00:00,17.77,676.0,517.34,279.0,4.9,63.2 +2020-08-23 12:00:00,18.26,668.0,557.93,253.0,4.55,58.9 +2020-08-23 13:00:00,18.68,660.0,748.7,151.0,4.28,54.95 +2020-08-23 14:00:00,18.96,484.0,457.04,219.0,4.14,53.0 +2020-08-23 15:00:00,18.94,322.0,277.44,197.0,4.0,51.15 +2020-08-23 16:00:00,18.52,237.0,452.07,101.0,3.45,49.3 +2020-08-23 17:00:00,17.89,87.0,241.3,53.0,2.48,52.75 +2020-08-23 18:00:00,16.59,0.0,-0.0,0.0,1.93,52.65 +2020-08-23 19:00:00,14.82,0.0,-0.0,0.0,2.0,62.65 +2020-08-23 20:00:00,13.44,0.0,-0.0,0.0,1.93,69.65 +2020-08-23 21:00:00,12.41,0.0,-0.0,0.0,2.0,72.05 +2020-08-23 22:00:00,11.88,0.0,-0.0,0.0,2.14,77.3 +2020-08-23 23:00:00,12.13,0.0,-0.0,0.0,2.34,74.6 +2020-08-24 00:00:00,11.98,0.0,-0.0,0.0,2.41,74.6 +2020-08-24 01:00:00,11.59,0.0,-0.0,0.0,2.48,77.3 +2020-08-24 02:00:00,11.69,0.0,-0.0,0.0,2.76,77.3 +2020-08-24 03:00:00,11.93,0.0,-0.0,0.0,3.1,74.6 +2020-08-24 04:00:00,12.34,0.0,-0.0,0.0,3.59,77.35 +2020-08-24 05:00:00,12.44,10.0,0.0,10.0,3.59,80.3 +2020-08-24 06:00:00,12.48,53.0,0.0,53.0,3.52,86.25 +2020-08-24 07:00:00,12.57,94.0,0.0,94.0,5.31,89.4 +2020-08-24 08:00:00,12.87,140.0,0.0,140.0,4.97,92.6 +2020-08-24 09:00:00,13.69,153.0,0.0,153.0,6.14,83.35 +2020-08-24 10:00:00,14.26,246.0,1.34,245.0,6.21,80.45 +2020-08-24 11:00:00,14.9,566.0,268.49,361.0,4.76,77.75 +2020-08-24 12:00:00,15.66,216.0,0.0,216.0,5.1,67.55 +2020-08-24 13:00:00,16.62,432.0,152.48,329.0,4.0,63.05 +2020-08-24 14:00:00,17.56,432.0,307.72,255.0,4.62,58.8 +2020-08-24 15:00:00,17.82,352.0,403.9,172.0,4.48,58.8 +2020-08-24 16:00:00,17.47,220.0,368.61,111.0,4.14,60.95 +2020-08-24 17:00:00,17.16,90.0,317.19,47.0,3.86,58.8 +2020-08-24 18:00:00,16.43,0.0,-0.0,0.0,3.31,62.95 +2020-08-24 19:00:00,15.29,0.0,-0.0,0.0,2.41,69.9 +2020-08-24 20:00:00,14.62,0.0,-0.0,0.0,2.14,72.4 +2020-08-24 21:00:00,13.77,0.0,-0.0,0.0,2.0,77.6 +2020-08-24 22:00:00,12.81,0.0,-0.0,0.0,1.86,83.25 +2020-08-24 23:00:00,11.69,0.0,-0.0,0.0,1.86,86.15 +2020-08-25 00:00:00,10.82,0.0,-0.0,0.0,1.86,89.25 +2020-08-25 01:00:00,10.42,0.0,-0.0,0.0,1.79,86.05 +2020-08-25 02:00:00,10.1,0.0,-0.0,0.0,1.72,89.2 +2020-08-25 03:00:00,9.83,0.0,-0.0,0.0,1.66,92.45 +2020-08-25 04:00:00,9.54,0.0,-0.0,0.0,1.66,92.45 +2020-08-25 05:00:00,9.92,62.0,45.76,55.0,1.52,92.5 +2020-08-25 06:00:00,12.52,204.0,233.56,131.0,1.24,89.4 +2020-08-25 07:00:00,16.13,368.0,427.83,171.0,1.24,80.75 +2020-08-25 08:00:00,17.07,385.0,185.8,276.0,1.38,78.1 +2020-08-25 09:00:00,17.43,253.0,5.86,249.0,1.45,75.4 +2020-08-25 10:00:00,17.49,215.0,0.0,215.0,1.31,75.4 +2020-08-25 11:00:00,17.59,279.0,5.27,275.0,1.45,75.4 +2020-08-25 12:00:00,17.68,433.0,103.32,357.0,1.72,75.4 +2020-08-25 13:00:00,17.2,338.0,50.66,304.0,1.17,78.1 +2020-08-25 14:00:00,16.91,202.0,5.26,199.0,1.24,86.6 +2020-08-25 15:00:00,17.27,146.0,4.54,144.0,1.24,83.7 +2020-08-25 16:00:00,17.36,116.0,24.1,109.0,1.24,83.7 +2020-08-25 17:00:00,17.1,59.0,76.83,49.0,1.24,83.7 +2020-08-25 18:00:00,16.49,0.0,-0.0,0.0,1.45,83.65 +2020-08-25 19:00:00,15.89,0.0,-0.0,0.0,1.24,89.6 +2020-08-25 20:00:00,14.14,0.0,-0.0,0.0,1.66,92.7 +2020-08-25 21:00:00,13.29,0.0,-0.0,0.0,1.79,92.65 +2020-08-25 22:00:00,13.1,0.0,-0.0,0.0,1.86,92.65 +2020-08-25 23:00:00,13.54,0.0,-0.0,0.0,2.07,89.45 +2020-08-26 00:00:00,13.76,0.0,-0.0,0.0,2.28,89.45 +2020-08-26 01:00:00,14.07,0.0,-0.0,0.0,2.28,89.5 +2020-08-26 
02:00:00,14.23,0.0,-0.0,0.0,2.14,92.7 +2020-08-26 03:00:00,14.41,0.0,-0.0,0.0,2.21,92.7 +2020-08-26 04:00:00,14.54,0.0,-0.0,0.0,2.21,92.7 +2020-08-26 05:00:00,14.52,43.0,6.7,42.0,2.21,92.7 +2020-08-26 06:00:00,15.44,174.0,132.74,133.0,2.07,89.6 +2020-08-26 07:00:00,16.35,343.0,345.84,185.0,2.21,89.65 +2020-08-26 08:00:00,17.79,444.0,334.44,249.0,2.55,83.7 +2020-08-26 09:00:00,18.46,528.0,346.18,293.0,2.48,80.9 +2020-08-26 10:00:00,19.69,538.0,261.62,345.0,3.1,68.2 +2020-08-26 11:00:00,20.36,589.0,334.82,336.0,3.17,63.7 +2020-08-26 12:00:00,21.05,575.0,348.66,320.0,2.97,57.55 +2020-08-26 13:00:00,21.19,579.0,519.04,233.0,2.62,57.55 +2020-08-26 14:00:00,21.35,424.0,316.39,245.0,2.41,57.55 +2020-08-26 15:00:00,21.41,345.0,417.74,163.0,2.14,57.55 +2020-08-26 16:00:00,21.28,217.0,413.69,99.0,1.66,59.6 +2020-08-26 17:00:00,20.24,68.0,168.42,47.0,0.97,73.2 +2020-08-26 18:00:00,20.53,0.0,-0.0,0.0,0.28,61.6 +2020-08-26 19:00:00,17.34,0.0,-0.0,0.0,1.31,80.85 +2020-08-26 20:00:00,16.59,0.0,-0.0,0.0,1.31,80.8 +2020-08-26 21:00:00,15.62,0.0,-0.0,0.0,1.52,83.55 +2020-08-26 22:00:00,14.7,0.0,-0.0,0.0,1.66,89.5 +2020-08-26 23:00:00,14.19,0.0,-0.0,0.0,1.66,89.5 +2020-08-27 00:00:00,14.15,0.0,-0.0,0.0,1.52,92.7 +2020-08-27 01:00:00,13.98,0.0,-0.0,0.0,1.52,92.7 +2020-08-27 02:00:00,14.01,0.0,-0.0,0.0,1.45,92.7 +2020-08-27 03:00:00,14.09,0.0,-0.0,0.0,1.45,92.7 +2020-08-27 04:00:00,14.0,0.0,-0.0,0.0,1.38,92.7 +2020-08-27 05:00:00,14.3,42.0,6.88,41.0,1.17,96.0 +2020-08-27 06:00:00,16.28,153.0,78.65,129.0,0.76,89.65 +2020-08-27 07:00:00,18.62,259.0,116.94,206.0,1.1,81.0 +2020-08-27 08:00:00,19.84,287.0,51.78,257.0,1.72,73.15 +2020-08-27 09:00:00,20.81,480.0,245.88,314.0,2.48,68.4 +2020-08-27 10:00:00,21.35,545.0,282.07,338.0,2.62,66.15 +2020-08-27 11:00:00,21.88,718.0,707.84,186.0,2.55,61.85 +2020-08-27 12:00:00,22.63,711.0,771.56,150.0,2.69,57.9 +2020-08-27 13:00:00,23.25,661.0,821.67,117.0,2.76,54.1 +2020-08-27 14:00:00,23.77,529.0,720.23,125.0,2.69,52.35 +2020-08-27 15:00:00,23.78,375.0,587.57,122.0,2.34,52.35 +2020-08-27 16:00:00,23.2,220.0,464.44,90.0,1.72,58.0 +2020-08-27 17:00:00,22.16,69.0,209.8,44.0,1.31,66.35 +2020-08-27 18:00:00,22.16,0.0,-0.0,0.0,0.55,59.8 +2020-08-27 19:00:00,21.24,0.0,-0.0,0.0,0.69,63.9 +2020-08-27 20:00:00,17.3,0.0,-0.0,0.0,1.79,83.7 +2020-08-27 21:00:00,16.04,0.0,-0.0,0.0,2.0,86.55 +2020-08-27 22:00:00,15.17,0.0,-0.0,0.0,1.86,89.55 +2020-08-27 23:00:00,14.85,0.0,-0.0,0.0,1.72,92.7 +2020-08-28 00:00:00,15.26,0.0,-0.0,0.0,2.21,89.55 +2020-08-28 01:00:00,14.88,0.0,-0.0,0.0,2.0,92.7 +2020-08-28 02:00:00,14.44,0.0,-0.0,0.0,1.93,92.7 +2020-08-28 03:00:00,14.22,0.0,-0.0,0.0,1.66,96.0 +2020-08-28 04:00:00,14.53,0.0,-0.0,0.0,1.66,96.0 +2020-08-28 05:00:00,14.63,17.0,0.0,17.0,1.52,96.0 +2020-08-28 06:00:00,15.68,152.0,86.26,126.0,2.07,92.75 +2020-08-28 07:00:00,16.71,81.0,0.0,81.0,2.55,86.6 +2020-08-28 08:00:00,17.63,90.0,0.0,90.0,2.97,83.7 +2020-08-28 09:00:00,18.14,161.0,0.0,161.0,3.1,80.9 +2020-08-28 10:00:00,18.26,190.0,0.0,190.0,3.24,80.9 +2020-08-28 11:00:00,18.95,137.0,0.0,137.0,3.52,78.25 +2020-08-28 12:00:00,19.19,223.0,0.0,223.0,3.72,75.65 +2020-08-28 13:00:00,19.17,301.0,30.42,281.0,3.66,75.65 +2020-08-28 14:00:00,19.49,316.0,102.51,259.0,4.0,73.15 +2020-08-28 15:00:00,19.31,120.0,0.0,120.0,3.38,75.65 +2020-08-28 16:00:00,19.15,111.0,29.14,103.0,3.03,73.05 +2020-08-28 17:00:00,18.76,47.0,61.64,40.0,2.55,75.55 +2020-08-28 18:00:00,18.05,0.0,-0.0,0.0,2.14,75.45 +2020-08-28 19:00:00,17.1,0.0,-0.0,0.0,2.34,75.4 +2020-08-28 20:00:00,16.76,0.0,-0.0,0.0,2.34,78.0 +2020-08-28 
21:00:00,16.44,0.0,-0.0,0.0,2.34,78.0 +2020-08-28 22:00:00,15.98,0.0,-0.0,0.0,2.41,80.75 +2020-08-28 23:00:00,15.92,0.0,-0.0,0.0,2.41,83.55 +2020-08-29 00:00:00,15.92,0.0,-0.0,0.0,2.62,83.55 +2020-08-29 01:00:00,15.99,0.0,-0.0,0.0,2.83,80.75 +2020-08-29 02:00:00,15.79,0.0,-0.0,0.0,2.9,83.55 +2020-08-29 03:00:00,15.72,0.0,-0.0,0.0,3.03,83.55 +2020-08-29 04:00:00,15.85,0.0,-0.0,0.0,3.17,86.5 +2020-08-29 05:00:00,15.68,49.0,29.08,45.0,3.17,86.5 +2020-08-29 06:00:00,16.09,49.0,0.0,49.0,3.66,86.55 +2020-08-29 07:00:00,16.33,120.0,0.0,120.0,4.14,89.65 +2020-08-29 08:00:00,16.53,157.0,0.0,157.0,3.86,86.6 +2020-08-29 09:00:00,16.64,175.0,0.0,175.0,4.69,86.6 +2020-08-29 10:00:00,17.12,216.0,0.0,216.0,4.55,83.7 +2020-08-29 11:00:00,17.66,337.0,26.91,317.0,4.62,83.7 +2020-08-29 12:00:00,18.4,202.0,0.0,202.0,5.38,80.9 +2020-08-29 13:00:00,19.12,469.0,255.83,302.0,5.86,75.65 +2020-08-29 14:00:00,18.8,148.0,0.0,148.0,5.52,78.25 +2020-08-29 15:00:00,18.72,135.0,2.38,134.0,5.45,78.25 +2020-08-29 16:00:00,18.37,79.0,3.72,78.0,4.76,80.9 +2020-08-29 17:00:00,17.98,39.0,27.8,36.0,3.86,80.9 +2020-08-29 18:00:00,17.47,0.0,-0.0,0.0,3.52,78.1 +2020-08-29 19:00:00,16.65,0.0,-0.0,0.0,2.48,80.8 +2020-08-29 20:00:00,16.39,0.0,-0.0,0.0,2.28,83.6 +2020-08-29 21:00:00,15.91,0.0,-0.0,0.0,2.07,83.55 +2020-08-29 22:00:00,15.3,0.0,-0.0,0.0,1.93,86.5 +2020-08-29 23:00:00,14.92,0.0,-0.0,0.0,1.72,89.5 +2020-08-30 00:00:00,14.64,0.0,-0.0,0.0,1.52,89.5 +2020-08-30 01:00:00,14.06,0.0,-0.0,0.0,1.52,92.7 +2020-08-30 02:00:00,13.88,0.0,-0.0,0.0,1.38,95.95 +2020-08-30 03:00:00,13.65,0.0,-0.0,0.0,1.24,95.95 +2020-08-30 04:00:00,13.05,0.0,-0.0,0.0,1.24,95.95 +2020-08-30 05:00:00,13.05,59.0,74.83,49.0,1.03,95.95 +2020-08-30 06:00:00,14.28,148.0,78.27,125.0,1.24,96.0 +2020-08-30 07:00:00,15.0,342.0,375.47,176.0,1.59,86.5 +2020-08-30 08:00:00,16.02,457.0,399.46,230.0,1.45,77.95 +2020-08-30 09:00:00,17.06,577.0,506.23,241.0,1.59,70.3 +2020-08-30 10:00:00,18.11,668.0,616.34,223.0,1.45,65.55 +2020-08-30 11:00:00,18.92,699.0,658.91,212.0,1.45,59.05 +2020-08-30 12:00:00,19.47,675.0,663.99,201.0,1.45,55.05 +2020-08-30 13:00:00,19.81,640.0,782.38,133.0,1.52,53.25 +2020-08-30 14:00:00,20.07,517.0,710.55,129.0,1.59,49.7 +2020-08-30 15:00:00,19.88,364.0,588.08,120.0,1.66,51.4 +2020-08-30 16:00:00,19.53,208.0,462.95,86.0,1.72,51.4 +2020-08-30 17:00:00,18.57,57.0,176.09,39.0,1.59,59.05 +2020-08-30 18:00:00,16.91,0.0,-0.0,0.0,1.93,63.05 +2020-08-30 19:00:00,15.55,0.0,-0.0,0.0,2.21,65.15 +2020-08-30 20:00:00,14.41,0.0,-0.0,0.0,2.34,72.3 +2020-08-30 21:00:00,13.45,0.0,-0.0,0.0,2.28,72.2 +2020-08-30 22:00:00,12.51,0.0,-0.0,0.0,2.21,77.45 +2020-08-30 23:00:00,11.8,0.0,-0.0,0.0,2.21,83.1 +2020-08-31 00:00:00,11.34,0.0,-0.0,0.0,2.28,83.05 +2020-08-31 01:00:00,10.92,0.0,-0.0,0.0,2.34,80.1 +2020-08-31 02:00:00,10.61,0.0,-0.0,0.0,2.34,83.0 +2020-08-31 03:00:00,10.39,0.0,-0.0,0.0,2.41,83.0 +2020-08-31 04:00:00,10.24,0.0,-0.0,0.0,2.41,82.95 +2020-08-31 05:00:00,10.44,84.0,308.4,44.0,2.48,83.0 +2020-08-31 06:00:00,12.38,247.0,572.31,81.0,2.28,80.2 +2020-08-31 07:00:00,14.96,405.0,677.55,108.0,2.55,72.45 +2020-08-31 08:00:00,16.76,548.0,752.92,123.0,2.69,67.75 +2020-08-31 09:00:00,18.22,645.0,745.64,153.0,2.69,63.3 +2020-08-31 10:00:00,19.46,725.0,812.05,142.0,2.97,59.15 +2020-08-31 11:00:00,20.35,750.0,831.49,139.0,3.1,55.3 +2020-08-31 12:00:00,21.07,712.0,796.52,147.0,3.31,49.95 +2020-08-31 13:00:00,21.6,656.0,837.98,117.0,3.38,48.3 +2020-08-31 14:00:00,21.84,524.0,744.96,121.0,3.31,46.6 +2020-08-31 15:00:00,21.79,370.0,632.39,111.0,3.03,46.6 +2020-08-31 
16:00:00,19.11,210.0,500.1,81.0,2.81,51.13 +2020-08-31 17:00:00,18.55,63.0,321.36,32.0,2.71,53.15 +2020-08-31 18:00:00,18.0,0.0,-0.0,0.0,2.61,55.17 +2020-08-31 19:00:00,17.45,0.0,-0.0,0.0,2.51,57.19 +2020-08-31 20:00:00,16.89,0.0,-0.0,0.0,2.42,59.21 +2020-08-31 21:00:00,16.34,0.0,-0.0,0.0,2.32,61.22 +2020-08-31 22:00:00,15.78,0.0,-0.0,0.0,2.22,63.24 +2020-08-31 23:00:00,15.23,0.0,-0.0,0.0,2.12,65.26 +2020-09-01 00:00:00,14.67,0.0,-0.0,0.0,2.02,67.28 +2020-09-01 01:00:00,14.12,0.0,-0.0,0.0,1.93,69.3 +2020-09-01 02:00:00,13.57,0.0,-0.0,0.0,1.83,71.32 +2020-09-01 03:00:00,13.01,0.0,-0.0,0.0,1.73,73.34 +2020-09-01 04:00:00,12.46,0.0,-0.0,0.0,1.63,75.36 +2020-09-01 05:00:00,11.9,35.0,7.71,34.0,1.53,77.38 +2020-09-01 06:00:00,11.35,117.0,27.58,109.0,1.44,79.39 +2020-09-01 07:00:00,10.79,271.0,166.54,198.0,1.34,81.41 +2020-09-01 08:00:00,13.74,192.0,3.54,190.0,2.0,74.85 +2020-09-01 09:00:00,14.47,231.0,4.55,228.0,2.14,74.95 +2020-09-01 10:00:00,14.99,118.0,0.0,118.0,2.48,69.9 +2020-09-01 11:00:00,15.32,137.0,0.0,137.0,2.55,72.45 +2020-09-01 12:00:00,15.57,129.0,0.0,129.0,2.9,70.0 +2020-09-01 13:00:00,15.62,89.0,0.0,89.0,2.83,72.55 +2020-09-01 14:00:00,15.26,61.0,0.0,61.0,2.48,77.8 +2020-09-01 15:00:00,14.88,141.0,7.32,138.0,2.21,83.45 +2020-09-01 16:00:00,14.33,107.0,34.89,98.0,2.34,86.4 +2020-09-01 17:00:00,14.0,26.0,10.37,25.0,2.21,86.4 +2020-09-01 18:00:00,13.75,0.0,-0.0,0.0,2.0,89.45 +2020-09-01 19:00:00,14.09,0.0,-0.0,0.0,2.48,83.4 +2020-09-01 20:00:00,13.79,0.0,-0.0,0.0,2.34,86.35 +2020-09-01 21:00:00,13.51,0.0,-0.0,0.0,2.21,89.45 +2020-09-01 22:00:00,13.4,0.0,-0.0,0.0,2.14,92.65 +2020-09-01 23:00:00,13.27,0.0,-0.0,0.0,2.28,92.65 +2020-09-02 00:00:00,13.24,0.0,-0.0,0.0,2.55,92.65 +2020-09-02 01:00:00,12.99,0.0,-0.0,0.0,2.76,92.65 +2020-09-02 02:00:00,12.98,0.0,-0.0,0.0,2.83,92.65 +2020-09-02 03:00:00,12.94,0.0,-0.0,0.0,2.97,95.95 +2020-09-02 04:00:00,12.9,0.0,-0.0,0.0,3.17,95.95 +2020-09-02 05:00:00,12.9,7.0,0.0,7.0,3.31,95.95 +2020-09-02 06:00:00,12.93,40.0,0.0,40.0,3.24,99.4 +2020-09-02 07:00:00,13.07,58.0,0.0,58.0,3.24,92.65 +2020-09-02 08:00:00,13.43,95.0,0.0,95.0,3.38,92.65 +2020-09-02 09:00:00,14.15,92.0,0.0,92.0,3.31,89.5 +2020-09-02 10:00:00,14.49,119.0,0.0,119.0,3.24,92.7 +2020-09-02 11:00:00,15.22,151.0,0.0,151.0,3.24,86.5 +2020-09-02 12:00:00,16.31,160.0,0.0,160.0,2.9,83.6 +2020-09-02 13:00:00,17.49,142.0,0.0,142.0,2.34,80.85 +2020-09-02 14:00:00,18.16,118.0,0.0,118.0,2.0,78.25 +2020-09-02 15:00:00,18.98,107.0,0.0,107.0,2.14,75.65 +2020-09-02 16:00:00,18.82,99.0,27.74,92.0,2.28,81.05 +2020-09-02 17:00:00,18.31,32.0,33.09,29.0,2.07,83.8 +2020-09-02 18:00:00,17.61,0.0,-0.0,0.0,1.66,86.7 +2020-09-02 19:00:00,17.83,0.0,-0.0,0.0,1.38,89.75 +2020-09-02 20:00:00,17.19,0.0,-0.0,0.0,1.38,92.85 +2020-09-02 21:00:00,16.82,0.0,-0.0,0.0,1.66,92.8 +2020-09-02 22:00:00,16.8,0.0,-0.0,0.0,1.79,96.05 +2020-09-02 23:00:00,16.84,0.0,-0.0,0.0,1.66,96.05 +2020-09-03 00:00:00,16.88,0.0,-0.0,0.0,1.52,96.05 +2020-09-03 01:00:00,16.79,0.0,-0.0,0.0,1.59,96.05 +2020-09-03 02:00:00,16.57,0.0,-0.0,0.0,1.66,92.8 +2020-09-03 03:00:00,16.48,0.0,-0.0,0.0,1.86,96.05 +2020-09-03 04:00:00,16.29,0.0,-0.0,0.0,1.93,92.8 +2020-09-03 05:00:00,16.07,64.0,156.01,45.0,1.79,92.8 +2020-09-03 06:00:00,16.79,70.0,0.0,70.0,1.66,92.8 +2020-09-03 07:00:00,17.96,250.0,130.02,194.0,1.93,86.7 +2020-09-03 08:00:00,19.6,475.0,526.26,182.0,2.55,75.7 +2020-09-03 09:00:00,20.22,451.0,227.03,303.0,2.55,73.2 +2020-09-03 10:00:00,21.22,460.0,167.69,341.0,2.76,68.5 +2020-09-03 11:00:00,21.8,128.0,0.0,128.0,2.69,66.25 +2020-09-03 
12:00:00,22.28,113.0,0.0,113.0,2.69,64.1 +2020-09-03 13:00:00,22.73,287.0,30.0,268.0,2.83,62.05 +2020-09-03 14:00:00,22.69,255.0,47.12,230.0,2.83,57.9 +2020-09-03 15:00:00,21.81,136.0,7.52,133.0,2.07,66.25 +2020-09-03 16:00:00,20.86,148.0,186.52,102.0,2.14,73.3 +2020-09-03 17:00:00,20.16,36.0,70.75,30.0,2.07,75.75 +2020-09-03 18:00:00,19.48,0.0,-0.0,0.0,1.79,78.3 +2020-09-03 19:00:00,19.24,0.0,-0.0,0.0,1.38,81.05 +2020-09-03 20:00:00,18.64,0.0,-0.0,0.0,1.59,78.3 +2020-09-03 21:00:00,18.3,0.0,-0.0,0.0,1.93,78.25 +2020-09-03 22:00:00,17.67,0.0,-0.0,0.0,2.0,78.15 +2020-09-03 23:00:00,16.67,0.0,-0.0,0.0,1.86,83.65 +2020-09-04 00:00:00,16.39,0.0,-0.0,0.0,1.52,86.55 +2020-09-04 01:00:00,16.46,0.0,-0.0,0.0,1.31,86.55 +2020-09-04 02:00:00,17.33,0.0,-0.0,0.0,1.03,80.85 +2020-09-04 03:00:00,17.62,0.0,-0.0,0.0,0.76,80.9 +2020-09-04 04:00:00,17.08,0.0,-0.0,0.0,0.83,83.7 +2020-09-04 05:00:00,15.12,56.0,110.35,43.0,1.24,89.55 +2020-09-04 06:00:00,16.35,169.0,183.15,118.0,0.83,89.65 +2020-09-04 07:00:00,18.7,320.0,349.07,171.0,1.66,83.85 +2020-09-04 08:00:00,19.73,498.0,622.24,154.0,1.86,81.1 +2020-09-04 09:00:00,21.23,600.0,656.0,175.0,2.07,73.35 +2020-09-04 10:00:00,22.06,639.0,591.12,222.0,2.62,66.35 +2020-09-04 11:00:00,22.67,259.0,4.16,256.0,2.83,62.05 +2020-09-04 12:00:00,23.14,252.0,4.31,249.0,2.62,60.05 +2020-09-04 13:00:00,23.62,486.0,342.13,271.0,2.41,56.15 +2020-09-04 14:00:00,23.48,455.0,540.61,171.0,2.48,56.0 +2020-09-04 15:00:00,23.11,199.0,68.68,172.0,2.55,56.0 +2020-09-04 16:00:00,22.49,121.0,87.18,100.0,2.41,61.95 +2020-09-04 17:00:00,21.53,21.0,12.67,20.0,1.93,66.15 +2020-09-04 18:00:00,20.45,0.0,-0.0,0.0,1.66,70.7 +2020-09-04 19:00:00,19.62,0.0,-0.0,0.0,1.66,70.65 +2020-09-04 20:00:00,18.51,0.0,-0.0,0.0,1.59,72.95 +2020-09-04 21:00:00,17.67,0.0,-0.0,0.0,1.66,72.9 +2020-09-04 22:00:00,17.27,0.0,-0.0,0.0,1.52,75.4 +2020-09-04 23:00:00,18.25,0.0,-0.0,0.0,1.1,68.05 +2020-09-05 00:00:00,18.8,0.0,-0.0,0.0,0.41,65.75 +2020-09-05 01:00:00,18.39,0.0,-0.0,0.0,0.34,68.05 +2020-09-05 02:00:00,17.75,0.0,-0.0,0.0,0.83,70.35 +2020-09-05 03:00:00,17.11,0.0,-0.0,0.0,0.97,72.8 +2020-09-05 04:00:00,16.64,0.0,-0.0,0.0,0.97,75.3 +2020-09-05 05:00:00,16.35,49.0,70.3,41.0,0.76,77.95 +2020-09-05 06:00:00,16.65,226.0,539.06,78.0,0.14,78.0 +2020-09-05 07:00:00,17.68,359.0,529.61,135.0,0.21,80.9 +2020-09-05 08:00:00,19.36,478.0,537.45,183.0,0.62,75.65 +2020-09-05 09:00:00,20.82,610.0,689.67,166.0,1.17,70.8 +2020-09-05 10:00:00,22.02,717.0,852.86,119.0,1.72,64.0 +2020-09-05 11:00:00,23.05,721.0,815.74,136.0,1.93,55.9 +2020-09-05 12:00:00,23.4,619.0,553.14,237.0,1.93,50.45 +2020-09-05 13:00:00,23.6,457.0,267.9,290.0,1.93,47.15 +2020-09-05 14:00:00,23.53,425.0,428.84,202.0,2.0,48.7 +2020-09-05 15:00:00,23.2,283.0,307.04,164.0,2.14,48.7 +2020-09-05 16:00:00,22.76,158.0,267.98,95.0,2.07,50.3 +2020-09-05 17:00:00,21.7,33.0,95.95,26.0,1.72,53.75 +2020-09-05 18:00:00,20.11,0.0,-0.0,0.0,1.86,59.35 +2020-09-05 19:00:00,18.67,0.0,-0.0,0.0,1.72,65.75 +2020-09-05 20:00:00,17.16,0.0,-0.0,0.0,1.79,72.8 +2020-09-05 21:00:00,15.93,0.0,-0.0,0.0,2.0,77.85 +2020-09-05 22:00:00,15.26,0.0,-0.0,0.0,2.0,80.6 +2020-09-05 23:00:00,14.72,0.0,-0.0,0.0,1.93,83.45 +2020-09-06 00:00:00,14.38,0.0,-0.0,0.0,1.79,86.4 +2020-09-06 01:00:00,14.19,0.0,-0.0,0.0,1.59,86.4 +2020-09-06 02:00:00,13.61,0.0,-0.0,0.0,1.66,89.45 +2020-09-06 03:00:00,13.26,0.0,-0.0,0.0,1.59,92.65 +2020-09-06 04:00:00,13.07,0.0,-0.0,0.0,1.59,92.65 +2020-09-06 05:00:00,12.94,53.0,118.41,40.0,1.59,95.95 +2020-09-06 06:00:00,15.34,171.0,214.33,113.0,1.1,86.5 +2020-09-06 
07:00:00,18.93,316.0,355.59,167.0,1.52,70.55 +2020-09-06 08:00:00,20.54,509.0,701.05,127.0,1.93,63.7 +2020-09-06 09:00:00,21.77,641.0,825.44,113.0,2.14,57.65 +2020-09-06 10:00:00,22.68,713.0,863.88,111.0,2.28,52.1 +2020-09-06 11:00:00,23.52,739.0,891.13,104.0,2.0,50.45 +2020-09-06 12:00:00,23.81,675.0,783.06,138.0,1.45,47.15 +2020-09-06 13:00:00,24.06,504.0,412.45,249.0,1.24,47.15 +2020-09-06 14:00:00,24.11,401.0,367.26,212.0,1.24,45.6 +2020-09-06 15:00:00,23.97,344.0,649.32,96.0,1.38,47.15 +2020-09-06 16:00:00,23.47,165.0,357.68,83.0,1.52,50.45 +2020-09-06 17:00:00,22.23,9.0,0.0,9.0,1.86,53.85 +2020-09-06 18:00:00,20.93,0.0,-0.0,0.0,2.07,59.5 +2020-09-06 19:00:00,20.41,0.0,-0.0,0.0,2.21,59.35 +2020-09-06 20:00:00,19.21,0.0,-0.0,0.0,2.21,65.75 +2020-09-06 21:00:00,18.43,0.0,-0.0,0.0,2.14,70.45 +2020-09-06 22:00:00,18.06,0.0,-0.0,0.0,2.07,70.45 +2020-09-06 23:00:00,17.56,0.0,-0.0,0.0,2.14,70.35 +2020-09-07 00:00:00,17.31,0.0,-0.0,0.0,2.21,70.3 +2020-09-07 01:00:00,16.93,0.0,-0.0,0.0,2.28,72.7 +2020-09-07 02:00:00,16.61,0.0,-0.0,0.0,2.21,70.2 +2020-09-07 03:00:00,16.06,0.0,-0.0,0.0,2.21,72.65 +2020-09-07 04:00:00,15.83,0.0,-0.0,0.0,2.07,75.15 +2020-09-07 05:00:00,15.21,6.0,0.0,6.0,1.72,80.6 +2020-09-07 06:00:00,16.13,19.0,0.0,19.0,1.31,80.75 +2020-09-07 07:00:00,16.91,318.0,387.9,157.0,1.03,83.65 +2020-09-07 08:00:00,17.73,166.0,1.85,165.0,1.24,80.9 +2020-09-07 09:00:00,17.65,330.0,62.94,290.0,1.24,86.7 +2020-09-07 10:00:00,18.18,112.0,0.0,112.0,1.31,86.75 +2020-09-07 11:00:00,18.62,88.0,0.0,88.0,1.03,83.85 +2020-09-07 12:00:00,19.67,294.0,20.56,280.0,1.31,81.1 +2020-09-07 13:00:00,19.49,104.0,0.0,104.0,1.31,83.85 +2020-09-07 14:00:00,20.12,428.0,496.88,175.0,1.1,78.45 +2020-09-07 15:00:00,20.82,278.0,342.85,149.0,1.59,68.4 +2020-09-07 16:00:00,20.93,169.0,438.73,71.0,1.52,63.8 +2020-09-07 17:00:00,20.37,27.0,98.45,21.0,0.69,70.7 +2020-09-07 18:00:00,19.14,0.0,-0.0,0.0,0.9,73.05 +2020-09-07 19:00:00,17.13,0.0,-0.0,0.0,1.52,80.85 +2020-09-07 20:00:00,16.43,0.0,-0.0,0.0,1.52,86.55 +2020-09-07 21:00:00,16.59,0.0,-0.0,0.0,1.79,78.0 +2020-09-07 22:00:00,16.81,0.0,-0.0,0.0,2.14,75.3 +2020-09-07 23:00:00,16.68,0.0,-0.0,0.0,2.41,72.7 +2020-09-08 00:00:00,16.36,0.0,-0.0,0.0,2.48,75.25 +2020-09-08 01:00:00,15.89,0.0,-0.0,0.0,2.21,77.85 +2020-09-08 02:00:00,15.13,0.0,-0.0,0.0,1.93,80.6 +2020-09-08 03:00:00,14.53,0.0,-0.0,0.0,1.86,80.55 +2020-09-08 04:00:00,13.54,0.0,-0.0,0.0,2.0,86.35 +2020-09-08 05:00:00,12.87,33.0,19.67,31.0,1.86,89.4 +2020-09-08 06:00:00,13.78,154.0,156.09,113.0,1.59,86.35 +2020-09-08 07:00:00,15.14,374.0,664.13,101.0,1.79,75.1 +2020-09-08 08:00:00,16.02,499.0,672.52,138.0,1.72,65.25 +2020-09-08 09:00:00,16.65,493.0,340.58,278.0,1.66,63.05 +2020-09-08 10:00:00,18.17,650.0,662.73,194.0,2.34,54.95 +2020-09-08 11:00:00,19.1,685.0,732.26,170.0,2.48,51.25 +2020-09-08 12:00:00,19.74,688.0,837.34,122.0,2.28,47.8 +2020-09-08 13:00:00,20.21,610.0,817.62,113.0,1.93,44.6 +2020-09-08 14:00:00,20.87,476.0,710.76,118.0,1.59,43.1 +2020-09-08 15:00:00,20.96,290.0,399.43,142.0,1.24,43.1 +2020-09-08 16:00:00,20.74,167.0,441.49,71.0,1.1,43.1 +2020-09-08 17:00:00,19.49,29.0,182.2,19.0,1.59,51.25 +2020-09-08 18:00:00,17.49,0.0,-0.0,0.0,1.72,56.75 +2020-09-08 19:00:00,15.52,0.0,-0.0,0.0,1.86,62.85 +2020-09-08 20:00:00,14.38,0.0,-0.0,0.0,1.72,69.75 +2020-09-08 21:00:00,13.7,0.0,-0.0,0.0,2.0,69.65 +2020-09-08 22:00:00,13.2,0.0,-0.0,0.0,2.14,72.1 +2020-09-08 23:00:00,12.84,0.0,-0.0,0.0,2.07,77.45 +2020-09-09 00:00:00,12.45,0.0,-0.0,0.0,1.93,80.2 +2020-09-09 01:00:00,12.01,0.0,-0.0,0.0,1.86,83.15 +2020-09-09 
02:00:00,11.8,0.0,-0.0,0.0,1.79,86.15 +2020-09-09 03:00:00,11.29,0.0,-0.0,0.0,1.86,89.3 +2020-09-09 04:00:00,10.91,0.0,-0.0,0.0,1.86,89.25 +2020-09-09 05:00:00,10.87,33.0,30.73,30.0,1.79,89.25 +2020-09-09 06:00:00,12.71,114.0,46.39,102.0,1.31,86.25 +2020-09-09 07:00:00,16.22,171.0,24.57,161.0,1.1,72.65 +2020-09-09 08:00:00,18.4,326.0,137.04,253.0,0.97,63.4 +2020-09-09 09:00:00,20.05,367.0,103.67,302.0,1.03,53.35 +2020-09-09 10:00:00,21.15,657.0,708.02,173.0,0.97,48.2 +2020-09-09 11:00:00,22.18,625.0,545.39,244.0,1.17,45.1 +2020-09-09 12:00:00,22.99,561.0,432.22,271.0,1.59,43.65 +2020-09-09 13:00:00,23.43,412.0,205.78,288.0,1.66,40.75 +2020-09-09 14:00:00,23.43,491.0,792.94,96.0,1.72,40.75 +2020-09-09 15:00:00,23.07,324.0,625.09,96.0,1.86,42.1 +2020-09-09 16:00:00,22.32,173.0,524.87,62.0,1.72,46.75 +2020-09-09 17:00:00,20.99,20.0,81.98,16.0,1.66,53.5 +2020-09-09 18:00:00,18.95,0.0,-0.0,0.0,1.38,63.5 +2020-09-09 19:00:00,18.77,0.0,-0.0,0.0,1.1,57.1 +2020-09-09 20:00:00,19.65,0.0,-0.0,0.0,0.34,51.4 +2020-09-09 21:00:00,18.61,0.0,-0.0,0.0,1.1,53.15 +2020-09-09 22:00:00,16.24,0.0,-0.0,0.0,1.45,65.25 +2020-09-09 23:00:00,14.73,0.0,-0.0,0.0,1.72,72.4 +2020-09-10 00:00:00,13.86,0.0,-0.0,0.0,1.79,77.6 +2020-09-10 01:00:00,13.1,0.0,-0.0,0.0,1.86,80.35 +2020-09-10 02:00:00,12.41,0.0,-0.0,0.0,1.86,86.2 +2020-09-10 03:00:00,11.85,0.0,-0.0,0.0,1.86,89.3 +2020-09-10 04:00:00,11.46,0.0,-0.0,0.0,1.86,89.3 +2020-09-10 05:00:00,11.13,53.0,224.5,32.0,1.86,89.3 +2020-09-10 06:00:00,13.24,203.0,498.78,76.0,1.38,86.3 +2020-09-10 07:00:00,17.87,367.0,667.51,98.0,1.17,67.95 +2020-09-10 08:00:00,21.04,517.0,783.3,103.0,0.97,57.45 +2020-09-10 09:00:00,22.82,610.0,772.44,129.0,1.31,46.85 +2020-09-10 10:00:00,23.8,504.0,275.37,317.0,1.59,43.9 +2020-09-10 11:00:00,24.54,679.0,750.92,158.0,1.79,42.5 +2020-09-10 12:00:00,25.07,510.0,313.86,301.0,2.07,39.7 +2020-09-10 13:00:00,25.44,563.0,684.79,154.0,2.28,35.8 +2020-09-10 14:00:00,25.45,442.0,602.97,145.0,2.48,34.5 +2020-09-10 15:00:00,25.2,141.0,16.72,135.0,2.34,34.5 +2020-09-10 16:00:00,24.22,47.0,0.0,47.0,1.38,42.5 +2020-09-10 17:00:00,22.98,10.0,0.0,10.0,0.76,52.1 +2020-09-10 18:00:00,22.71,0.0,-0.0,0.0,0.69,43.65 +2020-09-10 19:00:00,19.68,0.0,-0.0,0.0,1.52,63.6 +2020-09-10 20:00:00,19.04,0.0,-0.0,0.0,1.93,63.5 +2020-09-10 21:00:00,18.0,0.0,-0.0,0.0,1.52,67.95 +2020-09-10 22:00:00,17.36,0.0,-0.0,0.0,1.45,72.8 +2020-09-10 23:00:00,17.16,0.0,-0.0,0.0,1.79,70.3 +2020-09-11 00:00:00,17.35,0.0,-0.0,0.0,2.0,67.85 +2020-09-11 01:00:00,16.95,0.0,-0.0,0.0,1.72,70.2 +2020-09-11 02:00:00,16.57,0.0,-0.0,0.0,1.38,72.7 +2020-09-11 03:00:00,16.19,0.0,-0.0,0.0,1.31,75.25 +2020-09-11 04:00:00,15.76,0.0,-0.0,0.0,1.31,77.85 +2020-09-11 05:00:00,15.68,15.0,0.0,15.0,1.31,80.65 +2020-09-11 06:00:00,16.38,40.0,0.0,40.0,1.24,80.75 +2020-09-11 07:00:00,18.04,272.0,245.68,174.0,0.83,68.05 +2020-09-11 08:00:00,19.21,496.0,726.62,115.0,1.31,63.5 +2020-09-11 09:00:00,21.48,585.0,706.72,148.0,2.62,55.55 +2020-09-11 10:00:00,23.04,546.0,395.84,279.0,3.66,48.55 +2020-09-11 11:00:00,23.9,649.0,674.9,184.0,3.86,43.9 +2020-09-11 12:00:00,24.76,551.0,440.37,260.0,4.0,38.3 +2020-09-11 13:00:00,25.28,413.0,231.46,276.0,4.07,34.5 +2020-09-11 14:00:00,25.2,372.0,349.13,202.0,4.0,33.3 +2020-09-11 15:00:00,24.94,266.0,368.21,136.0,3.66,34.4 +2020-09-11 16:00:00,24.25,131.0,265.75,78.0,2.69,36.8 +2020-09-11 17:00:00,22.94,9.0,0.0,9.0,2.34,42.1 +2020-09-11 18:00:00,21.52,0.0,-0.0,0.0,2.28,46.5 +2020-09-11 19:00:00,20.81,0.0,-0.0,0.0,2.41,44.7 +2020-09-11 20:00:00,19.79,0.0,-0.0,0.0,2.55,47.8 +2020-09-11 
21:00:00,19.13,0.0,-0.0,0.0,2.69,51.25 +2020-09-11 22:00:00,18.6,0.0,-0.0,0.0,2.69,53.15 +2020-09-11 23:00:00,18.05,0.0,-0.0,0.0,2.62,54.95 +2020-09-12 00:00:00,17.31,0.0,-0.0,0.0,2.41,60.95 +2020-09-12 01:00:00,16.51,0.0,-0.0,0.0,2.41,63.05 +2020-09-12 02:00:00,15.75,0.0,-0.0,0.0,2.34,67.55 +2020-09-12 03:00:00,15.02,0.0,-0.0,0.0,2.28,69.9 +2020-09-12 04:00:00,14.37,0.0,-0.0,0.0,2.21,74.95 +2020-09-12 05:00:00,13.89,47.0,222.7,28.0,2.14,77.6 +2020-09-12 06:00:00,15.23,204.0,547.71,69.0,1.79,77.8 +2020-09-12 07:00:00,20.0,362.0,681.39,93.0,1.66,61.4 +2020-09-12 08:00:00,22.75,512.0,795.96,98.0,2.28,48.55 +2020-09-12 09:00:00,24.7,625.0,851.85,102.0,2.76,42.65 +2020-09-12 10:00:00,26.05,683.0,849.38,114.0,3.1,38.55 +2020-09-12 11:00:00,27.03,701.0,856.57,115.0,3.31,33.7 +2020-09-12 12:00:00,27.71,654.0,800.71,129.0,3.38,29.4 +2020-09-12 13:00:00,28.03,583.0,799.71,114.0,3.17,29.4 +2020-09-12 14:00:00,28.04,461.0,739.75,105.0,3.03,29.4 +2020-09-12 15:00:00,27.73,311.0,639.47,89.0,2.83,30.5 +2020-09-12 16:00:00,26.65,142.0,387.87,67.0,1.93,37.5 +2020-09-12 17:00:00,24.73,7.0,0.0,7.0,1.72,44.15 +2020-09-12 18:00:00,23.56,0.0,-0.0,0.0,1.1,48.7 +2020-09-12 19:00:00,22.03,0.0,-0.0,0.0,1.31,51.9 +2020-09-12 20:00:00,20.65,0.0,-0.0,0.0,3.38,55.45 +2020-09-12 21:00:00,18.71,0.0,-0.0,0.0,3.38,65.75 +2020-09-12 22:00:00,17.21,0.0,-0.0,0.0,2.41,78.1 +2020-09-12 23:00:00,16.28,0.0,-0.0,0.0,2.0,83.6 +2020-09-13 00:00:00,15.55,0.0,-0.0,0.0,1.79,86.5 +2020-09-13 01:00:00,14.54,0.0,-0.0,0.0,1.79,86.45 +2020-09-13 02:00:00,14.21,0.0,-0.0,0.0,1.66,89.5 +2020-09-13 03:00:00,13.83,0.0,-0.0,0.0,1.59,89.45 +2020-09-13 04:00:00,13.57,0.0,-0.0,0.0,1.45,89.45 +2020-09-13 05:00:00,13.51,6.0,0.0,6.0,1.38,89.45 +2020-09-13 06:00:00,14.24,60.0,0.0,60.0,1.72,86.4 +2020-09-13 07:00:00,14.67,166.0,30.72,154.0,2.55,83.45 +2020-09-13 08:00:00,15.56,184.0,5.82,181.0,2.14,77.85 +2020-09-13 09:00:00,16.2,349.0,100.08,288.0,2.0,77.95 +2020-09-13 10:00:00,17.08,175.0,0.0,175.0,2.0,75.4 +2020-09-13 11:00:00,17.63,128.0,0.0,128.0,2.0,75.45 +2020-09-13 12:00:00,18.25,91.0,0.0,91.0,2.0,75.55 +2020-09-13 13:00:00,18.36,101.0,0.0,101.0,1.86,75.55 +2020-09-13 14:00:00,18.14,195.0,18.93,186.0,1.93,75.55 +2020-09-13 15:00:00,17.78,109.0,2.93,108.0,2.07,78.15 +2020-09-13 16:00:00,17.25,39.0,0.0,39.0,2.0,80.85 +2020-09-13 17:00:00,16.64,4.0,0.0,4.0,1.86,83.65 +2020-09-13 18:00:00,16.0,0.0,-0.0,0.0,1.59,86.5 +2020-09-13 19:00:00,14.91,0.0,-0.0,0.0,1.72,92.7 +2020-09-13 20:00:00,14.55,0.0,-0.0,0.0,1.38,92.7 +2020-09-13 21:00:00,14.37,0.0,-0.0,0.0,1.24,96.0 +2020-09-13 22:00:00,14.12,0.0,-0.0,0.0,1.38,96.0 +2020-09-13 23:00:00,13.92,0.0,-0.0,0.0,1.31,95.95 +2020-09-14 00:00:00,13.78,0.0,-0.0,0.0,1.17,95.95 +2020-09-14 01:00:00,13.99,0.0,-0.0,0.0,1.17,92.7 +2020-09-14 02:00:00,13.73,0.0,-0.0,0.0,1.17,95.95 +2020-09-14 03:00:00,13.59,0.0,-0.0,0.0,1.24,95.95 +2020-09-14 04:00:00,13.55,0.0,-0.0,0.0,1.38,92.65 +2020-09-14 05:00:00,13.39,42.0,220.69,25.0,1.45,95.95 +2020-09-14 06:00:00,13.51,37.0,0.0,37.0,1.79,92.65 +2020-09-14 07:00:00,13.42,79.0,0.0,79.0,1.31,99.4 +2020-09-14 08:00:00,13.43,113.0,0.0,113.0,1.45,95.95 +2020-09-14 09:00:00,13.88,99.0,0.0,99.0,1.31,89.45 +2020-09-14 10:00:00,14.92,210.0,1.51,209.0,1.45,83.45 +2020-09-14 11:00:00,15.41,208.0,0.0,208.0,1.59,80.6 +2020-09-14 12:00:00,15.82,127.0,0.0,127.0,1.52,77.85 +2020-09-14 13:00:00,16.42,125.0,0.0,125.0,1.45,75.25 +2020-09-14 14:00:00,16.62,234.0,57.48,207.0,1.31,72.7 +2020-09-14 15:00:00,16.83,229.0,256.53,143.0,1.1,72.7 +2020-09-14 16:00:00,16.63,68.0,27.6,63.0,0.97,72.7 
+2020-09-14 17:00:00,16.33,0.0,0.0,0.0,0.76,75.25 +2020-09-14 18:00:00,15.56,0.0,-0.0,0.0,0.83,80.65 +2020-09-14 19:00:00,15.15,0.0,-0.0,0.0,1.59,83.5 +2020-09-14 20:00:00,14.35,0.0,-0.0,0.0,1.72,86.4 +2020-09-14 21:00:00,13.85,0.0,-0.0,0.0,1.72,89.45 +2020-09-14 22:00:00,13.46,0.0,-0.0,0.0,1.72,92.65 +2020-09-14 23:00:00,13.46,0.0,-0.0,0.0,1.66,92.65 +2020-09-15 00:00:00,13.86,0.0,-0.0,0.0,1.52,92.65 +2020-09-15 01:00:00,14.08,0.0,-0.0,0.0,1.59,89.5 +2020-09-15 02:00:00,13.94,0.0,-0.0,0.0,1.72,92.65 +2020-09-15 03:00:00,13.92,0.0,-0.0,0.0,1.66,92.65 +2020-09-15 04:00:00,13.7,0.0,-0.0,0.0,1.79,92.65 +2020-09-15 05:00:00,13.13,22.0,13.72,21.0,1.93,92.65 +2020-09-15 06:00:00,13.81,58.0,0.0,58.0,1.79,92.65 +2020-09-15 07:00:00,15.45,212.0,104.64,172.0,2.48,86.5 +2020-09-15 08:00:00,16.5,414.0,425.84,198.0,3.1,72.65 +2020-09-15 09:00:00,17.43,538.0,562.83,200.0,3.38,58.8 +2020-09-15 10:00:00,18.38,373.0,86.92,316.0,3.72,49.3 +2020-09-15 11:00:00,19.18,242.0,4.48,239.0,4.14,44.3 +2020-09-15 12:00:00,19.66,588.0,601.62,203.0,4.07,41.3 +2020-09-15 13:00:00,19.95,207.0,5.26,204.0,4.0,39.8 +2020-09-15 14:00:00,19.69,255.0,86.22,215.0,3.79,39.8 +2020-09-15 15:00:00,19.29,271.0,485.97,111.0,3.17,44.3 +2020-09-15 16:00:00,18.72,126.0,371.49,61.0,2.21,47.65 +2020-09-15 17:00:00,17.77,0.0,0.0,0.0,1.59,52.9 +2020-09-15 18:00:00,16.1,0.0,-0.0,0.0,1.38,62.95 +2020-09-15 19:00:00,14.53,0.0,-0.0,0.0,1.59,67.35 +2020-09-15 20:00:00,13.45,0.0,-0.0,0.0,1.79,72.1 +2020-09-15 21:00:00,12.6,0.0,-0.0,0.0,1.93,72.05 +2020-09-15 22:00:00,12.08,0.0,-0.0,0.0,1.93,74.6 +2020-09-15 23:00:00,11.94,0.0,-0.0,0.0,1.79,77.3 +2020-09-16 00:00:00,11.98,0.0,-0.0,0.0,1.59,77.35 +2020-09-16 01:00:00,11.73,0.0,-0.0,0.0,1.52,80.15 +2020-09-16 02:00:00,11.59,0.0,-0.0,0.0,1.38,83.1 +2020-09-16 03:00:00,11.32,0.0,-0.0,0.0,1.31,83.05 +2020-09-16 04:00:00,11.22,0.0,-0.0,0.0,1.24,83.05 +2020-09-16 05:00:00,10.64,31.0,116.48,23.0,1.38,86.05 +2020-09-16 06:00:00,12.06,179.0,469.73,71.0,0.83,83.15 +2020-09-16 07:00:00,14.68,321.0,542.32,116.0,0.69,75.0 +2020-09-16 08:00:00,16.59,453.0,608.52,147.0,1.03,65.35 +2020-09-16 09:00:00,18.22,569.0,713.12,144.0,1.45,59.05 +2020-09-16 10:00:00,19.65,586.0,565.29,218.0,1.86,53.25 +2020-09-16 11:00:00,20.83,580.0,502.88,246.0,2.28,46.35 +2020-09-16 12:00:00,21.5,622.0,762.67,138.0,2.14,43.25 +2020-09-16 13:00:00,21.86,545.0,742.47,126.0,2.0,41.85 +2020-09-16 14:00:00,22.02,418.0,646.13,122.0,1.79,40.35 +2020-09-16 15:00:00,21.95,271.0,532.17,99.0,1.72,40.35 +2020-09-16 16:00:00,21.36,108.0,242.91,67.0,1.45,43.25 +2020-09-16 17:00:00,19.77,0.0,0.0,0.0,1.93,49.55 +2020-09-16 18:00:00,17.89,0.0,-0.0,0.0,2.21,54.85 +2020-09-16 19:00:00,16.81,0.0,-0.0,0.0,2.21,56.6 +2020-09-16 20:00:00,15.68,0.0,-0.0,0.0,2.34,60.65 +2020-09-16 21:00:00,14.81,0.0,-0.0,0.0,2.41,64.95 +2020-09-16 22:00:00,14.27,0.0,-0.0,0.0,2.41,64.85 +2020-09-16 23:00:00,13.73,0.0,-0.0,0.0,2.41,67.15 +2020-09-17 00:00:00,13.29,0.0,-0.0,0.0,2.34,69.55 +2020-09-17 01:00:00,12.63,0.0,-0.0,0.0,2.28,74.7 +2020-09-17 02:00:00,12.08,0.0,-0.0,0.0,2.28,77.35 +2020-09-17 03:00:00,11.61,0.0,-0.0,0.0,2.28,80.15 +2020-09-17 04:00:00,11.37,0.0,-0.0,0.0,2.28,83.05 +2020-09-17 05:00:00,11.14,30.0,124.06,22.0,2.21,83.05 +2020-09-17 06:00:00,12.22,184.0,522.77,66.0,2.0,83.15 +2020-09-17 07:00:00,15.72,336.0,639.48,97.0,1.93,72.55 +2020-09-17 08:00:00,18.97,484.0,754.33,108.0,1.93,61.3 +2020-09-17 09:00:00,21.27,599.0,828.58,109.0,2.0,55.55 +2020-09-17 10:00:00,22.79,649.0,801.66,131.0,1.79,50.3 +2020-09-17 11:00:00,23.84,574.0,488.57,252.0,1.59,45.5 
+2020-09-17 12:00:00,24.52,616.0,743.75,148.0,1.17,42.5 +2020-09-17 13:00:00,24.97,536.0,712.37,138.0,0.83,39.7 +2020-09-17 14:00:00,25.1,417.0,654.52,121.0,0.55,38.45 +2020-09-17 15:00:00,25.03,227.0,296.4,133.0,0.62,39.7 +2020-09-17 16:00:00,24.27,113.0,332.13,59.0,1.1,45.6 +2020-09-17 17:00:00,22.27,0.0,-0.0,0.0,2.0,52.0 +2020-09-17 18:00:00,20.11,0.0,-0.0,0.0,2.21,59.35 +2020-09-17 19:00:00,19.06,0.0,-0.0,0.0,2.34,57.1 +2020-09-17 20:00:00,17.99,0.0,-0.0,0.0,2.41,58.9 +2020-09-17 21:00:00,17.21,0.0,-0.0,0.0,2.55,60.95 +2020-09-17 22:00:00,16.68,0.0,-0.0,0.0,2.55,63.05 +2020-09-17 23:00:00,16.22,0.0,-0.0,0.0,2.55,67.65 +2020-09-18 00:00:00,15.74,0.0,-0.0,0.0,2.55,70.0 +2020-09-18 01:00:00,15.28,0.0,-0.0,0.0,2.48,72.45 +2020-09-18 02:00:00,14.88,0.0,-0.0,0.0,2.55,75.0 +2020-09-18 03:00:00,14.52,0.0,-0.0,0.0,2.48,75.0 +2020-09-18 04:00:00,14.08,0.0,-0.0,0.0,2.48,77.65 +2020-09-18 05:00:00,13.89,20.0,33.18,18.0,2.55,83.35 +2020-09-18 06:00:00,15.09,157.0,338.6,82.0,2.41,77.8 +2020-09-18 07:00:00,18.16,326.0,614.44,99.0,2.28,65.65 +2020-09-18 08:00:00,20.9,471.0,730.74,110.0,2.76,61.6 +2020-09-18 09:00:00,22.82,579.0,784.0,119.0,2.83,57.9 +2020-09-18 10:00:00,24.36,627.0,751.62,145.0,2.62,54.35 +2020-09-18 11:00:00,25.66,663.0,828.83,121.0,2.48,49.3 +2020-09-18 12:00:00,26.6,618.0,782.27,130.0,2.28,46.15 +2020-09-18 13:00:00,27.27,542.0,768.5,117.0,2.21,41.8 +2020-09-18 14:00:00,27.57,418.0,696.78,107.0,2.14,40.35 +2020-09-18 15:00:00,27.57,269.0,581.9,88.0,2.0,38.95 +2020-09-18 16:00:00,26.6,106.0,306.96,58.0,1.66,44.55 +2020-09-18 17:00:00,24.41,0.0,-0.0,0.0,2.28,50.7 +2020-09-18 18:00:00,22.4,0.0,-0.0,0.0,2.41,57.75 +2020-09-18 19:00:00,21.79,0.0,-0.0,0.0,2.62,55.65 +2020-09-18 20:00:00,20.76,0.0,-0.0,0.0,2.48,61.6 +2020-09-18 21:00:00,19.59,0.0,-0.0,0.0,2.34,65.85 +2020-09-18 22:00:00,18.49,0.0,-0.0,0.0,2.28,70.45 +2020-09-18 23:00:00,17.53,0.0,-0.0,0.0,2.21,72.9 +2020-09-19 00:00:00,16.6,0.0,-0.0,0.0,2.21,78.0 +2020-09-19 01:00:00,15.92,0.0,-0.0,0.0,2.21,80.65 +2020-09-19 02:00:00,15.43,0.0,-0.0,0.0,2.07,83.5 +2020-09-19 03:00:00,14.89,0.0,-0.0,0.0,1.86,86.45 +2020-09-19 04:00:00,14.57,0.0,-0.0,0.0,1.79,83.45 +2020-09-19 05:00:00,14.71,18.0,35.68,16.0,1.72,83.45 +2020-09-19 06:00:00,16.05,153.0,349.81,77.0,1.31,77.95 +2020-09-19 07:00:00,18.8,227.0,175.29,163.0,1.17,70.55 +2020-09-19 08:00:00,19.85,426.0,557.66,153.0,1.31,68.2 +2020-09-19 09:00:00,21.26,510.0,542.9,194.0,0.97,61.7 +2020-09-19 10:00:00,22.6,606.0,707.15,156.0,1.1,57.9 +2020-09-19 11:00:00,24.38,634.0,767.63,136.0,1.1,52.5 +2020-09-19 12:00:00,25.58,600.0,755.2,133.0,0.83,49.2 +2020-09-19 13:00:00,26.37,522.0,729.01,123.0,0.9,46.15 +2020-09-19 14:00:00,27.0,401.0,656.21,112.0,0.97,43.15 +2020-09-19 15:00:00,27.13,256.0,550.93,88.0,1.24,41.8 +2020-09-19 16:00:00,26.42,95.0,246.44,58.0,1.45,46.15 +2020-09-19 17:00:00,24.5,0.0,-0.0,0.0,2.14,52.5 +2020-09-19 18:00:00,22.48,0.0,-0.0,0.0,2.21,57.75 +2020-09-19 19:00:00,21.59,0.0,-0.0,0.0,2.41,57.65 +2020-09-19 20:00:00,20.75,0.0,-0.0,0.0,2.41,59.5 +2020-09-19 21:00:00,19.72,0.0,-0.0,0.0,2.34,61.4 +2020-09-19 22:00:00,18.85,0.0,-0.0,0.0,2.21,63.5 +2020-09-19 23:00:00,18.07,0.0,-0.0,0.0,2.28,65.65 +2020-09-20 00:00:00,17.78,0.0,-0.0,0.0,2.28,67.95 +2020-09-20 01:00:00,17.25,0.0,-0.0,0.0,2.28,70.3 +2020-09-20 02:00:00,16.73,0.0,-0.0,0.0,2.28,75.3 +2020-09-20 03:00:00,16.21,0.0,-0.0,0.0,2.21,77.95 +2020-09-20 04:00:00,15.62,0.0,-0.0,0.0,2.14,80.65 +2020-09-20 05:00:00,15.26,16.0,38.61,14.0,2.07,83.5 +2020-09-20 06:00:00,16.15,155.0,394.38,71.0,1.79,83.6 +2020-09-20 
07:00:00,20.19,298.0,512.81,113.0,1.1,73.2 +2020-09-20 08:00:00,22.99,436.0,626.76,132.0,1.1,62.05 +2020-09-20 09:00:00,24.54,541.0,689.36,143.0,1.45,56.25 +2020-09-20 10:00:00,26.18,446.0,237.57,296.0,1.86,49.45 +2020-09-20 11:00:00,27.31,477.0,281.26,296.0,2.62,46.4 +2020-09-20 12:00:00,28.08,499.0,409.54,248.0,3.03,40.5 +2020-09-20 13:00:00,28.56,402.0,300.97,239.0,2.9,37.9 +2020-09-20 14:00:00,28.85,331.0,356.78,176.0,2.76,35.45 +2020-09-20 15:00:00,28.63,214.0,317.93,119.0,2.34,34.2 +2020-09-20 16:00:00,27.46,85.0,201.54,56.0,1.52,41.8 +2020-09-20 17:00:00,25.5,0.0,-0.0,0.0,1.86,45.9 +2020-09-20 18:00:00,23.29,0.0,-0.0,0.0,2.21,52.25 +2020-09-20 19:00:00,22.04,0.0,-0.0,0.0,2.41,53.75 +2020-09-20 20:00:00,21.16,0.0,-0.0,0.0,2.48,55.55 +2020-09-20 21:00:00,20.32,0.0,-0.0,0.0,2.48,59.35 +2020-09-20 22:00:00,19.61,0.0,-0.0,0.0,2.48,61.4 +2020-09-20 23:00:00,19.07,0.0,-0.0,0.0,2.55,65.75 +2020-09-21 00:00:00,18.73,0.0,-0.0,0.0,2.62,65.75 +2020-09-21 01:00:00,18.44,0.0,-0.0,0.0,2.62,68.05 +2020-09-21 02:00:00,18.1,0.0,-0.0,0.0,2.69,68.05 +2020-09-21 03:00:00,17.83,0.0,-0.0,0.0,2.76,70.35 +2020-09-21 04:00:00,17.73,0.0,-0.0,0.0,3.03,70.35 +2020-09-21 05:00:00,17.68,7.0,0.0,7.0,3.24,70.35 +2020-09-21 06:00:00,18.5,131.0,229.99,83.0,3.52,70.45 +2020-09-21 07:00:00,20.42,277.0,415.29,129.0,3.31,65.95 +2020-09-21 08:00:00,22.6,363.0,337.16,201.0,3.93,57.9 +2020-09-21 09:00:00,24.46,508.0,571.09,181.0,4.34,50.7 +2020-09-21 10:00:00,26.11,600.0,727.96,144.0,4.76,43.05 +2020-09-21 11:00:00,27.51,440.0,211.51,305.0,4.76,38.95 +2020-09-21 12:00:00,28.62,573.0,693.16,152.0,4.76,35.3 +2020-09-21 13:00:00,29.2,266.0,52.26,238.0,4.97,30.9 +2020-09-21 14:00:00,28.77,322.0,338.42,177.0,5.17,30.75 +2020-09-21 15:00:00,27.49,66.0,0.0,66.0,4.62,36.3 +2020-09-21 16:00:00,26.25,19.0,0.0,19.0,5.03,38.7 +2020-09-21 17:00:00,24.02,0.0,-0.0,0.0,5.66,45.5 +2020-09-21 18:00:00,19.72,0.0,-0.0,0.0,5.17,68.2 +2020-09-21 19:00:00,15.6,0.0,-0.0,0.0,4.9,89.6 +2020-09-21 20:00:00,13.89,0.0,-0.0,0.0,3.72,89.45 +2020-09-21 21:00:00,13.36,0.0,-0.0,0.0,3.86,80.35 +2020-09-21 22:00:00,13.29,0.0,-0.0,0.0,3.93,77.5 +2020-09-21 23:00:00,13.34,0.0,-0.0,0.0,3.93,74.75 +2020-09-22 00:00:00,13.33,0.0,-0.0,0.0,3.86,69.55 +2020-09-22 01:00:00,13.26,0.0,-0.0,0.0,3.93,62.3 +2020-09-22 02:00:00,12.92,0.0,-0.0,0.0,4.07,59.95 +2020-09-22 03:00:00,12.77,0.0,-0.0,0.0,4.21,57.75 +2020-09-22 04:00:00,12.62,0.0,-0.0,0.0,4.28,57.75 +2020-09-22 05:00:00,12.46,13.0,46.21,11.0,4.34,59.85 +2020-09-22 06:00:00,12.94,160.0,474.56,63.0,4.0,57.75 +2020-09-22 07:00:00,13.42,285.0,443.23,129.0,6.07,62.3 +2020-09-22 08:00:00,13.96,440.0,649.28,131.0,6.21,58.0 +2020-09-22 09:00:00,14.85,512.0,565.34,191.0,6.28,50.25 +2020-09-22 10:00:00,15.64,407.0,160.93,307.0,5.72,45.15 +2020-09-22 11:00:00,16.35,470.0,263.83,303.0,5.31,42.05 +2020-09-22 12:00:00,16.91,393.0,159.52,297.0,5.1,37.65 +2020-09-22 13:00:00,17.19,306.0,98.11,254.0,4.69,35.05 +2020-09-22 14:00:00,17.62,199.0,40.24,182.0,4.48,32.65 +2020-09-22 15:00:00,17.95,80.0,0.0,80.0,4.48,29.1 +2020-09-22 16:00:00,17.53,46.0,15.22,44.0,3.24,29.1 +2020-09-22 17:00:00,16.89,0.0,-0.0,0.0,2.9,31.15 +2020-09-22 18:00:00,16.47,0.0,-0.0,0.0,3.1,32.2 +2020-09-22 19:00:00,15.35,0.0,-0.0,0.0,2.28,40.2 +2020-09-22 20:00:00,15.05,0.0,-0.0,0.0,2.9,41.75 +2020-09-22 21:00:00,14.66,0.0,-0.0,0.0,3.17,44.9 +2020-09-22 22:00:00,14.3,0.0,-0.0,0.0,3.24,50.1 +2020-09-22 23:00:00,13.89,0.0,-0.0,0.0,3.38,55.9 +2020-09-23 00:00:00,13.34,0.0,-0.0,0.0,3.31,64.65 +2020-09-23 01:00:00,12.45,0.0,-0.0,0.0,2.97,69.35 +2020-09-23 
02:00:00,12.43,0.0,-0.0,0.0,2.83,71.95 +2020-09-23 03:00:00,12.32,0.0,-0.0,0.0,2.76,71.95 +2020-09-23 04:00:00,12.08,0.0,-0.0,0.0,2.55,71.95 +2020-09-23 05:00:00,12.1,1.0,0.0,1.0,2.41,71.95 +2020-09-23 06:00:00,12.13,65.0,10.0,63.0,1.66,71.95 +2020-09-23 07:00:00,12.25,113.0,5.76,111.0,1.52,74.6 +2020-09-23 08:00:00,12.27,117.0,0.0,117.0,1.52,77.35 +2020-09-23 09:00:00,12.51,180.0,1.78,179.0,1.72,77.45 +2020-09-23 10:00:00,12.65,191.0,0.0,191.0,1.72,77.45 +2020-09-23 11:00:00,12.78,83.0,0.0,83.0,1.93,80.3 +2020-09-23 12:00:00,12.7,134.0,0.0,134.0,2.28,83.25 +2020-09-23 13:00:00,12.62,118.0,0.0,118.0,2.83,86.25 +2020-09-23 14:00:00,12.68,34.0,0.0,34.0,3.66,86.25 +2020-09-23 15:00:00,12.96,28.0,0.0,28.0,4.0,89.4 +2020-09-23 16:00:00,13.89,22.0,0.0,22.0,4.28,89.45 +2020-09-23 17:00:00,16.39,0.0,-0.0,0.0,4.28,86.55 +2020-09-23 18:00:00,17.26,0.0,-0.0,0.0,4.21,86.65 +2020-09-23 19:00:00,10.57,0.0,-0.0,0.0,2.9,95.9 +2020-09-23 20:00:00,9.96,0.0,-0.0,0.0,6.0,95.9 +2020-09-23 21:00:00,8.78,0.0,-0.0,0.0,6.69,99.4 +2020-09-23 22:00:00,7.92,0.0,-0.0,0.0,5.45,95.8 +2020-09-23 23:00:00,7.81,0.0,-0.0,0.0,5.59,92.4 +2020-09-24 00:00:00,7.92,0.0,-0.0,0.0,5.52,89.1 +2020-09-24 01:00:00,8.33,0.0,-0.0,0.0,5.45,89.1 +2020-09-24 02:00:00,8.05,0.0,-0.0,0.0,5.93,89.1 +2020-09-24 03:00:00,7.95,0.0,-0.0,0.0,6.14,89.1 +2020-09-24 04:00:00,7.71,0.0,-0.0,0.0,5.79,89.05 +2020-09-24 05:00:00,7.63,4.0,0.0,4.0,5.79,89.05 +2020-09-24 06:00:00,7.69,121.0,204.37,81.0,6.07,85.85 +2020-09-24 07:00:00,8.21,208.0,145.75,158.0,4.83,89.1 +2020-09-24 08:00:00,9.02,296.0,156.43,223.0,5.1,85.95 +2020-09-24 09:00:00,10.1,327.0,100.34,271.0,5.72,79.95 +2020-09-24 10:00:00,11.01,444.0,242.15,296.0,6.07,74.45 +2020-09-24 11:00:00,11.87,244.0,8.04,239.0,6.55,66.75 +2020-09-24 12:00:00,12.28,214.0,5.08,211.0,6.69,59.85 +2020-09-24 13:00:00,12.57,418.0,374.28,224.0,6.69,53.6 +2020-09-24 14:00:00,12.66,284.0,226.61,191.0,6.83,47.85 +2020-09-24 15:00:00,12.29,161.0,138.62,123.0,6.41,47.75 +2020-09-24 16:00:00,11.87,18.0,0.0,18.0,5.66,47.6 +2020-09-24 17:00:00,11.15,0.0,-0.0,0.0,5.52,49.3 +2020-09-24 18:00:00,10.38,0.0,-0.0,0.0,5.66,57.15 +2020-09-24 19:00:00,8.93,0.0,-0.0,0.0,4.97,74.15 +2020-09-24 20:00:00,8.14,0.0,-0.0,0.0,4.69,74.05 +2020-09-24 21:00:00,7.43,0.0,-0.0,0.0,4.28,79.7 +2020-09-24 22:00:00,7.05,0.0,-0.0,0.0,5.03,82.65 +2020-09-24 23:00:00,6.63,0.0,-0.0,0.0,4.21,85.75 +2020-09-25 00:00:00,6.45,0.0,-0.0,0.0,4.0,82.6 +2020-09-25 01:00:00,6.37,0.0,-0.0,0.0,3.86,85.7 +2020-09-25 02:00:00,6.36,0.0,-0.0,0.0,3.72,82.55 +2020-09-25 03:00:00,6.11,0.0,-0.0,0.0,3.59,82.55 +2020-09-25 04:00:00,5.92,0.0,-0.0,0.0,3.59,82.55 +2020-09-25 05:00:00,5.99,6.0,0.0,6.0,3.52,82.55 +2020-09-25 06:00:00,6.21,147.0,433.76,64.0,3.31,85.7 +2020-09-25 07:00:00,6.91,250.0,307.18,146.0,4.76,82.6 +2020-09-25 08:00:00,8.08,321.0,216.45,221.0,4.55,71.35 +2020-09-25 09:00:00,9.27,351.0,139.18,274.0,4.21,63.8 +2020-09-25 10:00:00,10.16,449.0,254.1,295.0,4.07,55.05 +2020-09-25 11:00:00,11.16,327.0,56.74,292.0,4.07,49.3 +2020-09-25 12:00:00,11.64,407.0,198.31,291.0,3.79,44.1 +2020-09-25 13:00:00,12.1,389.0,290.76,240.0,3.66,40.95 +2020-09-25 14:00:00,12.27,324.0,383.35,169.0,3.38,39.4 +2020-09-25 15:00:00,12.27,199.0,328.43,111.0,3.17,37.9 +2020-09-25 16:00:00,11.82,67.0,204.29,44.0,2.34,39.25 +2020-09-25 17:00:00,10.55,0.0,-0.0,0.0,1.52,45.55 +2020-09-25 18:00:00,8.64,0.0,-0.0,0.0,1.45,61.3 +2020-09-25 19:00:00,7.05,0.0,-0.0,0.0,1.72,63.5 +2020-09-25 20:00:00,6.21,0.0,-0.0,0.0,1.72,68.3 +2020-09-25 21:00:00,5.12,0.0,-0.0,0.0,1.86,73.55 +2020-09-25 
22:00:00,4.31,0.0,-0.0,0.0,1.86,79.2 +2020-09-25 23:00:00,3.93,0.0,-0.0,0.0,1.79,79.2 +2020-09-26 00:00:00,3.61,0.0,-0.0,0.0,1.86,82.25 +2020-09-26 01:00:00,2.98,0.0,-0.0,0.0,1.86,82.2 +2020-09-26 02:00:00,2.65,0.0,-0.0,0.0,1.93,85.35 +2020-09-26 03:00:00,2.68,0.0,-0.0,0.0,2.14,85.35 +2020-09-26 04:00:00,2.91,0.0,-0.0,0.0,2.21,82.2 +2020-09-26 05:00:00,3.25,6.0,0.0,6.0,2.28,79.1 +2020-09-26 06:00:00,4.62,77.0,37.44,70.0,2.21,73.45 +2020-09-26 07:00:00,7.31,262.0,401.15,128.0,2.28,68.5 +2020-09-26 08:00:00,9.81,381.0,450.48,175.0,3.1,57.05 +2020-09-26 09:00:00,11.27,476.0,494.25,205.0,3.38,49.3 +2020-09-26 10:00:00,12.68,588.0,728.92,150.0,3.66,42.7 +2020-09-26 11:00:00,14.11,530.0,474.28,240.0,3.45,38.45 +2020-09-26 12:00:00,15.34,516.0,528.24,210.0,3.31,34.5 +2020-09-26 13:00:00,16.1,448.0,534.97,177.0,3.24,31.0 +2020-09-26 14:00:00,16.3,200.0,55.24,178.0,3.17,29.8 +2020-09-26 15:00:00,16.05,159.0,164.28,116.0,2.55,33.5 +2020-09-26 16:00:00,15.03,43.0,47.03,38.0,2.41,35.85 +2020-09-26 17:00:00,13.57,0.0,-0.0,0.0,2.48,39.8 +2020-09-26 18:00:00,12.58,0.0,-0.0,0.0,2.69,41.1 +2020-09-26 19:00:00,12.09,0.0,-0.0,0.0,2.9,45.95 +2020-09-26 20:00:00,11.85,0.0,-0.0,0.0,3.24,45.8 +2020-09-26 21:00:00,11.7,0.0,-0.0,0.0,3.38,42.4 +2020-09-26 22:00:00,11.59,0.0,-0.0,0.0,3.38,40.8 +2020-09-26 23:00:00,11.5,0.0,-0.0,0.0,3.38,40.8 +2020-09-27 00:00:00,11.58,0.0,-0.0,0.0,3.38,40.8 +2020-09-27 01:00:00,11.62,0.0,-0.0,0.0,3.31,40.8 +2020-09-27 02:00:00,11.67,0.0,-0.0,0.0,3.31,40.8 +2020-09-27 03:00:00,11.74,0.0,-0.0,0.0,3.17,42.4 +2020-09-27 04:00:00,11.72,0.0,-0.0,0.0,2.97,42.4 +2020-09-27 05:00:00,11.63,0.0,0.0,0.0,2.83,44.1 +2020-09-27 06:00:00,11.82,93.0,109.56,73.0,2.62,45.8 +2020-09-27 07:00:00,13.38,108.0,6.07,106.0,2.76,49.85 +2020-09-27 08:00:00,13.98,186.0,19.89,177.0,2.48,51.9 +2020-09-27 09:00:00,14.95,188.0,3.68,186.0,2.69,50.25 +2020-09-27 10:00:00,15.84,310.0,60.43,274.0,2.76,50.5 +2020-09-27 11:00:00,17.29,466.0,325.08,269.0,2.76,52.75 +2020-09-27 12:00:00,18.37,524.0,615.42,171.0,2.76,59.05 +2020-09-27 13:00:00,19.3,465.0,675.11,127.0,2.83,63.5 +2020-09-27 14:00:00,19.88,351.0,629.81,104.0,2.69,61.4 +2020-09-27 15:00:00,20.12,207.0,504.8,78.0,2.21,59.35 +2020-09-27 16:00:00,19.58,55.0,169.88,38.0,1.17,63.6 +2020-09-27 17:00:00,17.91,0.0,-0.0,0.0,1.45,70.35 +2020-09-27 18:00:00,15.65,0.0,-0.0,0.0,2.07,77.85 +2020-09-27 19:00:00,14.96,0.0,-0.0,0.0,2.34,80.55 +2020-09-27 20:00:00,14.23,0.0,-0.0,0.0,2.41,77.65 +2020-09-27 21:00:00,13.65,0.0,-0.0,0.0,2.41,74.85 +2020-09-27 22:00:00,13.07,0.0,-0.0,0.0,2.34,72.1 +2020-09-27 23:00:00,12.37,0.0,-0.0,0.0,2.34,69.35 +2020-09-28 00:00:00,11.64,0.0,-0.0,0.0,2.34,66.75 +2020-09-28 01:00:00,11.01,0.0,-0.0,0.0,2.41,66.65 +2020-09-28 02:00:00,10.56,0.0,-0.0,0.0,2.55,64.1 +2020-09-28 03:00:00,10.27,0.0,-0.0,0.0,2.69,66.45 +2020-09-28 04:00:00,9.88,0.0,-0.0,0.0,2.76,68.9 +2020-09-28 05:00:00,9.47,0.0,0.0,0.0,2.76,68.9 +2020-09-28 06:00:00,9.8,121.0,308.79,66.0,2.55,71.5 +2020-09-28 07:00:00,12.37,267.0,483.19,110.0,2.28,69.35 +2020-09-28 08:00:00,14.11,213.0,42.43,194.0,2.9,62.55 +2020-09-28 09:00:00,14.79,103.0,0.0,103.0,3.31,67.35 +2020-09-28 10:00:00,14.54,72.0,0.0,72.0,3.72,75.0 +2020-09-28 11:00:00,14.29,82.0,0.0,82.0,3.66,77.65 +2020-09-28 12:00:00,14.25,70.0,0.0,70.0,3.38,77.65 +2020-09-28 13:00:00,14.22,70.0,0.0,70.0,2.97,77.65 +2020-09-28 14:00:00,14.12,120.0,2.59,119.0,3.1,74.95 +2020-09-28 15:00:00,13.96,102.0,28.07,95.0,3.31,67.15 +2020-09-28 16:00:00,13.6,30.0,10.66,29.0,2.9,60.2 +2020-09-28 17:00:00,12.74,0.0,-0.0,0.0,2.9,55.65 +2020-09-28 
18:00:00,11.98,0.0,-0.0,0.0,3.03,51.5 +2020-09-28 19:00:00,10.67,0.0,-0.0,0.0,2.97,55.15 +2020-09-28 20:00:00,9.97,0.0,-0.0,0.0,2.83,55.05 +2020-09-28 21:00:00,9.19,0.0,-0.0,0.0,2.34,59.15 +2020-09-28 22:00:00,8.12,0.0,-0.0,0.0,1.93,63.7 +2020-09-28 23:00:00,6.96,0.0,-0.0,0.0,1.93,68.5 +2020-09-29 00:00:00,5.92,0.0,-0.0,0.0,1.86,73.7 +2020-09-29 01:00:00,5.15,0.0,-0.0,0.0,1.79,79.35 +2020-09-29 02:00:00,4.72,0.0,-0.0,0.0,1.86,82.35 +2020-09-29 03:00:00,4.29,0.0,-0.0,0.0,1.86,82.3 +2020-09-29 04:00:00,4.16,0.0,-0.0,0.0,1.86,82.3 +2020-09-29 05:00:00,3.97,0.0,0.0,0.0,1.79,82.3 +2020-09-29 06:00:00,4.66,131.0,420.34,58.0,1.45,85.55 +2020-09-29 07:00:00,7.03,283.0,596.27,92.0,1.45,79.65 +2020-09-29 08:00:00,8.68,419.0,686.22,115.0,1.52,76.85 +2020-09-29 09:00:00,9.94,520.0,723.73,134.0,1.24,74.2 +2020-09-29 10:00:00,10.99,519.0,505.81,223.0,1.38,61.85 +2020-09-29 11:00:00,11.47,498.0,408.39,255.0,1.52,57.55 +2020-09-29 12:00:00,11.79,407.0,238.38,273.0,1.17,55.4 +2020-09-29 13:00:00,12.09,441.0,560.56,167.0,0.76,51.5 +2020-09-29 14:00:00,12.38,353.0,663.15,101.0,0.41,49.6 +2020-09-29 15:00:00,12.23,200.0,493.5,80.0,0.28,47.75 +2020-09-29 16:00:00,11.9,50.0,194.06,33.0,0.28,49.45 +2020-09-29 17:00:00,10.31,0.0,-0.0,0.0,1.03,66.45 +2020-09-29 18:00:00,8.86,0.0,-0.0,0.0,1.31,68.7 +2020-09-29 19:00:00,7.08,0.0,-0.0,0.0,1.79,71.15 +2020-09-29 20:00:00,5.94,0.0,-0.0,0.0,2.0,76.55 +2020-09-29 21:00:00,5.38,0.0,-0.0,0.0,2.14,82.4 +2020-09-29 22:00:00,5.08,0.0,-0.0,0.0,2.28,79.35 +2020-09-29 23:00:00,4.93,0.0,-0.0,0.0,2.34,79.35 +2020-09-30 00:00:00,4.8,0.0,-0.0,0.0,2.34,79.3 +2020-09-30 01:00:00,4.59,0.0,-0.0,0.0,2.34,79.3 +2020-09-30 02:00:00,4.51,0.0,-0.0,0.0,2.41,76.3 +2020-09-30 03:00:00,4.57,0.0,-0.0,0.0,2.55,76.3 +2020-09-30 04:00:00,4.74,0.0,-0.0,0.0,2.69,73.45 +2020-09-30 05:00:00,4.89,0.0,0.0,0.0,2.76,70.8 +2020-09-30 06:00:00,5.62,128.0,431.44,55.0,2.69,70.85 +2020-09-30 07:00:00,7.71,284.0,630.33,85.0,2.41,66.05 +2020-09-30 08:00:00,10.29,423.0,727.99,104.0,2.9,57.15 +2020-09-30 09:00:00,12.2,523.0,762.81,120.0,2.76,49.6 +2020-09-30 10:00:00,13.74,578.0,762.18,136.0,2.55,42.95 +2020-09-30 11:00:00,15.01,583.0,737.93,148.0,2.34,38.7 +2020-09-30 12:00:00,15.89,522.0,623.67,175.0,2.48,37.4 +2020-09-30 13:00:00,16.39,454.0,648.23,141.0,2.41,36.15 +2020-09-30 14:00:00,16.48,343.0,633.84,106.0,2.21,36.15 +2020-09-30 15:00:00,16.19,198.0,527.48,73.0,2.0,36.15 +2020-09-30 16:00:00,11.83,43.0,159.73,30.0,2.36,48.24 +2020-09-30 17:00:00,11.37,0.0,-0.0,0.0,2.31,51.71 +2020-09-30 18:00:00,10.92,0.0,-0.0,0.0,2.25,55.17 +2020-09-30 19:00:00,10.47,0.0,-0.0,0.0,2.19,58.64 +2020-09-30 20:00:00,10.02,0.0,-0.0,0.0,2.14,62.1 +2020-09-30 21:00:00,9.57,0.0,-0.0,0.0,2.08,65.56 +2020-09-30 22:00:00,9.12,0.0,-0.0,0.0,2.03,69.03 +2020-09-30 23:00:00,8.67,0.0,-0.0,0.0,1.97,72.49 +2020-10-01 00:00:00,8.22,0.0,-0.0,0.0,1.92,75.96 +2020-10-01 01:00:00,7.77,0.0,-0.0,0.0,1.86,79.42 +2020-10-01 02:00:00,7.32,0.0,-0.0,0.0,1.8,82.89 +2020-10-01 03:00:00,6.87,0.0,-0.0,0.0,1.75,86.35 +2020-10-01 04:00:00,6.42,0.0,-0.0,0.0,1.69,89.81 +2020-10-01 05:00:00,5.96,0.0,0.0,0.0,1.64,93.28 +2020-10-01 06:00:00,5.51,121.0,406.77,54.0,1.58,96.74 +2020-10-01 07:00:00,5.06,270.0,581.87,89.0,1.53,100.0 +2020-10-01 08:00:00,8.89,417.0,740.74,96.0,2.14,85.95 +2020-10-01 09:00:00,10.44,532.0,837.09,94.0,2.07,77.15 +2020-10-01 10:00:00,11.21,588.0,844.06,103.0,1.79,74.45 +2020-10-01 11:00:00,12.0,581.0,770.63,131.0,1.72,69.35 +2020-10-01 12:00:00,12.64,573.0,879.02,89.0,1.79,64.55 +2020-10-01 13:00:00,12.8,477.0,803.11,94.0,2.0,64.55 
+2020-10-01 14:00:00,12.78,345.0,701.44,87.0,2.21,64.55 +2020-10-01 15:00:00,12.54,190.0,515.6,71.0,2.21,66.95 +2020-10-01 16:00:00,11.91,34.0,93.1,27.0,2.0,69.35 +2020-10-01 17:00:00,10.77,0.0,-0.0,0.0,2.0,77.15 +2020-10-01 18:00:00,9.62,0.0,-0.0,0.0,2.07,82.9 +2020-10-01 19:00:00,9.22,0.0,-0.0,0.0,2.34,85.95 +2020-10-01 20:00:00,8.47,0.0,-0.0,0.0,2.41,89.1 +2020-10-01 21:00:00,8.02,0.0,-0.0,0.0,2.48,89.05 +2020-10-01 22:00:00,7.82,0.0,-0.0,0.0,2.55,92.35 +2020-10-01 23:00:00,7.82,0.0,-0.0,0.0,2.62,89.0 +2020-10-02 00:00:00,7.81,0.0,-0.0,0.0,2.62,85.8 +2020-10-02 01:00:00,7.58,0.0,-0.0,0.0,2.69,82.65 +2020-10-02 02:00:00,7.83,0.0,-0.0,0.0,2.76,82.65 +2020-10-02 03:00:00,7.91,0.0,-0.0,0.0,2.83,79.7 +2020-10-02 04:00:00,8.09,0.0,-0.0,0.0,2.97,79.7 +2020-10-02 05:00:00,8.35,0.0,-0.0,0.0,3.03,79.7 +2020-10-02 06:00:00,8.67,57.0,18.73,54.0,3.03,76.85 +2020-10-02 07:00:00,8.62,255.0,528.72,93.0,3.1,89.1 +2020-10-02 08:00:00,9.73,263.0,147.03,200.0,3.31,82.9 +2020-10-02 09:00:00,10.63,385.0,293.35,233.0,3.45,80.05 +2020-10-02 10:00:00,11.85,469.0,418.08,231.0,3.66,77.3 +2020-10-02 11:00:00,12.27,321.0,79.53,275.0,3.66,74.6 +2020-10-02 12:00:00,12.81,523.0,728.67,126.0,3.66,74.7 +2020-10-02 13:00:00,13.23,208.0,25.48,196.0,3.79,72.1 +2020-10-02 14:00:00,13.24,126.0,5.53,124.0,3.45,74.75 +2020-10-02 15:00:00,13.18,79.0,13.35,76.0,3.03,74.75 +2020-10-02 16:00:00,12.68,11.0,0.0,11.0,2.69,77.45 +2020-10-02 17:00:00,12.24,0.0,-0.0,0.0,2.76,80.2 +2020-10-02 18:00:00,12.03,0.0,-0.0,0.0,2.97,77.35 +2020-10-02 19:00:00,11.7,0.0,-0.0,0.0,3.1,83.1 +2020-10-02 20:00:00,11.58,0.0,-0.0,0.0,3.17,83.1 +2020-10-02 21:00:00,11.44,0.0,-0.0,0.0,3.24,83.1 +2020-10-02 22:00:00,11.19,0.0,-0.0,0.0,3.31,86.1 +2020-10-02 23:00:00,10.62,0.0,-0.0,0.0,3.31,89.25 +2020-10-03 00:00:00,10.26,0.0,-0.0,0.0,3.31,92.5 +2020-10-03 01:00:00,9.22,0.0,-0.0,0.0,3.38,92.45 +2020-10-03 02:00:00,8.92,0.0,-0.0,0.0,3.45,92.45 +2020-10-03 03:00:00,8.71,0.0,-0.0,0.0,3.59,95.85 +2020-10-03 04:00:00,8.73,0.0,-0.0,0.0,3.66,95.85 +2020-10-03 05:00:00,8.7,0.0,-0.0,0.0,3.79,95.85 +2020-10-03 06:00:00,8.96,93.0,211.96,60.0,4.07,92.45 +2020-10-03 07:00:00,9.75,233.0,417.61,107.0,4.41,92.45 +2020-10-03 08:00:00,10.86,294.0,247.88,189.0,4.83,89.25 +2020-10-03 09:00:00,11.7,424.0,444.42,196.0,4.9,83.1 +2020-10-03 10:00:00,12.53,546.0,757.21,119.0,5.24,77.45 +2020-10-03 11:00:00,13.66,455.0,368.38,244.0,5.03,74.85 +2020-10-03 12:00:00,13.93,190.0,3.71,188.0,5.1,74.95 +2020-10-03 13:00:00,14.29,201.0,23.66,190.0,5.59,72.3 +2020-10-03 14:00:00,14.35,169.0,47.8,152.0,5.66,72.3 +2020-10-03 15:00:00,14.11,143.0,247.15,89.0,5.45,72.3 +2020-10-03 16:00:00,13.48,23.0,47.72,20.0,4.9,74.85 +2020-10-03 17:00:00,12.55,0.0,-0.0,0.0,5.17,77.45 +2020-10-03 18:00:00,11.91,0.0,-0.0,0.0,5.52,77.35 +2020-10-03 19:00:00,10.38,0.0,-0.0,0.0,5.59,77.15 +2020-10-03 20:00:00,10.1,0.0,-0.0,0.0,5.72,79.95 +2020-10-03 21:00:00,9.94,0.0,-0.0,0.0,5.79,79.95 +2020-10-03 22:00:00,9.89,0.0,-0.0,0.0,5.79,79.95 +2020-10-03 23:00:00,9.8,0.0,-0.0,0.0,5.66,82.9 +2020-10-04 00:00:00,9.84,0.0,-0.0,0.0,5.66,82.9 +2020-10-04 01:00:00,9.75,0.0,-0.0,0.0,5.59,82.9 +2020-10-04 02:00:00,9.66,0.0,-0.0,0.0,5.45,82.9 +2020-10-04 03:00:00,9.58,0.0,-0.0,0.0,5.38,79.9 +2020-10-04 04:00:00,9.48,0.0,-0.0,0.0,5.31,79.9 +2020-10-04 05:00:00,9.35,0.0,-0.0,0.0,5.45,79.85 +2020-10-04 06:00:00,9.43,112.0,443.25,45.0,5.52,77.0 +2020-10-04 07:00:00,9.94,257.0,606.04,77.0,4.9,79.95 +2020-10-04 08:00:00,10.98,399.0,745.19,87.0,5.59,77.2 +2020-10-04 09:00:00,12.07,454.0,576.9,161.0,5.79,74.6 +2020-10-04 
10:00:00,13.51,243.0,21.48,231.0,6.41,69.65 +2020-10-04 11:00:00,14.12,118.0,0.0,118.0,6.14,69.75 +2020-10-04 12:00:00,14.44,78.0,0.0,78.0,6.28,69.85 +2020-10-04 13:00:00,14.41,78.0,0.0,78.0,6.14,72.3 +2020-10-04 14:00:00,14.07,51.0,0.0,51.0,5.86,72.3 +2020-10-04 15:00:00,13.59,56.0,0.0,56.0,5.52,72.2 +2020-10-04 16:00:00,12.91,17.0,17.62,16.0,5.72,72.1 +2020-10-04 17:00:00,12.37,0.0,-0.0,0.0,5.72,77.35 +2020-10-04 18:00:00,12.18,0.0,-0.0,0.0,5.66,77.35 +2020-10-04 19:00:00,11.12,0.0,-0.0,0.0,5.59,80.1 +2020-10-04 20:00:00,11.1,0.0,-0.0,0.0,5.66,83.05 +2020-10-04 21:00:00,11.15,0.0,-0.0,0.0,5.52,83.05 +2020-10-04 22:00:00,11.22,0.0,-0.0,0.0,5.31,83.05 +2020-10-04 23:00:00,11.24,0.0,-0.0,0.0,5.03,86.1 +2020-10-05 00:00:00,11.23,0.0,-0.0,0.0,4.83,86.1 +2020-10-05 01:00:00,11.22,0.0,-0.0,0.0,4.69,86.1 +2020-10-05 02:00:00,11.15,0.0,-0.0,0.0,4.55,86.1 +2020-10-05 03:00:00,11.2,0.0,-0.0,0.0,4.41,86.1 +2020-10-05 04:00:00,11.08,0.0,-0.0,0.0,4.34,89.3 +2020-10-05 05:00:00,11.06,0.0,-0.0,0.0,4.28,89.3 +2020-10-05 06:00:00,11.23,67.0,75.03,56.0,4.14,89.3 +2020-10-05 07:00:00,10.92,163.0,119.75,128.0,3.66,86.1 +2020-10-05 08:00:00,11.66,291.0,258.61,184.0,3.86,86.15 +2020-10-05 09:00:00,12.62,430.0,503.26,177.0,4.21,80.3 +2020-10-05 10:00:00,13.53,401.0,260.33,257.0,4.07,77.6 +2020-10-05 11:00:00,14.24,323.0,96.16,269.0,4.07,74.95 +2020-10-05 12:00:00,14.7,185.0,3.79,183.0,4.14,75.0 +2020-10-05 13:00:00,15.06,189.0,19.87,180.0,4.0,72.45 +2020-10-05 14:00:00,14.92,109.0,2.91,108.0,3.72,75.0 +2020-10-05 15:00:00,14.68,99.0,67.88,85.0,3.38,75.0 +2020-10-05 16:00:00,14.15,15.0,19.75,14.0,3.17,77.65 +2020-10-05 17:00:00,13.58,0.0,-0.0,0.0,3.17,80.4 +2020-10-05 18:00:00,13.18,0.0,-0.0,0.0,3.17,80.35 +2020-10-05 19:00:00,11.83,0.0,-0.0,0.0,3.03,86.15 +2020-10-05 20:00:00,11.52,0.0,-0.0,0.0,3.24,86.15 +2020-10-05 21:00:00,11.27,0.0,-0.0,0.0,3.03,89.3 +2020-10-05 22:00:00,10.92,0.0,-0.0,0.0,3.03,89.3 +2020-10-05 23:00:00,10.51,0.0,-0.0,0.0,3.03,92.5 +2020-10-06 00:00:00,10.17,0.0,-0.0,0.0,3.1,95.9 +2020-10-06 01:00:00,9.99,0.0,-0.0,0.0,3.17,95.9 +2020-10-06 02:00:00,9.95,0.0,-0.0,0.0,3.1,95.9 +2020-10-06 03:00:00,9.93,0.0,-0.0,0.0,3.17,95.9 +2020-10-06 04:00:00,10.05,0.0,-0.0,0.0,3.17,95.9 +2020-10-06 05:00:00,10.05,0.0,-0.0,0.0,3.17,95.9 +2020-10-06 06:00:00,10.27,79.0,176.01,54.0,3.31,95.9 +2020-10-06 07:00:00,10.81,136.0,59.12,119.0,3.52,95.9 +2020-10-06 08:00:00,11.7,248.0,149.22,187.0,3.38,89.3 +2020-10-06 09:00:00,12.33,339.0,219.08,230.0,3.31,89.35 +2020-10-06 10:00:00,13.24,542.0,812.44,97.0,3.79,83.25 +2020-10-06 11:00:00,14.14,553.0,818.43,98.0,4.14,77.65 +2020-10-06 12:00:00,14.0,510.0,787.93,99.0,4.21,77.65 +2020-10-06 13:00:00,14.25,297.0,205.77,205.0,4.34,77.65 +2020-10-06 14:00:00,14.14,152.0,38.52,139.0,4.28,77.65 +2020-10-06 15:00:00,13.84,91.0,59.95,79.0,3.86,80.4 +2020-10-06 16:00:00,13.33,11.0,0.0,11.0,3.59,83.25 +2020-10-06 17:00:00,12.76,0.0,-0.0,0.0,3.66,86.25 +2020-10-06 18:00:00,12.5,0.0,-0.0,0.0,3.45,83.25 +2020-10-06 19:00:00,12.13,0.0,-0.0,0.0,3.52,86.2 +2020-10-06 20:00:00,11.91,0.0,-0.0,0.0,3.59,83.15 +2020-10-06 21:00:00,11.74,0.0,-0.0,0.0,3.52,86.15 +2020-10-06 22:00:00,11.41,0.0,-0.0,0.0,3.24,86.15 +2020-10-06 23:00:00,11.13,0.0,-0.0,0.0,2.76,89.3 +2020-10-07 00:00:00,11.1,0.0,-0.0,0.0,2.55,89.3 +2020-10-07 01:00:00,11.05,0.0,-0.0,0.0,2.55,89.3 +2020-10-07 02:00:00,11.01,0.0,-0.0,0.0,2.62,89.3 +2020-10-07 03:00:00,11.0,0.0,-0.0,0.0,2.62,89.3 +2020-10-07 04:00:00,10.97,0.0,-0.0,0.0,2.69,89.3 +2020-10-07 05:00:00,10.94,0.0,-0.0,0.0,2.69,89.3 +2020-10-07 
06:00:00,11.02,65.0,87.3,53.0,2.76,89.3 +2020-10-07 07:00:00,11.68,166.0,152.06,123.0,2.21,89.3 +2020-10-07 08:00:00,12.64,248.0,153.53,186.0,2.55,86.25 +2020-10-07 09:00:00,13.34,149.0,0.0,149.0,2.97,83.25 +2020-10-07 10:00:00,13.97,106.0,0.0,106.0,2.83,77.65 +2020-10-07 11:00:00,14.38,121.0,0.0,121.0,2.97,77.65 +2020-10-07 12:00:00,14.73,64.0,0.0,64.0,3.1,75.0 +2020-10-07 13:00:00,14.93,111.0,0.0,111.0,3.03,72.45 +2020-10-07 14:00:00,15.16,136.0,24.14,128.0,3.17,72.45 +2020-10-07 15:00:00,15.22,112.0,164.86,80.0,2.76,72.45 +2020-10-07 16:00:00,14.9,10.0,0.0,10.0,2.48,75.0 +2020-10-07 17:00:00,14.16,0.0,-0.0,0.0,2.48,77.65 +2020-10-07 18:00:00,13.48,0.0,-0.0,0.0,2.69,77.6 +2020-10-07 19:00:00,12.46,0.0,-0.0,0.0,2.62,80.3 +2020-10-07 20:00:00,11.95,0.0,-0.0,0.0,2.9,80.2 +2020-10-07 21:00:00,11.62,0.0,-0.0,0.0,3.1,80.15 +2020-10-07 22:00:00,11.28,0.0,-0.0,0.0,3.17,83.05 +2020-10-07 23:00:00,10.72,0.0,-0.0,0.0,3.17,83.0 +2020-10-08 00:00:00,10.27,0.0,-0.0,0.0,3.31,86.0 +2020-10-08 01:00:00,9.82,0.0,-0.0,0.0,3.38,85.95 +2020-10-08 02:00:00,9.63,0.0,-0.0,0.0,3.45,85.95 +2020-10-08 03:00:00,9.37,0.0,-0.0,0.0,3.52,85.95 +2020-10-08 04:00:00,9.15,0.0,-0.0,0.0,3.59,89.1 +2020-10-08 05:00:00,8.94,0.0,-0.0,0.0,3.59,89.1 +2020-10-08 06:00:00,9.07,63.0,82.79,52.0,3.66,89.1 +2020-10-08 07:00:00,10.14,172.0,165.47,126.0,3.1,89.2 +2020-10-08 08:00:00,11.25,313.0,366.07,167.0,4.0,86.1 +2020-10-08 09:00:00,12.22,489.0,804.73,97.0,4.34,80.2 +2020-10-08 10:00:00,13.33,543.0,812.12,107.0,4.28,74.75 +2020-10-08 11:00:00,14.46,556.0,828.03,105.0,4.55,67.35 +2020-10-08 12:00:00,15.32,509.0,788.25,107.0,4.55,65.05 +2020-10-08 13:00:00,15.81,430.0,776.65,92.0,4.48,60.65 +2020-10-08 14:00:00,15.76,290.0,596.19,96.0,4.28,60.65 +2020-10-08 15:00:00,15.17,153.0,515.77,56.0,3.72,62.75 +2020-10-08 16:00:00,13.99,9.0,0.0,9.0,3.72,64.85 +2020-10-08 17:00:00,12.79,0.0,-0.0,0.0,3.86,69.45 +2020-10-08 18:00:00,12.04,0.0,-0.0,0.0,4.07,71.95 +2020-10-08 19:00:00,11.54,0.0,-0.0,0.0,4.28,71.85 +2020-10-08 20:00:00,11.08,0.0,-0.0,0.0,4.34,71.75 +2020-10-08 21:00:00,10.6,0.0,-0.0,0.0,4.34,71.7 +2020-10-08 22:00:00,10.1,0.0,-0.0,0.0,4.28,74.3 +2020-10-08 23:00:00,9.59,0.0,-0.0,0.0,4.07,74.2 +2020-10-09 00:00:00,9.2,0.0,-0.0,0.0,3.86,76.95 +2020-10-09 01:00:00,8.92,0.0,-0.0,0.0,3.72,74.15 +2020-10-09 02:00:00,8.75,0.0,-0.0,0.0,3.66,76.85 +2020-10-09 03:00:00,8.51,0.0,-0.0,0.0,3.59,74.05 +2020-10-09 04:00:00,8.33,0.0,-0.0,0.0,3.59,76.8 +2020-10-09 05:00:00,8.19,0.0,-0.0,0.0,3.59,76.8 +2020-10-09 06:00:00,8.14,59.0,70.17,50.0,3.59,76.8 +2020-10-09 07:00:00,8.59,224.0,475.84,94.0,3.1,82.75 +2020-10-09 08:00:00,9.95,376.0,721.14,92.0,3.38,74.3 +2020-10-09 09:00:00,11.48,486.0,809.31,96.0,3.31,66.75 +2020-10-09 10:00:00,13.03,543.0,827.98,103.0,3.31,62.3 +2020-10-09 11:00:00,14.09,537.0,766.21,124.0,3.38,58.1 +2020-10-09 12:00:00,14.84,482.0,672.38,143.0,3.38,56.15 +2020-10-09 13:00:00,15.1,399.0,635.96,126.0,3.45,56.25 +2020-10-09 14:00:00,14.96,242.0,334.99,135.0,3.24,58.35 +2020-10-09 15:00:00,14.22,53.0,0.0,53.0,2.69,64.85 +2020-10-09 16:00:00,12.72,1.0,0.0,1.0,2.76,69.45 +2020-10-09 17:00:00,11.14,0.0,-0.0,0.0,2.83,74.45 +2020-10-09 18:00:00,9.91,0.0,-0.0,0.0,2.76,74.3 +2020-10-09 19:00:00,8.64,0.0,-0.0,0.0,2.76,71.35 +2020-10-09 20:00:00,7.79,0.0,-0.0,0.0,2.62,73.9 +2020-10-09 21:00:00,7.03,0.0,-0.0,0.0,2.41,76.65 +2020-10-09 22:00:00,6.3,0.0,-0.0,0.0,2.34,76.55 +2020-10-09 23:00:00,5.66,0.0,-0.0,0.0,2.28,76.5 +2020-10-10 00:00:00,5.06,0.0,-0.0,0.0,2.28,79.35 +2020-10-10 01:00:00,4.6,0.0,-0.0,0.0,2.21,79.3 +2020-10-10 
02:00:00,4.04,0.0,-0.0,0.0,2.14,79.2 +2020-10-10 03:00:00,3.42,0.0,-0.0,0.0,2.07,82.25 +2020-10-10 04:00:00,2.84,0.0,-0.0,0.0,2.0,85.4 +2020-10-10 05:00:00,2.27,0.0,-0.0,0.0,2.0,88.65 +2020-10-10 06:00:00,2.06,89.0,396.31,40.0,1.86,88.65 +2020-10-10 07:00:00,4.5,241.0,633.41,71.0,1.31,85.55 +2020-10-10 08:00:00,7.72,387.0,787.05,81.0,1.38,76.7 +2020-10-10 09:00:00,9.72,487.0,824.51,94.0,1.86,66.35 +2020-10-10 10:00:00,11.19,541.0,830.87,104.0,2.34,59.6 +2020-10-10 11:00:00,12.27,548.0,823.07,109.0,2.62,55.5 +2020-10-10 12:00:00,12.87,506.0,806.62,104.0,2.69,53.6 +2020-10-10 13:00:00,13.07,427.0,805.46,86.0,2.83,53.7 +2020-10-10 14:00:00,12.77,295.0,692.32,78.0,2.83,55.65 +2020-10-10 15:00:00,12.02,145.0,516.75,54.0,2.34,62.1 +2020-10-10 16:00:00,10.53,5.0,0.0,5.0,2.28,66.55 +2020-10-10 17:00:00,8.88,0.0,-0.0,0.0,2.28,74.15 +2020-10-10 18:00:00,7.57,0.0,-0.0,0.0,2.21,79.65 +2020-10-10 19:00:00,6.52,0.0,-0.0,0.0,1.93,79.55 +2020-10-10 20:00:00,5.14,0.0,-0.0,0.0,1.79,85.6 +2020-10-10 21:00:00,4.06,0.0,-0.0,0.0,1.79,88.8 +2020-10-10 22:00:00,3.24,0.0,-0.0,0.0,1.93,88.7 +2020-10-10 23:00:00,2.67,0.0,-0.0,0.0,2.07,92.15 +2020-10-11 00:00:00,2.41,0.0,-0.0,0.0,1.66,88.65 +2020-10-11 01:00:00,2.57,0.0,-0.0,0.0,1.38,92.15 +2020-10-11 02:00:00,1.96,0.0,-0.0,0.0,1.45,92.1 +2020-10-11 03:00:00,2.58,0.0,-0.0,0.0,1.24,92.15 +2020-10-11 04:00:00,2.01,0.0,-0.0,0.0,1.45,92.1 +2020-10-11 05:00:00,1.52,0.0,-0.0,0.0,1.59,92.05 +2020-10-11 06:00:00,1.93,70.0,201.66,46.0,1.45,88.65 +2020-10-11 07:00:00,4.27,204.0,383.2,103.0,0.69,88.8 +2020-10-11 08:00:00,6.84,362.0,677.51,102.0,0.9,82.6 +2020-10-11 09:00:00,8.85,477.0,808.25,96.0,1.52,74.05 +2020-10-11 10:00:00,10.23,534.0,824.22,105.0,1.86,66.45 +2020-10-11 11:00:00,11.3,543.0,829.99,105.0,1.86,59.6 +2020-10-11 12:00:00,11.98,501.0,812.06,101.0,1.93,53.5 +2020-10-11 13:00:00,12.2,420.0,797.67,87.0,2.21,53.5 +2020-10-11 14:00:00,11.86,288.0,682.94,78.0,2.48,57.55 +2020-10-11 15:00:00,11.02,138.0,505.38,52.0,2.28,61.85 +2020-10-11 16:00:00,9.47,0.0,0.0,0.0,2.28,68.9 +2020-10-11 17:00:00,7.86,0.0,-0.0,0.0,2.07,76.8 +2020-10-11 18:00:00,6.57,0.0,-0.0,0.0,1.86,82.6 +2020-10-11 19:00:00,5.14,0.0,-0.0,0.0,1.86,88.85 +2020-10-11 20:00:00,3.98,0.0,-0.0,0.0,1.66,92.2 +2020-10-11 21:00:00,3.29,0.0,-0.0,0.0,1.45,95.7 +2020-10-11 22:00:00,3.21,0.0,-0.0,0.0,1.24,95.7 +2020-10-11 23:00:00,3.34,0.0,-0.0,0.0,1.1,95.7 +2020-10-12 00:00:00,3.61,0.0,-0.0,0.0,0.97,99.4 +2020-10-12 01:00:00,3.46,0.0,-0.0,0.0,0.9,95.7 +2020-10-12 02:00:00,2.95,0.0,-0.0,0.0,0.83,95.7 +2020-10-12 03:00:00,2.1,0.0,-0.0,0.0,0.55,99.4 +2020-10-12 04:00:00,2.32,0.0,-0.0,0.0,0.41,95.7 +2020-10-12 05:00:00,1.99,0.0,-0.0,0.0,0.76,95.65 +2020-10-12 06:00:00,2.13,40.0,26.23,37.0,0.83,99.4 +2020-10-12 07:00:00,3.25,83.0,3.87,82.0,0.28,99.4 +2020-10-12 08:00:00,3.79,96.0,0.0,96.0,0.69,99.4 +2020-10-12 09:00:00,4.88,70.0,0.0,70.0,1.17,92.25 +2020-10-12 10:00:00,5.98,86.0,0.0,86.0,1.38,88.95 +2020-10-12 11:00:00,7.24,83.0,0.0,83.0,1.52,85.75 +2020-10-12 12:00:00,8.07,70.0,0.0,70.0,1.59,79.7 +2020-10-12 13:00:00,8.68,77.0,0.0,77.0,1.66,76.85 +2020-10-12 14:00:00,8.79,140.0,39.79,128.0,1.66,76.85 +2020-10-12 15:00:00,8.57,124.0,401.76,58.0,1.66,74.05 +2020-10-12 16:00:00,7.77,0.0,0.0,0.0,1.52,79.65 +2020-10-12 17:00:00,6.39,0.0,-0.0,0.0,1.66,79.55 +2020-10-12 18:00:00,5.09,0.0,-0.0,0.0,1.59,88.85 +2020-10-12 19:00:00,3.78,0.0,-0.0,0.0,1.66,95.7 +2020-10-12 20:00:00,3.17,0.0,-0.0,0.0,1.66,95.7 +2020-10-12 21:00:00,2.54,0.0,-0.0,0.0,1.59,95.7 +2020-10-12 22:00:00,2.16,0.0,-0.0,0.0,1.59,95.65 +2020-10-12 
23:00:00,1.87,0.0,-0.0,0.0,1.59,95.65 +2020-10-13 00:00:00,1.75,0.0,-0.0,0.0,1.59,95.65 +2020-10-13 01:00:00,1.88,0.0,-0.0,0.0,1.66,95.65 +2020-10-13 02:00:00,1.46,0.0,-0.0,0.0,1.59,95.65 +2020-10-13 03:00:00,1.48,0.0,-0.0,0.0,1.45,95.65 +2020-10-13 04:00:00,0.96,0.0,-0.0,0.0,1.45,99.35 +2020-10-13 05:00:00,0.69,0.0,-0.0,0.0,1.45,100.0 +2020-10-13 06:00:00,1.13,40.0,27.34,37.0,1.38,99.35 +2020-10-13 07:00:00,3.07,120.0,51.2,107.0,1.79,95.7 +2020-10-13 08:00:00,3.86,174.0,37.47,160.0,1.59,88.8 +2020-10-13 09:00:00,5.38,432.0,640.13,137.0,1.24,79.4 +2020-10-13 10:00:00,7.13,513.0,796.79,107.0,1.17,76.65 +2020-10-13 11:00:00,8.62,522.0,801.67,108.0,1.45,71.35 +2020-10-13 12:00:00,9.57,488.0,817.02,95.0,1.86,66.35 +2020-10-13 13:00:00,10.09,404.0,791.17,83.0,2.14,64.0 +2020-10-13 14:00:00,10.09,273.0,669.6,75.0,2.34,64.0 +2020-10-13 15:00:00,9.57,123.0,448.16,52.0,1.93,66.35 +2020-10-13 16:00:00,8.3,0.0,0.0,0.0,1.86,73.95 +2020-10-13 17:00:00,6.83,0.0,-0.0,0.0,1.93,79.55 +2020-10-13 18:00:00,5.57,0.0,-0.0,0.0,1.93,85.65 +2020-10-13 19:00:00,4.25,0.0,-0.0,0.0,1.72,92.2 +2020-10-13 20:00:00,3.14,0.0,-0.0,0.0,1.66,95.7 +2020-10-13 21:00:00,2.18,0.0,-0.0,0.0,1.66,95.65 +2020-10-13 22:00:00,1.44,0.0,-0.0,0.0,1.72,95.65 +2020-10-13 23:00:00,0.84,0.0,-0.0,0.0,1.72,99.35 +2020-10-14 00:00:00,0.33,0.0,-0.0,0.0,1.66,99.4 +2020-10-14 01:00:00,-0.02,0.0,-0.0,0.0,1.66,99.4 +2020-10-14 02:00:00,-0.35,0.0,-0.0,0.0,1.66,99.4 +2020-10-14 03:00:00,-0.63,0.0,-0.0,0.0,1.66,99.4 +2020-10-14 04:00:00,-0.88,0.0,-0.0,0.0,1.45,99.4 +2020-10-14 05:00:00,-0.94,0.0,-0.0,0.0,1.52,99.4 +2020-10-14 06:00:00,-0.65,39.0,28.55,36.0,1.38,99.4 +2020-10-14 07:00:00,1.65,101.0,24.09,95.0,1.45,95.65 +2020-10-14 08:00:00,3.19,149.0,16.28,143.0,1.66,99.4 +2020-10-14 09:00:00,4.46,82.0,0.0,82.0,2.28,92.25 +2020-10-14 10:00:00,5.42,97.0,0.0,97.0,2.41,88.9 +2020-10-14 11:00:00,6.3,72.0,0.0,72.0,2.62,85.7 +2020-10-14 12:00:00,6.87,152.0,0.0,152.0,2.83,82.6 +2020-10-14 13:00:00,7.23,191.0,40.01,175.0,2.97,82.6 +2020-10-14 14:00:00,7.27,243.0,500.25,98.0,2.97,82.6 +2020-10-14 15:00:00,7.06,56.0,19.66,53.0,2.69,82.6 +2020-10-14 16:00:00,6.48,0.0,-0.0,0.0,2.41,82.6 +2020-10-14 17:00:00,6.2,0.0,-0.0,0.0,2.48,88.95 +2020-10-14 18:00:00,6.45,0.0,-0.0,0.0,2.62,85.75 +2020-10-14 19:00:00,7.03,0.0,-0.0,0.0,2.62,89.0 +2020-10-14 20:00:00,7.3,0.0,-0.0,0.0,2.62,92.35 +2020-10-14 21:00:00,7.39,0.0,-0.0,0.0,2.55,89.0 +2020-10-14 22:00:00,7.41,0.0,-0.0,0.0,2.62,92.35 +2020-10-14 23:00:00,7.41,0.0,-0.0,0.0,2.62,92.35 +2020-10-15 00:00:00,7.39,0.0,-0.0,0.0,2.62,92.35 +2020-10-15 01:00:00,7.98,0.0,-0.0,0.0,2.55,95.8 +2020-10-15 02:00:00,7.95,0.0,-0.0,0.0,2.69,95.8 +2020-10-15 03:00:00,7.85,0.0,-0.0,0.0,2.69,99.4 +2020-10-15 04:00:00,7.78,0.0,-0.0,0.0,2.69,100.0 +2020-10-15 05:00:00,7.81,0.0,-0.0,0.0,2.62,100.0 +2020-10-15 06:00:00,7.75,34.0,19.92,32.0,2.48,100.0 +2020-10-15 07:00:00,8.06,77.0,4.1,76.0,2.28,99.4 +2020-10-15 08:00:00,8.37,94.0,0.0,94.0,2.83,92.4 +2020-10-15 09:00:00,8.41,80.0,0.0,80.0,2.62,92.4 +2020-10-15 10:00:00,9.07,98.0,0.0,98.0,2.55,85.95 +2020-10-15 11:00:00,9.31,100.0,0.0,100.0,2.28,85.95 +2020-10-15 12:00:00,9.54,163.0,4.26,161.0,2.48,82.9 +2020-10-15 13:00:00,9.71,112.0,0.0,112.0,2.55,79.9 +2020-10-15 14:00:00,9.69,59.0,0.0,59.0,2.41,79.9 +2020-10-15 15:00:00,9.49,36.0,0.0,36.0,1.52,79.9 +2020-10-15 16:00:00,9.0,0.0,-0.0,0.0,1.03,82.85 +2020-10-15 17:00:00,8.2,0.0,-0.0,0.0,1.24,89.05 +2020-10-15 18:00:00,7.28,0.0,-0.0,0.0,1.52,95.8 +2020-10-15 19:00:00,6.33,0.0,-0.0,0.0,1.31,95.8 +2020-10-15 20:00:00,5.67,0.0,-0.0,0.0,1.45,95.75 
+2020-10-15 21:00:00,5.29,0.0,-0.0,0.0,1.52,95.75 +2020-10-15 22:00:00,5.14,0.0,-0.0,0.0,1.59,95.75 +2020-10-15 23:00:00,4.75,0.0,-0.0,0.0,1.72,95.75 +2020-10-16 00:00:00,4.78,0.0,-0.0,0.0,1.72,95.75 +2020-10-16 01:00:00,4.61,0.0,-0.0,0.0,1.79,95.75 +2020-10-16 02:00:00,4.77,0.0,-0.0,0.0,1.72,95.75 +2020-10-16 03:00:00,4.72,0.0,-0.0,0.0,1.66,95.75 +2020-10-16 04:00:00,4.88,0.0,-0.0,0.0,1.45,95.75 +2020-10-16 05:00:00,5.53,0.0,-0.0,0.0,1.31,95.75 +2020-10-16 06:00:00,5.96,57.0,240.22,34.0,1.24,95.8 +2020-10-16 07:00:00,6.28,121.0,79.4,102.0,1.17,95.8 +2020-10-16 08:00:00,7.73,190.0,78.12,162.0,1.1,95.8 +2020-10-16 09:00:00,8.7,317.0,249.45,206.0,1.03,89.1 +2020-10-16 10:00:00,9.52,349.0,231.18,235.0,1.03,82.9 +2020-10-16 11:00:00,9.95,326.0,166.15,243.0,0.69,79.95 +2020-10-16 12:00:00,10.34,150.0,2.16,149.0,0.48,82.95 +2020-10-16 13:00:00,10.31,229.0,118.46,183.0,1.03,86.0 +2020-10-16 14:00:00,10.26,127.0,43.12,115.0,1.38,89.2 +2020-10-16 15:00:00,9.77,56.0,35.42,51.0,1.31,92.45 +2020-10-16 16:00:00,8.89,0.0,-0.0,0.0,1.45,89.1 +2020-10-16 17:00:00,8.22,0.0,-0.0,0.0,1.45,95.8 +2020-10-16 18:00:00,7.87,0.0,-0.0,0.0,1.52,92.4 +2020-10-16 19:00:00,7.42,0.0,-0.0,0.0,1.72,95.8 +2020-10-16 20:00:00,6.78,0.0,-0.0,0.0,1.72,95.8 +2020-10-16 21:00:00,6.27,0.0,-0.0,0.0,1.72,95.8 +2020-10-16 22:00:00,5.95,0.0,-0.0,0.0,1.72,92.3 +2020-10-16 23:00:00,5.5,0.0,-0.0,0.0,1.79,92.3 +2020-10-17 00:00:00,4.84,0.0,-0.0,0.0,1.93,92.25 +2020-10-17 01:00:00,4.34,0.0,-0.0,0.0,2.0,92.25 +2020-10-17 02:00:00,4.04,0.0,-0.0,0.0,2.07,95.75 +2020-10-17 03:00:00,3.32,0.0,-0.0,0.0,2.07,92.15 +2020-10-17 04:00:00,2.76,0.0,-0.0,0.0,2.0,95.7 +2020-10-17 05:00:00,2.22,0.0,-0.0,0.0,2.0,95.65 +2020-10-17 06:00:00,2.08,48.0,164.7,33.0,2.0,95.65 +2020-10-17 07:00:00,3.29,181.0,405.27,86.0,2.0,95.7 +2020-10-17 08:00:00,4.6,329.0,684.94,87.0,2.97,85.55 +2020-10-17 09:00:00,5.72,430.0,757.34,97.0,2.97,82.5 +2020-10-17 10:00:00,6.65,481.0,762.83,109.0,3.03,76.65 +2020-10-17 11:00:00,7.49,489.0,767.32,110.0,3.38,71.15 +2020-10-17 12:00:00,7.93,450.0,759.67,102.0,3.45,63.6 +2020-10-17 13:00:00,8.16,370.0,739.71,87.0,3.45,61.2 +2020-10-17 14:00:00,7.95,242.0,620.08,73.0,3.17,58.9 +2020-10-17 15:00:00,7.33,101.0,420.7,44.0,2.62,63.35 +2020-10-17 16:00:00,6.03,0.0,-0.0,0.0,2.62,63.25 +2020-10-17 17:00:00,4.93,0.0,-0.0,0.0,2.48,65.55 +2020-10-17 18:00:00,4.03,0.0,-0.0,0.0,2.34,70.6 +2020-10-17 19:00:00,3.45,0.0,-0.0,0.0,2.28,70.5 +2020-10-17 20:00:00,2.47,0.0,-0.0,0.0,2.0,76.0 +2020-10-17 21:00:00,1.45,0.0,-0.0,0.0,1.93,78.9 +2020-10-17 22:00:00,0.5,0.0,-0.0,0.0,1.72,81.9 +2020-10-17 23:00:00,0.18,0.0,-0.0,0.0,1.52,81.8 +2020-10-18 00:00:00,0.56,0.0,-0.0,0.0,1.17,78.75 +2020-10-18 01:00:00,0.87,0.0,-0.0,0.0,0.97,75.75 +2020-10-18 02:00:00,1.07,0.0,-0.0,0.0,0.69,75.75 +2020-10-18 03:00:00,0.78,0.0,-0.0,0.0,0.48,78.75 +2020-10-18 04:00:00,0.61,0.0,-0.0,0.0,0.34,81.9 +2020-10-18 05:00:00,0.44,0.0,-0.0,0.0,0.48,78.75 +2020-10-18 06:00:00,0.35,59.0,381.94,26.0,0.55,78.75 +2020-10-18 07:00:00,0.99,197.0,579.45,64.0,0.0,88.55 +2020-10-18 08:00:00,2.68,329.0,703.57,84.0,0.55,82.15 +2020-10-18 09:00:00,4.32,426.0,755.06,98.0,0.97,76.3 +2020-10-18 10:00:00,5.6,480.0,771.47,108.0,1.24,73.65 +2020-10-18 11:00:00,6.67,495.0,810.91,99.0,1.24,71.05 +2020-10-18 12:00:00,7.39,453.0,789.1,96.0,0.97,71.15 +2020-10-18 13:00:00,7.78,367.0,748.26,85.0,0.76,71.15 +2020-10-18 14:00:00,7.85,232.0,569.58,80.0,0.55,71.15 +2020-10-18 15:00:00,7.54,95.0,400.36,43.0,0.76,71.15 +2020-10-18 16:00:00,6.32,0.0,-0.0,0.0,1.38,76.55 +2020-10-18 
17:00:00,4.84,0.0,-0.0,0.0,0.76,82.4 +2020-10-18 18:00:00,4.41,0.0,-0.0,0.0,0.9,82.35 +2020-10-18 19:00:00,1.78,0.0,-0.0,0.0,1.79,95.65 +2020-10-18 20:00:00,0.53,0.0,-0.0,0.0,2.0,99.4 +2020-10-18 21:00:00,-0.03,0.0,-0.0,0.0,2.0,99.4 +2020-10-18 22:00:00,-0.15,0.0,-0.0,0.0,2.14,95.6 +2020-10-18 23:00:00,-0.02,0.0,-0.0,0.0,2.21,95.6 +2020-10-19 00:00:00,-0.01,0.0,-0.0,0.0,2.21,95.6 +2020-10-19 01:00:00,0.02,0.0,-0.0,0.0,2.21,95.6 +2020-10-19 02:00:00,0.09,0.0,-0.0,0.0,2.34,92.0 +2020-10-19 03:00:00,0.45,0.0,-0.0,0.0,2.55,92.0 +2020-10-19 04:00:00,0.76,0.0,-0.0,0.0,2.62,95.65 +2020-10-19 05:00:00,1.12,0.0,-0.0,0.0,2.69,92.05 +2020-10-19 06:00:00,2.05,13.0,0.0,13.0,2.97,88.65 +2020-10-19 07:00:00,3.66,28.0,0.0,28.0,3.17,82.25 +2020-10-19 08:00:00,4.97,94.0,0.0,94.0,3.17,76.4 +2020-10-19 09:00:00,6.3,241.0,88.55,203.0,3.38,76.55 +2020-10-19 10:00:00,7.03,142.0,0.0,142.0,3.52,73.8 +2020-10-19 11:00:00,7.5,230.0,35.21,213.0,3.45,76.7 +2020-10-19 12:00:00,7.74,141.0,0.0,141.0,3.52,79.65 +2020-10-19 13:00:00,8.17,71.0,0.0,71.0,2.9,79.7 +2020-10-19 14:00:00,7.97,20.0,0.0,20.0,3.31,76.8 +2020-10-19 15:00:00,7.56,42.0,16.09,40.0,3.17,76.7 +2020-10-19 16:00:00,6.91,0.0,-0.0,0.0,3.03,79.55 +2020-10-19 17:00:00,6.39,0.0,-0.0,0.0,3.17,79.55 +2020-10-19 18:00:00,5.82,0.0,-0.0,0.0,3.17,88.9 +2020-10-19 19:00:00,5.79,0.0,-0.0,0.0,2.76,88.9 +2020-10-19 20:00:00,5.7,0.0,-0.0,0.0,2.69,92.3 +2020-10-19 21:00:00,5.66,0.0,-0.0,0.0,2.48,92.3 +2020-10-19 22:00:00,5.55,0.0,-0.0,0.0,2.48,92.3 +2020-10-19 23:00:00,5.36,0.0,-0.0,0.0,2.69,88.9 +2020-10-20 00:00:00,4.96,0.0,-0.0,0.0,2.76,92.25 +2020-10-20 01:00:00,4.8,0.0,-0.0,0.0,2.9,92.25 +2020-10-20 02:00:00,4.7,0.0,-0.0,0.0,3.03,92.25 +2020-10-20 03:00:00,4.41,0.0,-0.0,0.0,3.03,88.85 +2020-10-20 04:00:00,4.23,0.0,-0.0,0.0,3.1,92.2 +2020-10-20 05:00:00,4.26,0.0,-0.0,0.0,3.17,92.2 +2020-10-20 06:00:00,4.8,30.0,51.91,26.0,3.31,92.25 +2020-10-20 07:00:00,5.21,19.0,0.0,19.0,3.31,88.85 +2020-10-20 08:00:00,5.99,234.0,236.66,154.0,3.03,85.7 +2020-10-20 09:00:00,6.67,92.0,0.0,92.0,4.55,85.75 +2020-10-20 10:00:00,7.41,283.0,112.45,230.0,3.79,85.8 +2020-10-20 11:00:00,7.74,342.0,236.79,229.0,3.45,89.0 +2020-10-20 12:00:00,7.85,79.0,0.0,79.0,3.93,89.0 +2020-10-20 13:00:00,6.81,78.0,0.0,78.0,4.41,89.0 +2020-10-20 14:00:00,6.44,137.0,93.88,113.0,4.34,85.75 +2020-10-20 15:00:00,5.94,21.0,0.0,21.0,3.66,88.95 +2020-10-20 16:00:00,5.6,0.0,-0.0,0.0,3.38,88.9 +2020-10-20 17:00:00,5.33,0.0,-0.0,0.0,3.79,85.65 +2020-10-20 18:00:00,4.89,0.0,-0.0,0.0,4.14,85.6 +2020-10-20 19:00:00,4.74,0.0,-0.0,0.0,4.14,88.85 +2020-10-20 20:00:00,4.42,0.0,-0.0,0.0,4.62,85.55 +2020-10-20 21:00:00,4.1,0.0,-0.0,0.0,4.83,85.5 +2020-10-20 22:00:00,3.77,0.0,-0.0,0.0,5.17,85.45 +2020-10-20 23:00:00,3.38,0.0,-0.0,0.0,5.59,82.25 +2020-10-21 00:00:00,3.21,0.0,-0.0,0.0,5.93,85.4 +2020-10-21 01:00:00,3.14,0.0,-0.0,0.0,5.86,85.4 +2020-10-21 02:00:00,3.16,0.0,-0.0,0.0,5.72,85.4 +2020-10-21 03:00:00,3.01,0.0,-0.0,0.0,5.59,85.4 +2020-10-21 04:00:00,2.88,0.0,-0.0,0.0,5.66,85.4 +2020-10-21 05:00:00,2.68,0.0,-0.0,0.0,5.45,85.35 +2020-10-21 06:00:00,2.51,37.0,165.8,25.0,4.83,85.35 +2020-10-21 07:00:00,2.7,122.0,125.66,95.0,5.52,88.65 +2020-10-21 08:00:00,3.35,292.0,549.64,109.0,6.69,82.25 +2020-10-21 09:00:00,3.99,323.0,322.53,188.0,6.48,76.25 +2020-10-21 10:00:00,5.08,202.0,19.32,193.0,6.62,65.55 +2020-10-21 11:00:00,6.3,225.0,33.92,209.0,6.97,56.3 +2020-10-21 12:00:00,7.04,355.0,390.3,185.0,6.41,54.3 +2020-10-21 13:00:00,7.17,133.0,8.33,130.0,5.86,56.45 +2020-10-21 14:00:00,6.48,218.0,579.74,73.0,4.9,63.35 +2020-10-21 
15:00:00,6.3,41.0,26.45,38.0,5.1,63.25 +2020-10-21 16:00:00,6.2,0.0,-0.0,0.0,4.97,60.85 +2020-10-21 17:00:00,5.96,0.0,-0.0,0.0,4.83,58.55 +2020-10-21 18:00:00,5.77,0.0,-0.0,0.0,4.62,58.45 +2020-10-21 19:00:00,5.2,0.0,-0.0,0.0,4.41,65.55 +2020-10-21 20:00:00,4.75,0.0,-0.0,0.0,4.48,68.0 +2020-10-21 21:00:00,4.47,0.0,-0.0,0.0,4.55,65.4 +2020-10-21 22:00:00,4.32,0.0,-0.0,0.0,4.62,65.4 +2020-10-21 23:00:00,4.1,0.0,-0.0,0.0,4.55,67.9 +2020-10-22 00:00:00,3.9,0.0,-0.0,0.0,4.34,67.9 +2020-10-22 01:00:00,3.43,0.0,-0.0,0.0,4.21,67.8 +2020-10-22 02:00:00,3.21,0.0,-0.0,0.0,4.21,67.7 +2020-10-22 03:00:00,2.98,0.0,-0.0,0.0,4.28,67.7 +2020-10-22 04:00:00,2.82,0.0,-0.0,0.0,4.41,67.7 +2020-10-22 05:00:00,2.72,0.0,-0.0,0.0,4.48,67.6 +2020-10-22 06:00:00,2.68,31.0,118.17,23.0,4.48,67.6 +2020-10-22 07:00:00,3.42,140.0,242.88,89.0,4.41,67.8 +2020-10-22 08:00:00,4.76,262.0,402.61,130.0,3.72,68.0 +2020-10-22 09:00:00,6.1,376.0,590.37,132.0,4.34,58.55 +2020-10-22 10:00:00,7.18,310.0,184.59,225.0,4.41,58.65 +2020-10-22 11:00:00,8.19,334.0,235.96,224.0,4.41,58.9 +2020-10-22 12:00:00,8.9,310.0,248.83,203.0,4.21,56.9 +2020-10-22 13:00:00,9.14,205.0,107.21,167.0,3.86,59.15 +2020-10-22 14:00:00,8.89,115.0,49.05,103.0,3.1,59.15 +2020-10-22 15:00:00,8.41,39.0,27.77,36.0,2.62,61.3 +2020-10-22 16:00:00,7.55,0.0,-0.0,0.0,2.62,63.5 +2020-10-22 17:00:00,6.91,0.0,-0.0,0.0,2.55,65.85 +2020-10-22 18:00:00,6.2,0.0,-0.0,0.0,2.41,68.3 +2020-10-22 19:00:00,5.7,0.0,-0.0,0.0,2.21,65.65 +2020-10-22 20:00:00,5.05,0.0,-0.0,0.0,2.14,68.1 +2020-10-22 21:00:00,4.29,0.0,-0.0,0.0,2.14,73.4 +2020-10-22 22:00:00,3.33,0.0,-0.0,0.0,2.21,73.3 +2020-10-22 23:00:00,2.37,0.0,-0.0,0.0,2.07,79.0 +2020-10-23 00:00:00,1.42,0.0,-0.0,0.0,2.07,85.25 +2020-10-23 01:00:00,0.6,0.0,-0.0,0.0,2.07,88.5 +2020-10-23 02:00:00,0.07,0.0,-0.0,0.0,2.14,88.45 +2020-10-23 03:00:00,-0.23,0.0,-0.0,0.0,2.21,91.95 +2020-10-23 04:00:00,-0.41,0.0,-0.0,0.0,2.28,88.45 +2020-10-23 05:00:00,-0.35,0.0,-0.0,0.0,2.41,88.45 +2020-10-23 06:00:00,-0.35,34.0,206.24,21.0,2.34,88.45 +2020-10-23 07:00:00,0.94,166.0,487.56,66.0,2.28,88.55 +2020-10-23 08:00:00,3.77,299.0,672.28,82.0,2.41,82.25 +2020-10-23 09:00:00,6.1,382.0,656.79,114.0,3.03,70.95 +2020-10-23 10:00:00,7.77,435.0,685.58,123.0,3.24,71.15 +2020-10-23 11:00:00,9.04,442.0,688.09,125.0,3.38,66.25 +2020-10-23 12:00:00,10.0,415.0,749.12,97.0,3.79,64.0 +2020-10-23 13:00:00,10.85,316.0,607.53,104.0,4.0,61.75 +2020-10-23 14:00:00,11.39,188.0,413.86,89.0,3.79,61.85 +2020-10-23 15:00:00,10.85,50.0,97.32,40.0,3.45,64.1 +2020-10-23 16:00:00,9.78,0.0,-0.0,0.0,3.59,68.9 +2020-10-23 17:00:00,8.98,0.0,-0.0,0.0,3.66,71.4 +2020-10-23 18:00:00,8.4,0.0,-0.0,0.0,3.79,76.85 +2020-10-23 19:00:00,8.01,0.0,-0.0,0.0,4.0,82.7 +2020-10-23 20:00:00,7.89,0.0,-0.0,0.0,4.0,85.85 +2020-10-23 21:00:00,8.33,0.0,-0.0,0.0,4.0,85.85 +2020-10-23 22:00:00,8.55,0.0,-0.0,0.0,3.93,82.75 +2020-10-23 23:00:00,8.41,0.0,-0.0,0.0,3.86,79.75 +2020-10-24 00:00:00,8.4,0.0,-0.0,0.0,3.93,79.75 +2020-10-24 01:00:00,8.82,0.0,-0.0,0.0,4.0,79.75 +2020-10-24 02:00:00,8.69,0.0,-0.0,0.0,4.0,79.75 +2020-10-24 03:00:00,8.86,0.0,-0.0,0.0,3.86,82.75 +2020-10-24 04:00:00,9.02,0.0,-0.0,0.0,3.86,82.85 +2020-10-24 05:00:00,8.99,0.0,-0.0,0.0,3.86,82.85 +2020-10-24 06:00:00,8.67,25.0,85.66,20.0,3.93,82.75 +2020-10-24 07:00:00,8.4,172.0,589.33,54.0,4.21,76.85 +2020-10-24 08:00:00,9.01,191.0,138.49,147.0,3.66,76.95 +2020-10-24 09:00:00,9.85,127.0,2.48,126.0,6.62,68.9 +2020-10-24 10:00:00,10.26,189.0,17.79,181.0,5.17,69.0 +2020-10-24 11:00:00,10.74,113.0,0.0,113.0,5.45,64.1 +2020-10-24 
12:00:00,11.13,254.0,124.1,202.0,5.52,59.6 +2020-10-24 13:00:00,11.15,304.0,576.41,106.0,5.1,57.4 +2020-10-24 14:00:00,10.67,125.0,94.08,103.0,4.62,57.3 +2020-10-24 15:00:00,9.84,69.0,389.67,31.0,4.28,61.55 +2020-10-24 16:00:00,8.97,0.0,-0.0,0.0,4.0,61.45 +2020-10-24 17:00:00,8.16,0.0,-0.0,0.0,3.79,63.6 +2020-10-24 18:00:00,7.31,0.0,-0.0,0.0,3.72,68.4 +2020-10-24 19:00:00,6.76,0.0,-0.0,0.0,3.24,71.05 +2020-10-24 20:00:00,6.13,0.0,-0.0,0.0,3.31,73.7 +2020-10-24 21:00:00,5.86,0.0,-0.0,0.0,3.24,73.7 +2020-10-24 22:00:00,5.37,0.0,-0.0,0.0,3.03,76.5 +2020-10-24 23:00:00,4.83,0.0,-0.0,0.0,2.9,79.35 +2020-10-25 00:00:00,4.59,0.0,-0.0,0.0,2.83,82.35 +2020-10-25 01:00:00,4.38,0.0,-0.0,0.0,2.62,82.35 +2020-10-25 02:00:00,4.0,0.0,-0.0,0.0,2.55,85.5 +2020-10-25 03:00:00,3.62,0.0,-0.0,0.0,2.41,88.75 +2020-10-25 04:00:00,3.38,0.0,-0.0,0.0,2.28,88.75 +2020-10-25 05:00:00,3.1,0.0,-0.0,0.0,2.07,92.15 +2020-10-25 06:00:00,2.91,15.0,18.62,14.0,1.93,88.7 +2020-10-25 07:00:00,3.98,88.0,46.07,79.0,2.14,92.2 +2020-10-25 08:00:00,4.81,194.0,150.33,147.0,3.31,88.85 +2020-10-25 09:00:00,5.45,106.0,0.0,106.0,3.45,79.4 +2020-10-25 10:00:00,5.94,215.0,38.26,198.0,3.59,76.55 +2020-10-25 11:00:00,6.41,211.0,31.12,197.0,3.45,71.05 +2020-10-25 12:00:00,6.36,212.0,53.19,190.0,3.72,71.05 +2020-10-25 13:00:00,6.58,182.0,76.9,156.0,3.86,71.05 +2020-10-25 14:00:00,6.59,147.0,196.9,102.0,3.24,68.4 +2020-10-25 15:00:00,6.44,45.0,97.45,36.0,2.83,68.4 +2020-10-25 16:00:00,5.96,0.0,-0.0,0.0,2.55,68.3 +2020-10-25 17:00:00,5.36,0.0,-0.0,0.0,2.41,70.85 +2020-10-25 18:00:00,4.67,0.0,-0.0,0.0,2.34,76.3 +2020-10-25 19:00:00,4.17,0.0,-0.0,0.0,2.14,82.3 +2020-10-25 20:00:00,3.63,0.0,-0.0,0.0,2.48,82.25 +2020-10-25 21:00:00,3.27,0.0,-0.0,0.0,2.69,88.7 +2020-10-25 22:00:00,3.08,0.0,-0.0,0.0,2.83,88.7 +2020-10-25 23:00:00,3.02,0.0,-0.0,0.0,3.03,88.7 +2020-10-26 00:00:00,3.28,0.0,-0.0,0.0,3.38,88.7 +2020-10-26 01:00:00,3.4,0.0,-0.0,0.0,3.72,85.45 +2020-10-26 02:00:00,3.48,0.0,-0.0,0.0,3.79,85.45 +2020-10-26 03:00:00,3.65,0.0,-0.0,0.0,3.72,85.45 +2020-10-26 04:00:00,3.25,0.0,-0.0,0.0,3.52,88.7 +2020-10-26 05:00:00,3.15,0.0,-0.0,0.0,3.31,88.7 +2020-10-26 06:00:00,3.3,22.0,122.3,16.0,3.17,88.7 +2020-10-26 07:00:00,3.7,116.0,178.48,82.0,2.9,88.75 +2020-10-26 08:00:00,4.5,132.0,26.01,124.0,3.03,85.55 +2020-10-26 09:00:00,5.15,125.0,2.55,124.0,4.28,82.4 +2020-10-26 10:00:00,5.78,227.0,54.66,203.0,4.34,79.4 +2020-10-26 11:00:00,6.16,102.0,0.0,102.0,4.14,73.7 +2020-10-26 12:00:00,6.59,57.0,0.0,57.0,3.86,68.4 +2020-10-26 13:00:00,6.89,223.0,198.33,157.0,3.79,68.4 +2020-10-26 14:00:00,6.76,163.0,322.42,91.0,3.59,65.85 +2020-10-26 15:00:00,6.26,61.0,378.14,28.0,2.41,68.3 +2020-10-26 16:00:00,5.14,0.0,-0.0,0.0,1.79,73.55 +2020-10-26 17:00:00,3.62,0.0,-0.0,0.0,1.72,82.25 +2020-10-26 18:00:00,2.08,0.0,-0.0,0.0,1.72,88.65 +2020-10-26 19:00:00,1.96,0.0,-0.0,0.0,1.66,92.1 +2020-10-26 20:00:00,1.51,0.0,-0.0,0.0,1.59,95.65 +2020-10-26 21:00:00,1.16,0.0,-0.0,0.0,1.59,99.35 +2020-10-26 22:00:00,0.53,0.0,-0.0,0.0,1.66,99.4 +2020-10-26 23:00:00,-0.43,0.0,-0.0,0.0,1.72,99.4 +2020-10-27 00:00:00,-1.05,0.0,-0.0,0.0,1.79,99.4 +2020-10-27 01:00:00,-1.56,0.0,-0.0,0.0,1.79,99.4 +2020-10-27 02:00:00,-1.55,0.0,-0.0,0.0,1.79,99.4 +2020-10-27 03:00:00,-1.41,0.0,-0.0,0.0,1.93,99.4 +2020-10-27 04:00:00,-0.9,0.0,-0.0,0.0,1.93,99.4 +2020-10-27 05:00:00,-0.54,0.0,-0.0,0.0,2.0,99.4 +2020-10-27 06:00:00,-0.15,19.0,112.56,14.0,2.21,99.4 +2020-10-27 07:00:00,1.0,147.0,484.78,57.0,2.41,99.35 +2020-10-27 08:00:00,2.48,267.0,591.61,88.0,2.28,92.15 +2020-10-27 
09:00:00,4.47,353.0,619.85,113.0,2.41,85.55 +2020-10-27 10:00:00,6.37,401.0,627.07,129.0,2.97,76.65 +2020-10-27 11:00:00,7.4,405.0,619.44,133.0,2.97,76.7 +2020-10-27 12:00:00,8.25,371.0,630.49,117.0,3.03,73.95 +2020-10-27 13:00:00,8.68,243.0,308.4,142.0,2.97,68.7 +2020-10-27 14:00:00,8.57,110.0,77.93,93.0,2.83,68.7 +2020-10-27 15:00:00,7.9,33.0,48.63,29.0,2.48,71.25 +2020-10-27 16:00:00,6.87,0.0,-0.0,0.0,2.55,76.65 +2020-10-27 17:00:00,6.34,0.0,-0.0,0.0,2.62,76.55 +2020-10-27 18:00:00,6.23,0.0,-0.0,0.0,2.55,76.55 +2020-10-27 19:00:00,6.41,0.0,-0.0,0.0,2.55,73.8 +2020-10-27 20:00:00,6.33,0.0,-0.0,0.0,2.69,73.7 +2020-10-27 21:00:00,6.63,0.0,-0.0,0.0,2.76,71.05 +2020-10-27 22:00:00,6.62,0.0,-0.0,0.0,2.76,68.4 +2020-10-27 23:00:00,7.0,0.0,-0.0,0.0,2.83,68.4 +2020-10-28 00:00:00,7.11,0.0,-0.0,0.0,2.69,68.4 +2020-10-28 01:00:00,7.05,0.0,-0.0,0.0,2.62,68.4 +2020-10-28 02:00:00,7.02,0.0,-0.0,0.0,2.55,65.85 +2020-10-28 03:00:00,7.04,0.0,-0.0,0.0,2.55,65.85 +2020-10-28 04:00:00,7.03,0.0,-0.0,0.0,2.48,65.85 +2020-10-28 05:00:00,7.03,0.0,-0.0,0.0,2.55,65.85 +2020-10-28 06:00:00,7.09,10.0,0.0,10.0,2.62,65.85 +2020-10-28 07:00:00,7.16,62.0,11.06,60.0,2.83,73.8 +2020-10-28 08:00:00,8.11,179.0,144.52,136.0,2.76,71.25 +2020-10-28 09:00:00,9.07,73.0,0.0,73.0,2.9,66.25 +2020-10-28 10:00:00,10.24,360.0,455.07,165.0,2.97,61.65 +2020-10-28 11:00:00,11.15,163.0,6.92,160.0,2.97,55.3 +2020-10-28 12:00:00,11.59,395.0,794.83,79.0,2.83,53.35 +2020-10-28 13:00:00,11.89,248.0,363.05,131.0,2.9,55.4 +2020-10-28 14:00:00,11.86,164.0,413.07,76.0,2.76,57.55 +2020-10-28 15:00:00,11.19,31.0,64.67,26.0,2.69,59.6 +2020-10-28 16:00:00,9.83,0.0,-0.0,0.0,2.55,63.9 +2020-10-28 17:00:00,8.85,0.0,-0.0,0.0,2.69,66.15 +2020-10-28 18:00:00,8.21,0.0,-0.0,0.0,2.69,68.6 +2020-10-28 19:00:00,8.08,0.0,-0.0,0.0,2.55,68.6 +2020-10-28 20:00:00,7.42,0.0,-0.0,0.0,2.62,68.5 +2020-10-28 21:00:00,6.67,0.0,-0.0,0.0,2.62,71.05 +2020-10-28 22:00:00,6.24,0.0,-0.0,0.0,2.48,73.7 +2020-10-28 23:00:00,5.52,0.0,-0.0,0.0,2.48,76.5 +2020-10-29 00:00:00,4.83,0.0,-0.0,0.0,2.28,79.35 +2020-10-29 01:00:00,4.03,0.0,-0.0,0.0,2.14,85.5 +2020-10-29 02:00:00,3.19,0.0,-0.0,0.0,2.07,88.7 +2020-10-29 03:00:00,2.36,0.0,-0.0,0.0,2.0,88.65 +2020-10-29 04:00:00,1.76,0.0,-0.0,0.0,2.0,95.65 +2020-10-29 05:00:00,1.44,0.0,-0.0,0.0,2.07,95.65 +2020-10-29 06:00:00,1.24,10.0,0.0,10.0,2.14,99.35 +2020-10-29 07:00:00,2.6,142.0,511.38,52.0,2.21,92.15 +2020-10-29 08:00:00,5.87,257.0,594.78,83.0,2.07,82.55 +2020-10-29 09:00:00,8.79,346.0,636.79,106.0,2.28,76.85 +2020-10-29 10:00:00,10.55,395.0,654.42,118.0,2.83,69.1 +2020-10-29 11:00:00,11.78,369.0,492.37,158.0,3.03,64.35 +2020-10-29 12:00:00,12.5,348.0,560.75,128.0,3.03,59.95 +2020-10-29 13:00:00,12.72,273.0,532.94,104.0,2.97,59.95 +2020-10-29 14:00:00,12.3,148.0,307.68,84.0,2.76,64.45 +2020-10-29 15:00:00,10.95,35.0,110.41,27.0,3.03,69.15 +2020-10-29 16:00:00,9.41,0.0,-0.0,0.0,3.38,74.2 +2020-10-29 17:00:00,8.67,0.0,-0.0,0.0,3.52,76.85 +2020-10-29 18:00:00,8.29,0.0,-0.0,0.0,3.66,79.7 +2020-10-29 19:00:00,8.03,0.0,-0.0,0.0,3.93,71.25 +2020-10-29 20:00:00,7.66,0.0,-0.0,0.0,4.21,73.9 +2020-10-29 21:00:00,7.18,0.0,-0.0,0.0,4.41,76.65 +2020-10-29 22:00:00,6.63,0.0,-0.0,0.0,4.48,73.8 +2020-10-29 23:00:00,6.1,0.0,-0.0,0.0,4.48,76.55 +2020-10-30 00:00:00,5.63,0.0,-0.0,0.0,4.55,79.4 +2020-10-30 01:00:00,5.36,0.0,-0.0,0.0,4.62,76.5 +2020-10-30 02:00:00,5.06,0.0,-0.0,0.0,4.69,79.35 +2020-10-30 03:00:00,4.82,0.0,-0.0,0.0,4.76,79.3 +2020-10-30 04:00:00,4.58,0.0,-0.0,0.0,4.76,76.3 +2020-10-30 05:00:00,4.34,0.0,-0.0,0.0,4.83,76.3 +2020-10-30 
06:00:00,4.12,7.0,0.0,7.0,4.83,76.25 +2020-10-30 07:00:00,4.21,121.0,327.12,65.0,4.55,79.2 +2020-10-30 08:00:00,5.42,251.0,573.78,86.0,4.34,73.65 +2020-10-30 09:00:00,7.17,330.0,562.17,121.0,4.41,73.8 +2020-10-30 10:00:00,8.95,375.0,566.88,138.0,4.28,68.8 +2020-10-30 11:00:00,10.45,386.0,592.93,135.0,4.07,66.55 +2020-10-30 12:00:00,11.54,360.0,650.92,108.0,3.79,64.35 +2020-10-30 13:00:00,12.08,281.0,621.77,87.0,3.52,64.45 +2020-10-30 14:00:00,11.82,176.0,595.92,55.0,3.1,66.75 +2020-10-30 15:00:00,10.64,44.0,339.85,21.0,2.83,69.1 +2020-10-30 16:00:00,8.94,0.0,-0.0,0.0,2.76,76.95 +2020-10-30 17:00:00,7.83,0.0,-0.0,0.0,2.83,82.65 +2020-10-30 18:00:00,6.98,0.0,-0.0,0.0,2.97,85.75 +2020-10-30 19:00:00,6.47,0.0,-0.0,0.0,3.24,76.65 +2020-10-30 20:00:00,6.05,0.0,-0.0,0.0,3.24,79.5 +2020-10-30 21:00:00,5.75,0.0,-0.0,0.0,3.17,82.5 +2020-10-30 22:00:00,5.47,0.0,-0.0,0.0,3.17,79.4 +2020-10-30 23:00:00,5.24,0.0,-0.0,0.0,3.24,82.4 +2020-10-31 00:00:00,5.02,0.0,-0.0,0.0,3.17,82.4 +2020-10-31 01:00:00,4.66,0.0,-0.0,0.0,3.17,82.35 +2020-10-31 02:00:00,4.56,0.0,-0.0,0.0,3.17,82.35 +2020-10-31 03:00:00,4.16,0.0,-0.0,0.0,3.24,82.3 +2020-10-31 04:00:00,3.99,0.0,-0.0,0.0,3.31,82.3 +2020-10-31 05:00:00,3.9,0.0,-0.0,0.0,3.31,85.5 +2020-10-31 06:00:00,3.74,5.0,0.0,5.0,3.45,88.75 +2020-10-31 07:00:00,4.21,62.0,24.04,58.0,3.38,88.8 +2020-10-31 08:00:00,5.03,75.0,0.0,75.0,3.93,85.6 +2020-10-31 09:00:00,6.04,99.0,0.0,99.0,4.07,82.55 +2020-10-31 10:00:00,7.43,76.0,0.0,76.0,3.93,79.65 +2020-10-31 11:00:00,8.7,285.0,208.06,198.0,3.66,76.85 +2020-10-31 12:00:00,10.09,345.0,609.93,112.0,3.45,74.3 +2020-10-31 13:00:00,10.73,196.0,182.42,140.0,3.31,71.7 +2020-10-31 14:00:00,10.83,93.0,60.56,81.0,3.03,74.4 +2020-10-31 15:00:00,10.06,20.0,15.88,19.0,3.03,77.1 +2020-10-31 16:00:00,8.22,0.0,-0.0,0.0,3.5,86.67 +2020-10-31 17:00:00,8.18,0.0,-0.0,0.0,3.53,86.64 +2020-10-31 18:00:00,8.13,0.0,-0.0,0.0,3.56,86.62 +2020-10-31 19:00:00,8.08,0.0,-0.0,0.0,3.59,86.59 +2020-10-31 20:00:00,8.03,0.0,-0.0,0.0,3.62,86.56 +2020-10-31 21:00:00,7.98,0.0,-0.0,0.0,3.65,86.53 +2020-10-31 22:00:00,7.94,0.0,-0.0,0.0,3.69,86.5 +2020-10-31 23:00:00,7.89,0.0,-0.0,0.0,3.72,86.48 +2020-11-01 00:00:00,7.84,0.0,-0.0,0.0,3.75,86.45 +2020-11-01 01:00:00,7.79,0.0,-0.0,0.0,3.78,86.42 +2020-11-01 02:00:00,7.75,0.0,-0.0,0.0,3.81,86.39 +2020-11-01 03:00:00,7.7,0.0,-0.0,0.0,3.84,86.36 +2020-11-01 04:00:00,7.65,0.0,-0.0,0.0,3.87,86.34 +2020-11-01 05:00:00,7.6,0.0,-0.0,0.0,3.9,86.31 +2020-11-01 06:00:00,7.55,0.0,0.0,0.0,3.94,86.28 +2020-11-01 07:00:00,7.51,78.0,80.43,65.0,3.97,86.25 +2020-11-01 08:00:00,8.35,40.0,0.0,40.0,2.69,82.75 +2020-11-01 09:00:00,9.47,46.0,0.0,46.0,2.48,82.9 +2020-11-01 10:00:00,10.15,76.0,0.0,76.0,2.14,82.95 +2020-11-01 11:00:00,10.1,79.0,0.0,79.0,1.66,86.0 +2020-11-01 12:00:00,10.17,74.0,0.0,74.0,1.24,92.5 +2020-11-01 13:00:00,10.18,24.0,0.0,24.0,1.1,92.5 +2020-11-01 14:00:00,10.26,71.0,15.52,68.0,1.17,89.25 +2020-11-01 15:00:00,10.18,25.0,85.66,20.0,1.38,92.5 +2020-11-01 16:00:00,9.79,0.0,-0.0,0.0,1.38,89.2 +2020-11-01 17:00:00,9.36,0.0,-0.0,0.0,1.31,89.15 +2020-11-01 18:00:00,8.99,0.0,-0.0,0.0,1.31,92.45 +2020-11-01 19:00:00,7.97,0.0,-0.0,0.0,1.24,95.8 +2020-11-01 20:00:00,7.94,0.0,-0.0,0.0,1.31,95.8 +2020-11-01 21:00:00,8.18,0.0,-0.0,0.0,1.52,95.8 +2020-11-01 22:00:00,8.19,0.0,-0.0,0.0,1.52,92.4 +2020-11-01 23:00:00,8.05,0.0,-0.0,0.0,1.52,92.4 +2020-11-02 00:00:00,7.74,0.0,-0.0,0.0,1.38,89.05 +2020-11-02 01:00:00,8.06,0.0,-0.0,0.0,0.69,89.05 +2020-11-02 02:00:00,7.51,0.0,-0.0,0.0,0.9,89.0 +2020-11-02 03:00:00,7.08,0.0,-0.0,0.0,1.1,92.35 
+2020-11-02 04:00:00,7.04,0.0,-0.0,0.0,1.24,92.35 +2020-11-02 05:00:00,6.99,0.0,-0.0,0.0,1.52,92.35 +2020-11-02 06:00:00,6.93,0.0,0.0,0.0,1.52,92.35 +2020-11-02 07:00:00,7.53,19.0,0.0,19.0,1.24,95.8 +2020-11-02 08:00:00,8.48,70.0,0.0,70.0,1.24,92.4 +2020-11-02 09:00:00,8.96,149.0,19.63,142.0,2.69,89.1 +2020-11-02 10:00:00,9.58,263.0,176.3,192.0,3.1,82.9 +2020-11-02 11:00:00,10.09,278.0,205.91,194.0,2.9,77.1 +2020-11-02 12:00:00,10.56,212.0,99.48,175.0,2.83,71.7 +2020-11-02 13:00:00,10.77,43.0,0.0,43.0,2.41,66.65 +2020-11-02 14:00:00,10.7,15.0,0.0,15.0,1.86,71.7 +2020-11-02 15:00:00,10.07,3.0,0.0,3.0,1.24,74.3 +2020-11-02 16:00:00,8.55,0.0,-0.0,0.0,1.17,85.9 +2020-11-02 17:00:00,7.21,0.0,-0.0,0.0,1.45,85.8 +2020-11-02 18:00:00,5.84,0.0,-0.0,0.0,1.72,88.9 +2020-11-02 19:00:00,5.02,0.0,-0.0,0.0,1.72,95.75 +2020-11-02 20:00:00,4.81,0.0,-0.0,0.0,1.86,95.75 +2020-11-02 21:00:00,4.96,0.0,-0.0,0.0,1.86,95.75 +2020-11-02 22:00:00,4.83,0.0,-0.0,0.0,2.14,92.25 +2020-11-02 23:00:00,5.35,0.0,-0.0,0.0,2.34,92.25 +2020-11-03 00:00:00,6.14,0.0,-0.0,0.0,2.34,88.9 +2020-11-03 01:00:00,5.67,0.0,-0.0,0.0,1.93,92.25 +2020-11-03 02:00:00,5.17,0.0,-0.0,0.0,1.72,95.75 +2020-11-03 03:00:00,5.41,0.0,-0.0,0.0,1.52,92.25 +2020-11-03 04:00:00,5.82,0.0,-0.0,0.0,1.52,92.3 +2020-11-03 05:00:00,5.92,0.0,-0.0,0.0,1.72,92.3 +2020-11-03 06:00:00,5.76,0.0,0.0,0.0,1.72,92.3 +2020-11-03 07:00:00,4.55,34.0,0.0,34.0,1.66,99.4 +2020-11-03 08:00:00,5.97,192.0,287.42,115.0,1.59,92.3 +2020-11-03 09:00:00,7.05,331.0,699.57,85.0,2.07,89.0 +2020-11-03 10:00:00,8.22,351.0,538.09,137.0,2.41,79.75 +2020-11-03 11:00:00,8.96,381.0,680.01,107.0,2.34,74.15 +2020-11-03 12:00:00,9.4,341.0,659.42,99.0,2.21,71.5 +2020-11-03 13:00:00,9.78,270.0,680.79,71.0,2.28,69.0 +2020-11-03 14:00:00,9.69,101.0,119.57,79.0,2.21,74.2 +2020-11-03 15:00:00,9.14,10.0,0.0,10.0,2.21,76.95 +2020-11-03 16:00:00,8.2,0.0,-0.0,0.0,2.28,82.7 +2020-11-03 17:00:00,7.95,0.0,-0.0,0.0,2.62,82.7 +2020-11-03 18:00:00,7.8,0.0,-0.0,0.0,2.34,85.85 +2020-11-03 19:00:00,8.01,0.0,-0.0,0.0,2.48,85.85 +2020-11-03 20:00:00,7.82,0.0,-0.0,0.0,2.48,85.85 +2020-11-03 21:00:00,7.39,0.0,-0.0,0.0,1.86,89.0 +2020-11-03 22:00:00,6.7,0.0,-0.0,0.0,1.72,92.35 +2020-11-03 23:00:00,6.54,0.0,-0.0,0.0,1.66,95.8 +2020-11-04 00:00:00,6.27,0.0,-0.0,0.0,1.72,95.8 +2020-11-04 01:00:00,6.11,0.0,-0.0,0.0,1.31,99.4 +2020-11-04 02:00:00,5.89,0.0,-0.0,0.0,1.24,95.75 +2020-11-04 03:00:00,5.82,0.0,-0.0,0.0,1.1,95.75 +2020-11-04 04:00:00,5.69,0.0,-0.0,0.0,1.03,95.75 +2020-11-04 05:00:00,6.15,0.0,-0.0,0.0,0.69,99.4 +2020-11-04 06:00:00,6.58,0.0,0.0,0.0,0.48,95.8 +2020-11-04 07:00:00,6.24,34.0,0.0,34.0,1.38,99.4 +2020-11-04 08:00:00,6.3,20.0,0.0,20.0,1.66,99.4 +2020-11-04 09:00:00,6.31,33.0,0.0,33.0,2.34,99.4 +2020-11-04 10:00:00,6.15,53.0,0.0,53.0,3.31,99.4 +2020-11-04 11:00:00,6.03,34.0,0.0,34.0,4.83,99.4 +2020-11-04 12:00:00,5.78,99.0,0.0,99.0,4.9,95.75 +2020-11-04 13:00:00,5.51,80.0,0.0,80.0,3.93,99.4 +2020-11-04 14:00:00,5.11,76.0,39.01,69.0,3.93,99.4 +2020-11-04 15:00:00,4.84,15.0,44.39,13.0,6.41,95.75 +2020-11-04 16:00:00,4.85,0.0,-0.0,0.0,6.0,95.75 +2020-11-04 17:00:00,5.26,0.0,-0.0,0.0,6.21,92.25 +2020-11-04 18:00:00,5.62,0.0,-0.0,0.0,6.28,95.75 +2020-11-04 19:00:00,5.76,0.0,-0.0,0.0,6.21,92.3 +2020-11-04 20:00:00,5.92,0.0,-0.0,0.0,5.93,92.3 +2020-11-04 21:00:00,5.92,0.0,-0.0,0.0,5.52,92.3 +2020-11-04 22:00:00,5.89,0.0,-0.0,0.0,5.31,88.9 +2020-11-04 23:00:00,6.0,0.0,-0.0,0.0,5.1,88.9 +2020-11-05 00:00:00,6.03,0.0,-0.0,0.0,4.83,88.9 +2020-11-05 01:00:00,6.44,0.0,-0.0,0.0,4.62,85.7 +2020-11-05 
02:00:00,6.51,0.0,-0.0,0.0,4.55,85.7 +2020-11-05 03:00:00,6.51,0.0,-0.0,0.0,4.34,85.7 +2020-11-05 04:00:00,6.49,0.0,-0.0,0.0,4.14,85.7 +2020-11-05 05:00:00,6.46,0.0,-0.0,0.0,4.0,88.95 +2020-11-05 06:00:00,6.32,0.0,0.0,0.0,3.72,88.95 +2020-11-05 07:00:00,6.37,19.0,0.0,19.0,3.66,92.3 +2020-11-05 08:00:00,6.33,41.0,0.0,41.0,3.45,92.3 +2020-11-05 09:00:00,6.3,86.0,0.0,86.0,3.31,95.8 +2020-11-05 10:00:00,6.58,71.0,0.0,71.0,2.69,95.8 +2020-11-05 11:00:00,6.92,100.0,0.0,100.0,2.83,95.8 +2020-11-05 12:00:00,7.34,120.0,2.8,119.0,2.9,92.35 +2020-11-05 13:00:00,7.52,115.0,21.21,109.0,3.1,92.35 +2020-11-05 14:00:00,7.43,72.0,34.3,66.0,3.1,92.35 +2020-11-05 15:00:00,7.23,6.0,0.0,6.0,3.1,89.0 +2020-11-05 16:00:00,6.96,0.0,-0.0,0.0,3.24,89.0 +2020-11-05 17:00:00,6.65,0.0,-0.0,0.0,3.31,88.95 +2020-11-05 18:00:00,6.56,0.0,-0.0,0.0,3.45,88.95 +2020-11-05 19:00:00,6.37,0.0,-0.0,0.0,2.97,88.95 +2020-11-05 20:00:00,5.77,0.0,-0.0,0.0,2.9,92.3 +2020-11-05 21:00:00,5.45,0.0,-0.0,0.0,2.97,92.25 +2020-11-05 22:00:00,5.29,0.0,-0.0,0.0,3.1,92.25 +2020-11-05 23:00:00,5.17,0.0,-0.0,0.0,3.1,95.75 +2020-11-06 00:00:00,5.04,0.0,-0.0,0.0,3.17,95.75 +2020-11-06 01:00:00,4.96,0.0,-0.0,0.0,3.17,95.75 +2020-11-06 02:00:00,4.88,0.0,-0.0,0.0,3.1,95.75 +2020-11-06 03:00:00,4.72,0.0,-0.0,0.0,3.03,92.25 +2020-11-06 04:00:00,4.45,0.0,-0.0,0.0,3.1,95.75 +2020-11-06 05:00:00,4.39,0.0,-0.0,0.0,3.1,95.75 +2020-11-06 06:00:00,4.5,0.0,-0.0,0.0,3.17,95.75 +2020-11-06 07:00:00,5.28,97.0,347.36,49.0,2.9,92.25 +2020-11-06 08:00:00,6.44,212.0,512.87,82.0,2.76,88.95 +2020-11-06 09:00:00,7.66,275.0,421.43,133.0,3.45,85.8 +2020-11-06 10:00:00,8.68,340.0,548.43,130.0,3.93,76.85 +2020-11-06 11:00:00,9.39,298.0,321.97,173.0,3.86,68.9 +2020-11-06 12:00:00,9.87,278.0,368.73,148.0,4.0,66.45 +2020-11-06 13:00:00,10.0,245.0,585.67,82.0,3.79,66.45 +2020-11-06 14:00:00,9.65,122.0,334.21,65.0,3.17,68.9 +2020-11-06 15:00:00,8.69,9.0,0.0,9.0,2.62,74.05 +2020-11-06 16:00:00,7.55,0.0,-0.0,0.0,2.69,76.7 +2020-11-06 17:00:00,6.65,0.0,-0.0,0.0,2.69,82.55 +2020-11-06 18:00:00,6.01,0.0,-0.0,0.0,2.9,82.5 +2020-11-06 19:00:00,5.83,0.0,-0.0,0.0,3.24,82.5 +2020-11-06 20:00:00,5.42,0.0,-0.0,0.0,3.17,82.4 +2020-11-06 21:00:00,5.16,0.0,-0.0,0.0,3.38,82.35 +2020-11-06 22:00:00,4.83,0.0,-0.0,0.0,3.45,82.35 +2020-11-06 23:00:00,4.62,0.0,-0.0,0.0,3.52,85.5 +2020-11-07 00:00:00,4.6,0.0,-0.0,0.0,3.59,82.3 +2020-11-07 01:00:00,4.43,0.0,-0.0,0.0,3.45,82.3 +2020-11-07 02:00:00,4.76,0.0,-0.0,0.0,3.59,76.3 +2020-11-07 03:00:00,5.06,0.0,-0.0,0.0,3.93,76.3 +2020-11-07 04:00:00,5.28,0.0,-0.0,0.0,4.07,73.55 +2020-11-07 05:00:00,5.08,0.0,-0.0,0.0,4.48,73.45 +2020-11-07 06:00:00,5.12,0.0,-0.0,0.0,4.62,76.3 +2020-11-07 07:00:00,5.66,67.0,97.32,54.0,4.9,82.4 +2020-11-07 08:00:00,6.2,56.0,0.0,56.0,5.1,79.5 +2020-11-07 09:00:00,6.48,58.0,0.0,58.0,5.31,82.55 +2020-11-07 10:00:00,6.57,310.0,425.83,149.0,5.38,82.55 +2020-11-07 11:00:00,7.11,62.0,0.0,62.0,4.41,82.6 +2020-11-07 12:00:00,7.37,76.0,0.0,76.0,5.24,79.65 +2020-11-07 13:00:00,7.71,22.0,0.0,22.0,5.1,79.7 +2020-11-07 14:00:00,7.64,13.0,0.0,13.0,5.1,82.65 +2020-11-07 15:00:00,7.55,2.0,0.0,2.0,4.62,82.65 +2020-11-07 16:00:00,7.45,0.0,-0.0,0.0,4.41,82.65 +2020-11-07 17:00:00,7.36,0.0,-0.0,0.0,4.48,82.65 +2020-11-07 18:00:00,7.13,0.0,-0.0,0.0,4.48,85.75 +2020-11-07 19:00:00,6.59,0.0,-0.0,0.0,4.34,88.95 +2020-11-07 20:00:00,6.24,0.0,-0.0,0.0,4.34,88.95 +2020-11-07 21:00:00,6.08,0.0,-0.0,0.0,4.41,88.9 +2020-11-07 22:00:00,6.19,0.0,-0.0,0.0,4.34,85.7 +2020-11-07 23:00:00,6.47,0.0,-0.0,0.0,4.41,88.95 +2020-11-08 
00:00:00,6.51,0.0,-0.0,0.0,4.41,88.95 +2020-11-08 01:00:00,6.75,0.0,-0.0,0.0,4.41,85.75 +2020-11-08 02:00:00,6.66,0.0,-0.0,0.0,4.48,88.95 +2020-11-08 03:00:00,6.55,0.0,-0.0,0.0,4.48,88.95 +2020-11-08 04:00:00,6.42,0.0,-0.0,0.0,4.28,85.7 +2020-11-08 05:00:00,6.32,0.0,-0.0,0.0,4.0,85.7 +2020-11-08 06:00:00,6.36,0.0,-0.0,0.0,3.79,85.7 +2020-11-08 07:00:00,6.45,74.0,170.51,52.0,3.93,88.95 +2020-11-08 08:00:00,6.55,114.0,49.17,102.0,3.59,92.3 +2020-11-08 09:00:00,6.78,90.0,0.0,90.0,3.79,89.0 +2020-11-08 10:00:00,7.31,121.0,2.68,120.0,4.41,85.8 +2020-11-08 11:00:00,7.82,172.0,29.04,161.0,4.0,82.7 +2020-11-08 12:00:00,8.35,192.0,93.21,160.0,3.59,79.75 +2020-11-08 13:00:00,8.64,157.0,126.21,123.0,3.24,79.75 +2020-11-08 14:00:00,8.77,37.0,0.0,37.0,3.1,76.95 +2020-11-08 15:00:00,8.36,3.0,0.0,3.0,2.83,76.85 +2020-11-08 16:00:00,7.94,0.0,-0.0,0.0,2.55,79.7 +2020-11-08 17:00:00,7.7,0.0,-0.0,0.0,2.28,82.65 +2020-11-08 18:00:00,7.4,0.0,-0.0,0.0,2.14,85.8 +2020-11-08 19:00:00,6.67,0.0,-0.0,0.0,2.28,85.7 +2020-11-08 20:00:00,6.69,0.0,-0.0,0.0,2.34,82.6 +2020-11-08 21:00:00,6.69,0.0,-0.0,0.0,2.41,85.7 +2020-11-08 22:00:00,6.67,0.0,-0.0,0.0,2.34,85.7 +2020-11-08 23:00:00,6.71,0.0,-0.0,0.0,2.21,82.6 +2020-11-09 00:00:00,6.65,0.0,-0.0,0.0,2.0,85.7 +2020-11-09 01:00:00,6.4,0.0,-0.0,0.0,1.86,85.7 +2020-11-09 02:00:00,6.56,0.0,-0.0,0.0,1.79,85.7 +2020-11-09 03:00:00,6.59,0.0,-0.0,0.0,1.66,85.7 +2020-11-09 04:00:00,6.62,0.0,-0.0,0.0,1.45,85.7 +2020-11-09 05:00:00,6.59,0.0,-0.0,0.0,1.38,88.95 +2020-11-09 06:00:00,6.44,0.0,-0.0,0.0,1.31,88.95 +2020-11-09 07:00:00,4.71,82.0,289.17,46.0,1.31,95.75 +2020-11-09 08:00:00,6.37,187.0,413.54,88.0,0.83,88.95 +2020-11-09 09:00:00,7.99,264.0,440.09,122.0,0.83,79.7 +2020-11-09 10:00:00,8.77,357.0,746.05,82.0,0.76,74.15 +2020-11-09 11:00:00,9.67,353.0,689.64,95.0,1.86,71.5 +2020-11-09 12:00:00,9.93,307.0,613.96,99.0,2.41,66.45 +2020-11-09 13:00:00,9.98,227.0,550.85,81.0,2.55,66.45 +2020-11-09 14:00:00,9.58,110.0,316.61,60.0,2.07,68.9 +2020-11-09 15:00:00,8.74,5.0,0.0,5.0,2.07,71.4 +2020-11-09 16:00:00,7.58,0.0,-0.0,0.0,2.41,79.65 +2020-11-09 17:00:00,6.92,0.0,-0.0,0.0,2.76,82.6 +2020-11-09 18:00:00,6.37,0.0,-0.0,0.0,2.97,82.55 +2020-11-09 19:00:00,5.81,0.0,-0.0,0.0,2.55,85.65 +2020-11-09 20:00:00,5.17,0.0,-0.0,0.0,2.69,88.85 +2020-11-09 21:00:00,4.86,0.0,-0.0,0.0,2.97,85.55 +2020-11-09 22:00:00,4.79,0.0,-0.0,0.0,3.24,82.35 +2020-11-09 23:00:00,4.56,0.0,-0.0,0.0,3.03,85.5 +2020-11-10 00:00:00,4.15,0.0,-0.0,0.0,2.9,88.75 +2020-11-10 01:00:00,3.42,0.0,-0.0,0.0,2.83,85.45 +2020-11-10 02:00:00,3.11,0.0,-0.0,0.0,2.69,88.7 +2020-11-10 03:00:00,2.75,0.0,-0.0,0.0,2.83,88.7 +2020-11-10 04:00:00,2.57,0.0,-0.0,0.0,3.1,88.65 +2020-11-10 05:00:00,2.45,0.0,-0.0,0.0,3.1,88.65 +2020-11-10 06:00:00,2.29,0.0,-0.0,0.0,2.97,88.65 +2020-11-10 07:00:00,2.16,59.0,91.66,48.0,2.76,92.15 +2020-11-10 08:00:00,3.27,129.0,102.22,105.0,2.41,88.75 +2020-11-10 09:00:00,4.9,238.0,314.46,138.0,2.69,88.85 +2020-11-10 10:00:00,6.23,330.0,596.22,113.0,2.55,82.55 +2020-11-10 11:00:00,7.32,261.0,235.43,174.0,2.28,76.7 +2020-11-10 12:00:00,7.78,235.0,242.26,154.0,2.0,73.95 +2020-11-10 13:00:00,8.08,201.0,375.77,103.0,2.0,76.8 +2020-11-10 14:00:00,7.9,96.0,214.43,63.0,1.72,76.8 +2020-11-10 15:00:00,7.05,0.0,0.0,0.0,1.79,82.6 +2020-11-10 16:00:00,5.7,0.0,-0.0,0.0,2.14,85.65 +2020-11-10 17:00:00,4.85,0.0,-0.0,0.0,2.21,88.85 +2020-11-10 18:00:00,4.28,0.0,-0.0,0.0,2.14,92.2 +2020-11-10 19:00:00,3.85,0.0,-0.0,0.0,2.14,92.15 +2020-11-10 20:00:00,3.64,0.0,-0.0,0.0,2.07,92.15 +2020-11-10 21:00:00,3.79,0.0,-0.0,0.0,1.93,92.15 
+2020-11-10 22:00:00,3.38,0.0,-0.0,0.0,1.72,92.15 +2020-11-10 23:00:00,2.57,0.0,-0.0,0.0,1.79,95.7 +2020-11-11 00:00:00,2.02,0.0,-0.0,0.0,1.79,95.65 +2020-11-11 01:00:00,1.61,0.0,-0.0,0.0,1.79,95.65 +2020-11-11 02:00:00,1.25,0.0,-0.0,0.0,1.79,95.65 +2020-11-11 03:00:00,0.97,0.0,-0.0,0.0,1.72,99.35 +2020-11-11 04:00:00,0.88,0.0,-0.0,0.0,1.86,99.35 +2020-11-11 05:00:00,1.09,0.0,-0.0,0.0,2.0,99.35 +2020-11-11 06:00:00,1.16,0.0,-0.0,0.0,1.86,95.65 +2020-11-11 07:00:00,1.88,75.0,294.22,41.0,1.79,95.65 +2020-11-11 08:00:00,3.14,184.0,451.71,80.0,1.72,95.7 +2020-11-11 09:00:00,4.53,252.0,421.19,120.0,1.66,92.2 +2020-11-11 10:00:00,5.49,303.0,475.83,132.0,1.45,88.85 +2020-11-11 11:00:00,6.81,341.0,679.38,93.0,1.24,82.6 +2020-11-11 12:00:00,7.77,303.0,651.53,88.0,1.24,76.8 +2020-11-11 13:00:00,8.39,219.0,553.31,77.0,1.17,74.05 +2020-11-11 14:00:00,8.31,107.0,360.08,53.0,0.76,74.05 +2020-11-11 15:00:00,7.46,0.0,0.0,0.0,0.62,82.65 +2020-11-11 16:00:00,6.16,0.0,-0.0,0.0,0.9,88.9 +2020-11-11 17:00:00,4.93,0.0,-0.0,0.0,1.03,92.25 +2020-11-11 18:00:00,4.8,0.0,-0.0,0.0,0.97,92.25 +2020-11-11 19:00:00,0.88,0.0,-0.0,0.0,1.72,95.65 +2020-11-11 20:00:00,0.2,0.0,-0.0,0.0,1.59,95.65 +2020-11-11 21:00:00,0.3,0.0,-0.0,0.0,1.24,99.4 +2020-11-11 22:00:00,0.36,0.0,-0.0,0.0,1.24,99.4 +2020-11-11 23:00:00,0.37,0.0,-0.0,0.0,1.31,99.4 +2020-11-12 00:00:00,0.35,0.0,-0.0,0.0,1.45,99.4 +2020-11-12 01:00:00,0.33,0.0,-0.0,0.0,1.31,99.4 +2020-11-12 02:00:00,0.45,0.0,-0.0,0.0,1.31,99.4 +2020-11-12 03:00:00,0.53,0.0,-0.0,0.0,1.38,99.4 +2020-11-12 04:00:00,0.68,0.0,-0.0,0.0,1.45,95.65 +2020-11-12 05:00:00,0.73,0.0,-0.0,0.0,1.59,95.65 +2020-11-12 06:00:00,0.71,0.0,-0.0,0.0,1.79,95.65 +2020-11-12 07:00:00,0.56,54.0,98.96,43.0,2.0,99.4 +2020-11-12 08:00:00,1.02,177.0,420.86,82.0,2.07,99.35 +2020-11-12 09:00:00,2.15,259.0,482.43,110.0,2.14,95.65 +2020-11-12 10:00:00,3.73,337.0,707.32,86.0,2.41,92.15 +2020-11-12 11:00:00,5.21,342.0,698.81,90.0,2.83,82.4 +2020-11-12 12:00:00,6.22,297.0,632.44,91.0,2.9,79.5 +2020-11-12 13:00:00,6.61,221.0,593.9,71.0,2.97,79.5 +2020-11-12 14:00:00,6.44,102.0,328.47,54.0,2.9,79.5 +2020-11-12 15:00:00,5.38,0.0,0.0,0.0,2.83,85.6 +2020-11-12 16:00:00,4.37,0.0,-0.0,0.0,2.9,92.2 +2020-11-12 17:00:00,3.84,0.0,-0.0,0.0,2.9,92.15 +2020-11-12 18:00:00,3.63,0.0,-0.0,0.0,2.76,92.15 +2020-11-12 19:00:00,3.19,0.0,-0.0,0.0,2.76,88.75 +2020-11-12 20:00:00,3.17,0.0,-0.0,0.0,2.76,88.75 +2020-11-12 21:00:00,3.27,0.0,-0.0,0.0,2.97,88.75 +2020-11-12 22:00:00,3.19,0.0,-0.0,0.0,3.24,88.75 +2020-11-12 23:00:00,3.22,0.0,-0.0,0.0,3.45,88.75 +2020-11-13 00:00:00,3.3,0.0,-0.0,0.0,3.38,88.75 +2020-11-13 01:00:00,3.78,0.0,-0.0,0.0,3.31,92.15 +2020-11-13 02:00:00,3.69,0.0,-0.0,0.0,3.03,92.15 +2020-11-13 03:00:00,3.57,0.0,-0.0,0.0,2.76,92.15 +2020-11-13 04:00:00,3.7,0.0,-0.0,0.0,2.76,95.7 +2020-11-13 05:00:00,3.71,0.0,-0.0,0.0,2.9,95.7 +2020-11-13 06:00:00,3.87,0.0,-0.0,0.0,3.03,95.7 +2020-11-13 07:00:00,3.87,39.0,28.09,36.0,2.69,88.75 +2020-11-13 08:00:00,4.28,98.0,40.67,89.0,2.28,88.8 +2020-11-13 09:00:00,5.24,233.0,364.69,122.0,2.21,85.6 +2020-11-13 10:00:00,6.23,146.0,19.98,139.0,1.79,82.55 +2020-11-13 11:00:00,7.34,260.0,286.3,158.0,1.72,79.65 +2020-11-13 12:00:00,8.21,167.0,71.53,144.0,1.86,74.05 +2020-11-13 13:00:00,8.33,73.0,0.0,73.0,1.86,74.05 +2020-11-13 14:00:00,8.01,21.0,0.0,21.0,1.72,79.7 +2020-11-13 15:00:00,7.28,0.0,0.0,0.0,1.79,79.65 +2020-11-13 16:00:00,6.34,0.0,-0.0,0.0,1.86,85.7 +2020-11-13 17:00:00,5.34,0.0,-0.0,0.0,1.86,92.25 +2020-11-13 18:00:00,4.85,0.0,-0.0,0.0,1.79,95.75 +2020-11-13 
19:00:00,5.36,0.0,-0.0,0.0,1.59,92.25 +2020-11-13 20:00:00,4.92,0.0,-0.0,0.0,1.52,95.75 +2020-11-13 21:00:00,4.84,0.0,-0.0,0.0,1.45,95.75 +2020-11-13 22:00:00,4.63,0.0,-0.0,0.0,1.45,99.4 +2020-11-13 23:00:00,4.49,0.0,-0.0,0.0,1.38,99.4 +2020-11-14 00:00:00,4.51,0.0,-0.0,0.0,1.31,99.4 +2020-11-14 01:00:00,4.12,0.0,-0.0,0.0,1.24,99.4 +2020-11-14 02:00:00,3.96,0.0,-0.0,0.0,1.17,99.4 +2020-11-14 03:00:00,3.76,0.0,-0.0,0.0,1.24,99.4 +2020-11-14 04:00:00,3.92,0.0,-0.0,0.0,1.17,99.4 +2020-11-14 05:00:00,4.37,0.0,-0.0,0.0,1.03,95.75 +2020-11-14 06:00:00,4.32,0.0,-0.0,0.0,1.1,95.75 +2020-11-14 07:00:00,4.12,69.0,361.02,32.0,0.97,100.0 +2020-11-14 08:00:00,5.33,119.0,115.27,94.0,0.28,95.75 +2020-11-14 09:00:00,6.47,99.0,3.33,98.0,0.41,92.3 +2020-11-14 10:00:00,7.24,154.0,28.9,144.0,0.69,85.8 +2020-11-14 11:00:00,7.78,157.0,31.25,146.0,1.17,85.85 +2020-11-14 12:00:00,7.81,56.0,0.0,56.0,1.45,85.85 +2020-11-14 13:00:00,7.79,57.0,0.0,57.0,1.72,85.85 +2020-11-14 14:00:00,7.6,40.0,7.21,39.0,1.86,89.0 +2020-11-14 15:00:00,7.17,0.0,0.0,0.0,1.72,92.35 +2020-11-14 16:00:00,6.74,0.0,-0.0,0.0,1.79,92.35 +2020-11-14 17:00:00,6.41,0.0,-0.0,0.0,2.0,95.8 +2020-11-14 18:00:00,6.14,0.0,-0.0,0.0,2.14,99.4 +2020-11-14 19:00:00,5.51,0.0,-0.0,0.0,2.69,95.75 +2020-11-14 20:00:00,5.21,0.0,-0.0,0.0,2.34,92.25 +2020-11-14 21:00:00,4.97,0.0,-0.0,0.0,2.07,95.75 +2020-11-14 22:00:00,4.74,0.0,-0.0,0.0,1.93,95.75 +2020-11-14 23:00:00,4.46,0.0,-0.0,0.0,2.21,99.4 +2020-11-15 00:00:00,4.05,0.0,-0.0,0.0,2.69,99.4 +2020-11-15 01:00:00,3.49,0.0,-0.0,0.0,2.76,95.7 +2020-11-15 02:00:00,3.16,0.0,-0.0,0.0,2.48,99.4 +2020-11-15 03:00:00,2.99,0.0,-0.0,0.0,2.28,95.7 +2020-11-15 04:00:00,2.85,0.0,-0.0,0.0,2.28,95.7 +2020-11-15 05:00:00,2.76,0.0,-0.0,0.0,2.41,92.15 +2020-11-15 06:00:00,2.67,0.0,-0.0,0.0,2.55,92.15 +2020-11-15 07:00:00,3.15,25.0,0.0,25.0,2.48,92.15 +2020-11-15 08:00:00,3.49,58.0,0.0,58.0,2.41,88.75 +2020-11-15 09:00:00,3.93,49.0,0.0,49.0,2.28,92.15 +2020-11-15 10:00:00,4.7,62.0,0.0,62.0,2.21,85.55 +2020-11-15 11:00:00,5.43,71.0,0.0,71.0,2.14,82.4 +2020-11-15 12:00:00,6.16,61.0,0.0,61.0,2.07,82.5 +2020-11-15 13:00:00,6.66,66.0,0.0,66.0,2.0,79.5 +2020-11-15 14:00:00,6.72,37.0,0.0,37.0,1.93,76.65 +2020-11-15 15:00:00,6.01,0.0,0.0,0.0,1.86,82.5 +2020-11-15 16:00:00,5.02,0.0,-0.0,0.0,2.0,88.85 +2020-11-15 17:00:00,4.26,0.0,-0.0,0.0,2.14,92.2 +2020-11-15 18:00:00,3.69,0.0,-0.0,0.0,2.14,92.15 +2020-11-15 19:00:00,3.31,0.0,-0.0,0.0,1.93,92.15 +2020-11-15 20:00:00,2.91,0.0,-0.0,0.0,1.93,95.7 +2020-11-15 21:00:00,2.64,0.0,-0.0,0.0,2.07,95.7 +2020-11-15 22:00:00,2.47,0.0,-0.0,0.0,2.14,95.7 +2020-11-15 23:00:00,2.76,0.0,-0.0,0.0,2.14,92.15 +2020-11-16 00:00:00,3.14,0.0,-0.0,0.0,2.14,95.7 +2020-11-16 01:00:00,3.59,0.0,-0.0,0.0,2.07,95.7 +2020-11-16 02:00:00,3.97,0.0,-0.0,0.0,2.0,99.4 +2020-11-16 03:00:00,4.32,0.0,-0.0,0.0,1.93,99.4 +2020-11-16 04:00:00,4.63,0.0,-0.0,0.0,2.07,99.4 +2020-11-16 05:00:00,4.93,0.0,-0.0,0.0,2.34,95.75 +2020-11-16 06:00:00,4.98,0.0,-0.0,0.0,2.69,95.75 +2020-11-16 07:00:00,5.21,41.0,74.45,34.0,3.03,92.25 +2020-11-16 08:00:00,5.7,114.0,110.44,91.0,3.17,88.9 +2020-11-16 09:00:00,6.56,214.0,302.1,126.0,4.55,85.7 +2020-11-16 10:00:00,7.09,245.0,275.55,152.0,4.21,82.6 +2020-11-16 11:00:00,7.53,252.0,282.21,155.0,3.45,79.65 +2020-11-16 12:00:00,7.86,237.0,348.93,129.0,2.97,76.8 +2020-11-16 13:00:00,8.13,135.0,118.02,107.0,2.62,76.8 +2020-11-16 14:00:00,8.09,57.0,53.11,50.0,1.86,76.8 +2020-11-16 15:00:00,7.7,0.0,0.0,0.0,1.72,76.8 +2020-11-16 16:00:00,7.17,0.0,-0.0,0.0,1.79,82.6 +2020-11-16 
17:00:00,6.76,0.0,-0.0,0.0,1.86,82.6 +2020-11-16 18:00:00,6.49,0.0,-0.0,0.0,1.86,85.7 +2020-11-16 19:00:00,6.07,0.0,-0.0,0.0,1.24,88.9 +2020-11-16 20:00:00,5.7,0.0,-0.0,0.0,1.38,88.9 +2020-11-16 21:00:00,5.26,0.0,-0.0,0.0,1.31,92.25 +2020-11-16 22:00:00,4.48,0.0,-0.0,0.0,1.31,95.75 +2020-11-16 23:00:00,4.2,0.0,-0.0,0.0,1.17,95.75 +2020-11-17 00:00:00,3.79,0.0,-0.0,0.0,1.17,99.4 +2020-11-17 01:00:00,3.74,0.0,-0.0,0.0,1.03,99.4 +2020-11-17 02:00:00,3.72,0.0,-0.0,0.0,0.9,99.4 +2020-11-17 03:00:00,3.91,0.0,-0.0,0.0,0.69,100.0 +2020-11-17 04:00:00,3.86,0.0,-0.0,0.0,0.76,99.4 +2020-11-17 05:00:00,3.91,0.0,-0.0,0.0,0.69,100.0 +2020-11-17 06:00:00,3.98,0.0,-0.0,0.0,0.76,100.0 +2020-11-17 07:00:00,4.44,8.0,0.0,8.0,1.1,99.4 +2020-11-17 08:00:00,4.92,82.0,24.51,77.0,1.79,95.75 +2020-11-17 09:00:00,5.26,98.0,6.97,96.0,2.14,88.85 +2020-11-17 10:00:00,5.4,30.0,0.0,30.0,2.28,88.85 +2020-11-17 11:00:00,5.57,57.0,0.0,57.0,2.97,85.6 +2020-11-17 12:00:00,5.72,37.0,0.0,37.0,3.1,82.5 +2020-11-17 13:00:00,5.81,18.0,0.0,18.0,3.17,79.4 +2020-11-17 14:00:00,5.6,13.0,0.0,13.0,2.83,82.4 +2020-11-17 15:00:00,5.44,0.0,-0.0,0.0,2.9,79.35 +2020-11-17 16:00:00,5.2,0.0,-0.0,0.0,2.83,79.35 +2020-11-17 17:00:00,4.95,0.0,-0.0,0.0,2.83,82.35 +2020-11-17 18:00:00,5.09,0.0,-0.0,0.0,2.9,85.55 +2020-11-17 19:00:00,5.29,0.0,-0.0,0.0,3.38,82.4 +2020-11-17 20:00:00,5.63,0.0,-0.0,0.0,3.79,85.6 +2020-11-17 21:00:00,5.64,0.0,-0.0,0.0,3.93,88.85 +2020-11-17 22:00:00,5.62,0.0,-0.0,0.0,4.0,88.85 +2020-11-17 23:00:00,5.47,0.0,-0.0,0.0,3.79,88.85 +2020-11-18 00:00:00,5.34,0.0,-0.0,0.0,3.72,88.85 +2020-11-18 01:00:00,5.64,0.0,-0.0,0.0,3.72,92.25 +2020-11-18 02:00:00,5.5,0.0,-0.0,0.0,3.79,92.25 +2020-11-18 03:00:00,5.38,0.0,-0.0,0.0,3.72,88.85 +2020-11-18 04:00:00,5.18,0.0,-0.0,0.0,3.72,88.85 +2020-11-18 05:00:00,4.82,0.0,-0.0,0.0,3.72,92.25 +2020-11-18 06:00:00,4.8,0.0,-0.0,0.0,3.79,92.25 +2020-11-18 07:00:00,5.51,47.0,174.87,32.0,3.59,92.25 +2020-11-18 08:00:00,6.13,166.0,545.35,57.0,3.72,88.9 +2020-11-18 09:00:00,7.05,92.0,3.53,91.0,3.59,89.0 +2020-11-18 10:00:00,7.57,77.0,0.0,77.0,3.86,85.8 +2020-11-18 11:00:00,7.64,140.0,20.85,133.0,3.79,85.8 +2020-11-18 12:00:00,8.38,189.0,162.26,140.0,5.59,76.85 +2020-11-18 13:00:00,8.62,103.0,39.1,94.0,4.76,76.85 +2020-11-18 14:00:00,8.26,69.0,151.67,50.0,4.07,76.85 +2020-11-18 15:00:00,7.49,0.0,-0.0,0.0,3.86,79.65 +2020-11-18 16:00:00,7.11,0.0,-0.0,0.0,3.93,79.55 +2020-11-18 17:00:00,6.92,0.0,-0.0,0.0,4.28,76.65 +2020-11-18 18:00:00,6.7,0.0,-0.0,0.0,4.48,76.65 +2020-11-18 19:00:00,6.37,0.0,-0.0,0.0,4.62,85.7 +2020-11-18 20:00:00,6.58,0.0,-0.0,0.0,4.83,85.7 +2020-11-18 21:00:00,6.79,0.0,-0.0,0.0,4.83,82.6 +2020-11-18 22:00:00,7.08,0.0,-0.0,0.0,5.1,82.6 +2020-11-18 23:00:00,7.39,0.0,-0.0,0.0,5.24,79.65 +2020-11-19 00:00:00,7.64,0.0,-0.0,0.0,5.38,82.65 +2020-11-19 01:00:00,7.2,0.0,-0.0,0.0,5.38,79.65 +2020-11-19 02:00:00,7.3,0.0,-0.0,0.0,5.24,82.65 +2020-11-19 03:00:00,7.47,0.0,-0.0,0.0,5.17,85.8 +2020-11-19 04:00:00,7.7,0.0,-0.0,0.0,5.1,89.0 +2020-11-19 05:00:00,7.87,0.0,-0.0,0.0,5.03,85.85 +2020-11-19 06:00:00,7.97,0.0,-0.0,0.0,5.17,89.05 +2020-11-19 07:00:00,7.75,25.0,12.23,24.0,5.17,85.85 +2020-11-19 08:00:00,7.93,74.0,20.43,70.0,5.24,89.05 +2020-11-19 09:00:00,8.17,45.0,0.0,45.0,5.45,89.05 +2020-11-19 10:00:00,8.39,109.0,3.07,108.0,4.97,85.9 +2020-11-19 11:00:00,8.56,67.0,0.0,67.0,5.79,89.1 +2020-11-19 12:00:00,8.73,104.0,6.7,102.0,6.41,85.95 +2020-11-19 13:00:00,8.92,38.0,0.0,38.0,5.66,89.1 +2020-11-19 14:00:00,8.97,31.0,0.0,31.0,5.03,89.1 +2020-11-19 15:00:00,8.78,0.0,-0.0,0.0,4.83,89.1 
+2020-11-19 16:00:00,8.58,0.0,-0.0,0.0,4.83,92.4 +2020-11-19 17:00:00,8.42,0.0,-0.0,0.0,4.83,92.4 +2020-11-19 18:00:00,8.31,0.0,-0.0,0.0,4.69,92.4 +2020-11-19 19:00:00,7.91,0.0,-0.0,0.0,4.69,92.4 +2020-11-19 20:00:00,7.8,0.0,-0.0,0.0,4.69,92.4 +2020-11-19 21:00:00,7.87,0.0,-0.0,0.0,4.62,92.4 +2020-11-19 22:00:00,7.92,0.0,-0.0,0.0,4.48,92.4 +2020-11-19 23:00:00,7.91,0.0,-0.0,0.0,4.34,92.4 +2020-11-20 00:00:00,8.01,0.0,-0.0,0.0,4.34,95.8 +2020-11-20 01:00:00,8.27,0.0,-0.0,0.0,4.21,92.4 +2020-11-20 02:00:00,8.31,0.0,-0.0,0.0,4.14,92.4 +2020-11-20 03:00:00,8.3,0.0,-0.0,0.0,4.07,92.4 +2020-11-20 04:00:00,8.35,0.0,-0.0,0.0,4.0,95.85 +2020-11-20 05:00:00,8.35,0.0,-0.0,0.0,3.86,95.85 +2020-11-20 06:00:00,8.33,0.0,-0.0,0.0,3.72,95.85 +2020-11-20 07:00:00,8.48,16.0,0.0,16.0,3.79,95.85 +2020-11-20 08:00:00,8.52,50.0,0.0,50.0,3.59,95.85 +2020-11-20 09:00:00,8.7,30.0,0.0,30.0,3.1,95.85 +2020-11-20 10:00:00,8.86,33.0,0.0,33.0,3.03,92.45 +2020-11-20 11:00:00,9.22,52.0,0.0,52.0,3.52,92.45 +2020-11-20 12:00:00,9.34,48.0,0.0,48.0,3.31,92.45 +2020-11-20 13:00:00,9.39,38.0,0.0,38.0,2.9,89.15 +2020-11-20 14:00:00,9.28,13.0,0.0,13.0,2.97,89.15 +2020-11-20 15:00:00,9.02,0.0,-0.0,0.0,2.9,89.1 +2020-11-20 16:00:00,8.6,0.0,-0.0,0.0,2.9,89.1 +2020-11-20 17:00:00,8.17,0.0,-0.0,0.0,2.97,89.05 +2020-11-20 18:00:00,7.62,0.0,-0.0,0.0,2.97,89.0 +2020-11-20 19:00:00,7.04,0.0,-0.0,0.0,2.76,89.0 +2020-11-20 20:00:00,6.74,0.0,-0.0,0.0,2.83,85.75 +2020-11-20 21:00:00,6.46,0.0,-0.0,0.0,2.83,88.95 +2020-11-20 22:00:00,6.03,0.0,-0.0,0.0,2.76,88.9 +2020-11-20 23:00:00,5.54,0.0,-0.0,0.0,2.62,92.25 +2020-11-21 00:00:00,5.6,0.0,-0.0,0.0,2.62,92.25 +2020-11-21 01:00:00,5.41,0.0,-0.0,0.0,2.55,88.85 +2020-11-21 02:00:00,5.29,0.0,-0.0,0.0,2.41,88.85 +2020-11-21 03:00:00,5.06,0.0,-0.0,0.0,2.34,92.25 +2020-11-21 04:00:00,4.84,0.0,-0.0,0.0,2.34,88.85 +2020-11-21 05:00:00,4.82,0.0,-0.0,0.0,2.28,88.85 +2020-11-21 06:00:00,4.54,0.0,-0.0,0.0,2.34,88.8 +2020-11-21 07:00:00,4.29,4.0,0.0,4.0,2.41,92.2 +2020-11-21 08:00:00,4.76,14.0,0.0,14.0,2.41,88.85 +2020-11-21 09:00:00,5.55,45.0,0.0,45.0,3.1,85.6 +2020-11-21 10:00:00,6.09,71.0,0.0,71.0,3.52,85.65 +2020-11-21 11:00:00,6.33,98.0,0.0,98.0,3.1,85.7 +2020-11-21 12:00:00,6.43,55.0,0.0,55.0,2.97,85.7 +2020-11-21 13:00:00,6.47,68.0,4.54,67.0,2.83,85.7 +2020-11-21 14:00:00,6.45,33.0,8.6,32.0,2.83,85.7 +2020-11-21 15:00:00,6.32,0.0,-0.0,0.0,2.0,85.7 +2020-11-21 16:00:00,6.19,0.0,-0.0,0.0,1.86,85.7 +2020-11-21 17:00:00,6.07,0.0,-0.0,0.0,1.93,88.9 +2020-11-21 18:00:00,5.94,0.0,-0.0,0.0,1.93,88.9 +2020-11-21 19:00:00,5.75,0.0,-0.0,0.0,1.59,92.3 +2020-11-21 20:00:00,5.71,0.0,-0.0,0.0,1.52,95.75 +2020-11-21 21:00:00,5.61,0.0,-0.0,0.0,1.66,99.4 +2020-11-21 22:00:00,5.51,0.0,-0.0,0.0,1.86,99.4 +2020-11-21 23:00:00,5.42,0.0,-0.0,0.0,1.66,99.4 +2020-11-22 00:00:00,5.3,0.0,-0.0,0.0,1.72,99.4 +2020-11-22 01:00:00,5.51,0.0,-0.0,0.0,1.72,99.4 +2020-11-22 02:00:00,5.27,0.0,-0.0,0.0,1.72,99.4 +2020-11-22 03:00:00,5.04,0.0,-0.0,0.0,1.52,99.4 +2020-11-22 04:00:00,4.83,0.0,-0.0,0.0,1.52,99.4 +2020-11-22 05:00:00,4.66,0.0,-0.0,0.0,1.59,99.4 +2020-11-22 06:00:00,4.54,0.0,-0.0,0.0,1.72,99.4 +2020-11-22 07:00:00,4.28,21.0,14.28,20.0,1.79,99.4 +2020-11-22 08:00:00,4.28,46.0,0.0,46.0,2.07,99.4 +2020-11-22 09:00:00,4.31,43.0,0.0,43.0,2.41,99.4 +2020-11-22 10:00:00,4.32,134.0,22.3,127.0,2.48,99.4 +2020-11-22 11:00:00,4.46,57.0,0.0,57.0,2.28,99.4 +2020-11-22 12:00:00,4.28,74.0,0.0,74.0,2.83,95.75 +2020-11-22 13:00:00,4.19,110.0,73.63,94.0,3.38,92.2 +2020-11-22 14:00:00,3.9,32.0,0.0,32.0,3.45,95.7 +2020-11-22 
15:00:00,3.42,0.0,-0.0,0.0,2.76,92.15 +2020-11-22 16:00:00,2.85,0.0,-0.0,0.0,2.76,92.15 +2020-11-22 17:00:00,2.21,0.0,-0.0,0.0,2.69,92.15 +2020-11-22 18:00:00,1.57,0.0,-0.0,0.0,2.41,95.65 +2020-11-22 19:00:00,1.74,0.0,-0.0,0.0,2.28,92.1 +2020-11-22 20:00:00,1.34,0.0,-0.0,0.0,1.93,92.05 +2020-11-22 21:00:00,1.07,0.0,-0.0,0.0,1.79,92.05 +2020-11-22 22:00:00,0.65,0.0,-0.0,0.0,1.72,92.05 +2020-11-22 23:00:00,-0.2,0.0,-0.0,0.0,1.86,92.0 +2020-11-23 00:00:00,-0.75,0.0,-0.0,0.0,1.93,91.95 +2020-11-23 01:00:00,-1.18,0.0,-0.0,0.0,2.0,91.95 +2020-11-23 02:00:00,-1.5,0.0,-0.0,0.0,2.14,91.9 +2020-11-23 03:00:00,-1.58,0.0,-0.0,0.0,2.21,91.9 +2020-11-23 04:00:00,-1.6,0.0,-0.0,0.0,2.34,88.35 +2020-11-23 05:00:00,-1.39,0.0,-0.0,0.0,2.55,88.35 +2020-11-23 06:00:00,-1.1,0.0,-0.0,0.0,2.62,88.4 +2020-11-23 07:00:00,-0.38,32.0,135.88,23.0,2.83,88.45 +2020-11-23 08:00:00,0.79,113.0,199.86,77.0,2.83,85.2 +2020-11-23 09:00:00,2.01,66.0,0.0,66.0,2.83,85.3 +2020-11-23 10:00:00,2.8,57.0,0.0,57.0,3.03,85.4 +2020-11-23 11:00:00,3.27,181.0,100.77,149.0,3.17,85.45 +2020-11-23 12:00:00,3.48,99.0,7.02,97.0,3.17,88.75 +2020-11-23 13:00:00,3.7,69.0,4.67,68.0,2.97,88.75 +2020-11-23 14:00:00,3.69,48.0,54.14,42.0,2.83,92.15 +2020-11-23 15:00:00,3.55,0.0,-0.0,0.0,2.62,92.15 +2020-11-23 16:00:00,3.38,0.0,-0.0,0.0,2.41,92.15 +2020-11-23 17:00:00,3.43,0.0,-0.0,0.0,2.28,95.7 +2020-11-23 18:00:00,3.55,0.0,-0.0,0.0,2.21,95.7 +2020-11-23 19:00:00,3.4,0.0,-0.0,0.0,2.21,95.7 +2020-11-23 20:00:00,3.47,0.0,-0.0,0.0,2.21,95.7 +2020-11-23 21:00:00,3.55,0.0,-0.0,0.0,2.28,95.7 +2020-11-23 22:00:00,3.64,0.0,-0.0,0.0,2.34,99.4 +2020-11-23 23:00:00,3.71,0.0,-0.0,0.0,2.34,99.4 +2020-11-24 00:00:00,3.8,0.0,-0.0,0.0,2.28,99.4 +2020-11-24 01:00:00,3.79,0.0,-0.0,0.0,2.28,99.4 +2020-11-24 02:00:00,3.81,0.0,-0.0,0.0,2.21,99.4 +2020-11-24 03:00:00,3.88,0.0,-0.0,0.0,2.21,99.4 +2020-11-24 04:00:00,3.86,0.0,-0.0,0.0,2.21,99.4 +2020-11-24 05:00:00,3.87,0.0,-0.0,0.0,2.14,99.4 +2020-11-24 06:00:00,3.91,0.0,-0.0,0.0,1.93,99.4 +2020-11-24 07:00:00,4.0,19.0,15.99,18.0,1.45,100.0 +2020-11-24 08:00:00,4.28,76.0,39.68,69.0,1.38,99.4 +2020-11-24 09:00:00,4.53,75.0,0.0,75.0,1.38,99.4 +2020-11-24 10:00:00,4.84,102.0,3.26,101.0,1.17,95.75 +2020-11-24 11:00:00,5.32,60.0,0.0,60.0,0.97,92.25 +2020-11-24 12:00:00,5.38,37.0,0.0,37.0,0.69,92.25 +2020-11-24 13:00:00,5.54,33.0,0.0,33.0,0.14,88.85 +2020-11-24 14:00:00,5.49,14.0,0.0,14.0,0.55,88.85 +2020-11-24 15:00:00,4.97,0.0,-0.0,0.0,1.24,92.25 +2020-11-24 16:00:00,4.28,0.0,-0.0,0.0,1.45,92.2 +2020-11-24 17:00:00,3.6,0.0,-0.0,0.0,1.66,95.7 +2020-11-24 18:00:00,3.37,0.0,-0.0,0.0,1.72,92.15 +2020-11-24 19:00:00,3.67,0.0,-0.0,0.0,1.93,92.15 +2020-11-24 20:00:00,3.03,0.0,-0.0,0.0,2.07,88.7 +2020-11-24 21:00:00,2.97,0.0,-0.0,0.0,2.21,85.4 +2020-11-24 22:00:00,2.67,0.0,-0.0,0.0,2.28,85.4 +2020-11-24 23:00:00,1.88,0.0,-0.0,0.0,2.34,88.65 +2020-11-25 00:00:00,1.19,0.0,-0.0,0.0,2.34,88.6 +2020-11-25 01:00:00,0.71,0.0,-0.0,0.0,2.34,88.55 +2020-11-25 02:00:00,0.3,0.0,-0.0,0.0,2.34,92.0 +2020-11-25 03:00:00,-0.31,0.0,-0.0,0.0,2.41,92.0 +2020-11-25 04:00:00,-0.7,0.0,-0.0,0.0,2.48,91.95 +2020-11-25 05:00:00,-0.87,0.0,-0.0,0.0,2.48,91.95 +2020-11-25 06:00:00,-0.87,0.0,-0.0,0.0,2.34,91.95 +2020-11-25 07:00:00,0.24,20.0,33.96,18.0,1.86,92.0 +2020-11-25 08:00:00,0.84,69.0,23.15,65.0,1.86,88.55 +2020-11-25 09:00:00,1.52,68.0,0.0,68.0,2.07,85.25 +2020-11-25 10:00:00,2.01,60.0,0.0,60.0,2.07,78.95 +2020-11-25 11:00:00,2.47,54.0,0.0,54.0,2.14,76.0 +2020-11-25 12:00:00,2.99,36.0,0.0,36.0,2.14,73.2 +2020-11-25 13:00:00,3.12,29.0,0.0,29.0,2.14,70.4 
+2020-11-25 14:00:00,3.08,29.0,0.0,29.0,2.28,70.4 +2020-11-25 15:00:00,2.68,0.0,-0.0,0.0,1.93,73.2 +2020-11-25 16:00:00,2.32,0.0,-0.0,0.0,2.21,76.0 +2020-11-25 17:00:00,1.94,0.0,-0.0,0.0,2.21,78.95 +2020-11-25 18:00:00,1.69,0.0,-0.0,0.0,2.28,78.95 +2020-11-25 19:00:00,1.25,0.0,-0.0,0.0,2.48,82.0 +2020-11-25 20:00:00,1.44,0.0,-0.0,0.0,2.41,82.0 +2020-11-25 21:00:00,1.35,0.0,-0.0,0.0,2.48,82.0 +2020-11-25 22:00:00,1.46,0.0,-0.0,0.0,2.48,82.0 +2020-11-25 23:00:00,1.79,0.0,-0.0,0.0,2.48,78.95 +2020-11-26 00:00:00,2.02,0.0,-0.0,0.0,2.41,82.05 +2020-11-26 01:00:00,2.43,0.0,-0.0,0.0,2.28,82.15 +2020-11-26 02:00:00,2.48,0.0,-0.0,0.0,2.28,82.15 +2020-11-26 03:00:00,2.61,0.0,-0.0,0.0,2.28,85.35 +2020-11-26 04:00:00,2.59,0.0,-0.0,0.0,2.14,85.35 +2020-11-26 05:00:00,2.22,0.0,-0.0,0.0,1.86,85.35 +2020-11-26 06:00:00,1.72,0.0,-0.0,0.0,1.86,88.65 +2020-11-26 07:00:00,1.8,24.0,90.37,19.0,2.0,88.65 +2020-11-26 08:00:00,1.93,42.0,0.0,42.0,1.86,85.3 +2020-11-26 09:00:00,2.28,67.0,0.0,67.0,1.93,82.15 +2020-11-26 10:00:00,2.62,95.0,3.33,94.0,1.86,82.15 +2020-11-26 11:00:00,3.05,106.0,3.25,105.0,1.72,76.1 +2020-11-26 12:00:00,3.42,84.0,0.0,84.0,1.66,73.3 +2020-11-26 13:00:00,3.88,41.0,0.0,41.0,1.72,70.5 +2020-11-26 14:00:00,3.79,10.0,0.0,10.0,1.24,70.5 +2020-11-26 15:00:00,3.32,0.0,-0.0,0.0,0.76,76.15 +2020-11-26 16:00:00,2.8,0.0,-0.0,0.0,1.1,79.1 +2020-11-26 17:00:00,2.15,0.0,-0.0,0.0,1.31,82.15 +2020-11-26 18:00:00,0.96,0.0,-0.0,0.0,1.59,92.05 +2020-11-26 19:00:00,0.53,0.0,-0.0,0.0,1.79,92.0 +2020-11-26 20:00:00,0.01,0.0,-0.0,0.0,1.93,92.0 +2020-11-26 21:00:00,-0.72,0.0,-0.0,0.0,2.0,88.45 +2020-11-26 22:00:00,-1.24,0.0,-0.0,0.0,2.0,88.4 +2020-11-26 23:00:00,-1.5,0.0,-0.0,0.0,2.07,88.35 +2020-11-27 00:00:00,-1.83,0.0,-0.0,0.0,2.14,88.35 +2020-11-27 01:00:00,-1.75,0.0,-0.0,0.0,2.21,88.35 +2020-11-27 02:00:00,-1.94,0.0,-0.0,0.0,2.41,88.3 +2020-11-27 03:00:00,-1.73,0.0,-0.0,0.0,2.62,88.35 +2020-11-27 04:00:00,-1.42,0.0,-0.0,0.0,2.83,88.35 +2020-11-27 05:00:00,-1.16,0.0,-0.0,0.0,2.97,88.4 +2020-11-27 06:00:00,-0.89,0.0,-0.0,0.0,3.03,88.4 +2020-11-27 07:00:00,-0.39,20.0,57.86,17.0,2.9,88.45 +2020-11-27 08:00:00,0.54,131.0,476.74,52.0,2.97,88.5 +2020-11-27 09:00:00,1.65,83.0,4.01,82.0,2.9,82.05 +2020-11-27 10:00:00,2.48,139.0,40.45,127.0,2.97,82.15 +2020-11-27 11:00:00,3.09,135.0,29.53,126.0,3.03,79.1 +2020-11-27 12:00:00,3.57,113.0,21.97,107.0,3.03,79.15 +2020-11-27 13:00:00,3.66,125.0,171.86,90.0,3.03,79.15 +2020-11-27 14:00:00,3.82,18.0,0.0,18.0,2.83,82.25 +2020-11-27 15:00:00,3.42,0.0,-0.0,0.0,2.69,79.15 +2020-11-27 16:00:00,3.4,0.0,-0.0,0.0,2.55,79.15 +2020-11-27 17:00:00,2.79,0.0,-0.0,0.0,2.34,82.2 +2020-11-27 18:00:00,1.86,0.0,-0.0,0.0,2.34,85.3 +2020-11-27 19:00:00,0.62,0.0,-0.0,0.0,2.07,95.65 +2020-11-27 20:00:00,-0.02,0.0,-0.0,0.0,2.14,92.0 +2020-11-27 21:00:00,-0.09,0.0,-0.0,0.0,1.93,92.0 +2020-11-27 22:00:00,-0.03,0.0,-0.0,0.0,1.72,92.0 +2020-11-27 23:00:00,-0.8,0.0,-0.0,0.0,1.72,91.95 +2020-11-28 00:00:00,-1.26,0.0,-0.0,0.0,1.72,91.95 +2020-11-28 01:00:00,-1.16,0.0,-0.0,0.0,1.66,91.95 +2020-11-28 02:00:00,-1.9,0.0,-0.0,0.0,1.79,95.55 +2020-11-28 03:00:00,-2.25,0.0,-0.0,0.0,1.86,91.85 +2020-11-28 04:00:00,-2.41,0.0,-0.0,0.0,1.86,91.85 +2020-11-28 05:00:00,-2.31,0.0,-0.0,0.0,1.66,88.3 +2020-11-28 06:00:00,-2.13,0.0,-0.0,0.0,1.52,88.3 +2020-11-28 07:00:00,-2.41,9.0,0.0,9.0,1.38,95.55 +2020-11-28 08:00:00,-0.67,58.0,12.32,56.0,0.69,95.6 +2020-11-28 09:00:00,0.46,76.0,4.07,75.0,0.55,95.65 +2020-11-28 10:00:00,1.35,87.0,0.0,87.0,0.21,88.6 +2020-11-28 11:00:00,1.73,112.0,6.63,110.0,0.21,85.3 +2020-11-28 
12:00:00,1.96,129.0,44.38,117.0,0.41,85.3 +2020-11-28 13:00:00,1.98,96.0,54.65,85.0,0.76,85.3 +2020-11-28 14:00:00,1.67,49.0,110.88,38.0,0.76,88.65 +2020-11-28 15:00:00,0.76,0.0,-0.0,0.0,0.9,92.05 +2020-11-28 16:00:00,0.0,0.0,-0.0,0.0,1.1,95.6 +2020-11-28 17:00:00,-0.88,0.0,-0.0,0.0,1.38,95.6 +2020-11-28 18:00:00,-1.18,0.0,-0.0,0.0,1.45,91.95 +2020-11-28 19:00:00,-0.76,0.0,-0.0,0.0,1.38,95.6 +2020-11-28 20:00:00,-0.61,0.0,-0.0,0.0,1.52,95.6 +2020-11-28 21:00:00,-0.69,0.0,-0.0,0.0,1.52,95.6 +2020-11-28 22:00:00,-1.36,0.0,-0.0,0.0,1.45,91.95 +2020-11-28 23:00:00,-1.4,0.0,-0.0,0.0,1.31,95.6 +2020-11-29 00:00:00,-1.94,0.0,-0.0,0.0,1.52,95.55 +2020-11-29 01:00:00,-1.79,0.0,-0.0,0.0,1.72,91.9 +2020-11-29 02:00:00,-1.57,0.0,-0.0,0.0,2.07,95.6 +2020-11-29 03:00:00,-1.55,0.0,-0.0,0.0,2.21,95.6 +2020-11-29 04:00:00,-1.24,0.0,-0.0,0.0,2.07,95.6 +2020-11-29 05:00:00,-0.5,0.0,-0.0,0.0,3.1,95.6 +2020-11-29 06:00:00,-0.17,0.0,-0.0,0.0,2.9,92.0 +2020-11-29 07:00:00,-1.14,8.0,0.0,8.0,2.0,95.6 +2020-11-29 08:00:00,-0.26,59.0,18.87,56.0,3.59,92.0 +2020-11-29 09:00:00,0.39,56.0,0.0,56.0,3.66,92.0 +2020-11-29 10:00:00,1.08,83.0,0.0,83.0,4.0,92.05 +2020-11-29 11:00:00,1.28,94.0,0.0,94.0,4.48,88.6 +2020-11-29 12:00:00,1.53,74.0,0.0,74.0,4.62,82.0 +2020-11-29 13:00:00,1.62,58.0,0.0,58.0,4.48,82.0 +2020-11-29 14:00:00,1.37,48.0,113.14,37.0,3.52,85.25 +2020-11-29 15:00:00,1.03,0.0,-0.0,0.0,3.72,92.05 +2020-11-29 16:00:00,0.94,0.0,-0.0,0.0,4.0,92.05 +2020-11-29 17:00:00,0.79,0.0,-0.0,0.0,4.28,92.05 +2020-11-29 18:00:00,0.73,0.0,-0.0,0.0,4.41,92.05 +2020-11-29 19:00:00,0.31,0.0,-0.0,0.0,3.72,92.0 +2020-11-29 20:00:00,0.17,0.0,-0.0,0.0,3.79,88.5 +2020-11-29 21:00:00,0.32,0.0,-0.0,0.0,3.79,92.0 +2020-11-29 22:00:00,0.5,0.0,-0.0,0.0,4.07,95.65 +2020-11-29 23:00:00,0.66,0.0,-0.0,0.0,4.34,92.05 +2020-11-30 00:00:00,0.69,0.0,-0.0,0.0,4.48,88.55 +2020-11-30 01:00:00,0.43,0.0,-0.0,0.0,4.28,88.5 +2020-11-30 02:00:00,0.33,0.0,-0.0,0.0,4.28,85.15 +2020-11-30 03:00:00,0.15,0.0,-0.0,0.0,4.34,81.9 +2020-11-30 04:00:00,0.34,0.0,-0.0,0.0,4.28,78.75 +2020-11-30 05:00:00,0.69,0.0,-0.0,0.0,4.34,78.8 +2020-11-30 06:00:00,1.17,0.0,-0.0,0.0,4.69,82.0 +2020-11-30 07:00:00,1.5,5.0,0.0,5.0,5.31,85.25 +2020-11-30 08:00:00,1.71,44.0,0.0,44.0,5.93,78.95 +2020-11-30 09:00:00,2.06,56.0,0.0,56.0,6.83,78.95 +2020-11-30 10:00:00,2.59,64.0,0.0,64.0,7.45,79.0 +2020-11-30 11:00:00,2.72,69.0,0.0,69.0,7.72,79.1 +2020-11-30 12:00:00,2.71,85.0,3.77,84.0,7.31,82.2 +2020-11-30 13:00:00,2.67,46.0,0.0,46.0,6.83,88.7 +2020-11-30 14:00:00,2.75,6.0,0.0,6.0,6.76,92.15 +2020-11-30 15:00:00,3.32,0.0,-0.0,0.0,6.55,92.15 +2020-11-30 16:00:00,5.02,0.0,-0.0,0.0,7.33,95.8 +2020-11-30 17:00:00,4.61,0.0,-0.0,0.0,6.85,95.6 +2020-11-30 18:00:00,4.2,0.0,-0.0,0.0,6.37,95.41 +2020-11-30 19:00:00,3.79,0.0,-0.0,0.0,5.89,95.21 +2020-11-30 20:00:00,3.37,0.0,-0.0,0.0,5.4,95.02 +2020-11-30 21:00:00,2.96,0.0,-0.0,0.0,4.92,94.82 +2020-11-30 22:00:00,2.55,0.0,-0.0,0.0,4.44,94.62 +2020-11-30 23:00:00,2.14,0.0,-0.0,0.0,3.96,94.43 +2020-12-01 00:00:00,1.73,0.0,-0.0,0.0,3.47,94.23 +2020-12-01 01:00:00,1.31,0.0,-0.0,0.0,2.99,94.04 +2020-12-01 02:00:00,0.9,0.0,-0.0,0.0,2.51,93.84 +2020-12-01 03:00:00,0.49,0.0,-0.0,0.0,2.03,93.65 +2020-12-01 04:00:00,0.08,0.0,-0.0,0.0,1.54,93.45 +2020-12-01 05:00:00,-0.33,0.0,-0.0,0.0,1.06,93.25 +2020-12-01 06:00:00,-0.75,0.0,-0.0,0.0,0.58,93.06 +2020-12-01 07:00:00,-1.16,2.0,0.0,2.0,0.1,92.86 +2020-12-01 08:00:00,0.83,73.0,65.5,63.0,1.31,92.05 +2020-12-01 09:00:00,2.35,150.0,173.32,109.0,1.31,85.35 +2020-12-01 10:00:00,2.75,168.0,112.42,136.0,2.48,73.2 
+2020-12-01 11:00:00,2.97,247.0,459.66,112.0,2.62,73.2 +2020-12-01 12:00:00,3.1,250.0,722.3,60.0,2.28,70.4 +2020-12-01 13:00:00,3.07,166.0,559.51,57.0,1.93,70.4 +2020-12-01 14:00:00,2.87,58.0,256.31,34.0,1.79,67.7 +2020-12-01 15:00:00,2.19,0.0,-0.0,0.0,1.52,75.95 +2020-12-01 16:00:00,1.25,0.0,-0.0,0.0,1.66,78.9 +2020-12-01 17:00:00,0.55,0.0,-0.0,0.0,1.59,85.15 +2020-12-01 18:00:00,0.22,0.0,-0.0,0.0,1.31,85.15 +2020-12-01 19:00:00,-0.13,0.0,-0.0,0.0,1.24,88.45 +2020-12-01 20:00:00,-0.17,0.0,-0.0,0.0,1.1,88.45 +2020-12-01 21:00:00,-0.04,0.0,-0.0,0.0,0.97,88.45 +2020-12-01 22:00:00,0.84,0.0,-0.0,0.0,0.69,81.95 +2020-12-01 23:00:00,0.75,0.0,-0.0,0.0,0.48,85.2 +2020-12-02 00:00:00,-0.35,0.0,-0.0,0.0,0.97,91.95 +2020-12-02 01:00:00,-0.53,0.0,-0.0,0.0,1.1,95.6 +2020-12-02 02:00:00,-0.71,0.0,-0.0,0.0,1.24,95.6 +2020-12-02 03:00:00,-0.68,0.0,-0.0,0.0,1.45,95.6 +2020-12-02 04:00:00,-0.26,0.0,-0.0,0.0,1.59,95.6 +2020-12-02 05:00:00,0.39,0.0,-0.0,0.0,2.34,95.65 +2020-12-02 06:00:00,0.42,0.0,-0.0,0.0,2.07,99.4 +2020-12-02 07:00:00,0.46,5.0,0.0,5.0,2.41,99.4 +2020-12-02 08:00:00,0.74,68.0,53.46,60.0,4.0,92.05 +2020-12-02 09:00:00,1.57,71.0,0.0,71.0,4.14,88.6 +2020-12-02 10:00:00,2.51,97.0,3.55,96.0,4.83,82.15 +2020-12-02 11:00:00,2.89,78.0,0.0,78.0,5.38,82.2 +2020-12-02 12:00:00,3.05,75.0,0.0,75.0,5.45,82.2 +2020-12-02 13:00:00,2.97,44.0,0.0,44.0,5.31,82.2 +2020-12-02 14:00:00,3.06,32.0,21.73,30.0,4.41,82.2 +2020-12-02 15:00:00,2.89,0.0,-0.0,0.0,3.86,85.4 +2020-12-02 16:00:00,2.77,0.0,-0.0,0.0,3.79,85.4 +2020-12-02 17:00:00,2.69,0.0,-0.0,0.0,4.14,88.65 +2020-12-02 18:00:00,2.6,0.0,-0.0,0.0,4.28,92.15 +2020-12-02 19:00:00,2.78,0.0,-0.0,0.0,5.24,92.15 +2020-12-02 20:00:00,2.23,0.0,-0.0,0.0,4.21,92.15 +2020-12-02 21:00:00,2.4,0.0,-0.0,0.0,4.34,88.65 +2020-12-02 22:00:00,2.37,0.0,-0.0,0.0,4.48,95.7 +2020-12-02 23:00:00,2.08,0.0,-0.0,0.0,4.76,95.65 +2020-12-03 00:00:00,1.67,0.0,-0.0,0.0,4.41,99.4 +2020-12-03 01:00:00,1.45,0.0,-0.0,0.0,4.21,95.65 +2020-12-03 02:00:00,1.17,0.0,-0.0,0.0,3.38,99.35 +2020-12-03 03:00:00,1.01,0.0,-0.0,0.0,2.9,99.35 +2020-12-03 04:00:00,1.01,0.0,-0.0,0.0,2.55,99.35 +2020-12-03 05:00:00,0.8,0.0,-0.0,0.0,2.07,95.65 +2020-12-03 06:00:00,0.48,0.0,-0.0,0.0,1.66,99.4 +2020-12-03 07:00:00,0.77,2.0,0.0,2.0,1.24,99.35 +2020-12-03 08:00:00,1.57,69.0,61.35,60.0,1.59,95.65 +2020-12-03 09:00:00,2.3,50.0,0.0,50.0,1.66,88.65 +2020-12-03 10:00:00,3.16,135.0,46.54,122.0,1.86,85.4 +2020-12-03 11:00:00,3.67,152.0,69.24,132.0,1.93,82.25 +2020-12-03 12:00:00,3.85,191.0,309.18,111.0,2.21,82.3 +2020-12-03 13:00:00,4.04,0.0,0.0,0.0,2.41,85.5 +2020-12-03 14:00:00,3.85,48.0,154.65,34.0,1.93,88.8 +2020-12-03 15:00:00,3.61,0.0,-0.0,0.0,1.93,95.7 +2020-12-03 16:00:00,3.51,0.0,-0.0,0.0,1.93,95.7 +2020-12-03 17:00:00,3.45,0.0,-0.0,0.0,2.07,95.7 +2020-12-03 18:00:00,3.43,0.0,-0.0,0.0,2.28,95.7 +2020-12-03 19:00:00,3.8,0.0,-0.0,0.0,2.41,95.75 +2020-12-03 20:00:00,3.71,0.0,-0.0,0.0,2.62,92.2 +2020-12-03 21:00:00,3.72,0.0,-0.0,0.0,2.55,92.2 +2020-12-03 22:00:00,3.71,0.0,-0.0,0.0,2.41,92.2 +2020-12-03 23:00:00,3.46,0.0,-0.0,0.0,2.34,92.15 +2020-12-04 00:00:00,3.44,0.0,-0.0,0.0,2.21,92.15 +2020-12-04 01:00:00,2.73,0.0,-0.0,0.0,2.0,88.7 +2020-12-04 02:00:00,2.29,0.0,-0.0,0.0,1.86,88.65 +2020-12-04 03:00:00,2.04,0.0,-0.0,0.0,1.72,88.65 +2020-12-04 04:00:00,1.67,0.0,-0.0,0.0,1.66,92.05 +2020-12-04 05:00:00,1.05,0.0,-0.0,0.0,1.72,92.05 +2020-12-04 06:00:00,0.97,0.0,-0.0,0.0,1.66,92.05 +2020-12-04 07:00:00,0.9,4.0,0.0,4.0,1.93,92.05 +2020-12-04 08:00:00,0.98,113.0,500.5,41.0,1.72,92.05 +2020-12-04 
09:00:00,2.1,205.0,666.3,53.0,1.86,88.65 +2020-12-04 10:00:00,3.22,254.0,679.18,66.0,2.14,79.15 +2020-12-04 11:00:00,4.15,247.0,537.37,93.0,2.41,76.25 +2020-12-04 12:00:00,4.83,233.0,669.83,61.0,2.48,73.45 +2020-12-04 13:00:00,4.98,158.0,554.26,53.0,2.34,73.45 +2020-12-04 14:00:00,4.43,64.0,448.68,24.0,2.14,73.45 +2020-12-04 15:00:00,3.19,0.0,-0.0,0.0,2.55,82.2 +2020-12-04 16:00:00,2.26,0.0,-0.0,0.0,2.9,82.15 +2020-12-04 17:00:00,1.81,0.0,-0.0,0.0,3.03,85.3 +2020-12-04 18:00:00,1.59,0.0,-0.0,0.0,3.17,85.25 +2020-12-04 19:00:00,1.28,0.0,-0.0,0.0,3.24,82.0 +2020-12-04 20:00:00,1.09,0.0,-0.0,0.0,3.31,81.95 +2020-12-04 21:00:00,0.67,0.0,-0.0,0.0,3.31,81.9 +2020-12-04 22:00:00,0.35,0.0,-0.0,0.0,3.31,78.75 +2020-12-04 23:00:00,0.08,0.0,-0.0,0.0,3.31,81.8 +2020-12-05 00:00:00,-0.17,0.0,-0.0,0.0,3.45,78.65 +2020-12-05 01:00:00,-0.48,0.0,-0.0,0.0,3.59,81.75 +2020-12-05 02:00:00,-0.54,0.0,-0.0,0.0,3.72,81.75 +2020-12-05 03:00:00,-0.55,0.0,-0.0,0.0,3.79,81.75 +2020-12-05 04:00:00,-0.72,0.0,-0.0,0.0,3.79,81.75 +2020-12-05 05:00:00,-0.92,0.0,-0.0,0.0,3.72,81.7 +2020-12-05 06:00:00,-1.07,0.0,-0.0,0.0,3.59,81.7 +2020-12-05 07:00:00,-1.18,3.0,0.0,3.0,3.66,81.7 +2020-12-05 08:00:00,-0.81,110.0,474.79,43.0,3.66,81.75 +2020-12-05 09:00:00,0.14,207.0,682.85,53.0,3.52,81.8 +2020-12-05 10:00:00,1.33,259.0,714.26,63.0,3.38,75.85 +2020-12-05 11:00:00,2.42,270.0,724.23,64.0,3.1,73.1 +2020-12-05 12:00:00,3.11,243.0,745.3,53.0,2.9,70.4 +2020-12-05 13:00:00,3.23,162.0,606.72,48.0,2.55,70.5 +2020-12-05 14:00:00,2.53,62.0,432.32,24.0,2.48,76.0 +2020-12-05 15:00:00,1.24,0.0,-0.0,0.0,2.69,78.9 +2020-12-05 16:00:00,0.5,0.0,-0.0,0.0,2.69,81.9 +2020-12-05 17:00:00,0.23,0.0,-0.0,0.0,2.83,81.9 +2020-12-05 18:00:00,0.1,0.0,-0.0,0.0,2.97,81.8 +2020-12-05 19:00:00,-1.02,0.0,-0.0,0.0,3.1,81.7 +2020-12-05 20:00:00,-1.09,0.0,-0.0,0.0,2.97,78.5 +2020-12-05 21:00:00,-1.22,0.0,-0.0,0.0,2.76,78.5 +2020-12-05 22:00:00,-1.19,0.0,-0.0,0.0,2.76,75.45 +2020-12-05 23:00:00,-1.05,0.0,-0.0,0.0,2.83,75.45 +2020-12-06 00:00:00,-1.0,0.0,-0.0,0.0,2.83,75.45 +2020-12-06 01:00:00,-0.8,0.0,-0.0,0.0,2.83,72.6 +2020-12-06 02:00:00,-0.47,0.0,-0.0,0.0,2.9,72.6 +2020-12-06 03:00:00,-0.44,0.0,-0.0,0.0,3.03,69.75 +2020-12-06 04:00:00,-0.03,0.0,-0.0,0.0,3.31,64.45 +2020-12-06 05:00:00,0.38,0.0,-0.0,0.0,3.52,59.6 +2020-12-06 06:00:00,0.57,0.0,-0.0,0.0,3.45,57.2 +2020-12-06 07:00:00,0.06,3.0,0.0,3.0,3.72,61.9 +2020-12-06 08:00:00,0.64,109.0,476.61,43.0,3.72,62.0 +2020-12-06 09:00:00,2.09,203.0,663.57,55.0,3.72,62.35 +2020-12-06 10:00:00,3.56,223.0,433.61,105.0,3.72,60.3 +2020-12-06 11:00:00,4.7,216.0,332.83,122.0,3.79,60.5 +2020-12-06 12:00:00,5.32,217.0,521.32,85.0,3.86,58.3 +2020-12-06 13:00:00,5.27,148.0,445.11,65.0,3.72,58.3 +2020-12-06 14:00:00,4.56,30.0,23.05,28.0,3.72,58.2 +2020-12-06 15:00:00,3.66,0.0,-0.0,0.0,4.0,62.7 +2020-12-06 16:00:00,3.29,0.0,-0.0,0.0,4.34,65.2 +2020-12-06 17:00:00,3.29,0.0,-0.0,0.0,4.62,67.8 +2020-12-06 18:00:00,3.56,0.0,-0.0,0.0,4.69,73.3 +2020-12-06 19:00:00,3.11,0.0,-0.0,0.0,4.76,70.4 +2020-12-06 20:00:00,3.65,0.0,-0.0,0.0,4.9,73.3 +2020-12-06 21:00:00,4.32,0.0,-0.0,0.0,5.03,70.7 +2020-12-06 22:00:00,4.73,0.0,-0.0,0.0,5.03,76.3 +2020-12-06 23:00:00,5.04,0.0,-0.0,0.0,5.17,82.35 +2020-12-07 00:00:00,5.12,0.0,-0.0,0.0,5.31,88.85 +2020-12-07 01:00:00,5.73,0.0,-0.0,0.0,5.45,85.65 +2020-12-07 02:00:00,5.59,0.0,-0.0,0.0,5.52,88.85 +2020-12-07 03:00:00,5.57,0.0,-0.0,0.0,5.45,88.85 +2020-12-07 04:00:00,5.79,0.0,-0.0,0.0,5.52,85.65 +2020-12-07 05:00:00,6.11,0.0,-0.0,0.0,5.59,85.65 +2020-12-07 06:00:00,6.38,0.0,-0.0,0.0,5.86,85.7 
+2020-12-07 07:00:00,7.05,0.0,0.0,0.0,6.07,85.75 +2020-12-07 08:00:00,7.23,61.0,58.85,53.0,6.0,85.75 +2020-12-07 09:00:00,7.39,65.0,0.0,65.0,5.72,85.8 +2020-12-07 10:00:00,7.59,94.0,7.41,92.0,6.0,85.8 +2020-12-07 11:00:00,7.81,39.0,0.0,39.0,5.93,82.7 +2020-12-07 12:00:00,7.82,33.0,0.0,33.0,6.0,82.7 +2020-12-07 13:00:00,7.67,77.0,32.4,71.0,5.38,85.8 +2020-12-07 14:00:00,7.27,15.0,0.0,15.0,4.97,82.65 +2020-12-07 15:00:00,7.0,0.0,-0.0,0.0,4.83,85.75 +2020-12-07 16:00:00,6.8,0.0,-0.0,0.0,4.69,85.75 +2020-12-07 17:00:00,6.73,0.0,-0.0,0.0,4.62,85.7 +2020-12-07 18:00:00,6.7,0.0,-0.0,0.0,4.62,85.7 +2020-12-07 19:00:00,6.72,0.0,-0.0,0.0,4.62,88.95 +2020-12-07 20:00:00,6.66,0.0,-0.0,0.0,4.55,85.7 +2020-12-07 21:00:00,6.58,0.0,-0.0,0.0,4.41,85.7 +2020-12-07 22:00:00,6.46,0.0,-0.0,0.0,4.28,85.7 +2020-12-07 23:00:00,6.31,0.0,-0.0,0.0,4.21,82.55 +2020-12-08 00:00:00,6.27,0.0,-0.0,0.0,4.0,82.55 +2020-12-08 01:00:00,5.97,0.0,-0.0,0.0,3.86,85.65 +2020-12-08 02:00:00,5.94,0.0,-0.0,0.0,3.72,85.65 +2020-12-08 03:00:00,5.98,0.0,-0.0,0.0,3.72,85.65 +2020-12-08 04:00:00,5.95,0.0,-0.0,0.0,3.79,85.65 +2020-12-08 05:00:00,5.96,0.0,-0.0,0.0,4.0,85.65 +2020-12-08 06:00:00,5.73,0.0,-0.0,0.0,4.07,82.5 +2020-12-08 07:00:00,6.4,0.0,0.0,0.0,4.21,82.55 +2020-12-08 08:00:00,6.69,84.0,239.69,52.0,4.34,82.55 +2020-12-08 09:00:00,7.19,105.0,59.53,92.0,4.55,79.55 +2020-12-08 10:00:00,7.9,219.0,459.08,96.0,3.86,76.8 +2020-12-08 11:00:00,8.39,159.0,107.62,129.0,4.21,74.05 +2020-12-08 12:00:00,8.68,70.0,0.0,70.0,4.07,76.85 +2020-12-08 13:00:00,8.93,87.0,59.78,76.0,5.86,71.4 +2020-12-08 14:00:00,8.95,29.0,23.56,27.0,5.45,71.4 +2020-12-08 15:00:00,8.83,0.0,-0.0,0.0,5.31,71.4 +2020-12-08 16:00:00,8.98,0.0,-0.0,0.0,5.17,74.15 +2020-12-08 17:00:00,9.26,0.0,-0.0,0.0,5.03,76.95 +2020-12-08 18:00:00,9.45,0.0,-0.0,0.0,4.97,77.0 +2020-12-08 19:00:00,9.92,0.0,-0.0,0.0,4.83,74.3 +2020-12-08 20:00:00,9.97,0.0,-0.0,0.0,4.69,74.3 +2020-12-08 21:00:00,9.89,0.0,-0.0,0.0,4.41,77.1 +2020-12-08 22:00:00,9.72,0.0,-0.0,0.0,4.07,79.9 +2020-12-08 23:00:00,9.55,0.0,-0.0,0.0,3.66,79.9 +2020-12-09 00:00:00,9.32,0.0,-0.0,0.0,3.72,82.9 +2020-12-09 01:00:00,9.19,0.0,-0.0,0.0,3.86,89.1 +2020-12-09 02:00:00,8.72,0.0,-0.0,0.0,3.93,85.9 +2020-12-09 03:00:00,8.15,0.0,-0.0,0.0,4.41,85.85 +2020-12-09 04:00:00,7.43,0.0,-0.0,0.0,4.21,82.65 +2020-12-09 05:00:00,6.84,0.0,-0.0,0.0,3.93,82.6 +2020-12-09 06:00:00,6.13,0.0,-0.0,0.0,3.66,85.65 +2020-12-09 07:00:00,6.3,0.0,0.0,0.0,3.52,82.55 +2020-12-09 08:00:00,6.88,104.0,525.99,35.0,3.79,82.6 +2020-12-09 09:00:00,7.59,145.0,235.86,94.0,3.93,79.65 +2020-12-09 10:00:00,8.09,176.0,203.01,122.0,4.14,76.8 +2020-12-09 11:00:00,8.5,195.0,259.84,123.0,3.86,71.35 +2020-12-09 12:00:00,8.83,57.0,0.0,57.0,4.0,68.8 +2020-12-09 13:00:00,9.07,37.0,0.0,37.0,4.14,66.25 +2020-12-09 14:00:00,8.36,7.0,0.0,7.0,3.72,68.7 +2020-12-09 15:00:00,7.7,0.0,-0.0,0.0,3.52,71.15 +2020-12-09 16:00:00,7.03,0.0,-0.0,0.0,3.45,73.8 +2020-12-09 17:00:00,6.59,0.0,-0.0,0.0,3.59,79.5 +2020-12-09 18:00:00,6.27,0.0,-0.0,0.0,3.59,79.5 +2020-12-09 19:00:00,6.07,0.0,-0.0,0.0,3.59,85.65 +2020-12-09 20:00:00,6.13,0.0,-0.0,0.0,4.21,85.65 +2020-12-09 21:00:00,5.83,0.0,-0.0,0.0,4.69,85.65 +2020-12-09 22:00:00,5.47,0.0,-0.0,0.0,4.55,88.85 +2020-12-09 23:00:00,4.95,0.0,-0.0,0.0,4.55,92.25 +2020-12-10 00:00:00,4.48,0.0,-0.0,0.0,4.97,92.25 +2020-12-10 01:00:00,4.16,0.0,-0.0,0.0,4.9,95.75 +2020-12-10 02:00:00,3.55,0.0,-0.0,0.0,4.9,92.15 +2020-12-10 03:00:00,3.05,0.0,-0.0,0.0,4.9,92.15 +2020-12-10 04:00:00,2.68,0.0,-0.0,0.0,5.1,95.7 +2020-12-10 
05:00:00,2.35,0.0,-0.0,0.0,4.69,92.15 +2020-12-10 06:00:00,2.27,0.0,-0.0,0.0,4.69,92.15 +2020-12-10 07:00:00,2.34,0.0,0.0,0.0,4.34,92.15 +2020-12-10 08:00:00,2.5,105.0,550.55,34.0,4.0,92.15 +2020-12-10 09:00:00,2.84,136.0,182.08,97.0,4.76,88.7 +2020-12-10 10:00:00,3.19,193.0,287.67,117.0,4.55,88.7 +2020-12-10 11:00:00,3.69,237.0,515.3,95.0,4.34,85.45 +2020-12-10 12:00:00,4.02,207.0,484.77,87.0,4.34,79.2 +2020-12-10 13:00:00,4.01,56.0,5.49,55.0,3.45,79.2 +2020-12-10 14:00:00,3.82,34.0,47.92,30.0,2.62,79.2 +2020-12-10 15:00:00,3.39,0.0,-0.0,0.0,2.07,85.45 +2020-12-10 16:00:00,2.71,0.0,-0.0,0.0,1.86,85.4 +2020-12-10 17:00:00,2.4,0.0,-0.0,0.0,1.66,88.65 +2020-12-10 18:00:00,1.63,0.0,-0.0,0.0,1.86,92.05 +2020-12-10 19:00:00,0.66,0.0,-0.0,0.0,2.14,92.0 +2020-12-10 20:00:00,0.11,0.0,-0.0,0.0,2.28,92.0 +2020-12-10 21:00:00,-0.23,0.0,-0.0,0.0,2.41,88.45 +2020-12-10 22:00:00,-0.36,0.0,-0.0,0.0,2.55,88.45 +2020-12-10 23:00:00,-0.28,0.0,-0.0,0.0,2.83,85.1 +2020-12-11 00:00:00,-0.18,0.0,-0.0,0.0,3.24,85.1 +2020-12-11 01:00:00,-0.57,0.0,-0.0,0.0,3.66,81.75 +2020-12-11 02:00:00,-0.6,0.0,-0.0,0.0,3.86,81.75 +2020-12-11 03:00:00,-0.53,0.0,-0.0,0.0,3.86,81.75 +2020-12-11 04:00:00,-0.63,0.0,-0.0,0.0,3.79,81.75 +2020-12-11 05:00:00,-0.76,0.0,-0.0,0.0,3.79,78.6 +2020-12-11 06:00:00,-0.82,0.0,-0.0,0.0,3.86,78.6 +2020-12-11 07:00:00,-0.44,0.0,0.0,0.0,4.21,78.6 +2020-12-11 08:00:00,-0.04,88.0,315.33,48.0,4.21,78.65 +2020-12-11 09:00:00,0.89,129.0,150.76,97.0,4.07,75.75 +2020-12-11 10:00:00,1.83,126.0,45.71,114.0,3.79,70.2 +2020-12-11 11:00:00,2.62,217.0,375.69,114.0,3.93,67.6 +2020-12-11 12:00:00,3.01,227.0,669.55,62.0,3.86,67.7 +2020-12-11 13:00:00,2.95,144.0,452.49,62.0,3.72,67.7 +2020-12-11 14:00:00,2.34,62.0,494.26,21.0,3.79,70.3 +2020-12-11 15:00:00,1.41,0.0,-0.0,0.0,3.72,72.95 +2020-12-11 16:00:00,0.78,0.0,-0.0,0.0,3.66,75.75 +2020-12-11 17:00:00,0.53,0.0,-0.0,0.0,3.66,75.7 +2020-12-11 18:00:00,0.36,0.0,-0.0,0.0,3.52,75.7 +2020-12-11 19:00:00,0.18,0.0,-0.0,0.0,3.59,75.7 +2020-12-11 20:00:00,0.24,0.0,-0.0,0.0,3.31,72.75 +2020-12-11 21:00:00,0.11,0.0,-0.0,0.0,3.1,75.6 +2020-12-11 22:00:00,0.13,0.0,-0.0,0.0,2.9,72.65 +2020-12-11 23:00:00,0.0,0.0,-0.0,0.0,2.83,72.65 +2020-12-12 00:00:00,-0.09,0.0,-0.0,0.0,2.69,72.65 +2020-12-12 01:00:00,-0.17,0.0,-0.0,0.0,2.55,75.6 +2020-12-12 02:00:00,-0.42,0.0,-0.0,0.0,2.55,78.6 +2020-12-12 03:00:00,-0.33,0.0,-0.0,0.0,2.48,78.6 +2020-12-12 04:00:00,0.05,0.0,-0.0,0.0,2.34,78.65 +2020-12-12 05:00:00,0.23,0.0,-0.0,0.0,2.21,78.75 +2020-12-12 06:00:00,0.34,0.0,-0.0,0.0,2.07,81.9 +2020-12-12 07:00:00,0.71,0.0,0.0,0.0,2.0,88.55 +2020-12-12 08:00:00,1.03,51.0,40.05,46.0,1.93,92.05 +2020-12-12 09:00:00,1.62,92.0,33.26,85.0,1.86,92.05 +2020-12-12 10:00:00,2.63,61.0,0.0,61.0,2.07,88.65 +2020-12-12 11:00:00,3.49,125.0,36.65,115.0,2.34,82.25 +2020-12-12 12:00:00,4.18,77.0,4.07,76.0,2.55,79.2 +2020-12-12 13:00:00,4.57,76.0,33.23,70.0,2.07,76.3 +2020-12-12 14:00:00,4.33,54.0,363.37,24.0,1.59,79.3 +2020-12-12 15:00:00,3.45,0.0,-0.0,0.0,1.79,82.25 +2020-12-12 16:00:00,2.66,0.0,-0.0,0.0,2.07,88.65 +2020-12-12 17:00:00,2.36,0.0,-0.0,0.0,2.28,85.35 +2020-12-12 18:00:00,2.08,0.0,-0.0,0.0,2.48,88.65 +2020-12-12 19:00:00,1.24,0.0,-0.0,0.0,2.62,88.6 +2020-12-12 20:00:00,0.78,0.0,-0.0,0.0,2.76,92.05 +2020-12-12 21:00:00,0.83,0.0,-0.0,0.0,2.76,92.05 +2020-12-12 22:00:00,0.62,0.0,-0.0,0.0,2.83,92.0 +2020-12-12 23:00:00,0.52,0.0,-0.0,0.0,2.97,92.0 +2020-12-13 00:00:00,0.4,0.0,-0.0,0.0,3.17,92.0 +2020-12-13 01:00:00,0.15,0.0,-0.0,0.0,3.38,92.0 +2020-12-13 02:00:00,-0.05,0.0,-0.0,0.0,3.52,92.0 +2020-12-13 
03:00:00,-0.3,0.0,-0.0,0.0,3.79,88.45 +2020-12-13 04:00:00,-0.41,0.0,-0.0,0.0,4.14,91.95 +2020-12-13 05:00:00,-0.27,0.0,-0.0,0.0,4.34,88.45 +2020-12-13 06:00:00,-0.27,0.0,-0.0,0.0,4.62,88.45 +2020-12-13 07:00:00,-0.22,0.0,0.0,0.0,4.62,88.45 +2020-12-13 08:00:00,-0.06,49.0,32.53,45.0,4.41,88.45 +2020-12-13 09:00:00,0.14,108.0,76.64,92.0,5.1,88.45 +2020-12-13 10:00:00,0.55,143.0,92.49,119.0,5.59,85.15 +2020-12-13 11:00:00,1.36,184.0,217.13,125.0,5.59,75.85 +2020-12-13 12:00:00,1.57,159.0,196.23,111.0,5.31,78.9 +2020-12-13 13:00:00,1.84,50.0,0.0,50.0,4.41,75.95 +2020-12-13 14:00:00,1.72,44.0,170.13,30.0,3.72,75.95 +2020-12-13 15:00:00,1.43,0.0,-0.0,0.0,3.66,78.9 +2020-12-13 16:00:00,1.34,0.0,-0.0,0.0,3.66,78.9 +2020-12-13 17:00:00,1.26,0.0,-0.0,0.0,3.86,78.9 +2020-12-13 18:00:00,1.29,0.0,-0.0,0.0,3.93,82.0 +2020-12-13 19:00:00,2.01,0.0,-0.0,0.0,3.59,85.3 +2020-12-13 20:00:00,2.59,0.0,-0.0,0.0,3.45,85.35 +2020-12-13 21:00:00,3.23,0.0,-0.0,0.0,3.45,82.25 +2020-12-13 22:00:00,3.26,0.0,-0.0,0.0,3.66,82.25 +2020-12-13 23:00:00,3.33,0.0,-0.0,0.0,3.79,82.25 +2020-12-14 00:00:00,3.33,0.0,-0.0,0.0,4.0,82.25 +2020-12-14 01:00:00,3.67,0.0,-0.0,0.0,4.28,88.75 +2020-12-14 02:00:00,3.62,0.0,-0.0,0.0,4.41,92.15 +2020-12-14 03:00:00,3.93,0.0,-0.0,0.0,4.48,88.8 +2020-12-14 04:00:00,4.16,0.0,-0.0,0.0,4.41,88.8 +2020-12-14 05:00:00,4.14,0.0,-0.0,0.0,4.14,85.5 +2020-12-14 06:00:00,3.81,0.0,-0.0,0.0,3.59,85.5 +2020-12-14 07:00:00,3.29,0.0,0.0,0.0,3.66,85.45 +2020-12-14 08:00:00,3.37,33.0,0.0,33.0,3.86,85.45 +2020-12-14 09:00:00,3.68,110.0,91.71,91.0,4.48,85.45 +2020-12-14 10:00:00,3.08,32.0,0.0,32.0,5.03,92.15 +2020-12-14 11:00:00,3.33,22.0,0.0,22.0,5.1,88.75 +2020-12-14 12:00:00,4.81,34.0,0.0,34.0,6.28,88.85 +2020-12-14 13:00:00,4.55,34.0,0.0,34.0,8.14,88.85 +2020-12-14 14:00:00,4.46,52.0,328.69,25.0,6.34,85.55 +2020-12-14 15:00:00,4.57,0.0,-0.0,0.0,5.52,82.35 +2020-12-14 16:00:00,4.38,0.0,-0.0,0.0,5.66,79.3 +2020-12-14 17:00:00,4.2,0.0,-0.0,0.0,5.72,76.25 +2020-12-14 18:00:00,3.96,0.0,-0.0,0.0,5.1,70.6 +2020-12-14 19:00:00,4.17,0.0,-0.0,0.0,4.55,79.2 +2020-12-14 20:00:00,4.58,0.0,-0.0,0.0,5.59,79.3 +2020-12-14 21:00:00,4.97,0.0,-0.0,0.0,6.28,82.35 +2020-12-14 22:00:00,5.15,0.0,-0.0,0.0,6.41,82.35 +2020-12-14 23:00:00,5.1,0.0,-0.0,0.0,6.48,79.3 +2020-12-15 00:00:00,4.67,0.0,-0.0,0.0,6.07,73.45 +2020-12-15 01:00:00,4.22,0.0,-0.0,0.0,4.76,70.7 +2020-12-15 02:00:00,3.6,0.0,-0.0,0.0,3.52,76.15 +2020-12-15 03:00:00,3.09,0.0,-0.0,0.0,2.76,79.1 +2020-12-15 04:00:00,2.81,0.0,-0.0,0.0,2.9,79.1 +2020-12-15 05:00:00,2.88,0.0,-0.0,0.0,3.38,82.2 +2020-12-15 06:00:00,3.49,0.0,-0.0,0.0,4.14,82.25 +2020-12-15 07:00:00,4.76,0.0,0.0,0.0,4.48,88.85 +2020-12-15 08:00:00,5.86,19.0,0.0,19.0,5.24,88.9 +2020-12-15 09:00:00,7.08,41.0,0.0,41.0,5.31,89.0 +2020-12-15 10:00:00,8.48,122.0,46.7,110.0,5.59,85.9 +2020-12-15 11:00:00,10.32,141.0,74.13,121.0,5.72,80.05 +2020-12-15 12:00:00,10.87,83.0,4.11,82.0,5.66,80.1 +2020-12-15 13:00:00,10.37,20.0,0.0,20.0,5.1,83.0 +2020-12-15 14:00:00,9.22,38.0,97.42,30.0,4.14,85.95 +2020-12-15 15:00:00,8.6,0.0,-0.0,0.0,4.0,79.75 +2020-12-15 16:00:00,8.34,0.0,-0.0,0.0,4.07,76.85 +2020-12-15 17:00:00,7.88,0.0,-0.0,0.0,3.93,73.95 +2020-12-15 18:00:00,7.25,0.0,-0.0,0.0,3.79,76.7 +2020-12-15 19:00:00,6.67,0.0,-0.0,0.0,3.72,82.55 +2020-12-15 20:00:00,6.56,0.0,-0.0,0.0,3.79,82.55 +2020-12-15 21:00:00,6.42,0.0,-0.0,0.0,3.72,82.55 +2020-12-15 22:00:00,6.23,0.0,-0.0,0.0,3.52,82.55 +2020-12-15 23:00:00,5.94,0.0,-0.0,0.0,3.24,85.65 +2020-12-16 00:00:00,5.31,0.0,-0.0,0.0,2.9,85.6 +2020-12-16 
01:00:00,4.62,0.0,-0.0,0.0,2.55,85.55 +2020-12-16 02:00:00,4.58,0.0,-0.0,0.0,2.34,88.85 +2020-12-16 03:00:00,4.27,0.0,-0.0,0.0,2.28,85.55 +2020-12-16 04:00:00,4.74,0.0,-0.0,0.0,2.34,88.85 +2020-12-16 05:00:00,5.21,0.0,-0.0,0.0,2.48,92.25 +2020-12-16 06:00:00,5.7,0.0,-0.0,0.0,2.62,88.85 +2020-12-16 07:00:00,5.69,0.0,0.0,0.0,2.83,92.25 +2020-12-16 08:00:00,6.38,42.0,16.95,40.0,2.9,88.95 +2020-12-16 09:00:00,7.21,29.0,0.0,29.0,2.83,92.35 +2020-12-16 10:00:00,7.69,162.0,175.84,117.0,2.97,89.0 +2020-12-16 11:00:00,8.31,220.0,457.2,97.0,3.17,85.9 +2020-12-16 12:00:00,8.7,137.0,111.19,110.0,3.52,89.1 +2020-12-16 13:00:00,9.05,93.0,94.91,76.0,3.52,85.95 +2020-12-16 14:00:00,8.69,34.0,60.81,29.0,3.31,85.9 +2020-12-16 15:00:00,7.72,0.0,-0.0,0.0,3.24,89.0 +2020-12-16 16:00:00,7.05,0.0,-0.0,0.0,3.24,89.0 +2020-12-16 17:00:00,6.78,0.0,-0.0,0.0,3.38,89.0 +2020-12-16 18:00:00,6.62,0.0,-0.0,0.0,3.45,88.95 +2020-12-16 19:00:00,6.23,0.0,-0.0,0.0,3.52,92.3 +2020-12-16 20:00:00,5.95,0.0,-0.0,0.0,3.59,92.3 +2020-12-16 21:00:00,5.56,0.0,-0.0,0.0,3.45,92.25 +2020-12-16 22:00:00,5.45,0.0,-0.0,0.0,3.52,92.25 +2020-12-16 23:00:00,5.17,0.0,-0.0,0.0,3.38,95.75 +2020-12-17 00:00:00,5.12,0.0,-0.0,0.0,3.52,92.25 +2020-12-17 01:00:00,5.31,0.0,-0.0,0.0,3.59,88.85 +2020-12-17 02:00:00,5.17,0.0,-0.0,0.0,3.66,92.25 +2020-12-17 03:00:00,5.33,0.0,-0.0,0.0,3.79,88.85 +2020-12-17 04:00:00,5.41,0.0,-0.0,0.0,4.0,85.6 +2020-12-17 05:00:00,5.48,0.0,-0.0,0.0,4.21,85.6 +2020-12-17 06:00:00,5.1,0.0,-0.0,0.0,4.28,88.85 +2020-12-17 07:00:00,4.4,0.0,0.0,0.0,4.34,85.55 +2020-12-17 08:00:00,4.66,72.0,240.27,44.0,4.14,85.55 +2020-12-17 09:00:00,5.35,167.0,521.7,61.0,3.45,85.6 +2020-12-17 10:00:00,6.69,206.0,451.02,91.0,3.52,82.55 +2020-12-17 11:00:00,7.77,161.0,141.59,123.0,3.45,73.95 +2020-12-17 12:00:00,8.64,175.0,309.28,100.0,3.1,74.05 +2020-12-17 13:00:00,9.0,91.0,83.76,76.0,2.97,74.15 +2020-12-17 14:00:00,8.52,52.0,339.63,24.0,2.9,76.85 +2020-12-17 15:00:00,7.22,0.0,-0.0,0.0,3.03,82.6 +2020-12-17 16:00:00,6.42,0.0,-0.0,0.0,3.03,82.55 +2020-12-17 17:00:00,5.66,0.0,-0.0,0.0,3.1,88.85 +2020-12-17 18:00:00,5.25,0.0,-0.0,0.0,3.17,88.85 +2020-12-17 19:00:00,4.88,0.0,-0.0,0.0,3.52,88.85 +2020-12-17 20:00:00,5.08,0.0,-0.0,0.0,3.52,88.85 +2020-12-17 21:00:00,5.62,0.0,-0.0,0.0,3.45,85.6 +2020-12-17 22:00:00,5.88,0.0,-0.0,0.0,3.52,82.5 +2020-12-17 23:00:00,6.1,0.0,-0.0,0.0,3.45,82.5 +2020-12-18 00:00:00,6.03,0.0,-0.0,0.0,3.31,82.5 +2020-12-18 01:00:00,5.82,0.0,-0.0,0.0,3.38,82.5 +2020-12-18 02:00:00,5.77,0.0,-0.0,0.0,3.52,85.65 +2020-12-18 03:00:00,6.42,0.0,-0.0,0.0,3.79,82.55 +2020-12-18 04:00:00,8.03,0.0,-0.0,0.0,3.93,73.95 +2020-12-18 05:00:00,9.11,0.0,-0.0,0.0,3.72,68.8 +2020-12-18 06:00:00,9.5,0.0,-0.0,0.0,3.59,68.9 +2020-12-18 07:00:00,9.49,0.0,-0.0,0.0,3.31,71.5 +2020-12-18 08:00:00,9.63,77.0,329.8,39.0,3.1,77.0 +2020-12-18 09:00:00,9.91,79.0,19.79,75.0,2.83,79.95 +2020-12-18 10:00:00,10.43,75.0,0.0,75.0,2.34,80.05 +2020-12-18 11:00:00,11.39,91.0,3.73,90.0,2.14,77.3 +2020-12-18 12:00:00,11.33,73.0,0.0,73.0,1.79,77.3 +2020-12-18 13:00:00,10.88,80.0,44.65,72.0,1.52,80.1 +2020-12-18 14:00:00,10.51,12.0,0.0,12.0,1.59,83.0 +2020-12-18 15:00:00,10.01,0.0,-0.0,0.0,1.45,86.0 +2020-12-18 16:00:00,9.1,0.0,-0.0,0.0,1.38,92.45 +2020-12-18 17:00:00,8.11,0.0,-0.0,0.0,1.45,95.8 +2020-12-18 18:00:00,6.89,0.0,-0.0,0.0,1.52,95.8 +2020-12-18 19:00:00,4.99,0.0,-0.0,0.0,1.93,99.4 +2020-12-18 20:00:00,4.09,0.0,-0.0,0.0,1.79,95.75 +2020-12-18 21:00:00,3.88,0.0,-0.0,0.0,1.93,95.75 +2020-12-18 22:00:00,4.19,0.0,-0.0,0.0,2.14,95.75 +2020-12-18 
23:00:00,4.07,0.0,-0.0,0.0,2.21,95.75 +2020-12-19 00:00:00,4.33,0.0,-0.0,0.0,2.48,95.75 +2020-12-19 01:00:00,4.11,0.0,-0.0,0.0,2.62,95.75 +2020-12-19 02:00:00,4.37,0.0,-0.0,0.0,2.76,95.75 +2020-12-19 03:00:00,4.42,0.0,-0.0,0.0,2.69,95.75 +2020-12-19 04:00:00,4.32,0.0,-0.0,0.0,2.69,95.75 +2020-12-19 05:00:00,4.12,0.0,-0.0,0.0,2.83,95.75 +2020-12-19 06:00:00,4.3,0.0,-0.0,0.0,2.97,95.75 +2020-12-19 07:00:00,4.2,0.0,-0.0,0.0,3.1,95.75 +2020-12-19 08:00:00,4.56,89.0,517.44,30.0,3.52,92.25 +2020-12-19 09:00:00,5.36,157.0,432.55,70.0,3.38,92.25 +2020-12-19 10:00:00,6.39,224.0,607.56,70.0,3.72,92.3 +2020-12-19 11:00:00,6.93,208.0,381.33,106.0,3.93,92.35 +2020-12-19 12:00:00,7.17,139.0,119.73,110.0,4.14,92.35 +2020-12-19 13:00:00,7.15,56.0,5.57,55.0,3.86,92.35 +2020-12-19 14:00:00,6.9,25.0,12.01,24.0,4.0,92.35 +2020-12-19 15:00:00,6.67,0.0,-0.0,0.0,3.86,95.8 +2020-12-19 16:00:00,6.66,0.0,-0.0,0.0,3.79,95.8 +2020-12-19 17:00:00,6.62,0.0,-0.0,0.0,3.72,95.8 +2020-12-19 18:00:00,6.61,0.0,-0.0,0.0,3.72,92.3 +2020-12-19 19:00:00,6.26,0.0,-0.0,0.0,4.07,88.95 +2020-12-19 20:00:00,6.19,0.0,-0.0,0.0,4.14,92.3 +2020-12-19 21:00:00,5.79,0.0,-0.0,0.0,4.07,88.9 +2020-12-19 22:00:00,5.71,0.0,-0.0,0.0,4.07,95.75 +2020-12-19 23:00:00,5.65,0.0,-0.0,0.0,4.14,95.75 +2020-12-20 00:00:00,5.83,0.0,-0.0,0.0,4.14,92.3 +2020-12-20 01:00:00,6.27,0.0,-0.0,0.0,4.28,92.3 +2020-12-20 02:00:00,6.32,0.0,-0.0,0.0,4.34,92.3 +2020-12-20 03:00:00,6.47,0.0,-0.0,0.0,4.41,95.8 +2020-12-20 04:00:00,6.44,0.0,-0.0,0.0,4.34,95.8 +2020-12-20 05:00:00,6.31,0.0,-0.0,0.0,4.41,92.3 +2020-12-20 06:00:00,6.25,0.0,-0.0,0.0,4.55,92.3 +2020-12-20 07:00:00,5.88,0.0,-0.0,0.0,4.97,92.3 +2020-12-20 08:00:00,5.94,83.0,451.55,32.0,4.83,92.3 +2020-12-20 09:00:00,6.61,176.0,639.07,48.0,4.34,92.3 +2020-12-20 10:00:00,7.65,229.0,668.21,60.0,4.55,89.0 +2020-12-20 11:00:00,8.58,245.0,681.06,63.0,4.69,85.9 +2020-12-20 12:00:00,9.08,216.0,643.93,60.0,4.48,82.85 +2020-12-20 13:00:00,9.4,157.0,645.47,41.0,4.41,82.9 +2020-12-20 14:00:00,9.22,41.0,131.2,30.0,4.62,85.95 +2020-12-20 15:00:00,8.53,0.0,-0.0,0.0,4.62,89.1 +2020-12-20 16:00:00,7.99,0.0,-0.0,0.0,4.69,89.05 +2020-12-20 17:00:00,8.04,0.0,-0.0,0.0,4.55,89.05 +2020-12-20 18:00:00,7.87,0.0,-0.0,0.0,4.14,89.05 +2020-12-20 19:00:00,7.3,0.0,-0.0,0.0,3.66,89.0 +2020-12-20 20:00:00,7.02,0.0,-0.0,0.0,3.31,92.35 +2020-12-20 21:00:00,6.86,0.0,-0.0,0.0,2.76,92.35 +2020-12-20 22:00:00,6.65,0.0,-0.0,0.0,2.34,95.8 +2020-12-20 23:00:00,6.79,0.0,-0.0,0.0,2.0,92.35 +2020-12-21 00:00:00,7.07,0.0,-0.0,0.0,2.07,92.35 +2020-12-21 01:00:00,7.95,0.0,-0.0,0.0,2.97,89.05 +2020-12-21 02:00:00,8.38,0.0,-0.0,0.0,3.52,89.1 +2020-12-21 03:00:00,8.32,0.0,-0.0,0.0,3.59,85.9 +2020-12-21 04:00:00,7.97,0.0,-0.0,0.0,3.31,89.05 +2020-12-21 05:00:00,7.32,0.0,-0.0,0.0,2.55,89.0 +2020-12-21 06:00:00,6.42,0.0,-0.0,0.0,2.41,88.95 +2020-12-21 07:00:00,5.79,0.0,-0.0,0.0,2.41,88.9 +2020-12-21 08:00:00,5.25,84.0,473.28,31.0,2.21,92.25 +2020-12-21 09:00:00,6.39,177.0,651.38,47.0,2.28,88.95 +2020-12-21 10:00:00,7.31,233.0,701.03,56.0,2.0,85.8 +2020-12-21 11:00:00,7.96,241.0,647.68,68.0,1.79,82.7 +2020-12-21 12:00:00,8.31,196.0,457.84,85.0,1.38,76.85 +2020-12-21 13:00:00,8.29,129.0,338.55,68.0,1.17,76.85 +2020-12-21 14:00:00,7.62,49.0,236.53,29.0,1.52,82.65 +2020-12-21 15:00:00,6.64,0.0,-0.0,0.0,1.72,88.95 +2020-12-21 16:00:00,5.61,0.0,-0.0,0.0,1.52,92.25 +2020-12-21 17:00:00,4.59,0.0,-0.0,0.0,1.59,92.25 +2020-12-21 18:00:00,4.14,0.0,-0.0,0.0,1.79,95.75 +2020-12-21 19:00:00,4.01,0.0,-0.0,0.0,1.93,95.75 +2020-12-21 20:00:00,3.69,0.0,-0.0,0.0,2.0,99.4 
+2020-12-21 21:00:00,3.4,0.0,-0.0,0.0,2.07,95.7 +2020-12-21 22:00:00,3.21,0.0,-0.0,0.0,2.0,95.7 +2020-12-21 23:00:00,3.06,0.0,-0.0,0.0,2.07,99.4 +2020-12-22 00:00:00,2.9,0.0,-0.0,0.0,2.14,99.4 +2020-12-22 01:00:00,3.03,0.0,-0.0,0.0,2.28,99.4 +2020-12-22 02:00:00,3.3,0.0,-0.0,0.0,2.48,95.7 +2020-12-22 03:00:00,3.68,0.0,-0.0,0.0,2.83,99.4 +2020-12-22 04:00:00,3.64,0.0,-0.0,0.0,2.97,95.7 +2020-12-22 05:00:00,3.39,0.0,-0.0,0.0,2.83,95.7 +2020-12-22 06:00:00,3.44,0.0,-0.0,0.0,2.9,95.7 +2020-12-22 07:00:00,3.98,0.0,-0.0,0.0,2.97,95.75 +2020-12-22 08:00:00,4.22,74.0,314.91,39.0,3.03,92.25 +2020-12-22 09:00:00,4.74,110.0,110.56,88.0,3.17,92.25 +2020-12-22 10:00:00,5.45,174.0,249.82,111.0,3.1,88.85 +2020-12-22 11:00:00,6.06,112.0,22.46,106.0,3.03,85.65 +2020-12-22 12:00:00,6.39,166.0,243.04,107.0,2.69,82.55 +2020-12-22 13:00:00,6.4,116.0,221.27,76.0,2.41,82.55 +2020-12-22 14:00:00,6.08,42.0,117.12,32.0,2.34,85.65 +2020-12-22 15:00:00,5.56,0.0,-0.0,0.0,2.48,88.85 +2020-12-22 16:00:00,5.14,0.0,-0.0,0.0,2.48,92.25 +2020-12-22 17:00:00,4.97,0.0,-0.0,0.0,2.48,92.25 +2020-12-22 18:00:00,4.79,0.0,-0.0,0.0,2.41,92.25 +2020-12-22 19:00:00,4.4,0.0,-0.0,0.0,2.55,92.25 +2020-12-22 20:00:00,4.6,0.0,-0.0,0.0,2.28,92.25 +2020-12-22 21:00:00,4.49,0.0,-0.0,0.0,2.14,92.25 +2020-12-22 22:00:00,4.54,0.0,-0.0,0.0,2.0,92.25 +2020-12-22 23:00:00,5.25,0.0,-0.0,0.0,1.93,88.85 +2020-12-23 00:00:00,5.89,0.0,-0.0,0.0,2.34,92.3 +2020-12-23 01:00:00,6.38,0.0,-0.0,0.0,3.03,92.3 +2020-12-23 02:00:00,6.47,0.0,-0.0,0.0,3.45,92.3 +2020-12-23 03:00:00,6.4,0.0,-0.0,0.0,3.79,92.3 +2020-12-23 04:00:00,6.27,0.0,-0.0,0.0,3.93,92.3 +2020-12-23 05:00:00,6.22,0.0,-0.0,0.0,4.07,95.75 +2020-12-23 06:00:00,6.15,0.0,-0.0,0.0,4.48,95.75 +2020-12-23 07:00:00,6.04,0.0,-0.0,0.0,4.76,92.3 +2020-12-23 08:00:00,5.94,24.0,0.0,24.0,4.76,92.3 +2020-12-23 09:00:00,5.88,78.0,20.15,74.0,4.9,92.3 +2020-12-23 10:00:00,5.97,142.0,107.14,115.0,4.83,92.3 +2020-12-23 11:00:00,6.18,113.0,22.45,107.0,5.03,88.9 +2020-12-23 12:00:00,6.37,74.0,0.0,74.0,5.17,85.7 +2020-12-23 13:00:00,6.49,59.0,5.51,58.0,4.76,85.7 +2020-12-23 14:00:00,6.4,22.0,0.0,22.0,4.0,85.7 +2020-12-23 15:00:00,5.89,0.0,-0.0,0.0,3.79,85.65 +2020-12-23 16:00:00,5.64,0.0,-0.0,0.0,3.79,92.25 +2020-12-23 17:00:00,5.47,0.0,-0.0,0.0,4.0,92.25 +2020-12-23 18:00:00,5.4,0.0,-0.0,0.0,4.21,92.25 +2020-12-23 19:00:00,5.14,0.0,-0.0,0.0,4.0,92.25 +2020-12-23 20:00:00,4.95,0.0,-0.0,0.0,4.07,92.25 +2020-12-23 21:00:00,4.9,0.0,-0.0,0.0,4.07,92.25 +2020-12-23 22:00:00,5.03,0.0,-0.0,0.0,4.0,92.25 +2020-12-23 23:00:00,5.09,0.0,-0.0,0.0,4.07,92.25 +2020-12-24 00:00:00,5.18,0.0,-0.0,0.0,4.28,92.25 +2020-12-24 01:00:00,5.36,0.0,-0.0,0.0,4.34,88.85 +2020-12-24 02:00:00,5.38,0.0,-0.0,0.0,4.41,88.85 +2020-12-24 03:00:00,5.32,0.0,-0.0,0.0,4.48,88.85 +2020-12-24 04:00:00,5.23,0.0,-0.0,0.0,4.14,88.85 +2020-12-24 05:00:00,5.06,0.0,-0.0,0.0,3.93,92.25 +2020-12-24 06:00:00,4.96,0.0,-0.0,0.0,3.79,88.85 +2020-12-24 07:00:00,4.96,0.0,-0.0,0.0,3.38,92.25 +2020-12-24 08:00:00,5.02,47.0,54.63,41.0,3.38,92.25 +2020-12-24 09:00:00,5.34,96.0,60.55,84.0,3.38,88.85 +2020-12-24 10:00:00,5.79,197.0,388.94,99.0,2.83,85.65 +2020-12-24 11:00:00,6.45,201.0,332.7,112.0,3.31,79.5 +2020-12-24 12:00:00,6.75,192.0,410.24,92.0,3.31,68.4 +2020-12-24 13:00:00,6.83,120.0,241.32,76.0,2.41,71.05 +2020-12-24 14:00:00,6.68,34.0,45.76,30.0,2.14,73.7 +2020-12-24 15:00:00,6.12,0.0,-0.0,0.0,2.41,76.5 +2020-12-24 16:00:00,5.41,0.0,-0.0,0.0,2.97,79.35 +2020-12-24 17:00:00,4.32,0.0,-0.0,0.0,3.38,88.85 +2020-12-24 18:00:00,3.81,0.0,-0.0,0.0,3.59,92.2 +2020-12-24 
19:00:00,4.46,0.0,-0.0,0.0,3.17,92.25 +2020-12-24 20:00:00,5.39,0.0,-0.0,0.0,3.93,92.25 +2020-12-24 21:00:00,6.4,0.0,-0.0,0.0,5.03,88.95 +2020-12-24 22:00:00,6.57,0.0,-0.0,0.0,4.9,88.95 +2020-12-24 23:00:00,6.45,0.0,-0.0,0.0,4.69,88.95 +2020-12-25 00:00:00,6.33,0.0,-0.0,0.0,4.62,92.3 +2020-12-25 01:00:00,6.16,0.0,-0.0,0.0,4.41,95.75 +2020-12-25 02:00:00,6.15,0.0,-0.0,0.0,4.28,92.3 +2020-12-25 03:00:00,6.13,0.0,-0.0,0.0,4.69,92.3 +2020-12-25 04:00:00,5.91,0.0,-0.0,0.0,4.76,92.3 +2020-12-25 05:00:00,5.97,0.0,-0.0,0.0,4.55,92.3 +2020-12-25 06:00:00,5.84,0.0,-0.0,0.0,4.0,92.3 +2020-12-25 07:00:00,5.85,0.0,-0.0,0.0,4.0,95.75 +2020-12-25 08:00:00,5.86,24.0,0.0,24.0,4.0,95.75 +2020-12-25 09:00:00,5.7,91.0,50.51,81.0,3.52,99.4 +2020-12-25 10:00:00,5.42,80.0,3.97,79.0,2.97,95.75 +2020-12-25 11:00:00,5.13,42.0,0.0,42.0,3.17,99.4 +2020-12-25 12:00:00,4.84,41.0,0.0,41.0,2.97,99.4 +2020-12-25 13:00:00,4.63,14.0,0.0,14.0,3.1,95.75 +2020-12-25 14:00:00,4.47,16.0,0.0,16.0,3.1,95.75 +2020-12-25 15:00:00,4.26,0.0,-0.0,0.0,2.76,95.75 +2020-12-25 16:00:00,4.12,0.0,-0.0,0.0,3.24,95.75 +2020-12-25 17:00:00,3.99,0.0,-0.0,0.0,3.31,95.75 +2020-12-25 18:00:00,3.91,0.0,-0.0,0.0,2.83,95.75 +2020-12-25 19:00:00,3.91,0.0,-0.0,0.0,3.45,95.75 +2020-12-25 20:00:00,3.87,0.0,-0.0,0.0,3.52,95.75 +2020-12-25 21:00:00,3.93,0.0,-0.0,0.0,2.55,95.75 +2020-12-25 22:00:00,4.07,0.0,-0.0,0.0,3.1,95.75 +2020-12-25 23:00:00,3.95,0.0,-0.0,0.0,3.66,95.75 +2020-12-26 00:00:00,3.77,0.0,-0.0,0.0,3.93,95.75 +2020-12-26 01:00:00,3.78,0.0,-0.0,0.0,3.52,95.75 +2020-12-26 02:00:00,3.68,0.0,-0.0,0.0,2.62,99.4 +2020-12-26 03:00:00,3.49,0.0,-0.0,0.0,2.48,99.4 +2020-12-26 04:00:00,3.21,0.0,-0.0,0.0,2.21,99.4 +2020-12-26 05:00:00,3.1,0.0,-0.0,0.0,2.07,99.4 +2020-12-26 06:00:00,3.1,0.0,-0.0,0.0,2.21,99.4 +2020-12-26 07:00:00,3.48,0.0,-0.0,0.0,2.41,99.4 +2020-12-26 08:00:00,3.26,53.0,91.74,43.0,2.34,99.4 +2020-12-26 09:00:00,3.53,101.0,75.8,86.0,2.83,99.4 +2020-12-26 10:00:00,4.0,137.0,87.21,115.0,3.86,88.8 +2020-12-26 11:00:00,4.43,119.0,29.8,111.0,3.59,88.85 +2020-12-26 12:00:00,4.93,103.0,24.46,97.0,3.86,85.55 +2020-12-26 13:00:00,4.83,85.0,48.81,76.0,3.31,85.55 +2020-12-26 14:00:00,4.72,38.0,66.74,32.0,2.83,85.55 +2020-12-26 15:00:00,4.01,0.0,-0.0,0.0,2.48,85.5 +2020-12-26 16:00:00,3.24,0.0,-0.0,0.0,2.28,88.75 +2020-12-26 17:00:00,2.53,0.0,-0.0,0.0,2.14,88.65 +2020-12-26 18:00:00,2.44,0.0,-0.0,0.0,2.0,88.65 +2020-12-26 19:00:00,2.67,0.0,-0.0,0.0,2.14,88.65 +2020-12-26 20:00:00,2.63,0.0,-0.0,0.0,2.07,88.65 +2020-12-26 21:00:00,2.52,0.0,-0.0,0.0,2.0,88.65 +2020-12-26 22:00:00,2.59,0.0,-0.0,0.0,1.79,85.35 +2020-12-26 23:00:00,1.99,0.0,-0.0,0.0,1.59,88.65 +2020-12-27 00:00:00,0.95,0.0,-0.0,0.0,1.45,92.05 +2020-12-27 01:00:00,0.02,0.0,-0.0,0.0,1.45,92.0 +2020-12-27 02:00:00,-0.35,0.0,-0.0,0.0,1.45,91.95 +2020-12-27 03:00:00,-0.67,0.0,-0.0,0.0,1.45,88.45 +2020-12-27 04:00:00,-0.29,0.0,-0.0,0.0,1.24,88.45 +2020-12-27 05:00:00,0.29,0.0,-0.0,0.0,1.1,85.15 +2020-12-27 06:00:00,0.38,0.0,-0.0,0.0,1.17,85.15 +2020-12-27 07:00:00,0.03,0.0,-0.0,0.0,1.31,88.45 +2020-12-27 08:00:00,0.44,49.0,64.35,42.0,1.17,88.5 +2020-12-27 09:00:00,1.37,100.0,70.73,86.0,1.31,92.05 +2020-12-27 10:00:00,2.51,102.0,15.83,98.0,2.28,92.15 +2020-12-27 11:00:00,2.99,118.0,26.01,111.0,3.1,85.4 +2020-12-27 12:00:00,2.81,62.0,0.0,62.0,3.38,85.4 +2020-12-27 13:00:00,2.76,51.0,0.0,51.0,3.38,82.2 +2020-12-27 14:00:00,2.69,35.0,43.8,31.0,3.17,85.35 +2020-12-27 15:00:00,2.13,0.0,-0.0,0.0,2.28,92.1 +2020-12-27 16:00:00,1.7,0.0,-0.0,0.0,2.07,92.1 +2020-12-27 17:00:00,1.26,0.0,-0.0,0.0,2.0,95.65 
+2020-12-27 18:00:00,1.19,0.0,-0.0,0.0,2.07,95.65 +2020-12-27 19:00:00,1.63,0.0,-0.0,0.0,3.24,95.65 +2020-12-27 20:00:00,1.48,0.0,-0.0,0.0,2.9,99.4 +2020-12-27 21:00:00,1.25,0.0,-0.0,0.0,2.41,99.4 +2020-12-27 22:00:00,1.14,0.0,-0.0,0.0,2.21,99.35 +2020-12-27 23:00:00,1.1,0.0,-0.0,0.0,2.62,99.35 +2020-12-28 00:00:00,0.84,0.0,-0.0,0.0,2.76,95.65 +2020-12-28 01:00:00,0.39,0.0,-0.0,0.0,2.55,99.4 +2020-12-28 02:00:00,0.12,0.0,-0.0,0.0,2.41,99.4 +2020-12-28 03:00:00,-0.1,0.0,-0.0,0.0,2.48,99.4 +2020-12-28 04:00:00,-0.12,0.0,-0.0,0.0,2.76,99.4 +2020-12-28 05:00:00,-0.27,0.0,-0.0,0.0,2.9,95.6 +2020-12-28 06:00:00,-0.37,0.0,-0.0,0.0,2.83,99.4 +2020-12-28 07:00:00,-0.37,0.0,-0.0,0.0,2.62,95.6 +2020-12-28 08:00:00,-0.38,49.0,64.42,42.0,2.55,95.6 +2020-12-28 09:00:00,-0.12,105.0,85.81,88.0,2.62,92.0 +2020-12-28 10:00:00,0.06,124.0,47.42,112.0,2.83,88.45 +2020-12-28 11:00:00,0.65,126.0,37.06,116.0,3.1,78.75 +2020-12-28 12:00:00,0.76,86.0,4.04,85.0,2.83,75.75 +2020-12-28 13:00:00,0.92,53.0,0.0,53.0,2.9,75.75 +2020-12-28 14:00:00,0.74,40.0,64.6,34.0,3.1,75.75 +2020-12-28 15:00:00,0.22,0.0,-0.0,0.0,2.55,81.9 +2020-12-28 16:00:00,-0.37,0.0,-0.0,0.0,2.28,88.45 +2020-12-28 17:00:00,-0.63,0.0,-0.0,0.0,2.07,88.45 +2020-12-28 18:00:00,-1.02,0.0,-0.0,0.0,1.59,88.4 +2020-12-28 19:00:00,-1.03,0.0,-0.0,0.0,1.72,81.7 +2020-12-28 20:00:00,-1.13,0.0,-0.0,0.0,1.45,81.7 +2020-12-28 21:00:00,-1.27,0.0,-0.0,0.0,1.38,81.7 +2020-12-28 22:00:00,-1.65,0.0,-0.0,0.0,1.17,84.95 +2020-12-28 23:00:00,-1.97,0.0,-0.0,0.0,1.03,88.3 +2020-12-29 00:00:00,-1.76,0.0,-0.0,0.0,0.9,84.95 +2020-12-29 01:00:00,-2.16,0.0,-0.0,0.0,0.62,84.85 +2020-12-29 02:00:00,-2.37,0.0,-0.0,0.0,0.9,88.25 +2020-12-29 03:00:00,-1.69,0.0,-0.0,0.0,0.41,78.45 +2020-12-29 04:00:00,-1.76,0.0,-0.0,0.0,0.34,81.65 +2020-12-29 05:00:00,-1.91,0.0,-0.0,0.0,0.28,84.85 +2020-12-29 06:00:00,-1.99,0.0,-0.0,0.0,0.55,84.85 +2020-12-29 07:00:00,-3.95,0.0,-0.0,0.0,1.59,91.75 +2020-12-29 08:00:00,-3.32,77.0,386.44,35.0,1.72,88.25 +2020-12-29 09:00:00,-1.59,168.0,554.44,58.0,1.45,84.95 +2020-12-29 10:00:00,-0.26,240.0,725.31,56.0,1.66,72.65 +2020-12-29 11:00:00,0.59,258.0,734.93,59.0,1.24,69.95 +2020-12-29 12:00:00,1.13,227.0,663.98,62.0,1.31,64.65 +2020-12-29 13:00:00,1.35,161.0,594.51,49.0,1.79,62.25 +2020-12-29 14:00:00,0.78,60.0,296.2,32.0,1.59,67.3 +2020-12-29 15:00:00,-0.02,0.0,-0.0,0.0,1.79,69.85 +2020-12-29 16:00:00,-0.68,0.0,-0.0,0.0,2.07,72.6 +2020-12-29 17:00:00,-0.86,0.0,-0.0,0.0,2.0,72.5 +2020-12-29 18:00:00,-1.05,0.0,-0.0,0.0,2.0,72.5 +2020-12-29 19:00:00,-1.07,0.0,-0.0,0.0,2.21,66.9 +2020-12-29 20:00:00,-1.15,0.0,-0.0,0.0,2.07,69.65 +2020-12-29 21:00:00,-1.1,0.0,-0.0,0.0,2.0,69.65 +2020-12-29 22:00:00,-0.9,0.0,-0.0,0.0,2.07,69.65 +2020-12-29 23:00:00,-0.67,0.0,-0.0,0.0,2.28,67.0 +2020-12-30 00:00:00,-0.5,0.0,-0.0,0.0,2.41,64.35 +2020-12-30 01:00:00,-0.54,0.0,-0.0,0.0,2.55,61.8 +2020-12-30 02:00:00,-0.37,0.0,-0.0,0.0,2.55,61.8 +2020-12-30 03:00:00,-0.15,0.0,-0.0,0.0,2.41,59.45 +2020-12-30 04:00:00,-0.29,0.0,-0.0,0.0,2.41,59.45 +2020-12-30 05:00:00,-0.58,0.0,-0.0,0.0,2.55,59.35 +2020-12-30 06:00:00,-0.24,0.0,-0.0,0.0,2.62,54.8 +2020-12-30 07:00:00,1.03,0.0,-0.0,0.0,2.55,52.85 +2020-12-30 08:00:00,1.35,49.0,73.51,41.0,2.62,53.0 +2020-12-30 09:00:00,2.61,93.0,55.33,82.0,2.55,53.25 +2020-12-30 10:00:00,4.22,166.0,196.53,116.0,2.48,51.7 +2020-12-30 11:00:00,5.75,99.0,7.36,97.0,2.48,49.95 +2020-12-30 12:00:00,6.65,53.0,0.0,53.0,2.76,50.05 +2020-12-30 13:00:00,7.08,108.0,136.87,82.0,2.9,48.25 +2020-12-30 14:00:00,6.49,62.0,332.29,30.0,2.9,46.25 +2020-12-30 
15:00:00,5.48,0.0,-0.0,0.0,2.97,46.0 +2020-12-30 16:00:00,4.93,0.0,-0.0,0.0,3.03,45.85 +2020-12-30 17:00:00,5.23,0.0,-0.0,0.0,3.1,44.15 +2020-12-30 18:00:00,5.75,0.0,-0.0,0.0,3.17,40.9 +2020-12-30 19:00:00,5.42,0.0,-0.0,0.0,3.31,42.45 +2020-12-30 20:00:00,5.89,0.0,-0.0,0.0,3.38,40.9 +2020-12-30 21:00:00,6.27,0.0,-0.0,0.0,3.59,39.4 +2020-12-30 22:00:00,6.29,0.0,-0.0,0.0,4.0,39.4 +2020-12-30 23:00:00,6.09,0.0,-0.0,0.0,4.14,39.25 +2020-12-31 00:00:00,6.01,0.0,-0.0,0.0,4.14,39.25 +2020-12-31 01:00:00,5.77,0.0,-0.0,0.0,4.28,37.7 +2020-12-31 02:00:00,5.72,0.0,-0.0,0.0,4.55,36.05 +2020-12-31 03:00:00,5.65,0.0,-0.0,0.0,4.83,34.6 +2020-12-31 04:00:00,5.31,0.0,-0.0,0.0,5.03,33.2 +2020-12-31 05:00:00,4.78,0.0,-0.0,0.0,5.17,35.9 +2020-12-31 06:00:00,4.15,0.0,-0.0,0.0,5.17,42.15 +2020-12-31 07:00:00,4.0,0.0,-0.0,0.0,5.1,51.55 +2020-12-31 08:00:00,3.89,19.0,0.0,19.0,4.76,62.8 +2020-12-31 09:00:00,3.93,21.0,0.0,21.0,4.21,73.4 +2020-12-31 10:00:00,4.33,47.0,0.0,47.0,4.41,79.3 +2020-12-31 11:00:00,4.68,58.0,0.0,58.0,5.45,82.35 +2020-12-31 12:00:00,4.72,42.0,0.0,42.0,4.55,85.55 +2020-12-31 13:00:00,4.87,124.0,219.13,82.0,5.1,79.3 +2020-12-31 14:00:00,4.62,22.0,0.0,22.0,4.0,82.35 +2020-12-31 15:00:00,4.38,0.0,-0.0,0.0,3.79,82.35 +2020-12-31 16:00:00,5.13,0.0,-0.0,0.0,3.61,85.9 +2020-12-31 17:00:00,4.26,0.0,-0.0,0.0,3.52,86.18 +2020-12-31 18:00:00,3.4,0.0,-0.0,0.0,3.42,86.46 +2020-12-31 19:00:00,2.54,0.0,-0.0,0.0,3.33,86.74 +2020-12-31 20:00:00,1.68,0.0,-0.0,0.0,3.24,87.02 +2020-12-31 21:00:00,0.82,0.0,-0.0,0.0,3.15,87.31 +2020-12-31 22:00:00,-0.04,0.0,-0.0,0.0,3.05,87.59 +2020-12-31 23:00:00,-0.91,0.0,-0.0,0.0,2.96,87.87 diff --git a/flixopt/structure.py b/flixopt/structure.py index 69925ae77..7996565e8 100644 --- a/flixopt/structure.py +++ b/flixopt/structure.py @@ -10,6 +10,7 @@ import logging import pathlib import re +import warnings from dataclasses import dataclass from difflib import get_close_matches from typing import ( @@ -255,7 +256,15 @@ def _add_scenario_equality_constraints(self): @property def solution(self): """Build solution dataset, reindexing to timesteps_extra for consistency.""" - solution = super().solution + # Suppress the linopy warning about coordinate mismatch. + # This warning is expected when storage charge_state has one more timestep than other variables. 
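+        # (Benign per the docstring above: the dataset is reindexed to
+        # timesteps_extra afterwards, so callers see consistent coordinates.)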
+ with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', + category=UserWarning, + message='Coordinates across variables not equal', + ) + solution = super().solution solution['objective'] = self.objective.value # Store attrs as JSON strings for netCDF compatibility solution.attrs = { diff --git a/pyproject.toml b/pyproject.toml index 3fdc031e8..8c4749797 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -110,6 +110,10 @@ docs = [ "mkdocs-git-revision-date-localized-plugin==1.5.0", "mkdocs-minify-plugin==0.8.0", "notebook>=7.5.0", + # Realistic profile generation for examples + "demandlib >= 0.2.2, < 0.3", + "pvlib >= 0.10.0, < 0.14", + "holidays >= 0.40, < 1", ] [project.urls] From d488e852fa27bfa18d396f3d24977edcc7fa9a13 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 28 Dec 2025 12:22:47 +0100 Subject: [PATCH 167/191] Fix notebook --- docs/notebooks/08c2-clustering-storage-modes.ipynb | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/notebooks/08c2-clustering-storage-modes.ipynb b/docs/notebooks/08c2-clustering-storage-modes.ipynb index d6201382d..b498b39d7 100644 --- a/docs/notebooks/08c2-clustering-storage-modes.ipynb +++ b/docs/notebooks/08c2-clustering-storage-modes.ipynb @@ -138,11 +138,7 @@ "cell_type": "code", "execution_count": null, "id": "7", - "metadata": { - "jupyter": { - "is_executing": true - } - }, + "metadata": {}, "outputs": [], "source": [ "solver = fx.solvers.HighsSolver(mip_gap=0.02)\n", @@ -174,7 +170,11 @@ "cell_type": "code", "execution_count": null, "id": "9", - "metadata": {}, + "metadata": { + "jupyter": { + "is_executing": true + } + }, "outputs": [], "source": [ "# Clustering parameters\n", @@ -193,7 +193,7 @@ "\n", " # Create a copy and set the storage mode\n", " fs_copy = flow_system.copy()\n", - " fs_copy.components['SeasonalStorage'].cluster_storage_mode = mode\n", + " fs_copy.storages['SeasonalStorage'].cluster_mode = mode\n", "\n", " start = timeit.default_timer()\n", " fs_clustered = fs_copy.transform.cluster(\n", From 14abf858622b6aff74cb64446a935d559d42096d Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 28 Dec 2025 12:56:56 +0100 Subject: [PATCH 168/191] add catchwarning and fix example system --- docs/notebooks/data/generate_example_systems.py | 2 +- flixopt/components.py | 9 +++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/docs/notebooks/data/generate_example_systems.py b/docs/notebooks/data/generate_example_systems.py index 15b6b13a7..a9951b5ea 100644 --- a/docs/notebooks/data/generate_example_systems.py +++ b/docs/notebooks/data/generate_example_systems.py @@ -576,7 +576,7 @@ def create_seasonal_storage_system() -> fx.FlowSystem: 'SeasonalStorage', capacity_in_flow_hours=fx.InvestParameters( minimum_size=0, - maximum_size=5000, # MWh - large for seasonal storage + maximum_size=50000, # MWh - large for seasonal storage effects_of_investment_per_size={'costs': 20}, # €/MWh (pit storage is cheap) ), initial_charge_state='equals_final', # Yearly cyclic diff --git a/flixopt/components.py b/flixopt/components.py index 47db39441..0f2a6077e 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -5,6 +5,7 @@ from __future__ import annotations import logging +import warnings from typing import TYPE_CHECKING, Literal import numpy as np @@ -1520,8 +1521,12 @@ def _add_combined_bound_constraints( for sample_name, offset in zip(['start', 'mid', 'end'], sample_offsets, 
strict=False):
             # With 2D structure: select time offset, then reorder by cluster_order
             cs_at_offset = charge_state.isel(time=offset)  # Shape: (cluster, ...)
-            cs_t = cs_at_offset.isel(cluster=cluster_order)  # Reorder to original_period order
-            cs_t = cs_t.rename({'cluster': 'original_period'})
+            # Reorder to original_period order using cluster_order indexer
+            cs_t = cs_at_offset.isel(cluster=cluster_order)
+            # Suppress xarray warning about index loss - we immediately assign new coords anyway
+            with warnings.catch_warnings():
+                warnings.filterwarnings('ignore', message='.*does not create an index anymore.*')
+                cs_t = cs_t.rename({'cluster': 'original_period'})
             cs_t = cs_t.assign_coords(original_period=np.arange(n_original_periods))

             # Apply decay factor (1-loss)^t to SOC_boundary per Eq. 9

From dcea6670958d0f5583140ac3df2397c1ae871f05 Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Sun, 28 Dec 2025 12:57:20 +0100
Subject: [PATCH 169/191] improve notebook

---
 docs/notebooks/08c2-clustering-storage-modes.ipynb | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/docs/notebooks/08c2-clustering-storage-modes.ipynb b/docs/notebooks/08c2-clustering-storage-modes.ipynb
index b498b39d7..163cf8729 100644
--- a/docs/notebooks/08c2-clustering-storage-modes.ipynb
+++ b/docs/notebooks/08c2-clustering-storage-modes.ipynb
@@ -170,15 +170,11 @@
    "cell_type": "code",
    "execution_count": null,
    "id": "9",
-   "metadata": {
-    "jupyter": {
-     "is_executing": true
-    }
-   },
+   "metadata": {},
    "outputs": [],
    "source": [
    "# Clustering parameters\n",
-    "N_CLUSTERS = 12  # 12 typical days for a full year\n",
+    "N_CLUSTERS = 24  # 24 typical days for a full year\n",
    "CLUSTER_DURATION = '1D'\n",
    "PEAK_SERIES = ['HeatDemand(Q_th)|fixed_relative_profile']\n",

From 70146896325252daf272ebaf0b9b5b116f376864 Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Sun, 28 Dec 2025 13:06:47 +0100
Subject: [PATCH 170/191] Improve docs, changelog and add tests

---
 CHANGELOG.md                               | 138 +++---
 docs/user-guide/optimization/clustering.md | 208 +++++++++
 docs/user-guide/optimization/index.md      |  24 +-
 tests/test_cluster_reduce_expand.py        | 466 +++++++++++++++++++++
 4 files changed, 736 insertions(+), 100 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index db39c86ef..20f2de7d7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -53,118 +53,82 @@ Until here -->
 
 ## [5.1.0] - Upcoming
 
-**Summary**: This release introduces a new **aggregation abstraction layer** for time series clustering, making flixopt future-proof for alternative clustering methods beyond TSAM. The API is simplified to focus on timestep reduction (`cluster_reduce`), removing the constraint-based clustering approach.
+**Summary**: Time-series clustering for faster optimization with configurable storage behavior across typical periods.
 
 ### ✨ Added
 
-**New Clustering Module** (`flixopt.clustering`): Data structures for time series clustering:
+**Time-Series Clustering**: Reduce large time series to representative typical periods for faster investment optimization, then expand results back to full resolution.
```python -from flixopt import clustering - -# Core data structures for clustering -clustering.ClusterResult # Universal result format -clustering.ClusterStructure # For storage inter-cluster linking -clustering.Clustering # Stored on FlowSystem after clustering -``` - -**Unified Clustering API**: New `transform.cluster()` method for time series reduction: - -```python -# TSAM clustering (default) - clusters 365 days into 8 typical days -fs_reduced = flow_system.transform.cluster( - n_clusters=8, - cluster_duration='1D', - time_series_for_high_peaks=['Demand|fixed_relative_profile'], +# Stage 1: Cluster and optimize (fast sizing) +fs_clustered = flow_system.transform.cluster( + n_clusters=12, # 12 typical days from a year + cluster_duration='1D', # Each cluster represents one day + time_series_for_high_peaks=['HeatDemand(Q)|fixed_relative_profile'], ) -fs_reduced.optimize(solver) +fs_clustered.optimize(solver) -# Expand back to full resolution -fs_expanded = fs_reduced.transform.expand_solution() +# Stage 2: Expand back to full resolution +fs_expanded = fs_clustered.transform.expand_solution() ``` -**TimeSeriesWeights Class**: PyPSA-inspired unified weighting system: +**Storage Modes for Clustering**: Control how storage behaves across clustered periods via `Storage(cluster_mode=...)`: -```python -# Access weights on any FlowSystem -weights = flow_system.weights +| Mode | Description | Use Case | +|------|-------------|----------| +| `'intercluster_cyclic'` | Links storage across clusters + yearly cyclic (default) | Seasonal storage with yearly optimization | +| `'intercluster'` | Links storage across clusters, free start/end | Multi-year optimization without cyclic constraint | +| `'cyclic'` | Each cluster independent, but cyclic (start = end) | Daily storage only, ignores seasonal patterns | +| `'independent'` | Each cluster fully independent, free start/end | Fastest solve, no long-term storage value | -# temporal = timestep_duration × cluster_weight -weights.temporal # Applied to objective and constraints -weights.effective_objective # For objective function (with optional override) +**Clustering Parameters**: -# Convenience method for weighted summation -total_energy = weights.sum_over_time(flow_rates) -``` +- `n_clusters` (int): Number of representative periods to create +- `cluster_duration` (str): Duration of each cluster period (e.g., `'1D'`, `'24h'`, or integer hours) +- `time_series_for_high_peaks` (list[str]): Ensure clusters containing peak values are captured +- `time_series_for_low_peaks` (list[str]): Ensure clusters containing minimum values are captured -**Manual Clustering Support**: Helper function for creating cluster structures from external tools: +**Key Features**: -```python -from flixopt.clustering import create_cluster_structure_from_mapping - -# Use sklearn or any clustering algorithm -from sklearn.cluster import KMeans -# ... perform clustering, get labels ... 
- -# Create cluster structure from mapping -cluster_structure = create_cluster_structure_from_mapping( - timestep_mapping=my_mapping, # xr.DataArray: original → representative - representative_weights=my_weights, # xr.DataArray: weight per representative -) -``` +- **Inter-cluster storage linking**: For `'intercluster'` and `'intercluster_cyclic'` modes, a `SOC_boundary` variable tracks absolute state-of-charge at period boundaries, enabling accurate seasonal storage modeling +- **Self-discharge decay**: Storage losses are correctly applied during solution expansion using the formula: `actual_SOC(t) = SOC_boundary × (1 - loss)^t + ΔE(t)` +- **Multi-dimensional support**: Works with periods, scenarios, and clusters dimensions simultaneously +- **Solution expansion**: `transform.expand_solution()` maps clustered results back to original timesteps with proper storage state reconstruction -**set_aggregation() Method** (placeholder): Future PyPSA-style manual aggregation: +**Example: Seasonal Storage with Clustering**: ```python -# Coming soon - apply external clustering results directly -fs_agg = flow_system.transform.set_aggregation( - timestep_mapping=mapping, - weights=weights, +# Configure storage for seasonal behavior +storage = fx.Storage( + 'SeasonalPit', + capacity_in_flow_hours=5000, + cluster_mode='intercluster_cyclic', # Enable seasonal storage in clustering + relative_loss_per_hour=0.0001, # Small self-discharge + ... ) -``` -### 💥 Breaking Changes - -**Simplified `transform.cluster()` API**: The constraint-based clustering approach has been replaced with timestep reduction: +# Cluster, optimize, and expand +fs_clustered = flow_system.transform.cluster(n_clusters=12, cluster_duration='1D') +fs_clustered.optimize(solver) +fs_expanded = fs_clustered.transform.expand_solution() -```python -# New API - reduces timesteps via TSAM clustering -reduced_fs = flow_system.transform.cluster( - n_clusters=8, - cluster_duration='1D', - time_series_for_high_peaks=['Demand|fixed_relative_profile'], -) -reduced_fs.optimize(solver) +# Full-resolution charge state now available +charge_state = fs_expanded.solution['SeasonalPit|charge_state'] ``` -**Removed constraint-based clustering infrastructure**: -- `ClusteredOptimization` class - removed (use `transform.cluster()` + `Optimization`) -- `ClusteringParameters` class - removed (parameters passed directly to `transform.cluster()`) -- `transform.add_clustering()` - removed -- `FlowSystem._add_clustering_constraints()` - removed - -### ♻️ Changed - -**Terminology clarification** in clustering module: -- "cluster" = a group of similar time chunks (e.g., similar days grouped together) -- "typical period" = a representative time chunk for a cluster (TSAM terminology) -- "cluster duration" = the length of each time chunk (e.g., 24h for daily clustering) +!!! tip "Choosing the Right Storage Mode" + Use `'intercluster_cyclic'` (default) for seasonal storage like pit storage or underground thermal storage. + Use `'cyclic'` for short-term storage like batteries or hot water tanks where only daily patterns matter. + Use `'independent'` for quick estimates when storage behavior isn't critical. -Note: This is separate from the model's "period" dimension (years/months) and "scenario" dimension. - -**xarray-native data structures**: All clustering interfaces use `xr.DataArray` and `xr.Dataset` for proper coordinate handling. 
- -### 🔥 Removed - -- `ClusteredOptimization` class (use `transform.cluster()` + `Optimization`) -- `ClusteringParameters` class (parameters passed directly to `transform.cluster()`) -- `transform.add_clustering()` method -- `ClusteringModel` constraint generation (internal) +### 👷 Development -### 📝 Docs +**New Test Suites for Clustering**: -- Improved terminology: clarified distinction between clustering "typical periods" and model "period" dimension -- Added clustering module documentation with examples +- `TestStorageClusterModes`: Tests for all 4 storage `cluster_mode` options +- `TestInterclusterStorageLinking`: Tests for `SOC_boundary` variable and expansion logic +- `TestMultiPeriodClustering`: Tests for clustering with periods and scenarios dimensions +- `TestPeakSelection`: Tests for `time_series_for_high_peaks` and `time_series_for_low_peaks` parameters --- diff --git a/docs/user-guide/optimization/clustering.md b/docs/user-guide/optimization/clustering.md new file mode 100644 index 000000000..7ec5faac1 --- /dev/null +++ b/docs/user-guide/optimization/clustering.md @@ -0,0 +1,208 @@ +# Time-Series Clustering + +Time-series clustering reduces large optimization problems by aggregating timesteps into representative **typical periods**. This enables fast investment optimization while preserving key system dynamics. + +## When to Use Clustering + +Use clustering when: + +- Optimizing over a **full year** or longer +- **Investment sizing** is the primary goal (not detailed dispatch) +- You need **faster solve times** and can accept approximation +- The system has **repeating patterns** (daily, weekly, seasonal) + +**Skip clustering** for: + +- Short optimization horizons (days to weeks) +- Dispatch-only problems without investments +- Systems requiring exact temporal sequences + +## Two-Stage Workflow + +The recommended approach: cluster for fast sizing, then validate at full resolution. + +```python +import flixopt as fx + +# Load or create your FlowSystem +flow_system = fx.FlowSystem(timesteps) +flow_system.add_elements(...) 
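+# (Element definitions elided: buses, converters, demands, and storages.
+# Storages participate according to their cluster_mode - see Storage Modes below.)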
+ +# Stage 1: Cluster and optimize (fast) +fs_clustered = flow_system.transform.cluster( + n_clusters=12, + cluster_duration='1D', + time_series_for_high_peaks=['HeatDemand(Q)|fixed_relative_profile'], +) +fs_clustered.optimize(fx.solvers.HighsSolver()) + +# Stage 2: Expand back to full resolution +fs_expanded = fs_clustered.transform.expand_solution() + +# Access full-resolution results +charge_state = fs_expanded.solution['Storage|charge_state'] +flow_rates = fs_expanded.solution['Boiler(Q_th)|flow_rate'] +``` + +## Clustering Parameters + +| Parameter | Description | Example | +|-----------|-------------|---------| +| `n_clusters` | Number of typical periods | `12` (typical days for a year) | +| `cluster_duration` | Duration of each cluster | `'1D'`, `'24h'`, or `24` (hours) | +| `time_series_for_high_peaks` | Time series where peak clusters must be captured | `['HeatDemand(Q)|fixed_relative_profile']` | +| `time_series_for_low_peaks` | Time series where minimum clusters must be captured | `['SolarGen(P)|fixed_relative_profile']` | + +### Peak Selection + +Use `time_series_for_high_peaks` to ensure extreme conditions are represented: + +```python +# Ensure the peak demand day is included +fs_clustered = flow_system.transform.cluster( + n_clusters=8, + cluster_duration='1D', + time_series_for_high_peaks=['HeatDemand(Q)|fixed_relative_profile'], +) +``` + +Without peak selection, the clustering algorithm might average out extreme days, leading to undersized equipment. + +## Storage Modes + +Storage behavior during clustering is controlled via the `cluster_mode` parameter: + +```python +storage = fx.Storage( + 'SeasonalPit', + capacity_in_flow_hours=5000, + cluster_mode='intercluster_cyclic', # Default + ... +) +``` + +### Available Modes + +| Mode | Behavior | Best For | +|------|----------|----------| +| `'intercluster_cyclic'` | Links storage across clusters + yearly cycling | Seasonal storage (pit, underground) | +| `'intercluster'` | Links storage across clusters, free start/end | Multi-year optimization | +| `'cyclic'` | Each cluster independent, but start = end | Daily storage (battery, hot water tank) | +| `'independent'` | Each cluster fully independent | Quick estimates, debugging | + +### How Inter-Cluster Linking Works + +For `'intercluster'` and `'intercluster_cyclic'` modes, the optimizer tracks: + +1. **`SOC_boundary`**: Absolute state-of-charge at the start of each original period +2. **`charge_state`**: Relative change (ΔE) within each typical period + +During expansion, these combine with self-discharge decay: + +``` +actual_SOC(t) = SOC_boundary[period] × (1 - loss)^t + ΔE(t) +``` + +This enables accurate modeling of seasonal storage that charges in summer and discharges in winter. + +### Choosing the Right Mode + +```python +# Seasonal pit storage - needs yearly linking +pit_storage = fx.Storage( + 'SeasonalPit', + cluster_mode='intercluster_cyclic', + capacity_in_flow_hours=10000, + relative_loss_per_hour=0.0001, + ... +) + +# Daily hot water tank - only needs daily cycling +tank = fx.Storage( + 'HotWaterTank', + cluster_mode='cyclic', + capacity_in_flow_hours=50, + ... +) + +# Battery with quick estimate +battery = fx.Storage( + 'Battery', + cluster_mode='independent', # Fastest, ignores long-term effects + ... 
+) +``` + +## Multi-Dimensional Support + +Clustering works with periods and scenarios: + +```python +# FlowSystem with multiple periods and scenarios +flow_system = fx.FlowSystem( + timesteps, + periods=pd.Index([2025, 2030, 2035], name='period'), + scenarios=pd.Index(['low', 'base', 'high'], name='scenario'), +) + +# Cluster - dimensions are preserved +fs_clustered = flow_system.transform.cluster( + n_clusters=8, + cluster_duration='1D', +) + +# Solution has all dimensions +# Dims: (time, cluster, period, scenario) +flow_rate = fs_clustered.solution['Boiler(Q_th)|flow_rate'] +``` + +## Expanding Solutions + +After optimization, expand results back to full resolution: + +```python +fs_expanded = fs_clustered.transform.expand_solution() + +# Full timesteps are restored +print(f"Original: {len(flow_system.timesteps)} timesteps") +print(f"Clustered: {len(fs_clustered.timesteps)} timesteps") +print(f"Expanded: {len(fs_expanded.timesteps)} timesteps") + +# Storage charge state correctly reconstructed +charge_state = fs_expanded.solution['Storage|charge_state'] +``` + +The expansion: + +1. Maps each original timestep to its assigned cluster +2. For storage with inter-cluster linking, combines `SOC_boundary` with within-cluster `charge_state` +3. Applies self-discharge decay factors + +## Performance Tips + +### Cluster Count Selection + +| Time Horizon | Cluster Duration | Suggested n_clusters | +|----------------|------------------|---------------------| +| 1 year | 1 day | 8-16 | +| 1 year | 1 week | 4-8 | +| Multiple years | 1 day | 12-24 | + +### Speed vs Accuracy Trade-off + +```python +# Fast (less accurate) - for quick estimates +fs_fast = flow_system.transform.cluster(n_clusters=4, cluster_duration='1D') + +# Balanced - typical production use +fs_balanced = flow_system.transform.cluster(n_clusters=12, cluster_duration='1D') + +# Accurate (slower) - for final results +fs_accurate = flow_system.transform.cluster(n_clusters=24, cluster_duration='1D') +``` + +## See Also + +- [Storage Component](../mathematical-notation/elements/Storage.md) - Storage mathematical formulation +- [Notebooks: Clustering](../../notebooks/08c-clustering.ipynb) - Interactive examples +- [Notebooks: Storage Modes](../../notebooks/08c2-clustering-storage-modes.ipynb) - Storage mode comparison diff --git a/docs/user-guide/optimization/index.md b/docs/user-guide/optimization/index.md index 1d36eb9ba..103ff12ea 100644 --- a/docs/user-guide/optimization/index.md +++ b/docs/user-guide/optimization/index.md @@ -56,22 +56,18 @@ flow_system.solve(fx.solvers.HighsSolver()) For large problems, use time series clustering to reduce computational complexity: ```python -# Define clustering parameters -params = fx.ClusteringParameters( - hours_per_period=24, # Hours per typical period - nr_of_periods=8, # Number of typical periods - fix_storage_flows=True, - aggregate_data_and_fix_non_binary_vars=True, +# Cluster to 12 typical days +fs_clustered = flow_system.transform.cluster( + n_clusters=12, + cluster_duration='1D', + time_series_for_high_peaks=['HeatDemand(Q)|fixed_relative_profile'], ) -# Create clustered FlowSystem -clustered_fs = flow_system.transform.cluster(params) - # Optimize the clustered system -clustered_fs.optimize(fx.solvers.HighsSolver()) +fs_clustered.optimize(fx.solvers.HighsSolver()) -# Access results - same structure as original -print(clustered_fs.solution) +# Expand back to full resolution +fs_expanded = fs_clustered.transform.expand_solution() ``` **Best for:** @@ -86,6 +82,8 @@ 
print(clustered_fs.solution) - Approximates the full problem - Best when patterns repeat (e.g., typical days) +See the **[Clustering Guide](clustering.md)** for details on storage modes, peak selection, and multi-dimensional support. + ## Choosing an Optimization Mode | Mode | Problem Size | Solve Time | Solution Quality | @@ -133,7 +131,7 @@ fs_4h.optimize(fx.solvers.HighsSolver()) ### Clustering -See [Clustered Optimization](#clustered-optimization) above. +See the **[Clustering Guide](clustering.md)** for comprehensive documentation. ### Use Cases diff --git a/tests/test_cluster_reduce_expand.py b/tests/test_cluster_reduce_expand.py index a25fd9eaa..af2864563 100644 --- a/tests/test_cluster_reduce_expand.py +++ b/tests/test_cluster_reduce_expand.py @@ -367,3 +367,469 @@ def test_expand_solution_maps_scenarios_independently(solver_fixture, timesteps_ actual = expanded_scenario[orig_start:orig_end] assert_allclose(actual, expected, rtol=1e-10, err_msg=f'Mismatch for scenario {scenario}') + + +# ==================== Storage Clustering Tests ==================== + + +def create_system_with_storage( + timesteps: pd.DatetimeIndex, + cluster_mode: str = 'intercluster_cyclic', + relative_loss_per_hour: float = 0.0, +) -> fx.FlowSystem: + """Create a FlowSystem with storage for testing clustering. + + Args: + timesteps: DatetimeIndex for the simulation. + cluster_mode: Storage cluster mode ('independent', 'cyclic', 'intercluster', 'intercluster_cyclic'). + relative_loss_per_hour: Self-discharge rate per hour (0.0 = no loss). + """ + # Create demand pattern: high during day (hours 8-18), low at night + hour_of_day = np.array([t.hour for t in timesteps]) + demand = np.where((hour_of_day >= 8) & (hour_of_day < 18), 20, 5) + + flow_system = fx.FlowSystem(timesteps) + flow_system.add_elements( + fx.Bus('Elec'), + fx.Effect('costs', '€', is_standard=True, is_objective=True), + fx.Source('Grid', outputs=[fx.Flow('P', bus='Elec', size=100, effects_per_flow_hour=0.1)]), + fx.Sink('Load', inputs=[fx.Flow('P', bus='Elec', fixed_relative_profile=demand, size=1)]), + fx.Storage( + 'Battery', + charging=fx.Flow('charge', bus='Elec', size=30), + discharging=fx.Flow('discharge', bus='Elec', size=30), + capacity_in_flow_hours=100, + relative_loss_per_hour=relative_loss_per_hour, + cluster_mode=cluster_mode, + ), + ) + return flow_system + + +class TestStorageClusterModes: + """Tests for different storage cluster_mode options.""" + + def test_storage_cluster_mode_independent(self, solver_fixture, timesteps_8_days): + """Storage with cluster_mode='independent' - each cluster starts fresh.""" + fs = create_system_with_storage(timesteps_8_days, cluster_mode='independent') + fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D') + fs_clustered.optimize(solver_fixture) + + # Should have charge_state in solution + assert 'Battery|charge_state' in fs_clustered.solution + + # Independent mode should NOT have SOC_boundary + assert 'Battery|SOC_boundary' not in fs_clustered.solution + + # Verify solution is valid (no errors) + assert fs_clustered.solution is not None + + def test_storage_cluster_mode_cyclic(self, solver_fixture, timesteps_8_days): + """Storage with cluster_mode='cyclic' - start equals end per cluster.""" + fs = create_system_with_storage(timesteps_8_days, cluster_mode='cyclic') + fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D') + fs_clustered.optimize(solver_fixture) + + # Should have charge_state in solution + assert 'Battery|charge_state' in fs_clustered.solution 
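+
+        # (Defining property of 'cyclic': within each cluster the charge_state
+        # returns to its starting value, so no state carries over between clusters.)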
+ + # Cyclic mode should NOT have SOC_boundary (only intercluster modes do) + assert 'Battery|SOC_boundary' not in fs_clustered.solution + + def test_storage_cluster_mode_intercluster(self, solver_fixture, timesteps_8_days): + """Storage with cluster_mode='intercluster' - SOC links across clusters.""" + fs = create_system_with_storage(timesteps_8_days, cluster_mode='intercluster') + fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D') + fs_clustered.optimize(solver_fixture) + + # Intercluster mode SHOULD have SOC_boundary + assert 'Battery|SOC_boundary' in fs_clustered.solution + + soc_boundary = fs_clustered.solution['Battery|SOC_boundary'] + assert 'cluster_boundary' in soc_boundary.dims + + # Number of boundaries = n_original_periods + 1 + n_original_periods = fs_clustered.clustering.result.cluster_structure.n_original_periods + assert soc_boundary.sizes['cluster_boundary'] == n_original_periods + 1 + + def test_storage_cluster_mode_intercluster_cyclic(self, solver_fixture, timesteps_8_days): + """Storage with cluster_mode='intercluster_cyclic' - linked with yearly cycling.""" + fs = create_system_with_storage(timesteps_8_days, cluster_mode='intercluster_cyclic') + fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D') + fs_clustered.optimize(solver_fixture) + + # Intercluster_cyclic mode SHOULD have SOC_boundary + assert 'Battery|SOC_boundary' in fs_clustered.solution + + soc_boundary = fs_clustered.solution['Battery|SOC_boundary'] + assert 'cluster_boundary' in soc_boundary.dims + + # First and last SOC_boundary values should be equal (cyclic constraint) + first_soc = float(soc_boundary.isel(cluster_boundary=0).values) + last_soc = float(soc_boundary.isel(cluster_boundary=-1).values) + assert_allclose(first_soc, last_soc, rtol=1e-6) + + +class TestInterclusterStorageLinking: + """Tests for inter-cluster storage linking and SOC_boundary behavior.""" + + def test_intercluster_storage_has_soc_boundary(self, solver_fixture, timesteps_8_days): + """Verify intercluster storage creates SOC_boundary variable.""" + fs = create_system_with_storage(timesteps_8_days, cluster_mode='intercluster_cyclic') + fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D') + fs_clustered.optimize(solver_fixture) + + # Verify SOC_boundary exists in solution + assert 'Battery|SOC_boundary' in fs_clustered.solution + soc_boundary = fs_clustered.solution['Battery|SOC_boundary'] + assert 'cluster_boundary' in soc_boundary.dims + + def test_expand_solution_combines_soc_boundary_with_charge_state(self, solver_fixture, timesteps_8_days): + """Expanded charge_state should be non-negative (combined with SOC_boundary).""" + fs = create_system_with_storage(timesteps_8_days, cluster_mode='intercluster_cyclic') + fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D') + fs_clustered.optimize(solver_fixture) + + # Note: Before expansion, charge_state represents ΔE (relative to period start) + # which can be negative. After expansion, it becomes absolute SOC. 
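+        # (Expansion applies actual_SOC(t) = SOC_boundary * (1 - loss)^t + ΔE(t),
+        # clipped at zero; test_expanded_charge_state_matches_manual_calculation
+        # below verifies this formula with a nonzero loss rate.)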
+ + # After expansion: charge_state should be non-negative (absolute SOC) + fs_expanded = fs_clustered.transform.expand_solution() + cs_after = fs_expanded.solution['Battery|charge_state'] + + # All values should be >= 0 (with small tolerance for numerical issues) + assert (cs_after >= -0.01).all(), f'Negative charge_state found: min={float(cs_after.min())}' + + def test_storage_self_discharge_decay_in_expansion(self, solver_fixture, timesteps_8_days): + """Verify self-discharge decay factor applied correctly during expansion.""" + # Use significant self-discharge to make decay visible + fs = create_system_with_storage( + timesteps_8_days, + cluster_mode='intercluster_cyclic', + relative_loss_per_hour=0.01, # 1% per hour + ) + fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D') + fs_clustered.optimize(solver_fixture) + + # Expand solution + fs_expanded = fs_clustered.transform.expand_solution() + cs_expanded = fs_expanded.solution['Battery|charge_state'] + + # With self-discharge, SOC should decay over time within each period + # The expanded solution should still be non-negative + assert (cs_expanded >= -0.01).all() + + def test_expanded_charge_state_matches_manual_calculation(self, solver_fixture, timesteps_8_days): + """Verify expanded charge_state = SOC_boundary * decay + delta_E formula.""" + loss_rate = 0.01 # 1% per hour + fs = create_system_with_storage( + timesteps_8_days, + cluster_mode='intercluster_cyclic', + relative_loss_per_hour=loss_rate, + ) + fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D') + fs_clustered.optimize(solver_fixture) + + # Get values needed for manual calculation + soc_boundary = fs_clustered.solution['Battery|SOC_boundary'] + cs_clustered = fs_clustered.solution['Battery|charge_state'] + cluster_structure = fs_clustered.clustering.result.cluster_structure + cluster_order = cluster_structure.cluster_order.values + timesteps_per_cluster = cluster_structure.timesteps_per_cluster + + fs_expanded = fs_clustered.transform.expand_solution() + cs_expanded = fs_expanded.solution['Battery|charge_state'] + + # Manual verification for first few timesteps of first period + p = 0 # First period + cluster = int(cluster_order[p]) + soc_b = float(soc_boundary.isel(cluster_boundary=p).values) + + for t in [0, 5, 12, 23]: + global_t = p * timesteps_per_cluster + t + delta_e = float(cs_clustered.isel(cluster=cluster, time=t).values) + decay = (1 - loss_rate) ** t + expected = soc_b * decay + delta_e + expected_clipped = max(0.0, expected) + actual = float(cs_expanded.isel(time=global_t).values) + + assert_allclose( + actual, + expected_clipped, + rtol=0.01, + err_msg=f'Mismatch at period {p}, time {t}: expected {expected_clipped}, got {actual}', + ) + + +# ==================== Multi-Period Clustering Tests ==================== + + +def create_system_with_periods(timesteps: pd.DatetimeIndex, periods: pd.Index) -> fx.FlowSystem: + """Create a FlowSystem with periods for testing multi-period clustering.""" + hours = len(timesteps) + # Create demand pattern that varies by day to ensure multiple clusters + hour_of_day = np.array([t.hour for t in timesteps]) + day_of_year = np.arange(hours) // 24 + # Add day-based variation: odd days have higher demand + base_demand = np.where((hour_of_day >= 8) & (hour_of_day < 18), 20, 8) + demand = base_demand * (1 + 0.3 * (day_of_year % 2)) # 30% higher on odd days + + flow_system = fx.FlowSystem(timesteps, periods=periods) + flow_system.add_elements( + fx.Bus('Heat'), + fx.Bus('Gas'), + 
fx.Effect('costs', '€', is_standard=True, is_objective=True), + fx.Sink('HeatDemand', inputs=[fx.Flow('Q', bus='Heat', fixed_relative_profile=demand, size=1)]), + fx.Source('GasSource', outputs=[fx.Flow('Gas', bus='Gas', effects_per_flow_hour=0.05)]), + fx.linear_converters.Boiler( + 'Boiler', + thermal_efficiency=0.9, + fuel_flow=fx.Flow('Q_fu', bus='Gas'), + thermal_flow=fx.Flow('Q_th', bus='Heat'), + ), + ) + return flow_system + + +def create_system_with_periods_and_scenarios( + timesteps: pd.DatetimeIndex, periods: pd.Index, scenarios: pd.Index +) -> fx.FlowSystem: + """Create a FlowSystem with both periods and scenarios.""" + import xarray as xr + + hours = len(timesteps) + + # Create demand that varies by scenario AND by day (for clustering) + hour_of_day = np.array([t.hour for t in timesteps]) + day_of_year = np.arange(hours) // 24 + base_demand = np.where((hour_of_day >= 8) & (hour_of_day < 18), 20, 8) + # Add day variation for clustering + base_demand = base_demand * (1 + 0.3 * (day_of_year % 2)) + + # Create demand array with explicit scenario dimension using xarray + # Shape: (time, scenario) + demand_data = np.column_stack([base_demand * (1 + 0.2 * i) for i in range(len(scenarios))]) + demand_da = xr.DataArray( + demand_data, + dims=['time', 'scenario'], + coords={'time': timesteps, 'scenario': scenarios}, + ) + + flow_system = fx.FlowSystem(timesteps, periods=periods, scenarios=scenarios) + flow_system.add_elements( + fx.Bus('Heat'), + fx.Bus('Gas'), + fx.Effect('costs', '€', is_standard=True, is_objective=True), + fx.Sink( + 'HeatDemand', + inputs=[fx.Flow('Q', bus='Heat', fixed_relative_profile=demand_da, size=1)], + ), + fx.Source('GasSource', outputs=[fx.Flow('Gas', bus='Gas', effects_per_flow_hour=0.05)]), + fx.linear_converters.Boiler( + 'Boiler', + thermal_efficiency=0.9, + fuel_flow=fx.Flow('Q_fu', bus='Gas'), + thermal_flow=fx.Flow('Q_th', bus='Heat'), + ), + ) + return flow_system + + +@pytest.fixture +def periods_2(): + """Two periods for testing.""" + return pd.Index([2020, 2021], name='period') + + +class TestMultiPeriodClustering: + """Tests for clustering with periods dimension.""" + + def test_cluster_with_periods(self, timesteps_8_days, periods_2): + """Test clustering with periods dimension.""" + fs = create_system_with_periods(timesteps_8_days, periods_2) + + # Verify periods are set up correctly + assert fs.periods is not None + assert len(fs.periods) == 2 + + # Cluster + fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D') + + # Should have period dimension preserved + assert fs_clustered.periods is not None + assert len(fs_clustered.periods) == 2 + + # Clustered: 24 within-cluster timesteps, 2 clusters + assert len(fs_clustered.timesteps) == 24 + assert len(fs_clustered.clusters) == 2 + + def test_cluster_with_periods_optimizes(self, solver_fixture, timesteps_8_days, periods_2): + """Test that clustering with periods can be optimized.""" + fs = create_system_with_periods(timesteps_8_days, periods_2) + fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D') + fs_clustered.optimize(solver_fixture) + + # Should have solution with period dimension + assert fs_clustered.solution is not None + flow_var = 'Boiler(Q_th)|flow_rate' + assert flow_var in fs_clustered.solution + assert 'period' in fs_clustered.solution[flow_var].dims + + def test_expand_solution_with_periods(self, solver_fixture, timesteps_8_days, periods_2): + """Verify expansion handles period dimension correctly.""" + fs = 
create_system_with_periods(timesteps_8_days, periods_2) + fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D') + fs_clustered.optimize(solver_fixture) + + # Expand + fs_expanded = fs_clustered.transform.expand_solution() + + # Should have original timesteps and periods + assert len(fs_expanded.timesteps) == 192 + assert fs_expanded.periods is not None + assert len(fs_expanded.periods) == 2 + + # Solution should have period dimension + flow_var = 'Boiler(Q_th)|flow_rate' + assert 'period' in fs_expanded.solution[flow_var].dims + assert len(fs_expanded.solution[flow_var].coords['time']) == 192 + + def test_cluster_with_periods_and_scenarios(self, solver_fixture, timesteps_8_days, periods_2, scenarios_2): + """Clustering should work with both periods and scenarios.""" + fs = create_system_with_periods_and_scenarios(timesteps_8_days, periods_2, scenarios_2) + + # Verify setup + assert fs.periods is not None + assert fs.scenarios is not None + assert len(fs.periods) == 2 + assert len(fs.scenarios) == 2 + + # Cluster and optimize + fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D') + fs_clustered.optimize(solver_fixture) + + # Verify dimensions + flow_var = 'Boiler(Q_th)|flow_rate' + assert 'period' in fs_clustered.solution[flow_var].dims + assert 'scenario' in fs_clustered.solution[flow_var].dims + assert 'cluster' in fs_clustered.solution[flow_var].dims + + # Expand and verify + fs_expanded = fs_clustered.transform.expand_solution() + assert 'period' in fs_expanded.solution[flow_var].dims + assert 'scenario' in fs_expanded.solution[flow_var].dims + assert len(fs_expanded.solution[flow_var].coords['time']) == 192 + + +# ==================== Peak Selection Tests ==================== + + +def create_system_with_peak_demand(timesteps: pd.DatetimeIndex) -> fx.FlowSystem: + """Create a FlowSystem with clearly identifiable peak demand days.""" + hours = len(timesteps) + + # Create demand with distinct patterns to ensure multiple clusters + # Days 0,1: low demand (base pattern) + # Days 2,3: medium demand (higher pattern) + # Days 4,5,6: normal demand (moderate pattern) + # Day 7: extreme peak (very high) + day = np.arange(hours) // 24 + hour_of_day = np.arange(hours) % 24 + + # Base pattern varies by day group + base = np.where((hour_of_day >= 8) & (hour_of_day < 18), 15, 5) + + demand = np.where( + (day == 7) & (hour_of_day >= 10) & (hour_of_day < 14), + 50, # Extreme peak on day 7 + np.where( + day <= 1, + base * 0.7, # Low days + np.where(day <= 3, base * 1.3, base), # Medium days vs normal + ), + ) + + flow_system = fx.FlowSystem(timesteps) + flow_system.add_elements( + fx.Bus('Heat'), + fx.Bus('Gas'), + fx.Effect('costs', '€', is_standard=True, is_objective=True), + fx.Sink('HeatDemand', inputs=[fx.Flow('Q', bus='Heat', fixed_relative_profile=demand, size=1)]), + fx.Source('GasSource', outputs=[fx.Flow('Gas', bus='Gas', effects_per_flow_hour=0.05)]), + fx.linear_converters.Boiler( + 'Boiler', + thermal_efficiency=0.9, + fuel_flow=fx.Flow('Q_fu', bus='Gas'), + thermal_flow=fx.Flow('Q_th', bus='Heat'), + ), + ) + return flow_system + + +class TestPeakSelection: + """Tests for time_series_for_high_peaks and time_series_for_low_peaks parameters.""" + + def test_time_series_for_high_peaks_parameter_accepted(self, timesteps_8_days): + """Verify time_series_for_high_peaks parameter is accepted.""" + fs = create_system_with_peak_demand(timesteps_8_days) + + # Should not raise an error + fs_clustered = fs.transform.cluster( + n_clusters=2, + 
cluster_duration='1D', + time_series_for_high_peaks=['HeatDemand(Q)|fixed_relative_profile'], + ) + + assert fs_clustered is not None + assert len(fs_clustered.clusters) == 2 + + def test_time_series_for_low_peaks_parameter_accepted(self, timesteps_8_days): + """Verify time_series_for_low_peaks parameter is accepted.""" + fs = create_system_with_peak_demand(timesteps_8_days) + + # Should not raise an error + # Note: tsam requires n_clusters >= 3 when using low_peaks to avoid index error + fs_clustered = fs.transform.cluster( + n_clusters=3, + cluster_duration='1D', + time_series_for_low_peaks=['HeatDemand(Q)|fixed_relative_profile'], + ) + + assert fs_clustered is not None + assert len(fs_clustered.clusters) == 3 + + def test_high_peaks_captures_extreme_demand_day(self, solver_fixture, timesteps_8_days): + """Verify high peak selection captures day with maximum demand.""" + fs = create_system_with_peak_demand(timesteps_8_days) + + # Cluster WITH high peak selection + fs_with_peaks = fs.transform.cluster( + n_clusters=2, + cluster_duration='1D', + time_series_for_high_peaks=['HeatDemand(Q)|fixed_relative_profile'], + ) + fs_with_peaks.optimize(solver_fixture) + + # The peak day (day 7 with demand=50) should be captured + # Check that the clustered solution can handle the peak demand + flow_rates = fs_with_peaks.solution['Boiler(Q_th)|flow_rate'] + + # At least one cluster should have flow rate >= 50 (the peak) + max_flow = float(flow_rates.max()) + assert max_flow >= 49, f'Peak demand not captured: max_flow={max_flow}' + + def test_clustering_without_peaks_may_miss_extremes(self, solver_fixture, timesteps_8_days): + """Show that without peak selection, extreme days might be averaged out.""" + fs = create_system_with_peak_demand(timesteps_8_days) + + # Cluster WITHOUT high peak selection (may or may not capture peak) + fs_no_peaks = fs.transform.cluster( + n_clusters=2, + cluster_duration='1D', + # No time_series_for_high_peaks + ) + fs_no_peaks.optimize(solver_fixture) + + # This test just verifies the clustering works + # The peak may or may not be captured depending on clustering algorithm + assert fs_no_peaks.solution is not None From 702803ab75ce31c5687c715232b9dff92b1b18b6 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 28 Dec 2025 13:23:07 +0100 Subject: [PATCH 171/191] Fix plotting animation handling --- flixopt/config.py | 28 ++++----- flixopt/statistics_accessor.py | 112 ++++++++++++++++----------------- 2 files changed, 69 insertions(+), 71 deletions(-) diff --git a/flixopt/config.py b/flixopt/config.py index 4b1de189b..7e7c784cb 100644 --- a/flixopt/config.py +++ b/flixopt/config.py @@ -163,9 +163,8 @@ def format(self, record): 'default_facet_cols': 3, 'default_sequential_colorscale': 'turbo', 'default_qualitative_colorscale': 'plotly', - 'facet_col_priority': ('cluster', 'period', 'scenario'), - 'facet_row_priority': ('period', 'scenario'), - 'animation_frame_priority': ('scenario',), + 'extra_dim_priority': ('cluster', 'period', 'scenario'), + 'dim_slot_priority': ('facet_col', 'facet_row', 'animation_frame'), } ), 'solving': MappingProxyType( @@ -561,9 +560,10 @@ class Plotting: default_facet_cols: Default number of columns for faceted plots. default_sequential_colorscale: Default colorscale for heatmaps and continuous data. default_qualitative_colorscale: Default colormap for categorical plots (bar/line/area charts). - facet_col_priority: Priority order for auto-resolving facet_col dimension. 
- facet_row_priority: Priority order for auto-resolving facet_row dimension. - animation_frame_priority: Priority order for auto-resolving animation_frame dimension. + extra_dim_priority: Order of extra dimensions when auto-assigning to slots. + Default: ('cluster', 'period', 'scenario'). + dim_slot_priority: Order of slots to fill with extra dimensions. + Default: ('facet_col', 'facet_row', 'animation_frame'). Examples: ```python @@ -572,8 +572,10 @@ class Plotting: CONFIG.Plotting.default_sequential_colorscale = 'plasma' CONFIG.Plotting.default_qualitative_colorscale = 'Dark24' - # Customize auto-faceting priority - CONFIG.Plotting.facet_col_priority = ('period', 'cluster', 'scenario') + # Customize dimension handling + # With 2 extra dims (period, scenario): period → facet_col, scenario → facet_row + CONFIG.Plotting.extra_dim_priority = ('cluster', 'period', 'scenario') + CONFIG.Plotting.dim_slot_priority = ('facet_col', 'facet_row', 'animation_frame') ``` """ @@ -583,9 +585,8 @@ class Plotting: default_facet_cols: int = _DEFAULTS['plotting']['default_facet_cols'] default_sequential_colorscale: str = _DEFAULTS['plotting']['default_sequential_colorscale'] default_qualitative_colorscale: str = _DEFAULTS['plotting']['default_qualitative_colorscale'] - facet_col_priority: tuple[str, ...] = _DEFAULTS['plotting']['facet_col_priority'] - facet_row_priority: tuple[str, ...] = _DEFAULTS['plotting']['facet_row_priority'] - animation_frame_priority: tuple[str, ...] = _DEFAULTS['plotting']['animation_frame_priority'] + extra_dim_priority: tuple[str, ...] = _DEFAULTS['plotting']['extra_dim_priority'] + dim_slot_priority: tuple[str, ...] = _DEFAULTS['plotting']['dim_slot_priority'] class Carriers: """Default carrier definitions for common energy types. @@ -686,9 +687,8 @@ def to_dict(cls) -> dict: 'default_facet_cols': cls.Plotting.default_facet_cols, 'default_sequential_colorscale': cls.Plotting.default_sequential_colorscale, 'default_qualitative_colorscale': cls.Plotting.default_qualitative_colorscale, - 'facet_col_priority': cls.Plotting.facet_col_priority, - 'facet_row_priority': cls.Plotting.facet_row_priority, - 'animation_frame_priority': cls.Plotting.animation_frame_priority, + 'extra_dim_priority': cls.Plotting.extra_dim_priority, + 'dim_slot_priority': cls.Plotting.dim_slot_priority, }, } diff --git a/flixopt/statistics_accessor.py b/flixopt/statistics_accessor.py index 1c835e9d2..bee26a0e2 100644 --- a/flixopt/statistics_accessor.py +++ b/flixopt/statistics_accessor.py @@ -236,12 +236,9 @@ def _resolve_auto_facets( ) -> tuple[str | None, str | None, str | None]: """Resolve 'auto' facet/animation dimensions based on available data dimensions. - When 'auto' is specified, dimensions are assigned based on priority: - - facet_col: cluster → period → scenario (first available with size > 1) - - facet_row: period → scenario (after facet_col is assigned) - - animation_frame: scenario (after others are assigned) - - Priority order is configurable via CONFIG.Plotting.facet_col_priority, etc. + When 'auto' is specified, extra dimensions are assigned to slots based on: + - CONFIG.Plotting.extra_dim_priority: Order of dimensions (default: cluster → period → scenario) + - CONFIG.Plotting.dim_slot_priority: Order of slots (default: facet_col → facet_row → animation_frame) Args: ds: Dataset to check for available dimensions. @@ -253,38 +250,36 @@ def _resolve_auto_facets( Tuple of (resolved_facet_col, resolved_facet_row, resolved_animation_frame). Each is either a valid dimension name or None. 
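+
+        Example:
+            Illustrative, assuming the default priorities: if the extra dims
+            with size > 1 are 'cluster' and 'scenario' and all three arguments
+            are 'auto', the result is facet_col='cluster', facet_row='scenario',
+            animation_frame=None.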
""" - # Get available dimensions with size > 1 + # Get available extra dimensions with size > 1, sorted by priority available = {d for d in ds.dims if ds.sizes[d] > 1} + extra_dims = [d for d in CONFIG.Plotting.extra_dim_priority if d in available] used: set[str] = set() - def resolve_one( - value: str | Literal['auto'] | None, - priority: tuple[str, ...], - ) -> str | None: - if value is None: - return None - if value != 'auto': - # Explicit dimension - use if available, else None - return value if value in available and value not in used else None - - # Auto mode: pick first available from priority list - for dim in priority: - if dim in available and dim not in used: - used.add(dim) - return dim - return None - - resolved_col = resolve_one(facet_col, CONFIG.Plotting.facet_col_priority) - if resolved_col: - used.add(resolved_col) + # Map slot names to their input values + slots = { + 'facet_col': facet_col, + 'facet_row': facet_row, + 'animation_frame': animation_frame, + } + results: dict[str, str | None] = {'facet_col': None, 'facet_row': None, 'animation_frame': None} - resolved_row = resolve_one(facet_row, CONFIG.Plotting.facet_row_priority) - if resolved_row: - used.add(resolved_row) + # First pass: resolve explicit dimensions (not 'auto' or None) to mark them as used + for slot_name, value in slots.items(): + if value is not None and value != 'auto': + if value in available and value not in used: + used.add(value) + results[slot_name] = value - resolved_anim = resolve_one(animation_frame, CONFIG.Plotting.animation_frame_priority) + # Second pass: resolve 'auto' slots in dim_slot_priority order + dim_iter = iter(d for d in extra_dims if d not in used) + for slot_name in CONFIG.Plotting.dim_slot_priority: + if slots.get(slot_name) == 'auto': + next_dim = next(dim_iter, None) + if next_dim: + used.add(next_dim) + results[slot_name] = next_dim - return resolved_col, resolved_row, resolved_anim + return results['facet_col'], results['facet_row'], results['animation_frame'] def _resolve_facets( @@ -1443,8 +1438,8 @@ def balance( unit: Literal['flow_rate', 'flow_hours'] = 'flow_rate', colors: ColorType | None = None, facet_col: str | Literal['auto'] | None = 'auto', - facet_row: str | Literal['auto'] | None = None, - animation_frame: str | Literal['auto'] | None = None, + facet_row: str | Literal['auto'] | None = 'auto', + animation_frame: str | Literal['auto'] | None = 'auto', show: bool | None = None, **plotly_kwargs: Any, ) -> PlotResult: @@ -1539,8 +1534,8 @@ def carrier_balance( unit: Literal['flow_rate', 'flow_hours'] = 'flow_rate', colors: ColorType | None = None, facet_col: str | Literal['auto'] | None = 'auto', - facet_row: str | Literal['auto'] | None = None, - animation_frame: str | Literal['auto'] | None = None, + facet_row: str | Literal['auto'] | None = 'auto', + animation_frame: str | Literal['auto'] | None = 'auto', show: bool | None = None, **plotly_kwargs: Any, ) -> PlotResult: @@ -1727,22 +1722,18 @@ def heatmap( da.to_dataset(name='value'), facet_col, None, animation_frame ) - # Count non-time/non-cluster dims with size > 1 (these need facet/animation slots) - heatmap_core_dims = {'time', 'cluster'} if is_clustered else {'time'} - extra_dims = [d for d in da.dims if d not in heatmap_core_dims and da.sizes[d] > 1] - used_slots = len([d for d in [actual_facet, actual_animation] if d]) - would_drop = len(extra_dims) > used_slots - # Determine heatmap dimensions based on data structure if is_clustered and (reshape == 'auto' or reshape is None): # Clustered data: use (time, 
cluster) as natural 2D heatmap axes heatmap_dims = ['time', 'cluster'] - elif reshape and reshape != 'auto' and 'time' in da.dims and not would_drop: + elif reshape and reshape != 'auto' and 'time' in da.dims: # Non-clustered with explicit reshape: reshape time to (day, hour) etc. + # Extra dims will be handled via facet/animation or dropped da = _reshape_time_for_heatmap(da, reshape) heatmap_dims = ['timestep', 'timeframe'] - elif reshape == 'auto' and 'time' in da.dims and not would_drop and not is_clustered: + elif reshape == 'auto' and 'time' in da.dims and not is_clustered: # Auto mode for non-clustered: use default ('D', 'h') reshape + # Extra dims will be handled via facet/animation or dropped da = _reshape_time_for_heatmap(da, ('D', 'h')) heatmap_dims = ['timestep', 'timeframe'] elif has_multiple_vars: @@ -1753,7 +1744,14 @@ def heatmap( da.to_dataset(name='value'), facet_col, None, animation_frame ) else: - heatmap_dims = ['time'] if 'time' in da.dims else list(da.dims)[:1] + # Fallback: use first two available dimensions + available_dims = [d for d in da.dims if da.sizes[d] > 1] + if len(available_dims) >= 2: + heatmap_dims = available_dims[:2] + elif 'time' in da.dims: + heatmap_dims = ['time'] + else: + heatmap_dims = list(da.dims)[:1] # Keep only dims we need keep_dims = set(heatmap_dims) | {d for d in [actual_facet, actual_animation] if d is not None} @@ -1794,8 +1792,8 @@ def flows( unit: Literal['flow_rate', 'flow_hours'] = 'flow_rate', colors: ColorType | None = None, facet_col: str | Literal['auto'] | None = 'auto', - facet_row: str | Literal['auto'] | None = None, - animation_frame: str | Literal['auto'] | None = None, + facet_row: str | Literal['auto'] | None = 'auto', + animation_frame: str | Literal['auto'] | None = 'auto', show: bool | None = None, **plotly_kwargs: Any, ) -> PlotResult: @@ -1887,8 +1885,8 @@ def sizes( select: SelectType | None = None, colors: ColorType | None = None, facet_col: str | Literal['auto'] | None = 'auto', - facet_row: str | Literal['auto'] | None = None, - animation_frame: str | Literal['auto'] | None = None, + facet_row: str | Literal['auto'] | None = 'auto', + animation_frame: str | Literal['auto'] | None = 'auto', show: bool | None = None, **plotly_kwargs: Any, ) -> PlotResult: @@ -1954,8 +1952,8 @@ def duration_curve( normalize: bool = False, colors: ColorType | None = None, facet_col: str | Literal['auto'] | None = 'auto', - facet_row: str | Literal['auto'] | None = None, - animation_frame: str | Literal['auto'] | None = None, + facet_row: str | Literal['auto'] | None = 'auto', + animation_frame: str | Literal['auto'] | None = 'auto', show: bool | None = None, **plotly_kwargs: Any, ) -> PlotResult: @@ -2069,8 +2067,8 @@ def effects( select: SelectType | None = None, colors: ColorType | None = None, facet_col: str | Literal['auto'] | None = 'auto', - facet_row: str | Literal['auto'] | None = None, - animation_frame: str | Literal['auto'] | None = None, + facet_row: str | Literal['auto'] | None = 'auto', + animation_frame: str | Literal['auto'] | None = 'auto', show: bool | None = None, **plotly_kwargs: Any, ) -> PlotResult: @@ -2228,8 +2226,8 @@ def charge_states( select: SelectType | None = None, colors: ColorType | None = None, facet_col: str | Literal['auto'] | None = 'auto', - facet_row: str | Literal['auto'] | None = None, - animation_frame: str | Literal['auto'] | None = None, + facet_row: str | Literal['auto'] | None = 'auto', + animation_frame: str | Literal['auto'] | None = 'auto', show: bool | None = None, **plotly_kwargs: 
Any, ) -> PlotResult: @@ -2287,8 +2285,8 @@ def storage( colors: ColorType | None = None, charge_state_color: str = 'black', facet_col: str | Literal['auto'] | None = 'auto', - facet_row: str | Literal['auto'] | None = None, - animation_frame: str | Literal['auto'] | None = None, + facet_row: str | Literal['auto'] | None = 'auto', + animation_frame: str | Literal['auto'] | None = 'auto', show: bool | None = None, **plotly_kwargs: Any, ) -> PlotResult: From ab9a5199eff5e1a0db0f26a53a857599cbc47dba Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 28 Dec 2025 13:58:51 +0100 Subject: [PATCH 172/191] fix: cluster handling in temporal shares --- flixopt/effects.py | 1 - 1 file changed, 1 deletion(-) diff --git a/flixopt/effects.py b/flixopt/effects.py index cdac7ca7d..3a2322988 100644 --- a/flixopt/effects.py +++ b/flixopt/effects.py @@ -252,7 +252,6 @@ def transform_data(self) -> None: prefix=None, effect_values=self.share_from_temporal, suffix=f'(temporal)->{self.prefix}(temporal)', - dims=['time', 'period', 'scenario'], ) self.share_from_periodic = self._fit_effect_coords( prefix=None, From 300ea331ca7cd0e2c90afe9a533da9da473f088d Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 28 Dec 2025 14:00:53 +0100 Subject: [PATCH 173/191] Change logging in clustering --- flixopt/transform_accessor.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 9a4d6e804..93a1e2247 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -73,7 +73,7 @@ def _calculate_clustering_weights(ds) -> dict[str, float]: weights[name] = da.attrs.get('clustering_weight', 1) if np.all(np.isclose(list(weights.values()), 1, atol=1e-6)): - logger.info('All Clustering weights were set to 1') + logger.debug('All Clustering weights were set to 1') return weights @@ -669,9 +669,6 @@ def cluster( has_periods = self._fs.periods is not None has_scenarios = self._fs.scenarios is not None - logger.info(f'{"":#^80}') - logger.info(f'{" Creating Typical Clusters ":#^80}') - # Determine iteration dimensions periods = list(self._fs.periods) if has_periods else [None] scenarios = list(self._fs.scenarios) if has_scenarios else [None] From 9a8cd27c7ae9dd1a3ee07963cfda6b2a8a945dc3 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 28 Dec 2025 14:20:27 +0100 Subject: [PATCH 174/191] Fix imports and warnings --- .../data/generate_example_systems.py | 24 +++++++++++++------ .../data/generate_realistic_profiles.py | 3 +++ 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/docs/notebooks/data/generate_example_systems.py b/docs/notebooks/data/generate_example_systems.py index a9951b5ea..ec645e8b2 100644 --- a/docs/notebooks/data/generate_example_systems.py +++ b/docs/notebooks/data/generate_example_systems.py @@ -15,13 +15,23 @@ import numpy as np import pandas as pd -from generate_realistic_profiles import ( - ElectricityLoadGenerator, - GasPriceGenerator, - ThermalLoadGenerator, - load_electricity_prices, - load_weather, -) + +try: + from .generate_realistic_profiles import ( + ElectricityLoadGenerator, + GasPriceGenerator, + ThermalLoadGenerator, + load_electricity_prices, + load_weather, + ) +except ImportError: + from generate_realistic_profiles import ( + ElectricityLoadGenerator, + GasPriceGenerator, + ThermalLoadGenerator, + load_electricity_prices, + load_weather, + ) import flixopt as 
fx diff --git a/docs/notebooks/data/generate_realistic_profiles.py b/docs/notebooks/data/generate_realistic_profiles.py index 0a326362c..5fde58397 100644 --- a/docs/notebooks/data/generate_realistic_profiles.py +++ b/docs/notebooks/data/generate_realistic_profiles.py @@ -16,6 +16,7 @@ from __future__ import annotations +import warnings from pathlib import Path import holidays @@ -24,6 +25,8 @@ import pvlib from demandlib import bdew +warnings.resetwarnings() # Reset to default behavior due to weird dependency behaviour + # Data directory DATA_DIR = Path(__file__).parent / 'raw' From 3ba9730fedc5cb3845e210a6f54626c4a0a81741 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 28 Dec 2025 15:36:25 +0100 Subject: [PATCH 175/191] Fix netcdf warnings --- flixopt/__init__.py | 4 ---- flixopt/io.py | 23 +++++++++++++++-------- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/flixopt/__init__.py b/flixopt/__init__.py index 73784f2cd..d1a63a9c5 100644 --- a/flixopt/__init__.py +++ b/flixopt/__init__.py @@ -98,7 +98,3 @@ message="In a future version of xarray the default value for join will change from join='outer' to join='exact'", module='linopy', ) - -# numpy: Core numerical library -# - RuntimeWarning: Binary incompatibility warnings from compiled extensions (safe to ignore). numpy 1->2 -warnings.filterwarnings('ignore', category=RuntimeWarning, message='numpy\\.ndarray size changed') diff --git a/flixopt/io.py b/flixopt/io.py index 288d35db8..164a9fb2e 100644 --- a/flixopt/io.py +++ b/flixopt/io.py @@ -7,6 +7,7 @@ import pathlib import re import sys +import warnings from contextlib import contextmanager from dataclasses import dataclass from typing import TYPE_CHECKING, Any @@ -559,13 +560,16 @@ def save_dataset_to_netcdf( if hasattr(coord_var, 'attrs') and coord_var.attrs: ds[coord_name].attrs = {'attrs': json.dumps(coord_var.attrs)} - ds.to_netcdf( - path, - encoding=None - if compression == 0 - else {data_var: {'zlib': True, 'complevel': compression} for data_var in ds.data_vars}, - engine='netcdf4', - ) + # Suppress numpy binary compatibility warnings from netCDF4 (numpy 1->2 transition) + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', category=RuntimeWarning, message='numpy.ndarray size changed') + ds.to_netcdf( + path, + encoding=None + if compression == 0 + else {data_var: {'zlib': True, 'complevel': compression} for data_var in ds.data_vars}, + engine='netcdf4', + ) def load_dataset_from_netcdf(path: str | pathlib.Path) -> xr.Dataset: @@ -578,7 +582,10 @@ def load_dataset_from_netcdf(path: str | pathlib.Path) -> xr.Dataset: Returns: Dataset: Loaded dataset with restored attrs. 
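+
+    Examples:
+        A minimal usage sketch (path illustrative):
+
+        >>> ds = load_dataset_from_netcdf('flow_system.nc4')
+        >>> ds.attrs  # attrs restored from their JSON-encoded form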
""" - ds = xr.load_dataset(str(path), engine='netcdf4') + # Suppress numpy binary compatibility warnings from netCDF4 (numpy 1->2 transition) + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', category=RuntimeWarning, message='numpy.ndarray size changed') + ds = xr.load_dataset(str(path), engine='netcdf4') # Restore Dataset attrs if 'attrs' in ds.attrs: From 325f4eb3d5ccceb85fcff5bbf6440300f58de4df Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 3 Jan 2026 13:03:32 +0100 Subject: [PATCH 176/191] Fix notebooks to not save to netcdf --- docs/notebooks/08a-aggregation.ipynb | 24 +++++--------- docs/notebooks/08b-rolling-horizon.ipynb | 20 ++++------- docs/notebooks/08c-clustering.ipynb | 18 +++------- .../08c2-clustering-storage-modes.ipynb | 33 +++++++++---------- .../08d-clustering-multiperiod.ipynb | 16 +++------ docs/notebooks/08e-clustering-internals.ipynb | 13 ++------ 6 files changed, 41 insertions(+), 83 deletions(-) diff --git a/docs/notebooks/08a-aggregation.ipynb b/docs/notebooks/08a-aggregation.ipynb index 6d0260539..8bc1a4774 100644 --- a/docs/notebooks/08a-aggregation.ipynb +++ b/docs/notebooks/08a-aggregation.ipynb @@ -47,9 +47,9 @@ "id": "3", "metadata": {}, "source": [ - "## Load the FlowSystem\n", + "## Create the FlowSystem\n", "\n", - "We use a pre-built district heating system with real-world time series data (one month at 15-min resolution):" + "We use a district heating system with real-world time series data (one month at 15-min resolution):" ] }, { @@ -59,21 +59,13 @@ "metadata": {}, "outputs": [], "source": [ - "from pathlib import Path\n", + "from data.generate_example_systems import create_district_heating_system\n", "\n", - "# Generate example data if not present (for local development)\n", - "data_file = Path('data/district_heating_system.nc4')\n", - "if not data_file.exists():\n", - " from data.generate_example_systems import create_district_heating_system\n", - "\n", - " fs = create_district_heating_system()\n", - " fs.to_netcdf(data_file)\n", - "\n", - "# Load the district heating system (real data from Zeitreihen2020.csv)\n", - "flow_system = fx.FlowSystem.from_netcdf(data_file)\n", + "flow_system = create_district_heating_system()\n", + "flow_system.connect_and_transform() # Align all data as xarray\n", "\n", "timesteps = flow_system.timesteps\n", - "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 96:.0f} days at 15-min resolution)')\n", + "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 24:.0f} days at hourly resolution)')\n", "print(f'Components: {list(flow_system.components.keys())}')" ] }, @@ -90,8 +82,8 @@ "\n", "fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.1)\n", "\n", - "fig.add_trace(go.Scatter(x=timesteps[:672], y=heat_demand.values[:672], name='Heat Demand'), row=1, col=1)\n", - "fig.add_trace(go.Scatter(x=timesteps[:672], y=electricity_price.values[:672], name='Electricity Price'), row=2, col=1)\n", + "fig.add_trace(go.Scatter(x=timesteps[:168], y=heat_demand.values[:168], name='Heat Demand'), row=1, col=1)\n", + "fig.add_trace(go.Scatter(x=timesteps[:168], y=electricity_price.values[:168], name='Electricity Price'), row=2, col=1)\n", "\n", "fig.update_layout(height=400, title='First Week of Data')\n", "fig.update_yaxes(title_text='Heat Demand [MW]', row=1, col=1)\n", diff --git a/docs/notebooks/08b-rolling-horizon.ipynb b/docs/notebooks/08b-rolling-horizon.ipynb index e43da8f2c..c0d7bdf24 100644 --- 
a/docs/notebooks/08b-rolling-horizon.ipynb +++ b/docs/notebooks/08b-rolling-horizon.ipynb @@ -51,9 +51,9 @@ "id": "3", "metadata": {}, "source": [ - "## Load the FlowSystem\n", + "## Create the FlowSystem\n", "\n", - "We use a pre-built operational district heating system with real-world data (two weeks at 15-min resolution):" + "We use an operational district heating system with real-world data (two weeks at 15-min resolution):" ] }, { @@ -63,21 +63,13 @@ "metadata": {}, "outputs": [], "source": [ - "from pathlib import Path\n", + "from data.generate_example_systems import create_operational_system\n", "\n", - "# Generate example data if not present (for local development)\n", - "data_file = Path('data/operational_system.nc4')\n", - "if not data_file.exists():\n", - " from data.generate_example_systems import create_operational_system\n", - "\n", - " fs = create_operational_system()\n", - " fs.to_netcdf(data_file)\n", - "\n", - "# Load the operational system (real data from Zeitreihen2020.csv, two weeks)\n", - "flow_system = fx.FlowSystem.from_netcdf(data_file)\n", + "flow_system = create_operational_system()\n", + "flow_system.connect_and_transform() # Align all data as xarray\n", "\n", "timesteps = flow_system.timesteps\n", - "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 96:.0f} days at 15-min resolution)')\n", + "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 24:.0f} days at hourly resolution)')\n", "print(f'Components: {list(flow_system.components.keys())}')" ] }, diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb index cf5b53b53..de1a05482 100644 --- a/docs/notebooks/08c-clustering.ipynb +++ b/docs/notebooks/08c-clustering.ipynb @@ -27,7 +27,6 @@ "outputs": [], "source": [ "import timeit\n", - "from pathlib import Path\n", "\n", "import numpy as np\n", "import pandas as pd\n", @@ -44,9 +43,9 @@ "id": "2", "metadata": {}, "source": [ - "## Load the FlowSystem\n", + "## Create the FlowSystem\n", "\n", - "We use a pre-built district heating system with real-world time series data (one month at 15-min resolution):" + "We use a district heating system with real-world time series data (one month at 15-min resolution):" ] }, { @@ -56,19 +55,12 @@ "metadata": {}, "outputs": [], "source": [ - "# Generate example data if not present\n", - "data_file = Path('data/district_heating_system.nc4')\n", - "if not data_file.exists():\n", - " from data.generate_example_systems import create_district_heating_system\n", + "from data.generate_example_systems import create_district_heating_system\n", "\n", - " fs = create_district_heating_system()\n", - " fs.to_netcdf(data_file)\n", - "\n", - "# Load the district heating system\n", - "flow_system = fx.FlowSystem.from_netcdf(data_file)\n", + "flow_system = create_district_heating_system()\n", "\n", "timesteps = flow_system.timesteps\n", - "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 96:.0f} days at 15-min resolution)')\n", + "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 24:.0f} days at hourly resolution)')\n", "print(f'Components: {list(flow_system.components.keys())}')" ] }, diff --git a/docs/notebooks/08c2-clustering-storage-modes.ipynb b/docs/notebooks/08c2-clustering-storage-modes.ipynb index 163cf8729..7a760edc3 100644 --- a/docs/notebooks/08c2-clustering-storage-modes.ipynb +++ b/docs/notebooks/08c2-clustering-storage-modes.ipynb @@ -27,9 +27,7 @@ "outputs": [], "source": [ "import timeit\n", - "from pathlib 
import Path\n", "\n", - "import numpy as np\n", "import pandas as pd\n", "import plotly.graph_objects as go\n", "from plotly.subplots import make_subplots\n", @@ -44,7 +42,7 @@ "id": "2", "metadata": {}, "source": [ - "## Load the Seasonal Storage System\n", + "## Create the Seasonal Storage System\n", "\n", "We use a solar thermal + seasonal pit storage system with a full year of data.\n", "This is ideal for demonstrating storage modes because:\n", @@ -61,16 +59,10 @@ "metadata": {}, "outputs": [], "source": [ - "# Generate example data if not present\n", - "data_file = Path('data/seasonal_storage_system.nc4')\n", - "if not data_file.exists():\n", - " from data.generate_example_systems import create_seasonal_storage_system\n", + "from data.generate_example_systems import create_seasonal_storage_system\n", "\n", - " fs = create_seasonal_storage_system()\n", - " fs.to_netcdf(data_file)\n", - "\n", - "# Load the seasonal storage system\n", - "flow_system = fx.FlowSystem.from_netcdf(data_file)\n", + "flow_system = create_seasonal_storage_system()\n", + "flow_system.connect_and_transform() # Align all data as xarray\n", "\n", "timesteps = flow_system.timesteps\n", "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 24:.0f} days)')\n", @@ -88,14 +80,19 @@ "solar_profile = flow_system.components['SolarThermal'].outputs[0].fixed_relative_profile\n", "heat_demand = flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile\n", "\n", - "# Daily average for clearer visualization\n", - "solar_daily = solar_profile.values.reshape(-1, 24).mean(axis=1)\n", - "demand_daily = heat_demand.values.reshape(-1, 24).mean(axis=1)\n", - "days = np.arange(len(solar_daily))\n", + "# Compute daily averages using xarray resample\n", + "solar_daily = solar_profile.resample(time='1D').mean()\n", + "demand_daily = heat_demand.resample(time='1D').mean()\n", "\n", "fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.1)\n", - "fig.add_trace(go.Scatter(x=days, y=solar_daily, name='Solar (daily avg)', fill='tozeroy'), row=1, col=1)\n", - "fig.add_trace(go.Scatter(x=days, y=demand_daily, name='Heat Demand (daily avg)', fill='tozeroy'), row=2, col=1)\n", + "fig.add_trace(\n", + " go.Scatter(x=solar_daily.time.values, y=solar_daily.values, name='Solar (daily avg)', fill='tozeroy'), row=1, col=1\n", + ")\n", + "fig.add_trace(\n", + " go.Scatter(x=demand_daily.time.values, y=demand_daily.values, name='Heat Demand (daily avg)', fill='tozeroy'),\n", + " row=2,\n", + " col=1,\n", + ")\n", "fig.update_layout(height=400, title='Seasonal Mismatch: Solar vs Heat Demand')\n", "fig.update_xaxes(title_text='Day of Year', row=2, col=1)\n", "fig.update_yaxes(title_text='Solar Profile', row=1, col=1)\n", diff --git a/docs/notebooks/08d-clustering-multiperiod.ipynb b/docs/notebooks/08d-clustering-multiperiod.ipynb index 84ff468ea..016d9555b 100644 --- a/docs/notebooks/08d-clustering-multiperiod.ipynb +++ b/docs/notebooks/08d-clustering-multiperiod.ipynb @@ -28,7 +28,6 @@ "outputs": [], "source": [ "import timeit\n", - "from pathlib import Path\n", "\n", "import numpy as np\n", "import pandas as pd\n", @@ -44,9 +43,9 @@ "id": "2", "metadata": {}, "source": [ - "## Load the Multi-Period System\n", + "## Create the Multi-Period System\n", "\n", - "We use a pre-built multi-period heating system with:\n", + "We use a multi-period heating system with:\n", "- **3 planning periods** (years 2024, 2025, 2026)\n", "- **2 scenarios** (high demand 30%, low demand 70%)\n", "- **2 weeks** at hourly 
resolution (336 timesteps)\n", @@ -62,16 +61,9 @@ "metadata": {}, "outputs": [], "source": [ - "# Generate example data if not present\n", - "data_file = Path('data/multiperiod_system.nc4')\n", - "if not data_file.exists():\n", - " from data.generate_example_systems import create_multiperiod_system\n", + "from data.generate_example_systems import create_multiperiod_system\n", "\n", - " fs = create_multiperiod_system()\n", - " fs.to_netcdf(data_file)\n", - "\n", - "# Load the multi-period system\n", - "flow_system = fx.FlowSystem.from_netcdf(data_file)\n", + "flow_system = create_multiperiod_system()\n", "\n", "print(f'Timesteps: {len(flow_system.timesteps)} ({len(flow_system.timesteps) // 24} days)')\n", "print(f'Periods: {list(flow_system.periods.values)}')\n", diff --git a/docs/notebooks/08e-clustering-internals.ipynb b/docs/notebooks/08e-clustering-internals.ipynb index a0ac80ca7..bbd54f05d 100644 --- a/docs/notebooks/08e-clustering-internals.ipynb +++ b/docs/notebooks/08e-clustering-internals.ipynb @@ -26,21 +26,14 @@ "metadata": {}, "outputs": [], "source": [ - "from pathlib import Path\n", + "from data.generate_example_systems import create_district_heating_system\n", "\n", "import flixopt as fx\n", "\n", "fx.CONFIG.notebook()\n", "\n", - "# Load the district heating system\n", - "data_file = Path('data/district_heating_system.nc4')\n", - "if not data_file.exists():\n", - " from data.generate_example_systems import create_district_heating_system\n", - "\n", - " fs = create_district_heating_system()\n", - " fs.to_netcdf(data_file)\n", - "\n", - "flow_system = fx.FlowSystem.from_netcdf(data_file)" + "# Create the district heating system\n", + "flow_system = create_district_heating_system()" ] }, { From 282594cfc4cab97b2ed33b22572c60c0091c6840 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 3 Jan 2026 13:23:31 +0100 Subject: [PATCH 177/191] Warning Handling Refactored --- flixopt/__init__.py | 29 ----------------------------- flixopt/transform_accessor.py | 6 +++++- pyproject.toml | 8 +------- 3 files changed, 6 insertions(+), 37 deletions(-) diff --git a/flixopt/__init__.py b/flixopt/__init__.py index d1a63a9c5..b384c8d23 100644 --- a/flixopt/__init__.py +++ b/flixopt/__init__.py @@ -3,7 +3,6 @@ """ import logging -import warnings from importlib.metadata import PackageNotFoundError, version try: @@ -70,31 +69,3 @@ logger = logging.getLogger('flixopt') logger.setLevel(logging.WARNING) logger.addHandler(logging.NullHandler()) - -# === Runtime warning suppression for third-party libraries === -# These warnings are from dependencies and cannot be fixed by end users. -# They are suppressed at runtime to provide a cleaner user experience. -# These filters match the test configuration in pyproject.toml for consistency. - -# tsam: Time series aggregation library -# - UserWarning: Informational message about minimal value constraints during clustering. -warnings.filterwarnings( - 'ignore', - category=UserWarning, - message='.*minimal value.*exceeds.*', - module='tsam.timeseriesaggregation', # More specific if possible -) -# TODO: Might be able to fix it in flixopt? - -# linopy: Linear optimization library -# - UserWarning: Coordinate mismatch warnings that don't affect functionality and are expected. 
-warnings.filterwarnings( - 'ignore', category=UserWarning, message='Coordinates across variables not equal', module='linopy' -) -# - FutureWarning: join parameter default will change in future versions -warnings.filterwarnings( - 'ignore', - category=FutureWarning, - message="In a future version of xarray the default value for join will change from join='outer' to join='exact'", - module='linopy', -) diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 93a1e2247..64aedd0fb 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -8,6 +8,7 @@ from __future__ import annotations import logging +import warnings from collections import defaultdict from typing import TYPE_CHECKING, Any, Literal @@ -705,7 +706,10 @@ def cluster( addPeakMax=time_series_for_high_peaks or [], addPeakMin=time_series_for_low_peaks or [], ) - tsam_agg.createTypicalPeriods() + # Suppress tsam warning about minimal value constraints (informational, not actionable) + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', category=UserWarning, message='.*minimal value.*exceeds.*') + tsam_agg.createTypicalPeriods() tsam_results[key] = tsam_agg cluster_orders[key] = tsam_agg.clusterOrder diff --git a/pyproject.toml b/pyproject.toml index 8c4749797..561f00f57 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -209,17 +209,11 @@ filterwarnings = [ "ignore:SegmentedResults is deprecated:DeprecationWarning:flixopt", "ignore:ClusteredOptimization is deprecated:DeprecationWarning:flixopt", - # === Treat flixopt warnings as errors (strict mode for our code) === + # === Treat most flixopt warnings as errors (strict mode for our code) === # This ensures we catch deprecations, future changes, and user warnings in our own code "error::DeprecationWarning:flixopt", "error::FutureWarning:flixopt", "error::UserWarning:flixopt", - - # === Third-party warnings (mirrored from __init__.py) === - "ignore:.*minimal value.*exceeds.*:UserWarning:tsam", - "ignore:Coordinates across variables not equal:UserWarning:linopy", - "ignore:.*join will change from join='outer' to join='exact'.*:FutureWarning:linopy", - "ignore:numpy\\.ndarray size changed:RuntimeWarning", "ignore:.*network visualization is still experimental.*:UserWarning:flixopt", ] From b17859810b17e945bf8e35a3f4db8d7b094954f3 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sat, 3 Jan 2026 19:32:15 +0100 Subject: [PATCH 178/191] Feature: Clustering IO, single selection, and weights API improvements (#549) * Enable selecting a single period/scenario * No selected_* tracking * Clustering IO * Clustering IO * Improve IO * Improve validation * Fix cluster weight IO * Fix cluster weights stuff * Fix cluster weights stuff * Refactor weights API: always normalize scenario weights (#547) * Add weights class * Add weights class * The Weights API is now used in the modeling equations: Changes made: 1. elements.py - Flow tracking: # Before: flow_hours = self.flow_rate * self._model.timestep_duration weighted_flow_hours = flow_hours * self._model.cluster_weight tracked_expression=weighted_flow_hours.sum(self._model.temporal_dims) # After: tracked_expression=self._model.weights.sum_temporal(self.flow_rate) 2. elements.py - Load factor total hours: # Before: total_hours = (self._model.timestep_duration * self._model.cluster_weight).sum(self._model.temporal_dims) # After: total_hours = self._model.weights.temporal.sum(self._model.weights.temporal_dims) 3. 
features.py - Status tracking:
# Before:
active_hours = self.status * self._model.timestep_duration
weighted_active_hours = active_hours * self._model.cluster_weight
tracked_expression=weighted_active_hours.sum(self._model.temporal_dims)
# After:
tracked_expression=self._model.weights.sum_temporal(self.status)
4. features.py - Temporal effects summing (only needs cluster weight since already per-timestep):
# Before:
weighted_per_timestep = self.total_per_timestep * self._model.cluster_weight
temporal_dims = [d for d in self.total_per_timestep.dims if d not in ('period', 'scenario')]
# After:
weighted_per_timestep = self.total_per_timestep * self._model.weights.cluster
self._eq_total.lhs -= weighted_per_timestep.sum(dim=self._model.weights.temporal_dims)

* Minor fixes in test

* Improve weighting system and normalization of scenario weights

* Update CHANGELOG.md

* 1. ClusterStructure.n_clusters naming - Added explicit rename (matching n_representatives pattern) to avoid "None" variable names in serialized datasets
2. original_timesteps validation - Added explicit KeyError with actionable message when original_time coordinate is missing
3. active_hours bounds simplified - Passing total_hours DataArray directly instead of .max().item() fallback, allowing proper per-(period, scenario) bounds
---
 CHANGELOG.md                               |  19 +-
 flixopt/__init__.py                        |   2 -
 flixopt/clustering/base.py                 | 166 +++++++++++--
 flixopt/components.py                      |   6 +-
 flixopt/core.py                            |   2 +-
 flixopt/elements.py                        |  15 +-
 flixopt/features.py                        |  22 +-
 flixopt/flow_system.py                     | 274 +++++++++++++++++-----
 flixopt/optimization.py                    |   9 +-
 flixopt/optimize_accessor.py               |  17 +-
 flixopt/statistics_accessor.py             |   2 +-
 flixopt/structure.py                       | 154 +++---------
 flixopt/transform_accessor.py              |  76 ++++--
 tests/deprecated/test_scenarios.py         |  13 +-
 tests/test_cluster_reduce_expand.py        |  15 +-
 tests/test_clustering/test_base.py         |   1 -
 tests/test_clustering/test_integration.py  | 132 +++++++----
 tests/test_clustering_io.py                | 241 +++++++++++++++++++
 tests/test_io_conversion.py                |   5 +
 tests/test_scenarios.py                    |  13 +-
 tests/test_sel_isel_single_selection.py    | 193 +++++++++++++++
 21 files changed, 1046 insertions(+), 331 deletions(-)
 create mode 100644 tests/test_clustering_io.py
 create mode 100644 tests/test_sel_isel_single_selection.py

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 20f2de7d7..f60497d80 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -53,7 +53,7 @@ Until here -->
 
 ## [5.1.0] - Upcoming
 
-**Summary**: Time-series clustering for faster optimization with configurable storage behavior across typical periods.
+**Summary**: Time-series clustering for faster optimization with configurable storage behavior across typical periods. Improved weights API with always-normalized scenario weights.
 
 ### ✨ Added
 
@@ -121,6 +121,23 @@ charge_state = fs_expanded.solution['SeasonalPit|charge_state']
 Use `'cyclic'` for short-term storage like batteries or hot water tanks where only daily patterns matter. Use `'independent'` for quick estimates when storage behavior isn't critical.
+### 💥 Breaking Changes + +- `FlowSystem.scenario_weights` are now always normalized to sum to 1 when set (including after `.sel()` subsetting) + +### ♻️ Changed + +- `FlowSystem.weights` returns `dict[str, xr.DataArray]` (unit weights instead of `1.0` float fallback) +- `FlowSystemDimensions` type now includes `'cluster'` + +### 🗑️ Deprecated + +- `normalize_weights` parameter in `create_model()`, `build_model()`, `optimize()` + +### 🐛 Fixed + +- `temporal_weight` and `sum_temporal()` now use consistent implementation + ### 👷 Development **New Test Suites for Clustering**: diff --git a/flixopt/__init__.py b/flixopt/__init__.py index b384c8d23..1089bf743 100644 --- a/flixopt/__init__.py +++ b/flixopt/__init__.py @@ -30,7 +30,6 @@ from .interface import InvestParameters, Piece, Piecewise, PiecewiseConversion, PiecewiseEffects, StatusParameters from .optimization import Optimization, SegmentedOptimization from .plot_result import PlotResult -from .structure import TimeSeriesWeights __all__ = [ 'TimeSeriesData', @@ -57,7 +56,6 @@ 'PiecewiseConversion', 'PiecewiseEffects', 'PlotResult', - 'TimeSeriesWeights', 'clustering', 'plotting', 'results', diff --git a/flixopt/clustering/base.py b/flixopt/clustering/base.py index 2c442e3d5..fadf28247 100644 --- a/flixopt/clustering/base.py +++ b/flixopt/clustering/base.py @@ -21,11 +21,11 @@ from typing import TYPE_CHECKING, Any import numpy as np +import pandas as pd import xarray as xr if TYPE_CHECKING: from ..color_processing import ColorType - from ..flow_system import FlowSystem from ..plot_result import PlotResult from ..statistics_accessor import SelectType @@ -98,6 +98,31 @@ def __repr__(self) -> str: f')' ) + def _create_reference_structure(self) -> tuple[dict, dict[str, xr.DataArray]]: + """Create reference structure for serialization.""" + ref = {'__class__': self.__class__.__name__} + arrays = {} + + # Store DataArrays with references + arrays[str(self.cluster_order.name)] = self.cluster_order + ref['cluster_order'] = f':::{self.cluster_order.name}' + + arrays[str(self.cluster_occurrences.name)] = self.cluster_occurrences + ref['cluster_occurrences'] = f':::{self.cluster_occurrences.name}' + + # Store scalar values + if isinstance(self.n_clusters, xr.DataArray): + n_clusters_name = self.n_clusters.name or 'n_clusters' + self.n_clusters = self.n_clusters.rename(n_clusters_name) + arrays[n_clusters_name] = self.n_clusters + ref['n_clusters'] = f':::{n_clusters_name}' + else: + ref['n_clusters'] = int(self.n_clusters) + + ref['timesteps_per_cluster'] = self.timesteps_per_cluster + + return ref, arrays + @property def n_original_periods(self) -> int: """Number of original periods (before clustering).""" @@ -281,10 +306,9 @@ def __post_init__(self): self.timestep_mapping = self.timestep_mapping.rename('timestep_mapping') # Ensure representative_weights is a DataArray + # Can be (cluster, time) for 2D structure or (time,) for flat structure if not isinstance(self.representative_weights, xr.DataArray): - self.representative_weights = xr.DataArray( - self.representative_weights, dims=['time'], name='representative_weights' - ) + self.representative_weights = xr.DataArray(self.representative_weights, name='representative_weights') elif self.representative_weights.name is None: self.representative_weights = self.representative_weights.rename('representative_weights') @@ -304,6 +328,37 @@ def __repr__(self) -> str: f')' ) + def _create_reference_structure(self) -> tuple[dict, dict[str, xr.DataArray]]: + """Create reference structure for 
serialization.""" + ref = {'__class__': self.__class__.__name__} + arrays = {} + + # Store DataArrays with references + arrays[str(self.timestep_mapping.name)] = self.timestep_mapping + ref['timestep_mapping'] = f':::{self.timestep_mapping.name}' + + arrays[str(self.representative_weights.name)] = self.representative_weights + ref['representative_weights'] = f':::{self.representative_weights.name}' + + # Store scalar values + if isinstance(self.n_representatives, xr.DataArray): + n_rep_name = self.n_representatives.name or 'n_representatives' + self.n_representatives = self.n_representatives.rename(n_rep_name) + arrays[n_rep_name] = self.n_representatives + ref['n_representatives'] = f':::{n_rep_name}' + else: + ref['n_representatives'] = int(self.n_representatives) + + # Store nested ClusterStructure if present + if self.cluster_structure is not None: + cs_ref, cs_arrays = self.cluster_structure._create_reference_structure() + ref['cluster_structure'] = cs_ref + arrays.update(cs_arrays) + + # Skip aggregated_data and original_data - not needed for serialization + + return ref, arrays + @property def n_original_timesteps(self) -> int: """Number of original timesteps (before aggregation).""" @@ -460,24 +515,50 @@ def validate(self) -> None: if max_idx >= n_rep: raise ValueError(f'timestep_mapping contains index {max_idx} but n_representatives is {n_rep}') - # Check weights length matches n_representatives - if len(self.representative_weights) != n_rep: - raise ValueError( - f'representative_weights has {len(self.representative_weights)} elements ' - f'but n_representatives is {n_rep}' - ) + # Check weights dimensions + # representative_weights should have (cluster,) dimension with n_clusters elements + # (plus optional period/scenario dimensions) + if self.cluster_structure is not None: + n_clusters = self.cluster_structure.n_clusters + if 'cluster' in self.representative_weights.dims: + weights_n_clusters = self.representative_weights.sizes['cluster'] + if weights_n_clusters != n_clusters: + raise ValueError( + f'representative_weights has {weights_n_clusters} clusters ' + f'but cluster_structure has {n_clusters}' + ) - # Check weights sum roughly equals original timesteps - weight_sum = float(self.representative_weights.sum().values) - n_original = self.n_original_timesteps - if abs(weight_sum - n_original) > 1e-6: - # Warning only - some aggregation methods may not preserve this exactly - import warnings + # Check weights sum roughly equals number of original periods + # (each weight is how many original periods that cluster represents) + # Sum should be checked per period/scenario slice, not across all dimensions + if self.cluster_structure is not None: + n_original_periods = self.cluster_structure.n_original_periods + # Sum over cluster dimension only (keep period/scenario if present) + weight_sum_per_slice = self.representative_weights.sum(dim='cluster') + # Check each slice + if weight_sum_per_slice.size == 1: + # Simple case: no period/scenario + weight_sum = float(weight_sum_per_slice.values) + if abs(weight_sum - n_original_periods) > 1e-6: + import warnings + + warnings.warn( + f'representative_weights sum ({weight_sum}) does not match ' + f'n_original_periods ({n_original_periods})', + stacklevel=2, + ) + else: + # Multi-dimensional: check each slice + for val in weight_sum_per_slice.values.flat: + if abs(float(val) - n_original_periods) > 1e-6: + import warnings - warnings.warn( - f'representative_weights sum ({weight_sum}) does not match n_original_timesteps ({n_original})', - 
stacklevel=2, - ) + warnings.warn( + f'representative_weights sum per slice ({float(val)}) does not match ' + f'n_original_periods ({n_original_periods})', + stacklevel=2, + ) + break # Only warn once class ClusteringPlotAccessor: @@ -911,7 +992,6 @@ class Clustering: Attributes: result: The ClusterResult from the aggregation backend. - original_flow_system: Reference to the FlowSystem before aggregation. backend_name: Name of the aggregation backend used (e.g., 'tsam', 'manual'). Example: @@ -923,9 +1003,23 @@ class Clustering: """ result: ClusterResult - original_flow_system: FlowSystem # FlowSystem - avoid circular import backend_name: str = 'unknown' + def _create_reference_structure(self) -> tuple[dict, dict[str, xr.DataArray]]: + """Create reference structure for serialization.""" + ref = {'__class__': self.__class__.__name__} + arrays = {} + + # Store nested ClusterResult + result_ref, result_arrays = self.result._create_reference_structure() + ref['result'] = result_ref + arrays.update(result_arrays) + + # Store scalar values + ref['backend_name'] = self.backend_name + + return ref, arrays + def __repr__(self) -> str: cs = self.result.cluster_structure if cs is not None: @@ -1024,6 +1118,22 @@ def cluster_start_positions(self) -> np.ndarray: n_timesteps = self.n_clusters * self.timesteps_per_period return np.arange(0, n_timesteps, self.timesteps_per_period) + @property + def original_timesteps(self) -> pd.DatetimeIndex: + """Original timesteps before clustering. + + Derived from the 'original_time' coordinate of timestep_mapping. + + Raises: + KeyError: If 'original_time' coordinate is missing from timestep_mapping. + """ + if 'original_time' not in self.result.timestep_mapping.coords: + raise KeyError( + "timestep_mapping is missing 'original_time' coordinate. " + 'This may indicate corrupted or incompatible clustering results.' + ) + return pd.DatetimeIndex(self.result.timestep_mapping.coords['original_time'].values) + def create_cluster_structure_from_mapping( timestep_mapping: xr.DataArray, @@ -1073,3 +1183,15 @@ def create_cluster_structure_from_mapping( n_clusters=n_clusters, timesteps_per_cluster=timesteps_per_cluster, ) + + +def _register_clustering_classes(): + """Register clustering classes for IO. + + Called from flow_system.py after all imports are complete to avoid circular imports. + """ + from ..structure import CLASS_REGISTRY + + CLASS_REGISTRY['ClusterStructure'] = ClusterStructure + CLASS_REGISTRY['ClusterResult'] = ClusterResult + CLASS_REGISTRY['Clustering'] = Clustering diff --git a/flixopt/components.py b/flixopt/components.py index 0f2a6077e..390fc6f02 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -1257,10 +1257,12 @@ def _absolute_charge_state_bounds(self) -> tuple[xr.DataArray, xr.DataArray]: return -np.inf, np.inf elif isinstance(self.element.capacity_in_flow_hours, InvestParameters): cap_max = self.element.capacity_in_flow_hours.maximum_or_fixed_size * relative_upper_bound - return -cap_max, cap_max + # Adding 0.0 converts -0.0 to 0.0 (linopy LP writer bug workaround) + return -cap_max + 0.0, cap_max + 0.0 else: cap = self.element.capacity_in_flow_hours * relative_upper_bound - return -cap, cap + # Adding 0.0 converts -0.0 to 0.0 (linopy LP writer bug workaround) + return -cap + 0.0, cap + 0.0 def _do_modeling(self): """Create storage model with inter-cluster linking constraints. 
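+
+        A sketch of the assumed linking (illustrative; hourly steps as in the
+        tests): with c = cluster_order[p] and T = timesteps_per_cluster,
+            SOC_boundary[p + 1] == SOC_boundary[p] * (1 - relative_loss_per_hour) ** T + delta_E_end[c]
+        and additionally SOC_boundary[0] == SOC_boundary[-1] for 'intercluster_cyclic'.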
diff --git a/flixopt/core.py b/flixopt/core.py index fdcab029b..3d456fff1 100644 --- a/flixopt/core.py +++ b/flixopt/core.py @@ -15,7 +15,7 @@ logger = logging.getLogger('flixopt') -FlowSystemDimensions = Literal['time', 'period', 'scenario'] +FlowSystemDimensions = Literal['time', 'cluster', 'period', 'scenario'] """Possible dimensions of a FlowSystem.""" diff --git a/flixopt/elements.py b/flixopt/elements.py index ba2b72f80..0cee53738 100644 --- a/flixopt/elements.py +++ b/flixopt/elements.py @@ -677,14 +677,10 @@ def _do_modeling(self): self._constraint_flow_rate() # Total flow hours tracking (per period) - # Sum over all temporal dimensions (time, and cluster if present) - weighted_flow = self.flow_rate * self._model.aggregation_weight - # Get temporal_dims from aggregation_weight (not weighted_flow which has linopy's _term dim) - temporal_dims = [d for d in self._model.aggregation_weight.dims if d not in ('period', 'scenario')] ModelingPrimitives.expression_tracking_variable( model=self, name=f'{self.label_full}|total_flow_hours', - tracked_expression=weighted_flow.sum(temporal_dims), + tracked_expression=self._model.sum_temporal(self.flow_rate), bounds=( self.element.flow_hours_min if self.element.flow_hours_min is not None else 0, self.element.flow_hours_max if self.element.flow_hours_max is not None else None, @@ -841,9 +837,8 @@ def _create_bounds_for_load_factor(self): # Get the size (either from element or investment) size = self.investment.size if self.with_investment else self.element.size - # Sum over all temporal dimensions (time, and cluster if present) - temporal_dims = [d for d in self._model.aggregation_weight.dims if d not in ('period', 'scenario')] - total_hours = self._model.aggregation_weight.sum(temporal_dims) + # Total hours in the period (sum of temporal weights) + total_hours = self._model.temporal_weight.sum(self._model.temporal_dims) # Maximum load factor constraint if self.element.load_factor_max is not None: @@ -959,9 +954,7 @@ def _do_modeling(self): # Add virtual supply/demand to balance and penalty if needed if self.element.allows_imbalance: - imbalance_penalty = np.multiply( - self._model.aggregation_weight, self.element.imbalance_penalty_per_flow_hour - ) + imbalance_penalty = self.element.imbalance_penalty_per_flow_hour * self._model.timestep_duration self.virtual_supply = self.add_variables( lower=0, coords=self._model.get_coords(), short_name='virtual_supply' diff --git a/flixopt/features.py b/flixopt/features.py index e0a018a7f..289640ddd 100644 --- a/flixopt/features.py +++ b/flixopt/features.py @@ -196,22 +196,14 @@ def _do_modeling(self): inactive = self.add_variables(binary=True, short_name='inactive', coords=self._model.get_coords()) self.add_constraints(self.status + inactive == 1, short_name='complementary') - # 3. Total duration tracking using existing pattern - # Sum over all temporal dimensions (time, and cluster if present) - weighted_status = self.status * self._model.aggregation_weight - # Get temporal_dims from aggregation_weight (not weighted_status which has linopy's _term dim) - temporal_dims = [d for d in self._model.aggregation_weight.dims if d not in ('period', 'scenario')] - agg_weight_sum = self._model.aggregation_weight.sum(temporal_dims) + # 3. 
Total duration tracking + total_hours = self._model.temporal_weight.sum(self._model.temporal_dims) ModelingPrimitives.expression_tracking_variable( self, - tracked_expression=weighted_status.sum(temporal_dims), + tracked_expression=self._model.sum_temporal(self.status), bounds=( self.parameters.active_hours_min if self.parameters.active_hours_min is not None else 0, - self.parameters.active_hours_max - if self.parameters.active_hours_max is not None - else agg_weight_sum.max().item() - if hasattr(agg_weight_sum, 'max') - else agg_weight_sum, + self.parameters.active_hours_max if self.parameters.active_hours_max is not None else total_hours, ), short_name='active_hours', coords=['period', 'scenario'], @@ -631,10 +623,8 @@ def _do_modeling(self): # Add it to the total (cluster_weight handles cluster representation, defaults to 1.0) # Sum over all temporal dimensions (time, and cluster if present) - weighted_per_timestep = self.total_per_timestep * self._model.cluster_weight - # Get temporal_dims from total_per_timestep (linopy Variable) - its coords are the actual dims - temporal_dims = [d for d in self.total_per_timestep.dims if d not in ('period', 'scenario')] - self._eq_total.lhs -= weighted_per_timestep.sum(dim=temporal_dims) + weighted_per_timestep = self.total_per_timestep * self._model.weights.get('cluster', 1.0) + self._eq_total.lhs -= weighted_per_timestep.sum(dim=self._model.temporal_dims) def add_share( self, diff --git a/flixopt/flow_system.py b/flixopt/flow_system.py index c10a1defb..7e12029ed 100644 --- a/flixopt/flow_system.py +++ b/flixopt/flow_system.py @@ -40,11 +40,15 @@ from .clustering import Clustering from .solvers import _Solver - from .structure import TimeSeriesWeights from .types import Effect_TPS, Numeric_S, Numeric_TPS, NumericOrBool from .carrier import Carrier, CarrierContainer +# Register clustering classes for IO (deferred to avoid circular imports) +from .clustering.base import _register_clustering_classes + +_register_clustering_classes() + logger = logging.getLogger('flixopt') @@ -69,10 +73,10 @@ class FlowSystem(Interface, CompositeContainerMixin[Element]): scenario_weights: The weights of each scenario. If None, all scenarios have the same weight (normalized to 1). Period weights are always computed internally from the period index (like timestep_duration for time). The final `weights` array (accessible via `flow_system.model.objective_weights`) is computed as period_weights × normalized_scenario_weights, with normalization applied to the scenario weights by default. - cluster_weight: Weight for each timestep representing cluster representation count. - If None (default), all timesteps have weight 1.0. Used by cluster() to specify - how many original timesteps each cluster represents. Combined with timestep_duration - via aggregation_weight for proper time aggregation in clustered models. + cluster_weight: Weight for each cluster. + If None (default), all clusters have weight 1.0. Used by cluster() to specify + how many original timesteps each cluster represents. Multiply with timestep_duration + for proper time aggregation in clustered models. scenario_independent_sizes: Controls whether investment sizes are equalized across scenarios. 
            - True: All sizes are shared/equalized across scenarios
            - False: All sizes are optimized separately per scenario
@@ -201,10 +205,13 @@ def __init__(
         # Cluster weight for cluster() optimization (default 1.0)
         # Represents how many original timesteps each cluster represents
         # May have period/scenario dimensions if cluster() was used with those
-        self.cluster_weight = self.fit_to_model_coords(
-            'cluster_weight',
-            np.ones(len(self.timesteps)) if cluster_weight is None else cluster_weight,
-            dims=['time', 'period', 'scenario'],  # Gracefully ignores dims not present
+        self.cluster_weight: xr.DataArray | None = (
+            self.fit_to_model_coords(
+                'cluster_weight',
+                cluster_weight,
+            )
+            if cluster_weight is not None
+            else None
         )

         self.scenario_weights = scenario_weights  # Use setter
@@ -502,6 +509,9 @@ def _update_period_metadata(
         period index. This ensures period metadata stays synchronized with the
         actual periods after operations like selection.

+        When the period dimension is dropped (single value selected), this method
+        removes the scalar coordinate, period_weights DataArray, and cleans up attributes.
+
         This is analogous to _update_time_metadata() for time-related metadata.

         Args:
@@ -513,7 +523,16 @@ def _update_period_metadata(
             The same dataset with updated period-related attributes and data variables
         """
         new_period_index = dataset.indexes.get('period')
-        if new_period_index is not None and len(new_period_index) >= 1:
+
+        if new_period_index is None:
+            # Period dimension was dropped (single value selected)
+            if 'period' in dataset.coords:
+                dataset = dataset.drop_vars('period')
+            dataset = dataset.drop_vars(['period_weights'], errors='ignore')
+            dataset.attrs.pop('weight_of_last_period', None)
+            return dataset
+
+        if len(new_period_index) >= 1:
             # Reuse stored weight_of_last_period when not explicitly overridden.
             # This is essential for single-period subsets where it cannot be inferred from intervals.
             if weight_of_last_period is None:
@@ -542,6 +561,9 @@ def _update_scenario_metadata(cls, dataset: xr.Dataset) -> xr.Dataset:
         Recomputes or removes scenario weights. This ensures scenario metadata
         stays synchronized with the actual scenarios after operations like selection.

+        When the scenario dimension is dropped (single value selected), this method
+        removes the scalar coordinate, scenario_weights DataArray, and cleans up attributes.
+
         This is analogous to _update_period_metadata() for period-related metadata.
Args: @@ -551,7 +573,16 @@ def _update_scenario_metadata(cls, dataset: xr.Dataset) -> xr.Dataset: The same dataset with updated scenario-related attributes and data variables """ new_scenario_index = dataset.indexes.get('scenario') - if new_scenario_index is None or len(new_scenario_index) <= 1: + + if new_scenario_index is None: + # Scenario dimension was dropped (single value selected) + if 'scenario' in dataset.coords: + dataset = dataset.drop_vars('scenario') + dataset = dataset.drop_vars(['scenario_weights'], errors='ignore') + dataset.attrs.pop('scenario_weights', None) + return dataset + + if len(new_scenario_index) <= 1: dataset.attrs.pop('scenario_weights', None) return dataset @@ -645,13 +676,21 @@ def to_dataset(self, include_solution: bool = True) -> xr.Dataset: carriers_structure[name] = carrier_ref ds.attrs['carriers'] = json.dumps(carriers_structure) - # Include cluster info for clustered FlowSystems + # Include cluster info for clustered FlowSystems (old structure) if self.clusters is not None: ds.attrs['is_clustered'] = True ds.attrs['n_clusters'] = len(self.clusters) ds.attrs['timesteps_per_cluster'] = len(self.timesteps) ds.attrs['timestep_duration'] = float(self.timestep_duration.mean()) + # Serialize Clustering object if present (new structure) + if self.clustering is not None: + clustering_ref, clustering_arrays = self.clustering._create_reference_structure() + # Add clustering arrays with prefix + for name, arr in clustering_arrays.items(): + ds[f'clustering|{name}'] = arr + ds.attrs['clustering'] = json.dumps(clustering_ref) + # Add version info ds.attrs['flixopt_version'] = __version__ @@ -708,6 +747,11 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: else None ) + # Resolve scenario_weights only if scenario dimension exists + scenario_weights = None + if ds.indexes.get('scenario') is not None and 'scenario_weights' in reference_structure: + scenario_weights = cls._resolve_dataarray_reference(reference_structure['scenario_weights'], arrays_dict) + # Create FlowSystem instance with constructor parameters flow_system = cls( timesteps=ds.indexes['time'], @@ -717,9 +761,7 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: hours_of_last_timestep=reference_structure.get('hours_of_last_timestep'), hours_of_previous_timesteps=reference_structure.get('hours_of_previous_timesteps'), weight_of_last_period=reference_structure.get('weight_of_last_period'), - scenario_weights=cls._resolve_dataarray_reference(reference_structure['scenario_weights'], arrays_dict) - if 'scenario_weights' in reference_structure - else None, + scenario_weights=scenario_weights, cluster_weight=cluster_weight_for_constructor, scenario_independent_sizes=reference_structure.get('scenario_independent_sizes', True), scenario_independent_flow_rates=reference_structure.get('scenario_independent_flow_rates', False), @@ -765,6 +807,19 @@ def from_dataset(cls, ds: xr.Dataset) -> FlowSystem: carrier = cls._resolve_reference_structure(carrier_data, {}) flow_system._carriers.add(carrier) + # Restore Clustering object if present + if 'clustering' in reference_structure: + clustering_structure = json.loads(reference_structure['clustering']) + # Collect clustering arrays (prefixed with 'clustering|') + clustering_arrays = {} + for name, arr in ds.data_vars.items(): + if name.startswith('clustering|'): + # Remove 'clustering|' prefix (11 chars) + arr_name = name[11:] + clustering_arrays[arr_name] = arr + clustering = cls._resolve_reference_structure(clustering_structure, clustering_arrays) + 
flow_system.clustering = clustering + # Reconnect network to populate bus inputs/outputs (not stored in NetCDF). flow_system.connect_and_transform() @@ -1061,6 +1116,7 @@ def connect_and_transform(self): self._connect_network() self._register_missing_carriers() self._assign_element_colors() + for element in chain(self.components.values(), self.effects.values(), self.buses.values()): element.transform_data() @@ -1274,22 +1330,29 @@ def flow_carriers(self) -> dict[str, str]: return self._flow_carriers - def create_model(self, normalize_weights: bool = True) -> FlowSystemModel: + def create_model(self, normalize_weights: bool | None = None) -> FlowSystemModel: """ Create a linopy model from the FlowSystem. Args: - normalize_weights: Whether to automatically normalize the weights (periods and scenarios) to sum up to 1 when solving. + normalize_weights: Deprecated. Scenario weights are now always normalized in FlowSystem. """ + if normalize_weights is not None: + warnings.warn( + f'\n\nnormalize_weights parameter is deprecated and will be removed in {DEPRECATION_REMOVAL_VERSION}. ' + 'Scenario weights are now always normalized when set on FlowSystem.\n', + DeprecationWarning, + stacklevel=2, + ) if not self.connected_and_transformed: raise RuntimeError( 'FlowSystem is not connected_and_transformed. Call FlowSystem.connect_and_transform() first.' ) # System integrity was already validated in connect_and_transform() - self.model = FlowSystemModel(self, normalize_weights) + self.model = FlowSystemModel(self) return self.model - def build_model(self, normalize_weights: bool = True) -> FlowSystem: + def build_model(self, normalize_weights: bool | None = None) -> FlowSystem: """ Build the optimization model for this FlowSystem. @@ -1303,7 +1366,7 @@ def build_model(self, normalize_weights: bool = True) -> FlowSystem: before solving. Args: - normalize_weights: Whether to normalize scenario/period weights to sum to 1. + normalize_weights: Deprecated. Scenario weights are now always normalized in FlowSystem. Returns: Self, for method chaining. @@ -1313,8 +1376,15 @@ def build_model(self, normalize_weights: bool = True) -> FlowSystem: >>> print(flow_system.model.variables) # Inspect variables before solving >>> flow_system.solve(solver) """ + if normalize_weights is not None: + warnings.warn( + f'\n\nnormalize_weights parameter is deprecated and will be removed in {DEPRECATION_REMOVAL_VERSION}. ' + 'Scenario weights are now always normalized when set on FlowSystem.\n', + DeprecationWarning, + stacklevel=2, + ) self.connect_and_transform() - self.create_model(normalize_weights) + self.create_model() self.model.do_modeling() @@ -1862,27 +1932,85 @@ def storages(self) -> ElementContainer[Storage]: self._storages_cache = ElementContainer(storages, element_type_name='storages', truncate_repr=10) return self._storages_cache + @property + def dims(self) -> list[str]: + """Active dimension names. + + Returns: + List of active dimension names in order. + + Example: + >>> fs.dims + ['time'] # simple case + >>> fs_clustered.dims + ['cluster', 'time', 'period', 'scenario'] # full case + """ + result = [] + if self.clusters is not None: + result.append('cluster') + result.append('time') + if self.periods is not None: + result.append('period') + if self.scenarios is not None: + result.append('scenario') + return result + + @property + def indexes(self) -> dict[str, pd.Index]: + """Indexes for active dimensions. + + Returns: + Dict mapping dimension names to pandas Index objects. 
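+            Keys and their order match :attr:`dims`; 'cluster' is present only
+            for clustered systems.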
+ + Example: + >>> fs.indexes['time'] + DatetimeIndex(['2024-01-01', ...], dtype='datetime64[ns]', name='time') + """ + result: dict[str, pd.Index] = {} + if self.clusters is not None: + result['cluster'] = self.clusters + result['time'] = self.timesteps + if self.periods is not None: + result['period'] = self.periods + if self.scenarios is not None: + result['scenario'] = self.scenarios + return result + + @property + def temporal_dims(self) -> list[str]: + """Temporal dimensions for summing over time. + + Returns ['time', 'cluster'] for clustered systems, ['time'] otherwise. + """ + if self.clusters is not None: + return ['time', 'cluster'] + return ['time'] + + @property + def temporal_weight(self) -> xr.DataArray: + """Combined temporal weight (timestep_duration × cluster_weight). + + Use for converting rates to totals before summing. + Note: cluster_weight is used even without a clusters dimension. + """ + # Use cluster_weight directly if set, otherwise check weights dict, fallback to 1.0 + cluster_weight = self.weights.get('cluster', self.cluster_weight if self.cluster_weight is not None else 1.0) + return self.weights['time'] * cluster_weight + @property def coords(self) -> dict[FlowSystemDimensions, pd.Index]: """Active coordinates for variable creation. + .. deprecated:: + Use :attr:`indexes` instead. + Returns a dict of dimension names to coordinate arrays. When clustered, includes 'cluster' dimension before 'time'. Returns: Dict mapping dimension names to coordinate arrays. """ - active_coords: dict[str, pd.Index] = {} - - if self.clusters is not None: - active_coords['cluster'] = self.clusters - active_coords['time'] = self.timesteps - - if self.periods is not None: - active_coords['period'] = self.periods - if self.scenarios is not None: - active_coords['scenario'] = self.scenarios - return active_coords + return self.indexes @property def _use_true_cluster_dims(self) -> bool: @@ -1928,14 +2056,15 @@ def scenario_weights(self) -> xr.DataArray | None: @scenario_weights.setter def scenario_weights(self, value: Numeric_S | None) -> None: """ - Set scenario weights. + Set scenario weights (always normalized to sum to 1). Args: - value: Scenario weights to set (will be converted to DataArray with 'scenario' dimension) - or None to clear weights. + value: Scenario weights to set (will be converted to DataArray with 'scenario' dimension + and normalized to sum to 1), or None to clear weights. Raises: ValueError: If value is not None and no scenarios are defined in the FlowSystem. + ValueError: If weights sum to zero (cannot normalize). """ if value is None: self._scenario_weights = None @@ -1947,48 +2076,65 @@ def scenario_weights(self, value: Numeric_S | None) -> None: 'Either define scenarios in FlowSystem(scenarios=...) or set scenario_weights to None.' ) - self._scenario_weights = self.fit_to_model_coords('scenario_weights', value, dims=['scenario']) + weights = self.fit_to_model_coords('scenario_weights', value, dims=['scenario']) - @property - def weights(self) -> TimeSeriesWeights: - """Unified weighting system for time series aggregation. + # Normalize to sum to 1 + norm = weights.sum('scenario') + if np.isclose(norm, 0.0).any(): + raise ValueError('scenario_weights sum to 0; cannot normalize.') + self._scenario_weights = weights / norm - Returns a TimeSeriesWeights object providing a clean, unified interface - for all weight types used in flixopt. This is the recommended way to - access weights for new code (PyPSA-inspired design). 
+ def _unit_weight(self, dim: str) -> xr.DataArray: + """Create a unit weight DataArray (all 1.0) for a dimension.""" + index = self.indexes[dim] + return xr.DataArray( + np.ones(len(index), dtype=float), + coords={dim: index}, + dims=[dim], + name=f'{dim}_weight', + ) - The temporal weight combines timestep_duration and cluster_weight, - which is the proper weight for summing over time. + @property + def weights(self) -> dict[str, xr.DataArray]: + """Weights for active dimensions (unit weights if not explicitly set). Returns: - TimeSeriesWeights with temporal, period, and scenario weights. + Dict mapping dimension names to weight DataArrays. + Keys match :attr:`dims` and :attr:`indexes`. Example: - >>> weights = flow_system.weights - >>> weighted_total = (flow_rate * weights.temporal).sum('time') - >>> # Or use the convenience method: - >>> weighted_total = weights.sum_over_time(flow_rate) + >>> fs.weights['time'] # timestep durations + >>> fs.weights['cluster'] # cluster weights (unit if not set) """ - from .structure import TimeSeriesWeights + result: dict[str, xr.DataArray] = {'time': self.timestep_duration} + if self.clusters is not None: + result['cluster'] = self.cluster_weight if self.cluster_weight is not None else self._unit_weight('cluster') + if self.periods is not None: + result['period'] = self.period_weights if self.period_weights is not None else self._unit_weight('period') + if self.scenarios is not None: + result['scenario'] = ( + self.scenario_weights if self.scenario_weights is not None else self._unit_weight('scenario') + ) + return result - return TimeSeriesWeights( - temporal=self.timestep_duration * self.cluster_weight, - period=self.period_weights, - scenario=self._scenario_weights, - ) + def sum_temporal(self, data: xr.DataArray) -> xr.DataArray: + """Sum data over temporal dimensions with full temporal weighting. - @property - def aggregation_weight(self) -> xr.DataArray: - """Combined weight for time aggregation. + Applies both timestep_duration and cluster_weight, then sums over temporal dimensions. + Use this to convert rates to totals (e.g., flow_rate → total_energy). + + Args: + data: Data with time dimension (and optionally cluster). + Typically a rate (e.g., flow_rate in MW, status as 0/1). - Combines timestep_duration (physical duration) and cluster_weight (cluster representation). - Use this for proper time aggregation in clustered models. + Returns: + Data summed over temporal dims with full temporal weighting applied. - Note: - This is equivalent to `weights.temporal`. The unified TimeSeriesWeights - interface (via `flow_system.weights`) is recommended for new code. + Example: + >>> total_energy = fs.sum_temporal(flow_rate) # MW → MWh total + >>> active_hours = fs.sum_temporal(status) # count → hours """ - return self.timestep_duration * self.cluster_weight + return (data * self.temporal_weight).sum(self.temporal_dims) @property def is_clustered(self) -> bool: diff --git a/flixopt/optimization.py b/flixopt/optimization.py index 6a1a87ce1..0b567387f 100644 --- a/flixopt/optimization.py +++ b/flixopt/optimization.py @@ -82,7 +82,7 @@ def _initialize_optimization_common( name: str, flow_system: FlowSystem, folder: pathlib.Path | None = None, - normalize_weights: bool = True, + normalize_weights: bool | None = None, ) -> None: """ Shared initialization logic for all optimization types. 
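For callers, the deprecation below moves normalization to assignment time without changing results: scenario weights are normalized once, in the FlowSystem setter, rather than at solve time. A minimal migration sketch (assuming a FlowSystem `flow_system` with three scenarios and a configured `solver`):

    # before: flow_system.optimize(solver, normalize_weights=True)
    # after:  assignment normalizes; optimize() takes no weight flag
    flow_system.scenario_weights = [2.0, 1.0, 1.0]   # stored as [0.5, 0.25, 0.25]
    flow_system.optimize(solver)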
@@ -95,7 +95,7 @@ def _initialize_optimization_common( name: Name of the optimization flow_system: FlowSystem to optimize folder: Directory for saving results - normalize_weights: Whether to normalize scenario weights + normalize_weights: Deprecated. Scenario weights are now always normalized in FlowSystem. """ obj.name = name @@ -106,7 +106,8 @@ def _initialize_optimization_common( ) flow_system = flow_system.copy() - obj.normalize_weights = normalize_weights + # normalize_weights is deprecated but kept for backwards compatibility + obj.normalize_weights = True # Always True now flow_system._used_in_optimization = True @@ -186,7 +187,7 @@ def do_modeling(self) -> Optimization: t_start = timeit.default_timer() self.flow_system.connect_and_transform() - self.model = self.flow_system.create_model(self.normalize_weights) + self.model = self.flow_system.create_model() self.model.do_modeling() self.durations['modeling'] = round(timeit.default_timer() - t_start, 2) diff --git a/flixopt/optimize_accessor.py b/flixopt/optimize_accessor.py index f88cdf982..7aee930a4 100644 --- a/flixopt/optimize_accessor.py +++ b/flixopt/optimize_accessor.py @@ -53,7 +53,7 @@ def __init__(self, flow_system: FlowSystem) -> None: """ self._fs = flow_system - def __call__(self, solver: _Solver, normalize_weights: bool = True) -> FlowSystem: + def __call__(self, solver: _Solver, normalize_weights: bool | None = None) -> FlowSystem: """ Build and solve the optimization model in one step. @@ -64,7 +64,7 @@ def __call__(self, solver: _Solver, normalize_weights: bool = True) -> FlowSyste Args: solver: The solver to use (e.g., HighsSolver, GurobiSolver). - normalize_weights: Whether to normalize scenario/period weights to sum to 1. + normalize_weights: Deprecated. Scenario weights are now always normalized in FlowSystem. Returns: The FlowSystem, for method chaining. @@ -85,7 +85,18 @@ def __call__(self, solver: _Solver, normalize_weights: bool = True) -> FlowSyste >>> solution = flow_system.optimize(solver).solution """ - self._fs.build_model(normalize_weights) + if normalize_weights is not None: + import warnings + + from .config import DEPRECATION_REMOVAL_VERSION + + warnings.warn( + f'\n\nnormalize_weights parameter is deprecated and will be removed in {DEPRECATION_REMOVAL_VERSION}. ' + 'Scenario weights are now always normalized when set on FlowSystem.\n', + DeprecationWarning, + stacklevel=2, + ) + self._fs.build_model() self._fs.solve(solver) return self._fs diff --git a/flixopt/statistics_accessor.py b/flixopt/statistics_accessor.py index bee26a0e2..73d115df0 100644 --- a/flixopt/statistics_accessor.py +++ b/flixopt/statistics_accessor.py @@ -847,7 +847,7 @@ def get_contributor_type(contributor: str) -> str: # For total mode, sum temporal over time (apply cluster_weight for proper weighting) # Sum over all temporal dimensions (time, and cluster if present) if mode == 'total' and current_mode == 'temporal' and 'time' in da.dims: - weighted = da * self._fs.cluster_weight + weighted = da * self._fs.weights.get('cluster', 1.0) temporal_dims = [d for d in weighted.dims if d not in ('period', 'scenario')] da = weighted.sum(temporal_dims) if share_total is None: diff --git a/flixopt/structure.py b/flixopt/structure.py index 7996565e8..4b7734199 100644 --- a/flixopt/structure.py +++ b/flixopt/structure.py @@ -43,92 +43,6 @@ CLASS_REGISTRY = {} -@dataclass -class TimeSeriesWeights: - """Unified weighting system for time series aggregation (PyPSA-inspired). 
- - This class provides a clean, unified interface for time series weights, - combining the various weight types used in flixopt into a single object. - - Attributes: - temporal: Combined weight for temporal operations (timestep_duration × cluster_weight). - Applied to all time-summing operations. dims: [time] or [time, period, scenario] - period: Weight for each period in multi-period optimization. - dims: [period] or None - scenario: Weight for each scenario in stochastic optimization. - dims: [scenario] or None - objective: Optional override weight for objective function calculations. - If None, uses temporal weight. dims: [time] or [time, period, scenario] - storage: Optional override weight for storage balance equations. - If None, uses temporal weight. dims: [time] or [time, period, scenario] - - Example: - >>> # Access via FlowSystem - >>> weights = flow_system.weights - >>> weighted_sum = (flow_rate * weights.temporal).sum('time') - >>> - >>> # With period/scenario weighting - >>> total = weighted_sum * weights.period * weights.scenario - - Note: - For backwards compatibility, the existing properties (cluster_weight, - timestep_duration, aggregation_weight) are still available on FlowSystem - and FlowSystemModel. - """ - - temporal: xr.DataArray - period: xr.DataArray | None = None - scenario: xr.DataArray | None = None - objective: xr.DataArray | None = None - storage: xr.DataArray | None = None - - def __post_init__(self): - """Validate weights.""" - if not isinstance(self.temporal, xr.DataArray): - raise TypeError('temporal must be an xarray DataArray') - if 'time' not in self.temporal.dims: - raise ValueError("temporal must have 'time' dimension") - - @property - def effective_objective(self) -> xr.DataArray: - """Get effective objective weight (override or temporal).""" - return self.objective if self.objective is not None else self.temporal - - @property - def effective_storage(self) -> xr.DataArray: - """Get effective storage weight (override or temporal).""" - return self.storage if self.storage is not None else self.temporal - - def sum_over_time(self, data: xr.DataArray) -> xr.DataArray: - """Sum data over time dimension with proper weighting. - - Args: - data: DataArray with 'time' dimension. - - Returns: - Data summed over time with temporal weighting applied. - """ - if 'time' not in data.dims: - return data - return (data * self.temporal).sum('time') - - def apply_period_scenario_weights(self, data: xr.DataArray) -> xr.DataArray: - """Apply period and scenario weights to data. - - Args: - data: DataArray, optionally with 'period' and/or 'scenario' dims. - - Returns: - Data with period and scenario weights applied. - """ - result = data - if self.period is not None and 'period' in data.dims: - result = result * self.period - if self.scenario is not None and 'scenario' in data.dims: - result = result * self.scenario - return result - - def register_class_for_io(cls): """Register a class for serialization/deserialization.""" name = cls.__name__ @@ -176,13 +90,11 @@ class FlowSystemModel(linopy.Model, SubmodelsMixin): Args: flow_system: The flow_system that is used to create the model. - normalize_weights: Whether to automatically normalize the weights to sum up to 1 when solving. 
""" - def __init__(self, flow_system: FlowSystem, normalize_weights: bool): + def __init__(self, flow_system: FlowSystem): super().__init__(force_dim_names=True) self.flow_system = flow_system - self.normalize_weights = normalize_weights self.effects: EffectCollectionModel | None = None self.submodels: Submodels = Submodels({}) @@ -314,53 +226,63 @@ def hours_of_previous_timesteps(self): return self.flow_system.hours_of_previous_timesteps @property - def cluster_weight(self) -> xr.DataArray: - """Cluster weight for cluster() optimization. + def dims(self) -> list[str]: + """Active dimension names.""" + return self.flow_system.dims - Represents how many original timesteps each cluster represents. - Default is 1.0 for all timesteps. + @property + def indexes(self) -> dict[str, pd.Index]: + """Indexes for active dimensions.""" + return self.flow_system.indexes + + @property + def weights(self) -> dict[str, xr.DataArray]: + """Weights for active dimensions (unit weights if not set). + + Scenario weights are always normalized (handled by FlowSystem). """ - return self.flow_system.cluster_weight + return self.flow_system.weights @property - def aggregation_weight(self) -> xr.DataArray: - """Combined weight for time aggregation. + def temporal_dims(self) -> list[str]: + """Temporal dimensions for summing over time. - Combines timestep_duration (physical duration) and cluster_weight (cluster representation). - Use this for proper time aggregation in clustered models. + Returns ['time', 'cluster'] for clustered systems, ['time'] otherwise. """ - return self.timestep_duration * self.cluster_weight + return self.flow_system.temporal_dims + + @property + def temporal_weight(self) -> xr.DataArray: + """Combined temporal weight (timestep_duration × cluster_weight).""" + return self.flow_system.temporal_weight + + def sum_temporal(self, data: xr.DataArray) -> xr.DataArray: + """Sum data over temporal dimensions with full temporal weighting. + + Example: + >>> total_energy = model.sum_temporal(flow_rate) + """ + return self.flow_system.sum_temporal(data) @property def scenario_weights(self) -> xr.DataArray: """ - Scenario weights of model. With optional normalization. + Scenario weights of model (always normalized, via FlowSystem). + + Returns unit weights if no scenarios defined or no explicit weights set. """ if self.flow_system.scenarios is None: return xr.DataArray(1) if self.flow_system.scenario_weights is None: - scenario_weights = xr.DataArray( - np.ones(self.flow_system.scenarios.size, dtype=float), - coords={'scenario': self.flow_system.scenarios}, - dims=['scenario'], - name='scenario_weights', - ) - else: - scenario_weights = self.flow_system.scenario_weights - - if not self.normalize_weights: - return scenario_weights + return self.flow_system._unit_weight('scenario') - norm = scenario_weights.sum('scenario') - if np.isclose(norm, 0.0).any(): - raise ValueError('FlowSystemModel.scenario_weights: weights sum to 0; cannot normalize.') - return scenario_weights / norm + return self.flow_system.scenario_weights @property def objective_weights(self) -> xr.DataArray: """ - Objective weights of model. With optional normalization of scenario weights. + Objective weights of model (period_weights × scenario_weights). 
""" period_weights = self.flow_system.effects.objective_effect.submodel.period_weights scenario_weights = self.scenario_weights diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 64aedd0fb..3a13dbb63 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -805,7 +805,11 @@ def _build_cluster_weight_for_key(key: tuple) -> xr.DataArray: da = TimeSeriesData.from_dataarray(da.assign_attrs(original_da.attrs)) ds_new_vars[name] = da - ds_new = xr.Dataset(ds_new_vars, attrs=ds.attrs) + # Copy attrs but remove cluster_weight - the clustered FlowSystem gets its own + # cluster_weight set after from_dataset (original reference has wrong shape) + new_attrs = dict(ds.attrs) + new_attrs.pop('cluster_weight', None) + ds_new = xr.Dataset(ds_new_vars, attrs=new_attrs) ds_new.attrs['timesteps_per_cluster'] = timesteps_per_cluster ds_new.attrs['timestep_duration'] = dt ds_new.attrs['n_clusters'] = actual_n_clusters @@ -852,6 +856,9 @@ def _build_cluster_occurrences_for_key(key: tuple) -> np.ndarray: timestep_mapping_slices = {} cluster_occurrences_slices = {} + # Use renamed timesteps as coordinates for multi-dimensional case + original_timesteps_coord = self._fs.timesteps.rename('original_time') + for p in periods: for s in scenarios: key = (p, s) @@ -859,7 +866,10 @@ def _build_cluster_occurrences_for_key(key: tuple) -> np.ndarray: cluster_orders[key], dims=['original_period'], name='cluster_order' ) timestep_mapping_slices[key] = xr.DataArray( - _build_timestep_mapping_for_key(key), dims=['original_time'], name='timestep_mapping' + _build_timestep_mapping_for_key(key), + dims=['original_time'], + coords={'original_time': original_timesteps_coord}, + name='timestep_mapping', ) cluster_occurrences_slices[key] = xr.DataArray( _build_cluster_occurrences_for_key(key), dims=['cluster'], name='cluster_occurrences' @@ -878,8 +888,13 @@ def _build_cluster_occurrences_for_key(key: tuple) -> np.ndarray: else: # Simple case: single (None, None) slice cluster_order_da = xr.DataArray(cluster_orders[first_key], dims=['original_period'], name='cluster_order') + # Use renamed timesteps as coordinates + original_timesteps_coord = self._fs.timesteps.rename('original_time') timestep_mapping_da = xr.DataArray( - _build_timestep_mapping_for_key(first_key), dims=['original_time'], name='timestep_mapping' + _build_timestep_mapping_for_key(first_key), + dims=['original_time'], + coords={'original_time': original_timesteps_coord}, + name='timestep_mapping', ) cluster_occurrences_da = xr.DataArray( _build_cluster_occurrences_for_key(first_key), dims=['cluster'], name='cluster_occurrences' @@ -892,16 +907,17 @@ def _build_cluster_occurrences_for_key(key: tuple) -> np.ndarray: timesteps_per_cluster=timesteps_per_cluster, ) - # Create representative_weights in flat format for ClusterResult compatibility - # This repeats each cluster's weight for all timesteps within that cluster - def _build_flat_weights_for_key(key: tuple) -> xr.DataArray: + # Create representative_weights with (cluster,) dimension only + # Each cluster has one weight (same for all timesteps within it) + def _build_cluster_weights_for_key(key: tuple) -> xr.DataArray: occurrences = cluster_occurrences_all[key] - weights = np.repeat([occurrences.get(c, 1) for c in range(actual_n_clusters)], timesteps_per_cluster) - return xr.DataArray(weights, dims=['time'], name='representative_weights') + # Shape: (n_clusters,) - one weight per cluster + weights = np.array([occurrences.get(c, 1) for c in 
range(actual_n_clusters)]) + return xr.DataArray(weights, dims=['cluster'], name='representative_weights') - flat_weights_slices = {key: _build_flat_weights_for_key(key) for key in cluster_occurrences_all} + weights_slices = {key: _build_cluster_weights_for_key(key) for key in cluster_occurrences_all} representative_weights = self._combine_slices_to_dataarray_generic( - flat_weights_slices, ['time'], periods, scenarios, 'representative_weights' + weights_slices, ['cluster'], periods, scenarios, 'representative_weights' ) aggregation_result = ClusterResult( @@ -915,7 +931,6 @@ def _build_flat_weights_for_key(key: tuple) -> xr.DataArray: reduced_fs.clustering = Clustering( result=aggregation_result, - original_flow_system=self._fs, backend_name='tsam', ) @@ -1131,19 +1146,20 @@ def expand_solution(self) -> FlowSystem: raise ValueError('No cluster structure available for expansion.') timesteps_per_cluster = cluster_structure.timesteps_per_cluster - original_fs: FlowSystem = info.original_flow_system n_clusters = ( int(cluster_structure.n_clusters) if isinstance(cluster_structure.n_clusters, (int, np.integer)) else int(cluster_structure.n_clusters.values) ) - has_periods = original_fs.periods is not None - has_scenarios = original_fs.scenarios is not None - periods = list(original_fs.periods) if has_periods else [None] - scenarios = list(original_fs.scenarios) if has_scenarios else [None] + # Get original timesteps from clustering, but periods/scenarios from the FlowSystem + # (the clustered FlowSystem preserves the same periods/scenarios) + original_timesteps = info.original_timesteps + has_periods = self._fs.periods is not None + has_scenarios = self._fs.scenarios is not None - original_timesteps = original_fs.timesteps + periods = list(self._fs.periods) if has_periods else [None] + scenarios = list(self._fs.scenarios) if has_scenarios else [None] n_original_timesteps = len(original_timesteps) n_reduced_timesteps = n_clusters * timesteps_per_cluster @@ -1155,11 +1171,23 @@ def expand_da(da: xr.DataArray) -> xr.DataArray: # 1. 
Expand FlowSystem data (with cluster_weight set to 1.0 for all timesteps) reduced_ds = self._fs.to_dataset(include_solution=False) - expanded_ds = xr.Dataset( - {name: expand_da(da) for name, da in reduced_ds.data_vars.items() if name != 'cluster_weight'}, - attrs=reduced_ds.attrs, - ) - expanded_ds.attrs['timestep_duration'] = original_fs.timestep_duration.values.tolist() + # Filter out cluster-related variables and copy attrs without clustering info + data_vars = { + name: expand_da(da) + for name, da in reduced_ds.data_vars.items() + if name != 'cluster_weight' and not name.startswith('clustering|') + } + attrs = { + k: v + for k, v in reduced_ds.attrs.items() + if k not in ('is_clustered', 'n_clusters', 'timesteps_per_cluster', 'clustering') + } + expanded_ds = xr.Dataset(data_vars, attrs=attrs) + # Compute timestep_duration from original timesteps + # Add extra timestep for duration calculation (assume same interval as last) + original_timesteps_extra = FlowSystem._create_timesteps_with_extra(original_timesteps, None) + timestep_duration = FlowSystem.calculate_timestep_duration(original_timesteps_extra) + expanded_ds.attrs['timestep_duration'] = timestep_duration.values.tolist() # Create cluster_weight with value 1.0 for all timesteps (no weighting needed for expanded) # Use _combine_slices_to_dataarray for consistent multi-dim handling @@ -1205,8 +1233,8 @@ def expand_da(da: xr.DataArray) -> xr.DataArray: soc_boundary_per_timestep = soc_boundary_per_timestep.assign_coords(time=original_timesteps) # Apply self-discharge decay to SOC_boundary based on time within period - # Get the storage's relative_loss_per_hour from original flow system - storage = original_fs.storages[storage_name] + # Get the storage's relative_loss_per_hour from the clustered flow system + storage = self._fs.storages.get(storage_name) if storage is not None: # Time within period for each timestep (0, 1, 2, ..., timesteps_per_cluster-1, 0, 1, ...) 
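+                # e.g. hourly data with daily clusters: t runs 0..23 and restarts at each
+                # period boundary, so the (1 - relative_loss_per_hour) ** t decay resets per period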
time_within_period = np.arange(n_original_timesteps) % timesteps_per_cluster diff --git a/tests/deprecated/test_scenarios.py b/tests/deprecated/test_scenarios.py index 65ea62d81..2699647ad 100644 --- a/tests/deprecated/test_scenarios.py +++ b/tests/deprecated/test_scenarios.py @@ -341,12 +341,14 @@ def test_scenarios_selection(flow_system_piecewise_conversion_scenarios): assert flow_system.scenarios.equals(flow_system_full.scenarios[0:2]) - np.testing.assert_allclose(flow_system.scenario_weights.values, flow_system_full.scenario_weights[0:2]) + # Scenario weights are always normalized - subset is re-normalized to sum to 1 + subset_weights = flow_system_full.scenario_weights[0:2] + expected_normalized = subset_weights / subset_weights.sum() + np.testing.assert_allclose(flow_system.scenario_weights.values, expected_normalized.values) - # Optimize using new API with normalize_weights=False + # Optimize using new API flow_system.optimize( fx.solvers.GurobiSolver(mip_gap=0.01, time_limit_seconds=60), - normalize_weights=False, ) # Penalty has same structure as other effects: 'Penalty' is the total, 'Penalty(temporal)' and 'Penalty(periodic)' are components @@ -769,7 +771,10 @@ def test_weights_selection(): # Verify weights are correctly sliced assert fs_subset.scenarios.equals(pd.Index(['base', 'high'], name='scenario')) - np.testing.assert_allclose(fs_subset.scenario_weights.values, custom_scenario_weights[[0, 2]]) + # Scenario weights are always normalized - subset is re-normalized to sum to 1 + subset_weights = np.array([0.3, 0.2]) # Original weights for selected scenarios + expected_normalized = subset_weights / subset_weights.sum() + np.testing.assert_allclose(fs_subset.scenario_weights.values, expected_normalized) # Verify weights are 1D with just scenario dimension (no period dimension) assert fs_subset.scenario_weights.dims == ('scenario',) diff --git a/tests/test_cluster_reduce_expand.py b/tests/test_cluster_reduce_expand.py index af2864563..7072fe22e 100644 --- a/tests/test_cluster_reduce_expand.py +++ b/tests/test_cluster_reduce_expand.py @@ -292,8 +292,9 @@ def test_cluster_with_scenarios(timesteps_8_days, scenarios_2): assert info is not None assert info.result.cluster_structure is not None assert info.result.cluster_structure.n_clusters == 2 - # Original FlowSystem had scenarios - assert info.original_flow_system.scenarios is not None + # Clustered FlowSystem preserves scenarios + assert fs_reduced.scenarios is not None + assert len(fs_reduced.scenarios) == 2 def test_cluster_and_expand_with_scenarios(solver_fixture, timesteps_8_days, scenarios_2): @@ -465,8 +466,8 @@ def test_storage_cluster_mode_intercluster_cyclic(self, solver_fixture, timestep assert 'cluster_boundary' in soc_boundary.dims # First and last SOC_boundary values should be equal (cyclic constraint) - first_soc = float(soc_boundary.isel(cluster_boundary=0).values) - last_soc = float(soc_boundary.isel(cluster_boundary=-1).values) + first_soc = soc_boundary.isel(cluster_boundary=0).item() + last_soc = soc_boundary.isel(cluster_boundary=-1).item() assert_allclose(first_soc, last_soc, rtol=1e-6) @@ -543,15 +544,15 @@ def test_expanded_charge_state_matches_manual_calculation(self, solver_fixture, # Manual verification for first few timesteps of first period p = 0 # First period cluster = int(cluster_order[p]) - soc_b = float(soc_boundary.isel(cluster_boundary=p).values) + soc_b = soc_boundary.isel(cluster_boundary=p).item() for t in [0, 5, 12, 23]: global_t = p * timesteps_per_cluster + t - delta_e = 
float(cs_clustered.isel(cluster=cluster, time=t).values) + delta_e = cs_clustered.isel(cluster=cluster, time=t).item() decay = (1 - loss_rate) ** t expected = soc_b * decay + delta_e expected_clipped = max(0.0, expected) - actual = float(cs_expanded.isel(time=global_t).values) + actual = cs_expanded.isel(time=global_t).item() assert_allclose( actual, diff --git a/tests/test_clustering/test_base.py b/tests/test_clustering/test_base.py index a6c4d8cc7..9c63f25f6 100644 --- a/tests/test_clustering/test_base.py +++ b/tests/test_clustering/test_base.py @@ -152,7 +152,6 @@ def test_creation(self): info = Clustering( result=result, - original_flow_system=None, # Would be FlowSystem in practice backend_name='tsam', ) diff --git a/tests/test_clustering/test_integration.py b/tests/test_clustering/test_integration.py index 587e39160..2d04a51c1 100644 --- a/tests/test_clustering/test_integration.py +++ b/tests/test_clustering/test_integration.py @@ -5,85 +5,121 @@ import pytest import xarray as xr -from flixopt import FlowSystem, TimeSeriesWeights +from flixopt import FlowSystem -class TestTimeSeriesWeights: - """Tests for TimeSeriesWeights class.""" +class TestWeights: + """Tests for FlowSystem.weights dict property.""" - def test_creation(self): - """Test TimeSeriesWeights creation.""" - temporal = xr.DataArray([1.0, 1.0, 1.0], dims=['time']) - weights = TimeSeriesWeights(temporal=temporal) + def test_weights_is_dict(self): + """Test weights returns a dict.""" + fs = FlowSystem(timesteps=pd.date_range('2024-01-01', periods=24, freq='h')) + weights = fs.weights + + assert isinstance(weights, dict) + assert 'time' in weights + + def test_time_weight(self): + """Test weights['time'] returns timestep_duration.""" + fs = FlowSystem(timesteps=pd.date_range('2024-01-01', periods=24, freq='h')) + weights = fs.weights + + # For hourly data, timestep_duration is 1.0 + assert float(weights['time'].mean()) == 1.0 + + def test_cluster_not_in_weights_when_non_clustered(self): + """Test weights doesn't have 'cluster' key for non-clustered systems.""" + fs = FlowSystem(timesteps=pd.date_range('2024-01-01', periods=24, freq='h')) + weights = fs.weights + + # Non-clustered: 'cluster' not in weights + assert 'cluster' not in weights - assert 'time' in weights.temporal.dims - assert float(weights.temporal.sum().values) == 3.0 + def test_temporal_dims_non_clustered(self): + """Test temporal_dims is ['time'] for non-clustered systems.""" + fs = FlowSystem(timesteps=pd.date_range('2024-01-01', periods=24, freq='h')) + + assert fs.temporal_dims == ['time'] - def test_invalid_no_time_dim(self): - """Test error when temporal has no time dimension.""" - temporal = xr.DataArray([1.0, 1.0], dims=['other']) + def test_temporal_weight(self): + """Test temporal_weight returns time * cluster.""" + fs = FlowSystem(timesteps=pd.date_range('2024-01-01', periods=24, freq='h')) - with pytest.raises(ValueError, match='time'): - TimeSeriesWeights(temporal=temporal) + expected = fs.weights['time'] * fs.weights.get('cluster', 1.0) + xr.testing.assert_equal(fs.temporal_weight, expected) - def test_sum_over_time(self): - """Test sum_over_time convenience method.""" - temporal = xr.DataArray([2.0, 3.0, 1.0], dims=['time'], coords={'time': [0, 1, 2]}) - weights = TimeSeriesWeights(temporal=temporal) + def test_sum_temporal(self): + """Test sum_temporal applies full temporal weighting (time * cluster) and sums.""" + fs = FlowSystem(timesteps=pd.date_range('2024-01-01', periods=3, freq='h')) - data = xr.DataArray([10.0, 20.0, 30.0], 
dims=['time'], coords={'time': [0, 1, 2]}) - result = weights.sum_over_time(data) + # Input is a rate (e.g., flow_rate in MW) + data = xr.DataArray([10.0, 20.0, 30.0], dims=['time'], coords={'time': fs.timesteps}) - # 10*2 + 20*3 + 30*1 = 20 + 60 + 30 = 110 - assert float(result.values) == 110.0 + result = fs.sum_temporal(data) - def test_effective_objective(self): - """Test effective_objective property.""" - temporal = xr.DataArray([1.0, 1.0], dims=['time']) - objective = xr.DataArray([2.0, 2.0], dims=['time']) + # For hourly non-clustered: temporal = time * cluster = 1.0 * 1.0 = 1.0 + # result = sum(data * temporal) = sum(data) = 60 + assert float(result.values) == 60.0 - # Without override - weights1 = TimeSeriesWeights(temporal=temporal) - assert np.array_equal(weights1.effective_objective.values, temporal.values) - # With override - weights2 = TimeSeriesWeights(temporal=temporal, objective=objective) - assert np.array_equal(weights2.effective_objective.values, objective.values) +class TestFlowSystemDimsIndexesWeights: + """Tests for FlowSystem.dims, .indexes, .weights properties.""" + def test_dims_property(self): + """Test that FlowSystem.dims returns active dimension names.""" + fs = FlowSystem(timesteps=pd.date_range('2024-01-01', periods=24, freq='h')) -class TestFlowSystemWeightsProperty: - """Tests for FlowSystem.weights property.""" + dims = fs.dims + assert dims == ['time'] - def test_weights_property_exists(self): - """Test that FlowSystem has weights property.""" + def test_indexes_property(self): + """Test that FlowSystem.indexes returns active indexes.""" fs = FlowSystem(timesteps=pd.date_range('2024-01-01', periods=24, freq='h')) - weights = fs.weights - assert isinstance(weights, TimeSeriesWeights) + indexes = fs.indexes + assert isinstance(indexes, dict) + assert 'time' in indexes + assert len(indexes['time']) == 24 - def test_weights_temporal_equals_aggregation_weight(self): - """Test that weights.temporal equals aggregation_weight.""" + def test_weights_keys_match_dims(self): + """Test that weights.keys() is subset of dims (only 'time' for simple case).""" fs = FlowSystem(timesteps=pd.date_range('2024-01-01', periods=24, freq='h')) - weights = fs.weights - aggregation_weight = fs.aggregation_weight + # For non-clustered, weights only has 'time' + assert set(fs.weights.keys()) == {'time'} - np.testing.assert_array_almost_equal(weights.temporal.values, aggregation_weight.values) + def test_temporal_weight_calculation(self): + """Test that temporal_weight = timestep_duration * cluster_weight.""" + fs = FlowSystem(timesteps=pd.date_range('2024-01-01', periods=24, freq='h')) + + expected = fs.timestep_duration * 1.0 # cluster is 1.0 for non-clustered + + np.testing.assert_array_almost_equal(fs.temporal_weight.values, expected.values) def test_weights_with_cluster_weight(self): - """Test weights property includes cluster_weight.""" + """Test weights property includes cluster_weight when provided.""" # Create FlowSystem with custom cluster_weight timesteps = pd.date_range('2024-01-01', periods=24, freq='h') - cluster_weight = np.array([2.0] * 12 + [1.0] * 12) # First 12h weighted 2x + cluster_weight = xr.DataArray( + np.array([2.0] * 12 + [1.0] * 12), + dims=['time'], + coords={'time': timesteps}, + ) fs = FlowSystem(timesteps=timesteps, cluster_weight=cluster_weight) weights = fs.weights - # temporal = timestep_duration * cluster_weight - # timestep_duration is 1h for all, so temporal = cluster_weight + # cluster weight should be in weights (FlowSystem has cluster_weight 
set) + # But note: 'cluster' only appears in weights if clusters dimension exists + # Since we didn't set clusters, 'cluster' won't be in weights + # The cluster_weight is applied via temporal_weight + assert 'cluster' not in weights # No cluster dimension + + # temporal_weight = timestep_duration * cluster_weight + # timestep_duration is 1h for all expected = 1.0 * cluster_weight - np.testing.assert_array_almost_equal(weights.temporal.values, expected) + np.testing.assert_array_almost_equal(fs.temporal_weight.values, expected.values) class TestClusterMethod: diff --git a/tests/test_clustering_io.py b/tests/test_clustering_io.py new file mode 100644 index 000000000..483cdc447 --- /dev/null +++ b/tests/test_clustering_io.py @@ -0,0 +1,241 @@ +"""Tests for clustering serialization and deserialization.""" + +import numpy as np +import pandas as pd +import pytest + +import flixopt as fx + + +@pytest.fixture +def simple_system_24h(): + """Create a simple flow system with 24 hourly timesteps.""" + timesteps = pd.date_range('2023-01-01', periods=24, freq='h') + + fs = fx.FlowSystem(timesteps) + fs.add_elements( + fx.Bus('heat'), + fx.Effect('costs', unit='EUR', description='costs', is_objective=True, is_standard=True), + ) + fs.add_elements( + fx.Sink('demand', inputs=[fx.Flow('in', bus='heat', fixed_relative_profile=np.ones(24), size=10)]), + fx.Source('source', outputs=[fx.Flow('out', bus='heat', size=50, effects_per_flow_hour={'costs': 0.05})]), + ) + return fs + + +@pytest.fixture +def simple_system_8_days(): + """Create a simple flow system with 8 days of hourly timesteps.""" + timesteps = pd.date_range('2023-01-01', periods=8 * 24, freq='h') + + # Create varying demand profile with different patterns for different days + # 4 "weekdays" with high demand, 4 "weekend" days with low demand + hourly_pattern = np.sin(np.linspace(0, 2 * np.pi, 24)) * 0.5 + 0.5 + weekday_profile = hourly_pattern * 1.5 # Higher demand + weekend_profile = hourly_pattern * 0.5 # Lower demand + demand_profile = np.concatenate( + [ + weekday_profile, + weekday_profile, + weekday_profile, + weekday_profile, + weekend_profile, + weekend_profile, + weekend_profile, + weekend_profile, + ] + ) + + fs = fx.FlowSystem(timesteps) + fs.add_elements( + fx.Bus('heat'), + fx.Effect('costs', unit='EUR', description='costs', is_objective=True, is_standard=True), + ) + fs.add_elements( + fx.Sink('demand', inputs=[fx.Flow('in', bus='heat', fixed_relative_profile=demand_profile, size=10)]), + fx.Source('source', outputs=[fx.Flow('out', bus='heat', size=50, effects_per_flow_hour={'costs': 0.05})]), + ) + return fs + + +class TestClusteringRoundtrip: + """Test that clustering survives dataset roundtrip.""" + + def test_clustering_to_dataset_has_clustering_attrs(self, simple_system_8_days): + """Clustered FlowSystem dataset should have clustering info.""" + fs = simple_system_8_days + fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D') + + ds = fs_clustered.to_dataset(include_solution=False) + + # Check that clustering attrs are present + assert 'clustering' in ds.attrs + + # Check that clustering arrays are present with prefix + clustering_vars = [name for name in ds.data_vars if name.startswith('clustering|')] + assert len(clustering_vars) > 0 + + def test_clustering_roundtrip_preserves_clustering_object(self, simple_system_8_days): + """Clustering object should be restored after roundtrip.""" + fs = simple_system_8_days + fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D') + + # Roundtrip + ds = 
fs_clustered.to_dataset(include_solution=False) + fs_restored = fx.FlowSystem.from_dataset(ds) + + # Clustering should be restored + assert fs_restored.clustering is not None + assert fs_restored.clustering.backend_name == 'tsam' + + def test_clustering_roundtrip_preserves_n_clusters(self, simple_system_8_days): + """Number of clusters should be preserved after roundtrip.""" + fs = simple_system_8_days + fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D') + + ds = fs_clustered.to_dataset(include_solution=False) + fs_restored = fx.FlowSystem.from_dataset(ds) + + assert fs_restored.clustering.n_clusters == 2 + + def test_clustering_roundtrip_preserves_timesteps_per_cluster(self, simple_system_8_days): + """Timesteps per cluster should be preserved after roundtrip.""" + fs = simple_system_8_days + fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D') + + ds = fs_clustered.to_dataset(include_solution=False) + fs_restored = fx.FlowSystem.from_dataset(ds) + + assert fs_restored.clustering.timesteps_per_cluster == 24 + + def test_clustering_roundtrip_preserves_original_timesteps(self, simple_system_8_days): + """Original timesteps should be preserved after roundtrip.""" + fs = simple_system_8_days + fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D') + original_timesteps = fs_clustered.clustering.original_timesteps + + ds = fs_clustered.to_dataset(include_solution=False) + fs_restored = fx.FlowSystem.from_dataset(ds) + + pd.testing.assert_index_equal(fs_restored.clustering.original_timesteps, original_timesteps) + + def test_clustering_roundtrip_preserves_timestep_mapping(self, simple_system_8_days): + """Timestep mapping should be preserved after roundtrip.""" + fs = simple_system_8_days + fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D') + original_mapping = fs_clustered.clustering.timestep_mapping.values.copy() + + ds = fs_clustered.to_dataset(include_solution=False) + fs_restored = fx.FlowSystem.from_dataset(ds) + + np.testing.assert_array_equal(fs_restored.clustering.timestep_mapping.values, original_mapping) + + +class TestClusteringWithSolutionRoundtrip: + """Test that clustering with solution survives roundtrip.""" + + def test_expand_solution_after_roundtrip(self, simple_system_8_days, solver_fixture): + """expand_solution should work after loading from dataset.""" + fs = simple_system_8_days + fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D') + + # Solve + fs_clustered.optimize(solver_fixture) + + # Roundtrip + ds = fs_clustered.to_dataset(include_solution=True) + fs_restored = fx.FlowSystem.from_dataset(ds) + + # expand_solution should work + fs_expanded = fs_restored.transform.expand_solution() + + # Check expanded FlowSystem has correct number of timesteps + assert len(fs_expanded.timesteps) == 8 * 24 + + def test_expand_solution_after_netcdf_roundtrip(self, simple_system_8_days, tmp_path, solver_fixture): + """expand_solution should work after loading from NetCDF file.""" + fs = simple_system_8_days + fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D') + + # Solve + fs_clustered.optimize(solver_fixture) + + # Save to NetCDF + nc_path = tmp_path / 'clustered.nc' + fs_clustered.to_netcdf(nc_path) + + # Load from NetCDF + fs_restored = fx.FlowSystem.from_netcdf(nc_path) + + # expand_solution should work + fs_expanded = fs_restored.transform.expand_solution() + + # Check expanded FlowSystem has correct number of timesteps + assert len(fs_expanded.timesteps) == 
8 * 24 + + +class TestClusteringDerivedProperties: + """Test derived properties on Clustering object.""" + + def test_original_timesteps_property(self, simple_system_8_days): + """original_timesteps property should return correct DatetimeIndex.""" + fs = simple_system_8_days + original_timesteps = fs.timesteps + + fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D') + + # Check values are equal (name attribute may differ) + pd.testing.assert_index_equal( + fs_clustered.clustering.original_timesteps, + original_timesteps, + check_names=False, + ) + + def test_simple_system_has_no_periods_or_scenarios(self, simple_system_8_days): + """Clustered simple system should preserve that it has no periods/scenarios.""" + fs = simple_system_8_days + fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D') + + # FlowSystem without periods/scenarios should remain so after clustering + assert fs_clustered.periods is None + assert fs_clustered.scenarios is None + + +class TestClusteringWithScenarios: + """Test clustering IO with scenarios.""" + + @pytest.fixture + def system_with_scenarios(self): + """Create a flow system with scenarios.""" + timesteps = pd.date_range('2023-01-01', periods=4 * 24, freq='h') + scenarios = pd.Index(['Low', 'High'], name='scenario') + + # Create varying demand profile for clustering + demand_profile = np.tile(np.sin(np.linspace(0, 2 * np.pi, 24)) * 0.5 + 0.5, 4) + + fs = fx.FlowSystem(timesteps, scenarios=scenarios) + fs.add_elements( + fx.Bus('heat'), + fx.Effect('costs', unit='EUR', description='costs', is_objective=True, is_standard=True), + ) + fs.add_elements( + fx.Sink('demand', inputs=[fx.Flow('in', bus='heat', fixed_relative_profile=demand_profile, size=10)]), + fx.Source('source', outputs=[fx.Flow('out', bus='heat', size=50, effects_per_flow_hour={'costs': 0.05})]), + ) + return fs + + def test_clustering_roundtrip_preserves_scenarios(self, system_with_scenarios): + """Scenarios should be preserved after clustering and roundtrip.""" + fs = system_with_scenarios + fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D') + + ds = fs_clustered.to_dataset(include_solution=False) + fs_restored = fx.FlowSystem.from_dataset(ds) + + # Scenarios should be preserved in the FlowSystem itself + pd.testing.assert_index_equal( + fs_restored.scenarios, + pd.Index(['Low', 'High'], name='scenario'), + check_names=False, + ) diff --git a/tests/test_io_conversion.py b/tests/test_io_conversion.py index 33bda8c91..7775f987a 100644 --- a/tests/test_io_conversion.py +++ b/tests/test_io_conversion.py @@ -762,6 +762,11 @@ def test_v4_reoptimized_objective_matches_original(self, result_name): new_objective = float(fs.solution['objective'].item()) new_effect_total = float(fs.solution[objective_effect_label].sum().item()) + # Skip comparison for scenarios test case - scenario weights are now always normalized, + # which changes the objective value when loading old results with non-normalized weights + if result_name == '04_scenarios': + pytest.skip('Scenario weights are now always normalized - old results have different weights') + # Verify objective matches (within tolerance) assert new_objective == pytest.approx(old_objective, rel=1e-5, abs=1), ( f'Objective mismatch for {result_name}: new={new_objective}, old={old_objective}' diff --git a/tests/test_scenarios.py b/tests/test_scenarios.py index 65ea62d81..2699647ad 100644 --- a/tests/test_scenarios.py +++ b/tests/test_scenarios.py @@ -341,12 +341,14 @@ def 
test_scenarios_selection(flow_system_piecewise_conversion_scenarios): assert flow_system.scenarios.equals(flow_system_full.scenarios[0:2]) - np.testing.assert_allclose(flow_system.scenario_weights.values, flow_system_full.scenario_weights[0:2]) + # Scenario weights are always normalized - subset is re-normalized to sum to 1 + subset_weights = flow_system_full.scenario_weights[0:2] + expected_normalized = subset_weights / subset_weights.sum() + np.testing.assert_allclose(flow_system.scenario_weights.values, expected_normalized.values) - # Optimize using new API with normalize_weights=False + # Optimize using new API flow_system.optimize( fx.solvers.GurobiSolver(mip_gap=0.01, time_limit_seconds=60), - normalize_weights=False, ) # Penalty has same structure as other effects: 'Penalty' is the total, 'Penalty(temporal)' and 'Penalty(periodic)' are components @@ -769,7 +771,10 @@ def test_weights_selection(): # Verify weights are correctly sliced assert fs_subset.scenarios.equals(pd.Index(['base', 'high'], name='scenario')) - np.testing.assert_allclose(fs_subset.scenario_weights.values, custom_scenario_weights[[0, 2]]) + # Scenario weights are always normalized - subset is re-normalized to sum to 1 + subset_weights = np.array([0.3, 0.2]) # Original weights for selected scenarios + expected_normalized = subset_weights / subset_weights.sum() + np.testing.assert_allclose(fs_subset.scenario_weights.values, expected_normalized) # Verify weights are 1D with just scenario dimension (no period dimension) assert fs_subset.scenario_weights.dims == ('scenario',) diff --git a/tests/test_sel_isel_single_selection.py b/tests/test_sel_isel_single_selection.py new file mode 100644 index 000000000..4d84ced51 --- /dev/null +++ b/tests/test_sel_isel_single_selection.py @@ -0,0 +1,193 @@ +"""Tests for sel/isel with single period/scenario selection.""" + +import numpy as np +import pandas as pd +import pytest + +import flixopt as fx + + +@pytest.fixture +def fs_with_scenarios(): + """FlowSystem with scenarios for testing single selection.""" + timesteps = pd.date_range('2023-01-01', periods=24, freq='h') + scenarios = pd.Index(['A', 'B', 'C'], name='scenario') + scenario_weights = np.array([0.5, 0.3, 0.2]) + + fs = fx.FlowSystem(timesteps, scenarios=scenarios, scenario_weights=scenario_weights) + fs.add_elements( + fx.Bus('heat'), + fx.Effect('costs', unit='EUR', description='costs', is_objective=True, is_standard=True), + ) + fs.add_elements( + fx.Sink('demand', inputs=[fx.Flow('in', bus='heat', fixed_relative_profile=np.ones(24), size=10)]), + fx.Source('source', outputs=[fx.Flow('out', bus='heat', size=50, effects_per_flow_hour={'costs': 0.05})]), + ) + return fs + + +@pytest.fixture +def fs_with_periods(): + """FlowSystem with periods for testing single selection.""" + timesteps = pd.date_range('2023-01-01', periods=24, freq='h') + periods = pd.Index([2020, 2030, 2040], name='period') + + fs = fx.FlowSystem(timesteps, periods=periods, weight_of_last_period=10) + fs.add_elements( + fx.Bus('heat'), + fx.Effect('costs', unit='EUR', description='costs', is_objective=True, is_standard=True), + ) + fs.add_elements( + fx.Sink('demand', inputs=[fx.Flow('in', bus='heat', fixed_relative_profile=np.ones(24), size=10)]), + fx.Source('source', outputs=[fx.Flow('out', bus='heat', size=50, effects_per_flow_hour={'costs': 0.05})]), + ) + return fs + + +@pytest.fixture +def fs_with_periods_and_scenarios(): + """FlowSystem with both periods and scenarios.""" + timesteps = pd.date_range('2023-01-01', periods=24, freq='h') + 
periods = pd.Index([2020, 2030], name='period') + scenarios = pd.Index(['Low', 'High'], name='scenario') + + fs = fx.FlowSystem(timesteps, periods=periods, scenarios=scenarios, weight_of_last_period=10) + fs.add_elements( + fx.Bus('heat'), + fx.Effect('costs', unit='EUR', description='costs', is_objective=True, is_standard=True), + ) + fs.add_elements( + fx.Sink('demand', inputs=[fx.Flow('in', bus='heat', fixed_relative_profile=np.ones(24), size=10)]), + fx.Source('source', outputs=[fx.Flow('out', bus='heat', size=50, effects_per_flow_hour={'costs': 0.05})]), + ) + return fs + + +class TestIselSingleScenario: + """Test isel with single scenario selection.""" + + def test_isel_single_scenario_drops_dimension(self, fs_with_scenarios): + """Selecting a single scenario with isel should drop the scenario dimension.""" + fs_selected = fs_with_scenarios.transform.isel(scenario=0) + + assert fs_selected.scenarios is None + assert 'scenario' not in fs_selected.to_dataset().dims + + def test_isel_single_scenario_removes_scenario_weights(self, fs_with_scenarios): + """scenario_weights should be removed when scenario dimension is dropped.""" + fs_selected = fs_with_scenarios.transform.isel(scenario=0) + + ds = fs_selected.to_dataset() + assert 'scenario_weights' not in ds.data_vars + assert 'scenario_weights' not in ds.attrs + + def test_isel_single_scenario_preserves_time(self, fs_with_scenarios): + """Time dimension should be preserved.""" + fs_selected = fs_with_scenarios.transform.isel(scenario=0) + + assert len(fs_selected.timesteps) == 24 + + def test_isel_single_scenario_roundtrip(self, fs_with_scenarios): + """FlowSystem should survive to_dataset/from_dataset roundtrip after single selection.""" + fs_selected = fs_with_scenarios.transform.isel(scenario=0) + + ds = fs_selected.to_dataset() + fs_restored = fx.FlowSystem.from_dataset(ds) + + assert fs_restored.scenarios is None + assert len(fs_restored.timesteps) == 24 + + +class TestSelSingleScenario: + """Test sel with single scenario selection.""" + + def test_sel_single_scenario_drops_dimension(self, fs_with_scenarios): + """Selecting a single scenario with sel should drop the scenario dimension.""" + fs_selected = fs_with_scenarios.transform.sel(scenario='B') + + assert fs_selected.scenarios is None + + +class TestIselSinglePeriod: + """Test isel with single period selection.""" + + def test_isel_single_period_drops_dimension(self, fs_with_periods): + """Selecting a single period with isel should drop the period dimension.""" + fs_selected = fs_with_periods.transform.isel(period=0) + + assert fs_selected.periods is None + assert 'period' not in fs_selected.to_dataset().dims + + def test_isel_single_period_removes_period_weights(self, fs_with_periods): + """period_weights should be removed when period dimension is dropped.""" + fs_selected = fs_with_periods.transform.isel(period=0) + + ds = fs_selected.to_dataset() + assert 'period_weights' not in ds.data_vars + assert 'weight_of_last_period' not in ds.attrs + + def test_isel_single_period_roundtrip(self, fs_with_periods): + """FlowSystem should survive roundtrip after single period selection.""" + fs_selected = fs_with_periods.transform.isel(period=0) + + ds = fs_selected.to_dataset() + fs_restored = fx.FlowSystem.from_dataset(ds) + + assert fs_restored.periods is None + + +class TestSelSinglePeriod: + """Test sel with single period selection.""" + + def test_sel_single_period_drops_dimension(self, fs_with_periods): + """Selecting a single period with sel should drop the period dimension.""" 
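+        # sel() selects by label (the year 2030), complementing the
+        # position-based isel() tests above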
+        fs_selected = fs_with_periods.transform.sel(period=2030)
+
+        assert fs_selected.periods is None
+
+
+class TestMixedSelection:
+    """Test mixed selections (single + multiple)."""
+
+    def test_single_period_multiple_scenarios(self, fs_with_periods_and_scenarios):
+        """Single period but multiple scenarios should only drop period."""
+        fs_selected = fs_with_periods_and_scenarios.transform.isel(period=0)
+
+        assert fs_selected.periods is None
+        assert fs_selected.scenarios is not None
+        assert len(fs_selected.scenarios) == 2
+
+    def test_multiple_periods_single_scenario(self, fs_with_periods_and_scenarios):
+        """Multiple periods but single scenario should only drop scenario."""
+        fs_selected = fs_with_periods_and_scenarios.transform.isel(scenario=0)
+
+        assert fs_selected.periods is not None
+        assert len(fs_selected.periods) == 2
+        assert fs_selected.scenarios is None
+
+    def test_single_period_single_scenario(self, fs_with_periods_and_scenarios):
+        """Single period and single scenario should drop both."""
+        fs_selected = fs_with_periods_and_scenarios.transform.isel(period=0, scenario=0)
+
+        assert fs_selected.periods is None
+        assert fs_selected.scenarios is None
+
+
+class TestSliceSelection:
+    """Test that slice selection preserves dimensions."""
+
+    def test_slice_scenarios_preserves_dimension(self, fs_with_scenarios):
+        """Slice selection should preserve the dimension."""
+        # Select a slice that results in 2 elements
+        fs_selected = fs_with_scenarios.transform.isel(scenario=slice(0, 2))
+
+        assert fs_selected.scenarios is not None
+        assert len(fs_selected.scenarios) == 2
+
+    def test_list_selection_preserves_dimension(self, fs_with_scenarios):
+        """List selection should preserve dimension even with 1 element."""
+        fs_selected = fs_with_scenarios.transform.isel(scenario=[0])
+
+        # List selection should preserve dimension
+        assert fs_selected.scenarios is not None
+        assert len(fs_selected.scenarios) == 1

From 93e71522525850beded10db4c43ac085bafeb424 Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Sat, 3 Jan 2026 23:02:36 +0100
Subject: [PATCH 179/191] Feature: fxplot Plotting Accessor (#548)

* Add dataset plot accessor

* Add fxplot accessor showcase

* The internal plot accessors now leverage the shared .fxplot implementation,
  reducing code duplication while maintaining the same functionality (data
  preparation, color resolution from components, PlotResult wrapping).

* Fix notebook

* 1. xlabel/ylabel parameters - Added to bar(), stacked_bar(), line(), area(),
  and duration_curve() methods in both DatasetPlotAccessor and DataArrayPlotAccessor
  2. scatter() method - Plots two variables against each other with x and y parameters
  3. pie() method - Creates pie charts from aggregated (scalar) dataset values,
  e.g. ds.sum('time').fxplot.pie()
  4. duration_curve() method - Sorts values along the time dimension in descending
  order, with optional normalize parameter for percentage x-axis
  5. CONFIG.Plotting.default_line_shape - New config option (default 'hv') that
  controls the default line shape for line(), area(), and duration_curve() methods

* Fix faceting of pie

* Improve auto dim handling

* Improve notebook

* Fix pie plot

* Logic order changed:
  1. X-axis is now determined first using CONFIG.Plotting.x_dim_priority
  2. Facets are resolved from remaining dimensions (x-axis excluded)

  x_dim_priority expanded:
  x_dim_priority = ('time', 'duration', 'duration_pct', 'period', 'scenario', 'cluster')
  - Time-like dims first, then common grouping dims as fallback
  - variable stays excluded (it's used for color, not x-axis)

  _get_x_dim() refactored:
  - Now takes dims: list[str] instead of a DataFrame
  - More versatile - works with any list of dimension names

* Add x parameter and x_dim_priority config to fxplot

  - Add `x` parameter to bar/stacked_bar/line/area for explicit x-axis control
  - Add CONFIG.Plotting.x_dim_priority for auto x-axis selection order
  - X-axis determined first, facets from remaining dimensions
  - Refactor _get_x_column -> _get_x_dim (takes dim list, not DataFrame)
  - Support scalar data (no dims) by using 'variable' as x-axis

* Add x parameter and smart dimension handling to fxplot

  - Add `x` parameter to bar/stacked_bar/line/area for explicit x-axis control
  - Add CONFIG.Plotting.x_dim_priority for auto x-axis selection
    Default: ('time', 'duration', 'duration_pct', 'period', 'scenario', 'cluster')
  - X-axis determined first, facets resolved from remaining dimensions
  - Refactor _get_x_column -> _get_x_dim (takes dim list, more versatile)
  - Support scalar data (no dims) by using 'variable' as x-axis
  - Skip color='variable' when x='variable' to avoid double encoding
  - Fix _dataset_to_long_df to use dims (not just coords) as id_vars
  - Ensure px_kwargs properly overrides all defaults (color, facets, etc.)

* Improve documentation

* Fix notebook in docs

* 1. heatmap kwarg merge order - Now uses **{**imshow_args, **imshow_kwargs} so user can override
  2. scatter unused colors - Removed the unused parameter
  3. to_duration_curve sorting - Changed [::-1] to np.flip(..., axis=time_axis) for correct
  multi-dimensional handling
  4.
DataArrayPlotAccessor.heatmap - Same kwarg merge fix * Improve docstrings * Update notebooks to not do file operations * Fix notebook * Fix CI * mkdocs-jupyter was treating this .py file as a notebook and executing it, causing the NetCDF write failure in CI * Add missing type annotation --- docs/notebooks/08c-clustering.ipynb | 1 + docs/notebooks/08e-clustering-internals.ipynb | 16 +- .../09-plotting-and-data-access.ipynb | 175 ++-- .../data/generate_example_systems.py | 9 + docs/notebooks/fxplot_accessor_demo.ipynb | 565 +++++++++++ .../recipes/plotting-custom-data.md | 129 +-- flixopt/__init__.py | 3 + flixopt/clustering/base.py | 2 +- flixopt/config.py | 16 +- flixopt/dataset_plot_accessor.py | 891 ++++++++++++++++++ flixopt/statistics_accessor.py | 133 +-- mkdocs.yml | 5 + 12 files changed, 1592 insertions(+), 353 deletions(-) create mode 100644 docs/notebooks/fxplot_accessor_demo.ipynb create mode 100644 flixopt/dataset_plot_accessor.py diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb index de1a05482..0e9cda7b7 100644 --- a/docs/notebooks/08c-clustering.ipynb +++ b/docs/notebooks/08c-clustering.ipynb @@ -58,6 +58,7 @@ "from data.generate_example_systems import create_district_heating_system\n", "\n", "flow_system = create_district_heating_system()\n", + "flow_system.connect_and_transform()\n", "\n", "timesteps = flow_system.timesteps\n", "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 24:.0f} days at hourly resolution)')\n", diff --git a/docs/notebooks/08e-clustering-internals.ipynb b/docs/notebooks/08e-clustering-internals.ipynb index bbd54f05d..506a01ed9 100644 --- a/docs/notebooks/08e-clustering-internals.ipynb +++ b/docs/notebooks/08e-clustering-internals.ipynb @@ -32,8 +32,8 @@ "\n", "fx.CONFIG.notebook()\n", "\n", - "# Create the district heating system\n", - "flow_system = create_district_heating_system()" + "flow_system = create_district_heating_system()\n", + "flow_system.connect_and_transform()" ] }, { @@ -287,17 +287,7 @@ ] } ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "name": "python", - "version": "3.11" - } - }, + "metadata": {}, "nbformat": 4, "nbformat_minor": 5 } diff --git a/docs/notebooks/09-plotting-and-data-access.ipynb b/docs/notebooks/09-plotting-and-data-access.ipynb index a4803adf4..39fa788da 100644 --- a/docs/notebooks/09-plotting-and-data-access.ipynb +++ b/docs/notebooks/09-plotting-and-data-access.ipynb @@ -11,7 +11,6 @@ "\n", "This notebook covers:\n", "\n", - "- Loading saved FlowSystems from NetCDF files\n", "- Accessing data (flow rates, sizes, effects, charge states)\n", "- Time series plots (balance, flows, storage)\n", "- Aggregated plots (sizes, effects, duration curves)\n", @@ -36,7 +35,7 @@ "metadata": {}, "outputs": [], "source": [ - "from pathlib import Path\n", + "from data.generate_example_systems import create_complex_system, create_multiperiod_system, create_simple_system\n", "\n", "import flixopt as fx\n", "\n", @@ -48,9 +47,9 @@ "id": "3", "metadata": {}, "source": [ - "## Generate Example Data\n", + "## Generate Example Systems\n", "\n", - "First, run the script that generates three example FlowSystems with solutions:" + "First, create three example FlowSystems with solutions:" ] }, { @@ -60,35 +59,19 @@ "metadata": {}, "outputs": [], "source": [ - "# Run the generation script (only needed once, or to regenerate)\n", - "!python data/generate_example_systems.py > /dev/null 2>&1" - ] 
- }, - { - "cell_type": "markdown", - "id": "5", - "metadata": {}, - "source": [ - "## 1. Loading Saved FlowSystems\n", + "# Create and optimize the example systems\n", + "solver = fx.solvers.HighsSolver(mip_gap=0.01, log_to_console=False)\n", "\n", - "FlowSystems can be saved to and loaded from NetCDF files, preserving the full structure and solution:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "6", - "metadata": {}, - "outputs": [], - "source": [ - "DATA_DIR = Path('data')\n", + "simple = create_simple_system()\n", + "simple.optimize(solver)\n", + "\n", + "complex_sys = create_complex_system()\n", + "complex_sys.optimize(solver)\n", "\n", - "# Load the three example systems\n", - "simple = fx.FlowSystem.from_netcdf(DATA_DIR / 'simple_system.nc4')\n", - "complex_sys = fx.FlowSystem.from_netcdf(DATA_DIR / 'complex_system.nc4')\n", - "multiperiod = fx.FlowSystem.from_netcdf(DATA_DIR / 'multiperiod_system.nc4')\n", + "multiperiod = create_multiperiod_system()\n", + "multiperiod.optimize(solver)\n", "\n", - "print('Loaded systems:')\n", + "print('Created systems:')\n", "print(f' simple: {len(simple.components)} components, {len(simple.buses)} buses')\n", "print(f' complex_sys: {len(complex_sys.components)} components, {len(complex_sys.buses)} buses')\n", "print(f' multiperiod: {len(multiperiod.components)} components, dims={dict(multiperiod.solution.sizes)}')" @@ -96,7 +79,7 @@ }, { "cell_type": "markdown", - "id": "7", + "id": "5", "metadata": {}, "source": [ "## 2. Quick Overview: Balance Plot\n", @@ -107,7 +90,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "6", "metadata": {}, "outputs": [], "source": [ @@ -117,7 +100,7 @@ }, { "cell_type": "markdown", - "id": "9", + "id": "7", "metadata": {}, "source": [ "### Accessing Plot Data\n", @@ -128,7 +111,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -142,7 +125,7 @@ }, { "cell_type": "markdown", - "id": "11", + "id": "9", "metadata": {}, "source": [ "### Energy Totals\n", @@ -153,7 +136,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -167,7 +150,7 @@ }, { "cell_type": "markdown", - "id": "13", + "id": "11", "metadata": {}, "source": [ "## 3. 
Time Series Plots" @@ -175,7 +158,7 @@ }, { "cell_type": "markdown", - "id": "14", + "id": "12", "metadata": {}, "source": [ "### 3.1 Balance Plot\n", @@ -186,7 +169,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -196,7 +179,7 @@ }, { "cell_type": "markdown", - "id": "16", + "id": "14", "metadata": {}, "source": [ "### 3.2 Carrier Balance\n", @@ -207,7 +190,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -217,7 +200,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -226,7 +209,7 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "17", "metadata": {}, "source": [ "### 3.3 Flow Rates\n", @@ -237,7 +220,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "18", "metadata": {}, "outputs": [], "source": [ @@ -248,7 +231,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -258,7 +241,7 @@ }, { "cell_type": "markdown", - "id": "22", + "id": "20", "metadata": {}, "source": [ "### 3.4 Storage Plot\n", @@ -269,7 +252,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -278,7 +261,7 @@ }, { "cell_type": "markdown", - "id": "24", + "id": "22", "metadata": {}, "source": [ "### 3.5 Charge States Plot\n", @@ -289,7 +272,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -298,7 +281,7 @@ }, { "cell_type": "markdown", - "id": "26", + "id": "24", "metadata": {}, "source": [ "## 4. Aggregated Plots" @@ -306,7 +289,7 @@ }, { "cell_type": "markdown", - "id": "27", + "id": "25", "metadata": {}, "source": [ "### 4.1 Sizes Plot\n", @@ -317,7 +300,7 @@ { "cell_type": "code", "execution_count": null, - "id": "28", + "id": "26", "metadata": {}, "outputs": [], "source": [ @@ -326,7 +309,7 @@ }, { "cell_type": "markdown", - "id": "29", + "id": "27", "metadata": {}, "source": [ "### 4.2 Effects Plot\n", @@ -337,7 +320,7 @@ { "cell_type": "code", "execution_count": null, - "id": "30", + "id": "28", "metadata": {}, "outputs": [], "source": [ @@ -347,7 +330,7 @@ { "cell_type": "code", "execution_count": null, - "id": "31", + "id": "29", "metadata": {}, "outputs": [], "source": [ @@ -358,7 +341,7 @@ { "cell_type": "code", "execution_count": null, - "id": "32", + "id": "30", "metadata": {}, "outputs": [], "source": [ @@ -367,7 +350,7 @@ }, { "cell_type": "markdown", - "id": "33", + "id": "31", "metadata": {}, "source": [ "### 4.3 Duration Curve\n", @@ -378,7 +361,7 @@ { "cell_type": "code", "execution_count": null, - "id": "34", + "id": "32", "metadata": {}, "outputs": [], "source": [ @@ -388,7 +371,7 @@ { "cell_type": "code", "execution_count": null, - "id": "35", + "id": "33", "metadata": {}, "outputs": [], "source": [ @@ -398,7 +381,7 @@ }, { "cell_type": "markdown", - "id": "36", + "id": "34", "metadata": {}, "source": [ "## 5. 
Heatmaps\n", @@ -409,7 +392,7 @@ { "cell_type": "code", "execution_count": null, - "id": "37", + "id": "35", "metadata": {}, "outputs": [], "source": [ @@ -420,7 +403,7 @@ { "cell_type": "code", "execution_count": null, - "id": "38", + "id": "36", "metadata": {}, "outputs": [], "source": [ @@ -431,7 +414,7 @@ { "cell_type": "code", "execution_count": null, - "id": "39", + "id": "37", "metadata": {}, "outputs": [], "source": [ @@ -441,7 +424,7 @@ }, { "cell_type": "markdown", - "id": "40", + "id": "38", "metadata": {}, "source": [ "## 6. Sankey Diagrams\n", @@ -451,7 +434,7 @@ }, { "cell_type": "markdown", - "id": "41", + "id": "39", "metadata": {}, "source": [ "### 6.1 Flow Sankey\n", @@ -462,7 +445,7 @@ { "cell_type": "code", "execution_count": null, - "id": "42", + "id": "40", "metadata": {}, "outputs": [], "source": [ @@ -472,7 +455,7 @@ { "cell_type": "code", "execution_count": null, - "id": "43", + "id": "41", "metadata": {}, "outputs": [], "source": [ @@ -482,7 +465,7 @@ }, { "cell_type": "markdown", - "id": "44", + "id": "42", "metadata": {}, "source": [ "### 6.2 Sizes Sankey\n", @@ -493,7 +476,7 @@ { "cell_type": "code", "execution_count": null, - "id": "45", + "id": "43", "metadata": {}, "outputs": [], "source": [ @@ -502,7 +485,7 @@ }, { "cell_type": "markdown", - "id": "46", + "id": "44", "metadata": {}, "source": [ "### 6.3 Peak Flow Sankey\n", @@ -513,7 +496,7 @@ { "cell_type": "code", "execution_count": null, - "id": "47", + "id": "45", "metadata": {}, "outputs": [], "source": [ @@ -522,7 +505,7 @@ }, { "cell_type": "markdown", - "id": "48", + "id": "46", "metadata": {}, "source": [ "### 6.4 Effects Sankey\n", @@ -533,7 +516,7 @@ { "cell_type": "code", "execution_count": null, - "id": "49", + "id": "47", "metadata": {}, "outputs": [], "source": [ @@ -543,7 +526,7 @@ { "cell_type": "code", "execution_count": null, - "id": "50", + "id": "48", "metadata": {}, "outputs": [], "source": [ @@ -553,7 +536,7 @@ }, { "cell_type": "markdown", - "id": "51", + "id": "49", "metadata": {}, "source": [ "### 6.5 Filtering with `select`\n", @@ -564,7 +547,7 @@ { "cell_type": "code", "execution_count": null, - "id": "52", + "id": "50", "metadata": {}, "outputs": [], "source": [ @@ -574,7 +557,7 @@ }, { "cell_type": "markdown", - "id": "53", + "id": "51", "metadata": {}, "source": [ "## 7. Topology Visualization\n", @@ -584,7 +567,7 @@ }, { "cell_type": "markdown", - "id": "54", + "id": "52", "metadata": {}, "source": [ "### 7.1 Topology Plot\n", @@ -595,7 +578,7 @@ { "cell_type": "code", "execution_count": null, - "id": "55", + "id": "53", "metadata": {}, "outputs": [], "source": [ @@ -605,7 +588,7 @@ { "cell_type": "code", "execution_count": null, - "id": "56", + "id": "54", "metadata": {}, "outputs": [], "source": [ @@ -614,7 +597,7 @@ }, { "cell_type": "markdown", - "id": "57", + "id": "55", "metadata": {}, "source": [ "### 7.2 Topology Info\n", @@ -625,7 +608,7 @@ { "cell_type": "code", "execution_count": null, - "id": "58", + "id": "56", "metadata": {}, "outputs": [], "source": [ @@ -642,7 +625,7 @@ }, { "cell_type": "markdown", - "id": "59", + "id": "57", "metadata": {}, "source": [ "## 8. 
Multi-Period/Scenario Data\n", @@ -653,7 +636,7 @@ { "cell_type": "code", "execution_count": null, - "id": "60", + "id": "58", "metadata": {}, "outputs": [], "source": [ @@ -666,7 +649,7 @@ { "cell_type": "code", "execution_count": null, - "id": "61", + "id": "59", "metadata": {}, "outputs": [], "source": [ @@ -677,7 +660,7 @@ { "cell_type": "code", "execution_count": null, - "id": "62", + "id": "60", "metadata": {}, "outputs": [], "source": [ @@ -688,7 +671,7 @@ { "cell_type": "code", "execution_count": null, - "id": "63", + "id": "61", "metadata": {}, "outputs": [], "source": [ @@ -698,7 +681,7 @@ }, { "cell_type": "markdown", - "id": "64", + "id": "62", "metadata": {}, "source": [ "## 9. Color Customization\n", @@ -709,7 +692,7 @@ { "cell_type": "code", "execution_count": null, - "id": "65", + "id": "63", "metadata": {}, "outputs": [], "source": [ @@ -720,7 +703,7 @@ { "cell_type": "code", "execution_count": null, - "id": "66", + "id": "64", "metadata": {}, "outputs": [], "source": [ @@ -731,7 +714,7 @@ { "cell_type": "code", "execution_count": null, - "id": "67", + "id": "65", "metadata": {}, "outputs": [], "source": [ @@ -749,7 +732,7 @@ }, { "cell_type": "markdown", - "id": "68", + "id": "66", "metadata": {}, "source": [ "## 10. Exporting Results\n", @@ -760,7 +743,7 @@ { "cell_type": "code", "execution_count": null, - "id": "69", + "id": "67", "metadata": {}, "outputs": [], "source": [ @@ -775,7 +758,7 @@ { "cell_type": "code", "execution_count": null, - "id": "70", + "id": "68", "metadata": {}, "outputs": [], "source": [ @@ -787,7 +770,7 @@ { "cell_type": "code", "execution_count": null, - "id": "71", + "id": "69", "metadata": {}, "outputs": [], "source": [ @@ -800,7 +783,7 @@ }, { "cell_type": "markdown", - "id": "72", + "id": "70", "metadata": {}, "source": [ "## Summary\n", diff --git a/docs/notebooks/data/generate_example_systems.py b/docs/notebooks/data/generate_example_systems.py index ec645e8b2..c53322ef2 100644 --- a/docs/notebooks/data/generate_example_systems.py +++ b/docs/notebooks/data/generate_example_systems.py @@ -11,11 +11,13 @@ Run this script to regenerate the example data files. """ +import sys from pathlib import Path import numpy as np import pandas as pd +# Handle imports in different contexts (direct run, package import, mkdocs-jupyter) try: from .generate_realistic_profiles import ( ElectricityLoadGenerator, @@ -25,6 +27,13 @@ load_weather, ) except ImportError: + # Add data directory to path for mkdocs-jupyter context + try: + _data_dir = Path(__file__).parent + except NameError: + _data_dir = Path('docs/notebooks/data') + if str(_data_dir) not in sys.path: + sys.path.insert(0, str(_data_dir)) from generate_realistic_profiles import ( ElectricityLoadGenerator, GasPriceGenerator, diff --git a/docs/notebooks/fxplot_accessor_demo.ipynb b/docs/notebooks/fxplot_accessor_demo.ipynb new file mode 100644 index 000000000..db8684d82 --- /dev/null +++ b/docs/notebooks/fxplot_accessor_demo.ipynb @@ -0,0 +1,565 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Dataset Plot Accessor Demo (`.fxplot`)\n", + "\n", + "This notebook demonstrates the new `.fxplot` accessor for `xr.Dataset` objects.\n", + "It provides convenient Plotly Express plotting methods with smart auto-faceting and coloring." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import pandas as pd\n", + "import xarray as xr\n", + "\n", + "import flixopt as fx\n", + "\n", + "fx.__version__" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import plotly.io as pio\n", + "\n", + "pio.renderers.default = 'notebook_connected'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create Sample Data\n", + "\n", + "Let's create a multi-dimensional dataset to demonstrate the plotting capabilities." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Simple time-series dataset\n", + "np.random.seed(42)\n", + "time = pd.date_range('2024-01-01', periods=24, freq='h')\n", + "\n", + "ds_simple = xr.Dataset(\n", + " {\n", + " 'Solar': (['time'], np.maximum(0, np.sin(np.linspace(0, 2 * np.pi, 24)) * 50 + np.random.randn(24) * 5)),\n", + " 'Wind': (['time'], np.abs(np.random.randn(24) * 20 + 30)),\n", + " 'Demand': (['time'], np.abs(np.sin(np.linspace(0, 2 * np.pi, 24) + 1) * 40 + 50 + np.random.randn(24) * 5)),\n", + " },\n", + " coords={'time': time},\n", + ")\n", + "\n", + "ds_simple.to_dataframe().head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Line Plot" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ds_simple.fxplot.line(title='Energy Generation & Demand')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Stacked Bar Chart" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ds_simple[['Solar', 'Wind']].fxplot.stacked_bar(title='Renewable Generation')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Area Chart" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ds_simple[['Solar', 'Wind']].fxplot.area(title='Stacked Area - Generation')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Grouped Bar Chart" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ds_simple.fxplot.bar(title='Grouped Bar Chart')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Heatmap" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create 2D data for heatmap\n", + "ds_heatmap = xr.Dataset(\n", + " {\n", + " 'temperature': (['day', 'hour'], np.random.randn(7, 24) * 5 + 20),\n", + " },\n", + " coords={\n", + " 'day': pd.date_range('2024-01-01', periods=7, freq='D'),\n", + " 'hour': range(24),\n", + " },\n", + ")\n", + "\n", + "ds_heatmap.fxplot.heatmap('temperature', title='Temperature Heatmap')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Automatic Faceting & Animation\n", + "\n", + "Extra dimensions are **automatically** assigned to `facet_col`, `facet_row`, and `animation_frame` based on CONFIG priority. Just call the plot method - no configuration needed!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Dataset with scenario AND period dimensions\n", + "ds_multi = xr.Dataset(\n", + " {\n", + " 'Solar': (['time', 'scenario', 'period'], np.random.rand(24, 2, 3) * 50),\n", + " 'Wind': (['time', 'scenario', 'period'], np.random.rand(24, 2, 3) * 40 + 20),\n", + " },\n", + " coords={\n", + " 'time': time,\n", + " 'scenario': ['base', 'high'],\n", + " 'period': ['winter', 'spring', 'summer'],\n", + " },\n", + ")\n", + "\n", + "ds_multi.to_dataframe().head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Just call .line() - dimensions are auto-assigned to facet_col, facet_row, animation_frame\n", + "ds_multi.fxplot.line(title='Auto-Faceted: Just Works!')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Same for stacked bar - auto-assigns period to facet_col, scenario to animation\n", + "ds_multi.fxplot.stacked_bar(title='Stacked Bar: Also Just Works!')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Same for stacked bar - auto-assigns period to facet_col, scenario to animation\n", + "ds_multi.sum('time').fxplot.stacked_bar(title='Stacked Bar: Also Just Works!', x='variable', colors=None)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Customizing Facets & Animation\n", + "\n", + "Override auto-assignment when needed. Use `None` to disable a slot entirely." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Swap: put scenario in facet_col, period in animation\n", + "ds_multi.fxplot.line(facet_col='scenario', animation_frame='period', title='Swapped: Scenario in Columns')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Use both row and column facets - no animation\n", + "ds_multi.sum('time').fxplot.area(\n", + " facet_col='scenario', facet_row='period', animation_frame=None, title='Grid: Period × Scenario'\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Or reduce dimensions with .sel() for a simpler plot\n", + "ds_multi.sel(scenario='base', period='summer').fxplot.line(title='Single Slice: No Faceting Needed')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Custom Colors" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Using a colorscale name\n", + "ds_simple.fxplot.line(colors='viridis', title='With Viridis Colorscale')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Using explicit color mapping\n", + "ds_simple.fxplot.stacked_bar(\n", + " colors={'Solar': 'gold', 'Wind': 'skyblue', 'Demand': 'salmon'}, title='With Custom Colors'\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Chaining with Plotly Methods\n", + "\n", + "Since all methods return `go.Figure`, you can chain Plotly's update methods." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "(\n", + " ds_simple.fxplot.line(title='Customized Plot')\n", + " .update_layout(xaxis_title='Time of Day', yaxis_title='Power (MW)', legend_title='Source', template='plotly_white')\n", + " .update_traces(line_width=2)\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Pre-filtering with xarray\n", + "\n", + "Filter data using xarray methods before plotting." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Select specific time range\n", + "ds_simple.sel(time=slice('2024-01-01 06:00', '2024-01-01 18:00')).fxplot.line(title='Daytime Only')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Select specific variables\n", + "ds_simple[['Solar', 'Wind']].fxplot.area(title='Renewables Only')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## DataArray Accessor\n", + "\n", + "The `.fxplot` accessor also works on `xr.DataArray` objects directly." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Create a DataArray\n", + "da = xr.DataArray(\n", + " np.random.randn(24, 7) * 5 + 20,\n", + " dims=['time', 'day'],\n", + " coords={\n", + " 'time': pd.date_range('2024-01-01', periods=24, freq='h'),\n", + " 'day': pd.date_range('2024-01-01', periods=7, freq='D'),\n", + " },\n", + " name='temperature',\n", + ")\n", + "\n", + "# Heatmap directly from DataArray\n", + "da.fxplot.heatmap(title='DataArray Heatmap')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Line plot from DataArray (converts to Dataset internally)\n", + "da_1d = xr.DataArray(\n", + " np.sin(np.linspace(0, 4 * np.pi, 100)) * 50,\n", + " dims=['time'],\n", + " coords={'time': pd.date_range('2024-01-01', periods=100, freq='h')},\n", + " name='signal',\n", + ")\n", + "da_1d.fxplot.line(title='DataArray Line Plot')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Axis Labels\n", + "\n", + "Use `xlabel` and `ylabel` parameters to customize axis labels." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ds_simple.fxplot.line(title='Generation with Custom Axis Labels', xlabel='Time of Day', ylabel='Power [MW]')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Scatter Plot\n", + "\n", + "Plot two variables against each other." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Basic scatter plot\n", + "ds_simple.fxplot.scatter(\n", + " x='Solar', y='Demand', title='Solar vs Demand Correlation', xlabel='Solar Generation [MW]', ylabel='Demand [MW]'\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Scatter with faceting by period, for one scenario\n", + "ds_multi.sel(scenario='high').fxplot.scatter(\n", + " x='Solar', y='Wind', facet_col='period', title='Solar vs Wind by Period (High Scenario)'\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Pie Chart\n", + "\n", + "Aggregate data to at most 1D per variable. Scalar data creates a single pie; 1D data creates faceted pies." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Single pie from scalar values (sum over time)\n", + "ds_simple[['Solar', 'Wind']].sum('time').fxplot.pie(\n", + " title='Total Generation by Source', colors={'Solar': 'gold', 'Wind': 'skyblue'}\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Faceted pie - auto-assigns scenario and period to facets\n", + "ds_multi.sum('time').fxplot.pie(\n", + " title='Generation by Source (Scenario × Period)',\n", + " colors={'Solar': 'gold', 'Wind': 'skyblue'},\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Duration Curve\n", + "\n", + "Use `.fxstats.to_duration_curve()` to transform data, then `.fxplot.line()` to plot. Clean separation of transformation and plotting." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Duration curve with normalized x-axis (percentage)\n", + "ds_simple.fxstats.to_duration_curve().fxplot.line(title='Duration Curves', xlabel='Duration [%]')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Duration curve with absolute timesteps\n", + "ds_simple.fxstats.to_duration_curve(normalize=False).fxplot.line(title='Duration Curves', xlabel='Timesteps')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Duration curve with auto-faceting - works seamlessly!\n", + "ds_multi.fxstats.to_duration_curve().fxplot.line(title='Duration Curves (Auto-Faceted)', xlabel='Duration [%]')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Line Shape Configuration\n", + "\n", + "The default line shape is controlled by `CONFIG.Plotting.default_line_shape` (default: `'hv'` for step plots).\n", + "Override per-plot with the `line_shape` parameter. Options: `'linear'`, `'hv'`, `'vh'`, `'hvh'`, `'vhv'`, `'spline'`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Default step plot (hv)\n", + "ds_simple[['Solar']].fxplot.line(title='Default Step Plot (hv)')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Override to linear interpolation\n", + "ds_simple[['Solar']].fxplot.line(line_shape='linear', title='Linear Interpolation')" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/docs/user-guide/recipes/plotting-custom-data.md b/docs/user-guide/recipes/plotting-custom-data.md index 3c539e6ce..8c19931f3 100644 --- a/docs/user-guide/recipes/plotting-custom-data.md +++ b/docs/user-guide/recipes/plotting-custom-data.md @@ -1,125 +1,30 @@ # Plotting Custom Data -The plot accessor (`flow_system.statistics.plot`) is designed for visualizing optimization results using element labels. If you want to create faceted plots with your own custom data (not from a FlowSystem), you can use Plotly Express directly with xarray data. 
+While the plot accessor (`flow_system.statistics.plot`) is designed for optimization results, you often need to plot custom xarray data. The `.fxplot` accessor provides the same convenience for any `xr.Dataset` or `xr.DataArray`. -## Faceted Plots with Custom xarray Data - -The key is converting your xarray Dataset to a long-form DataFrame that Plotly Express expects: +## Quick Example ```python +import flixopt as fx import xarray as xr -import pandas as pd -import plotly.express as px -# Your custom xarray Dataset -my_data = xr.Dataset({ - 'Solar': (['time', 'scenario'], solar_values), - 'Wind': (['time', 'scenario'], wind_values), - 'Demand': (['time', 'scenario'], demand_values), -}, coords={ - 'time': timestamps, - 'scenario': ['Base', 'High RE', 'Low Demand'] +ds = xr.Dataset({ + 'Solar': (['time'], solar_values), + 'Wind': (['time'], wind_values), }) -# Convert to long-form DataFrame for Plotly Express -df = ( - my_data - .to_dataframe() - .reset_index() - .melt( - id_vars=['time', 'scenario'], # Keep as columns - var_name='variable', - value_name='value' - ) -) - -# Faceted stacked bar chart -fig = px.bar( - df, - x='time', - y='value', - color='variable', - facet_col='scenario', - barmode='relative', - title='Energy Balance by Scenario' -) -fig.show() - -# Faceted line plot -fig = px.line( - df, - x='time', - y='value', - color='variable', - facet_col='scenario' -) -fig.show() - -# Faceted area chart -fig = px.area( - df, - x='time', - y='value', - color='variable', - facet_col='scenario' -) -fig.show() -``` - -## Common Plotly Express Faceting Options - -| Parameter | Description | -|-----------|-------------| -| `facet_col` | Dimension for column subplots | -| `facet_row` | Dimension for row subplots | -| `animation_frame` | Dimension for animation slider | -| `facet_col_wrap` | Number of columns before wrapping | - -```python -# Row and column facets -fig = px.line(df, x='time', y='value', color='variable', - facet_col='scenario', facet_row='region') - -# Animation over time periods -fig = px.bar(df, x='variable', y='value', color='variable', - animation_frame='period', barmode='group') - -# Wrap columns -fig = px.line(df, x='time', y='value', color='variable', - facet_col='scenario', facet_col_wrap=2) +# Plot directly - no conversion needed! 
+ds.fxplot.line(title='Energy Generation') +ds.fxplot.stacked_bar(title='Stacked Generation') ``` -## Heatmaps with Custom Data - -For heatmaps, you can pass 2D arrays directly to `px.imshow`: - -```python -import plotly.express as px - -# 2D data (e.g., days × hours) -heatmap_data = my_data['Solar'].sel(scenario='Base').values.reshape(365, 24) +## Full Documentation -fig = px.imshow( - heatmap_data, - labels={'x': 'Hour', 'y': 'Day', 'color': 'Power [kW]'}, - aspect='auto', - color_continuous_scale='portland' -) -fig.show() - -# Faceted heatmaps using subplots -from plotly.subplots import make_subplots -import plotly.graph_objects as go - -scenarios = ['Base', 'High RE'] -fig = make_subplots(rows=1, cols=len(scenarios), subplot_titles=scenarios) - -for i, scenario in enumerate(scenarios, 1): - data = my_data['Solar'].sel(scenario=scenario).values.reshape(365, 24) - fig.add_trace(go.Heatmap(z=data, colorscale='portland'), row=1, col=i) - -fig.update_layout(title='Solar Output by Scenario') -fig.show() -``` +For comprehensive documentation with interactive examples, see the [Custom Data Plotting](../../notebooks/fxplot_accessor_demo.ipynb) notebook which covers: -This approach gives you full control over your visualizations while leveraging Plotly's powerful faceting capabilities. +- All available plot methods (line, bar, stacked_bar, area, scatter, heatmap, pie) +- Automatic x-axis selection and faceting +- Custom colors and axis labels +- Duration curves with `.fxstats.to_duration_curve()` +- Configuration options +- Combining with xarray operations diff --git a/flixopt/__init__.py b/flixopt/__init__.py index 1089bf743..0fd550707 100644 --- a/flixopt/__init__.py +++ b/flixopt/__init__.py @@ -13,6 +13,9 @@ # Import commonly used classes and functions from . import clustering, linear_converters, plotting, results, solvers + +# Register xr.Dataset.fxplot accessor (import triggers registration via decorator) +from . import dataset_plot_accessor as _ # noqa: F401 from .carrier import Carrier, CarrierContainer from .components import ( LinearConverter, diff --git a/flixopt/clustering/base.py b/flixopt/clustering/base.py index fadf28247..4b31832e4 100644 --- a/flixopt/clustering/base.py +++ b/flixopt/clustering/base.py @@ -197,7 +197,7 @@ def get_cluster_weight_per_timestep(self) -> xr.DataArray: name='cluster_weight', ) - def plot(self, show: bool | None = None): + def plot(self, show: bool | None = None) -> PlotResult: """Plot cluster assignment visualization. Shows which cluster each original period belongs to, and the diff --git a/flixopt/config.py b/flixopt/config.py index 7e7c784cb..454f8ad3e 100644 --- a/flixopt/config.py +++ b/flixopt/config.py @@ -163,8 +163,10 @@ def format(self, record): 'default_facet_cols': 3, 'default_sequential_colorscale': 'turbo', 'default_qualitative_colorscale': 'plotly', + 'default_line_shape': 'hv', 'extra_dim_priority': ('cluster', 'period', 'scenario'), 'dim_slot_priority': ('facet_col', 'facet_row', 'animation_frame'), + 'x_dim_priority': ('time', 'duration', 'duration_pct', 'period', 'scenario', 'cluster'), } ), 'solving': MappingProxyType( @@ -561,9 +563,8 @@ class Plotting: default_sequential_colorscale: Default colorscale for heatmaps and continuous data. default_qualitative_colorscale: Default colormap for categorical plots (bar/line/area charts). extra_dim_priority: Order of extra dimensions when auto-assigning to slots. - Default: ('cluster', 'period', 'scenario'). dim_slot_priority: Order of slots to fill with extra dimensions. 
- Default: ('facet_col', 'facet_row', 'animation_frame'). + x_dim_priority: Order of dimensions to prefer for x-axis when 'auto'. Examples: ```python @@ -572,10 +573,9 @@ class Plotting: CONFIG.Plotting.default_sequential_colorscale = 'plasma' CONFIG.Plotting.default_qualitative_colorscale = 'Dark24' - # Customize dimension handling - # With 2 extra dims (period, scenario): period → facet_col, scenario → facet_row - CONFIG.Plotting.extra_dim_priority = ('cluster', 'period', 'scenario') - CONFIG.Plotting.dim_slot_priority = ('facet_col', 'facet_row', 'animation_frame') + # Customize dimension handling for faceting + CONFIG.Plotting.extra_dim_priority = ('scenario', 'period', 'cluster') + CONFIG.Plotting.dim_slot_priority = ('facet_row', 'facet_col', 'animation_frame') ``` """ @@ -585,8 +585,10 @@ class Plotting: default_facet_cols: int = _DEFAULTS['plotting']['default_facet_cols'] default_sequential_colorscale: str = _DEFAULTS['plotting']['default_sequential_colorscale'] default_qualitative_colorscale: str = _DEFAULTS['plotting']['default_qualitative_colorscale'] + default_line_shape: str = _DEFAULTS['plotting']['default_line_shape'] extra_dim_priority: tuple[str, ...] = _DEFAULTS['plotting']['extra_dim_priority'] dim_slot_priority: tuple[str, ...] = _DEFAULTS['plotting']['dim_slot_priority'] + x_dim_priority: tuple[str, ...] = _DEFAULTS['plotting']['x_dim_priority'] class Carriers: """Default carrier definitions for common energy types. @@ -687,8 +689,10 @@ def to_dict(cls) -> dict: 'default_facet_cols': cls.Plotting.default_facet_cols, 'default_sequential_colorscale': cls.Plotting.default_sequential_colorscale, 'default_qualitative_colorscale': cls.Plotting.default_qualitative_colorscale, + 'default_line_shape': cls.Plotting.default_line_shape, 'extra_dim_priority': cls.Plotting.extra_dim_priority, 'dim_slot_priority': cls.Plotting.dim_slot_priority, + 'x_dim_priority': cls.Plotting.x_dim_priority, }, } diff --git a/flixopt/dataset_plot_accessor.py b/flixopt/dataset_plot_accessor.py new file mode 100644 index 000000000..fc38f730b --- /dev/null +++ b/flixopt/dataset_plot_accessor.py @@ -0,0 +1,891 @@ +"""Xarray accessors for plotting (``.fxplot``) and statistics (``.fxstats``).""" + +from __future__ import annotations + +from typing import Any, Literal + +import pandas as pd +import plotly.express as px +import plotly.graph_objects as go +import xarray as xr + +from .color_processing import ColorType, process_colors +from .config import CONFIG + + +def _get_x_dim(dims: list[str], x: str | Literal['auto'] | None = 'auto') -> str: + """Select x-axis dim from priority list, or 'variable' for scalar data.""" + if x and x != 'auto': + return x + + # Check priority list first + for dim in CONFIG.Plotting.x_dim_priority: + if dim in dims: + return dim + + # Fallback to first available dimension, or 'variable' for scalar data + return dims[0] if dims else 'variable' + + +def _resolve_auto_facets( + ds: xr.Dataset, + facet_col: str | Literal['auto'] | None, + facet_row: str | Literal['auto'] | None, + animation_frame: str | Literal['auto'] | None = None, + exclude_dims: set[str] | None = None, +) -> tuple[str | None, str | None, str | None]: + """Assign 'auto' facet slots from available dims using CONFIG priority lists.""" + # Get available extra dimensions with size > 1, excluding specified dims + exclude = exclude_dims or set() + available = {d for d in ds.dims if ds.sizes[d] > 1 and d not in exclude} + extra_dims = [d for d in CONFIG.Plotting.extra_dim_priority if d in available] + used: 
set[str] = set() + + # Map slot names to their input values + slots = { + 'facet_col': facet_col, + 'facet_row': facet_row, + 'animation_frame': animation_frame, + } + results: dict[str, str | None] = {'facet_col': None, 'facet_row': None, 'animation_frame': None} + + # First pass: resolve explicit dimensions (not 'auto' or None) to mark them as used + for slot_name, value in slots.items(): + if value is not None and value != 'auto': + if value in available and value not in used: + used.add(value) + results[slot_name] = value + + # Second pass: resolve 'auto' slots in dim_slot_priority order + dim_iter = iter(d for d in extra_dims if d not in used) + for slot_name in CONFIG.Plotting.dim_slot_priority: + if slots.get(slot_name) == 'auto': + next_dim = next(dim_iter, None) + if next_dim: + used.add(next_dim) + results[slot_name] = next_dim + + return results['facet_col'], results['facet_row'], results['animation_frame'] + + +def _dataset_to_long_df(ds: xr.Dataset, value_name: str = 'value', var_name: str = 'variable') -> pd.DataFrame: + """Convert Dataset to long-form DataFrame for Plotly Express.""" + if not ds.data_vars: + return pd.DataFrame() + if all(ds[var].ndim == 0 for var in ds.data_vars): + rows = [{var_name: var, value_name: float(ds[var].values)} for var in ds.data_vars] + return pd.DataFrame(rows) + df = ds.to_dataframe().reset_index() + # Use dims (not just coords) as id_vars - dims without coords become integer indices + id_cols = [c for c in ds.dims if c in df.columns] + return df.melt(id_vars=id_cols, var_name=var_name, value_name=value_name) + + +@xr.register_dataset_accessor('fxplot') +class DatasetPlotAccessor: + """Plot accessor for any xr.Dataset. Access via ``dataset.fxplot``. + + Provides convenient plotting methods that automatically handle multi-dimensional + data through faceting and animation. All methods return a Plotly Figure. + + This accessor is globally registered when flixopt is imported and works on + any xr.Dataset. + + Examples: + Basic usage:: + + import flixopt + import xarray as xr + + ds = xr.Dataset({'A': (['time'], [1, 2, 3]), 'B': (['time'], [3, 2, 1])}) + ds.fxplot.stacked_bar() + ds.fxplot.line() + ds.fxplot.area() + + With faceting:: + + ds.fxplot.stacked_bar(facet_col='scenario') + ds.fxplot.line(facet_col='period', animation_frame='scenario') + + Heatmap:: + + ds.fxplot.heatmap('temperature') + """ + + def __init__(self, xarray_obj: xr.Dataset) -> None: + """Initialize the accessor with an xr.Dataset object.""" + self._ds = xarray_obj + + def bar( + self, + *, + x: str | Literal['auto'] | None = 'auto', + colors: ColorType | None = None, + title: str = '', + xlabel: str = '', + ylabel: str = '', + facet_col: str | Literal['auto'] | None = 'auto', + facet_row: str | Literal['auto'] | None = 'auto', + animation_frame: str | Literal['auto'] | None = 'auto', + facet_cols: int | None = None, + **px_kwargs: Any, + ) -> go.Figure: + """Create a grouped bar chart from the dataset. + + Args: + x: Dimension for x-axis. 'auto' uses CONFIG.Plotting.x_dim_priority. + colors: Color specification (colorscale name, color list, or dict mapping). + title: Plot title. + xlabel: X-axis label. + ylabel: Y-axis label. + facet_col: Dimension for column facets. 'auto' uses CONFIG priority. + facet_row: Dimension for row facets. 'auto' uses CONFIG priority. + animation_frame: Dimension for animation slider. + facet_cols: Number of columns in facet grid wrap. + **px_kwargs: Additional arguments passed to plotly.express.bar. + + Returns: + Plotly Figure. 
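+
+        Examples:
+            A minimal sketch, assuming ``ds`` is an ``xr.Dataset`` with a
+            'time' dimension (the x-axis is then auto-resolved to 'time'):
+
+            >>> ds.fxplot.bar(title='Generation')
+            >>> ds.sum('time').fxplot.bar(x='variable', ylabel='Total')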
+ """ + # Determine x-axis first, then resolve facets from remaining dims + dims = list(self._ds.dims) + x_col = _get_x_dim(dims, x) + actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( + self._ds, facet_col, facet_row, animation_frame, exclude_dims={x_col} + ) + + df = _dataset_to_long_df(self._ds) + if df.empty: + return go.Figure() + + variables = df['variable'].unique().tolist() + color_map = process_colors(colors, variables, default_colorscale=CONFIG.Plotting.default_qualitative_colorscale) + + facet_col_wrap = facet_cols or CONFIG.Plotting.default_facet_cols + fig_kwargs: dict[str, Any] = { + 'data_frame': df, + 'x': x_col, + 'y': 'value', + 'title': title, + 'barmode': 'group', + } + # Only color by variable if it's not already on x-axis (and user didn't override) + if x_col != 'variable' and 'color' not in px_kwargs: + fig_kwargs['color'] = 'variable' + fig_kwargs['color_discrete_map'] = color_map + if xlabel: + fig_kwargs['labels'] = {x_col: xlabel} + if ylabel: + fig_kwargs['labels'] = {**fig_kwargs.get('labels', {}), 'value': ylabel} + + if actual_facet_col and 'facet_col' not in px_kwargs: + fig_kwargs['facet_col'] = actual_facet_col + if facet_col_wrap < self._ds.sizes.get(actual_facet_col, facet_col_wrap + 1): + fig_kwargs['facet_col_wrap'] = facet_col_wrap + if actual_facet_row and 'facet_row' not in px_kwargs: + fig_kwargs['facet_row'] = actual_facet_row + if actual_anim and 'animation_frame' not in px_kwargs: + fig_kwargs['animation_frame'] = actual_anim + + return px.bar(**{**fig_kwargs, **px_kwargs}) + + def stacked_bar( + self, + *, + x: str | Literal['auto'] | None = 'auto', + colors: ColorType | None = None, + title: str = '', + xlabel: str = '', + ylabel: str = '', + facet_col: str | Literal['auto'] | None = 'auto', + facet_row: str | Literal['auto'] | None = 'auto', + animation_frame: str | Literal['auto'] | None = 'auto', + facet_cols: int | None = None, + **px_kwargs: Any, + ) -> go.Figure: + """Create a stacked bar chart from the dataset. + + Variables in the dataset become stacked segments. Positive and negative + values are stacked separately. + + Args: + x: Dimension for x-axis. 'auto' uses CONFIG.Plotting.x_dim_priority. + colors: Color specification (colorscale name, color list, or dict mapping). + title: Plot title. + xlabel: X-axis label. + ylabel: Y-axis label. + facet_col: Dimension for column facets. 'auto' uses CONFIG priority. + facet_row: Dimension for row facets. + animation_frame: Dimension for animation slider. + facet_cols: Number of columns in facet grid wrap. + **px_kwargs: Additional arguments passed to plotly.express.bar. + + Returns: + Plotly Figure. 
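+
+        Example:
+            A minimal sketch, assuming a ``time`` dimension:
+
+            >>> ds.fxplot.stacked_bar(ylabel='Power')  # variables stacked per timestep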
+ """ + # Determine x-axis first, then resolve facets from remaining dims + dims = list(self._ds.dims) + x_col = _get_x_dim(dims, x) + actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( + self._ds, facet_col, facet_row, animation_frame, exclude_dims={x_col} + ) + + df = _dataset_to_long_df(self._ds) + if df.empty: + return go.Figure() + + variables = df['variable'].unique().tolist() + color_map = process_colors(colors, variables, default_colorscale=CONFIG.Plotting.default_qualitative_colorscale) + + facet_col_wrap = facet_cols or CONFIG.Plotting.default_facet_cols + fig_kwargs: dict[str, Any] = { + 'data_frame': df, + 'x': x_col, + 'y': 'value', + 'title': title, + } + # Only color by variable if it's not already on x-axis (and user didn't override) + if x_col != 'variable' and 'color' not in px_kwargs: + fig_kwargs['color'] = 'variable' + fig_kwargs['color_discrete_map'] = color_map + if xlabel: + fig_kwargs['labels'] = {x_col: xlabel} + if ylabel: + fig_kwargs['labels'] = {**fig_kwargs.get('labels', {}), 'value': ylabel} + + if actual_facet_col and 'facet_col' not in px_kwargs: + fig_kwargs['facet_col'] = actual_facet_col + if facet_col_wrap < self._ds.sizes.get(actual_facet_col, facet_col_wrap + 1): + fig_kwargs['facet_col_wrap'] = facet_col_wrap + if actual_facet_row and 'facet_row' not in px_kwargs: + fig_kwargs['facet_row'] = actual_facet_row + if actual_anim and 'animation_frame' not in px_kwargs: + fig_kwargs['animation_frame'] = actual_anim + + fig = px.bar(**{**fig_kwargs, **px_kwargs}) + fig.update_layout(barmode='relative', bargap=0, bargroupgap=0) + fig.update_traces(marker_line_width=0) + return fig + + def line( + self, + *, + x: str | Literal['auto'] | None = 'auto', + colors: ColorType | None = None, + title: str = '', + xlabel: str = '', + ylabel: str = '', + facet_col: str | Literal['auto'] | None = 'auto', + facet_row: str | Literal['auto'] | None = 'auto', + animation_frame: str | Literal['auto'] | None = 'auto', + facet_cols: int | None = None, + line_shape: str | None = None, + **px_kwargs: Any, + ) -> go.Figure: + """Create a line chart from the dataset. + + Each variable in the dataset becomes a separate line. + + Args: + x: Dimension for x-axis. 'auto' uses CONFIG.Plotting.x_dim_priority. + colors: Color specification (colorscale name, color list, or dict mapping). + title: Plot title. + xlabel: X-axis label. + ylabel: Y-axis label. + facet_col: Dimension for column facets. 'auto' uses CONFIG priority. + facet_row: Dimension for row facets. + animation_frame: Dimension for animation slider. + facet_cols: Number of columns in facet grid wrap. + line_shape: Line interpolation ('linear', 'hv', 'vh', 'hvh', 'vhv', 'spline'). + Default from CONFIG.Plotting.default_line_shape. + **px_kwargs: Additional arguments passed to plotly.express.line. + + Returns: + Plotly Figure. 
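+
+        Example:
+            A minimal sketch, assuming ``time`` and ``scenario`` dimensions:
+
+            >>> ds.fxplot.line()  # line_shape from CONFIG.Plotting.default_line_shape
+            >>> ds.fxplot.line(line_shape='linear', facet_col='scenario')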
+ """ + # Determine x-axis first, then resolve facets from remaining dims + dims = list(self._ds.dims) + x_col = _get_x_dim(dims, x) + actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( + self._ds, facet_col, facet_row, animation_frame, exclude_dims={x_col} + ) + + df = _dataset_to_long_df(self._ds) + if df.empty: + return go.Figure() + + variables = df['variable'].unique().tolist() + color_map = process_colors(colors, variables, default_colorscale=CONFIG.Plotting.default_qualitative_colorscale) + + facet_col_wrap = facet_cols or CONFIG.Plotting.default_facet_cols + fig_kwargs: dict[str, Any] = { + 'data_frame': df, + 'x': x_col, + 'y': 'value', + 'title': title, + 'line_shape': line_shape or CONFIG.Plotting.default_line_shape, + } + # Only color by variable if it's not already on x-axis (and user didn't override) + if x_col != 'variable' and 'color' not in px_kwargs: + fig_kwargs['color'] = 'variable' + fig_kwargs['color_discrete_map'] = color_map + if xlabel: + fig_kwargs['labels'] = {x_col: xlabel} + if ylabel: + fig_kwargs['labels'] = {**fig_kwargs.get('labels', {}), 'value': ylabel} + + if actual_facet_col and 'facet_col' not in px_kwargs: + fig_kwargs['facet_col'] = actual_facet_col + if facet_col_wrap < self._ds.sizes.get(actual_facet_col, facet_col_wrap + 1): + fig_kwargs['facet_col_wrap'] = facet_col_wrap + if actual_facet_row and 'facet_row' not in px_kwargs: + fig_kwargs['facet_row'] = actual_facet_row + if actual_anim and 'animation_frame' not in px_kwargs: + fig_kwargs['animation_frame'] = actual_anim + + return px.line(**{**fig_kwargs, **px_kwargs}) + + def area( + self, + *, + x: str | Literal['auto'] | None = 'auto', + colors: ColorType | None = None, + title: str = '', + xlabel: str = '', + ylabel: str = '', + facet_col: str | Literal['auto'] | None = 'auto', + facet_row: str | Literal['auto'] | None = 'auto', + animation_frame: str | Literal['auto'] | None = 'auto', + facet_cols: int | None = None, + line_shape: str | None = None, + **px_kwargs: Any, + ) -> go.Figure: + """Create a stacked area chart from the dataset. + + Args: + x: Dimension for x-axis. 'auto' uses CONFIG.Plotting.x_dim_priority. + colors: Color specification (colorscale name, color list, or dict mapping). + title: Plot title. + xlabel: X-axis label. + ylabel: Y-axis label. + facet_col: Dimension for column facets. 'auto' uses CONFIG priority. + facet_row: Dimension for row facets. + animation_frame: Dimension for animation slider. + facet_cols: Number of columns in facet grid wrap. + line_shape: Line interpolation. Default from CONFIG.Plotting.default_line_shape. + **px_kwargs: Additional arguments passed to plotly.express.area. + + Returns: + Plotly Figure. 
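+
+        Example:
+            A minimal sketch, assuming a ``time`` dimension:
+
+            >>> ds.fxplot.area(title='Generation mix')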
+ """ + # Determine x-axis first, then resolve facets from remaining dims + dims = list(self._ds.dims) + x_col = _get_x_dim(dims, x) + actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( + self._ds, facet_col, facet_row, animation_frame, exclude_dims={x_col} + ) + + df = _dataset_to_long_df(self._ds) + if df.empty: + return go.Figure() + + variables = df['variable'].unique().tolist() + color_map = process_colors(colors, variables, default_colorscale=CONFIG.Plotting.default_qualitative_colorscale) + + facet_col_wrap = facet_cols or CONFIG.Plotting.default_facet_cols + fig_kwargs: dict[str, Any] = { + 'data_frame': df, + 'x': x_col, + 'y': 'value', + 'title': title, + 'line_shape': line_shape or CONFIG.Plotting.default_line_shape, + } + # Only color by variable if it's not already on x-axis (and user didn't override) + if x_col != 'variable' and 'color' not in px_kwargs: + fig_kwargs['color'] = 'variable' + fig_kwargs['color_discrete_map'] = color_map + if xlabel: + fig_kwargs['labels'] = {x_col: xlabel} + if ylabel: + fig_kwargs['labels'] = {**fig_kwargs.get('labels', {}), 'value': ylabel} + + if actual_facet_col and 'facet_col' not in px_kwargs: + fig_kwargs['facet_col'] = actual_facet_col + if facet_col_wrap < self._ds.sizes.get(actual_facet_col, facet_col_wrap + 1): + fig_kwargs['facet_col_wrap'] = facet_col_wrap + if actual_facet_row and 'facet_row' not in px_kwargs: + fig_kwargs['facet_row'] = actual_facet_row + if actual_anim and 'animation_frame' not in px_kwargs: + fig_kwargs['animation_frame'] = actual_anim + + return px.area(**{**fig_kwargs, **px_kwargs}) + + def heatmap( + self, + variable: str | None = None, + *, + colors: str | list[str] | None = None, + title: str = '', + facet_col: str | Literal['auto'] | None = 'auto', + animation_frame: str | Literal['auto'] | None = 'auto', + facet_cols: int | None = None, + **imshow_kwargs: Any, + ) -> go.Figure: + """Create a heatmap visualization. + + If the dataset has multiple variables, select one with the `variable` parameter. + If only one variable exists, it is used automatically. + + Args: + variable: Variable name to plot. Required if dataset has multiple variables. + If None and dataset has one variable, that variable is used. + colors: Colorscale name or list of colors. + title: Plot title. + facet_col: Dimension for column facets. + animation_frame: Dimension for animation slider. + facet_cols: Number of columns in facet grid wrap. + **imshow_kwargs: Additional arguments passed to plotly.express.imshow. + + Returns: + Plotly Figure. + """ + # Select single variable + if variable is None: + if len(self._ds.data_vars) == 1: + variable = list(self._ds.data_vars)[0] + else: + raise ValueError( + f'Dataset has {len(self._ds.data_vars)} variables. ' + f"Please specify which variable to plot with variable='name'." 
+ ) + + da = self._ds[variable] + + if da.size == 0: + return go.Figure() + + colors = colors or CONFIG.Plotting.default_sequential_colorscale + facet_col_wrap = facet_cols or CONFIG.Plotting.default_facet_cols + + actual_facet_col, _, actual_anim = _resolve_auto_facets(self._ds, facet_col, None, animation_frame) + + imshow_args: dict[str, Any] = { + 'img': da, + 'color_continuous_scale': colors, + 'title': title or variable, + } + + if actual_facet_col and actual_facet_col in da.dims: + imshow_args['facet_col'] = actual_facet_col + if facet_col_wrap < da.sizes[actual_facet_col]: + imshow_args['facet_col_wrap'] = facet_col_wrap + + if actual_anim and actual_anim in da.dims: + imshow_args['animation_frame'] = actual_anim + + return px.imshow(**{**imshow_args, **imshow_kwargs}) + + def scatter( + self, + x: str, + y: str, + *, + title: str = '', + xlabel: str = '', + ylabel: str = '', + facet_col: str | Literal['auto'] | None = 'auto', + facet_row: str | Literal['auto'] | None = 'auto', + animation_frame: str | Literal['auto'] | None = 'auto', + facet_cols: int | None = None, + **px_kwargs: Any, + ) -> go.Figure: + """Create a scatter plot from two variables in the dataset. + + Args: + x: Variable name for x-axis. + y: Variable name for y-axis. + title: Plot title. + xlabel: X-axis label. + ylabel: Y-axis label. + facet_col: Dimension for column facets. 'auto' uses CONFIG priority. + facet_row: Dimension for row facets. + animation_frame: Dimension for animation slider. + facet_cols: Number of columns in facet grid wrap. + **px_kwargs: Additional arguments passed to plotly.express.scatter. + + Returns: + Plotly Figure. + """ + if x not in self._ds.data_vars: + raise ValueError(f"Variable '{x}' not found in dataset. Available: {list(self._ds.data_vars)}") + if y not in self._ds.data_vars: + raise ValueError(f"Variable '{y}' not found in dataset. Available: {list(self._ds.data_vars)}") + + df = self._ds[[x, y]].to_dataframe().reset_index() + if df.empty: + return go.Figure() + + actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( + self._ds, facet_col, facet_row, animation_frame + ) + + facet_col_wrap = facet_cols or CONFIG.Plotting.default_facet_cols + fig_kwargs: dict[str, Any] = { + 'data_frame': df, + 'x': x, + 'y': y, + 'title': title, + **px_kwargs, + } + if xlabel: + fig_kwargs['labels'] = {**fig_kwargs.get('labels', {}), x: xlabel} + if ylabel: + fig_kwargs['labels'] = {**fig_kwargs.get('labels', {}), y: ylabel} + + if actual_facet_col: + fig_kwargs['facet_col'] = actual_facet_col + if facet_col_wrap < self._ds.sizes.get(actual_facet_col, facet_col_wrap + 1): + fig_kwargs['facet_col_wrap'] = facet_col_wrap + if actual_facet_row: + fig_kwargs['facet_row'] = actual_facet_row + if actual_anim: + fig_kwargs['animation_frame'] = actual_anim + + return px.scatter(**fig_kwargs) + + def pie( + self, + *, + colors: ColorType | None = None, + title: str = '', + facet_col: str | Literal['auto'] | None = 'auto', + facet_row: str | Literal['auto'] | None = 'auto', + animation_frame: str | Literal['auto'] | None = 'auto', + facet_cols: int | None = None, + **px_kwargs: Any, + ) -> go.Figure: + """Create a pie chart from aggregated dataset values. + + Extra dimensions are auto-assigned to facet_col, facet_row, and animation_frame. + For scalar values, a single pie is shown. + + Args: + colors: Color specification (colorscale name, color list, or dict mapping). + title: Plot title. + facet_col: Dimension for column facets. 'auto' uses CONFIG priority. 
+ facet_row: Dimension for row facets. 'auto' uses CONFIG priority. + animation_frame: Dimension for animation slider. 'auto' uses CONFIG priority. + facet_cols: Number of columns in facet grid wrap. + **px_kwargs: Additional arguments passed to plotly.express.pie. + + Returns: + Plotly Figure. + + Example: + >>> ds.sum('time').fxplot.pie() # Sum over time, then pie chart + >>> ds.sum('time').fxplot.pie(facet_col='scenario') # Pie per scenario + """ + max_ndim = max((self._ds[v].ndim for v in self._ds.data_vars), default=0) + + names = list(self._ds.data_vars) + color_map = process_colors(colors, names, default_colorscale=CONFIG.Plotting.default_qualitative_colorscale) + + # Scalar case - single pie + if max_ndim == 0: + values = [float(self._ds[v].values) for v in names] + df = pd.DataFrame({'variable': names, 'value': values}) + return px.pie( + df, + names='variable', + values='value', + title=title, + color='variable', + color_discrete_map=color_map, + **px_kwargs, + ) + + # Multi-dimensional case - faceted/animated pies + df = _dataset_to_long_df(self._ds) + if df.empty: + return go.Figure() + + actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( + self._ds, facet_col, facet_row, animation_frame + ) + + facet_col_wrap = facet_cols or CONFIG.Plotting.default_facet_cols + fig_kwargs: dict[str, Any] = { + 'data_frame': df, + 'names': 'variable', + 'values': 'value', + 'title': title, + 'color': 'variable', + 'color_discrete_map': color_map, + **px_kwargs, + } + + if actual_facet_col: + fig_kwargs['facet_col'] = actual_facet_col + if facet_col_wrap < self._ds.sizes.get(actual_facet_col, facet_col_wrap + 1): + fig_kwargs['facet_col_wrap'] = facet_col_wrap + if actual_facet_row: + fig_kwargs['facet_row'] = actual_facet_row + if actual_anim: + fig_kwargs['animation_frame'] = actual_anim + + return px.pie(**fig_kwargs) + + +@xr.register_dataset_accessor('fxstats') +class DatasetStatsAccessor: + """Statistics/transformation accessor for any xr.Dataset. Access via ``dataset.fxstats``. + + Provides data transformation methods that return new datasets. + Chain with ``.fxplot`` for visualization. + + Examples: + Duration curve:: + + ds.fxstats.to_duration_curve().fxplot.line() + """ + + def __init__(self, xarray_obj: xr.Dataset) -> None: + self._ds = xarray_obj + + def to_duration_curve(self, *, normalize: bool = True) -> xr.Dataset: + """Transform dataset to duration curve format (sorted values). + + Values are sorted in descending order along the 'time' dimension. + The time coordinate is replaced with duration (percentage or index). + + Args: + normalize: If True, x-axis shows percentage (0-100). If False, shows timestep index. + + Returns: + Transformed xr.Dataset with duration coordinate instead of time. 
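+
+        Note:
+            Each variable is sorted independently along ``time``; all other
+            dimensions are preserved, so the resulting curves can still be
+            faceted or animated when plotted.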
+ + Example: + >>> ds.fxstats.to_duration_curve().fxplot.line(title='Duration Curve') + """ + import numpy as np + + if 'time' not in self._ds.dims: + raise ValueError("Duration curve requires a 'time' dimension.") + + # Sort each variable along time dimension (descending) + sorted_ds = self._ds.copy() + for var in sorted_ds.data_vars: + da = sorted_ds[var] + time_axis = da.dims.index('time') + # Sort along time axis (descending) - use flip for correct axis + sorted_values = np.flip(np.sort(da.values, axis=time_axis), axis=time_axis) + sorted_ds[var] = (da.dims, sorted_values) + + # Replace time coordinate with duration + n_timesteps = sorted_ds.sizes['time'] + if normalize: + duration_coord = np.linspace(0, 100, n_timesteps) + sorted_ds = sorted_ds.assign_coords({'time': duration_coord}) + sorted_ds = sorted_ds.rename({'time': 'duration_pct'}) + else: + duration_coord = np.arange(n_timesteps) + sorted_ds = sorted_ds.assign_coords({'time': duration_coord}) + sorted_ds = sorted_ds.rename({'time': 'duration'}) + + return sorted_ds + + +@xr.register_dataarray_accessor('fxplot') +class DataArrayPlotAccessor: + """Plot accessor for any xr.DataArray. Access via ``dataarray.fxplot``. + + Provides convenient plotting methods. For bar/stacked_bar/line/area, + the DataArray is converted to a Dataset first. For heatmap, it works + directly with the DataArray. + + Examples: + Basic usage:: + + import flixopt + import xarray as xr + + da = xr.DataArray([1, 2, 3], dims=['time'], name='temperature') + da.fxplot.line() + da.fxplot.heatmap() + """ + + def __init__(self, xarray_obj: xr.DataArray) -> None: + """Initialize the accessor with an xr.DataArray object.""" + self._da = xarray_obj + + def _to_dataset(self) -> xr.Dataset: + """Convert DataArray to Dataset for plotting.""" + name = self._da.name or 'value' + return self._da.to_dataset(name=name) + + def bar( + self, + *, + x: str | Literal['auto'] | None = 'auto', + colors: ColorType | None = None, + title: str = '', + xlabel: str = '', + ylabel: str = '', + facet_col: str | Literal['auto'] | None = 'auto', + facet_row: str | Literal['auto'] | None = 'auto', + animation_frame: str | Literal['auto'] | None = 'auto', + facet_cols: int | None = None, + **px_kwargs: Any, + ) -> go.Figure: + """Create a grouped bar chart. See DatasetPlotAccessor.bar for details.""" + return self._to_dataset().fxplot.bar( + x=x, + colors=colors, + title=title, + xlabel=xlabel, + ylabel=ylabel, + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, + facet_cols=facet_cols, + **px_kwargs, + ) + + def stacked_bar( + self, + *, + x: str | Literal['auto'] | None = 'auto', + colors: ColorType | None = None, + title: str = '', + xlabel: str = '', + ylabel: str = '', + facet_col: str | Literal['auto'] | None = 'auto', + facet_row: str | Literal['auto'] | None = 'auto', + animation_frame: str | Literal['auto'] | None = 'auto', + facet_cols: int | None = None, + **px_kwargs: Any, + ) -> go.Figure: + """Create a stacked bar chart. 
See DatasetPlotAccessor.stacked_bar for details.""" + return self._to_dataset().fxplot.stacked_bar( + x=x, + colors=colors, + title=title, + xlabel=xlabel, + ylabel=ylabel, + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, + facet_cols=facet_cols, + **px_kwargs, + ) + + def line( + self, + *, + x: str | Literal['auto'] | None = 'auto', + colors: ColorType | None = None, + title: str = '', + xlabel: str = '', + ylabel: str = '', + facet_col: str | Literal['auto'] | None = 'auto', + facet_row: str | Literal['auto'] | None = 'auto', + animation_frame: str | Literal['auto'] | None = 'auto', + facet_cols: int | None = None, + line_shape: str | None = None, + **px_kwargs: Any, + ) -> go.Figure: + """Create a line chart. See DatasetPlotAccessor.line for details.""" + return self._to_dataset().fxplot.line( + x=x, + colors=colors, + title=title, + xlabel=xlabel, + ylabel=ylabel, + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, + facet_cols=facet_cols, + line_shape=line_shape, + **px_kwargs, + ) + + def area( + self, + *, + x: str | Literal['auto'] | None = 'auto', + colors: ColorType | None = None, + title: str = '', + xlabel: str = '', + ylabel: str = '', + facet_col: str | Literal['auto'] | None = 'auto', + facet_row: str | Literal['auto'] | None = 'auto', + animation_frame: str | Literal['auto'] | None = 'auto', + facet_cols: int | None = None, + line_shape: str | None = None, + **px_kwargs: Any, + ) -> go.Figure: + """Create a stacked area chart. See DatasetPlotAccessor.area for details.""" + return self._to_dataset().fxplot.area( + x=x, + colors=colors, + title=title, + xlabel=xlabel, + ylabel=ylabel, + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, + facet_cols=facet_cols, + line_shape=line_shape, + **px_kwargs, + ) + + def heatmap( + self, + *, + colors: str | list[str] | None = None, + title: str = '', + facet_col: str | Literal['auto'] | None = 'auto', + animation_frame: str | Literal['auto'] | None = 'auto', + facet_cols: int | None = None, + **imshow_kwargs: Any, + ) -> go.Figure: + """Create a heatmap visualization directly from the DataArray. + + Args: + colors: Colorscale name or list of colors. + title: Plot title. + facet_col: Dimension for column facets. + animation_frame: Dimension for animation slider. + facet_cols: Number of columns in facet grid wrap. + **imshow_kwargs: Additional arguments passed to plotly.express.imshow. + + Returns: + Plotly Figure. 
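+
+        Example:
+            A minimal sketch, assuming an extra dim beyond the two heatmap axes:
+
+            >>> da.fxplot.heatmap(facet_col='period')  # one panel per period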
+ """ + da = self._da + + if da.size == 0: + return go.Figure() + + colors = colors or CONFIG.Plotting.default_sequential_colorscale + facet_col_wrap = facet_cols or CONFIG.Plotting.default_facet_cols + + # Use Dataset for facet resolution + ds_for_resolution = da.to_dataset(name='_temp') + actual_facet_col, _, actual_anim = _resolve_auto_facets(ds_for_resolution, facet_col, None, animation_frame) + + imshow_args: dict[str, Any] = { + 'img': da, + 'color_continuous_scale': colors, + 'title': title or (da.name if da.name else ''), + } + + if actual_facet_col and actual_facet_col in da.dims: + imshow_args['facet_col'] = actual_facet_col + if facet_col_wrap < da.sizes[actual_facet_col]: + imshow_args['facet_col_wrap'] = facet_col_wrap + + if actual_anim and actual_anim in da.dims: + imshow_args['animation_frame'] = actual_anim + + return px.imshow(**{**imshow_args, **imshow_kwargs}) diff --git a/flixopt/statistics_accessor.py b/flixopt/statistics_accessor.py index 73d115df0..382ed1bf0 100644 --- a/flixopt/statistics_accessor.py +++ b/flixopt/statistics_accessor.py @@ -124,54 +124,6 @@ def _reshape_time_for_heatmap( return result.transpose('timestep', 'timeframe', *other_dims) -def _heatmap_figure( - data: xr.DataArray, - colors: str | list[str] | None = None, - title: str = '', - facet_col: str | None = None, - animation_frame: str | None = None, - facet_col_wrap: int | None = None, - **imshow_kwargs: Any, -) -> go.Figure: - """Create heatmap figure using px.imshow. - - Args: - data: DataArray with 2-4 dimensions. First two are heatmap axes. - colors: Colorscale name (str) or list of colors. Dicts are not supported - for heatmaps as color_continuous_scale requires a colorscale specification. - title: Plot title. - facet_col: Dimension for subplot columns. - animation_frame: Dimension for animation slider. - facet_col_wrap: Max columns before wrapping. - **imshow_kwargs: Additional args for px.imshow. - - Returns: - Plotly Figure. - """ - if data.size == 0: - return go.Figure() - - colors = colors or CONFIG.Plotting.default_sequential_colorscale - facet_col_wrap = facet_col_wrap or CONFIG.Plotting.default_facet_cols - - imshow_args: dict[str, Any] = { - 'img': data, - 'color_continuous_scale': colors, - 'title': title, - **imshow_kwargs, - } - - if facet_col and facet_col in data.dims: - imshow_args['facet_col'] = facet_col - if facet_col_wrap < data.sizes[facet_col]: - imshow_args['facet_col_wrap'] = facet_col_wrap - - if animation_frame and animation_frame in data.dims: - imshow_args['animation_frame'] = animation_frame - - return px.imshow(**imshow_args) - - # --- Helper functions --- @@ -237,8 +189,8 @@ def _resolve_auto_facets( """Resolve 'auto' facet/animation dimensions based on available data dimensions. When 'auto' is specified, extra dimensions are assigned to slots based on: - - CONFIG.Plotting.extra_dim_priority: Order of dimensions (default: cluster → period → scenario) - - CONFIG.Plotting.dim_slot_priority: Order of slots (default: facet_col → facet_row → animation_frame) + - CONFIG.Plotting.extra_dim_priority: Order of dimensions to assign. + - CONFIG.Plotting.dim_slot_priority: Order of slots to fill. Args: ds: Dataset to check for available dimensions. 
@@ -308,69 +260,6 @@ def _dataset_to_long_df(ds: xr.Dataset, value_name: str = 'value', var_name: str return df.melt(id_vars=coord_cols, var_name=var_name, value_name=value_name) -def _create_stacked_bar( - ds: xr.Dataset, - colors: ColorType, - title: str, - facet_col: str | None, - facet_row: str | None, - animation_frame: str | None = None, - **plotly_kwargs: Any, -) -> go.Figure: - """Create a stacked bar chart from xarray Dataset.""" - df = _dataset_to_long_df(ds) - if df.empty: - return go.Figure() - x_col = 'time' if 'time' in df.columns else df.columns[0] - variables = df['variable'].unique().tolist() - color_map = process_colors(colors, variables, default_colorscale=CONFIG.Plotting.default_qualitative_colorscale) - fig = px.bar( - df, - x=x_col, - y='value', - color='variable', - facet_col=facet_col, - facet_row=facet_row, - animation_frame=animation_frame, - color_discrete_map=color_map, - title=title, - **plotly_kwargs, - ) - fig.update_layout(barmode='relative', bargap=0, bargroupgap=0) - fig.update_traces(marker_line_width=0) - return fig - - -def _create_line( - ds: xr.Dataset, - colors: ColorType, - title: str, - facet_col: str | None, - facet_row: str | None, - animation_frame: str | None = None, - **plotly_kwargs: Any, -) -> go.Figure: - """Create a line chart from xarray Dataset.""" - df = _dataset_to_long_df(ds) - if df.empty: - return go.Figure() - x_col = 'time' if 'time' in df.columns else df.columns[0] - variables = df['variable'].unique().tolist() - color_map = process_colors(colors, variables, default_colorscale=CONFIG.Plotting.default_qualitative_colorscale) - return px.line( - df, - x=x_col, - y='value', - color='variable', - facet_col=facet_col, - facet_row=facet_row, - animation_frame=animation_frame, - color_discrete_map=color_map, - title=title, - **plotly_kwargs, - ) - - # --- Statistics Accessor (data only) --- @@ -1507,8 +1396,7 @@ def balance( first_var = next(iter(ds.data_vars)) unit_label = ds[first_var].attrs.get('unit', '') - fig = _create_stacked_bar( - ds, + fig = ds.fxplot.stacked_bar( colors=colors, title=f'{node} [{unit_label}]' if unit_label else node, facet_col=actual_facet_col, @@ -1632,8 +1520,7 @@ def carrier_balance( first_var = next(iter(ds.data_vars)) unit_label = ds[first_var].attrs.get('unit', '') - fig = _create_stacked_bar( - ds, + fig = ds.fxplot.stacked_bar( colors=colors, title=f'{carrier.capitalize()} Balance [{unit_label}]' if unit_label else f'{carrier.capitalize()} Balance', facet_col=actual_facet_col, @@ -1766,8 +1653,7 @@ def heatmap( if has_multiple_vars: da = da.rename('') - fig = _heatmap_figure( - da, + fig = da.fxplot.heatmap( colors=colors, facet_col=actual_facet, animation_frame=actual_animation, @@ -1861,8 +1747,7 @@ def flows( first_var = next(iter(ds.data_vars)) unit_label = ds[first_var].attrs.get('unit', '') - fig = _create_line( - ds, + fig = ds.fxplot.line( colors=colors, title=f'Flows [{unit_label}]' if unit_label else 'Flows', facet_col=actual_facet_col, @@ -2038,8 +1923,7 @@ def sort_descending(arr: np.ndarray) -> np.ndarray: first_var = next(iter(ds.data_vars)) unit_label = ds[first_var].attrs.get('unit', '') - fig = _create_line( - result_ds, + fig = result_ds.fxplot.line( colors=colors, title=f'Duration Curve [{unit_label}]' if unit_label else 'Duration Curve', facet_col=actual_facet_col, @@ -2258,8 +2142,7 @@ def charge_states( ds, facet_col, facet_row, animation_frame ) - fig = _create_line( - ds, + fig = ds.fxplot.line( colors=colors, title='Storage Charge States', facet_col=actual_facet_col, diff --git 
a/mkdocs.yml b/mkdocs.yml index 493937983..ab2e9309f 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -73,6 +73,7 @@ nav: - Rolling Horizon: notebooks/08b-rolling-horizon.ipynb - Results: - Plotting: notebooks/09-plotting-and-data-access.ipynb + - Custom Data Plotting: notebooks/fxplot_accessor_demo.ipynb - API Reference: api-reference/ @@ -233,6 +234,10 @@ plugins: allow_errors: false include_source: true include_requirejs: true + ignore: + - "notebooks/data/*.py" # Data generation scripts, not notebooks + execute_ignore: + - "notebooks/data/*.py" - plotly From bf6962c5580b3627a5e9417528847544d56658b4 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 4 Jan 2026 12:07:27 +0100 Subject: [PATCH 180/191] ci: Speedup Notebook execution (#551) * Add dataset plot accessor * Add fxplot acessor showcase * The internal plot accessors now leverage the shared .fxplot implementation, reducing code duplication while maintaining the same functionality (data preparation, color resolution from components, PlotResult wrapping). * Fix notebook * 1. xlabel/ylabel parameters - Added to bar(), stacked_bar(), line(), area(), and duration_curve() methods in both DatasetPlotAccessor and DataArrayPlotAccessor 2. scatter() method - Plots two variables against each other with x and y parameters 3. pie() method - Creates pie charts from aggregated (scalar) dataset values, e.g. ds.sum('time').fxplot.pie() 4. duration_curve() method - Sorts values along the time dimension in descending order, with optional normalize parameter for percentage x-axis 5. CONFIG.Plotting.default_line_shape - New config option (default 'hv') that controls the default line shape for line(), area(), and duration_curve() methods * Fix faceting of pie * Improve auto dim handling * Improve notebook * Fix pie plot * Logic order changed: 1. X-axis is now determined first using CONFIG.Plotting.x_dim_priority 2. 
Facets are resolved from remaining dimensions (x-axis excluded) x_dim_priority expanded: x_dim_priority = ('time', 'duration', 'duration_pct', 'period', 'scenario', 'cluster') - Time-like dims first, then common grouping dims as fallback - variable stays excluded (it's used for color, not x-axis) _get_x_dim() refactored: - Now takes dims: list[str] instead of a DataFrame - More versatile - works with any list of dimension names * Add x parameter and x_dim_priority config to fxplot - Add `x` parameter to bar/stacked_bar/line/area for explicit x-axis control - Add CONFIG.Plotting.x_dim_priority for auto x-axis selection order - X-axis determined first, facets from remaining dimensions - Refactor _get_x_column -> _get_x_dim (takes dim list, not DataFrame) - Support scalar data (no dims) by using 'variable' as x-axis * Add x parameter and smart dimension handling to fxplot - Add `x` parameter to bar/stacked_bar/line/area for explicit x-axis control - Add CONFIG.Plotting.x_dim_priority for auto x-axis selection Default: ('time', 'duration', 'duration_pct', 'period', 'scenario', 'cluster') - X-axis determined first, facets resolved from remaining dimensions - Refactor _get_x_column -> _get_x_dim (takes dim list, more versatile) - Support scalar data (no dims) by using 'variable' as x-axis - Skip color='variable' when x='variable' to avoid double encoding - Fix _dataset_to_long_df to use dims (not just coords) as id_vars * Add x parameter and smart dimension handling to fxplot - Add `x` parameter to bar/stacked_bar/line/area for explicit x-axis control - Add CONFIG.Plotting.x_dim_priority for auto x-axis selection Default: ('time', 'duration', 'duration_pct', 'period', 'scenario', 'cluster') - X-axis determined first, facets resolved from remaining dimensions - Refactor _get_x_column -> _get_x_dim (takes dim list, more versatile) - Support scalar data (no dims) by using 'variable' as x-axis - Skip color='variable' when x='variable' to avoid double encoding - Fix _dataset_to_long_df to use dims (not just coords) as id_vars - Ensure px_kwargs properly overrides all defaults (color, facets, etc.) * Improve documentation * Fix notebook in docs * 1. heatmap kwarg merge order - Now uses **{**imshow_args, **imshow_kwargs} so user can override 2. scatter unused colors - Removed the unused parameter 3. to_duration_curve sorting - Changed [::-1] to np.flip(..., axis=time_axis) for correct multi-dimensional handling 4. DataArrayPlotAccessor.heatmap - Same kwarg merge fix * Improve docstrings * Update notebooks to not do file operations * Fix notebook * Summary of Changes .github/workflows/docs.yaml 1. Notebook caching - Caches executed notebooks using a hash of notebooks + source code 2. Parallel execution - Runs jupyter execute with -P 4 (4 notebooks in parallel) 3. 
Skip mkdocs-jupyter execution - Sets MKDOCS_JUPYTER_EXECUTE=false since notebooks are pre-executed * Fix CI * mkdocs-jupyter was treating this .py file as a notebook and executing it, causing the NetCDF write failure in CI * Add missing type annotation * cache key computation now sorts files before hashing to ensure stable keys across runs --- .github/workflows/docs.yaml | 45 +++++++++++++++++++++++++++++++++++++ mkdocs.yml | 2 +- 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index b6121b23b..147677a78 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -57,7 +57,30 @@ jobs: - name: Install dependencies run: uv pip install --system ".[docs,full]" + - name: Get notebook cache key + id: notebook-cache-key + run: | + # Hash notebooks + flixopt source code (sorted for stable cache keys) + HASH=$(find docs/notebooks -name '*.ipynb' | sort | xargs cat | cat - <(find flixopt -name '*.py' | sort | xargs cat) | sha256sum | cut -d' ' -f1) + echo "hash=$HASH" >> $GITHUB_OUTPUT + + - name: Cache executed notebooks + uses: actions/cache@v4 + id: notebook-cache + with: + path: docs/notebooks/*.ipynb + key: notebooks-${{ steps.notebook-cache-key.outputs.hash }} + + - name: Execute notebooks in parallel + if: steps.notebook-cache.outputs.cache-hit != 'true' + run: | + # Execute all notebooks in parallel (4 at a time) + find docs/notebooks -name '*.ipynb' -print0 | \ + xargs -0 -P 4 -I {} jupyter execute --inplace {} + - name: Build docs + env: + MKDOCS_JUPYTER_EXECUTE: "false" run: mkdocs build --strict - uses: actions/upload-artifact@v4 @@ -95,12 +118,34 @@ jobs: - name: Install dependencies run: uv pip install --system ".[docs,full]" + - name: Get notebook cache key + id: notebook-cache-key + run: | + # Hash notebooks + flixopt source code (sorted for stable cache keys) + HASH=$(find docs/notebooks -name '*.ipynb' | sort | xargs cat | cat - <(find flixopt -name '*.py' | sort | xargs cat) | sha256sum | cut -d' ' -f1) + echo "hash=$HASH" >> $GITHUB_OUTPUT + + - name: Cache executed notebooks + uses: actions/cache@v4 + id: notebook-cache + with: + path: docs/notebooks/*.ipynb + key: notebooks-${{ steps.notebook-cache-key.outputs.hash }} + + - name: Execute notebooks in parallel + if: steps.notebook-cache.outputs.cache-hit != 'true' + run: | + find docs/notebooks -name '*.ipynb' -print0 | \ + xargs -0 -P 4 -I {} jupyter execute --inplace {} + - name: Configure Git run: | git config user.name "github-actions[bot]" git config user.email "41898282+github-actions[bot]@users.noreply.github.com" - name: Deploy docs + env: + MKDOCS_JUPYTER_EXECUTE: "false" run: | VERSION=${{ inputs.version }} VERSION=${VERSION#v} diff --git a/mkdocs.yml b/mkdocs.yml index ab2e9309f..847f3fbd6 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -230,7 +230,7 @@ plugins: separator: '[\s\u200b\-_,:!=\[\]()"`/]+|\.(?!\d)|&[lg]t;|(?!\b)(?=[A-Z][a-z])' - mkdocs-jupyter: - execute: true # Execute notebooks during build + execute: !ENV [MKDOCS_JUPYTER_EXECUTE, true] # CI pre-executes in parallel allow_errors: false include_source: true include_requirejs: true From d4dd58bfff1ccf36b2ee5abddbc426b9cf2b7d9f Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 4 Jan 2026 21:00:01 +0100 Subject: [PATCH 181/191] Simplify notebooks --- docs/notebooks/01-quickstart.ipynb | 17 +++-- docs/notebooks/02-heat-system.ipynb | 18 ++--- .../03-investment-optimization.ipynb | 47 ++++++++----- 
.../04-operational-constraints.ipynb | 25 ++++--- docs/notebooks/05-multi-carrier-system.ipynb | 47 ++++++++----- docs/notebooks/07-scenarios-and-periods.ipynb | 58 ++++++++-------- docs/notebooks/08a-aggregation.ipynb | 28 ++------ docs/notebooks/08b-rolling-horizon.ipynb | 27 ++++---- docs/notebooks/08c-clustering.ipynb | 68 +++---------------- .../09-plotting-and-data-access.ipynb | 29 +++----- 10 files changed, 163 insertions(+), 201 deletions(-) diff --git a/docs/notebooks/01-quickstart.ipynb b/docs/notebooks/01-quickstart.ipynb index 1500bce77..8e599511d 100644 --- a/docs/notebooks/01-quickstart.ipynb +++ b/docs/notebooks/01-quickstart.ipynb @@ -59,8 +59,7 @@ "metadata": {}, "outputs": [], "source": [ - "timesteps = pd.date_range('2024-01-15 08:00', periods=4, freq='h')\n", - "print(f'Optimizing from {timesteps[0]} to {timesteps[-1]}')" + "timesteps = pd.date_range('2024-01-15 08:00', periods=4, freq='h')" ] }, { @@ -203,14 +202,18 @@ "metadata": {}, "outputs": [], "source": [ - "total_costs = flow_system.solution['costs'].item()\n", "total_heat = float(heat_demand.sum())\n", "gas_consumed = total_heat / 0.9 # Account for boiler efficiency\n", "\n", - "print(f'Total heat demand: {total_heat:.1f} kWh')\n", - "print(f'Gas consumed: {gas_consumed:.1f} kWh')\n", - "print(f'Total costs: {total_costs:.2f} €')\n", - "print(f'Average cost: {total_costs / total_heat:.3f} €/kWh_heat')" + "pd.DataFrame(\n", + " {\n", + " 'Total heat demand [kWh]': total_heat,\n", + " 'Gas consumed [kWh]': gas_consumed,\n", + " 'Total costs [EUR]': flow_system.solution['costs'].item(),\n", + " 'Average cost [EUR/kWh_heat]': flow_system.solution['costs'].item() / total_heat,\n", + " },\n", + " index=['Value'],\n", + ").T" ] }, { diff --git a/docs/notebooks/02-heat-system.ipynb b/docs/notebooks/02-heat-system.ipynb index 3ff933ec3..15393d870 100644 --- a/docs/notebooks/02-heat-system.ipynb +++ b/docs/notebooks/02-heat-system.ipynb @@ -81,11 +81,7 @@ "# Add some random variation\n", "np.random.seed(42)\n", "heat_demand = heat_demand + np.random.normal(0, 5, len(heat_demand))\n", - "heat_demand = np.clip(heat_demand, 20, 100)\n", - "\n", - "print(f'Time range: {timesteps[0]} to {timesteps[-1]}')\n", - "print(f'Peak demand: {heat_demand.max():.1f} kW')\n", - "print(f'Total demand: {heat_demand.sum():.0f} kWh')" + "heat_demand = np.clip(heat_demand, 20, 100)" ] }, { @@ -309,12 +305,16 @@ "metadata": {}, "outputs": [], "source": [ - "total_costs = flow_system.solution['costs'].item()\n", "total_heat = heat_demand.sum()\n", "\n", - "print(f'Total operating costs: {total_costs:.2f} €')\n", - "print(f'Total heat delivered: {total_heat:.0f} kWh')\n", - "print(f'Average cost: {total_costs / total_heat * 100:.2f} ct/kWh')" + "pd.DataFrame(\n", + " {\n", + " 'Total operating costs [EUR]': flow_system.solution['costs'].item(),\n", + " 'Total heat delivered [kWh]': total_heat,\n", + " 'Average cost [ct/kWh]': flow_system.solution['costs'].item() / total_heat * 100,\n", + " },\n", + " index=['Value'],\n", + ").T" ] }, { diff --git a/docs/notebooks/03-investment-optimization.ipynb b/docs/notebooks/03-investment-optimization.ipynb index 349c84ccf..1d2419a79 100644 --- a/docs/notebooks/03-investment-optimization.ipynb +++ b/docs/notebooks/03-investment-optimization.ipynb @@ -100,10 +100,7 @@ "solar_profile = solar_profile * cloud_factor\n", "\n", "# Pool operates 8am-10pm, constant demand when open\n", - "pool_demand = np.where((hour_of_day >= 8) & (hour_of_day <= 22), 150, 50) # kW\n", - "\n", - "print(f'Peak solar: 
{solar_profile.max():.2f} kW/kW_installed')\n", - "print(f'Pool demand: {pool_demand.max():.0f} kW (open), {pool_demand.min():.0f} kW (closed)')" + "pool_demand = np.where((hour_of_day >= 8) & (hour_of_day <= 22), 150, 50) # kW" ] }, { @@ -160,10 +157,7 @@ "# So we scale investment costs to weekly equivalent\n", "WEEKS_PER_YEAR = 52\n", "SOLAR_COST_WEEKLY = SOLAR_COST_PER_KW / WEEKS_PER_YEAR\n", - "TANK_COST_WEEKLY = TANK_COST_PER_KWH / WEEKS_PER_YEAR\n", - "\n", - "print(f'Solar cost: {SOLAR_COST_WEEKLY:.3f} €/kW/week')\n", - "print(f'Tank cost: {TANK_COST_WEEKLY:.4f} €/kWh/week')" + "TANK_COST_WEEKLY = TANK_COST_PER_KWH / WEEKS_PER_YEAR" ] }, { @@ -286,9 +280,14 @@ "solar_size = flow_system.statistics.sizes['SolarCollectors(Heat)'].item()\n", "tank_size = flow_system.statistics.sizes['BufferTank'].item()\n", "\n", - "print(\n", - " f'Optimal sizes: Solar {solar_size:.0f} kW, Tank {tank_size:.0f} kWh (ratio: {tank_size / solar_size:.1f} kWh/kW)'\n", - ")" + "pd.DataFrame(\n", + " {\n", + " 'Solar [kW]': solar_size,\n", + " 'Tank [kWh]': tank_size,\n", + " 'Ratio [kWh/kW]': tank_size / solar_size,\n", + " },\n", + " index=['Optimal Size'],\n", + ").T" ] }, { @@ -331,8 +330,13 @@ "tank_invest = tank_size * TANK_COST_WEEKLY\n", "gas_costs = total_costs - solar_invest - tank_invest\n", "\n", - "print(\n", - " f'Weekly costs: Solar {solar_invest:.1f}€ ({solar_invest / total_costs * 100:.0f}%) + Tank {tank_invest:.1f}€ ({tank_invest / total_costs * 100:.0f}%) + Gas {gas_costs:.1f}€ ({gas_costs / total_costs * 100:.0f}%) = {total_costs:.1f}€'\n", + "pd.DataFrame(\n", + " {\n", + " 'Solar Investment': {'EUR': solar_invest, '%': solar_invest / total_costs * 100},\n", + " 'Tank Investment': {'EUR': tank_invest, '%': tank_invest / total_costs * 100},\n", + " 'Gas Costs': {'EUR': gas_costs, '%': gas_costs / total_costs * 100},\n", + " 'Total': {'EUR': total_costs, '%': 100.0},\n", + " }\n", ")" ] }, @@ -391,14 +395,21 @@ "metadata": {}, "outputs": [], "source": [ - "# Gas-only scenario\n", + "# Gas-only scenario for comparison\n", "total_demand = pool_demand.sum()\n", "gas_only_cost = total_demand / 0.92 * GAS_PRICE # All heat from gas boiler\n", - "\n", "savings = gas_only_cost - total_costs\n", - "print(\n", - " f'Solar saves {savings:.1f}€/week ({savings / gas_only_cost * 100:.0f}%) vs gas-only ({gas_only_cost:.1f}€) → {savings * 52:.0f}€/year'\n", - ")" + "\n", + "pd.DataFrame(\n", + " {\n", + " 'Gas-only [EUR/week]': gas_only_cost,\n", + " 'With Solar [EUR/week]': total_costs,\n", + " 'Savings [EUR/week]': savings,\n", + " 'Savings [%]': savings / gas_only_cost * 100,\n", + " 'Savings [EUR/year]': savings * 52,\n", + " },\n", + " index=['Value'],\n", + ").T" ] }, { diff --git a/docs/notebooks/04-operational-constraints.ipynb b/docs/notebooks/04-operational-constraints.ipynb index fbb611d1c..18c0a93ce 100644 --- a/docs/notebooks/04-operational-constraints.ipynb +++ b/docs/notebooks/04-operational-constraints.ipynb @@ -95,10 +95,7 @@ "# Add some variation\n", "np.random.seed(123)\n", "steam_demand = steam_demand + np.random.normal(0, 20, len(steam_demand))\n", - "steam_demand = np.clip(steam_demand, 50, 450).astype(float)\n", - "\n", - "print(f'Peak demand: {steam_demand.max():.0f} kW')\n", - "print(f'Min demand: {steam_demand.min():.0f} kW')" + "steam_demand = np.clip(steam_demand, 50, 450).astype(float)" ] }, { @@ -294,8 +291,12 @@ "startup_costs = total_startups * 50\n", "gas_costs = total_costs - startup_costs\n", "\n", - "print(\n", - " f'{total_startups} startups × 50€ = {startup_costs:.0f}€ 
startup + {gas_costs:.0f}€ gas = {total_costs:.0f}€ total'\n", + "pd.DataFrame(\n", + " {\n", + " 'Startups': {'Count': total_startups, 'EUR': startup_costs},\n", + " 'Gas': {'Count': '-', 'EUR': gas_costs},\n", + " 'Total': {'Count': '-', 'EUR': total_costs},\n", + " }\n", ")" ] }, @@ -377,8 +378,16 @@ "fs_unconstrained.optimize(fx.solvers.HighsSolver())\n", "unconstrained_costs = fs_unconstrained.solution['costs'].item()\n", "\n", - "constraint_overhead = (total_costs - unconstrained_costs) / unconstrained_costs * 100\n", - "print(f'Constraints add {constraint_overhead:.1f}% cost: {unconstrained_costs:.0f}€ → {total_costs:.0f}€')" + "pd.DataFrame(\n", + " {\n", + " 'Without Constraints': {'Cost [EUR]': unconstrained_costs},\n", + " 'With Constraints': {'Cost [EUR]': total_costs},\n", + " 'Overhead': {\n", + " 'Cost [EUR]': total_costs - unconstrained_costs,\n", + " '%': (total_costs - unconstrained_costs) / unconstrained_costs * 100,\n", + " },\n", + " }\n", + ")" ] }, { diff --git a/docs/notebooks/05-multi-carrier-system.ipynb b/docs/notebooks/05-multi-carrier-system.ipynb index a1a9543fa..507ba0a6a 100644 --- a/docs/notebooks/05-multi-carrier-system.ipynb +++ b/docs/notebooks/05-multi-carrier-system.ipynb @@ -115,10 +115,7 @@ "electricity_demand += np.random.normal(0, 15, len(timesteps))\n", "heat_demand += np.random.normal(0, 20, len(timesteps))\n", "electricity_demand = np.clip(electricity_demand, 100, 300)\n", - "heat_demand = np.clip(heat_demand, 150, 400)\n", - "\n", - "print(f'Electricity: {electricity_demand.min():.0f} - {electricity_demand.max():.0f} kW')\n", - "print(f'Heat: {heat_demand.min():.0f} - {heat_demand.max():.0f} kW')" + "heat_demand = np.clip(heat_demand, 150, 400)" ] }, { @@ -375,9 +372,6 @@ "metadata": {}, "outputs": [], "source": [ - "total_costs = flow_system.solution['costs'].item()\n", - "total_co2 = flow_system.solution['CO2'].item()\n", - "\n", "# Energy flows\n", "flow_rates = flow_system.statistics.flow_rates\n", "grid_buy = flow_rates['GridBuy(Electricity)'].sum().item()\n", @@ -389,12 +383,20 @@ "total_elec = electricity_demand.sum()\n", "total_heat = heat_demand.sum()\n", "\n", - "# Display as compact summary\n", - "print(\n", - " f'Electricity: {chp_elec:.0f} kWh CHP ({chp_elec / total_elec * 100:.0f}%) + {grid_buy:.0f} kWh grid, {grid_sell:.0f} kWh sold'\n", - ")\n", - "print(f'Heat: {chp_heat:.0f} kWh CHP ({chp_heat / total_heat * 100:.0f}%) + {boiler_heat:.0f} kWh boiler')\n", - "print(f'Costs: {total_costs:.2f} € | CO2: {total_co2:.0f} kg')" + "pd.DataFrame(\n", + " {\n", + " 'CHP Electricity [kWh]': chp_elec,\n", + " 'CHP Electricity [%]': chp_elec / total_elec * 100,\n", + " 'Grid Buy [kWh]': grid_buy,\n", + " 'Grid Sell [kWh]': grid_sell,\n", + " 'CHP Heat [kWh]': chp_heat,\n", + " 'CHP Heat [%]': chp_heat / total_heat * 100,\n", + " 'Boiler Heat [kWh]': boiler_heat,\n", + " 'Total Costs [EUR]': flow_system.solution['costs'].item(),\n", + " 'Total CO2 [kg]': flow_system.solution['CO2'].item(),\n", + " },\n", + " index=['Value'],\n", + ").T" ] }, { @@ -454,13 +456,24 @@ "\n", "fs_no_chp.optimize(fx.solvers.HighsSolver())\n", "\n", + "total_costs = flow_system.solution['costs'].item()\n", + "total_co2 = flow_system.solution['CO2'].item()\n", "no_chp_costs = fs_no_chp.solution['costs'].item()\n", "no_chp_co2 = fs_no_chp.solution['CO2'].item()\n", "\n", - "cost_saving = (no_chp_costs - total_costs) / no_chp_costs * 100\n", - "co2_saving = (no_chp_co2 - total_co2) / no_chp_co2 * 100\n", - "print(\n", - " f'CHP saves {cost_saving:.1f}% costs 
({no_chp_costs:.0f}→{total_costs:.0f} €) and {co2_saving:.1f}% CO2 ({no_chp_co2:.0f}→{total_co2:.0f} kg)'\n", + "pd.DataFrame(\n", + " {\n", + " 'Without CHP': {'Cost [EUR]': no_chp_costs, 'CO2 [kg]': no_chp_co2},\n", + " 'With CHP': {'Cost [EUR]': total_costs, 'CO2 [kg]': total_co2},\n", + " 'Savings': {\n", + " 'Cost [EUR]': no_chp_costs - total_costs,\n", + " 'CO2 [kg]': no_chp_co2 - total_co2,\n", + " },\n", + " 'Savings [%]': {\n", + " 'Cost [EUR]': (no_chp_costs - total_costs) / no_chp_costs * 100,\n", + " 'CO2 [kg]': (no_chp_co2 - total_co2) / no_chp_co2 * 100,\n", + " },\n", + " }\n", ")" ] }, diff --git a/docs/notebooks/07-scenarios-and-periods.ipynb b/docs/notebooks/07-scenarios-and-periods.ipynb index db74afefb..e770946eb 100644 --- a/docs/notebooks/07-scenarios-and-periods.ipynb +++ b/docs/notebooks/07-scenarios-and-periods.ipynb @@ -80,12 +80,7 @@ "\n", "# Scenarios with probabilities\n", "scenarios = pd.Index(['Mild Winter', 'Harsh Winter'], name='scenario')\n", - "scenario_weights = np.array([0.6, 0.4]) # 60% mild, 40% harsh\n", - "\n", - "print(f'Time dimension: {len(timesteps)} hours')\n", - "print(f'Periods: {list(periods)}')\n", - "print(f'Scenarios: {list(scenarios)}')\n", - "print(f'Scenario weights: {dict(zip(scenarios, scenario_weights, strict=False))}')" + "scenario_weights = np.array([0.6, 0.4]) # 60% mild, 40% harsh" ] }, { @@ -138,10 +133,7 @@ " 'Harsh Winter': harsh_demand,\n", " },\n", " index=timesteps,\n", - ")\n", - "\n", - "print(f'Mild winter demand: {mild_demand.min():.0f} - {mild_demand.max():.0f} kW')\n", - "print(f'Harsh winter demand: {harsh_demand.min():.0f} - {harsh_demand.max():.0f} kW')" + ")" ] }, { @@ -182,15 +174,7 @@ "gas_prices = np.array([0.06, 0.08, 0.10]) # 2024, 2025, 2026\n", "\n", "# Electricity sell prices by period (€/kWh) - CHP revenue\n", - "elec_prices = np.array([0.28, 0.34, 0.43]) # Rising with gas\n", - "\n", - "print('Gas prices by period:')\n", - "for period, price in zip(periods, gas_prices, strict=False):\n", - " print(f' {period}: {price:.2f} €/kWh')\n", - "\n", - "print('\\nElectricity sell prices by period:')\n", - "for period, price in zip(periods, elec_prices, strict=False):\n", - " print(f' {period}: {price:.2f} €/kWh')" + "elec_prices = np.array([0.28, 0.34, 0.43]) # Rising with gas" ] }, { @@ -222,7 +206,7 @@ " fx.Carrier('heat', '#e74c3c', 'kW'),\n", ")\n", "\n", - "print(flow_system)" + "flow_system" ] }, { @@ -347,10 +331,15 @@ "outputs": [], "source": [ "chp_size = flow_system.statistics.sizes['CHP(P_el)']\n", - "total_cost = flow_system.solution['costs']\n", "\n", - "print(f'Optimal CHP: {float(chp_size.max()):.0f} kW electrical ({float(chp_size.max()) * 0.50 / 0.35:.0f} kW thermal)')\n", - "print(f'Expected cost: {float(total_cost.sum()):.0f} €')" + "pd.DataFrame(\n", + " {\n", + " 'CHP Electrical [kW]': float(chp_size.max()),\n", + " 'CHP Thermal [kW]': float(chp_size.max()) * 0.50 / 0.35,\n", + " 'Expected Cost [EUR]': float(flow_system.solution['costs'].sum()),\n", + " },\n", + " index=['Optimal'],\n", + ").T" ] }, { @@ -408,9 +397,7 @@ "metadata": {}, "outputs": [], "source": [ - "# View dimensions\n", "flow_rates = flow_system.statistics.flow_rates\n", - "print('Flow rates dimensions:', dict(flow_rates.sizes))\n", "\n", "# Plot flow rates\n", "flow_system.statistics.plot.flows()" @@ -426,10 +413,15 @@ "# CHP operation summary by scenario\n", "chp_heat = flow_rates['CHP(Q_th)']\n", "\n", - "for scenario in scenarios:\n", - " scenario_avg = float(chp_heat.sel(scenario=scenario).mean())\n", - " scenario_max = 
float(chp_heat.sel(scenario=scenario).max())\n", - " print(f'{scenario}: avg {scenario_avg:.0f} kW, max {scenario_max:.0f} kW')" + "pd.DataFrame(\n", + " {\n", + " scenario: {\n", + " 'Avg [kW]': float(chp_heat.sel(scenario=scenario).mean()),\n", + " 'Max [kW]': float(chp_heat.sel(scenario=scenario).max()),\n", + " }\n", + " for scenario in scenarios\n", + " }\n", + ")" ] }, { @@ -456,8 +448,12 @@ "chp_size_mild = float(fs_mild.statistics.sizes['CHP(P_el)'].max())\n", "chp_size_both = float(chp_size.max())\n", "\n", - "print(\n", - " f'CHP sizing: {chp_size_mild:.0f} kW (mild only) vs {chp_size_both:.0f} kW (both scenarios) → +{chp_size_both - chp_size_mild:.0f} kW for uncertainty'\n", + "pd.DataFrame(\n", + " {\n", + " 'Mild Only': {'CHP Size [kW]': chp_size_mild},\n", + " 'Both Scenarios': {'CHP Size [kW]': chp_size_both},\n", + " 'Uncertainty Buffer': {'CHP Size [kW]': chp_size_both - chp_size_mild},\n", + " }\n", ")" ] }, diff --git a/docs/notebooks/08a-aggregation.ipynb b/docs/notebooks/08a-aggregation.ipynb index 8bc1a4774..cccb7dce0 100644 --- a/docs/notebooks/08a-aggregation.ipynb +++ b/docs/notebooks/08a-aggregation.ipynb @@ -65,8 +65,8 @@ "flow_system.connect_and_transform() # Align all data as xarray\n", "\n", "timesteps = flow_system.timesteps\n", - "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 24:.0f} days at hourly resolution)')\n", - "print(f'Components: {list(flow_system.components.keys())}')" + "\n", + "flow_system" ] }, { @@ -111,10 +111,7 @@ "solver = fx.solvers.HighsSolver(mip_gap=0.01)\n", "\n", "# Resample from 15-min to 4h resolution\n", - "fs_resampled = flow_system.transform.resample('4h')\n", - "\n", - "reduction = (1 - len(fs_resampled.timesteps) / len(flow_system.timesteps)) * 100\n", - "print(f'Resampled: {len(flow_system.timesteps)} → {len(fs_resampled.timesteps)} timesteps ({reduction:.0f}% reduction)')" + "fs_resampled = flow_system.transform.resample('4h')" ] }, { @@ -127,9 +124,7 @@ "# Optimize resampled system\n", "start = timeit.default_timer()\n", "fs_resampled.optimize(solver)\n", - "time_resampled = timeit.default_timer() - start\n", - "\n", - "print(f'Resampled: {time_resampled:.1f}s, {fs_resampled.solution[\"costs\"].item():,.0f} €')" + "time_resampled = timeit.default_timer() - start" ] }, { @@ -156,10 +151,7 @@ "fs_sizing.optimize(solver)\n", "time_stage1 = timeit.default_timer() - start\n", "\n", - "sizes = {k: float(v.item()) for k, v in fs_sizing.statistics.sizes.items()}\n", - "print(\n", - " f'Stage 1 (sizing): {time_stage1:.1f}s → CHP {sizes[\"CHP(Q_th)\"]:.0f}, Boiler {sizes[\"Boiler(Q_th)\"]:.0f}, Storage {sizes[\"Storage\"]:.0f}'\n", - ")" + "sizes = {k: float(v.item()) for k, v in fs_sizing.statistics.sizes.items()}" ] }, { @@ -173,11 +165,7 @@ "start = timeit.default_timer()\n", "fs_dispatch = flow_system.transform.fix_sizes(fs_sizing.statistics.sizes)\n", "fs_dispatch.optimize(solver)\n", - "time_stage2 = timeit.default_timer() - start\n", - "\n", - "print(\n", - " f'Stage 2 (dispatch): {time_stage2:.1f}s, {fs_dispatch.solution[\"costs\"].item():,.0f} € (total: {time_stage1 + time_stage2:.1f}s)'\n", - ")" + "time_stage2 = timeit.default_timer() - start" ] }, { @@ -200,9 +188,7 @@ "start = timeit.default_timer()\n", "fs_full = flow_system.copy()\n", "fs_full.optimize(solver)\n", - "time_full = timeit.default_timer() - start\n", - "\n", - "print(f'Full optimization: {time_full:.1f}s, {fs_full.solution[\"costs\"].item():,.0f} €')" + "time_full = timeit.default_timer() - start" ] }, { diff --git 
a/docs/notebooks/08b-rolling-horizon.ipynb b/docs/notebooks/08b-rolling-horizon.ipynb index c0d7bdf24..3b91fd980 100644 --- a/docs/notebooks/08b-rolling-horizon.ipynb +++ b/docs/notebooks/08b-rolling-horizon.ipynb @@ -69,8 +69,8 @@ "flow_system.connect_and_transform() # Align all data as xarray\n", "\n", "timesteps = flow_system.timesteps\n", - "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 24:.0f} days at hourly resolution)')\n", - "print(f'Components: {list(flow_system.components.keys())}')" + "\n", + "flow_system" ] }, { @@ -95,9 +95,7 @@ "start = timeit.default_timer()\n", "fs_full = flow_system.copy()\n", "fs_full.optimize(solver)\n", - "time_full = timeit.default_timer() - start\n", - "\n", - "print(f'Full: {time_full:.1f}s, {fs_full.solution[\"costs\"].item():,.0f} €')" + "time_full = timeit.default_timer() - start" ] }, { @@ -138,9 +136,7 @@ " horizon=192, # 2-day segments (192 timesteps at 15-min resolution)\n", " overlap=96, # 1-day lookahead\n", ")\n", - "time_rolling = timeit.default_timer() - start\n", - "\n", - "print(f'Rolling ({len(segments)} segments): {time_rolling:.1f}s, {fs_rolling.solution[\"costs\"].item():,.0f} €')" + "time_rolling = timeit.default_timer() - start" ] }, { @@ -254,11 +250,16 @@ "metadata": {}, "outputs": [], "source": [ - "print(f'{len(segments)} segments:')\n", - "for i, seg in enumerate(segments):\n", - " print(\n", - " f' {i + 1}: {seg.timesteps[0]:%m-%d %H:%M} → {seg.timesteps[-1]:%m-%d %H:%M} | {seg.solution[\"costs\"].item():,.0f} €'\n", - " )" + "pd.DataFrame(\n", + " {\n", + " f'Segment {i + 1}': {\n", + " 'Start': f'{seg.timesteps[0]:%m-%d %H:%M}',\n", + " 'End': f'{seg.timesteps[-1]:%m-%d %H:%M}',\n", + " 'Cost [EUR]': seg.solution['costs'].item(),\n", + " }\n", + " for i, seg in enumerate(segments)\n", + " }\n", + ")" ] }, { diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb index 0e9cda7b7..825aadd3a 100644 --- a/docs/notebooks/08c-clustering.ipynb +++ b/docs/notebooks/08c-clustering.ipynb @@ -28,7 +28,6 @@ "source": [ "import timeit\n", "\n", - "import numpy as np\n", "import pandas as pd\n", "import plotly.graph_objects as go\n", "from plotly.subplots import make_subplots\n", @@ -61,8 +60,8 @@ "flow_system.connect_and_transform()\n", "\n", "timesteps = flow_system.timesteps\n", - "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 24:.0f} days at hourly resolution)')\n", - "print(f'Components: {list(flow_system.components.keys())}')" + "\n", + "flow_system" ] }, { @@ -109,13 +108,7 @@ "start = timeit.default_timer()\n", "fs_full = flow_system.copy()\n", "fs_full.optimize(solver)\n", - "time_full = timeit.default_timer() - start\n", - "\n", - "print(f'Full optimization: {time_full:.1f} seconds')\n", - "print(f'Total cost: {fs_full.solution[\"costs\"].item():,.0f} €')\n", - "print('\\nOptimized sizes:')\n", - "for name, size in fs_full.statistics.sizes.items():\n", - " print(f' {name}: {float(size.item()):.1f}')" + "time_full = timeit.default_timer() - start" ] }, { @@ -156,9 +149,7 @@ " time_series_for_high_peaks=peak_series, # Capture peak demand day\n", ")\n", "\n", - "time_clustering = timeit.default_timer() - start\n", - "print(f'Clustering time: {time_clustering:.1f} seconds')\n", - "print(f'Reduced: {len(flow_system.timesteps)} → {len(fs_clustered.timesteps)} timesteps')" + "time_clustering = timeit.default_timer() - start" ] }, { @@ -171,14 +162,7 @@ "# Optimize the reduced system\n", "start = timeit.default_timer()\n", 
"fs_clustered.optimize(solver)\n", - "time_clustered = timeit.default_timer() - start\n", - "\n", - "print(f'Clustered optimization: {time_clustered:.1f} seconds')\n", - "print(f'Total cost: {fs_clustered.solution[\"costs\"].item():,.0f} €')\n", - "print(f'\\nSpeedup vs full: {time_full / (time_clustering + time_clustered):.1f}x')\n", - "print('\\nOptimized sizes:')\n", - "for name, size in fs_clustered.statistics.sizes.items():\n", - " print(f' {name}: {float(size.item()):.1f}')" + "time_clustered = timeit.default_timer() - start" ] }, { @@ -198,21 +182,8 @@ "metadata": {}, "outputs": [], "source": [ - "# Show clustering info\n", - "info = fs_clustered.clustering\n", - "cs = info.result.cluster_structure\n", - "print('Clustering Configuration:')\n", - "print(f' Number of typical periods: {cs.n_clusters}')\n", - "print(f' Timesteps per period: {cs.timesteps_per_cluster}')\n", - "print(f' Total reduced timesteps: {cs.n_clusters * cs.timesteps_per_cluster}')\n", - "print(f' Cluster order (first 10 days): {cs.cluster_order.values[:10]}...')\n", - "\n", - "# Show how many times each cluster appears\n", - "cluster_order = cs.cluster_order.values\n", - "unique, counts = np.unique(cluster_order, return_counts=True)\n", - "print('\\nCluster occurrences:')\n", - "for cluster_id, count in zip(unique, counts, strict=False):\n", - " print(f' Cluster {cluster_id}: {count} days')" + "# Show clustering info using __repr__\n", + "fs_clustered.clustering" ] }, { @@ -239,18 +210,9 @@ "metadata": {}, "outputs": [], "source": [ - "# Stage 1 already done above\n", - "print('Stage 1: Sizing with typical periods')\n", - "print(f' Time: {time_clustering + time_clustered:.1f} seconds')\n", - "print(f' Cost estimate: {fs_clustered.solution[\"costs\"].item():,.0f} €')\n", - "\n", "# Apply safety margin to sizes\n", "SAFETY_MARGIN = 1.05 # 5% buffer\n", - "sizes_with_margin = {name: float(size.item()) * SAFETY_MARGIN for name, size in fs_clustered.statistics.sizes.items()}\n", - "print(f'\\nSizes with {(SAFETY_MARGIN - 1) * 100:.0f}% safety margin:')\n", - "for name, size in sizes_with_margin.items():\n", - " original = fs_clustered.statistics.sizes[name].item()\n", - " print(f' {name}: {original:.1f} → {size:.1f}')" + "sizes_with_margin = {name: float(size.item()) * SAFETY_MARGIN for name, size in fs_clustered.statistics.sizes.items()}" ] }, { @@ -261,20 +223,15 @@ "outputs": [], "source": [ "# Stage 2: Fix sizes and optimize at full resolution\n", - "print('Stage 2: Dispatch at full resolution')\n", "start = timeit.default_timer()\n", "\n", "fs_dispatch = flow_system.transform.fix_sizes(sizes_with_margin)\n", "fs_dispatch.optimize(solver)\n", "\n", "time_dispatch = timeit.default_timer() - start\n", - "print(f' Time: {time_dispatch:.1f} seconds')\n", - "print(f' Actual cost: {fs_dispatch.solution[\"costs\"].item():,.0f} €')\n", "\n", - "# Total comparison\n", - "total_two_stage = time_clustering + time_clustered + time_dispatch\n", - "print(f'\\nTotal two-stage time: {total_two_stage:.1f} seconds')\n", - "print(f'Speedup vs full: {time_full / total_two_stage:.1f}x')" + "# Total two-stage time\n", + "total_two_stage = time_clustering + time_clustered + time_dispatch" ] }, { @@ -354,10 +311,7 @@ "outputs": [], "source": [ "# Expand the clustered solution to full resolution\n", - "fs_expanded = fs_clustered.transform.expand_solution()\n", - "\n", - "print(f'Expanded: {len(fs_clustered.timesteps)} → {len(fs_expanded.timesteps)} timesteps')\n", - "print(f'Cost: {fs_expanded.solution[\"costs\"].item():,.0f} €')" + 
"fs_expanded = fs_clustered.transform.expand_solution()" ] }, { diff --git a/docs/notebooks/09-plotting-and-data-access.ipynb b/docs/notebooks/09-plotting-and-data-access.ipynb index 39fa788da..20a7e6f4f 100644 --- a/docs/notebooks/09-plotting-and-data-access.ipynb +++ b/docs/notebooks/09-plotting-and-data-access.ipynb @@ -71,10 +71,7 @@ "multiperiod = create_multiperiod_system()\n", "multiperiod.optimize(solver)\n", "\n", - "print('Created systems:')\n", - "print(f' simple: {len(simple.components)} components, {len(simple.buses)} buses')\n", - "print(f' complex_sys: {len(complex_sys.components)} components, {len(complex_sys.buses)} buses')\n", - "print(f' multiperiod: {len(multiperiod.components)} components, dims={dict(multiperiod.solution.sizes)}')" + "simple" ] }, { @@ -614,13 +611,12 @@ "source": [ "nodes, edges = simple.topology.infos()\n", "\n", - "print('Nodes:')\n", - "for label, info in nodes.items():\n", - " print(f' {label}: {info[\"class\"]}')\n", - "\n", - "print('\\nEdges (flows):')\n", - "for label, info in edges.items():\n", - " print(f' {info[\"start\"]} -> {info[\"end\"]}: {label}')" + "pd.DataFrame(\n", + " {\n", + " 'Nodes': {label: info['class'] for label, info in nodes.items()},\n", + " 'Edges': {label: f'{info[\"start\"]} -> {info[\"end\"]}' for label, info in edges.items()},\n", + " }\n", + ")" ] }, { @@ -640,10 +636,7 @@ "metadata": {}, "outputs": [], "source": [ - "print('Multiperiod system dimensions:')\n", - "print(f' Periods: {list(multiperiod.periods)}')\n", - "print(f' Scenarios: {list(multiperiod.scenarios)}')\n", - "print(f' Solution dims: {dict(multiperiod.solution.sizes)}')" + "multiperiod" ] }, { @@ -748,11 +741,7 @@ "outputs": [], "source": [ "# Get plot result\n", - "result = simple.statistics.plot.balance('Heat')\n", - "\n", - "print('PlotResult contains:')\n", - "print(f' data: {type(result.data).__name__} with vars {list(result.data.data_vars)}')\n", - "print(f' figure: {type(result.figure).__name__}')" + "result = simple.statistics.plot.balance('Heat')" ] }, { From 7b7484659f59857665c1f5ef5021a4fa5ca62869 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 4 Jan 2026 21:27:00 +0100 Subject: [PATCH 182/191] Created helper module: docs/notebooks/data/tutorial_data.py - get_quickstart_data() - 01-quickstart - get_heat_system_data() - 02-heat-system - get_investment_data() - 03-investment - get_constraints_data() - 04-constraints - get_multicarrier_data() - 05-multi-carrier - get_time_varying_data() - 06a-time-varying - get_scenarios_data() - 07-scenarios Notebook changes: | Notebook | Changes | |------------------|------------------------------------------------------| | 01-quickstart | Uses fxplot for heat demand visualization | | 02-heat-system | Uses helper + fxplot for demand/price plots | | 03-investment | Uses helper + fxplot; removed cost definition cell | | 04-constraints | Uses helper + fxplot for demand visualization | | 05-multi-carrier | Uses helper + fxplot for profiles | | 06a-time-varying | Uses helper + fxplot; removed inline COP calculation | | 07-scenarios | Uses helper + fxplot; removed inline data generation | Benefits: - Centralized data generation reduces code duplication - Consistent visualization with .fxplot accessor - Easier maintenance - change data in one place - Notebooks focus on teaching flixopt concepts, not data setup --- docs/notebooks/01-quickstart.ipynb | 6 +- docs/notebooks/02-heat-system.ipynb | 57 ++----- .../03-investment-optimization.ipynb | 100 ++++-------- 
.../04-operational-constraints.ipynb | 35 ++--- docs/notebooks/05-multi-carrier-system.ipynb | 113 ++++---------- .../06a-time-varying-parameters.ipynb | 87 +++-------- docs/notebooks/07-scenarios-and-periods.ipynb | 147 +++++------------- 7 files changed, 156 insertions(+), 389 deletions(-) diff --git a/docs/notebooks/01-quickstart.ipynb b/docs/notebooks/01-quickstart.ipynb index 8e599511d..9e6850214 100644 --- a/docs/notebooks/01-quickstart.ipynb +++ b/docs/notebooks/01-quickstart.ipynb @@ -34,7 +34,6 @@ "outputs": [], "source": [ "import pandas as pd\n", - "import plotly.express as px\n", "import xarray as xr\n", "\n", "import flixopt as fx\n", @@ -87,9 +86,8 @@ " name='Heat Demand [kW]',\n", ")\n", "\n", - "# Visualize the demand with plotly\n", - "fig = px.bar(x=heat_demand.time.values, y=heat_demand.values, labels={'x': 'Time', 'y': 'Heat Demand [kW]'})\n", - "fig" + "# Visualize the demand with fxplot accessor\n", + "heat_demand.to_dataset().fxplot.bar(title='Heat Demand')" ] }, { diff --git a/docs/notebooks/02-heat-system.ipynb b/docs/notebooks/02-heat-system.ipynb index 15393d870..9d71dc69a 100644 --- a/docs/notebooks/02-heat-system.ipynb +++ b/docs/notebooks/02-heat-system.ipynb @@ -32,9 +32,7 @@ "metadata": {}, "outputs": [], "source": [ - "import numpy as np\n", "import pandas as pd\n", - "import plotly.express as px\n", "import xarray as xr\n", "\n", "import flixopt as fx\n", @@ -59,29 +57,12 @@ "metadata": {}, "outputs": [], "source": [ - "# One week, hourly resolution\n", - "timesteps = pd.date_range('2024-01-15', periods=168, freq='h')\n", + "from data.tutorial_data import get_heat_system_data\n", "\n", - "# Create realistic office heat demand pattern\n", - "hours = np.arange(168)\n", - "hour_of_day = hours % 24\n", - "day_of_week = (hours // 24) % 7\n", - "\n", - "# Base demand pattern (kW)\n", - "base_demand = np.where(\n", - " (hour_of_day >= 7) & (hour_of_day <= 18), # Office hours\n", - " 80, # Daytime\n", - " 30, # Night setback\n", - ")\n", - "\n", - "# Reduce on weekends (days 5, 6)\n", - "weekend_factor = np.where(day_of_week >= 5, 0.5, 1.0)\n", - "heat_demand = base_demand * weekend_factor\n", - "\n", - "# Add some random variation\n", - "np.random.seed(42)\n", - "heat_demand = heat_demand + np.random.normal(0, 5, len(heat_demand))\n", - "heat_demand = np.clip(heat_demand, 20, 100)" + "data = get_heat_system_data()\n", + "timesteps = data['timesteps']\n", + "heat_demand = data['heat_demand']\n", + "gas_price = data['gas_price']" ] }, { @@ -91,15 +72,13 @@ "metadata": {}, "outputs": [], "source": [ - "# Visualize the demand pattern with plotly\n", - "demand_series = xr.DataArray(heat_demand, dims=['time'], coords={'time': timesteps}, name='Heat Demand [kW]')\n", - "fig = px.line(\n", - " x=demand_series.time.values,\n", - " y=demand_series.values,\n", - " title='Office Heat Demand Profile',\n", - " labels={'x': 'Time', 'y': 'kW'},\n", + "# Visualize the demand pattern with fxplot\n", + "demand_ds = xr.Dataset(\n", + " {\n", + " 'Heat Demand [kW]': xr.DataArray(heat_demand, dims=['time'], coords={'time': timesteps}),\n", + " }\n", ")\n", - "fig" + "demand_ds.fxplot.line(title='Office Heat Demand Profile')" ] }, { @@ -119,15 +98,13 @@ "metadata": {}, "outputs": [], "source": [ - "# Time-of-use gas prices (€/kWh)\n", - "gas_price = np.where(\n", - " (hour_of_day >= 6) & (hour_of_day <= 22),\n", - " 0.08, # Peak: 6am-10pm\n", - " 0.05, # Off-peak: 10pm-6am\n", + "# Visualize gas price with fxplot\n", + "price_ds = xr.Dataset(\n", + " {\n", + " 'Gas Price [EUR/kWh]': 
xr.DataArray(gas_price, dims=['time'], coords={'time': timesteps}),\n", + " }\n", ")\n", - "\n", - "fig = px.line(x=timesteps, y=gas_price, title='Gas Price [€/kWh]', labels={'x': 'Time', 'y': '€/kWh'})\n", - "fig" + "price_ds.fxplot.line(title='Gas Price')" ] }, { diff --git a/docs/notebooks/03-investment-optimization.ipynb b/docs/notebooks/03-investment-optimization.ipynb index 1d2419a79..9ddb17df4 100644 --- a/docs/notebooks/03-investment-optimization.ipynb +++ b/docs/notebooks/03-investment-optimization.ipynb @@ -32,9 +32,7 @@ "metadata": {}, "outputs": [], "source": [ - "import numpy as np\n", "import pandas as pd\n", - "import plotly.express as px\n", "import xarray as xr\n", "\n", "import flixopt as fx\n", @@ -84,23 +82,15 @@ "metadata": {}, "outputs": [], "source": [ - "# One week in summer, hourly\n", - "timesteps = pd.date_range('2024-07-15', periods=168, freq='h')\n", - "hours = np.arange(168)\n", - "hour_of_day = hours % 24\n", - "\n", - "# Solar radiation profile (kW/m² equivalent, simplified)\n", - "# Peak around noon, zero at night\n", - "solar_profile = np.maximum(0, np.sin((hour_of_day - 6) * np.pi / 12)) * 0.8\n", - "solar_profile = np.where((hour_of_day >= 6) & (hour_of_day <= 20), solar_profile, 0)\n", - "\n", - "# Add some cloud variation\n", - "np.random.seed(42)\n", - "cloud_factor = np.random.uniform(0.6, 1.0, len(timesteps))\n", - "solar_profile = solar_profile * cloud_factor\n", - "\n", - "# Pool operates 8am-10pm, constant demand when open\n", - "pool_demand = np.where((hour_of_day >= 8) & (hour_of_day <= 22), 150, 50) # kW" + "from data.tutorial_data import get_investment_data\n", + "\n", + "data = get_investment_data()\n", + "timesteps = data['timesteps']\n", + "solar_profile = data['solar_profile']\n", + "pool_demand = data['pool_demand']\n", + "GAS_PRICE = data['gas_price']\n", + "SOLAR_COST_WEEKLY = data['solar_cost_per_kw_week']\n", + "TANK_COST_WEEKLY = data['tank_cost_per_kwh_week']" ] }, { @@ -110,20 +100,14 @@ "metadata": {}, "outputs": [], "source": [ - "# Visualize profiles with plotly - using xarray and faceting\n", + "# Visualize profiles with fxplot\n", "profiles = xr.Dataset(\n", " {\n", " 'Solar Profile [kW/kW]': xr.DataArray(solar_profile, dims=['time'], coords={'time': timesteps}),\n", " 'Pool Demand [kW]': xr.DataArray(pool_demand, dims=['time'], coords={'time': timesteps}),\n", " }\n", ")\n", - "\n", - "# Convert to long format for faceting\n", - "df = profiles.to_dataframe().reset_index().melt(id_vars='time', var_name='variable', value_name='value')\n", - "fig = px.line(df, x='time', y='value', facet_col='variable', height=300)\n", - "fig.update_yaxes(matches=None, showticklabels=True)\n", - "fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1]))\n", - "fig" + "profiles.fxplot.line(title='Input Profiles')" ] }, { @@ -136,33 +120,9 @@ "Investment costs are **annualized** (€/year) to compare with operating costs:" ] }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [ - "# Cost parameters\n", - "GAS_PRICE = 0.12 # €/kWh - high gas price makes solar attractive\n", - "\n", - "# Solar collectors: 400 €/kW installed, 20-year lifetime → ~25 €/kW/year annualized\n", - "# (simplified, real calculation would include interest rate)\n", - "SOLAR_COST_PER_KW = 20 # €/kW/year\n", - "\n", - "# Buffer tank: 50 €/kWh capacity, 30-year lifetime → ~2 €/kWh/year\n", - "TANK_COST_PER_KWH = 1.5 # €/kWh/year\n", - "\n", - "# Scale factor: We model 1 week, but costs are annual\n", - 
"# So we scale investment costs to weekly equivalent\n", - "WEEKS_PER_YEAR = 52\n", - "SOLAR_COST_WEEKLY = SOLAR_COST_PER_KW / WEEKS_PER_YEAR\n", - "TANK_COST_WEEKLY = TANK_COST_PER_KWH / WEEKS_PER_YEAR" - ] - }, { "cell_type": "markdown", - "id": "9", + "id": "8", "metadata": {}, "source": [ "## Build the System with Investment Options\n", @@ -173,7 +133,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -244,7 +204,7 @@ }, { "cell_type": "markdown", - "id": "11", + "id": "10", "metadata": {}, "source": [ "## Run Optimization" @@ -253,7 +213,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -262,7 +222,7 @@ }, { "cell_type": "markdown", - "id": "13", + "id": "12", "metadata": {}, "source": [ "## Analyze Investment Decisions\n", @@ -273,7 +233,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -292,7 +252,7 @@ }, { "cell_type": "markdown", - "id": "15", + "id": "14", "metadata": {}, "source": [ "### Visualize Sizes" @@ -301,7 +261,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -310,7 +270,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "16", "metadata": {}, "source": [ "### Cost Breakdown" @@ -319,7 +279,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -342,7 +302,7 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "18", "metadata": {}, "source": [ "### System Operation" @@ -351,7 +311,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -361,7 +321,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -371,7 +331,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -380,7 +340,7 @@ }, { "cell_type": "markdown", - "id": "23", + "id": "22", "metadata": {}, "source": [ "## Compare: What if No Solar?\n", @@ -391,7 +351,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -414,7 +374,7 @@ }, { "cell_type": "markdown", - "id": "25", + "id": "24", "metadata": {}, "source": [ "### Energy Flow Sankey\n", @@ -425,7 +385,7 @@ { "cell_type": "code", "execution_count": null, - "id": "26", + "id": "25", "metadata": {}, "outputs": [], "source": [ @@ -434,7 +394,7 @@ }, { "cell_type": "markdown", - "id": "27", + "id": "26", "metadata": {}, "source": [ "## Key Concepts\n", diff --git a/docs/notebooks/04-operational-constraints.ipynb b/docs/notebooks/04-operational-constraints.ipynb index 18c0a93ce..2f2163886 100644 --- a/docs/notebooks/04-operational-constraints.ipynb +++ b/docs/notebooks/04-operational-constraints.ipynb @@ -32,7 +32,6 @@ "metadata": {}, "outputs": [], "source": [ - "import numpy as np\n", "import pandas as pd\n", "import plotly.express as px\n", "import xarray as xr\n", @@ -73,29 +72,11 @@ "metadata": {}, "outputs": [], "source": [ - "# 3 days, hourly resolution\n", - "timesteps = pd.date_range('2024-03-11', periods=72, freq='h')\n", - "hours = np.arange(72)\n", - "hour_of_day = hours % 24\n", - "\n", - "# Factory operates in shifts:\n", - "# - Day shift (6am-2pm): 400 kW\n", - "# - Evening shift 
(2pm-10pm): 350 kW\n", - "# - Night (10pm-6am): 80 kW (maintenance heating only)\n", - "\n", - "steam_demand = np.select(\n", - " [\n", - " (hour_of_day >= 6) & (hour_of_day < 14), # Day shift\n", - " (hour_of_day >= 14) & (hour_of_day < 22), # Evening shift\n", - " ],\n", - " [400, 350],\n", - " default=80, # Night\n", - ")\n", + "from data.tutorial_data import get_constraints_data\n", "\n", - "# Add some variation\n", - "np.random.seed(123)\n", - "steam_demand = steam_demand + np.random.normal(0, 20, len(steam_demand))\n", - "steam_demand = np.clip(steam_demand, 50, 450).astype(float)" + "data = get_constraints_data()\n", + "timesteps = data['timesteps']\n", + "steam_demand = data['steam_demand']" ] }, { @@ -105,7 +86,13 @@ "metadata": {}, "outputs": [], "source": [ - "px.line(x=timesteps, y=steam_demand, title='Factory Steam Demand', labels={'x': 'Time', 'y': 'kW'})" + "# Visualize the demand with fxplot\n", + "demand_ds = xr.Dataset(\n", + " {\n", + " 'Steam Demand [kW]': xr.DataArray(steam_demand, dims=['time'], coords={'time': timesteps}),\n", + " }\n", + ")\n", + "demand_ds.fxplot.line(title='Factory Steam Demand')" ] }, { diff --git a/docs/notebooks/05-multi-carrier-system.ipynb b/docs/notebooks/05-multi-carrier-system.ipynb index 507ba0a6a..707ef517a 100644 --- a/docs/notebooks/05-multi-carrier-system.ipynb +++ b/docs/notebooks/05-multi-carrier-system.ipynb @@ -32,9 +32,7 @@ "metadata": {}, "outputs": [], "source": [ - "import numpy as np\n", "import pandas as pd\n", - "import plotly.express as px\n", "import xarray as xr\n", "\n", "import flixopt as fx\n", @@ -85,37 +83,15 @@ "metadata": {}, "outputs": [], "source": [ - "# One week, hourly\n", - "timesteps = pd.date_range('2024-02-05', periods=168, freq='h')\n", - "hours = np.arange(168)\n", - "hour_of_day = hours % 24\n", - "\n", - "# Hospital electricity demand (kW)\n", - "# Base load + daily pattern (higher during day for equipment, lighting)\n", - "elec_base = 150 # 24/7 critical systems\n", - "elec_daily = 100 * np.sin((hour_of_day - 6) * np.pi / 12) # Peak at noon\n", - "elec_daily = np.maximum(0, elec_daily)\n", - "electricity_demand = elec_base + elec_daily\n", - "\n", - "# Hospital heat demand (kW)\n", - "# Higher in morning, drops during day, increases for hot water in evening\n", - "heat_pattern = np.select(\n", - " [\n", - " (hour_of_day >= 5) & (hour_of_day < 9), # Morning warmup\n", - " (hour_of_day >= 9) & (hour_of_day < 17), # Daytime\n", - " (hour_of_day >= 17) & (hour_of_day < 22), # Evening\n", - " ],\n", - " [350, 250, 300],\n", - " default=200, # Night\n", - ")\n", - "heat_demand = heat_pattern.astype(float)\n", - "\n", - "# Add random variation\n", - "np.random.seed(456)\n", - "electricity_demand += np.random.normal(0, 15, len(timesteps))\n", - "heat_demand += np.random.normal(0, 20, len(timesteps))\n", - "electricity_demand = np.clip(electricity_demand, 100, 300)\n", - "heat_demand = np.clip(heat_demand, 150, 400)" + "from data.tutorial_data import get_multicarrier_data\n", + "\n", + "data = get_multicarrier_data()\n", + "timesteps = data['timesteps']\n", + "electricity_demand = data['electricity_demand']\n", + "heat_demand = data['heat_demand']\n", + "elec_buy_price = data['elec_buy_price']\n", + "elec_sell_price = data['elec_sell_price']\n", + "gas_price = data['gas_price']" ] }, { @@ -125,47 +101,20 @@ "metadata": {}, "outputs": [], "source": [ - "# Electricity prices (€/kWh)\n", - "# Time-of-use: expensive during day, cheaper at night\n", - "elec_buy_price = np.where(\n", - " (hour_of_day >= 7) & 
(hour_of_day <= 21),\n", - " 0.35, # Peak - high electricity prices make CHP attractive\n", - " 0.20, # Off-peak\n", - ")\n", - "\n", - "# Feed-in tariff (sell price) - allows selling excess CHP electricity\n", - "elec_sell_price = 0.12 # Fixed feed-in rate\n", - "\n", - "# Gas price - relatively low, favoring gas-based generation\n", - "gas_price = 0.05 # €/kWh" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [ - "# Visualize demands and prices with plotly - using xarray and faceting\n", + "# Visualize demands and prices with fxplot\n", "profiles = xr.Dataset(\n", " {\n", " 'Electricity Demand [kW]': xr.DataArray(electricity_demand, dims=['time'], coords={'time': timesteps}),\n", " 'Heat Demand [kW]': xr.DataArray(heat_demand, dims=['time'], coords={'time': timesteps}),\n", - " 'Elec. Buy Price [€/kWh]': xr.DataArray(elec_buy_price, dims=['time'], coords={'time': timesteps}),\n", + " 'Elec. Buy Price [EUR/kWh]': xr.DataArray(elec_buy_price, dims=['time'], coords={'time': timesteps}),\n", " }\n", ")\n", - "\n", - "df = profiles.to_dataframe().reset_index().melt(id_vars='time', var_name='variable', value_name='value')\n", - "fig = px.line(df, x='time', y='value', facet_col='variable', height=300)\n", - "fig.update_yaxes(matches=None, showticklabels=True)\n", - "fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1]))\n", - "fig" + "profiles.fxplot.line(title='Input Profiles')" ] }, { "cell_type": "markdown", - "id": "8", + "id": "7", "metadata": {}, "source": [ "## Build the Multi-Carrier System" @@ -174,7 +123,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -267,7 +216,7 @@ }, { "cell_type": "markdown", - "id": "10", + "id": "9", "metadata": {}, "source": [ "## Run Optimization" @@ -276,7 +225,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -285,7 +234,7 @@ }, { "cell_type": "markdown", - "id": "12", + "id": "11", "metadata": {}, "source": [ "## Analyze Results\n", @@ -296,7 +245,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -305,7 +254,7 @@ }, { "cell_type": "markdown", - "id": "14", + "id": "13", "metadata": {}, "source": [ "### Heat Balance" @@ -314,7 +263,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -323,7 +272,7 @@ }, { "cell_type": "markdown", - "id": "16", + "id": "15", "metadata": {}, "source": [ "### Gas Balance" @@ -332,7 +281,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -341,7 +290,7 @@ }, { "cell_type": "markdown", - "id": "18", + "id": "17", "metadata": {}, "source": [ "### CHP Operation Pattern" @@ -350,7 +299,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "18", "metadata": {}, "outputs": [], "source": [ @@ -359,7 +308,7 @@ }, { "cell_type": "markdown", - "id": "20", + "id": "19", "metadata": {}, "source": [ "### Cost and Emissions Summary" @@ -368,7 +317,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -401,7 +350,7 @@ }, { "cell_type": "markdown", - "id": "22", + "id": "21", "metadata": {}, "source": [ "### Compare: What if No CHP?\n", @@ -412,7 +361,7 @@ { "cell_type": "code", 
"execution_count": null, - "id": "23", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -479,7 +428,7 @@ }, { "cell_type": "markdown", - "id": "24", + "id": "23", "metadata": {}, "source": [ "### Energy Flow Sankey\n", @@ -490,7 +439,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "24", "metadata": {}, "outputs": [], "source": [ @@ -499,7 +448,7 @@ }, { "cell_type": "markdown", - "id": "26", + "id": "25", "metadata": {}, "source": [ "## Key Concepts\n", diff --git a/docs/notebooks/06a-time-varying-parameters.ipynb b/docs/notebooks/06a-time-varying-parameters.ipynb index 5c833b2ea..4eaba9854 100644 --- a/docs/notebooks/06a-time-varying-parameters.ipynb +++ b/docs/notebooks/06a-time-varying-parameters.ipynb @@ -32,7 +32,6 @@ "outputs": [], "source": [ "import numpy as np\n", - "import pandas as pd\n", "import plotly.express as px\n", "import xarray as xr\n", "\n", @@ -78,20 +77,13 @@ "metadata": {}, "outputs": [], "source": [ - "# One winter week\n", - "timesteps = pd.date_range('2024-01-22', periods=168, freq='h')\n", - "hours = np.arange(168)\n", - "hour_of_day = hours % 24\n", - "\n", - "# Outdoor temperature: daily cycle with cold nights\n", - "temp_base = 2 # Average temp in °C\n", - "temp_amplitude = 5 # Daily variation\n", - "outdoor_temp = temp_base + temp_amplitude * np.sin((hour_of_day - 6) * np.pi / 12)\n", - "\n", - "# Add day-to-day variation for realism\n", - "np.random.seed(789)\n", - "daily_offset = np.repeat(np.random.uniform(-3, 3, 7), 24)\n", - "outdoor_temp = outdoor_temp + daily_offset" + "from data.tutorial_data import get_time_varying_data\n", + "\n", + "data = get_time_varying_data()\n", + "timesteps = data['timesteps']\n", + "outdoor_temp = data['outdoor_temp']\n", + "heat_demand = data['heat_demand']\n", + "cop = data['cop']" ] }, { @@ -101,41 +93,24 @@ "metadata": {}, "outputs": [], "source": [ - "# Heat demand: inversely related to outdoor temp (higher demand when colder)\n", - "heat_demand = 200 - 8 * outdoor_temp\n", - "heat_demand = np.clip(heat_demand, 100, 300)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [ - "# Visualize input profiles\n", + "# Visualize input profiles with fxplot\n", "profiles = xr.Dataset(\n", " {\n", " 'Outdoor Temp [°C]': xr.DataArray(outdoor_temp, dims=['time'], coords={'time': timesteps}),\n", " 'Heat Demand [kW]': xr.DataArray(heat_demand, dims=['time'], coords={'time': timesteps}),\n", " }\n", ")\n", - "\n", - "df = profiles.to_dataframe().reset_index().melt(id_vars='time', var_name='variable', value_name='value')\n", - "fig = px.line(df, x='time', y='value', facet_col='variable', height=300)\n", - "fig.update_yaxes(matches=None, showticklabels=True)\n", - "fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1]))\n", - "fig" + "profiles.fxplot.line(title='Input Profiles')" ] }, { "cell_type": "markdown", - "id": "8", + "id": "7", "metadata": {}, "source": [ - "## Calculate Time-Varying COP\n", + "## Time-Varying COP\n", "\n", - "The COP depends on outdoor temperature. We use a simplified Carnot-based formula:\n", + "The COP depends on outdoor temperature. 
The helper function uses a simplified Carnot-based formula:\n", "\n", "$$\\text{COP}_{\\text{real}} \\approx 0.45 \\times \\text{COP}_{\\text{Carnot}} = 0.45 \\times \\frac{T_{\\text{supply}}}{T_{\\text{supply}} - T_{\\text{source}}}$$\n", "\n", @@ -145,30 +120,14 @@ { "cell_type": "code", "execution_count": null, - "id": "9", - "metadata": {}, - "outputs": [], - "source": [ - "# COP calculation\n", - "T_supply = 45 + 273.15 # Supply temperature 45°C in Kelvin\n", - "T_source = outdoor_temp + 273.15 # Outdoor temp in Kelvin\n", - "\n", - "carnot_cop = T_supply / (T_supply - T_source)\n", - "real_cop = 0.45 * carnot_cop\n", - "real_cop = np.clip(real_cop, 2.0, 5.0) # Physical limits" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "10", + "id": "8", "metadata": {}, "outputs": [], "source": [ "# Visualize COP vs temperature relationship\n", "px.scatter(\n", " x=outdoor_temp,\n", - " y=real_cop,\n", + " y=cop,\n", " title='Heat Pump COP vs Outdoor Temperature',\n", " labels={'x': 'Outdoor Temperature [°C]', 'y': 'COP'},\n", " opacity=0.5,\n", @@ -177,7 +136,7 @@ }, { "cell_type": "markdown", - "id": "11", + "id": "9", "metadata": {}, "source": [ "## Build the Model\n", @@ -192,7 +151,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -214,7 +173,7 @@ " 'HeatPump',\n", " inputs=[fx.Flow('Elec', bus='Electricity', size=150)],\n", " outputs=[fx.Flow('Heat', bus='Heat', size=500)],\n", - " conversion_factors=[{'Elec': real_cop, 'Heat': 1}], # <-- Array for time-varying COP\n", + " conversion_factors=[{'Elec': cop, 'Heat': 1}], # <-- Array for time-varying COP\n", " ),\n", " # Heat demand\n", " fx.Sink('Building', inputs=[fx.Flow('Heat', bus='Heat', size=1, fixed_relative_profile=heat_demand)]),\n", @@ -225,7 +184,7 @@ }, { "cell_type": "markdown", - "id": "13", + "id": "11", "metadata": {}, "source": [ "## Analyze Results" @@ -234,7 +193,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -244,7 +203,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -254,7 +213,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -283,7 +242,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "15", "metadata": {}, "source": [ "## Key Concepts\n", @@ -323,7 +282,7 @@ }, { "cell_type": "markdown", - "id": "18", + "id": "16", "metadata": {}, "source": [ "## Summary\n", diff --git a/docs/notebooks/07-scenarios-and-periods.ipynb b/docs/notebooks/07-scenarios-and-periods.ipynb index e770946eb..52d04b0d8 100644 --- a/docs/notebooks/07-scenarios-and-periods.ipynb +++ b/docs/notebooks/07-scenarios-and-periods.ipynb @@ -32,9 +32,8 @@ "metadata": {}, "outputs": [], "source": [ - "import numpy as np\n", "import pandas as pd\n", - "import plotly.express as px\n", + "import xarray as xr\n", "\n", "import flixopt as fx\n", "\n", @@ -72,15 +71,16 @@ "metadata": {}, "outputs": [], "source": [ - "# Time horizon: one representative winter week\n", - "timesteps = pd.date_range('2024-01-15', periods=168, freq='h') # 7 days\n", - "\n", - "# Planning periods (years)\n", - "periods = pd.Index([2024, 2025, 2026], name='period')\n", - "\n", - "# Scenarios with probabilities\n", - "scenarios = pd.Index(['Mild Winter', 'Harsh Winter'], name='scenario')\n", - "scenario_weights = np.array([0.6, 0.4]) # 60% mild, 40% 
harsh" + "from data.tutorial_data import get_scenarios_data\n", + "\n", + "data = get_scenarios_data()\n", + "timesteps = data['timesteps']\n", + "periods = data['periods']\n", + "scenarios = data['scenarios']\n", + "scenario_weights = data['scenario_weights']\n", + "heat_demand = data['heat_demand']\n", + "gas_prices = data['gas_prices']\n", + "elec_prices = data['elec_prices']" ] }, { @@ -88,7 +88,7 @@ "id": "6", "metadata": {}, "source": [ - "## Create Scenario-Dependent Demand Profiles\n", + "## Scenario-Dependent Demand Profiles\n", "\n", "Heat demand differs significantly between mild and harsh winters:" ] @@ -100,86 +100,23 @@ "metadata": {}, "outputs": [], "source": [ - "hours = np.arange(168)\n", - "hour_of_day = hours % 24\n", - "\n", - "# Base daily pattern (kW): higher in morning/evening\n", - "daily_pattern = np.select(\n", - " [\n", - " (hour_of_day >= 6) & (hour_of_day < 9), # Morning peak\n", - " (hour_of_day >= 9) & (hour_of_day < 17), # Daytime\n", - " (hour_of_day >= 17) & (hour_of_day < 22), # Evening peak\n", - " ],\n", - " [180, 120, 160],\n", - " default=100, # Night\n", - ").astype(float)\n", - "\n", - "# Add random variation\n", - "np.random.seed(42)\n", - "noise = np.random.normal(0, 10, len(timesteps))\n", - "\n", - "# Mild winter: lower demand\n", - "mild_demand = daily_pattern * 0.8 + noise\n", - "mild_demand = np.clip(mild_demand, 60, 200)\n", - "\n", - "# Harsh winter: higher demand\n", - "harsh_demand = daily_pattern * 1.3 + noise * 1.5\n", - "harsh_demand = np.clip(harsh_demand, 100, 280)\n", - "\n", - "# Create DataFrame with scenario columns (flixopt uses column names to match scenarios)\n", - "heat_demand = pd.DataFrame(\n", + "# Visualize demand scenarios with fxplot\n", + "demand_ds = xr.Dataset(\n", " {\n", - " 'Mild Winter': mild_demand,\n", - " 'Harsh Winter': harsh_demand,\n", - " },\n", - " index=timesteps,\n", - ")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [ - "# Visualize demand scenarios with plotly\n", - "fig = px.line(\n", - " heat_demand.iloc[:48],\n", - " title='Heat Demand by Scenario (First 2 Days)',\n", - " labels={'index': 'Time', 'value': 'kW', 'variable': 'Scenario'},\n", + " scenario: xr.DataArray(\n", + " heat_demand[scenario].values,\n", + " dims=['time'],\n", + " coords={'time': timesteps},\n", + " )\n", + " for scenario in scenarios\n", + " }\n", ")\n", - "fig.update_traces(mode='lines')\n", - "fig" + "demand_ds.fxplot.line(title='Heat Demand by Scenario')" ] }, { "cell_type": "markdown", - "id": "9", - "metadata": {}, - "source": [ - "## Create Period-Dependent Prices\n", - "\n", - "Energy prices change across planning years:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "10", - "metadata": {}, - "outputs": [], - "source": [ - "# Gas prices by period (€/kWh) - expected to rise\n", - "gas_prices = np.array([0.06, 0.08, 0.10]) # 2024, 2025, 2026\n", - "\n", - "# Electricity sell prices by period (€/kWh) - CHP revenue\n", - "elec_prices = np.array([0.28, 0.34, 0.43]) # Rising with gas" - ] - }, - { - "cell_type": "markdown", - "id": "11", + "id": "8", "metadata": {}, "source": [ "## Build the Flow System\n", @@ -190,7 +127,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -211,7 +148,7 @@ }, { "cell_type": "markdown", - "id": "13", + "id": "10", "metadata": {}, "source": [ "## Add Components" @@ -220,7 +157,7 @@ { "cell_type": "code", 
"execution_count": null, - "id": "14", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -297,7 +234,7 @@ }, { "cell_type": "markdown", - "id": "15", + "id": "12", "metadata": {}, "source": [ "## Run Optimization" @@ -306,7 +243,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -315,7 +252,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "14", "metadata": {}, "source": [ "## Analyze Results\n", @@ -326,7 +263,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -344,7 +281,7 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "16", "metadata": {}, "source": [ "### Heat Balance by Scenario\n", @@ -355,7 +292,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -364,7 +301,7 @@ }, { "cell_type": "markdown", - "id": "21", + "id": "18", "metadata": {}, "source": [ "### CHP Operation Patterns" @@ -373,7 +310,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -382,7 +319,7 @@ }, { "cell_type": "markdown", - "id": "23", + "id": "20", "metadata": {}, "source": [ "### Multi-Dimensional Data Access\n", @@ -393,7 +330,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -406,7 +343,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -426,7 +363,7 @@ }, { "cell_type": "markdown", - "id": "26", + "id": "23", "metadata": {}, "source": [ "## Sensitivity: What if Only Mild Winter?\n", @@ -437,7 +374,7 @@ { "cell_type": "code", "execution_count": null, - "id": "27", + "id": "24", "metadata": {}, "outputs": [], "source": [ @@ -459,7 +396,7 @@ }, { "cell_type": "markdown", - "id": "28", + "id": "25", "metadata": {}, "source": [ "### Energy Flow Sankey\n", @@ -470,7 +407,7 @@ { "cell_type": "code", "execution_count": null, - "id": "29", + "id": "26", "metadata": {}, "outputs": [], "source": [ @@ -479,7 +416,7 @@ }, { "cell_type": "markdown", - "id": "30", + "id": "27", "metadata": {}, "source": [ "## Key Concepts\n", From b1fa086a7703d5def16b4bfa92272243b1229a55 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 4 Jan 2026 21:33:31 +0100 Subject: [PATCH 183/191] Removed animation_frame from the method signature, docstring, and updated the internal call to pass Non --- flixopt/dataset_plot_accessor.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/flixopt/dataset_plot_accessor.py b/flixopt/dataset_plot_accessor.py index fc38f730b..a022f3988 100644 --- a/flixopt/dataset_plot_accessor.py +++ b/flixopt/dataset_plot_accessor.py @@ -560,13 +560,12 @@ def pie( title: str = '', facet_col: str | Literal['auto'] | None = 'auto', facet_row: str | Literal['auto'] | None = 'auto', - animation_frame: str | Literal['auto'] | None = 'auto', facet_cols: int | None = None, **px_kwargs: Any, ) -> go.Figure: """Create a pie chart from aggregated dataset values. - Extra dimensions are auto-assigned to facet_col, facet_row, and animation_frame. + Extra dimensions are auto-assigned to facet_col and facet_row. For scalar values, a single pie is shown. Args: @@ -574,7 +573,6 @@ def pie( title: Plot title. facet_col: Dimension for column facets. 'auto' uses CONFIG priority. 
facet_row: Dimension for row facets. 'auto' uses CONFIG priority. - animation_frame: Dimension for animation slider. 'auto' uses CONFIG priority. facet_cols: Number of columns in facet grid wrap. **px_kwargs: Additional arguments passed to plotly.express.pie. @@ -609,9 +607,8 @@ def pie( if df.empty: return go.Figure() - actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( - self._ds, facet_col, facet_row, animation_frame - ) + # Note: px.pie doesn't support animation_frame + actual_facet_col, actual_facet_row, _ = _resolve_auto_facets(self._ds, facet_col, facet_row, None) facet_col_wrap = facet_cols or CONFIG.Plotting.default_facet_cols fig_kwargs: dict[str, Any] = { @@ -630,8 +627,6 @@ def pie( fig_kwargs['facet_col_wrap'] = facet_col_wrap if actual_facet_row: fig_kwargs['facet_row'] = actual_facet_row - if actual_anim: - fig_kwargs['animation_frame'] = actual_anim return px.pie(**fig_kwargs) From 5f370039fb1698ab933e06c9c88e8ba45b7e10fc Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 4 Jan 2026 22:01:00 +0100 Subject: [PATCH 184/191] Add missing notebooks to docs --- mkdocs.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/mkdocs.yml b/mkdocs.yml index 847f3fbd6..9eed96ad6 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -69,8 +69,13 @@ nav: - Piecewise Effects: notebooks/06c-piecewise-effects.ipynb - Scaling: - Scenarios: notebooks/07-scenarios-and-periods.ipynb - - Clustering: notebooks/08a-aggregation.ipynb + - Aggregation: notebooks/08a-aggregation.ipynb - Rolling Horizon: notebooks/08b-rolling-horizon.ipynb + - Clustering: + - Introduction: notebooks/08c-clustering.ipynb + - Storage Modes: notebooks/08c2-clustering-storage-modes.ipynb + - Multi-Period: notebooks/08d-clustering-multiperiod.ipynb + - Internals: notebooks/08e-clustering-internals.ipynb - Results: - Plotting: notebooks/09-plotting-and-data-access.ipynb - Custom Data Plotting: notebooks/fxplot_accessor_demo.ipynb From 184513eca064212618158200ac5be7a15f6256bf Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 4 Jan 2026 22:06:33 +0100 Subject: [PATCH 185/191] Fix ci --- .github/workflows/docs.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index 147677a78..ac9ac9cd3 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -73,6 +73,8 @@ jobs: - name: Execute notebooks in parallel if: steps.notebook-cache.outputs.cache-hit != 'true' + env: + PYTHONPATH: docs/notebooks run: | # Execute all notebooks in parallel (4 at a time) find docs/notebooks -name '*.ipynb' -print0 | \ @@ -134,6 +136,8 @@ jobs: - name: Execute notebooks in parallel if: steps.notebook-cache.outputs.cache-hit != 'true' + env: + PYTHONPATH: docs/notebooks run: | find docs/notebooks -name '*.ipynb' -print0 | \ xargs -0 -P 4 -I {} jupyter execute --inplace {} From 0439d10f3a3e26ce4da82173ab8c36c6f6824c31 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Sun, 4 Jan 2026 22:20:28 +0100 Subject: [PATCH 186/191] Fix ci --- .github/workflows/docs.yaml | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index ac9ac9cd3..8943252fc 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -73,11 +73,10 @@ jobs: - name: Execute notebooks in parallel if: steps.notebook-cache.outputs.cache-hit != 'true' - env: - 
PYTHONPATH: docs/notebooks run: | # Execute all notebooks in parallel (4 at a time) - find docs/notebooks -name '*.ipynb' -print0 | \ + # Run from notebooks directory so relative imports work + cd docs/notebooks && find . -name '*.ipynb' -print0 | \ xargs -0 -P 4 -I {} jupyter execute --inplace {} - name: Build docs @@ -136,10 +135,8 @@ jobs: - name: Execute notebooks in parallel if: steps.notebook-cache.outputs.cache-hit != 'true' - env: - PYTHONPATH: docs/notebooks run: | - find docs/notebooks -name '*.ipynb' -print0 | \ + cd docs/notebooks && find . -name '*.ipynb' -print0 | \ xargs -0 -P 4 -I {} jupyter execute --inplace {} - name: Configure Git From cfe8b9c21c3b3324b9c9cce6dc81ba3add128c43 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 5 Jan 2026 11:42:00 +0100 Subject: [PATCH 187/191] Add tutorial_data.py --- docs/notebooks/data/tutorial_data.py | 246 +++++++++++++++++++++++++++ 1 file changed, 246 insertions(+) create mode 100644 docs/notebooks/data/tutorial_data.py diff --git a/docs/notebooks/data/tutorial_data.py b/docs/notebooks/data/tutorial_data.py new file mode 100644 index 000000000..3b4997e0a --- /dev/null +++ b/docs/notebooks/data/tutorial_data.py @@ -0,0 +1,246 @@ +"""Generate tutorial data for notebooks 01-07. + +These functions return data (timesteps, profiles, prices) rather than full FlowSystems, +so notebooks can demonstrate building systems step by step. + +Usage: + from data.tutorial_data import get_quickstart_data, get_heat_system_data, ... +""" + +import numpy as np +import pandas as pd +import xarray as xr + + +def get_quickstart_data() -> dict: + """Data for 01-quickstart: minimal 4-hour example. + + Returns: + dict with: timesteps, heat_demand (xr.DataArray) + """ + timesteps = pd.date_range('2024-01-15 08:00', periods=4, freq='h') + heat_demand = xr.DataArray( + [30, 50, 45, 25], + dims=['time'], + coords={'time': timesteps}, + name='Heat Demand [kW]', + ) + return { + 'timesteps': timesteps, + 'heat_demand': heat_demand, + } + + +def get_heat_system_data() -> dict: + """Data for 02-heat-system: one week with storage. + + Returns: + dict with: timesteps, heat_demand, gas_price (arrays) + """ + timesteps = pd.date_range('2024-01-15', periods=168, freq='h') + hours = np.arange(168) + hour_of_day = hours % 24 + day_of_week = (hours // 24) % 7 + + # Office heat demand pattern + base_demand = np.where((hour_of_day >= 7) & (hour_of_day <= 18), 80, 30) + weekend_factor = np.where(day_of_week >= 5, 0.5, 1.0) + np.random.seed(42) + heat_demand = base_demand * weekend_factor + np.random.normal(0, 5, len(timesteps)) + heat_demand = np.clip(heat_demand, 20, 100) + + # Time-of-use gas prices + gas_price = np.where((hour_of_day >= 6) & (hour_of_day <= 22), 0.08, 0.05) + + return { + 'timesteps': timesteps, + 'heat_demand': heat_demand, + 'gas_price': gas_price, + } + + +def get_investment_data() -> dict: + """Data for 03-investment-optimization: solar pool heating. 
+ + Returns: + dict with: timesteps, solar_profile, pool_demand, costs + """ + timesteps = pd.date_range('2024-07-15', periods=168, freq='h') + hours = np.arange(168) + hour_of_day = hours % 24 + + # Solar profile + solar_profile = np.maximum(0, np.sin((hour_of_day - 6) * np.pi / 12)) * 0.8 + solar_profile = np.where((hour_of_day >= 6) & (hour_of_day <= 20), solar_profile, 0) + np.random.seed(42) + solar_profile = solar_profile * np.random.uniform(0.6, 1.0, len(timesteps)) + + # Pool demand + pool_demand = np.where((hour_of_day >= 8) & (hour_of_day <= 22), 150, 50) + + return { + 'timesteps': timesteps, + 'solar_profile': solar_profile, + 'pool_demand': pool_demand, + 'gas_price': 0.12, + 'solar_cost_per_kw_week': 20 / 52, + 'tank_cost_per_kwh_week': 1.5 / 52, + } + + +def get_constraints_data() -> dict: + """Data for 04-operational-constraints: factory steam demand. + + Returns: + dict with: timesteps, steam_demand + """ + timesteps = pd.date_range('2024-03-11', periods=72, freq='h') + hours = np.arange(72) + hour_of_day = hours % 24 + + # Shift-based demand + steam_demand = np.select( + [ + (hour_of_day >= 6) & (hour_of_day < 14), + (hour_of_day >= 14) & (hour_of_day < 22), + ], + [400, 350], + default=80, + ).astype(float) + + np.random.seed(123) + steam_demand = steam_demand + np.random.normal(0, 20, len(steam_demand)) + steam_demand = np.clip(steam_demand, 50, 450) + + return { + 'timesteps': timesteps, + 'steam_demand': steam_demand, + } + + +def get_multicarrier_data() -> dict: + """Data for 05-multi-carrier-system: hospital CHP. + + Returns: + dict with: timesteps, electricity_demand, heat_demand, prices + """ + timesteps = pd.date_range('2024-02-05', periods=168, freq='h') + hours = np.arange(168) + hour_of_day = hours % 24 + + # Electricity demand + elec_base = 150 + elec_daily = 100 * np.sin((hour_of_day - 6) * np.pi / 12) + elec_daily = np.maximum(0, elec_daily) + electricity_demand = elec_base + elec_daily + + # Heat demand + heat_pattern = np.select( + [ + (hour_of_day >= 5) & (hour_of_day < 9), + (hour_of_day >= 9) & (hour_of_day < 17), + (hour_of_day >= 17) & (hour_of_day < 22), + ], + [350, 250, 300], + default=200, + ).astype(float) + + np.random.seed(456) + electricity_demand += np.random.normal(0, 15, len(timesteps)) + heat_demand = heat_pattern + np.random.normal(0, 20, len(timesteps)) + electricity_demand = np.clip(electricity_demand, 100, 300) + heat_demand = np.clip(heat_demand, 150, 400) + + # Prices + elec_buy_price = np.where((hour_of_day >= 7) & (hour_of_day <= 21), 0.35, 0.20) + + return { + 'timesteps': timesteps, + 'electricity_demand': electricity_demand, + 'heat_demand': heat_demand, + 'elec_buy_price': elec_buy_price, + 'elec_sell_price': 0.12, + 'gas_price': 0.05, + } + + +def get_time_varying_data() -> dict: + """Data for 06a-time-varying-parameters: heat pump with variable COP. 
+ + Returns: + dict with: timesteps, outdoor_temp, heat_demand, cop + """ + timesteps = pd.date_range('2024-01-22', periods=168, freq='h') + hours = np.arange(168) + hour_of_day = hours % 24 + + # Outdoor temperature + temp_base = 2 + temp_amplitude = 5 + outdoor_temp = temp_base + temp_amplitude * np.sin((hour_of_day - 6) * np.pi / 12) + np.random.seed(789) + outdoor_temp = outdoor_temp + np.repeat(np.random.uniform(-3, 3, 7), 24) + + # Heat demand (inversely related to temperature) + heat_demand = 200 - 8 * outdoor_temp + heat_demand = np.clip(heat_demand, 100, 300) + + # COP calculation + t_supply = 45 + 273.15 + t_source = outdoor_temp + 273.15 + carnot_cop = t_supply / (t_supply - t_source) + cop = np.clip(0.45 * carnot_cop, 2.0, 5.0) + + return { + 'timesteps': timesteps, + 'outdoor_temp': outdoor_temp, + 'heat_demand': heat_demand, + 'cop': cop, + } + + +def get_scenarios_data() -> dict: + """Data for 07-scenarios-and-periods: multi-year planning. + + Returns: + dict with: timesteps, periods, scenarios, weights, heat_demand (DataFrame), prices + """ + timesteps = pd.date_range('2024-01-15', periods=168, freq='h') + periods = pd.Index([2024, 2025, 2026], name='period') + scenarios = pd.Index(['Mild Winter', 'Harsh Winter'], name='scenario') + scenario_weights = np.array([0.6, 0.4]) + + hours = np.arange(168) + hour_of_day = hours % 24 + + # Base pattern + daily_pattern = np.select( + [ + (hour_of_day >= 6) & (hour_of_day < 9), + (hour_of_day >= 9) & (hour_of_day < 17), + (hour_of_day >= 17) & (hour_of_day < 22), + ], + [180, 120, 160], + default=100, + ).astype(float) + + np.random.seed(42) + noise = np.random.normal(0, 10, len(timesteps)) + + mild_demand = np.clip(daily_pattern * 0.8 + noise, 60, 200) + harsh_demand = np.clip(daily_pattern * 1.3 + noise * 1.5, 100, 280) + + heat_demand = pd.DataFrame( + {'Mild Winter': mild_demand, 'Harsh Winter': harsh_demand}, + index=timesteps, + ) + + return { + 'timesteps': timesteps, + 'periods': periods, + 'scenarios': scenarios, + 'scenario_weights': scenario_weights, + 'heat_demand': heat_demand, + 'gas_prices': np.array([0.06, 0.08, 0.10]), + 'elec_prices': np.array([0.28, 0.34, 0.43]), + } From c9bd406dbbd17ba9f5d4d48de7e0ba12ce864e02 Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 5 Jan 2026 11:52:17 +0100 Subject: [PATCH 188/191] Retrigger ci --- flixopt/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flixopt/__init__.py b/flixopt/__init__.py index 0fd550707..54fa21274 100644 --- a/flixopt/__init__.py +++ b/flixopt/__init__.py @@ -66,7 +66,7 @@ 'solvers', ] -# Initialize logger with default configuration (silent: WARNING level, NullHandler) +# Initialize logger with default configuration (silent: WARNING level, NullHandler). 
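The NullHandler default referenced in this comment keeps the library silent; an application opts in to log output itself. A minimal sketch using only the standard-library logging module (not flixopt API):

```python
import logging

# Opt-in sketch: the library ships with level WARNING and a NullHandler
# (as configured below), so an application attaches its own handler.
log = logging.getLogger('flixopt')
log.setLevel(logging.INFO)               # lower the default WARNING threshold
log.addHandler(logging.StreamHandler())  # route records to stderr
```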
logger = logging.getLogger('flixopt')
logger.setLevel(logging.WARNING)
logger.addHandler(logging.NullHandler())

From c94f6dace2df0e4bf4d3eea2ebee84036310b0b2 Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Mon, 5 Jan 2026 14:23:11 +0100
Subject: [PATCH 189/191] Clustering Parameter Refinements & Unified Slot
 Assignment (#552)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Improve documentation and improve CHANGELOG.md

* Fix Changelog and change to v6.0.0

* Fix Changelog and change to v6.0.0

* Fix Changelog and change to v6.0.0

* Enhanced Clustering Control

  New Parameters Added to cluster() Method

  | Parameter               | Type                            | Default              | Purpose                                                                                                             |
  |-------------------------|---------------------------------|----------------------|---------------------------------------------------------------------------------------------------------------------|
  | cluster_method          | Literal[...]                    | 'k_means'            | Clustering algorithm ('k_means', 'hierarchical', 'k_medoids', 'k_maxoids', 'averaging')                             |
  | representation_method   | Literal[...]                    | 'meanRepresentation' | How clusters are represented ('meanRepresentation', 'medoidRepresentation', 'distributionAndMinMaxRepresentation')  |
  | extreme_period_method   | Literal[...]                    | 'new_cluster_center' | How peaks are integrated ('None', 'append', 'new_cluster_center', 'replace_cluster_center')                         |
  | rescale_cluster_periods | bool                            | True                 | Rescale clusters to match original means                                                                            |
  | random_state            | int \| None                     | None                 | Random seed for reproducibility                                                                                     |
  | predef_cluster_order    | np.ndarray \| list[int] \| None | None                 | Manual clustering assignments                                                                                       |
  | **tsam_kwargs           | Any                             | -                    | Pass-through for any tsam parameter                                                                                 |

  Clustering Quality Metrics

  Access via fs.clustering.metrics after clustering - returns a DataFrame with RMSE, MAE, and other accuracy indicators per time series.

  Files Modified

  1. flixopt/transform_accessor.py - Updated cluster() signature and tsam call
  2. flixopt/clustering/base.py - Added metrics field to Clustering class
  3. tests/test_clustering/test_integration.py - Added tests for new parameters
  4. docs/user-guide/optimization/clustering.md - Updated documentation

* Dimension renamed: original_period → original_cluster
  Property renamed: n_original_periods → n_original_clusters

* Problem: Expanded FlowSystem from clustering didn't have the extra timestep that regular FlowSystems have.

  Root Cause: In expand_solution(), the solution was only indexed by original_timesteps (n elements) instead of original_timesteps_extra (n+1 elements).

  Fix in flixopt/transform_accessor.py:

  1. Reindex solution to timesteps_extra (line 1296-1298):
     - Added expanded_fs._solution.reindex(time=original_timesteps_extra) for consistency with non-expanded FlowSystems
  2. Fill extra timestep for charge_state (lines 1300-1333):
     - Added special handling to properly fill the extra timestep for storage charge_state variables using the last cluster's extra timestep value
  3. Updated intercluster storage handling (lines 1340-1388):
     - Modified to work with original_timesteps_extra instead of just original_timesteps
     - The extra timestep now correctly gets the final SOC boundary value with proper decay applied

  Tests updated in tests/test_cluster_reduce_expand.py:
  - Updated 4 assertions that check solution time coordinates to expect 193 (192 + 1 extra) instead of 192
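For illustration, the extended call reads roughly as follows. This is a sketch, not verbatim project code: parameter names come from the table above, `n_clusters`/`cluster_duration` are the arguments used elsewhere in this series, and `random_state` is left out because a later review commit removed it.

```python
# Sketch only: defaults moved to 'hierarchical'/'medoidRepresentation' during
# review, so values are passed explicitly here rather than relied on.
fs_clustered = flow_system.transform.cluster(
    n_clusters=8,
    cluster_duration='1D',
    cluster_method='hierarchical',                 # deterministic, unlike 'k_means'
    representation_method='medoidRepresentation',
    extreme_period_method='new_cluster_center',    # how peak segments are integrated
    rescale_cluster_periods=True,                  # match original per-series means
)

# Per-series accuracy indicators (RMSE, MAE, ...) as a DataFrame:
print(fs_clustered.clustering.metrics)
```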
* - 'variable' is treated as a special valid facet value (since it exists in the melted DataFrame from data_var names, not as a dimension)
  - When facet_row='variable' or facet_col='variable' is passed, it's passed through directly
  - In line(), when faceting by variable, it's not also used for color (avoids double encoding)

* Add variable and color to auto resolving in fxplot

* Added 'variable' to both priority lists and updated the logic to treat it consistently:

  flixopt/config.py:
  'extra_dim_priority': ('variable', 'cluster', 'period', 'scenario'),
  'x_dim_priority': ('time', 'duration', 'duration_pct', 'variable', 'period', 'scenario', 'cluster'),

  flixopt/dataset_plot_accessor.py:
  - _get_x_dim: Now takes n_data_vars parameter; 'variable' is available when > 1
  - _resolve_auto_facets: 'variable' is available when len(data_vars) > 1 and respects exclude_dims

  Behavior:
  - 'variable' is treated like any other dimension in the priority system
  - Only available when there are multiple data_vars
  - Properly excluded when already used (e.g., for x-axis)

* Improve plotting, especially for clustering

* Drop cluster index when expanding

* Fix storage expansion

* Improve clustering

* fix scatter plot faceting

* Fixed the documentation in the notebook:

  1. Cell 32 (API Reference table): Updated defaults to 'hierarchical', 'medoidRepresentation', and None
  2. Cell 16: Swapped the example to show k_means as the alternative (since hierarchical is now default)
  3. Cell 17: Updated variable names to match
  4. Cell 33 (Key Takeaways): Clarified that random_state is only needed for non-deterministic methods like 'k_means'

  The code review

* 1. Error handling for accuracyIndicators() - Added try/except with warning log and empty DataFrame fallback, plus handling empty DataFrames when building the metrics Dataset
  2. Random state to tsam - Replaced global np.random.seed() with passing seed parameter directly to tsam's TimeSeriesAggregation
  3. tsam_kwargs conflict validation - Added validation that raises ValueError if user tries to override explicit parameters via **tsam_kwargs (including seed)
  4. predef_cluster_order validation - Added dimension validation for DataArray inputs, checking they match the FlowSystem's period/scenario structure
  5. Out-of-bounds fix - Clamped last_original_cluster_idx to n_original_clusters - 1 to handle partial clusters at the end

* 1. DataFrame truth ambiguity - Changed non_empty_metrics.get(first_key) or next(...) to explicit if metrics_df is None: check
  2. removed random state

* Fix pie plot animation frame and add warnings for unassigned dims

* Change logger warning to regular warning

* The centralized slot assignment system is now complete. Here's a summary of the changes made:

  Changes Made

  1. flixopt/config.py
     - Replaced three separate config attributes (extra_dim_priority, dim_slot_priority, x_dim_priority) with a single unified dim_priority tuple
     - Updated CONFIG.Plotting class docstring and attribute definitions
     - Updated to_dict() method to use the new attribute
     - The new priority order: ('time', 'duration', 'duration_pct', 'variable', 'cluster', 'period', 'scenario')
  2. flixopt/dataset_plot_accessor.py
     - Created new assign_slots() function that centralizes all dimension-to-slot assignment logic
     - Fixed slot fill order: x → color → facet_col → facet_row → animation_frame
     - Updated all plot methods (bar, stacked_bar, line, area, heatmap, scatter, pie) to use assign_slots()
     - Removed old _get_x_dim() and _resolve_auto_facets() functions
     - Updated docstrings to reference dim_priority instead of x_dim_priority
  3. flixopt/statistics_accessor.py
     - Updated _resolve_auto_facets() to use the new assign_slots() function internally
     - Added import for assign_slots from dataset_plot_accessor

  Key Design Decisions
  - Single priority list controls all auto-assignment
  - Slots are filled in fixed order based on availability
  - None means a slot is not available for that plot type
  - 'auto' triggers auto-assignment from priority list
  - Explicit string values override auto-assignment
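For intuition, the priority list and fill order combine like this. The helper below is a hypothetical illustration of the behavior described above, not the actual assign_slots() signature:

```python
# Hypothetical sketch: walk dim_priority and give each dimension present in
# the data the next free slot, in the fixed order
# x -> color -> facet_col -> facet_row -> animation_frame.
dim_priority = ('time', 'duration', 'duration_pct', 'variable', 'cluster', 'period', 'scenario')
slots = ['x', 'color', 'facet_col', 'facet_row', 'animation_frame']

def assign(dims_present: set) -> dict:
    free = iter(slots)
    return {dim: next(free) for dim in dim_priority if dim in dims_present}

print(assign({'time', 'cluster', 'period', 'scenario'}))
# {'time': 'x', 'cluster': 'color', 'period': 'facet_col', 'scenario': 'facet_row'}
```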
* Add slot_order to config

* Add new assign_slots() method

* Fix heatmap and convert all to use fxplot

* Fix heatmap

* Squeeze singleton dims in heatmap()
---
 CHANGELOG.md                                  | 132 ++++-
 docs/notebooks/01-quickstart.ipynb            |  23 +-
 docs/notebooks/02-heat-system.ipynb           |  73 ++-
 .../03-investment-optimization.ipynb          | 143 ++++-
 .../04-operational-constraints.ipynb          |  58 ++-
 docs/notebooks/05-multi-carrier-system.ipynb  | 158 +++---
 .../06a-time-varying-parameters.ipynb         |  87 +++-
 docs/notebooks/07-scenarios-and-periods.ipynb | 199 ++++---
 docs/notebooks/08a-aggregation.ipynb          |  28 +-
 docs/notebooks/08b-rolling-horizon.ipynb      |  27 +-
 docs/notebooks/08c-clustering.ipynb           | 284 +++++---
 .../09-plotting-and-data-access.ipynb         |  29 +-
 docs/user-guide/optimization/clustering.md    |  56 ++
 docs/user-guide/results-plotting.md           |   3 +
 flixopt/clustering/base.py                    | 258 +++++-----
 flixopt/clustering/intercluster_helpers.py    |   6 +-
 flixopt/components.py                         |  45 +-
 flixopt/config.py                             |  32 +-
 flixopt/dataset_plot_accessor.py              | 485 +++++++++++-------
 flixopt/statistics_accessor.py                | 342 ++++--------
 flixopt/transform_accessor.py                 | 230 ++++++++-
 tests/test_cluster_reduce_expand.py           |  14 +-
 tests/test_clustering/test_base.py            |  10 +-
 tests/test_clustering/test_integration.py     |  98 ++++
 24 files changed, 1798 insertions(+), 1022 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0176bced1..68d3d6b92 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -51,9 +51,12 @@ If upgrading from v2.x, see the [v3.0.0 release notes](https://github.com/flixOp
 Until here -->
 
-## [5.1.0] - Upcoming
+## [6.0.0] - Upcoming
 
-**Summary**: Time-series clustering for faster optimization with configurable storage behavior across typical periods. Improved weights API with always-normalized scenario weights.
+**Summary**: Major release introducing time-series clustering with storage inter-cluster linking, the new `fxplot` accessor for universal xarray plotting, and removal of deprecated v5.0 classes. Includes configurable storage behavior across typical periods and improved weights API.
+
+!!! warning "Breaking Changes"
+    This release removes `ClusteredOptimization` and `ClusteringParameters` which were deprecated in v5.0.0. Use `flow_system.transform.cluster()` instead. See [Migration](#migration-from-clusteredoptimization) below.
 
 ### ✨ Added
 
@@ -121,6 +124,44 @@ charge_state = fs_expanded.solution['SeasonalPit|charge_state']
 
 Use `'cyclic'` for short-term storage like batteries or hot water tanks where only daily patterns matter.
Use `'independent'` for quick estimates when storage behavior isn't critical. +**FXPlot Accessor**: New global xarray accessors for universal plotting with automatic faceting and smart dimension handling. Works on any xarray Dataset, not just flixopt results. + +```python +import flixopt as fx # Registers accessors automatically + +# Plot any xarray Dataset with automatic faceting +dataset.fxplot.bar(x='component') +dataset.fxplot.area(x='time') +dataset.fxplot.heatmap(x='time', y='component') +dataset.fxplot.line(x='time', facet_col='scenario') + +# DataArray support +data_array.fxplot.line() + +# Statistics transformations +dataset.fxstats.to_duration_curve() +``` + +**Available Plot Methods**: + +| Method | Description | +|--------|-------------| +| `.fxplot.bar()` | Grouped bar charts | +| `.fxplot.stacked_bar()` | Stacked bar charts | +| `.fxplot.line()` | Line charts with faceting | +| `.fxplot.area()` | Stacked area charts | +| `.fxplot.heatmap()` | Heatmap visualizations | +| `.fxplot.scatter()` | Scatter plots | +| `.fxplot.pie()` | Pie charts with faceting | +| `.fxstats.to_duration_curve()` | Transform to duration curve format | + +**Key Features**: + +- **Auto-faceting**: Automatically assigns extra dimensions (period, scenario, cluster) to `facet_col`, `facet_row`, or `animation_frame` +- **Smart x-axis**: Intelligently selects x dimension based on priority (time > duration > period > scenario) +- **Universal**: Works on any xarray Dataset/DataArray, not limited to flixopt +- **Configurable**: Customize via `CONFIG.Plotting` (colorscales, facet columns, line shapes) + ### 💥 Breaking Changes - `FlowSystem.scenario_weights` are now always normalized to sum to 1 when set (including after `.sel()` subsetting) @@ -132,12 +173,94 @@ charge_state = fs_expanded.solution['SeasonalPit|charge_state'] ### 🗑️ Deprecated +The following items are deprecated and will be removed in **v7.0.0**: + +**Classes** (use FlowSystem methods instead): + +- `Optimization` class → Use `flow_system.optimize(solver)` +- `SegmentedOptimization` class → Use `flow_system.optimize.rolling_horizon()` +- `Results` class → Use `flow_system.solution` and `flow_system.statistics` +- `SegmentedResults` class → Use segment FlowSystems directly + +**FlowSystem methods** (use `transform` or `topology` accessor instead): + +- `flow_system.sel()` → Use `flow_system.transform.sel()` +- `flow_system.isel()` → Use `flow_system.transform.isel()` +- `flow_system.resample()` → Use `flow_system.transform.resample()` +- `flow_system.plot_network()` → Use `flow_system.topology.plot()` +- `flow_system.start_network_app()` → Use `flow_system.topology.start_app()` +- `flow_system.stop_network_app()` → Use `flow_system.topology.stop_app()` +- `flow_system.network_infos()` → Use `flow_system.topology.infos()` + +**Parameters:** + - `normalize_weights` parameter in `create_model()`, `build_model()`, `optimize()` +**Topology method name simplifications** (old names still work with deprecation warnings, removal in v7.0.0): + +| Old (v5.x) | New (v6.0.0) | +|------------|--------------| +| `topology.plot_network()` | `topology.plot()` | +| `topology.start_network_app()` | `topology.start_app()` | +| `topology.stop_network_app()` | `topology.stop_app()` | +| `topology.network_infos()` | `topology.infos()` | + +Note: `topology.plot()` now renders a Sankey diagram. The old PyVis visualization is available via `topology.plot_legacy()`. 
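+For illustration, a minimal before/after sketch of these renames, assuming an existing `fs` FlowSystem (only the method names are taken from the table above):
+
+```python
+# v5.x names: still work, but emit deprecation warnings until removal in v7.0.0
+fs.topology.plot_network()
+fs.topology.network_infos()
+
+# v6.0.0 names
+fs.topology.plot()         # now renders a Sankey diagram
+fs.topology.infos()
+fs.topology.plot_legacy()  # the old PyVis visualization
+```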
+ +### 🔥 Removed + +**Clustering classes removed** (deprecated in v5.0.0): + +- `ClusteredOptimization` class - Use `flow_system.transform.cluster()` then `optimize()` +- `ClusteringParameters` class - Parameters are now passed directly to `transform.cluster()` +- `flixopt/clustering.py` module - Restructured to `flixopt/clustering/` package with new classes + +#### Migration from ClusteredOptimization + +=== "v5.x (Old - No longer works)" + ```python + from flixopt import ClusteredOptimization, ClusteringParameters + + params = ClusteringParameters(hours_per_period=24, nr_of_periods=8) + calc = ClusteredOptimization('model', flow_system, params) + calc.do_modeling_and_solve(solver) + results = calc.results + ``` + +=== "v6.0.0 (New)" + ```python + # Cluster using transform accessor + fs_clustered = flow_system.transform.cluster( + n_clusters=8, # was: nr_of_periods + cluster_duration='1D', # was: hours_per_period=24 + ) + fs_clustered.optimize(solver) + + # Results on the clustered FlowSystem + costs = fs_clustered.solution['costs'].item() + + # Expand back to full resolution if needed + fs_expanded = fs_clustered.transform.expand_solution() + ``` + ### 🐛 Fixed - `temporal_weight` and `sum_temporal()` now use consistent implementation +### 📝 Docs + +**New Documentation Pages:** + +- [Time-Series Clustering Guide](https://flixopt.github.io/flixopt/latest/user-guide/optimization/clustering/) - Comprehensive guide to clustering workflows + +**New Jupyter Notebooks:** + +- **08c-clustering.ipynb** - Introduction to time-series clustering +- **08c2-clustering-storage-modes.ipynb** - Comparison of all 4 storage cluster modes +- **08d-clustering-multiperiod.ipynb** - Clustering with periods and scenarios +- **08e-clustering-internals.ipynb** - Understanding clustering internals +- **fxplot_accessor_demo.ipynb** - Demo of the new fxplot accessor + ### 👷 Development **New Test Suites for Clustering**: @@ -147,6 +270,11 @@ charge_state = fs_expanded.solution['SeasonalPit|charge_state'] - `TestMultiPeriodClustering`: Tests for clustering with periods and scenarios dimensions - `TestPeakSelection`: Tests for `time_series_for_high_peaks` and `time_series_for_low_peaks` parameters +**New Test Suites for Other Features**: + +- `test_clustering_io.py` - Tests for clustering serialization roundtrip +- `test_sel_isel_single_selection.py` - Tests for transform selection methods + --- ## [5.0.4] - 2026-01-05 diff --git a/docs/notebooks/01-quickstart.ipynb b/docs/notebooks/01-quickstart.ipynb index 9e6850214..1500bce77 100644 --- a/docs/notebooks/01-quickstart.ipynb +++ b/docs/notebooks/01-quickstart.ipynb @@ -34,6 +34,7 @@ "outputs": [], "source": [ "import pandas as pd\n", + "import plotly.express as px\n", "import xarray as xr\n", "\n", "import flixopt as fx\n", @@ -58,7 +59,8 @@ "metadata": {}, "outputs": [], "source": [ - "timesteps = pd.date_range('2024-01-15 08:00', periods=4, freq='h')" + "timesteps = pd.date_range('2024-01-15 08:00', periods=4, freq='h')\n", + "print(f'Optimizing from {timesteps[0]} to {timesteps[-1]}')" ] }, { @@ -86,8 +88,9 @@ " name='Heat Demand [kW]',\n", ")\n", "\n", - "# Visualize the demand with fxplot accessor\n", - "heat_demand.to_dataset().fxplot.bar(title='Heat Demand')" + "# Visualize the demand with plotly\n", + "fig = px.bar(x=heat_demand.time.values, y=heat_demand.values, labels={'x': 'Time', 'y': 'Heat Demand [kW]'})\n", + "fig" ] }, { @@ -200,18 +203,14 @@ "metadata": {}, "outputs": [], "source": [ + "total_costs = flow_system.solution['costs'].item()\n", "total_heat 
= float(heat_demand.sum())\n", "gas_consumed = total_heat / 0.9 # Account for boiler efficiency\n", "\n", - "pd.DataFrame(\n", - " {\n", - " 'Total heat demand [kWh]': total_heat,\n", - " 'Gas consumed [kWh]': gas_consumed,\n", - " 'Total costs [EUR]': flow_system.solution['costs'].item(),\n", - " 'Average cost [EUR/kWh_heat]': flow_system.solution['costs'].item() / total_heat,\n", - " },\n", - " index=['Value'],\n", - ").T" + "print(f'Total heat demand: {total_heat:.1f} kWh')\n", + "print(f'Gas consumed: {gas_consumed:.1f} kWh')\n", + "print(f'Total costs: {total_costs:.2f} €')\n", + "print(f'Average cost: {total_costs / total_heat:.3f} €/kWh_heat')" ] }, { diff --git a/docs/notebooks/02-heat-system.ipynb b/docs/notebooks/02-heat-system.ipynb index 9d71dc69a..3ff933ec3 100644 --- a/docs/notebooks/02-heat-system.ipynb +++ b/docs/notebooks/02-heat-system.ipynb @@ -32,7 +32,9 @@ "metadata": {}, "outputs": [], "source": [ + "import numpy as np\n", "import pandas as pd\n", + "import plotly.express as px\n", "import xarray as xr\n", "\n", "import flixopt as fx\n", @@ -57,12 +59,33 @@ "metadata": {}, "outputs": [], "source": [ - "from data.tutorial_data import get_heat_system_data\n", + "# One week, hourly resolution\n", + "timesteps = pd.date_range('2024-01-15', periods=168, freq='h')\n", "\n", - "data = get_heat_system_data()\n", - "timesteps = data['timesteps']\n", - "heat_demand = data['heat_demand']\n", - "gas_price = data['gas_price']" + "# Create realistic office heat demand pattern\n", + "hours = np.arange(168)\n", + "hour_of_day = hours % 24\n", + "day_of_week = (hours // 24) % 7\n", + "\n", + "# Base demand pattern (kW)\n", + "base_demand = np.where(\n", + " (hour_of_day >= 7) & (hour_of_day <= 18), # Office hours\n", + " 80, # Daytime\n", + " 30, # Night setback\n", + ")\n", + "\n", + "# Reduce on weekends (days 5, 6)\n", + "weekend_factor = np.where(day_of_week >= 5, 0.5, 1.0)\n", + "heat_demand = base_demand * weekend_factor\n", + "\n", + "# Add some random variation\n", + "np.random.seed(42)\n", + "heat_demand = heat_demand + np.random.normal(0, 5, len(heat_demand))\n", + "heat_demand = np.clip(heat_demand, 20, 100)\n", + "\n", + "print(f'Time range: {timesteps[0]} to {timesteps[-1]}')\n", + "print(f'Peak demand: {heat_demand.max():.1f} kW')\n", + "print(f'Total demand: {heat_demand.sum():.0f} kWh')" ] }, { @@ -72,13 +95,15 @@ "metadata": {}, "outputs": [], "source": [ - "# Visualize the demand pattern with fxplot\n", - "demand_ds = xr.Dataset(\n", - " {\n", - " 'Heat Demand [kW]': xr.DataArray(heat_demand, dims=['time'], coords={'time': timesteps}),\n", - " }\n", + "# Visualize the demand pattern with plotly\n", + "demand_series = xr.DataArray(heat_demand, dims=['time'], coords={'time': timesteps}, name='Heat Demand [kW]')\n", + "fig = px.line(\n", + " x=demand_series.time.values,\n", + " y=demand_series.values,\n", + " title='Office Heat Demand Profile',\n", + " labels={'x': 'Time', 'y': 'kW'},\n", ")\n", - "demand_ds.fxplot.line(title='Office Heat Demand Profile')" + "fig" ] }, { @@ -98,13 +123,15 @@ "metadata": {}, "outputs": [], "source": [ - "# Visualize gas price with fxplot\n", - "price_ds = xr.Dataset(\n", - " {\n", - " 'Gas Price [EUR/kWh]': xr.DataArray(gas_price, dims=['time'], coords={'time': timesteps}),\n", - " }\n", + "# Time-of-use gas prices (€/kWh)\n", + "gas_price = np.where(\n", + " (hour_of_day >= 6) & (hour_of_day <= 22),\n", + " 0.08, # Peak: 6am-10pm\n", + " 0.05, # Off-peak: 10pm-6am\n", ")\n", - "price_ds.fxplot.line(title='Gas Price')" + "\n", + "fig = 
px.line(x=timesteps, y=gas_price, title='Gas Price [€/kWh]', labels={'x': 'Time', 'y': '€/kWh'})\n", + "fig" ] }, { @@ -282,16 +309,12 @@ "metadata": {}, "outputs": [], "source": [ + "total_costs = flow_system.solution['costs'].item()\n", "total_heat = heat_demand.sum()\n", "\n", - "pd.DataFrame(\n", - " {\n", - " 'Total operating costs [EUR]': flow_system.solution['costs'].item(),\n", - " 'Total heat delivered [kWh]': total_heat,\n", - " 'Average cost [ct/kWh]': flow_system.solution['costs'].item() / total_heat * 100,\n", - " },\n", - " index=['Value'],\n", - ").T" + "print(f'Total operating costs: {total_costs:.2f} €')\n", + "print(f'Total heat delivered: {total_heat:.0f} kWh')\n", + "print(f'Average cost: {total_costs / total_heat * 100:.2f} ct/kWh')" ] }, { diff --git a/docs/notebooks/03-investment-optimization.ipynb b/docs/notebooks/03-investment-optimization.ipynb index 9ddb17df4..349c84ccf 100644 --- a/docs/notebooks/03-investment-optimization.ipynb +++ b/docs/notebooks/03-investment-optimization.ipynb @@ -32,7 +32,9 @@ "metadata": {}, "outputs": [], "source": [ + "import numpy as np\n", "import pandas as pd\n", + "import plotly.express as px\n", "import xarray as xr\n", "\n", "import flixopt as fx\n", @@ -82,15 +84,26 @@ "metadata": {}, "outputs": [], "source": [ - "from data.tutorial_data import get_investment_data\n", - "\n", - "data = get_investment_data()\n", - "timesteps = data['timesteps']\n", - "solar_profile = data['solar_profile']\n", - "pool_demand = data['pool_demand']\n", - "GAS_PRICE = data['gas_price']\n", - "SOLAR_COST_WEEKLY = data['solar_cost_per_kw_week']\n", - "TANK_COST_WEEKLY = data['tank_cost_per_kwh_week']" + "# One week in summer, hourly\n", + "timesteps = pd.date_range('2024-07-15', periods=168, freq='h')\n", + "hours = np.arange(168)\n", + "hour_of_day = hours % 24\n", + "\n", + "# Solar radiation profile (kW/m² equivalent, simplified)\n", + "# Peak around noon, zero at night\n", + "solar_profile = np.maximum(0, np.sin((hour_of_day - 6) * np.pi / 12)) * 0.8\n", + "solar_profile = np.where((hour_of_day >= 6) & (hour_of_day <= 20), solar_profile, 0)\n", + "\n", + "# Add some cloud variation\n", + "np.random.seed(42)\n", + "cloud_factor = np.random.uniform(0.6, 1.0, len(timesteps))\n", + "solar_profile = solar_profile * cloud_factor\n", + "\n", + "# Pool operates 8am-10pm, constant demand when open\n", + "pool_demand = np.where((hour_of_day >= 8) & (hour_of_day <= 22), 150, 50) # kW\n", + "\n", + "print(f'Peak solar: {solar_profile.max():.2f} kW/kW_installed')\n", + "print(f'Pool demand: {pool_demand.max():.0f} kW (open), {pool_demand.min():.0f} kW (closed)')" ] }, { @@ -100,14 +113,20 @@ "metadata": {}, "outputs": [], "source": [ - "# Visualize profiles with fxplot\n", + "# Visualize profiles with plotly - using xarray and faceting\n", "profiles = xr.Dataset(\n", " {\n", " 'Solar Profile [kW/kW]': xr.DataArray(solar_profile, dims=['time'], coords={'time': timesteps}),\n", " 'Pool Demand [kW]': xr.DataArray(pool_demand, dims=['time'], coords={'time': timesteps}),\n", " }\n", ")\n", - "profiles.fxplot.line(title='Input Profiles')" + "\n", + "# Convert to long format for faceting\n", + "df = profiles.to_dataframe().reset_index().melt(id_vars='time', var_name='variable', value_name='value')\n", + "fig = px.line(df, x='time', y='value', facet_col='variable', height=300)\n", + "fig.update_yaxes(matches=None, showticklabels=True)\n", + "fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1]))\n", + "fig" ] }, { @@ -121,9 +140,36 @@ ] }, { - 
"cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "id": "8", "metadata": {}, + "outputs": [], + "source": [ + "# Cost parameters\n", + "GAS_PRICE = 0.12 # €/kWh - high gas price makes solar attractive\n", + "\n", + "# Solar collectors: 400 €/kW installed, 20-year lifetime → ~25 €/kW/year annualized\n", + "# (simplified, real calculation would include interest rate)\n", + "SOLAR_COST_PER_KW = 20 # €/kW/year\n", + "\n", + "# Buffer tank: 50 €/kWh capacity, 30-year lifetime → ~2 €/kWh/year\n", + "TANK_COST_PER_KWH = 1.5 # €/kWh/year\n", + "\n", + "# Scale factor: We model 1 week, but costs are annual\n", + "# So we scale investment costs to weekly equivalent\n", + "WEEKS_PER_YEAR = 52\n", + "SOLAR_COST_WEEKLY = SOLAR_COST_PER_KW / WEEKS_PER_YEAR\n", + "TANK_COST_WEEKLY = TANK_COST_PER_KWH / WEEKS_PER_YEAR\n", + "\n", + "print(f'Solar cost: {SOLAR_COST_WEEKLY:.3f} €/kW/week')\n", + "print(f'Tank cost: {TANK_COST_WEEKLY:.4f} €/kWh/week')" + ] + }, + { + "cell_type": "markdown", + "id": "9", + "metadata": {}, "source": [ "## Build the System with Investment Options\n", "\n", @@ -133,7 +179,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -204,7 +250,7 @@ }, { "cell_type": "markdown", - "id": "10", + "id": "11", "metadata": {}, "source": [ "## Run Optimization" @@ -213,7 +259,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -222,7 +268,7 @@ }, { "cell_type": "markdown", - "id": "12", + "id": "13", "metadata": {}, "source": [ "## Analyze Investment Decisions\n", @@ -233,26 +279,21 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "14", "metadata": {}, "outputs": [], "source": [ "solar_size = flow_system.statistics.sizes['SolarCollectors(Heat)'].item()\n", "tank_size = flow_system.statistics.sizes['BufferTank'].item()\n", "\n", - "pd.DataFrame(\n", - " {\n", - " 'Solar [kW]': solar_size,\n", - " 'Tank [kWh]': tank_size,\n", - " 'Ratio [kWh/kW]': tank_size / solar_size,\n", - " },\n", - " index=['Optimal Size'],\n", - ").T" + "print(\n", + " f'Optimal sizes: Solar {solar_size:.0f} kW, Tank {tank_size:.0f} kWh (ratio: {tank_size / solar_size:.1f} kWh/kW)'\n", + ")" ] }, { "cell_type": "markdown", - "id": "14", + "id": "15", "metadata": {}, "source": [ "### Visualize Sizes" @@ -261,7 +302,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -270,7 +311,7 @@ }, { "cell_type": "markdown", - "id": "16", + "id": "17", "metadata": {}, "source": [ "### Cost Breakdown" @@ -279,7 +320,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "18", "metadata": {}, "outputs": [], "source": [ @@ -290,19 +331,14 @@ "tank_invest = tank_size * TANK_COST_WEEKLY\n", "gas_costs = total_costs - solar_invest - tank_invest\n", "\n", - "pd.DataFrame(\n", - " {\n", - " 'Solar Investment': {'EUR': solar_invest, '%': solar_invest / total_costs * 100},\n", - " 'Tank Investment': {'EUR': tank_invest, '%': tank_invest / total_costs * 100},\n", - " 'Gas Costs': {'EUR': gas_costs, '%': gas_costs / total_costs * 100},\n", - " 'Total': {'EUR': total_costs, '%': 100.0},\n", - " }\n", + "print(\n", + " f'Weekly costs: Solar {solar_invest:.1f}€ ({solar_invest / total_costs * 100:.0f}%) + Tank {tank_invest:.1f}€ ({tank_invest / total_costs * 100:.0f}%) + Gas {gas_costs:.1f}€ ({gas_costs / total_costs * 100:.0f}%) = {total_costs:.1f}€'\n", ")" ] }, 
{ "cell_type": "markdown", - "id": "18", + "id": "19", "metadata": {}, "source": [ "### System Operation" @@ -311,7 +347,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -321,7 +357,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -331,7 +367,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -340,7 +376,7 @@ }, { "cell_type": "markdown", - "id": "22", + "id": "23", "metadata": {}, "source": [ "## Compare: What if No Solar?\n", @@ -351,30 +387,23 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "24", "metadata": {}, "outputs": [], "source": [ - "# Gas-only scenario for comparison\n", + "# Gas-only scenario\n", "total_demand = pool_demand.sum()\n", "gas_only_cost = total_demand / 0.92 * GAS_PRICE # All heat from gas boiler\n", - "savings = gas_only_cost - total_costs\n", "\n", - "pd.DataFrame(\n", - " {\n", - " 'Gas-only [EUR/week]': gas_only_cost,\n", - " 'With Solar [EUR/week]': total_costs,\n", - " 'Savings [EUR/week]': savings,\n", - " 'Savings [%]': savings / gas_only_cost * 100,\n", - " 'Savings [EUR/year]': savings * 52,\n", - " },\n", - " index=['Value'],\n", - ").T" + "savings = gas_only_cost - total_costs\n", + "print(\n", + " f'Solar saves {savings:.1f}€/week ({savings / gas_only_cost * 100:.0f}%) vs gas-only ({gas_only_cost:.1f}€) → {savings * 52:.0f}€/year'\n", + ")" ] }, { "cell_type": "markdown", - "id": "24", + "id": "25", "metadata": {}, "source": [ "### Energy Flow Sankey\n", @@ -385,7 +414,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "26", "metadata": {}, "outputs": [], "source": [ @@ -394,7 +423,7 @@ }, { "cell_type": "markdown", - "id": "26", + "id": "27", "metadata": {}, "source": [ "## Key Concepts\n", diff --git a/docs/notebooks/04-operational-constraints.ipynb b/docs/notebooks/04-operational-constraints.ipynb index 2f2163886..fbb611d1c 100644 --- a/docs/notebooks/04-operational-constraints.ipynb +++ b/docs/notebooks/04-operational-constraints.ipynb @@ -32,6 +32,7 @@ "metadata": {}, "outputs": [], "source": [ + "import numpy as np\n", "import pandas as pd\n", "import plotly.express as px\n", "import xarray as xr\n", @@ -72,11 +73,32 @@ "metadata": {}, "outputs": [], "source": [ - "from data.tutorial_data import get_constraints_data\n", + "# 3 days, hourly resolution\n", + "timesteps = pd.date_range('2024-03-11', periods=72, freq='h')\n", + "hours = np.arange(72)\n", + "hour_of_day = hours % 24\n", + "\n", + "# Factory operates in shifts:\n", + "# - Day shift (6am-2pm): 400 kW\n", + "# - Evening shift (2pm-10pm): 350 kW\n", + "# - Night (10pm-6am): 80 kW (maintenance heating only)\n", + "\n", + "steam_demand = np.select(\n", + " [\n", + " (hour_of_day >= 6) & (hour_of_day < 14), # Day shift\n", + " (hour_of_day >= 14) & (hour_of_day < 22), # Evening shift\n", + " ],\n", + " [400, 350],\n", + " default=80, # Night\n", + ")\n", + "\n", + "# Add some variation\n", + "np.random.seed(123)\n", + "steam_demand = steam_demand + np.random.normal(0, 20, len(steam_demand))\n", + "steam_demand = np.clip(steam_demand, 50, 450).astype(float)\n", "\n", - "data = get_constraints_data()\n", - "timesteps = data['timesteps']\n", - "steam_demand = data['steam_demand']" + "print(f'Peak demand: {steam_demand.max():.0f} kW')\n", + "print(f'Min demand: {steam_demand.min():.0f} kW')" ] }, { @@ -86,13 +108,7 @@ 
"metadata": {}, "outputs": [], "source": [ - "# Visualize the demand with fxplot\n", - "demand_ds = xr.Dataset(\n", - " {\n", - " 'Steam Demand [kW]': xr.DataArray(steam_demand, dims=['time'], coords={'time': timesteps}),\n", - " }\n", - ")\n", - "demand_ds.fxplot.line(title='Factory Steam Demand')" + "px.line(x=timesteps, y=steam_demand, title='Factory Steam Demand', labels={'x': 'Time', 'y': 'kW'})" ] }, { @@ -278,12 +294,8 @@ "startup_costs = total_startups * 50\n", "gas_costs = total_costs - startup_costs\n", "\n", - "pd.DataFrame(\n", - " {\n", - " 'Startups': {'Count': total_startups, 'EUR': startup_costs},\n", - " 'Gas': {'Count': '-', 'EUR': gas_costs},\n", - " 'Total': {'Count': '-', 'EUR': total_costs},\n", - " }\n", + "print(\n", + " f'{total_startups} startups × 50€ = {startup_costs:.0f}€ startup + {gas_costs:.0f}€ gas = {total_costs:.0f}€ total'\n", ")" ] }, @@ -365,16 +377,8 @@ "fs_unconstrained.optimize(fx.solvers.HighsSolver())\n", "unconstrained_costs = fs_unconstrained.solution['costs'].item()\n", "\n", - "pd.DataFrame(\n", - " {\n", - " 'Without Constraints': {'Cost [EUR]': unconstrained_costs},\n", - " 'With Constraints': {'Cost [EUR]': total_costs},\n", - " 'Overhead': {\n", - " 'Cost [EUR]': total_costs - unconstrained_costs,\n", - " '%': (total_costs - unconstrained_costs) / unconstrained_costs * 100,\n", - " },\n", - " }\n", - ")" + "constraint_overhead = (total_costs - unconstrained_costs) / unconstrained_costs * 100\n", + "print(f'Constraints add {constraint_overhead:.1f}% cost: {unconstrained_costs:.0f}€ → {total_costs:.0f}€')" ] }, { diff --git a/docs/notebooks/05-multi-carrier-system.ipynb b/docs/notebooks/05-multi-carrier-system.ipynb index 707ef517a..a1a9543fa 100644 --- a/docs/notebooks/05-multi-carrier-system.ipynb +++ b/docs/notebooks/05-multi-carrier-system.ipynb @@ -32,7 +32,9 @@ "metadata": {}, "outputs": [], "source": [ + "import numpy as np\n", "import pandas as pd\n", + "import plotly.express as px\n", "import xarray as xr\n", "\n", "import flixopt as fx\n", @@ -83,15 +85,40 @@ "metadata": {}, "outputs": [], "source": [ - "from data.tutorial_data import get_multicarrier_data\n", - "\n", - "data = get_multicarrier_data()\n", - "timesteps = data['timesteps']\n", - "electricity_demand = data['electricity_demand']\n", - "heat_demand = data['heat_demand']\n", - "elec_buy_price = data['elec_buy_price']\n", - "elec_sell_price = data['elec_sell_price']\n", - "gas_price = data['gas_price']" + "# One week, hourly\n", + "timesteps = pd.date_range('2024-02-05', periods=168, freq='h')\n", + "hours = np.arange(168)\n", + "hour_of_day = hours % 24\n", + "\n", + "# Hospital electricity demand (kW)\n", + "# Base load + daily pattern (higher during day for equipment, lighting)\n", + "elec_base = 150 # 24/7 critical systems\n", + "elec_daily = 100 * np.sin((hour_of_day - 6) * np.pi / 12) # Peak at noon\n", + "elec_daily = np.maximum(0, elec_daily)\n", + "electricity_demand = elec_base + elec_daily\n", + "\n", + "# Hospital heat demand (kW)\n", + "# Higher in morning, drops during day, increases for hot water in evening\n", + "heat_pattern = np.select(\n", + " [\n", + " (hour_of_day >= 5) & (hour_of_day < 9), # Morning warmup\n", + " (hour_of_day >= 9) & (hour_of_day < 17), # Daytime\n", + " (hour_of_day >= 17) & (hour_of_day < 22), # Evening\n", + " ],\n", + " [350, 250, 300],\n", + " default=200, # Night\n", + ")\n", + "heat_demand = heat_pattern.astype(float)\n", + "\n", + "# Add random variation\n", + "np.random.seed(456)\n", + "electricity_demand += 
np.random.normal(0, 15, len(timesteps))\n", + "heat_demand += np.random.normal(0, 20, len(timesteps))\n", + "electricity_demand = np.clip(electricity_demand, 100, 300)\n", + "heat_demand = np.clip(heat_demand, 150, 400)\n", + "\n", + "print(f'Electricity: {electricity_demand.min():.0f} - {electricity_demand.max():.0f} kW')\n", + "print(f'Heat: {heat_demand.min():.0f} - {heat_demand.max():.0f} kW')" ] }, { @@ -101,20 +128,47 @@ "metadata": {}, "outputs": [], "source": [ - "# Visualize demands and prices with fxplot\n", + "# Electricity prices (€/kWh)\n", + "# Time-of-use: expensive during day, cheaper at night\n", + "elec_buy_price = np.where(\n", + " (hour_of_day >= 7) & (hour_of_day <= 21),\n", + " 0.35, # Peak - high electricity prices make CHP attractive\n", + " 0.20, # Off-peak\n", + ")\n", + "\n", + "# Feed-in tariff (sell price) - allows selling excess CHP electricity\n", + "elec_sell_price = 0.12 # Fixed feed-in rate\n", + "\n", + "# Gas price - relatively low, favoring gas-based generation\n", + "gas_price = 0.05 # €/kWh" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "# Visualize demands and prices with plotly - using xarray and faceting\n", "profiles = xr.Dataset(\n", " {\n", " 'Electricity Demand [kW]': xr.DataArray(electricity_demand, dims=['time'], coords={'time': timesteps}),\n", " 'Heat Demand [kW]': xr.DataArray(heat_demand, dims=['time'], coords={'time': timesteps}),\n", - " 'Elec. Buy Price [EUR/kWh]': xr.DataArray(elec_buy_price, dims=['time'], coords={'time': timesteps}),\n", + " 'Elec. Buy Price [€/kWh]': xr.DataArray(elec_buy_price, dims=['time'], coords={'time': timesteps}),\n", " }\n", ")\n", - "profiles.fxplot.line(title='Input Profiles')" + "\n", + "df = profiles.to_dataframe().reset_index().melt(id_vars='time', var_name='variable', value_name='value')\n", + "fig = px.line(df, x='time', y='value', facet_col='variable', height=300)\n", + "fig.update_yaxes(matches=None, showticklabels=True)\n", + "fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1]))\n", + "fig" ] }, { "cell_type": "markdown", - "id": "7", + "id": "8", "metadata": {}, "source": [ "## Build the Multi-Carrier System" @@ -123,7 +177,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -216,7 +270,7 @@ }, { "cell_type": "markdown", - "id": "9", + "id": "10", "metadata": {}, "source": [ "## Run Optimization" @@ -225,7 +279,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -234,7 +288,7 @@ }, { "cell_type": "markdown", - "id": "11", + "id": "12", "metadata": {}, "source": [ "## Analyze Results\n", @@ -245,7 +299,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -254,7 +308,7 @@ }, { "cell_type": "markdown", - "id": "13", + "id": "14", "metadata": {}, "source": [ "### Heat Balance" @@ -263,7 +317,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -272,7 +326,7 @@ }, { "cell_type": "markdown", - "id": "15", + "id": "16", "metadata": {}, "source": [ "### Gas Balance" @@ -281,7 +335,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -290,7 +344,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "18", "metadata": {}, "source": [ "### CHP 
Operation Pattern" @@ -299,7 +353,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -308,7 +362,7 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "20", "metadata": {}, "source": [ "### Cost and Emissions Summary" @@ -317,10 +371,13 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "21", "metadata": {}, "outputs": [], "source": [ + "total_costs = flow_system.solution['costs'].item()\n", + "total_co2 = flow_system.solution['CO2'].item()\n", + "\n", "# Energy flows\n", "flow_rates = flow_system.statistics.flow_rates\n", "grid_buy = flow_rates['GridBuy(Electricity)'].sum().item()\n", @@ -332,25 +389,17 @@ "total_elec = electricity_demand.sum()\n", "total_heat = heat_demand.sum()\n", "\n", - "pd.DataFrame(\n", - " {\n", - " 'CHP Electricity [kWh]': chp_elec,\n", - " 'CHP Electricity [%]': chp_elec / total_elec * 100,\n", - " 'Grid Buy [kWh]': grid_buy,\n", - " 'Grid Sell [kWh]': grid_sell,\n", - " 'CHP Heat [kWh]': chp_heat,\n", - " 'CHP Heat [%]': chp_heat / total_heat * 100,\n", - " 'Boiler Heat [kWh]': boiler_heat,\n", - " 'Total Costs [EUR]': flow_system.solution['costs'].item(),\n", - " 'Total CO2 [kg]': flow_system.solution['CO2'].item(),\n", - " },\n", - " index=['Value'],\n", - ").T" + "# Display as compact summary\n", + "print(\n", + " f'Electricity: {chp_elec:.0f} kWh CHP ({chp_elec / total_elec * 100:.0f}%) + {grid_buy:.0f} kWh grid, {grid_sell:.0f} kWh sold'\n", + ")\n", + "print(f'Heat: {chp_heat:.0f} kWh CHP ({chp_heat / total_heat * 100:.0f}%) + {boiler_heat:.0f} kWh boiler')\n", + "print(f'Costs: {total_costs:.2f} € | CO2: {total_co2:.0f} kg')" ] }, { "cell_type": "markdown", - "id": "21", + "id": "22", "metadata": {}, "source": [ "### Compare: What if No CHP?\n", @@ -361,7 +410,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -405,30 +454,19 @@ "\n", "fs_no_chp.optimize(fx.solvers.HighsSolver())\n", "\n", - "total_costs = flow_system.solution['costs'].item()\n", - "total_co2 = flow_system.solution['CO2'].item()\n", "no_chp_costs = fs_no_chp.solution['costs'].item()\n", "no_chp_co2 = fs_no_chp.solution['CO2'].item()\n", "\n", - "pd.DataFrame(\n", - " {\n", - " 'Without CHP': {'Cost [EUR]': no_chp_costs, 'CO2 [kg]': no_chp_co2},\n", - " 'With CHP': {'Cost [EUR]': total_costs, 'CO2 [kg]': total_co2},\n", - " 'Savings': {\n", - " 'Cost [EUR]': no_chp_costs - total_costs,\n", - " 'CO2 [kg]': no_chp_co2 - total_co2,\n", - " },\n", - " 'Savings [%]': {\n", - " 'Cost [EUR]': (no_chp_costs - total_costs) / no_chp_costs * 100,\n", - " 'CO2 [kg]': (no_chp_co2 - total_co2) / no_chp_co2 * 100,\n", - " },\n", - " }\n", + "cost_saving = (no_chp_costs - total_costs) / no_chp_costs * 100\n", + "co2_saving = (no_chp_co2 - total_co2) / no_chp_co2 * 100\n", + "print(\n", + " f'CHP saves {cost_saving:.1f}% costs ({no_chp_costs:.0f}→{total_costs:.0f} €) and {co2_saving:.1f}% CO2 ({no_chp_co2:.0f}→{total_co2:.0f} kg)'\n", ")" ] }, { "cell_type": "markdown", - "id": "23", + "id": "24", "metadata": {}, "source": [ "### Energy Flow Sankey\n", @@ -439,7 +477,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "25", "metadata": {}, "outputs": [], "source": [ @@ -448,7 +486,7 @@ }, { "cell_type": "markdown", - "id": "25", + "id": "26", "metadata": {}, "source": [ "## Key Concepts\n", diff --git a/docs/notebooks/06a-time-varying-parameters.ipynb b/docs/notebooks/06a-time-varying-parameters.ipynb index 
4eaba9854..5c833b2ea 100644 --- a/docs/notebooks/06a-time-varying-parameters.ipynb +++ b/docs/notebooks/06a-time-varying-parameters.ipynb @@ -32,6 +32,7 @@ "outputs": [], "source": [ "import numpy as np\n", + "import pandas as pd\n", "import plotly.express as px\n", "import xarray as xr\n", "\n", @@ -77,13 +78,20 @@ "metadata": {}, "outputs": [], "source": [ - "from data.tutorial_data import get_time_varying_data\n", - "\n", - "data = get_time_varying_data()\n", - "timesteps = data['timesteps']\n", - "outdoor_temp = data['outdoor_temp']\n", - "heat_demand = data['heat_demand']\n", - "cop = data['cop']" + "# One winter week\n", + "timesteps = pd.date_range('2024-01-22', periods=168, freq='h')\n", + "hours = np.arange(168)\n", + "hour_of_day = hours % 24\n", + "\n", + "# Outdoor temperature: daily cycle with cold nights\n", + "temp_base = 2 # Average temp in °C\n", + "temp_amplitude = 5 # Daily variation\n", + "outdoor_temp = temp_base + temp_amplitude * np.sin((hour_of_day - 6) * np.pi / 12)\n", + "\n", + "# Add day-to-day variation for realism\n", + "np.random.seed(789)\n", + "daily_offset = np.repeat(np.random.uniform(-3, 3, 7), 24)\n", + "outdoor_temp = outdoor_temp + daily_offset" ] }, { @@ -93,24 +101,41 @@ "metadata": {}, "outputs": [], "source": [ - "# Visualize input profiles with fxplot\n", + "# Heat demand: inversely related to outdoor temp (higher demand when colder)\n", + "heat_demand = 200 - 8 * outdoor_temp\n", + "heat_demand = np.clip(heat_demand, 100, 300)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7", + "metadata": {}, + "outputs": [], + "source": [ + "# Visualize input profiles\n", "profiles = xr.Dataset(\n", " {\n", " 'Outdoor Temp [°C]': xr.DataArray(outdoor_temp, dims=['time'], coords={'time': timesteps}),\n", " 'Heat Demand [kW]': xr.DataArray(heat_demand, dims=['time'], coords={'time': timesteps}),\n", " }\n", ")\n", - "profiles.fxplot.line(title='Input Profiles')" + "\n", + "df = profiles.to_dataframe().reset_index().melt(id_vars='time', var_name='variable', value_name='value')\n", + "fig = px.line(df, x='time', y='value', facet_col='variable', height=300)\n", + "fig.update_yaxes(matches=None, showticklabels=True)\n", + "fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1]))\n", + "fig" ] }, { "cell_type": "markdown", - "id": "7", + "id": "8", "metadata": {}, "source": [ - "## Time-Varying COP\n", + "## Calculate Time-Varying COP\n", "\n", - "The COP depends on outdoor temperature. The helper function uses a simplified Carnot-based formula:\n", + "The COP depends on outdoor temperature. 
We use a simplified Carnot-based formula:\n", "\n", "$$\\text{COP}_{\\text{real}} \\approx 0.45 \\times \\text{COP}_{\\text{Carnot}} = 0.45 \\times \\frac{T_{\\text{supply}}}{T_{\\text{supply}} - T_{\\text{source}}}$$\n", "\n", @@ -120,14 +145,30 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "9", + "metadata": {}, + "outputs": [], + "source": [ + "# COP calculation\n", + "T_supply = 45 + 273.15 # Supply temperature 45°C in Kelvin\n", + "T_source = outdoor_temp + 273.15 # Outdoor temp in Kelvin\n", + "\n", + "carnot_cop = T_supply / (T_supply - T_source)\n", + "real_cop = 0.45 * carnot_cop\n", + "real_cop = np.clip(real_cop, 2.0, 5.0) # Physical limits" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", "metadata": {}, "outputs": [], "source": [ "# Visualize COP vs temperature relationship\n", "px.scatter(\n", " x=outdoor_temp,\n", - " y=cop,\n", + " y=real_cop,\n", " title='Heat Pump COP vs Outdoor Temperature',\n", " labels={'x': 'Outdoor Temperature [°C]', 'y': 'COP'},\n", " opacity=0.5,\n", @@ -136,7 +177,7 @@ }, { "cell_type": "markdown", - "id": "9", + "id": "11", "metadata": {}, "source": [ "## Build the Model\n", @@ -151,7 +192,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -173,7 +214,7 @@ " 'HeatPump',\n", " inputs=[fx.Flow('Elec', bus='Electricity', size=150)],\n", " outputs=[fx.Flow('Heat', bus='Heat', size=500)],\n", - " conversion_factors=[{'Elec': cop, 'Heat': 1}], # <-- Array for time-varying COP\n", + " conversion_factors=[{'Elec': real_cop, 'Heat': 1}], # <-- Array for time-varying COP\n", " ),\n", " # Heat demand\n", " fx.Sink('Building', inputs=[fx.Flow('Heat', bus='Heat', size=1, fixed_relative_profile=heat_demand)]),\n", @@ -184,7 +225,7 @@ }, { "cell_type": "markdown", - "id": "11", + "id": "13", "metadata": {}, "source": [ "## Analyze Results" @@ -193,7 +234,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -203,7 +244,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -213,7 +254,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -242,7 +283,7 @@ }, { "cell_type": "markdown", - "id": "15", + "id": "17", "metadata": {}, "source": [ "## Key Concepts\n", @@ -282,7 +323,7 @@ }, { "cell_type": "markdown", - "id": "16", + "id": "18", "metadata": {}, "source": [ "## Summary\n", diff --git a/docs/notebooks/07-scenarios-and-periods.ipynb b/docs/notebooks/07-scenarios-and-periods.ipynb index 52d04b0d8..db74afefb 100644 --- a/docs/notebooks/07-scenarios-and-periods.ipynb +++ b/docs/notebooks/07-scenarios-and-periods.ipynb @@ -32,8 +32,9 @@ "metadata": {}, "outputs": [], "source": [ + "import numpy as np\n", "import pandas as pd\n", - "import xarray as xr\n", + "import plotly.express as px\n", "\n", "import flixopt as fx\n", "\n", @@ -71,16 +72,20 @@ "metadata": {}, "outputs": [], "source": [ - "from data.tutorial_data import get_scenarios_data\n", - "\n", - "data = get_scenarios_data()\n", - "timesteps = data['timesteps']\n", - "periods = data['periods']\n", - "scenarios = data['scenarios']\n", - "scenario_weights = data['scenario_weights']\n", - "heat_demand = data['heat_demand']\n", - "gas_prices = data['gas_prices']\n", - "elec_prices = data['elec_prices']" + "# Time horizon: one representative winter week\n", + 
"timesteps = pd.date_range('2024-01-15', periods=168, freq='h') # 7 days\n", + "\n", + "# Planning periods (years)\n", + "periods = pd.Index([2024, 2025, 2026], name='period')\n", + "\n", + "# Scenarios with probabilities\n", + "scenarios = pd.Index(['Mild Winter', 'Harsh Winter'], name='scenario')\n", + "scenario_weights = np.array([0.6, 0.4]) # 60% mild, 40% harsh\n", + "\n", + "print(f'Time dimension: {len(timesteps)} hours')\n", + "print(f'Periods: {list(periods)}')\n", + "print(f'Scenarios: {list(scenarios)}')\n", + "print(f'Scenario weights: {dict(zip(scenarios, scenario_weights, strict=False))}')" ] }, { @@ -88,7 +93,7 @@ "id": "6", "metadata": {}, "source": [ - "## Scenario-Dependent Demand Profiles\n", + "## Create Scenario-Dependent Demand Profiles\n", "\n", "Heat demand differs significantly between mild and harsh winters:" ] @@ -100,24 +105,98 @@ "metadata": {}, "outputs": [], "source": [ - "# Visualize demand scenarios with fxplot\n", - "demand_ds = xr.Dataset(\n", + "hours = np.arange(168)\n", + "hour_of_day = hours % 24\n", + "\n", + "# Base daily pattern (kW): higher in morning/evening\n", + "daily_pattern = np.select(\n", + " [\n", + " (hour_of_day >= 6) & (hour_of_day < 9), # Morning peak\n", + " (hour_of_day >= 9) & (hour_of_day < 17), # Daytime\n", + " (hour_of_day >= 17) & (hour_of_day < 22), # Evening peak\n", + " ],\n", + " [180, 120, 160],\n", + " default=100, # Night\n", + ").astype(float)\n", + "\n", + "# Add random variation\n", + "np.random.seed(42)\n", + "noise = np.random.normal(0, 10, len(timesteps))\n", + "\n", + "# Mild winter: lower demand\n", + "mild_demand = daily_pattern * 0.8 + noise\n", + "mild_demand = np.clip(mild_demand, 60, 200)\n", + "\n", + "# Harsh winter: higher demand\n", + "harsh_demand = daily_pattern * 1.3 + noise * 1.5\n", + "harsh_demand = np.clip(harsh_demand, 100, 280)\n", + "\n", + "# Create DataFrame with scenario columns (flixopt uses column names to match scenarios)\n", + "heat_demand = pd.DataFrame(\n", " {\n", - " scenario: xr.DataArray(\n", - " heat_demand[scenario].values,\n", - " dims=['time'],\n", - " coords={'time': timesteps},\n", - " )\n", - " for scenario in scenarios\n", - " }\n", + " 'Mild Winter': mild_demand,\n", + " 'Harsh Winter': harsh_demand,\n", + " },\n", + " index=timesteps,\n", ")\n", - "demand_ds.fxplot.line(title='Heat Demand by Scenario')" + "\n", + "print(f'Mild winter demand: {mild_demand.min():.0f} - {mild_demand.max():.0f} kW')\n", + "print(f'Harsh winter demand: {harsh_demand.min():.0f} - {harsh_demand.max():.0f} kW')" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "id": "8", "metadata": {}, + "outputs": [], + "source": [ + "# Visualize demand scenarios with plotly\n", + "fig = px.line(\n", + " heat_demand.iloc[:48],\n", + " title='Heat Demand by Scenario (First 2 Days)',\n", + " labels={'index': 'Time', 'value': 'kW', 'variable': 'Scenario'},\n", + ")\n", + "fig.update_traces(mode='lines')\n", + "fig" + ] + }, + { + "cell_type": "markdown", + "id": "9", + "metadata": {}, + "source": [ + "## Create Period-Dependent Prices\n", + "\n", + "Energy prices change across planning years:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "10", + "metadata": {}, + "outputs": [], + "source": [ + "# Gas prices by period (€/kWh) - expected to rise\n", + "gas_prices = np.array([0.06, 0.08, 0.10]) # 2024, 2025, 2026\n", + "\n", + "# Electricity sell prices by period (€/kWh) - CHP revenue\n", + "elec_prices = np.array([0.28, 0.34, 0.43]) # Rising with gas\n", 
+ "\n", + "print('Gas prices by period:')\n", + "for period, price in zip(periods, gas_prices, strict=False):\n", + " print(f' {period}: {price:.2f} €/kWh')\n", + "\n", + "print('\\nElectricity sell prices by period:')\n", + "for period, price in zip(periods, elec_prices, strict=False):\n", + " print(f' {period}: {price:.2f} €/kWh')" + ] + }, + { + "cell_type": "markdown", + "id": "11", + "metadata": {}, "source": [ "## Build the Flow System\n", "\n", @@ -127,7 +206,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -143,12 +222,12 @@ " fx.Carrier('heat', '#e74c3c', 'kW'),\n", ")\n", "\n", - "flow_system" + "print(flow_system)" ] }, { "cell_type": "markdown", - "id": "10", + "id": "13", "metadata": {}, "source": [ "## Add Components" @@ -157,7 +236,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -234,7 +313,7 @@ }, { "cell_type": "markdown", - "id": "12", + "id": "15", "metadata": {}, "source": [ "## Run Optimization" @@ -243,7 +322,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -252,7 +331,7 @@ }, { "cell_type": "markdown", - "id": "14", + "id": "17", "metadata": {}, "source": [ "## Analyze Results\n", @@ -263,25 +342,20 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "18", "metadata": {}, "outputs": [], "source": [ "chp_size = flow_system.statistics.sizes['CHP(P_el)']\n", + "total_cost = flow_system.solution['costs']\n", "\n", - "pd.DataFrame(\n", - " {\n", - " 'CHP Electrical [kW]': float(chp_size.max()),\n", - " 'CHP Thermal [kW]': float(chp_size.max()) * 0.50 / 0.35,\n", - " 'Expected Cost [EUR]': float(flow_system.solution['costs'].sum()),\n", - " },\n", - " index=['Optimal'],\n", - ").T" + "print(f'Optimal CHP: {float(chp_size.max()):.0f} kW electrical ({float(chp_size.max()) * 0.50 / 0.35:.0f} kW thermal)')\n", + "print(f'Expected cost: {float(total_cost.sum()):.0f} €')" ] }, { "cell_type": "markdown", - "id": "16", + "id": "19", "metadata": {}, "source": [ "### Heat Balance by Scenario\n", @@ -292,7 +366,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -301,7 +375,7 @@ }, { "cell_type": "markdown", - "id": "18", + "id": "21", "metadata": {}, "source": [ "### CHP Operation Patterns" @@ -310,7 +384,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -319,7 +393,7 @@ }, { "cell_type": "markdown", - "id": "20", + "id": "23", "metadata": {}, "source": [ "### Multi-Dimensional Data Access\n", @@ -330,11 +404,13 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "24", "metadata": {}, "outputs": [], "source": [ + "# View dimensions\n", "flow_rates = flow_system.statistics.flow_rates\n", + "print('Flow rates dimensions:', dict(flow_rates.sizes))\n", "\n", "# Plot flow rates\n", "flow_system.statistics.plot.flows()" @@ -343,27 +419,22 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "25", "metadata": {}, "outputs": [], "source": [ "# CHP operation summary by scenario\n", "chp_heat = flow_rates['CHP(Q_th)']\n", "\n", - "pd.DataFrame(\n", - " {\n", - " scenario: {\n", - " 'Avg [kW]': float(chp_heat.sel(scenario=scenario).mean()),\n", - " 'Max [kW]': float(chp_heat.sel(scenario=scenario).max()),\n", - " }\n", - " for scenario in scenarios\n", - 
" }\n", - ")" + "for scenario in scenarios:\n", + " scenario_avg = float(chp_heat.sel(scenario=scenario).mean())\n", + " scenario_max = float(chp_heat.sel(scenario=scenario).max())\n", + " print(f'{scenario}: avg {scenario_avg:.0f} kW, max {scenario_max:.0f} kW')" ] }, { "cell_type": "markdown", - "id": "23", + "id": "26", "metadata": {}, "source": [ "## Sensitivity: What if Only Mild Winter?\n", @@ -374,7 +445,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "27", "metadata": {}, "outputs": [], "source": [ @@ -385,18 +456,14 @@ "chp_size_mild = float(fs_mild.statistics.sizes['CHP(P_el)'].max())\n", "chp_size_both = float(chp_size.max())\n", "\n", - "pd.DataFrame(\n", - " {\n", - " 'Mild Only': {'CHP Size [kW]': chp_size_mild},\n", - " 'Both Scenarios': {'CHP Size [kW]': chp_size_both},\n", - " 'Uncertainty Buffer': {'CHP Size [kW]': chp_size_both - chp_size_mild},\n", - " }\n", + "print(\n", + " f'CHP sizing: {chp_size_mild:.0f} kW (mild only) vs {chp_size_both:.0f} kW (both scenarios) → +{chp_size_both - chp_size_mild:.0f} kW for uncertainty'\n", ")" ] }, { "cell_type": "markdown", - "id": "25", + "id": "28", "metadata": {}, "source": [ "### Energy Flow Sankey\n", @@ -407,7 +474,7 @@ { "cell_type": "code", "execution_count": null, - "id": "26", + "id": "29", "metadata": {}, "outputs": [], "source": [ @@ -416,7 +483,7 @@ }, { "cell_type": "markdown", - "id": "27", + "id": "30", "metadata": {}, "source": [ "## Key Concepts\n", diff --git a/docs/notebooks/08a-aggregation.ipynb b/docs/notebooks/08a-aggregation.ipynb index cccb7dce0..8bc1a4774 100644 --- a/docs/notebooks/08a-aggregation.ipynb +++ b/docs/notebooks/08a-aggregation.ipynb @@ -65,8 +65,8 @@ "flow_system.connect_and_transform() # Align all data as xarray\n", "\n", "timesteps = flow_system.timesteps\n", - "\n", - "flow_system" + "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 24:.0f} days at hourly resolution)')\n", + "print(f'Components: {list(flow_system.components.keys())}')" ] }, { @@ -111,7 +111,10 @@ "solver = fx.solvers.HighsSolver(mip_gap=0.01)\n", "\n", "# Resample from 15-min to 4h resolution\n", - "fs_resampled = flow_system.transform.resample('4h')" + "fs_resampled = flow_system.transform.resample('4h')\n", + "\n", + "reduction = (1 - len(fs_resampled.timesteps) / len(flow_system.timesteps)) * 100\n", + "print(f'Resampled: {len(flow_system.timesteps)} → {len(fs_resampled.timesteps)} timesteps ({reduction:.0f}% reduction)')" ] }, { @@ -124,7 +127,9 @@ "# Optimize resampled system\n", "start = timeit.default_timer()\n", "fs_resampled.optimize(solver)\n", - "time_resampled = timeit.default_timer() - start" + "time_resampled = timeit.default_timer() - start\n", + "\n", + "print(f'Resampled: {time_resampled:.1f}s, {fs_resampled.solution[\"costs\"].item():,.0f} €')" ] }, { @@ -151,7 +156,10 @@ "fs_sizing.optimize(solver)\n", "time_stage1 = timeit.default_timer() - start\n", "\n", - "sizes = {k: float(v.item()) for k, v in fs_sizing.statistics.sizes.items()}" + "sizes = {k: float(v.item()) for k, v in fs_sizing.statistics.sizes.items()}\n", + "print(\n", + " f'Stage 1 (sizing): {time_stage1:.1f}s → CHP {sizes[\"CHP(Q_th)\"]:.0f}, Boiler {sizes[\"Boiler(Q_th)\"]:.0f}, Storage {sizes[\"Storage\"]:.0f}'\n", + ")" ] }, { @@ -165,7 +173,11 @@ "start = timeit.default_timer()\n", "fs_dispatch = flow_system.transform.fix_sizes(fs_sizing.statistics.sizes)\n", "fs_dispatch.optimize(solver)\n", - "time_stage2 = timeit.default_timer() - start" + "time_stage2 = timeit.default_timer() - 
start\n", + "\n", + "print(\n", + " f'Stage 2 (dispatch): {time_stage2:.1f}s, {fs_dispatch.solution[\"costs\"].item():,.0f} € (total: {time_stage1 + time_stage2:.1f}s)'\n", + ")" ] }, { @@ -188,7 +200,9 @@ "start = timeit.default_timer()\n", "fs_full = flow_system.copy()\n", "fs_full.optimize(solver)\n", - "time_full = timeit.default_timer() - start" + "time_full = timeit.default_timer() - start\n", + "\n", + "print(f'Full optimization: {time_full:.1f}s, {fs_full.solution[\"costs\"].item():,.0f} €')" ] }, { diff --git a/docs/notebooks/08b-rolling-horizon.ipynb b/docs/notebooks/08b-rolling-horizon.ipynb index 3b91fd980..c0d7bdf24 100644 --- a/docs/notebooks/08b-rolling-horizon.ipynb +++ b/docs/notebooks/08b-rolling-horizon.ipynb @@ -69,8 +69,8 @@ "flow_system.connect_and_transform() # Align all data as xarray\n", "\n", "timesteps = flow_system.timesteps\n", - "\n", - "flow_system" + "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 24:.0f} days at hourly resolution)')\n", + "print(f'Components: {list(flow_system.components.keys())}')" ] }, { @@ -95,7 +95,9 @@ "start = timeit.default_timer()\n", "fs_full = flow_system.copy()\n", "fs_full.optimize(solver)\n", - "time_full = timeit.default_timer() - start" + "time_full = timeit.default_timer() - start\n", + "\n", + "print(f'Full: {time_full:.1f}s, {fs_full.solution[\"costs\"].item():,.0f} €')" ] }, { @@ -136,7 +138,9 @@ " horizon=192, # 2-day segments (192 timesteps at 15-min resolution)\n", " overlap=96, # 1-day lookahead\n", ")\n", - "time_rolling = timeit.default_timer() - start" + "time_rolling = timeit.default_timer() - start\n", + "\n", + "print(f'Rolling ({len(segments)} segments): {time_rolling:.1f}s, {fs_rolling.solution[\"costs\"].item():,.0f} €')" ] }, { @@ -250,16 +254,11 @@ "metadata": {}, "outputs": [], "source": [ - "pd.DataFrame(\n", - " {\n", - " f'Segment {i + 1}': {\n", - " 'Start': f'{seg.timesteps[0]:%m-%d %H:%M}',\n", - " 'End': f'{seg.timesteps[-1]:%m-%d %H:%M}',\n", - " 'Cost [EUR]': seg.solution['costs'].item(),\n", - " }\n", - " for i, seg in enumerate(segments)\n", - " }\n", - ")" + "print(f'{len(segments)} segments:')\n", + "for i, seg in enumerate(segments):\n", + " print(\n", + " f' {i + 1}: {seg.timesteps[0]:%m-%d %H:%M} → {seg.timesteps[-1]:%m-%d %H:%M} | {seg.solution[\"costs\"].item():,.0f} €'\n", + " )" ] }, { diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb index 825aadd3a..0f3b4cc29 100644 --- a/docs/notebooks/08c-clustering.ipynb +++ b/docs/notebooks/08c-clustering.ipynb @@ -29,8 +29,7 @@ "import timeit\n", "\n", "import pandas as pd\n", - "import plotly.graph_objects as go\n", - "from plotly.subplots import make_subplots\n", + "import xarray as xr\n", "\n", "import flixopt as fx\n", "\n", @@ -72,18 +71,13 @@ "outputs": [], "source": [ "# Visualize input data\n", - "heat_demand = flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile\n", - "electricity_price = flow_system.components['GridBuy'].outputs[0].effects_per_flow_hour['costs']\n", - "\n", - "fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.1)\n", - "fig.add_trace(go.Scatter(x=timesteps, y=heat_demand.values, name='Heat Demand', line=dict(width=0.5)), row=1, col=1)\n", - "fig.add_trace(\n", - " go.Scatter(x=timesteps, y=electricity_price.values, name='Electricity Price', line=dict(width=0.5)), row=2, col=1\n", + "input_ds = xr.Dataset(\n", + " {\n", + " 'Heat Demand': flow_system.components['HeatDemand'].inputs[0].fixed_relative_profile,\n", + " 
'Electricity Price': flow_system.components['GridBuy'].outputs[0].effects_per_flow_hour['costs'],\n", + " }\n", ")\n", - "fig.update_layout(height=400, title='One Month of Input Data')\n", - "fig.update_yaxes(title_text='Heat Demand [MW]', row=1, col=1)\n", - "fig.update_yaxes(title_text='El. Price [€/MWh]', row=2, col=1)\n", - "fig.show()" + "input_ds.fxplot.line(facet_row='variable', title='One Month of Input Data')" ] }, { @@ -172,7 +166,7 @@ "source": [ "## Understanding the Clustering\n", "\n", - "The clustering algorithm groups similar days together. Let's inspect the cluster structure:" + "The clustering algorithm groups similar days together. Access all metadata via `fs.clustering`:" ] }, { @@ -181,14 +175,150 @@ "id": "11", "metadata": {}, "outputs": [], + "source": [ + "# Access clustering metadata directly\n", + "clustering = fs_clustered.clustering\n", + "clustering" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "12", + "metadata": {}, + "outputs": [], "source": [ "# Show clustering info using __repr__\n", "fs_clustered.clustering" ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "13", + "metadata": {}, + "outputs": [], + "source": [ + "# Quality metrics - how well do the clusters represent the original data?\n", + "# Lower RMSE/MAE = better representation\n", + "clustering.metrics.to_dataframe().style.format('{:.3f}')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "14", + "metadata": {}, + "outputs": [], + "source": [ + "# Visual comparison: original vs clustered time series\n", + "clustering.plot.compare()" + ] + }, { "cell_type": "markdown", - "id": "12", + "id": "15", + "metadata": {}, + "source": [ + "## Advanced Clustering Options\n", + "\n", + "The `cluster()` method exposes many parameters for fine-tuning:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "16", + "metadata": {}, + "outputs": [], + "source": [ + "# Try different clustering algorithms\n", + "fs_kmeans = flow_system.transform.cluster(\n", + " n_clusters=8,\n", + " cluster_duration='1D',\n", + " cluster_method='k_means', # Alternative: 'hierarchical' (default), 'k_medoids', 'averaging'\n", + ")\n", + "\n", + "# Compare cluster assignments between algorithms\n", + "print('hierarchical clusters:', fs_clustered.clustering.cluster_order.values)\n", + "print('k_means clusters: ', fs_kmeans.clustering.cluster_order.values)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "17", + "metadata": {}, + "outputs": [], + "source": [ + "# Compare RMSE between algorithms\n", + "print('Quality comparison (RMSE for HeatDemand):')\n", + "print(\n", + " f' hierarchical: {float(fs_clustered.clustering.metrics[\"RMSE\"].sel(time_series=\"HeatDemand(Q_th)|fixed_relative_profile\")):.4f}'\n", + ")\n", + "print(\n", + " f' k_means: {float(fs_kmeans.clustering.metrics[\"RMSE\"].sel(time_series=\"HeatDemand(Q_th)|fixed_relative_profile\")):.4f}'\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "18", + "metadata": {}, + "outputs": [], + "source": [ + "# Visualize cluster structure with heatmap\n", + "clustering.plot.heatmap()" + ] + }, + { + "cell_type": "markdown", + "id": "19", + "metadata": {}, + "source": [ + "### Manual Cluster Assignment\n", + "\n", + "When comparing design variants or performing sensitivity analysis, you often want to\n", + "use the **same cluster structure** across different FlowSystem configurations.\n", + "Use `predef_cluster_order` to ensure comparable results:" + 
] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "20", + "metadata": {}, + "outputs": [], + "source": [ + "# Save the cluster order from our optimized system\n", + "cluster_order = fs_clustered.clustering.cluster_order.values\n", + "print(f'Cluster order to reuse: {cluster_order}')\n", + "\n", + "# Now modify the FlowSystem (e.g., increase storage capacity limits)\n", + "flow_system_modified = flow_system.copy()\n", + "flow_system_modified.components['Storage'].capacity_in_flow_hours.maximum_size = 2000 # Larger storage option\n", + "\n", + "# Cluster with the SAME cluster structure for fair comparison\n", + "fs_modified_clustered = flow_system_modified.transform.cluster(\n", + " n_clusters=8,\n", + " cluster_duration='1D',\n", + " predef_cluster_order=cluster_order, # Reuse cluster assignments\n", + ")\n", + "\n", + "# Optimize the modified system\n", + "fs_modified_clustered.optimize(solver)\n", + "\n", + "print('\\nComparison (same cluster structure):')\n", + "print(f' Original storage size: {fs_clustered.statistics.sizes[\"Storage\"].item():.0f}')\n", + "print(f' Modified storage size: {fs_modified_clustered.statistics.sizes[\"Storage\"].item():.0f}')\n", + "print(f' Original cost: {fs_clustered.solution[\"costs\"].item():,.0f} €')\n", + "print(f' Modified cost: {fs_modified_clustered.solution[\"costs\"].item():,.0f} €')" + ] + }, + { + "cell_type": "markdown", + "id": "21", "metadata": {}, "source": [ "## Method 3: Two-Stage Workflow (Recommended)\n", @@ -206,7 +336,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -218,7 +348,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -236,7 +366,7 @@ }, { "cell_type": "markdown", - "id": "15", + "id": "24", "metadata": {}, "source": [ "## Compare Results" @@ -245,7 +375,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "25", "metadata": {}, "outputs": [], "source": [ @@ -294,7 +424,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "26", "metadata": {}, "source": [ "## Expand Solution to Full Resolution\n", @@ -306,7 +436,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "27", "metadata": {}, "outputs": [], "source": [ @@ -317,34 +447,29 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "28", "metadata": {}, "outputs": [], "source": [ - "# Compare heat balance: Full vs Expanded\n", - "fig = make_subplots(rows=2, cols=1, shared_xaxes=True, subplot_titles=['Full Optimization', 'Expanded from Clustering'])\n", + "# Compare heat production: Full vs Expanded\n", + "heat_flows = ['CHP(Q_th)|flow_rate', 'Boiler(Q_th)|flow_rate']\n", "\n", - "# Full\n", - "for var in ['CHP(Q_th)', 'Boiler(Q_th)']:\n", - " values = fs_full.solution[f'{var}|flow_rate'].values\n", - " fig.add_trace(go.Scatter(x=fs_full.timesteps, y=values, name=var, legendgroup=var, showlegend=True), row=1, col=1)\n", - "\n", - "# Expanded\n", - "for var in ['CHP(Q_th)', 'Boiler(Q_th)']:\n", - " values = fs_expanded.solution[f'{var}|flow_rate'].values\n", - " fig.add_trace(\n", - " go.Scatter(x=fs_expanded.timesteps, y=values, name=var, legendgroup=var, showlegend=False), row=2, col=1\n", - " )\n", + "# Create comparison dataset\n", + "comparison_ds = xr.Dataset(\n", + " {\n", + " name.replace('|flow_rate', ''): xr.concat(\n", + " [fs_full.solution[name], fs_expanded.solution[name]], dim=pd.Index(['Full', 'Expanded'], name='method')\n", + 
" )\n", + " for name in heat_flows\n", + " }\n", + ")\n", "\n", - "fig.update_layout(height=500, title='Heat Production Comparison')\n", - "fig.update_yaxes(title_text='MW', row=1, col=1)\n", - "fig.update_yaxes(title_text='MW', row=2, col=1)\n", - "fig.show()" + "comparison_ds.fxplot.line(facet_col='variable', color='method', title='Heat Production Comparison')" ] }, { "cell_type": "markdown", - "id": "20", + "id": "29", "metadata": {}, "source": [ "## Visualize Clustered Heat Balance" @@ -353,7 +478,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "30", "metadata": {}, "outputs": [], "source": [ @@ -363,7 +488,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "31", "metadata": {}, "outputs": [], "source": [ @@ -372,24 +497,45 @@ }, { "cell_type": "markdown", - "id": "23", + "id": "32", "metadata": {}, "source": [ "## API Reference\n", "\n", "### `transform.cluster()` Parameters\n", "\n", - "| Parameter | Type | Description |\n", - "|-----------|------|-------------|\n", - "| `n_clusters` | `int` | Number of typical periods (e.g., 8 typical days) |\n", - "| `cluster_duration` | `str \\| float` | Duration per cluster ('1D', '24h') or hours |\n", - "| `weights` | `dict[str, float]` | Optional weights for time series in clustering |\n", - "| `time_series_for_high_peaks` | `list[str]` | **Essential**: Force inclusion of peak periods |\n", - "| `time_series_for_low_peaks` | `list[str]` | Force inclusion of minimum periods |\n", + "| Parameter | Type | Default | Description |\n", + "|-----------|------|---------|-------------|\n", + "| `n_clusters` | `int` | - | Number of typical periods (e.g., 8 typical days) |\n", + "| `cluster_duration` | `str \\| float` | - | Duration per cluster ('1D', '24h') or hours |\n", + "| `weights` | `dict[str, float]` | None | Optional weights for time series in clustering |\n", + "| `time_series_for_high_peaks` | `list[str]` | None | **Essential**: Force inclusion of peak periods |\n", + "| `time_series_for_low_peaks` | `list[str]` | None | Force inclusion of minimum periods |\n", + "| `cluster_method` | `str` | 'hierarchical' | Algorithm: 'hierarchical', 'k_means', 'k_medoids', 'k_maxoids', 'averaging' |\n", + "| `representation_method` | `str` | 'medoidRepresentation' | 'medoidRepresentation', 'meanRepresentation', 'distributionAndMinMaxRepresentation' |\n", + "| `extreme_period_method` | `str \\| None` | None | How peaks are integrated: None, 'append', 'new_cluster_center', 'replace_cluster_center' |\n", + "| `rescale_cluster_periods` | `bool` | True | Rescale clusters to match original means |\n", + "| `predef_cluster_order` | `array` | None | Manual cluster assignments |\n", + "| `**tsam_kwargs` | - | - | Additional tsam parameters |\n", + "\n", + "### Clustering Object Properties\n", + "\n", + "After clustering, access metadata via `fs.clustering`:\n", + "\n", + "| Property | Description |\n", + "|----------|-------------|\n", + "| `n_clusters` | Number of clusters |\n", + "| `n_original_clusters` | Number of original time segments (e.g., 365 days) |\n", + "| `timesteps_per_cluster` | Timesteps in each cluster (e.g., 24 for daily) |\n", + "| `cluster_order` | xr.DataArray mapping original segment → cluster ID |\n", + "| `occurrences` | How many original segments each cluster represents |\n", + "| `metrics` | xr.Dataset with RMSE, MAE per time series |\n", + "| `plot.compare()` | Compare original vs clustered time series |\n", + "| `plot.heatmap()` | Visualize cluster structure |\n", "\n", "### Storage 
Behavior\n", "\n", - "Each `Storage` component has a `cluster_storage_mode` parameter that controls how it behaves during clustering:\n", + "Each `Storage` component has a `cluster_mode` parameter:\n", "\n", "| Mode | Description |\n", "|------|-------------|\n", @@ -398,37 +544,12 @@ "| `'cyclic'` | Each cluster is independent but cyclic (start = end) |\n", "| `'independent'` | Each cluster is independent, free start/end |\n", "\n", - "For a detailed comparison of storage modes, see [08c2-clustering-storage-modes](08c2-clustering-storage-modes.ipynb).\n", - "\n", - "### Peak Forcing Format\n", - "\n", - "```python\n", - "time_series_for_high_peaks = ['ComponentName(FlowName)|fixed_relative_profile']\n", - "```\n", - "\n", - "### Recommended Workflow\n", - "\n", - "```python\n", - "# Stage 1: Fast sizing\n", - "fs_sizing = flow_system.transform.cluster(\n", - " n_clusters=8,\n", - " cluster_duration='1D',\n", - " time_series_for_high_peaks=['Demand(Flow)|fixed_relative_profile'],\n", - ")\n", - "fs_sizing.optimize(solver)\n", - "\n", - "# Apply safety margin\n", - "sizes = {k: v.item() * 1.05 for k, v in fs_sizing.statistics.sizes.items()}\n", - "\n", - "# Stage 2: Accurate dispatch\n", - "fs_dispatch = flow_system.transform.fix_sizes(sizes)\n", - "fs_dispatch.optimize(solver)\n", - "```" + "For a detailed comparison of storage modes, see [08c2-clustering-storage-modes](08c2-clustering-storage-modes.ipynb)." ] }, { "cell_type": "markdown", - "id": "24", + "id": "33", "metadata": {}, "source": [ "## Summary\n", @@ -439,13 +560,18 @@ "- Apply **peak forcing** to capture extreme demand days\n", "- Use **two-stage optimization** for fast yet accurate investment decisions\n", "- **Expand solutions** back to full resolution with `expand_solution()`\n", + "- Access **clustering metadata** via `fs.clustering` (metrics, cluster_order, occurrences)\n", + "- Use **advanced options** like different algorithms\n", + "- **Manually assign clusters** using `predef_cluster_order`\n", "\n", "### Key Takeaways\n", "\n", "1. **Always use peak forcing** (`time_series_for_high_peaks`) for demand time series\n", "2. **Add safety margin** (5-10%) when fixing sizes from clustering\n", "3. **Two-stage is recommended**: clustering for sizing, full resolution for dispatch\n", - "4. **Storage handling** is configurable via `storage_mode`\n", + "4. **Storage handling** is configurable via `cluster_mode`\n", + "5. **Check metrics** to evaluate clustering quality\n", + "6. 
**Use `predef_cluster_order`** to reproduce or define custom cluster assignments\n", "\n", "### Next Steps\n", "\n", diff --git a/docs/notebooks/09-plotting-and-data-access.ipynb b/docs/notebooks/09-plotting-and-data-access.ipynb index 20a7e6f4f..39fa788da 100644 --- a/docs/notebooks/09-plotting-and-data-access.ipynb +++ b/docs/notebooks/09-plotting-and-data-access.ipynb @@ -71,7 +71,10 @@ "multiperiod = create_multiperiod_system()\n", "multiperiod.optimize(solver)\n", "\n", - "simple" + "print('Created systems:')\n", + "print(f' simple: {len(simple.components)} components, {len(simple.buses)} buses')\n", + "print(f' complex_sys: {len(complex_sys.components)} components, {len(complex_sys.buses)} buses')\n", + "print(f' multiperiod: {len(multiperiod.components)} components, dims={dict(multiperiod.solution.sizes)}')" ] }, { @@ -611,12 +614,13 @@ "source": [ "nodes, edges = simple.topology.infos()\n", "\n", - "pd.DataFrame(\n", - " {\n", - " 'Nodes': {label: info['class'] for label, info in nodes.items()},\n", - " 'Edges': {label: f'{info[\"start\"]} -> {info[\"end\"]}' for label, info in edges.items()},\n", - " }\n", - ")" + "print('Nodes:')\n", + "for label, info in nodes.items():\n", + " print(f' {label}: {info[\"class\"]}')\n", + "\n", + "print('\\nEdges (flows):')\n", + "for label, info in edges.items():\n", + " print(f' {info[\"start\"]} -> {info[\"end\"]}: {label}')" ] }, { @@ -636,7 +640,10 @@ "metadata": {}, "outputs": [], "source": [ - "multiperiod" + "print('Multiperiod system dimensions:')\n", + "print(f' Periods: {list(multiperiod.periods)}')\n", + "print(f' Scenarios: {list(multiperiod.scenarios)}')\n", + "print(f' Solution dims: {dict(multiperiod.solution.sizes)}')" ] }, { @@ -741,7 +748,11 @@ "outputs": [], "source": [ "# Get plot result\n", - "result = simple.statistics.plot.balance('Heat')" + "result = simple.statistics.plot.balance('Heat')\n", + "\n", + "print('PlotResult contains:')\n", + "print(f' data: {type(result.data).__name__} with vars {list(result.data.data_vars)}')\n", + "print(f' figure: {type(result.figure).__name__}')" ] }, { diff --git a/docs/user-guide/optimization/clustering.md b/docs/user-guide/optimization/clustering.md index 7ec5faac1..793fbf8fe 100644 --- a/docs/user-guide/optimization/clustering.md +++ b/docs/user-guide/optimization/clustering.md @@ -52,6 +52,10 @@ flow_rates = fs_expanded.solution['Boiler(Q_th)|flow_rate'] | `cluster_duration` | Duration of each cluster | `'1D'`, `'24h'`, or `24` (hours) | | `time_series_for_high_peaks` | Time series where peak clusters must be captured | `['HeatDemand(Q)|fixed_relative_profile']` | | `time_series_for_low_peaks` | Time series where minimum clusters must be captured | `['SolarGen(P)|fixed_relative_profile']` | +| `cluster_method` | Clustering algorithm | `'k_means'`, `'hierarchical'`, `'k_medoids'` | +| `representation_method` | How clusters are represented | `'meanRepresentation'`, `'medoidRepresentation'` | +| `random_state` | Random seed for reproducibility | `42` | +| `rescale_cluster_periods` | Rescale clusters to match original means | `True` (default) | ### Peak Selection @@ -68,6 +72,58 @@ fs_clustered = flow_system.transform.cluster( Without peak selection, the clustering algorithm might average out extreme days, leading to undersized equipment. 
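+
+A sketch combining high- and low-peak forcing (the series labels here are illustrative; substitute the `'Component(Flow)|attribute'` names from your own system):
+
+```python
+fs_clustered = flow_system.transform.cluster(
+    n_clusters=8,
+    cluster_duration='1D',
+    time_series_for_high_peaks=['HeatDemand(Q)|fixed_relative_profile'],  # Capture peak-demand days
+    time_series_for_low_peaks=['SolarGen(P)|fixed_relative_profile'],  # Capture low-generation days
+)
+```
+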
+### Advanced Clustering Options
+
+Fine-tune the clustering algorithm with advanced parameters:
+
+```python
+fs_clustered = flow_system.transform.cluster(
+    n_clusters=8,
+    cluster_duration='1D',
+    cluster_method='k_means',  # Alternative to the default 'hierarchical'
+    representation_method='medoidRepresentation',  # Use actual periods, not averages
+    rescale_cluster_periods=True,  # Match original time series means
+    random_state=42,  # Reproducible results
+)
+```
+
+**Available clustering algorithms** (`cluster_method`):
+
+| Method | Description |
+|--------|-------------|
+| `'hierarchical'` | Produces consistent hierarchical groupings (default) |
+| `'k_means'` | Fast, good for most cases |
+| `'k_medoids'` | Uses actual periods as representatives |
+| `'k_maxoids'` | Maximizes representativeness |
+| `'averaging'` | Simple averaging of similar periods |
+
+For advanced tsam parameters not exposed directly, use `**kwargs`:
+
+```python
+# Pass any tsam.TimeSeriesAggregation parameter
+fs_clustered = flow_system.transform.cluster(
+    n_clusters=8,
+    cluster_duration='1D',
+    sameMean=True,  # Normalize all time series to same mean
+    sortValues=True,  # Cluster by duration curves instead of shape
+)
+```
+
+### Clustering Quality Metrics
+
+After clustering, access quality metrics to evaluate the aggregation accuracy:
+
+```python
+fs_clustered = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D')
+
+# Access clustering metrics (xr.Dataset)
+metrics = fs_clustered.clustering.metrics
+print(metrics)  # Shows RMSE, MAE, etc. per time series
+
+# Access specific metric
+rmse = metrics['RMSE']  # xr.DataArray with dims [time_series, period?, scenario?]
+```
+
 ## Storage Modes
 
 Storage behavior during clustering is controlled via the `cluster_mode` parameter:
diff --git a/docs/user-guide/results-plotting.md b/docs/user-guide/results-plotting.md
index 1ecd26aa1..28e3d2b2b 100644
--- a/docs/user-guide/results-plotting.md
+++ b/docs/user-guide/results-plotting.md
@@ -2,6 +2,9 @@
 
 After solving an optimization, flixOpt provides a powerful plotting API to visualize and analyze your results. The API is designed to be intuitive and chainable, giving you quick access to common plots while still allowing deep customization.
 
+!!! tip "Plotting Custom Data"
+    For plotting arbitrary xarray data (not just flixopt results), see the [Custom Data Plotting](recipes/plotting-custom-data.md) guide, which covers the `.fxplot` accessor.
+
 ## The Plot Accessor
 
 All plotting is accessed through the `statistics.plot` accessor on your FlowSystem:
diff --git a/flixopt/clustering/base.py b/flixopt/clustering/base.py
index 4b31832e4..0f154484b 100644
--- a/flixopt/clustering/base.py
+++ b/flixopt/clustering/base.py
@@ -38,15 +38,15 @@ class ClusterStructure:
     which is needed for proper storage state-of-charge tracking across
     typical periods when using cluster().
 
-    Note: "original_period" here refers to the original time chunks before
-    clustering (e.g., 365 original days), NOT the model's "period" dimension
-    (years/months). Each original time chunk gets assigned to a cluster.
+    Note: The "original_cluster" dimension indexes the original cluster-sized
+    time segments (e.g., 0..364 for 365 days), NOT the model's "period" dimension
+    (years). Each original segment gets assigned to a representative cluster.
 
     Attributes:
-    cluster_order: Maps each original time chunk index to its cluster ID.
-        dims: [original_period] for simple case, or
-        [original_period, period, scenario] for multi-period/scenario systems.
- Values are cluster indices (0 to n_clusters-1). + cluster_order: Maps original cluster index → representative cluster ID. + dims: [original_cluster] for simple case, or + [original_cluster, period, scenario] for multi-period/scenario systems. + Values are cluster IDs (0 to n_clusters-1). cluster_occurrences: Count of how many original time chunks each cluster represents. dims: [cluster] for simple case, or [cluster, period, scenario] for multi-dim. n_clusters: Number of distinct clusters (typical periods). @@ -60,7 +60,7 @@ class ClusterStructure: - timesteps_per_cluster: 24 (for hourly data) For multi-scenario (e.g., 2 scenarios): - - cluster_order: shape (365, 2) with dims [original_period, scenario] + - cluster_order: shape (365, 2) with dims [original_cluster, scenario] - cluster_occurrences: shape (8, 2) with dims [cluster, scenario] """ @@ -73,7 +73,7 @@ def __post_init__(self): """Validate and ensure proper DataArray formatting.""" # Ensure cluster_order is a DataArray with proper dims if not isinstance(self.cluster_order, xr.DataArray): - self.cluster_order = xr.DataArray(self.cluster_order, dims=['original_period'], name='cluster_order') + self.cluster_order = xr.DataArray(self.cluster_order, dims=['original_cluster'], name='cluster_order') elif self.cluster_order.name is None: self.cluster_order = self.cluster_order.rename('cluster_order') @@ -92,7 +92,7 @@ def __repr__(self) -> str: occ = [int(self.cluster_occurrences.sel(cluster=c).values) for c in range(n_clusters)] return ( f'ClusterStructure(\n' - f' {self.n_original_periods} original periods → {n_clusters} clusters\n' + f' {self.n_original_clusters} original periods → {n_clusters} clusters\n' f' timesteps_per_cluster={self.timesteps_per_cluster}\n' f' occurrences={occ}\n' f')' @@ -124,9 +124,9 @@ def _create_reference_structure(self) -> tuple[dict, dict[str, xr.DataArray]]: return ref, arrays @property - def n_original_periods(self) -> int: + def n_original_clusters(self) -> int: """Number of original periods (before clustering).""" - return len(self.cluster_order.coords['original_period']) + return len(self.cluster_order.coords['original_cluster']) @property def has_multi_dims(self) -> bool: @@ -197,20 +197,20 @@ def get_cluster_weight_per_timestep(self) -> xr.DataArray: name='cluster_weight', ) - def plot(self, show: bool | None = None) -> PlotResult: + def plot(self, colors: str | list[str] | None = None, show: bool | None = None) -> PlotResult: """Plot cluster assignment visualization. Shows which cluster each original period belongs to, and the number of occurrences per cluster. Args: + colors: Colorscale name (str) or list of colors. + Defaults to CONFIG.Plotting.default_sequential_colorscale. show: Whether to display the figure. Defaults to CONFIG.Plotting.default_show. Returns: PlotResult containing the figure and underlying data. 
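+
+        Example:
+            A minimal sketch; the colorscale name is only an illustration.
+
+            >>> result = cluster_structure.plot(colors='Viridis', show=False)
+            >>> result.figure.show()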
""" - import plotly.express as px - from ..config import CONFIG from ..plot_result import PlotResult @@ -218,27 +218,24 @@ def plot(self, show: bool | None = None) -> PlotResult: int(self.n_clusters) if isinstance(self.n_clusters, (int, np.integer)) else int(self.n_clusters.values) ) - # Create DataFrame for plotting - import pandas as pd - cluster_order = self.get_cluster_order_for_slice() - df = pd.DataFrame( - { - 'Original Period': range(1, len(cluster_order) + 1), - 'Cluster': cluster_order, - } + + # Build DataArray for fxplot heatmap + cluster_da = xr.DataArray( + cluster_order.reshape(1, -1), + dims=['y', 'original_cluster'], + coords={'y': ['Cluster'], 'original_cluster': range(1, len(cluster_order) + 1)}, + name='cluster_assignment', ) - # Bar chart showing cluster assignment - fig = px.bar( - df, - x='Original Period', - y=[1] * len(df), - color='Cluster', - color_continuous_scale='Viridis', - title=f'Cluster Assignment ({self.n_original_periods} periods → {n_clusters} clusters)', + # Use fxplot.heatmap for smart defaults + colorscale = colors or CONFIG.Plotting.default_sequential_colorscale + fig = cluster_da.fxplot.heatmap( + colors=colorscale, + title=f'Cluster Assignment ({self.n_original_clusters} periods → {n_clusters} clusters)', ) - fig.update_layout(yaxis_visible=False, coloraxis_colorbar_title='Cluster') + fig.update_yaxes(showticklabels=False) + fig.update_coloraxes(colorbar_title='Cluster') # Build data for PlotResult data = xr.Dataset( @@ -532,30 +529,30 @@ def validate(self) -> None: # (each weight is how many original periods that cluster represents) # Sum should be checked per period/scenario slice, not across all dimensions if self.cluster_structure is not None: - n_original_periods = self.cluster_structure.n_original_periods + n_original_clusters = self.cluster_structure.n_original_clusters # Sum over cluster dimension only (keep period/scenario if present) weight_sum_per_slice = self.representative_weights.sum(dim='cluster') # Check each slice if weight_sum_per_slice.size == 1: # Simple case: no period/scenario weight_sum = float(weight_sum_per_slice.values) - if abs(weight_sum - n_original_periods) > 1e-6: + if abs(weight_sum - n_original_clusters) > 1e-6: import warnings warnings.warn( f'representative_weights sum ({weight_sum}) does not match ' - f'n_original_periods ({n_original_periods})', + f'n_original_clusters ({n_original_clusters})', stacklevel=2, ) else: # Multi-dimensional: check each slice for val in weight_sum_per_slice.values.flat: - if abs(float(val) - n_original_periods) > 1e-6: + if abs(float(val) - n_original_clusters) > 1e-6: import warnings warnings.warn( f'representative_weights sum per slice ({float(val)}) does not match ' - f'n_original_periods ({n_original_periods})', + f'n_original_clusters ({n_original_clusters})', stacklevel=2, ) break # Only warn once @@ -585,8 +582,10 @@ def compare( *, select: SelectType | None = None, colors: ColorType | None = None, - facet_col: str | None = 'period', - facet_row: str | None = 'scenario', + color: str | None = 'auto', + line_dash: str | None = 'representation', + facet_col: str | None = 'auto', + facet_row: str | None = 'auto', show: bool | None = None, **plotly_kwargs: Any, ) -> PlotResult: @@ -600,8 +599,14 @@ def compare( or None to plot all time-varying variables. select: xarray-style selection dict, e.g. {'scenario': 'Base Case'}. colors: Color specification (colorscale name, color list, or label-to-color dict). - facet_col: Dimension for subplot columns (default: 'period'). 
- facet_row: Dimension for subplot rows (default: 'scenario'). + color: Dimension for line colors. 'auto' uses CONFIG priority (typically 'variable'). + Use 'representation' to color by Original/Clustered instead of line_dash. + line_dash: Dimension for line dash styles. Defaults to 'representation'. + Set to None to disable line dash differentiation. + facet_col: Dimension for subplot columns. 'auto' uses CONFIG priority. + Use 'variable' to create separate columns per variable. + facet_row: Dimension for subplot rows. 'auto' uses CONFIG priority. + Use 'variable' to create separate rows per variable. show: Whether to display the figure. Defaults to CONFIG.Plotting.default_show. **plotly_kwargs: Additional arguments passed to plotly. @@ -610,9 +615,7 @@ def compare( PlotResult containing the comparison figure and underlying data. """ import pandas as pd - import plotly.express as px - from ..color_processing import process_colors from ..config import CONFIG from ..plot_result import PlotResult from ..statistics_accessor import _apply_selection @@ -626,7 +629,7 @@ def compare( resolved_variables = self._resolve_variables(variables) - # Build Dataset with 'representation' dimension for Original/Clustered + # Build Dataset with variables as data_vars data_vars = {} for var in resolved_variables: original = result.original_data[var] @@ -650,54 +653,41 @@ def compare( { var: xr.DataArray( [sorted_vars[(var, r)] for r in ['Original', 'Clustered']], - dims=['representation', 'rank'], - coords={'representation': ['Original', 'Clustered'], 'rank': range(n)}, + dims=['representation', 'duration'], + coords={'representation': ['Original', 'Clustered'], 'duration': range(n)}, ) for var in resolved_variables } ) - # Resolve facets (only for timeseries) - actual_facet_col = facet_col if kind == 'timeseries' and facet_col in ds.dims else None - actual_facet_row = facet_row if kind == 'timeseries' and facet_row in ds.dims else None - - # Convert to long-form DataFrame - df = ds.to_dataframe().reset_index() - coord_cols = [c for c in ds.coords.keys() if c in df.columns] - df = df.melt(id_vars=coord_cols, var_name='variable', value_name='value') - - variable_labels = df['variable'].unique().tolist() - color_map = process_colors(colors, variable_labels, CONFIG.Plotting.default_qualitative_colorscale) - - # Set x-axis and title based on kind - x_col = 'time' if kind == 'timeseries' else 'rank' + # Set title based on kind if kind == 'timeseries': title = ( 'Original vs Clustered' if len(resolved_variables) > 1 else f'Original vs Clustered: {resolved_variables[0]}' ) - labels = {} else: title = 'Duration Curve' if len(resolved_variables) > 1 else f'Duration Curve: {resolved_variables[0]}' - labels = {'rank': 'Hours (sorted)', 'value': 'Value'} - - fig = px.line( - df, - x=x_col, - y='value', - color='variable', - line_dash='representation', - facet_col=actual_facet_col, - facet_row=actual_facet_row, + + # Use fxplot for smart defaults + line_kwargs = {} + if line_dash is not None: + line_kwargs['line_dash'] = line_dash + if line_dash == 'representation': + line_kwargs['line_dash_map'] = {'Original': 'dot', 'Clustered': 'solid'} + + fig = ds.fxplot.line( + colors=colors, + color=color, title=title, - labels=labels, - color_discrete_map=color_map, + facet_col=facet_col, + facet_row=facet_row, + **line_kwargs, **plotly_kwargs, ) - if actual_facet_row or actual_facet_col: - fig.update_yaxes(matches=None) - fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1])) + fig.update_yaxes(matches=None) + 
fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1])) plot_result = PlotResult(data=ds, figure=fig) @@ -743,8 +733,8 @@ def heatmap( *, select: SelectType | None = None, colors: str | list[str] | None = None, - facet_col: str | None = 'period', - animation_frame: str | None = 'scenario', + facet_col: str | None = 'auto', + animation_frame: str | None = 'auto', show: bool | None = None, **plotly_kwargs: Any, ) -> PlotResult: @@ -762,8 +752,8 @@ def heatmap( colors: Colorscale name (str) or list of colors for heatmap coloring. Dicts are not supported for heatmaps. Defaults to CONFIG.Plotting.default_sequential_colorscale. - facet_col: Dimension to facet on columns (default: 'period'). - animation_frame: Dimension for animation slider (default: 'scenario'). + facet_col: Dimension to facet on columns. 'auto' uses CONFIG priority. + animation_frame: Dimension for animation slider. 'auto' uses CONFIG priority. show: Whether to display the figure. Defaults to CONFIG.Plotting.default_show. **plotly_kwargs: Additional arguments passed to plotly. @@ -773,7 +763,6 @@ def heatmap( The data has 'cluster' variable with time dimension, matching original timesteps. """ import pandas as pd - import plotly.express as px from ..config import CONFIG from ..plot_result import PlotResult @@ -833,34 +822,29 @@ def heatmap( else: cluster_da = cluster_slices[(None, None)] - # Resolve facet_col and animation_frame - only use if dimension exists - actual_facet_col = facet_col if facet_col and facet_col in cluster_da.dims else None - actual_animation = animation_frame if animation_frame and animation_frame in cluster_da.dims else None - # Add dummy y dimension for heatmap visualization (single row) heatmap_da = cluster_da.expand_dims('y', axis=-1) heatmap_da = heatmap_da.assign_coords(y=['Cluster']) + heatmap_da.name = 'cluster_assignment' - colorscale = colors or CONFIG.Plotting.default_sequential_colorscale + # Reorder dims so 'time' and 'y' are first (heatmap x/y axes) + # Other dims (period, scenario) will be used for faceting/animation + target_order = ['time', 'y'] + [d for d in heatmap_da.dims if d not in ('time', 'y')] + heatmap_da = heatmap_da.transpose(*target_order) - # Use px.imshow with xr.DataArray - fig = px.imshow( - heatmap_da, - color_continuous_scale=colorscale, - facet_col=actual_facet_col, - animation_frame=actual_animation, + # Use fxplot.heatmap for smart defaults + fig = heatmap_da.fxplot.heatmap( + colors=colors, title='Cluster Assignments', - labels={'time': 'Time', 'color': 'Cluster'}, + facet_col=facet_col, + animation_frame=animation_frame, aspect='auto', **plotly_kwargs, ) - # Clean up facet labels - if actual_facet_col: - fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1])) - - # Hide y-axis since it's just a single row + # Clean up: hide y-axis since it's just a single row fig.update_yaxes(showticklabels=False) + fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1])) # Data is exactly what we plotted (without dummy y dimension) cluster_da.name = 'cluster' @@ -880,21 +864,27 @@ def clusters( *, select: SelectType | None = None, colors: ColorType | None = None, - facet_col_wrap: int | None = None, + color: str | None = 'auto', + facet_col: str | None = 'cluster', + facet_cols: int | None = None, show: bool | None = None, **plotly_kwargs: Any, ) -> PlotResult: """Plot each cluster's typical period profile. - Shows each cluster as a separate faceted subplot. Useful for - understanding what each cluster represents. 
+ Shows each cluster as a separate faceted subplot with all variables + colored differently. Useful for understanding what each cluster represents. Args: variables: Variable(s) to plot. Can be a string, list of strings, or None to plot all time-varying variables. select: xarray-style selection dict, e.g. {'scenario': 'Base Case'}. colors: Color specification (colorscale name, color list, or label-to-color dict). - facet_col_wrap: Max columns before wrapping facets. + color: Dimension for line colors. 'auto' uses CONFIG priority (typically 'variable'). + Use 'cluster' to color by cluster instead of faceting. + facet_col: Dimension for subplot columns. Defaults to 'cluster'. + Use 'variable' to facet by variable instead. + facet_cols: Max columns before wrapping facets. Defaults to CONFIG.Plotting.default_facet_cols. show: Whether to display the figure. Defaults to CONFIG.Plotting.default_show. @@ -903,10 +893,6 @@ def clusters( Returns: PlotResult containing the figure and underlying data. """ - import pandas as pd - import plotly.express as px - - from ..color_processing import process_colors from ..config import CONFIG from ..plot_result import PlotResult from ..statistics_accessor import _apply_selection @@ -929,45 +915,37 @@ def clusters( n_clusters = int(cs.n_clusters) if isinstance(cs.n_clusters, (int, np.integer)) else int(cs.n_clusters.values) timesteps_per_cluster = cs.timesteps_per_cluster - # Build long-form DataFrame with cluster labels including occurrence counts - rows = [] + # Build Dataset with cluster dimension, using labels with occurrence counts + cluster_labels = [ + f'Cluster {c} (×{int(cs.cluster_occurrences.sel(cluster=c).values)})' for c in range(n_clusters) + ] + data_vars = {} for var in resolved_variables: data = aggregated_data[var].values data_by_cluster = data.reshape(n_clusters, timesteps_per_cluster) data_vars[var] = xr.DataArray( data_by_cluster, - dims=['cluster', 'timestep'], - coords={'cluster': range(n_clusters), 'timestep': range(timesteps_per_cluster)}, + dims=['cluster', 'time'], + coords={'cluster': cluster_labels, 'time': range(timesteps_per_cluster)}, ) - for c in range(n_clusters): - occurrence = int(cs.cluster_occurrences.sel(cluster=c).values) - label = f'Cluster {c} (×{occurrence})' - for t in range(timesteps_per_cluster): - rows.append({'cluster': label, 'timestep': t, 'value': data_by_cluster[c, t], 'variable': var}) - df = pd.DataFrame(rows) - - cluster_labels = df['cluster'].unique().tolist() - color_map = process_colors(colors, cluster_labels, CONFIG.Plotting.default_qualitative_colorscale) - facet_col_wrap = facet_col_wrap or CONFIG.Plotting.default_facet_cols + + ds = xr.Dataset(data_vars) title = 'Clusters' if len(resolved_variables) > 1 else f'Clusters: {resolved_variables[0]}' - fig = px.line( - df, - x='timestep', - y='value', - facet_col='cluster', - facet_row='variable' if len(resolved_variables) > 1 else None, - facet_col_wrap=facet_col_wrap if len(resolved_variables) == 1 else None, + # Use fxplot for smart defaults + fig = ds.fxplot.line( + colors=colors, + color=color, title=title, - color_discrete_map=color_map, + facet_col=facet_col, + facet_cols=facet_cols, **plotly_kwargs, ) - fig.update_layout(showlegend=False) - if len(resolved_variables) > 1: - fig.update_yaxes(matches=None) - fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1])) + fig.update_yaxes(matches=None) + fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1])) + # Include occurrences in result data data_vars['occurrences'] = 
cs.cluster_occurrences result_data = xr.Dataset(data_vars) plot_result = PlotResult(data=result_data, figure=fig) @@ -993,6 +971,9 @@ class Clustering: Attributes: result: The ClusterResult from the aggregation backend. backend_name: Name of the aggregation backend used (e.g., 'tsam', 'manual'). + metrics: Clustering quality metrics (RMSE, MAE, etc.) as xr.Dataset. + Each metric (e.g., 'RMSE', 'MAE') is a DataArray with dims + ``[time_series, period?, scenario?]``. Example: >>> fs_clustered = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D') @@ -1004,6 +985,7 @@ class Clustering: result: ClusterResult backend_name: str = 'unknown' + metrics: xr.Dataset | None = None def _create_reference_structure(self) -> tuple[dict, dict[str, xr.DataArray]]: """Create reference structure for serialization.""" @@ -1026,7 +1008,7 @@ def __repr__(self) -> str: n_clusters = ( int(cs.n_clusters) if isinstance(cs.n_clusters, (int, np.integer)) else int(cs.n_clusters.values) ) - structure_info = f'{cs.n_original_periods} periods → {n_clusters} clusters' + structure_info = f'{cs.n_original_clusters} periods → {n_clusters} clusters' else: structure_info = 'no structure' return f'Clustering(\n backend={self.backend_name!r}\n {structure_info}\n)' @@ -1071,11 +1053,11 @@ def n_clusters(self) -> int: return int(n) if isinstance(n, (int, np.integer)) else int(n.values) @property - def n_original_periods(self) -> int: + def n_original_clusters(self) -> int: """Number of original periods (before clustering).""" if self.result.cluster_structure is None: raise ValueError('No cluster_structure available') - return self.result.cluster_structure.n_original_periods + return self.result.cluster_structure.n_original_clusters @property def timesteps_per_period(self) -> int: @@ -1152,17 +1134,17 @@ def create_cluster_structure_from_mapping( ClusterStructure derived from the mapping. """ n_original = len(timestep_mapping) - n_original_periods = n_original // timesteps_per_cluster + n_original_clusters = n_original // timesteps_per_cluster # Determine cluster order from the mapping # Each original period maps to the cluster of its first timestep cluster_order = [] - for p in range(n_original_periods): + for p in range(n_original_clusters): start_idx = p * timesteps_per_cluster cluster_idx = int(timestep_mapping.isel(original_time=start_idx).values) // timesteps_per_cluster cluster_order.append(cluster_idx) - cluster_order_da = xr.DataArray(cluster_order, dims=['original_period'], name='cluster_order') + cluster_order_da = xr.DataArray(cluster_order, dims=['original_cluster'], name='cluster_order') # Count occurrences of each cluster unique_clusters = np.unique(cluster_order) diff --git a/flixopt/clustering/intercluster_helpers.py b/flixopt/clustering/intercluster_helpers.py index d2a5eb9d3..a89a80862 100644 --- a/flixopt/clustering/intercluster_helpers.py +++ b/flixopt/clustering/intercluster_helpers.py @@ -132,7 +132,7 @@ def extract_capacity_bounds( def build_boundary_coords( - n_original_periods: int, + n_original_clusters: int, flow_system: FlowSystem, ) -> tuple[dict, list[str]]: """Build coordinates and dimensions for SOC_boundary variable. @@ -146,7 +146,7 @@ def build_boundary_coords( multi-period or stochastic optimizations. Args: - n_original_periods: Number of original (non-aggregated) time periods. + n_original_clusters: Number of original (non-aggregated) time periods. For example, if a year is clustered into 8 typical days but originally had 365 days, this would be 365. 
flow_system: The FlowSystem containing optional period/scenario dimensions. @@ -163,7 +163,7 @@ def build_boundary_coords( >>> coords['cluster_boundary'] array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]) """ - n_boundaries = n_original_periods + 1 + n_boundaries = n_original_clusters + 1 coords = {'cluster_boundary': np.arange(n_boundaries)} dims = ['cluster_boundary'] diff --git a/flixopt/components.py b/flixopt/components.py index 390fc6f02..e962791d8 100644 --- a/flixopt/components.py +++ b/flixopt/components.py @@ -1195,7 +1195,7 @@ class InterclusterStorageModel(StorageModel): Variables Created ----------------- - ``SOC_boundary``: Absolute SOC at each original period boundary. - Shape: (n_original_periods + 1,) plus any period/scenario dimensions. + Shape: (n_original_clusters + 1,) plus any period/scenario dimensions. Constraints Created ------------------- @@ -1330,7 +1330,7 @@ def _add_intercluster_linking(self) -> None: else int(cluster_structure.n_clusters.values) ) timesteps_per_cluster = cluster_structure.timesteps_per_cluster - n_original_periods = cluster_structure.n_original_periods + n_original_clusters = cluster_structure.n_original_clusters cluster_order = cluster_structure.cluster_order # 1. Constrain ΔE = 0 at cluster starts @@ -1338,7 +1338,7 @@ def _add_intercluster_linking(self) -> None: # 2. Create SOC_boundary variable flow_system = self._model.flow_system - boundary_coords, boundary_dims = build_boundary_coords(n_original_periods, flow_system) + boundary_coords, boundary_dims = build_boundary_coords(n_original_clusters, flow_system) capacity_bounds = extract_capacity_bounds(self.element.capacity_in_flow_hours, boundary_coords, boundary_dims) soc_boundary = self.add_variables( @@ -1360,12 +1360,14 @@ def _add_intercluster_linking(self) -> None: delta_soc = self._compute_delta_soc(n_clusters, timesteps_per_cluster) # 5. Add linking constraints - self._add_linking_constraints(soc_boundary, delta_soc, cluster_order, n_original_periods, timesteps_per_cluster) + self._add_linking_constraints( + soc_boundary, delta_soc, cluster_order, n_original_clusters, timesteps_per_cluster + ) # 6. Add cyclic or initial constraint if self.element.cluster_mode == 'intercluster_cyclic': self.add_constraints( - soc_boundary.isel(cluster_boundary=0) == soc_boundary.isel(cluster_boundary=n_original_periods), + soc_boundary.isel(cluster_boundary=0) == soc_boundary.isel(cluster_boundary=n_original_clusters), short_name='cyclic', ) else: @@ -1375,7 +1377,8 @@ def _add_intercluster_linking(self) -> None: if isinstance(initial, str): # 'equals_final' means cyclic self.add_constraints( - soc_boundary.isel(cluster_boundary=0) == soc_boundary.isel(cluster_boundary=n_original_periods), + soc_boundary.isel(cluster_boundary=0) + == soc_boundary.isel(cluster_boundary=n_original_clusters), short_name='initial_SOC_boundary', ) else: @@ -1389,7 +1392,7 @@ def _add_intercluster_linking(self) -> None: soc_boundary, cluster_order, capacity_bounds.has_investment, - n_original_periods, + n_original_clusters, timesteps_per_cluster, ) @@ -1438,7 +1441,7 @@ def _add_linking_constraints( soc_boundary: xr.DataArray, delta_soc: xr.DataArray, cluster_order: xr.DataArray, - n_original_periods: int, + n_original_clusters: int, timesteps_per_cluster: int, ) -> None: """Add constraints linking consecutive SOC_boundary values. @@ -1455,17 +1458,17 @@ def _add_linking_constraints( soc_boundary: SOC_boundary variable. delta_soc: Net SOC change per cluster. 
cluster_order: Mapping from original periods to representative clusters. - n_original_periods: Number of original (non-clustered) periods. + n_original_clusters: Number of original (non-clustered) periods. timesteps_per_cluster: Number of timesteps in each cluster period. """ soc_after = soc_boundary.isel(cluster_boundary=slice(1, None)) soc_before = soc_boundary.isel(cluster_boundary=slice(None, -1)) # Rename for alignment - soc_after = soc_after.rename({'cluster_boundary': 'original_period'}) - soc_after = soc_after.assign_coords(original_period=np.arange(n_original_periods)) - soc_before = soc_before.rename({'cluster_boundary': 'original_period'}) - soc_before = soc_before.assign_coords(original_period=np.arange(n_original_periods)) + soc_after = soc_after.rename({'cluster_boundary': 'original_cluster'}) + soc_after = soc_after.assign_coords(original_cluster=np.arange(n_original_clusters)) + soc_before = soc_before.rename({'cluster_boundary': 'original_cluster'}) + soc_before = soc_before.assign_coords(original_cluster=np.arange(n_original_clusters)) # Get delta_soc for each original period using cluster_order delta_soc_ordered = delta_soc.isel(cluster=cluster_order) @@ -1484,7 +1487,7 @@ def _add_combined_bound_constraints( soc_boundary: xr.DataArray, cluster_order: xr.DataArray, has_investment: bool, - n_original_periods: int, + n_original_clusters: int, timesteps_per_cluster: int, ) -> None: """Add constraints ensuring actual SOC stays within bounds. @@ -1498,21 +1501,21 @@ def _add_combined_bound_constraints( middle, and end of each cluster. With 2D (cluster, time) structure, we simply select charge_state at a - given time offset, then reorder by cluster_order to get original_period order. + given time offset, then reorder by cluster_order to get original_cluster order. Args: soc_boundary: SOC_boundary variable. cluster_order: Mapping from original periods to clusters. has_investment: Whether the storage has investment sizing. - n_original_periods: Number of original periods. + n_original_clusters: Number of original periods. timesteps_per_cluster: Timesteps in each cluster. """ charge_state = self.charge_state # soc_d: SOC at start of each original period soc_d = soc_boundary.isel(cluster_boundary=slice(None, -1)) - soc_d = soc_d.rename({'cluster_boundary': 'original_period'}) - soc_d = soc_d.assign_coords(original_period=np.arange(n_original_periods)) + soc_d = soc_d.rename({'cluster_boundary': 'original_cluster'}) + soc_d = soc_d.assign_coords(original_cluster=np.arange(n_original_clusters)) # Get self-discharge rate for decay calculation # Keep as DataArray to respect per-period/scenario values @@ -1523,13 +1526,13 @@ def _add_combined_bound_constraints( for sample_name, offset in zip(['start', 'mid', 'end'], sample_offsets, strict=False): # With 2D structure: select time offset, then reorder by cluster_order cs_at_offset = charge_state.isel(time=offset) # Shape: (cluster, ...) 
- # Reorder to original_period order using cluster_order indexer + # Reorder to original_cluster order using cluster_order indexer cs_t = cs_at_offset.isel(cluster=cluster_order) # Suppress xarray warning about index loss - we immediately assign new coords anyway with warnings.catch_warnings(): warnings.filterwarnings('ignore', message='.*does not create an index anymore.*') - cs_t = cs_t.rename({'cluster': 'original_period'}) - cs_t = cs_t.assign_coords(original_period=np.arange(n_original_periods)) + cs_t = cs_t.rename({'cluster': 'original_cluster'}) + cs_t = cs_t.assign_coords(original_cluster=np.arange(n_original_clusters)) # Apply decay factor (1-loss)^t to SOC_boundary per Eq. 9 decay_t = (1 - rel_loss) ** offset diff --git a/flixopt/config.py b/flixopt/config.py index 454f8ad3e..3bc3d5ebf 100644 --- a/flixopt/config.py +++ b/flixopt/config.py @@ -30,7 +30,7 @@ logging.addLevelName(SUCCESS_LEVEL, 'SUCCESS') # Deprecation removal version - update this when planning the next major version -DEPRECATION_REMOVAL_VERSION = '6.0.0' +DEPRECATION_REMOVAL_VERSION = '7.0.0' class MultilineFormatter(logging.Formatter): @@ -164,9 +164,8 @@ def format(self, record): 'default_sequential_colorscale': 'turbo', 'default_qualitative_colorscale': 'plotly', 'default_line_shape': 'hv', - 'extra_dim_priority': ('cluster', 'period', 'scenario'), - 'dim_slot_priority': ('facet_col', 'facet_row', 'animation_frame'), - 'x_dim_priority': ('time', 'duration', 'duration_pct', 'period', 'scenario', 'cluster'), + 'dim_priority': ('time', 'duration', 'duration_pct', 'variable', 'cluster', 'period', 'scenario'), + 'slot_priority': ('x', 'color', 'facet_col', 'facet_row', 'animation_frame'), } ), 'solving': MappingProxyType( @@ -562,9 +561,10 @@ class Plotting: default_facet_cols: Default number of columns for faceted plots. default_sequential_colorscale: Default colorscale for heatmaps and continuous data. default_qualitative_colorscale: Default colormap for categorical plots (bar/line/area charts). - extra_dim_priority: Order of extra dimensions when auto-assigning to slots. - dim_slot_priority: Order of slots to fill with extra dimensions. - x_dim_priority: Order of dimensions to prefer for x-axis when 'auto'. + dim_priority: Priority order for assigning dimensions to plot slots. + Dimensions are assigned to slots based on this order. + slot_priority: Order in which slots are filled during auto-assignment. + Default: x → color → facet_col → facet_row → animation_frame. Examples: ```python @@ -573,9 +573,11 @@ class Plotting: CONFIG.Plotting.default_sequential_colorscale = 'plasma' CONFIG.Plotting.default_qualitative_colorscale = 'Dark24' - # Customize dimension handling for faceting - CONFIG.Plotting.extra_dim_priority = ('scenario', 'period', 'cluster') - CONFIG.Plotting.dim_slot_priority = ('facet_row', 'facet_col', 'animation_frame') + # Customize dimension priority for auto-assignment + CONFIG.Plotting.dim_priority = ('time', 'scenario', 'variable', 'period', 'cluster') + + # Change slot fill order (e.g., prioritize facets over color) + CONFIG.Plotting.slot_priority = ('x', 'facet_col', 'facet_row', 'color', 'animation_frame') ``` """ @@ -586,9 +588,8 @@ class Plotting: default_sequential_colorscale: str = _DEFAULTS['plotting']['default_sequential_colorscale'] default_qualitative_colorscale: str = _DEFAULTS['plotting']['default_qualitative_colorscale'] default_line_shape: str = _DEFAULTS['plotting']['default_line_shape'] - extra_dim_priority: tuple[str, ...] 
= _DEFAULTS['plotting']['extra_dim_priority'] - dim_slot_priority: tuple[str, ...] = _DEFAULTS['plotting']['dim_slot_priority'] - x_dim_priority: tuple[str, ...] = _DEFAULTS['plotting']['x_dim_priority'] + dim_priority: tuple[str, ...] = _DEFAULTS['plotting']['dim_priority'] + slot_priority: tuple[str, ...] = _DEFAULTS['plotting']['slot_priority'] class Carriers: """Default carrier definitions for common energy types. @@ -690,9 +691,8 @@ def to_dict(cls) -> dict: 'default_sequential_colorscale': cls.Plotting.default_sequential_colorscale, 'default_qualitative_colorscale': cls.Plotting.default_qualitative_colorscale, 'default_line_shape': cls.Plotting.default_line_shape, - 'extra_dim_priority': cls.Plotting.extra_dim_priority, - 'dim_slot_priority': cls.Plotting.dim_slot_priority, - 'x_dim_priority': cls.Plotting.x_dim_priority, + 'dim_priority': cls.Plotting.dim_priority, + 'slot_priority': cls.Plotting.slot_priority, }, } diff --git a/flixopt/dataset_plot_accessor.py b/flixopt/dataset_plot_accessor.py index a022f3988..6c833e652 100644 --- a/flixopt/dataset_plot_accessor.py +++ b/flixopt/dataset_plot_accessor.py @@ -2,6 +2,7 @@ from __future__ import annotations +import warnings from typing import Any, Literal import pandas as pd @@ -13,59 +14,130 @@ from .config import CONFIG -def _get_x_dim(dims: list[str], x: str | Literal['auto'] | None = 'auto') -> str: - """Select x-axis dim from priority list, or 'variable' for scalar data.""" - if x and x != 'auto': - return x - - # Check priority list first - for dim in CONFIG.Plotting.x_dim_priority: - if dim in dims: - return dim - - # Fallback to first available dimension, or 'variable' for scalar data - return dims[0] if dims else 'variable' - - -def _resolve_auto_facets( +def assign_slots( ds: xr.Dataset, - facet_col: str | Literal['auto'] | None, - facet_row: str | Literal['auto'] | None, - animation_frame: str | Literal['auto'] | None = None, + *, + x: str | Literal['auto'] | None = 'auto', + color: str | Literal['auto'] | None = 'auto', + facet_col: str | Literal['auto'] | None = 'auto', + facet_row: str | Literal['auto'] | None = 'auto', + animation_frame: str | Literal['auto'] | None = 'auto', exclude_dims: set[str] | None = None, -) -> tuple[str | None, str | None, str | None]: - """Assign 'auto' facet slots from available dims using CONFIG priority lists.""" - # Get available extra dimensions with size > 1, excluding specified dims +) -> dict[str, str | None]: + """Assign dimensions to plot slots using CONFIG.Plotting.dim_priority. + + Dimensions are assigned in priority order to slots based on CONFIG.Plotting.slot_priority. + + Slot values: + - 'auto': auto-assign from available dims using priority + - None: skip this slot (not available for this plot type) + - str: use this specific dimension + + 'variable' is treated as a dimension when len(data_vars) > 1. It represents + the data_var names column in the melted DataFrame. + + Args: + ds: Dataset to analyze for available dimensions. + x: X-axis dimension. 'auto' assigns first available from priority. + color: Color grouping dimension. + facet_col: Column faceting dimension. + facet_row: Row faceting dimension. + animation_frame: Animation slider dimension. + exclude_dims: Dimensions to exclude from auto-assignment (e.g., already used for x elsewhere). + + Returns: + Dict with keys 'x', 'color', 'facet_col', 'facet_row', 'animation_frame' + and values being assigned dimension names (or None if slot skipped/unfilled). 
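+
+    Example:
+        A sketch with the default priority settings, assuming ``ds`` has sizable
+        ``time`` and ``scenario`` dims plus several data_vars (so 'variable' also
+        counts as an assignable dimension):
+
+        >>> slots = assign_slots(ds, x='time')
+        >>> # remaining dims fill the 'auto' slots in slot_priority order
+        >>> slots['color'], slots['facet_col']
+        ('variable', 'scenario')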
+    """
+    # Get available dimensions with size > 1, excluding specified dims
     exclude = exclude_dims or set()
     available = {d for d in ds.dims if ds.sizes[d] > 1 and d not in exclude}
-    extra_dims = [d for d in CONFIG.Plotting.extra_dim_priority if d in available]
-    used: set[str] = set()
+    # 'variable' is available when there are multiple data_vars (and not excluded)
+    if len(ds.data_vars) > 1 and 'variable' not in exclude:
+        available.add('variable')
+
+    # Get priority-ordered list of available dims
+    priority_dims = [d for d in CONFIG.Plotting.dim_priority if d in available]
+    # Add any available dims not in priority list (fallback)
+    priority_dims.extend(d for d in available if d not in priority_dims)
 
-    # Map slot names to their input values
+    # Slot specification
     slots = {
+        'x': x,
+        'color': color,
         'facet_col': facet_col,
         'facet_row': facet_row,
         'animation_frame': animation_frame,
     }
 
-    results: dict[str, str | None] = {'facet_col': None, 'facet_row': None, 'animation_frame': None}
+    # Slot fill order from config
+    slot_order = CONFIG.Plotting.slot_priority
+
+    results: dict[str, str | None] = {k: None for k in slot_order}
+    used: set[str] = set()
 
     # First pass: resolve explicit dimensions (not 'auto' or None) to mark them as used
     for slot_name, value in slots.items():
         if value is not None and value != 'auto':
-            if value in available and value not in used:
-                used.add(value)
-                results[slot_name] = value
-
-    # Second pass: resolve 'auto' slots in dim_slot_priority order
-    dim_iter = iter(d for d in extra_dims if d not in used)
-    for slot_name in CONFIG.Plotting.dim_slot_priority:
-        if slots.get(slot_name) == 'auto':
+            used.add(value)
+            results[slot_name] = value
+
+    # Second pass: resolve 'auto' slots in config-defined fill order
+    dim_iter = iter(d for d in priority_dims if d not in used)
+    for slot_name in slot_order:
+        if slots[slot_name] == 'auto':
             next_dim = next(dim_iter, None)
             if next_dim:
                 used.add(next_dim)
                 results[slot_name] = next_dim
 
-    return results['facet_col'], results['facet_row'], results['animation_frame']
+    # Warn if any dimensions were not assigned to any slot
+    unassigned = available - used
+    if unassigned:
+        available_slots = [k for k, v in slots.items() if v is not None]
+        unavailable_slots = [k for k, v in slots.items() if v is None]
+        if unavailable_slots:
+            warnings.warn(
+                f'Dimensions {unassigned} could not be assigned to any plot slot. '
+                f'Slots not available for this plot type: {unavailable_slots}. '
+                f'Reduce dimensions before plotting (e.g., .sel(), .isel(), .mean()).',
+                stacklevel=3,
+            )
+        else:
+            warnings.warn(
+                f'Dimensions {unassigned} could not be assigned to any plot slot ({available_slots}). '
+                f'Reduce dimensions before plotting (e.g., .sel(), .isel(), .mean()).',
+                stacklevel=3,
+            )
+
+    return results
+
+
+def _build_fig_kwargs(
+    slots: dict[str, str | None],
+    ds_sizes: dict[str, int],
+    px_kwargs: dict[str, Any],
+    facet_cols: int | None = None,
+) -> dict[str, Any]:
+    """Build plotly express kwargs from slot assignments.
+
+    Adds facet/animation args only if slots are assigned and not overridden in px_kwargs.
+    Handles facet_col_wrap based on dimension size.
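+
+    Example:
+        A sketch; whether ``facet_col_wrap`` appears depends on the facet
+        dimension's size relative to ``CONFIG.Plotting.default_facet_cols``:
+
+        >>> kwargs = _build_fig_kwargs({'color': 'variable', 'facet_col': 'period'}, {'period': 12}, px_kwargs={})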
+ """ + facet_col_wrap = facet_cols or CONFIG.Plotting.default_facet_cols + result: dict[str, Any] = {} + + # Add facet/animation kwargs from slots (skip if None or already in px_kwargs) + for slot in ('color', 'facet_col', 'facet_row', 'animation_frame'): + if slots.get(slot) and slot not in px_kwargs: + result[slot] = slots[slot] + + # Add facet_col_wrap if facet_col is set and dimension is large enough + if result.get('facet_col'): + dim_size = ds_sizes.get(result['facet_col'], facet_col_wrap + 1) + if facet_col_wrap < dim_size: + result['facet_col_wrap'] = facet_col_wrap + + return result def _dataset_to_long_df(ds: xr.Dataset, value_name: str = 'value', var_name: str = 'variable') -> pd.DataFrame: @@ -120,6 +192,7 @@ def bar( self, *, x: str | Literal['auto'] | None = 'auto', + color: str | Literal['auto'] | None = 'auto', colors: ColorType | None = None, title: str = '', xlabel: str = '', @@ -128,12 +201,15 @@ def bar( facet_row: str | Literal['auto'] | None = 'auto', animation_frame: str | Literal['auto'] | None = 'auto', facet_cols: int | None = None, + exclude_dims: set[str] | None = None, **px_kwargs: Any, ) -> go.Figure: """Create a grouped bar chart from the dataset. Args: - x: Dimension for x-axis. 'auto' uses CONFIG.Plotting.x_dim_priority. + x: Dimension for x-axis. 'auto' uses CONFIG.Plotting.dim_priority. + color: Dimension for color grouping. 'auto' uses 'variable' (data_var names) + if available, otherwise uses CONFIG priority. colors: Color specification (colorscale name, color list, or dict mapping). title: Plot title. xlabel: X-axis label. @@ -142,57 +218,46 @@ def bar( facet_row: Dimension for row facets. 'auto' uses CONFIG priority. animation_frame: Dimension for animation slider. facet_cols: Number of columns in facet grid wrap. + exclude_dims: Dimensions to exclude from auto-assignment. **px_kwargs: Additional arguments passed to plotly.express.bar. Returns: Plotly Figure. 
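+
+        Example:
+            A sketch on an arbitrary Dataset; dimension names are illustrative.
+
+            >>> fig = ds.fxplot.bar(x='period', color='variable', title='Installed sizes')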
""" - # Determine x-axis first, then resolve facets from remaining dims - dims = list(self._ds.dims) - x_col = _get_x_dim(dims, x) - actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( - self._ds, facet_col, facet_row, animation_frame, exclude_dims={x_col} + slots = assign_slots( + self._ds, + x=x, + color=color, + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, + exclude_dims=exclude_dims, ) - df = _dataset_to_long_df(self._ds) if df.empty: return go.Figure() - variables = df['variable'].unique().tolist() - color_map = process_colors(colors, variables, default_colorscale=CONFIG.Plotting.default_qualitative_colorscale) + color_labels = df[slots['color']].unique().tolist() if slots['color'] and slots['color'] in df.columns else [] + color_map = process_colors(colors, color_labels, CONFIG.Plotting.default_qualitative_colorscale) - facet_col_wrap = facet_cols or CONFIG.Plotting.default_facet_cols - fig_kwargs: dict[str, Any] = { + labels = {**(({slots['x']: xlabel}) if xlabel and slots['x'] else {}), **({'value': ylabel} if ylabel else {})} + fig_kwargs = { 'data_frame': df, - 'x': x_col, + 'x': slots['x'], 'y': 'value', 'title': title, 'barmode': 'group', + 'color_discrete_map': color_map, + **({'labels': labels} if labels else {}), + **_build_fig_kwargs(slots, dict(self._ds.sizes), px_kwargs, facet_cols), } - # Only color by variable if it's not already on x-axis (and user didn't override) - if x_col != 'variable' and 'color' not in px_kwargs: - fig_kwargs['color'] = 'variable' - fig_kwargs['color_discrete_map'] = color_map - if xlabel: - fig_kwargs['labels'] = {x_col: xlabel} - if ylabel: - fig_kwargs['labels'] = {**fig_kwargs.get('labels', {}), 'value': ylabel} - - if actual_facet_col and 'facet_col' not in px_kwargs: - fig_kwargs['facet_col'] = actual_facet_col - if facet_col_wrap < self._ds.sizes.get(actual_facet_col, facet_col_wrap + 1): - fig_kwargs['facet_col_wrap'] = facet_col_wrap - if actual_facet_row and 'facet_row' not in px_kwargs: - fig_kwargs['facet_row'] = actual_facet_row - if actual_anim and 'animation_frame' not in px_kwargs: - fig_kwargs['animation_frame'] = actual_anim - return px.bar(**{**fig_kwargs, **px_kwargs}) def stacked_bar( self, *, x: str | Literal['auto'] | None = 'auto', + color: str | Literal['auto'] | None = 'auto', colors: ColorType | None = None, title: str = '', xlabel: str = '', @@ -201,6 +266,7 @@ def stacked_bar( facet_row: str | Literal['auto'] | None = 'auto', animation_frame: str | Literal['auto'] | None = 'auto', facet_cols: int | None = None, + exclude_dims: set[str] | None = None, **px_kwargs: Any, ) -> go.Figure: """Create a stacked bar chart from the dataset. @@ -209,7 +275,9 @@ def stacked_bar( values are stacked separately. Args: - x: Dimension for x-axis. 'auto' uses CONFIG.Plotting.x_dim_priority. + x: Dimension for x-axis. 'auto' uses CONFIG.Plotting.dim_priority. + color: Dimension for color grouping. 'auto' uses 'variable' (data_var names) + if available, otherwise uses CONFIG priority. colors: Color specification (colorscale name, color list, or dict mapping). title: Plot title. xlabel: X-axis label. @@ -223,45 +291,32 @@ def stacked_bar( Returns: Plotly Figure. 
""" - # Determine x-axis first, then resolve facets from remaining dims - dims = list(self._ds.dims) - x_col = _get_x_dim(dims, x) - actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( - self._ds, facet_col, facet_row, animation_frame, exclude_dims={x_col} + slots = assign_slots( + self._ds, + x=x, + color=color, + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, + exclude_dims=exclude_dims, ) - df = _dataset_to_long_df(self._ds) if df.empty: return go.Figure() - variables = df['variable'].unique().tolist() - color_map = process_colors(colors, variables, default_colorscale=CONFIG.Plotting.default_qualitative_colorscale) + color_labels = df[slots['color']].unique().tolist() if slots['color'] and slots['color'] in df.columns else [] + color_map = process_colors(colors, color_labels, CONFIG.Plotting.default_qualitative_colorscale) - facet_col_wrap = facet_cols or CONFIG.Plotting.default_facet_cols - fig_kwargs: dict[str, Any] = { + labels = {**(({slots['x']: xlabel}) if xlabel and slots['x'] else {}), **({'value': ylabel} if ylabel else {})} + fig_kwargs = { 'data_frame': df, - 'x': x_col, + 'x': slots['x'], 'y': 'value', 'title': title, + 'color_discrete_map': color_map, + **({'labels': labels} if labels else {}), + **_build_fig_kwargs(slots, dict(self._ds.sizes), px_kwargs, facet_cols), } - # Only color by variable if it's not already on x-axis (and user didn't override) - if x_col != 'variable' and 'color' not in px_kwargs: - fig_kwargs['color'] = 'variable' - fig_kwargs['color_discrete_map'] = color_map - if xlabel: - fig_kwargs['labels'] = {x_col: xlabel} - if ylabel: - fig_kwargs['labels'] = {**fig_kwargs.get('labels', {}), 'value': ylabel} - - if actual_facet_col and 'facet_col' not in px_kwargs: - fig_kwargs['facet_col'] = actual_facet_col - if facet_col_wrap < self._ds.sizes.get(actual_facet_col, facet_col_wrap + 1): - fig_kwargs['facet_col_wrap'] = facet_col_wrap - if actual_facet_row and 'facet_row' not in px_kwargs: - fig_kwargs['facet_row'] = actual_facet_row - if actual_anim and 'animation_frame' not in px_kwargs: - fig_kwargs['animation_frame'] = actual_anim - fig = px.bar(**{**fig_kwargs, **px_kwargs}) fig.update_layout(barmode='relative', bargap=0, bargroupgap=0) fig.update_traces(marker_line_width=0) @@ -271,6 +326,7 @@ def line( self, *, x: str | Literal['auto'] | None = 'auto', + color: str | Literal['auto'] | None = 'auto', colors: ColorType | None = None, title: str = '', xlabel: str = '', @@ -280,6 +336,7 @@ def line( animation_frame: str | Literal['auto'] | None = 'auto', facet_cols: int | None = None, line_shape: str | None = None, + exclude_dims: set[str] | None = None, **px_kwargs: Any, ) -> go.Figure: """Create a line chart from the dataset. @@ -287,7 +344,9 @@ def line( Each variable in the dataset becomes a separate line. Args: - x: Dimension for x-axis. 'auto' uses CONFIG.Plotting.x_dim_priority. + x: Dimension for x-axis. 'auto' uses CONFIG.Plotting.dim_priority. + color: Dimension for color grouping. 'auto' uses 'variable' (data_var names) + if available, otherwise uses CONFIG priority. colors: Color specification (colorscale name, color list, or dict mapping). title: Plot title. xlabel: X-axis label. @@ -303,52 +362,40 @@ def line( Returns: Plotly Figure. 
""" - # Determine x-axis first, then resolve facets from remaining dims - dims = list(self._ds.dims) - x_col = _get_x_dim(dims, x) - actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( - self._ds, facet_col, facet_row, animation_frame, exclude_dims={x_col} + slots = assign_slots( + self._ds, + x=x, + color=color, + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, + exclude_dims=exclude_dims, ) - df = _dataset_to_long_df(self._ds) if df.empty: return go.Figure() - variables = df['variable'].unique().tolist() - color_map = process_colors(colors, variables, default_colorscale=CONFIG.Plotting.default_qualitative_colorscale) + color_labels = df[slots['color']].unique().tolist() if slots['color'] and slots['color'] in df.columns else [] + color_map = process_colors(colors, color_labels, CONFIG.Plotting.default_qualitative_colorscale) - facet_col_wrap = facet_cols or CONFIG.Plotting.default_facet_cols - fig_kwargs: dict[str, Any] = { + labels = {**(({slots['x']: xlabel}) if xlabel and slots['x'] else {}), **({'value': ylabel} if ylabel else {})} + fig_kwargs = { 'data_frame': df, - 'x': x_col, + 'x': slots['x'], 'y': 'value', 'title': title, 'line_shape': line_shape or CONFIG.Plotting.default_line_shape, + 'color_discrete_map': color_map, + **({'labels': labels} if labels else {}), + **_build_fig_kwargs(slots, dict(self._ds.sizes), px_kwargs, facet_cols), } - # Only color by variable if it's not already on x-axis (and user didn't override) - if x_col != 'variable' and 'color' not in px_kwargs: - fig_kwargs['color'] = 'variable' - fig_kwargs['color_discrete_map'] = color_map - if xlabel: - fig_kwargs['labels'] = {x_col: xlabel} - if ylabel: - fig_kwargs['labels'] = {**fig_kwargs.get('labels', {}), 'value': ylabel} - - if actual_facet_col and 'facet_col' not in px_kwargs: - fig_kwargs['facet_col'] = actual_facet_col - if facet_col_wrap < self._ds.sizes.get(actual_facet_col, facet_col_wrap + 1): - fig_kwargs['facet_col_wrap'] = facet_col_wrap - if actual_facet_row and 'facet_row' not in px_kwargs: - fig_kwargs['facet_row'] = actual_facet_row - if actual_anim and 'animation_frame' not in px_kwargs: - fig_kwargs['animation_frame'] = actual_anim - return px.line(**{**fig_kwargs, **px_kwargs}) def area( self, *, x: str | Literal['auto'] | None = 'auto', + color: str | Literal['auto'] | None = 'auto', colors: ColorType | None = None, title: str = '', xlabel: str = '', @@ -358,12 +405,15 @@ def area( animation_frame: str | Literal['auto'] | None = 'auto', facet_cols: int | None = None, line_shape: str | None = None, + exclude_dims: set[str] | None = None, **px_kwargs: Any, ) -> go.Figure: """Create a stacked area chart from the dataset. Args: - x: Dimension for x-axis. 'auto' uses CONFIG.Plotting.x_dim_priority. + x: Dimension for x-axis. 'auto' uses CONFIG.Plotting.dim_priority. + color: Dimension for color grouping. 'auto' uses 'variable' (data_var names) + if available, otherwise uses CONFIG priority. colors: Color specification (colorscale name, color list, or dict mapping). title: Plot title. xlabel: X-axis label. @@ -378,46 +428,33 @@ def area( Returns: Plotly Figure. 
""" - # Determine x-axis first, then resolve facets from remaining dims - dims = list(self._ds.dims) - x_col = _get_x_dim(dims, x) - actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( - self._ds, facet_col, facet_row, animation_frame, exclude_dims={x_col} + slots = assign_slots( + self._ds, + x=x, + color=color, + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, + exclude_dims=exclude_dims, ) - df = _dataset_to_long_df(self._ds) if df.empty: return go.Figure() - variables = df['variable'].unique().tolist() - color_map = process_colors(colors, variables, default_colorscale=CONFIG.Plotting.default_qualitative_colorscale) + color_labels = df[slots['color']].unique().tolist() if slots['color'] and slots['color'] in df.columns else [] + color_map = process_colors(colors, color_labels, CONFIG.Plotting.default_qualitative_colorscale) - facet_col_wrap = facet_cols or CONFIG.Plotting.default_facet_cols - fig_kwargs: dict[str, Any] = { + labels = {**(({slots['x']: xlabel}) if xlabel and slots['x'] else {}), **({'value': ylabel} if ylabel else {})} + fig_kwargs = { 'data_frame': df, - 'x': x_col, + 'x': slots['x'], 'y': 'value', 'title': title, 'line_shape': line_shape or CONFIG.Plotting.default_line_shape, + 'color_discrete_map': color_map, + **({'labels': labels} if labels else {}), + **_build_fig_kwargs(slots, dict(self._ds.sizes), px_kwargs, facet_cols), } - # Only color by variable if it's not already on x-axis (and user didn't override) - if x_col != 'variable' and 'color' not in px_kwargs: - fig_kwargs['color'] = 'variable' - fig_kwargs['color_discrete_map'] = color_map - if xlabel: - fig_kwargs['labels'] = {x_col: xlabel} - if ylabel: - fig_kwargs['labels'] = {**fig_kwargs.get('labels', {}), 'value': ylabel} - - if actual_facet_col and 'facet_col' not in px_kwargs: - fig_kwargs['facet_col'] = actual_facet_col - if facet_col_wrap < self._ds.sizes.get(actual_facet_col, facet_col_wrap + 1): - fig_kwargs['facet_col_wrap'] = facet_col_wrap - if actual_facet_row and 'facet_row' not in px_kwargs: - fig_kwargs['facet_row'] = actual_facet_row - if actual_anim and 'animation_frame' not in px_kwargs: - fig_kwargs['animation_frame'] = actual_anim - return px.area(**{**fig_kwargs, **px_kwargs}) def heatmap( @@ -467,21 +504,54 @@ def heatmap( colors = colors or CONFIG.Plotting.default_sequential_colorscale facet_col_wrap = facet_cols or CONFIG.Plotting.default_facet_cols - actual_facet_col, _, actual_anim = _resolve_auto_facets(self._ds, facet_col, None, animation_frame) + # Heatmap uses imshow - first 2 dims are the x/y axes of the heatmap + # Only call assign_slots if we need to resolve 'auto' values + if facet_col == 'auto' or animation_frame == 'auto': + heatmap_axes = set(list(da.dims)[:2]) if len(da.dims) >= 2 else set() + slots = assign_slots( + self._ds, + x=None, + color=None, + facet_col=facet_col, + facet_row=None, + animation_frame=animation_frame, + exclude_dims=heatmap_axes, + ) + resolved_facet = slots['facet_col'] + resolved_animation = slots['animation_frame'] + else: + # Values already resolved (or None), use directly without re-resolving + resolved_facet = facet_col + resolved_animation = animation_frame imshow_args: dict[str, Any] = { - 'img': da, 'color_continuous_scale': colors, 'title': title or variable, } - if actual_facet_col and actual_facet_col in da.dims: - imshow_args['facet_col'] = actual_facet_col - if facet_col_wrap < da.sizes[actual_facet_col]: + if resolved_facet and resolved_facet in da.dims: + imshow_args['facet_col'] = 
resolved_facet + if facet_col_wrap < da.sizes[resolved_facet]: imshow_args['facet_col_wrap'] = facet_col_wrap - if actual_anim and actual_anim in da.dims: - imshow_args['animation_frame'] = actual_anim + if resolved_animation and resolved_animation in da.dims: + imshow_args['animation_frame'] = resolved_animation + + # Squeeze singleton dimensions not used for faceting/animation + # px.imshow can't handle extra singleton dims in multi-dimensional data + dims_to_preserve = set(list(da.dims)[:2]) # First 2 dims are heatmap x/y axes + if resolved_facet and resolved_facet in da.dims: + dims_to_preserve.add(resolved_facet) + if resolved_animation and resolved_animation in da.dims: + dims_to_preserve.add(resolved_animation) + for dim in list(da.dims): + if dim not in dims_to_preserve and da.sizes[dim] == 1: + da = da.squeeze(dim) + imshow_args['img'] = da + + # Use binary_string=False to handle non-numeric coords (e.g., string labels) + if 'binary_string' not in imshow_kwargs: + imshow_args['binary_string'] = False return px.imshow(**{**imshow_args, **imshow_kwargs}) @@ -525,8 +595,9 @@ def scatter( if df.empty: return go.Figure() - actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( - self._ds, facet_col, facet_row, animation_frame + # Scatter uses explicit x/y variable names, not dimensions + slots = assign_slots( + self._ds, x=None, color=None, facet_col=facet_col, facet_row=facet_row, animation_frame=animation_frame ) facet_col_wrap = facet_cols or CONFIG.Plotting.default_facet_cols @@ -542,14 +613,16 @@ def scatter( if ylabel: fig_kwargs['labels'] = {**fig_kwargs.get('labels', {}), y: ylabel} - if actual_facet_col: - fig_kwargs['facet_col'] = actual_facet_col - if facet_col_wrap < self._ds.sizes.get(actual_facet_col, facet_col_wrap + 1): + # Only use facets if the column actually exists in the dataframe + # (scatter uses wide format, so 'variable' column doesn't exist) + if slots['facet_col'] and slots['facet_col'] in df.columns: + fig_kwargs['facet_col'] = slots['facet_col'] + if facet_col_wrap < self._ds.sizes.get(slots['facet_col'], facet_col_wrap + 1): fig_kwargs['facet_col_wrap'] = facet_col_wrap - if actual_facet_row: - fig_kwargs['facet_row'] = actual_facet_row - if actual_anim: - fig_kwargs['animation_frame'] = actual_anim + if slots['facet_row'] and slots['facet_row'] in df.columns: + fig_kwargs['facet_row'] = slots['facet_row'] + if slots['animation_frame'] and slots['animation_frame'] in df.columns: + fig_kwargs['animation_frame'] = slots['animation_frame'] return px.scatter(**fig_kwargs) @@ -568,6 +641,9 @@ def pie( Extra dimensions are auto-assigned to facet_col and facet_row. For scalar values, a single pie is shown. + Note: + ``px.pie()`` does not support animation_frame, so only facets are available. + Args: colors: Color specification (colorscale name, color list, or dict mapping). title: Plot title. 
@@ -602,13 +678,15 @@ def pie( **px_kwargs, ) - # Multi-dimensional case - faceted/animated pies + # Multi-dimensional case - faceted pies (px.pie doesn't support animation_frame) df = _dataset_to_long_df(self._ds) if df.empty: return go.Figure() - # Note: px.pie doesn't support animation_frame - actual_facet_col, actual_facet_row, _ = _resolve_auto_facets(self._ds, facet_col, facet_row, None) + # Pie uses 'variable' for names and 'value' for values, no x/color/animation_frame + slots = assign_slots( + self._ds, x=None, color=None, facet_col=facet_col, facet_row=facet_row, animation_frame=None + ) facet_col_wrap = facet_cols or CONFIG.Plotting.default_facet_cols fig_kwargs: dict[str, Any] = { @@ -621,12 +699,12 @@ def pie( **px_kwargs, } - if actual_facet_col: - fig_kwargs['facet_col'] = actual_facet_col - if facet_col_wrap < self._ds.sizes.get(actual_facet_col, facet_col_wrap + 1): + if slots['facet_col']: + fig_kwargs['facet_col'] = slots['facet_col'] + if facet_col_wrap < self._ds.sizes.get(slots['facet_col'], facet_col_wrap + 1): fig_kwargs['facet_col_wrap'] = facet_col_wrap - if actual_facet_row: - fig_kwargs['facet_row'] = actual_facet_row + if slots['facet_row']: + fig_kwargs['facet_row'] = slots['facet_row'] return px.pie(**fig_kwargs) @@ -758,6 +836,7 @@ def stacked_bar( facet_row: str | Literal['auto'] | None = 'auto', animation_frame: str | Literal['auto'] | None = 'auto', facet_cols: int | None = None, + exclude_dims: set[str] | None = None, **px_kwargs: Any, ) -> go.Figure: """Create a stacked bar chart. See DatasetPlotAccessor.stacked_bar for details.""" @@ -865,22 +944,54 @@ def heatmap( colors = colors or CONFIG.Plotting.default_sequential_colorscale facet_col_wrap = facet_cols or CONFIG.Plotting.default_facet_cols - # Use Dataset for facet resolution - ds_for_resolution = da.to_dataset(name='_temp') - actual_facet_col, _, actual_anim = _resolve_auto_facets(ds_for_resolution, facet_col, None, animation_frame) + # Heatmap uses imshow - first 2 dims are the x/y axes of the heatmap + # Only call assign_slots if we need to resolve 'auto' values + if facet_col == 'auto' or animation_frame == 'auto': + heatmap_axes = set(list(da.dims)[:2]) if len(da.dims) >= 2 else set() + ds_for_resolution = da.to_dataset(name='_temp') + slots = assign_slots( + ds_for_resolution, + x=None, + color=None, + facet_col=facet_col, + facet_row=None, + animation_frame=animation_frame, + exclude_dims=heatmap_axes, + ) + resolved_facet = slots['facet_col'] + resolved_animation = slots['animation_frame'] + else: + # Values already resolved (or None), use directly without re-resolving + resolved_facet = facet_col + resolved_animation = animation_frame imshow_args: dict[str, Any] = { - 'img': da, 'color_continuous_scale': colors, 'title': title or (da.name if da.name else ''), } - if actual_facet_col and actual_facet_col in da.dims: - imshow_args['facet_col'] = actual_facet_col - if facet_col_wrap < da.sizes[actual_facet_col]: + if resolved_facet and resolved_facet in da.dims: + imshow_args['facet_col'] = resolved_facet + if facet_col_wrap < da.sizes[resolved_facet]: imshow_args['facet_col_wrap'] = facet_col_wrap - if actual_anim and actual_anim in da.dims: - imshow_args['animation_frame'] = actual_anim + if resolved_animation and resolved_animation in da.dims: + imshow_args['animation_frame'] = resolved_animation + + # Squeeze singleton dimensions not used for faceting/animation + # px.imshow can't handle extra singleton dims in multi-dimensional data + dims_to_preserve = set(list(da.dims)[:2]) # First 2 
dims are heatmap x/y axes + if resolved_facet and resolved_facet in da.dims: + dims_to_preserve.add(resolved_facet) + if resolved_animation and resolved_animation in da.dims: + dims_to_preserve.add(resolved_animation) + for dim in list(da.dims): + if dim not in dims_to_preserve and da.sizes[dim] == 1: + da = da.squeeze(dim) + imshow_args['img'] = da + + # Use binary_string=False to handle non-numeric coords (e.g., string labels) + if 'binary_string' not in imshow_kwargs: + imshow_args['binary_string'] = False return px.imshow(**{**imshow_args, **imshow_kwargs}) diff --git a/flixopt/statistics_accessor.py b/flixopt/statistics_accessor.py index 382ed1bf0..e3581e4e3 100644 --- a/flixopt/statistics_accessor.py +++ b/flixopt/statistics_accessor.py @@ -127,6 +127,50 @@ def _reshape_time_for_heatmap( # --- Helper functions --- +def _prepare_for_heatmap( + da: xr.DataArray, + reshape: tuple[str, str] | Literal['auto'] | None, + facet_col: str | Literal['auto'] | None, + animation_frame: str | Literal['auto'] | None, +) -> xr.DataArray: + """Prepare DataArray for heatmap: determine axes, reshape if needed, transpose/squeeze.""" + + def finalize(da: xr.DataArray, heatmap_dims: list[str]) -> xr.DataArray: + """Transpose, squeeze, and clear name if needed.""" + other = [d for d in da.dims if d not in heatmap_dims] + da = da.transpose(*[d for d in heatmap_dims if d in da.dims], *other) + for dim in [d for d in da.dims if d not in heatmap_dims and da.sizes[d] == 1]: + da = da.squeeze(dim, drop=True) + return da.rename('') if da.sizes.get('variable', 1) > 1 else da + + def fallback_dims() -> list[str]: + """Default dims: (variable, time) if multi-var, else first 2 dims with size > 1.""" + if da.sizes.get('variable', 1) > 1: + return ['variable', 'time'] + dims = [d for d in da.dims if da.sizes[d] > 1][:2] + return dims if len(dims) >= 2 else list(da.dims)[:2] + + is_clustered = 'cluster' in da.dims and da.sizes['cluster'] > 1 + has_time = 'time' in da.dims + + # Clustered: use (time, cluster) as natural 2D + if is_clustered and reshape in ('auto', None): + return finalize(da, ['time', 'cluster']) + + # Explicit reshape: always apply + if reshape and reshape != 'auto' and has_time: + return finalize(_reshape_time_for_heatmap(da, reshape), ['timestep', 'timeframe']) + + # Auto reshape (non-clustered): apply only if extra dims fit in available slots + if reshape == 'auto' and has_time: + extra = [d for d in da.dims if d not in ('time', 'variable') and da.sizes[d] > 1] + slots = (facet_col == 'auto') + (animation_frame == 'auto') + if len(extra) <= slots: + return finalize(_reshape_time_for_heatmap(da, ('D', 'h')), ['timestep', 'timeframe']) + + return finalize(da, fallback_dims()) + + def _filter_by_pattern( names: list[str], include: FilterType | None, @@ -180,73 +224,6 @@ def _filter_by_carrier(ds: xr.Dataset, carrier: str | list[str] | None) -> xr.Da return ds[matching_vars] if matching_vars else xr.Dataset() -def _resolve_auto_facets( - ds: xr.Dataset, - facet_col: str | Literal['auto'] | None, - facet_row: str | Literal['auto'] | None, - animation_frame: str | Literal['auto'] | None = None, -) -> tuple[str | None, str | None, str | None]: - """Resolve 'auto' facet/animation dimensions based on available data dimensions. - - When 'auto' is specified, extra dimensions are assigned to slots based on: - - CONFIG.Plotting.extra_dim_priority: Order of dimensions to assign. - - CONFIG.Plotting.dim_slot_priority: Order of slots to fill. - - Args: - ds: Dataset to check for available dimensions. 
- facet_col: Dimension name, 'auto', or None. - facet_row: Dimension name, 'auto', or None. - animation_frame: Dimension name, 'auto', or None. - - Returns: - Tuple of (resolved_facet_col, resolved_facet_row, resolved_animation_frame). - Each is either a valid dimension name or None. - """ - # Get available extra dimensions with size > 1, sorted by priority - available = {d for d in ds.dims if ds.sizes[d] > 1} - extra_dims = [d for d in CONFIG.Plotting.extra_dim_priority if d in available] - used: set[str] = set() - - # Map slot names to their input values - slots = { - 'facet_col': facet_col, - 'facet_row': facet_row, - 'animation_frame': animation_frame, - } - results: dict[str, str | None] = {'facet_col': None, 'facet_row': None, 'animation_frame': None} - - # First pass: resolve explicit dimensions (not 'auto' or None) to mark them as used - for slot_name, value in slots.items(): - if value is not None and value != 'auto': - if value in available and value not in used: - used.add(value) - results[slot_name] = value - - # Second pass: resolve 'auto' slots in dim_slot_priority order - dim_iter = iter(d for d in extra_dims if d not in used) - for slot_name in CONFIG.Plotting.dim_slot_priority: - if slots.get(slot_name) == 'auto': - next_dim = next(dim_iter, None) - if next_dim: - used.add(next_dim) - results[slot_name] = next_dim - - return results['facet_col'], results['facet_row'], results['animation_frame'] - - -def _resolve_facets( - ds: xr.Dataset, - facet_col: str | Literal['auto'] | None, - facet_row: str | Literal['auto'] | None, -) -> tuple[str | None, str | None]: - """Resolve facet dimensions, returning None if not present in data. - - Legacy wrapper for _resolve_auto_facets for backward compatibility. - """ - resolved_col, resolved_row, _ = _resolve_auto_facets(ds, facet_col, facet_row, None) - return resolved_col, resolved_row - - def _dataset_to_long_df(ds: xr.Dataset, value_name: str = 'value', var_name: str = 'variable') -> pd.DataFrame: """Convert xarray Dataset to long-form DataFrame for plotly express.""" if not ds.data_vars: @@ -1382,9 +1359,6 @@ def balance( ds[label] = -ds[label] ds = _apply_selection(ds, select) - actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( - ds, facet_col, facet_row, animation_frame - ) # Build color map from Element.color attributes if no colors specified if colors is None: @@ -1399,9 +1373,9 @@ def balance( fig = ds.fxplot.stacked_bar( colors=colors, title=f'{node} [{unit_label}]' if unit_label else node, - facet_col=actual_facet_col, - facet_row=actual_facet_row, - animation_frame=actual_anim, + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, **plotly_kwargs, ) @@ -1493,9 +1467,6 @@ def carrier_balance( ds[label] = -ds[label] ds = _apply_selection(ds, select) - actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( - ds, facet_col, facet_row, animation_frame - ) # Use cached component colors for flows if colors is None: @@ -1523,9 +1494,9 @@ def carrier_balance( fig = ds.fxplot.stacked_bar( colors=colors, title=f'{carrier.capitalize()} Balance [{unit_label}]' if unit_label else f'{carrier.capitalize()} Balance', - facet_col=actual_facet_col, - facet_row=actual_facet_row, - animation_frame=actual_anim, + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, **plotly_kwargs, ) @@ -1544,7 +1515,7 @@ def heatmap( reshape: tuple[str, str] | Literal['auto'] | None = 'auto', colors: str | list[str] | None = None, facet_col: str | Literal['auto'] | None 
= 'auto', - animation_frame: str | Literal['auto'] | None = None, + animation_frame: str | Literal['auto'] | None = 'auto', show: bool | None = None, **plotly_kwargs: Any, ) -> PlotResult: @@ -1576,97 +1547,25 @@ def heatmap( PlotResult with processed data and figure. """ solution = self._stats._require_solution() - if isinstance(variables, str): variables = [variables] - # Resolve flow labels to variable names - resolved_variables = self._resolve_variable_names(variables, solution) - - ds = solution[resolved_variables] - ds = _apply_selection(ds, select) - - # Stack variables into single DataArray - variable_names = list(ds.data_vars) - dataarrays = [ds[var] for var in variable_names] - da = xr.concat(dataarrays, dim=pd.Index(variable_names, name='variable')) - - # Check if data is clustered (has cluster dimension with size > 1) - is_clustered = 'cluster' in da.dims and da.sizes['cluster'] > 1 - - # Determine facet and animation from available dims - has_multiple_vars = 'variable' in da.dims and da.sizes['variable'] > 1 - - if has_multiple_vars: - actual_facet = 'variable' - # Resolve animation using auto logic, excluding 'variable' which is used for facet - _, _, actual_animation = _resolve_auto_facets(da.to_dataset(name='value'), None, None, animation_frame) - if actual_animation == 'variable': - actual_animation = None - else: - # Resolve facet and animation using auto logic - actual_facet, _, actual_animation = _resolve_auto_facets( - da.to_dataset(name='value'), facet_col, None, animation_frame - ) - - # Determine heatmap dimensions based on data structure - if is_clustered and (reshape == 'auto' or reshape is None): - # Clustered data: use (time, cluster) as natural 2D heatmap axes - heatmap_dims = ['time', 'cluster'] - elif reshape and reshape != 'auto' and 'time' in da.dims: - # Non-clustered with explicit reshape: reshape time to (day, hour) etc. 
- # Extra dims will be handled via facet/animation or dropped - da = _reshape_time_for_heatmap(da, reshape) - heatmap_dims = ['timestep', 'timeframe'] - elif reshape == 'auto' and 'time' in da.dims and not is_clustered: - # Auto mode for non-clustered: use default ('D', 'h') reshape - # Extra dims will be handled via facet/animation or dropped - da = _reshape_time_for_heatmap(da, ('D', 'h')) - heatmap_dims = ['timestep', 'timeframe'] - elif has_multiple_vars: - # Can't reshape but have multiple vars: use variable + time as heatmap axes - heatmap_dims = ['variable', 'time'] - # variable is now a heatmap dim, use period/scenario for facet/animation - actual_facet, _, actual_animation = _resolve_auto_facets( - da.to_dataset(name='value'), facet_col, None, animation_frame - ) - else: - # Fallback: use first two available dimensions - available_dims = [d for d in da.dims if da.sizes[d] > 1] - if len(available_dims) >= 2: - heatmap_dims = available_dims[:2] - elif 'time' in da.dims: - heatmap_dims = ['time'] - else: - heatmap_dims = list(da.dims)[:1] - - # Keep only dims we need - keep_dims = set(heatmap_dims) | {d for d in [actual_facet, actual_animation] if d is not None} - for dim in [d for d in da.dims if d not in keep_dims]: - da = da.isel({dim: 0}, drop=True) if da.sizes[dim] > 1 else da.squeeze(dim, drop=True) + # Resolve, select, and stack into single DataArray + resolved = self._resolve_variable_names(variables, solution) + ds = _apply_selection(solution[resolved], select) + da = xr.concat([ds[v] for v in ds.data_vars], dim=pd.Index(list(ds.data_vars), name='variable')) - # Transpose to expected order - dim_order = heatmap_dims + [d for d in [actual_facet, actual_animation] if d] - da = da.transpose(*dim_order) + # Prepare for heatmap (reshape, transpose, squeeze) + da = _prepare_for_heatmap(da, reshape, facet_col, animation_frame) - # Clear name for multiple variables (colorbar would show first var's name) - if has_multiple_vars: - da = da.rename('') - - fig = da.fxplot.heatmap( - colors=colors, - facet_col=actual_facet, - animation_frame=actual_animation, - **plotly_kwargs, - ) + fig = da.fxplot.heatmap(colors=colors, facet_col=facet_col, animation_frame=animation_frame, **plotly_kwargs) if show is None: show = CONFIG.Plotting.default_show if show: fig.show() - reshaped_ds = da.to_dataset(name='value') if isinstance(da, xr.DataArray) else da - return PlotResult(data=reshaped_ds, figure=fig) + return PlotResult(data=da.to_dataset(name='value'), figure=fig) def flows( self, @@ -1737,9 +1636,6 @@ def flows( ds = ds[[lbl for lbl in matching_labels if lbl in ds]] ds = _apply_selection(ds, select) - actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( - ds, facet_col, facet_row, animation_frame - ) # Get unit label from first data variable's attributes unit_label = '' @@ -1750,9 +1646,9 @@ def flows( fig = ds.fxplot.line( colors=colors, title=f'Flows [{unit_label}]' if unit_label else 'Flows', - facet_col=actual_facet_col, - facet_row=actual_facet_row, - animation_frame=actual_anim, + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, **plotly_kwargs, ) @@ -1798,27 +1694,18 @@ def sizes( valid_labels = [lbl for lbl in ds.data_vars if float(ds[lbl].max()) < max_size] ds = ds[valid_labels] - actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( - ds, facet_col, facet_row, animation_frame - ) - - df = _dataset_to_long_df(ds) - if df.empty: + if not ds.data_vars: fig = go.Figure() else: - variables = 
df['variable'].unique().tolist() - color_map = process_colors(colors, variables) - fig = px.bar( - df, + fig = ds.fxplot.bar( x='variable', - y='value', color='variable', - facet_col=actual_facet_col, - facet_row=actual_facet_row, - animation_frame=actual_anim, - color_discrete_map=color_map, + colors=colors, title='Investment Sizes', - labels={'variable': 'Flow', 'value': 'Size'}, + ylabel='Size', + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, **plotly_kwargs, ) @@ -1913,10 +1800,6 @@ def sort_descending(arr: np.ndarray) -> np.ndarray: duration_coord = np.linspace(0, 100, n_timesteps) if normalize else np.arange(n_timesteps) result_ds = result_ds.assign_coords({duration_name: duration_coord}) - actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( - result_ds, facet_col, facet_row, animation_frame - ) - # Get unit label from first data variable's attributes unit_label = '' if ds.data_vars: @@ -1926,9 +1809,9 @@ def sort_descending(arr: np.ndarray) -> np.ndarray: fig = result_ds.fxplot.line( colors=colors, title=f'Duration Curve [{unit_label}]' if unit_label else 'Duration Curve', - facet_col=actual_facet_col, - facet_row=actual_facet_row, - animation_frame=actual_anim, + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, **plotly_kwargs, ) @@ -2057,14 +1940,14 @@ def effects( else: raise ValueError(f"'by' must be one of 'component', 'contributor', 'time', or None, got {by!r}") - # Resolve facets - actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( - combined.to_dataset(name='value'), facet_col, facet_row, animation_frame - ) - # Convert to DataFrame for plotly express df = combined.to_dataframe(name='value').reset_index() + # Resolve facet/animation: 'auto' means None for DataFrames (no dimension priority) + resolved_facet_col = None if facet_col == 'auto' else facet_col + resolved_facet_row = None if facet_row == 'auto' else facet_row + resolved_animation = None if animation_frame == 'auto' else animation_frame + # Build color map if color_col and color_col in df.columns: color_items = df[color_col].unique().tolist() @@ -2087,9 +1970,9 @@ def effects( y='value', color=color_col, color_discrete_map=color_map, - facet_col=actual_facet_col, - facet_row=actual_facet_row, - animation_frame=actual_anim, + facet_col=resolved_facet_col, + facet_row=resolved_facet_row, + animation_frame=resolved_animation, title=title, **plotly_kwargs, ) @@ -2138,16 +2021,13 @@ def charge_states( ds = ds[[s for s in storages if s in ds]] ds = _apply_selection(ds, select) - actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( - ds, facet_col, facet_row, animation_frame - ) fig = ds.fxplot.line( colors=colors, title='Storage Charge States', - facet_col=actual_facet_col, - facet_row=actual_facet_row, - animation_frame=actual_anim, + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, **plotly_kwargs, ) fig.update_yaxes(title_text='Charge State') @@ -2231,61 +2111,46 @@ def storage( # Apply selection ds = _apply_selection(ds, select) - actual_facet_col, actual_facet_row, actual_anim = _resolve_auto_facets( - ds, facet_col, facet_row, animation_frame - ) - # Build color map + # Separate flow data from charge_state flow_labels = [lbl for lbl in ds.data_vars if lbl != 'charge_state'] + flow_ds = ds[flow_labels] + charge_da = ds['charge_state'] + + # Build color map for flows if colors is None: colors = self._get_color_map_for_balance(storage, flow_labels) - color_map = 
process_colors(colors, flow_labels) - color_map['charge_state'] = 'black' - # Convert to long-form DataFrame - df = _dataset_to_long_df(ds) - - # Create figure with facets using px.bar for flows - flow_df = df[df['variable'] != 'charge_state'] - charge_df = df[df['variable'] == 'charge_state'] - - fig = px.bar( - flow_df, + # Create stacked bar chart for flows using fxplot + fig = flow_ds.fxplot.stacked_bar( x='time', - y='value', color='variable', - facet_col=actual_facet_col, - facet_row=actual_facet_row, - animation_frame=actual_anim, - color_discrete_map=color_map, + colors=colors, title=f'{storage} Operation ({unit})', + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, **plotly_kwargs, ) - fig.update_layout(bargap=0, bargroupgap=0) - fig.update_traces(marker_line_width=0) # Add charge state as line on secondary y-axis - if not charge_df.empty: - # Create line figure with same facets to get matching trace structure - line_fig = px.line( - charge_df, + if charge_da.size > 0: + # Create line figure with same facets + line_fig = charge_da.fxplot.line( x='time', - y='value', - facet_col=actual_facet_col, - facet_row=actual_facet_row, - animation_frame=actual_anim, + color=None, # Single line, no color grouping + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, ) # Get the primary y-axes from the bar figure to create matching secondary axes - # px creates axes named: yaxis, yaxis2, yaxis3, etc. primary_yaxes = [key for key in fig.layout if key.startswith('yaxis')] # For each primary y-axis, create a secondary y-axis for i, primary_key in enumerate(sorted(primary_yaxes, key=lambda x: int(x[5:]) if x[5:] else 0)): - # Determine secondary axis name (y -> y2, y2 -> y3 pattern won't work) - # Instead use a consistent offset: yaxis -> yaxis10, yaxis2 -> yaxis11, etc. 
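+                # e.g. 'yaxis' (primary_num '1') pairs with 'yaxis101' / trace axis 'y101',
+                # and 'yaxis2' pairs with 'yaxis102'; the +100 offset keeps these
+                # secondary axes clear of any axes plotly itself creates.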
primary_num = primary_key[5:] if primary_key[5:] else '1' - secondary_num = int(primary_num) + 100 # Use high offset to avoid conflicts + secondary_num = int(primary_num) + 100 secondary_key = f'yaxis{secondary_num}' secondary_anchor = f'x{primary_num}' if primary_num != '1' else 'x' @@ -2299,14 +2164,13 @@ def storage( # Add line traces with correct axis assignments for i, trace in enumerate(line_fig.data): - # Map trace index to secondary y-axis primary_num = i + 1 if i > 0 else 1 secondary_yaxis = f'y{primary_num + 100}' trace.name = 'charge_state' trace.line = dict(color=charge_state_color, width=2) trace.yaxis = secondary_yaxis - trace.showlegend = i == 0 # Only show legend for first trace + trace.showlegend = i == 0 trace.legendgroup = 'charge_state' fig.add_trace(trace) diff --git a/flixopt/transform_accessor.py b/flixopt/transform_accessor.py index 3a13dbb63..6a5b51caa 100644 --- a/flixopt/transform_accessor.py +++ b/flixopt/transform_accessor.py @@ -582,6 +582,14 @@ def cluster( weights: dict[str, float] | None = None, time_series_for_high_peaks: list[str] | None = None, time_series_for_low_peaks: list[str] | None = None, + cluster_method: Literal['k_means', 'k_medoids', 'hierarchical', 'k_maxoids', 'averaging'] = 'hierarchical', + representation_method: Literal[ + 'meanRepresentation', 'medoidRepresentation', 'distributionAndMinMaxRepresentation' + ] = 'medoidRepresentation', + extreme_period_method: Literal['append', 'new_cluster_center', 'replace_cluster_center'] | None = None, + rescale_cluster_periods: bool = True, + predef_cluster_order: xr.DataArray | np.ndarray | list[int] | None = None, + **tsam_kwargs: Any, ) -> FlowSystem: """ Create a FlowSystem with reduced timesteps using typical clusters. @@ -591,7 +599,7 @@ def cluster( through time series aggregation using the tsam package. The method: - 1. Performs time series clustering using tsam (k-means) + 1. Performs time series clustering using tsam (hierarchical by default) 2. Extracts only the typical clusters (not all original timesteps) 3. Applies timestep weighting for accurate cost representation 4. Handles storage states between clusters based on each Storage's ``cluster_mode`` @@ -607,6 +615,25 @@ def cluster( time_series_for_high_peaks: Time series labels for explicitly selecting high-value clusters. **Recommended** for demand time series to capture peak demand days. time_series_for_low_peaks: Time series labels for explicitly selecting low-value clusters. + cluster_method: Clustering algorithm to use. Options: + ``'hierarchical'`` (default), ``'k_means'``, ``'k_medoids'``, + ``'k_maxoids'``, ``'averaging'``. + representation_method: How cluster representatives are computed. Options: + ``'medoidRepresentation'`` (default), ``'meanRepresentation'``, + ``'distributionAndMinMaxRepresentation'``. + extreme_period_method: How extreme periods (peaks) are integrated. Options: + ``None`` (default, no special handling), ``'append'``, + ``'new_cluster_center'``, ``'replace_cluster_center'``. + rescale_cluster_periods: If True (default), rescale cluster periods so their + weighted mean matches the original time series mean. + predef_cluster_order: Predefined cluster assignments for manual clustering. + Array of cluster indices (0 to n_clusters-1) for each original period. + If provided, clustering is skipped and these assignments are used directly. + For multi-dimensional FlowSystems, use an xr.DataArray with dims + ``[original_cluster, period?, scenario?]`` to specify different assignments + per period/scenario combination. 
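+                A minimal sketch (the assignment below is illustrative):
+
+                >>> order = [0, 1, 0, 0, 1, 1, 0]  # one cluster index per original day
+                >>> fs_clustered = flow_system.transform.cluster(
+                ...     n_clusters=2, cluster_duration='1D', predef_cluster_order=order,
+                ... )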
+ **tsam_kwargs: Additional keyword arguments passed to + ``tsam.TimeSeriesAggregation``. See tsam documentation for all options. Returns: A new FlowSystem with reduced timesteps (only typical clusters). @@ -676,11 +703,47 @@ def cluster( ds = self._fs.to_dataset(include_solution=False) + # Validate tsam_kwargs doesn't override explicit parameters + reserved_tsam_keys = { + 'noTypicalPeriods', + 'hoursPerPeriod', + 'resolution', + 'clusterMethod', + 'extremePeriodMethod', + 'representationMethod', + 'rescaleClusterPeriods', + 'predefClusterOrder', + 'weightDict', + 'addPeakMax', + 'addPeakMin', + } + conflicts = reserved_tsam_keys & set(tsam_kwargs.keys()) + if conflicts: + raise ValueError( + f'Cannot override explicit parameters via tsam_kwargs: {conflicts}. ' + f'Use the corresponding cluster() parameters instead.' + ) + + # Validate predef_cluster_order dimensions if it's a DataArray + if isinstance(predef_cluster_order, xr.DataArray): + expected_dims = {'original_cluster'} + if has_periods: + expected_dims.add('period') + if has_scenarios: + expected_dims.add('scenario') + if set(predef_cluster_order.dims) != expected_dims: + raise ValueError( + f'predef_cluster_order dimensions {set(predef_cluster_order.dims)} ' + f'do not match expected {expected_dims} for this FlowSystem.' + ) + # Cluster each (period, scenario) combination using tsam directly tsam_results: dict[tuple, tsam.TimeSeriesAggregation] = {} cluster_orders: dict[tuple, np.ndarray] = {} cluster_occurrences_all: dict[tuple, dict] = {} - use_extreme_periods = bool(time_series_for_high_peaks or time_series_for_low_peaks) + + # Collect metrics per (period, scenario) slice + clustering_metrics_all: dict[tuple, pd.DataFrame] = {} for period_label in periods: for scenario_label in scenarios: @@ -693,18 +756,34 @@ def cluster( if selector: logger.info(f'Clustering {", ".join(f"{k}={v}" for k, v in selector.items())}...') + # Handle predef_cluster_order for multi-dimensional case + predef_order_slice = None + if predef_cluster_order is not None: + if isinstance(predef_cluster_order, xr.DataArray): + # Extract slice for this (period, scenario) combination + predef_order_slice = predef_cluster_order.sel(**selector, drop=True).values + else: + # Simple array/list - use directly + predef_order_slice = predef_cluster_order + # Use tsam directly clustering_weights = weights or self._calculate_clustering_weights(temporaly_changing_ds) + # tsam expects 'None' as a string, not Python None + tsam_extreme_method = 'None' if extreme_period_method is None else extreme_period_method tsam_agg = tsam.TimeSeriesAggregation( df, noTypicalPeriods=n_clusters, hoursPerPeriod=hours_per_cluster, resolution=dt, - clusterMethod='k_means', - extremePeriodMethod='new_cluster_center' if use_extreme_periods else 'None', + clusterMethod=cluster_method, + extremePeriodMethod=tsam_extreme_method, + representationMethod=representation_method, + rescaleClusterPeriods=rescale_cluster_periods, + predefClusterOrder=predef_order_slice, weightDict={name: w for name, w in clustering_weights.items() if name in df.columns}, addPeakMax=time_series_for_high_peaks or [], addPeakMin=time_series_for_low_peaks or [], + **tsam_kwargs, ) # Suppress tsam warning about minimal value constraints (informational, not actionable) with warnings.catch_warnings(): @@ -714,10 +793,60 @@ def cluster( tsam_results[key] = tsam_agg cluster_orders[key] = tsam_agg.clusterOrder cluster_occurrences_all[key] = tsam_agg.clusterPeriodNoOccur + # Compute accuracy metrics with error handling + try: + 
clustering_metrics_all[key] = tsam_agg.accuracyIndicators() + except Exception as e: + logger.warning(f'Failed to compute clustering metrics for {key}: {e}') + clustering_metrics_all[key] = pd.DataFrame() # Use first result for structure first_key = (periods[0], scenarios[0]) first_tsam = tsam_results[first_key] + + # Convert metrics to xr.Dataset with period/scenario dims if multi-dimensional + # Filter out empty DataFrames (from failed accuracyIndicators calls) + non_empty_metrics = {k: v for k, v in clustering_metrics_all.items() if not v.empty} + if not non_empty_metrics: + # All metrics failed - create empty Dataset + clustering_metrics = xr.Dataset() + elif len(non_empty_metrics) == 1 or len(clustering_metrics_all) == 1: + # Simple case: convert single DataFrame to Dataset + metrics_df = non_empty_metrics.get(first_key) + if metrics_df is None: + metrics_df = next(iter(non_empty_metrics.values())) + clustering_metrics = xr.Dataset( + { + col: xr.DataArray( + metrics_df[col].values, dims=['time_series'], coords={'time_series': metrics_df.index} + ) + for col in metrics_df.columns + } + ) + else: + # Multi-dim case: combine metrics into Dataset with period/scenario dims + # First, get the metric columns from any non-empty DataFrame + sample_df = next(iter(non_empty_metrics.values())) + metric_names = list(sample_df.columns) + time_series_names = list(sample_df.index) + + # Build DataArrays for each metric + data_vars = {} + for metric in metric_names: + # Shape: (time_series, period?, scenario?) + slices = {} + for (p, s), df in clustering_metrics_all.items(): + if df.empty: + # Use NaN for failed metrics + slices[(p, s)] = xr.DataArray(np.full(len(time_series_names), np.nan), dims=['time_series']) + else: + slices[(p, s)] = xr.DataArray(df[metric].values, dims=['time_series']) + + da = self._combine_slices_to_dataarray_generic(slices, ['time_series'], periods, scenarios, metric) + da = da.assign_coords(time_series=time_series_names) + data_vars[metric] = da + + clustering_metrics = xr.Dataset(data_vars) n_reduced_timesteps = len(first_tsam.typicalPeriods) actual_n_clusters = len(first_tsam.clusterPeriodNoOccur) @@ -851,7 +980,7 @@ def _build_cluster_occurrences_for_key(key: tuple) -> np.ndarray: # Build multi-dimensional arrays if has_periods or has_scenarios: # Multi-dimensional case: build arrays for each (period, scenario) combination - # cluster_order: dims [original_period, period?, scenario?] + # cluster_order: dims [original_cluster, period?, scenario?] 
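+            # e.g. with 2 periods, no scenarios, and 6 original clusters:
+            #   cluster_order dims -> ('original_cluster', 'period'), shape (6, 2)
+            #   timestep_mapping dims -> ('original_time', 'period')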
cluster_order_slices = {} timestep_mapping_slices = {} cluster_occurrences_slices = {} @@ -863,7 +992,7 @@ def _build_cluster_occurrences_for_key(key: tuple) -> np.ndarray: for s in scenarios: key = (p, s) cluster_order_slices[key] = xr.DataArray( - cluster_orders[key], dims=['original_period'], name='cluster_order' + cluster_orders[key], dims=['original_cluster'], name='cluster_order' ) timestep_mapping_slices[key] = xr.DataArray( _build_timestep_mapping_for_key(key), @@ -877,7 +1006,7 @@ def _build_cluster_occurrences_for_key(key: tuple) -> np.ndarray: # Combine slices into multi-dimensional DataArrays cluster_order_da = self._combine_slices_to_dataarray_generic( - cluster_order_slices, ['original_period'], periods, scenarios, 'cluster_order' + cluster_order_slices, ['original_cluster'], periods, scenarios, 'cluster_order' ) timestep_mapping_da = self._combine_slices_to_dataarray_generic( timestep_mapping_slices, ['original_time'], periods, scenarios, 'timestep_mapping' @@ -887,7 +1016,7 @@ def _build_cluster_occurrences_for_key(key: tuple) -> np.ndarray: ) else: # Simple case: single (None, None) slice - cluster_order_da = xr.DataArray(cluster_orders[first_key], dims=['original_period'], name='cluster_order') + cluster_order_da = xr.DataArray(cluster_orders[first_key], dims=['original_cluster'], name='cluster_order') # Use renamed timesteps as coordinates original_timesteps_coord = self._fs.timesteps.rename('original_time') timestep_mapping_da = xr.DataArray( @@ -932,6 +1061,7 @@ def _build_cluster_weights_for_key(key: tuple) -> xr.DataArray: reduced_fs.clustering = Clustering( result=aggregation_result, backend_name='tsam', + metrics=clustering_metrics, ) return reduced_fs @@ -996,7 +1126,7 @@ def _combine_slices_to_dataarray_generic( Args: slices: Dict mapping (period, scenario) tuples to DataArrays. - base_dims: Base dimensions of each slice (e.g., ['original_period'] or ['original_time']). + base_dims: Base dimensions of each slice (e.g., ['original_cluster'] or ['original_time']). periods: List of period labels ([None] if no periods dimension). scenarios: List of scenario labels ([None] if no scenarios dimension). name: Name for the resulting DataArray. @@ -1085,7 +1215,7 @@ def expand_solution(self) -> FlowSystem: disaggregates the FlowSystem by: 1. Expanding all time series data from typical clusters to full timesteps 2. Expanding the solution by mapping each typical cluster back to all - original segments it represents + original clusters it represents For FlowSystems with periods and/or scenarios, each (period, scenario) combination is expanded using its own cluster assignment. @@ -1121,7 +1251,7 @@ def expand_solution(self) -> FlowSystem: Note: The expanded FlowSystem repeats the typical cluster values for all - segments belonging to the same cluster. Both input data and solution + original clusters belonging to the same cluster. Both input data and solution are consistently expanded, so they match. This is an approximation - the actual dispatch at full resolution would differ due to intra-cluster variations in time series data. 
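+
+        Examples:
+            A round-trip sketch (solver settings are illustrative):
+
+            >>> fs_clustered = flow_system.transform.cluster(n_clusters=8, cluster_duration='1D')
+            >>> fs_clustered.optimize(fx.solvers.HighsSolver(0.01 / 100, 60))
+            >>> fs_full = fs_clustered.transform.expand_solution()
+            >>> fs_full.solution  # typical-cluster values repeated on the original time axis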
@@ -1162,18 +1292,44 @@ def expand_solution(self) -> FlowSystem: scenarios = list(self._fs.scenarios) if has_scenarios else [None] n_original_timesteps = len(original_timesteps) n_reduced_timesteps = n_clusters * timesteps_per_cluster + n_original_clusters = cluster_structure.n_original_clusters # Expand function using ClusterResult.expand_data() - handles multi-dimensional cases - def expand_da(da: xr.DataArray) -> xr.DataArray: + # For charge_state with cluster dim, also includes the extra timestep + # Clamp to valid bounds to handle partial clusters at the end + last_original_cluster_idx = min( + (n_original_timesteps - 1) // timesteps_per_cluster, + n_original_clusters - 1, + ) + + def expand_da(da: xr.DataArray, var_name: str = '') -> xr.DataArray: if 'time' not in da.dims: return da.copy() - return info.result.expand_data(da, original_time=original_timesteps) + expanded = info.result.expand_data(da, original_time=original_timesteps) + + # For charge_state with cluster dim, append the extra timestep value + if var_name.endswith('|charge_state') and 'cluster' in da.dims: + # Get extra timestep from last cluster using vectorized selection + cluster_order = cluster_structure.cluster_order # (n_original_clusters,) or with period/scenario + if cluster_order.ndim == 1: + last_cluster = int(cluster_order[last_original_cluster_idx]) + extra_val = da.isel(cluster=last_cluster, time=-1) + else: + # Multi-dimensional: select last cluster for each period/scenario slice + last_clusters = cluster_order.isel(original_cluster=last_original_cluster_idx) + extra_val = da.isel(cluster=last_clusters, time=-1) + # Drop 'cluster'/'time' coords created by isel (kept as non-dim coords) + extra_val = extra_val.drop_vars(['cluster', 'time'], errors='ignore') + extra_val = extra_val.expand_dims(time=[original_timesteps_extra[-1]]) + expanded = xr.concat([expanded, extra_val], dim='time') + + return expanded # 1. Expand FlowSystem data (with cluster_weight set to 1.0 for all timesteps) reduced_ds = self._fs.to_dataset(include_solution=False) # Filter out cluster-related variables and copy attrs without clustering info data_vars = { - name: expand_da(da) + name: expand_da(da, name) for name, da in reduced_ds.data_vars.items() if name != 'cluster_weight' and not name.startswith('clustering|') } @@ -1201,17 +1357,22 @@ def expand_da(da: xr.DataArray) -> xr.DataArray: expanded_fs = FlowSystem.from_dataset(expanded_ds) # 2. Expand solution + # charge_state variables get their extra timestep via expand_da; others get NaN via reindex reduced_solution = self._fs.solution expanded_fs._solution = xr.Dataset( - {name: expand_da(da) for name, da in reduced_solution.data_vars.items()}, + {name: expand_da(da, name) for name, da in reduced_solution.data_vars.items()}, attrs=reduced_solution.attrs, ) + # Reindex to timesteps_extra for consistency with non-expanded FlowSystems + # (variables without extra timestep data will have NaN at the final timestep) + expanded_fs._solution = expanded_fs._solution.reindex(time=original_timesteps_extra) # 3. Combine charge_state with SOC_boundary for InterclusterStorageModel storages # For intercluster storages, charge_state is relative (ΔE) and can be negative. # Per Blanke et al. (2022) Eq. 9, actual SOC at time t in period d is: # SOC(t) = SOC_boundary[d] * (1 - loss)^t_within_period + charge_state(t) # where t_within_period is hours from period start (accounts for self-discharge decay). 
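+        # Worked sketch of the formula above (values illustrative): with a
+        # relative loss of 0.01 per hour and t_within_period = 3 h,
+        #   SOC(t) = SOC_boundary[d] * (1 - 0.01)**3 + charge_state(t)
+        #          = SOC_boundary[d] * 0.970299 + charge_state(t)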
+ n_original_timesteps_extra = len(original_timesteps_extra) soc_boundary_vars = [name for name in reduced_solution.data_vars if name.endswith('|SOC_boundary')] for soc_boundary_name in soc_boundary_vars: storage_name = soc_boundary_name.rsplit('|', 1)[0] @@ -1222,30 +1383,42 @@ def expand_da(da: xr.DataArray) -> xr.DataArray: soc_boundary = reduced_solution[soc_boundary_name] expanded_charge_state = expanded_fs._solution[charge_state_name] - # Map each original timestep to its original period index - original_period_indices = np.arange(n_original_timesteps) // timesteps_per_cluster + # Map each original timestep (including extra) to its original period index + # The extra timestep belongs to the last period + original_cluster_indices = np.minimum( + np.arange(n_original_timesteps_extra) // timesteps_per_cluster, + n_original_clusters - 1, + ) # Select SOC_boundary for each timestep (boundary[d] for period d) - # SOC_boundary has dim 'cluster_boundary', we select indices 0..n_original_periods-1 + # SOC_boundary has dim 'cluster_boundary', we select indices 0..n_original_clusters-1 soc_boundary_per_timestep = soc_boundary.isel( - cluster_boundary=xr.DataArray(original_period_indices, dims=['time']) + cluster_boundary=xr.DataArray(original_cluster_indices, dims=['time']) ) - soc_boundary_per_timestep = soc_boundary_per_timestep.assign_coords(time=original_timesteps) + soc_boundary_per_timestep = soc_boundary_per_timestep.assign_coords(time=original_timesteps_extra) # Apply self-discharge decay to SOC_boundary based on time within period # Get the storage's relative_loss_per_hour from the clustered flow system storage = self._fs.storages.get(storage_name) if storage is not None: # Time within period for each timestep (0, 1, 2, ..., timesteps_per_cluster-1, 0, 1, ...) 
- time_within_period = np.arange(n_original_timesteps) % timesteps_per_cluster + # The extra timestep is at index timesteps_per_cluster (one past the last within-cluster index) + time_within_period = np.arange(n_original_timesteps_extra) % timesteps_per_cluster + # The extra timestep gets the correct decay (timesteps_per_cluster) + time_within_period[-1] = timesteps_per_cluster time_within_period_da = xr.DataArray( - time_within_period, dims=['time'], coords={'time': original_timesteps} + time_within_period, dims=['time'], coords={'time': original_timesteps_extra} ) # Decay factor: (1 - loss)^t, using mean loss over time - # Keep as DataArray to respect per-period/scenario values loss_value = storage.relative_loss_per_hour.mean('time') if (loss_value > 0).any(): decay_da = (1 - loss_value) ** time_within_period_da + if 'cluster' in decay_da.dims: + # Map each timestep to its cluster's decay value + cluster_per_timestep = cluster_structure.cluster_order.values[original_cluster_indices] + decay_da = decay_da.isel(cluster=xr.DataArray(cluster_per_timestep, dims=['time'])).drop_vars( + 'cluster', errors='ignore' + ) soc_boundary_per_timestep = soc_boundary_per_timestep * decay_da # Combine: actual_SOC = SOC_boundary * decay + charge_state @@ -1254,15 +1427,22 @@ def expand_da(da: xr.DataArray) -> xr.DataArray: combined_charge_state = (expanded_charge_state + soc_boundary_per_timestep).clip(min=0) expanded_fs._solution[charge_state_name] = combined_charge_state.assign_attrs(expanded_charge_state.attrs) + # Remove SOC_boundary variables - they're cluster-specific and now incorporated into charge_state + for soc_boundary_name in soc_boundary_vars: + if soc_boundary_name in expanded_fs._solution: + del expanded_fs._solution[soc_boundary_name] + # Also drop the cluster_boundary coordinate (orphaned after removing SOC_boundary) + if 'cluster_boundary' in expanded_fs._solution.coords: + expanded_fs._solution = expanded_fs._solution.drop_vars('cluster_boundary') + n_combinations = len(periods) * len(scenarios) - n_original_segments = cluster_structure.n_original_periods logger.info( f'Expanded FlowSystem from {n_reduced_timesteps} to {n_original_timesteps} timesteps ' f'({n_clusters} clusters' + ( f', {n_combinations} period/scenario combinations)' if n_combinations > 1 - else f' → {n_original_segments} original segments)' + else f' → {n_original_clusters} original clusters)' ) ) diff --git a/tests/test_cluster_reduce_expand.py b/tests/test_cluster_reduce_expand.py index 7072fe22e..4059470ee 100644 --- a/tests/test_cluster_reduce_expand.py +++ b/tests/test_cluster_reduce_expand.py @@ -167,7 +167,7 @@ def test_expand_solution_enables_statistics_accessor(solver_fixture, timesteps_8 # These should work without errors flow_rates = fs_expanded.statistics.flow_rates assert 'Boiler(Q_th)' in flow_rates - assert len(flow_rates['Boiler(Q_th)'].coords['time']) == 192 + assert len(flow_rates['Boiler(Q_th)'].coords['time']) == 193 # 192 + 1 extra timestep flow_hours = fs_expanded.statistics.flow_hours assert 'Boiler(Q_th)' in flow_hours @@ -321,7 +321,7 @@ def test_cluster_and_expand_with_scenarios(solver_fixture, timesteps_8_days, sce flow_var = 'Boiler(Q_th)|flow_rate' assert flow_var in fs_expanded.solution assert 'scenario' in fs_expanded.solution[flow_var].dims - assert len(fs_expanded.solution[flow_var].coords['time']) == 192 + assert len(fs_expanded.solution[flow_var].coords['time']) == 193 # 192 + 1 extra timestep def test_expand_solution_maps_scenarios_independently(solver_fixture, timesteps_8_days, 
scenarios_2): @@ -449,9 +449,9 @@ def test_storage_cluster_mode_intercluster(self, solver_fixture, timesteps_8_day soc_boundary = fs_clustered.solution['Battery|SOC_boundary'] assert 'cluster_boundary' in soc_boundary.dims - # Number of boundaries = n_original_periods + 1 - n_original_periods = fs_clustered.clustering.result.cluster_structure.n_original_periods - assert soc_boundary.sizes['cluster_boundary'] == n_original_periods + 1 + # Number of boundaries = n_original_clusters + 1 + n_original_clusters = fs_clustered.clustering.result.cluster_structure.n_original_clusters + assert soc_boundary.sizes['cluster_boundary'] == n_original_clusters + 1 def test_storage_cluster_mode_intercluster_cyclic(self, solver_fixture, timesteps_8_days): """Storage with cluster_mode='intercluster_cyclic' - linked with yearly cycling.""" @@ -693,7 +693,7 @@ def test_expand_solution_with_periods(self, solver_fixture, timesteps_8_days, pe # Solution should have period dimension flow_var = 'Boiler(Q_th)|flow_rate' assert 'period' in fs_expanded.solution[flow_var].dims - assert len(fs_expanded.solution[flow_var].coords['time']) == 192 + assert len(fs_expanded.solution[flow_var].coords['time']) == 193 # 192 + 1 extra timestep def test_cluster_with_periods_and_scenarios(self, solver_fixture, timesteps_8_days, periods_2, scenarios_2): """Clustering should work with both periods and scenarios.""" @@ -719,7 +719,7 @@ def test_cluster_with_periods_and_scenarios(self, solver_fixture, timesteps_8_da fs_expanded = fs_clustered.transform.expand_solution() assert 'period' in fs_expanded.solution[flow_var].dims assert 'scenario' in fs_expanded.solution[flow_var].dims - assert len(fs_expanded.solution[flow_var].coords['time']) == 192 + assert len(fs_expanded.solution[flow_var].coords['time']) == 193 # 192 + 1 extra timestep # ==================== Peak Selection Tests ==================== diff --git a/tests/test_clustering/test_base.py b/tests/test_clustering/test_base.py index 9c63f25f6..9cca4de81 100644 --- a/tests/test_clustering/test_base.py +++ b/tests/test_clustering/test_base.py @@ -17,7 +17,7 @@ class TestClusterStructure: def test_basic_creation(self): """Test basic ClusterStructure creation.""" - cluster_order = xr.DataArray([0, 1, 0, 1, 2, 0], dims=['original_period']) + cluster_order = xr.DataArray([0, 1, 0, 1, 2, 0], dims=['original_cluster']) cluster_occurrences = xr.DataArray([3, 2, 1], dims=['cluster']) structure = ClusterStructure( @@ -29,7 +29,7 @@ def test_basic_creation(self): assert structure.n_clusters == 3 assert structure.timesteps_per_cluster == 24 - assert structure.n_original_periods == 6 + assert structure.n_original_clusters == 6 def test_creation_from_numpy(self): """Test ClusterStructure creation from numpy arrays.""" @@ -42,12 +42,12 @@ def test_creation_from_numpy(self): assert isinstance(structure.cluster_order, xr.DataArray) assert isinstance(structure.cluster_occurrences, xr.DataArray) - assert structure.n_original_periods == 5 + assert structure.n_original_clusters == 5 def test_get_cluster_weight_per_timestep(self): """Test weight calculation per timestep.""" structure = ClusterStructure( - cluster_order=xr.DataArray([0, 1, 0], dims=['original_period']), + cluster_order=xr.DataArray([0, 1, 0], dims=['original_cluster']), cluster_occurrences=xr.DataArray([2, 1], dims=['cluster']), n_clusters=2, timesteps_per_cluster=4, @@ -136,7 +136,7 @@ def test_basic_creation(self): structure = create_cluster_structure_from_mapping(mapping, timesteps_per_cluster=4) assert 
structure.timesteps_per_cluster == 4 - assert structure.n_original_periods == 3 + assert structure.n_original_clusters == 3 class TestClustering: diff --git a/tests/test_clustering/test_integration.py b/tests/test_clustering/test_integration.py index 2d04a51c1..16c638c95 100644 --- a/tests/test_clustering/test_integration.py +++ b/tests/test_clustering/test_integration.py @@ -170,6 +170,104 @@ def test_cluster_reduces_timesteps(self): assert len(fs_clustered.timesteps) * len(fs_clustered.clusters) == 48 +class TestClusterAdvancedOptions: + """Tests for advanced clustering options.""" + + @pytest.fixture + def basic_flow_system(self): + """Create a basic FlowSystem for testing.""" + pytest.importorskip('tsam') + from flixopt import Bus, Flow, Sink, Source + from flixopt.core import TimeSeriesData + + n_hours = 168 # 7 days + fs = FlowSystem(timesteps=pd.date_range('2024-01-01', periods=n_hours, freq='h')) + + demand_data = np.sin(np.linspace(0, 14 * np.pi, n_hours)) + 2 + bus = Bus('electricity') + grid_flow = Flow('grid_in', bus='electricity', size=100) + demand_flow = Flow( + 'demand_out', bus='electricity', size=100, fixed_relative_profile=TimeSeriesData(demand_data / 100) + ) + source = Source('grid', outputs=[grid_flow]) + sink = Sink('demand', inputs=[demand_flow]) + fs.add_elements(source, sink, bus) + return fs + + def test_cluster_method_parameter(self, basic_flow_system): + """Test that cluster_method parameter works.""" + fs_clustered = basic_flow_system.transform.cluster( + n_clusters=2, cluster_duration='1D', cluster_method='hierarchical' + ) + assert len(fs_clustered.clusters) == 2 + + def test_hierarchical_is_deterministic(self, basic_flow_system): + """Test that hierarchical clustering (default) produces deterministic results.""" + fs1 = basic_flow_system.transform.cluster(n_clusters=2, cluster_duration='1D') + fs2 = basic_flow_system.transform.cluster(n_clusters=2, cluster_duration='1D') + + # Hierarchical clustering should produce identical cluster orders + xr.testing.assert_equal(fs1.clustering.cluster_order, fs2.clustering.cluster_order) + + def test_metrics_available(self, basic_flow_system): + """Test that clustering metrics are available after clustering.""" + fs_clustered = basic_flow_system.transform.cluster(n_clusters=2, cluster_duration='1D') + + assert fs_clustered.clustering.metrics is not None + assert isinstance(fs_clustered.clustering.metrics, xr.Dataset) + assert 'time_series' in fs_clustered.clustering.metrics.dims + assert len(fs_clustered.clustering.metrics.data_vars) > 0 + + def test_representation_method_parameter(self, basic_flow_system): + """Test that representation_method parameter works.""" + fs_clustered = basic_flow_system.transform.cluster( + n_clusters=2, cluster_duration='1D', representation_method='medoidRepresentation' + ) + assert len(fs_clustered.clusters) == 2 + + def test_rescale_cluster_periods_parameter(self, basic_flow_system): + """Test that rescale_cluster_periods parameter works.""" + fs_clustered = basic_flow_system.transform.cluster( + n_clusters=2, cluster_duration='1D', rescale_cluster_periods=False + ) + assert len(fs_clustered.clusters) == 2 + + def test_tsam_kwargs_passthrough(self, basic_flow_system): + """Test that additional kwargs are passed to tsam.""" + # sameMean is a valid tsam parameter + fs_clustered = basic_flow_system.transform.cluster(n_clusters=2, cluster_duration='1D', sameMean=True) + assert len(fs_clustered.clusters) == 2 + + def test_metrics_with_periods(self): + """Test that metrics have period dimension 
for multi-period FlowSystems."""
+        pytest.importorskip('tsam')
+        from flixopt import Bus, Flow, Sink, Source
+        from flixopt.core import TimeSeriesData
+
+        n_hours = 168  # 7 days
+        fs = FlowSystem(
+            timesteps=pd.date_range('2024-01-01', periods=n_hours, freq='h'),
+            periods=pd.Index([2025, 2030], name='period'),
+        )
+
+        demand_data = np.sin(np.linspace(0, 14 * np.pi, n_hours)) + 2
+        bus = Bus('electricity')
+        grid_flow = Flow('grid_in', bus='electricity', size=100)
+        demand_flow = Flow(
+            'demand_out', bus='electricity', size=100, fixed_relative_profile=TimeSeriesData(demand_data / 100)
+        )
+        source = Source('grid', outputs=[grid_flow])
+        sink = Sink('demand', inputs=[demand_flow])
+        fs.add_elements(source, sink, bus)
+
+        fs_clustered = fs.transform.cluster(n_clusters=2, cluster_duration='1D')
+
+        # Metrics should have period dimension
+        assert fs_clustered.clustering.metrics is not None
+        assert 'period' in fs_clustered.clustering.metrics.dims
+        assert len(fs_clustered.clustering.metrics.period) == 2
+
+
 class TestClusteringModuleImports:
     """Tests for flixopt.clustering module imports."""

From 4f1d827a1bb582f1cd7ab109da57d786709f1c57 Mon Sep 17 00:00:00 2001
From: FBumann <117816358+FBumann@users.noreply.github.com>
Date: Mon, 5 Jan 2026 15:03:35 +0100
Subject: [PATCH 190/191] Add tutorial data back to notebooks

---
 docs/notebooks/02-heat-system.ipynb           |  72 +++-----
 .../03-investment-optimization.ipynb          | 125 +++++---------
 .../04-operational-constraints.ipynb          |  57 +++----
 docs/notebooks/05-multi-carrier-system.ipynb  | 125 +++++---------
 .../06a-time-varying-parameters.ipynb         |  99 ++++------
 docs/notebooks/07-scenarios-and-periods.ipynb | 156 ++++--------------
 6 files changed, 189 insertions(+), 445 deletions(-)

diff --git a/docs/notebooks/02-heat-system.ipynb b/docs/notebooks/02-heat-system.ipynb
index 3ff933ec3..9b47a96b4 100644
--- a/docs/notebooks/02-heat-system.ipynb
+++ b/docs/notebooks/02-heat-system.ipynb
@@ -32,9 +32,6 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import numpy as np\n",
-    "import pandas as pd\n",
-    "import plotly.express as px\n",
     "import xarray as xr\n",
     "\n",
     "import flixopt as fx\n",
@@ -59,33 +56,12 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# One week, hourly resolution\n",
-    "timesteps = pd.date_range('2024-01-15', periods=168, freq='h')\n",
+    "from data.tutorial_data import get_heat_system_data\n",
     "\n",
-    "# Create realistic office heat demand pattern\n",
-    "hours = np.arange(168)\n",
-    "hour_of_day = hours % 24\n",
-    "day_of_week = (hours // 24) % 7\n",
-    "\n",
-    "# Base demand pattern (kW)\n",
-    "base_demand = np.where(\n",
-    "    (hour_of_day >= 7) & (hour_of_day <= 18),  # Office hours\n",
-    "    80,  # Daytime\n",
-    "    30,  # Night setback\n",
-    ")\n",
-    "\n",
-    "# Reduce on weekends (days 5, 6)\n",
-    "weekend_factor = np.where(day_of_week >= 5, 0.5, 1.0)\n",
-    "heat_demand = base_demand * weekend_factor\n",
-    "\n",
-    "# Add some random variation\n",
-    "np.random.seed(42)\n",
-    "heat_demand = heat_demand + np.random.normal(0, 5, len(heat_demand))\n",
-    "heat_demand = np.clip(heat_demand, 20, 100)\n",
-    "\n",
-    "print(f'Time range: {timesteps[0]} to {timesteps[-1]}')\n",
-    "print(f'Peak demand: {heat_demand.max():.1f} kW')\n",
-    "print(f'Total demand: {heat_demand.sum():.0f} kWh')"
+    "data = get_heat_system_data()\n",
+    "timesteps = data['timesteps']\n",
+    "heat_demand = data['heat_demand']\n",
+    "gas_price = data['gas_price']"
    ]
   },
   {
@@ -95,15 +71,13 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "# Visualize the demand pattern with plotly\n",
"demand_series = xr.DataArray(heat_demand, dims=['time'], coords={'time': timesteps}, name='Heat Demand [kW]')\n", - "fig = px.line(\n", - " x=demand_series.time.values,\n", - " y=demand_series.values,\n", - " title='Office Heat Demand Profile',\n", - " labels={'x': 'Time', 'y': 'kW'},\n", + "# Visualize the demand pattern with fxplot\n", + "demand_ds = xr.Dataset(\n", + " {\n", + " 'Heat Demand': xr.DataArray(heat_demand, dims=['time'], coords={'time': timesteps}),\n", + " }\n", ")\n", - "fig" + "demand_ds.fxplot.line(title='Office Heat Demand Profile')" ] }, { @@ -123,15 +97,13 @@ "metadata": {}, "outputs": [], "source": [ - "# Time-of-use gas prices (€/kWh)\n", - "gas_price = np.where(\n", - " (hour_of_day >= 6) & (hour_of_day <= 22),\n", - " 0.08, # Peak: 6am-10pm\n", - " 0.05, # Off-peak: 10pm-6am\n", + "# Visualize time-of-use gas prices with fxplot\n", + "price_ds = xr.Dataset(\n", + " {\n", + " 'Gas Price': xr.DataArray(gas_price, dims=['time'], coords={'time': timesteps}),\n", + " }\n", ")\n", - "\n", - "fig = px.line(x=timesteps, y=gas_price, title='Gas Price [€/kWh]', labels={'x': 'Time', 'y': '€/kWh'})\n", - "fig" + "price_ds.fxplot.line(title='Gas Price [€/kWh]')" ] }, { @@ -405,8 +377,16 @@ "name": "python3" }, "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", "name": "python", - "version": "3.11" + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" } }, "nbformat": 4, diff --git a/docs/notebooks/03-investment-optimization.ipynb b/docs/notebooks/03-investment-optimization.ipynb index 349c84ccf..c31bfaee2 100644 --- a/docs/notebooks/03-investment-optimization.ipynb +++ b/docs/notebooks/03-investment-optimization.ipynb @@ -32,9 +32,6 @@ "metadata": {}, "outputs": [], "source": [ - "import numpy as np\n", - "import pandas as pd\n", - "import plotly.express as px\n", "import xarray as xr\n", "\n", "import flixopt as fx\n", @@ -84,26 +81,15 @@ "metadata": {}, "outputs": [], "source": [ - "# One week in summer, hourly\n", - "timesteps = pd.date_range('2024-07-15', periods=168, freq='h')\n", - "hours = np.arange(168)\n", - "hour_of_day = hours % 24\n", - "\n", - "# Solar radiation profile (kW/m² equivalent, simplified)\n", - "# Peak around noon, zero at night\n", - "solar_profile = np.maximum(0, np.sin((hour_of_day - 6) * np.pi / 12)) * 0.8\n", - "solar_profile = np.where((hour_of_day >= 6) & (hour_of_day <= 20), solar_profile, 0)\n", - "\n", - "# Add some cloud variation\n", - "np.random.seed(42)\n", - "cloud_factor = np.random.uniform(0.6, 1.0, len(timesteps))\n", - "solar_profile = solar_profile * cloud_factor\n", - "\n", - "# Pool operates 8am-10pm, constant demand when open\n", - "pool_demand = np.where((hour_of_day >= 8) & (hour_of_day <= 22), 150, 50) # kW\n", - "\n", - "print(f'Peak solar: {solar_profile.max():.2f} kW/kW_installed')\n", - "print(f'Pool demand: {pool_demand.max():.0f} kW (open), {pool_demand.min():.0f} kW (closed)')" + "from data.tutorial_data import get_investment_data\n", + "\n", + "data = get_investment_data()\n", + "timesteps = data['timesteps']\n", + "solar_profile = data['solar_profile']\n", + "pool_demand = data['pool_demand']\n", + "GAS_PRICE = data['gas_price']\n", + "SOLAR_COST_WEEKLY = data['solar_cost_per_kw_week']\n", + "TANK_COST_WEEKLY = data['tank_cost_per_kwh_week']" ] }, { @@ -113,63 +99,20 @@ "metadata": {}, "outputs": [], "source": [ - "# Visualize profiles with plotly - using xarray and faceting\n", + "# Visualize 
profiles with fxplot\n", "profiles = xr.Dataset(\n", " {\n", " 'Solar Profile [kW/kW]': xr.DataArray(solar_profile, dims=['time'], coords={'time': timesteps}),\n", " 'Pool Demand [kW]': xr.DataArray(pool_demand, dims=['time'], coords={'time': timesteps}),\n", " }\n", ")\n", - "\n", - "# Convert to long format for faceting\n", - "df = profiles.to_dataframe().reset_index().melt(id_vars='time', var_name='variable', value_name='value')\n", - "fig = px.line(df, x='time', y='value', facet_col='variable', height=300)\n", - "fig.update_yaxes(matches=None, showticklabels=True)\n", - "fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1]))\n", - "fig" + "profiles.fxplot.line(title='Solar and Pool Profiles', height=300)" ] }, { "cell_type": "markdown", "id": "7", "metadata": {}, - "source": [ - "## Define Costs\n", - "\n", - "Investment costs are **annualized** (€/year) to compare with operating costs:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "8", - "metadata": {}, - "outputs": [], - "source": [ - "# Cost parameters\n", - "GAS_PRICE = 0.12 # €/kWh - high gas price makes solar attractive\n", - "\n", - "# Solar collectors: 400 €/kW installed, 20-year lifetime → ~25 €/kW/year annualized\n", - "# (simplified, real calculation would include interest rate)\n", - "SOLAR_COST_PER_KW = 20 # €/kW/year\n", - "\n", - "# Buffer tank: 50 €/kWh capacity, 30-year lifetime → ~2 €/kWh/year\n", - "TANK_COST_PER_KWH = 1.5 # €/kWh/year\n", - "\n", - "# Scale factor: We model 1 week, but costs are annual\n", - "# So we scale investment costs to weekly equivalent\n", - "WEEKS_PER_YEAR = 52\n", - "SOLAR_COST_WEEKLY = SOLAR_COST_PER_KW / WEEKS_PER_YEAR\n", - "TANK_COST_WEEKLY = TANK_COST_PER_KWH / WEEKS_PER_YEAR\n", - "\n", - "print(f'Solar cost: {SOLAR_COST_WEEKLY:.3f} €/kW/week')\n", - "print(f'Tank cost: {TANK_COST_WEEKLY:.4f} €/kWh/week')" - ] - }, - { - "cell_type": "markdown", - "id": "9", - "metadata": {}, "source": [ "## Build the System with Investment Options\n", "\n", @@ -179,7 +122,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -250,7 +193,7 @@ }, { "cell_type": "markdown", - "id": "11", + "id": "9", "metadata": {}, "source": [ "## Run Optimization" @@ -259,7 +202,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -268,7 +211,7 @@ }, { "cell_type": "markdown", - "id": "13", + "id": "11", "metadata": {}, "source": [ "## Analyze Investment Decisions\n", @@ -279,7 +222,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -293,7 +236,7 @@ }, { "cell_type": "markdown", - "id": "15", + "id": "13", "metadata": {}, "source": [ "### Visualize Sizes" @@ -302,7 +245,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -311,7 +254,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "15", "metadata": {}, "source": [ "### Cost Breakdown" @@ -320,7 +263,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -338,7 +281,7 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "17", "metadata": {}, "source": [ "### System Operation" @@ -347,7 +290,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "18", "metadata": {}, "outputs": [], "source": [ @@ -357,7 +300,7 @@ { "cell_type": "code", 
"execution_count": null, - "id": "21", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -367,7 +310,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -376,7 +319,7 @@ }, { "cell_type": "markdown", - "id": "23", + "id": "21", "metadata": {}, "source": [ "## Compare: What if No Solar?\n", @@ -387,7 +330,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -403,7 +346,7 @@ }, { "cell_type": "markdown", - "id": "25", + "id": "23", "metadata": {}, "source": [ "### Energy Flow Sankey\n", @@ -414,7 +357,7 @@ { "cell_type": "code", "execution_count": null, - "id": "26", + "id": "24", "metadata": {}, "outputs": [], "source": [ @@ -423,7 +366,7 @@ }, { "cell_type": "markdown", - "id": "27", + "id": "25", "metadata": {}, "source": [ "## Key Concepts\n", @@ -470,8 +413,16 @@ "name": "python3" }, "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", "name": "python", - "version": "3.11" + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" } }, "nbformat": 4, diff --git a/docs/notebooks/04-operational-constraints.ipynb b/docs/notebooks/04-operational-constraints.ipynb index fbb611d1c..017761b5a 100644 --- a/docs/notebooks/04-operational-constraints.ipynb +++ b/docs/notebooks/04-operational-constraints.ipynb @@ -32,9 +32,6 @@ "metadata": {}, "outputs": [], "source": [ - "import numpy as np\n", - "import pandas as pd\n", - "import plotly.express as px\n", "import xarray as xr\n", "\n", "import flixopt as fx\n", @@ -73,32 +70,11 @@ "metadata": {}, "outputs": [], "source": [ - "# 3 days, hourly resolution\n", - "timesteps = pd.date_range('2024-03-11', periods=72, freq='h')\n", - "hours = np.arange(72)\n", - "hour_of_day = hours % 24\n", - "\n", - "# Factory operates in shifts:\n", - "# - Day shift (6am-2pm): 400 kW\n", - "# - Evening shift (2pm-10pm): 350 kW\n", - "# - Night (10pm-6am): 80 kW (maintenance heating only)\n", - "\n", - "steam_demand = np.select(\n", - " [\n", - " (hour_of_day >= 6) & (hour_of_day < 14), # Day shift\n", - " (hour_of_day >= 14) & (hour_of_day < 22), # Evening shift\n", - " ],\n", - " [400, 350],\n", - " default=80, # Night\n", - ")\n", - "\n", - "# Add some variation\n", - "np.random.seed(123)\n", - "steam_demand = steam_demand + np.random.normal(0, 20, len(steam_demand))\n", - "steam_demand = np.clip(steam_demand, 50, 450).astype(float)\n", + "from data.tutorial_data import get_constraints_data\n", "\n", - "print(f'Peak demand: {steam_demand.max():.0f} kW')\n", - "print(f'Min demand: {steam_demand.min():.0f} kW')" + "data = get_constraints_data()\n", + "timesteps = data['timesteps']\n", + "steam_demand = data['steam_demand']" ] }, { @@ -108,7 +84,9 @@ "metadata": {}, "outputs": [], "source": [ - "px.line(x=timesteps, y=steam_demand, title='Factory Steam Demand', labels={'x': 'Time', 'y': 'kW'})" + "# Visualize demand with fxplot\n", + "demand_ds = xr.Dataset({'Steam Demand': xr.DataArray(steam_demand, dims=['time'], coords={'time': timesteps})})\n", + "demand_ds.fxplot.line(title='Factory Steam Demand')" ] }, { @@ -267,11 +245,7 @@ " }\n", ")\n", "\n", - "df = status_ds.to_dataframe().reset_index().melt(id_vars='time', var_name='variable', value_name='value')\n", - "fig = px.line(df, x='time', y='value', facet_col='variable', height=300, title='Main Boiler Operation')\n", - 
"fig.update_yaxes(matches=None, showticklabels=True)\n", - "fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1]))\n", - "fig" + "status_ds.fxplot.line(title='Main Boiler Operation', height=300)" ] }, { @@ -455,7 +429,20 @@ ] } ], - "metadata": {}, + "metadata": { + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, "nbformat": 4, "nbformat_minor": 5 } diff --git a/docs/notebooks/05-multi-carrier-system.ipynb b/docs/notebooks/05-multi-carrier-system.ipynb index a1a9543fa..83115e129 100644 --- a/docs/notebooks/05-multi-carrier-system.ipynb +++ b/docs/notebooks/05-multi-carrier-system.ipynb @@ -32,9 +32,6 @@ "metadata": {}, "outputs": [], "source": [ - "import numpy as np\n", - "import pandas as pd\n", - "import plotly.express as px\n", "import xarray as xr\n", "\n", "import flixopt as fx\n", @@ -85,40 +82,15 @@ "metadata": {}, "outputs": [], "source": [ - "# One week, hourly\n", - "timesteps = pd.date_range('2024-02-05', periods=168, freq='h')\n", - "hours = np.arange(168)\n", - "hour_of_day = hours % 24\n", - "\n", - "# Hospital electricity demand (kW)\n", - "# Base load + daily pattern (higher during day for equipment, lighting)\n", - "elec_base = 150 # 24/7 critical systems\n", - "elec_daily = 100 * np.sin((hour_of_day - 6) * np.pi / 12) # Peak at noon\n", - "elec_daily = np.maximum(0, elec_daily)\n", - "electricity_demand = elec_base + elec_daily\n", - "\n", - "# Hospital heat demand (kW)\n", - "# Higher in morning, drops during day, increases for hot water in evening\n", - "heat_pattern = np.select(\n", - " [\n", - " (hour_of_day >= 5) & (hour_of_day < 9), # Morning warmup\n", - " (hour_of_day >= 9) & (hour_of_day < 17), # Daytime\n", - " (hour_of_day >= 17) & (hour_of_day < 22), # Evening\n", - " ],\n", - " [350, 250, 300],\n", - " default=200, # Night\n", - ")\n", - "heat_demand = heat_pattern.astype(float)\n", - "\n", - "# Add random variation\n", - "np.random.seed(456)\n", - "electricity_demand += np.random.normal(0, 15, len(timesteps))\n", - "heat_demand += np.random.normal(0, 20, len(timesteps))\n", - "electricity_demand = np.clip(electricity_demand, 100, 300)\n", - "heat_demand = np.clip(heat_demand, 150, 400)\n", - "\n", - "print(f'Electricity: {electricity_demand.min():.0f} - {electricity_demand.max():.0f} kW')\n", - "print(f'Heat: {heat_demand.min():.0f} - {heat_demand.max():.0f} kW')" + "from data.tutorial_data import get_multicarrier_data\n", + "\n", + "data = get_multicarrier_data()\n", + "timesteps = data['timesteps']\n", + "electricity_demand = data['electricity_demand']\n", + "heat_demand = data['heat_demand']\n", + "elec_buy_price = data['elec_buy_price']\n", + "elec_sell_price = data['elec_sell_price']\n", + "gas_price = data['gas_price']" ] }, { @@ -128,29 +100,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Electricity prices (€/kWh)\n", - "# Time-of-use: expensive during day, cheaper at night\n", - "elec_buy_price = np.where(\n", - " (hour_of_day >= 7) & (hour_of_day <= 21),\n", - " 0.35, # Peak - high electricity prices make CHP attractive\n", - " 0.20, # Off-peak\n", - ")\n", - "\n", - "# Feed-in tariff (sell price) - allows selling excess CHP electricity\n", - "elec_sell_price = 0.12 # Fixed feed-in rate\n", - "\n", - "# Gas price - relatively low, favoring gas-based generation\n", - "gas_price = 0.05 # €/kWh" - ] - }, - { - 
"cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [ - "# Visualize demands and prices with plotly - using xarray and faceting\n", + "# Visualize demands and prices with fxplot\n", "profiles = xr.Dataset(\n", " {\n", " 'Electricity Demand [kW]': xr.DataArray(electricity_demand, dims=['time'], coords={'time': timesteps}),\n", @@ -158,17 +108,12 @@ " 'Elec. Buy Price [€/kWh]': xr.DataArray(elec_buy_price, dims=['time'], coords={'time': timesteps}),\n", " }\n", ")\n", - "\n", - "df = profiles.to_dataframe().reset_index().melt(id_vars='time', var_name='variable', value_name='value')\n", - "fig = px.line(df, x='time', y='value', facet_col='variable', height=300)\n", - "fig.update_yaxes(matches=None, showticklabels=True)\n", - "fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1]))\n", - "fig" + "profiles.fxplot.line(title='Hospital Energy Profiles', height=300)" ] }, { "cell_type": "markdown", - "id": "8", + "id": "7", "metadata": {}, "source": [ "## Build the Multi-Carrier System" @@ -177,7 +122,7 @@ { "cell_type": "code", "execution_count": null, - "id": "9", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -270,7 +215,7 @@ }, { "cell_type": "markdown", - "id": "10", + "id": "9", "metadata": {}, "source": [ "## Run Optimization" @@ -279,7 +224,7 @@ { "cell_type": "code", "execution_count": null, - "id": "11", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -288,7 +233,7 @@ }, { "cell_type": "markdown", - "id": "12", + "id": "11", "metadata": {}, "source": [ "## Analyze Results\n", @@ -299,7 +244,7 @@ { "cell_type": "code", "execution_count": null, - "id": "13", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -308,7 +253,7 @@ }, { "cell_type": "markdown", - "id": "14", + "id": "13", "metadata": {}, "source": [ "### Heat Balance" @@ -317,7 +262,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -326,7 +271,7 @@ }, { "cell_type": "markdown", - "id": "16", + "id": "15", "metadata": {}, "source": [ "### Gas Balance" @@ -335,7 +280,7 @@ { "cell_type": "code", "execution_count": null, - "id": "17", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -344,7 +289,7 @@ }, { "cell_type": "markdown", - "id": "18", + "id": "17", "metadata": {}, "source": [ "### CHP Operation Pattern" @@ -353,7 +298,7 @@ { "cell_type": "code", "execution_count": null, - "id": "19", + "id": "18", "metadata": {}, "outputs": [], "source": [ @@ -362,7 +307,7 @@ }, { "cell_type": "markdown", - "id": "20", + "id": "19", "metadata": {}, "source": [ "### Cost and Emissions Summary" @@ -371,7 +316,7 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -399,7 +344,7 @@ }, { "cell_type": "markdown", - "id": "22", + "id": "21", "metadata": {}, "source": [ "### Compare: What if No CHP?\n", @@ -410,7 +355,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -466,7 +411,7 @@ }, { "cell_type": "markdown", - "id": "24", + "id": "23", "metadata": {}, "source": [ "### Energy Flow Sankey\n", @@ -477,7 +422,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "24", "metadata": {}, "outputs": [], "source": [ @@ -486,7 +431,7 @@ }, { "cell_type": "markdown", - "id": "26", + "id": "25", "metadata": {}, "source": [ "## Key Concepts\n", @@ -548,8 +493,16 @@ "name": "python3" }, "language_info": 
{ + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", "name": "python", - "version": "3.11" + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" } }, "nbformat": 4, diff --git a/docs/notebooks/06a-time-varying-parameters.ipynb b/docs/notebooks/06a-time-varying-parameters.ipynb index 5c833b2ea..ac248aacd 100644 --- a/docs/notebooks/06a-time-varying-parameters.ipynb +++ b/docs/notebooks/06a-time-varying-parameters.ipynb @@ -32,7 +32,6 @@ "outputs": [], "source": [ "import numpy as np\n", - "import pandas as pd\n", "import plotly.express as px\n", "import xarray as xr\n", "\n", @@ -78,20 +77,13 @@ "metadata": {}, "outputs": [], "source": [ - "# One winter week\n", - "timesteps = pd.date_range('2024-01-22', periods=168, freq='h')\n", - "hours = np.arange(168)\n", - "hour_of_day = hours % 24\n", - "\n", - "# Outdoor temperature: daily cycle with cold nights\n", - "temp_base = 2 # Average temp in °C\n", - "temp_amplitude = 5 # Daily variation\n", - "outdoor_temp = temp_base + temp_amplitude * np.sin((hour_of_day - 6) * np.pi / 12)\n", - "\n", - "# Add day-to-day variation for realism\n", - "np.random.seed(789)\n", - "daily_offset = np.repeat(np.random.uniform(-3, 3, 7), 24)\n", - "outdoor_temp = outdoor_temp + daily_offset" + "from data.tutorial_data import get_time_varying_data\n", + "\n", + "data = get_time_varying_data()\n", + "timesteps = data['timesteps']\n", + "outdoor_temp = data['outdoor_temp']\n", + "heat_demand = data['heat_demand']\n", + "cop = data['cop']" ] }, { @@ -101,74 +93,41 @@ "metadata": {}, "outputs": [], "source": [ - "# Heat demand: inversely related to outdoor temp (higher demand when colder)\n", - "heat_demand = 200 - 8 * outdoor_temp\n", - "heat_demand = np.clip(heat_demand, 100, 300)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [ - "# Visualize input profiles\n", + "# Visualize input profiles with fxplot\n", "profiles = xr.Dataset(\n", " {\n", " 'Outdoor Temp [°C]': xr.DataArray(outdoor_temp, dims=['time'], coords={'time': timesteps}),\n", " 'Heat Demand [kW]': xr.DataArray(heat_demand, dims=['time'], coords={'time': timesteps}),\n", " }\n", ")\n", - "\n", - "df = profiles.to_dataframe().reset_index().melt(id_vars='time', var_name='variable', value_name='value')\n", - "fig = px.line(df, x='time', y='value', facet_col='variable', height=300)\n", - "fig.update_yaxes(matches=None, showticklabels=True)\n", - "fig.for_each_annotation(lambda a: a.update(text=a.text.split('=')[-1]))\n", - "fig" + "profiles.fxplot.line(title='Temperature and Heat Demand Profiles', height=300)" ] }, { "cell_type": "markdown", - "id": "8", + "id": "7", "metadata": {}, "source": [ - "## Calculate Time-Varying COP\n", + "## Time-Varying COP\n", "\n", - "The COP depends on outdoor temperature. We use a simplified Carnot-based formula:\n", + "The COP is pre-calculated based on outdoor temperature using a simplified Carnot-based formula:\n", "\n", "$$\\text{COP}_{\\text{real}} \\approx 0.45 \\times \\text{COP}_{\\text{Carnot}} = 0.45 \\times \\frac{T_{\\text{supply}}}{T_{\\text{supply}} - T_{\\text{source}}}$$\n", "\n", - "where temperatures are in Kelvin." 
+ "Let's visualize the relationship:" ] }, { "cell_type": "code", "execution_count": null, - "id": "9", - "metadata": {}, - "outputs": [], - "source": [ - "# COP calculation\n", - "T_supply = 45 + 273.15 # Supply temperature 45°C in Kelvin\n", - "T_source = outdoor_temp + 273.15 # Outdoor temp in Kelvin\n", - "\n", - "carnot_cop = T_supply / (T_supply - T_source)\n", - "real_cop = 0.45 * carnot_cop\n", - "real_cop = np.clip(real_cop, 2.0, 5.0) # Physical limits" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "10", + "id": "8", "metadata": {}, "outputs": [], "source": [ "# Visualize COP vs temperature relationship\n", "px.scatter(\n", " x=outdoor_temp,\n", - " y=real_cop,\n", + " y=cop,\n", " title='Heat Pump COP vs Outdoor Temperature',\n", " labels={'x': 'Outdoor Temperature [°C]', 'y': 'COP'},\n", " opacity=0.5,\n", @@ -177,7 +136,7 @@ }, { "cell_type": "markdown", - "id": "11", + "id": "9", "metadata": {}, "source": [ "## Build the Model\n", @@ -192,7 +151,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -214,7 +173,7 @@ " 'HeatPump',\n", " inputs=[fx.Flow('Elec', bus='Electricity', size=150)],\n", " outputs=[fx.Flow('Heat', bus='Heat', size=500)],\n", - " conversion_factors=[{'Elec': real_cop, 'Heat': 1}], # <-- Array for time-varying COP\n", + " conversion_factors=[{'Elec': cop, 'Heat': 1}], # <-- Array for time-varying COP\n", " ),\n", " # Heat demand\n", " fx.Sink('Building', inputs=[fx.Flow('Heat', bus='Heat', size=1, fixed_relative_profile=heat_demand)]),\n", @@ -225,7 +184,7 @@ }, { "cell_type": "markdown", - "id": "13", + "id": "11", "metadata": {}, "source": [ "## Analyze Results" @@ -234,7 +193,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -244,7 +203,7 @@ { "cell_type": "code", "execution_count": null, - "id": "15", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -254,7 +213,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -283,7 +242,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "15", "metadata": {}, "source": [ "## Key Concepts\n", @@ -323,7 +282,7 @@ }, { "cell_type": "markdown", - "id": "18", + "id": "16", "metadata": {}, "source": [ "## Summary\n", @@ -356,8 +315,16 @@ "name": "python3" }, "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", "name": "python", - "version": "3.10.0" + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" } }, "nbformat": 4, diff --git a/docs/notebooks/07-scenarios-and-periods.ipynb b/docs/notebooks/07-scenarios-and-periods.ipynb index db74afefb..9f80a6c9b 100644 --- a/docs/notebooks/07-scenarios-and-periods.ipynb +++ b/docs/notebooks/07-scenarios-and-periods.ipynb @@ -32,8 +32,6 @@ "metadata": {}, "outputs": [], "source": [ - "import numpy as np\n", - "import pandas as pd\n", "import plotly.express as px\n", "\n", "import flixopt as fx\n", @@ -72,82 +70,22 @@ "metadata": {}, "outputs": [], "source": [ - "# Time horizon: one representative winter week\n", - "timesteps = pd.date_range('2024-01-15', periods=168, freq='h') # 7 days\n", - "\n", - "# Planning periods (years)\n", - "periods = pd.Index([2024, 2025, 2026], name='period')\n", - "\n", - "# Scenarios with probabilities\n", - "scenarios = pd.Index(['Mild Winter', 'Harsh Winter'], 
name='scenario')\n", - "scenario_weights = np.array([0.6, 0.4]) # 60% mild, 40% harsh\n", - "\n", - "print(f'Time dimension: {len(timesteps)} hours')\n", - "print(f'Periods: {list(periods)}')\n", - "print(f'Scenarios: {list(scenarios)}')\n", - "print(f'Scenario weights: {dict(zip(scenarios, scenario_weights, strict=False))}')" - ] - }, - { - "cell_type": "markdown", - "id": "6", - "metadata": {}, - "source": [ - "## Create Scenario-Dependent Demand Profiles\n", - "\n", - "Heat demand differs significantly between mild and harsh winters:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "7", - "metadata": {}, - "outputs": [], - "source": [ - "hours = np.arange(168)\n", - "hour_of_day = hours % 24\n", - "\n", - "# Base daily pattern (kW): higher in morning/evening\n", - "daily_pattern = np.select(\n", - " [\n", - " (hour_of_day >= 6) & (hour_of_day < 9), # Morning peak\n", - " (hour_of_day >= 9) & (hour_of_day < 17), # Daytime\n", - " (hour_of_day >= 17) & (hour_of_day < 22), # Evening peak\n", - " ],\n", - " [180, 120, 160],\n", - " default=100, # Night\n", - ").astype(float)\n", - "\n", - "# Add random variation\n", - "np.random.seed(42)\n", - "noise = np.random.normal(0, 10, len(timesteps))\n", - "\n", - "# Mild winter: lower demand\n", - "mild_demand = daily_pattern * 0.8 + noise\n", - "mild_demand = np.clip(mild_demand, 60, 200)\n", - "\n", - "# Harsh winter: higher demand\n", - "harsh_demand = daily_pattern * 1.3 + noise * 1.5\n", - "harsh_demand = np.clip(harsh_demand, 100, 280)\n", - "\n", - "# Create DataFrame with scenario columns (flixopt uses column names to match scenarios)\n", - "heat_demand = pd.DataFrame(\n", - " {\n", - " 'Mild Winter': mild_demand,\n", - " 'Harsh Winter': harsh_demand,\n", - " },\n", - " index=timesteps,\n", - ")\n", - "\n", - "print(f'Mild winter demand: {mild_demand.min():.0f} - {mild_demand.max():.0f} kW')\n", - "print(f'Harsh winter demand: {harsh_demand.min():.0f} - {harsh_demand.max():.0f} kW')" + "from data.tutorial_data import get_scenarios_data\n", + "\n", + "data = get_scenarios_data()\n", + "timesteps = data['timesteps']\n", + "periods = data['periods']\n", + "scenarios = data['scenarios']\n", + "scenario_weights = data['scenario_weights']\n", + "heat_demand = data['heat_demand']\n", + "gas_prices = data['gas_prices']\n", + "elec_prices = data['elec_prices']" ] }, { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "6", "metadata": {}, "outputs": [], "source": [ @@ -163,39 +101,7 @@ }, { "cell_type": "markdown", - "id": "9", - "metadata": {}, - "source": [ - "## Create Period-Dependent Prices\n", - "\n", - "Energy prices change across planning years:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "10", - "metadata": {}, - "outputs": [], - "source": [ - "# Gas prices by period (€/kWh) - expected to rise\n", - "gas_prices = np.array([0.06, 0.08, 0.10]) # 2024, 2025, 2026\n", - "\n", - "# Electricity sell prices by period (€/kWh) - CHP revenue\n", - "elec_prices = np.array([0.28, 0.34, 0.43]) # Rising with gas\n", - "\n", - "print('Gas prices by period:')\n", - "for period, price in zip(periods, gas_prices, strict=False):\n", - " print(f' {period}: {price:.2f} €/kWh')\n", - "\n", - "print('\\nElectricity sell prices by period:')\n", - "for period, price in zip(periods, elec_prices, strict=False):\n", - " print(f' {period}: {price:.2f} €/kWh')" - ] - }, - { - "cell_type": "markdown", - "id": "11", + "id": "7", "metadata": {}, "source": [ "## Build the Flow System\n", @@ -206,7 +112,7 @@ 
{ "cell_type": "code", "execution_count": null, - "id": "12", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -227,7 +133,7 @@ }, { "cell_type": "markdown", - "id": "13", + "id": "9", "metadata": {}, "source": [ "## Add Components" @@ -236,7 +142,7 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "10", "metadata": {}, "outputs": [], "source": [ @@ -313,7 +219,7 @@ }, { "cell_type": "markdown", - "id": "15", + "id": "11", "metadata": {}, "source": [ "## Run Optimization" @@ -322,7 +228,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "12", "metadata": {}, "outputs": [], "source": [ @@ -331,7 +237,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "13", "metadata": {}, "source": [ "## Analyze Results\n", @@ -342,7 +248,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "14", "metadata": {}, "outputs": [], "source": [ @@ -355,7 +261,7 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "15", "metadata": {}, "source": [ "### Heat Balance by Scenario\n", @@ -366,7 +272,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "16", "metadata": {}, "outputs": [], "source": [ @@ -375,7 +281,7 @@ }, { "cell_type": "markdown", - "id": "21", + "id": "17", "metadata": {}, "source": [ "### CHP Operation Patterns" @@ -384,7 +290,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22", + "id": "18", "metadata": {}, "outputs": [], "source": [ @@ -393,7 +299,7 @@ }, { "cell_type": "markdown", - "id": "23", + "id": "19", "metadata": {}, "source": [ "### Multi-Dimensional Data Access\n", @@ -404,7 +310,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -419,7 +325,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -434,7 +340,7 @@ }, { "cell_type": "markdown", - "id": "26", + "id": "22", "metadata": {}, "source": [ "## Sensitivity: What if Only Mild Winter?\n", @@ -445,7 +351,7 @@ { "cell_type": "code", "execution_count": null, - "id": "27", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -463,7 +369,7 @@ }, { "cell_type": "markdown", - "id": "28", + "id": "24", "metadata": {}, "source": [ "### Energy Flow Sankey\n", @@ -474,7 +380,7 @@ { "cell_type": "code", "execution_count": null, - "id": "29", + "id": "25", "metadata": {}, "outputs": [], "source": [ @@ -483,7 +389,7 @@ }, { "cell_type": "markdown", - "id": "30", + "id": "26", "metadata": {}, "source": [ "## Key Concepts\n", From 5ea865159aa85c811b42dd0931cdbc2cfcbd5d5b Mon Sep 17 00:00:00 2001 From: FBumann <117816358+FBumann@users.noreply.github.com> Date: Mon, 5 Jan 2026 17:52:06 +0100 Subject: [PATCH 191/191] Feature/comparison (#550) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add dataset plot accessor * Add fxplot acessor showcase * The internal plot accessors now leverage the shared .fxplot implementation, reducing code duplication while maintaining the same functionality (data preparation, color resolution from components, PlotResult wrapping). * Fix notebook * 1. xlabel/ylabel parameters - Added to bar(), stacked_bar(), line(), area(), and duration_curve() methods in both DatasetPlotAccessor and DataArrayPlotAccessor 2. scatter() method - Plots two variables against each other with x and y parameters 3. pie() method - Creates pie charts from aggregated (scalar) dataset values, e.g. ds.sum('time').fxplot.pie() 4. 
duration_curve() method - Sorts values along the time dimension in descending
   order, with optional normalize parameter for percentage x-axis
5. CONFIG.Plotting.default_line_shape - New config option (default 'hv') that
   controls the default line shape for line(), area(), and duration_curve() methods
* Fix faceting of pie
* Improve auto dim handling
* Improve notebook
* Fix pie plot
* Logic order changed:
  1. X-axis is now determined first using CONFIG.Plotting.x_dim_priority
  2. Facets are resolved from remaining dimensions (x-axis excluded)

  x_dim_priority expanded:
    x_dim_priority = ('time', 'duration', 'duration_pct', 'period', 'scenario', 'cluster')
  - Time-like dims first, then common grouping dims as fallback
  - variable stays excluded (it's used for color, not x-axis)

  _get_x_dim() refactored:
  - Now takes dims: list[str] instead of a DataFrame
  - More versatile - works with any list of dimension names
* Add x parameter and x_dim_priority config to fxplot
  - Add `x` parameter to bar/stacked_bar/line/area for explicit x-axis control
  - Add CONFIG.Plotting.x_dim_priority for auto x-axis selection order
  - X-axis determined first, facets from remaining dimensions
  - Refactor _get_x_column -> _get_x_dim (takes dim list, not DataFrame)
  - Support scalar data (no dims) by using 'variable' as x-axis
* Add x parameter and smart dimension handling to fxplot
  - Add `x` parameter to bar/stacked_bar/line/area for explicit x-axis control
  - Add CONFIG.Plotting.x_dim_priority for auto x-axis selection
    Default: ('time', 'duration', 'duration_pct', 'period', 'scenario', 'cluster')
  - X-axis determined first, facets resolved from remaining dimensions
  - Refactor _get_x_column -> _get_x_dim (takes dim list, more versatile)
  - Support scalar data (no dims) by using 'variable' as x-axis
  - Skip color='variable' when x='variable' to avoid double encoding
  - Fix _dataset_to_long_df to use dims (not just coords) as id_vars
* Add x parameter and smart dimension handling to fxplot
  - Add `x` parameter to bar/stacked_bar/line/area for explicit x-axis control
  - Add CONFIG.Plotting.x_dim_priority for auto x-axis selection
    Default: ('time', 'duration', 'duration_pct', 'period', 'scenario', 'cluster')
  - X-axis determined first, facets resolved from remaining dimensions
  - Refactor _get_x_column -> _get_x_dim (takes dim list, more versatile)
  - Support scalar data (no dims) by using 'variable' as x-axis
  - Skip color='variable' when x='variable' to avoid double encoding
  - Fix _dataset_to_long_df to use dims (not just coords) as id_vars
  - Ensure px_kwargs properly overrides all defaults (color, facets, etc.)
* Improve documentation
* Fix notebook in docs
* 1. heatmap kwarg merge order - Now uses **{**imshow_args, **imshow_kwargs} so
     user can override
  2. scatter unused colors - Removed the unused parameter
  3. to_duration_curve sorting - Changed [::-1] to np.flip(..., axis=time_axis)
     for correct multi-dimensional handling
  4. DataArrayPlotAccessor.heatmap - Same kwarg merge fix
* Improve docstrings
* Update notebooks to not do file operations
* Add Comparison class
* Add Release notes
* Add Comparison class to all Notebooks
* Update comparison.py and add documentation
* ⏺ The class went from ~560 lines to ~115 lines. Key simplifications:
  1. __getattr__ - dynamically delegates any method to the underlying accessor
  2. _wrap_plot_method - single method that handles all the data collection and
     concatenation
  3. _recreate_figure - infers plot type from the original figure and recreates
     with combined data

  Tradeoffs:
  - Less explicit type hints on method signatures (but still works the same)
  - Infers plot type from original figure rather than hardcoding per method
  - Automatically supports any new methods added to StatisticsPlotAccessor in the future
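A minimal sketch of the delegation pattern summarized above — plain Python, not the
actual flixopt implementation; class and attribute names are illustrative:

```python
class ComparisonPlotAccessor:
    """Sketch: forward any plot method to each case's accessor, then combine."""

    def __init__(self, accessors, names):
        self._accessors = accessors  # one StatisticsPlotAccessor-like object per case
        self._names = names          # case labels, e.g. FlowSystem names

    def __getattr__(self, method_name):
        # __getattr__ is only called for attributes not found normally, so every
        # unknown method name is dynamically delegated to the underlying accessors.
        def wrapped(*args, **kwargs):
            figures = [getattr(acc, method_name)(*args, **kwargs) for acc in self._accessors]
            return self._combine(figures)

        return wrapped

    def _combine(self, figures):
        # Placeholder for the real steps: collect each figure's data, concatenate
        # along a 'case' dimension, infer the plot type, and re-plot once.
        return figures
```

Because `__getattr__` only fires on lookup misses, any method later added to the
wrapped accessor is picked up automatically, which is the tradeoff noted above.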
* Minor bugfix
* Now all methods properly split kwargs and pass plotly_kwargs to the figure
  creation. The _DATA_KWARGS mapping defines which kwargs affect data processing -
  everything else passes through to plotly.
* Improve documentation and CHANGELOG.md
* Fix core dims
* Fix Changelog and change to v6.0.0
* Enhanced Clustering Control

  New Parameters Added to cluster() Method

  | Parameter               | Type                              | Default              | Purpose                                                                                                             |
  |-------------------------|-----------------------------------|----------------------|---------------------------------------------------------------------------------------------------------------------|
  | cluster_method          | `Literal[...]`                    | 'k_means'            | Clustering algorithm ('k_means', 'hierarchical', 'k_medoids', 'k_maxoids', 'averaging')                             |
  | representation_method   | `Literal[...]`                    | 'meanRepresentation' | How clusters are represented ('meanRepresentation', 'medoidRepresentation', 'distributionAndMinMaxRepresentation')  |
  | extreme_period_method   | `Literal[...]`                    | 'new_cluster_center' | How peaks are integrated ('None', 'append', 'new_cluster_center', 'replace_cluster_center')                          |
  | rescale_cluster_periods | `bool`                            | True                 | Rescale clusters to match original means                                                                             |
  | random_state            | `int \| None`                     | None                 | Random seed for reproducibility                                                                                      |
  | predef_cluster_order    | `np.ndarray \| list[int] \| None` | None                 | Manual clustering assignments                                                                                        |
  | **tsam_kwargs           | `Any`                             | -                    | Pass-through for any tsam parameter                                                                                  |

  Clustering Quality Metrics

  Access via fs.clustering.metrics after clustering - returns a DataFrame with
  RMSE, MAE, and other accuracy indicators per time series.

  Files Modified
  1. flixopt/transform_accessor.py - Updated cluster() signature and tsam call
  2. flixopt/clustering/base.py - Added metrics field to Clustering class
  3. tests/test_clustering/test_integration.py - Added tests for new parameters
  4. docs/user-guide/optimization/clustering.md - Updated documentation
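A minimal, self-contained sketch of the extended `cluster()` call (the toy system is
copied from the test fixture earlier in this patch; parameter values are illustrative,
not recommendations, and running it requires the optional tsam dependency):

```python
import numpy as np
import pandas as pd

from flixopt import Bus, Flow, FlowSystem, Sink, Source
from flixopt.core import TimeSeriesData

n_hours = 168  # 7 days, hourly
fs = FlowSystem(timesteps=pd.date_range('2024-01-01', periods=n_hours, freq='h'))
demand_data = np.sin(np.linspace(0, 14 * np.pi, n_hours)) + 2
fs.add_elements(
    Bus('electricity'),
    Source('grid', outputs=[Flow('grid_in', bus='electricity', size=100)]),
    Sink(
        'demand',
        inputs=[
            Flow(
                'demand_out',
                bus='electricity',
                size=100,
                fixed_relative_profile=TimeSeriesData(demand_data / 100),
            )
        ],
    ),
)

fs_clustered = fs.transform.cluster(
    n_clusters=2,
    cluster_duration='1D',
    cluster_method='k_means',                    # default per the table above
    representation_method='meanRepresentation',  # default per the table above
    extreme_period_method='new_cluster_center',
    rescale_cluster_periods=True,
)
print(fs_clustered.clustering.metrics)  # RMSE/MAE-style accuracy indicators
```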
* Dimension renamed: original_period → original_cluster
  Property renamed: n_original_periods → n_original_clusters
* Problem: Expanded FlowSystem from clustering didn't have the extra timestep that
  regular FlowSystems have.

  Root Cause: In expand_solution(), the solution was only indexed by
  original_timesteps (n elements) instead of original_timesteps_extra (n+1 elements).

  Fix in flixopt/transform_accessor.py:
  1. Reindex solution to timesteps_extra (line 1296-1298):
     - Added expanded_fs._solution.reindex(time=original_timesteps_extra) for
       consistency with non-expanded FlowSystems
  2. Fill extra timestep for charge_state (lines 1300-1333):
     - Added special handling to properly fill the extra timestep for storage
       charge_state variables using the last cluster's extra timestep value
  3. Updated intercluster storage handling (lines 1340-1388):
     - Modified to work with original_timesteps_extra instead of just original_timesteps
     - The extra timestep now correctly gets the final SOC boundary value with
       proper decay applied

  Tests updated in tests/test_cluster_reduce_expand.py:
  - Updated 4 assertions that check solution time coordinates to expect 193
    (192 + 1 extra) instead of 192
* - 'variable' is treated as a special valid facet value (since it exists in the
    melted DataFrame from data_var names, not as a dimension)
  - When facet_row='variable' or facet_col='variable' is passed, it's passed
    through directly
  - In line(), when faceting by variable, it's not also used for color (avoids
    double encoding)
* Add variable and color to auto resolving in fxplot
* Added 'variable' to both priority lists and updated the logic to treat it
  consistently:

  flixopt/config.py:
    'extra_dim_priority': ('variable', 'cluster', 'period', 'scenario'),
    'x_dim_priority': ('time', 'duration', 'duration_pct', 'variable', 'period', 'scenario', 'cluster'),

  flixopt/dataset_plot_accessor.py:
  - _get_x_dim: Now takes n_data_vars parameter; 'variable' is available when > 1
  - _resolve_auto_facets: 'variable' is available when len(data_vars) > 1 and
    respects exclude_dims

  Behavior:
  - 'variable' is treated like any other dimension in the priority system
  - Only available when there are multiple data_vars
  - Properly excluded when already used (e.g., for x-axis)
* Improve plotting, especially for clustering
* Drop cluster index when expanding
* Fix storage expansion
* Improve clustering
* fix scatter plot faceting
* ⏺ Fixed the documentation in the notebook:
  1. Cell 32 (API Reference table): Updated defaults to 'hierarchical',
     'medoidRepresentation', and None
  2. Cell 16: Swapped the example to show k_means as the alternative (since
     hierarchical is now default)
  3. Cell 17: Updated variable names to match
  4. Cell 33 (Key Takeaways): Clarified that random_state is only needed for
     non-deterministic methods like 'k_means'

  The code review
* 1. Error handling for accuracyIndicators() - Added try/except with warning log
     and empty DataFrame fallback, plus handling empty DataFrames when building
     the metrics Dataset
  2. Random state to tsam - Replaced global np.random.seed() with passing seed
     parameter directly to tsam's TimeSeriesAggregation
  3. tsam_kwargs conflict validation - Added validation that raises ValueError if
     user tries to override explicit parameters via **tsam_kwargs (including seed)
  4. predef_cluster_order validation - Added dimension validation for DataArray
     inputs, checking they match the FlowSystem's period/scenario structure
  5. Out-of-bounds fix - Clamped last_original_cluster_idx to n_original_clusters - 1
     to handle partial clusters at the end
* 1. DataFrame truth ambiguity - Changed non_empty_metrics.get(first_key) or
     next(...) to explicit if metrics_df is None: check
  2. removed random state
* Fix pie plot animation frame and add warnings for unassigned dims
* Change logger warning to regular warning
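A toy xarray illustration of the reindex step from the extra-timestep fix above
(variable names mirror the patch; the data itself is fabricated):

```python
import numpy as np
import pandas as pd
import xarray as xr

# 192 regular timesteps plus one extra boundary timestep, as in the updated tests
original_timesteps = pd.date_range('2024-01-01', periods=192, freq='h')
original_timesteps_extra = pd.date_range('2024-01-01', periods=193, freq='h')

solution = xr.Dataset(
    {'flow_rate': ('time', np.ones(192))},
    coords={'time': original_timesteps},
)
solution = solution.reindex(time=original_timesteps_extra)  # appends one NaN-filled step
assert solution.sizes['time'] == 193  # 192 + 1 extra timestep
```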
* ⏺ The centralized slot assignment system is now complete. Here's a summary of
  the changes made:

  Changes Made
  1. flixopt/config.py
     - Replaced three separate config attributes (extra_dim_priority,
       dim_slot_priority, x_dim_priority) with a single unified dim_priority tuple
     - Updated CONFIG.Plotting class docstring and attribute definitions
     - Updated to_dict() method to use the new attribute
     - The new priority order: ('time', 'duration', 'duration_pct', 'variable', 'cluster', 'period', 'scenario')
  2. flixopt/dataset_plot_accessor.py
     - Created new assign_slots() function that centralizes all dimension-to-slot
       assignment logic
     - Fixed slot fill order: x → color → facet_col → facet_row → animation_frame
     - Updated all plot methods (bar, stacked_bar, line, area, heatmap, scatter,
       pie) to use assign_slots()
     - Removed old _get_x_dim() and _resolve_auto_facets() functions
     - Updated docstrings to reference dim_priority instead of x_dim_priority
  3. flixopt/statistics_accessor.py
     - Updated _resolve_auto_facets() to use the new assign_slots() function internally
     - Added import for assign_slots from dataset_plot_accessor

  Key Design Decisions
  - Single priority list controls all auto-assignment
  - Slots are filled in fixed order based on availability
  - None means a slot is not available for that plot type
  - 'auto' triggers auto-assignment from priority list
  - Explicit string values override auto-assignment
* Add slot_order to config
* Add new assign_slots() method
* Fix heatmap and convert all to use fxplot
* Fix heatmap
* Merge remote-tracking branch 'origin/feature/tsam-params' into feature/comparison

  # Conflicts:
  #   docs/notebooks/08c-clustering.ipynb
  #   flixopt/config.py
* comparison.py:
  1. Removed _resolve_facets method - fxplot handles 'auto' resolution internally
  2. Updated all methods to pass facet params directly to fxplot
  3. sizes now uses ds.fxplot.bar() instead of px.bar
  4. effects now uses ds.fxplot.bar() with proper column naming

  statistics_accessor.py:
  1. Simplified effects method significantly:
     - Works directly with Dataset (no DataArray concat/conversion)
     - Uses dict.get for aspect selection
     - Cleaner aggregation logic
     - Returns Dataset with effects as data variables
     - Uses fxplot.bar instead of px.bar

  The code is now consistent - all plotting methods in both StatisticsPlotAccessor
  and ComparisonStatisticsPlot use fxplot for centralized dimension/slot handling.
* Squeeze singleton dims in heatmap()
* Replaced print statements with class repr
* 1. 08a-aggregation.ipynb cell 16: Removed corrupted markdown tag from markdown source
  2.
flixopt/comparison.py line 75: Added fallback for None names: # Before self._names = names or [fs.name for fs in flow_systems] # After self._names = names or [fs.name or f'System {i}' for i, fs in enumerate(flow_systems)] --- CHANGELOG.md | 27 + docs/notebooks/02-heat-system.ipynb | 33 +- .../03-investment-optimization.ipynb | 58 +- .../04-operational-constraints.ipynb | 73 ++- docs/notebooks/05-multi-carrier-system.ipynb | 94 ++- .../06a-time-varying-parameters.ipynb | 20 +- docs/notebooks/07-scenarios-and-periods.ipynb | 110 ++-- docs/notebooks/08a-aggregation.ipynb | 38 +- docs/notebooks/08b-rolling-horizon.ipynb | 39 +- docs/notebooks/08c-clustering.ipynb | 68 +- .../08c2-clustering-storage-modes.ipynb | 33 +- .../08d-clustering-multiperiod.ipynb | 24 +- docs/user-guide/results/index.md | 150 +++++ flixopt/__init__.py | 2 + flixopt/comparison.py | 609 ++++++++++++++++++ flixopt/statistics_accessor.py | 127 ++-- 16 files changed, 1180 insertions(+), 325 deletions(-) create mode 100644 flixopt/comparison.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 68d3d6b92..bad4e4d52 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -60,6 +60,33 @@ Until here --> ### ✨ Added +**FlowSystem Comparison**: New `Comparison` class for comparing multiple FlowSystems side-by-side: + +```python +# Compare systems (uses FlowSystem.name by default) +comp = fx.Comparison([fs_base, fs_modified]) + +# Or with custom names +comp = fx.Comparison([fs1, fs2, fs3], names=['baseline', 'low_cost', 'high_eff']) + +# Side-by-side plots (auto-facets by 'case' dimension) +comp.statistics.plot.balance('Heat') +comp.statistics.flow_rates.fxplot.line() + +# Access combined data with 'case' dimension +comp.solution # xr.Dataset +comp.statistics.flow_rates # xr.Dataset + +# Compute differences relative to a reference case +comp.diff() # vs first case +comp.diff('baseline') # vs named case +``` + +- Concatenates solutions and statistics from multiple FlowSystems with a `'case'` dimension +- Mirrors all `StatisticsAccessor` properties (`flow_rates`, `flow_hours`, `sizes`, `charge_states`, `temporal_effects`, `periodic_effects`, `total_effects`) +- Mirrors all `StatisticsPlotAccessor` methods (`balance`, `carrier_balance`, `flows`, `sizes`, `duration_curve`, `effects`, `charge_states`, `heatmap`, `storage`) +- Existing plotting infrastructure automatically handles faceting by `'case'` + **Time-Series Clustering**: Reduce large time series to representative typical periods for faster investment optimization, then expand results back to full resolution. 
```python diff --git a/docs/notebooks/02-heat-system.ipynb b/docs/notebooks/02-heat-system.ipynb index 9b47a96b4..d3514de15 100644 --- a/docs/notebooks/02-heat-system.ipynb +++ b/docs/notebooks/02-heat-system.ipynb @@ -32,6 +32,7 @@ "metadata": {}, "outputs": [], "source": [ + "import pandas as pd\n", "import xarray as xr\n", "\n", "import flixopt as fx\n", @@ -281,12 +282,16 @@ "metadata": {}, "outputs": [], "source": [ - "total_costs = flow_system.solution['costs'].item()\n", "total_heat = heat_demand.sum()\n", "\n", - "print(f'Total operating costs: {total_costs:.2f} €')\n", - "print(f'Total heat delivered: {total_heat:.0f} kWh')\n", - "print(f'Average cost: {total_costs / total_heat * 100:.2f} ct/kWh')" + "pd.DataFrame(\n", + " {\n", + " 'Total operating costs [EUR]': flow_system.solution['costs'].item(),\n", + " 'Total heat delivered [kWh]': total_heat,\n", + " 'Average cost [ct/kWh]': flow_system.solution['costs'].item() / total_heat * 100,\n", + " },\n", + " index=['Value'],\n", + ").T" ] }, { @@ -370,25 +375,7 @@ ] } ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.11" - } - }, + "metadata": {}, "nbformat": 4, "nbformat_minor": 5 } diff --git a/docs/notebooks/03-investment-optimization.ipynb b/docs/notebooks/03-investment-optimization.ipynb index c31bfaee2..a4ae769c5 100644 --- a/docs/notebooks/03-investment-optimization.ipynb +++ b/docs/notebooks/03-investment-optimization.ipynb @@ -32,6 +32,7 @@ "metadata": {}, "outputs": [], "source": [ + "import pandas as pd\n", "import xarray as xr\n", "\n", "import flixopt as fx\n", @@ -229,9 +230,14 @@ "solar_size = flow_system.statistics.sizes['SolarCollectors(Heat)'].item()\n", "tank_size = flow_system.statistics.sizes['BufferTank'].item()\n", "\n", - "print(\n", - " f'Optimal sizes: Solar {solar_size:.0f} kW, Tank {tank_size:.0f} kWh (ratio: {tank_size / solar_size:.1f} kWh/kW)'\n", - ")" + "pd.DataFrame(\n", + " {\n", + " 'Solar [kW]': solar_size,\n", + " 'Tank [kWh]': tank_size,\n", + " 'Ratio [kWh/kW]': tank_size / solar_size,\n", + " },\n", + " index=['Optimal Size'],\n", + ").T" ] }, { @@ -274,8 +280,13 @@ "tank_invest = tank_size * TANK_COST_WEEKLY\n", "gas_costs = total_costs - solar_invest - tank_invest\n", "\n", - "print(\n", - " f'Weekly costs: Solar {solar_invest:.1f}€ ({solar_invest / total_costs * 100:.0f}%) + Tank {tank_invest:.1f}€ ({tank_invest / total_costs * 100:.0f}%) + Gas {gas_costs:.1f}€ ({gas_costs / total_costs * 100:.0f}%) = {total_costs:.1f}€'\n", + "pd.DataFrame(\n", + " {\n", + " 'Solar Investment': {'EUR': solar_invest, '%': solar_invest / total_costs * 100},\n", + " 'Tank Investment': {'EUR': tank_invest, '%': tank_invest / total_costs * 100},\n", + " 'Gas Costs': {'EUR': gas_costs, '%': gas_costs / total_costs * 100},\n", + " 'Total': {'EUR': total_costs, '%': 100.0},\n", + " }\n", ")" ] }, @@ -334,14 +345,21 @@ "metadata": {}, "outputs": [], "source": [ - "# Gas-only scenario\n", + "# Gas-only scenario for comparison\n", "total_demand = pool_demand.sum()\n", "gas_only_cost = total_demand / 0.92 * GAS_PRICE # All heat from gas boiler\n", - "\n", "savings = gas_only_cost - total_costs\n", - "print(\n", - " f'Solar saves {savings:.1f}€/week ({savings / gas_only_cost * 100:.0f}%) vs gas-only 
({gas_only_cost:.1f}€) → {savings * 52:.0f}€/year'\n", - ")" + "\n", + "pd.DataFrame(\n", + " {\n", + " 'Gas-only [EUR/week]': gas_only_cost,\n", + " 'With Solar [EUR/week]': total_costs,\n", + " 'Savings [EUR/week]': savings,\n", + " 'Savings [%]': savings / gas_only_cost * 100,\n", + " 'Savings [EUR/year]': savings * 52,\n", + " },\n", + " index=['Value'],\n", + ").T" ] }, { @@ -406,25 +424,7 @@ ] } ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.11" - } - }, + "metadata": {}, "nbformat": 4, "nbformat_minor": 5 } diff --git a/docs/notebooks/04-operational-constraints.ipynb b/docs/notebooks/04-operational-constraints.ipynb index 017761b5a..e55f2aded 100644 --- a/docs/notebooks/04-operational-constraints.ipynb +++ b/docs/notebooks/04-operational-constraints.ipynb @@ -32,6 +32,7 @@ "metadata": {}, "outputs": [], "source": [ + "import pandas as pd\n", "import xarray as xr\n", "\n", "import flixopt as fx\n", @@ -84,8 +85,12 @@ "metadata": {}, "outputs": [], "source": [ - "# Visualize demand with fxplot\n", - "demand_ds = xr.Dataset({'Steam Demand': xr.DataArray(steam_demand, dims=['time'], coords={'time': timesteps})})\n", + "# Visualize the demand with fxplot\n", + "demand_ds = xr.Dataset(\n", + " {\n", + " 'Steam Demand [kW]': xr.DataArray(steam_demand, dims=['time'], coords={'time': timesteps}),\n", + " }\n", + ")\n", "demand_ds.fxplot.line(title='Factory Steam Demand')" ] }, @@ -104,7 +109,7 @@ "metadata": {}, "outputs": [], "source": [ - "flow_system = fx.FlowSystem(timesteps)\n", + "flow_system = fx.FlowSystem(timesteps, name='Constrained')\n", "\n", "# Define and register custom carriers\n", "flow_system.add_carriers(\n", @@ -268,8 +273,12 @@ "startup_costs = total_startups * 50\n", "gas_costs = total_costs - startup_costs\n", "\n", - "print(\n", - " f'{total_startups} startups × 50€ = {startup_costs:.0f}€ startup + {gas_costs:.0f}€ gas = {total_costs:.0f}€ total'\n", + "pd.DataFrame(\n", + " {\n", + " 'Startups': {'Count': total_startups, 'EUR': startup_costs},\n", + " 'Gas': {'Count': '-', 'EUR': gas_costs},\n", + " 'Total': {'Count': '-', 'EUR': total_costs},\n", + " }\n", ")" ] }, @@ -321,7 +330,7 @@ "outputs": [], "source": [ "# Build unconstrained system\n", - "fs_unconstrained = fx.FlowSystem(timesteps)\n", + "fs_unconstrained = fx.FlowSystem(timesteps, name='Unconstrained')\n", "fs_unconstrained.add_carriers(\n", " fx.Carrier('gas', '#3498db', 'kW'),\n", " fx.Carrier('steam', '#87CEEB', 'kW_th', 'Process steam'),\n", @@ -351,14 +360,43 @@ "fs_unconstrained.optimize(fx.solvers.HighsSolver())\n", "unconstrained_costs = fs_unconstrained.solution['costs'].item()\n", "\n", - "constraint_overhead = (total_costs - unconstrained_costs) / unconstrained_costs * 100\n", - "print(f'Constraints add {constraint_overhead:.1f}% cost: {unconstrained_costs:.0f}€ → {total_costs:.0f}€')" + "pd.DataFrame(\n", + " {\n", + " 'Without Constraints': {'Cost [EUR]': unconstrained_costs},\n", + " 'With Constraints': {'Cost [EUR]': total_costs},\n", + " 'Overhead': {\n", + " 'Cost [EUR]': total_costs - unconstrained_costs,\n", + " '%': (total_costs - unconstrained_costs) / unconstrained_costs * 100,\n", + " },\n", + " }\n", + ")" ] }, { "cell_type": "markdown", "id": "24", "metadata": {}, + 
"source": [ + "### Side-by-Side Comparison\n", + "\n", + "Use the `Comparison` class to visualize both systems together:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25", + "metadata": {}, + "outputs": [], + "source": [ + "comp = fx.Comparison([fs_unconstrained, flow_system])\n", + "comp.statistics.plot.effects()" + ] + }, + { + "cell_type": "markdown", + "id": "26", + "metadata": {}, "source": [ "### Energy Flow Sankey\n", "\n", @@ -368,7 +406,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "27", "metadata": {}, "outputs": [], "source": [ @@ -377,7 +415,7 @@ }, { "cell_type": "markdown", - "id": "26", + "id": "28", "metadata": {}, "source": [ "## Key Concepts\n", @@ -430,17 +468,10 @@ } ], "metadata": { - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.11" + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" } }, "nbformat": 4, diff --git a/docs/notebooks/05-multi-carrier-system.ipynb b/docs/notebooks/05-multi-carrier-system.ipynb index 83115e129..1feab6e4f 100644 --- a/docs/notebooks/05-multi-carrier-system.ipynb +++ b/docs/notebooks/05-multi-carrier-system.ipynb @@ -32,6 +32,7 @@ "metadata": {}, "outputs": [], "source": [ + "import pandas as pd\n", "import xarray as xr\n", "\n", "import flixopt as fx\n", @@ -105,7 +106,7 @@ " {\n", " 'Electricity Demand [kW]': xr.DataArray(electricity_demand, dims=['time'], coords={'time': timesteps}),\n", " 'Heat Demand [kW]': xr.DataArray(heat_demand, dims=['time'], coords={'time': timesteps}),\n", - " 'Elec. Buy Price [€/kWh]': xr.DataArray(elec_buy_price, dims=['time'], coords={'time': timesteps}),\n", + " 'Elec. 
Buy Price [EUR/kWh]': xr.DataArray(elec_buy_price, dims=['time'], coords={'time': timesteps}),\n", " }\n", ")\n", "profiles.fxplot.line(title='Hospital Energy Profiles', height=300)" @@ -126,7 +127,7 @@ "metadata": {}, "outputs": [], "source": [ - "flow_system = fx.FlowSystem(timesteps)\n", + "flow_system = fx.FlowSystem(timesteps, name='With CHP')\n", "flow_system.add_carriers(\n", " fx.Carrier('gas', '#3498db', 'kW'),\n", " fx.Carrier('electricity', '#f1c40f', 'kW'),\n", @@ -320,9 +321,6 @@ "metadata": {}, "outputs": [], "source": [ - "total_costs = flow_system.solution['costs'].item()\n", - "total_co2 = flow_system.solution['CO2'].item()\n", - "\n", "# Energy flows\n", "flow_rates = flow_system.statistics.flow_rates\n", "grid_buy = flow_rates['GridBuy(Electricity)'].sum().item()\n", @@ -334,12 +332,20 @@ "total_elec = electricity_demand.sum()\n", "total_heat = heat_demand.sum()\n", "\n", - "# Display as compact summary\n", - "print(\n", - " f'Electricity: {chp_elec:.0f} kWh CHP ({chp_elec / total_elec * 100:.0f}%) + {grid_buy:.0f} kWh grid, {grid_sell:.0f} kWh sold'\n", - ")\n", - "print(f'Heat: {chp_heat:.0f} kWh CHP ({chp_heat / total_heat * 100:.0f}%) + {boiler_heat:.0f} kWh boiler')\n", - "print(f'Costs: {total_costs:.2f} € | CO2: {total_co2:.0f} kg')" + "pd.DataFrame(\n", + " {\n", + " 'CHP Electricity [kWh]': chp_elec,\n", + " 'CHP Electricity [%]': chp_elec / total_elec * 100,\n", + " 'Grid Buy [kWh]': grid_buy,\n", + " 'Grid Sell [kWh]': grid_sell,\n", + " 'CHP Heat [kWh]': chp_heat,\n", + " 'CHP Heat [%]': chp_heat / total_heat * 100,\n", + " 'Boiler Heat [kWh]': boiler_heat,\n", + " 'Total Costs [EUR]': flow_system.solution['costs'].item(),\n", + " 'Total CO2 [kg]': flow_system.solution['CO2'].item(),\n", + " },\n", + " index=['Value'],\n", + ").T" ] }, { @@ -360,7 +366,7 @@ "outputs": [], "source": [ "# Build system without CHP\n", - "fs_no_chp = fx.FlowSystem(timesteps)\n", + "fs_no_chp = fx.FlowSystem(timesteps, name='No CHP')\n", "fs_no_chp.add_carriers(\n", " fx.Carrier('gas', '#3498db', 'kW'),\n", " fx.Carrier('electricity', '#f1c40f', 'kW'),\n", @@ -399,13 +405,24 @@ "\n", "fs_no_chp.optimize(fx.solvers.HighsSolver())\n", "\n", + "total_costs = flow_system.solution['costs'].item()\n", + "total_co2 = flow_system.solution['CO2'].item()\n", "no_chp_costs = fs_no_chp.solution['costs'].item()\n", "no_chp_co2 = fs_no_chp.solution['CO2'].item()\n", "\n", - "cost_saving = (no_chp_costs - total_costs) / no_chp_costs * 100\n", - "co2_saving = (no_chp_co2 - total_co2) / no_chp_co2 * 100\n", - "print(\n", - " f'CHP saves {cost_saving:.1f}% costs ({no_chp_costs:.0f}→{total_costs:.0f} €) and {co2_saving:.1f}% CO2 ({no_chp_co2:.0f}→{total_co2:.0f} kg)'\n", + "pd.DataFrame(\n", + " {\n", + " 'Without CHP': {'Cost [EUR]': no_chp_costs, 'CO2 [kg]': no_chp_co2},\n", + " 'With CHP': {'Cost [EUR]': total_costs, 'CO2 [kg]': total_co2},\n", + " 'Savings': {\n", + " 'Cost [EUR]': no_chp_costs - total_costs,\n", + " 'CO2 [kg]': no_chp_co2 - total_co2,\n", + " },\n", + " 'Savings [%]': {\n", + " 'Cost [EUR]': (no_chp_costs - total_costs) / no_chp_costs * 100,\n", + " 'CO2 [kg]': (no_chp_co2 - total_co2) / no_chp_co2 * 100,\n", + " },\n", + " }\n", ")" ] }, @@ -413,6 +430,37 @@ "cell_type": "markdown", "id": "23", "metadata": {}, + "source": [ + "### Side-by-Side Comparison\n", + "\n", + "Use the `Comparison` class to visualize both systems together:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "24", + "metadata": {}, + "outputs": [], + "source": [ + "comp = 
fx.Comparison([fs_no_chp, flow_system])\n", + "comp.statistics.plot.balance('Electricity')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "25", + "metadata": {}, + "outputs": [], + "source": [ + "comp.statistics.plot.balance('Heat')" + ] + }, + { + "cell_type": "markdown", + "id": "26", + "metadata": {}, "source": [ "### Energy Flow Sankey\n", "\n", @@ -422,7 +470,7 @@ { "cell_type": "code", "execution_count": null, - "id": "24", + "id": "27", "metadata": {}, "outputs": [], "source": [ @@ -431,7 +479,7 @@ }, { "cell_type": "markdown", - "id": "25", + "id": "28", "metadata": {}, "source": [ "## Key Concepts\n", @@ -493,16 +541,8 @@ "name": "python3" }, "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.11" + "version": "3.11" } }, "nbformat": 4, diff --git a/docs/notebooks/06a-time-varying-parameters.ipynb b/docs/notebooks/06a-time-varying-parameters.ipynb index ac248aacd..5ebca688e 100644 --- a/docs/notebooks/06a-time-varying-parameters.ipynb +++ b/docs/notebooks/06a-time-varying-parameters.ipynb @@ -308,25 +308,7 @@ ] } ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.11" - } - }, + "metadata": {}, "nbformat": 4, "nbformat_minor": 5 } diff --git a/docs/notebooks/07-scenarios-and-periods.ipynb b/docs/notebooks/07-scenarios-and-periods.ipynb index 9f80a6c9b..0f3cbaef0 100644 --- a/docs/notebooks/07-scenarios-and-periods.ipynb +++ b/docs/notebooks/07-scenarios-and-periods.ipynb @@ -32,7 +32,8 @@ "metadata": {}, "outputs": [], "source": [ - "import plotly.express as px\n", + "import pandas as pd\n", + "import xarray as xr\n", "\n", "import flixopt as fx\n", "\n", @@ -82,26 +83,40 @@ "elec_prices = data['elec_prices']" ] }, + { + "cell_type": "markdown", + "id": "6", + "metadata": {}, + "source": [ + "## Scenario-Dependent Demand Profiles\n", + "\n", + "Heat demand differs significantly between mild and harsh winters:" + ] + }, { "cell_type": "code", "execution_count": null, - "id": "6", + "id": "7", "metadata": {}, "outputs": [], "source": [ - "# Visualize demand scenarios with plotly\n", - "fig = px.line(\n", - " heat_demand.iloc[:48],\n", - " title='Heat Demand by Scenario (First 2 Days)',\n", - " labels={'index': 'Time', 'value': 'kW', 'variable': 'Scenario'},\n", + "# Visualize demand scenarios with fxplot\n", + "demand_ds = xr.Dataset(\n", + " {\n", + " scenario: xr.DataArray(\n", + " heat_demand[scenario].values,\n", + " dims=['time'],\n", + " coords={'time': timesteps},\n", + " )\n", + " for scenario in scenarios\n", + " }\n", ")\n", - "fig.update_traces(mode='lines')\n", - "fig" + "demand_ds.fxplot.line(title='Heat Demand by Scenario')" ] }, { "cell_type": "markdown", - "id": "7", + "id": "8", "metadata": {}, "source": [ "## Build the Flow System\n", @@ -112,7 +127,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -121,6 +136,7 @@ " periods=periods,\n", " scenarios=scenarios,\n", " scenario_weights=scenario_weights,\n", + " name='Both Scenarios',\n", ")\n", "flow_system.add_carriers(\n", 
" fx.Carrier('gas', '#3498db', 'kW'),\n", @@ -128,12 +144,12 @@ " fx.Carrier('heat', '#e74c3c', 'kW'),\n", ")\n", "\n", - "print(flow_system)" + "flow_system" ] }, { "cell_type": "markdown", - "id": "9", + "id": "10", "metadata": {}, "source": [ "## Add Components" @@ -142,7 +158,7 @@ { "cell_type": "code", "execution_count": null, - "id": "10", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -219,7 +235,7 @@ }, { "cell_type": "markdown", - "id": "11", + "id": "12", "metadata": {}, "source": [ "## Run Optimization" @@ -228,7 +244,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -237,7 +253,7 @@ }, { "cell_type": "markdown", - "id": "13", + "id": "14", "metadata": {}, "source": [ "## Analyze Results\n", @@ -248,20 +264,25 @@ { "cell_type": "code", "execution_count": null, - "id": "14", + "id": "15", "metadata": {}, "outputs": [], "source": [ "chp_size = flow_system.statistics.sizes['CHP(P_el)']\n", - "total_cost = flow_system.solution['costs']\n", "\n", - "print(f'Optimal CHP: {float(chp_size.max()):.0f} kW electrical ({float(chp_size.max()) * 0.50 / 0.35:.0f} kW thermal)')\n", - "print(f'Expected cost: {float(total_cost.sum()):.0f} €')" + "pd.DataFrame(\n", + " {\n", + " 'CHP Electrical [kW]': float(chp_size.max()),\n", + " 'CHP Thermal [kW]': float(chp_size.max()) * 0.50 / 0.35,\n", + " 'Expected Cost [EUR]': float(flow_system.solution['costs'].sum()),\n", + " },\n", + " index=['Optimal'],\n", + ").T" ] }, { "cell_type": "markdown", - "id": "15", + "id": "16", "metadata": {}, "source": [ "### Heat Balance by Scenario\n", @@ -272,7 +293,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -281,7 +302,7 @@ }, { "cell_type": "markdown", - "id": "17", + "id": "18", "metadata": {}, "source": [ "### CHP Operation Patterns" @@ -290,7 +311,7 @@ { "cell_type": "code", "execution_count": null, - "id": "18", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -299,7 +320,7 @@ }, { "cell_type": "markdown", - "id": "19", + "id": "20", "metadata": {}, "source": [ "### Multi-Dimensional Data Access\n", @@ -310,13 +331,11 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "21", "metadata": {}, "outputs": [], "source": [ - "# View dimensions\n", "flow_rates = flow_system.statistics.flow_rates\n", - "print('Flow rates dimensions:', dict(flow_rates.sizes))\n", "\n", "# Plot flow rates\n", "flow_system.statistics.plot.flows()" @@ -325,22 +344,27 @@ { "cell_type": "code", "execution_count": null, - "id": "21", + "id": "22", "metadata": {}, "outputs": [], "source": [ "# CHP operation summary by scenario\n", "chp_heat = flow_rates['CHP(Q_th)']\n", "\n", - "for scenario in scenarios:\n", - " scenario_avg = float(chp_heat.sel(scenario=scenario).mean())\n", - " scenario_max = float(chp_heat.sel(scenario=scenario).max())\n", - " print(f'{scenario}: avg {scenario_avg:.0f} kW, max {scenario_max:.0f} kW')" + "pd.DataFrame(\n", + " {\n", + " scenario: {\n", + " 'Avg [kW]': float(chp_heat.sel(scenario=scenario).mean()),\n", + " 'Max [kW]': float(chp_heat.sel(scenario=scenario).max()),\n", + " }\n", + " for scenario in scenarios\n", + " }\n", + ")" ] }, { "cell_type": "markdown", - "id": "22", + "id": "23", "metadata": {}, "source": [ "## Sensitivity: What if Only Mild Winter?\n", @@ -351,7 +375,7 @@ { "cell_type": "code", "execution_count": null, - "id": "23", + "id": "24", "metadata": {}, "outputs": [], "source": [ @@ -362,14 
+386,18 @@ "chp_size_mild = float(fs_mild.statistics.sizes['CHP(P_el)'].max())\n", "chp_size_both = float(chp_size.max())\n", "\n", - "print(\n", - " f'CHP sizing: {chp_size_mild:.0f} kW (mild only) vs {chp_size_both:.0f} kW (both scenarios) → +{chp_size_both - chp_size_mild:.0f} kW for uncertainty'\n", + "pd.DataFrame(\n", + " {\n", + " 'Mild Only': {'CHP Size [kW]': chp_size_mild},\n", + " 'Both Scenarios': {'CHP Size [kW]': chp_size_both},\n", + " 'Uncertainty Buffer': {'CHP Size [kW]': chp_size_both - chp_size_mild},\n", + " }\n", ")" ] }, { "cell_type": "markdown", - "id": "24", + "id": "25", "metadata": {}, "source": [ "### Energy Flow Sankey\n", @@ -380,7 +408,7 @@ { "cell_type": "code", "execution_count": null, - "id": "25", + "id": "26", "metadata": {}, "outputs": [], "source": [ @@ -389,7 +417,7 @@ }, { "cell_type": "markdown", - "id": "26", + "id": "27", "metadata": {}, "source": [ "## Key Concepts\n", diff --git a/docs/notebooks/08a-aggregation.ipynb b/docs/notebooks/08a-aggregation.ipynb index 8bc1a4774..e26c19223 100644 --- a/docs/notebooks/08a-aggregation.ipynb +++ b/docs/notebooks/08a-aggregation.ipynb @@ -172,6 +172,7 @@ "# Stage 2: Dispatch at full resolution with fixed sizes\n", "start = timeit.default_timer()\n", "fs_dispatch = flow_system.transform.fix_sizes(fs_sizing.statistics.sizes)\n", + "fs_dispatch.name = 'Two-Stage'\n", "fs_dispatch.optimize(solver)\n", "time_stage2 = timeit.default_timer() - start\n", "\n", @@ -199,6 +200,7 @@ "source": [ "start = timeit.default_timer()\n", "fs_full = flow_system.copy()\n", + "fs_full.name = 'Full Optimization'\n", "fs_full.optimize(solver)\n", "time_full = timeit.default_timer() - start\n", "\n", @@ -271,7 +273,9 @@ "id": "16", "metadata": {}, "source": [ - "## Visual Comparison: Heat Balance" + "## Visual Comparison: Heat Balance\n", + "\n", + "Compare the full optimization with the two-stage approach side-by-side:" ] }, { @@ -281,24 +285,14 @@ "metadata": {}, "outputs": [], "source": [ - "# Full optimization heat balance\n", - "fs_full.statistics.plot.balance('Heat')" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "18", - "metadata": {}, - "outputs": [], - "source": [ - "# Two-stage optimization heat balance\n", - "fs_dispatch.statistics.plot.balance('Heat')" + "# Side-by-side comparison of full optimization vs two-stage\n", + "comp = fx.Comparison([fs_full, fs_dispatch])\n", + "comp.statistics.plot.balance('Heat')" ] }, { "cell_type": "markdown", - "id": "19", + "id": "18", "metadata": {}, "source": [ "### Energy Flow Sankey (Full Optimization)\n", @@ -309,7 +303,7 @@ { "cell_type": "code", "execution_count": null, - "id": "20", + "id": "19", "metadata": {}, "outputs": [], "source": [ @@ -318,7 +312,7 @@ }, { "cell_type": "markdown", - "id": "21", + "id": "20", "metadata": {}, "source": [ "## When to Use Each Technique\n", @@ -358,7 +352,7 @@ }, { "cell_type": "markdown", - "id": "22", + "id": "21", "metadata": {}, "source": [ "## Summary\n", @@ -389,7 +383,13 @@ ] } ], - "metadata": {}, + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + } + }, "nbformat": 4, "nbformat_minor": 5 } diff --git a/docs/notebooks/08b-rolling-horizon.ipynb b/docs/notebooks/08b-rolling-horizon.ipynb index c0d7bdf24..90da6d2ca 100644 --- a/docs/notebooks/08b-rolling-horizon.ipynb +++ b/docs/notebooks/08b-rolling-horizon.ipynb @@ -94,6 +94,7 @@ "\n", "start = timeit.default_timer()\n", "fs_full = flow_system.copy()\n", + "fs_full.name = 'Full 
Optimization'\n",
     "fs_full.optimize(solver)\n",
     "time_full = timeit.default_timer() - start\n",
@@ -133,6 +134,7 @@
    "source": [
     "start = timeit.default_timer()\n",
     "fs_rolling = flow_system.copy()\n",
+    "fs_rolling.name = 'Rolling Horizon'\n",
     "segments = fs_rolling.optimize.rolling_horizon(\n",
     "    solver,\n",
     "    horizon=192,  # 2-day segments (192 timesteps at 15-min resolution)\n",
@@ -179,7 +181,9 @@
    "id": "11",
    "metadata": {},
    "source": [
-    "## Visualize: Heat Balance Comparison"
+    "## Visualize: Heat Balance Comparison\n",
+    "\n",
+    "Use the `Comparison` class to view both methods side-by-side:"
   ]
  },
  {
@@ -189,22 +193,13 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "fs_full.statistics.plot.balance('Heat').figure.update_layout(title='Heat Balance (Full)')"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "id": "13",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "fs_rolling.statistics.plot.balance('Heat').figure.update_layout(title='Heat Balance (Rolling)')"
+    "comp = fx.Comparison([fs_full, fs_rolling])\n",
+    "comp.statistics.plot.balance('Heat')"
    ]
   },
   {
    "cell_type": "markdown",
-   "id": "14",
+   "id": "13",
    "metadata": {},
    "source": [
     "## Storage State Continuity\n",
@@ -215,7 +210,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-   "id": "15",
+   "id": "14",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -239,7 +234,7 @@
  },
  {
   "cell_type": "markdown",
-   "id": "16",
+   "id": "15",
   "metadata": {},
   "source": [
    "## Inspect Individual Segments\n",
@@ -250,7 +245,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-   "id": "17",
+   "id": "16",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -263,7 +258,7 @@
  },
  {
   "cell_type": "markdown",
-   "id": "18",
+   "id": "17",
   "metadata": {},
   "source": [
    "## Visualize Segment Overlaps\n",
@@ -274,7 +269,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-   "id": "19",
+   "id": "18",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -291,7 +286,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-   "id": "20",
+   "id": "19",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -303,7 +298,7 @@
  },
  {
   "cell_type": "markdown",
-   "id": "21",
+   "id": "20",
   "metadata": {},
   "source": [
    "## When to Use Rolling Horizon\n",
@@ -324,7 +319,7 @@
  },
  {
   "cell_type": "markdown",
-   "id": "22",
+   "id": "21",
   "metadata": {},
   "source": [
    "## API Reference\n",
@@ -348,7 +343,7 @@
  },
  {
   "cell_type": "markdown",
-   "id": "23",
+   "id": "22",
   "metadata": {},
   "source": [
    "## Summary\n",
diff --git a/docs/notebooks/08c-clustering.ipynb b/docs/notebooks/08c-clustering.ipynb
index 0f3b4cc29..6d85e60ba 100644
--- a/docs/notebooks/08c-clustering.ipynb
+++ b/docs/notebooks/08c-clustering.ipynb
@@ -101,6 +101,7 @@
     "\n",
     "start = timeit.default_timer()\n",
     "fs_full = flow_system.copy()\n",
+    "fs_full.name = 'Full Optimization'\n",
     "fs_full.optimize(solver)\n",
     "time_full = timeit.default_timer() - start"
   ]
@@ -142,6 +143,7 @@
     "    cluster_duration='1D',  # Daily clustering\n",
     "    time_series_for_high_peaks=peak_series,  # Capture peak demand day\n",
     ")\n",
+    "fs_clustered.name = 'Clustered (8 days)'\n",
     "\n",
     "time_clustering = timeit.default_timer() - start"
   ]
@@ -239,9 +241,7 @@
     "    cluster_method='k_means',  # Alternative: 'hierarchical' (default), 'k_medoids', 'averaging'\n",
     ")\n",
     "\n",
-    "# Compare cluster assignments between algorithms\n",
-    "print('hierarchical clusters:', fs_clustered.clustering.cluster_order.values)\n",
-    "print('k_means clusters:     ', fs_kmeans.clustering.cluster_order.values)"
+    "fs_kmeans.clustering"
   ]
  },
  {
@@ -251,13 +251,12 @@
   "metadata": {},
"outputs": [], "source": [ - "# Compare RMSE between algorithms\n", - "print('Quality comparison (RMSE for HeatDemand):')\n", - "print(\n", - " f' hierarchical: {float(fs_clustered.clustering.metrics[\"RMSE\"].sel(time_series=\"HeatDemand(Q_th)|fixed_relative_profile\")):.4f}'\n", - ")\n", - "print(\n", - " f' k_means: {float(fs_kmeans.clustering.metrics[\"RMSE\"].sel(time_series=\"HeatDemand(Q_th)|fixed_relative_profile\")):.4f}'\n", + "# Compare quality metrics between algorithms\n", + "pd.DataFrame(\n", + " {\n", + " 'hierarchical': fs_clustered.clustering.metrics.to_dataframe().iloc[0],\n", + " 'k_means': fs_kmeans.clustering.metrics.to_dataframe().iloc[0],\n", + " }\n", ")" ] }, @@ -293,7 +292,6 @@ "source": [ "# Save the cluster order from our optimized system\n", "cluster_order = fs_clustered.clustering.cluster_order.values\n", - "print(f'Cluster order to reuse: {cluster_order}')\n", "\n", "# Now modify the FlowSystem (e.g., increase storage capacity limits)\n", "flow_system_modified = flow_system.copy()\n", @@ -305,15 +303,13 @@ " cluster_duration='1D',\n", " predef_cluster_order=cluster_order, # Reuse cluster assignments\n", ")\n", + "fs_modified_clustered.name = 'Modified (larger storage limit)'\n", "\n", "# Optimize the modified system\n", "fs_modified_clustered.optimize(solver)\n", "\n", - "print('\\nComparison (same cluster structure):')\n", - "print(f' Original storage size: {fs_clustered.statistics.sizes[\"Storage\"].item():.0f}')\n", - "print(f' Modified storage size: {fs_modified_clustered.statistics.sizes[\"Storage\"].item():.0f}')\n", - "print(f' Original cost: {fs_clustered.solution[\"costs\"].item():,.0f} €')\n", - "print(f' Modified cost: {fs_modified_clustered.solution[\"costs\"].item():,.0f} €')" + "# Compare results using Comparison class\n", + "fx.Comparison([fs_clustered, fs_modified_clustered])" ] }, { @@ -356,6 +352,7 @@ "start = timeit.default_timer()\n", "\n", "fs_dispatch = flow_system.transform.fix_sizes(sizes_with_margin)\n", + "fs_dispatch.name = 'Two-Stage'\n", "fs_dispatch.optimize(solver)\n", "\n", "time_dispatch = timeit.default_timer() - start\n", @@ -544,7 +541,32 @@ "| `'cyclic'` | Each cluster is independent but cyclic (start = end) |\n", "| `'independent'` | Each cluster is independent, free start/end |\n", "\n", - "For a detailed comparison of storage modes, see [08c2-clustering-storage-modes](08c2-clustering-storage-modes.ipynb)." 
+ "For a detailed comparison of storage modes, see [08c2-clustering-storage-modes](08c2-clustering-storage-modes.ipynb).\n", + "\n", + "### Peak Forcing Format\n", + "\n", + "```python\n", + "time_series_for_high_peaks = ['ComponentName(FlowName)|fixed_relative_profile']\n", + "```\n", + "\n", + "### Recommended Workflow\n", + "\n", + "```python\n", + "# Stage 1: Fast sizing\n", + "fs_sizing = flow_system.transform.cluster(\n", + " n_clusters=8,\n", + " cluster_duration='1D',\n", + " time_series_for_high_peaks=['Demand(Flow)|fixed_relative_profile'],\n", + ")\n", + "fs_sizing.optimize(solver)\n", + "\n", + "# Apply safety margin\n", + "sizes = {k: v.item() * 1.05 for k, v in fs_sizing.statistics.sizes.items()}\n", + "\n", + "# Stage 2: Accurate dispatch\n", + "fs_dispatch = flow_system.transform.fix_sizes(sizes)\n", + "fs_dispatch.optimize(solver)\n", + "```" ] }, { @@ -580,17 +602,7 @@ ] } ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "name": "python", - "version": "3.11" - } - }, + "metadata": {}, "nbformat": 4, "nbformat_minor": 5 } diff --git a/docs/notebooks/08c2-clustering-storage-modes.ipynb b/docs/notebooks/08c2-clustering-storage-modes.ipynb index 7a760edc3..3a9ab88ad 100644 --- a/docs/notebooks/08c2-clustering-storage-modes.ipynb +++ b/docs/notebooks/08c2-clustering-storage-modes.ipynb @@ -65,7 +65,7 @@ "flow_system.connect_and_transform() # Align all data as xarray\n", "\n", "timesteps = flow_system.timesteps\n", - "print(f'Loaded FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 24:.0f} days)')\n", + "print(f'FlowSystem: {len(timesteps)} timesteps ({len(timesteps) / 24:.0f} days)')\n", "print(f'Components: {list(flow_system.components.keys())}')" ] }, @@ -142,6 +142,7 @@ "\n", "start = timeit.default_timer()\n", "fs_full = flow_system.copy()\n", + "fs_full.name = 'Full Optimization'\n", "fs_full.optimize(solver)\n", "time_full = timeit.default_timer() - start\n", "\n", @@ -277,7 +278,9 @@ "# Expand clustered solutions to full resolution\n", "expanded_systems = {}\n", "for mode in storage_modes:\n", - " expanded_systems[mode] = clustered_systems[mode].transform.expand_solution()" + " fs_expanded = clustered_systems[mode].transform.expand_solution()\n", + " fs_expanded.name = f'Mode: {mode}'\n", + " expanded_systems[mode] = fs_expanded" ] }, { @@ -316,6 +319,28 @@ "cell_type": "markdown", "id": "14", "metadata": {}, + "source": [ + "### Side-by-Side Comparison\n", + "\n", + "Use the `Comparison` class to compare the full optimization with the recommended mode:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "15", + "metadata": {}, + "outputs": [], + "source": [ + "# Compare full optimization with the recommended intercluster_cyclic mode\n", + "comp = fx.Comparison([fs_full, expanded_systems['intercluster_cyclic']])\n", + "comp.statistics.plot.balance('Heat')" + ] + }, + { + "cell_type": "markdown", + "id": "16", + "metadata": {}, "source": [ "## Interpretation\n", "\n", @@ -343,7 +368,7 @@ }, { "cell_type": "markdown", - "id": "15", + "id": "17", "metadata": {}, "source": [ "## When to Use Each Mode\n", @@ -378,7 +403,7 @@ }, { "cell_type": "markdown", - "id": "16", + "id": "18", "metadata": {}, "source": [ "## Summary\n", diff --git a/docs/notebooks/08d-clustering-multiperiod.ipynb b/docs/notebooks/08d-clustering-multiperiod.ipynb index 016d9555b..e599384ac 100644 --- a/docs/notebooks/08d-clustering-multiperiod.ipynb +++ 
b/docs/notebooks/08d-clustering-multiperiod.ipynb
@@ -140,6 +140,7 @@
     "\n",
     "start = timeit.default_timer()\n",
     "fs_full = flow_system.copy()\n",
+    "fs_full.name = 'Full Optimization'\n",
     "fs_full.optimize(solver)\n",
     "time_full = timeit.default_timer() - start\n",
@@ -345,6 +346,7 @@
     "start = timeit.default_timer()\n",
     "\n",
     "fs_dispatch = flow_system.transform.fix_sizes(sizes_with_margin)\n",
+    "fs_dispatch.name = 'Two-Stage'\n",
     "fs_dispatch.optimize(solver)\n",
     "\n",
     "time_dispatch = timeit.default_timer() - start\n",
@@ -434,9 +436,21 @@
    ]
   },
   {
-   "cell_type": "markdown",
+   "cell_type": "code",
+   "execution_count": null,
    "id": "25",
    "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Side-by-side comparison using the Comparison class\n",
+    "comp = fx.Comparison([fs_full, fs_dispatch])\n",
+    "comp.statistics.plot.balance('Heat')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "26",
+   "metadata": {},
    "source": [
     "## Expand Clustered Solution to Full Resolution\n",
     "\n",
@@ -446,7 +460,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-   "id": "26",
+   "id": "27",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -460,7 +474,7 @@
  {
   "cell_type": "code",
   "execution_count": null,
-   "id": "27",
+   "id": "28",
   "metadata": {},
   "outputs": [],
   "source": [
@@ -470,7 +484,7 @@
  },
  {
   "cell_type": "markdown",
-   "id": "28",
+   "id": "29",
   "metadata": {},
   "source": [
    "## Key Considerations for Multi-Period Clustering\n",
@@ -504,7 +518,7 @@
  },
  {
   "cell_type": "markdown",
-   "id": "29",
+   "id": "30",
   "metadata": {},
   "source": [
    "## Summary\n",
diff --git a/docs/user-guide/results/index.md b/docs/user-guide/results/index.md
index a9b40f7f9..500a64cd9 100644
--- a/docs/user-guide/results/index.md
+++ b/docs/user-guide/results/index.md
@@ -277,6 +277,156 @@ flow_system.statistics.plot.heatmap('Boiler(Q_th)|flow_rate')
 flow_system.to_netcdf('results/optimized_system.nc')
 ```
 
+## Comparing Multiple Systems
+
+Use the [`Comparison`][flixopt.comparison.Comparison] class to analyze and visualize multiple FlowSystems side-by-side. This is useful for:
+
+- Comparing different design alternatives (with/without CHP, different storage sizes)
+- Analyzing optimization method trade-offs (full vs. two-stage, different aggregation levels)
+- Sensitivity analysis (different scenarios, parameter variations)
+
+### Basic Usage
+
+```python
+import flixopt as fx
+
+# Optimize two system variants
+fs_baseline = create_system()
+fs_baseline.name = 'Baseline'
+fs_baseline.optimize(solver)
+
+fs_with_storage = create_system_with_storage()
+fs_with_storage.name = 'With Storage'
+fs_with_storage.optimize(solver)
+
+# Create comparison
+comp = fx.Comparison([fs_baseline, fs_with_storage])
+
+# Side-by-side balance plots (auto-faceted by 'case' dimension)
+comp.statistics.plot.balance('Heat')
+
+# Access combined data with 'case' dimension
+comp.statistics.flow_rates  # xr.Dataset with dims: (time, case)
+comp.solution  # Combined solution dataset
+```
+
+### Requirements
+
+All FlowSystems must have **matching core dimensions** (`time`, `period`, `scenario`). Auxiliary dimensions like `cluster_boundary` are ignored. If core dimensions differ, use `.transform.sel()` to align them first:
+
+```python
+# Systems with different scenarios
+fs_both = flow_system  # Has 'Mild Winter' and 'Harsh Winter' scenarios
+fs_mild = flow_system.transform.sel(scenario='Mild Winter')  # Single scenario
+
+# Cannot compare directly - scenario dimension mismatch!
+# fx.Comparison([fs_both, fs_mild])  # Raises ValueError
+
+# Instead, select matching dimensions
+fs_both_mild = fs_both.transform.sel(scenario='Mild Winter')
+comp = fx.Comparison([fs_both_mild, fs_mild])  # Works!
+
+# Auxiliary dimensions are OK (e.g., expanded clustered solutions)
+fs_expanded = fs_clustered.transform.expand_solution()  # Has cluster_boundary dim
+comp = fx.Comparison([fs_full, fs_expanded])  # Works! cluster_boundary is ignored
+```
+
+### Available Properties
+
+The `Comparison.statistics` accessor mirrors all `StatisticsAccessor` properties, returning combined datasets with an added `'case'` dimension:
+
+| Property | Description |
+|----------|-------------|
+| `flow_rates` | All flow rate variables |
+| `flow_hours` | Flow hours (energy) |
+| `sizes` | Component sizes |
+| `storage_sizes` | Storage capacities |
+| `charge_states` | Storage charge states |
+| `temporal_effects` | Effects per timestep |
+| `periodic_effects` | Investment effects |
+| `total_effects` | Combined effects |
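+
+A quick sketch of working with these combined datasets (standard xarray operations; the case name `'Baseline'` comes from the Basic Usage example above):
+
+```python
+rates = comp.statistics.flow_rates  # xr.Dataset with a 'case' dimension
+rates.sel(case='Baseline')          # view of a single system
+rates.sum('time')                   # aggregate over time, keeping 'case'
+```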
+
+### Available Plot Methods
+
+All standard plot methods work on the comparison, with the `'case'` dimension automatically used for faceting:
+
+```python
+comp = fx.Comparison([fs_baseline, fs_modified])
+
+# Balance plots - faceted by case
+comp.statistics.plot.balance('Heat')
+comp.statistics.plot.balance('Electricity', mode='area')
+
+# Flow plots
+comp.statistics.plot.flows(component='CHP')
+
+# Effect breakdowns
+comp.statistics.plot.effects()
+
+# Heatmaps
+comp.statistics.plot.heatmap('Boiler(Q_th)')
+
+# Duration curves
+comp.statistics.plot.duration_curve('CHP(Q_th)')
+
+# Storage plots
+comp.statistics.plot.storage('Battery')
+```
+
+### Computing Differences
+
+Use the `diff()` method to compute differences relative to a reference case:
+
+```python
+# Differences relative to first case (default)
+differences = comp.diff()
+
+# Differences relative to specific case
+differences = comp.diff(reference='Baseline')
+differences = comp.diff(reference=0)  # By index
+
+# Analyze differences
+print(differences['costs'])  # Cost difference per case
+```
+
+### Naming Systems
+
+System names come from `FlowSystem.name` by default. Override with the `names` parameter:
+
+```python
+# Using FlowSystem.name (default)
+fs1.name = 'Scenario A'
+fs2.name = 'Scenario B'
+comp = fx.Comparison([fs1, fs2])
+
+# Or override explicitly
+comp = fx.Comparison([fs1, fs2], names=['Base Case', 'Alternative'])
+```
+
+### Example: Comparing Optimization Methods
+
+```python
+# Full optimization
+fs_full = flow_system.copy()
+fs_full.name = 'Full Optimization'
+fs_full.optimize(solver)
+
+# Two-stage optimization
+fs_sizing = flow_system.transform.resample('4h')
+fs_sizing.optimize(solver)
+fs_dispatch = flow_system.transform.fix_sizes(fs_sizing.statistics.sizes)
+fs_dispatch.name = 'Two-Stage'
+fs_dispatch.optimize(solver)
+
+# Compare results
+comp = fx.Comparison([fs_full, fs_dispatch])
+comp.statistics.plot.balance('Heat')
+
+# Check cost difference
+diff = comp.diff()
+print(f"Cost difference: {diff['costs'].sel(case='Two-Stage').item():.0f} €")
+```
+
 ## Next Steps
 
 - [Plotting Results](../results-plotting.md) - Detailed plotting documentation
diff --git a/flixopt/__init__.py b/flixopt/__init__.py
index 54fa21274..b84b82a4f 100644
--- a/flixopt/__init__.py
+++ b/flixopt/__init__.py
@@ -17,6 +17,7 @@
 # Register xr.Dataset.fxplot accessor (import triggers registration via decorator)
 from . 
import dataset_plot_accessor as _ # noqa: F401 from .carrier import Carrier, CarrierContainer +from .comparison import Comparison from .components import ( LinearConverter, Sink, @@ -39,6 +40,7 @@ 'CONFIG', 'Carrier', 'CarrierContainer', + 'Comparison', 'Flow', 'Bus', 'Effect', diff --git a/flixopt/comparison.py b/flixopt/comparison.py new file mode 100644 index 000000000..63a00a0f1 --- /dev/null +++ b/flixopt/comparison.py @@ -0,0 +1,609 @@ +"""Compare multiple FlowSystems side-by-side.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +import xarray as xr + +from .config import CONFIG +from .plot_result import PlotResult + +if TYPE_CHECKING: + from .flow_system import FlowSystem + +__all__ = ['Comparison'] + +# Type aliases (matching statistics_accessor.py) +SelectType = dict[str, Any] +FilterType = str | list[str] +ColorType = str | list[str] | dict[str, str] | None + + +class Comparison: + """Compare multiple FlowSystems side-by-side. + + Combines solutions and statistics from multiple FlowSystems into unified + xarray Datasets with a 'case' dimension. The existing plotting infrastructure + automatically handles faceting by the 'case' dimension. + + All FlowSystems must have matching dimensions (time, period, scenario, etc.). + Use `flow_system.transform.sel()` to align dimensions before comparing. + + Args: + flow_systems: List of FlowSystems to compare. All must be optimized + and have matching dimensions. + names: Optional names for each case. If None, uses FlowSystem.name. + + Raises: + ValueError: If FlowSystems have mismatched dimensions. + RuntimeError: If any FlowSystem has no solution. + + Examples: + ```python + # Compare two systems (uses FlowSystem.name by default) + comp = fx.Comparison([fs_base, fs_modified]) + + # Or with custom names + comp = fx.Comparison([fs_base, fs_modified], names=['baseline', 'modified']) + + # Side-by-side plots (auto-facets by 'case') + comp.statistics.plot.balance('Heat') + comp.statistics.flow_rates.fxplot.line() + + # Access combined data + comp.solution # xr.Dataset with 'case' dimension + comp.statistics.flow_rates # xr.Dataset with 'case' dimension + + # Compute differences relative to first case + comp.diff() # Returns xr.Dataset of differences + comp.diff('baseline') # Or specify reference by name + + # For systems with different dimensions, align first: + fs_both = ... # Has scenario dimension + fs_mild = fs_both.transform.sel(scenario='Mild') # Select one scenario + fs_other = ... # Also select to match + comp = fx.Comparison([fs_mild, fs_other]) # Now dimensions match + ``` + """ + + def __init__(self, flow_systems: list[FlowSystem], names: list[str] | None = None) -> None: + if len(flow_systems) < 2: + raise ValueError('Comparison requires at least 2 FlowSystems') + + self._systems = flow_systems + self._names = names or [fs.name or f'System {i}' for i, fs in enumerate(flow_systems)] + + if len(self._names) != len(self._systems): + raise ValueError( + f'Number of names ({len(self._names)}) must match number of FlowSystems ({len(self._systems)})' + ) + + if len(set(self._names)) != len(self._names): + raise ValueError(f'Case names must be unique, got: {self._names}') + + # Validate all FlowSystems have solutions + for fs in flow_systems: + if fs.solution is None: + raise RuntimeError(f"FlowSystem '{fs.name}' has no solution. 
Run optimize() first.") + + # Validate matching dimensions across all FlowSystems + self._validate_matching_dimensions() + + # Caches + self._solution: xr.Dataset | None = None + self._statistics: ComparisonStatistics | None = None + + # Core dimensions that must match across FlowSystems + # Note: 'cluster' and 'cluster_boundary' are auxiliary dimensions from clustering + _CORE_DIMS = {'time', 'period', 'scenario'} + + def _validate_matching_dimensions(self) -> None: + """Validate that all FlowSystems have matching core dimensions. + + Only validates core dimensions (time, period, scenario). Auxiliary + dimensions like 'cluster_boundary' are ignored as they don't affect + the comparison logic. + """ + reference = self._systems[0] + ref_core_dims = set(reference.solution.dims) & self._CORE_DIMS + ref_name = self._names[0] + + for fs, name in zip(self._systems[1:], self._names[1:], strict=True): + fs_core_dims = set(fs.solution.dims) & self._CORE_DIMS + if fs_core_dims != ref_core_dims: + missing = ref_core_dims - fs_core_dims + extra = fs_core_dims - ref_core_dims + msg_parts = [f"Core dimension mismatch between '{ref_name}' and '{name}'."] + if missing: + msg_parts.append(f"Missing in '{name}': {missing}.") + if extra: + msg_parts.append(f"Extra in '{name}': {extra}.") + msg_parts.append('Use .transform.sel() to align dimensions before comparing.') + raise ValueError(' '.join(msg_parts)) + + @property + def names(self) -> list[str]: + """Case names for each FlowSystem.""" + return self._names + + @property + def solution(self) -> xr.Dataset: + """Combined solution Dataset with 'case' dimension.""" + if self._solution is None: + datasets = [] + for fs, name in zip(self._systems, self._names, strict=True): + ds = fs.solution.expand_dims(case=[name]) + datasets.append(ds) + self._solution = xr.concat(datasets, dim='case', join='outer', fill_value=float('nan')) + return self._solution + + @property + def statistics(self) -> ComparisonStatistics: + """Combined statistics accessor with 'case' dimension.""" + if self._statistics is None: + self._statistics = ComparisonStatistics(self) + return self._statistics + + def diff(self, reference: str | int = 0) -> xr.Dataset: + """Compute differences relative to a reference case. + + Args: + reference: Reference case name or index (default: 0, first case). + + Returns: + Dataset with differences (each case minus reference). + """ + if isinstance(reference, str): + if reference not in self._names: + raise ValueError(f"Reference '{reference}' not found. Available: {self._names}") + ref_idx = self._names.index(reference) + else: + ref_idx = reference + + ref_data = self.solution.isel(case=ref_idx) + return self.solution - ref_data + + +class ComparisonStatistics: + """Combined statistics accessor for comparing FlowSystems. + + Mirrors StatisticsAccessor properties, concatenating data with a 'case' dimension. + Access via ``Comparison.statistics``. 
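+
+    Example (sketch; assumes two already-optimized FlowSystems ``fs_a`` and ``fs_b``):
+        >>> comp = Comparison([fs_a, fs_b])
+        >>> comp.statistics.flow_rates  # combined xr.Dataset with a 'case' dimension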
+ """ + + def __init__(self, comparison: Comparison) -> None: + self._comp = comparison + # Caches for dataset properties + self._flow_rates: xr.Dataset | None = None + self._flow_hours: xr.Dataset | None = None + self._flow_sizes: xr.Dataset | None = None + self._storage_sizes: xr.Dataset | None = None + self._sizes: xr.Dataset | None = None + self._charge_states: xr.Dataset | None = None + self._temporal_effects: xr.Dataset | None = None + self._periodic_effects: xr.Dataset | None = None + self._total_effects: xr.Dataset | None = None + # Caches for dict properties + self._carrier_colors: dict[str, str] | None = None + self._component_colors: dict[str, str] | None = None + self._bus_colors: dict[str, str] | None = None + self._carrier_units: dict[str, str] | None = None + self._effect_units: dict[str, str] | None = None + # Plot accessor + self._plot: ComparisonStatisticsPlot | None = None + + def _concat_property(self, prop_name: str) -> xr.Dataset: + """Concatenate a statistics property across all cases.""" + datasets = [] + for fs, name in zip(self._comp._systems, self._comp._names, strict=True): + ds = getattr(fs.statistics, prop_name) + datasets.append(ds.expand_dims(case=[name])) + return xr.concat(datasets, dim='case', join='outer', fill_value=float('nan')) + + def _merge_dict_property(self, prop_name: str) -> dict[str, str]: + """Merge a dict property from all cases (later cases override).""" + result: dict[str, str] = {} + for fs in self._comp._systems: + result.update(getattr(fs.statistics, prop_name)) + return result + + @property + def flow_rates(self) -> xr.Dataset: + """Combined flow rates with 'case' dimension.""" + if self._flow_rates is None: + self._flow_rates = self._concat_property('flow_rates') + return self._flow_rates + + @property + def flow_hours(self) -> xr.Dataset: + """Combined flow hours (energy) with 'case' dimension.""" + if self._flow_hours is None: + self._flow_hours = self._concat_property('flow_hours') + return self._flow_hours + + @property + def flow_sizes(self) -> xr.Dataset: + """Combined flow investment sizes with 'case' dimension.""" + if self._flow_sizes is None: + self._flow_sizes = self._concat_property('flow_sizes') + return self._flow_sizes + + @property + def storage_sizes(self) -> xr.Dataset: + """Combined storage capacity sizes with 'case' dimension.""" + if self._storage_sizes is None: + self._storage_sizes = self._concat_property('storage_sizes') + return self._storage_sizes + + @property + def sizes(self) -> xr.Dataset: + """Combined sizes (flow + storage) with 'case' dimension.""" + if self._sizes is None: + self._sizes = self._concat_property('sizes') + return self._sizes + + @property + def charge_states(self) -> xr.Dataset: + """Combined storage charge states with 'case' dimension.""" + if self._charge_states is None: + self._charge_states = self._concat_property('charge_states') + return self._charge_states + + @property + def temporal_effects(self) -> xr.Dataset: + """Combined temporal effects with 'case' dimension.""" + if self._temporal_effects is None: + self._temporal_effects = self._concat_property('temporal_effects') + return self._temporal_effects + + @property + def periodic_effects(self) -> xr.Dataset: + """Combined periodic effects with 'case' dimension.""" + if self._periodic_effects is None: + self._periodic_effects = self._concat_property('periodic_effects') + return self._periodic_effects + + @property + def total_effects(self) -> xr.Dataset: + """Combined total effects with 'case' dimension.""" + if 
self._total_effects is None: + self._total_effects = self._concat_property('total_effects') + return self._total_effects + + @property + def carrier_colors(self) -> dict[str, str]: + """Merged carrier colors from all cases.""" + if self._carrier_colors is None: + self._carrier_colors = self._merge_dict_property('carrier_colors') + return self._carrier_colors + + @property + def component_colors(self) -> dict[str, str]: + """Merged component colors from all cases.""" + if self._component_colors is None: + self._component_colors = self._merge_dict_property('component_colors') + return self._component_colors + + @property + def bus_colors(self) -> dict[str, str]: + """Merged bus colors from all cases.""" + if self._bus_colors is None: + self._bus_colors = self._merge_dict_property('bus_colors') + return self._bus_colors + + @property + def carrier_units(self) -> dict[str, str]: + """Merged carrier units from all cases.""" + if self._carrier_units is None: + self._carrier_units = self._merge_dict_property('carrier_units') + return self._carrier_units + + @property + def effect_units(self) -> dict[str, str]: + """Merged effect units from all cases.""" + if self._effect_units is None: + self._effect_units = self._merge_dict_property('effect_units') + return self._effect_units + + @property + def plot(self) -> ComparisonStatisticsPlot: + """Access plot methods for comparison statistics.""" + if self._plot is None: + self._plot = ComparisonStatisticsPlot(self) + return self._plot + + +class ComparisonStatisticsPlot: + """Plot accessor for comparison statistics. + + Wraps StatisticsPlotAccessor methods, combining data from all FlowSystems + with a 'case' dimension for faceting. + """ + + # Data-related kwargs for each method (everything else is plotly kwargs) + _DATA_KWARGS: dict[str, set[str]] = { + 'balance': {'select', 'include', 'exclude', 'unit'}, + 'carrier_balance': {'select', 'include', 'exclude', 'unit'}, + 'flows': {'start', 'end', 'component', 'select', 'unit'}, + 'storage': {'select', 'unit', 'charge_state_color'}, + 'charge_states': {'select'}, + 'duration_curve': {'select', 'normalize'}, + 'sizes': {'max_size', 'select'}, + 'effects': {'effect', 'by', 'select'}, + 'heatmap': {'select', 'reshape'}, + } + + def __init__(self, statistics: ComparisonStatistics) -> None: + self._stats = statistics + self._comp = statistics._comp + + def _split_kwargs(self, method_name: str, kwargs: dict) -> tuple[dict, dict]: + """Split kwargs into data kwargs and plotly kwargs.""" + data_keys = self._DATA_KWARGS.get(method_name, set()) + data_kwargs = {k: v for k, v in kwargs.items() if k in data_keys} + plotly_kwargs = {k: v for k, v in kwargs.items() if k not in data_keys} + return data_kwargs, plotly_kwargs + + def _combine_data(self, method_name: str, *args, **kwargs) -> tuple[xr.Dataset, str]: + """Call plot method on each system and combine data. 
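+
+        Systems where the underlying plot method raises KeyError or ValueError
+        (e.g., a node that exists in only one case) are skipped rather than
+        failing the whole comparison.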
Returns (combined_data, title).""" + datasets = [] + title = '' + kwargs = {**kwargs, 'show': False} # Don't mutate original + + for fs, case_name in zip(self._comp._systems, self._comp._names, strict=True): + try: + result = getattr(fs.statistics.plot, method_name)(*args, **kwargs) + datasets.append(result.data.expand_dims(case=[case_name])) + title = result.figure.layout.title.text or title + except (KeyError, ValueError): + continue + + if not datasets: + return xr.Dataset(), '' + + return xr.concat(datasets, dim='case', join='outer', fill_value=float('nan')), title + + def _finalize(self, ds: xr.Dataset, fig, show: bool | None) -> PlotResult: + """Handle show and return PlotResult.""" + import plotly.graph_objects as go + + if show is None: + show = CONFIG.Plotting.default_show + if show and fig: + fig.show() + return PlotResult(data=ds, figure=fig or go.Figure()) + + def balance( + self, + node: str, + *, + colors=None, + facet_col='auto', + facet_row='auto', + animation_frame='auto', + show: bool | None = None, + **kwargs, + ) -> PlotResult: + """Plot node balance comparison. See StatisticsPlotAccessor.balance.""" + data_kw, plotly_kw = self._split_kwargs('balance', kwargs) + ds, title = self._combine_data('balance', node, **data_kw) + if not ds.data_vars: + return self._finalize(ds, None, show) + fig = ds.fxplot.stacked_bar( + colors=colors, + title=title, + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, + **plotly_kw, + ) + return self._finalize(ds, fig, show) + + def carrier_balance( + self, + carrier: str, + *, + colors=None, + facet_col='auto', + facet_row='auto', + animation_frame='auto', + show: bool | None = None, + **kwargs, + ) -> PlotResult: + """Plot carrier balance comparison. See StatisticsPlotAccessor.carrier_balance.""" + data_kw, plotly_kw = self._split_kwargs('carrier_balance', kwargs) + ds, title = self._combine_data('carrier_balance', carrier, **data_kw) + if not ds.data_vars: + return self._finalize(ds, None, show) + fig = ds.fxplot.stacked_bar( + colors=colors, + title=title, + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, + **plotly_kw, + ) + return self._finalize(ds, fig, show) + + def flows( + self, + *, + colors=None, + facet_col='auto', + facet_row='auto', + animation_frame='auto', + show: bool | None = None, + **kwargs, + ) -> PlotResult: + """Plot flows comparison. See StatisticsPlotAccessor.flows.""" + data_kw, plotly_kw = self._split_kwargs('flows', kwargs) + ds, title = self._combine_data('flows', **data_kw) + if not ds.data_vars: + return self._finalize(ds, None, show) + fig = ds.fxplot.line( + colors=colors, + title=title, + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, + **plotly_kw, + ) + return self._finalize(ds, fig, show) + + def storage( + self, + storage: str, + *, + colors=None, + facet_col='auto', + facet_row='auto', + animation_frame='auto', + show: bool | None = None, + **kwargs, + ) -> PlotResult: + """Plot storage operation comparison. 
See StatisticsPlotAccessor.storage.""" + data_kw, plotly_kw = self._split_kwargs('storage', kwargs) + ds, title = self._combine_data('storage', storage, **data_kw) + if not ds.data_vars: + return self._finalize(ds, None, show) + fig = ds.fxplot.stacked_bar( + colors=colors, + title=title, + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, + **plotly_kw, + ) + return self._finalize(ds, fig, show) + + def charge_states( + self, + storages=None, + *, + colors=None, + facet_col='auto', + facet_row='auto', + animation_frame='auto', + show: bool | None = None, + **kwargs, + ) -> PlotResult: + """Plot charge states comparison. See StatisticsPlotAccessor.charge_states.""" + data_kw, plotly_kw = self._split_kwargs('charge_states', kwargs) + ds, title = self._combine_data('charge_states', storages, **data_kw) + if not ds.data_vars: + return self._finalize(ds, None, show) + fig = ds.fxplot.line( + colors=colors, + title=title, + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, + **plotly_kw, + ) + fig.update_yaxes(title_text='Charge State') + return self._finalize(ds, fig, show) + + def duration_curve( + self, + variables, + *, + normalize: bool = False, + colors=None, + facet_col='auto', + facet_row='auto', + animation_frame='auto', + show: bool | None = None, + **kwargs, + ) -> PlotResult: + """Plot duration curves comparison. See StatisticsPlotAccessor.duration_curve.""" + data_kw, plotly_kw = self._split_kwargs('duration_curve', kwargs) + ds, title = self._combine_data('duration_curve', variables, normalize=normalize, **data_kw) + if not ds.data_vars: + return self._finalize(ds, None, show) + fig = ds.fxplot.line( + colors=colors, + title=title, + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, + **plotly_kw, + ) + fig.update_xaxes(title_text='Duration [%]' if normalize else 'Timesteps') + return self._finalize(ds, fig, show) + + def sizes( + self, + *, + colors=None, + facet_col='auto', + facet_row='auto', + animation_frame='auto', + show: bool | None = None, + **kwargs, + ) -> PlotResult: + """Plot investment sizes comparison. See StatisticsPlotAccessor.sizes.""" + data_kw, plotly_kw = self._split_kwargs('sizes', kwargs) + ds, title = self._combine_data('sizes', **data_kw) + if not ds.data_vars: + return self._finalize(ds, None, show) + fig = ds.fxplot.bar( + x='variable', + color='variable', + colors=colors, + title=title, + ylabel='Size', + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, + **plotly_kw, + ) + return self._finalize(ds, fig, show) + + def effects( + self, + aspect='total', + *, + colors=None, + facet_col='auto', + facet_row='auto', + animation_frame='auto', + show: bool | None = None, + **kwargs, + ) -> PlotResult: + """Plot effects comparison. 
See StatisticsPlotAccessor.effects.""" + data_kw, plotly_kw = self._split_kwargs('effects', kwargs) + ds, title = self._combine_data('effects', aspect, **data_kw) + if not ds.data_vars: + return self._finalize(ds, None, show) + + by = data_kw.get('by') + # After to_dataset(dim='effect'), effects become variables -> 'variable' column + x_col = by if by else 'variable' + color_col = 'variable' if len(ds.data_vars) > 1 else x_col + + fig = ds.fxplot.bar( + x=x_col, + color=color_col, + colors=colors, + title=title, + facet_col=facet_col, + facet_row=facet_row, + animation_frame=animation_frame, + **plotly_kw, + ) + fig.update_layout(bargap=0, bargroupgap=0) + fig.update_traces(marker_line_width=0) + return self._finalize(ds, fig, show) + + def heatmap( + self, variables, *, colors=None, facet_col='auto', animation_frame='auto', show: bool | None = None, **kwargs + ) -> PlotResult: + """Plot heatmap comparison. See StatisticsPlotAccessor.heatmap.""" + data_kw, plotly_kw = self._split_kwargs('heatmap', kwargs) + ds, _ = self._combine_data('heatmap', variables, **data_kw) + if not ds.data_vars: + return self._finalize(ds, None, show) + da = ds[next(iter(ds.data_vars))] + fig = da.fxplot.heatmap(colors=colors, facet_col=facet_col, animation_frame=animation_frame, **plotly_kw) + return self._finalize(ds, fig, show) diff --git a/flixopt/statistics_accessor.py b/flixopt/statistics_accessor.py index e3581e4e3..536c6beaf 100644 --- a/flixopt/statistics_accessor.py +++ b/flixopt/statistics_accessor.py @@ -25,7 +25,6 @@ import numpy as np import pandas as pd -import plotly.express as px import plotly.graph_objects as go import xarray as xr @@ -1867,113 +1866,67 @@ def effects( self._stats._require_solution() # Get the appropriate effects dataset based on aspect - if aspect == 'total': - effects_ds = self._stats.total_effects - elif aspect == 'temporal': - effects_ds = self._stats.temporal_effects - elif aspect == 'periodic': - effects_ds = self._stats.periodic_effects - else: + effects_ds = { + 'total': self._stats.total_effects, + 'temporal': self._stats.temporal_effects, + 'periodic': self._stats.periodic_effects, + }.get(aspect) + if effects_ds is None: raise ValueError(f"Aspect '{aspect}' not valid. Choose from 'total', 'temporal', 'periodic'.") - # Get available effects (data variables in the dataset) - available_effects = list(effects_ds.data_vars) - - # Filter to specific effect if requested + # Filter to specific effect(s) and apply selection if effect is not None: - if effect not in available_effects: - raise ValueError(f"Effect '{effect}' not found. Available: {available_effects}") - effects_to_plot = [effect] + if effect not in effects_ds: + raise ValueError(f"Effect '{effect}' not found. 
diff --git a/flixopt/statistics_accessor.py b/flixopt/statistics_accessor.py
index e3581e4e3..536c6beaf 100644
--- a/flixopt/statistics_accessor.py
+++ b/flixopt/statistics_accessor.py
@@ -25,7 +25,6 @@
 import numpy as np
 import pandas as pd
-import plotly.express as px
 import plotly.graph_objects as go
 import xarray as xr
 
@@ -1867,113 +1866,67 @@ def effects(
         self._stats._require_solution()
 
         # Get the appropriate effects dataset based on aspect
-        if aspect == 'total':
-            effects_ds = self._stats.total_effects
-        elif aspect == 'temporal':
-            effects_ds = self._stats.temporal_effects
-        elif aspect == 'periodic':
-            effects_ds = self._stats.periodic_effects
-        else:
+        effects_ds = {
+            'total': self._stats.total_effects,
+            'temporal': self._stats.temporal_effects,
+            'periodic': self._stats.periodic_effects,
+        }.get(aspect)
+        if effects_ds is None:
             raise ValueError(f"Aspect '{aspect}' not valid. Choose from 'total', 'temporal', 'periodic'.")
 
-        # Get available effects (data variables in the dataset)
-        available_effects = list(effects_ds.data_vars)
-
-        # Filter to specific effect if requested
+        # Filter to specific effect(s) and apply selection
        if effect is not None:
-            if effect not in available_effects:
-                raise ValueError(f"Effect '{effect}' not found. Available: {available_effects}")
-            effects_to_plot = [effect]
+            if effect not in effects_ds:
+                raise ValueError(f"Effect '{effect}' not found. Available: {list(effects_ds.data_vars)}")
+            ds = effects_ds[[effect]]
         else:
-            effects_to_plot = available_effects
-
-        # Build a combined DataArray with effect dimension
-        effect_arrays = []
-        for eff in effects_to_plot:
-            da = effects_ds[eff]
-            if by == 'contributor':
-                # Keep individual contributors (flows) - no groupby
-                effect_arrays.append(da.expand_dims(effect=[eff]))
-            else:
-                # Group by component (sum over contributor within each component)
-                da_grouped = da.groupby('component').sum()
-                effect_arrays.append(da_grouped.expand_dims(effect=[eff]))
+            ds = effects_ds
 
-        combined = xr.concat(effect_arrays, dim='effect')
+        # Group by component (default) unless by='contributor'
+        if by != 'contributor' and 'contributor' in ds.dims:
+            ds = ds.groupby('component').sum()
 
-        # Apply selection
-        combined = _apply_selection(combined.to_dataset(name='value'), select)['value']
+        ds = _apply_selection(ds, select)
 
-        # Group by the specified dimension
+        # Sum over dimensions based on 'by' parameter
         if by is None:
-            # Aggregate totals per effect - sum over all dimensions except effect
-            if 'time' in combined.dims:
-                combined = combined.sum(dim='time')
-            if 'component' in combined.dims:
-                combined = combined.sum(dim='component')
-            if 'contributor' in combined.dims:
-                combined = combined.sum(dim='contributor')
-            x_col = 'effect'
-            color_col = 'effect'
+            for dim in ['time', 'component', 'contributor']:
+                if dim in ds.dims:
+                    ds = ds.sum(dim=dim)
+            x_col, color_col = 'variable', 'variable'
         elif by == 'component':
-            # Sum over time if present
-            if 'time' in combined.dims:
-                combined = combined.sum(dim='time')
+            if 'time' in ds.dims:
+                ds = ds.sum(dim='time')
             x_col = 'component'
-            color_col = 'effect' if len(effects_to_plot) > 1 else 'component'
+            color_col = 'variable' if len(ds.data_vars) > 1 else 'component'
         elif by == 'contributor':
-            # Sum over time if present
-            if 'time' in combined.dims:
-                combined = combined.sum(dim='time')
+            if 'time' in ds.dims:
+                ds = ds.sum(dim='time')
             x_col = 'contributor'
-            color_col = 'effect' if len(effects_to_plot) > 1 else 'contributor'
         elif by == 'time':
-            if 'time' not in combined.dims:
+            if 'time' not in ds.dims:
                 raise ValueError(f"Cannot plot by 'time' for aspect '{aspect}' - no time dimension.")
-            # Sum over components or contributors
-            if 'component' in combined.dims:
-                combined = combined.sum(dim='component')
-            if 'contributor' in combined.dims:
-                combined = combined.sum(dim='contributor')
+            for dim in ['component', 'contributor']:
+                if dim in ds.dims:
+                    ds = ds.sum(dim=dim)
             x_col = 'time'
-            color_col = 'effect' if len(effects_to_plot) > 1 else None
+            color_col = 'variable' if len(ds.data_vars) > 1 else None
         else:
             raise ValueError(f"'by' must be one of 'component', 'contributor', 'time', or None, got {by!r}")
 
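+        # Editorial note: after this dispatch ds holds one value per x_col
+        # category, with one data variable per effect; e.g. by='component'
+        # with effects 'costs' and 'CO2' yields grouped bars per component,
+        # coloured by effect (names illustrative).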
-        # Convert to DataFrame for plotly express
-        df = combined.to_dataframe(name='value').reset_index()
-
-        # Resolve facet/animation: 'auto' means None for DataFrames (no dimension priority)
-        resolved_facet_col = None if facet_col == 'auto' else facet_col
-        resolved_facet_row = None if facet_row == 'auto' else facet_row
-        resolved_animation = None if animation_frame == 'auto' else animation_frame
-
-        # Build color map
-        if color_col and color_col in df.columns:
-            color_items = df[color_col].unique().tolist()
-            color_map = process_colors(colors, color_items)
-        else:
-            color_map = None
+        # Build title
+        effect_label = effect or 'Effects'
+        title = f'{effect_label} ({aspect})' if by is None else f'{effect_label} ({aspect}) by {by}'
 
-        # Build title with unit if single effect
-        effect_label = effect if effect else 'Effects'
-        if effect and effect in effects_ds:
-            unit_label = effects_ds[effect].attrs.get('unit', '')
-            title = f'{effect_label} [{unit_label}]' if unit_label else effect_label
-        else:
-            title = effect_label
-        title = f'{title} ({aspect})' if by is None else f'{title} ({aspect}) by {by}'
-
-        fig = px.bar(
-            df,
+        fig = ds.fxplot.bar(
             x=x_col,
-            y='value',
             color=color_col,
-            color_discrete_map=color_map,
-            facet_col=resolved_facet_col,
-            facet_row=resolved_facet_row,
-            animation_frame=resolved_animation,
+            colors=colors,
             title=title,
+            facet_col=facet_col,
+            facet_row=facet_row,
+            animation_frame=animation_frame,
             **plotly_kwargs,
         )
         fig.update_layout(bargap=0, bargroupgap=0)
@@ -1984,7 +1937,7 @@ def effects(
         if show:
             fig.show()
 
-        return PlotResult(data=combined.to_dataset(name=aspect), figure=fig)
+        return PlotResult(data=ds, figure=fig)
 
     def charge_states(
         self,