Skip to content

Calibrator API

The Calibrator class provides a unified interface for both standard and probabilistic calibration.

Methods

  • run() - Runs standard single-solution calibration using the optimization_config from the problem
  • run_probabilistic() - Runs probabilistic calibration using the probabilistic_config from the problem

Calibrator

A Facade for running parameter calibration from a defined CalibrationProblem.

Source code in commol/api/calibrator.py
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
class Calibrator:
    """
    A Facade for running parameter calibration from a defined CalibrationProblem.
    """

    def __init__(
        self,
        simulation: Simulation,
        problem: CalibrationProblem,
    ):
        """
        Initializes the calibration from a Simulation and CalibrationProblem.

        Parameters
        ----------
        simulation : Simulation
            A fully initialized Simulation object with the model to calibrate.
        problem : CalibrationProblem
            A fully constructed and validated calibration problem definition.
        """
        logger.info(
            f"Initializing Calibration for model: '{simulation.model_definition.name}'"
        )
        self.simulation: Simulation = simulation
        self.problem: CalibrationProblem = problem
        # Rust engine backing the simulation; handed to the Rust calibrate calls.
        self._engine: "DifferenceEquationsProtocol" = simulation.engine

        logger.info(
            (
                f"Calibration initialized with {len(problem.parameters)} parameters "
                f"and {len(problem.observed_data)} observed data points."
            )
        )

        # Validate calibration parameters against model
        self._validate_calibration_parameters()
        self._validate_observed_data()

    def run(self) -> CalibrationResult:
        """
        Runs the calibration optimization.

        Returns
        -------
        CalibrationResult
            Object containing the optimized parameter values, final loss,
            convergence status, and other optimization statistics.

        Raises
        ------
        ImportError
            If Rust extension is not available.
        ValueError
            If calibration problem setup is invalid.
        RuntimeError
            If optimization fails.
        """
        # Determine algorithm name from config type. Any non-Nelder-Mead config
        # is reported as PSO here; _build_optimization_config raises for
        # genuinely unsupported config types.
        algorithm_name = (
            OptimizationAlgorithm.NELDER_MEAD
            if isinstance(self.problem.optimization_config, NelderMeadConfig)
            else OptimizationAlgorithm.PARTICLE_SWARM
        )
        logger.info(
            (
                f"Starting calibration with "
                f"{algorithm_name} algorithm and "
                f"{self.problem.loss_function} loss function."
            )
        )

        # Convert the problem definition to Rust types
        # (conversion helpers are shared with run_with_history).
        rust_observed_data = self._build_rust_observed_data()
        rust_parameters = self._build_rust_parameters()
        rust_constraints = self._build_rust_constraints()
        rust_loss_config = self._build_loss_config()
        rust_optimization_config = self._build_optimization_config()

        logger.info("Converted problem definition to Rust types.")
        logger.info("Running optimization...")

        # Get initial population size for initial condition fraction conversion
        initial_population_size = self._get_initial_population_size()

        # Call the Rust calibrate function
        rust_result = rust_calibration.calibrate(
            self._engine,
            rust_observed_data,
            rust_parameters,
            rust_constraints,
            rust_loss_config,
            rust_optimization_config,
            initial_population_size,
        )

        # Convert result back to Python CalibrationResult
        result = CalibrationResult(
            best_parameters=rust_result.best_parameters,
            final_loss=rust_result.final_loss,
            iterations=rust_result.iterations,
            converged=rust_result.converged,
            termination_reason=rust_result.termination_reason,
        )

        logger.info(
            (
                f"Calibration finished after {result.iterations} iterations. "
                f"Final loss: {result.final_loss:.6f}"
            )
        )

        return result

    def run_with_history(self) -> "CalibrationResultWithHistoryProtocol":
        """
        Runs the calibration optimization and returns evaluation history.

        Returns all objective function evaluations that occurred during
        optimization, not just the final best result. This is useful for
        probabilistic calibration where we want to explore the parameter space.

        Returns
        -------
        CalibrationResultWithHistory
            Object containing optimized parameters, final loss, and all evaluations.

        Raises
        ------
        ImportError
            If Rust extension is not available.
        ValueError
            If calibration problem setup is invalid.
        RuntimeError
            If optimization fails.
        """
        logger.info("Starting calibration with history tracking")

        # Convert the problem definition to Rust types (same helpers as run).
        rust_observed_data = self._build_rust_observed_data()
        rust_parameters = self._build_rust_parameters()
        rust_constraints = self._build_rust_constraints()
        rust_loss_config = self._build_loss_config()
        rust_optimization_config = self._build_optimization_config()

        # Get initial population size for initial condition fraction conversion
        initial_population_size = self._get_initial_population_size()

        # Call the Rust calibrate_with_history function
        rust_result = rust_calibration.calibrate_with_history(
            self._engine,
            rust_observed_data,
            rust_parameters,
            rust_constraints,
            rust_loss_config,
            rust_optimization_config,
            initial_population_size,
        )

        logger.info(
            f"Calibration finished after {rust_result.iterations} iterations. "
            f"Final loss: {rust_result.final_loss:.6f}. "
            f"Collected {len(rust_result.evaluations)} evaluations."
        )

        return rust_result

    def _build_rust_observed_data(self) -> list:
        """Convert the problem's observed data points to Rust ObservedDataPoint
        objects."""
        return [
            rust_calibration.ObservedDataPoint(
                step=point.step,
                compartment=point.compartment,
                value=point.value,
                weight=point.weight,
                scale_id=point.scale_id,
            )
            for point in self.problem.observed_data
        ]

    def _build_rust_parameters(self) -> list:
        """Convert the problem's calibration parameters to Rust
        CalibrationParameter objects."""
        return [
            rust_calibration.CalibrationParameter(
                id=param.id,
                parameter_type=self._to_rust_parameter_type(param.parameter_type),
                min_bound=param.min_bound,
                max_bound=param.max_bound,
                initial_guess=param.initial_guess,
            )
            for param in self.problem.parameters
        ]

    def _build_rust_constraints(self) -> list:
        """Convert the problem's constraints to Rust CalibrationConstraint
        objects."""
        return [
            rust_calibration.CalibrationConstraint(
                id=constraint.id,
                expression=constraint.expression,
                description=constraint.description,
                weight=constraint.weight,
                time_steps=constraint.time_steps,
            )
            for constraint in self.problem.constraints
        ]

    def _to_rust_parameter_type(
        self, param_type: str
    ) -> "CalibrationParameterTypeProtocol":
        """Convert Python CalibrationParameterType to Rust type."""
        if param_type == CalibrationParameterType.PARAMETER:
            return rust_calibration.CalibrationParameterType.Parameter
        elif param_type == CalibrationParameterType.INITIAL_CONDITION:
            return rust_calibration.CalibrationParameterType.InitialCondition
        elif param_type == CalibrationParameterType.SCALE:
            return rust_calibration.CalibrationParameterType.Scale
        else:
            raise ValueError(f"Unknown parameter type: {param_type}")

    def _build_loss_config(self) -> "LossConfigProtocol":
        """Convert Python loss function to Rust LossConfig."""
        loss_func = self.problem.loss_function

        if loss_func == LossFunction.SSE:
            return rust_calibration.LossConfig.sse()
        elif loss_func == LossFunction.RMSE:
            return rust_calibration.LossConfig.rmse()
        elif loss_func == LossFunction.MAE:
            return rust_calibration.LossConfig.mae()
        elif loss_func == LossFunction.WEIGHTED_SSE:
            return rust_calibration.LossConfig.weighted_sse()
        else:
            raise ValueError(f"Unsupported loss function: {loss_func}.")

    def _build_optimization_config(self) -> "OptimizationConfigProtocol":
        """Convert Python OptimizationConfig to Rust OptimizationConfig."""
        opt_config = self.problem.optimization_config

        if isinstance(opt_config, NelderMeadConfig):
            nm_config = rust_calibration.NelderMeadConfig(
                max_iterations=opt_config.max_iterations,
                sd_tolerance=opt_config.sd_tolerance,
                simplex_perturbation=opt_config.simplex_perturbation,
                alpha=opt_config.alpha,
                gamma=opt_config.gamma,
                rho=opt_config.rho,
                sigma=opt_config.sigma,
                verbose=opt_config.verbose,
                header_interval=opt_config.header_interval,
            )
            return rust_calibration.OptimizationConfig.nelder_mead(nm_config)

        elif isinstance(opt_config, ParticleSwarmConfig):
            return self._build_pso_config(opt_config)

        else:
            raise ValueError(
                f"Unsupported optimization config type: {type(opt_config).__name__}"
            )

    def _build_pso_config(
        self, opt_config: ParticleSwarmConfig
    ) -> "OptimizationConfigProtocol":
        """Convert ParticleSwarmConfig to Rust OptimizationConfig."""
        rust_inertia = self._build_rust_inertia(opt_config.inertia_config)
        rust_acceleration = self._build_rust_acceleration(
            opt_config.acceleration_config
        )

        # Mutation and velocity sub-configs are optional; pass None through.
        mutation = opt_config.mutation_config
        rust_mutation = None
        if mutation is not None:
            rust_mutation = rust_calibration.PSOMutation(
                strategy=mutation.strategy,
                scale=mutation.scale,
                probability=mutation.probability,
                application=mutation.application,
            )

        velocity = opt_config.velocity_config
        rust_velocity = None
        if velocity is not None:
            rust_velocity = rust_calibration.PSOVelocity(
                clamp_factor=velocity.clamp_factor,
                mutation_threshold=velocity.mutation_threshold,
            )

        ps_config = rust_calibration.ParticleSwarmConfig(
            num_particles=opt_config.num_particles,
            max_iterations=opt_config.max_iterations,
            verbose=opt_config.verbose,
            inertia=rust_inertia,
            acceleration=rust_acceleration,
            mutation=rust_mutation,
            velocity=rust_velocity,
            initialization=opt_config.initialization,
            seed=self.problem.seed,
        )
        return rust_calibration.OptimizationConfig.particle_swarm(ps_config)

    def _build_rust_inertia(
        self, inertia: PSOConstantInertia | PSOChaoticInertia | None
    ):
        """Convert Python inertia config to Rust type."""
        if inertia is None:
            return None
        if isinstance(inertia, PSOConstantInertia):
            return rust_calibration.PSOInertiaConstant(factor=inertia.factor)
        if isinstance(inertia, PSOChaoticInertia):
            return rust_calibration.PSOInertiaChaotic(
                w_min=inertia.w_min, w_max=inertia.w_max
            )
        return None

    def _build_rust_acceleration(
        self, acceleration: PSOConstantAcceleration | PSOTimeVaryingAcceleration | None
    ):
        """Convert Python acceleration config to Rust type."""
        if acceleration is None:
            return None
        if isinstance(acceleration, PSOConstantAcceleration):
            return rust_calibration.PSOAccelerationConstant(
                cognitive=acceleration.cognitive,
                social=acceleration.social,
            )
        if isinstance(acceleration, PSOTimeVaryingAcceleration):
            return rust_calibration.PSOAccelerationTimeVarying(
                c1_initial=acceleration.c1_initial,
                c1_final=acceleration.c1_final,
                c2_initial=acceleration.c2_initial,
                c2_final=acceleration.c2_final,
            )
        return None

    @property
    def num_parameters(self) -> int:
        """Number of parameters being calibrated."""
        return len(self.problem.parameters)

    @property
    def num_observations(self) -> int:
        """Number of observed data points."""
        return len(self.problem.observed_data)

    @property
    def parameter_names(self) -> list[str]:
        """Names of parameters being calibrated."""
        return [param.id for param in self.problem.parameters]

    def _validate_calibration_parameters(self) -> None:
        """
        Validate that all calibration parameters exist in the model.

        Raises
        ------
        ValueError
            If a parameter ID doesn't exist in the model or if a bin ID is invalid.
        """
        model = self.simulation.model_definition
        model_param_ids = {p.id for p in model.parameters}
        model_bin_ids = {b.id for b in model.population.bins}

        for param in self.problem.parameters:
            if param.parameter_type == CalibrationParameterType.PARAMETER:
                if param.id not in model_param_ids:
                    raise ValueError(
                        f"Calibration parameter '{param.id}' not found in model "
                        f"parameters. Available parameters: "
                        f"{sorted(model_param_ids)}"
                    )
            elif param.parameter_type == CalibrationParameterType.INITIAL_CONDITION:
                if param.id not in model_bin_ids:
                    raise ValueError(
                        f"Calibration initial condition '{param.id}' not found in "
                        f"model bins. Available bins: {sorted(model_bin_ids)}"
                    )
            elif param.parameter_type == CalibrationParameterType.SCALE:
                # Scale parameters are not tied to a model entity; only their
                # bounds are validated (must be strictly positive).
                if param.min_bound <= 0 or param.max_bound <= 0:
                    raise ValueError(
                        f"Scale parameter '{param.id}' must have positive bounds "
                        f"(got min={param.min_bound}, max={param.max_bound})"
                    )
            else:
                raise ValueError(
                    f"Unknown calibration parameter type: {param.parameter_type}"
                )

    def _get_compartment_index(self, bin_id: str) -> int:
        """
        Get the index of a bin/compartment by its ID.

        Parameters
        ----------
        bin_id : str
            The bin identifier

        Returns
        -------
        int
            The index of the compartment in the population vector

        Raises
        ------
        ValueError
            If the bin_id is not found
        """
        bins = self.simulation.model_definition.population.bins
        for idx, bin_obj in enumerate(bins):
            if bin_obj.id == bin_id:
                return idx
        raise ValueError(f"Bin '{bin_id}' not found in model")

    def _get_initial_population_size(self) -> int:
        """Get the initial population size from the model."""
        initial_conditions = (
            self.simulation.model_definition.population.initial_conditions
        )
        return initial_conditions.population_size

    def _validate_observed_data(self) -> None:
        """Validate that observed data compartments exist and have valid steps.

        Raises
        ------
        ValueError
            If observed data contains invalid compartments or negative time steps.
        """
        model_bin_ids = {b.id for b in self.simulation.model_definition.population.bins}

        for obs in self.problem.observed_data:
            if obs.compartment not in model_bin_ids:
                raise ValueError(
                    f"Observed data compartment '{obs.compartment}' not found in "
                    f"model. Available compartments: {sorted(model_bin_ids)}"
                )

        if self.problem.observed_data:
            min_step = min(obs.step for obs in self.problem.observed_data)
            if min_step < 0:
                raise ValueError(
                    f"Observed data contains negative time step: {min_step}. "
                    "Time steps must be non-negative."
                )

    def run_probabilistic(self):
        """Run probabilistic calibration.

        Returns
        -------
        ProbabilisticCalibrationResult
            Object containing the ensemble of parameter sets, statistics,
            predictions with confidence intervals, and coverage metrics.

        Raises
        ------
        ValueError
            If probabilistic_config is not set in the CalibrationProblem.
        RuntimeError
            If calibration or ensemble selection fails.
        """
        if self.problem.probabilistic_config is None:
            raise ValueError(
                "probabilistic_config must be set in CalibrationProblem to run "
                "probabilistic calibration. Please set problem.probabilistic_config "
                "to a ProbabilisticCalibrationConfig instance."
            )

        # Import ProbabilisticCalibrator implementation
        # NOTE(review): imported lazily — presumably to avoid a circular
        # import between the api modules; confirm before moving to top level.
        from commol.api.probabilistic_calibrator import ProbabilisticCalibrator

        # Create a probabilistic calibrator instance and delegate to it
        prob_calibrator = ProbabilisticCalibrator(self.simulation, self.problem)
        return prob_calibrator.run()

Attributes

num_parameters property

num_parameters: int

Number of parameters being calibrated.

num_observations property

num_observations: int

Number of observed data points.

parameter_names property

parameter_names: list[str]

Names of parameters being calibrated.

Functions

__init__

__init__(simulation: Simulation, problem: CalibrationProblem)

Initializes the calibration from a Simulation and CalibrationProblem.

Parameters:

Name Type Description Default
simulation Simulation

A fully initialized Simulation object with the model to calibrate.

required
problem CalibrationProblem

A fully constructed and validated calibration problem definition.

required
Source code in commol/api/calibrator.py
def __init__(
    self,
    simulation: Simulation,
    problem: CalibrationProblem,
):
    """
    Initializes the calibration from a Simulation and CalibrationProblem.

    Parameters
    ----------
    simulation : Simulation
        A fully initialized Simulation object with the model to calibrate.
    problem : CalibrationProblem
        A fully constructed and validated calibration problem definition.
    """
    logger.info(
        f"Initializing Calibration for model: '{simulation.model_definition.name}'"
    )
    self.simulation: Simulation = simulation
    self.problem: CalibrationProblem = problem
    # Rust engine backing the simulation; handed to the Rust calibrate calls.
    self._engine: "DifferenceEquationsProtocol" = simulation.engine

    logger.info(
        (
            f"Calibration initialized with {len(problem.parameters)} parameters "
            f"and {len(problem.observed_data)} observed data points."
        )
    )

    # Validate calibration parameters against model
    # (fails fast with ValueError before any optimization is attempted).
    self._validate_calibration_parameters()
    self._validate_observed_data()
run

Runs the calibration optimization.

Returns:

Type Description
CalibrationResult

Object containing the optimized parameter values, final loss, convergence status, and other optimization statistics.

Raises:

Type Description
ImportError

If Rust extension is not available.

ValueError

If calibration problem setup is invalid.

RuntimeError

If optimization fails.

Source code in commol/api/calibrator.py
def run(self) -> CalibrationResult:
    """
    Runs the calibration optimization.

    Returns
    -------
    CalibrationResult
        Object containing the optimized parameter values, final loss,
        convergence status, and other optimization statistics.

    Raises
    ------
    ImportError
        If Rust extension is not available.
    ValueError
        If calibration problem setup is invalid.
    RuntimeError
        If optimization fails.
    """
    # Determine algorithm name from config type. Any non-Nelder-Mead config
    # is reported as PSO here; _build_optimization_config raises for
    # genuinely unsupported config types.
    algorithm_name = (
        OptimizationAlgorithm.NELDER_MEAD
        if isinstance(self.problem.optimization_config, NelderMeadConfig)
        else OptimizationAlgorithm.PARTICLE_SWARM
    )
    logger.info(
        (
            f"Starting calibration with "
            f"{algorithm_name} algorithm and "
            f"{self.problem.loss_function} loss function."
        )
    )

    # Convert observed data to Rust types
    rust_observed_data = [
        rust_calibration.ObservedDataPoint(
            step=point.step,
            compartment=point.compartment,
            value=point.value,
            weight=point.weight,
            scale_id=point.scale_id,
        )
        for point in self.problem.observed_data
    ]

    # Convert parameters to Rust types
    rust_parameters = [
        rust_calibration.CalibrationParameter(
            id=param.id,
            parameter_type=self._to_rust_parameter_type(param.parameter_type),
            min_bound=param.min_bound,
            max_bound=param.max_bound,
            initial_guess=param.initial_guess,
        )
        for param in self.problem.parameters
    ]

    # Convert constraints to Rust types
    rust_constraints = [
        rust_calibration.CalibrationConstraint(
            id=constraint.id,
            expression=constraint.expression,
            description=constraint.description,
            weight=constraint.weight,
            time_steps=constraint.time_steps,
        )
        for constraint in self.problem.constraints
    ]

    # Convert loss config to Rust type
    rust_loss_config = self._build_loss_config()

    # Convert optimization config to Rust type
    rust_optimization_config = self._build_optimization_config()

    logger.info("Converted problem definition to Rust types.")
    logger.info("Running optimization...")

    # Get initial population size for initial condition fraction conversion
    initial_population_size = self._get_initial_population_size()

    # Call the Rust calibrate function
    rust_result = rust_calibration.calibrate(
        self._engine,
        rust_observed_data,
        rust_parameters,
        rust_constraints,
        rust_loss_config,
        rust_optimization_config,
        initial_population_size,
    )

    # Convert result back to Python CalibrationResult
    result = CalibrationResult(
        best_parameters=rust_result.best_parameters,
        final_loss=rust_result.final_loss,
        iterations=rust_result.iterations,
        converged=rust_result.converged,
        termination_reason=rust_result.termination_reason,
    )

    logger.info(
        (
            f"Calibration finished after {result.iterations} iterations. "
            f"Final loss: {result.final_loss:.6f}"
        )
    )

    return result

run_with_history

run_with_history() -> CalibrationResultWithHistoryProtocol

Runs the calibration optimization and returns evaluation history.

Returns all objective function evaluations that occurred during optimization, not just the final best result. This is useful for probabilistic calibration where we want to explore the parameter space.

Returns:

Type Description
CalibrationResultWithHistory

Object containing optimized parameters, final loss, and all evaluations.

Raises:

Type Description
ImportError

If Rust extension is not available.

ValueError

If calibration problem setup is invalid.

RuntimeError

If optimization fails.

Source code in commol/api/calibrator.py
def run_with_history(self) -> "CalibrationResultWithHistoryProtocol":
    """
    Runs the calibration optimization and returns evaluation history.

    Returns all objective function evaluations that occurred during
    optimization, not just the final best result. This is useful for
    probabilistic calibration where we want to explore the parameter space.

    Returns
    -------
    CalibrationResultWithHistory
        Object containing optimized parameters, final loss, and all evaluations.

    Raises
    ------
    ImportError
        If Rust extension is not available.
    ValueError
        If calibration problem setup is invalid.
    RuntimeError
        If optimization fails.
    """
    logger.info("Starting calibration with history tracking")

    # NOTE: mirrors run(); only the Rust entry point (calibrate_with_history)
    # and the returned result type differ.

    # Convert observed data to Rust types
    rust_observed_data = [
        rust_calibration.ObservedDataPoint(
            step=point.step,
            compartment=point.compartment,
            value=point.value,
            weight=point.weight,
            scale_id=point.scale_id,
        )
        for point in self.problem.observed_data
    ]

    # Convert parameters to Rust types
    rust_parameters = [
        rust_calibration.CalibrationParameter(
            id=param.id,
            parameter_type=self._to_rust_parameter_type(param.parameter_type),
            min_bound=param.min_bound,
            max_bound=param.max_bound,
            initial_guess=param.initial_guess,
        )
        for param in self.problem.parameters
    ]

    # Convert constraints to Rust types
    rust_constraints = [
        rust_calibration.CalibrationConstraint(
            id=constraint.id,
            expression=constraint.expression,
            description=constraint.description,
            weight=constraint.weight,
            time_steps=constraint.time_steps,
        )
        for constraint in self.problem.constraints
    ]

    # Convert loss config to Rust type
    rust_loss_config = self._build_loss_config()

    # Convert optimization config to Rust type
    rust_optimization_config = self._build_optimization_config()

    # Get initial population size for initial condition fraction conversion
    initial_population_size = self._get_initial_population_size()

    # Call the Rust calibrate_with_history function
    rust_result = rust_calibration.calibrate_with_history(
        self._engine,
        rust_observed_data,
        rust_parameters,
        rust_constraints,
        rust_loss_config,
        rust_optimization_config,
        initial_population_size,
    )

    logger.info(
        f"Calibration finished after {rust_result.iterations} iterations. "
        f"Final loss: {rust_result.final_loss:.6f}. "
        f"Collected {len(rust_result.evaluations)} evaluations."
    )

    # Rust result is returned as-is (no conversion to a Python dataclass).
    return rust_result

run_probabilistic

run_probabilistic()

Run probabilistic calibration.

Returns:

Type Description
ProbabilisticCalibrationResult

Object containing the ensemble of parameter sets, statistics, predictions with confidence intervals, and coverage metrics.

Raises:

Type Description
ValueError

If probabilistic_config is not set in the CalibrationProblem.

RuntimeError

If calibration or ensemble selection fails.

Source code in commol/api/calibrator.py
def run_probabilistic(self):
    """Run probabilistic calibration.

    Returns
    -------
    ProbabilisticCalibrationResult
        Object containing the ensemble of parameter sets, statistics,
        predictions with confidence intervals, and coverage metrics.

    Raises
    ------
    ValueError
        If probabilistic_config is not set in the CalibrationProblem.
    RuntimeError
        If calibration or ensemble selection fails.
    """
    if self.problem.probabilistic_config is None:
        raise ValueError(
            "probabilistic_config must be set in CalibrationProblem to run "
            "probabilistic calibration. Please set problem.probabilistic_config "
            "to a ProbabilisticCalibrationConfig instance."
        )

    # Import ProbabilisticCalibrator implementation
    # NOTE(review): imported lazily — presumably to avoid a circular
    # import between the api modules; confirm before moving to top level.
    from commol.api.probabilistic_calibrator import ProbabilisticCalibrator

    # Create a probabilistic calibrator instance and delegate to it
    prob_calibrator = ProbabilisticCalibrator(self.simulation, self.problem)
    return prob_calibrator.run()

options: show_root_heading: true show_source: true heading_level: 2 show_docstring_attributes: false

CalibrationProblem

CalibrationProblem

Bases: BaseModel

Defines a complete calibration problem.

This class encapsulates all the information needed to calibrate model parameters against observed data. It provides validation of the calibration setup but delegates the actual optimization to the Rust backend.

Attributes:

Name Type Description
observed_data list[ObservedDataPoint]

List of observed data points to fit against

parameters list[CalibrationParameter]

List of parameters to calibrate with their bounds

constraints list[CalibrationConstraint]

List of constraints on calibration parameters (optional, default: empty list)

loss_function str

Loss function to use for measuring fit quality (default: "sse")

optimization_config OptimizationConfig

Configuration for the optimization algorithm

probabilistic_config ProbabilisticCalibrationConfig | None

Optional configuration for probabilistic calibration (default: None). When provided, enables ensemble-based parameter estimation with uncertainty quantification instead of single-point optimization.

seed int | None

Random seed for reproducibility across all stochastic processes (default: None, uses system entropy). Controls randomness in:

  • Optimization algorithms (e.g., Particle Swarm initialization)
  • Probabilistic calibration runs
  • Clustering algorithms
  • Ensemble selection

When set, all random operations become deterministic and reproducible.

Source code in commol/context/calibration.py
class CalibrationProblem(BaseModel):
    """
    Defines a complete calibration problem.

    This class encapsulates all the information needed to calibrate model
    parameters against observed data. It provides validation of the calibration
    setup but delegates the actual optimization to the Rust backend.

    Attributes
    ----------
    observed_data : list[ObservedDataPoint]
        List of observed data points to fit against
    parameters : list[CalibrationParameter]
        List of parameters to calibrate with their bounds
    constraints : list[CalibrationConstraint]
        List of constraints on calibration parameters (optional, default: empty list)
    loss_function : str
        Loss function to use for measuring fit quality (default: "sse")
    optimization_config : OptimizationConfig
        Configuration for the optimization algorithm
    probabilistic_config : ProbabilisticCalibrationConfig | None
        Optional configuration for probabilistic calibration (default: None).
        When provided, enables ensemble-based parameter estimation with
        uncertainty quantification instead of single-point optimization.
    seed : int | None
        Random seed for reproducibility across all stochastic processes
        (default: None, uses system entropy).
        Controls randomness in:
        - Optimization algorithms (e.g., Particle Swarm initialization)
        - Probabilistic calibration runs
        - Clustering algorithms
        - Ensemble selection
        When set, all random operations become deterministic and reproducible.
    """

    observed_data: list[ObservedDataPoint] = Field(
        default=..., min_length=1, description="Observed data points"
    )
    parameters: list[CalibrationParameter] = Field(
        default=..., min_length=1, description="Parameters to calibrate"
    )
    constraints: list[CalibrationConstraint] = Field(
        default_factory=list,
        description="Constraints on calibration parameters",
    )
    loss_function: str = Field(
        default=LossFunction.SSE,
        description="Loss function to use for measuring fit quality",
    )
    optimization_config: NelderMeadConfig | ParticleSwarmConfig = Field(
        default=..., description="Optimization algorithm configuration"
    )
    probabilistic_config: ProbabilisticCalibrationConfig | None = Field(
        default=None,
        description="Optional configuration for probabilistic calibration",
    )
    seed: int | None = Field(
        default=None,
        ge=0,
        description=(
            "Random seed for reproducibility across all stochastic processes "
            "(optimization, probabilistic calibration, clustering, ensemble selection)"
        ),
    )

    @field_validator("loss_function")
    @classmethod
    def validate_loss_function(cls, v: str) -> str:
        """Reject loss-function names that are not members of LossFunction."""
        if v not in LossFunction:
            raise ValueError(
                f"Invalid loss_function: {v}. Must be one of {list(LossFunction)}"
            )
        return v

    @model_validator(mode="after")
    def validate_unique_parameter_ids(self) -> Self:
        """Ensure parameter IDs are unique."""
        # Local import keeps the module's import block untouched; Counter
        # detects duplicates in O(n) and avoids shadowing the builtin `id`.
        from collections import Counter

        counts = Counter(p.id for p in self.parameters)
        duplicates = [param_id for param_id, n in counts.items() if n > 1]
        if duplicates:
            raise ValueError(f"Duplicate parameter IDs found: {duplicates}")
        return self

Functions

validate_unique_parameter_ids

validate_unique_parameter_ids() -> Self

Ensure parameter IDs are unique.

Source code in commol/context/calibration.py
@model_validator(mode="after")
def validate_unique_parameter_ids(self) -> Self:
    """Ensure parameter IDs are unique."""
    # Gather every declared parameter id; duplicates would make the
    # calibration result ambiguous (best_parameters is keyed by id).
    param_ids = [p.id for p in self.parameters]
    if len(param_ids) != len(set(param_ids)):
        # Report only the ids that occur more than once, not the full list.
        duplicates = [id for id in set(param_ids) if param_ids.count(id) > 1]
        raise ValueError(f"Duplicate parameter IDs found: {duplicates}")
    return self

options: show_root_heading: true show_source: false heading_level: 3 show_docstring_attributes: true

CalibrationResult

CalibrationResult

Bases: BaseModel

Result of a calibration run.

This is a simple data class that holds the results returned from the Rust calibration function.

Attributes:

Name Type Description
best_parameters dict[str, float]

Dictionary mapping parameter IDs to their calibrated values

final_loss float

Final loss value achieved

iterations int

Number of iterations performed

converged bool

Whether the optimization converged

termination_reason str

Explanation of why optimization terminated

Source code in commol/context/calibration.py
class CalibrationResult(BaseModel):
    """
    Result of a calibration run.

    This is a simple data class that holds the results returned from the
    Rust calibration function.

    Attributes
    ----------
    best_parameters : dict[str, float]
        Dictionary mapping parameter IDs to their calibrated values
    final_loss : float
        Final loss value achieved
    iterations : int
        Number of iterations performed
    converged : bool
        Whether the optimization converged
    termination_reason : str
        Explanation of why optimization terminated
    """

    best_parameters: dict[str, float] = Field(
        ..., description="Calibrated parameter values"
    )
    final_loss: float = Field(..., description="Final loss value")
    iterations: int = Field(
        ..., ge=0, description="Number of iterations performed"
    )
    converged: bool = Field(..., description="Whether optimization converged")
    termination_reason: str = Field(
        ..., description="Reason for optimization termination"
    )

    @override
    def __str__(self) -> str:
        """String representation of calibration result."""
        # Assemble the multi-line representation field by field, then join;
        # output is identical to concatenating the literals directly.
        parts = [
            "CalibrationResult(",
            f"  converged={self.converged},",
            f"  final_loss={self.final_loss:.6f},",
            f"  iterations={self.iterations},",
            f"  best_parameters={self.best_parameters},",
            f"  termination_reason='{self.termination_reason}'",
            ")",
        ]
        return "\n".join(parts)

Functions

__str__

__str__() -> str

String representation of calibration result.

Source code in commol/context/calibration.py
@override
def __str__(self) -> str:
    """String representation of calibration result."""
    # Multi-line, repr-like layout: one field per line, closing paren last.
    # Note final_loss is rounded to 6 decimal places for display only.
    return (
        f"CalibrationResult(\n"
        f"  converged={self.converged},\n"
        f"  final_loss={self.final_loss:.6f},\n"
        f"  iterations={self.iterations},\n"
        f"  best_parameters={self.best_parameters},\n"
        f"  termination_reason='{self.termination_reason}'\n"
        f")"
    )

options: show_root_heading: true show_source: false heading_level: 3 show_docstring_attributes: true

CalibrationParameter

CalibrationParameter

Bases: BaseModel

Defines a parameter or initial condition to be calibrated with its bounds.

Attributes:

Name Type Description
id str

Identifier (parameter ID for parameters, bin ID for initial conditions)

parameter_type str

Type of value being calibrated

min_bound float

Minimum allowed value for this parameter

max_bound float

Maximum allowed value for this parameter

initial_guess float | None

Optional starting value for optimization (if None, midpoint is used)

Source code in commol/context/calibration.py
class CalibrationParameter(BaseModel):
    """
    Defines a parameter or initial condition to be calibrated with its bounds.

    Attributes
    ----------
    id : str
        Identifier (parameter ID for parameters, bin ID for initial conditions)
    parameter_type : str
        Type of value being calibrated
    min_bound : float
        Minimum allowed value for this parameter
    max_bound : float
        Maximum allowed value for this parameter
    initial_guess : float | None
        Optional starting value for optimization (if None, midpoint is used)
    """

    id: str = Field(
        default=..., min_length=1, description="Parameter or bin identifier"
    )
    parameter_type: str = Field(
        default=...,
        description="Type of value being calibrated",
    )
    min_bound: float = Field(default=..., description="Minimum allowed value")
    max_bound: float = Field(default=..., description="Maximum allowed value")
    initial_guess: float | None = Field(
        default=None, description="Optional starting value for optimization"
    )

    @field_validator("parameter_type")
    def validate_parameter_type(cls, v: str) -> str:
        # Guard style: accept known enum members immediately, raise otherwise.
        if v in CalibrationParameterType:
            return v
        raise ValueError(
            f"Invalid parameter_type: {v}. "
            f"Must be one of {list(CalibrationParameterType)}"
        )

    @model_validator(mode="after")
    def validate_bounds(self) -> Self:
        """Validate that max_bound > min_bound and initial_guess is within bounds."""
        if self.max_bound <= self.min_bound:
            raise ValueError(
                f"max_bound ({self.max_bound}) must be greater than "
                f"min_bound ({self.min_bound}) for parameter '{self.id}'"
            )
        guess = self.initial_guess
        if guess is not None and not (self.min_bound <= guess <= self.max_bound):
            raise ValueError(
                f"initial_guess ({guess}) must be between "
                f"min_bound ({self.min_bound}) and max_bound "
                f"({self.max_bound}) for parameter '{self.id}'"
            )
        return self

Functions

validate_bounds

validate_bounds() -> Self

Validate that max_bound > min_bound and initial_guess is within bounds.

Source code in commol/context/calibration.py
@model_validator(mode="after")
def validate_bounds(self) -> Self:
    """Validate that max_bound > min_bound and initial_guess is within bounds."""
    # Equal bounds are rejected too (<=), since a zero-width interval leaves
    # nothing to calibrate.
    if self.max_bound <= self.min_bound:
        raise ValueError(
            (
                f"max_bound ({self.max_bound}) must be greater than "
                f"min_bound ({self.min_bound}) for parameter '{self.id}'"
            )
        )
    # initial_guess is optional; when absent, the bound check is skipped.
    if self.initial_guess is not None:
        # Inclusive range check: a guess exactly on either bound is valid.
        if not (self.min_bound <= self.initial_guess <= self.max_bound):
            raise ValueError(
                (
                    f"initial_guess ({self.initial_guess}) must be between "
                    f"min_bound ({self.min_bound}) and max_bound "
                    f"({self.max_bound}) for parameter '{self.id}'"
                )
            )

    return self

options: show_root_heading: true show_source: false heading_level: 3 show_docstring_attributes: true

ObservedDataPoint

ObservedDataPoint

Bases: BaseModel

Represents a single observed data point for calibration.

Attributes:

Name Type Description
step int

Time step of the observation

compartment str

Name of the compartment being observed

value float

Observed value

weight float

Weight for this observation in the loss function (default: 1.0)

scale_id str | None

Optional scale parameter ID to apply to model output before comparison

Source code in commol/context/calibration.py
class ObservedDataPoint(BaseModel):
    """
    Represents a single observed data point for calibration.

    Attributes
    ----------
    step : int
        Time step of the observation
    compartment : str
        Name of the compartment being observed
    value : float
        Observed value
    weight : float
        Weight for this observation in the loss function (default: 1.0)
    scale_id : str | None
        Optional scale parameter ID to apply to model output before comparison
    """

    # Non-negative simulation step index at which the value was observed.
    step: int = Field(default=..., ge=0, description="Time step of the observation")
    compartment: str = Field(
        default=..., min_length=1, description="Name of the compartment being observed"
    )
    # Observed values are constrained to be non-negative (ge=0.0).
    value: float = Field(default=..., ge=0.0, description="Observed value")
    # Strictly positive weight; larger values emphasize this point in the loss.
    weight: float = Field(
        default=1.0,
        gt=0.0,
        description="Weight for this observation in the loss function",
    )
    # NOTE(review): presumably references a CalibrationParameter id of type
    # "scale" applied to the model output before comparison — confirm against
    # the backend that consumes it.
    scale_id: str | None = Field(
        default=None,
        description="Optional scale parameter ID to apply to model output",
    )

options: show_root_heading: true show_source: false heading_level: 3 show_docstring_attributes: true

OptimizationConfig

Type Alias

OptimizationConfig is a type alias for NelderMeadConfig | ParticleSwarmConfig.

NelderMeadConfig

NelderMeadConfig

Bases: BaseModel

Configuration for the Nelder-Mead optimization algorithm.

The Nelder-Mead method is a simplex-based derivative-free optimization algorithm, suitable for problems where gradients are not available.

Attributes:

Name Type Description
max_iterations int

Maximum number of iterations (default: 1000)

sd_tolerance float

Convergence tolerance for standard deviation (default: 1e-6)

simplex_perturbation float

Multiplier for creating initial simplex vertices by perturbing each parameter dimension. A value of 1.1 means 10% perturbation. (default: 1.1)

alpha float | None

Reflection coefficient (default: None, uses argmin's default)

gamma float | None

Expansion coefficient (default: None, uses argmin's default)

rho float | None

Contraction coefficient (default: None, uses argmin's default)

sigma float | None

Shrink coefficient (default: None, uses argmin's default)

verbose bool

Enable verbose output during optimization (default: False)

header_interval int

Number of iterations between table header repeats in verbose output (default: 100)

Source code in commol/context/calibration.py
class NelderMeadConfig(BaseModel):
    """
    Configuration for the Nelder-Mead optimization algorithm.

    The Nelder-Mead method is a simplex-based derivative-free optimization
    algorithm, suitable for problems where gradients are not available.

    Attributes
    ----------
    max_iterations : int
        Maximum number of iterations (default: 1000)
    sd_tolerance : float
        Convergence tolerance for standard deviation (default: 1e-6)
    simplex_perturbation : float
        Multiplier for creating initial simplex vertices by perturbing each
        parameter dimension. A value of 1.1 means 10% perturbation. (default: 1.1)
    alpha : float | None
        Reflection coefficient (default: None, uses argmin's default)
    gamma : float | None
        Expansion coefficient (default: None, uses argmin's default)
    rho : float | None
        Contraction coefficient (default: None, uses argmin's default)
    sigma : float | None
        Shrink coefficient (default: None, uses argmin's default)
    verbose : bool
        Enable verbose output during optimization (default: False)
    header_interval : int
        Number of iterations between table header repeats in verbose output
        (default: 100)
    """

    max_iterations: int = Field(
        default=1000, gt=0, description="Maximum number of iterations"
    )
    sd_tolerance: float = Field(
        default=1e-6, gt=0.0, description="Convergence tolerance for standard deviation"
    )
    # gt=1.0: a multiplier of exactly 1.0 would leave vertices unperturbed and
    # produce a degenerate initial simplex.
    simplex_perturbation: float = Field(
        default=1.1,
        gt=1.0,
        description=(
            "Multiplier for creating initial simplex vertices (e.g., 1.1 = 10% "
            "perturbation)"
        ),
    )
    # The four simplex coefficients below default to None, meaning the argmin
    # backend chooses its own defaults; when set they must be strictly positive.
    alpha: float | None = Field(
        default=None,
        gt=0.0,
        description="Reflection coefficient (default: None, uses argmin's default)",
    )
    gamma: float | None = Field(
        default=None,
        gt=0.0,
        description="Expansion coefficient (default: None, uses argmin's default)",
    )
    rho: float | None = Field(
        default=None,
        gt=0.0,
        description="Contraction coefficient (default: None, uses argmin's default)",
    )
    sigma: float | None = Field(
        default=None,
        gt=0.0,
        description="Shrink coefficient (default: None, uses argmin's default)",
    )
    verbose: bool = Field(
        default=False,
        description="Enable verbose output during optimization (default: False)",
    )
    # Only relevant when verbose=True.
    header_interval: int = Field(
        default=100,
        gt=0,
        description=(
            "Number of iterations between table header repeats in verbose output "
            "(default: 100)"
        ),
    )

options: show_root_heading: true show_source: false heading_level: 3 show_docstring_attributes: true

ParticleSwarmConfig

ParticleSwarmConfig

Bases: BaseModel

Configuration for the Particle Swarm Optimization algorithm.

Attributes:

Name Type Description
num_particles int

Number of particles in the swarm (default: 20)

max_iterations int

Maximum number of iterations (default: 1000)

verbose bool

Enable verbose output (default: False)

initialization str

Particle initialization strategy (default: "uniform")

Methods:

Name Description
inertia

Set inertia weight strategy ("constant" or "chaotic")

acceleration

Set acceleration coefficients ("constant" or "time_varying")

mutation

Enable mutation to escape local optima

velocity

Configure velocity control

Source code in commol/context/calibration.py
class ParticleSwarmConfig(BaseModel):
    """
    Configuration for the Particle Swarm Optimization algorithm.

    Attributes
    ----------
    num_particles : int
        Number of particles in the swarm (default: 20)
    max_iterations : int
        Maximum number of iterations (default: 1000)
    verbose : bool
        Enable verbose output (default: False)
    initialization : str
        Particle initialization strategy (default: "uniform")

    Methods
    -------
    inertia(type, **kwargs)
        Set inertia weight strategy ("constant" or "chaotic")
    acceleration(type, **kwargs)
        Set acceleration coefficients ("constant" or "time_varying")
    mutation(strategy, scale, probability, application)
        Enable mutation to escape local optima
    velocity(clamp_factor, mutation_threshold)
        Configure velocity control
    """

    # Core parameters
    num_particles: int = Field(
        default=20, gt=0, description="Number of particles in the swarm"
    )
    max_iterations: int = Field(
        default=1000, gt=0, description="Maximum number of iterations"
    )
    verbose: bool = Field(
        default=False,
        description="Enable verbose output during optimization",
    )
    initialization: str = Field(
        default=PSOInitializationStrategy.UNIFORM,
        description=(
            # Fixed: the implicit string concatenation was missing a space and
            # rendered as "'latin_hypercube',or 'opposition_based'".
            "Particle initialization strategy: 'uniform' (default), "
            "'latin_hypercube', or 'opposition_based'"
        ),
    )

    # Private attributes for optional configurations (set via fluent methods)
    _inertia: PSOConstantInertia | PSOChaoticInertia | None = PrivateAttr(default=None)
    _acceleration: PSOConstantAcceleration | PSOTimeVaryingAcceleration | None = (
        PrivateAttr(default=None)
    )
    _mutation: PSOMutationConfig | None = PrivateAttr(default=None)
    _velocity: PSOVelocityConfig | None = PrivateAttr(default=None)

    @field_validator("initialization")
    @classmethod
    def validate_initialization(cls, v: str) -> str:
        """Reject initialization strategies not in PSOInitializationStrategy."""
        if v not in PSOInitializationStrategy:
            raise ValueError(
                f"Invalid initialization: {v}. "
                f"Must be one of {list(PSOInitializationStrategy)}"
            )
        return v

    @overload
    def inertia(self, type: str, *, factor: float = 0.721) -> Self: ...

    @overload
    def inertia(self, type: str, *, w_min: float, w_max: float) -> Self: ...

    def inertia(self, type: str, **kwargs: float) -> Self:
        """
        Set inertia weight strategy.

        Parameters
        ----------
        type : str
            Inertia strategy type: "constant" or "chaotic"

        Other Parameters
        ----------------
        factor : float, default=0.721
            Fixed inertia weight (canonical PSO: 1/(2*ln(2)) ≈ 0.721).
            Only used when type="constant".
        w_min : float
            Minimum inertia weight. Only used when type="chaotic".
        w_max : float
            Maximum inertia weight (must be > w_min). Only used when type="chaotic".

        Returns
        -------
        Self
            The config instance for method chaining
        """
        # Validate against the enum first so unknown strings fail fast.
        if type not in PSOInertiaType:
            raise ValueError(
                f"Invalid inertia type: {type}. Must be one of {list(PSOInertiaType)}"
            )
        if type == PSOInertiaType.CONSTANT:
            self._inertia = PSOConstantInertia(factor=kwargs.get("factor", 0.721))
        elif type == PSOInertiaType.CHAOTIC:
            self._inertia = PSOChaoticInertia(
                w_min=kwargs["w_min"], w_max=kwargs["w_max"]
            )
        else:
            # Defensive: reached only if PSOInertiaType gains a member that
            # has no branch above.
            raise ValueError(
                f"Unknown inertia type: {type}. Use 'constant' or 'chaotic'"
            )
        return self

    @overload
    def acceleration(
        self,
        type: str,
        *,
        cognitive: float = 1.193,
        social: float = 1.193,
    ) -> Self: ...

    @overload
    def acceleration(
        self,
        type: str,
        *,
        c1_initial: float,
        c1_final: float,
        c2_initial: float,
        c2_final: float,
    ) -> Self: ...

    def acceleration(self, type: str, **kwargs: float) -> Self:
        """
        Set acceleration coefficient strategy.

        Parameters
        ----------
        type : str
            Acceleration strategy type: "constant" or "time_varying"

        Other Parameters
        ----------------
        cognitive : float, default=1.193
            Cognitive coefficient (c1) - attraction to personal best.
            Only used when type="constant".
        social : float, default=1.193
            Social coefficient (c2) - attraction to swarm best.
            Only used when type="constant".
        c1_initial : float
            Initial cognitive factor (typically high, e.g., 2.5).
            Only used when type="time_varying".
        c1_final : float
            Final cognitive factor (typically low, e.g., 0.5).
            Only used when type="time_varying".
        c2_initial : float
            Initial social factor (typically low, e.g., 0.5).
            Only used when type="time_varying".
        c2_final : float
            Final social factor (typically high, e.g., 2.5).
            Only used when type="time_varying".

        Returns
        -------
        Self
            The config instance for method chaining
        """
        if type not in PSOAccelerationType:
            raise ValueError(
                f"Invalid acceleration type: {type}. "
                f"Must be one of {list(PSOAccelerationType)}"
            )
        if type == PSOAccelerationType.CONSTANT:
            self._acceleration = PSOConstantAcceleration(
                cognitive=kwargs.get("cognitive", 1.193),
                social=kwargs.get("social", 1.193),
            )
        elif type == PSOAccelerationType.TIME_VARYING:
            # Time-varying strategy has no defaults: all four factors required.
            self._acceleration = PSOTimeVaryingAcceleration(
                c1_initial=kwargs["c1_initial"],
                c1_final=kwargs["c1_final"],
                c2_initial=kwargs["c2_initial"],
                c2_final=kwargs["c2_final"],
            )
        else:
            # Defensive: reached only if PSOAccelerationType gains a member
            # that has no branch above.
            raise ValueError(
                f"Unknown acceleration type: {type}. Use 'constant' or 'time_varying'"
            )
        return self

    def mutation(
        self,
        strategy: str,
        *,
        scale: float,
        probability: float,
        application: str,
    ) -> Self:
        """
        Enable mutation to help escape local optima.

        Parameters
        ----------
        strategy : str
            Mutation distribution: "gaussian" or "cauchy".
            Cauchy has heavier tails for larger jumps.
        scale : float
            Standard deviation (gaussian) or scale parameter (cauchy)
        probability : float
            Mutation probability per iteration (0.0 to 1.0)
        application : str
            Which particles to mutate:
            "global_best", "all_particles", or "below_average"

        Returns
        -------
        Self
            The config instance for method chaining
        """
        if strategy not in PSOMutationStrategy:
            raise ValueError(
                f"Invalid mutation strategy: {strategy}. "
                f"Must be one of {list(PSOMutationStrategy)}"
            )
        if application not in PSOMutationApplication:
            raise ValueError(
                f"Invalid mutation application: {application}. "
                f"Must be one of {list(PSOMutationApplication)}"
            )

        self._mutation = PSOMutationConfig(
            strategy=strategy,
            scale=scale,
            probability=probability,
            application=application,
        )
        return self

    def velocity(
        self,
        *,
        clamp_factor: float | None = None,
        mutation_threshold: float | None = None,
    ) -> Self:
        """
        Configure velocity control.

        Parameters
        ----------
        clamp_factor : float, optional
            Velocity clamping as fraction of search space (0.0-1.0).
            Typically 0.1-0.2. Prevents explosive velocities.
        mutation_threshold : float, optional
            Reinitialize velocities below this threshold.
            Typically 0.001-0.01. Prevents stagnation.

        Returns
        -------
        Self
            The config instance for method chaining
        """
        self._velocity = PSOVelocityConfig(
            clamp_factor=clamp_factor,
            mutation_threshold=mutation_threshold,
        )
        return self

    @property
    def inertia_config(self) -> PSOConstantInertia | PSOChaoticInertia | None:
        """Get the inertia configuration (for internal use)."""
        return self._inertia

    @property
    def acceleration_config(
        self,
    ) -> PSOConstantAcceleration | PSOTimeVaryingAcceleration | None:
        """Get the acceleration configuration (for internal use)."""
        return self._acceleration

    @property
    def mutation_config(self) -> PSOMutationConfig | None:
        """Get the mutation configuration (for internal use)."""
        return self._mutation

    @property
    def velocity_config(self) -> PSOVelocityConfig | None:
        """Get the velocity configuration (for internal use)."""
        return self._velocity

Attributes

inertia_config property

inertia_config: PSOConstantInertia | PSOChaoticInertia | None

Get the inertia configuration (for internal use).

acceleration_config property

acceleration_config: PSOConstantAcceleration | PSOTimeVaryingAcceleration | None

Get the acceleration configuration (for internal use).

mutation_config property

mutation_config: PSOMutationConfig | None

Get the mutation configuration (for internal use).

velocity_config property

velocity_config: PSOVelocityConfig | None

Get the velocity configuration (for internal use).

Functions

inertia

inertia(type: str, *, factor: float = 0.721) -> Self
inertia(type: str, *, w_min: float, w_max: float) -> Self
inertia(type: str, **kwargs: float) -> Self

Set inertia weight strategy.

Parameters:

Name Type Description Default
type str

Inertia strategy type: "constant" or "chaotic"

required

Other Parameters:

Name Type Description
factor float

Fixed inertia weight (canonical PSO: 1/(2*ln(2)) ≈ 0.721). Only used when type="constant".

w_min float

Minimum inertia weight. Only used when type="chaotic".

w_max float

Maximum inertia weight (must be > w_min). Only used when type="chaotic".

Returns:

Type Description
Self

The config instance for method chaining

Source code in commol/context/calibration.py
def inertia(self, type: str, **kwargs: float) -> Self:
    """
    Set inertia weight strategy.

    Parameters
    ----------
    type : str
        Inertia strategy type: "constant" or "chaotic"

    Other Parameters
    ----------------
    factor : float, default=0.721
        Fixed inertia weight (canonical PSO: 1/(2*ln(2)) ≈ 0.721).
        Only used when type="constant".
    w_min : float
        Minimum inertia weight. Only used when type="chaotic".
    w_max : float
        Maximum inertia weight (must be > w_min). Only used when type="chaotic".

    Returns
    -------
    Self
        The config instance for method chaining
    """
    # Validate against the enum first so unknown strings fail fast.
    if type not in PSOInertiaType:
        raise ValueError(
            f"Invalid inertia type: {type}. Must be one of {list(PSOInertiaType)}"
        )
    if type == PSOInertiaType.CONSTANT:
        self._inertia = PSOConstantInertia(factor=kwargs.get("factor", 0.721))
    elif type == PSOInertiaType.CHAOTIC:
        # Chaotic inertia has no defaults: both bounds must be supplied.
        self._inertia = PSOChaoticInertia(
            w_min=kwargs["w_min"], w_max=kwargs["w_max"]
        )
    else:
        # Defensive: reached only if PSOInertiaType gains a member with no
        # branch above.
        raise ValueError(
            f"Unknown inertia type: {type}. Use 'constant' or 'chaotic'"
        )
    return self

acceleration

acceleration(type: str, *, cognitive: float = 1.193, social: float = 1.193) -> Self
acceleration(type: str, *, c1_initial: float, c1_final: float, c2_initial: float, c2_final: float) -> Self
acceleration(type: str, **kwargs: float) -> Self

Set acceleration coefficient strategy.

Parameters:

Name Type Description Default
type str

Acceleration strategy type: "constant" or "time_varying"

required

Other Parameters:

Name Type Description
cognitive float

Cognitive coefficient (c1) - attraction to personal best. Only used when type="constant".

social float

Social coefficient (c2) - attraction to swarm best. Only used when type="constant".

c1_initial float

Initial cognitive factor (typically high, e.g., 2.5). Only used when type="time_varying".

c1_final float

Final cognitive factor (typically low, e.g., 0.5). Only used when type="time_varying".

c2_initial float

Initial social factor (typically low, e.g., 0.5). Only used when type="time_varying".

c2_final float

Final social factor (typically high, e.g., 2.5). Only used when type="time_varying".

Returns:

Type Description
Self

The config instance for method chaining

Source code in commol/context/calibration.py
def acceleration(self, type: str, **kwargs: float) -> Self:
    """
    Set the acceleration coefficient strategy for the swarm.

    Parameters
    ----------
    type : str
        Acceleration strategy type: "constant" or "time_varying"

    Other Parameters
    ----------------
    cognitive : float, default=1.193
        Cognitive coefficient (c1) - attraction to personal best.
        Only used when type="constant".
    social : float, default=1.193
        Social coefficient (c2) - attraction to swarm best.
        Only used when type="constant".
    c1_initial : float
        Initial cognitive factor (typically high, e.g., 2.5).
        Only used when type="time_varying".
    c1_final : float
        Final cognitive factor (typically low, e.g., 0.5).
        Only used when type="time_varying".
    c2_initial : float
        Initial social factor (typically low, e.g., 0.5).
        Only used when type="time_varying".
    c2_final : float
        Final social factor (typically high, e.g., 2.5).
        Only used when type="time_varying".

    Returns
    -------
    Self
        The config instance for method chaining
    """
    # Reject anything outside the known strategy enum up front.
    if type not in PSOAccelerationType:
        raise ValueError(
            f"Invalid acceleration type: {type}. "
            f"Must be one of {list(PSOAccelerationType)}"
        )

    if type == PSOAccelerationType.TIME_VARYING:
        # All four schedule endpoints are required; a missing
        # keyword surfaces as a KeyError for the caller.
        self._acceleration = PSOTimeVaryingAcceleration(
            c1_initial=kwargs["c1_initial"],
            c1_final=kwargs["c1_final"],
            c2_initial=kwargs["c2_initial"],
            c2_final=kwargs["c2_final"],
        )
    elif type == PSOAccelerationType.CONSTANT:
        # Fall back to the documented defaults when unspecified.
        cognitive = kwargs.get("cognitive", 1.193)
        social = kwargs.get("social", 1.193)
        self._acceleration = PSOConstantAcceleration(
            cognitive=cognitive,
            social=social,
        )
    else:
        # Defensive: unreachable while the enum has exactly two members.
        raise ValueError(
            f"Unknown acceleration type: {type}. Use 'constant' or 'time_varying'"
        )
    return self

mutation

mutation(strategy: str, *, scale: float, probability: float, application: str) -> Self

Enable mutation to help escape local optima.

Parameters:

Name Type Description Default
strategy str

Mutation distribution: "gaussian" or "cauchy". Cauchy has heavier tails for larger jumps.

required
scale float

Standard deviation (gaussian) or scale parameter (cauchy)

required
probability float

Mutation probability per iteration (0.0 to 1.0)

required
application str

Which particles to mutate: "global_best", "all_particles", or "below_average"

required

Returns:

Type Description
Self

The config instance for method chaining

Source code in commol/context/calibration.py
def mutation(
    self,
    strategy: str,
    *,
    scale: float,
    probability: float,
    application: str,
) -> Self:
    """
    Enable mutation to help the swarm escape local optima.

    Parameters
    ----------
    strategy : str
        Mutation distribution: "gaussian" or "cauchy".
        Cauchy has heavier tails for larger jumps.
    scale : float
        Standard deviation (gaussian) or scale parameter (cauchy)
    probability : float
        Mutation probability per iteration (0.0 to 1.0)
    application : str
        Which particles to mutate:
        "global_best", "all_particles", or "below_average"

    Returns
    -------
    Self
        The config instance for method chaining
    """
    # Validate strategy before application so the error precedence
    # matches the parameter order.
    if strategy not in PSOMutationStrategy:
        raise ValueError(
            f"Invalid mutation strategy: {strategy}. "
            f"Must be one of {list(PSOMutationStrategy)}"
        )
    if application not in PSOMutationApplication:
        raise ValueError(
            f"Invalid mutation application: {application}. "
            f"Must be one of {list(PSOMutationApplication)}"
        )

    config = PSOMutationConfig(
        strategy=strategy,
        scale=scale,
        probability=probability,
        application=application,
    )
    self._mutation = config
    return self

velocity

velocity(*, clamp_factor: float | None = None, mutation_threshold: float | None = None) -> Self

Configure velocity control.

Parameters:

Name | Type | Description | Default

- clamp_factor (float, default: None) — Velocity clamping as a fraction of the search space (0.0-1.0). Typically 0.1-0.2. Prevents explosive velocities.
- mutation_threshold (float, default: None) — Reinitialize velocities below this threshold. Typically 0.001-0.01. Prevents stagnation.

Returns:

Type Description
Self

The config instance for method chaining

Source code in commol/context/calibration.py
def velocity(
    self,
    *,
    clamp_factor: float | None = None,
    mutation_threshold: float | None = None,
) -> Self:
    """
    Configure velocity control for the swarm.

    Parameters
    ----------
    clamp_factor : float, optional
        Velocity clamping as fraction of search space (0.0-1.0).
        Typically 0.1-0.2. Prevents explosive velocities.
    mutation_threshold : float, optional
        Reinitialize velocities below this threshold.
        Typically 0.001-0.01. Prevents stagnation.

    Returns
    -------
    Self
        The config instance for method chaining
    """
    # Both knobs are optional; unset values are passed through as None
    # and left for PSOVelocityConfig to interpret.
    config = PSOVelocityConfig(
        clamp_factor=clamp_factor,
        mutation_threshold=mutation_threshold,
    )
    self._velocity = config
    return self

options: show_root_heading: true show_source: false heading_level: 3 show_docstring_attributes: true

Enumerations

LossFunction

LossFunction

Bases: StrEnum

Available loss functions for calibration.

Values

- SSE — Sum of Squared Errors
- RMSE — Root Mean Squared Error
- MAE — Mean Absolute Error
- WEIGHTED_SSE — Weighted Sum of Squared Errors

Source code in commol/context/constants.py
@unique
class LossFunction(StrEnum):
    """
    Available loss functions for calibration.

    Values
    ------
    SSE
        Sum of Squared Errors
    RMSE
        Root Mean Squared Error
    MAE
        Mean Absolute Error
    WEIGHTED_SSE
        Weighted Sum of Squared Errors
    """

    SSE = "sse"
    RMSE = "rmse"
    MAE = "mae"
    WEIGHTED_SSE = "weighted_sse"

options: show_root_heading: true show_source: false heading_level: 3 members: true

OptimizationAlgorithm

OptimizationAlgorithm

Bases: StrEnum

Available optimization algorithms.

Values

- NELDER_MEAD — Nelder-Mead simplex algorithm
- PARTICLE_SWARM — Particle Swarm Optimization

Source code in commol/context/constants.py
@unique
class OptimizationAlgorithm(StrEnum):
    """
    Available optimization algorithms.

    Values
    ------
    NELDER_MEAD
        Nelder-Mead simplex algorithm
    PARTICLE_SWARM
        Particle Swarm Optimization
    """

    NELDER_MEAD = "nelder_mead"
    PARTICLE_SWARM = "particle_swarm"

options: show_root_heading: true show_source: false heading_level: 3 members: true