Create and run simulations

To create simulations with idmtools, create a Python file that imports the relevant packages and uses the classes and functions to meet your specific needs, and then run the script from the command line with python script_name.py.

For example, if you would like to create many simulations dynamically ("on-the-fly"), such as parameter sweeps, use the SimulationBuilder and TemplatedSimulations classes. If instead you would like to define each simulation individually ahead of time, use the Simulation class directly.

See the following examples for each of these scenarios:

SimulationBuilder example

"""
        This file demonstrates how to use ExperimentBuilder in PythonExperiment's builder.
        We are then adding the builder to PythonExperiment.

        Parameters for sweeping:
            |__ a = [0,1,2,3,4]

        Expect 5 sims with config parameters, note: "b" is not a sweep parameter, but it is depending on a's value:
            sim1: {a:0, b:2}
            sim2: {a:1, b:3}
            sim3: {a:2, b:4}
            sim4: {a:3, b:5}
            sim5: {a:4, b:6}
"""

import os
import sys
from functools import partial

from idmtools.builders import SimulationBuilder
from idmtools.core.platform_factory import platform
from idmtools.entities.experiment import Experiment
from idmtools.entities.templated_simulation import TemplatedSimulations
from idmtools_models.python.json_python_task import JSONConfiguredPythonTask
from idmtools_test import COMMON_INPUT_PATH


def param_update_ab(simulation, param, value):
    """Sweep callback that sets ``param`` to ``value`` on a simulation's task.

    When the swept parameter is "a", the dependent parameter "b" is also set
    to ``value + 2`` before "a" itself is written.

    Returns the tag dict produced by setting ``param`` (via set_parameter).
    """
    task = simulation.task
    # "b" is not swept directly; it is derived from "a".
    if param == "a":
        task.set_parameter("b", value + 2)
    return task.set_parameter(param, value)


if __name__ == "__main__":
    # define what platform we want to use. Here we use a context manager but if you prefer you can
    # use objects such as Platform('COMPS2') instead
    with platform('COMPS2'):
        # define our base task
        base_task = JSONConfiguredPythonTask(script_path=os.path.join(COMMON_INPUT_PATH, "python", "model1.py"),
                                             parameters=dict(c='c-value'))

        # define our input csv sweep
        builder = SimulationBuilder()
        # Sweep parameter "a" and make "b" depends on "a"
        setAB = partial(param_update_ab, param="a")
        builder.add_sweep_definition(setAB, range(0, 5))

        # now define we want to create a series of simulations using the base task and the sweep
        ts = TemplatedSimulations.from_task(base_task, tags=dict(c='c-value'))
        ts.add_builder(builder)

        # define our experiment with its metadata
        experiment = Experiment.from_template(ts,
                                              name=os.path.split(sys.argv[0])[1],
                                              tags={"string_tag": "test", "number_tag": 123}
                                              )

        # run experiment
        experiment.run()
        # wait until done with longer interval
        # in most real scenarios, you probably do not want to wait as this will wait until all simulations
        # associated with an experiment are done. We do it in our examples to show feature and to enable
        # testing of the scripts
        experiment.wait(refresh_interval=10)
        # use system status as the exit code
        sys.exit(0 if experiment.succeeded else -1)

Simulation example

"""
        This file demonstrates how to use StandAloneSimulationsBuilder in PythonExperiment's builder.

        we create 5 simulations and for each simulation, we set parameter 'a' = [0,4] and 'b' = a + 10:
        then add each updated simulation to builder
        then we are adding the builder to PythonExperiment
"""
import copy
import os
import sys

from idmtools.assets import AssetCollection
from idmtools.core.platform_factory import Platform
from idmtools.entities.experiment import Experiment
from idmtools.entities.simulation import Simulation
from idmtools_models.python.json_python_task import JSONConfiguredPythonTask
from idmtools_test import COMMON_INPUT_PATH

if __name__ == "__main__":

    # define our platform
    platform = Platform('COMPS2')

    # create experiment  object and define some extra assets
    assets_path = os.path.join(COMMON_INPUT_PATH, "python", "Assets")
    e = Experiment(name=os.path.split(sys.argv[0])[1],
                   tags={"string_tag": "test", "number_tag": 123},
                   assets=AssetCollection.from_directory(assets_path))

    # define paths to model and extra assets folder container more common assets
    model_path = os.path.join(COMMON_INPUT_PATH, "python", "model.py")

    # define our base task including the common assets. We could also add these assets to the experiment above
    base_task = JSONConfiguredPythonTask(script_path=model_path, envelope='parameters')

    base_simulation = Simulation.from_task(base_task)

    # now build our simulations
    for i in range(5):
        # first copy the simulation
        sim = copy.deepcopy(base_simulation)
        # configure it
        sim.task.set_parameter("a", i)
        sim.task.set_parameter("b", i + 10)
        # and add it to the simulations
        e.simulations.append(sim)

    # run the experiment
    e.run()
    # wait on it
    # in most real scenarios, you probably do not want to wait as this will wait until all simulations
    # associated with an experiment are done. We do it in our examples to show feature and to enable
    # testing of the scripts
    e.wait()
    # use system status as the exit code
    sys.exit(0 if e.succeeded else -1)

Many additional examples can be found in the /examples folder of the GitHub repository.