Skip to content

Results API

Simulation result container and output file access.

SimulationResult

idfkit.simulation.result.SimulationResult dataclass

Result of an EnergyPlus simulation run.

Attributes:

Name Type Description
run_dir Path

Directory containing all simulation output.

success bool

Whether the simulation exited successfully.

exit_code int | None

Process exit code (None if timed out).

stdout str

Captured standard output.

stderr str

Captured standard error.

runtime_seconds float

Wall-clock execution time in seconds.

output_prefix str

Output file prefix (default "eplus").

fs FileSystem | None

Optional sync file system backend for reading output files.

async_fs AsyncFileSystem | None

Optional async file system backend for non-blocking reads. Set automatically by async_simulate when an AsyncFileSystem is provided.

Source code in src/idfkit/simulation/result.py
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
@dataclass(slots=True)
class SimulationResult:
    """Result of an EnergyPlus simulation run.

    Attributes:
        run_dir: Directory containing all simulation output.
        success: Whether the simulation exited successfully.
        exit_code: Process exit code (None if timed out).
        stdout: Captured standard output.
        stderr: Captured standard error.
        runtime_seconds: Wall-clock execution time in seconds.
        output_prefix: Output file prefix (default "eplus").
        fs: Optional sync file system backend for reading output files.
        async_fs: Optional async file system backend for non-blocking reads.
            Set automatically by [async_simulate][idfkit.simulation.async_runner.async_simulate] when an
            [AsyncFileSystem][idfkit.simulation.fs.AsyncFileSystem] is provided.
    """

    run_dir: Path
    success: bool
    exit_code: int | None
    stdout: str
    stderr: str
    runtime_seconds: float
    output_prefix: str = "eplus"
    fs: FileSystem | None = field(default=None, repr=False)
    async_fs: AsyncFileSystem | None = field(default=None, repr=False)
    # Lazy caches for the parsed-output accessors below.  The _UNSET sentinel
    # distinguishes "not computed yet" from a legitimately-cached None result
    # (e.g. "no .sql file was produced"), so a missing file is only probed once.
    _cached_errors: Any = field(default=_UNSET, init=False, repr=False)
    _cached_sql: Any = field(default=_UNSET, init=False, repr=False)
    _cached_variables: Any = field(default=_UNSET, init=False, repr=False)
    _cached_csv: Any = field(default=_UNSET, init=False, repr=False)
    _cached_html: Any = field(default=_UNSET, init=False, repr=False)

    def __post_init__(self) -> None:
        """Validate that at most one file-system backend was supplied."""
        if self.fs is not None and self.async_fs is not None:
            msg = "fs and async_fs are mutually exclusive — provide one or neither"
            raise ValueError(msg)

    @property
    def errors(self) -> ErrorReport:
        """Parsed error report from the .err file (lazily cached).

        Returns:
            Parsed ErrorReport from the simulation's .err output.
        """
        # NOTE(review): all cache access goes through object.__getattribute__ /
        # object.__setattr__ even though the dataclass is not frozen — plain
        # attribute access would also work; presumably a defensive pattern kept
        # for symmetry. Confirm before simplifying.
        cached = object.__getattribute__(self, "_cached_errors")
        if cached is not _UNSET:
            return cached  # type: ignore[no-any-return]
        err = self.err_path
        if err is None:
            # No .err file at all — represent as an empty report.
            report = ErrorReport.from_string("")
        elif self.fs is not None:
            # latin-1 decodes any byte sequence, so the read never raises
            # UnicodeDecodeError.
            text = self.fs.read_text(str(err), encoding="latin-1")
            report = ErrorReport.from_string(text)
        else:
            report = ErrorReport.from_file(err)
        object.__setattr__(self, "_cached_errors", report)
        return report

    @property
    def sql(self) -> SQLResult | None:
        """Parsed SQL output database (lazily cached).

        Returns:
            An SQLResult for querying time-series and tabular data,
            or None if no .sql file was produced.
        """
        cached = object.__getattribute__(self, "_cached_sql")
        if cached is not _UNSET:
            return cached  # type: ignore[no-any-return]
        path = self.sql_path
        if path is None:
            object.__setattr__(self, "_cached_sql", None)
            return None
        from .parsers.sql import SQLResult as _SQLResult

        if self.fs is not None:
            # sqlite3 requires a local file — download to a temp file
            # NOTE(review): the temp file is created with delete=False and is
            # never removed here; it persists until the OS temp dir is cleaned.
            # Presumably intentional so SQLResult can keep reading it after
            # this property returns — confirm intended lifecycle.
            data = self.fs.read_bytes(str(path))
            with tempfile.NamedTemporaryFile(suffix=".sql", delete=False) as tmp_file:
                tmp_file.write(data)
            result: SQLResult = _SQLResult(Path(tmp_file.name))
        else:
            result = _SQLResult(path)
        object.__setattr__(self, "_cached_sql", result)
        return result

    @property
    def variables(self) -> OutputVariableIndex | None:
        """Output variable/meter index from .rdd/.mdd files (lazily cached).

        Returns:
            An OutputVariableIndex for searching and injecting output
            variables, or None if no .rdd file was produced.
        """
        cached = object.__getattribute__(self, "_cached_variables")
        if cached is not _UNSET:
            return cached  # type: ignore[no-any-return]
        rdd = self.rdd_path
        if rdd is None:
            object.__setattr__(self, "_cached_variables", None)
            return None

        if self.fs is not None:
            from .parsers.rdd import parse_mdd, parse_rdd

            rdd_text = self.fs.read_text(str(rdd), encoding="latin-1")
            variables = parse_rdd(rdd_text)
            # The meter dictionary (.mdd) is optional: absent file -> no meters.
            mdd = self.mdd_path
            meters = parse_mdd(self.fs.read_text(str(mdd), encoding="latin-1")) if mdd is not None else ()
            from .outputs import OutputVariableIndex as _OutputVariableIndex

            result: OutputVariableIndex = _OutputVariableIndex(variables=variables, meters=meters)
        else:
            from .outputs import OutputVariableIndex as _OutputVariableIndex

            result = _OutputVariableIndex.from_files(rdd, self.mdd_path)
        object.__setattr__(self, "_cached_variables", result)
        return result

    @property
    def csv(self) -> CSVResult | None:
        """Parsed CSV output (lazily cached).

        Returns:
            A CSVResult with extracted column metadata and values,
            or None if no .csv file was produced.
        """
        cached = object.__getattribute__(self, "_cached_csv")
        if cached is not _UNSET:
            return cached  # type: ignore[no-any-return]
        path = self.csv_path
        if path is None:
            object.__setattr__(self, "_cached_csv", None)
            return None
        from .parsers.csv import CSVResult as _CSVResult

        if self.fs is not None:
            text = self.fs.read_text(str(path), encoding="latin-1")
            result: CSVResult = _CSVResult.from_string(text)
        else:
            result = _CSVResult.from_file(path)
        object.__setattr__(self, "_cached_csv", result)
        return result

    @property
    def html(self) -> HTMLResult | None:
        """Parsed HTML tabular output (lazily cached).

        Returns:
            An HTMLResult with extracted tables and titles,
            or None if no HTML file was produced.
        """
        cached = object.__getattribute__(self, "_cached_html")
        if cached is not _UNSET:
            return cached  # type: ignore[no-any-return]
        path = self.html_path
        if path is None:
            object.__setattr__(self, "_cached_html", None)
            return None
        from .parsers.html import HTMLResult as _HTMLResult

        if self.fs is not None:
            text = self.fs.read_text(str(path), encoding="latin-1")
            result: HTMLResult = _HTMLResult.from_string(text)
        else:
            result = _HTMLResult.from_file(path)
        object.__setattr__(self, "_cached_html", result)
        return result

    @property
    def sql_path(self) -> Path | None:
        """Path to the SQLite output file, if present."""
        return self._find_output_file(".sql")

    @property
    def err_path(self) -> Path | None:
        """Path to the .err output file, if present."""
        return self._find_output_file(".err")

    @property
    def eso_path(self) -> Path | None:
        """Path to the .eso output file, if present."""
        return self._find_output_file(".eso")

    @property
    def csv_path(self) -> Path | None:
        """Path to the .csv output file, if present."""
        return self._find_output_file(".csv")

    @property
    def html_path(self) -> Path | None:
        """Path to the HTML tabular output file, if present."""
        # Prefer the explicit tabular-report names ("...Table.htm[l]") over
        # generic .htm/.html matches.
        return (
            self._find_output_file("Table.htm")
            or self._find_output_file("Table.html")
            or self._find_output_file(".htm")
            or self._find_output_file(".html")
        )

    @property
    def rdd_path(self) -> Path | None:
        """Path to the .rdd output file, if present."""
        return self._find_output_file(".rdd")

    @property
    def mdd_path(self) -> Path | None:
        """Path to the .mdd output file, if present."""
        return self._find_output_file(".mdd")

    def _find_output_file(self, suffix: str) -> Path | None:
        """Find an output file by suffix.

        Looks for ``{prefix}out{suffix}`` first, then falls back to
        scanning the run directory for any file with the given suffix.

        Args:
            suffix: File suffix to look for (e.g. ".sql", ".err").

        Returns:
            Path to the file, or None if not found.

        Raises:
            RuntimeError: If only [async_fs][idfkit.simulation.result.SimulationResult.async_fs] is set (no sync access
                available).  Use the ``async_*`` methods instead.
        """
        if self.async_fs is not None and self.fs is None:
            msg = (
                "This SimulationResult was created with an AsyncFileSystem. "
                "Use the async accessors (e.g. async_errors(), async_sql()) "
                "instead of the sync properties."
            )
            raise RuntimeError(msg)

        primary = self.run_dir / f"{self.output_prefix}out{suffix}"

        if self.fs is not None:
            if self.fs.exists(str(primary)):
                return primary
            # Fallback: glob for matching files
            matches = self.fs.glob(str(self.run_dir), f"*{suffix}")
            if matches:
                return Path(matches[0])
            return None

        # Local path-based lookup
        if primary.is_file():
            return primary

        # Fallback: scan directory
        # (Path.iterdir() order is unspecified, so when several files match the
        # suffix, which one wins is arbitrary.)
        for p in self.run_dir.iterdir():
            if p.is_file() and p.name.endswith(suffix):
                return p

        return None

    # ------------------------------------------------------------------
    # Async accessors — non-blocking counterparts to the sync properties
    # ------------------------------------------------------------------

    async def async_errors(self) -> ErrorReport:
        """Parsed error report from the .err file (async, lazily cached).

        Non-blocking counterpart to [errors][idfkit.simulation.result.SimulationResult.errors] that uses
        [async_fs][idfkit.simulation.result.SimulationResult.async_fs] for file reads.

        Returns:
            Parsed ErrorReport from the simulation's .err output.
        """
        cached = object.__getattribute__(self, "_cached_errors")
        if cached is not _UNSET:
            return cached  # type: ignore[no-any-return]
        err = await self._async_find_output_file(".err")
        # Backend priority: async_fs, then sync fs, then direct local reads.
        if err is None:
            report = ErrorReport.from_string("")
        elif self.async_fs is not None:
            text = await self.async_fs.read_text(str(err), encoding="latin-1")
            report = ErrorReport.from_string(text)
        elif self.fs is not None:
            text = self.fs.read_text(str(err), encoding="latin-1")
            report = ErrorReport.from_string(text)
        else:
            report = ErrorReport.from_file(err)
        object.__setattr__(self, "_cached_errors", report)
        return report

    async def async_sql(self) -> SQLResult | None:
        """Parsed SQL output database (async, lazily cached).

        Non-blocking counterpart to [sql][idfkit.simulation.result.SimulationResult.sql] that uses
        [async_fs][idfkit.simulation.result.SimulationResult.async_fs] for file reads.

        Returns:
            An SQLResult for querying time-series and tabular data,
            or None if no .sql file was produced.
        """
        cached = object.__getattribute__(self, "_cached_sql")
        if cached is not _UNSET:
            return cached  # type: ignore[no-any-return]
        path = await self._async_find_output_file(".sql")
        if path is None:
            object.__setattr__(self, "_cached_sql", None)
            return None
        from .parsers.sql import SQLResult as _SQLResult

        # sqlite3 needs a local file; remote backends download to a temp file.
        # NOTE(review): as in the sync property, the delete=False temp file is
        # never removed here — confirm intended lifecycle.
        if self.async_fs is not None:
            data = await self.async_fs.read_bytes(str(path))
            with tempfile.NamedTemporaryFile(suffix=".sql", delete=False) as tmp_file:
                tmp_file.write(data)
            result: SQLResult = _SQLResult(Path(tmp_file.name))
        elif self.fs is not None:
            data = self.fs.read_bytes(str(path))
            with tempfile.NamedTemporaryFile(suffix=".sql", delete=False) as tmp_file:
                tmp_file.write(data)
            result = _SQLResult(Path(tmp_file.name))
        else:
            result = _SQLResult(path)
        object.__setattr__(self, "_cached_sql", result)
        return result

    async def async_variables(self) -> OutputVariableIndex | None:
        """Output variable/meter index (async, lazily cached).

        Non-blocking counterpart to [variables][idfkit.simulation.result.SimulationResult.variables] that uses
        [async_fs][idfkit.simulation.result.SimulationResult.async_fs] for file reads.

        Returns:
            An OutputVariableIndex for searching and injecting output
            variables, or None if no .rdd file was produced.
        """
        cached = object.__getattribute__(self, "_cached_variables")
        if cached is not _UNSET:
            return cached  # type: ignore[no-any-return]
        rdd = await self._async_find_output_file(".rdd")
        if rdd is None:
            object.__setattr__(self, "_cached_variables", None)
            return None

        if self.async_fs is not None:
            from .parsers.rdd import parse_mdd, parse_rdd

            rdd_text = await self.async_fs.read_text(str(rdd), encoding="latin-1")
            variables = parse_rdd(rdd_text)
            # .mdd is optional; absent file -> empty meters tuple.
            mdd = await self._async_find_output_file(".mdd")
            meters = parse_mdd(await self.async_fs.read_text(str(mdd), encoding="latin-1")) if mdd is not None else ()
            from .outputs import OutputVariableIndex as _OutputVariableIndex

            result: OutputVariableIndex = _OutputVariableIndex(variables=variables, meters=meters)
        elif self.fs is not None:
            # Sync-fs branch mirrors the `variables` property.
            from .parsers.rdd import parse_mdd, parse_rdd

            rdd_text = self.fs.read_text(str(rdd), encoding="latin-1")
            variables = parse_rdd(rdd_text)
            mdd = self._find_output_file(".mdd")
            meters = parse_mdd(self.fs.read_text(str(mdd), encoding="latin-1")) if mdd is not None else ()
            from .outputs import OutputVariableIndex as _OutputVariableIndex

            result = _OutputVariableIndex(variables=variables, meters=meters)
        else:
            from .outputs import OutputVariableIndex as _OutputVariableIndex

            result = _OutputVariableIndex.from_files(rdd, self.mdd_path)
        object.__setattr__(self, "_cached_variables", result)
        return result

    async def async_csv(self) -> CSVResult | None:
        """Parsed CSV output (async, lazily cached).

        Non-blocking counterpart to [csv][idfkit.simulation.result.SimulationResult.csv] that uses
        [async_fs][idfkit.simulation.result.SimulationResult.async_fs] for file reads.

        Returns:
            A CSVResult with extracted column metadata and values,
            or None if no .csv file was produced.
        """
        cached = object.__getattribute__(self, "_cached_csv")
        if cached is not _UNSET:
            return cached  # type: ignore[no-any-return]
        path = await self._async_find_output_file(".csv")
        if path is None:
            object.__setattr__(self, "_cached_csv", None)
            return None
        from .parsers.csv import CSVResult as _CSVResult

        if self.async_fs is not None:
            text = await self.async_fs.read_text(str(path), encoding="latin-1")
            result: CSVResult = _CSVResult.from_string(text)
        elif self.fs is not None:
            text = self.fs.read_text(str(path), encoding="latin-1")
            result = _CSVResult.from_string(text)
        else:
            result = _CSVResult.from_file(path)
        object.__setattr__(self, "_cached_csv", result)
        return result

    async def async_html(self) -> HTMLResult | None:
        """Parsed HTML tabular output (async, lazily cached).

        Non-blocking counterpart to [html][idfkit.simulation.result.SimulationResult.html] that uses
        [async_fs][idfkit.simulation.result.SimulationResult.async_fs] for file reads.

        Returns:
            An HTMLResult with extracted tables and titles,
            or None if no HTML file was produced.
        """
        cached = object.__getattribute__(self, "_cached_html")
        if cached is not _UNSET:
            return cached  # type: ignore[no-any-return]
        # Ordered lookup mirrors `html_path`: explicit "Table" files first.
        path = (
            await self._async_find_output_file("Table.htm")
            or await self._async_find_output_file("Table.html")
            or await self._async_find_output_file(".htm")
            or await self._async_find_output_file(".html")
        )
        if path is None:
            object.__setattr__(self, "_cached_html", None)
            return None
        from .parsers.html import HTMLResult as _HTMLResult

        if self.async_fs is not None:
            text = await self.async_fs.read_text(str(path), encoding="latin-1")
            result: HTMLResult = _HTMLResult.from_string(text)
        elif self.fs is not None:
            text = self.fs.read_text(str(path), encoding="latin-1")
            result = _HTMLResult.from_string(text)
        else:
            result = _HTMLResult.from_file(path)
        object.__setattr__(self, "_cached_html", result)
        return result

    async def _async_find_output_file(self, suffix: str) -> Path | None:
        """Async counterpart to [_find_output_file][].

        Uses [async_fs][idfkit.simulation.result.SimulationResult.async_fs] for non-blocking file lookups, falling back
        to [fs][] or local path checks.

        Args:
            suffix: File suffix to look for (e.g. ".sql", ".err").

        Returns:
            Path to the file, or None if not found.
        """
        primary = self.run_dir / f"{self.output_prefix}out{suffix}"

        if self.async_fs is not None:
            if await self.async_fs.exists(str(primary)):
                return primary
            matches = await self.async_fs.glob(str(self.run_dir), f"*{suffix}")
            if matches:
                return Path(matches[0])
            return None

        if self.fs is not None:
            if self.fs.exists(str(primary)):
                return primary
            matches = self.fs.glob(str(self.run_dir), f"*{suffix}")
            if matches:
                return Path(matches[0])
            return None

        # Local path-based lookup
        if primary.is_file():
            return primary

        # Fallback scan (iterdir() order is unspecified — see _find_output_file).
        for p in self.run_dir.iterdir():
            if p.is_file() and p.name.endswith(suffix):
                return p

        return None

    @classmethod
    def from_directory(
        cls,
        path: str | Path,
        *,
        output_prefix: str = "eplus",
        fs: FileSystem | None = None,
        async_fs: AsyncFileSystem | None = None,
    ) -> SimulationResult:
        """Reconstruct a SimulationResult from an existing output directory.

        Useful for inspecting results from a previous simulation run.

        Args:
            path: Path to the simulation output directory.
            output_prefix: Output file prefix used during the run.
            fs: Optional sync file system backend for reading output files.
            async_fs: Optional async file system backend for non-blocking reads.

        Returns:
            SimulationResult pointing to the existing output.
        """
        # Remote (fs/async_fs) paths are kept verbatim; resolve() only makes
        # sense on the local filesystem.
        run_dir = Path(path) if (fs is not None or async_fs is not None) else Path(path).resolve()
        return cls(
            run_dir=run_dir,
            # success/exit_code/stdout/stderr/runtime are synthesized: the
            # original process information is not recoverable from outputs.
            success=True,
            exit_code=0,
            stdout="",
            stderr="",
            runtime_seconds=0.0,
            output_prefix=output_prefix,
            fs=fs,
            async_fs=async_fs,
        )

run_dir instance-attribute

success instance-attribute

exit_code instance-attribute

stdout instance-attribute

stderr instance-attribute

runtime_seconds instance-attribute

output_prefix = 'eplus' class-attribute instance-attribute

fs = field(default=None, repr=False) class-attribute instance-attribute

async_fs = field(default=None, repr=False) class-attribute instance-attribute

errors property

Parsed error report from the .err file (lazily cached).

Returns:

Type Description
ErrorReport

Parsed ErrorReport from the simulation's .err output.

sql property

Parsed SQL output database (lazily cached).

Returns:

Type Description
SQLResult | None

An SQLResult for querying time-series and tabular data,

SQLResult | None

or None if no .sql file was produced.

variables property

Output variable/meter index from .rdd/.mdd files (lazily cached).

Returns:

Type Description
OutputVariableIndex | None

An OutputVariableIndex for searching and injecting output

OutputVariableIndex | None

variables, or None if no .rdd file was produced.

csv property

Parsed CSV output (lazily cached).

Returns:

Type Description
CSVResult | None

A CSVResult with extracted column metadata and values,

CSVResult | None

or None if no .csv file was produced.

html property

Parsed HTML tabular output (lazily cached).

Returns:

Type Description
HTMLResult | None

An HTMLResult with extracted tables and titles,

HTMLResult | None

or None if no HTML file was produced.

sql_path property

Path to the SQLite output file, if present.

err_path property

Path to the .err output file, if present.

eso_path property

Path to the .eso output file, if present.

csv_path property

Path to the .csv output file, if present.

html_path property

Path to the HTML tabular output file, if present.

rdd_path property

Path to the .rdd output file, if present.

mdd_path property

Path to the .mdd output file, if present.

from_directory(path, *, output_prefix='eplus', fs=None, async_fs=None) classmethod

Reconstruct a SimulationResult from an existing output directory.

Useful for inspecting results from a previous simulation run.

Parameters:

Name Type Description Default
path str | Path

Path to the simulation output directory.

required
output_prefix str

Output file prefix used during the run.

'eplus'
fs FileSystem | None

Optional sync file system backend for reading output files.

None
async_fs AsyncFileSystem | None

Optional async file system backend for non-blocking reads.

None

Returns:

Type Description
SimulationResult

SimulationResult pointing to the existing output.

Source code in src/idfkit/simulation/result.py
@classmethod
def from_directory(
    cls,
    path: str | Path,
    *,
    output_prefix: str = "eplus",
    fs: FileSystem | None = None,
    async_fs: AsyncFileSystem | None = None,
) -> SimulationResult:
    """Reconstruct a SimulationResult from an existing output directory.

    Useful for inspecting results from a previous simulation run.

    Args:
        path: Path to the simulation output directory.
        output_prefix: Output file prefix used during the run.
        fs: Optional sync file system backend for reading output files.
        async_fs: Optional async file system backend for non-blocking reads.

    Returns:
        SimulationResult pointing to the existing output.
    """
    # Remote (fs/async_fs) paths are kept verbatim; resolve() only makes
    # sense on the local filesystem.
    run_dir = Path(path) if (fs is not None or async_fs is not None) else Path(path).resolve()
    return cls(
        run_dir=run_dir,
        # Process details are synthesized — not recoverable from outputs alone.
        success=True,
        exit_code=0,
        stdout="",
        stderr="",
        runtime_seconds=0.0,
        output_prefix=output_prefix,
        fs=fs,
        async_fs=async_fs,
    )

ErrorReport

idfkit.simulation.parsers.err.ErrorReport dataclass

Parsed contents of an EnergyPlus .err file.

Attributes:

Name Type Description
fatal tuple[ErrorMessage, ...]

Fatal error messages.

severe tuple[ErrorMessage, ...]

Severe error messages.

warnings tuple[ErrorMessage, ...]

Warning messages.

info tuple[ErrorMessage, ...]

Informational messages.

warmup_converged bool

Whether warmup convergence was reported.

simulation_complete bool

Whether the simulation completed successfully.

raw_text str

The original unparsed file text.

Source code in src/idfkit/simulation/parsers/err.py
@dataclass(frozen=True, slots=True)
class ErrorReport:
    """Parsed contents of an EnergyPlus .err file.

    Attributes:
        fatal: Fatal error messages.
        severe: Severe error messages.
        warnings: Warning messages.
        info: Informational messages.
        warmup_converged: Whether warmup convergence was reported.
        simulation_complete: Whether the simulation completed successfully.
        raw_text: The original unparsed file text.
    """

    fatal: tuple[ErrorMessage, ...]
    severe: tuple[ErrorMessage, ...]
    warnings: tuple[ErrorMessage, ...]
    info: tuple[ErrorMessage, ...]
    warmup_converged: bool
    simulation_complete: bool
    raw_text: str

    @property
    def has_fatal(self) -> bool:
        """Whether any fatal errors were found."""
        return len(self.fatal) > 0

    @property
    def has_severe(self) -> bool:
        """Whether any severe errors were found."""
        return len(self.severe) > 0

    @property
    def fatal_count(self) -> int:
        """Number of fatal errors."""
        return len(self.fatal)

    @property
    def severe_count(self) -> int:
        """Number of severe errors."""
        return len(self.severe)

    @property
    def error_count(self) -> int:
        """Total number of fatal + severe errors."""
        return len(self.fatal) + len(self.severe)

    @property
    def warning_count(self) -> int:
        """Total number of warnings."""
        return len(self.warnings)

    def summary(self) -> str:
        """Return a human-readable summary of the error report.

        Returns:
            A multi-line summary string.
        """
        lines: list[str] = []
        lines.append(
            f"Fatal: {len(self.fatal)}, Severe: {len(self.severe)}, "
            f"Warnings: {len(self.warnings)}, Info: {len(self.info)}"
        )
        if self.warmup_converged:
            lines.append("Warmup: converged")
        if self.simulation_complete:
            lines.append("Simulation: completed successfully")
        elif self.has_fatal:
            lines.append("Simulation: terminated with fatal error(s)")
        return "\n".join(lines)

    @classmethod
    def from_file(cls, path: str | Path) -> ErrorReport:
        """Parse an .err file from disk.

        Args:
            path: Path to the .err file.

        Returns:
            Parsed ErrorReport.
        """
        text = Path(path).read_text(encoding="latin-1")
        return _parse_err(text)

    @classmethod
    def from_string(cls, text: str) -> ErrorReport:
        """Parse .err content from a string.

        Args:
            text: Raw .err file contents.

        Returns:
            Parsed ErrorReport.
        """
        return _parse_err(text)

fatal instance-attribute

severe instance-attribute

warnings instance-attribute

fatal_count property

Number of fatal errors.

severe_count property

Number of severe errors.

warning_count property

Total number of warnings.

has_fatal property

Whether any fatal errors were found.

has_severe property

Whether any severe errors were found.

summary()

Return a human-readable summary of the error report.

Returns:

Type Description
str

A multi-line summary string.

Source code in src/idfkit/simulation/parsers/err.py
def summary(self) -> str:
    """Return a human-readable summary of the error report.

    Returns:
        A multi-line summary string.
    """
    lines: list[str] = []
    # First line: per-severity tally, always present.
    lines.append(
        f"Fatal: {len(self.fatal)}, Severe: {len(self.severe)}, "
        f"Warnings: {len(self.warnings)}, Info: {len(self.info)}"
    )
    # Status lines are conditional; a run may emit neither "completed" nor
    # "terminated" (e.g. incomplete output without fatal errors).
    if self.warmup_converged:
        lines.append("Warmup: converged")
    if self.simulation_complete:
        lines.append("Simulation: completed successfully")
    elif self.has_fatal:
        lines.append("Simulation: terminated with fatal error(s)")
    return "\n".join(lines)

from_file(path) classmethod

Parse an .err file from disk.

Parameters:

Name Type Description Default
path str | Path

Path to the .err file.

required

Returns:

Type Description
ErrorReport

Parsed ErrorReport.

Source code in src/idfkit/simulation/parsers/err.py
@classmethod
def from_file(cls, path: str | Path) -> ErrorReport:
    """Parse an .err file from disk.

    Args:
        path: Path to the .err file.

    Returns:
        Parsed ErrorReport.
    """
    # latin-1 maps every byte to a code point, so decoding cannot fail even
    # on non-UTF-8 content.
    text = Path(path).read_text(encoding="latin-1")
    return _parse_err(text)

from_string(text) classmethod

Parse .err content from a string.

Parameters:

Name Type Description Default
text str

Raw .err file contents.

required

Returns:

Type Description
ErrorReport

Parsed ErrorReport.

Source code in src/idfkit/simulation/parsers/err.py
@classmethod
def from_string(cls, text: str) -> ErrorReport:
    """Parse .err content from a string.

    Args:
        text: Raw .err file contents.

    Returns:
        Parsed ErrorReport.
    """
    # Thin wrapper: delegates all parsing to the module-level _parse_err.
    return _parse_err(text)

ErrorMessage

idfkit.simulation.parsers.err.ErrorMessage dataclass

A single error/warning message from EnergyPlus.

Attributes:

Name Type Description
severity str

One of "Fatal", "Severe", "Warning", "Info".

message str

The primary message text.

details tuple[str, ...]

Additional continuation lines (** ~~~ ** lines).

Source code in src/idfkit/simulation/parsers/err.py
@dataclass(frozen=True, slots=True)
class ErrorMessage:
    """A single error/warning message from EnergyPlus.

    Attributes:
        severity: One of "Fatal", "Severe", "Warning", "Info".
        message: The primary message text.
        details: Additional continuation lines (``** ~~~   **`` lines).
    """

    severity: str
    message: str
    details: tuple[str, ...]

HTMLResult

idfkit.simulation.parsers.html.HTMLResult dataclass

Parsed HTML tabular output from an EnergyPlus simulation.

Attributes:

Name Type Description
tables list[HTMLTable]

All tables extracted from the file, in document order.

Source code in src/idfkit/simulation/parsers/html.py
@dataclass(slots=True)
class HTMLResult:
    """Parsed HTML tabular output from an EnergyPlus simulation.

    Attributes:
        tables: All tables extracted from the file, in document order.
    """

    tables: list[HTMLTable] = field(default_factory=lambda: [])

    @classmethod
    def from_file(cls, path: Path | str, encoding: str = "latin-1") -> HTMLResult:
        """Parse an EnergyPlus HTML output file.

        Args:
            path: Path to the HTML file (typically ``eplustblTable.html``
                or ``eplusoutTable.html``).
            encoding: File encoding (default ``latin-1``).

        Returns:
            Parsed [HTMLResult][idfkit.simulation.parsers.html.HTMLResult].
        """
        with open(path, encoding=encoding, errors="replace") as f:
            return cls.from_string(f.read())

    @classmethod
    def from_string(cls, html: str) -> HTMLResult:
        """Parse an HTML string.

        Args:
            html: The raw HTML content.

        Returns:
            Parsed [HTMLResult][idfkit.simulation.parsers.html.HTMLResult].
        """
        parser = _EnergyPlusHTMLParser()
        parser.feed(html)
        return cls(tables=parser.tables)

    def __len__(self) -> int:
        return len(self.tables)

    def __getitem__(self, index: int) -> HTMLTable:
        return self.tables[index]

    def __iter__(self) -> Iterator[HTMLTable]:
        return iter(self.tables)

    # ------------------------------------------------------------------
    # Convenience accessors (eppy-compatible patterns)
    # ------------------------------------------------------------------

    def titletable(self) -> list[tuple[str, list[list[str]]]]:
        """Return ``(title, rows)`` pairs like eppy's ``readhtml.titletable``.

        Each entry is ``(bold_title, [header_row, *data_rows])``.
        """
        result: list[tuple[str, list[list[str]]]] = []
        for t in self.tables:
            combined = [t.header, *t.rows] if t.header else list(t.rows)
            result.append((t.title, combined))
        return result

    def tablebyname(self, name: str) -> HTMLTable | None:
        """Find first table whose title contains *name* (case-insensitive)."""
        lower = name.lower()
        for t in self.tables:
            if lower in t.title.lower():
                return t
        return None

    def tablebyindex(self, index: int) -> HTMLTable | None:
        """Get a table by its zero-based position."""
        if 0 <= index < len(self.tables):
            return self.tables[index]
        return None

    def tablesbyreport(self, report_name: str) -> list[HTMLTable]:
        """Get all tables belonging to a specific report."""
        lower = report_name.lower()
        return [t for t in self.tables if lower in t.report_name.lower()]

tables = field(default_factory=(lambda: [])) class-attribute instance-attribute

from_file(path, encoding='latin-1') classmethod

Parse an EnergyPlus HTML output file.

Parameters:

Name Type Description Default
path Path | str

Path to the HTML file (typically eplustblTable.html or eplusoutTable.html).

required
encoding str

File encoding (default latin-1).

'latin-1'

Returns:

Type Description
HTMLResult

Parsed HTMLResult.

Source code in src/idfkit/simulation/parsers/html.py
@classmethod
def from_file(cls, path: Path | str, encoding: str = "latin-1") -> HTMLResult:
    """Load an EnergyPlus HTML output file and parse it.

    Args:
        path: Path to the HTML file (typically ``eplustblTable.html``
            or ``eplusoutTable.html``).
        encoding: File encoding (default ``latin-1``).

    Returns:
        Parsed [HTMLResult][idfkit.simulation.parsers.html.HTMLResult].
    """
    # Delegate all parsing to from_string; this wrapper only handles
    # reading, with errors="replace" so decoding never raises.
    with open(path, encoding=encoding, errors="replace") as handle:
        content = handle.read()
    return cls.from_string(content)

from_string(html) classmethod

Parse an HTML string.

Parameters:

Name Type Description Default
html str

The raw HTML content.

required

Returns:

Type Description
HTMLResult

Parsed HTMLResult.

Source code in src/idfkit/simulation/parsers/html.py
@classmethod
def from_string(cls, html: str) -> HTMLResult:
    """Parse raw HTML content into an HTMLResult.

    Args:
        html: The raw HTML content.

    Returns:
        Parsed [HTMLResult][idfkit.simulation.parsers.html.HTMLResult].
    """
    # The stateful parser accumulates tables as it consumes the markup.
    html_parser = _EnergyPlusHTMLParser()
    html_parser.feed(html)
    return cls(tables=html_parser.tables)

titletable()

Return (title, rows) pairs like eppy's readhtml.titletable.

Each entry is (bold_title, [header_row, *data_rows]).

Source code in src/idfkit/simulation/parsers/html.py
def titletable(self) -> list[tuple[str, list[list[str]]]]:
    """Return ``(title, rows)`` pairs like eppy's ``readhtml.titletable``.

    Each entry is ``(bold_title, [header_row, *data_rows])``.
    """
    pairs: list[tuple[str, list[list[str]]]] = []
    for table in self.tables:
        if table.header:
            merged = [table.header, *table.rows]
        else:
            # No header row: return a shallow copy of the data rows.
            merged = list(table.rows)
        pairs.append((table.title, merged))
    return pairs

tablebyname(name)

Find first table whose title contains name (case-insensitive).

Source code in src/idfkit/simulation/parsers/html.py
def tablebyname(self, name: str) -> HTMLTable | None:
    """Find first table whose title contains *name* (case-insensitive)."""
    needle = name.lower()
    # next() with a default gives first match or None in one expression.
    return next(
        (tbl for tbl in self.tables if needle in tbl.title.lower()),
        None,
    )

tablebyindex(index)

Get a table by its zero-based position.

Source code in src/idfkit/simulation/parsers/html.py
def tablebyindex(self, index: int) -> HTMLTable | None:
    """Get a table by its zero-based position."""
    # Negative indices are rejected explicitly; plain list indexing
    # would otherwise wrap around from the end.
    if index < 0:
        return None
    try:
        return self.tables[index]
    except IndexError:
        return None

tablesbyreport(report_name)

Get all tables belonging to a specific report.

Source code in src/idfkit/simulation/parsers/html.py
def tablesbyreport(self, report_name: str) -> list[HTMLTable]:
    """Get all tables belonging to a specific report (case-insensitive)."""
    needle = report_name.lower()
    return [tbl for tbl in self.tables if needle in tbl.report_name.lower()]

HTMLTable

idfkit.simulation.parsers.html.HTMLTable dataclass

A single table extracted from the EnergyPlus HTML output.

Attributes:

Name Type Description
title str

The bold title preceding the table (e.g. "Site and Source Energy").

header list[str]

Column headers (first <tr> with <th> cells).

rows list[list[str]]

Data rows as lists of strings.

report_name str

The top-level report name (e.g. "Annual Building Utility Performance Summary").

for_string str

The "For:" qualifier (e.g. "Entire Facility").

Source code in src/idfkit/simulation/parsers/html.py
@dataclass(slots=True)
class HTMLTable:
    """A single table extracted from the EnergyPlus HTML output.

    Attributes:
        title: The bold title preceding the table (e.g.
            ``"Site and Source Energy"``).
        header: Column headers (first ``<tr>`` with ``<th>`` cells).
        rows: Data rows as lists of strings.
        report_name: The top-level report name (e.g.
            ``"Annual Building Utility Performance Summary"``).
        for_string: The ``"For:"`` qualifier (e.g. ``"Entire Facility"``).
    """

    title: str
    header: list[str]
    rows: list[list[str]]
    report_name: str = ""
    for_string: str = ""

    def to_dict(self) -> dict[str, dict[str, str]]:
        """Convert to a dict mapping row headers to {col_header: value}.

        The first column is treated as the row key.  Remaining columns
        are keyed by their respective column headers (starting at
        index 1).  Duplicate row keys are silently overwritten by the
        last occurrence.

        This gives convenient dict-style access similar to eppy's
        ``readhtml.named_grid_h``.
        """
        result: dict[str, dict[str, str]] = {}
        for row in self.rows:
            if not row:
                continue
            row_key = row[0]
            entry: dict[str, str] = {}
            for i in range(1, len(self.header)):
                if i < len(row):
                    entry[self.header[i]] = row[i]
            result[row_key] = entry
        return result

title instance-attribute

header instance-attribute

rows instance-attribute

report_name = '' class-attribute instance-attribute

for_string = '' class-attribute instance-attribute

to_dict()

Convert to a dict mapping row headers to {col_header: value}.

The first column is treated as the row key. Remaining columns are keyed by their respective column headers (starting at index 1). Duplicate row keys are silently overwritten by the last occurrence.

This gives convenient dict-style access similar to eppy's readhtml.named_grid_h.

Source code in src/idfkit/simulation/parsers/html.py
def to_dict(self) -> dict[str, dict[str, str]]:
    """Convert to a dict mapping row headers to {col_header: value}.

    The first column is treated as the row key.  Remaining columns
    are keyed by their respective column headers (starting at
    index 1).  Duplicate row keys are silently overwritten by the
    last occurrence.

    This gives convenient dict-style access similar to eppy's
    ``readhtml.named_grid_h``.
    """
    mapping: dict[str, dict[str, str]] = {}
    for row in self.rows:
        if not row:
            # Empty rows have no key column; skip them.
            continue
        # Zip each non-key header with the matching cell; short rows
        # silently drop trailing columns.
        mapping[row[0]] = {
            self.header[col]: row[col]
            for col in range(1, len(self.header))
            if col < len(row)
        }
    return mapping