diff --git a/CHANGES.rst b/CHANGES.rst index fbc4fd89..7fef08a4 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -29,18 +29,30 @@ describe future plans. Enhancements ------------ - - Add 'dynamic_import()' (support 'ad_creator()' from device file). + - Add 'utils.xy_statistics()' (support 'plans.lineup2()'). + - Add 'utils.dynamic_import()' (support 'devices.ad_creator()' from device file). + - Add 'utils.MMap' (support 'plans.lineup2()'). + - Add 'utils.peak_full_width' (support 'plans.lineup2()'). Fixes ----- - - 'PVPositionerSoftDone' used an invalid subscription event type - in unusual cases (with fake ophyd simulated devices). + - In some cases, 'plans.lineup2()' appeared to succeed but motor was not in + aligned position. + - In unusual cases (with fake ophyd simulated devices), + 'devices.PVPositionerSoftDone' used an invalid subscription event type. Maintenance ----------- - In 'ad_creator()', convert text class name to class object. + - Refactor 'plans.lineup2()': new statistics computations & improve + diagnostics. + + Deprecations + ----------- + + - Use of 'PySumReg.SummationRegisters' to be removed in next major release. 1.7.1 ****** diff --git a/apstools/callbacks/__init__.py b/apstools/callbacks/__init__.py index d3a55657..cfbe407c 100644 --- a/apstools/callbacks/__init__.py +++ b/apstools/callbacks/__init__.py @@ -5,7 +5,6 @@ from .nexus_writer import NEXUS_RELEASE from .nexus_writer import NXWriter from .nexus_writer import NXWriterAPS -from .scan_signal_statistics import factor_fwhm from .scan_signal_statistics import SignalStatsCallback from .spec_file_writer import SCAN_ID_RESET_VALUE from .spec_file_writer import SPEC_TIME_FORMAT diff --git a/apstools/callbacks/scan_signal_statistics.py b/apstools/callbacks/scan_signal_statistics.py index d42dce22..8f0c3097 100644 --- a/apstools/callbacks/scan_signal_statistics.py +++ b/apstools/callbacks/scan_signal_statistics.py @@ -1,34 +1,20 @@ """ -Collect statistics on the signals used in 1-D scans. 
-==================================================== +Collect statistics on the first detector used in 1-D scans. +=========================================================== .. autosummary:: - ~factor_fwhm ~SignalStatsCallback """ -__all__ = """ -factor_fwhm -SignalStatsCallback -""".split() - -import math import logging import pyRestTable -import pysumreg +import pysumreg # deprecate, will remove in next major version bump logger = logging.getLogger(__name__) logger.info(__file__) -factor_fwhm = 2 * math.sqrt(2 * math.log(2)) -r""" -FWHM :math:`=2\sqrt{2\ln{2}}\cdot\sigma_c` - -see: https://statproofbook.github.io/P/norm-fwhm.html -""" - class SignalStatsCallback: """ @@ -48,6 +34,7 @@ class SignalStatsCallback: from bluesky import plans as bp from bluesky import preprocessors as bpp + from apstools.callbacks import SignalStatsCallback signal_stats = SignalStatsCallback() @@ -66,7 +53,7 @@ def _inner(): ~receiver ~report ~data_stream - ~stop_report + ~reporting .. rubric:: Internal API .. autosummary:: @@ -76,6 +63,8 @@ def _inner(): ~event ~start ~stop + ~analysis + ~_data ~_scanning ~_registers """ @@ -83,14 +72,24 @@ def _inner(): data_stream: str = "primary" """RunEngine document with signals to to watch.""" - stop_report: bool = True + reporting: bool = True """If ``True`` (default), call the ``report()`` method when a ``stop`` document is received.""" _scanning: bool = False """Is a run *in progress*?""" _registers: dict = {} - """Dictionary (keyed on Signal name) of ``SummationRegister()`` objects.""" + """ + Deprecated: Use 'analysis' instead, will remove in next major release. + + Dictionary (keyed on Signal name) of ``SummationRegister()`` objects. + """ + + _data: dict = {} + """Arrays of x & y data""" + + analysis: object = None + """Dictionary of statistical array analyses.""" # TODO: What happens when the run is paused? 
@@ -105,10 +104,11 @@ def clear(self): self._scanning = False self._detectors = [] self._motor = "" - self._registers = {} + self._registers = {} # deprecated, for removal self._descriptor_uid = None self._x_name = None self._y_names = [] + self._data = {} def descriptor(self, doc): """Receives 'descriptor' documents from the RunEngine.""" @@ -122,11 +122,16 @@ def descriptor(self, doc): # Pick the first motor signal. self._x_name = doc["hints"][self._motor]["fields"][0] + self._data[self._x_name] = [] + # Get the signals for each detector object.s for d in self._detectors: - self._y_names += doc["hints"][d]["fields"] + for y_name in doc["hints"][d]["fields"]: + self._y_names.append(y_name) + self._data[y_name] = [] # Keep statistics for each of the Y signals (vs. the one X signal). + # deprecated, for removal self._registers = {y: pysumreg.SummationRegisters() for y in self._y_names} def event(self, doc): @@ -138,8 +143,12 @@ def event(self, doc): # Collect the data for the signals. x = doc["data"][self._x_name] + self._data[self._x_name].append(x) + for yname in self._y_names: - self._registers[yname].add(x, doc["data"][yname]) + y = doc["data"][yname] + self._registers[yname].add(x, y) # deprecated, for removal + self._data[yname].append(y) def receiver(self, key, document): """Client method used to subscribe to the RunEngine.""" @@ -151,35 +160,24 @@ def receiver(self, key, document): def report(self): """Print a table with the collected statistics for each signal.""" - if len(self._registers) == 0: + if len(self._data) == 0: return - keys = "n centroid sigma x_at_max_y max_y min_y mean_y stddev_y".split() + + x_name = self._x_name + y_name = self._detectors[0] + + keys = """ + n centroid x_at_max_y + fwhm variance sigma + min_x mean_x max_x + min_y mean_y max_y + success reasons + """.split() + table = pyRestTable.Table() - if len(keys) <= len(self._registers): - # statistics in the column labels - table.labels = ["detector"] + keys - for yname, stats in 
self._registers.items(): - row = [yname] - for k in keys: - try: - v = getattr(stats, k) - except (ValueError, ZeroDivisionError): - v = 0 - row.append(v) - table.addRow(row) - else: - # signals in the column labels - table.labels = ["statistic"] + list(self._registers) - for k in keys: - row = [k] - for stats in self._registers.values(): - try: - v = getattr(stats, k) - except (ValueError, ZeroDivisionError): - v = 0 - row.append(v) - table.addRow(row) - print(f"Motor: {self._x_name}") + table.labels = "statistic value".split() + table.rows = [(k, self.analysis.get(k, "--")) for k in keys if k in self.analysis] + print(f"Motor: {x_name!r} Detector: {y_name!r}") print(table) def start(self, doc): @@ -192,8 +190,16 @@ def start(self, doc): def stop(self, doc): """Receives 'stop' documents from the RunEngine.""" + from ..utils.statistics import xy_statistics + if not self._scanning: return self._scanning = False - if self.stop_report: + + self.analysis = xy_statistics( + self._data[self._x_name], + self._data[self._detectors[0]], + ) + + if self.reporting: self.report() diff --git a/apstools/plans/alignment.py b/apstools/plans/alignment.py index 0ad7d295..a83d6069 100644 --- a/apstools/plans/alignment.py +++ b/apstools/plans/alignment.py @@ -13,6 +13,7 @@ import datetime import logging +import sys import numpy as np import pyRestTable @@ -33,12 +34,19 @@ from .doc_run import write_stream logger = logging.getLogger(__name__) +MAIN = sys.modules["__main__"] def lineup( # fmt: off - detectors, axis, minus, plus, npts, - time_s=0.1, peak_factor=4, width_factor=0.8, + detectors, + axis, + minus, + plus, + npts, + time_s=0.1, + peak_factor=4, + width_factor=0.8, feature="cen", rescan=True, bec=None, @@ -104,9 +112,12 @@ def lineup( RE(lineup(diode, foemirror.theta, -30, 30, 30, 1.0)) """ - bec = bec or utils.ipython_shell_namespace().get("bec") if bec is None: - raise ValueError("Cannot find BestEffortCallback() instance.") + if "bec" in dir(MAIN): + # get from __main__ 
namespace + bec = getattr(MAIN, "bec") + else: + raise ValueError("Cannot find BestEffortCallback() instance.") bec.enable_plots() if not isinstance(detectors, (tuple, list)): @@ -274,10 +285,9 @@ def guess_erf_params(x_data, y_data): low_x_data = np.min(x_data_sorted) high_x_data = np.max(x_data_sorted) - # Estimate wid as a fraction of the range. This is very arbitrary and might need tuning! - width = ( - high_x_data - low_x_data - ) / 10 # This is a guess and might need adjustment based on your data's characteristics + # Estimate width as a fraction of the range. This is very arbitrary and might need tuning! + # This is a guess and might need adjustment based on your data's characteristics. + width = (high_x_data - low_x_data) / 10 # Estimate the midpoint of the x values midpoint = x_data[int(len(x_data) / 2)] @@ -327,27 +337,50 @@ def erf_model(x, low, high, width, midpoint): def lineup2( # fmt: off - detectors, mover, rel_start, rel_end, points, - peak_factor=2.5, width_factor=0.8, + detectors, + mover, + rel_start, + rel_end, + points, + peak_factor=1.5, + width_factor=0.8, feature="centroid", nscans=2, signal_stats=None, + reporting=None, md={}, # fmt: on ): """ - Lineup and center a given mover, relative to current position. + Lineup and center a given mover, relative to the current position. + + Step scan ``mover`` and measure ``detectors`` at each step. Peak analysis is + based on data collected during the scan for the **first** detector (in the + list of ``detectors``) *vs.* ``mover``. If a peak is detected, move the + ``mover`` to the feature position (default is ``"centroid"``). If + ``nscans>1``, adjust the scan range based on the apparent FWHM and rescan. + At most, a total of ``nscans`` will be run. + + If unable to identify a peak, ``lineup2()`` returns ``mover`` to its + original position and returns (no more rescans). 
If ``lineup2()`` fails to + identify a peak that you can observe, consider re-running with increased + ``points``, changed range(``rel_start`` and ``rel_end``), and/or changed + terms (``peak_factor`` and ``width_factor``). + + Increase ``nscans`` for additional fine-tuning of the ``centroid`` and + ``fwhm`` parameters. It is probably not necessary to set ``nscans>5``. + + .. index:: Bluesky Plan; lineup2; lineup This plan can be used in the queueserver, Jupyter notebooks, and IPython consoles. It does not require the bluesky BestEffortCallback. Instead, it - uses *PySumReg* [#pysumreg]_ to compute statistics for each signal in a 1-D + uses *numpy* [#numpy]_ to compute statistics for each signal in a 1-D scan. - New in release 1.6.18 + - New in release 1.6.18. + - Changed from PySumReg to numpy in release 1.7.2. - .. index:: Bluesky Plan; lineup2; lineup - - PARAMETERS + .. rubric:: PARAMETERS detectors *Readable* or [*Readable*]: Detector object or list of detector objects (each is a Device or @@ -383,8 +416,10 @@ def lineup2( x_at_min_y x location of y minimum ========== ==================== - Statistical analysis provided by *PySumReg*. [#pysumreg]_ + Statistical analysis provided by *numpy*. [#numpy]_ + (Previous versions used *PySumReg*. [#pysumreg]_) + .. [#numpy] https://numpy.org/ .. [#pysumreg] https://prjemian.github.io/pysumreg/latest/ nscans *int*: @@ -394,15 +429,37 @@ def lineup2( signal_stats *object*: Caller could provide an object of :class:`~apstools.callbacks.scan_signal_statistics.SignalStatsCallback`. - If ``None``, this function will search for the ``signal_stats`` signal - in the global namespace. If not still found, a new one will be created - for the brief lifetime of this function. + + When ``signal_stats`` is not provided, this function will search for the + ``signal_stats`` signal in the global (``__main__``) namespace. If not + still found, a new ``signal_stats`` will be created and then added to + the global namespace. 
+ + Results of the analysis are available in ``signal_stats.analysis``. The + data used for the analysis are available in ``signal_stats._data``. The + PySumReg analysis remains available (for now) + in ``signal_stats._registers``. md *dict*: User-supplied metadata for this scan. + + ---- + + In addition to the many keys provided in ``signal_stats.analysis`` by + :class:`~apstools.callbacks.scan_signal_statistics.SignalStatsCallback`, + this plan adds two keys as follows: + + .. rubric:: Keys added to signal_stats.analysis dictionary + + success : bool + Did ``lineup2()`` identify a peak and set the mover to its value? + + reasons : [str] + If ``success=False``, this is a list of the reasons why ``lineup2()`` + did not identify a peak. """ from ..callbacks import SignalStatsCallback - from ..callbacks import factor_fwhm + from .. import utils if not isinstance(detectors, (tuple, list)): detectors = [detectors] @@ -410,13 +467,28 @@ def lineup2( _md = dict(purpose="alignment") _md.update(md or {}) + # Do not move the positioner outside the limits of the first scan. + try: + m_pos = mover.position + except AttributeError: + m_pos = mover.get() + m_lo = m_pos + rel_start + m_hi = m_pos + rel_end + if signal_stats is None: + # Print report() when stop document is received. + if "signal_stats" in dir(MAIN): + # get from __main__ namespace + signal_stats = getattr(MAIN, "signal_stats") + else: signal_stats = SignalStatsCallback() - signal_stats.stop_report = True + # Add signal_stats to __main__ namespace. Users have requested + # a way to determine if a lineup failed and why. + setattr(MAIN, "signal_stats", signal_stats) + signal_stats.reporting = True if reporting is None else reporting else: - signal_stats.stop_report = False # Turn this automation off. 
+ signal_stats.reporting = False if reporting is None else reporting # Allow for feature to be defined using a name from PeakStats. xref_PeakStats = { @@ -428,37 +500,66 @@ def lineup2( # translate from PeakStats feature to SignalStats feature = xref_PeakStats.get(feature, feature) - def get_x_by_feature(): - """Return the X value of the specified ``feature``.""" - stats = principal_signal_stats() - if strong_peak(stats) and not too_wide(stats): + def find_peak_position(): + """Return the X value of the specified 'feature'.""" + stats = signal_stats.analysis + logging.debug("stats: %s", stats) + peak_is_strong = strong_peak(stats) + peak_too_wide = too_wide(stats) + peak_width_zero = stats.fwhm == 0 + peak_sigma_zero = stats.get("sigma", 0) == 0 + findings = [ + peak_is_strong, + not peak_too_wide, + not peak_width_zero, + not peak_sigma_zero, + ] + if all(findings): + stats["success"] = True return getattr(stats, feature) - def principal_signal_stats() -> str: - """Return the name of the first detector Signal.""" - return signal_stats._registers[signal_stats._y_names[0]] + # Save these into stats for the caller to find. + stats["success"] = False + stats["reasons"] = [] + if not peak_is_strong: + stats["reasons"].append("No strong peak found.") + if peak_too_wide: + stats["reasons"].append("Peak is too wide.") + if peak_width_zero: + stats["reasons"].append("FWHM is zero.") + if peak_sigma_zero: + stats["reasons"].append("Computed peak sigma is zero.") + print(" ".join(stats["reasons"])) + logger.debug("No peak found. 
Reasons: %s", stats["reasons"]) + logger.debug("stats: %s", stats) def strong_peak(stats) -> bool: """Determine if the peak is strong.""" try: - value = (stats.max_y - stats.min_y) / stats.stddev_y - return abs(value) > peak_factor - except ZeroDivisionError: # not enough samples - try: - value = stats.max_y / stats.min_y - return abs(value) > peak_factor - except ZeroDivisionError: - return False + denominator = stats.mean_y - stats.min_y + if denominator == 0: + raise ZeroDivisionError() + return abs((stats.max_y - stats.min_y) / denominator) > peak_factor + except (AttributeError, ZeroDivisionError): + return False def too_wide(stats): """Does the measured peak width fill the full range of X?""" + fallback_errors = ( + AttributeError, # no statistics available + ValueError, # math domain error: variance<0 (noise, no clear peak) + ZeroDivisionError, # not enough samples + ) try: - x_range = stats.max_x - stats.min_x - fwhm = stats.sigma * factor_fwhm - return fwhm > width_factor * x_range - except ZeroDivisionError: # not enough samples + return stats.fwhm > width_factor * (stats.max_x - stats.min_x) + except fallback_errors as reason: + logger.warning("Cannot detect width: %s", reason) + logger.debug("Statistics: %s", stats) return True + if "plan_name" not in _md: + _md["plan_name"] = "lineup2" + @bpp.subs_decorator(signal_stats.receiver) def _inner(): """Run the scan, collecting statistics at each step.""" @@ -469,17 +570,26 @@ def _inner(): yield from _inner() # Run the scan. nscans -= 1 - target = get_x_by_feature() + target = find_peak_position() if target is None: nscans = 0 # Nothing found, no point scanning again. + yield from bps.mv(mover, m_pos) # back to starting position + logger.debug("Moved %s to %s: %f", mover.name, "original position", m_pos) else: + # Maintain absolute range: m_lo <= target <= m_hi + target = min(max(m_lo, target), m_hi) yield from bps.mv(mover, target) # Move to the feature position. 
logger.info("Moved %s to %s: %f", mover.name, feature, target) if nscans > 0: # move the end points for the next scan - rel_end = principal_signal_stats().sigma * factor_fwhm - rel_start = -rel_end + offset = 2 * signal_stats.analysis.fwhm + # Can't move outside absolute range of m_lo..m_hi + rel_start = max(m_lo - target, -offset) + rel_end = min(m_hi - target, offset) + + if signal_stats.reporting: # Final report + signal_stats.report() class TuneAxis(object): diff --git a/apstools/plans/tests/test_alignment.py b/apstools/plans/tests/test_alignment.py index 5354821c..385a1a04 100644 --- a/apstools/plans/tests/test_alignment.py +++ b/apstools/plans/tests/test_alignment.py @@ -43,6 +43,12 @@ scaler1.select_channels() + +# TODO: Refactor 'pvoigt' as Python-only noisy pseudo-voigt signal, +# a la 'ophyd.sim.noisy_det' to speed up testing of alignment plans. +# Use 'ophyd.sim.motor' as its positioner. +# Currently, the 14 tests of 'lineup2()' take ~1m 40s. + # First, must be connected to m1. 
pvoigt = SynPseudoVoigt(name="pvoigt", motor=axis, motor_field=axis.name) pvoigt.kind = "hinted" @@ -342,14 +348,14 @@ class ImitatorForTesting(Device): "TestParameters", "peak base noise center sigma xlo xhi npts nscans tol outcome" ) parms_signal_but_high_background = _TestParameters(1e5, 1e6, 10, 0.1, 0.2, -0.7, 0.5, 11, 1, 0.05, False) -parms_model_peak = _TestParameters(1e5, 0, 10, 0.1, 0.2, -0.7, 0.5, 11, 1, 0.05, True) +parms_model_peak = _TestParameters(1e5, 0, 10, 0.1, 0.2, -0.7, 0.5, 11, 1, 0.06, True) # parms_high_background_poor_resolution = _TestParameters(1e5, 1e4, 10, 0.1, 0.2, -0.7, 0.5, 11, 1, 0.1, True) -parms_not_much_better = _TestParameters(1e5, 1e4, 10, 0.1, 0.2, -0.7, 0.5, 11, 2, 0.05, True) +parms_not_much_better = _TestParameters(1e5, 1e4, 10, 0.1, 0.2, -0.7, 0.5, 11, 2, 0.08, True) parms_neg_peak_1x_base = _TestParameters(-1e5, -1e4, 1e-8, 0.1, 0.2, -0.7, 0.5, 11, 1, 0.1, True) -parms_neg_base = _TestParameters(1e5, -10, 10, 0.1, 0.2, -0.7, 0.5, 11, 1, 0.05, True) -parms_small_signal_zero_base = _TestParameters(1e-5, 0, 1e-8, 0.1, 0.2, -0.7, 0.5, 11, 1, 0.05, True) -parms_neg_small_signal_zero_base = _TestParameters(-1e5, 0, 1e-8, 0.1, 0.2, -0.7, 0.5, 11, 1, 0.05, True) -parms_small_signal_finite_base = _TestParameters(1e-5, 1e-7, 1e-8, 0.1, 0.2, -0.7, 0.5, 11, 1, 0.05, True) +parms_neg_base = _TestParameters(1e5, -10, 10, 0.1, 0.2, -0.7, 0.5, 11, 1, 0.06, True) # +parms_small_signal_zero_base = _TestParameters(1e-5, 0, 1e-8, 0.1, 0.2, -0.7, 0.5, 11, 1, 0.06, True) # +parms_neg_small_signal_zero_base = _TestParameters(-1e5, 0, 1e-8, 0.1, 0.2, -0.7, 0.5, 11, 1, 0.06, True) +parms_small_signal_finite_base = _TestParameters(1e-5, 1e-7, 1e-8, 0.1, 0.2, -0.7, 0.5, 11, 1, 0.06, True) # parms_no_signal_only_noise = _TestParameters(0, 0, 1e-8, 0.1, 0.2, -1.0, 0.5, 11, 1, 0.05, False) parms_bkg_plus_noise = _TestParameters(0, 1, 0.1, 0.1, 0.2, -0.7, 0.5, 11, 1, 0.05, False) parms_bkg_plus_big_noise = _TestParameters(0, 1, 100, 0.1, 0.2, -0.7, 0.5, 
11, 1, 0.05, False) @@ -363,11 +369,11 @@ class ImitatorForTesting(Device): parms_signal_but_high_background, parms_model_peak, parms_high_background_poor_resolution, - parms_not_much_better, + parms_not_much_better, # TODO: sigma parms_neg_peak_1x_base, parms_neg_base, parms_small_signal_zero_base, - parms_neg_small_signal_zero_base, + parms_neg_small_signal_zero_base, # TODO: sigma parms_small_signal_finite_base, parms_no_signal_only_noise, parms_bkg_plus_noise, @@ -404,7 +410,7 @@ def test_lineup2_signal_permutations(parms: _TestParameters): change_noisy_parameters() try: - centroid = stats._registers[detector.name].centroid + centroid = stats.analysis.centroid if parms.outcome: assert math.isclose(m1.position, centroid, abs_tol=parms.tol) assert math.isclose(parms.center, centroid, abs_tol=parms.tol) diff --git a/apstools/utils/__init__.py b/apstools/utils/__init__.py index 134249f9..39f38ef9 100644 --- a/apstools/utils/__init__.py +++ b/apstools/utils/__init__.py @@ -75,6 +75,7 @@ from .misc import to_unicode_or_bust from .misc import trim_string_for_EPICS from .misc import unix +from .mmap_dict import MMap from .override_parameters import OverrideParameters from .plot import plotxy from .plot import select_live_plot @@ -91,6 +92,10 @@ from .spreadsheet import ExcelDatabaseFileBase from .spreadsheet import ExcelDatabaseFileGeneric from .spreadsheet import ExcelReadError +from .statistics import array_index +from .statistics import xy_statistics +from .statistics import factor_fwhm +from .statistics import peak_full_width from .time_constants import DAY from .time_constants import HOUR from .time_constants import MINUTE diff --git a/apstools/utils/mmap_dict.py b/apstools/utils/mmap_dict.py new file mode 100644 index 00000000..82614079 --- /dev/null +++ b/apstools/utils/mmap_dict.py @@ -0,0 +1,98 @@ +""" +MMap +==== + +A dictionary where the keys are also accessible as attributes. 
+""" + +from collections.abc import MutableMapping + + +class MMap(MutableMapping): + """ + Dictionary with keys accessible as attributes (read-only). + + All keys are accessible for read and write by dictionary-style references + ('mmap["key"]' or 'mmap.get(key)'). + + Since dictionary keys are accessible as attributes, the names of existing + attributes (such as 'clear', 'items', keys', values', ...) are reserved and + cannot be used as dictionary keys. + + EXAMPLE:: + + >>> mmap = MMap(a=1, b=2) + >>> mmap + MMap(a=1, b=2) + >>> mmap["a] + 1 + >>> mmap["a"] = 2 + >>> mmap.a + 2 + >>> mmap.a = 3 + AttributeError: Unknown: attribute='a' + >>> mmap + MMap(a=2, b=2) + >>> mmap["clear"] = 5 + KeyError: "key='clear' is a reserved name." + + You can retrieve the value of a key by any of these dictionary methods:: + + mmap["a"] + mmap.get("a") + # Or supply a default if the key does not exist. + # Here, the default is 1.2345 + mmap.get("a", 1.2345) + + and by any attribute method:: + + mmap.a + getattr(mmap, "a") + # Or supply a default if the key does not exist. 
+ getattr(mmap, "a", 1.2345) + """ + + _db = {} + attribute_names = dir(dict) + "_db".split() + + def __init__(self, **kwargs): + self.clear() + self.update(**kwargs) + + def clear(self): + self._db = {} + + def __delitem__(self, key): + del self._db[key] + + def __dir__(self): + all = dir(super()) + all += "_db clear update keys items values get pop popitem".split() + all += list(self) + return sorted(set(all)) + + def __getattr__(self, key): + return self._db[key] + + def __getitem__(self, key): + return self._db[key] + + def __iter__(self): + yield from self._db + + def __len__(self): + return len(self._db) + + def __repr__(self): + d = ", ".join([f"{k}={v!r}" for k, v in self._db.items()]) + return f"{self.__class__.__name__}({d})" + + def __setattr__(self, attribute, value): + if attribute not in self.attribute_names: + raise AttributeError(f"Unknown: {attribute=!r}") + super().__setattr__(attribute, value) + + def __setitem__(self, key, value): + if key in self.attribute_names: + raise KeyError(f"{key=!r} is a reserved name.") + self._db[key] = value diff --git a/apstools/utils/statistics.py b/apstools/utils/statistics.py new file mode 100644 index 00000000..c4858b84 --- /dev/null +++ b/apstools/utils/statistics.py @@ -0,0 +1,258 @@ +""" +Statistics +==================================================== + +.. autosummary:: + + ~array_index + ~factor_fwhm + ~peak_full_width + ~xy_statistics + +""" + +import logging +import math + +import numpy as np + +from .mmap_dict import MMap + +logger = logging.getLogger(__name__) +logger.info(__file__) + +factor_fwhm = 2 * math.sqrt(2 * math.log(2)) +r""" +FWHM :math:`=2\sqrt{2\ln{2}}\cdot\sigma_c` + +Conversion factor between full-width at half-maximum peak value and the computed +standard deviation, :math:`\sigma_c`, of the center of mass (centroid), +:math:`\mu`, of the peak. + +.. 
seealso:: https://statproofbook.github.io/P/norm-fwhm.html """ + + +def array_index(arr, value) -> int: + """Return the index for the first occurrence of value in arr.""" + return np.where(arr == value)[0][0] + + +def peak_full_width(x, y, positive=True) -> float: + """ + Assess *apparent* FWHM by inspecting the data. + + 1. Set a goal of half-way between min and max y. + + 2. Start at the index of the max (min if positive=False) y. + + 3. Find the first indices on low and high side of starting position where + the goal is passed. Limited by the range of data available. + + 4. For each side, use linear interpolation to compute the x value at the + goal value. + + 5. Compute FWHM as the positive difference between the two sides. + + """ + # Quick, easy decision first: + if y.min() == y.max(): + return x.max() - x.min() + + goal = (y.max() + y.min()) / 2 # half-max + logger.debug("searching for apparent peak width: goal=%f", goal) + + value = y.max() if positive else y.min() + i_lo = array_index(y, value) + i_hi = i_lo + + if positive: + while i_lo > 0 and y[i_lo] > goal: + logger.debug(f"{i_lo=} {i_hi=} {len(y)=}") + i_lo -= 1 + while i_hi + 1 < len(y) and y[i_hi] > goal: + i_hi += 1 + logger.debug(f"{i_lo=} {i_hi=} {len(y)=}") + else: + while i_lo > 0 and y[i_lo] < goal: + logger.debug(f"{i_lo=} {i_hi=} {len(y)=}") + i_lo -= 1 + while i_hi + 1 < len(y) and y[i_hi] < goal: + i_hi += 1 + logger.debug(f"{i_lo=} {i_hi=} {len(y)=}") + + def interpolate_2_point(value, lo, hi): + """Could fail if y2 == y1: ZeroDivisionError.""" + lo, hi = sorted([lo, hi]) # take no chances + x1, x2 = x[lo], x[hi] + y1, y2 = y[lo], y[hi] + return (value - y1) * (x2 - x1) / (y2 - y1) + x1 + + x_hi = interpolate_2_point(goal, i_hi - 1, i_hi) + x_lo = interpolate_2_point(goal, i_lo, i_lo + 1) + fwhm = abs(x_hi - x_lo) # absolute value, take no chances + logger.debug("Found apparent FWHM: fwhm=%f", fwhm) + return fwhm + + +def xy_statistics(x: list, y: list = None) -> MMap: + r""" + Compute 
statistical measures of a 1-D array (or weighted array), using *numpy*. + + Results are returned in :class:`~apstools.utils.mmap_dict.MMap` dictionary. + When supplied, :math:`\vec{y}` will be used as the weights of :math:`\vec{x}`. + Uses *numpy* [#numpy]_ as an alternative to *PySumReg*. [#PySumReg]_ + + .. [#numpy] https://numpy.org/ + .. [#PySumReg] https://prjemian.github.io/pysumreg/index.html + + PARAMETERS + + x : list | numpy.ndarray + :math:`\vec{x}`: 1-D array of numbers. + y : list | numpy.ndarray + :math:`\vec{y}`: (optional) 1-D array of numbers. + :math:`\vec{x}` and :math:`\vec{y}` must be of the same length. + Used as weights for :math:`\vec{x}` in fitting analyses. + + .. rubric:: MMap Dictionary Keys + + These keys are always defined. + + ============== ====== ============================================== + key type description + ============== ====== ============================================== + ``max_x`` float Maximum value of :math:`\vec{x}`. + ``mean_x`` float :math:`\bar{x}`: Mean value of :math:`\vec{x}`. + ``median_x`` float Median value of :math:`\vec{x}`. + ``min_x`` float Minimum value of :math:`\vec{x}`. + ``n`` int :math:`n`: Length of :math:`\vec{x}`. + ``range_x`` float Difference between maximum and minimum values of :math:`\vec{x}`. + ``stddev_x`` float :math:`\sigma_x` : Standard deviation of :math:`\vec{x}`. + ============== ====== ============================================== + + These keys are added when :math:`\vec{y}` is provided. + Requires :math:`\vec{x}` & :math:`\vec{y}` to be the same length. + + ============== ====== ============================================== + key type description + ============== ====== ============================================== + ``centroid`` float :math:`\mu` : Average of :math:`\vec{x}`, weighted by :math:`\vec{y}`. [#centroid]_ + ``fwhm`` float Apparent width of the peak at half the maximum value of :math:`\vec{y}`. [#fwhm]_ + ``max_y`` float Maximum value of :math:`\vec{y}`. 
+ ``mean_y`` float :math:`\bar{y}`: Mean value of :math:`\vec{y}`. + ``median_y`` float Median value of :math:`\vec{y}`. + ``min_y`` float Minimum value of :math:`\vec{y}`. + ``range_y`` float Difference between maximum and minimum values of :math:`\vec{y}`. + ``stddev_y`` float :math:`\sigma_y` : Standard deviation of :math:`\vec{y}`. + ============== ====== ============================================== + + .. [#centroid] centroid: :math:`\mu = {{\sum_i^n{x_i \cdot y_i}} / {\sum_i^n{y_i}}}` + + In the special case when :math:`\vec{y}` is constant, :math:`\mu` is + set to halfway between ``min_x`` and ``max_x``. + + .. seealso:: https://en.wikipedia.org/wiki/Weighted_arithmetic_mean + + .. [#fwhm] FWHM: In the special case when :math:`\vec{y}` is constant, ``fwhm`` is set + to the positive difference of ``max_x`` & ``min_x``. + + These keys are added when :math:`\vec{y}` is not constant. + + ============== ====== ============================================== + key type description + ============== ====== ============================================== + ``sigma`` float :math:`\sigma_c` : Standard error of :math:`\mu`, a statistical measure of the peak width. See ``variance``. [#variance]_ + ``variance`` float :math:`\sigma_c^2` [#variance]_ + ``x_at_max_y`` float ``x`` value at maximum of :math:`\vec{y}` array. For positive-going peaks, this is the "peak position". + ``x_at_min_y`` float ``x`` value at minimum of :math:`\vec{y}` array. For negative-going peaks, this is the "peak position". + ============== ====== ============================================== + + .. [#variance] variance: :math:`\sigma_c^2 = {{\sum_i^n{(x_i - \mu)^2 \cdot y_i}} / {\sum_i^n{y_i}}}` + + .. seealso:: https://en.wikipedia.org/wiki/Weighted_arithmetic_mean + + These keys are added when a fit of :math:`\vec{y}` *vs.* :math:`\vec{x}` to + a first-order polynomial (straight line) and its covariance matrix can be + computed. 
+ + ====================== ====== ============================================== + key type description + ====================== ====== ============================================== + ``correlation`` float Correlation coefficient :math:`r`. Measure of the goodness of fit. + ``intercept`` float Computed y-axis intercept when fitting (x,y) to a straight line. + ``slope`` float Computed y-axis slope when fitting (x,y) to a straight line. + ``stddev_intercept`` float Standard deviation of intercept, from covariance matrix when fitting (x,y) to a straight line. + ``stddev_slope`` float Standard deviation of slope, from covariance matrix when fitting (x,y) to a straight line. + ====================== ====== ============================================== + """ + + x = np.array(x) + if len(x) == 0: + raise ValueError("Array x cannot be empty.") + + results = MMap( + max_x=x.max(), + mean_x=x.mean(), + median_x=(x.max() + x.min()) / 2, + min_x=x.min(), + n=len(x), + range_x=x.max() - x.min(), + stddev_x=x.std(), + ) + if y is not None: + y = np.array(y) + if len(x) != len(y): + raise ValueError(f"Unequal shapes: {x.shape=} {y.shape=}") + + results.update( + { + "max_y": y.max(), + "mean_y": y.mean(), + "median_y": (y.max() + y.min()) / 2, + "min_y": y.min(), + "range_y": y.max() - y.min(), + "stddev_y": y.std(), + } + ) + + if y.min() != y.max(): + results.update( + { + "x_at_min_y": x[array_index(y, y.min())], + "x_at_max_y": x[array_index(y, y.max())], + } + ) + # Analyze with numpy + # https://stackoverflow.com/a/2415343/1046449 + center_of_mass = results["centroid"] = np.average(x, weights=y) + results["fwhm"] = peak_full_width(x, y) + try: + results["variance"] = max( + 0.0, + np.average((x - center_of_mass) ** 2, weights=y), + ) + results["sigma"] = math.sqrt(results["variance"]) + except ValueError as reason: + logger.warning("Cannot compute variance: %s", reason) + results["variance"] = 0.0 # define anyway + results["sigma"] = 0.0 + + try: + results["correlation"] 
= np.corrcoef(x, y)[0, 1] + fit = np.polyfit(x, y, 1, cov=True) + results.update( + { + "intercept": fit[0][1], + "slope": fit[0][0], + "stddev_intercept": math.sqrt(fit[1][1, 1]), + "stddev_slope": math.sqrt(fit[1][0, 0]), + } + ) + except (TypeError, ValueError) as reason: + logger.warning("Could not compute covariance matrix: %s", reason) + else: + results["centroid"] = (x.max() + x.min()) / 2 + results["fwhm"] = abs(x.max() - x.min()) + + return results diff --git a/apstools/utils/tests/data/ok_x_y_133a3ade.txt b/apstools/utils/tests/data/ok_x_y_133a3ade.txt new file mode 100644 index 00000000..097c0926 --- /dev/null +++ b/apstools/utils/tests/data/ok_x_y_133a3ade.txt @@ -0,0 +1,40 @@ +# OK for analysis + +# statistics: +# min_x = -1.094 +# max_x = -0.578 +# min_y = 5789.51396 +# max_y = 97749.5072 +# n = 25 +# x_at_max_y = -0.836 +# variance = 0.008800005546333502 +# sigma = 0.09380834475852083 +# centroid = -0.8331296608818577 +# fwhm = 0.11994229792652611 + +# m1 noisy +-1.09400 5789.51396 +-1.07200 6580.25778 +-1.05100 7769.37907 +-1.02900 9524.12409 +-1.00800 11694.60348 +-0.98600 14404.19018 +-0.96500 18605.03479 +-0.94300 25044.28887 +-0.92200 33415.60643 +-0.90000 45046.39182 +-0.87900 65589.41382 +-0.85700 86291.56438 +-0.83600 97749.50720 +-0.81500 91629.65601 +-0.79300 70859.98061 +-0.77200 50627.81115 +-0.75000 36605.22399 +-0.72900 27001.60984 +-0.70700 20043.81892 +-0.68600 16555.94974 +-0.66400 12664.57000 +-0.64300 10046.32073 +-0.62100 8639.83884 +-0.60000 6957.57240 +-0.57800 5872.36240 diff --git a/apstools/utils/tests/data/ok_x_y_constant_y_positive.txt b/apstools/utils/tests/data/ok_x_y_constant_y_positive.txt new file mode 100644 index 00000000..c6bf3ffe --- /dev/null +++ b/apstools/utils/tests/data/ok_x_y_constant_y_positive.txt @@ -0,0 +1,22 @@ +# OK for analysis +# +# statistics: +# centroid = 4.0 +# max_x = 7.0 +# max_y = 50.0 +# median_x = 4.0 +# median_y = 50.0 +# min_x = 1.0 +# min_y = 50.0 +# range_x = 6.0 +# range_y = 0.0 +# n = 7 
+# +# x y +1 50 +2 50 +3 50 +4 50 +5 50 +6 50 +7 50 diff --git a/apstools/utils/tests/data/ok_x_y_constant_y_zero.txt b/apstools/utils/tests/data/ok_x_y_constant_y_zero.txt new file mode 100644 index 00000000..fa4ae023 --- /dev/null +++ b/apstools/utils/tests/data/ok_x_y_constant_y_zero.txt @@ -0,0 +1,18 @@ +# OK for analysis +# +# statistics: +# min_x = 1.0 +# max_x = 7.0 +# min_y = 0.0 +# max_y = 0.0 +# n = 7 +# centroid = 4.0 +# +# x y +1 0 +2 0 +3 0 +4 0 +5 0 +6 0 +7 0 diff --git a/apstools/utils/tests/data/ok_x_y_nx_simple_example.txt b/apstools/utils/tests/data/ok_x_y_nx_simple_example.txt new file mode 100644 index 00000000..0a07d3a7 --- /dev/null +++ b/apstools/utils/tests/data/ok_x_y_nx_simple_example.txt @@ -0,0 +1,47 @@ +# OK for analysis +# https://manual.nexusformat.org/examples/python/simple_example_write1/index.html + +# statistics: +# min_x = 17.92108 +# max_x = 17.92608 +# n = 31 +# min_y = 1037.0 +# max_y = 66863.0 +# x_at_max_y = 17.92391 +# variance = 8.23998241675532e-07 +# sigma = 0.0009077434889193819 +# centroid = 17.923494708234358 +# fwhm = 0.002733643326141788 + +# mr I0 +17.92608 1037 +17.92591 1318 +17.92575 1704 +17.92558 2857 +17.92541 4516 +17.92525 9998 +17.92508 23819 +17.92491 31662 +17.92475 40458 +17.92458 49087 +17.92441 56514 +17.92425 63499 +17.92408 66802 +17.92391 66863 +17.92375 66599 +17.92358 66206 +17.92341 65747 +17.92325 65250 +17.92308 64129 +17.92291 63044 +17.92275 60796 +17.92258 56795 +17.92241 51550 +17.92225 43710 +17.92208 29315 +17.92191 19782 +17.92175 12992 +17.92158 6622 +17.92141 4198 +17.92125 2248 +17.92108 1321 \ No newline at end of file diff --git a/apstools/utils/tests/data/ok_x_y_random.txt b/apstools/utils/tests/data/ok_x_y_random.txt new file mode 100644 index 00000000..65ba4e38 --- /dev/null +++ b/apstools/utils/tests/data/ok_x_y_random.txt @@ -0,0 +1,32 @@ +# OK for analysis + +# statistics: +# min_x = 0.0 +# max_x = 10.0 +# n = 11 +# min_y = 0.34153794329810716 +# max_y = 0.9110216387675163 +# 
x_at_min_y = 10.0 +# x_at_max_y = 4.0 +# correlation = -0.5836081093911799 +# intercept = 0.7436723856270571 +# stddev_intercept = 0.0982096730225767 +# slope = -0.035792153196186824 +# stddev_slope = 0.016600464602105423 +# variance = 9.28017340846042 +# sigma = 3.046337704270559 +# centroid = 4.366187059890569 +# fwhm = 1.9580094301681137 + +# x y +0 0.7480630412520091 +1 0.7378432488584941 +2 0.36913960247604516 +3 0.802821460688143 +4 0.9110216387675163 +5 0.3941859145327916 +6 0.4354494737090061 +7 0.6147174226935719 +8 0.422131597227946 +9 0.43491647260372146 +10 0.34153794329810716 \ No newline at end of file diff --git a/apstools/utils/tests/data/ok_x_y_single_nonzero_y.txt b/apstools/utils/tests/data/ok_x_y_single_nonzero_y.txt new file mode 100644 index 00000000..4594d3b9 --- /dev/null +++ b/apstools/utils/tests/data/ok_x_y_single_nonzero_y.txt @@ -0,0 +1,26 @@ +# OK for analysis +# +# statistics: +# min_x = 1.0 +# max_x = 11.0 +# min_y = 0.0 +# max_y = 50.0 +# n = 11 +# x_at_max_y = 8.0 +# centroid = 8.0 +# fwhm = 1.0 +# sigma = 0.0 +# variance = 0.0 +# +# x y +1 0 +2 0 +3 0 +4 0 +5 0 +6 0 +7 0 +8 50 +9 0 +10 0 +11 0 \ No newline at end of file diff --git a/apstools/utils/tests/data/ok_x_y_straight_line.txt b/apstools/utils/tests/data/ok_x_y_straight_line.txt new file mode 100644 index 00000000..eb4bb344 --- /dev/null +++ b/apstools/utils/tests/data/ok_x_y_straight_line.txt @@ -0,0 +1,13 @@ +# OK for analysis +# +# statistics: +# slope= 1.0000000000000004 +# intercept = 2.0000000000000004 +# correlation = 1.0 +# stddev_intercept = 0.0 +# stddev_slope = 0.0 +# +# x y +0 2 +1 3 +2 4 diff --git a/apstools/utils/tests/data/ok_x_y_two_nonzero_y.txt b/apstools/utils/tests/data/ok_x_y_two_nonzero_y.txt new file mode 100644 index 00000000..40f0442b --- /dev/null +++ b/apstools/utils/tests/data/ok_x_y_two_nonzero_y.txt @@ -0,0 +1,26 @@ +# OK for analysis +# +# statistics: +# min_x = 1.0 +# max_x = 11.0 +# min_y = 0.0 +# max_y = 50.0 +# n = 11 +# x_at_max_y = 7.0 
+# centroid = 7.5 +# fwhm = 2.0 +# sigma = 0.5 +# variance = 0.25 +# +# x y +1 0 +2 0 +3 0 +4 0 +5 0 +6 0 +7 50 +8 50 +9 0 +10 0 +11 0 \ No newline at end of file diff --git a/apstools/utils/tests/test_aps_data_management.py b/apstools/utils/tests/test_aps_data_management.py index 436e875d..6adfa8d0 100644 --- a/apstools/utils/tests/test_aps_data_management.py +++ b/apstools/utils/tests/test_aps_data_management.py @@ -2,14 +2,15 @@ Test the APS Data Management utility functions. """ -import pytest import pathlib import tempfile import uuid +import pytest + +from .. import aps_data_management as adm from .. import dm_setup from .. import dm_source_environ -from .. import aps_data_management as adm @pytest.fixture() diff --git a/apstools/utils/tests/test_connect_pvlist.py b/apstools/utils/tests/test_connect_pvlist.py index 8db5c49d..7e7cbeaf 100644 --- a/apstools/utils/tests/test_connect_pvlist.py +++ b/apstools/utils/tests/test_connect_pvlist.py @@ -1,8 +1,8 @@ import pytest -from ..misc import connect_pvlist from ...tests import IOC_AD from ...tests import IOC_GP +from ..misc import connect_pvlist @pytest.mark.parametrize( diff --git a/apstools/utils/tests/test_mmap_dict.py b/apstools/utils/tests/test_mmap_dict.py new file mode 100644 index 00000000..c06c2287 --- /dev/null +++ b/apstools/utils/tests/test_mmap_dict.py @@ -0,0 +1,53 @@ +"""Test the utils.mmap_dict module.""" + +import pytest + +from ..mmap_dict import MMap + + +def test_MMap(): + """Test basic operations. 
Comments indicate method tested.""" + mmap = MMap() + assert mmap is not None # __init__ + assert len(mmap) == 0 # __len__ + assert "_db" in dir(mmap) # __dir__ + + mmap.update(a=1, b=2) + assert len(mmap) == 2 # __len__ + + mmap.clear() + assert len(mmap) == 0 # __len__ + + mmap = MMap(a=1, b=2, c="three") + assert len(mmap) == 3 # __len__ + assert "a" in mmap # __dict__ + assert "a" in dir(mmap) # __dir__ + assert "a" in list(mmap) # __dict__ + assert hasattr(mmap, "a") # __getattr__ + assert "MMap(a=1, b=2, c='three')" == repr(mmap) # __repr__ + assert "MMap(a=1, b=2, c='three')" == str(mmap) # __str__ + + mmap.clear() + assert len(mmap) == 0 + + mmap = MMap(a=1, b=2) + assert mmap["a"] == 1 # __getitem__ + assert mmap.get("a") == 1 # __getitem__ + assert mmap.get("a", "default") == 1 # __getitem__ + assert mmap.get("unknown", "default") == "default" # __getitem__ + mmap["a"] = "three" # __setitem__ + assert mmap.a == "three" # __getattr__ + assert mmap["a"] == "three" # __getitem__ + + for key in mmap: # __iter__ + assert key in mmap._db, f"{key=!r}: {mmap._db=!r}" # __dict__ + + # Some keys conflict with known attributes, such as method names. + with pytest.raises(KeyError) as reason: + mmap["pop"] = "weasel" # __setitem__ + assert "key='pop' is a reserved name." in str(reason) + + # No write access to dictionary keys as attributes. 
+ with pytest.raises(AttributeError) as reason: + mmap.some_new_attribute = "new key" + assert "Unknown: attribute='some_new_attribute'" in str(reason) diff --git a/apstools/utils/tests/test_statistics.py b/apstools/utils/tests/test_statistics.py new file mode 100644 index 00000000..84c35955 --- /dev/null +++ b/apstools/utils/tests/test_statistics.py @@ -0,0 +1,93 @@ +"""Test the utils.statistics module.""" + +import pathlib +from contextlib import nullcontext as does_not_raise + +import pytest + +from ..statistics import xy_statistics + +DATA_PATH = pathlib.Path(__file__).parent / "data" + + +def get_x_y_data(path): + """ + Read ordered pairs from file. + + The header of the file (in comments) can contain key=value comments which + represent keys in the result from 'xy_statistics()'. Each such key found + will be tested. + + EXAMPLE:: + + # min_x = 1.0 + # max_x = 11.0 + # min_y = 0.0 + # max_y = 50.0 + # n = 11 + # x_at_max_y = 7.0 + # centroid = 7.5 + # fwhm = 1.1774100225154747 + # sigma = 0.5 + # variance = 0.25 + + """ + buf = open(path).read() + advice, comments, data = {}, [], [] + for line in buf.strip().splitlines(): + if line.strip().startswith("#"): + comments.append(line) + if "=" in line: + key, value = line.strip().lstrip("#").split("=") + advice[key.strip()] = value.strip() + elif line.strip() == "": + continue + else: + data += list(map(float, line.split())) + + return { + "advice": advice, + "comments": comments, + "file": path.name, + "x": data[0::2], + "y": data[1::2], + } + + +def ok_sample_data(): + for fname in DATA_PATH.glob("ok_x_y_*.txt"): + if fname.is_file(): + data = get_x_y_data(fname) + if data is not None: + yield data + + +@pytest.mark.parametrize("data", ok_sample_data()) +def test_xy_statistics(data): + assert DATA_PATH.exists() + assert data is not None + assert isinstance(data, dict) + + stats = xy_statistics(data["x"], data["y"]) + assert stats is not None + unknown = object() + for key, expected in data["advice"].items(): + 
received = stats.get(key, unknown) + assert f"{received}" == expected, f"{key=} {expected=} {stats=} {data['file']=!r}" + + +@pytest.mark.parametrize( + "x, y, xcept, text", + [ + [[], None, ValueError, "cannot be empty"], + [[1, 2], [], ValueError, "Unequal shapes:"], + [[1, 2], [1, 2, 3], ValueError, "Unequal shapes:"], + [[1, 2], [1, 2], None, str(None)], + ], +) +def test_xy_statistics_errors(x, y, xcept, text): + """Test known exceptions raised by xy_statistics().""" + context = does_not_raise() if xcept is None else pytest.raises(xcept) + with context as reason: + xy_statistics(x, y) + assert text in str(reason) diff --git a/apstools/utils/time_constants.py b/apstools/utils/time_constants.py index 983a2999..3cc683df 100644 --- a/apstools/utils/time_constants.py +++ b/apstools/utils/time_constants.py @@ -1,4 +1,7 @@ """ +TIME CONSTANTS +============== + Define symbols used by other modules to define time (seconds). .. autosummary:: diff --git a/docs/source/api/_utils.rst b/docs/source/api/_utils.rst index d4063b4e..3f33d7e0 100644 --- a/docs/source/api/_utils.rst +++ b/docs/source/api/_utils.rst @@ -66,8 +66,8 @@ Reporting ~apstools.utils.log_utils.file_log_handler ~apstools.utils.log_utils.get_log_path ~apstools.utils.log_utils.setup_IPython_console_logging - ~apstools.utils.log_utils.stream_log_handler ~apstools.utils.slit_core.SlitGeometry + ~apstools.utils.log_utils.stream_log_handler .. _utils.other: @@ -76,19 +76,23 @@ Other Utilities .. 
autosummary:: - ~apstools.utils.aps_data_management.dm_setup - ~apstools.utils.apsu_controls_subnet.warn_if_not_aps_controls_subnet + ~apstools.utils.statistics.array_index ~apstools.utils.misc.cleanupText ~apstools.utils.misc.connect_pvlist + ~apstools.utils.aps_data_management.dm_setup ~apstools.utils.misc.dynamic_import ~apstools.utils.email.EmailNotifications + ~apstools.utils.statistics.factor_fwhm + ~apstools.utils.statistics.peak_full_width ~apstools.utils.plot.select_live_plot ~apstools.utils.plot.select_mpl_figure ~apstools.utils.plot.trim_plot_by_name ~apstools.utils.plot.trim_plot_lines ~apstools.utils.misc.trim_string_for_EPICS - ~apstools.utils.misc.unix ~apstools.utils.time_constants.ts2iso + ~apstools.utils.misc.unix + ~apstools.utils.apsu_controls_subnet.warn_if_not_aps_controls_subnet + ~apstools.utils.statistics.xy_statistics .. _utils.general: @@ -129,6 +133,7 @@ General ~apstools.utils.list_runs.listRunKeys ~apstools.utils.list_runs.ListRuns ~apstools.utils.list_runs.listruns + ~apstools.utils.mmap_dict.MMap ~apstools.utils.override_parameters.OverrideParameters ~apstools.utils.misc.pairwise ~apstools.utils.plot.plotxy @@ -190,6 +195,9 @@ Submodules .. automodule:: apstools.utils.misc :members: +.. automodule:: apstools.utils.mmap_dict + :members: + .. automodule:: apstools.utils.override_parameters :members: @@ -211,5 +219,8 @@ Submodules .. automodule:: apstools.utils.spreadsheet :members: +.. automodule:: apstools.utils.statistics + :members: + .. automodule:: apstools.utils.time_constants :members: