diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index ed21aff..11616cd 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -2,14 +2,18 @@ name: CI
on:
push:
- branches:
- - main
- tags:
- - "v*"
+ branches: [main]
+ tags: ["v*"]
pull_request:
workflow_dispatch:
jobs:
+ check-manifest:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - run: pipx run check-manifest
+
test:
name: ${{ matrix.platform }} (${{ matrix.python-version }})
runs-on: ${{ matrix.platform }}
@@ -19,14 +23,14 @@ jobs:
strategy:
fail-fast: false
matrix:
- python-version: [3.7, 3.8, 3.9, '3.10']
- platform: [ubuntu-latest]
+ python-version: [3.7, 3.8, 3.9, "3.10", "3.11"]
+ platform: [ubuntu-latest, windows-latest]
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v4
- uses: tlambert03/setup-qt-libs@v1
- - uses: conda-incubator/setup-miniconda@v2
+ - uses: conda-incubator/setup-miniconda@v3
with:
auto-update-conda: true
python-version: ${{ matrix.python-version }}
@@ -34,19 +38,23 @@ jobs:
channels: conda-forge,defaults
channel-priority: true
+ - name: Install OpenCL
+ if: matrix.platform == 'ubuntu-latest'
+ run: mamba install pyopencl pocl
+
- name: Install dependencies
run: |
- mamba install pyopencl pocl
- sudo apt-get install -y xvfb
- pip install -U pip wheel
- pip install -e .[tests]
+ python -m pip install -U pip
+ python -m pip install -e .[test,pyqt5]
- - name: Test with xvfb
- if: runner.os == 'Linux'
- run: xvfb-run --auto-servernum pytest --cov llspy
+ - name: Test
+ uses: aganders3/headless-gui@v2
+ with:
+ shell: bash -el {0}
+ run: python -m pytest --cov llspy
- name: Coverage
- uses: codecov/codecov-action@v2
+ uses: codecov/codecov-action@v3
deploy:
name: Deploy
@@ -55,7 +63,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v2
@@ -70,9 +78,7 @@ jobs:
python -m build .
- name: twine check
- run: |
- twine check dist/*
- ls -lh dist
+ run: twine check dist/*
- name: Build and publish
run: twine upload dist/*
diff --git a/.gitignore b/.gitignore
index 7f0d84c..48ee0fc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -119,4 +119,4 @@ tests/testdata/sample/MIPs/
tests/testdata/sample/sample_ProcessingLog.txt
tests/testdata/sample/GPUdecon/
-llspy/_version.py
+src/llspy/_version.py
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 8dfe5d7..3a9f2e8 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,38 +1,33 @@
+ci:
+ autoupdate_schedule: monthly
+ autofix_commit_msg: "style(pre-commit.ci): auto fixes [...]"
+ autoupdate_commit_msg: "ci(pre-commit.ci): autoupdate"
+
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
- rev: v4.3.0
+ rev: v4.5.0
hooks:
- id: check-docstring-first
- id: end-of-file-fixer
- id: trailing-whitespace
- - repo: https://github.com/asottile/setup-cfg-fmt
- rev: v1.20.1
+
+ - repo: https://github.com/abravalheri/validate-pyproject
+ rev: v0.16
hooks:
- - id: setup-cfg-fmt
- - repo: https://github.com/PyCQA/flake8
- rev: 4.0.1
+ - id: validate-pyproject
+
+ - repo: https://github.com/charliermarsh/ruff-pre-commit
+ rev: v0.3.4
hooks:
- - id: flake8
- additional_dependencies: [flake8-typing-imports==1.7.0]
- - repo: https://github.com/myint/autoflake
- rev: v1.4
- hooks:
- - id: autoflake
- args: ["--in-place", "--remove-all-unused-imports"]
- - repo: https://github.com/PyCQA/isort
- rev: 5.10.1
- hooks:
- - id: isort
- - repo: https://github.com/psf/black
- rev: 22.6.0
- hooks:
- - id: black
- - repo: https://github.com/asottile/pyupgrade
- rev: v2.37.1
- hooks:
- - id: pyupgrade
- args: [--py37-plus, --keep-runtime-typing]
+ - id: ruff
+ args: [--fix, --unsafe-fixes]
+ - id: ruff-format
+
# - repo: https://github.com/pre-commit/mirrors-mypy
- # rev: v0.812
+ # rev: v1.9.0
# hooks:
# - id: mypy
+ # files: "^src/"
+ # # # you have to add the things you want to type check against here
+ # # additional_dependencies:
+ # # - numpy
diff --git a/MANIFEST.in b/MANIFEST.in
index 8b5ab30..75f63bb 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,10 +1,10 @@
include README.rst
include CHANGELOG.rst
-recursive-include llspy *.py
-recursive-include llspy *.ui
-recursive-include llspy *.ini
-recursive-include llspy *.png
+recursive-include src/llspy *.py
+recursive-include src/llspy *.ui
+recursive-include src/llspy *.ini
+recursive-include src/llspy *.png
recursive-include img *
recursive-exclude tests *
diff --git a/README.rst b/README.rst
index 30aa79b..5e588c6 100755
--- a/README.rst
+++ b/README.rst
@@ -170,24 +170,15 @@ Requirements
Installation
============
-**Note**: *As of version 0.4.2 cudaDecon is now included in the LLSpy conda package and requires no additional steps for installation. Horray for open source!*
-
-
-#. Install `Anaconda `_ or `Miniconda `_
-#. Launch a ``terminal`` window (OS X, Linux), or ``Anaconda Prompt`` (Windows)
-#. Add the "conda-forge" and "talley" channels to your conda config
-
- .. code:: bash
-
- $ conda config --add channels conda-forge
- $ conda config --add channels talley
-
+#. Install `conda `_
+#. Launch a ``terminal`` window (Linux), or ``Anaconda Prompt`` (Windows)
#. Install LLSpy into a new conda environment
.. code:: bash
- $ conda create -n llsenv python=3.6 llspy
+ $ conda create -n llsenv python=3.11 cudadecon
$ conda activate llsenv
+ $ pip install llspy
The ``create -n llsenv`` line creates a virtual environment. This is optional, but recommended as it is easier to uninstall cleanly and prevents conflicts with any other python environments. If installing into a virtual environment, you must source the environment before proceeding, and each time before using llspy.
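+
+For example, a minimal session after installation (assuming the ``llsenv`` environment name used above; ``lls`` and ``lls-gui`` are the command-line and GUI entry points installed with the package):
+
+.. code:: bash
+
+   $ conda activate llsenv
+   $ lls --help
+   $ lls-gui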
diff --git a/pyproject.toml b/pyproject.toml
index 06204c3..a62d3a1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,7 +3,93 @@ requires = ["setuptools>=45", "wheel", "setuptools_scm>=6.2"]
build-backend = "setuptools.build_meta"
[tool.setuptools_scm]
-write_to = "llspy/_version.py"
+write_to = "src/llspy/_version.py"
+
+# https://peps.python.org/pep-0621/
+[project]
+name = "llspy"
+dynamic = ["version"]
+description = "Lattice Light Sheet Processing Tools."
+readme = "README.rst"
+requires-python = ">=3.7"
+license = { text = "BSD-3-Clause" }
+authors = [{ name = "Talley Lambert", email = "talley.lambert@example.com" }]
+classifiers = [
+ "Development Status :: 3 - Alpha",
+ "Intended Audience :: Science/Research",
+ "License :: OSI Approved :: BSD License",
+ "Natural Language :: English",
+ "Operating System :: Microsoft :: Windows",
+ "Operating System :: POSIX :: Linux",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3 :: Only",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Topic :: Scientific/Engineering :: Visualization",
+]
+dependencies = [
+ "click ~=8.1",
+ "llspy-slm>=0.2.1",
+ "matplotlib",
+ "numpy",
+ "parse ~=1.20",
+ "qtpy >=2.4",
+ "scipy <=1.12",
+ "sentry-sdk ~=1.38",
+ "tifffile",
+ "voluptuous ~=0.14",
+ "watchdog ~=3.0",
+ "numba;python_version < '3.13'",
+ "importlib_metadata; python_version < '3.8'",
+]
+
+# https://peps.python.org/pep-0621/#dependencies-optional-dependencies
+[project.optional-dependencies]
+napari = ["napari"]
+pyqt5 = ["PyQt5"]
+pyside2 = ["PySide2"]
+spimagine = ["spimagine"]
+test = ["pytest", "pytest-cov", "pytest-qt"]
+dev = ["ipython", "mypy", "pdbpp", "pre-commit", "rich", "ruff"]
+
+[project.urls]
+homepage = "https://github.com/tlambert03/LLSpy"
+repository = "https://github.com/tlambert03/LLSpy"
+
+[project.scripts]
+lls = "llspy.bin.llspy_cli:cli"
+lls-gui = "llspy.bin.llspy_gui:main"
+
+[tool.setuptools.packages.find]
+where = ["src"]
+
+# https://docs.astral.sh/ruff
+[tool.ruff]
+line-length = 88
+target-version = "py37"
+src = ["src"]
+
+[tool.ruff.lint]
+# https://docs.astral.sh/ruff/rules
+select = [
+ "E", # style errors
+ "W", # style warnings
+ "F", # flakes
+ "I", # isort
+ "UP", # pyupgrade
+ "C4", # flake8-comprehensions
+ "A001", # flake8-builtins
+ "RUF", # ruff-specific rules
+ "TCH", # flake8-type-checking
+ "TID", # flake8-tidy-imports
+]
+ignore = [
+ "E501", # line length
+]
+exclude = ["docs/*"]
[tool.check-manifest]
-ignore = ["llspy/_version.py", ".pre-commit-config.yaml"]
+ignore = ["llspy/_version.py", ".pre-commit-config.yaml", "tests/*"]
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index d416edc..0000000
--- a/setup.cfg
+++ /dev/null
@@ -1,88 +0,0 @@
-[metadata]
-name = llspy
-description = Lattice Light Sheet Processing Tools
-long_description = file: README.rst
-long_description_content_type = text/x-rst
-url = https://github.com/tlambert03/LLSpy
-author = Talley Lambert
-author_email = talley.lambert@gmail.com
-license = BSD 3-clause
-license_file = LICENSE
-classifiers =
- Development Status :: 3 - Alpha
- Intended Audience :: Science/Research
- License :: OSI Approved :: BSD License
- Natural Language :: English
- Operating System :: Microsoft :: Windows
- Operating System :: POSIX :: Linux
- Programming Language :: Python
- Programming Language :: Python :: 3
- Programming Language :: Python :: 3 :: Only
- Programming Language :: Python :: 3.6
- Programming Language :: Python :: 3.7
- Programming Language :: Python :: 3.8
- Programming Language :: Python :: 3.9
- Programming Language :: Python :: 3.10
- Topic :: Scientific/Engineering
- Topic :: Scientific/Engineering :: Visualization
-
-[options]
-packages = find:
-install_requires =
- click
- llspy-slm>=0.2.1
- matplotlib
- numpy
- parse
- qtpy
- scipy
- sentry-sdk
- tifffile
- voluptuous
- watchdog
- numba;python_version < '3.10'
-python_requires = >=3.6
-include_package_data = True
-zip_safe = False
-
-[options.packages.find]
-exclude =
- tests
- docs
- pyinstaller
-
-[options.entry_points]
-console_scripts =
- lls = llspy.bin.llspy_cli:cli
- lls-gui = llspy.bin.llspy_gui:main
-
-[options.extras_require]
-napari =
- napari
-pyqt =
- PyQt5
-pyside =
- PySide2
-spimagine =
- spimagine
-tests =
- PyQt5
- pytest
- pytest-cov
- pytest-qt
-
-[options.package_data]
-* = *.ini, *.ui, *.png
-
-[bdist_wheel]
-universal = 1
-
-[flake8]
-exclude = docs,_version.py,.eggs,conf.py
-max-line-length = 88
-docstring-convention = numpy
-ignore = D100, D213, D401, D413, D107, W503, E501, E203, E741
-
-[isort]
-profile = black
-src_paths = llspy
diff --git a/fiducialreg/__init__.py b/src/fiducialreg/__init__.py
similarity index 100%
rename from fiducialreg/__init__.py
rename to src/fiducialreg/__init__.py
diff --git a/fiducialreg/fiducialreg.py b/src/fiducialreg/fiducialreg.py
similarity index 96%
rename from fiducialreg/fiducialreg.py
rename to src/fiducialreg/fiducialreg.py
index 7b3ed7e..a01c23d 100644
--- a/fiducialreg/fiducialreg.py
+++ b/src/fiducialreg/fiducialreg.py
@@ -46,7 +46,7 @@
matplotlib.use("Qt5Agg")
-import matplotlib.pyplot as plt # noqa
+import matplotlib.pyplot as plt
logger = logging.getLogger(__name__)
np.seterr(divide="ignore", invalid="ignore")
@@ -71,7 +71,7 @@ def log_filter(img, blurxysigma=1, blurzsigma=2.5, mask=None):
def bead_centroids(img, labeled, nlabels):
# get center of mass of each object
- return [ndimage.center_of_mass(img, labeled, l) for l in range(1, nlabels + 1)]
+ return [ndimage.center_of_mass(img, labeled, x) for x in range(1, nlabels + 1)]
def get_thresh(im, mincount=None, steps=100):
@@ -94,15 +94,11 @@ def get_thresh(im, mincount=None, steps=100):
object_count = np.array(object_count)
if mincount > object_count.max():
raise RegistrationError(
- "Could not detect minimum number of beads specified ({}), found: {}".format(
- mincount, object_count.max()
- )
+ f"Could not detect minimum number of beads specified ({mincount}), found: {object_count.max()}"
)
modecount = stats.mode(object_count[(object_count >= mincount)], axis=None).mode[0]
logging.debug(
- "Threshold detected: {}".format(
- threshrange[np.argmax(object_count == modecount)]
- )
+ f"Threshold detected: {threshrange[np.argmax(object_count == modecount)]}"
)
return threshrange[np.argmax(object_count == modecount)], modecount
@@ -355,7 +351,7 @@ def FitModelWeighted(modelFcn, startParameters, data, sigmas, *args):
return optimize.leastsq(
weightedMissfitF,
startParameters,
- (modelFcn, data.ravel(), (1.0 / sigmas).astype("f").ravel()) + args,
+ (modelFcn, data.ravel(), (1.0 / sigmas).astype("f").ravel(), *args),
full_output=1,
)
@@ -845,9 +841,7 @@ def get_all_tforms(
regto.append(ref)
else:
logger.warning(
- "Reference {} not in lablels: {} ... skipping".format(
- ref, self.labels
- )
+ f"Reference {ref} not in lablels: {self.labels} ... skipping"
)
if not len(regto):
logger.error("No recognized values in refs list. No tforms calculated")
@@ -881,9 +875,7 @@ def get_all_tforms(
except Exception:
print("SKIPPING MODE: ", mode)
logger.error(
- 'Failed to calculate mode "{}" in get_all_tforms. Skipping.'.format(
- mode
- )
+ f'Failed to calculate mode "{mode}" in get_all_tforms. Skipping.'
)
return D
@@ -901,7 +893,7 @@ def default(self, obj):
if not all(isinstance(i, np.ndarray) for i in obj):
return obj.tolist()
nestedList = obj.tolist()
- return [self.fixedString(l) for l in nestedList]
+ return [self.fixedString(x) for x in nestedList]
return json.JSONEncoder.default(self, obj)
tforms = self.get_all_tforms(**kwargs)
@@ -946,17 +938,13 @@ def tform(
movIdx = self.labels.index(movingLabel)
except ValueError:
raise ValueError(
- "Could not find label {} in reg list: {}".format(
- movingLabel, self.labels
- )
+ f"Could not find label {movingLabel} in reg list: {self.labels}"
)
try:
fixIdx = self.labels.index(fixedLabel)
except ValueError:
raise ValueError(
- "Could not find label {} in reg list: {}".format(
- fixedLabel, self.labels
- )
+ f"Could not find label {fixedLabel} in reg list: {self.labels}"
)
mode = mode.lower()
@@ -1062,8 +1050,8 @@ def show_tformed_image(self, movingLabel=None, fixedLabel=None, **kwargs):
def imoverlay(im1, im2, method=None, mip=False):
- im1 = im1.astype(np.float) if not mip else im1.astype(np.float).max(0)
- im2 = im2.astype(np.float) if not mip else im2.astype(np.float).max(0)
+ im1 = im1.astype(float) if not mip else im1.astype(float).max(0)
+ im2 = im2.astype(float) if not mip else im2.astype(float).max(0)
im1 -= im1.min()
im1 /= im1.max()
im2 -= im2.min()
@@ -1159,9 +1147,9 @@ def parsefile(self):
if mov not in self.tform_dict[ref]:
self.tform_dict[ref][mov] = {}
self.tform_dict[ref][mov][mode] = tform["tform"]
- self.refwaves = sorted(list(set(self.refwaves)))
- self.movwaves = sorted(list(set(self.movwaves)))
- self.modes = sorted(list(set(self.modes)))
+ self.refwaves = sorted(set(self.refwaves))
+ self.movwaves = sorted(set(self.movwaves))
+ self.modes = sorted(set(self.modes))
self.waves = self.refwaves # to make it easier to substitute for RegDir
@property
@@ -1180,15 +1168,11 @@ def get_tform(self, moving, ref, mode):
raise RegistrationError(f"Reference wave {ref} not in registration file")
if moving not in self.tform_dict[ref]:
raise RegistrationError(
- "No transform to map moving wave {} onto refrence wave {}".format(
- moving, ref
- )
+ f"No transform to map moving wave {moving} onto refrence wave {ref}"
)
if mode not in self.tform_dict[ref][moving]:
raise RegistrationError(
- "Transform mode {} not found for refwave: {}, movingwave: {}".format(
- mode, ref, moving
- )
+ f"Transform mode {mode} not found for refwave: {ref}, movingwave: {moving}"
)
return self.tform_dict[ref][moving][mode]
diff --git a/fiducialreg/imref.py b/src/fiducialreg/imref.py
similarity index 95%
rename from fiducialreg/imref.py
rename to src/fiducialreg/imref.py
index b2a9ff8..444e0d8 100644
--- a/fiducialreg/imref.py
+++ b/src/fiducialreg/imref.py
@@ -18,7 +18,6 @@ class DimensionManager:
def __init__(
self, DimensionName="", NumberOfSamples=2, Delta=1, StartCoordinateInWorld=0.5
):
-
if DimensionName not in ["X", "Y", "Z"]:
raise ValueError("DimensionName must be X, Y, or Z")
self.DimensionName = DimensionName
@@ -122,13 +121,13 @@ def __init__(self, *args):
self.Dimension.X = DimensionManager("X", args[0][1])
self.Dimension.Y = DimensionManager("Y", args[0][0])
elif len(args) == 3:
- if all([isinstance(x, (list, np.ndarray)) for x in args[1:3]]):
+ if all(isinstance(x, (list, np.ndarray)) for x in args[1:3]):
# world limits provided
self.Dimension.X = DimensionManager("X", args[0][1])
self.Dimension.Y = DimensionManager("Y", args[0][0])
self.XWorldLimits = args[1]
self.YWorldLimits = args[2]
- elif all([np.isscalar(x) for x in args[1:3]]):
+ elif all(np.isscalar(x) for x in args[1:3]):
# imref2d(imageSize,pixelExtentInWorldX,pixelExtentInWorldY)
self.Dimension.X = DimensionManager(
"X", args[0][1], args[1], args[1] / 2
@@ -219,7 +218,7 @@ def worldToIntrinsic(self, xWorld, yWorld):
def worldToSubscript(self, xWorld, yWorld):
if len({type(n) for n in (xWorld, yWorld)}) > 1:
raise ValueError("All inputs to worldToSubscript must have same type")
- if not any([np.isscalar(n) for n in (xWorld, yWorld)]):
+ if not any(np.isscalar(n) for n in (xWorld, yWorld)):
if len({len(n) for n in (xWorld, yWorld)}) > 1:
raise ValueError("All inputs to worldToSubscript must have same size")
# TODO: check order of CRP output index
@@ -244,13 +243,13 @@ def __repr__(self):
if len(val) == 1:
repdict[n] = f"{val[0]:.4f}"
else:
- if np.issubdtype(val.dtype, np.int):
+ if np.issubdtype(val.dtype, np.integer):
repdict[n] = list(val)
else:
- repdict[n] = list(f"{i:.4f}" for i in val)
- elif isinstance(self.__getattribute__(n), (np.float, float)):
+ repdict[n] = [f"{i:.4f}" for i in val]
+ elif isinstance(self.__getattribute__(n), float):
repdict[n] = f"{self.__getattribute__(n):.4f}"
- elif isinstance(self.__getattribute__(n), (np.int, np.int64, int)):
+ elif isinstance(self.__getattribute__(n), (np.int64, int)):
repdict[n] = int(self.__getattribute__(n))
return pformat(repdict)
@@ -288,7 +287,7 @@ def __init__(self, *args):
self.Dimension.Y = DimensionManager("Y", args[0][1])
self.Dimension.Z = DimensionManager("Z", args[0][0])
elif len(args) == 4:
- if all([isinstance(x, (list, np.ndarray)) for x in args[1:4]]):
+ if all(isinstance(x, (list, np.ndarray)) for x in args[1:4]):
# imref3d(imageSize,pixelExtentInWorldX,pixelExtentInWorldY,pixelExtentInWorldZ)
self.Dimension.X = DimensionManager("X", args[0][2])
self.Dimension.Y = DimensionManager("Y", args[0][1])
@@ -296,7 +295,7 @@ def __init__(self, *args):
self.XWorldLimits = args[1]
self.YWorldLimits = args[2]
self.ZWorldLimits = args[3]
- elif all([np.isscalar(x) for x in args[1:4]]):
+ elif all(np.isscalar(x) for x in args[1:4]):
# imref3d(imageSize,xWorldLimits,yWorldLimits,zWorldLimits)
self.Dimension.X = DimensionManager(
"X", args[0][2], args[1], args[1] / 2
@@ -377,7 +376,7 @@ def worldToIntrinsic(self, xWorld, yWorld, zWorld):
def worldToSubscript(self, xWorld, yWorld, zWorld):
if len({type(n) for n in (xWorld, yWorld, zWorld)}) > 1:
raise ValueError("All inputs to worldToSubscript must have same type")
- if not any([np.isscalar(n) for n in (xWorld, yWorld, zWorld)]):
+ if not any(np.isscalar(n) for n in (xWorld, yWorld, zWorld)):
if len({len(n) for n in (xWorld, yWorld, zWorld)}) > 1:
raise ValueError("All inputs to worldToSubscript must have same size")
# TODO: check order of CRP output index
@@ -395,7 +394,7 @@ def worldToSubscript(self, xWorld, yWorld, zWorld):
return c, r, p
- def sizesMatch(self, I):
+ def sizesMatch(self, x):
raise NotImplementedError()
# imageSize = I.shape
# if ~isequal(size(self.ImageSize), size(imageSize))
diff --git a/fiducialreg/imwarp.py b/src/fiducialreg/imwarp.py
similarity index 99%
rename from fiducialreg/imwarp.py
rename to src/fiducialreg/imwarp.py
index 9ad5a89..53302ea 100644
--- a/fiducialreg/imwarp.py
+++ b/src/fiducialreg/imwarp.py
@@ -33,7 +33,7 @@ def imwarp(inputImage, tform, R_A=None, outputRef=None):
# checkOutputViewAgreementWithTform(outputRef,tform)
# Resampling the input image must be done in a floating point type.
- if not np.issubdtype(inputImage.dtype, np.float):
+ if not np.issubdtype(inputImage.dtype, float):
inputImage = inputImage.astype(np.float64)
# Form grid of intrinsic points in output image.
diff --git a/llspy/__init__.py b/src/llspy/__init__.py
similarity index 100%
rename from llspy/__init__.py
rename to src/llspy/__init__.py
diff --git a/llspy/__main__.py b/src/llspy/__main__.py
similarity index 99%
rename from llspy/__main__.py
rename to src/llspy/__main__.py
index 551f97a..3dd0233 100644
--- a/llspy/__main__.py
+++ b/src/llspy/__main__.py
@@ -9,7 +9,6 @@
from llspy.bin.llspy_gui import main
if __name__ == "__main__":
-
import logging
logger = logging.getLogger(__name__)
diff --git a/llspy/arrayfun.py b/src/llspy/arrayfun.py
similarity index 95%
rename from llspy/arrayfun.py
rename to src/llspy/arrayfun.py
index 6532173..2b5b793 100644
--- a/llspy/arrayfun.py
+++ b/src/llspy/arrayfun.py
@@ -1,5 +1,5 @@
import numpy as np
-from scipy.ndimage.filters import gaussian_filter
+from scipy.ndimage import gaussian_filter
from scipy.stats import mode
from .libcudawrapper import deskewGPU as deskew
@@ -38,7 +38,7 @@ def threshold_li(image):
raise ValueError(
"threshold_li is expected to work with images "
"having more than one value. The input image seems "
- "to have just one value {}.".format(image.flat[0])
+ f"to have just one value {image.flat[0]}."
)
# Copy to ensure input image is not modified
@@ -97,7 +97,7 @@ def imcontentbounds(im, sigma=2):
# get rid of the first two planes in case of high dark noise
if im.ndim == 3:
im = np.squeeze(np.max(im[2:], 0))
- im = im.astype(np.float)
+ im = im.astype(float)
fullwidth = im.shape[-1]
# from scipy.ndimage.filters import median_filter
# mm = median_filter(b.astype(float),3)
@@ -148,14 +148,14 @@ def detect_background(im):
im = im[0][2]
if im.ndim == 3:
im = im[1] # pick the third plane... avoid noise in first plane on lattice
- return mode(im.flatten())[0][0]
+ return mode(im.flatten()).mode
def sub_background(im, background=None):
"""subtract provided background or autodetct as mode of the first plane"""
if background is None:
background = detect_background(im)
- out = im.astype(np.float) - background
+ out = im.astype(float) - background
out[out < 0] = 0
return out
@@ -173,7 +173,7 @@ def deskew_gputools(rawdata, dz=0.5, dx=0.102, angle=31.5, filler=0):
(nz, ny, nx) = rawdata.shape
# Francois' method:
# nxOut = math.ceil((nz - 1) * deskewFactor) + nx
- nxOut = np.int(np.floor((nz - 1) * dz * abs(np.cos(angle * np.pi / 180)) / dx) + nx)
+ nxOut = int(np.floor((nz - 1) * dz * abs(np.cos(angle * np.pi / 180)) / dx) + nx)
# +1 to pad left side with 1 column of filler pixels
# otherwise, edge pixel values are smeared across the image
paddedData = np.ones((nz, ny, nxOut), rawdata.dtype) * filler
diff --git a/llspy/bin/__init__.py b/src/llspy/bin/__init__.py
similarity index 100%
rename from llspy/bin/__init__.py
rename to src/llspy/bin/__init__.py
diff --git a/llspy/bin/llspy_cli.py b/src/llspy/bin/llspy_cli.py
similarity index 98%
rename from llspy/bin/llspy_cli.py
rename to src/llspy/bin/llspy_cli.py
index ef3a324..cd73435 100644
--- a/llspy/bin/llspy_cli.py
+++ b/src/llspy/bin/llspy_cli.py
@@ -29,7 +29,7 @@
DEFAULTS = schema.__defaults__
-CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
+CONTEXT_SETTINGS = {"help_option_names": ["-h", "--help"]}
class Config(dict):
@@ -106,13 +106,13 @@ def print_cfgfile(self):
def update_default(self, key, value):
if key not in DEFAULTS:
click.secho(
- "{} is not a recognized parameter! use config --info to "
- "list all recognized parameters".format(key),
+ f"{key} is not a recognized parameter! use config --info to "
+ "list all recognized parameters",
fg="red",
)
return 0
try:
- key, value = list(schema.validateItems(**{key: value}).items())[0]
+ key, value = next(iter(schema.validateItems(**{key: value}).items()))
except Exception as e:
click.secho(str(e), fg="red")
return 0
@@ -123,7 +123,7 @@ def update_default(self, key, value):
# preserve comments
with open(self.default_path) as f:
comments = [
- l for l in list(f) if l.startswith(self.comment) and key not in l
+ x for x in list(f) if x.startswith(self.comment) and key not in x
]
parser = configparser.ConfigParser(allow_no_value=True)
@@ -244,7 +244,7 @@ def info(paths, verbose, recurse, depth, showsize):
[paths.insert(i, s) for s in reversed(subf)]
# remove duplicates
- paths = sorted(list(set(paths)))
+ paths = sorted(set(paths))
if verbose == 0 and len(paths):
click.echo()
@@ -849,7 +849,7 @@ def compress(
[paths.insert(i, s) for s in reversed(subf)]
# remove duplicates
- paths = sorted(list(set(paths)))
+ paths = sorted(set(paths))
if dryrun:
click.secho("DRY RUN: NOTHING PERFORMED!", fg="red", underline=True)
diff --git a/llspy/bin/llspy_gui.py b/src/llspy/bin/llspy_gui.py
similarity index 97%
rename from llspy/bin/llspy_gui.py
rename to src/llspy/bin/llspy_gui.py
index 2e8d272..4a20e10 100644
--- a/llspy/bin/llspy_gui.py
+++ b/src/llspy/bin/llspy_gui.py
@@ -159,9 +159,9 @@ def setOptOut(value):
QtWidgets.QMessageBox.information(
mainGUI,
"Newer Version Available!",
- "Update available: v%s\n\nYou are using v%s\n\nIf you are using "
+ f"Update available: v{newestVersion}\n\nYou are using v{llspy.__version__}\n\nIf you are using "
'anaconda, you may update by typing "conda update -c talley llspy" '
- "at the anaconda prompt" % (newestVersion, llspy.__version__),
+ "at the anaconda prompt",
QtWidgets.QMessageBox.Ok,
QtWidgets.QMessageBox.NoButton,
)
diff --git a/llspy/camcalib.py b/src/llspy/camcalib.py
similarity index 98%
rename from llspy/camcalib.py
rename to src/llspy/camcalib.py
index 7be9030..4590800 100644
--- a/llspy/camcalib.py
+++ b/src/llspy/camcalib.py
@@ -55,11 +55,11 @@ def combine_stacks(ch0, ch1, darkavg):
shp = list(tf.TiffFile(ch0[0]).series[0].shape)
nZ = shp[0]
shp[0] *= len(ch0)
- pre = np.zeros(shp, dtype=np.float)
- post = np.zeros(shp, dtype=np.float)
+ pre = np.zeros(shp, dtype=float)
+ post = np.zeros(shp, dtype=float)
for n in range(len(ch0)):
- pre[n * nZ : n * nZ + nZ, :, :] = tf.imread(ch0[n]).astype(np.float) - darkavg
- post[n * nZ : n * nZ + nZ, :, :] = tf.imread(ch1[n]).astype(np.float) - darkavg
+ pre[n * nZ : n * nZ + nZ, :, :] = tf.imread(ch0[n]).astype(float) - darkavg
+ post[n * nZ : n * nZ + nZ, :, :] = tf.imread(ch1[n]).astype(float) - darkavg
return pre, post
@@ -168,7 +168,6 @@ def process_dark_images(folder, callback=None, callback2=None):
def process_bright_images(folder, darkavg, darkstd, callback=None, save=True):
-
ch0list, ch1list = get_channel_list(folder)
pre, post = combine_stacks(ch0list, ch1list, darkavg)
@@ -203,7 +202,6 @@ def process_bright_images(folder, darkavg, darkstd, callback=None, save=True):
if __name__ == "__main__":
-
# this script assumes you have aquired a series of 2-channel zstacks
# (not actually a stack, turn off Z galvo, and Z and Sample Piezos
# the first channel is "bright" and "even" (such as 488 laser sheet exciting FITC)
diff --git a/llspy/camera.py b/src/llspy/camera.py
similarity index 95%
rename from llspy/camera.py
rename to src/llspy/camera.py
index d1494ef..a858a9b 100644
--- a/llspy/camera.py
+++ b/src/llspy/camera.py
@@ -85,9 +85,7 @@ def selectiveMedianFilter(
100 * np.sum(pixelMatrix.flatten()) / float(len(pixelMatrix.flatten()))
)
print(
- "Bad pixels detected: {} {:0.2f}".format(
- np.sum(pixelMatrix.flatten()), pixpercent
- )
+ f"Bad pixels detected: {np.sum(pixelMatrix.flatten())} {pixpercent:0.2f}"
)
dt = stack.dtype
@@ -239,17 +237,17 @@ def __init__(self, fname=config.__CAMPARAMS__, data=None, roi=None):
if not self.shape[0] >= 3:
raise ValueError(
"Camera parameter file must have at least "
- "3 planes. {} has only {}".format(fname, self.shape[0])
+ f"3 planes. {fname} has only {self.shape[0]}"
)
if not self.roi.width == self.shape[1]:
raise ValueError(
"Tiff file provided does not have the same width "
- "({}) as the proivded roi ({})".format(self.shape[1], self.roi.width)
+ f"({self.shape[1]}) as the proivded roi ({self.roi.width})"
)
if not self.roi.height == self.shape[2]:
raise ValueError(
"Tiff file provided does not have the same height "
- "({}) as the proivded roi ({})".format(self.shape[2], self.roi.height)
+ f"({self.shape[2]}) as the proivded roi ({self.roi.height})"
)
self.width = self.roi.width
self.height = self.roi.height
@@ -303,7 +301,7 @@ def correct_stacks(
raise ValueError(f"Empty list of stacks received: {stacks}")
if len({S.shape for S in stacks}) > 1:
raise ValueError("All stacks in list must have the same shape")
- if not all([isinstance(S, np.ndarray) for S in stacks]):
+ if not all(isinstance(S, np.ndarray) for S in stacks):
raise ValueError("All stacks in list must be of type: np.ndarray")
# interleave stacks into single 3D so that they are in the order:
@@ -332,7 +330,7 @@ def correct_stacks(
else:
raise ValueError(
"unrecognized value for flashCorrectTarget "
- "parameter: {}".format(flashCorrectTarget)
+ f"parameter: {flashCorrectTarget}"
)
# interleaved = np.subtract(interleaved, self.offset)
@@ -348,14 +346,14 @@ def correct_stacks(
# (particularly if an object is truncated and there's more content
# just off to the side of the camera ROI)
# this will delete the edge columns
- if any([any(i) for i in trim]):
+ if any(any(i) for i in trim):
interleaved = arrayfun.trimedges(interleaved, trim, numStacks)
if not np.issubdtype(interleaved.dtype, typ):
warnings.warn("CONVERTING")
interleaved = interleaved.astype(typ)
- deinterleaved = [s for s in np.split(interleaved, interleaved.shape[0])]
+ deinterleaved = list(np.split(interleaved, interleaved.shape[0]))
deinterleaved = [
np.concatenate(deinterleaved[q::numStacks]) for q in range(numStacks)
]
@@ -364,7 +362,6 @@ def correct_stacks(
if __name__ == "__main__":
-
from llspy import llsdir, samples
paramfile = samples.camparams # path to the calibration file
diff --git a/llspy/compress.py b/src/llspy/compress.py
similarity index 100%
rename from llspy/compress.py
rename to src/llspy/compress.py
diff --git a/llspy/config.py b/src/llspy/config.py
similarity index 100%
rename from llspy/config.py
rename to src/llspy/config.py
diff --git a/llspy/cudabinwrapper.py b/src/llspy/cudabinwrapper.py
similarity index 98%
rename from llspy/cudabinwrapper.py
rename to src/llspy/cudabinwrapper.py
index d8c85f2..b49d65a 100644
--- a/llspy/cudabinwrapper.py
+++ b/src/llspy/cudabinwrapper.py
@@ -349,7 +349,7 @@ def _get_options(self):
def has_option(self, flag):
"""check the existence of a given flag in the binary help string."""
- return any([flag in key for key in self.options.keys()])
+ return any(flag in key for key in self.options.keys())
def has_option_longname(self, name):
"""check the existence of a given flag in the binary help string."""
@@ -372,7 +372,7 @@ def describe_option(self, flag):
print the description provided in the binary help string for a given flag
"""
if self.has_option(flag):
- return self.options[[key for key in self.options.keys() if flag in key][0]]
+ return self.options[next(key for key in self.options.keys() if flag in key)]
else:
logger.warning(f'The flag "{flag}" is not listed in the help string.')
diff --git a/llspy/exceptions.py b/src/llspy/exceptions.py
similarity index 100%
rename from llspy/exceptions.py
rename to src/llspy/exceptions.py
diff --git a/llspy/gui/__init__.py b/src/llspy/gui/__init__.py
similarity index 100%
rename from llspy/gui/__init__.py
rename to src/llspy/gui/__init__.py
diff --git a/llspy/gui/before_after.png b/src/llspy/gui/before_after.png
similarity index 100%
rename from llspy/gui/before_after.png
rename to src/llspy/gui/before_after.png
diff --git a/llspy/gui/camcalibgui.py b/src/llspy/gui/camcalibgui.py
similarity index 98%
rename from llspy/gui/camcalibgui.py
rename to src/llspy/gui/camcalibgui.py
index e9e3095..195e73e 100644
--- a/llspy/gui/camcalibgui.py
+++ b/src/llspy/gui/camcalibgui.py
@@ -43,7 +43,7 @@ def updatedarkstatus(prog):
# first handle dark images
darkavg = self.darkavg
darkstd = self.darkstd
- if not all([isinstance(a, np.ndarray) for a in (darkavg, darkstd)]):
+ if not all(isinstance(a, np.ndarray) for a in (darkavg, darkstd)):
self.setStatus.emit("Loading dark images... [Step 1 of 4]")
darklist = glob.glob(os.path.join(self.folder, "*dark*.tif"))
numdark = len(darklist)
@@ -126,7 +126,6 @@ def setFolder(self):
self.darkSTDineEdit.setText(os.path.join(folder, "dark_STD.tif"))
def processFolder(self):
-
folder = self.camCalibFolderLineEdit.text()
darkavg = None
@@ -140,7 +139,7 @@ def processFolder(self):
elif os.path.isfile(os.path.join(folder, "dark_STD.tif")):
darkstd = tf.imread(os.path.join(folder, "dark_STD.tif"))
- if not all([isinstance(a, np.ndarray) for a in (darkavg, darkstd)]):
+ if not all(isinstance(a, np.ndarray) for a in (darkavg, darkstd)):
if not pathHasPattern(folder, "*dark*.tif*"):
QtW.QMessageBox.warning(
self,
@@ -214,7 +213,6 @@ def _abort(self):
if __name__ == "__main__":
-
app = QtW.QApplication(sys.argv)
# dlg = LogWindow()
# dlg.show()
diff --git a/llspy/gui/camcordialog.py b/src/llspy/gui/camcordialog.py
similarity index 100%
rename from llspy/gui/camcordialog.py
rename to src/llspy/gui/camcordialog.py
diff --git a/llspy/gui/camcordialog.ui b/src/llspy/gui/camcordialog.ui
similarity index 100%
rename from llspy/gui/camcordialog.ui
rename to src/llspy/gui/camcordialog.ui
diff --git a/llspy/gui/exceptions.py b/src/llspy/gui/exceptions.py
similarity index 95%
rename from llspy/gui/exceptions.py
rename to src/llspy/gui/exceptions.py
index 21e1398..b1726d7 100644
--- a/llspy/gui/exceptions.py
+++ b/src/llspy/gui/exceptions.py
@@ -36,16 +36,14 @@ def fetch_package_version(dist_name):
>>> fetch_package_version('sentry')
"""
try:
- # Importing pkg_resources can be slow, so only import it
- # if we need it.
- import pkg_resources
+ from importlib.metadata import version
except ImportError:
- # pkg_resource is not available on Google App Engine
- raise NotImplementedError(
- "pkg_resources is not available " "on this Python install"
- )
- dist = pkg_resources.get_distribution(dist_name)
- return dist.version
+ from importlib_metadata import version
+
+ try:
+ return version(dist_name)
+ except Exception as e:
+ raise NotImplementedError(f"Could not fetch the version for {dist_name}: {e}")
def fetch_git_sha(path, head=None):
diff --git a/llspy/gui/guiDefaults.ini b/src/llspy/gui/guiDefaults.ini
similarity index 100%
rename from llspy/gui/guiDefaults.ini
rename to src/llspy/gui/guiDefaults.ini
diff --git a/llspy/gui/helpers.py b/src/llspy/gui/helpers.py
similarity index 99%
rename from llspy/gui/helpers.py
rename to src/llspy/gui/helpers.py
index cffa637..3825c91 100644
--- a/llspy/gui/helpers.py
+++ b/src/llspy/gui/helpers.py
@@ -95,7 +95,7 @@ def string_to_iterable(string):
it.extend(list(range(int(item[0]), int(item[1]) + 1, int(item[2]))))
else:
raise ValueError("Iterable string items must be of length <= 3")
- return sorted(list(set(it)))
+ return sorted(set(it))
def guisave(widget, settings):
diff --git a/llspy/gui/img_dialog.py b/src/llspy/gui/img_dialog.py
similarity index 97%
rename from llspy/gui/img_dialog.py
rename to src/llspy/gui/img_dialog.py
index 2983e03..0d1e17e 100644
--- a/llspy/gui/img_dialog.py
+++ b/src/llspy/gui/img_dialog.py
@@ -11,13 +11,13 @@
matplotlib.use("Qt5Agg")
-from matplotlib.backends.backend_qt5agg import ( # noqa: E402
+from matplotlib.backends.backend_qt5agg import (
FigureCanvasQTAgg as FigureCanvas,
)
-from matplotlib.backends.backend_qt5agg import ( # noqa: E402
+from matplotlib.backends.backend_qt5agg import (
NavigationToolbar2QT as NavigationToolbar,
)
-from matplotlib.figure import Figure # noqa: E402
+from matplotlib.figure import Figure
logger = logging.getLogger(__name__)
@@ -73,7 +73,6 @@ def __init__(self, name, wave=None, parent=None):
class DataModel(QtCore.QObject):
-
_idxChanged = QtCore.Signal()
_dataChanged = QtCore.Signal()
@@ -138,13 +137,13 @@ def setData(self, data):
self.isComplex = data.dtype == np.complex64 or data.dtype == np.complex128
if self.ndim == 2:
- self.shape = (1, 1, 1) + data.shape
+ self.shape = (1, 1, 1, *data.shape)
self.data = data.copy().reshape(self.shape)
elif self.ndim == 3:
- self.shape = (1, 1) + data.shape
+ self.shape = (1, 1, *data.shape)
self.data = data.copy().reshape(self.shape)
elif self.ndim == 4:
- self.shape = (1,) + data.shape
+ self.shape = (1, *data.shape)
self.data = data.copy().reshape(self.shape)
elif self.ndim == 5:
self.shape = data.shape
@@ -233,7 +232,7 @@ def getCurrent(self):
if not self.chanSettings[chan]["active"]:
continue
lut = self.chanSettings[chan]["lut"]
- D = np.maximum(data[chan].astype(np.float) - self.cmin[chan], 0)
+ D = np.maximum(data[chan].astype(float) - self.cmin[chan], 0)
D /= self.cmax[chan] if self.projection is None else D.max()
D *= self.chanSettings[chan]["scale"]
D = np.tile(D, (3, 1, 1)).transpose(1, 2, 0)
@@ -294,7 +293,6 @@ def __getitem__(self, tczTuple):
class MplCanvas(FigureCanvas):
-
_contrastChanged = QtCore.Signal()
def __init__(self):
@@ -317,7 +315,7 @@ def f_c(x, y):
def setDisplayOptions(self, options):
self.displayOptions = options
- if not ("cmap" in self.displayOptions):
+ if "cmap" not in self.displayOptions:
self.displayOptions["cmap"] = "cubehelix"
self.cmaps = tuple(
{self.displayOptions["cmap"], "gray", "afmhot", "cubehelix", "inferno"}
@@ -569,8 +567,8 @@ def update_axis_slider(self, axis, n):
slid = getattr(self, axis.upper() + "slider")
if n > 1:
if not (
- (axis == "z" and getattr(self.data, "projection"))
- or (axis == "c" and getattr(self.data, "_overlay"))
+ (axis == "z" and self.data.projection)
+ or (axis == "c" and self.data._overlay)
):
widg.show()
slid.setMaximum(n - 1)
@@ -582,7 +580,6 @@ def update_sliders(self):
self.update_axis_slider(axis, n)
def initialize(self):
-
datamax = self.data.max()
datamin = self.data.min()
# dataRange = datamax - datamin
diff --git a/llspy/gui/img_window.py b/src/llspy/gui/img_window.py
similarity index 100%
rename from llspy/gui/img_window.py
rename to src/llspy/gui/img_window.py
diff --git a/llspy/gui/img_window.ui b/src/llspy/gui/img_window.ui
similarity index 100%
rename from llspy/gui/img_window.ui
rename to src/llspy/gui/img_window.ui
diff --git a/llspy/gui/logo_dark.png b/src/llspy/gui/logo_dark.png
similarity index 100%
rename from llspy/gui/logo_dark.png
rename to src/llspy/gui/logo_dark.png
diff --git a/llspy/gui/logo_light.png b/src/llspy/gui/logo_light.png
similarity index 100%
rename from llspy/gui/logo_light.png
rename to src/llspy/gui/logo_light.png
diff --git a/llspy/gui/main_gui.py b/src/llspy/gui/main_gui.py
similarity index 100%
rename from llspy/gui/main_gui.py
rename to src/llspy/gui/main_gui.py
diff --git a/llspy/gui/main_gui.ui b/src/llspy/gui/main_gui.ui
similarity index 100%
rename from llspy/gui/main_gui.ui
rename to src/llspy/gui/main_gui.ui
diff --git a/llspy/gui/mainwindow.py b/src/llspy/gui/mainwindow.py
similarity index 98%
rename from llspy/gui/mainwindow.py
rename to src/llspy/gui/mainwindow.py
index 6ca5d1b..33418bc 100644
--- a/llspy/gui/mainwindow.py
+++ b/src/llspy/gui/mainwindow.py
@@ -6,13 +6,13 @@
import os.path as osp
import numpy as np
-from fiducialreg.fiducialreg import RegFile, RegistrationError
from qtpy import QtCore, QtGui
from qtpy import QtWidgets as QtW
import llspy
import llspy.gui.exceptions as err
import llspy.llsdir
+from fiducialreg.fiducialreg import RegFile, RegistrationError
from llspy.gui import workers
from llspy.gui.camcalibgui import CamCalibDialog
from llspy.gui.helpers import (
@@ -79,7 +79,18 @@
class LLSDragDropTable(QtW.QTableWidget):
- colHeaders = ["path", "name", "nC", "nT", "nZ", "nY", "nX", "angle", "dz", "dx"]
+ colHeaders = [ # noqa
+ "path",
+ "name",
+ "nC",
+ "nT",
+ "nZ",
+ "nY",
+ "nX",
+ "angle",
+ "dz",
+ "dx",
+ ]
nCOLS = len(colHeaders)
# A signal needs to be defined on class level:
@@ -537,17 +548,12 @@ def loadRegistrationFile(self, file=None):
try:
with open(file) as json_data:
regdict = json.load(json_data)
- refs = sorted(list({t["reference"] for t in regdict["tforms"]}))
+ refs = sorted({t["reference"] for t in regdict["tforms"]})
# mov = set([t['moving'] for t in regdict['tforms']])
modes = ["None"]
modes.extend(
sorted(
- list(
- {
- t["mode"].title().replace("Cpd", "CPD")
- for t in regdict["tforms"]
- }
- )
+ {t["mode"].title().replace("Cpd", "CPD") for t in regdict["tforms"]}
)
)
self.RegCalib_channelRefCombo.clear()
@@ -1138,14 +1144,12 @@ def displayPreview(self, array, dx, dz, params=None):
viewer.dims.ndisplay = 3
self.spimwins.append(viewer)
elif self.prevBackendSpimagineRadio.isChecked() and _SPIMAGINE_IMPORTED:
-
if np.squeeze(array).ndim > 4:
arrays = [array[:, i] for i in range(array.shape[1])]
else:
arrays = [np.squeeze(array)]
for arr in arrays:
-
datamax = arr.max()
datamin = arr.min()
dataRange = datamax - datamin
@@ -1660,7 +1664,6 @@ def concatenateSelected(self):
[self.listbox.addPath(p) for p in selectedPaths]
def undoRenameSelected(self):
-
box = QtW.QMessageBox()
box.setWindowTitle("Undo Renaming")
box.setText(
@@ -1695,7 +1698,7 @@ def undoRenameSelected(self):
paths = self.listbox.renamedPaths
for P in paths:
- for root, subd, file in os.walk(P):
+ for root, subd, _file in os.walk(P):
self.listbox.removePath(root)
for d in subd:
self.listbox.removePath(os.path.join(root, d))
@@ -1712,7 +1715,6 @@ def renameSelected(self):
[self.listbox.addPath(osp.join(item, p)) for p in os.listdir(item)]
def mergeMIPtool(self):
-
if len(self.listbox.selectedPaths()):
for obj in self.listbox.selectedObjects():
obj.mergemips()
@@ -1733,13 +1735,14 @@ def toggleOptOut(self, value):
def checkBundled(self, value):
if value:
try:
- bin = llspy.cudabinwrapper.get_bundled_binary()
+ bin_ = llspy.cudabinwrapper.get_bundled_binary()
except llspy.cudabinwrapper.CUDAbinException:
logger.warning(
- "Could not load bundled cudaDeconv. Check that it is installed. read docs"
+ "Could not load bundled cudaDeconv. "
+ "Check that it is installed read docs"
)
return
- self.setBinaryPath(bin)
+ self.setBinaryPath(bin_)
else:
self.setBinaryPath(self.cudaDeconvPathLineEdit.text())
@@ -1835,12 +1838,10 @@ def showAboutWindow(self):
QtW.QMessageBox.about(
self,
"LLSpy",
- """LLSpy v.{}\n
-Copyright © {}, President and Fellows of Harvard College. All rights reserved.\n\n
+ f"""LLSpy v.{llspy.__version__}\n
+Copyright © {now.year}, President and Fellows of Harvard College. All rights reserved.\n\n
Developed by Talley Lambert\n\n
-The cudaDeconv deconvolution program was written by Lin Shao and by Dan Milkie at Janelia Research Campus, and modified by Talley Lambert for LLSpy. """.format(
- llspy.__version__, now.year
- ),
+The cudaDeconv deconvolution program was written by Lin Shao and by Dan Milkie at Janelia Research Campus, and modified by Talley Lambert for LLSpy. """,
)
def showHelpWindow(self):
diff --git a/llspy/gui/qtlogger.py b/src/llspy/gui/qtlogger.py
similarity index 96%
rename from llspy/gui/qtlogger.py
rename to src/llspy/gui/qtlogger.py
index ef0d78f..82274b2 100644
--- a/llspy/gui/qtlogger.py
+++ b/src/llspy/gui/qtlogger.py
@@ -27,7 +27,6 @@ def formatException(self, exc_info):
class NotificationHandler(QObject, logging.Handler):
-
emitSignal = Signal(str)
def __init__(self):
@@ -58,6 +57,6 @@ def __init__(self, **kwargs):
def filter(self, record):
permitted = ["root", "llspy", "spimagine", "fiducialreg", "gputools"]
- if any(record.name.startswith(l) for l in permitted):
+ if any(record.name.startswith(x) for x in permitted):
return True
return False
diff --git a/llspy/gui/styles.py b/src/llspy/gui/styles.py
similarity index 100%
rename from llspy/gui/styles.py
rename to src/llspy/gui/styles.py
diff --git a/llspy/gui/watcher.py b/src/llspy/gui/watcher.py
similarity index 100%
rename from llspy/gui/watcher.py
rename to src/llspy/gui/watcher.py
diff --git a/llspy/gui/workers.py b/src/llspy/gui/workers.py
similarity index 95%
rename from llspy/gui/workers.py
rename to src/llspy/gui/workers.py
index 7878216..34e0b2a 100644
--- a/llspy/gui/workers.py
+++ b/src/llspy/gui/workers.py
@@ -63,8 +63,11 @@ def work(self):
"""
logger.debug(f"Subprocess {self.name} START")
self._logger.info(
- "~" * 20 + "\nRunning {} thread_{} with args: "
- "\n{}\n".format(self.binary, self.id, " ".join(self.args)) + "\n"
+ "~" * 20
+ + "\nRunning {} thread_{} with args: " "\n{}\n".format(
+ self.binary, self.id, " ".join(self.args)
+ )
+ + "\n"
)
self.process.finished.connect(self.onFinished)
self.process.finished.connect(
@@ -110,9 +113,7 @@ def procErrorRead(self):
def onFinished(self, exitCode, exitStatus):
statusmsg = {0: "exited normally", 1: "crashed"}
self._logger.info(
- "{} #{} {} with exit code: {}".format(
- self.name, self.id, statusmsg[exitStatus], exitCode
- )
+ f"{self.name} #{self.id} {statusmsg[exitStatus]} with exit code: {exitCode}"
)
self.finished.emit()
@@ -146,15 +147,12 @@ def procReadyRead(self):
def onFinished(self, exitCode, exitStatus):
statusmsg = {0: "exited normally", 1: "crashed"}
self._logger.info(
- "{} #{} {} with exit code: {}".format(
- self.name, self.id, statusmsg[exitStatus], exitCode
- )
+ f"{self.name} #{self.id} {statusmsg[exitStatus]} with exit code: {exitCode}"
)
self.finished.emit(self.id)
class CompressionWorker(SubprocessWorker):
-
status_update = QtCore.Signal(str, int)
def __init__(self, path, mode="compress", binary=None, wid=1, **kwargs):
@@ -191,9 +189,7 @@ def work(self):
break
if not self.binary:
raise err.MissingBinaryError(
- "No binary found for compression program: {}".format(
- llspy.compress.EXTENTIONS[tar_extension]
- )
+ f"No binary found for compression program: {llspy.compress.EXTENTIONS[tar_extension]}"
)
self.args = ["-dv", tar_compressed]
self.process.finished.connect(
@@ -204,7 +200,7 @@ def work(self):
if llspy.util.find_filepattern(self.path, "*.tar*"):
raise err.LLSpyError(
"There are both raw tiffs and a compressed file in "
- "directory: {}".format(self.path),
+ f"directory: {self.path}",
"If you would like to compress this directory, "
"please either remove any existing *.tar files, or remove "
"the uncompressed tiff files. Alternatively, you can use "
@@ -235,7 +231,6 @@ def untar(self, tarball, delete=True):
with tarfile.open(tarball) as tar:
def is_within_directory(directory, target):
-
abs_directory = os.path.abspath(directory)
abs_target = os.path.abspath(target)
@@ -244,7 +239,6 @@ def is_within_directory(directory, target):
return prefix == abs_directory
def safe_extract(tar, path=".", members=None, *, numeric_owner=False):
-
for member in tar.getmembers():
member_path = os.path.join(path, member.name)
if not is_within_directory(path, member_path):
@@ -327,8 +321,8 @@ def split(a, n):
if len(tRange) == E.parameters.nt:
cudaOpts["filename-pattern"] = f"_ch{chan}_"
else:
- cudaOpts["filename-pattern"] = "_ch{}.*_stack{}".format(
- chan, llspy.util.pyrange_to_perlregex(tRange)
+ cudaOpts["filename-pattern"] = (
+ f"_ch{chan}.*_stack{llspy.util.pyrange_to_perlregex(tRange)}"
)
argQueue.append(binary.assemble_args(**cudaOpts))
@@ -345,7 +339,6 @@ def split(a, n):
class LLSitemWorker(QtCore.QObject):
-
sig_starting_item = QtCore.Signal(str, int) # item path, numfiles
status_update = QtCore.Signal(str) # update mainGUI status
@@ -426,7 +419,7 @@ def work(self):
raise
# if not flash correcting but there is trimming/median filter requested
elif self.P.medianFilter or any(
- [any(i) for i in (self.P.trimX, self.P.trimY, self.P.trimZ)]
+ any(i) for i in (self.P.trimX, self.P.trimY, self.P.trimZ)
):
self.E.path = self.E.median_and_trim(**self.P)
@@ -438,7 +431,6 @@ def work(self):
# only call cudaDeconv if we need to deskew or deconvolve
if self.P.nIters > 0 or (self.P.deskew != 0 and self.P.saveDeskewedRaw):
-
try:
# check the binary path and create object
binary = llspy.cudabinwrapper.CUDAbin(_CUDABIN)
@@ -509,9 +501,7 @@ def on_file_finished(self):
# update status bar
self.nFiles_done = self.nFiles_done + 1
self.status_update.emit(
- "Processing {}: ({} of {})".format(
- self.shortname, self.nFiles_done, self.nFiles
- )
+ f"Processing {self.shortname}: ({self.nFiles_done} of {self.nFiles})"
)
# update progress bar
self.progressUp.emit()
@@ -535,7 +525,7 @@ def on_CUDAworker_done(self, worker_id):
# ... only as fast as the slowest GPU
# could probably fix easily by delivering a value to startCudaWorkers
# instead of looping through gpu inside of it
- if not any([v for v in self.__CUDAthreads.values()]):
+ if not any(v for v in self.__CUDAthreads.values()):
# if there's still stuff left in the argQueue for this item, keep going
if self.aborted:
self.aborted = False
@@ -547,7 +537,6 @@ def on_CUDAworker_done(self, worker_id):
self.post_process()
def post_process(self):
-
if self.P.doReg:
self.status_update.emit(f"Doing Channel Registration: {self.E.basename}")
try:
@@ -602,7 +591,7 @@ def post_process(self):
@QtCore.Slot()
def abort(self):
self._logger.info(f"LLSworker #{self.__id} notified to abort")
- if any([v for v in self.__CUDAthreads.values()]):
+ if any(v for v in self.__CUDAthreads.values()):
self.aborted = True
self.__argQueue = []
self.sig_abort.emit()
diff --git a/llspy/libcudawrapper.py b/src/llspy/libcudawrapper.py
similarity index 98%
rename from llspy/libcudawrapper.py
rename to src/llspy/libcudawrapper.py
index d18bc09..c1bf226 100644
--- a/llspy/libcudawrapper.py
+++ b/src/llspy/libcudawrapper.py
@@ -181,9 +181,7 @@ def deskewGPU(im, dz=0.5, dr=0.102, angle=31.5, width=0, shift=0, padVal=0.0):
im = im.astype(np.float32)
# have to calculate this here to know the size of the return array
if width == 0:
- deskewedNx = np.int(
- nx + np.floor(nz * dz * abs(np.cos(angle * np.pi / 180)) / dr)
- )
+ deskewedNx = int(nx + np.floor(nz * dz * abs(np.cos(angle * np.pi / 180)) / dr))
else:
deskewedNx = width
@@ -216,7 +214,7 @@ def affineGPU(im, tmat, dzyx=None):
result = np.empty((nz, ny, nx), dtype=np.float32)
if (
isinstance(dzyx, (tuple, list))
- and all([isinstance(i, float) for i in dzyx])
+ and all(isinstance(i, float) for i in dzyx)
and len(dzyx) == 3
):
# note, dzyx coordinate order is flipped when handing to Affine_interface_RA
@@ -301,7 +299,7 @@ def RL_init(
deskew=31.5,
rotate=0,
width=0,
- **kwargs
+ **kwargs,
):
requireCUDAlib()
nz, ny, nx = rawdata_shape
@@ -417,7 +415,6 @@ def RL_decon(
plt.show()
RL_cleanup()
elif sys.argv[1] == "camcor":
-
import time
from llspy import llsdir, samples
diff --git a/llspy/libinstall.py b/src/llspy/libinstall.py
similarity index 94%
rename from llspy/libinstall.py
rename to src/llspy/libinstall.py
index f0dad8e..17b7786 100644
--- a/llspy/libinstall.py
+++ b/src/llspy/libinstall.py
@@ -57,7 +57,7 @@ def find_libpath(path):
if is_libpath(os.path.join(path, "lib")):
return os.path.join(path, "lib")
else:
- for dirpath, dirnames, filenames in os.walk(path):
+ for _dirpath, dirnames, _filenames in os.walk(path):
for P in dirnames:
if is_libpath(P):
return P
@@ -77,7 +77,7 @@ def find_binpath(path):
if is_binpath(os.path.join(path, "bin")):
return os.path.join(path, "bin")
else:
- for dirpath, dirnames, filenames in os.walk(path):
+ for _dirpath, dirnames, _filenames in os.walk(path):
for P in dirnames:
if is_binpath(P):
return P
@@ -156,9 +156,7 @@ def install(dirpath, dryrun=False):
os.remove(D)
except PermissionError:
print(
- "Permission Error: you must manually remove or replace this file: {}".format(
- D
- )
+ f"Permission Error: you must manually remove or replace this file: {D}"
)
continue
copyfile(src, D)
diff --git a/llspy/llsdir.py b/src/llspy/llsdir.py
similarity index 96%
rename from llspy/llsdir.py
rename to src/llspy/llsdir.py
index 6915b52..a6ea08c 100644
--- a/llspy/llsdir.py
+++ b/src/llspy/llsdir.py
@@ -13,13 +13,12 @@
import numpy as np
import tifffile as tf
+from parse import parse as _parse
from llspy.libcudawrapper import affineGPU, deskewGPU, quickDecon
-from parse import parse as _parse
-from . import arrayfun, compress, config
+from . import arrayfun, compress, config, parse, schema, util
from . import otf as otfmodule
-from . import parse, schema, util
from .camera import CameraParameters, selectiveMedianFilter
from .cudabinwrapper import CUDAbin
from .exceptions import LLSpyError, OTFError
@@ -119,7 +118,7 @@ def filter_stack(filename, outname, dx, background, trim, medianFilter):
stack = util.imread(filename)
if medianFilter:
stack, _ = selectiveMedianFilter(stack, background)
- if any([any(i) for i in trim]):
+ if any(any(i) for i in trim):
stack = arrayfun.trimedges(stack, trim)
util.imsave(util.reorderstack(np.squeeze(stack), "zyx"), outname, dx=dx, dz=1)
@@ -170,14 +169,14 @@ def get_regObj(regCalibPath):
return refObj
-def register_folder(
- folder, regRefWave, regMode, regObj, voxsize=[1, 1, 1], discard=False
-):
+def register_folder(folder, regRefWave, regMode, regObj, voxsize=None, discard=False):
"""Register all (non-reference) wavelengths in a folder to the specified
reference wavelength, using the provided regObj.
voxsize must be an array of pixel sizes [dz, dy, dx]
"""
+ if voxsize is None:
+ voxsize = [1, 1, 1]
if isinstance(regObj, str):
regObj = get_regObj(regObj)
folder = str(folder)
@@ -266,7 +265,7 @@ def preview(exp, tR=0, cR=None, **kwargs):
if not isinstance(exp, LLSdir):
if isinstance(exp, str):
exp = LLSdir(exp)
- logger.debug(f"Preview called on {str(exp.path)}")
+ logger.debug(f"Preview called on {exp.path!s}")
logger.debug(f"Params: {exp.parameters}")
if exp.is_compressed():
@@ -308,7 +307,7 @@ def preview(exp, tR=0, cR=None, **kwargs):
else:
# camera correction trims edges, so if we aren't doing the camera correction
# we need to call the edge trim on our own
- if any([any(i) for i in (P.trimZ, P.trimY, P.trimX)]):
+ if any(any(i) for i in (P.trimZ, P.trimY, P.trimX)):
stacks = [
arrayfun.trimedges(s, (P.trimZ, P.trimY, P.trimX)) for s in stacks
]
@@ -367,8 +366,7 @@ def preview(exp, tR=0, cR=None, **kwargs):
)
else:
logger.error(
- "Registration Calibration dir not valid"
- "{}".format(P.regCalibPath)
+ "Registration Calibration dir not valid" f"{P.regCalibPath}"
)
out.append(np.stack(stacks, 0))
@@ -399,7 +397,7 @@ def process(exp, binary=None, **kwargs):
if not isinstance(exp, LLSdir):
if isinstance(exp, str):
exp = LLSdir(exp)
- logger.debug(f"Process called on {str(exp.path)}")
+ logger.debug(f"Process called on {exp.path!s}")
logger.debug(f"Params: {exp.parameters}")
if exp.is_compressed():
@@ -419,7 +417,7 @@ def process(exp, binary=None, **kwargs):
if P.correctFlash:
exp.path = exp.correct_flash(**P)
- elif P.medianFilter or any([any(i) for i in (P.trimX, P.trimY, P.trimZ)]):
+ elif P.medianFilter or any(any(i) for i in (P.trimX, P.trimY, P.trimZ)):
exp.path = exp.median_and_trim(**P)
if P.nIters > 0 or P.saveDeskewedRaw or P.rotate:
@@ -449,9 +447,7 @@ def process(exp, binary=None, **kwargs):
): # processing all the timepoints
filepattern = f"ch{chan}_"
else:
- filepattern = "ch{}_stack{}".format(
- chan, util.pyrange_to_perlregex(P.tRange)
- )
+ filepattern = f"ch{chan}_stack{util.pyrange_to_perlregex(P.tRange)}"
binary.process(str(exp.path), filepattern, P.otfs[chan], **opts)
@@ -499,7 +495,7 @@ def mergemips(folder, axis, write=True, dx=1, dt=1, delete=True, fpattern=None):
fpattern = __FPATTERN__
folder = plib.Path(folder)
if not folder.is_dir():
- raise OSError(f"MIP folder does not exist: {str(folder)}")
+ raise OSError(f"MIP folder does not exist: {folder!s}")
try:
filelist = []
@@ -556,7 +552,7 @@ def mergemips(folder, axis, write=True, dx=1, dt=1, delete=True, fpattern=None):
return stack
except ValueError as e:
- logger.error(f"ERROR: failed to merge MIPs from {str(folder)}: ")
+ logger.error(f"ERROR: failed to merge MIPs from {folder!s}: ")
logger.error(f"{e}")
@@ -840,12 +836,10 @@ def read_tiff_header(self):
with tf.TiffFile(self.tiff.raw[0]) as firstTiff:
self.parameters.shape = firstTiff.series[0].shape
try:
- self.tiff.bit_depth = getattr(firstTiff.pages[0], "bitspersample")
+ self.tiff.bit_depth = firstTiff.pages[0].bitspersample
except AttributeError:
try:
- self.tiff.bit_depth = getattr(
- firstTiff.pages[0], "bits_per_sample"
- )
+ self.tiff.bit_depth = firstTiff.pages[0].bits_per_sample
except AttributeError:
self.tiff.bit_depth = 16
(
@@ -924,9 +918,7 @@ def reduce_to_raw(self, keepmip=True, verbose=True):
shutil.rmtree(str(self.path.joinpath(folder)))
except Exception as e:
logger.error(
- "unable to remove directory: {}".format(
- self.path.joinpath(folder)
- )
+ f"unable to remove directory: {self.path.joinpath(folder)}"
)
logger.error(e)
return 0
@@ -977,7 +969,7 @@ def localParams(self, recalc=False, **kwargs):
"""
# allow for 'lazy' storage of previously calculated value
if "_localParams" in dir(self) and not recalc:
- if all([self._localParams[k] == v for k, v in kwargs.items()]):
+ if all(self._localParams[k] == v for k, v in kwargs.items()):
return self._localParams
_schema = schema.procParams(kwargs)
assert (
@@ -1002,9 +994,7 @@ def localParams(self, recalc=False, **kwargs):
logger.warning(f"Channel {chan} not present in datset! Excluding.")
if np.max(list(_schema.cRange)) > (self.parameters.nc - 1):
logger.warning(
- "cRange was larger than number of Channels! Excluding C > {}".format(
- self.parameters.nc - 1
- )
+ f"cRange was larger than number of Channels! Excluding C > {self.parameters.nc - 1}"
)
_schema.cRange = outrange
@@ -1021,15 +1011,11 @@ def localParams(self, recalc=False, **kwargs):
_schema.tRange = [minT]
if max(list(_schema.tRange)) > maxT:
logger.warning(
- "max tRange was greater than the last timepoint. Excluding T > {}".format(
- maxT
- )
+ f"max tRange was greater than the last timepoint. Excluding T > {maxT}"
)
if min(list(_schema.tRange)) < minT:
logger.warning(
- "min tRange was less than the first timepoint. Excluding < {}".format(
- minT
- )
+ f"min tRange was less than the first timepoint. Excluding < {minT}"
)
assert len(_schema.tRange), "No valid timepoints!"
@@ -1180,14 +1166,11 @@ def get_otf(self, wave, otfpath=config.__OTFPATH__):
if mask:
raise OTFError(
"Could not find OTF for "
- "wave {}, mask {}-{} in path: {}".format(
- wave, outerNA, innerNA, otfpath
- )
+ f"wave {wave}, mask {outerNA}-{innerNA} in path: {otfpath}"
)
else:
raise OTFError(
- "Could not find OTF for "
- "wave {} in path: {}".format(wave, otfpath)
+ "Could not find OTF for " f"wave {wave} in path: {otfpath}"
)
return otf
@@ -1226,7 +1209,6 @@ def median_and_trim(
trimX=(0, 0),
**kwargs,
):
-
trim = (trimZ, trimY, trimX)
outpath = self.path.joinpath("Corrected")
@@ -1364,9 +1346,7 @@ def register(self, regRefWave, regMode, regCalibPath, discard=False):
D, regRefWave, regMode, regObj, voxsize, discard=discard
)
else:
- logger.error(
- "Registration Calibration path not valid" "{}".format(regCalibPath)
- )
+ logger.error("Registration Calibration path not valid" f"{regCalibPath}")
def toJSON(self):
import json
@@ -1428,7 +1408,7 @@ def getdata(self):
return [util.imread(f) for f in self.get_t(self.t)]
def has_data(self):
- return all([isinstance(a, np.ndarray) for a in self.data])
+ return all(isinstance(a, np.ndarray) for a in self.data)
def toJSON(self):
D = self.__dict__.copy()
@@ -1503,7 +1483,7 @@ def default(self, obj):
if isinstance(obj, np.ndarray):
if all(isinstance(i, np.ndarray) for i in obj):
nestedList = obj.tolist()
- result = [self.fixedString(l) for l in nestedList]
+ result = [self.fixedString(x) for x in nestedList]
return result
else:
return obj.tolist()
diff --git a/llspy/otf.py b/src/llspy/otf.py
similarity index 98%
rename from llspy/otf.py
rename to src/llspy/otf.py
index f2dba93..8be089d 100644
--- a/llspy/otf.py
+++ b/src/llspy/otf.py
@@ -126,10 +126,8 @@ def makeotf(
def dir_has_otfs(dirname):
if os.path.isdir(str(dirname)):
if any(
- [
- (psffile_pattern.search(t) or default_otf_pattern.search(t))
- for t in os.listdir(dirname)
- ]
+ (psffile_pattern.search(t) or default_otf_pattern.search(t))
+ for t in os.listdir(dirname)
):
return True
return False
diff --git a/llspy/parse.py b/src/llspy/parse.py
similarity index 100%
rename from llspy/parse.py
rename to src/llspy/parse.py
diff --git a/llspy/register_old.py b/src/llspy/register_old.py
similarity index 100%
rename from llspy/register_old.py
rename to src/llspy/register_old.py
diff --git a/llspy/samples.py b/src/llspy/samples.py
similarity index 62%
rename from llspy/samples.py
rename to src/llspy/samples.py
index 7c6e5e2..1bce0c9 100644
--- a/llspy/samples.py
+++ b/src/llspy/samples.py
@@ -17,19 +17,13 @@
filename = "cell5_ch0_stack0000_488nm_0000000msec_0020931273msecAbs.tif"
-reg = dict(
- {
- "ex1": dict(
- {
- "tspeck": "/Users/talley/DropboxHMS/CBMF/lattice_sample_data/lls_registration_samp/reg_ex1/tspeck/",
- "data": "/Users/talley/DropboxHMS/CBMF/lattice_sample_data/lls_registration_samp/reg_ex1/data/",
- }
- ),
- "ex2": dict(
- {
- "tspeck": "/Users/talley/DropboxHMS/CBMF/lattice_sample_data/lls_registration_samp/reg_ex2/tspeck/",
- "data": "/Users/talley/DropboxHMS/CBMF/lattice_sample_data/lls_registration_samp/reg_ex2/data/",
- }
- ),
- }
-)
+reg = {
+ "ex1": {
+ "tspeck": "/Users/talley/DropboxHMS/CBMF/lattice_sample_data/lls_registration_samp/reg_ex1/tspeck/",
+ "data": "/Users/talley/DropboxHMS/CBMF/lattice_sample_data/lls_registration_samp/reg_ex1/data/",
+ },
+ "ex2": {
+ "tspeck": "/Users/talley/DropboxHMS/CBMF/lattice_sample_data/lls_registration_samp/reg_ex2/tspeck/",
+ "data": "/Users/talley/DropboxHMS/CBMF/lattice_sample_data/lls_registration_samp/reg_ex2/data/",
+ },
+}
diff --git a/llspy/schema.py b/src/llspy/schema.py
similarity index 98%
rename from llspy/schema.py
rename to src/llspy/schema.py
index 7a07860..4a229c5 100644
--- a/llspy/schema.py
+++ b/src/llspy/schema.py
@@ -28,7 +28,7 @@ def CTiterable(v):
iter(v)
except TypeError:
raise TypeError("Not an iterable object")
- if not all([(isinstance(i, int) and i >= 0) for i in v]):
+ if not all((isinstance(i, int) and i >= 0) for i in v):
raise ValueError("All values in Channel/Time range must be integers >= 0")
return v
@@ -339,7 +339,7 @@ def validateItems(**kwargs):
if k not in __validator__:
print(f"ERROR! got unrecognized key: {k}")
return 0
- S = Schema({k: v for k, v in __validator__.items()}, extra=PREVENT_EXTRA)
+ S = Schema(dict(__validator__.items()), extra=PREVENT_EXTRA)
return validate_with_humanized_errors(kwargs, S)
diff --git a/llspy/settingstxt.py b/src/llspy/settingstxt.py
similarity index 98%
rename from llspy/settingstxt.py
rename to src/llspy/settingstxt.py
index 35c7a94..ff2967f 100644
--- a/llspy/settingstxt.py
+++ b/src/llspy/settingstxt.py
@@ -144,9 +144,7 @@ def parse(self):
continue
if self.date is None:
logger.error(
- "Error, could not parse datestring {} with any of formats {}".format(
- datestring, dateformats
- )
+ f"Error, could not parse datestring {datestring} with any of formats {dateformats}"
)
# print that with dateobject.strftime('%x %X %p')
diff --git a/llspy/util.py b/src/llspy/util.py
similarity index 95%
rename from llspy/util.py
rename to src/llspy/util.py
index 6b612bb..559c47a 100644
--- a/llspy/util.py
+++ b/src/llspy/util.py
@@ -88,7 +88,7 @@ def imsave(arr, outpath, dx=1, dz=1, dt=1, unit="micron"):
def getfoldersize(folder, recurse=False):
if recurse:
total_size = 0
- for dirpath, dirnames, filenames in os.walk(folder):
+ for dirpath, _dirnames, filenames in os.walk(folder):
for f in filenames:
total_size += os.path.getsize(os.path.join(dirpath, f))
return total_size
@@ -168,15 +168,17 @@ def walklevel(some_dir, level=1):
def get_subfolders_containing_filepattern(
- dirname, filepattern="*Settings.txt", exclude=["Corrected"], level=1
+ dirname, filepattern="*Settings.txt", exclude=None, level=1
):
"""retrieve a list of subdirectories of the input directory that contain a
filepattern... useful for getting raw data directories for batch processing
"""
+ if exclude is None:
+ exclude = ["Corrected"]
matches = []
- for root, dirnames, filenames in walklevel(dirname, level):
- for filename in fnmatch.filter(filenames, filepattern):
- if not any([e in root for e in exclude]):
+ for root, _dirnames, filenames in walklevel(dirname, level):
+ for _filename in fnmatch.filter(filenames, filepattern):
+ if not any(e in root for e in exclude):
matches.append(root)
return matches
diff --git a/llspy/xzpsf.py b/src/llspy/xzpsf.py
similarity index 99%
rename from llspy/xzpsf.py
rename to src/llspy/xzpsf.py
index 320a464..bdfe1f3 100755
--- a/llspy/xzpsf.py
+++ b/src/llspy/xzpsf.py
@@ -24,9 +24,7 @@ def main(infile, nx, nz, sig=1, pad=12):
maxy, maxx = np.argwhere(mipblur == mipblur.max())[0]
print(f"bead detected at ({maxx},{maxy})")
- beadslice = indat[:, maxy - pad : maxy + pad, maxx - pad : maxx + pad].astype(
- np.float
- )
+ beadslice = indat[:, maxy - pad : maxy + pad, maxx - pad : maxx + pad].astype(float)
background = indat[:, :, 2].mean(1)
beadsums = beadslice.sum((1, 2)) - (
4 * pad * pad * background
@@ -80,7 +78,6 @@ def find_settext(path, filepattern="*Settings.txt"):
if __name__ == "__main__":
-
import argparse
parser = argparse.ArgumentParser()
diff --git a/tests/test_llsdir.py b/tests/test_llsdir.py
index 7141ca4..10720f1 100644
--- a/tests/test_llsdir.py
+++ b/tests/test_llsdir.py
@@ -20,9 +20,8 @@ def hash_dir(dir_path):
files
): # we sort to guarantee that files will always go in the same order
hashes.append(sha1OfFile(os.path.join(path, file)))
- for dir in sorted(
- dirs
- ): # we sort to guarantee that dirs will always go in the same order
- hashes.append(hash_dir(os.path.join(path, dir)))
+ # we sort to guarantee that dirs will always go in the same order
+ for _dir in sorted(dirs):
+ hashes.append(hash_dir(os.path.join(path, _dir)))
break # we only need one iteration - to get files and dirs in current directory
return str(hash("".join(hashes)))
diff --git a/tests/test_parse.py b/tests/test_parse.py
index 71023f1..6c021be 100644
--- a/tests/test_parse.py
+++ b/tests/test_parse.py
@@ -3,7 +3,7 @@
sys.path.append("..")
-from llspy import parse # noqa
+from llspy import parse
class FilenameTests(unittest.TestCase):
diff --git a/tests/test_processing.py b/tests/test_processing.py
index 6b80aac..90c61f2 100644
--- a/tests/test_processing.py
+++ b/tests/test_processing.py
@@ -1,4 +1,5 @@
import os
+import shutil
from qtpy import QtCore
@@ -11,6 +12,9 @@
@requires_cuda
def test_basic_processing(qtbot):
testdata = os.path.join(os.path.dirname(__file__), "testdata", "sample")
+ deconFolder = os.path.join(testdata, "GPUdecon")
+ if os.path.isdir(deconFolder):
+ shutil.rmtree(deconFolder)
LLSdir(testdata).reduce_to_raw(keepmip=False)
n_testfiles = len(os.listdir(testdata))
otfdir = os.path.join(os.path.dirname(__file__), "testdata", "otfs")
@@ -21,9 +25,8 @@ def test_basic_processing(qtbot):
assert mainGUI.listbox.rowCount() == 0
mainGUI.listbox.addPath(testdata)
assert mainGUI.listbox.rowCount() == 1
- with qtbot.waitSignal(mainGUI.sig_processing_done, timeout=12000):
- qtbot.mouseClick(mainGUI.processButton, QtCore.Qt.LeftButton)
- deconFolder = os.path.join(testdata, "GPUdecon")
+ with qtbot.waitSignal(mainGUI.sig_processing_done, timeout=60000):
+ mainGUI.onProcess()
MIPfolder = os.path.join(deconFolder, "MIPs")
assert os.path.isdir(deconFolder)
assert os.path.isdir(MIPfolder)