Merge branch 'dev' into 'master'
 On branch master
 Changes to be committed:
	modified:   .gitlab-ci.yml
	modified:   CHANGELOG.md
	modified:   CITATION.cff
	modified:   setup.py
	modified:   syncopy/__init__.py
	modified:   syncopy/datatype/base_data.py
	modified:   syncopy/datatype/discrete_data.py
	modified:   syncopy/datatype/methods/definetrial.py
	modified:   syncopy/io/load_spy_container.py
	modified:   syncopy/nwanalysis/connectivity_analysis.py
	modified:   syncopy/plotting/_plotting.py
	modified:   syncopy/plotting/config.py
	modified:   syncopy/plotting/sp_plotting.py
	modified:   syncopy/shared/const_def.py
	modified:   syncopy/shared/input_processors.py
	modified:   syncopy/shared/tools.py
	modified:   syncopy/specest/_norm_spec.py
	modified:   syncopy/specest/freqanalysis.py
	modified:   syncopy/specest/mtmfft.py
	modified:   syncopy/specest/stft.py
	modified:   syncopy/tests/backend/test_timefreq.py
	modified:   syncopy/tests/helpers.py
	modified:   syncopy/tests/test_connectivity.py
	modified:   syncopy/tests/test_discretedata.py
	modified:   syncopy/tests/test_selectdata.py
	modified:   syncopy/tests/test_specest.py
pantaray committed May 13, 2022
2 parents a860486 + dd32496 commit e3363e1
Showing 26 changed files with 438 additions and 284 deletions.
9 changes: 5 additions & 4 deletions .gitlab-ci.yml
@@ -99,10 +99,11 @@ slurmtest:
- conda env update -f syncopy.yml --prune
- conda activate syncopy
- export PYTHONPATH=$CI_PROJECT_DIR
- srun -p DEV --mem=8000m -c 4 pytest --full $TEST_DIR/test_specest.py -k 'not para'
- srun -p DEV --mem=8000m -c 4 pytest --full $TEST_DIR/test_specest.py -k 'para'
- srun -p DEV --mem=8000m -c 4 pytest --full $TEST_DIR/test_connectivity.py
- srun -p DEV --mem=8000m -c 4 pytest --full --ignore=$TEST_DIR/test_specest.py --ignore=$TEST_DIR/test_connectivity.py
- cd $TEST_DIR
- srun -p DEV --mem=8000m -c 4 pytest --full test_specest.py -k 'not para'
- srun -p DEV --mem=8000m -c 4 pytest --full test_specest.py -k 'para'
- srun -p DEV --mem=8000m -c 4 pytest --full test_connectivity.py
- srun -p DEV --mem=8000m -c 4 pytest --full --ignore=test_specest.py --ignore=test_connectivity.py

pypitest:
stage: upload
24 changes: 24 additions & 0 deletions CHANGELOG.md
@@ -4,6 +4,30 @@
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project follows [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [2022.05] - 2022-05-13
Bugfixes and feature additions for `EventData` objects.

### NEW
- Added support for flexible columns in `EventData` (thanks to @KatharineShapcott)

### CHANGED
- Included a specific example of how to create an "all-to-all" `trialdefinition`
  array by invoking `definetrial` without arguments in the function's docstring.
- Modified the versioning scheme: switched to a date-based scheme (e.g., `2022.05`)
  instead of incrementing version numbers
- Aligned the padding API with FieldTrip in both `freqanalysis` and `connectivityanalysis`:
  use `pad` instead of `pad_to_length`, with three supported modes ('maxperlen',
  a float giving the trial length in seconds, 'nextpow2').

### DEPRECATED
- Removed support for calling `freqanalysis` with both a `toi` array and an
  input dataset that has an active in-place time selection attached

### FIXED
- Improved legibility of `spy.__version__` for non-release installations
- Correctly process equidistant `toi` arrays with large spacing in `freqanalysis`
- Corrected `trialtime` for `DiscreteData` objects (thanks to @KatharineShapcott)

## [v0.21] - 2022-04-13
Feature update and bugfixes.

4 changes: 2 additions & 2 deletions CITATION.cff
@@ -37,5 +37,5 @@ keywords:
- spectral-methods
- brain
repository-code: https://github.com/esi-neuroscience/syncopy
version: 0.1b3.dev287
date-released: '2022-01-19'
version: 0.3.dev187
date-released: '2022-04-13'
2 changes: 1 addition & 1 deletion setup.py
@@ -13,7 +13,7 @@
from conda2pip import conda2pip

# Set release version by hand for master branch
releaseVersion = "0.21"
releaseVersion = "2022.05"

# Get necessary and optional package dependencies
required, dev = conda2pip(return_lists=True)
2 changes: 1 addition & 1 deletion syncopy/__init__.py
@@ -16,7 +16,7 @@
try:
__version__ = version("esi-syncopy")
except PackageNotFoundError:
proc = subprocess.Popen("git describe --always",
proc = subprocess.Popen("git describe --tags",
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
text=True, shell=True)
out, err = proc.communicate()
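The CHANGELOG's "improved legibility of `spy.__version__`" maps to this one-word change: `git describe --always` falls back to a bare abbreviated commit hash, while `--tags` describes the commit relative to the nearest tag. A rough illustration (the printed outputs are hypothetical and depend on the clone's tag state):

```python
import subprocess

# Hypothetical outputs for a non-release checkout of this repository:
for cmd in ("git describe --always", "git describe --tags"):
    proc = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    print(cmd, "->", proc.stdout.strip())
# git describe --always -> e3363e1               (bare commit hash)
# git describe --tags   -> 2022.05-12-ge3363e1   (tag, commits since, hash)
```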
37 changes: 32 additions & 5 deletions syncopy/datatype/base_data.py
@@ -250,11 +250,15 @@ def _set_dataset_property_with_ndarray(self, inData, propertyName, ndim):
Number of expected array dimensions.
"""

# Ensure array has right no. of dimensions
try:
array_parser(inData, varname="data", dims=ndim)
except Exception as exc:
raise exc

# Gymnastics for `DiscreteData` objects w/non-standard `dimord`s
self._check_dataset_property_discretedata(inData)

# If there is existing data, replace values if shape and type match
if isinstance(getattr(self, "_" + propertyName), (np.memmap, h5py.Dataset)):
prop = getattr(self, "_" + propertyName)
@@ -303,6 +307,7 @@ def _set_dataset_property_with_memmap(self, inData, propertyName, ndim):
lgl = "{}-dimensional data".format(ndim)
act = "{}-dimensional memmap".format(inData.ndim)
raise SPYValueError(legal=lgl, varname=propertyName, actual=act)
self._check_dataset_property_discretedata(inData)

self.mode = inData.mode
self.filename = inData.filename
@@ -326,14 +331,18 @@ def _set_dataset_property_with_dataset(self, inData, propertyName, ndim):
act = "backing HDF5 file is closed"
raise SPYValueError(legal=lgl, actual=act, varname="data")

self._mode = inData.file.mode
self.filename = inData.file.filename

# Ensure dataset has right no. of dimensions
if inData.ndim != ndim:
lgl = "{}-dimensional data".format(ndim)
act = "{}-dimensional HDF5 dataset or memmap".format(inData.ndim)
raise SPYValueError(legal=lgl, varname="data", actual=act)

# Gymnastics for `DiscreteData` objects w/non-standard `dimord`s
self._check_dataset_property_discretedata(inData)

self._mode = inData.file.mode
self.filename = inData.file.filename

setattr(self, "_" + propertyName, inData)

def _set_dataset_property_with_list(self, inData, propertyName, ndim):
@@ -379,9 +388,9 @@ def _set_dataset_property_with_list(self, inData, propertyName, ndim):
if self.__class__.__name__ == "SpikeData":
nCol = 3
else: # EventData
nCol = 2
nCol = inData[0].shape[1]
if any(val.shape[1] != nCol for val in inData):
lgl = "NumPy 2d-arrays with 3 columns"
lgl = "NumPy 2d-arrays with {} columns".format(nCol)
act = "NumPy arrays of different shape"
raise SPYValueError(legal=lgl, varname="data", actual=act)
trialLens = [np.nanmax(val[:, self.dimord.index("sample")]) for val in inData]
@@ -415,6 +424,24 @@ def _set_dataset_property_with_list(self, inData, propertyName, ndim):
self._set_dataset_property_with_ndarray(data, propertyName, ndim)
self.trialdefinition = trialdefinition

def _check_dataset_property_discretedata(self, inData):
"""Check `DiscreteData` input data for shape consistency
Parameters
----------
inData : array/memmap/h5py.Dataset
array-like to be stored as a `DiscreteData` data source
"""

# Special case `DiscreteData`: `dimord` encodes no. of expected cols/rows;
# ensure this is consistent w/`inData`!
if any(["DiscreteData" in str(base) for base in self.__class__.__mro__]):
if len(self._defaultDimord) not in inData.shape:
lgl = "array with {} columns corresponding to dimord {}"
lgl = lgl.format(len(self._defaultDimord), self._defaultDimord)
act = "array with shape {}".format(str(inData.shape))
raise SPYValueError(legal=lgl, varname="data", actual=act)

def _is_empty(self):
return all([getattr(self, attr) is None
for attr in self._hdfFileDatasetProperties])
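In isolation, the new guard amounts to checking that one axis of the incoming array matches the number of dimensional labels. A minimal sketch with toy inputs (the real method reads `self._defaultDimord` and raises `SPYValueError`):

```python
import numpy as np

dimord = ["sample", "eventid", "duration"]  # e.g., an EventData dimord with an extra column

for arr in (np.zeros((10, 3)), np.zeros((10, 4))):
    # mirrors `len(self._defaultDimord) not in inData.shape` from the diff
    if len(dimord) not in arr.shape:
        print(f"rejected: shape {arr.shape} has no axis of length {len(dimord)}")
    else:
        print(f"accepted: shape {arr.shape}")
# accepted: shape (10, 3)
# rejected: shape (10, 4)
```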
74 changes: 40 additions & 34 deletions syncopy/datatype/discrete_data.py
@@ -145,6 +145,13 @@ def samplerate(self, sr):
raise exc
self._samplerate = sr

@property
def time(self):
"""list(float): trigger-relative time of each event """
if self.samplerate is not None and self.sampleinfo is not None:
return [(trl[:,self.dimord.index("sample")] - self.sampleinfo[tk,0] + self._t0[tk]) / self.samplerate \
for tk, trl in enumerate(self.trials)]

@property
def trialid(self):
""":class:`numpy.ndarray` of trial id associated with the sample"""
@@ -182,9 +189,9 @@ def trials(self):
def trialtime(self):
"""list(:class:`numpy.ndarray`): trigger-relative sample times in s"""
if self.samplerate is not None and self.sampleinfo is not None:
return [np.array([(t + self._t0[tk]) / self.samplerate \
for t in range(0, int(self.sampleinfo[tk, 1] - self.sampleinfo[tk, 0]))]) \
for tk in np.unique(self.trialid)]
sample0 = self.sampleinfo[:,0] - self._t0[:]
sample0 = np.append(sample0, np.nan)[self.trialid]
return (self.data[:,self.dimord.index("sample")] - sample0)/self.samplerate

# Helper function that grabs a single trial
def _get_trial(self, trialno):
@@ -266,41 +273,31 @@ def _get_time(self, trials, toi=None, toilim=None):
if toilim is not None:
allTrials = self.trialtime
for trlno in trials:
thisTrial = self.data[self.trialid == trlno, self.dimord.index("sample")]
trlSample = np.arange(*self.sampleinfo[trlno, :])
trlTime = allTrials[trlno]
minSample = trlSample[np.where(trlTime >= toilim[0])[0][0]]
maxSample = trlSample[np.where(trlTime <= toilim[1])[0][-1]]
selSample, _ = best_match(trlSample, [minSample, maxSample], span=True)
idxList = []
for smp in selSample:
idxList += list(np.where(thisTrial == smp)[0])
if len(idxList) > 1:
sampSteps = np.diff(idxList)
if sampSteps.min() == sampSteps.max() == 1:
idxList = slice(idxList[0], idxList[-1] + 1, 1)
timing.append(idxList)
trlTime = allTrials[self.trialid == trlno]
_, selTime = best_match(trlTime, toilim, span=True)
selTime = selTime.tolist()
if len(selTime) > 1 and np.diff(trlTime).min() > 0:
timing.append(slice(selTime[0], selTime[-1] + 1, 1))
else:
timing.append(selTime)

elif toi is not None:
allTrials = self.trialtime
for trlno in trials:
thisTrial = self.data[self.trialid == trlno, self.dimord.index("sample")]
trlSample = np.arange(*self.sampleinfo[trlno, :])
trlTime = allTrials[trlno]
_, selSample = best_match(trlTime, toi)
for k, idx in enumerate(selSample):
if np.abs(trlTime[idx - 1] - toi[k]) < np.abs(trlTime[idx] - toi[k]):
selSample[k] = trlSample[idx -1]
else:
selSample[k] = trlSample[idx]
idxList = []
for smp in selSample:
idxList += list(np.where(thisTrial == smp)[0])
if len(idxList) > 1:
sampSteps = np.diff(idxList)
if sampSteps.min() == sampSteps.max() == 1:
idxList = slice(idxList[0], idxList[-1] + 1, 1)
timing.append(idxList)
trlTime = allTrials[self.trialid == trlno]
_, arrayIdx = best_match(trlTime, toi)
# squash duplicate time values, then re-add all matching indices below
_, xdi = np.unique(trlTime[arrayIdx], return_index=True)
arrayIdx = arrayIdx[np.sort(xdi)]
selTime = []
for t in arrayIdx:
selTime += np.where(trlTime[t] == trlTime)[0].tolist()
# convert to slice if possible
if len(selTime) > 1:
timeSteps = np.diff(selTime)
if timeSteps.min() == timeSteps.max() == 1:
selTime = slice(selTime[0], selTime[-1] + 1, 1)
timing.append(selTime)

else:
timing = [slice(None)] * len(trials)
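The "convert to slice if possible" step in the `toi` branch reads naturally as a small standalone helper (hypothetical name, same logic as above):

```python
import numpy as np

def compact_indices(idx):
    """Collapse strictly consecutive indices into a slice; keep the list otherwise."""
    if len(idx) > 1:
        steps = np.diff(idx)
        if steps.min() == steps.max() == 1:
            return slice(idx[0], idx[-1] + 1, 1)
    return idx

print(compact_indices([3, 4, 5, 6]))  # slice(3, 7, 1)
print(compact_indices([3, 5, 9]))     # [3, 5, 9]
```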
@@ -617,6 +614,15 @@ def __init__(self,
:func:`syncopy.definetrial`
"""
if dimord is not None:
# ensure that event data can have extra dimord columns
if len(dimord) != len(self._defaultDimord):
for col in self._defaultDimord:
if col not in dimord:
base = "dimensional label {}"
lgl = base.format("'" + col + "'")
raise SPYValueError(legal=lgl, varname="dimord")
self._defaultDimord = dimord

# Call parent initializer
super().__init__(data=data,
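The rewritten `trialtime` above trades the per-trial Python loop for one vectorized expression. A toy walk-through of the same computation (hypothetical inputs; the property itself reads `sampleinfo`, `_t0`, `trialid`, and the "sample" column of `data`):

```python
import numpy as np

samplerate = 1000.0
sampleinfo = np.array([[0, 5], [5, 10]])       # [start, stop) sample per trial
t0 = np.array([-2, 0])                         # trigger offsets in samples
trialid = np.array([0, 0, 1, 1, -1])           # -1 marks samples belonging to no trial
samples = np.array([0, 3, 6, 9, 11])           # "sample" column of the data array

sample0 = sampleinfo[:, 0] - t0                # per-trial trigger sample
sample0 = np.append(sample0, np.nan)[trialid]  # trialid == -1 picks the appended NaN
print((samples - sample0) / samplerate)        # [-0.002  0.001  0.001  0.004    nan]
```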
3 changes: 3 additions & 0 deletions syncopy/datatype/methods/definetrial.py
@@ -24,6 +24,9 @@ def definetrial(obj, trialdefinition=None, pre=None, post=None, start=None,
start trigger stop
|---- pre ----|--------|---------|--- post----|
**Note**: To define a trial encompassing the whole dataset, simply invoke this
routine with no arguments, i.e., ``definetrial(obj)`` or, equivalently,
``obj.definetrial()``.
Parameters
----------
Expand Down
8 changes: 3 additions & 5 deletions syncopy/io/load_spy_container.py
@@ -276,20 +276,18 @@ def _load(filename, checksum, mode, out):
actual=hsh_msg.format(hsh=hsh))

# Parsing is done, create new or check provided object
dimord = jsonDict.pop("dimord")
if out is not None:
try:
data_parser(out, varname="out", writable=True, dataclass=jsonDict["dataclass"])
except Exception as exc:
raise exc
new_out = False
out.dimord = dimord
else:
out = dataclass()
out = dataclass(dimord=dimord)
new_out = True

# First and foremost, assign dimensional information
dimord = jsonDict.pop("dimord")
out.dimord = dimord

# Access data on disk (error checking is done by setters)
out.mode = mode
for datasetProperty in out._hdfFileDatasetProperties:
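Popping `dimord` before object creation matters because `DiscreteData.__init__` (see the diff above) can now absorb a non-default `dimord`, widening `_defaultDimord` before any data are attached. Schematically (hypothetical dimord, not the loader's literal code):

```python
import syncopy as spy

# An EventData file saved with an extra "duration" column:
dimord = ["sample", "eventid", "duration"]  # hypothetical on-disk dimord

out = spy.EventData(dimord=dimord)  # __init__ widens _defaultDimord to 3 columns
# Creating the object first and assigning `out.dimord = dimord` afterwards
# would be validated against the 2-column default and fail.
```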
38 changes: 18 additions & 20 deletions syncopy/nwanalysis/connectivity_analysis.py
@@ -35,7 +35,7 @@
@unwrap_select
@detect_parallel_client
def connectivityanalysis(data, method="coh", keeptrials=False, output="abs",
foi=None, foilim=None, pad_to_length=None,
foi=None, foilim=None, pad='maxperlen',
polyremoval=None, tapsmofrq=None, nTaper=None,
taper="hann", taper_opt=None, out=None, **kwargs):

@@ -60,7 +60,7 @@ def connectivityanalysis(data, method="coh", keeptrials=False, output="abs",
* **taper** : one of :data:`~syncopy.shared.const_def.availableTapers`
* **tapsmofrq** : spectral smoothing box for slepian tapers (in Hz)
* **nTaper** : (optional) number of orthogonal tapers for slepian tapers
* **pad_to_length**: either pad to an absolute length or set to `'nextpow2'`
* **pad**: either pad to an absolute length in seconds or set to `'nextpow2'`
"corr" : Cross-correlations
Computes the one sided (positive lags) cross-correlations
@@ -77,7 +77,7 @@ def connectivityanalysis(data, method="coh", keeptrials=False, output="abs",
* **taper** : one of :data:`~syncopy.shared.const_def.availableTapers`
* **tapsmofrq** : spectral smoothing box for slepian tapers (in Hz)
* **nTaper** : (optional, not recommended) number of slepian tapers
* **pad_to_length**: either pad to an absolute length or set to `'nextpow2'`
* **pad**: either pad to an absolute length in seconds or set to `'nextpow2'`
Parameters
----------
@@ -101,17 +101,15 @@ def connectivityanalysis(data, method="coh", keeptrials=False, output="abs",
Frequency-window ``[fmin, fmax]`` (in Hz) of interest. The
`foi` array will be constructed in 1Hz steps from `fmin` to
`fmax` (inclusive).
pad_to_length : int, None or 'nextpow2'
Padding of the (tapered) signal, if set to a number pads all trials
to this absolute length. E.g. `pad_to_length=2000` pads all
trials to 2000 samples, if and only if the longest trial is
at maximum 2000 samples.
Alternatively if all trials have the same initial lengths
setting `pad_to_length='nextpow2'` pads all trials to
the next power of two.
If `None` and trials have unequal lengths all trials are padded to match
the longest trial.
pad : 'maxperlen', float or 'nextpow2'
For the default 'maxperlen', no padding is performed for equal-length
trials, while trials of varying lengths are padded to match the longest
trial. If `pad` is a number, all trials are padded so that `pad` indicates
their absolute length in seconds after padding. For instance, at a
samplerate of 1 kHz, ``pad=2`` pads all trials to an absolute length of
2000 samples; this requires that the longest trial contains at most
2000 samples. If `pad` is 'nextpow2', all trials are padded to the next
power of two (in samples) of the longest trial.
tapsmofrq : float or None
Only valid if `method` is `'coh'` or `'granger'`.
Enables multi-tapering and sets the amount of spectral
@@ -177,13 +175,13 @@ def connectivityanalysis(data, method="coh", keeptrials=False, output="abs",

# --- Padding ---

if method == "corr" and pad_to_length:
lgl = "`None`, no padding needed/allowed for cross-correlations"
actual = f"{pad_to_length}"
raise SPYValueError(legal=lgl, varname="pad_to_length", actual=actual)
if method == "corr" and pad != 'maxperlen':
lgl = "'maxperlen', no padding needed/allowed for cross-correlations"
actual = f"{pad}"
raise SPYValueError(legal=lgl, varname="pad", actual=actual)

# the actual number of samples in case of later padding
nSamples = process_padding(pad_to_length, lenTrials)
nSamples = process_padding(pad, lenTrials, data.samplerate)

# --- Basic foi sanitization ---

@@ -199,7 +197,7 @@
"output": output,
"keeptrials": keeptrials,
"polyremoval": polyremoval,
"pad_to_length": pad_to_length}
"pad": pad}

# --- Setting up specific Methods ---
if method == 'granger':
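How the three `pad` modes map to a target sample count can be sketched as follows (this mirrors the documented semantics; `process_padding`'s actual internals may differ):

```python
import numpy as np

def target_nsamples(pad, len_trials, samplerate):
    """Sketch of the documented `pad` semantics, not Syncopy's actual implementation."""
    if pad == "maxperlen":                # pad unequal trials up to the longest one
        return int(max(len_trials))
    if pad == "nextpow2":                 # next power of two of the longest trial
        return int(2 ** np.ceil(np.log2(max(len_trials))))
    nsamples = int(pad * samplerate)      # numeric pad: absolute trial length in seconds
    if nsamples < max(len_trials):
        raise ValueError("`pad` must be at least as long as the longest trial")
    return nsamples

print(target_nsamples("maxperlen", [900, 1000], 1000))  # 1000
print(target_nsamples("nextpow2", [900, 1000], 1000))   # 1024
print(target_nsamples(2, [900, 1000], 1000))            # 2000
```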
2 changes: 1 addition & 1 deletion syncopy/plotting/_plotting.py
@@ -77,7 +77,7 @@ def plot_lines(ax, data_x, data_y, leg_fontsize=pltConfig['sLegendSize'], **pkwargs):
else:
ax.plot(data_x, data_y, **pkwargs)
if 'label' in pkwargs:
ax.legend(ncol=2, loc='upper right',
ax.legend(ncol=2, loc='best', frameon=False,
fontsize=leg_fontsize)
# make room for the legend
mn, mx = ax.get_ylim()
2 changes: 1 addition & 1 deletion syncopy/plotting/config.py
@@ -22,7 +22,7 @@
"mTitleSize": 12.5,
"mLabelSize": 12.5,
"mTickSize": 11,
"mLegendSize": 11,
"mLegendSize": 10,
"mXSize": 3.2,
"mYSize": 2.4,
"mMaxAxes": 35,