"""A module which implements the time-frequency estimation.
Morlet code inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
"""
# Authors: The MNE-Python contributors.
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.
import inspect
from copy import deepcopy
from functools import partial
import matplotlib.pyplot as plt
import numpy as np
from scipy.fft import fft, ifft
from scipy.signal import argrelmax
from .._fiff.meas_info import ContainsMixin, Info
from .._fiff.pick import _picks_to_idx, pick_info
from ..baseline import _check_baseline, rescale
from ..channels.channels import UpdateChannelsMixin
from ..channels.layout import _find_topomap_coords, _merge_ch_data, _pair_grad_sensors
from ..defaults import _BORDER_DEFAULT, _EXTRAPOLATE_DEFAULT, _INTERPOLATION_DEFAULT
from ..filter import next_fast_len
from ..parallel import parallel_func
from ..utils import (
ExtendedTimeMixin,
GetEpochsMixin,
SizeMixin,
_build_data_frame,
_check_combine,
_check_event_id,
_check_fname,
_check_method_kwargs,
_check_option,
_check_pandas_index_arguments,
_check_pandas_installed,
_check_time_format,
_convert_times,
_ensure_events,
_freq_mask,
_import_h5io_funcs,
_is_numeric,
_pl,
_prepare_read_metadata,
_prepare_write_metadata,
_time_mask,
_validate_type,
check_fname,
copy_doc,
copy_function_doc_to_method_doc,
fill_doc,
legacy,
logger,
object_diff,
repr_html,
sizeof_fmt,
verbose,
warn,
)
from ..utils.spectrum import _get_instance_type_string
from ..viz.topo import _imshow_tfr, _imshow_tfr_unified, _plot_topo
from ..viz.topomap import (
_add_colorbar,
_get_pos_outlines,
_set_contour_locator,
plot_tfr_topomap,
plot_topomap,
)
from ..viz.utils import (
_make_combine_callable,
_prepare_joint_axes,
_set_title_multiple_electrodes,
_setup_cmap,
_setup_vmin_vmax,
add_background_image,
figure_nobar,
plt_show,
)
from .multitaper import dpss_windows, tfr_array_multitaper
from .spectrum import EpochsSpectrum
@fill_doc
def morlet(sfreq, freqs, n_cycles=7.0, sigma=None, zero_mean=False):
"""Compute Morlet wavelets for the given frequency range.
Parameters
----------
sfreq : float
The sampling frequency.
freqs : float | array-like, shape (n_freqs,)
Frequencies to compute Morlet wavelets for.
n_cycles : float | array-like, shape (n_freqs,)
Number of cycles. Can be a fixed number (float) or one per frequency
(array-like).
sigma : float, default None
Controls the width of the wavelet, i.e., its temporal resolution.
If sigma is None, the temporal resolution adapts with frequency,
as in all wavelet transforms: the higher the frequency, the
shorter the wavelet. If sigma is fixed, the temporal resolution is
fixed, as in the short-time Fourier transform, and the number of
oscillations increases with frequency.
zero_mean : bool, default False
Make sure the wavelet has a mean of zero.
Returns
-------
Ws : list of ndarray | ndarray
The wavelets time series. If ``freqs`` was a float, a single
ndarray is returned instead of a list of ndarray.
See Also
--------
mne.time_frequency.fwhm
Notes
-----
%(morlet_reference)s
%(fwhm_morlet_notes)s
References
----------
.. footbibliography::
Examples
--------
Let's show a simple example of the relationship between ``n_cycles`` and
the FWHM using :func:`mne.time_frequency.fwhm`:
.. plot::
import numpy as np
import matplotlib.pyplot as plt
from mne.time_frequency import morlet, fwhm
sfreq, freq, n_cycles = 1000., 10, 7 # i.e., 700 ms
this_fwhm = fwhm(freq, n_cycles)
wavelet = morlet(sfreq=sfreq, freqs=freq, n_cycles=n_cycles)
M, w = len(wavelet), n_cycles # convert to SciPy convention
s = w * sfreq / (2 * freq * np.pi) # from SciPy docs
_, ax = plt.subplots(layout="constrained")
colors = dict(real="#66CCEE", imag="#EE6677")
t = np.arange(-M // 2 + 1, M // 2 + 1) / sfreq
for kind in ('real', 'imag'):
ax.plot(
t, getattr(wavelet, kind), label=kind, color=colors[kind],
)
ax.plot(t, np.abs(wavelet), label='abs', color='k', lw=1., zorder=6)
half_max = np.max(np.abs(wavelet)) / 2.
ax.plot([-this_fwhm / 2., this_fwhm / 2.], [half_max, half_max],
color='k', linestyle='-', label='FWHM', zorder=6)
ax.legend(loc='upper right')
ax.set(xlabel='Time (s)', ylabel='Amplitude')
""" # noqa: E501
Ws = list()
n_cycles = np.array(n_cycles, float).ravel()
freqs = np.array(freqs, float)
if np.any(freqs <= 0):
raise ValueError("all frequencies in 'freqs' must be greater than 0.")
if (n_cycles.size != 1) and (n_cycles.size != len(freqs)):
raise ValueError("n_cycles should be fixed or defined for each frequency.")
_check_option("freqs.ndim", freqs.ndim, [0, 1])
singleton = freqs.ndim == 0
if singleton:
freqs = freqs[np.newaxis]
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
# sigma_t is the stddev of gaussian window in the time domain; can be
# scale-dependent or fixed across freqs
if sigma is None:
sigma_t = this_n_cycles / (2.0 * np.pi * f)
else:
sigma_t = this_n_cycles / (2.0 * np.pi * sigma)
# time vector. We go 5 standard deviations out to make sure we're
# *very* close to zero at the ends. We also make sure that there's a
# sample at exactly t=0
t = np.arange(0.0, 5.0 * sigma_t, 1.0 / sfreq)
t = np.r_[-t[::-1], t[1:]]
oscillation = np.exp(2.0 * 1j * np.pi * f * t)
if zero_mean:
# this offset is equivalent to the κ_σ term in Wikipedia's
# equations, and satisfies the "admissibility criterion" for CWTs
real_offset = np.exp(-2 * (np.pi * f * sigma_t) ** 2)
oscillation -= real_offset
gaussian_envelope = np.exp(-(t**2) / (2.0 * sigma_t**2))
W = oscillation * gaussian_envelope
# the scaling factor here is proportional to what is used in
# Tallon-Baudry 1997: (sigma_t*sqrt(pi))^(-1/2). It yields a wavelet
# with norm sqrt(2) for the full wavelet / norm 1 for the real part
W /= np.sqrt(0.5) * np.linalg.norm(W.ravel())
Ws.append(W)
if singleton:
Ws = Ws[0]
return Ws
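# Usage sketch (illustrative only, not part of the module): build Morlet
# wavelets for a few frequencies and check that, with ``sigma=None``, higher
# frequencies yield shorter (complex-valued) kernels.
#
#     import numpy as np
#     from mne.time_frequency import morlet
#
#     sfreq = 1000.0
#     Ws = morlet(sfreq, freqs=[5.0, 10.0, 20.0], n_cycles=7.0, zero_mean=True)
#     lengths = [len(W) for W in Ws]
#     assert lengths[0] > lengths[1] > lengths[2]  # shorter wavelets at higher freqs
#     assert np.iscomplexobj(Ws[0])                # complex-valued kernels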
def fwhm(freq, n_cycles):
"""Compute the full-width half maximum of a Morlet wavelet.
Uses the formula from :footcite:t:`Cohen2019`.
Parameters
----------
freq : float
The oscillation frequency of the wavelet.
n_cycles : float
The duration of the wavelet, expressed as the number of oscillation
cycles.
Returns
-------
fwhm : float
The full-width half maximum of the wavelet.
Notes
-----
.. versionadded:: 1.3
References
----------
.. footbibliography::
"""
return n_cycles * np.sqrt(2 * np.log(2)) / (np.pi * freq)
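# Worked example (illustrative only): for a 10 Hz wavelet with 7 cycles the
# FWHM is 7 * sqrt(2 * ln(2)) / (pi * 10) ~ 0.26 s, matching the plot shown in
# the ``morlet`` docstring above.
#
#     from mne.time_frequency import fwhm
#     assert abs(fwhm(freq=10.0, n_cycles=7.0) - 0.262) < 0.001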
def _make_dpss(
sfreq,
freqs,
n_cycles=7.0,
time_bandwidth=4.0,
zero_mean=False,
return_weights=False,
):
"""Compute DPSS tapers for the given frequency range.
Parameters
----------
sfreq : float
The sampling frequency.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,), default 7.
The number of cycles globally or for each frequency.
time_bandwidth : float, default 4.0
Time x Bandwidth product.
The number of good tapers (low-bias) is chosen automatically based on
this to equal floor(time_bandwidth - 1).
Default is 4.0, giving 3 good tapers.
zero_mean : bool, default False
Make sure the wavelet has a mean of zero.
return_weights : bool
Whether to return the concentration weights.
Returns
-------
Ws : list of array
The wavelets time series.
Cs : list of array
The concentration weights. Only returned if return_weights=True.
"""
Ws = list()
Cs = list()
freqs = np.array(freqs)
if np.any(freqs <= 0):
raise ValueError("all frequencies in 'freqs' must be greater than 0.")
if time_bandwidth < 2.0:
raise ValueError("time_bandwidth should be >= 2.0 for good tapers")
n_taps = int(np.floor(time_bandwidth - 1))
n_cycles = np.atleast_1d(n_cycles)
if n_cycles.size != 1 and n_cycles.size != len(freqs):
raise ValueError("n_cycles should be fixed or defined for each frequency.")
for m in range(n_taps):
Wm = list()
Cm = list()
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
t_win = this_n_cycles / float(f)
t = np.arange(0.0, t_win, 1.0 / sfreq)
# Making sure wavelets are centered before tapering
oscillation = np.exp(2.0 * 1j * np.pi * f * (t - t_win / 2.0))
# Get dpss tapers
tapers, conc = dpss_windows(
t.shape[0], time_bandwidth / 2.0, n_taps, sym=False
)
Wk = oscillation * tapers[m]
if zero_mean: # to make it zero mean
real_offset = Wk.mean()
Wk -= real_offset
Wk /= np.sqrt(0.5) * np.linalg.norm(Wk.ravel())
Ck = np.sqrt(conc[m])
Wm.append(Wk)
Cm.append(Ck)
Ws.append(Wm)
Cs.append(Cm)
if return_weights:
return Ws, Cs
return Ws
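# Shape sketch for this private helper (illustrative parameters): with the
# default ``time_bandwidth=4.0`` there are ``floor(4.0 - 1) = 3`` tapers, so
# ``Ws`` is a list of 3 lists, one inner list per frequency.
#
#     Ws, Cs = _make_dpss(
#         sfreq=500.0, freqs=[10.0, 20.0], n_cycles=5.0, return_weights=True
#     )
#     assert len(Ws) == 3 and len(Ws[0]) == 2   # (n_tapers, n_freqs)
#     assert len(Cs) == 3 and len(Cs[0]) == 2   # matching concentration weights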
# Low level convolution
def _get_nfft(wavelets, X, use_fft=True, check=True):
n_times = X.shape[-1]
max_size = max(w.size for w in wavelets)
if max_size > n_times:
msg = (
f"At least one of the wavelets ({max_size}) is longer than the "
f"signal ({n_times}). Consider using a longer signal or "
"shorter wavelets."
)
if check:
if use_fft:
warn(msg, UserWarning)
else:
raise ValueError(msg)
nfft = n_times + max_size - 1
nfft = next_fast_len(nfft) # 2 ** int(np.ceil(np.log2(nfft)))
return nfft
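# FFT-size sketch (illustrative numbers): the linear-convolution length is
# ``n_times + max_wavelet_size - 1``, rounded up to a fast FFT length.
#
#     import numpy as np
#     from mne.filter import next_fast_len
#     n_times, max_size = 1000, 993
#     assert _get_nfft([np.ones(max_size)], np.zeros((1, n_times))) == next_fast_len(
#         n_times + max_size - 1
#     )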
def _cwt_gen(X, Ws, *, fsize=0, mode="same", decim=1, use_fft=True):
"""Compute cwt with fft based convolutions or temporal convolutions.
Parameters
----------
X : array of shape (n_signals, n_times)
The data.
Ws : list of array
Wavelets time series.
fsize : int
FFT length.
mode : {'full', 'valid', 'same'}
See numpy.convolve.
decim : int | slice, default 1
To reduce memory usage, decimation factor after time-frequency
decomposition.
If `int`, returns tfr[..., ::decim].
If `slice`, returns tfr[..., decim].
.. note:: Decimation may create aliasing artifacts.
use_fft : bool, default True
Use the FFT for convolutions or not.
Returns
-------
out : array, shape (n_signals, n_freqs, n_time_decim)
The time-frequency transform of the signals.
"""
_check_option("mode", mode, ["same", "valid", "full"])
decim = _ensure_slice(decim)
X = np.asarray(X)
# Precompute wavelets for given frequency range to save time
_, n_times = X.shape
n_times_out = X[:, decim].shape[1]
n_freqs = len(Ws)
# precompute FFTs of Ws
if use_fft:
fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
for i, W in enumerate(Ws):
fft_Ws[i] = fft(W, fsize)
# Make generator looping across signals
tfr = np.zeros((n_freqs, n_times_out), dtype=np.complex128)
for x in X:
if use_fft:
fft_x = fft(x, fsize)
# Loop across wavelets
for ii, W in enumerate(Ws):
if use_fft:
ret = ifft(fft_x * fft_Ws[ii])[: n_times + W.size - 1]
else:
# Work around multiarray.correlate->OpenBLAS bug on ppc64le
# ret = np.correlate(x, W, mode=mode)
ret = np.convolve(x, W.real, mode=mode) + 1j * np.convolve(
x, W.imag, mode=mode
)
# Center and decimate decomposition
if mode == "valid":
sz = int(abs(W.size - n_times)) + 1
offset = (n_times - sz) // 2
this_slice = slice(offset // decim.step, (offset + sz) // decim.step)
if use_fft:
ret = _centered(ret, sz)
tfr[ii, this_slice] = ret[decim]
elif mode == "full" and not use_fft:
start = (W.size - 1) // 2
end = len(ret) - (W.size // 2)
ret = ret[start:end]
tfr[ii, :] = ret[decim]
else:
if use_fft:
ret = _centered(ret, n_times)
tfr[ii, :] = ret[decim]
yield tfr
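# Note for consumers of this generator (internal API): the same output buffer
# ``tfr`` is reused for every signal in ``X``, so each yielded array must be
# copied before advancing the generator, as ``_cwt_array`` below does via
# ``tfrs[k] = tfr``. A minimal sketch, assuming ``X``, ``Ws`` and ``nfft`` are
# prepared as in ``cwt``:
#
#     coefs = _cwt_gen(X, Ws, fsize=nfft, mode="same", decim=slice(None))
#     tfrs = [tfr.copy() for tfr in coefs]  # copy, because the buffer is reused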
# Loop of convolution: single trial
def _compute_tfr(
epoch_data,
freqs,
sfreq=1.0,
method="morlet",
n_cycles=7.0,
zero_mean=None,
time_bandwidth=None,
use_fft=True,
decim=1,
output="complex",
return_weights=False,
n_jobs=None,
*,
verbose=None,
):
"""Compute time-frequency transforms.
Parameters
----------
epoch_data : array of shape (n_epochs, n_channels, n_times)
The epochs.
freqs : array-like of floats, shape (n_freqs)
The frequencies.
sfreq : float | int, default 1.0
Sampling frequency of the data.
method : 'multitaper' | 'morlet', default 'morlet'
The time-frequency method. 'morlet' convolves a Morlet wavelet.
'multitaper' uses complex exponentials windowed with multiple DPSS
tapers.
n_cycles : float | array of float, default 7.0
Number of cycles in the wavelet. Fixed number
or one per frequency.
zero_mean : bool | None, default None
None means True for method='multitaper' and False for method='morlet'.
If True, make sure the wavelets have a mean of zero.
time_bandwidth : float, default None
If None and method=multitaper, will be set to 4.0 (3 tapers).
Time x (Full) Bandwidth product. Only applies if
method == 'multitaper'. The number of good tapers (low-bias) is
chosen automatically based on this to equal floor(time_bandwidth - 1).
use_fft : bool, default True
Use the FFT for convolutions or not.
decim : int | slice, default 1
To reduce memory usage, decimation factor after time-frequency
decomposition.
If `int`, returns tfr[..., ::decim].
If `slice`, returns tfr[..., decim].
.. note::
Decimation may create aliasing artifacts, yet decimation
is done after the convolutions.
output : str
* 'complex' (default) : single trial complex.
* 'power' : single trial power.
* 'phase' : single trial phase.
* 'avg_power' : average of single trial power.
* 'itc' : inter-trial coherence.
* 'avg_power_itc' : average of single trial power and inter-trial
coherence across trials.
return_weights : bool, default False
Whether to return the taper weights. Only applies if method='multitaper' and
output='complex' or 'phase'.
%(n_jobs)s
The number of epochs to process at the same time. The parallelization
is implemented across channels.
%(verbose)s
Returns
-------
out : array
Time frequency transform of epoch_data. If output is in ['complex',
'phase', 'power'], then shape of ``out`` is ``(n_epochs, n_chans,
n_freqs, n_times)``, else it is ``(n_chans, n_freqs, n_times)``.
However, using the multitaper method with output ``'complex'`` or
``'phase'`` results in ``out`` having shape ``(n_epochs, n_chans,
n_tapers, n_freqs, n_times)``. If output is ``'avg_power_itc'``, the
real values in ``out`` contain the average power and the imaginary
values contain the ITC: ``out = avg_power + i * itc``.
weights : array of shape (n_tapers, n_freqs)
The taper weights. Only returned if method='multitaper', output='complex' or
'phase', and return_weights=True.
"""
# Check data
epoch_data = np.asarray(epoch_data)
if epoch_data.ndim != 3:
raise ValueError(
"epoch_data must be of shape (n_epochs, n_chans, "
f"n_times), got {epoch_data.shape}"
)
# Check params
freqs, sfreq, zero_mean, n_cycles, time_bandwidth, decim = _check_tfr_param(
freqs,
sfreq,
method,
zero_mean,
n_cycles,
time_bandwidth,
use_fft,
decim,
output,
)
return_weights = (
return_weights and method == "multitaper" and output in ["complex", "phase"]
)
decim = _ensure_slice(decim)
if (freqs > sfreq / 2.0).any():
raise ValueError(
"Cannot compute freq above Nyquist freq of the data "
f"({sfreq / 2.0:0.1f} Hz), got {freqs.max():0.1f} Hz"
)
# We decimate *after* decomposition, so we need to create our kernels
# for the original sfreq
if method == "morlet":
W = morlet(sfreq, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
Ws = [W] # to have same dimensionality as the 'multitaper' case
elif method == "multitaper":
out = _make_dpss(
sfreq,
freqs,
n_cycles=n_cycles,
time_bandwidth=time_bandwidth,
zero_mean=zero_mean,
return_weights=return_weights,
)
if return_weights:
Ws, weights = out
else:
Ws = out
# Check wavelets
if len(Ws[0][0]) > epoch_data.shape[2]:
raise ValueError(
"At least one of the wavelets is longer than the "
"signal. Use a longer signal or shorter wavelets."
)
# Initialize output
n_freqs = len(freqs)
n_tapers = len(Ws)
n_epochs, n_chans, n_times = epoch_data[:, :, decim].shape
if output in ("power", "phase", "avg_power", "itc"):
dtype = np.float64
elif output in ("complex", "avg_power_itc"):
# avg_power_itc is stored as power + 1i * itc to keep a
# simple dimensionality
dtype = np.complex128
if ("avg_" in output) or ("itc" in output):
out = np.empty((n_chans, n_freqs, n_times), dtype)
elif output in ["complex", "phase"] and method == "multitaper":
out = np.empty((n_chans, n_tapers, n_epochs, n_freqs, n_times), dtype)
if return_weights:
weights = np.array(weights)
else:
out = np.empty((n_chans, n_epochs, n_freqs, n_times), dtype)
# Parallel computation
all_Ws = sum([list(W) for W in Ws], list())
_get_nfft(all_Ws, epoch_data, use_fft)
parallel, my_cwt, n_jobs = parallel_func(_time_frequency_loop, n_jobs)
# Parallelization is applied across channels.
tfrs = parallel(
my_cwt(channel, Ws, output, use_fft, "same", decim, method)
for channel in epoch_data.transpose(1, 0, 2)
)
# FIXME: to avoid overheads we should use np.array_split()
for channel_idx, tfr in enumerate(tfrs):
out[channel_idx] = tfr
if ("avg_" not in output) and ("itc" not in output):
# This is to enforce that the first dimension is for epochs
if output in ["complex", "phase"] and method == "multitaper":
out = out.transpose(2, 0, 1, 3, 4)
else:
out = out.transpose(1, 0, 2, 3)
if return_weights:
return out, weights
return out
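# Shape sketch for this private driver (random data, illustrative parameters):
#
#     import numpy as np
#     rng = np.random.default_rng(0)
#     data = rng.standard_normal((5, 2, 1000))  # (n_epochs, n_chans, n_times)
#     power = _compute_tfr(
#         data, freqs=[10.0, 15.0], sfreq=250.0, method="morlet",
#         n_cycles=3.0, output="avg_power",
#     )
#     assert power.shape == (2, 2, 1000)  # (n_chans, n_freqs, n_times)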
def _check_tfr_param(
freqs, sfreq, method, zero_mean, n_cycles, time_bandwidth, use_fft, decim, output
):
"""Aux. function to _compute_tfr to check the params validity."""
# Check freqs
if not isinstance(freqs, list | np.ndarray):
raise ValueError(f"freqs must be an array-like, got {type(freqs)} instead.")
freqs = np.asarray(freqs, dtype=float)
if freqs.ndim != 1:
raise ValueError(
f"freqs must be of shape (n_freqs,), got {np.array(freqs.shape)} "
"instead."
)
# Check sfreq
if not isinstance(sfreq, float | int):
raise ValueError(f"sfreq must be a float or an int, got {type(sfreq)} instead.")
sfreq = float(sfreq)
# Default zero_mean = True if multitaper else False
zero_mean = method == "multitaper" if zero_mean is None else zero_mean
if not isinstance(zero_mean, bool):
raise ValueError(
f"zero_mean should be of type bool, got {type(zero_mean)}. instead"
)
freqs = np.asarray(freqs)
# Check n_cycles
if isinstance(n_cycles, int | float):
n_cycles = float(n_cycles)
elif isinstance(n_cycles, list | np.ndarray):
n_cycles = np.array(n_cycles)
if len(n_cycles) != len(freqs):
raise ValueError(
"n_cycles must be a float or an array of length "
f"{len(freqs)} frequencies, got {len(n_cycles)} cycles instead."
)
else:
raise ValueError(
f"n_cycles must be a float or an array, got {type(n_cycles)} instead."
)
# Check time_bandwidth
if (method == "morlet") and (time_bandwidth is not None):
raise ValueError('time_bandwidth only applies to "multitaper" method.')
elif method == "multitaper":
time_bandwidth = 4.0 if time_bandwidth is None else float(time_bandwidth)
# Check use_fft
if not isinstance(use_fft, bool):
raise ValueError(f"use_fft must be a boolean, got {type(use_fft)} instead.")
# Check decim
if isinstance(decim, int):
decim = slice(None, None, decim)
if not isinstance(decim, slice):
raise ValueError(
f"decim must be an integer or a slice, got {type(decim)} instead."
)
# Check output
_check_option(
"output",
output,
["complex", "power", "phase", "avg_power_itc", "avg_power", "itc"],
)
_check_option("method", method, ["multitaper", "morlet"])
return freqs, sfreq, zero_mean, n_cycles, time_bandwidth, decim
def _time_frequency_loop(X, Ws, output, use_fft, mode, decim, method=None):
"""Aux. function to _compute_tfr.
Loops time-frequency transform across wavelets and epochs.
Parameters
----------
X : array, shape (n_epochs, n_times)
The epochs data of a single channel.
Ws : list, shape (n_tapers, n_wavelets, n_times)
The wavelets.
output : str
* 'complex' : single trial complex.
* 'power' : single trial power.
* 'phase' : single trial phase.
* 'avg_power' : average of single trial power.
* 'itc' : inter-trial coherence.
* 'avg_power_itc' : average of single trial power and inter-trial
coherence across trials.
use_fft : bool
Use the FFT for convolutions or not.
mode : {'full', 'valid', 'same'}
See numpy.convolve.
decim : slice
The decimation slice: e.g. power[:, decim]
method : str | None
Used only for multitapering to create tapers dimension in the output
if ``output in ['complex', 'phase']``.
"""
# Set output type
dtype = np.float64
if output in ["complex", "avg_power_itc"]:
dtype = np.complex128
# Init outputs
decim = _ensure_slice(decim)
n_tapers = len(Ws)
n_epochs, n_times = X[:, decim].shape
n_freqs = len(Ws[0])
if ("avg_" in output) or ("itc" in output):
tfrs = np.zeros((n_freqs, n_times), dtype=dtype)
elif output in ["complex", "phase"] and method == "multitaper":
tfrs = np.zeros((n_tapers, n_epochs, n_freqs, n_times), dtype=dtype)
else:
tfrs = np.zeros((n_epochs, n_freqs, n_times), dtype=dtype)
# Loops across tapers.
for taper_idx, W in enumerate(Ws):
# No need to check here, it's done earlier (outside parallel part)
nfft = _get_nfft(W, X, use_fft, check=False)
coefs = _cwt_gen(X, W, fsize=nfft, mode=mode, decim=decim, use_fft=use_fft)
# Inter-trial phase locking is apparently computed per taper...
if "itc" in output:
plf = np.zeros((n_freqs, n_times), dtype=np.complex128)
# Loop across epochs
for epoch_idx, tfr in enumerate(coefs):
# Transform complex values
if output in ["power", "avg_power"]:
tfr = (tfr * tfr.conj()).real # power
elif output == "phase":
tfr = np.angle(tfr)
elif output == "avg_power_itc":
tfr_abs = np.abs(tfr)
plf += tfr / tfr_abs # phase
tfr = tfr_abs**2 # power
elif output == "itc":
plf += tfr / np.abs(tfr) # phase
continue # no need to stack anything other than plf
# Stack or add
if ("avg_" in output) or ("itc" in output):
tfrs += tfr
elif output in ["complex", "phase"] and method == "multitaper":
tfrs[taper_idx, epoch_idx] += tfr
else:
tfrs[epoch_idx] += tfr
# Compute inter trial coherence
if output == "avg_power_itc":
tfrs += 1j * np.abs(plf)
elif output == "itc":
tfrs += np.abs(plf)
# Normalization of average metrics
if ("avg_" in output) or ("itc" in output):
tfrs /= n_epochs
# Normalization by the number of tapers
if n_tapers > 1 and output not in ["complex", "phase"]:
tfrs /= n_tapers
return tfrs
@fill_doc
def cwt(X, Ws, use_fft=True, mode="same", decim=1):
"""Compute time-frequency decomposition with continuous wavelet transform.
Parameters
----------
X : array, shape (n_signals, n_times)
The signals.
Ws : list of array
Wavelets time series.
use_fft : bool
Use FFT for convolutions. Defaults to True.
mode : 'same' | 'valid' | 'full'
Convention for convolution. 'full' is currently not implemented with
``use_fft=False``. Defaults to ``'same'``.
%(decim_tfr)s
Returns
-------
tfr : array, shape (n_signals, n_freqs, n_times)
The time-frequency decompositions.
See Also
--------
mne.time_frequency.tfr_morlet : Compute time-frequency decomposition
with Morlet wavelets.
"""
nfft = _get_nfft(Ws, X, use_fft)
return _cwt_array(X, Ws, nfft, mode, decim, use_fft)
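# Usage sketch (illustrative shapes): pair ``morlet`` with ``cwt`` to get a
# complex time-frequency decomposition of plain NumPy arrays.
#
#     import numpy as np
#     from mne.time_frequency import morlet
#     from mne.time_frequency.tfr import cwt
#
#     sfreq = 1000.0
#     rng = np.random.default_rng(0)
#     X = rng.standard_normal((2, 2000))              # (n_signals, n_times)
#     Ws = morlet(sfreq, freqs=[8.0, 10.0, 12.0], n_cycles=5.0)
#     tfr = cwt(X, Ws, use_fft=True, mode="same")
#     assert tfr.shape == (2, 3, 2000)                # (n_signals, n_freqs, n_times)
#     assert np.iscomplexobj(tfr)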
def _cwt_array(X, Ws, nfft, mode, decim, use_fft):
decim = _ensure_slice(decim)
coefs = _cwt_gen(X, Ws, fsize=nfft, mode=mode, decim=decim, use_fft=use_fft)
n_signals, n_times = X[:, decim].shape
tfrs = np.empty((n_signals, len(Ws), n_times), dtype=np.complex128)
for k, tfr in enumerate(coefs):
tfrs[k] = tfr
return tfrs
def _tfr_aux(
method, inst, freqs, decim, return_itc, picks, average, output, **tfr_params
):
from ..epochs import BaseEpochs
kwargs = dict(
method=method,
freqs=freqs,
picks=picks,
decim=decim,
output=output,
**tfr_params,
)
if isinstance(inst, BaseEpochs):
kwargs.update(average=average, return_itc=return_itc)
elif average:
logger.info("inst is Evoked, setting `average=False`")
average = False
if average and output == "complex":
raise ValueError('output must be "power" if average=True')
if not average and return_itc:
raise ValueError("Inter-trial coherence is not supported with average=False")
return inst.compute_tfr(**kwargs)
@legacy(alt='.compute_tfr(method="morlet")')
@verbose
def tfr_morlet(
inst,
freqs,
n_cycles,
use_fft=False,
return_itc=True,
decim=1,
n_jobs=None,
picks=None,
zero_mean=True,
average=True,
output="power",
verbose=None,
):
"""Compute Time-Frequency Representation (TFR) using Morlet wavelets.
Same computation as `~mne.time_frequency.tfr_array_morlet`, but
operates on `~mne.Epochs` or `~mne.Evoked` objects instead of
:class:`NumPy arrays <numpy.ndarray>`.
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
%(freqs_tfr_array)s
%(n_cycles_tfr)s
use_fft : bool, default False
Use the FFT for convolutions or not.
return_itc : bool, default True
Return inter-trial coherence (ITC) as well as averaged power.
Must be ``False`` for evoked data.
%(decim_tfr)s
%(n_jobs)s
picks : array-like of int | None, default None
The indices of the channels to decompose. If None, all available
good data channels are decomposed.
zero_mean : bool, default True
Make sure the wavelet has a mean of zero.
.. versionadded:: 0.13.0
%(average_tfr)s
output : str
Can be ``"power"`` (default) or ``"complex"``. If ``"complex"``, then
``average`` must be ``False``.
.. versionadded:: 0.15.0
%(verbose)s
Returns
-------
power : AverageTFR | EpochsTFR
The averaged or single-trial power.
itc : AverageTFR | EpochsTFR
The inter-trial coherence (ITC). Only returned if return_itc
is True.
See Also
--------
mne.time_frequency.tfr_array_morlet
mne.time_frequency.tfr_multitaper
mne.time_frequency.tfr_array_multitaper
mne.time_frequency.tfr_stockwell
mne.time_frequency.tfr_array_stockwell
Notes
-----
%(morlet_reference)s
%(temporal_window_tfr_intro)s
%(temporal_window_tfr_morlet_notes)s
See :func:`mne.time_frequency.morlet` for more information about the
Morlet wavelet.
References
----------
.. footbibliography::
"""
tfr_params = dict(
n_cycles=n_cycles,
n_jobs=n_jobs,
use_fft=use_fft,
zero_mean=zero_mean,
output=output,
)
return _tfr_aux(
"morlet", inst, freqs, decim, return_itc, picks, average, **tfr_params
)
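# Migration sketch: this function is legacy; the recommended equivalent is the
# call named by the ``@legacy`` decorator above. Assumes an existing
# ``mne.Epochs`` instance called ``epochs``.
#
#     freqs = np.arange(5.0, 30.0, 2.0)
#     power, itc = epochs.compute_tfr(
#         method="morlet", freqs=freqs, n_cycles=freqs / 2.0,
#         average=True, return_itc=True,
#     )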
@verbose
def tfr_array_morlet(
data,
sfreq,
freqs,
n_cycles=7.0,
zero_mean=True,
use_fft=True,
decim=1,
output="complex",
n_jobs=None,
*,
verbose=None,
):
"""Compute Time-Frequency Representation (TFR) using Morlet wavelets.
Same computation as `~mne.time_frequency.tfr_morlet`, but operates on
:class:`NumPy arrays <numpy.ndarray>` instead of `~mne.Epochs` objects.
Parameters
----------
data : array of shape (n_epochs, n_channels, n_times)
The epochs.
sfreq : float | int
Sampling frequency of the data.
%(freqs_tfr_array)s
%(n_cycles_tfr)s
zero_mean : bool, default True
If True, make sure the wavelets have a mean of zero.
.. versionchanged:: 1.8
The default changed from ``zero_mean=False`` to ``True``.
use_fft : bool, default True
Use the FFT for convolutions or not.
%(decim_tfr)s
output : str, default ``'complex'``
* ``'complex'`` : single trial complex.
* ``'power'`` : single trial power.
* ``'phase'`` : single trial phase.
* ``'avg_power'`` : average of single trial power.
* ``'itc'`` : inter-trial coherence.
* ``'avg_power_itc'`` : average of single trial power and inter-trial
coherence across trials.
%(n_jobs)s
The number of epochs to process at the same time. The parallelization
is implemented across channels. Default 1.
%(verbose)s
Returns
-------
out : array
Time frequency transform of ``data``.