Initial commit

commit fc4eec6ac7
2021-03-27 14:58:49 +01:00
20 changed files with 1613 additions and 0 deletions

4 changed binary files are not shown.


@@ -0,0 +1,70 @@
import os
import mne
import numpy as np
import pandas as pd
from mne_bids import (BIDSPath, read_raw_bids)
def _get_filepath(bids_root, subject_id, task):
bids_path = BIDSPath(subject=subject_id, task=task, session=task,
datatype='eeg', suffix='eeg',
root=bids_root)
    # These derivative files are not BIDS-conformant but an extension, so we
    # strip the "eeg" suffix from the BIDS path and append our own endings.
    # Depending on the path structure, this might raise a warning.
    fn = os.path.splitext(str(bids_path.fpath))[0]
    assert fn[-3:] == "eeg"
    fn = fn[:-3]
return fn
def load_precomputed_ica(bids_root, subject_id, task):
    # Returns the ICA object and badComponents (counting from component 0).
    # Note the existence of add_ica_info below in case you want to plot components.
fn = _get_filepath(bids_root, subject_id, task) + 'ica'
    # Import the EEGLAB ICA. EEGLAB was used because the AMICA algorithm is a bit more powerful than runica.
ica = mne.preprocessing.read_ica_eeglab(fn + '.set')
# ica = custom_read_eeglab_ica(fn+'.set')
    # For plotting one might want to copy over raw.info, but this function neither has access to the raw data nor wants to load it:
# ica.info = raw.info
ica._update_ica_names()
badComps = np.loadtxt(fn + '.tsv', delimiter="\t")
badComps -= 1 # start counting at 0
    # If only a single component is in the file, np.loadtxt returns a 0-d ndarray, which we wrap in a list.
if len(badComps.shape) == 0:
badComps = [float(badComps)]
return ica, badComps
def add_ica_info(raw, ica):
    # This function exists due to an MNE bug: https://github.com/mne-tools/mne-python/issues/8581
    # In case you want to plot your ICA components, this function will generate an ica.info
ch_raw = raw.info['ch_names']
ch_ica = ica.ch_names
    ix = [k for k, c in enumerate(ch_raw) if c in ch_ica and c not in raw.info['bads']]
info = raw.info.copy()
mne.io.pick.pick_info(info, ix, copy=False)
ica.info = info
return ica
def load_precomputed_badData(bids_root, subject_id, task):
    # Returns precomputed annotations and bad channels (counting from channel 0).
fn = _get_filepath(bids_root, subject_id, task)
print(fn)
tmp = pd.read_csv(fn + 'badSegments.csv')
# print(tmp)
annotations = mne.Annotations(tmp.onset, tmp.duration, tmp.description)
    # Unfortunately mne.read_annotations assumes csv files are in milliseconds and only txt files in seconds, hence the manual construction above.
# annotations = mne.read_annotations(fn+'badSegments.csv')
badChannels = np.loadtxt(fn + 'badChannels.tsv', delimiter='\t')
badChannels = badChannels.astype(int)
badChannels -= 1 # start counting at 0
# badChannels = [int(b) for b in badChannels]
return annotations, badChannels
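if __name__ == "__main__":
    # Minimal usage sketch, not part of the pipeline: assumes a local BIDS tree
    # under "local/bids" and that subject "002" / task "P3" exist (both are
    # illustrative placeholders).
    bids_root = "local/bids"
    raw = read_raw_bids(BIDSPath(subject="002", task="P3", session="P3",
                                 datatype="eeg", suffix="eeg", root=bids_root))
    raw.load_data()
    ica, bad_comps = load_precomputed_ica(bids_root, "002", "P3")
    annotations, bad_channels = load_precomputed_badData(bids_root, "002", "P3")
    raw.set_annotations(annotations)
    raw.info["bads"] = [raw.ch_names[int(i)] for i in np.atleast_1d(bad_channels)]
    ica.exclude = [int(c) for c in np.atleast_1d(bad_comps)]
    ica.apply(raw)  # removes the flagged components from the loaded data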

utils/ccs_eeg_utils.py Normal file

@@ -0,0 +1,275 @@
from osfclient import cli
import os
from mne_bids.read import _from_tsv, _drop
from mne_bids import (BIDSPath, read_raw_bids)
import mne
import numpy as np
import scipy.ndimage
import scipy.signal
from numpy import sin
def read_annotations_core(bids_path, raw):
tsv = os.path.join(bids_path.directory, bids_path.update(suffix="events", extension=".tsv").basename)
_handle_events_reading_core(tsv, raw)
def _handle_events_reading_core(events_fname, raw):
"""Read associated events.tsv and populate raw.
Handle onset, duration, and description of each event.
"""
events_dict = _from_tsv(events_fname)
if ('value' in events_dict) and ('trial_type' in events_dict):
events_dict = _drop(events_dict, 'n/a', 'trial_type')
events_dict = _drop(events_dict, 'n/a', 'value')
descriptions = np.asarray([a + ':' + b for a, b in zip(events_dict["trial_type"], events_dict["value"])],
dtype=str)
# Get the descriptions of the events
elif 'trial_type' in events_dict:
# Drop events unrelated to a trial type
events_dict = _drop(events_dict, 'n/a', 'trial_type')
descriptions = np.asarray(events_dict['trial_type'], dtype=str)
# If we don't have a proper description of the events, perhaps we have
# at least an event value?
elif 'value' in events_dict:
# Drop events unrelated to value
events_dict = _drop(events_dict, 'n/a', 'value')
descriptions = np.asarray(events_dict['value'], dtype=str)
# Worst case, we go with 'n/a' for all events
else:
        descriptions = np.asarray(['n/a'] * len(events_dict['onset']), dtype=str)
# Deal with "n/a" strings before converting to float
ons = [np.nan if on == 'n/a' else on for on in events_dict['onset']]
dus = [0 if du == 'n/a' else du for du in events_dict['duration']]
onsets = np.asarray(ons, dtype=float)
durations = np.asarray(dus, dtype=float)
# Keep only events where onset is known
good_events_idx = ~np.isnan(onsets)
onsets = onsets[good_events_idx]
durations = durations[good_events_idx]
descriptions = descriptions[good_events_idx]
del good_events_idx
# Add Events to raw as annotations
annot_from_events = mne.Annotations(onset=onsets,
duration=durations,
description=descriptions,
orig_time=None)
raw.set_annotations(annot_from_events)
return raw
# taken from the osfclient tutorial https://github.com/ubcbraincircuits/osfclienttutorial
class args:
def __init__(self, project, username=None, update=True, force=False, destination=None, source=None, recursive=False,
target=None, output=None, remote=None, local=None):
self.project = project
self.username = username
self.update = update # applies to upload, clone, and fetch
self.force = force # applies to fetch and upload
# upload arguments:
self.destination = destination
self.source = source
self.recursive = recursive
# remove argument:
self.target = target
# clone argument:
self.output = output
# fetch arguments:
self.remote = remote
self.local = local
def download_erpcore(task="MMN", subject=1, localpath="local/bids/"):
    project = "9f5w7"  # after a recent change, everything is stored as "sessions" in one big BIDS dataset
arguments = args(project) # project ID
for extension in ["channels.tsv", "events.tsv", "eeg.fdt", "eeg.json", "eeg.set"]:
targetpath = '/sub-{:03d}/ses-{}/eeg/sub-{:03d}_ses-{}_task-{}_{}'.format(subject, task, subject, task, task,
extension)
print("Downloading {}".format(targetpath))
arguments.remote = "\\ERP_CORE_BIDS_Raw_Files/" + targetpath
arguments.local = localpath + targetpath
cli.fetch(arguments)
def simulate_ICA(dims=4):
    A = [[-0.3, 0.2], [.2, 0.1]]  # 2x2 mixing matrix; overwritten below when dims == 4
sample_rate = 100.0
nsamples = 1000
t = np.arange(nsamples) / sample_rate
s = []
# boxcars
s.append(np.mod(np.array(range(0, nsamples)), 250) > 125)
# a triangle staircase + trend
s.append((np.mod(np.array(range(0, nsamples)), 100) + np.array(range(0, nsamples)) * 0.05) / 100)
if dims == 4:
A = np.array([[.7, 0.3, 0.2, -0.5], [0.2, -0.5, -0.2, 0.3], [-.3, 0.1, 0, 0.2], [-0.5, -0.3, -0.2, 0.8]])
        # some sinusoids
s.append(np.cos(2 * np.pi * 0.5 * t) + 0.2 * np.sin(2 * np.pi * 2.5 * t + 0.1) + 0.2 * np.sin(
2 * np.pi * 15.3 * t) + 0.1 * np.sin(2 * np.pi * 16.7 * t + 0.1) + 0.1 * np.sin(2 * np.pi * 23.45 * t + .8))
# uniform noise
s.append(0.2 * np.random.rand(nsamples))
x = np.matmul(A, np.array(s))
return x
def spline_matrix(x, knots):
    # Spline matrices are a pain to implement by hand, but the "bs" function from the "patsy" package crashed the notebook...
    # knots define where the splines are anchored; x defines where the spline set is evaluated.
    # e.g. call using: spline_matrix(np.linspace(0, 0.95, num=100), knots)
import scipy.interpolate as si
x_tup = si.splrep(knots, knots, k=3)
nknots = len(x_tup[0])
x_i = np.empty((len(x), nknots - 4))
for i in range(nknots - 4):
vec = np.zeros(nknots)
vec[i] = 1.0
x_list = list(x_tup)
x_list[1] = vec.tolist()
x_i[:, i] = si.splev(x, x_list)
return x_i
def simulate_TF(signal=1, noise=True):
    # signal can be 1 (steps), 2 (image-based) or 3 (chirp)
import imageio
if signal == 2:
im = imageio.imread('ex9_tf.png')
im = im[0:60, :, 3] - im[0:60, :, 1]
# im = scipy.ndimage.zoom(im,[1,1])
im = np.flip(im, axis=0)
# plt.imshow(im,origin='lower')
# sig = (scipy.fft.irfft(im.T,axis=1))
        nov = 10  # alternatively: im.shape[0] * 0.5
        nperseg = 50  # alternatively: im.shape[0] - 1
t, sig = scipy.signal.istft(im, fs=500, noverlap=nov, nperseg=nperseg)
sig = sig / 300 # normalize
elif signal == 3:
sig = scipy.signal.chirp(t=np.arange(0, 10, 1 / 500), f0=1, f1=100, t1=2, method='linear', phi=90)
elif signal == 1:
x = np.arange(0, 2, 1 / 500)
sig_steps = np.concatenate([1.0 * sin(2 * np.pi * x * 50), 1.2 * sin(2 * np.pi * x * 55 + np.pi / 2),
0.8 * sin(2 * np.pi * x * 125 + np.pi),
1.0 * sin(2 * np.pi * x * 120 + 3 * np.pi / 2)])
sig = sig_steps
if noise:
sig = sig + 0.1 * np.std(sig) * np.random.randn(sig.shape[0])
return sig
def get_TF_dataset(subject_id='002', bids_root="../local/bids"):
bids_path = BIDSPath(subject=subject_id, task="P3", session="P3",
datatype='eeg', suffix='eeg',
root=bids_root)
raw = read_raw_bids(bids_path)
read_annotations_core(bids_path, raw)
# raw.pick_channels(["Cz"])#["Pz","Fz","Cz"])
raw.load_data()
raw.set_montage('standard_1020', match_case=False)
evts, evts_dict = mne.events_from_annotations(raw)
    wanted_keys = [e for e in evts_dict.keys() if "response" in e]  # keep only the response events
evts_dict_stim = dict((k, evts_dict[k]) for k in wanted_keys if k in evts_dict)
epochs = mne.Epochs(raw, evts, evts_dict_stim, tmin=-1, tmax=2)
return epochs
def get_classification_dataset(subject=1, typeInt=4):
# TypeInt:
# Task 1 (open and close left or right fist)
# Task 2 (imagine opening and closing left or right fist)
# Task 3 (open and close both fists or both feet)
# Task 4 (imagine opening and closing both fists or both feet)
assert (typeInt >= 1)
assert (typeInt <= 4)
from mne.io import concatenate_raws, read_raw_edf
from mne.datasets import eegbci
tmin, tmax = -1., 4.
runs = [3, 7, 11]
runs = [r + typeInt - 1 for r in runs]
print("loading subject {} with runs {}".format(subject, runs))
    if typeInt <= 2:  # tasks 1 & 2 use left/right fist; tasks 3 & 4 use fists/feet
        event_id = dict(left=2, right=3)
else:
event_id = dict(hands=2, feet=3)
raw_fnames = eegbci.load_data(subject, runs)
raws = [read_raw_edf(f, preload=True) for f in raw_fnames]
raw = concatenate_raws(raws)
raw.filter(7., 30., fir_design='firwin', skip_by_annotation='edge')
eegbci.standardize(raw) # set channel names
montage = mne.channels.make_standard_montage('standard_1005')
raw.set_montage(montage)
raw.rename_channels(lambda x: x.strip('.'))
events, _ = mne.events_from_annotations(raw, event_id=dict(T1=2, T2=3))
picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
# Read epochs (train will be done only between 1 and 2s)
# Testing will be done with a running classifier
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=picks,
baseline=None, preload=True)
return (epochs)
def ex8_simulateData(width=40, n_subjects=15, signal_mean=100, noise_between=30, noise_within=10, smooth_sd=4,
rng_seed=43):
# adapted and extended from https://mne.tools/dev/auto_tutorials/discussions/plot_background_statistics.html#sphx-glr-auto-tutorials-discussions-plot-background-statistics-py
rng = np.random.RandomState(rng_seed)
# For each "subject", make a smoothed noisy signal with a centered peak
X = noise_within * rng.randn(n_subjects, width, width)
    # Add three focal signals (the first one negative)
X[:, width // 6 * 2, width // 6 * 2] -= signal_mean / 3 * 3 + rng.randn(n_subjects) * noise_between
X[:, width // 6 * 4, width // 6 * 4] += signal_mean / 3 * 2 + rng.randn(n_subjects) * noise_between
X[:, width // 6 * 5, width // 6 * 5] += signal_mean / 3 * 2 + rng.randn(n_subjects) * noise_between
# Spatially smooth with a 2D Gaussian kernel
size = width // 2 - 1
gaussian = np.exp(-(np.arange(-size, size + 1) ** 2 / float(smooth_sd ** 2)))
for si in range(X.shape[0]):
for ri in range(X.shape[1]):
X[si, ri, :] = np.convolve(X[si, ri, :], gaussian, 'same')
for ci in range(X.shape[2]):
X[si, :, ci] = np.convolve(X[si, :, ci], gaussian, 'same')
# X += 10 * rng.randn(n_subjects, width, width)
return X
def stc_plot2img(h, title="SourceEstimate", closeAfterwards=False, crop=True):
h.add_text(0.1, 0.9, title, 'title', font_size=16)
screenshot = h.screenshot()
if closeAfterwards:
h.close()
if crop:
nonwhite_pix = (screenshot != 255).any(-1)
nonwhite_row = nonwhite_pix.any(1)
nonwhite_col = nonwhite_pix.any(0)
screenshot = screenshot[nonwhite_row][:, nonwhite_col]
return screenshot
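if __name__ == "__main__":
    # Hedged usage sketch (requires network access and assumes the OSF project
    # layout is unchanged): fetch one ERP-CORE subject, then build the toy mixture.
    download_erpcore(task="P3", subject=1, localpath="../local/bids/")
    x = simulate_ICA(dims=4)
    print(x.shape)  # (4, 1000): four mixed channels, 1000 samples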

utils/file_utils.py Normal file

@@ -0,0 +1,89 @@
import os
import mne
def load_bad_annotations(filepath, fileending="badSegments.csv"):
"""
Loads the annotations for bad segments
:param filepath: The path to the file we want to load
    :param fileending: The filename ending differs depending on whether the subject's annotations
                       were preprocessed manually.
                       The default is the ending of the provided preprocessed annotations: "badSegments.csv"
                       For the manually preprocessed annotations, the file ending is: "badannotations.csv"
:return: The mne annotations
"""
if os.path.isfile(filepath + "_" + fileending):
return mne.read_annotations(filepath + "_" + fileending)
def load_preprocessed_data(subject, dataset):
"""
Load the raw object as well as the annotations of the preprocessed file
    :param subject: The subject for which to load the raw object
    :param dataset: The currently viewed dataset
    :return: The raw object
"""
    folder = os.path.join("Dataset", dataset, "sub-" + subject, "ses-" + dataset, "eeg")
    filepath = os.path.join(folder, "sub-" + subject + "_task-" + dataset)
raw = mne.io.read_raw_fif(filepath + "_cleaned.fif")
return raw
def get_keys_for_events(stimulus=None, condition=None):
"""
For a given stimulus and condition get all the event keys.
:param stimulus: Either 'face' or 'car' or 'None' for no stimulus
:param condition: Either 'intact' or 'scrambled' or 'None' for no condition
    :return: A list of keys, or 'stimulus' if neither stimulus nor condition was given
"""
if stimulus == 'face':
if condition == 'intact':
return ["stimulus:{}".format(k) for k in range(1, 41)]
elif condition == 'scrambled':
return ["stimulus:{}".format(k) for k in range(101, 141)]
else: # All faces
return ["stimulus:{}".format(k) for k in list(range(1, 41)) + list(range(101, 141))]
elif stimulus == 'car':
if condition == 'intact':
return ["stimulus:{}".format(k) for k in range(41, 81)]
elif condition == 'scrambled':
return ["stimulus:{}".format(k) for k in range(141, 181)]
else: # All cars
return ["stimulus:{}".format(k) for k in list(range(41, 81)) + list(range(141, 181))]
else: # If no stimulus is given
if condition == 'intact':
            return ["stimulus:{}".format(k) for k in list(range(1, 41)) + list(range(41, 81))]
elif condition == 'scrambled':
return ["stimulus:{}".format(k) for k in list(range(101, 141)) + list(range(141, 181))]
else: # Every stimulus
return "stimulus"
def get_epochs(raw, conditions=None, picks=None, tmin=-0.1, tmax=1):
"""
Returns the epochs for a given dataset
:param raw: the dataset
    :param conditions: A list of tuples of the form [(stimulus, condition), (stimulus, condition)],
                       e.g. [('face', None), ('car', 'scrambled')] returns the epochs where the stimulus is a face (any condition) or a scrambled car.
                       The default is None, i.e. everything
:param picks: a list. Additional criteria for picking the epochs, e.g. channels, etc.
:param tmin: onset before the event
:param tmax: end after the event
    :return: The epochs and the dictionary of event keys used to select them
"""
events, events_dict = mne.events_from_annotations(raw)
events_dict_key = {}
if conditions is None:
conditions = [(None, None)]
for condition in conditions:
wanted_keys = get_keys_for_events(condition[0], condition[1])
if wanted_keys == "stimulus":
wanted_keys = [e for e in events_dict.keys() if "stimulus" in e]
events_dict_key.update(dict((k, events_dict[k]) for k in wanted_keys if k in events_dict))
epochs = mne.Epochs(raw, events, events_dict_key, tmin=tmin, tmax=tmax, reject_by_annotation=False, picks=picks)
epochs.drop_bad()
return epochs, events_dict_key
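if __name__ == "__main__":
    # Hedged usage sketch; dataset "N170" and subject "001" are placeholders and
    # assume the cleaned .fif files already exist under Dataset/.
    raw = load_preprocessed_data("001", "N170")
    epochs, event_keys = get_epochs(raw, conditions=[("face", "intact"),
                                                     ("car", "scrambled")])
    print(epochs)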

utils/plot_utils.py Normal file

@@ -0,0 +1,89 @@
import mne
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from matplotlib.colors import LogNorm
from utils.file_utils import load_preprocessed_data, get_keys_for_events
def plot_grand_average(dataset):
"""
Plot the grand average ERPs
    :param dataset: the dataset for which the grand average is computed
"""
evtss = [('face', 'intact'), ('face', 'scrambled'), ('car', 'intact'), ('car', 'scrambled')]
chs = ['P7', 'PO7', 'P8', 'PO8']
for ch in chs:
fi = []
fs = []
ci = []
cs = []
for i in range(1, 41):
            subj = "{:03d}".format(i)  # zero-pad the subject id, e.g. "001"
# Load preprocessed .fif data files
raw = load_preprocessed_data(subj, dataset)
# Epoch the data
for ev in evtss:
wanted_keys = get_keys_for_events(ev[0], ev[1])
events, events_dict = mne.events_from_annotations(raw)
events_dict_key = dict((k, events_dict[k]) for k in wanted_keys if k in events_dict)
epochs = mne.Epochs(raw, events, events_dict_key, tmin=-0.1, tmax=1, reject_by_annotation=True,
picks=[ch])
                # Collect the per-subject evoked average for this stimulus/condition pair
if ev[0] == 'face' and ev[1] == 'intact':
fi.append(epochs.average(picks=[ch]))
elif ev[0] == 'face' and ev[1] == 'scrambled':
fs.append(epochs.average(picks=[ch]))
elif ev[0] == 'car' and ev[1] == 'intact':
ci.append(epochs.average(picks=[ch]))
elif ev[0] == 'car' and ev[1] == 'scrambled':
cs.append(epochs.average(picks=[ch]))
ga_fi = mne.grand_average(fi)
ga_ci = mne.grand_average(ci)
ga_fs = mne.grand_average(fs)
ga_cs = mne.grand_average(cs)
ga_fi.comment = 'Face Intact'
ga_ci.comment = 'Car Intact'
ga_fs.comment = 'Face Scrambled'
ga_cs.comment = 'Car Scrambled'
mne.viz.plot_compare_evokeds([ga_fi, ga_ci, ga_fs, ga_cs], picks=ch, colors=['blue', 'red', 'blue', 'red'],
linestyles=['solid', 'solid', 'dotted', 'dotted'])
def plot_tf_cluster(F, clusters, cluster_p_values, freqs, times):
"""
    Plot the F-statistic values of permutation clusters: p-values <= 0.05 in color, > 0.05 in grey.
:param F: F-Statistics of the permutation clusters
:param clusters: all permutation clusters
:param cluster_p_values: p-values of the clusters
:param freqs: frequency domain
:param times: time domain
"""
good_c = np.nan * np.ones_like(F)
for clu, p_val in zip(clusters, cluster_p_values):
if p_val <= 0.05:
good_c[clu] = F[clu]
bbox = [times[0], times[-1], freqs[0], freqs[-1]]
plt.imshow(F, aspect='auto', origin='lower', cmap=cm.gray, extent=bbox, interpolation='None')
a = plt.imshow(good_c, cmap=cm.RdBu_r, aspect='auto', origin='lower', extent=bbox, interpolation='None')
plt.colorbar(a)
plt.xlabel('Time (s)')
plt.ylabel('Frequency (Hz)')
plt.show()
def plot_oscillation_bands(condition):
    """Plot baseline-corrected topomaps of an AverageTFR for the five classical frequency bands."""
    fig, axis = plt.subplots(1, 5, figsize=(25, 5))
condition.plot_topomap(baseline=(-0.2, 0), fmin=0, fmax=4, title='Delta', axes=axis[0], show=False, vmin=0, vmax=1.5, tmin=0, tmax=1)
condition.plot_topomap(baseline=(-0.2, 0), fmin=4, fmax=8, title='Theta', axes=axis[1], show=False, vmin=0, vmax=0.7, tmin=0, tmax=1)
condition.plot_topomap(baseline=(-0.2, 0), fmin=8, fmax=12, title='Alpha', axes=axis[2], show=False, vmin=-0.15, vmax=0.2, tmin=0, tmax=1)
condition.plot_topomap(baseline=(-0.2, 0), fmin=13, fmax=30, title='Beta', axes=axis[3], show=False, vmin=-0.18, vmax=0.2, tmin=0, tmax=1)
condition.plot_topomap(baseline=(-0.2, 0), fmin=30, fmax=45, title='Gamma', axes=axis[4], vmin=0, vmax=0.2, tmin=0, tmax=1)
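if __name__ == "__main__":
    # Hedged sketch of what plot_tf_cluster expects: a cluster permutation test
    # over a (subjects x freqs x times) array. The random stand-in data below is
    # purely illustrative; the statistic map is t-valued here but plots the same way.
    rng = np.random.RandomState(0)
    power_diff = rng.randn(15, 20, 100)  # e.g. condition A minus condition B
    F, clusters, cluster_p_values, _ = mne.stats.permutation_cluster_1samp_test(
        power_diff, n_permutations=100, seed=0)
    freqs = np.linspace(4, 40, 20)
    times = np.linspace(-0.2, 1.0, 100)
    plot_tf_cluster(F, clusters, cluster_p_values, freqs, times)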