Edited some comments

2021-03-28 15:45:14 +02:00
parent bb3406bd88
commit cfa950b85b
5 changed files with 45 additions and 20 deletions

View File

@@ -46,6 +46,7 @@ def events_to_labels(evts, events_dict, mask=None): # TODO write a test
def permutation_test(baseline, score, n_iter):
"""
An implementation of a permutation test for classification scores.
:param baseline: The classification scores of the baseline, i.e. selection by chance
:param score: The classification scores which are tested for significance
:param n_iter: number of permutations
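
As a reading aid, a minimal sketch of how such a test could look (illustrative only, assuming baseline and score are 1-D arrays of per-split accuracies; not necessarily the body of permutation_test itself):

import numpy as np

def permutation_test_sketch(baseline, score, n_iter, seed=None):
    rng = np.random.default_rng(seed)
    observed = np.mean(score) - np.mean(baseline)        # how much better than chance the scores are
    pooled = np.concatenate([baseline, score])
    exceed = 0
    for _ in range(n_iter):
        permuted = rng.permutation(pooled)               # shuffle the group assignment
        diff = permuted[len(baseline):].mean() - permuted[:len(baseline)].mean()
        exceed += diff >= observed                       # a chance split did at least as well
    return (exceed + 1) / (n_iter + 1)                   # add-one smoothing avoids p == 0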
@@ -120,6 +121,8 @@ def decoding(dataset, filename, compute_metric=True, mask=None):
# Compute index of time point 0
index = math.floor((len(metric[0]) / time_scale) * 100)
baseline = np.array(metric[:, :index]).flatten()  # pre-stimulus scores serve as the chance baseline
# Plot the result
plt.plot(np.linspace(-200, 1000, 1127), np.mean(metric, axis=0))
plt.ylabel('Accuracy (%)')
plt.xlabel('Time (ms)')
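
An equivalent, perhaps more readable way to locate this index from the time axis used in the plot above (a sketch; it assumes the epoch really runs from -200ms to 1000ms):

times_ms = np.linspace(-200, 1000, len(metric[0]))   # same axis as in the plotting call above
index_alt = int(np.searchsorted(times_ms, 0))        # first sample at or after stimulus onset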
@@ -129,11 +132,12 @@ def decoding(dataset, filename, compute_metric=True, mask=None):
# Compute the permutation tests
for t in range(len(metric[0][index:])):
score_t = np.asarray(metric[:, t + index])
p = permutation_test(baseline, score_t, 100)
p = permutation_test(baseline, score_t, 1000)
p_values.append(p)
if t % 50 == 0:
print(str(t) + " Out of " + str(len(metric[0][index:])))
# Plot the result
plt.plot(times[index:], p_values)
plt.ylabel('P-Value')
plt.xlabel('Time (ms)')
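
To read the result off programmatically, the p-values could also be thresholded (a sketch using the variables above; the 0.05 cutoff mirrors the convention used in plot_tf_cluster):

p_arr = np.asarray(p_values)
significant = p_arr < 0.05
if significant.any():
    onset_ms = times[index:][np.argmax(significant)]  # first post-stimulus time with p < 0.05
    print('Decoding becomes significant at about %.0f ms' % onset_ms)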
@@ -143,14 +147,17 @@ def decoding(dataset, filename, compute_metric=True, mask=None):
def create_tfr(raw, condition, freqs, n_cycles, response='induced', baseline=None, plot=False):
"""
Compute the time frequency representation (TFR) of data for a given condition via morlet wavelets
Compute the time frequency representation (TFR) of data for a given condition via Morlet wavelets
:param raw: the data
:param condition: the condition for which to compute the TFR. Given as a list of tuples of the form (stimulus, condition) # TODO ambiguous use of condition
:param condition: the condition for which to compute the TFR. Given as a list of tuples of the form (stimulus, texture)
:param freqs: the frequencies for which to compute the TFR
:param n_cycles: the number of cycles used by the morlet wavelets
:param response: type of expected TFR. Can be total, induced or evoked. Default is induced
:param n_cycles: the number of cycles used by the Morlet wavelets
:param response: type of expected TFR. Can be total, induced or evoked. Default is induced,
the others were not used for the report, only for exploration
:param baseline: baseline used to correct the power. A tuple of the form (start, end).
Default is None and no baseline correction will be applid
Default is None and no baseline correction will be applied
:param plot: True if results should be plotted, else False.
:return: The TFR of the given data for a given condition. Has type AverageTFR
"""
epochs, _ = get_epochs(raw, condition, tmin=-0.2, tmax=1)
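
A hedged usage sketch of create_tfr (the (stimulus, texture) tuple and the frequency grid are assumptions mirroring the call in time_frequency further down):

freqs = np.logspace(*np.log10([0.1, 50]), num=50)
power = create_tfr(raw, [('face', 'intact')], freqs, n_cycles=freqs / 2,
                   response='induced', baseline=(-0.2, 0), plot=True)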
@@ -168,6 +175,7 @@ def create_tfr(raw, condition, freqs, n_cycles, response='induced', baseline=Non
power_induced = tfr_morlet(epochs.subtract_evoked(), freqs=freqs, n_cycles=n_cycles, return_itc=False, n_jobs=4)
power = mne.combine_evoked([power_total, power_induced], weights=[1, -1])
if plot: power.plot(picks='P7')
# Apply a baseline correction to the power data
power.apply_baseline(mode='ratio', baseline=baseline)
if plot:
plot_oscillation_bands(power)
@@ -185,13 +193,9 @@ def time_frequency(dataset, filename, compute_tfr=True):
:param compute_tfr: If True, the TFRs will be created; else they will be loaded from a precomputed file
"""
# Parameters
# Frequency space (from, to, steps) -> Control frequency resolution : Between num=50-80 good for 1-50Hz
# freqs = np.linspace(0.1, 50, num=50) # Use this for linear space scaling
freqs = np.logspace(*np.log10([0.1, 50]), num=50)
# Number of cycles -> Controls time resolution ? At ~freqs/2 good for high frequency resolution
n_cycles = freqs / 2 # 1 for high time resolution & freq smoothing, freqs/2 for high freq resolution & time smooth
# Baseline -> Should not go post-stimulus, i.e. > 0 -> Best is pre-stimulus (e.g. -400 to -200ms)
baseline = [-0.5, 0]
n_cycles = freqs / 2
cond1 = []
cond2 = []
times = None
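
The comments above describe the resolution trade-off; a small sketch of the quantities involved (the sigma_t relation is the standard Morlet formula, quoted as an aid, not taken from this repository):

freqs = np.logspace(*np.log10([0.1, 50]), num=50)  # log spacing: denser sampling at low frequencies
n_cycles = freqs / 2                               # more cycles -> finer frequency, coarser time resolution
sigma_t = n_cycles / (2.0 * np.pi * freqs)         # temporal std dev of each Morlet wavelet in seconds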
@@ -209,6 +213,8 @@ def time_frequency(dataset, filename, compute_tfr=True):
raw.set_montage('standard_1020', match_case=False)
# Create the two conditions we want to compare
# IMPORTANT: If other conditions should be compared, change them here by altering the second
# argument passed to create_tfr
power_cond1 = create_tfr(raw, [('face', 'intact')], freqs, n_cycles, 'induced', (-0.2, 0))
print(' CONDITION 1 LOADED')
cond1.append(power_cond1)
@@ -219,17 +225,23 @@ def time_frequency(dataset, filename, compute_tfr=True):
cond2.append(power_cond2)
print(' DONE')
# Save the data so we can access the results more easily
np.save('cached_data/tf_data/' + filename + '_cond1', cond1)
np.save('cached_data/tf_data/' + filename + '_cond2', cond2)
else:
# If the data should not be recomputed, load the given filename
cond1 = np.load('cached_data/tf_data/' + filename + '_cond1.npy', allow_pickle=True).tolist()
cond2 = np.load('cached_data/tf_data/' + filename + '_cond2.npy', allow_pickle=True).tolist()
if times is None:
times = cond1[0].times
# Some plots
mne.grand_average(cond1).plot(picks=['P7'], vmin=-3, vmax=3, title='Grand Average P7')
mne.grand_average(cond2).plot(picks=['P7'], vmin=-3, vmax=3, title='Grand Average P7')
plot_oscillation_bands(mne.grand_average(cond1))
plot_oscillation_bands(mne.grand_average(cond2))
# Compute the cluster permutation test
F, clusters, cluster_p_values, h0 = mne.stats.permutation_cluster_test(
[mne.grand_average(cond1).data, mne.grand_average(cond2).data], n_jobs=4, verbose='INFO',
seed=123)
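
A hedged sketch of how these outputs might be reduced to the clusters that survive the test (the 0.05 cutoff matches plot_tf_cluster below; it assumes that helper is imported here):

good_clusters = [c for c, p in zip(clusters, cluster_p_values) if p <= 0.05]
print('%d of %d clusters significant at p <= 0.05' % (len(good_clusters), len(clusters)))
plot_tf_cluster(F, clusters, cluster_p_values, freqs, times)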

View File

@@ -93,16 +93,18 @@ def create_peak_difference_feature(df, max_subj=40):
def analyze_erp(channels, precompute=True):
"""
Execute several statistical tests for different hypotheses, to analyze ERPs
Execute several statistical tests for different hypotheses, to analyse ERPs
:param channels: The channels for which the tests are executed
:param precompute: If True, the peak-difference data will be computed. Else it will be loaded from a precomputed file,
if it exists. This should only be set to False if the method was already executed once!
"""
if precompute:
# Precompute the erp peaks
precompute_erp_df('N170')
for c in channels:
print("CHANNEL: " + c)
# Load the erp peak data and create the features for the t-tests
erp_df = pd.read_csv('cached_data/erp_peaks/erp_peaks_' + c + '.csv', index_col=0)
feature_df = create_peak_difference_feature(erp_df)
# 1. H_a : There is a difference between the N170 peak of recognizing faces and cars
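
A minimal sketch of such a paired t-test (the column names in feature_df are hypothetical placeholders; the real layout comes from create_peak_difference_feature):

from scipy import stats
# 'peak_face' and 'peak_car' are assumed column names, used here only for illustration
t_stat, p_val = stats.ttest_rel(feature_df['peak_face'], feature_df['peak_car'])
print('H1 (%s): t = %.2f, p = %.4f' % (c, t_stat, p_val))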

View File

@@ -52,7 +52,7 @@ def filter_data(raw):
"""
Filter the data of a single subject with a bandpass filter.
The lower bound is 0.5Hz to compensate for slow drifts.
The upper bound is 50Hz to attenuate high frequencies, including the power line spike at 60Hz
The upper bound is 48Hz to attenuate high frequencies, including the power line spike at 60Hz
:param raw: The data to be filtered
:return: The filtered data
"""

View File

@@ -18,7 +18,7 @@ def load_bad_annotations(filepath, fileending="badSegments.csv"):
def load_preprocessed_data(subject, dataset):
"""
Load the raw object as well as the annotations of the preprocessed file
Load the raw object of the preprocessed file
:param subject: The subject, for which we want to load the raw object
:param dataset: The currently viewed dataset
:param selected_subjects: The manually preprocessed subjects

View File

@@ -58,7 +58,8 @@ def plot_grand_average(dataset):
def plot_tf_cluster(F, clusters, cluster_p_values, freqs, times):
"""
Plot teh F-Statistic values of permutation clusters with p-values <= 0.05 in color and > 0.05 in grey.
Plot the F-Statistic values of permutation clusters with p-values <= 0.05 in color and > 0.05 in grey.
Currently this only works well for the linear frequency scaling; for the logarithmic scaling, a different x-axis has to be chosen
:param F: F-Statistics of the permutation clusters
:param clusters: all permutation clusters
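
One way such a masked F-map could be drawn, as a sketch only (it assumes F has shape (n_freqs, n_times) and that np/plt are imported as in the rest of this module; not necessarily how plot_tf_cluster is implemented):

F_masked = np.full(F.shape, np.nan)
for c, p in zip(clusters, cluster_p_values):
    if p <= 0.05:
        F_masked[c] = F[c]                              # keep only significant clusters in colour
plt.imshow(F, cmap='gray', aspect='auto', origin='lower',
           extent=[times[0], times[-1], freqs[0], freqs[-1]])
plt.imshow(F_masked, cmap='RdBu_r', aspect='auto', origin='lower',
           extent=[times[0], times[-1], freqs[0], freqs[-1]])
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')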
@@ -81,9 +82,19 @@ def plot_tf_cluster(F, clusters, cluster_p_values, freqs, times):
def plot_oscillation_bands(condition):
"""
Plot the oscillation bands for a given condition in the time window from 130ms to 200ms
:param condition: the condition to plot the oscillation bands for
"""
fig, axis = plt.subplots(1, 5, figsize=(25, 5))
condition.plot_topomap(baseline=(-0.2, 0), fmin=0, fmax=4, title='Delta', axes=axis[0], show=False, vmin=0, vmax=1.5, tmin=0, tmax=1)
condition.plot_topomap(baseline=(-0.2, 0), fmin=4, fmax=8, title='Theta', axes=axis[1], show=False, vmin=0, vmax=0.7, tmin=0, tmax=1)
condition.plot_topomap(baseline=(-0.2, 0), fmin=8, fmax=12, title='Alpha', axes=axis[2], show=False, vmin=-0.15, vmax=0.2, tmin=0, tmax=1)
condition.plot_topomap(baseline=(-0.2, 0), fmin=13, fmax=30, title='Beta', axes=axis[3], show=False, vmin=-0.18, vmax=0.2, tmin=0, tmax=1)
condition.plot_topomap(baseline=(-0.2, 0), fmin=30, fmax=45, title='Gamma', axes=axis[4], vmin=0, vmax=0.2, tmin=0, tmax=1)
condition.plot_topomap(baseline=(-0.2, 0), fmin=0, fmax=4, title='Delta', axes=axis[0], show=False, vmin=0,
vmax=1.5, tmin=0.13, tmax=0.2)
condition.plot_topomap(baseline=(-0.2, 0), fmin=4, fmax=8, title='Theta', axes=axis[1], show=False, vmin=0,
vmax=0.7, tmin=0.13, tmax=0.2)
condition.plot_topomap(baseline=(-0.2, 0), fmin=8, fmax=12, title='Alpha', axes=axis[2], show=False, vmin=-0.25,
vmax=0.2, tmin=0.13, tmax=0.2)
condition.plot_topomap(baseline=(-0.2, 0), fmin=13, fmax=30, title='Beta', axes=axis[3], show=False, vmin=-0.21,
vmax=0.2, tmin=0.13, tmax=0.2)
condition.plot_topomap(baseline=(-0.2, 0), fmin=30, fmax=45, title='Gamma', axes=axis[4], vmin=-0.05,
vmax=0.2, tmin=0.13, tmax=0.2)