Fixed some minor things
@@ -1,2 +1,4 @@
 This folder should hold the n170 dataset.
 Unpack the dataset here, so that the file structure 'Dataset/n170/...' exists.
+
+Bad annotations from the manual preprocessing step are saved in the 'preprocessed' folder.
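For reference, a quick way to verify the unpacked layout the README describes (a hypothetical check, not part of the repo):

    from pathlib import Path

    # The README expects the archive unpacked so that Dataset/n170/... exists
    assert Path("Dataset/n170").is_dir(), "Unpack the N170 dataset into Dataset/n170/"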
Dataset/preprocessed/sub-001_task-N170_badannotations.csv (new file, 24 lines)
@@ -0,0 +1,24 @@
+onset,duration,description
+1970-01-01 00:00:0.0,0,
+1970-01-01 00:00:03.792590,4.272874611801244,BAD_
+1970-01-01 00:01:16.632102,4.703474378881992,BAD_
+1970-01-01 00:01:25.812306,2.674687014751555,BAD_
+1970-01-01 00:01:30.096020,3.3330078125,BAD_
+1970-01-01 00:01:39.164434,2.2565083947981464,BAD_
+1970-01-01 00:01:42.936322,3.154971370341613,BAD_
+1970-01-01 00:02:53.246060,5.229302940605606,BAD_
+1970-01-01 00:03:06.573345,2.070191187888213,BAD_
+1970-01-01 00:03:13.224567,8.847997137034156,BAD_
+1970-01-01 00:03:24.367312,6.69499830163042,BAD_
+1970-01-01 00:03:50.203670,2.9893560753105817,BAD_
+1970-01-01 00:04:51.697013,9.613967876552806,BAD_
+1970-01-01 00:05:01.312749,2.7326523680124524,BAD_
+1970-01-01 00:06:17.397529,5.784114178959612,BAD_
+1970-01-01 00:06:25.472251,3.2791828416148974,BAD_
+1970-01-01 00:06:31.833646,9.137823903338472,BAD_
+1970-01-01 00:06:40.977378,2.455246748835407,BAD_
+1970-01-01 00:07:06.623277,2.152998835403764,BAD_
+1970-01-01 00:07:26.697804,1.9542604813664184,BAD_
+1970-01-01 00:09:02.732109,4.980879998058981,BAD_
+1970-01-01 00:09:15.452340,1.8755932162266618,BAD_
+1970-01-01 00:11:20.352343,2.4966505725932393,BAD_
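These CSVs are in the format written by mne.Annotations.save. They can be loaded back and attached to the matching recording, mirroring how clean_data consumes them further down in this commit (paths as added here):

    import mne

    annots = mne.read_annotations("Dataset/preprocessed/sub-001_task-N170_badannotations.csv")
    raw = mne.io.read_raw_fif("Dataset/preprocessed/sub-001_task-N170_cleaned.fif")
    raw.annotations.append(annots.onset, annots.duration, annots.description)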
Dataset/preprocessed/sub-001_task-N170_cleaned.fif (new binary file)
Binary file not shown.
Dataset/preprocessed/sub-003_task-N170_badannotations.csv (new file, 10 lines)
@@ -0,0 +1,10 @@
+onset,duration,description
+1970-01-01 00:00:0.0,0,
+1970-01-01 00:02:17.128157,2.947952251552806,BAD_
+1970-01-01 00:02:20.960569,7.62244395380435,BAD_
+1970-01-01 00:03:30.145704,5.332812500000017,BAD_
+1970-01-01 00:04:40.074527,5.490147030279502,BAD_
+1970-01-01 00:05:49.665420,3.3247270477484676,BAD_
+1970-01-01 00:06:59.603314,11.209782608695662,BAD_
+1970-01-01 00:08:12.657335,8.032341809006198,BAD_
+1970-01-01 00:09:22.031561,2.3020526009315745,BAD_
Dataset/preprocessed/sub-003_task-N170_cleaned.fif (new binary file)
Binary file not shown.
Dataset/preprocessed/sub-014_task-N170_badannotations.csv (new file, 9 lines)
@@ -0,0 +1,9 @@
+onset,duration,description
+1970-01-01 00:00:0.0,0,
+1970-01-01 00:00:00.886042,2.5421947787267083,BAD_
+1970-01-01 00:01:18.262621,2.268929541925459,BAD_
+1970-01-01 00:03:27.443617,0.9647090935558822,BAD_
+1970-01-01 00:04:46.825909,3.2253578707297947,BAD_
+1970-01-01 00:05:52.347597,2.637423573369574,BAD_
+1970-01-01 00:06:59.515191,2.4345448369564906,BAD_
+1970-01-01 00:08:07.230491,1.287658918866498,BAD_
Dataset/preprocessed/sub-014_task-N170_cleaned.fif (new binary file)
Binary file not shown.
@@ -111,6 +111,7 @@ def decoding(dataset, filename, compute_metric=True, mask=None):
         if times is None:
             times = epochs.times
         np.save('cached_data/decoding_data/' + filename, metric)
         metric = np.asarray(metric)
     else:
+        # Dummy time axis, created to match epochs.times
         times = np.linspace(-0.09960938, 1, 1127)
@@ -128,7 +129,7 @@ def decoding(dataset, filename, compute_metric=True, mask=None):
     # Compute the permutation tests
     for t in range(len(metric[0][index:])):
         score_t = np.asarray(metric[:, t + index])
-        p = permutation_test(baseline, score_t, 1000)
+        p = permutation_test(baseline, score_t, 100)
         p_values.append(p)
         if t % 50 == 0:
             print(str(t) + " Out of " + str(len(metric[0][index:])))
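permutation_test here is a project helper, not an MNE call. A minimal sketch with the same call shape (two samples plus an iteration count, returning a p-value) could look like the following; the mean-difference statistic is an assumption about the helper, not its actual implementation:

    import numpy as np

    def permutation_test(a, b, n_permutations):
        # Two-sample permutation test on the absolute difference of means (sketch)
        rng = np.random.default_rng()
        a, b = np.asarray(a), np.asarray(b)
        observed = abs(a.mean() - b.mean())
        pooled = np.concatenate([a, b])
        exceed = 0
        for _ in range(n_permutations):
            rng.shuffle(pooled)
            diff = abs(pooled[:len(a)].mean() - pooled[len(a):].mean())
            exceed += diff >= observed
        return (exceed + 1) / (n_permutations + 1)

Dropping from 1000 to 100 permutations (the change above) is roughly a 10x speedup per time point, but the smallest resolvable p-value coarsens to about 0.01.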
@@ -140,7 +141,7 @@ def decoding(dataset, filename, compute_metric=True, mask=None):
     plt.show()


-def create_tfr(raw, condition, freqs, n_cycles, response='induced', baseline=None):
+def create_tfr(raw, condition, freqs, n_cycles, response='induced', baseline=None, plot=False):
     """
     Compute the time-frequency representation (TFR) of data for a given condition via Morlet wavelets
     :param raw: the data
@@ -166,10 +167,11 @@ def create_tfr(raw, condition, freqs, n_cycles, response='induced', baseline=Non
     power_total = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, return_itc=False, n_jobs=4)
     power_induced = tfr_morlet(epochs.subtract_evoked(), freqs=freqs, n_cycles=n_cycles, return_itc=False, n_jobs=4)
     power = mne.combine_evoked([power_total, power_induced], weights=[1, -1])
-    # power.plot(picks='P7')
+    if plot: power.plot(picks='P7')
     power.apply_baseline(mode='ratio', baseline=baseline)
-    # plot_oscillation_bands(power)
-    # power.plot(picks='P7')
+    if plot:
+        plot_oscillation_bands(power)
+        power.plot(picks='P7')
     return power

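A usage sketch for the new plot flag; the condition tuple is borrowed from how get_epochs is called elsewhere in this commit, and the baseline value is a placeholder:

    import numpy as np

    freqs = np.logspace(*np.log10([0.1, 50]), num=50)
    # combine_evoked with weights=[1, -1] above yields total power minus induced power
    power = create_tfr(raw, ('face', 'intact'), freqs, n_cycles=freqs / 2,
                       baseline=(None, 0), plot=True)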
@@ -184,7 +186,7 @@ def time_frequency(dataset, filename, compute_tfr=True):
     """
     # Parameters
     # Frequency space (from, to, steps) -> Controls frequency resolution: num=50-80 is good for 1-50 Hz
-    # freqs = np.linspace(0.1, 50, num=50) #
+    # freqs = np.linspace(0.1, 50, num=50)  # Use this for linear frequency spacing
     freqs = np.logspace(*np.log10([0.1, 50]), num=50)
     # Number of cycles -> Controls time resolution; ~freqs/2 is good for high frequency resolution
     n_cycles = freqs / 2  # 1 for high time resolution & freq smoothing, freqs/2 for high freq resolution & time smoothing
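As a check on the comments above: a Morlet wavelet's temporal extent scales roughly as n_cycles / freq, so the two settings trade off as follows (a quick sketch):

    import numpy as np

    freqs = np.logspace(*np.log10([0.1, 50]), num=50)

    # n_cycles = freqs / 2: every wavelet spans ~0.5 s, regardless of frequency
    print(np.unique(np.round((freqs / 2) / freqs, 3)))  # -> [0.5]

    # n_cycles = 1: wavelets shrink with frequency, ~10 s at 0.1 Hz down to 0.02 s at 50 Hz
    print(np.round(1 / freqs, 2)[[0, -1]])  # -> [10.    0.02]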
@@ -224,8 +226,8 @@ def time_frequency(dataset, filename, compute_tfr=True):
     cond2 = np.load('cached_data/tf_data/' + filename + '_cond2.npy', allow_pickle=True).tolist()
     if times is None:
         times = cond1[0].times
-    # mne.grand_average(cond1).plot(picks=['P7'], vmin=-3, vmax=3, title='Grand Average P7')
-    # mne.grand_average(cond2).plot(picks=['P7'], vmin=-3, vmax=3, title='Grand Average P7')
+    mne.grand_average(cond1).plot(picks=['P7'], vmin=-3, vmax=3, title='Grand Average P7')
+    mne.grand_average(cond2).plot(picks=['P7'], vmin=-3, vmax=3, title='Grand Average P7')
     plot_oscillation_bands(mne.grand_average(cond1))
     plot_oscillation_bands(mne.grand_average(cond2))
     F, clusters, cluster_p_values, h0 = mne.stats.permutation_cluster_test(
@@ -237,6 +239,5 @@ def time_frequency(dataset, filename, compute_tfr=True):
 if __name__ == '__main__':
     mne.set_log_level(verbose=VERBOSE_LEVEL)
     ds = 'N170'
-    # decoding(ds, 'faces_vs_cars_100iters', False)
-    # time_frequency(ds, 'face_intact_vs_all_0.1_50hz_ncf2', False)
-    time_frequency(ds, 'face_intact_vs_all_0.1_50hz_ncf2', False)
+    decoding(ds, 'faces_vs_cars', True)
+    time_frequency(ds, 'face_intact_vs_all_0.1_50hz_ncf2', True)
@@ -91,11 +91,16 @@ def create_peak_difference_feature(df, max_subj=40):
     return peak_diff_df


-def analyze_erp(channels):
+def analyze_erp(channels, precompute=True):
     """
     Execute several statistical tests for different hypotheses, to analyze ERPs
     :param channels: The channels for which the tests are executed
+    :param precompute: If True, the peak-difference data will be computed; otherwise it will be loaded from a
+        precomputed file, if it exists. Only set this to False if the method was already executed once!
     """
+    if precompute:
+        precompute_erp_df('N170')
+
     for c in channels:
         print("CHANNEL: " + c)
         erp_df = pd.read_csv('cached_data/erp_peaks/erp_peaks_' + c + '.csv', index_col=0)
@@ -130,5 +135,4 @@ def analyze_erp(channels):

 if __name__ == '__main__':
     mne.set_log_level(verbose=VERBOSE_LEVEL)
-    precompute_erp_df('N170')
-    analyze_erp(['P7', 'PO7', 'P8', 'PO8'])
+    analyze_erp(['P7', 'PO7', 'P8', 'PO8'], True)
@@ -70,8 +70,7 @@ def clean_data(raw, subject, dataset, cleaned=False):
     :return: the bad channels
     """
     channels = None
-    folder = "Dataset\\" + dataset + "\\sub-" + subject + "\\ses-" + dataset + "\\eeg\\"
-    filepath = folder + "sub-" + subject + "_task-" + dataset
+    filepath = "Dataset/preprocessed/sub-" + subject + "_task-" + dataset + "_badannotations.csv"

     # If nothing was marked yet, plot the data to mark bad segments
     if not cleaned:
@@ -80,15 +79,15 @@ def clean_data(raw, subject, dataset, cleaned=False):
         bad_idx = [idx for (idx, annot) in enumerate(raw.annotations) if annot['description'] == "BAD_"]
         # If bad intervals were found, save them
         if bad_idx:
-            raw.annotations[bad_idx].save(filepath + "_badannotations.csv")
+            raw.annotations[bad_idx].save(filepath)

-    if os.path.isfile(filepath + "_badannotations.csv"):
-        annotations = mne.read_annotations(filepath + "_badannotations.csv")
+    if os.path.isfile(filepath):
+        annotations = mne.read_annotations(filepath)
         raw.annotations.append(annotations.onset, annotations.duration, annotations.description)

     # Set the bad channels for each subject
     if subject == '001':
-        channels = ['F8']  # Maybe also FP2?
+        channels = ['F8']
     elif subject == '003':
         channels = []
     elif subject == '014':
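These BAD_ annotations matter downstream because MNE drops overlapping data automatically when epoching; a one-line illustration (the events variable is hypothetical):

    # Epochs overlapping BAD_ spans are dropped, since reject_by_annotation defaults to True
    epochs = mne.Epochs(raw, events, reject_by_annotation=True)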
@@ -1,4 +1,6 @@
-from utils.file_utils import load_preprocessed_data, get_epochs
+import mne
+
+from utils.file_utils import get_epochs


 def check_peaks():
@@ -6,13 +8,16 @@ def check_peaks():
     Sanity check for the "get_peaks" method
     """
     import matplotlib.pyplot as plt
-    raw = load_preprocessed_data('002', 'N170')
+    subject = '001'
+    folder = "../Dataset/n170/sub-" + subject + "/ses-n170/eeg/"
+    filepath = folder + "sub-" + subject + "_task-n170"
+    raw = mne.io.read_raw_fif(filepath + "_cleaned.fif")
     epochs, _ = get_epochs(raw, [('face', 'intact')], picks='P7')
     ch, latency, peak = epochs.average().get_peak(tmin=0.13, tmax=0.2, mode='neg', return_amplitude=True)
     import numpy as np
     plt.plot(epochs.times, np.squeeze(epochs.average().data.T))
-    plt.vlines([0.13, 0.2], -0.00001, 0.00001, colors='r', linestyles='dotted')
-    plt.vlines(latency, -0.00001, 0.00001, colors='gray', linestyles='dotted')
+    plt.vlines([0.13, 0.2], -0.00001, 0.00001, colors='gray', linestyles='dotted')
+    plt.vlines(latency, -0.00001, 0.00001, colors='r', linestyles='dotted')
     plt.show()

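One detail behind the vlines limits in the sanity check above: MNE stores EEG in volts, so the returned peak amplitude is on the order of 1e-5. Continuing with the epochs from check_peaks, a small sketch for reporting in friendlier units:

    # get_peak returns volts and seconds; report in microvolts and milliseconds
    ch, latency, peak = epochs.average().get_peak(tmin=0.13, tmax=0.2, mode='neg', return_amplitude=True)
    print(f"{ch}: {peak * 1e6:.2f} uV at {latency * 1e3:.0f} ms")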
Binary file not shown.
@@ -24,7 +24,7 @@ def load_preprocessed_data(subject, dataset):
     :param subject: The manually preprocessed subject
     :return: The raw object
     """
-    folder = "Dataset\\" + dataset + "\\sub-" + subject + "\\ses-" + dataset + "\\eeg\\"
+    folder = "Dataset/" + dataset + "/sub-" + subject + "/ses-" + dataset + "/eeg/"
     filepath = folder + "sub-" + subject + "_task-" + dataset
     raw = mne.io.read_raw_fif(filepath + "_cleaned.fif")
     return raw
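The backslash-to-slash change makes the hard-coded path portable across operating systems; pathlib would sidestep separator issues entirely. An alternative sketch, not what the repo does:

    from pathlib import Path

    import mne

    def load_preprocessed_data(subject, dataset):
        folder = Path("Dataset") / dataset / f"sub-{subject}" / f"ses-{dataset}" / "eeg"
        return mne.io.read_raw_fif(folder / f"sub-{subject}_task-{dataset}_cleaned.fif")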