{ "nbformat_minor": 0, "nbformat": 4, "cells": [ { "execution_count": null, "cell_type": "code", "source": [ "%matplotlib inline" ], "outputs": [], "metadata": { "collapsed": false } }, { "source": [ "\n# Compute coherence in source space using a MNE inverse solution\n\n\nThis examples computes the coherence between a seed in the left\nauditory cortex and the rest of the brain based on single-trial\nMNE-dSPM inverse solutions.\n\n\n" ], "cell_type": "markdown", "metadata": {} }, { "execution_count": null, "cell_type": "code", "source": [ "# Author: Martin Luessi \n#\n# License: BSD (3-clause)\n\nimport numpy as np\n\nimport mne\nfrom mne.datasets import sample\nfrom mne.minimum_norm import (apply_inverse, apply_inverse_epochs,\n read_inverse_operator)\nfrom mne.connectivity import seed_target_indices, spectral_connectivity\n\nprint(__doc__)\n\ndata_path = sample.data_path()\nsubjects_dir = data_path + '/subjects'\nfname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'\nfname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'\nfname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'\nlabel_name_lh = 'Aud-lh'\nfname_label_lh = data_path + '/MEG/sample/labels/%s.label' % label_name_lh\n\nevent_id, tmin, tmax = 1, -0.2, 0.5\nmethod = \"dSPM\" # use dSPM method (could also be MNE or sLORETA)\n\n# Load data.\ninverse_operator = read_inverse_operator(fname_inv)\nlabel_lh = mne.read_label(fname_label_lh)\nraw = mne.io.read_raw_fif(fname_raw)\nevents = mne.read_events(fname_event)\n\n# Add a bad channel.\nraw.info['bads'] += ['MEG 2443']\n\n# pick MEG channels.\npicks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,\n exclude='bads')\n\n# Read epochs.\nepochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,\n baseline=(None, 0),\n reject=dict(mag=4e-12, grad=4000e-13, eog=150e-6))\n\n# First, we find the most active vertex in the left auditory cortex, which\n# we will later use as seed for the connectivity computation.\nsnr = 3.0\nlambda2 = 1.0 / snr ** 2\nevoked = epochs.average()\nstc = apply_inverse(evoked, inverse_operator, lambda2, method,\n pick_ori=\"normal\")\n\n# Restrict the source estimate to the label in the left auditory cortex.\nstc_label = stc.in_label(label_lh)\n\n# Find number and index of vertex with most power.\nsrc_pow = np.sum(stc_label.data ** 2, axis=1)\nseed_vertno = stc_label.vertices[0][np.argmax(src_pow)]\nseed_idx = np.searchsorted(stc.vertices[0], seed_vertno) # index in orig stc\n\n# Generate index parameter for seed-based connectivity analysis.\nn_sources = stc.data.shape[0]\nindices = seed_target_indices([seed_idx], np.arange(n_sources))\n\n# Compute inverse solution and for each epoch. By using \"return_generator=True\"\n# stcs will be a generator object instead of a list. This allows us so to\n# compute the coherence without having to keep all source estimates in memory.\n\nsnr = 1.0 # use lower SNR for single epochs\nlambda2 = 1.0 / snr ** 2\nstcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method,\n pick_ori=\"normal\", return_generator=True)\n\n# Now we are ready to compute the coherence in the alpha and beta band.\n# fmin and fmax specify the lower and upper freq. for each band, resp.\nfmin = (8., 13.)\nfmax = (13., 30.)\nsfreq = raw.info['sfreq'] # the sampling frequency\n\n# Now we compute connectivity. 
\n# Now we compute connectivity. We use mode='fourier', which uses an FFT\n# with a Hanning window to compute the spectra (instead of multitaper\n# estimation, which has a lower variance but is slower). By using\n# faverage=True, we directly average the coherence in the alpha and beta\n# bands, i.e., we will only get 2 frequency bins. Setting n_jobs > 1 would\n# parallelize the computation over epochs to speed things up.\ncoh, freqs, times, n_epochs, n_tapers = spectral_connectivity(\n    stcs, method='coh', mode='fourier', indices=indices,\n    sfreq=sfreq, fmin=fmin, fmax=fmax, faverage=True, n_jobs=1)\n\nprint('Frequencies in Hz over which coherence was averaged for alpha: ')\nprint(freqs[0])\nprint('Frequencies in Hz over which coherence was averaged for beta: ')\nprint(freqs[1])\n\n# Generate a SourceEstimate with the coherence. This is simple since we\n# used a single seed. For more than one seed, we would have to split coh.\n# Note: We use a hack to save the frequency axis as time.\ntmin = np.mean(freqs[0])\ntstep = np.mean(freqs[1]) - tmin\ncoh_stc = mne.SourceEstimate(coh, vertices=stc.vertices, tmin=1e-3 * tmin,\n                             tstep=1e-3 * tstep, subject='sample')\n\n# Now we can visualize the coherence using the plot method.\nbrain = coh_stc.plot('sample', 'inflated', 'both',\n                     time_label='Coherence %0.1f Hz',\n                     subjects_dir=subjects_dir,\n                     clim=dict(kind='value', lims=(0.25, 0.4, 0.65)))\nbrain.show_view('lateral')" ], "outputs": [], "metadata": { "collapsed": false } } ], "metadata": { "kernelspec": { "display_name": "Python 2", "name": "python2", "language": "python" }, "language_info": { "mimetype": "text/x-python", "nbconvert_exporter": "python", "name": "python", "file_extension": ".py", "version": "2.7.13", "pygments_lexer": "ipython2", "codemirror_mode": { "version": 2, "name": "ipython" } } } }