{ "nbformat_minor": 0, "nbformat": 4, "cells": [ { "execution_count": null, "cell_type": "code", "source": [ "%matplotlib inline" ], "outputs": [], "metadata": { "collapsed": false } }, { "source": [ "\n# Decoding sensor space data with generalization across time and conditions\n\n\nThis example runs the analysis described in [1]_. It illustrates how one can\nfit a linear classifier to identify a discriminatory topography at a given time\ninstant and subsequently assess whether this linear model can accurately\npredict all of the time samples of a second set of conditions.\n\nReferences\n----------\n\n.. [1] King & Dehaene (2014) 'Characterizing the dynamics of mental\n representations: the temporal generalization method', Trends In\n Cognitive Sciences, 18(4), 203-210. doi: 10.1016/j.tics.2014.01.002.\n\n" ], "cell_type": "markdown", "metadata": {} }, { "execution_count": null, "cell_type": "code", "source": [ "# Authors: Jean-Remi King \n# Alexandre Gramfort \n# Denis Engemann \n#\n# License: BSD (3-clause)\n\nimport numpy as np\n\nimport mne\nfrom mne.datasets import sample\nfrom mne.decoding import GeneralizationAcrossTime\n\nprint(__doc__)\n\n# Preprocess data\ndata_path = sample.data_path()\n# Load and filter data, set up epochs\nraw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'\nevents_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'\nraw = mne.io.read_raw_fif(raw_fname, preload=True)\npicks = mne.pick_types(raw.info, meg=True, exclude='bads') # Pick MEG channels\nraw.filter(1, 30, method='fft') # Band pass filtering signals\nevents = mne.read_events(events_fname)\nevent_id = {'AudL': 1, 'AudR': 2, 'VisL': 3, 'VisR': 4}\ndecim = 2 # decimate to make the example faster to run\nepochs = mne.Epochs(raw, events, event_id, -0.050, 0.400, proj=True,\n picks=picks, baseline=None, preload=True,\n reject=dict(mag=5e-12), decim=decim, verbose=False)\n\n# We will train the classifier on all left visual vs auditory trials\n# and test on all right visual vs auditory trials.\n\n# In this case, because the test data is independent from the train data,\n# we test the classifier of each fold and average the respective predictions.\n\n# Define events of interest\ntriggers = epochs.events[:, 2]\nviz_vs_auditory = np.in1d(triggers, (1, 2)).astype(int)\n\ngat = GeneralizationAcrossTime(predict_mode='mean-prediction', n_jobs=1)\n\n# For our left events, which ones are visual?\nviz_vs_auditory_l = (triggers[np.in1d(triggers, (1, 3))] == 3).astype(int)\n# To make scikit-learn happy, we converted the bool array to integers\n# in the same line. This results in an array of zeros and ones:\nprint(\"The unique classes' labels are: %s\" % np.unique(viz_vs_auditory_l))\n\ngat.fit(epochs[('AudL', 'VisL')], y=viz_vs_auditory_l)\n\n# For our right events, which ones are visual?\nviz_vs_auditory_r = (triggers[np.in1d(triggers, (2, 4))] == 4).astype(int)\n\ngat.score(epochs[('AudR', 'VisR')], y=viz_vs_auditory_r)\ngat.plot(title=\"Temporal Generalization (visual vs auditory): left to right\")" ], "outputs": [], "metadata": { "collapsed": false } } ], "metadata": { "kernelspec": { "display_name": "Python 2", "name": "python2", "language": "python" }, "language_info": { "mimetype": "text/x-python", "nbconvert_exporter": "python", "name": "python", "file_extension": ".py", "version": "2.7.13", "pygments_lexer": "ipython2", "codemirror_mode": { "version": 2, "name": "ipython" } } } }