MNE-CPP  0.1.9
A Framework for Electrophysiology
mnemath.cpp
//=============================================================================================================
//=============================================================================================================
// INCLUDES
//=============================================================================================================

#define _USE_MATH_DEFINES
#include <math.h>

#include <algorithm>    // std::find, std::sort
#include <iostream>
#include <vector>

#include "mnemath.h"

//=============================================================================================================
// EIGEN INCLUDES
//=============================================================================================================

#include <Eigen/Eigen>
#include <Eigen/Geometry>

//=============================================================================================================
// QT INCLUDES
//=============================================================================================================

#include <QDebug>

//=============================================================================================================
// USED NAMESPACES
//=============================================================================================================

using namespace UTILSLIB;
using namespace Eigen;

//=============================================================================================================
// DEFINE MEMBER METHODS
//=============================================================================================================

int MNEMath::gcd(int iA, int iB)
{
    if (iB == 0) {
        return iA;
    }

    return gcd(iB, iA % iB);
}
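
// Illustrative trace (not part of the original source): gcd(12, 18) recurses as
// gcd(18, 12) -> gcd(12, 6) -> gcd(6, 0) and returns 6, the greatest common divisor.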

//=============================================================================================================

VectorXd* MNEMath::combine_xyz(const VectorXd& vec)
{
    if (vec.size() % 3 != 0)
    {
        printf("Input must be a row or a column vector with 3N components\n");
        return NULL;
    }

    MatrixXd tmp = MatrixXd(vec.transpose());
    SparseMatrix<double>* s = make_block_diag(tmp,3);

    SparseMatrix<double> sC = *s*s->transpose();
    VectorXd* comb = new VectorXd(sC.rows());

    for(qint32 i = 0; i < sC.rows(); ++i)
        (*comb)[i] = sC.coeff(i,i);

    delete s;
    return comb;
}
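
// Illustrative example (not part of the original source), reading vec as N
// consecutive (x,y,z) triplets: each output entry is the squared norm of one triplet.
//
//     VectorXd v(6);
//     v << 1, 2, 3, 4, 5, 6;
//     VectorXd* comb = MNEMath::combine_xyz(v); // (*comb) == [1+4+9, 16+25+36] == [14, 77]
//     delete comb;                              // the caller owns the returned vector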

//=============================================================================================================

double MNEMath::getConditionNumber(const MatrixXd& A,
                                   VectorXd &s)
{
    JacobiSVD<MatrixXd> svd(A);
    s = svd.singularValues();

    double c = s.maxCoeff()/s.minCoeff();

    return c;
}
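
// Worked example (illustrative): for A = diag(10, 0.1) the singular values are
// {10, 0.1}, so the condition number is 10/0.1 = 100.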

//=============================================================================================================

double MNEMath::getConditionSlope(const MatrixXd& A,
                                  VectorXd &s)
{
    JacobiSVD<MatrixXd> svd(A);
    s = svd.singularValues();

    double c = s.maxCoeff()/s.mean();

    return c;
}
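
// Worked example (illustrative): for the same A = diag(10, 0.1), the mean
// singular value is (10 + 0.1)/2 = 5.05, so the condition slope is 10/5.05,
// about 1.98.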

//=============================================================================================================

void MNEMath::get_whitener(MatrixXd &A,
                           bool pca,
                           QString ch_type,
                           VectorXd &eig,
                           MatrixXd &eigvec)
{
    // Whitening operator. The self-adjoint eigensolver can be used because
    // covariance matrices are self-adjoint.
    SelfAdjointEigenSolver<MatrixXd> t_eigenSolver(A);

    eig = t_eigenSolver.eigenvalues();
    eigvec = t_eigenSolver.eigenvectors().transpose();

    MNEMath::sort<double>(eig, eigvec, false);
    qint32 rnk = MNEMath::rank(A);

    for(qint32 i = 0; i < eig.size()-rnk; ++i)
        eig(i) = 0;

    printf("Setting small %s eigenvalues to zero.\n", ch_type.toUtf8().constData());
    if (!pca) // No PCA case.
        printf("Not doing PCA for %s\n", ch_type.toUtf8().constData());
    else
    {
        printf("Doing PCA for %s.\n", ch_type.toUtf8().constData());
        // This reduces the actual number of variables in data
        // and leadfield to the true rank.
        eigvec = eigvec.block(eigvec.rows()-rnk, 0, rnk, eigvec.cols());
    }
}
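
// Minimal usage sketch (hypothetical variables, not part of the original source):
//
//     MatrixXd cov;      // a noise covariance matrix, filled elsewhere
//     VectorXd eig;
//     MatrixXd eigvec;
//     MNEMath::get_whitener(cov, false, QString("MEG"), eig, eigvec);
//     // eig now holds the eigenvalues in ascending order, with the smallest
//     // eig.size() - rank(cov) entries set to zero; eigvec holds the matching
//     // eigenvectors as rows.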

//=============================================================================================================

void MNEMath::get_whitener(MatrixXd &A,
                           bool pca,
                           const std::string& ch_type,
                           VectorXd &eig,
                           MatrixXd &eigvec)
{
    // Whitening operator. The self-adjoint eigensolver can be used because
    // covariance matrices are self-adjoint.
    SelfAdjointEigenSolver<MatrixXd> t_eigenSolver(A);

    eig = t_eigenSolver.eigenvalues();
    eigvec = t_eigenSolver.eigenvectors().transpose();

    MNEMath::sort<double>(eig, eigvec, false);
    qint32 rnk = MNEMath::rank(A);

    for(qint32 i = 0; i < eig.size()-rnk; ++i)
        eig(i) = 0;

    printf("Setting small %s eigenvalues to zero.\n", ch_type.c_str());
    if (!pca) // No PCA case.
        printf("Not doing PCA for %s\n", ch_type.c_str());
    else
    {
        printf("Doing PCA for %s.\n", ch_type.c_str());
        // This reduces the actual number of variables in data
        // and leadfield to the true rank.
        eigvec = eigvec.block(eigvec.rows()-rnk, 0, rnk, eigvec.cols());
    }
}

//=============================================================================================================

VectorXi MNEMath::intersect(const VectorXi &v1,
                            const VectorXi &v2,
                            VectorXi &idx_sel)
{
    std::vector<int> tmp;

    std::vector< std::pair<int,int> > t_vecIntIdxValue;

    //ToDo: Slow; map VectorXi to an STL container instead of copying.
    for(qint32 i = 0; i < v1.size(); ++i)
        tmp.push_back(v1[i]);

    std::vector<int>::iterator it;
    for(qint32 i = 0; i < v2.size(); ++i)
    {
        it = std::find(tmp.begin(), tmp.end(), v2[i]);
        if(it != tmp.end())
            t_vecIntIdxValue.push_back(std::pair<int,int>(v2[i], it-tmp.begin())); // Store (value, index into v1); value and index are swapped relative to the usual layout so the comparator below sorts by the index.
    }

    std::sort(t_vecIntIdxValue.begin(), t_vecIntIdxValue.end(), MNEMath::compareIdxValuePairSmallerThan<int>);

    VectorXi p_res(t_vecIntIdxValue.size());
    idx_sel = VectorXi(t_vecIntIdxValue.size());

    for(quint32 i = 0; i < t_vecIntIdxValue.size(); ++i)
    {
        p_res[i] = t_vecIntIdxValue[i].first;
        idx_sel[i] = t_vecIntIdxValue[i].second;
    }

    return p_res;
}
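
// Illustrative example (not part of the original source), assuming the comparator
// orders the pairs by the stored index into v1 (the pair's second element):
//
//     v1 = [0, 2, 4, 6], v2 = [6, 0, 5]
//     intersect(v1, v2, idx_sel) returns [0, 6] with idx_sel == [0, 3],
//     i.e. the common values ordered by their position in v1.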

//=============================================================================================================

// static inline MatrixXd extract_block_diag(MatrixXd& A, qint32 n)
// {

// //
// // Principal Investigators and Developers:
// // ** Richard M. Leahy, PhD, Signal & Image Processing Institute,
// //    University of Southern California, Los Angeles, CA
// // ** John C. Mosher, PhD, Biophysics Group,
// //    Los Alamos National Laboratory, Los Alamos, NM
// // ** Sylvain Baillet, PhD, Cognitive Neuroscience & Brain Imaging Laboratory,
// //    CNRS, Hopital de la Salpetriere, Paris, France
// //
// // Copyright (c) 2005 BrainStorm by the University of Southern California
// // This software is distributed under the terms of the GNU General Public License
// // as published by the Free Software Foundation. Further details on the GPL
// // license can be found at http://www.gnu.org/copyleft/gpl.html .
// //
// // FOR RESEARCH PURPOSES ONLY. THE SOFTWARE IS PROVIDED "AS IS," AND THE
// // UNIVERSITY OF SOUTHERN CALIFORNIA AND ITS COLLABORATORS DO NOT MAKE ANY
// // WARRANTY, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO WARRANTIES OF
// // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, NOR DO THEY ASSUME ANY
// // LIABILITY OR RESPONSIBILITY FOR THE USE OF THIS SOFTWARE.
// //
// // Author: John C. Mosher 1993 - 2004
// //
// //
// // Modifications for mne Matlab toolbox
// //
// // Matti Hamalainen
// // 2006

// [mA,na] = size(A);        % matrix always has na columns
// % how many entries in the first column?
// bdn = na/n;               % number of blocks
// ma = mA/bdn;              % rows in first block

// % blocks may themselves contain zero entries. Build indexing as above
// tmp = reshape([1:(ma*bdn)]',ma,bdn);
// i = zeros(ma*n,bdn);
// for iblock = 1:n,
//     i((iblock-1)*ma+[1:ma],:) = tmp;
// end

// i = i(:);                 % row indices for each sparse bd

// j = [0:mA:(mA*(na-1))];
// j = j(ones(ma,1),:);
// j = j(:);

// i = i + j;

// bd = full(A(i));          % column vector
// bd = reshape(bd,ma,na);   % full matrix

// }

//=============================================================================================================

bool MNEMath::issparse(VectorXd &v)
{
    //ToDo: Figure out how to accelerate MNEMath::issparse(VectorXd &v)

    qint32 c = 0;
    qint32 n = v.rows();
    qint32 t = n/2;

    for(qint32 i = 0; i < n; ++i)
    {
        if(v(i) == 0)
            ++c;
        if(c > t)
            return true;
    }

    return false;
}
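
// Illustrative example (not part of the original source): for v = (0, 0, 1, 0),
// three of the four entries are zero, which exceeds the n/2 = 2 threshold, so the
// vector is considered sparse and the function returns true.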

//=============================================================================================================

MatrixXd MNEMath::legendre(qint32 n,
                           const VectorXd &X,
                           QString normalize)
{
    MatrixXd y;

    Q_UNUSED(y);

    Q_UNUSED(n);
    Q_UNUSED(X);
    Q_UNUSED(normalize);

    //ToDo

    return y;
}

//=============================================================================================================

MatrixXd MNEMath::legendre(qint32 n,
                           const VectorXd &X,
                           std::string normalize)
{
    MatrixXd y;

    Q_UNUSED(y);

    Q_UNUSED(n);
    Q_UNUSED(X);
    Q_UNUSED(normalize);

    //ToDo

    return y;
}

//=============================================================================================================

SparseMatrix<double>* MNEMath::make_block_diag(const MatrixXd &A,
                                               qint32 n)
{
    qint32 ma = A.rows();
    qint32 na = A.cols();
    float bdn = ((float)na)/n;  // number of submatrices

//    std::cout << std::endl << "ma " << ma << " na " << na << " bdn " << bdn << std::endl;

    if(bdn - floor(bdn))  // bdn must be an integer, i.e. na must be a multiple of n
    {
        printf("Width of matrix must be an even multiple of n\n");
        return NULL;
    }

    typedef Eigen::Triplet<double> T;
    std::vector<T> tripletList;
    tripletList.reserve(bdn*ma*n);

    qint32 current_col, current_row, i, r, c;
    for(i = 0; i < bdn; ++i)
    {
        current_col = i * n;
        current_row = i * ma;

        for(r = 0; r < ma; ++r)
            for(c = 0; c < n; ++c)
                tripletList.push_back(T(r+current_row, c+current_col, A(r, c+current_col)));
    }

    SparseMatrix<double>* bd = new SparseMatrix<double>((int)floor((float)ma*bdn+0.5),na);
//    SparseMatrix<double> p_Matrix(nrow, ncol);
    bd->setFromTriplets(tripletList.begin(), tripletList.end());

    return bd;
}
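
// Illustrative example (not part of the original source): for a 2x4 matrix
// A = [A1 A2] with 2x2 blocks A1 and A2, make_block_diag(A, 2) returns the 4x4
// sparse matrix
//
//     [ A1  0 ]
//     [  0 A2 ]
//
// i.e. the n-column chunks of A placed along the diagonal.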

//=============================================================================================================

int MNEMath::nchoose2(int n)
{
    // nchoosek(n, k) with k = 2 equals n*(n-1)*0.5
    int t_iNumOfCombination = (int)(n*(n-1)*0.5);

    return t_iNumOfCombination;
}
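
// Worked example (illustrative): nchoose2(5) = 5*4*0.5 = 10, the number of
// unordered pairs that can be drawn from 5 elements.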

//=============================================================================================================

qint32 MNEMath::rank(const MatrixXd& A,
                     double tol)
{
    JacobiSVD<MatrixXd> t_svdA(A); // U and V are not computed
    VectorXd s = t_svdA.singularValues();
    double t_dMax = s.maxCoeff();
    t_dMax *= tol;
    qint32 sum = 0;
    for(qint32 i = 0; i < s.size(); ++i)
        sum += s[i] > t_dMax ? 1 : 0;
    return sum;
}
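
// Illustrative example (not part of the original source): for the 3x3 identity
// matrix all singular values are 1, so with the default tol = 1e-8 the function
// returns 3. A matrix such as [[1,0,0],[0,1,0],[0,1,0]] has singular values
// {sqrt(2), 1, 0}, so one value falls below the threshold and rank() returns 2.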

//=============================================================================================================

MatrixXd MNEMath::rescale(const MatrixXd &data,
                          const RowVectorXf &times,
                          const QPair<float,float>& baseline,
                          QString mode)
{
    MatrixXd data_out = data;
    QStringList valid_modes;
    valid_modes << "logratio" << "ratio" << "zscore" << "mean" << "percent";
    if(!valid_modes.contains(mode))
    {
        qWarning().noquote() << "[MNEMath::rescale] Mode" << mode << "is not supported. Supported modes are:" << valid_modes << "Returning input data.";
        return data_out;
    }

    qInfo().noquote() << QString("[MNEMath::rescale] Applying baseline correction ... (mode: %1)").arg(mode);

    qint32 imin = 0;
    qint32 imax = times.size();

    if (baseline.second == baseline.first) {
        imin = 0;
    } else {
        float bmin = baseline.first;
        for(qint32 i = 0; i < times.size(); ++i) {
            if(times[i] >= bmin) {
                imin = i;
                break;
            }
        }
    }

    float bmax = baseline.second;

    if (baseline.second == baseline.first) {
        bmax = 0;
    }

    for(qint32 i = times.size()-1; i >= 0; --i) {
        if(times[i] <= bmax) {
            imax = i+1;
            break;
        }
    }

    if(imax < imin) {
        qWarning() << "[MNEMath::rescale] imax < imin. Returning input data.";
        return data_out;
    }

    VectorXd mean = data_out.block(0, imin, data_out.rows(), imax-imin).rowwise().mean();
    if(mode.compare("mean") == 0) {
        data_out -= mean.rowwise().replicate(data.cols());
    } else if(mode.compare("logratio") == 0) {
        for(qint32 i = 0; i < data_out.rows(); ++i)
            for(qint32 j = 0; j < data_out.cols(); ++j)
                data_out(i,j) = log10(data_out(i,j)/mean[i]); // a value of 1 means a tenfold increase
    } else if(mode.compare("ratio") == 0) {
        data_out = data_out.cwiseQuotient(mean.rowwise().replicate(data_out.cols()));
    } else if(mode.compare("zscore") == 0) {
        MatrixXd std_mat = data.block(0, imin, data.rows(), imax-imin) - mean.rowwise().replicate(imax-imin);
        std_mat = std_mat.cwiseProduct(std_mat);
        VectorXd std_v = std_mat.rowwise().mean();
        // rowwise().mean() already divides the summed squared deviations by the
        // number of baseline samples, so taking the square root alone yields the
        // standard deviation; dividing by (imax-imin) again would give the
        // standard error instead.
        for(qint32 i = 0; i < std_v.size(); ++i)
            std_v[i] = sqrt(std_v[i]);

        data_out -= mean.rowwise().replicate(data_out.cols());
        data_out = data_out.cwiseQuotient(std_v.rowwise().replicate(data_out.cols()));
    } else if(mode.compare("percent") == 0) {
        data_out -= mean.rowwise().replicate(data_out.cols());
        data_out = data_out.cwiseQuotient(mean.rowwise().replicate(data_out.cols()));
    }

    return data_out;
}
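
// Minimal usage sketch (hypothetical values, not part of the original source):
// subtract the mean of the baseline window from -100 ms to 0 ms from every sample.
//
//     MatrixXd corrected = MNEMath::rescale(data, times,
//                                           QPair<float,float>(-0.1f, 0.0f),
//                                           QString("mean"));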

//=============================================================================================================

MatrixXd MNEMath::rescale(const MatrixXd& data,
                          const RowVectorXf& times,
                          const std::pair<float,float>& baseline,
                          const std::string& mode)
{
    MatrixXd data_out = data;
    std::vector<std::string> valid_modes{"logratio", "ratio", "zscore", "mean", "percent"};
    if(std::find(valid_modes.begin(), valid_modes.end(), mode) == valid_modes.end())
    {
        qWarning().noquote() << "[MNEMath::rescale] Mode" << mode.c_str() << "is not supported. Supported modes are:";
        for(const auto& valid_mode : valid_modes){
            std::cout << valid_mode << " ";
        }
        std::cout << "\nReturning input data.\n";
        return data_out;
    }

    qInfo().noquote() << QString("[MNEMath::rescale] Applying baseline correction ... (mode: %1)").arg(mode.c_str());

    qint32 imin = 0;
    qint32 imax = times.size();

    if (baseline.second == baseline.first) {
        imin = 0;
    } else {
        float bmin = baseline.first;
        for(qint32 i = 0; i < times.size(); ++i) {
            if(times[i] >= bmin) {
                imin = i;
                break;
            }
        }
    }

    float bmax = baseline.second;

    if (baseline.second == baseline.first) {
        bmax = 0;
    }

    for(qint32 i = times.size()-1; i >= 0; --i) {
        if(times[i] <= bmax) {
            imax = i+1;
            break;
        }
    }

    if(imax < imin) {
        qWarning() << "[MNEMath::rescale] imax < imin. Returning input data.";
        return data_out;
    }

    VectorXd mean = data_out.block(0, imin, data_out.rows(), imax-imin).rowwise().mean();
    if(mode.compare("mean") == 0) {
        data_out -= mean.rowwise().replicate(data.cols());
    } else if(mode.compare("logratio") == 0) {
        for(qint32 i = 0; i < data_out.rows(); ++i)
            for(qint32 j = 0; j < data_out.cols(); ++j)
                data_out(i,j) = log10(data_out(i,j)/mean[i]); // a value of 1 means a tenfold increase
    } else if(mode.compare("ratio") == 0) {
        data_out = data_out.cwiseQuotient(mean.rowwise().replicate(data_out.cols()));
    } else if(mode.compare("zscore") == 0) {
        MatrixXd std_mat = data.block(0, imin, data.rows(), imax-imin) - mean.rowwise().replicate(imax-imin);
        std_mat = std_mat.cwiseProduct(std_mat);
        VectorXd std_v = std_mat.rowwise().mean();
        // As above: rowwise().mean() already divides by the number of baseline
        // samples, so the square root alone yields the standard deviation.
        for(qint32 i = 0; i < std_v.size(); ++i)
            std_v[i] = sqrt(std_v[i]);

        data_out -= mean.rowwise().replicate(data_out.cols());
        data_out = data_out.cwiseQuotient(std_v.rowwise().replicate(data_out.cols()));
    } else if(mode.compare("percent") == 0) {
        data_out -= mean.rowwise().replicate(data_out.cols());
        data_out = data_out.cwiseQuotient(mean.rowwise().replicate(data_out.cols()));
    }

    return data_out;
}

//=============================================================================================================

bool MNEMath::compareTransformation(const MatrixX4f& mDevHeadT,
                                    const MatrixX4f& mDevHeadDest,
                                    const float& fThreshRot,
                                    const float& fThreshTrans)
{
    bool bState = false;

    Matrix3f mRot = mDevHeadT.block(0,0,3,3);
    Matrix3f mRotDest = mDevHeadDest.block(0,0,3,3);

    VectorXf vTrans = mDevHeadT.block(0,3,3,1);
    VectorXf vTransDest = mDevHeadDest.block(0,3,3,1);

    Quaternionf quat(mRot);
    Quaternionf quatNew(mRotDest);

    // Compare rotation: the angular distance between the two quaternions is the
    // rotation angle between both transformations.
    float fAngle = quat.angularDistance(quatNew);
    fAngle = fAngle * 180 / M_PI;

    // Compare translation
    float fMove = (vTrans-vTransDest).norm();

    // Compare to thresholds and update
    if(fMove > fThreshTrans) {
        qInfo() << "Large movement: " << fMove*1000 << "mm";
        bState = true;
    } else if (fAngle > fThreshRot) {
        qInfo() << "Large rotation: " << fAngle << "degrees";
        bState = true;
    } else {
        bState = false;
    }

    return bState;
}
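
// Illustrative check (hypothetical values, not part of the original source):
// with fThreshRot = 5 (degrees) and fThreshTrans = 0.005 (meters), a pure head
// translation of 7 mm between the two transforms gives fMove = 0.007 > 0.005,
// so the function logs "Large movement: 7 mm" and returns true.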