mnemath.cpp
//=============================================================================================================
// INCLUDES
//=============================================================================================================

#define _USE_MATH_DEFINES
#include <math.h>
#include <algorithm>
#include <iostream>
#include <string>
#include <utility>
#include <vector>
#include "mnemath.h"

//=============================================================================================================
// EIGEN INCLUDES
//=============================================================================================================

#include <Eigen/Eigen>
#include <Eigen/Geometry>

//=============================================================================================================
// QT INCLUDES
//=============================================================================================================

#include <QDebug>

//=============================================================================================================
// USED NAMESPACES
//=============================================================================================================

using namespace UTILSLIB;
using namespace Eigen;

//=============================================================================================================
// DEFINE MEMBER METHODS
//=============================================================================================================
int MNEMath::gcd(int iA, int iB)
{
    if (iB == 0) {
        return iA;
    }

    return gcd(iB, iA % iB);
}
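
// Usage sketch (hypothetical example, kept commented out; exampleGcd is not part of MNEMath).
// gcd() implements Euclid's algorithm recursively: gcd(a, b) = gcd(b, a mod b) until b reaches 0.
//
// void exampleGcd()
// {
//     int d = MNEMath::gcd(12, 18); // gcd(12,18) -> gcd(18,12) -> gcd(12,6) -> gcd(6,0) = 6
// }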

//=============================================================================================================

VectorXd* MNEMath::combine_xyz(const VectorXd& vec)
{
    if (vec.size() % 3 != 0)
    {
        printf("Input must be a row or a column vector with 3N components\n");
        return NULL;
    }

    MatrixXd tmp = MatrixXd(vec.transpose());
    SparseMatrix<double>* s = make_block_diag(tmp,3);

    SparseMatrix<double> sC = *s*s->transpose();
    VectorXd* comb = new VectorXd(sC.rows());

    for(qint32 i = 0; i < sC.rows(); ++i)
        (*comb)[i] = sC.coeff(i,i);

    delete s;
    return comb;
}
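
// Usage sketch (hypothetical example, kept commented out). combine_xyz() treats the input as N consecutive
// (x,y,z) triplets and returns the squared norm x^2+y^2+z^2 of each triplet; the caller owns the result.
//
// void exampleCombineXyz()
// {
//     VectorXd v(6);
//     v << 1, 2, 2, 3, 0, 4;                     // two triplets: (1,2,2) and (3,0,4)
//     VectorXd* comb = MNEMath::combine_xyz(v);  // -> [9, 25]
//     if(comb) {
//         std::cout << comb->transpose() << std::endl;
//         delete comb;                           // caller is responsible for freeing
//     }
// }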

//=============================================================================================================

double MNEMath::getConditionNumber(const MatrixXd& A,
                                   VectorXd &s)
{
    JacobiSVD<MatrixXd> svd(A);
    s = svd.singularValues();

    double c = s.maxCoeff()/s.minCoeff();

    return c;
}

//=============================================================================================================

double MNEMath::getConditionSlope(const MatrixXd& A,
                                  VectorXd &s)
{
    JacobiSVD<MatrixXd> svd(A);
    s = svd.singularValues();

    double c = s.maxCoeff()/s.mean();

    return c;
}
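
// Usage sketch (hypothetical example, kept commented out). The condition number is the ratio of the largest
// to the smallest singular value; large values indicate a numerically ill-conditioned matrix.
//
// void exampleCondition()
// {
//     MatrixXd A(2,2);
//     A << 1, 0,
//          0, 0.001;                                 // singular values: 1 and 0.001
//     VectorXd s;
//     double c = MNEMath::getConditionNumber(A, s);  // -> 1000
// }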

//=============================================================================================================

void MNEMath::get_whitener(MatrixXd &A,
                           bool pca,
                           QString ch_type,
                           VectorXd &eig,
                           MatrixXd &eigvec)
{
    // Whitening operator: SelfAdjointEigenSolver can be used because covariance matrices are self-adjoint.
    SelfAdjointEigenSolver<MatrixXd> t_eigenSolver(A);

    eig = t_eigenSolver.eigenvalues();
    eigvec = t_eigenSolver.eigenvectors().transpose();

    MNEMath::sort<double>(eig, eigvec, false);
    qint32 rnk = MNEMath::rank(A);

    for(qint32 i = 0; i < eig.size()-rnk; ++i)
        eig(i) = 0;

    printf("Setting small %s eigenvalues to zero.\n", ch_type.toUtf8().constData());
    if (!pca) // No PCA case.
        printf("Not doing PCA for %s\n", ch_type.toUtf8().constData());
    else
    {
        printf("Doing PCA for %s.\n", ch_type.toUtf8().constData());
        // Reduce the actual number of variables in data and leadfield to the true rank.
        eigvec = eigvec.block(eigvec.rows()-rnk, 0, rnk, eigvec.cols());
    }
}

//=============================================================================================================

void MNEMath::get_whitener(MatrixXd &A,
                           bool pca,
                           const std::string& ch_type,
                           VectorXd &eig,
                           MatrixXd &eigvec)
{
    // Whitening operator: SelfAdjointEigenSolver can be used because covariance matrices are self-adjoint.
    SelfAdjointEigenSolver<MatrixXd> t_eigenSolver(A);

    eig = t_eigenSolver.eigenvalues();
    eigvec = t_eigenSolver.eigenvectors().transpose();

    MNEMath::sort<double>(eig, eigvec, false);
    qint32 rnk = MNEMath::rank(A);

    for(qint32 i = 0; i < eig.size()-rnk; ++i)
        eig(i) = 0;

    printf("Setting small %s eigenvalues to zero.\n", ch_type.c_str());
    if (!pca) // No PCA case.
        printf("Not doing PCA for %s\n", ch_type.c_str());
    else
    {
        printf("Doing PCA for %s.\n", ch_type.c_str());
        // Reduce the actual number of variables in data and leadfield to the true rank.
        eigvec = eigvec.block(eigvec.rows()-rnk, 0, rnk, eigvec.cols());
    }
}
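
// Usage sketch (hypothetical example, kept commented out). Given a (noise) covariance matrix, get_whitener()
// returns its eigenvalues and transposed eigenvectors with the rank-deficient eigenvalues zeroed; with
// pca == true, eigvec is additionally truncated to the numerical rank of A.
//
// void exampleWhitener()
// {
//     MatrixXd cov = MatrixXd::Identity(4,4);    // stand-in for a real noise covariance
//     VectorXd eig;
//     MatrixXd eigvec;
//     MNEMath::get_whitener(cov, true, std::string("meg"), eig, eigvec);
// }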

//=============================================================================================================

VectorXi MNEMath::intersect(const VectorXi &v1,
                            const VectorXi &v2,
                            VectorXi &idx_sel)
{
    std::vector<int> tmp;

    std::vector< std::pair<int,int> > t_vecIntIdxValue;

    //ToDo: Slow; map VectorXi to an STL container instead of copying.
    for(qint32 i = 0; i < v1.size(); ++i)
        tmp.push_back(v1[i]);

    std::vector<int>::iterator it;
    for(qint32 i = 0; i < v2.size(); ++i)
    {
        it = std::search(tmp.begin(), tmp.end(), &v2[i], &v2[i]+1);
        if(it != tmp.end())
            t_vecIntIdxValue.push_back(std::pair<int,int>(v2[i], it-tmp.begin())); // store (value, index into v1) pairs so the matches can be sorted afterwards
    }

    std::sort(t_vecIntIdxValue.begin(), t_vecIntIdxValue.end(), MNEMath::compareIdxValuePairSmallerThan<int>);

    VectorXi p_res(t_vecIntIdxValue.size());
    idx_sel = VectorXi(t_vecIntIdxValue.size());

    for(quint32 i = 0; i < t_vecIntIdxValue.size(); ++i)
    {
        p_res[i] = t_vecIntIdxValue[i].first;
        idx_sel[i] = t_vecIntIdxValue[i].second;
    }

    return p_res;
}
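
// Usage sketch (hypothetical example, kept commented out). intersect() returns the values common to v1 and
// v2 and reports, via idx_sel, where each returned value is located in v1.
//
// void exampleIntersect()
// {
//     VectorXi v1(4), v2(3), idx;
//     v1 << 10, 20, 30, 40;
//     v2 << 30, 10, 99;
//     VectorXi common = MNEMath::intersect(v1, v2, idx); // common: {10, 30}; idx: their positions in v1
// }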

//=============================================================================================================

// static inline MatrixXd extract_block_diag(MatrixXd& A, qint32 n)
// {

// //
// //   Principal Investigators and Developers:
// //   ** Richard M. Leahy, PhD, Signal & Image Processing Institute,
// //      University of Southern California, Los Angeles, CA
// //   ** John C. Mosher, PhD, Biophysics Group,
// //      Los Alamos National Laboratory, Los Alamos, NM
// //   ** Sylvain Baillet, PhD, Cognitive Neuroscience & Brain Imaging Laboratory,
// //      CNRS, Hopital de la Salpetriere, Paris, France
// //
// //   Copyright (c) 2005 BrainStorm by the University of Southern California
// //   This software distributed under the terms of the GNU General Public License
// //   as published by the Free Software Foundation. Further details on the GPL
// //   license can be found at http://www.gnu.org/copyleft/gpl.html .
// //
// // FOR RESEARCH PURPOSES ONLY. THE SOFTWARE IS PROVIDED "AS IS," AND THE
// // UNIVERSITY OF SOUTHERN CALIFORNIA AND ITS COLLABORATORS DO NOT MAKE ANY
// // WARRANTY, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO WARRANTIES OF
// // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, NOR DO THEY ASSUME ANY
// // LIABILITY OR RESPONSIBILITY FOR THE USE OF THIS SOFTWARE.
// //
// //   Author: John C. Mosher 1993 - 2004
// //
// //
// //   Modifications for mne Matlab toolbox
// //
// //   Matti Hamalainen
// //   2006

// % MATLAB reference implementation, kept for a future port:
// [mA,na] = size(A);          % matrix always has na columns
// % how many entries in the first column?
// bdn = na/n;                 % number of blocks
// ma = mA/bdn;                % rows in first block

// % blocks may themselves contain zero entries. Build indexing as above
// tmp = reshape([1:(ma*bdn)]',ma,bdn);
// i = zeros(ma*n,bdn);
// for iblock = 1:n,
//     i((iblock-1)*ma+[1:ma],:) = tmp;
// end

// i = i(:);                   % row indices foreach sparse bd

// j = [0:mA:(mA*(na-1))];
// j = j(ones(ma,1),:);
// j = j(:);

// i = i + j;

// bd = full(A(i));            % column vector
// bd = reshape(bd,ma,na);     % full matrix

// }

//=============================================================================================================

bool MNEMath::issparse(VectorXd &v)
{
    //ToDo: Figure out how to accelerate MNEMath::issparse(VectorXd &v)

    qint32 c = 0;
    qint32 n = v.rows();
    qint32 t = n/2;

    for(qint32 i = 0; i < n; ++i)
    {
        if(v(i) == 0)
            ++c;
        if(c > t)
            return true;
    }

    return false;
}
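
// Usage sketch (hypothetical example, kept commented out). issparse() reports whether more than half of the
// vector's entries are exactly zero, i.e. whether a sparse representation would pay off.
//
// void exampleIssparse()
// {
//     VectorXd v = VectorXd::Zero(10);
//     v(0) = 1.0;
//     bool b = MNEMath::issparse(v); // true: 9 of 10 entries are zero
// }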

//=============================================================================================================

MatrixXd MNEMath::legendre(qint32 n,
                           const VectorXd &X,
                           QString normalize)
{
    MatrixXd y;

    Q_UNUSED(y);

    Q_UNUSED(n);
    Q_UNUSED(X);
    Q_UNUSED(normalize);

    //ToDo

    return y;
}

//=============================================================================================================

MatrixXd MNEMath::legendre(qint32 n,
                           const VectorXd &X,
                           std::string normalize)
{
    MatrixXd y;

    Q_UNUSED(y);

    Q_UNUSED(n);
    Q_UNUSED(X);
    Q_UNUSED(normalize);

    //ToDo

    return y;
}

//=============================================================================================================

SparseMatrix<double>* MNEMath::make_block_diag(const MatrixXd &A,
                                               qint32 n)
{
    qint32 ma = A.rows();
    qint32 na = A.cols();
    float bdn = ((float)na)/n; // number of submatrices

    if(bdn - floor(bdn) != 0.0f)
    {
        printf("Width of the matrix must be an integer multiple of n\n");
        return NULL;
    }

    typedef Eigen::Triplet<double> T;
    std::vector<T> tripletList;
    tripletList.reserve(bdn*ma*n);

    qint32 current_col, current_row, i, r, c;
    for(i = 0; i < bdn; ++i)
    {
        current_col = i * n;
        current_row = i * ma;

        for(r = 0; r < ma; ++r)
            for(c = 0; c < n; ++c)
                tripletList.push_back(T(r+current_row, c+current_col, A(r, c+current_col)));
    }

    SparseMatrix<double>* bd = new SparseMatrix<double>((int)floor((float)ma*bdn+0.5),na);
    bd->setFromTriplets(tripletList.begin(), tripletList.end());

    return bd;
}
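
// Usage sketch (hypothetical example, kept commented out). make_block_diag() cuts an m x (n*bdn) matrix into
// bdn blocks of n columns each and stacks them along the diagonal of an (m*bdn) x (n*bdn) sparse matrix.
//
// void exampleBlockDiag()
// {
//     MatrixXd A = MatrixXd::Ones(2, 6);                        // 2 x 6 with n = 3 -> two 2 x 3 blocks
//     SparseMatrix<double>* bd = MNEMath::make_block_diag(A, 3);
//     if(bd) {
//         // bd is 4 x 6: block 0 in rows 0-1 / cols 0-2, block 1 in rows 2-3 / cols 3-5
//         delete bd;                                            // caller owns the result
//     }
// }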

//=============================================================================================================

int MNEMath::nchoose2(int n)
{
    // nchoosek(n, k) with k = 2 equals n*(n-1)/2
    int t_iNumOfCombination = (int)(n*(n-1)*0.5);

    return t_iNumOfCombination;
}
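
// Usage sketch (hypothetical example, kept commented out):
//
// void exampleNchoose2()
// {
//     int pairs = MNEMath::nchoose2(5); // 5*4/2 = 10 unordered pairs
// }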

//=============================================================================================================

qint32 MNEMath::rank(const MatrixXd& A,
                     double tol)
{
    JacobiSVD<MatrixXd> t_svdA(A); // U and V are not computed
    VectorXd s = t_svdA.singularValues();
    double t_dMax = s.maxCoeff();
    t_dMax *= tol;
    qint32 sum = 0;
    for(qint32 i = 0; i < s.size(); ++i)
        sum += s[i] > t_dMax ? 1 : 0;
    return sum;
}
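
// Usage sketch (hypothetical example, kept commented out). rank() counts the singular values larger than
// tol * sigma_max, i.e. the numerical rank of A.
//
// void exampleRank()
// {
//     MatrixXd A(2,2);
//     A << 1, 2,
//          2, 4;                    // second row is a multiple of the first
//     qint32 r = MNEMath::rank(A);  // -> 1
// }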

//=============================================================================================================

MatrixXd MNEMath::rescale(const MatrixXd &data,
                          const RowVectorXf &times,
                          const QPair<float,float>& baseline,
                          QString mode)
{
    MatrixXd data_out = data;
    QStringList valid_modes;
    valid_modes << "logratio" << "ratio" << "zscore" << "mean" << "percent";
    if(!valid_modes.contains(mode))
    {
        qWarning().noquote() << "[MNEMath::rescale] Mode" << mode << "is not supported. Supported modes are:" << valid_modes << "Returning input data.";
        return data_out;
    }

    qInfo().noquote() << QString("[MNEMath::rescale] Applying baseline correction ... (mode: %1)").arg(mode);

    qint32 imin = 0;
    qint32 imax = times.size();

    if (baseline.second == baseline.first) {
        imin = 0;
    } else {
        float bmin = baseline.first;
        for(qint32 i = 0; i < times.size(); ++i) {
            if(times[i] >= bmin) {
                imin = i;
                break;
            }
        }
    }

    float bmax = baseline.second;

    if (baseline.second == baseline.first) {
        bmax = 0;
    }

    for(qint32 i = times.size()-1; i >= 0; --i) {
        if(times[i] <= bmax) {
            imax = i+1;
            break;
        }
    }

    if(imax < imin) {
        qWarning() << "[MNEMath::rescale] imax < imin. Returning input data.";
        return data_out;
    }

    VectorXd mean = data_out.block(0, imin, data_out.rows(), imax-imin).rowwise().mean();
    if(mode.compare("mean") == 0) {
        data_out -= mean.rowwise().replicate(data.cols());
    } else if(mode.compare("logratio") == 0) {
        for(qint32 i = 0; i < data_out.rows(); ++i)
            for(qint32 j = 0; j < data_out.cols(); ++j)
                data_out(i,j) = log10(data_out(i,j)/mean[i]); // a value of 1 means 10 times bigger
    } else if(mode.compare("ratio") == 0) {
        data_out = data_out.cwiseQuotient(mean.rowwise().replicate(data_out.cols()));
    } else if(mode.compare("zscore") == 0) {
        MatrixXd std_mat = data.block(0, imin, data.rows(), imax-imin) - mean.rowwise().replicate(imax-imin);
        std_mat = std_mat.cwiseProduct(std_mat);
        VectorXd std_v = std_mat.rowwise().mean();
        for(qint32 i = 0; i < std_v.size(); ++i)
            std_v[i] = sqrt(std_v[i]); // std over the baseline interval: sqrt of the mean squared deviation

        data_out -= mean.rowwise().replicate(data_out.cols());
        data_out = data_out.cwiseQuotient(std_v.rowwise().replicate(data_out.cols()));
    } else if(mode.compare("percent") == 0) {
        data_out -= mean.rowwise().replicate(data_out.cols());
        data_out = data_out.cwiseQuotient(mean.rowwise().replicate(data_out.cols()));
    }

    return data_out;
}

//=============================================================================================================

MatrixXd MNEMath::rescale(const MatrixXd& data,
                          const RowVectorXf& times,
                          const std::pair<float,float>& baseline,
                          const std::string& mode)
{
    MatrixXd data_out = data;
    std::vector<std::string> valid_modes{"logratio", "ratio", "zscore", "mean", "percent"};
    if(std::find(valid_modes.begin(), valid_modes.end(), mode) == valid_modes.end())
    {
        qWarning().noquote() << "[MNEMath::rescale] Mode" << mode.c_str() << "is not supported. Supported modes are:";
        for(const auto& m : valid_modes) {
            std::cout << m << " ";
        }
        std::cout << "\n" << "Returning input data.\n";
        return data_out;
    }

    qInfo().noquote() << QString("[MNEMath::rescale] Applying baseline correction ... (mode: %1)").arg(mode.c_str());

    qint32 imin = 0;
    qint32 imax = times.size();

    if (baseline.second == baseline.first) {
        imin = 0;
    } else {
        float bmin = baseline.first;
        for(qint32 i = 0; i < times.size(); ++i) {
            if(times[i] >= bmin) {
                imin = i;
                break;
            }
        }
    }

    float bmax = baseline.second;

    if (baseline.second == baseline.first) {
        bmax = 0;
    }

    for(qint32 i = times.size()-1; i >= 0; --i) {
        if(times[i] <= bmax) {
            imax = i+1;
            break;
        }
    }

    if(imax < imin) {
        qWarning() << "[MNEMath::rescale] imax < imin. Returning input data.";
        return data_out;
    }

    VectorXd mean = data_out.block(0, imin, data_out.rows(), imax-imin).rowwise().mean();
    if(mode.compare("mean") == 0) {
        data_out -= mean.rowwise().replicate(data.cols());
    } else if(mode.compare("logratio") == 0) {
        for(qint32 i = 0; i < data_out.rows(); ++i)
            for(qint32 j = 0; j < data_out.cols(); ++j)
                data_out(i,j) = log10(data_out(i,j)/mean[i]); // a value of 1 means 10 times bigger
    } else if(mode.compare("ratio") == 0) {
        data_out = data_out.cwiseQuotient(mean.rowwise().replicate(data_out.cols()));
    } else if(mode.compare("zscore") == 0) {
        MatrixXd std_mat = data.block(0, imin, data.rows(), imax-imin) - mean.rowwise().replicate(imax-imin);
        std_mat = std_mat.cwiseProduct(std_mat);
        VectorXd std_v = std_mat.rowwise().mean();
        for(qint32 i = 0; i < std_v.size(); ++i)
            std_v[i] = sqrt(std_v[i]); // std over the baseline interval: sqrt of the mean squared deviation

        data_out -= mean.rowwise().replicate(data_out.cols());
        data_out = data_out.cwiseQuotient(std_v.rowwise().replicate(data_out.cols()));
    } else if(mode.compare("percent") == 0) {
        data_out -= mean.rowwise().replicate(data_out.cols());
        data_out = data_out.cwiseQuotient(mean.rowwise().replicate(data_out.cols()));
    }

    return data_out;
}
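
// Usage sketch (hypothetical example, kept commented out). rescale() baseline-corrects each row of data
// using the samples whose time stamps fall inside [baseline.first, baseline.second].
//
// void exampleRescale()
// {
//     MatrixXd data(1, 4);
//     data << 2, 2, 5, 7;
//     RowVectorXf times(4);
//     times << -0.2f, -0.1f, 0.0f, 0.1f;   // baseline covers the two pre-stimulus samples
//     MatrixXd out = MNEMath::rescale(data, times, std::make_pair(-0.2f, -0.1f), std::string("mean"));
//     // out: [0, 0, 3, 5] - the baseline mean (2) was subtracted from every sample
// }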

//=============================================================================================================

bool MNEMath::compareTransformation(const MatrixX4f& mDevHeadT,
                                    const MatrixX4f& mDevHeadDest,
                                    const float& fThreshRot,
                                    const float& fThreshTrans)
{
    bool bState = false;

    Matrix3f mRot = mDevHeadT.block(0,0,3,3);
    Matrix3f mRotDest = mDevHeadDest.block(0,0,3,3);

    VectorXf vTrans = mDevHeadT.block(0,3,3,1);
    VectorXf vTransDest = mDevHeadDest.block(0,3,3,1);

    Quaternionf quat(mRot);
    Quaternionf quatNew(mRotDest);

    // Compare rotation: the angular distance between the two quaternions, converted to degrees
    float fAngle = quat.angularDistance(quatNew);
    fAngle = fAngle * 180 / M_PI;

    // Compare translation
    float fMove = (vTrans-vTransDest).norm();

    // Compare against the thresholds and update the state
    if(fMove > fThreshTrans) {
        qInfo() << "Large movement: " << fMove*1000 << "mm";
        bState = true;
    } else if (fAngle > fThreshRot) {
        qInfo() << "Large rotation: " << fAngle << "degrees";
        bState = true;
    } else {
        bState = false;
    }

    return bState;
}
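
// Usage sketch (hypothetical example, kept commented out). Returns true if the two 4x4 device-to-head
// transformations differ by more than fThreshRot (degrees) in rotation or fThreshTrans (meters) in
// translation.
//
// void exampleCompare()
// {
//     MatrixX4f t1 = MatrixX4f::Identity(4,4);
//     MatrixX4f t2 = t1;
//     t2(0,3) = 0.01f;                                                    // move 10 mm along x
//     bool bMoved = MNEMath::compareTransformation(t1, t2, 5.0f, 0.005f); // true: 10 mm > 5 mm
// }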