// NOTE(review): this chunk is extraction-garbled. The bare integers fused into
// the text (66, 74, 75, ... 183) are the ORIGINAL file's line numbers, several
// statements are split mid-expression across physical lines, and the function
// signature (see the last line of this chunk) belongs BEFORE the body. The
// function's tail — loop closings, the implied `break` on convergence, the
// weight update applying matDW, and the InfomaxResult assembly/return — lies
// outside this view. Comments below document only what is visible.
//
// Body of (presumably) InfomaxResult compute(...): extended Infomax ICA on an
// nChannels x nTimes data matrix — center, PCA-whiten, then iterate a
// natural-gradient weight update with an annealed learning rate.
66 const MatrixXd& matData,
// Data layout: rows = channels, columns = time samples.
74 const int nChannels =
static_cast<int>(matData.rows());
75 const int nTimes =
static_cast<int>(matData.cols());
// A negative nComponents (default -1 per the signature) means "keep all
// channels". NOTE(review): nComponents > nChannels is not clamped here —
// tail()/rightCols() below would then be out of range; verify callers.
77 if (nComponents < 0) {
78 nComponents = nChannels;
// Center each channel: subtract its mean across time from every sample.
84 VectorXd vecMean = matData.rowwise().mean();
85 MatrixXd matCentered = matData.colwise() - vecMean;
// Unbiased sample covariance of the centered data (divide by nTimes - 1).
90 MatrixXd matCov = (matCentered * matCentered.transpose()) /
static_cast<double>(nTimes - 1);
// Eigendecomposition of the symmetric covariance. Eigen's
// SelfAdjointEigenSolver returns eigenvalues in ASCENDING order, which the
// tail()/reverse() below relies on.
92 SelfAdjointEigenSolver<MatrixXd> eigSolver(matCov);
93 VectorXd vecEigVals = eigSolver.eigenvalues();
94 MatrixXd matEigVecs = eigSolver.eigenvectors();
// Keep the nComponents LARGEST eigenpairs, reordered to descending:
// tail() picks the largest values, reverse() flips them to descending, and
// rowwise().reverse() applies the matching column reorder to the vectors.
97 VectorXd vecTopEigVals = vecEigVals.tail(nComponents).reverse();
98 MatrixXd matTopEigVecs = matEigVecs.rightCols(nComponents).rowwise().reverse();
// PCA whitening matrix: D^{-1/2} * E^T, so that matWhitening * matCentered
// has (approximately) identity covariance.
// NOTE(review): a zero (or numerically tiny) eigenvalue makes inverse() of
// sqrt() blow up — no rank check is visible here; verify upstream.
101 VectorXd vecInvSqrtEig = vecTopEigVals.array().sqrt().inverse();
102 MatrixXd matWhitening = vecInvSqrtEig.asDiagonal() * matTopEigVecs.transpose();
// Dewhitening (pseudo-inverse of the whitening map): E * D^{1/2}, used to
// project sources back to channel space (presumably in the unseen tail).
105 VectorXd vecSqrtEig = vecTopEigVals.array().sqrt();
106 MatrixXd matDewhitening = matTopEigVecs * vecSqrtEig.asDiagonal();
// Whitened data: nComponents x nTimes.
109 MatrixXd matWhite = matWhitening * matCentered;
// Unmixing weights start at identity ...
114 MatrixXd matW = MatrixXd::Identity(nComponents, nComponents);
// ... then are overwritten with small Gaussian noise (sigma = 0.01) seeded by
// `seed`. NOTE(review): the loop below replaces EVERY entry, making the
// identity init dead — presumably this loop sits under a condition (e.g. on
// `seed`) not visible in this chunk; confirm against the original file.
117 std::mt19937 gen(seed);
118 std::normal_distribution<double> dist(0.0, 0.01);
119 for (
int i = 0; i < nComponents; ++i) {
120 for (
int j = 0; j < nComponents; ++j) {
122 matW(i, j) = dist(gen);
// Precomputed loop invariants: 1/N for the gradient average, and the identity
// term of the natural-gradient update.
135 const double dInvN = 1.0 /
static_cast<double>(nTimes);
136 MatrixXd matIdentity = MatrixXd::Identity(nComponents, nComponents);
// Learning rate is annealed geometrically (x0.998) each iteration.
142 double dCurrentLR = learningRate;
143 constexpr double dAnnealFactor = 0.998;
145 for (
int iter = 0; iter < maxIterations; ++iter) {
// Current source estimates under the present unmixing matrix.
147 MatrixXd matSources = matW * matWhite;
// Per-component kurtosis signs for the extended rule; defaults to all-ones.
// NOTE(review): the call at orig line 152 is presumably guarded by
// `extendedMode` (the gap 150->152 hides the condition) — confirm.
150 VectorXd vecSigns = VectorXd::Ones(nComponents);
152 vecSigns = estimateSignVector(matSources);
// Nonlinearity output Y, chosen per component by sign:
//   sign > 0 (super-Gaussian): Y = -tanh(u)
//   otherwise (sub-Gaussian):  Y = tanh(u) - u
// NOTE(review): standard extended Infomax uses grad = I - K*tanh(u)u' - uu',
// i.e. the super-Gaussian branch should be -(tanh(u) + u); the "- u" term
// looks missing at orig line 160 — verify against the reference algorithm
// before assuming this is intentional.
156 MatrixXd matY(nComponents, nTimes);
157 for (
int i = 0; i < nComponents; ++i) {
158 if (vecSigns(i) > 0) {
160 matY.row(i) = -matSources.row(i).array().tanh();
163 matY.row(i) = matSources.row(i).array().tanh() - matSources.row(i).array();
// Natural-gradient step: dW = lr * (I + Y*u^T / N) * W. The actual
// `matW += matDW` application is not visible in this chunk.
168 MatrixXd matGrad = matIdentity + (matY * matSources.transpose()) * dInvN;
169 MatrixXd matDW = dCurrentLR * matGrad * matW;
// Convergence test on the squared Frobenius norm of the step; the body of
// this `if` (presumably a break/flag) falls in the unseen gap after 178.
177 double dChange = matDW.squaredNorm();
178 if (dChange < tolerance) {
// Anneal the learning rate for the next iteration.
183 dCurrentLR *= dAnnealFactor;
// NOTE(review): signature displaced here by the extraction — it belongs above
// the body. Defaults: all components, 200 iters, lr 1e-3, tol 1e-7,
// extended mode on, seed 0.
static InfomaxResult compute(const Eigen::MatrixXd &matData, int nComponents=-1, int maxIterations=200, double learningRate=0.001, double tolerance=1e-7, bool extendedMode=true, unsigned int seed=0)