[Y,IX] = sort(X,dim,mode)
| igl::sort(X,dim,mode,Y,IX)
| MATLAB version allows Y to be a multidimensional matrix, but the Eigen version is only for 1D or 2D matrices. |
B(i:(i+w),j:(j+h)) = A(x:(x+w),y:(y+h))
| B.block(i,j,w,h) = A.block(x,y,w,h)
| MATLAB version would allow w and h to be non-positive since the colon operator evaluates to a list of indices, but the Eigen version needs non-negative width and height values. |
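A minimal compilable sketch of the block-copy correspondence above; the sizes and offsets are arbitrary, and remember that Eigen is 0-indexed while block() takes (start row, start col, rows, cols):

```cpp
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(10,10);
  Eigen::MatrixXd B = Eigen::MatrixXd::Zero(10,10);
  // Copy the 3x4 block of A starting at (2,5) into B starting at (0,1),
  // i.e. B(1:3,2:5) = A(3:5,6:9) in 1-indexed MATLAB terms.
  B.block(0,1,3,4) = A.block(2,5,3,4);
  return 0;
}
```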
max(A(:))
| A.maxCoeff()
| Find the maximum coefficient over all entries of the matrix. |
min(A(:))
| A.minCoeff()
| Find the minimum coefficient over all entries of the matrix. |
eye(w,h)
| MatrixXd::Identity(w,h), MatrixXf::Identity(w,h), etc.
| |
A(i:(i+w),j:(j+h)) = eye(w,h)
| A.block(i,j,w,h).setIdentity()
| |
[I,J,V] = find(X)
| igl::find(X,I,J,V)
| MATLAB supports finding subscripts (I and J) as well as indices (just I), but so far igl::find only supports subscripts. Also, igl::find requires X to be sparse. |
X(:,j) = X(:,j) + x
| X.col(j).array() += x
| |
Acol_sum = sum(A,1) Arow_sum = sum(A,2) Adim_sum = sum(Asparse,dim)
| Acol_sum = A.colwise().sum() Arow_sum = A.rowwise().sum() igl::sum(Asparse,dim,Adim_sum)
| Currently the igl version only supports sparse matrix input (and dim must be 1 or 2). For sparse matrices, one could use A*Matrix<Atype,Dynamic,1>::Ones(A.cols(),1); but this will not work as expected for SparseMatrix<bool>. |
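A sketch of the sum correspondences using plain Eigen only (no igl::sum); the matrix sizes are arbitrary, and the sparse row sum uses the ones-vector trick mentioned above:

```cpp
#include <Eigen/Dense>
#include <Eigen/Sparse>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,3);
  // sum(A,1) and sum(A,2) for a dense matrix.
  Eigen::RowVectorXd Acol_sum = A.colwise().sum();
  Eigen::VectorXd    Arow_sum = A.rowwise().sum();
  // Row sums of a sparse matrix via multiplication with a vector of ones.
  Eigen::SparseMatrix<double> Asparse = A.sparseView();
  Eigen::VectorXd Arow_sum_sparse = Asparse * Eigen::VectorXd::Ones(Asparse.cols());
  return 0;
}
```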
D = diag(M)
| igl::diag(M,D)
| Extract the main diagonal of a matrix. The igl version currently supports sparse matrices only. |
M = diag(D)
| igl::diag(D,M)
| Construct a new square matrix M with the entries of vector D along its diagonal. The igl version currently supports sparse matrices only. For dense matrices, consider simply M = D.asDiagonal(); see the sketch below. |
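For dense matrices both directions are available directly in Eigen, without igl::diag; a minimal sketch:

```cpp
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd M = Eigen::MatrixXd::Random(4,4);
  // D = diag(M): extract the main diagonal as a vector.
  Eigen::VectorXd D = M.diagonal();
  // M2 = diag(D): build a square matrix with D on its diagonal.
  Eigen::MatrixXd M2 = D.asDiagonal();
  return 0;
}
```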
[Y,I] = max(X,[],dim)
| igl::mat_max(X,dim,Y,I)
| MATLAB has the bizarre convention of passing [] as the second argument to mean "take the max/min along dimension dim". |
Y = max(X,[],1) Y = max(X,[],2) Y = min(X,[],1) Y = min(X,[],2)
| Y = X.colwise().maxCoeff() Y = X.rowwise().maxCoeff() Y = X.colwise().minCoeff() Y = X.rowwise().minCoeff()
| MATLAB also lets you obtain the indices of the extrema; see igl::mat_max above and the sketch below. |
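A dense-only sketch of the extrema correspondences, including Eigen's pointer overload of maxCoeff to recover an index (roughly what igl::mat_max provides per row or column):

```cpp
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd X = Eigen::MatrixXd::Random(5,3);
  // Y = max(X,[],1): per-column maxima.
  Eigen::RowVectorXd Y = X.colwise().maxCoeff();
  // Maximum of one column together with the row index where it occurs.
  Eigen::Index i;
  double m = X.col(0).maxCoeff(&i);  // m == X(i,0)
  return 0;
}
```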
C = A.*B
| C = (A.array() * B.array()).matrix()
| |
C = A.^b
| C = A.array().pow(b).matrix()
| |
A(B == 0) = C(B==0)
| A = (B.array() == 0).select(C,A)
| |
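A small self-contained example of the select() idiom above; B here just provides the zero pattern:

```cpp
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Ones(3,3);
  Eigen::MatrixXd C = Eigen::MatrixXd::Constant(3,3,7.0);
  Eigen::MatrixXd B(3,3);
  B << 0,1,0,
       1,0,1,
       0,1,0;
  // Wherever B == 0, take the entry from C; elsewhere keep the entry of A.
  A = (B.array() == 0).select(C,A);
  return 0;
}
```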
C = A + B'
| SparseMatrixType BT = B.transpose(); SparseMatrixType C = A+BT;
| Do not attempt to fold the transpose into the expression, as in C = A + B.transpose(). |
[L,p] = chol(A)
| SparseLLT<SparseMatrixType> A_LLT(A.template triangularView<Lower>()); SparseMatrixType L = A_LLT.matrixL(); bool p = (L*0).eval().nonZeros()==0;
| Do not attempt to pass A directly to the constructor of A_LLT, as in SparseLLT<SparseMatrixType> A_LLT(A). Do not attempt to use A_LLT.succeeded() to determine whether the Cholesky factorization succeeded, as in bool p = A_LLT.succeeded(). |
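The SparseLLT class above comes from older Eigen releases; with current Eigen the analogous factorization is SimplicialLLT, whose info() flag replaces the (L*0) trick. A hedged sketch:

```cpp
#include <Eigen/Sparse>
#include <Eigen/SparseCholesky>

int main()
{
  // A tiny symmetric positive definite sparse matrix.
  Eigen::SparseMatrix<double> A(2,2);
  A.insert(0,0) = 4.0;
  A.insert(1,1) = 9.0;
  A.makeCompressed();
  Eigen::SimplicialLLT<Eigen::SparseMatrix<double> > A_LLT(A);
  bool p = (A_LLT.info() == Eigen::Success);       // did the factorization succeed?
  Eigen::SparseMatrix<double> L = A_LLT.matrixL(); // lower triangular factor
  return 0;
}
```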
X = U\(L\b)
| X = b; L.template triangularView<Lower>().solveInPlace(X); U.template triangularView<Upper>().solveInPlace(X);
| Expects that L and U are lower and upper triangular matrices respectively |
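A dense sketch of the two triangular solves; the .template keyword in the row above is only needed when the surrounding code is itself templated:

```cpp
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd L = Eigen::MatrixXd::Random(4,4);
  Eigen::MatrixXd U = Eigen::MatrixXd::Random(4,4);
  Eigen::VectorXd b = Eigen::VectorXd::Random(4);
  // Solve L*y = b and then U*x = y using only the triangular parts of L and U.
  Eigen::VectorXd X = b;
  L.triangularView<Eigen::Lower>().solveInPlace(X);
  U.triangularView<Eigen::Upper>().solveInPlace(X);
  return 0;
}
```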
B = repmat(A,i,j)
| igl::repmat(A,i,j,B); B = A.replicate(i,j);
| igl::repmat is also implemented for sparse matrices. |
I = low:step:hi
| igl::colon(low,step,hi,I); // or const int size = ((hi-low)/step)+1; I = VectorXi::LinSpaced(size,low,low+step*(size-1));
| The igl version should be templated enough to handle the same cases as MATLAB's colon. The MATLAB keyword end has no counterpart in C++; use M.size(), M.rows(), etc. instead. |
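A sketch of both alternatives for I = 2:3:14 (the igl/colon.h header path is the one used by libigl):

```cpp
#include <Eigen/Dense>
#include <igl/colon.h>

int main()
{
  // I = 2:3:14 in MATLAB, i.e. 2,5,8,11,14.
  Eigen::VectorXi I;
  igl::colon(2,3,14,I);
  // Pure Eigen alternative: compute the number of entries first.
  const int size = ((14-2)/3)+1;
  Eigen::VectorXi I2 = Eigen::VectorXi::LinSpaced(size,2,2+3*(size-1));
  return 0;
}
```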
O = ones(m,n)
| MatrixXd O = MatrixXd::Ones(m,n), MatrixXf::Ones(m,n), etc.
| |
O = zeros(m,n)
| MatrixXd O = MatrixXd::Zero(m,n), MatrixXf::Zero(m,n), etc.
| |
B = A(I,J) B = A(I,:)
| igl::slice(A,I,J,B) igl::slice(A,I,1,B)
| This is only for the case when I and J are lists of indices and not vectors of logicals. |
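If you can rely on Eigen 3.4 or later, dense index-vector slicing is also built in and can replace igl::slice for the dense case; a hedged sketch:

```cpp
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5,5);
  Eigen::VectorXi I(3); I << 0, 2, 4;
  Eigen::VectorXi J(2); J << 1, 3;
  // B = A(I,J) and B = A(I,:) using Eigen 3.4's built-in indexing.
  Eigen::MatrixXd B  = A(I, J);
  Eigen::MatrixXd B2 = A(I, Eigen::all);
  return 0;
}
```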
B(I,J) = A B(I,:) = A
| igl::slice_into(A,I,J,B) igl::slice_into(A,I,1,B)
| This is only for the case when I and J are lists of indices and not vectors of logicals. |
M = mode(X,dim)
| igl::mode(X,dim,M)
| |
B = arrayfun(FUN, A)
| B = A.unaryExpr(ptr_fun(FUN))
| If FUN is templated, the templates must be fully resolved. Note that std::ptr_fun was removed in C++17; a lambda works instead (see the sketch below). |
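A minimal sketch of unaryExpr with a lambda, applying std::sin coefficient-wise:

```cpp
#include <cmath>
#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(3,3);
  // B = arrayfun(@sin, A): apply a function to every coefficient.
  Eigen::MatrixXd B = A.unaryExpr([](double x){ return std::sin(x); });
  return 0;
}
```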
B = fliplr(A) B = flipud(A)
| B = A.rowwise().reverse().eval() B = A.colwise().reverse().eval()
| The .eval() is not necessary if A != B |
B = IM(A) A = IM(A);
| B = A.unaryExpr(bind1st(mem_fun( static_cast<VectorXi::Scalar&(VectorXi::*)(VectorXi::Index)> (&VectorXi::operator())), &IM)).eval(); // or for_each(A.data(),A.data()+A.size(),[&IM](int & a){a=IM(a);});
| Where IM is an "index map" column vector and A is an arbitrary matrix. The .eval() is not necessary if A != B, but then the second option should be used. |
A = sparse(I,J,V)
| // build std::vector<Eigen::Triplet<double> > IJV, then A.setFromTriplets(IJV.begin(),IJV.end());
| IJV and A should not be empty! (this might be fixed) |
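A complete example of building the equivalent of A = sparse(I,J,V) from triplets (the entries here are chosen arbitrarily):

```cpp
#include <vector>
#include <Eigen/Sparse>

int main()
{
  // Equivalent of A = sparse([1 2],[2 1],[3 7],2,2) in 1-indexed MATLAB.
  std::vector<Eigen::Triplet<double> > IJV;
  IJV.emplace_back(0,1,3.0);
  IJV.emplace_back(1,0,7.0);
  Eigen::SparseMatrix<double> A(2,2);
  A.setFromTriplets(IJV.begin(),IJV.end());
  return 0;
}
```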
C = min(A,c);
| C.array() = A.array().min(c);
| Coefficient-wise minimum of a matrix and a scalar (or a matrix of matching size). |
I=1:10; ... IP = I(P==0);
| I = VectorXi::LinSpaced(10,0,9); ... VectorXi IP = I; IP.conservativeResize(stable_partition( IP.data(), IP.data()+IP.size(), [&P](int i){return P(i)==0;})-IP.data());
| Where I is a vector of increasing indices from 0 to n, and P is a vector. Requires C++11 and #include <algorithm> |
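The same partition trick as a self-contained program (requires C++11; the contents of P are arbitrary):

```cpp
#include <algorithm>
#include <Eigen/Dense>

int main()
{
  Eigen::VectorXi I = Eigen::VectorXi::LinSpaced(10,0,9);
  Eigen::VectorXi P(10);
  P << 0,1,0,1,0,1,0,1,0,1;
  // Keep only the indices i with P(i) == 0, preserving their order.
  Eigen::VectorXi IP = I;
  IP.conservativeResize(std::stable_partition(
    IP.data(), IP.data()+IP.size(),
    [&P](int i){ return P(i)==0; }) - IP.data());
  return 0;
}
```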
B = A(R<2,C>1);
| B = igl::slice_mask(A,R.array()<2,C.array()>1);
| |
a = any(A(:))
| bool a = std::any_of(A.data(),A.data()+A.size(),[](bool a){ return a;}); // or bool a = A.array().any();
| Casts Matrix::Scalar to bool. |
B = mod(A,2)
| igl::mod(A,2,B)
| |
[IA,LOCB] = ismember(A,B)
| igl::ismember(A,B,IA,LOCB)
| |
A = A - diag(diag(A));
| A.prune([](const int r, const int c, const Scalar)->bool{return r!=c;});
| Remove the diagonal from a sparse matrix. |
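A compilable sketch of pruning away the diagonal of a small sparse matrix:

```cpp
#include <Eigen/Sparse>

int main()
{
  Eigen::SparseMatrix<double> A(3,3);
  A.insert(0,0) = 1.0;
  A.insert(0,1) = 2.0;
  A.insert(1,1) = 3.0;
  // Keep only off-diagonal entries: A = A - diag(diag(A)).
  A.prune([](const int r, const int c, const double &){ return r != c; });
  return 0;
}
```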