
function [CVA] = spm_cva(Y,X,X0,c,U)
% Canonical Variate Analysis
% FORMAT [CVA] = spm_cva(Y,X,X0,c,[U])
% Y   - data
% X   - design
% X0  - null space
% c   - contrast weights
% U   - dimension reduction (projection matrix)
%
%
% CVA.c   - contrast weights
% CVA.X   - contrast subspace
% CVA.Y   - whitened and adjusted data
% CVA.X0  - null space of contrast
%
% CVA.V   - canonical vectors  (data)
% CVA.v   - canonical variates (data)
% CVA.W   - canonical vectors  (design)
% CVA.w   - canonical variates (design)
% CVA.C   - canonical contrast (design)
%
% CVA.r   - canonical correlations
% CVA.chi - chi-squared statistics testing D >= i
% CVA.cva - canonical values
% CVA.df  - degrees of freedom
% CVA.p   - p-values
%
% CVA.bic - Bayesian Information Criterion
% CVA.aic - Akaike Information Criterion
%__________________________________________________________________________
%
% CVA uses the generalised eigenvalue solution to the treatment and
% residual sum of squares and products of a general linear model. The
% eigenvalues (i.e., canonical values), after transformation, have a
% chi-squared distribution and allow one to test the null hypothesis that
% the mapping is D or more dimensional. The first p-value is formally
% identical to that obtained using Wilks' Lambda and tests for the
% significance of any mapping.
%
% This routine uses the current contrast to define the subspace of interest
% and treats the remaining design as uninteresting. Conventional results
% for the canonical values are used after the data (and design matrix) have
% been whitened, using the appropriate ReML estimate of non-sphericity.
%
% This function also computes the log Bayes factor (LogBF) for testing the
% hypothesis that the latent dimension (the number of significant canonical
% vectors) is i versus zero. Two approximations are given: CVA.bic(i) and
% CVA.aic(i). These LogBFs can then be used for group inference - see
% Jafarpour et al. (2013) below.
%
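% Example (a minimal sketch with simulated data; the sizes and the
% grand-mean confound are illustrative assumptions, not requirements):
%
%     Y   = randn(64,8);           % 64 samples of 8 response variables
%     X   = randn(64,3);           % three effects of interest
%     X0  = ones(64,1);            % confound - grand mean
%     CVA = spm_cva(Y,X,X0);       % contrast defaults to eye(3)
%     CVA.p(1)                     % p-value for any mapping (Wilks' Lambda)
%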
% References:
%
% Characterizing dynamic brain responses with fMRI: a multivariate
% approach. Friston KJ, Frith CD, Frackowiak RS, Turner R. NeuroImage. 1995
% Jun;2(2):166-172.
%
% A multivariate analysis of evoked responses in EEG and MEG data. Friston
% KJ, Stephan KM, Heather JD, Frith CD, Ioannides AA, Liu LC, Rugg MD,
% Vieth J, Keber H, Hunter K, Frackowiak RS. NeuroImage. 1996 Jun;
% 3(3):167-174.
%
% Population level inference for multivariate MEG analysis. Jafarpour A,
% Barnes G, Fuentemilla L, Duzel E, Penny WD. PLoS One. 2013;
% 8(8):e71305.
%__________________________________________________________________________
% Copyright (C) 2006-2013 Wellcome Trust Centre for Neuroimaging

% Karl Friston
% $Id: spm_cva.m 7303 2018-04-28 16:00:22Z karl $

if nargin < 3, X0 = [];             end
if nargin < 4, c  = eye(size(X,2)); end
if isempty(c), c  = eye(size(X,2)); end

%-Get null-space of contrast
%--------------------------------------------------------------------------
X0 = [X0, X - X*c*pinv(full(c))];
X  = full(X*c);
if any(any(X0))
    X0 = spm_svd(X0);
end

%-Remove null space of contrast
%--------------------------------------------------------------------------
Y  = Y - X0*(X0'*Y);
X  = X - X0*(X0'*X);
P  = pinv(X);
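
% NB: P is the pseudoinverse of the adjusted design, so P*Y below gives
% the least-squares parameter estimates and X*(P*Y) the fitted (treatment)
% effects.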

%-Dimension reduction (if necessary)
%==========================================================================
if nargin < 5
    [n,m] = size(Y);
    n     = fix(n/4);
    if m > n
        U = spm_svd(Y');
        try
            U = U(:,1:n);
        end
    else
        U = speye(size(Y,2));
    end
else
    U = spm_svd(U);
end
Y = Y*U;
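
% NB: when no projection is supplied and there are more response variables
% than fix(n/4), the data are reduced to their leading singular vectors -
% presumably so that the residual sum of squares and products matrix below
% remains well-conditioned.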

%-Canonical Variates Analysis
%==========================================================================

%-Degrees of freedom
%--------------------------------------------------------------------------
[n,m] = size(Y);
b     = rank(X);
h     = min(b,m);
f     = n - b - size(X0,2);

%-Generalised eigensolution for treatment and residual sum of squares
%--------------------------------------------------------------------------
T     = X*(P*Y);
SST   = T'*T;
SSR   = Y - T;
SSR   = SSR'*SSR;
[v,d] = eig(SST,SSR);
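
% NB: eig(SST,SSR) solves the generalised eigenproblem SST*v = SSR*v*d;
% the generalised eigenvalues are the canonical values - the ratios of
% treatment to residual variance along each canonical vector.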

%-Sort and order
%--------------------------------------------------------------------------
d     = diag(d);
i     = isfinite(d);
d     = d(i);
v     = v(:,i);
[d,i] = sort(d,1,'descend');
v     = v(:,i);
h     = min(length(d),h);
d     = d(1:h);
v     = spm_en(v(:,1:h));
V     = U*v;                  % canonical vectors  (data)
v     = Y*v;                  % canonical variates (data)
W     = P*v;                  % canonical vectors  (design)
w     = X*W;                  % canonical variates (design)
C     = c*W;                  % canonical contrast (design)
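
% NB: the columns of v and w are paired canonical variates; each pair is
% maximally correlated, with correlation r(i) computed below.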

%-Inference on dimensionality - p(i) test of D >= i; Wilks' Lambda := p(1)
%--------------------------------------------------------------------------
cval  = d.*(d > 0);
[chi, df, p, r, bic, aic] = deal(zeros(1,h));
for i = 1:h

    % Chi-squared approximation
    %----------------------------------------------------------------------
    chi(i) = (f - (m - b + 1)/2)*sum(log(cval(i:h) + 1));
    df(i)  = (m - i + 1)*(b - i + 1);
    p(i)   = 1 - spm_Xcdf(chi(i),df(i));
    r(i)   = sqrt(cval(i) / (1 + cval(i)));

    % BIC/AIC
    %----------------------------------------------------------------------
    k      = i*(m + b);
    bic(i) = 0.5*n*sum(log(cval(1:i) + 1)) - 0.5*k*log(n);
    aic(i) = 0.5*n*sum(log(cval(1:i) + 1)) - k;

end
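
% NB: chi(i) is Bartlett's chi-squared approximation: because the
% canonical correlations satisfy log(1 + cval(j)) = -log(1 - r(j)^2),
% chi(i) is a scaled -log Wilks' Lambda for the remaining dimensions i:h.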

%-Prevent underflow of p-values and bound correlations to [0,1]
%--------------------------------------------------------------------------
p = max(p,exp(-16));
r = min(max(r,0),1);

%-Assemble results
%==========================================================================
CVA.X   = X;                  % contrast subspace
CVA.Y   = Y;                  % whitened and adjusted data
CVA.c   = c;                  % contrast weights
CVA.X0  = X0;                 % null space of contrast
CVA.V   = V;                  % canonical vectors  (data)
CVA.v   = v;                  % canonical variates (data)
CVA.W   = W;                  % canonical vectors  (design)
CVA.w   = w;                  % canonical variates (design)
CVA.C   = C;                  % canonical contrast (design)
CVA.U   = U;                  % dimension reduction (projector)
CVA.r   = r;                  % canonical correlations
CVA.chi = chi;                % chi-squared statistics testing D >= i
CVA.cva = cval;               % canonical values
CVA.df  = df;                 % degrees of freedom
CVA.p   = p;                  % p-values
CVA.bic = bic;                % LogBF from Bayesian Information Criterion
CVA.aic = aic;                % LogBF from Akaike Information Criterion