
    *YHhl              
       ^   d Z ddlZddlmZmZ ddlmZmZ ddlZ	ddl
mZmZ ddlmZmZmZmZmZmZ ddlmZ dd	lmZmZ dd
lmZmZ ddlmZ ddlmZm Z m!Z! g dZ"d Z#	 ddZ$d Z%ddZ&d Z' G d deeeeee      Z( G d de(      Z) G d de(      Z* G d de(      Z+ G d deee      Z,y) zG
The :mod:`sklearn.pls` module implements Partial Least Squares (PLS).
    N)ABCMetaabstractmethod)IntegralReal)pinvsvd   )BaseEstimatorClassNamePrefixFeaturesOutMixinMultiOutputMixinRegressorMixinTransformerMixin_fit_context)ConvergenceWarning)check_arraycheck_consistent_length)Interval
StrOptions)svd_flip)FLOAT_DTYPEScheck_is_fittedvalidate_data)PLSSVDPLSCanonicalPLSRegressionc           
         t        | dd      \  }}}|j                  j                  j                         }ddd}t	        j
                  |      ||   z  t	        j                  |      j                  z  }t	        j                  ||kD        }|d d d |f   }||d | z  }t	        j                  t	        j                  t	        j                  ||d |                   S )NF)full_matricescheck_finiteg     @@g    .A)fd)r   dtypecharlowernpmaxfinfoepssum	transpose	conjugatedot)ausvhtfactorcondranks           Y/var/www/html/planif/env/lib/python3.12/site-packages/sklearn/cross_decomposition/_pls.py
_pinv2_oldr5       s     1E>HAq"	AS!F66!9vay 288A;??2D66!d(D	!UdU(A5DMA<<RVVAr%4y%9:;;    c                    t        j                  | j                        j                  	 t	        fd|j
                  D              }d}|dk(  rt        |       t        |      }
}	t        |      D ]  }|dk(  rt        j                  	|      }n7t        j                  | j
                  |      t        j                  ||      z  }|t        j                  t        j                  ||            z   z  }t        j                  | |      }|dk(  rt        j                  
|      }nAt        j                  |j
                  |      t        j                  |j
                  |      z  }|r/|t        j                  t        j                  ||            z   z  }t        j                  ||      t        j                  ||      z   z  }||z
  }t        j                  ||      |k  s|j                  d   dk(  r n|} dz   }||k(  rt        j                  dt               |fS # t        $ r}t        d      |d}~ww xY w)a?  Return the first left and right singular vectors of X'y.

    Provides an alternative to the svd(X'y) and uses the power method instead.
    With norm_y_weights to True and in mode A, this corresponds to the
    algorithm section 11.3 of the Wegelin's review, except this starts at the
    "update saliences" part.
    c              3   z   K   | ]2  }t        j                  t        j                  |      kD        s/| 4 y wN)r$   anyabs).0colr'   s     r4   	<genexpr>z;_get_first_singular_vectors_power_method.<locals>.<genexpr>?   s)     GsRVVBFF3K#4E-FsGs   0;;y residual is constantNd   B   z$Maximum number of iterations reached)r$   r&   r!   r'   nextTStopIterationr5   ranger+   sqrtshapewarningswarnr   )Xymodemax_itertolnorm_y_weightsy_scoreex_weights_oldX_pinvy_pinvi	x_weightsx_score	y_weightsx_weights_diffn_iterr'   s                    @r4   (_get_first_singular_vectors_power_methodr\   2   s    ((177


C=GaccGG Ms{ $A
18_ "3;vw/IqssG,rvvgw/GGIRWWRVVIy9:S@@	&&I&3;vw/IqssG,rvvgii/III	9!=>DDI&&I&"&&I*F*LM"]266..1C71771:?!-"0 UF<>PQi''U  =451<=s   H, ,	I5IIc                     t        j                  | j                  |      }t        |d      \  }}}|dddf   |dddf   fS )zbReturn the first left and right singular vectors of X'y.

    Here the whole SVD is computed.
    Fr   Nr   )r$   r+   rD   r   )rK   rL   CU_Vts         r4   _get_first_singular_vectors_svdrc   m   sD    
 	qssAA1E*HAq"QT7Bq!tHr6   c                 |   | j                  d      }| |z  } |j                  d      }||z  }|rA| j                  dd      }d||dk(  <   | |z  } |j                  dd      }d||dk(  <   ||z  }nDt        j                  | j                  d         }t        j                  |j                  d         }| |||||fS )z{Center X, y and scale if the scale parameter==True

    Returns
    -------
        X, y, x_mean, y_mean, x_std, y_std
    r   axisrB   )rf   ddofg      ?        )meanstdr$   onesrH   )rK   rL   scalex_meany_meanx_stdy_stds          r4   _center_scale_xyrq   w   s     VVV^FKAVVV^FKA11%!esl	U
11%!esl	U

#
#a--r6   c                     t        j                  t        j                  |             }t        j                  | |         }| |z  } ||z  }y)z7Same as svd_flip but works on 1d arrays, and is inplaceN)r$   argmaxr;   sign)r-   vbiggest_abs_val_idxrt   s       r4   _svd_flip_1drw      sA     ))BFF1I.771()*DIAIAr6   c                   $    e Zd ZU dZ eeddd      gdg eddh      g ed	d
h      g eddh      g eeddd      g eeddd      gdgdZe	e
d<   e	 dddd	dddddd       Z ed      d        ZddZddZddZddZ fdZ xZS ) _PLSa  Partial Least Squares (PLS)

    This class implements the generic PLS algorithm.

    Main ref: Wegelin, a survey of Partial Least Squares (PLS) methods,
    with emphasis on the two-block case
    https://stat.uw.edu/sites/default/files/files/reports/2000/tr371.pdf
    rB   Nleftclosedboolean
regression	canonicalArA   r   nipalsr   n_componentsrl   deflation_moderM   	algorithmrN   rO   copy_parameter_constraintsT  ư>)rl   r   rM   r   rN   rO   r   c                t    || _         || _        || _        || _        || _        || _        || _        || _        y r9   )r   r   rM   rl   r   rN   rO   r   )	selfr   rl   r   rM   r   rN   rO   r   s	            r4   __init__z_PLS.__init__   s>     ),	
" 	r6   prefer_skip_nested_validationc           	         t        ||       t        | |t        j                  d| j                  d      }t        |dt        j                  d| j                  d      }|j                  dk(  rd| _        |j                  dd      }nd| _        |j                  d	   }|j                  d   }|j                  d   }| j                  }| j                  d
k(  rt        ||      nt        |||      }||kD  rt        d| d| d      | j                  dk(  | _        | j                  }t        ||| j                         \  }	}
| _        | _        | _        | _        t        j*                  ||f      | _        t        j*                  ||f      | _        t        j*                  ||f      | _        t        j*                  ||f      | _        t        j*                  ||f      | _        t        j*                  ||f      | _        g | _        t        j:                  |
j<                        j>                  }tA        |      D ]q  }| jB                  dk(  rt        jD                  t        jF                  |
      d|z  k  d	      }d|
dd|f<   	 tI        |	|
| jJ                  | jL                  | jN                  |      \  }}}| j8                  jY                  |       n| jB                  dk(  rt[        |	|
      \  }}t]               t        j^                  |	|      }|rd}nt        j^                  ||      }t        j^                  |
|      |z  }t        j^                  ||	      t        j^                  ||      z  }|	t        j`                  ||      z  }	| j                  dk(  rFt        j^                  ||
      t        j^                  ||      z  }|
t        j`                  ||      z  }
| j                  d
k(  rFt        j^                  ||
      t        j^                  ||      z  }|
t        j`                  ||      z  }
|| j,                  dd|f<   || j.                  dd|f<   || j0                  dd|f<   || j2                  dd|f<   || j4                  dd|f<   | j6                  dd|f<   t t        j^                  | j,                  tc        t        j^                  | j4                  jd                  | j,                        d            | _3        t        j^                  | j.                  tc        t        j^                  | j6                  jd                  | j.                        d            | _4        t        j^                  | jf                  | j6                  jd                        | _5        | jj                  | j(                  z  jd                  | j&                  z  | _5        | j$                  | _6        | jf                  j                  d   | _7        | S # tP        $ r3}tS        |      dk7  r tU        jV                  d|        Y d}~ d}~ww xY w)  Fit model to data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of predictors.

        y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Target vectors, where `n_samples` is the number of samples and
            `n_targets` is the number of response variables.

        Returns
        -------
        self : object
            Fitted model.
        Tr	   r!   force_writeabler   ensure_min_samplesrL   F
input_namer!   r   r   	ensure_2drB   r   r~   `n_components` upper bound is . Got   instead. Reduce `n_components`.r   r   
   re   rh   N)rM   rN   rO   rP   r?   z$y residual is constant at iteration r   )r   )8r   r   r$   float64r   r   ndim_predict_1dreshaperH   r   r   min
ValueError_norm_y_weightsrq   rl   _x_mean_y_mean_x_std_y_stdzeros
x_weights_
y_weights_	_x_scores	_y_scoresx_loadings_y_loadings_n_iter_r&   r!   r'   rF   r   allr;   r\   rM   rN   rO   rE   strrI   rJ   appendrc   rw   r+   outerr   rD   x_rotations_y_rotations_coef_
intercept__n_features_out)r   rK   rL   npqr   rank_upper_boundrP   Xkyky_epskyk_maskrW   rY   r   rR   x_scoresy_ssy_scores
x_loadings
y_loadingss                          r4   fitz_PLS.fit   sf   & 	 1%**  
 ** 
 66Q;#D		"a A$DGGAJGGAJGGAJ((
 ,,<C1I#aA, 	 **01A0B C#n$DF 
  $22kA-- HXq$**H
DBdlDK ((A|#45((A|#451l"341l"3488Q$5688Q$56
 "&&|$ =	0A ~~)&&b5j!8qA!$1g:
 A!YY!% HH'5	!! ##G,5('Fr2'N$	9 I. vvb),Hvvi3vvb),t3H "-x0JJJ"((8Z00B""k1VVHb1BFF8X4NN
bhhx44""l2VVHb1BFF8X4NN
bhhx44$-DOOAqD!$-DOOAqD!#+DNN1a4 #+DNN1a4 %/DQT"%/DQT"{=	0L FFOO((**DOO<5Q
 FFOO((**DOO<5Q
 VVD--t/?/?/A/AB
jj4;;.11DKK?
,,#0066q9{ % 1v!99MM$H"LM	s   	3X	Y'YYc                    t        |        t        | ||t        d      }|| j                  z  }|| j                  z  }t        j                  || j                        }|wt        |dd|t              }|j                  dk(  r|j                  dd      }|| j                  z  }|| j                  z  }t        j                  || j                        }||fS |S )a.  Apply the dimension reduction.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Samples to transform.

        y : array-like of shape (n_samples, n_targets), default=None
            Target vectors.

        copy : bool, default=True
            Whether to copy `X` and `y`, or perform in-place normalization.

        Returns
        -------
        x_scores, y_scores : array-like or tuple of array-like
            Return `x_scores` if `y` is not given, `(x_scores, y_scores)` otherwise.
        Fr   r!   resetrL   )r   r   r   r!   rB   r   )r   r   r   r   r   r$   r+   r   r   r   r   r   r   r   )r   rK   rL   r   r   r   s         r4   	transformz_PLS.transformp  s    & 	$LN	T\\	T[[66!T../=cU\A vv{IIb!$AAvva!2!23HX%%r6   c                    t        |        t        |dt              }t        j                  || j
                  j                        }|| j                  z  }|| j                  z  }|^t        |dt              }t        j                  || j                  j                        }|| j                  z  }|| j                  z  }||fS |S )ak  Transform data back to its original space.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_components)
            New data, where `n_samples` is the number of samples
            and `n_components` is the number of pls components.

        y : array-like of shape (n_samples,) or (n_samples, n_components)
            New target, where `n_samples` is the number of samples
            and `n_components` is the number of pls components.

        Returns
        -------
        X_original : ndarray of shape (n_samples, n_features)
            Return the reconstructed `X` data.

        y_original : ndarray of shape (n_samples, n_targets)
            Return the reconstructed `X` target. Only returned when `y` is given.

        Notes
        -----
        This transformation will only be exact if `n_components=n_features`.
        rK   )r   r!   rL   )r   r   r   r$   matmulr   rD   r   r   r   r   r   )r   rK   rL   X_reconstructedy_reconstructeds        r4   inverse_transformz_PLS.inverse_transform  s    2 	c>))At'7'7'9'9:4;;&4<<'=A#\BA ii4+;+;+=+=>Ot{{*Ot||+O"O33r6   c                     t        |        t        | ||t        d      }|| j                  z  }|| j                  j
                  z  | j                  z   }| j                  r|j                         S |S )aL  Predict targets of given samples.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Samples.

        copy : bool, default=True
            Whether to copy `X` or perform in-place normalization.

        Returns
        -------
        y_pred : ndarray of shape (n_samples,) or (n_samples, n_targets)
            Returns predicted values.

        Notes
        -----
        This call requires the estimation of a matrix of shape
        `(n_features, n_targets)`, which may be an issue in high dimensional
        space.
        Fr   )	r   r   r   r   r   rD   r   r   ravel)r   rK   r   y_preds       r4   predictz_PLS.predict  s`    , 	$LN	T\\TZZ\\!DOO3!%!1!1v||~=v=r6   c                 F    | j                  ||      j                  ||      S )a  Learn and apply the dimension reduction on the train data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training vectors, where `n_samples` is the number of samples and
            `n_features` is the number of predictors.

        y : array-like of shape (n_samples, n_targets), default=None
            Target vectors, where `n_samples` is the number of samples and
            `n_targets` is the number of response variables.

        Returns
        -------
        self : ndarray of shape (n_samples, n_components)
            Return `x_scores` if `y` is not given, `(x_scores, y_scores)` otherwise.
        r   r   r   rK   rL   s      r4   fit_transformz_PLS.fit_transform  !    $ xx1~''1--r6   c                 h    t         |          }d|j                  _        d|j                  _        |S )NTF)super__sklearn_tags__regressor_tags
poor_scoretarget_tagsrequired)r   tags	__class__s     r4   r   z_PLS.__sklearn_tags__  s1    w'))-&$)!r6   r	   )NTr9   T)__name__
__module____qualname____doc__r   r   r   r   r   dict__annotations__r   r   r   r   r   r   r   r   r   __classcell__r   s   @r4   ry   ry      s     "(AtFCD%|[&ABCS#J'( %!234h4?@q$v67	$D 	   # * 5` 6`D%N*X>:.( r6   ry   )	metaclassc                        e Zd ZU dZi ej
                  Zeed<   dD ]  Zej                  e        	 d
ddddd fdZ
 fd	Z xZS )r   a  PLS regression.

    PLSRegression is also known as PLS2 or PLS1, depending on the number of
    targets.

    For a comparison between other cross decomposition algorithms, see
    :ref:`sphx_glr_auto_examples_cross_decomposition_plot_compare_cross_decomposition.py`.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    .. versionadded:: 0.8

    Parameters
    ----------
    n_components : int, default=2
        Number of components to keep. Should be in `[1, n_features]`.

    scale : bool, default=True
        Whether to scale `X` and `y`.

    max_iter : int, default=500
        The maximum number of iterations of the power method when
        `algorithm='nipals'`. Ignored otherwise.

    tol : float, default=1e-06
        The tolerance used as convergence criteria in the power method: the
        algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less
        than `tol`, where `u` corresponds to the left singular vector.

    copy : bool, default=True
        Whether to copy `X` and `y` in :term:`fit` before applying centering,
        and potentially scaling. If `False`, these operations will be done
        inplace, modifying both arrays.

    Attributes
    ----------
    x_weights_ : ndarray of shape (n_features, n_components)
        The left singular vectors of the cross-covariance matrices of each
        iteration.

    y_weights_ : ndarray of shape (n_targets, n_components)
        The right singular vectors of the cross-covariance matrices of each
        iteration.

    x_loadings_ : ndarray of shape (n_features, n_components)
        The loadings of `X`.

    y_loadings_ : ndarray of shape (n_targets, n_components)
        The loadings of `y`.

    x_scores_ : ndarray of shape (n_samples, n_components)
        The transformed training samples.

    y_scores_ : ndarray of shape (n_samples, n_components)
        The transformed training targets.

    x_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `X`.

    y_rotations_ : ndarray of shape (n_targets, n_components)
        The projection matrix used to transform `y`.

    coef_ : ndarray of shape (n_target, n_features)
        The coefficients of the linear model such that `y` is approximated as
        `y = X @ coef_.T + intercept_`.

    intercept_ : ndarray of shape (n_targets,)
        The intercepts of the linear model such that `y` is approximated as
        `y = X @ coef_.T + intercept_`.

        .. versionadded:: 1.1

    n_iter_ : list of shape (n_components,)
        Number of iterations of the power method, for each
        component.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    PLSCanonical : Partial Least Squares transformer and regressor.

    Examples
    --------
    >>> from sklearn.cross_decomposition import PLSRegression
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
    >>> y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> pls2 = PLSRegression(n_components=2)
    >>> pls2.fit(X, y)
    PLSRegression()
    >>> y_pred = pls2.predict(X)

    For a comparison between PLS Regression and :class:`~sklearn.decomposition.PCA`, see
    :ref:`sphx_glr_auto_examples_cross_decomposition_plot_pcr_vs_pls.py`.
    r   r   rM   r   Tr   r   rl   rN   rO   r   c          
      4    t         |   ||ddd|||       y )Nr~   r   r   r   r   r   r   r   rl   rN   rO   r   r   s         r4   r   zPLSRegression.__init__m  s/     	%' 	 		
r6   c                 l    t         |   ||       | j                  | _        | j                  | _        | S )r   )r   r   r   	x_scores_r   	y_scores_)r   rK   rL   r   s      r4   r   zPLSRegression.fit{  s.    $ 	Aqr6   r   )r   r   r   r   ry   r   r   r   parampopr   r   r   r   s   @r4   r   r     sb    eN $Cd&A&A#BDB8 *""5)* 
'+cu4
 r6   r   c                        e Zd ZU dZi ej
                  Zeed<   dD ]  Zej                  e        	 d
dddddd fd	Z
 xZS )r   a^  Partial Least Squares transformer and regressor.

    For a comparison between other cross decomposition algorithms, see
    :ref:`sphx_glr_auto_examples_cross_decomposition_plot_compare_cross_decomposition.py`.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    .. versionadded:: 0.8

    Parameters
    ----------
    n_components : int, default=2
        Number of components to keep. Should be in `[1, min(n_samples,
        n_features, n_targets)]`.

    scale : bool, default=True
        Whether to scale `X` and `y`.

    algorithm : {'nipals', 'svd'}, default='nipals'
        The algorithm used to estimate the first singular vectors of the
        cross-covariance matrix. 'nipals' uses the power method while 'svd'
        will compute the whole SVD.

    max_iter : int, default=500
        The maximum number of iterations of the power method when
        `algorithm='nipals'`. Ignored otherwise.

    tol : float, default=1e-06
        The tolerance used as convergence criteria in the power method: the
        algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less
        than `tol`, where `u` corresponds to the left singular vector.

    copy : bool, default=True
        Whether to copy `X` and `y` in fit before applying centering, and
        potentially scaling. If False, these operations will be done inplace,
        modifying both arrays.

    Attributes
    ----------
    x_weights_ : ndarray of shape (n_features, n_components)
        The left singular vectors of the cross-covariance matrices of each
        iteration.

    y_weights_ : ndarray of shape (n_targets, n_components)
        The right singular vectors of the cross-covariance matrices of each
        iteration.

    x_loadings_ : ndarray of shape (n_features, n_components)
        The loadings of `X`.

    y_loadings_ : ndarray of shape (n_targets, n_components)
        The loadings of `y`.

    x_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `X`.

    y_rotations_ : ndarray of shape (n_targets, n_components)
        The projection matrix used to transform `y`.

    coef_ : ndarray of shape (n_targets, n_features)
        The coefficients of the linear model such that `y` is approximated as
        `y = X @ coef_.T + intercept_`.

    intercept_ : ndarray of shape (n_targets,)
        The intercepts of the linear model such that `y` is approximated as
        `y = X @ coef_.T + intercept_`.

        .. versionadded:: 1.1

    n_iter_ : list of shape (n_components,)
        Number of iterations of the power method, for each
        component. Empty if `algorithm='svd'`.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    CCA : Canonical Correlation Analysis.
    PLSSVD : Partial Least Square SVD.

    Examples
    --------
    >>> from sklearn.cross_decomposition import PLSCanonical
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]]
    >>> y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> plsca = PLSCanonical(n_components=2)
    >>> plsca.fit(X, y)
    PLSCanonical()
    >>> X_c, y_c = plsca.transform(X, y)
    r   )r   rM   Tr   r   r   )rl   r   rN   rO   r   c          
      4    t         |   ||dd||||       y )Nr   r   r   r   )r   r   rl   r   rN   rO   r   r   s          r4   r   zPLSCanonical.__init__  s/     	%& 	 		
r6   r   r   r   r   r   ry   r   r   r   r   r   r   r   r   s   @r4   r   r     se    `D $Cd&A&A#BDB+ *""5)* 
 
 
r6   r   c                        e Zd ZU dZi ej
                  Zeed<   dD ]  Zej                  e        	 d	ddddd fdZ
 xZS )
CCAa  Canonical Correlation Analysis, also known as "Mode B" PLS.

    For a comparison between other cross decomposition algorithms, see
    :ref:`sphx_glr_auto_examples_cross_decomposition_plot_compare_cross_decomposition.py`.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    Parameters
    ----------
    n_components : int, default=2
        Number of components to keep. Should be in `[1, min(n_samples,
        n_features, n_targets)]`.

    scale : bool, default=True
        Whether to scale `X` and `y`.

    max_iter : int, default=500
        The maximum number of iterations of the power method.

    tol : float, default=1e-06
        The tolerance used as convergence criteria in the power method: the
        algorithm stops whenever the squared norm of `u_i - u_{i-1}` is less
        than `tol`, where `u` corresponds to the left singular vector.

    copy : bool, default=True
        Whether to copy `X` and `y` in fit before applying centering, and
        potentially scaling. If False, these operations will be done inplace,
        modifying both arrays.

    Attributes
    ----------
    x_weights_ : ndarray of shape (n_features, n_components)
        The left singular vectors of the cross-covariance matrices of each
        iteration.

    y_weights_ : ndarray of shape (n_targets, n_components)
        The right singular vectors of the cross-covariance matrices of each
        iteration.

    x_loadings_ : ndarray of shape (n_features, n_components)
        The loadings of `X`.

    y_loadings_ : ndarray of shape (n_targets, n_components)
        The loadings of `y`.

    x_rotations_ : ndarray of shape (n_features, n_components)
        The projection matrix used to transform `X`.

    y_rotations_ : ndarray of shape (n_targets, n_components)
        The projection matrix used to transform `y`.

    coef_ : ndarray of shape (n_targets, n_features)
        The coefficients of the linear model such that `y` is approximated as
        `y = X @ coef_.T + intercept_`.

    intercept_ : ndarray of shape (n_targets,)
        The intercepts of the linear model such that `y` is approximated as
        `y = X @ coef_.T + intercept_`.

        .. versionadded:: 1.1

    n_iter_ : list of shape (n_components,)
        Number of iterations of the power method, for each
        component.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    PLSCanonical : Partial Least Squares transformer and regressor.
    PLSSVD : Partial Least Square SVD.

    Examples
    --------
    >>> from sklearn.cross_decomposition import CCA
    >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
    >>> y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    >>> cca = CCA(n_components=1)
    >>> cca.fit(X, y)
    CCA(n_components=1)
    >>> X_c, y_c = cca.transform(X, y)
    r   r   Tr   r   r   c          
      4    t         |   ||ddd|||       y )Nr   rA   r   r   r   r   s         r4   r   zCCA.__init__x  s/     	%& 	 		
r6   r   r   r   s   @r4   r   r     s]    Xt $Cd&A&A#BDB8 *""5)* 
'+cu4
 
r6   r   c                       e Zd ZU dZ eeddd      gdgdgdZeed<   dd	d	d
dZ	 e
d	      d        ZddZddZy)r   a  Partial Least Square SVD.

    This transformer simply performs a SVD on the cross-covariance matrix
    `X'y`. It is able to project both the training data `X` and the targets
    `y`. The training data `X` is projected on the left singular vectors, while
    the targets are projected on the right singular vectors.

    Read more in the :ref:`User Guide <cross_decomposition>`.

    .. versionadded:: 0.8

    Parameters
    ----------
    n_components : int, default=2
        The number of components to keep. Should be in `[1,
        min(n_samples, n_features, n_targets)]`.

    scale : bool, default=True
        Whether to scale `X` and `y`.

    copy : bool, default=True
        Whether to copy `X` and `y` in fit before applying centering, and
        potentially scaling. If `False`, these operations will be done inplace,
        modifying both arrays.

    Attributes
    ----------
    x_weights_ : ndarray of shape (n_features, n_components)
        The left singular vectors of the SVD of the cross-covariance matrix.
        Used to project `X` in :meth:`transform`.

    y_weights_ : ndarray of (n_targets, n_components)
        The right singular vectors of the SVD of the cross-covariance matrix.
        Used to project `X` in :meth:`transform`.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    PLSCanonical : Partial Least Squares transformer and regressor.
    CCA : Canonical Correlation Analysis.

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.cross_decomposition import PLSSVD
    >>> X = np.array([[0., 0., 1.],
    ...               [1., 0., 0.],
    ...               [2., 2., 2.],
    ...               [2., 5., 4.]])
    >>> y = np.array([[0.1, -0.2],
    ...               [0.9, 1.1],
    ...               [6.2, 5.9],
    ...               [11.9, 12.3]])
    >>> pls = PLSSVD(n_components=2).fit(X, y)
    >>> X_c, y_c = pls.transform(X, y)
    >>> X_c.shape, y_c.shape
    ((4, 2), (4, 2))
    rB   Nrz   r{   r}   r   rl   r   r   T)rl   r   c                .    || _         || _        || _        y r9   r   )r   r   rl   r   s       r4   r   zPLSSVD.__init__  s    (
	r6   r   c                 &   t        ||       t        | |t        j                  d| j                  d      }t        |dt        j                  d| j                  d      }|j                  dk(  r|j                  dd      }| j                  }t        |j                  d	   |j                  d   |j                  d         }||kD  rt        d
| d| d      t        ||| j                        \  }}| _        | _        | _        | _        t        j$                  |j&                  |      }t)        |d      \  }}}|ddd|f   }|d| }t+        ||      \  }}|j&                  }	|| _        |	| _        | j,                  j                  d   | _        | S )aJ  Fit model to data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training samples.

        y : array-like of shape (n_samples,) or (n_samples, n_targets)
            Targets.

        Returns
        -------
        self : object
            Fitted estimator.
        Tr	   r   rL   Fr   rB   r   r   r   r   r   r^   N)r   r   r$   r   r   r   r   r   r   r   rH   r   rq   rl   r   r   r   r   r+   rD   r   r   r   r   r   )
r   rK   rL   r   r   r_   r`   r.   rb   Vs
             r4   r   z
PLSSVD.fit  s   " 	 1%**  
 ** 
 66Q;		"a A
 ((qwwqz1771:qwwqzB**01A0B C#n$DF 
 FVq$**F
B1dlDL$+t{
 FF133Nq.1ba,B2DD#44Q7r6   c                    t        |        t        | |t        j                  d      }|| j                  z
  | j
                  z  }t        j                  || j                        }|~t        |ddt        j                        }|j                  dk(  r|j                  dd      }|| j                  z
  | j                  z  }t        j                  || j                        }||fS |S )a	  
        Apply the dimensionality reduction.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Samples to be transformed.

        y : array-like of shape (n_samples,) or (n_samples, n_targets),                 default=None
            Targets.

        Returns
        -------
        x_scores : array-like or tuple of array-like
            The transformed data `X_transformed` if `y is not None`,
            `(X_transformed, y_transformed)` otherwise.
        F)r!   r   rL   )r   r   r!   rB   r   )r   r   r$   r   r   r   r+   r   r   r   r   r   r   r   )r   rK   rL   Xrr   yrr   s          r4   r   zPLSSVD.transform  s    & 	$5A$,,$++-66"doo.=A#bjjQAvv{IIb!$dll"dkk1Bvvb$//2HX%%r6   c                 F    | j                  ||      j                  ||      S )a  Learn and apply the dimensionality reduction.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training samples.

        y : array-like of shape (n_samples,) or (n_samples, n_targets),                 default=None
            Targets.

        Returns
        -------
        out : array-like or tuple of array-like
            The transformed data `X_transformed` if `y is not None`,
            `(X_transformed, y_transformed)` otherwise.
        r   r   s      r4   r   zPLSSVD.fit_transform7  r   r6   r   r9   )r   r   r   r   r   r   r   r   r   r   r   r   r   r    r6   r4   r   r     sh    AH "(AtFCD$D 4 
 5> 6>@@.r6   r   )r   r   r   Fr   )-r   rI   abcr   r   numbersr   r   numpyr$   scipy.linalgr   r   baser
   r   r   r   r   r   
exceptionsr   utilsr   r   utils._param_validationr   r   utils.extmathr   utils.validationr   r   r   __all__r5   r\   rc   rq   rw   ry   r   r   r   r   r  r6   r4   <module>r     s     ' "  "  , 8 : $ K K
5<& =B8(v.4]#]@VD VrB
4 B
Jk
$ k
\B.,.> B.r6   