import warnings
from abc import ABCMeta, abstractmethod
from numbers import Integral, Real

import numpy as np
import scipy.sparse as sp

from ..base import BaseEstimator, ClassifierMixin, _fit_context
from ..exceptions import ConvergenceWarning, NotFittedError
from ..preprocessing import LabelEncoder
from ..utils import (
    check_array,
    check_random_state,
    column_or_1d,
    compute_class_weight,
)
from ..utils._param_validation import Interval, StrOptions
from ..utils.extmath import safe_sparse_dot
from ..utils.metaestimators import available_if
from ..utils.multiclass import _ovr_decision_function, check_classification_targets
from ..utils.validation import (
    _check_large_sparse,
    _check_sample_weight,
    _num_samples,
    check_consistent_length,
    check_is_fitted,
    validate_data,
)
from . import _liblinear as liblinear
from . import _libsvm as libsvm
from . import _libsvm_sparse as libsvm_sparse

LIBSVM_IMPL = ["c_svc", "nu_svc", "one_class", "epsilon_svr", "nu_svr"]


def _one_vs_one_coef(dual_coef, n_support, support_vectors):
    """Generate primal coefficients from dual coefficients
    for the one-vs-one multi class LibSVM in the case
    of a linear kernel."""
    # get 1vs1 weights for all n*(n-1) classifiers.
    # shape of dual_coef is (n_classes - 1, n_SV); see libsvm docs for details
    n_class = dual_coef.shape[0] + 1

    coef = []
    # boundaries of each class's support vectors inside support_vectors
    sv_locs = np.cumsum(np.hstack([[0], n_support]))
    for class1 in range(n_class):
        # support vectors for class1
        sv1 = support_vectors[sv_locs[class1] : sv_locs[class1 + 1], :]
        for class2 in range(class1 + 1, n_class):
            # support vectors for class2
            sv2 = support_vectors[sv_locs[class2] : sv_locs[class2 + 1], :]

            # dual coefficients for class1 support vectors
            alpha1 = dual_coef[class2 - 1, sv_locs[class1] : sv_locs[class1 + 1]]
            # dual coefficients for class2 support vectors
            alpha2 = dual_coef[class1, sv_locs[class2] : sv_locs[class2 + 1]]
            # build the primal weight vector for class1 vs class2
            coef.append(safe_sparse_dot(alpha1, sv1) + safe_sparse_dot(alpha2, sv2))
    return coef
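
# Illustrative sketch (hypothetical fitted estimator ``clf``, not part of the
# module): for a linear one-vs-one classifier with 3 classes, ``dual_coef``
# has shape (n_classes - 1, n_SV) and the helper returns one primal weight
# vector per class pair, ordered (0, 1), (0, 2), (1, 2):
#
#     coefs = _one_vs_one_coef(clf.dual_coef_, clf._n_support,
#                              clf.support_vectors_)
#     w_01, w_02, w_12 = coefs   # hypothetical names for the 3-class case
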
class BaseLibSVM(BaseEstimator, metaclass=ABCMeta):
    """Base class for estimators that use libsvm as backing library.

    This implements support vector machine classification and regression.

    Parameter documentation is in the derived `SVC` class.
    """

    _parameter_constraints: dict = {
        "kernel": [
            StrOptions({"linear", "poly", "rbf", "sigmoid", "precomputed"}),
            callable,
        ],
        "degree": [Interval(Integral, 0, None, closed="left")],
        "gamma": [
            StrOptions({"scale", "auto"}),
            Interval(Real, 0.0, None, closed="left"),
        ],
        "coef0": [Interval(Real, None, None, closed="neither")],
        "tol": [Interval(Real, 0.0, None, closed="neither")],
        "C": [Interval(Real, 0.0, None, closed="neither")],
        "nu": [Interval(Real, 0.0, 1.0, closed="right")],
        "epsilon": [Interval(Real, 0.0, None, closed="left")],
        "shrinking": ["boolean"],
        "probability": ["boolean"],
        "cache_size": [Interval(Real, 0, None, closed="neither")],
        "class_weight": [StrOptions({"balanced"}), dict, None],
        "verbose": ["verbose"],
        "max_iter": [Interval(Integral, -1, None, closed="left")],
        "random_state": ["random_state"],
    }

    # The order of these must match the integer values in LibSVM.
    _sparse_kernels = ["linear", "poly", "rbf", "sigmoid", "precomputed"]

    @abstractmethod
    def __init__(
        self,
        kernel,
        degree,
        gamma,
        coef0,
        tol,
        C,
        nu,
        epsilon,
        shrinking,
        probability,
        cache_size,
        class_weight,
        verbose,
        max_iter,
        random_state,
    ):
        if self._impl not in LIBSVM_IMPL:
            raise ValueError(
                "impl should be one of %s, %s was given" % (LIBSVM_IMPL, self._impl)
            )

        self.kernel = kernel
        self.degree = degree
        self.gamma = gamma
        self.coef0 = coef0
        self.tol = tol
        self.C = C
        self.nu = nu
        self.epsilon = epsilon
        self.shrinking = shrinking
        self.probability = probability
        self.cache_size = cache_size
        self.class_weight = class_weight
        self.verbose = verbose
        self.max_iter = max_iter
        self.random_state = random_state

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.input_tags.pairwise = self.kernel == "precomputed"
        tags.input_tags.sparse = self.kernel != "precomputed"
        return tags
jdddd\}}| |}t
j|du rDg n|t
jd}t| j}t|}|d	krl||jd
 krltdd||jd
 f  | jdkr||jd krtd|jd
 |jd |jd
 d
kr|jd
 |krtd|j|jf t| jrdn| j}|dkrd| _nGt| jtr| jdkr|r|| | d	  n| }	|	d
krd|jd |	  nd| _n| jdkrd|jd  | _n
t| jtr| j| _| jr| jn| j}
| jr
t ddd |!t
"dj#}|
||||||d t$|dr&|jn|f| _%| j&' | _(| j)| _*| jdv rNt+| j,d	krN|  j&d9  _&| j) | _)| jrV| j*j-n| j*}t
.| j(/ }t
.|/ }|rn|srtd| jdv r~| j0| _1| S | j02 | _1| S )a  Fit the SVM model according to the given training data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)                 or (n_samples, n_samples)
            Training vectors, where `n_samples` is the number of samples
            and `n_features` is the number of features.
            For kernel="precomputed", the expected shape of X is
            (n_samples, n_samples).

        y : array-like of shape (n_samples,)
            Target values (class labels in classification, real numbers in
            regression).

        sample_weight : array-like of shape (n_samples,), default=None
            Per-sample weights. Rescale C per sample. Higher weights
            force the classifier to put more emphasis on these points.

        Returns
        -------
        self : object
            Fitted estimator.

        Notes
        -----
        If X and y are not C-ordered and contiguous arrays of np.float64 and
        X is not a scipy.sparse.csr_matrix, X and/or y may be copied.

        If X is a dense array, then the other methods will not support sparse
        matrices as input.
        r.   z-Sparse precomputed kernels are not supported.rD   csrF)dtypeorderaccept_sparseaccept_large_sparseNr]   r   r   z"X and y have incompatible shapes.
zX has %s samples, but y has %s.r   zDPrecomputed matrix must be a square matrix. Input is a {}x{} matrix.zsample_weight and X have incompatible shapes: %r vs %r
Note: Sparse matrices cannot be indexed w/boolean masks (use `indices=True` in CV).r7   r5   r9   r6   z[LibSVM] endi)random_seedr#   r!   r"   r<   zxThe dual coefficients or intercepts are not finite. The input data may contain large values and need to be preprocessed.)3r   r=   spissparser?   	TypeErrorcallable_sparser   r   r$   float64_validate_targetsasarrayrN   indexrM   r   r#   rO   format_gamma
isinstancerA   strmultiplymeanvarr   _sparse_fit
_dense_fitr;   printrandintiinfomaxhasattr
shape_fit_
intercept_copy_intercept_
dual_coef__dual_coef_lenclasses_dataisfiniteall	_num_itern_iter_item)rP   Xysample_weightrndrW   solver_typeZ	n_samplesr?   ZX_varfitseedr'   Zintercept_finitenessZdual_coef_finitenessr*   r*   r+   r      s   
"






&"

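    # Illustrative usage (assumed data arrays, not part of the class): a
    # concrete subclass such as ``SVC`` or ``SVR`` is trained through this
    # method, e.g.
    #
    #     clf = SVC(kernel="rbf", gamma="scale").fit(X_train, y_train)
    #     clf.shape_fit_   # shape of the training data seen during fit
    #     clf.n_iter_      # libsvm iteration count(s)
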
zBaseLibSVM.fitc                 C   s   t |ddjtjddS )zxValidation of y and class_weight.

        Default implementation for SVR and one-class; overridden in BaseSVC.
        TwarnF)r   )r   Zastyper$   rm   )rP   r   r*   r*   r+   rn   '  s   zBaseLibSVM._validate_targetsc                 C   s2   | j dv sJ | j dkrtd| j t d S d S )Nr   r   r   znSolver terminated early (max_iter=%i).  Consider pre-processing your data with StandardScaler or MinMaxScaler.)fit_status_warningsr   rK   r
   rP   r*   r*   r+   _warn_from_fit_status.  s   
z BaseLibSVM._warn_from_fit_statusc              
    def _dense_fit(self, X, y, sample_weight, solver_type, kernel, random_seed):
        if callable(self.kernel):
            # a reference to X is kept to compute the kernel in predict
            self.__Xfit = X
            X = self._compute_kernel(X)

            if X.shape[0] != X.shape[1]:
                raise ValueError("X.shape[0] should be equal to X.shape[1]")

        libsvm.set_verbosity_wrap(self.verbose)

        # we don't pass **self.get_params() to allow subclasses to
        # add other parameters to __init__
        (
            self.support_,
            self.support_vectors_,
            self._n_support,
            self.dual_coef_,
            self.intercept_,
            self._probA,
            self._probB,
            self.fit_status_,
            self._num_iter,
        ) = libsvm.fit(
            X,
            y,
            svm_type=solver_type,
            sample_weight=sample_weight,
            class_weight=getattr(self, "class_weight_", np.empty(0)),
            kernel=kernel,
            C=self.C,
            nu=self.nu,
            probability=self.probability,
            degree=self.degree,
            shrinking=self.shrinking,
            tol=self.tol,
            cache_size=self.cache_size,
            coef0=self.coef0,
            gamma=self._gamma,
            epsilon=self.epsilon,
            max_iter=self.max_iter,
            random_seed=random_seed,
        )

        self._warn_from_fit_status()

    def _sparse_fit(self, X, y, sample_weight, solver_type, kernel, random_seed):
        X.data = np.asarray(X.data, dtype=np.float64, order="C")
        X.sort_indices()

        kernel_type = self._sparse_kernels.index(kernel)

        libsvm_sparse.set_verbosity_wrap(self.verbose)

        (
            self.support_,
            self.support_vectors_,
            dual_coef_data,
            self.intercept_,
            self._n_support,
            self._probA,
            self._probB,
            self.fit_status_,
            self._num_iter,
        ) = libsvm_sparse.libsvm_sparse_train(
            X.shape[1],
            X.data,
            X.indices,
            X.indptr,
            y,
            solver_type,
            kernel_type,
            self.degree,
            self._gamma,
            self.coef0,
            self.tol,
            self.C,
            getattr(self, "class_weight_", np.empty(0)),
            sample_weight,
            self.nu,
            self.cache_size,
            self.epsilon,
            int(self.shrinking),
            int(self.probability),
            self.max_iter,
            random_seed,
        )

        self._warn_from_fit_status()

        if hasattr(self, "classes_"):
            n_class = len(self.classes_) - 1
        else:  # regression
            n_class = 1
        n_SV = self.support_vectors_.shape[0]

        dual_coef_indices = np.tile(np.arange(n_SV), n_class)
        if not n_SV:
            self.dual_coef_ = sp.csr_matrix([])
        else:
            dual_coef_indptr = np.arange(
                0, dual_coef_indices.size + 1, dual_coef_indices.size / n_class
            )
            self.dual_coef_ = sp.csr_matrix(
                (dual_coef_data, dual_coef_indices, dual_coef_indptr),
                (n_class, n_SV),
            )
zBaseLibSVM._sparse_fitc                 C   s$   |  |}| jr| jn| j}||S )a  Perform regression on samples in X.

        For an one-class model, +1 (inlier) or -1 (outlier) is returned.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            For kernel="precomputed", the expected shape of X is
            (n_samples_test, n_samples_train).

        Returns
        -------
        y_pred : ndarray of shape (n_samples,)
            The predicted values.
        )_validate_for_predictrl   _sparse_predict_dense_predict)rP   r   predictr*   r*   r+   r     s   
zBaseLibSVM.predictc                 C   s   |  |}|jdkrt|ddd}| j}t| jr3d}|jd | jd kr3td|jd | jd f t	| j
}tj|| j| j| j| j| j| j| j||| j| j| j| jdS )	Nr   rD   F)r^   r`   r.   r   MX.shape[1] = %d should be equal to %d, the number of samples at training time)r   r?   r@   rB   rA   rI   )r   ndimr   r?   rk   r#   r   rO   rN   rp   rM   r   r   r   r   r   r   r   r   r   r@   rB   rr   rI   )rP   r   r?   r   r*   r*   r+   r     s:   


zBaseLibSVM._dense_predictc                 C   s   | j }t|r	d}| j|}d}t|j|j|j| j	j| j	j| j	j| j
j| jt| j|| j| j| j| j|t| dtd| j| j| j| j| j| j| jS )Nr.   r7   r   r   )r?   rk   r   rp   r   Zlibsvm_sparse_predictr   r   r   r   r   r   rN   rM   r@   rr   rB   rC   r   r$   r   rE   rF   rG   rH   r   r   r   )rP   r   r?   r   rD   r*   r*   r+   r     s<   
zBaseLibSVM._sparse_predictc                 C   s@   t | jr| || j}t|r| }tj|tjdd}|S )z0Return the data transformed by a callable kernelrD   r   )	rk   r?   r   rh   ri   Ztoarrayr$   ro   rm   rP   r   r?   r*   r*   r+   r     s   

zBaseLibSVM._compute_kernelc                 C   sV   |  |}| |}| jr| |}n| |}| jdv r)t| jdkr)|  S |S )af  Evaluates the decision function for the samples in X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        Returns
        -------
        X : array-like of shape (n_samples, n_class * (n_class-1) / 2)
            Returns the decision function of the sample for each class
            in the model.
        rg   r   )	r   r   rl   _sparse_decision_function_dense_decision_functionrM   r   r   ravel)rP   r   Zdec_funcr*   r*   r+   _decision_function	  s   



zBaseLibSVM._decision_functionc                 C   sh   t |tjddd}| j}t|rd}tj|| j| j| j	| j
| j| j| jt| j|| j| j| j| jdS )NrD   F)r]   r^   r`   r.   r   r?   r@   rI   rB   rA   )r   r$   rm   r?   rk   r   decision_functionr   r   r   r   r   r   r   rN   rp   rM   r@   rI   rB   rr   r   r*   r*   r+   r   '  s(   
z#BaseLibSVM._dense_decision_functionc                 C   s   t j|jt jdd|_| j}t|drd}| j|}t	|j|j
|j| jj| jj
| jj| jj| jt| j|| j| j| j| j| jt| dt d| j| j| j| j| j| j| jS )NrD   r   __call__r.   r   r   )r$   ro   r   rm   r?   r~   r   rp   r   Zlibsvm_sparse_decision_functionr   r   r   r   r   rN   rM   r@   rr   rB   rC   rD   r   r   rE   rF   rG   rH   r   r   r   rP   r   r?   r   r*   r*   r+   r   ?  s<   

z$BaseLibSVM._sparse_decision_functionc              	   C   s   t |  t| jst| |dtjdddd}| jr"t|s"t	|}| jr)|
  t|r?| js?t| js?tdt| j | jdkr\|jd | jd kr\td	|jd | jd f | j}| js{|jdkr{| j |jd kr{td
| jj d|S )Nr\   rD   F)r_   r]   r^   r`   resetz3cannot use sparse input in %r trained on dense datar.   r   r   r   zThe internal representation of z was altered)r   rk   r?   r   r$   rm   rl   rh   ri   r   r   rO   type__name__r#   r   r   r   
n_support_sumr[   )rP   r   svr*   r*   r+   r   b  sD   



    @property
    def coef_(self):
        """Weights assigned to the features when `kernel="linear"`.

        Returns
        -------
        ndarray of shape (n_features, n_classes)
        """
        if self.kernel != "linear":
            raise AttributeError("coef_ is only available when using a linear kernel")

        coef = self._get_coef()

        # coef_ being a read-only property, the value is marked as immutable
        # to avoid hiding potential bugs from the unsuspecting user.
        if sp.issparse(coef):
            # sparse matrices do not have global flags
            coef.data.flags.writeable = False
        else:
            # regular dense array
            coef.flags.writeable = False
        return coef

    def _get_coef(self):
        return safe_sparse_dot(self._dual_coef_, self.support_vectors_)

    @property
    def n_support_(self):
        """Number of support vectors for each class."""
        try:
            check_is_fitted(self)
        except NotFittedError:
            raise AttributeError

        svm_type = LIBSVM_IMPL.index(self._impl)
        if svm_type in (0, 1):
            return self._n_support
        else:
            # SVR and OneClass: _n_support has size 2, we make it size 1
            return np.array([self._n_support[0]])
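
# Illustrative identity (assumed fitted estimator on hypothetical data): for
# ``kernel="linear"`` the primal weights exposed by ``coef_`` are exactly the
# dual expansion computed by ``_get_coef`` above:
#
#     reg = SVR(kernel="linear").fit(X_train, y_train)
#     np.allclose(reg.coef_, reg.dual_coef_ @ reg.support_vectors_)  # True
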
class BaseSVC(ClassifierMixin, BaseLibSVM, metaclass=ABCMeta):
    """ABC for LibSVM-based classifiers."""

    _parameter_constraints: dict = {
        **BaseLibSVM._parameter_constraints,
        "decision_function_shape": [StrOptions({"ovr", "ovo"})],
        "break_ties": ["boolean"],
    }
    for unused_param in ["epsilon", "nu"]:
        _parameter_constraints.pop(unused_param)

    @abstractmethod
    def __init__(
        self,
        kernel,
        degree,
        gamma,
        coef0,
        tol,
        C,
        nu,
        shrinking,
        probability,
        cache_size,
        class_weight,
        verbose,
        max_iter,
        decision_function_shape,
        random_state,
        break_ties,
    ):
        self.decision_function_shape = decision_function_shape
        self.break_ties = break_ties
        super().__init__(
            kernel=kernel,
            degree=degree,
            gamma=gamma,
            coef0=coef0,
            tol=tol,
            C=C,
            nu=nu,
            epsilon=0.0,
            shrinking=shrinking,
            probability=probability,
            cache_size=cache_size,
            class_weight=class_weight,
            verbose=verbose,
            max_iter=max_iter,
            random_state=random_state,
        )

    def _validate_targets(self, y):
        y_ = column_or_1d(y, warn=True)
        check_classification_targets(y)
        cls, y = np.unique(y_, return_inverse=True)
        self.class_weight_ = compute_class_weight(self.class_weight, classes=cls, y=y_)
        if len(cls) < 2:
            raise ValueError(
                "The number of classes has to be greater than one; got %d class"
                % len(cls)
            )

        self.classes_ = cls

        return np.asarray(y, dtype=np.float64, order="C")

    def decision_function(self, X):
        """Evaluate the decision function for the samples in X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The input samples.

        Returns
        -------
        X : ndarray of shape (n_samples, n_classes * (n_classes-1) / 2)
            Returns the decision function of the sample for each class
            in the model.
            If decision_function_shape='ovr', the shape is (n_samples,
            n_classes).

        Notes
        -----
        If decision_function_shape='ovo', the function values are proportional
        to the distance of the samples X to the separating hyperplane. If the
        exact distances are required, divide the function values by the norm of
        the weight vector (``coef_``). See also `this question
        <https://stats.stackexchange.com/questions/14876/
        interpreting-distance-from-hyperplane-in-svm>`_ for further details.
        If decision_function_shape='ovr', the decision function is a monotonic
        transformation of the ovo decision function.
        """
        dec = self._decision_function(X)
        if self.decision_function_shape == "ovr" and len(self.classes_) > 2:
            return _ovr_decision_function(dec < 0, -dec, len(self.classes_))
        return dec
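
    # Illustrative note (assumed 4-class problem on hypothetical data): with
    # ``decision_function_shape="ovo"`` the matrix has one column per class
    # pair, 4 * (4 - 1) / 2 = 6, while "ovr" folds these into 4 columns:
    #
    #     SVC(decision_function_shape="ovo").fit(X, y).decision_function(X)
    #     # -> shape (n_samples, 6); with "ovr" -> shape (n_samples, 4)
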
zBaseSVC.decision_functionc                    sx   t |  | jr| jdkrtd| jr*| jdkr*t| jdkr*tj| |dd}nt	 
|}| jtj|tjdS )a  Perform classification on samples in X.

        For an one-class model, +1 or -1 is returned.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features) or                 (n_samples_test, n_samples_train)
            For kernel="precomputed", the expected shape of X is
            (n_samples_test, n_samples_train).

        Returns
        -------
        y_pred : ndarray of shape (n_samples,)
            Class labels for samples in X.
        r   z>break_ties must be False when decision_function_shape is 'ovo'r   r   r   )Zaxisra   )r   r   r   rO   r   r   r$   Zargmaxr   rS   r   Ztakero   Zintp)rP   r   r   rZ   r*   r+   r     s   
zBaseSVC.predictc                 C   s$   | j std| jdvrtddS )Nz5predict_proba is not available when probability=Falserg   z0predict_proba only implemented for SVC and NuSVCT)rH   r   rM   r   r*   r*   r+   _check_proba=  s   
zBaseSVC._check_probac                 C   sD   |  |}| jjdks| jjdkrtd| jr| jn| j}||S )a  Compute probabilities of possible outcomes for samples in X.

        The model needs to have probability information computed at training
        time: fit with attribute `probability` set to True.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            For kernel="precomputed", the expected shape of X is
            (n_samples_test, n_samples_train).

        Returns
        -------
        T : ndarray of shape (n_samples, n_classes)
            Returns the probability of the sample for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute :term:`classes_`.

        Notes
        -----
        The probability model is created using cross validation, so
        the results can be slightly different than those obtained by
        predict. Also, it will produce meaningless results on very small
        datasets.
        r   zApredict_proba is not available when fitted with probability=False)r   probA_r   probB_r   rl   _sparse_predict_proba_dense_predict_proba)rP   r   Z
pred_probar*   r*   r+   predict_probaF  s   
zBaseSVC.predict_probac                 C   s   t | |S )a  Compute log probabilities of possible outcomes for samples in X.
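
    # Illustrative note (assumed fitted classifier on hypothetical data): the
    # Platt-scaled probabilities come from internal cross validation, so
    # ``predict`` and the argmax of ``predict_proba`` can disagree on
    # borderline samples:
    #
    #     proba = SVC(probability=True).fit(X, y).predict_proba(X)
    #     proba.sum(axis=1)   # each row sums to 1
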
    @available_if(_check_proba)
    def predict_log_proba(self, X):
        """Compute log probabilities of possible outcomes for samples in X.

        The model needs to have probability information computed at training
        time: fit with attribute `probability` set to True.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features) or \
                (n_samples_test, n_samples_train)
            For kernel="precomputed", the expected shape of X is
            (n_samples_test, n_samples_train).

        Returns
        -------
        T : ndarray of shape (n_samples, n_classes)
            Returns the log-probabilities of the sample for each class in
            the model. The columns correspond to the classes in sorted
            order, as they appear in the attribute :term:`classes_`.

        Notes
        -----
        The probability model is created using cross validation, so
        the results can be slightly different than those obtained by
        predict. Also, it will produce meaningless results on very small
        datasets.
        """
        return np.log(self.predict_proba(X))
    def _dense_predict_proba(self, X):
        X = self._compute_kernel(X)

        kernel = self.kernel
        if callable(kernel):
            kernel = "precomputed"

        svm_type = LIBSVM_IMPL.index(self._impl)
        pprob = libsvm.predict_proba(
            X,
            self.support_,
            self.support_vectors_,
            self._n_support,
            self._dual_coef_,
            self._intercept_,
            self._probA,
            self._probB,
            svm_type=svm_type,
            kernel=kernel,
            degree=self.degree,
            cache_size=self.cache_size,
            coef0=self.coef0,
            gamma=self._gamma,
        )

        return pprob

    def _sparse_predict_proba(self, X):
        X.data = np.asarray(X.data, dtype=np.float64, order="C")

        kernel = self.kernel
        if callable(kernel):
            kernel = "precomputed"

        kernel_type = self._sparse_kernels.index(kernel)

        return libsvm_sparse.libsvm_sparse_predict_proba(
            X.data,
            X.indices,
            X.indptr,
            self.support_vectors_.data,
            self.support_vectors_.indices,
            self.support_vectors_.indptr,
            self._dual_coef_.data,
            self._intercept_,
            LIBSVM_IMPL.index(self._impl),
            kernel_type,
            self.degree,
            self._gamma,
            self.coef0,
            self.tol,
            self.C,
            getattr(self, "class_weight_", np.empty(0)),
            self.nu,
            self.epsilon,
            self.shrinking,
            self.probability,
            self._n_support,
            self._probA,
            self._probB,
        )

    def _get_coef(self):
        if self.dual_coef_.shape[0] == 1:
            # binary classifier
            coef = safe_sparse_dot(self._dual_coef_, self.support_vectors_)
        else:
            # 1vs1 classifier
            coef = _one_vs_one_coef(
                self.dual_coef_, self._n_support, self.support_vectors_
            )
            if sp.issparse(coef[0]):
                coef = sp.vstack(coef).tocsr()
            else:
                coef = np.vstack(coef)

        return coef

    @property
    def probA_(self):
        """Parameter learned in Platt scaling when `probability=True`.

        Returns
        -------
        ndarray of shape  (n_classes * (n_classes - 1) / 2)
        """
        return self._probA

    @property
    def probB_(self):
        """Parameter learned in Platt scaling when `probability=True`.

        Returns
        -------
        ndarray of shape  (n_classes * (n_classes - 1) / 2)
        """
        return self._probB

    def __sklearn_tags__(self):
        tags = super().__sklearn_tags__()
        tags.input_tags.pairwise = self.kernel == "precomputed"
        return tags


def _get_liblinear_solver_type(multi_class, penalty, loss, dual):
    """Find the liblinear magic number for the solver.

    This number depends on the values of the following attributes:
      - multi_class
      - penalty
      - loss
      - dual

    The same number is also internally used by LibLinear to determine
    which solver to use.
    """
    # nested dicts containing
    # level 1: available loss functions,
    # level 2: available penalties for the given loss function,
    # level 3: whether the dual solver is available for the specified
    #          combination of loss function and penalty
    _solver_type_dict = {
        "logistic_regression": {"l1": {False: 6}, "l2": {False: 0, True: 7}},
        "hinge": {"l2": {True: 3}},
        "squared_hinge": {"l1": {False: 5}, "l2": {False: 2, True: 1}},
        "epsilon_insensitive": {"l2": {True: 13}},
        "squared_epsilon_insensitive": {"l2": {False: 11, True: 12}},
        "crammer_singer": 4,
    }

    if multi_class == "crammer_singer":
        return _solver_type_dict[multi_class]
    elif multi_class != "ovr":
        raise ValueError(
            "`multi_class` must be one of `ovr`, `crammer_singer`, got %r"
            % multi_class
        )

    _solver_pen = _solver_type_dict.get(loss, None)
    if _solver_pen is None:
        error_string = "loss='%s' is not supported" % loss
    else:
        _solver_dual = _solver_pen.get(penalty, None)
        if _solver_dual is None:
            error_string = (
                "The combination of penalty='%s' and loss='%s' is not supported"
                % (penalty, loss)
            )
        else:
            solver_num = _solver_dual.get(dual, None)
            if solver_num is None:
                error_string = (
                    "The combination of penalty='%s' and loss='%s' are not "
                    "supported when dual=%s" % (penalty, loss, dual)
                )
            else:
                return solver_num
    raise ValueError(
        "Unsupported set of arguments: %s, Parameters: penalty=%r, loss=%r, dual=%r"
        % (error_string, penalty, loss, dual)
    )
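
# Illustrative mapping (values taken from the table above): the returned
# magic numbers are liblinear's internal solver ids, e.g.
#
#     _get_liblinear_solver_type("ovr", "l2", "logistic_regression", False)  # 0
#     _get_liblinear_solver_type("ovr", "l2", "squared_hinge", True)         # 1
#     _get_liblinear_solver_type("crammer_singer", "l2", "hinge", True)      # 4
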
def _fit_liblinear(
    X,
    y,
    C,
    fit_intercept,
    intercept_scaling,
    class_weight,
    penalty,
    dual,
    verbose,
    max_iter,
    tol,
    random_state=None,
    multi_class="ovr",
    loss="logistic_regression",
    epsilon=0.1,
    sample_weight=None,
):
    """Used by Logistic Regression (and CV) and LinearSVC/LinearSVR.

    Preprocessing is done in this function before supplying it to liblinear.

    Parameters
    ----------
    X : {array-like, sparse matrix} of shape (n_samples, n_features)
        Training vector, where `n_samples` is the number of samples and
        `n_features` is the number of features.

    y : array-like of shape (n_samples,)
        Target vector relative to X.

    C : float
        Inverse of cross-validation parameter. The lower the C, the higher
        the penalization.

    fit_intercept : bool
        Whether or not to fit an intercept. If set to True, the feature vector
        is extended to include an intercept term: ``[x_1, ..., x_n, 1]``, where
        1 corresponds to the intercept. If set to False, no intercept will be
        used in calculations (i.e. data is expected to be already centered).

    intercept_scaling : float
        Liblinear internally penalizes the intercept, treating it like any
        other term in the feature vector. To reduce the impact of the
        regularization on the intercept, the `intercept_scaling` parameter can
        be set to a value greater than 1; the higher the value of
        `intercept_scaling`, the lower the impact of regularization on it.
        Then, the weights become `[w_x_1, ..., w_x_n,
        w_intercept*intercept_scaling]`, where `w_x_1, ..., w_x_n` represent
        the feature weights and the intercept weight is scaled by
        `intercept_scaling`. This scaling allows the intercept term to have a
        different regularization behavior compared to the other features.

    class_weight : dict or 'balanced', default=None
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``.

    penalty : {'l1', 'l2'}
        The norm of the penalty used in regularization.

    dual : bool
        Dual or primal formulation.

    verbose : int
        Set verbose to any positive number for verbosity.

    max_iter : int
        Number of iterations.

    tol : float
        Stopping condition.

    random_state : int, RandomState instance or None, default=None
        Controls the pseudo random number generation for shuffling the data.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    multi_class : {'ovr', 'crammer_singer'}, default='ovr'
        `ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
        optimizes a joint objective over all classes.
        While `crammer_singer` is interesting from a theoretical perspective
        as it is consistent, it is seldom used in practice because it rarely
        leads to better accuracy and is more expensive to compute.
        If `crammer_singer` is chosen, the options loss, penalty and dual will
        be ignored.

    loss : {'logistic_regression', 'hinge', 'squared_hinge', \
            'epsilon_insensitive', 'squared_epsilon_insensitive'}, \
            default='logistic_regression'
        The loss function used to fit the model.

    epsilon : float, default=0.1
        Epsilon parameter in the epsilon-insensitive loss function. Note
        that the value of this parameter depends on the scale of the target
        variable y. If unsure, set epsilon=0.

    sample_weight : array-like of shape (n_samples,), default=None
        Weights assigned to each sample.

    Returns
    -------
    coef_ : ndarray of shape (n_features, n_features + 1)
        The coefficient vector got by minimizing the objective function.

    intercept_ : float
        The intercept term added to the vector.

    n_iter_ : array of int
        Number of iterations run across for each class.
    """
    if loss not in ["epsilon_insensitive", "squared_epsilon_insensitive"]:
        enc = LabelEncoder()
        y_ind = enc.fit_transform(y)
        classes_ = enc.classes_
        if len(classes_) < 2:
            raise ValueError(
                "This solver needs samples of at least 2 classes"
                " in the data, but the data contains only one"
                " class: %r" % classes_[0]
            )

        class_weight_ = compute_class_weight(class_weight, classes=classes_, y=y)
    else:
        class_weight_ = np.empty(0, dtype=np.float64)
        y_ind = y
    liblinear.set_verbosity_wrap(verbose)
    rnd = check_random_state(random_state)
    if verbose:
        print("[LibLinear]", end="")

    # LinearSVC breaks when intercept_scaling is <= 0
    bias = -1.0
    if fit_intercept:
        if intercept_scaling <= 0:
            raise ValueError(
                "Intercept scaling is %r but needs to be greater "
                "than 0. To disable fitting an intercept,"
                " set fit_intercept=False." % intercept_scaling
            )
        else:
            bias = intercept_scaling

    libsvm.set_verbosity_wrap(verbose)
    libsvm_sparse.set_verbosity_wrap(verbose)
    liblinear.set_verbosity_wrap(verbose)

    # Liblinear doesn't support 64-bit sparse matrix indices yet
    if sp.issparse(X):
        _check_large_sparse(X)

    # LibLinear wants targets as doubles, even for classification
    y_ind = np.asarray(y_ind, dtype=np.float64).ravel()
    y_ind = np.require(y_ind, requirements="W")

    sample_weight = _check_sample_weight(sample_weight, X, dtype=np.float64)

    solver_type = _get_liblinear_solver_type(multi_class, penalty, loss, dual)
    raw_coef_, n_iter_ = liblinear.train_wrap(
        X,
        y_ind,
        sp.issparse(X),
        solver_type,
        tol,
        bias,
        C,
        class_weight_,
        max_iter,
        rnd.randint(np.iinfo("i").max),
        epsilon,
        sample_weight,
    )
    # Regarding rnd.randint(..) in the above signature: the seed for srand
    # lies in [0..INT_MAX); due to limitations in Numpy on 32-bit platforms,
    # we can't reach the UINT_MAX limit that srand supports.
    n_iter_max = max(n_iter_)
    if n_iter_max >= max_iter:
        warnings.warn(
            "Liblinear failed to converge, increase the number of iterations.",
            ConvergenceWarning,
        )

    if fit_intercept:
        coef_ = raw_coef_[:, :-1]
        intercept_ = intercept_scaling * raw_coef_[:, -1]
    else:
        coef_ = raw_coef_
        intercept_ = 0.0

    return coef_, intercept_, n_iter_
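
# Illustrative call (hypothetical arrays ``X_train``/``y_train``): a
# LinearSVC-style squared-hinge problem solved in the dual, returning the
# weight matrix, the scaled intercept and per-class iteration counts:
#
#     coef_, intercept_, n_iter_ = _fit_liblinear(
#         X_train, y_train, C=1.0, fit_intercept=True, intercept_scaling=1.0,
#         class_weight=None, penalty="l2", dual=True, verbose=0,
#         max_iter=1000, tol=1e-4, loss="squared_hinge",
#     )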