"""Metrics to assess performance on regression task.

Functions named as ``*_score`` return a scalar value to maximize: the higher
the better.

Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better.
    N)Real   )UndefinedMetricWarning)_average_find_matching_floating_dtypeget_namespaceget_namespace_and_devicesize)_xlogy)Interval
StrOptionsvalidate_params)_weighted_percentile)_check_sample_weight_num_samplescheck_arraycheck_consistent_lengthcolumn_or_1d)	max_errormean_absolute_errormean_squared_errormean_squared_log_errormedian_absolute_errormean_absolute_percentage_errormean_pinball_lossr2_scoreroot_mean_squared_log_errorroot_mean_squared_errorexplained_variance_scoremean_tweedie_deviancemean_poisson_deviancemean_gamma_devianced2_tweedie_scored2_pinball_scored2_absolute_error_scorenumericc           
      C   sL  t | |||d\}}t| || t| d|d} t|d|d}|dur)t|| |d}| jdkr4|| d} |jdkr?||d}| jd |jd krWtd| jd |jd | jd }d	}t	|t
rp||vrotd
||n'|durt|dd}|dkrtd||jd krtd|jd  d| d|dkrdnd}	|	| |||fS )a  Check that y_true, y_pred and sample_weight belong to the same regression task.

    To reduce redundancy when calling `_find_matching_floating_dtype`,
    please use `_check_reg_targets_with_floating_dtype` instead.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,) or None
        Sample weights.

    multioutput : array-like or string in ['raw_values', 'uniform_average',
        'variance_weighted'] or None
        None is accepted due to backward compatibility of r2_score().

    dtype : str or list, default="numeric"
        the dtype argument passed to check_array.

    xp : module, default=None
        Precomputed array namespace module. When passed, typically from a caller
        that has already performed inspection of its own inputs, skips array
        namespace inspection.

    Returns
    -------
    type_true : one of {'continuous', 'continuous-multioutput'}
        The type of the true target data, as output by
        'utils.multiclass.type_of_target'.

    y_true : array-like of shape (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,) or None
        Sample weights.

    multioutput : array-like of shape (n_outputs,) or string in ['raw_values',
        'uniform_average', 'variance_weighted'] or None
        Custom output weights if ``multioutput`` is array-like or
        just the corresponding argument if ``multioutput`` is a
        correct keyword.
    """
    xp, _ = get_namespace(y_true, y_pred, multioutput, xp=xp)

    check_consistent_length(y_true, y_pred)
    y_true = check_array(y_true, ensure_2d=False, dtype=dtype)
    y_pred = check_array(y_pred, ensure_2d=False, dtype=dtype)
    if sample_weight is not None:
        sample_weight = _check_sample_weight(sample_weight, y_true, dtype=dtype)

    if y_true.ndim == 1:
        y_true = xp.reshape(y_true, (-1, 1))

    if y_pred.ndim == 1:
        y_pred = xp.reshape(y_pred, (-1, 1))

    if y_true.shape[1] != y_pred.shape[1]:
        raise ValueError(
            "y_true and y_pred have different number of output ({0}!={1})".format(
                y_true.shape[1], y_pred.shape[1]
            )
        )

    n_outputs = y_true.shape[1]
    allowed_multioutput_str = ("raw_values", "uniform_average", "variance_weighted")
    if isinstance(multioutput, str):
        if multioutput not in allowed_multioutput_str:
            raise ValueError(
                "Allowed 'multioutput' string values are {}. "
                "You provided multioutput={!r}".format(
                    allowed_multioutput_str, multioutput
                )
            )
    elif multioutput is not None:
        multioutput = check_array(multioutput, ensure_2d=False)
        if n_outputs == 1:
            raise ValueError("Custom weights are useful only in multi-output cases.")
        elif n_outputs != multioutput.shape[0]:
            raise ValueError(
                "There must be equally many custom weights "
                f"({multioutput.shape[0]}) as outputs ({n_outputs})."
            )
    y_type = "continuous" if n_outputs == 1 else "continuous-multioutput"

    return y_type, y_true, y_pred, sample_weight, multioutput



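# Editorial usage sketch (not part of the scikit-learn API): `_check_reg_targets`
# normalizes 1-D targets to 2-D column arrays and infers the target type from
# the number of outputs. Assuming a standard scikit-learn installation:
#
#   >>> y_type, y_t, y_p, sw, mo = _check_reg_targets(
#   ...     [3, -0.5, 2], [2.5, 0.0, 2], None, "uniform_average"
#   ... )
#   >>> y_type, y_t.shape, y_p.shape
#   ('continuous', (3, 1), (3, 1))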
def _check_reg_targets_with_floating_dtype(
    y_true, y_pred, sample_weight, multioutput, xp=None
):
    """Ensures y_true, y_pred, and sample_weight correspond to the same regression task.

    Extends `_check_reg_targets` by automatically selecting a suitable floating-point
    data type for inputs using `_find_matching_floating_dtype`.

    Use this private method only when converting inputs to array API-compatibles.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,)

    multioutput : array-like or string in ['raw_values', 'uniform_average', \
            'variance_weighted'] or None
        None is accepted due to backward compatibility of r2_score().

    xp : module, default=None
        Precomputed array namespace module. When passed, typically from a caller
        that has already performed inspection of its own inputs, skips array
        namespace inspection.

    Returns
    -------
    type_true : one of {'continuous', 'continuous-multioutput'}
        The type of the true target data, as output by
        'utils.multiclass.type_of_target'.

    y_true : array-like of shape (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : array-like of shape (n_outputs,) or string in ['raw_values', \
            'uniform_average', 'variance_weighted'] or None
        Custom output weights if ``multioutput`` is array-like or
        just the corresponding argument if ``multioutput`` is a
        correct keyword.
    """
    dtype_name = _find_matching_floating_dtype(y_true, y_pred, sample_weight, xp=xp)

    y_type, y_true, y_pred, sample_weight, multioutput = _check_reg_targets(
        y_true, y_pred, sample_weight, multioutput, dtype=dtype_name, xp=xp
    )

    return y_type, y_true, y_pred, sample_weight, multioutput


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"],
    },
    prefer_skip_nested_validation=True,
)
def mean_absolute_error(
    y_true, y_pred, *, sample_weight=None, multioutput="uniform_average"
):
    """Mean absolute error regression loss.

    The mean absolute error is a non-negative floating point value, where best value
    is 0.0. Read more in the :ref:`User Guide <mean_absolute_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or array of floats
        If multioutput is 'raw_values', then mean absolute error is returned
        for each output separately.
        If multioutput is 'uniform_average' or an ndarray of weights, then the
        weighted average of all output errors is returned.

        MAE output is non-negative floating point. The best value is 0.0.

    Examples
    --------
    >>> from sklearn.metrics import mean_absolute_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_absolute_error(y_true, y_pred)
    0.5
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> mean_absolute_error(y_true, y_pred)
    0.75
    >>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
    array([0.5, 1. ])
    >>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
    0.85...
    """
    xp, _ = get_namespace(y_true, y_pred, sample_weight, multioutput)
    _, y_true, y_pred, sample_weight, multioutput = (
        _check_reg_targets_with_floating_dtype(
            y_true, y_pred, sample_weight, multioutput, xp=xp
        )
    )
    output_errors = _average(
        xp.abs(y_pred - y_true), weights=sample_weight, axis=0, xp=xp
    )

    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            return output_errors
        elif multioutput == "uniform_average":
            # pass None as weights to _average: uniform mean
            multioutput = None

    # Average across the outputs (if needed).
    mean_absolute_error = _average(output_errors, weights=multioutput)

    return float(mean_absolute_error)

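# Editorial note with a small sketch (not library code): the pinball loss
# defined below generalizes the absolute error. At `alpha=0.5` the loss is
# exactly half the absolute error, so the two metrics rank models identically
# at the median:
#
#   >>> y_true, y_pred = [3, -0.5, 2, 7], [2.5, 0.0, 2, 8]
#   >>> mean_absolute_error(y_true, y_pred)
#   0.5
#   >>> mean_pinball_loss(y_true, y_pred, alpha=0.5)  # 0.5 * MAE
#   0.25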
@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "alpha": [Interval(Real, 0, 1, closed="both")],
        "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"],
    },
    prefer_skip_nested_validation=True,
)
def mean_pinball_loss(
    y_true, y_pred, *, sample_weight=None, alpha=0.5, multioutput="uniform_average"
):
    """Pinball loss for quantile regression.

    Read more in the :ref:`User Guide <pinball_loss>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    alpha : float, default=0.5
        Slope of the pinball loss. This loss is equivalent to
        :ref:`mean_absolute_error` when `alpha=0.5`; `alpha=0.95` is
        minimized by estimators of the 95th percentile.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        If multioutput is 'raw_values', then the pinball loss is returned
        for each output separately.
        If multioutput is 'uniform_average' or an ndarray of weights, then the
        weighted average of all output errors is returned.

        The pinball loss output is a non-negative floating point. The best
        value is 0.0.

    Examples
    --------
    >>> from sklearn.metrics import mean_pinball_loss
    >>> y_true = [1, 2, 3]
    >>> mean_pinball_loss(y_true, [0, 2, 3], alpha=0.1)
    0.03...
    >>> mean_pinball_loss(y_true, [1, 2, 4], alpha=0.1)
    0.3...
    >>> mean_pinball_loss(y_true, [0, 2, 3], alpha=0.9)
    0.3...
    >>> mean_pinball_loss(y_true, [1, 2, 4], alpha=0.9)
    0.03...
    >>> mean_pinball_loss(y_true, y_true, alpha=0.1)
    0.0
    >>> mean_pinball_loss(y_true, y_true, alpha=0.9)
    0.0
    """
    xp, _ = get_namespace(y_true, y_pred, sample_weight, multioutput)
    _, y_true, y_pred, sample_weight, multioutput = (
        _check_reg_targets_with_floating_dtype(
            y_true, y_pred, sample_weight, multioutput, xp=xp
        )
    )
    diff = y_true - y_pred
    sign = xp.astype(diff >= 0, y_true.dtype)
    loss = alpha * sign * diff - (1 - alpha) * (1 - sign) * diff
    output_errors = _average(loss, weights=sample_weight, axis=0)

    if isinstance(multioutput, str) and multioutput == "raw_values":
        return output_errors

    if isinstance(multioutput, str) and multioutput == "uniform_average":
        # pass None as weights to _average: uniform mean
        multioutput = None

    return float(_average(output_errors, weights=multioutput))


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"],
    },
    prefer_skip_nested_validation=True,
)
def mean_absolute_percentage_error(
    y_true, y_pred, *, sample_weight=None, multioutput="uniform_average"
):
    """Mean absolute percentage error (MAPE) regression loss.

    Note that we are not using the common "percentage" definition: the percentage
    in the range [0, 100] is converted to a relative value in the range [0, 1]
    by dividing by 100. Thus, an error of 200% corresponds to a relative error of 2.

    Read more in the :ref:`User Guide <mean_absolute_percentage_error>`.

    .. versionadded:: 0.24

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.
        If input is list then the shape must be (n_outputs,).

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        If multioutput is 'raw_values', then mean absolute percentage error
        is returned for each output separately.
        If multioutput is 'uniform_average' or an ndarray of weights, then the
        weighted average of all output errors is returned.

        MAPE output is non-negative floating point. The best value is 0.0.
        But note that bad predictions can lead to arbitrarily large
        MAPE values, especially if some `y_true` values are very close to zero.
        Note that we return a large value instead of `inf` when `y_true` is zero.

    Examples
    --------
    >>> from sklearn.metrics import mean_absolute_percentage_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_absolute_percentage_error(y_true, y_pred)
    0.3273...
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> mean_absolute_percentage_error(y_true, y_pred)
    0.5515...
    >>> mean_absolute_percentage_error(y_true, y_pred, multioutput=[0.3, 0.7])
    0.6198...
    >>> # the value when some element of the y_true is zero is arbitrarily high because
    >>> # of the division by epsilon
    >>> y_true = [1., 0., 2.4, 7.]
    >>> y_pred = [1.2, 0.1, 2.4, 8.]
    >>> mean_absolute_percentage_error(y_true, y_pred)
    112589990684262.48
    """
    xp, _, device_ = get_namespace_and_device(
        y_true, y_pred, sample_weight, multioutput
    )
    _, y_true, y_pred, sample_weight, multioutput = (
        _check_reg_targets_with_floating_dtype(
            y_true, y_pred, sample_weight, multioutput, xp=xp
        )
    )
    epsilon = xp.asarray(xp.finfo(xp.float64).eps, dtype=y_true.dtype, device=device_)
    y_true_abs = xp.abs(y_true)
    mape = xp.abs(y_pred - y_true) / xp.maximum(y_true_abs, epsilon)
    output_errors = _average(mape, weights=sample_weight, axis=0)

    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            return output_errors
        elif multioutput == "uniform_average":
            # pass None as weights to _average: uniform mean
            multioutput = None

    mean_absolute_percentage_error = _average(output_errors, weights=multioutput)
    return float(mean_absolute_percentage_error)
y_true_absZmaperH   r   r=   r=   r>   r     s$   M


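# Editorial note (not library code): the epsilon guard above explains the very
# large docstring example value. With y_true = [1., 0., 2.4, 7.], the zero
# entry is replaced by eps = np.finfo(np.float64).eps ~= 2.22e-16, so its
# relative error is 0.1 / 2.22e-16 ~= 4.5e14; that term dominates the mean
# over the four samples, giving ~4.5e14 / 4 ~= 1.1259e14, i.e. the
# 112589990684262.48 shown above -- very large but finite, instead of `inf`.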

@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"],
    },
    prefer_skip_nested_validation=True,
)
def mean_squared_error(
    y_true, y_pred, *, sample_weight=None, multioutput="uniform_average"
):
    """Mean squared error regression loss.

    Read more in the :ref:`User Guide <mean_squared_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or array of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.

    Examples
    --------
    >>> from sklearn.metrics import mean_squared_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> mean_squared_error(y_true, y_pred)
    0.375
    >>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
    >>> y_pred = [[0, 2],[-1, 2],[8, -5]]
    >>> mean_squared_error(y_true, y_pred)
    0.708...
    >>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
    array([0.41666667, 1.        ])
    >>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
    0.825...
    """
    xp, _ = get_namespace(y_true, y_pred, sample_weight, multioutput)
    _, y_true, y_pred, sample_weight, multioutput = (
        _check_reg_targets_with_floating_dtype(
            y_true, y_pred, sample_weight, multioutput, xp=xp
        )
    )
    output_errors = _average((y_true - y_pred) ** 2, axis=0, weights=sample_weight)

    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            return output_errors
        elif multioutput == "uniform_average":
            # pass None as weights to _average: uniform mean
            multioutput = None

    mean_squared_error = _average(output_errors, weights=multioutput)
    return float(mean_squared_error)


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"],
    },
    prefer_skip_nested_validation=True,
)
def root_mean_squared_error(
    y_true, y_pred, *, sample_weight=None, multioutput="uniform_average"
):
    """Root mean squared error regression loss.

    Read more in the :ref:`User Guide <mean_squared_error>`.

    .. versionadded:: 1.4

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.

    Examples
    --------
    >>> from sklearn.metrics import root_mean_squared_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> root_mean_squared_error(y_true, y_pred)
    0.612...
    >>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
    >>> y_pred = [[0, 2],[-1, 2],[8, -5]]
    >>> root_mean_squared_error(y_true, y_pred)
    0.822...
    """
    xp, _ = get_namespace(y_true, y_pred, sample_weight, multioutput)
    output_errors = xp.sqrt(
        mean_squared_error(
            y_true, y_pred, sample_weight=sample_weight, multioutput="raw_values"
        )
    )

    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            return output_errors
        elif multioutput == "uniform_average":
            # pass None as weights to _average: uniform mean
            multioutput = None

    return float(_average(output_errors, weights=multioutput))


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"],
    },
    prefer_skip_nested_validation=True,
)
def mean_squared_log_error(
    y_true, y_pred, *, sample_weight=None, multioutput="uniform_average"
):
    """Mean squared logarithmic error regression loss.

    Read more in the :ref:`User Guide <mean_squared_log_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'

        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors when the input is of multioutput
            format.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.

    Examples
    --------
    >>> from sklearn.metrics import mean_squared_log_error
    >>> y_true = [3, 5, 2.5, 7]
    >>> y_pred = [2.5, 5, 4, 8]
    >>> mean_squared_log_error(y_true, y_pred)
    0.039...
    >>> y_true = [[0.5, 1], [1, 2], [7, 6]]
    >>> y_pred = [[0.5, 2], [1, 2.5], [8, 8]]
    >>> mean_squared_log_error(y_true, y_pred)
    0.044...
    >>> mean_squared_log_error(y_true, y_pred, multioutput='raw_values')
    array([0.00462428, 0.08377444])
    >>> mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7])
    0.060...
    """
    xp, _ = get_namespace(y_true, y_pred)
    _, y_true, y_pred, sample_weight, multioutput = (
        _check_reg_targets_with_floating_dtype(
            y_true, y_pred, sample_weight, multioutput, xp=xp
        )
    )
    if xp.any(y_true <= -1) or xp.any(y_pred <= -1):
        raise ValueError(
            "Mean Squared Logarithmic Error cannot be used when "
            "targets contain values less than or equal to -1."
        )

    return mean_squared_error(
        xp.log1p(y_true),
        xp.log1p(y_pred),
        sample_weight=sample_weight,
        multioutput=multioutput,
    )


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"],
    },
    prefer_skip_nested_validation=True,
)
def root_mean_squared_log_error(
    y_true, y_pred, *, sample_weight=None, multioutput="uniform_average"
):
    """Root mean squared logarithmic error regression loss.

    Read more in the :ref:`User Guide <mean_squared_log_error>`.

    .. versionadded:: 1.4

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'

        Defines aggregating of multiple output values.
        Array-like value defines weights used to average errors.

        'raw_values' :
            Returns a full set of errors when the input is of multioutput
            format.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    Returns
    -------
    loss : float or ndarray of floats
        A non-negative floating point value (the best value is 0.0), or an
        array of floating point values, one for each individual target.

    Examples
    --------
    >>> from sklearn.metrics import root_mean_squared_log_error
    >>> y_true = [3, 5, 2.5, 7]
    >>> y_pred = [2.5, 5, 4, 8]
    >>> root_mean_squared_log_error(y_true, y_pred)
    0.199...
    """
    xp, _ = get_namespace(y_true, y_pred)
    _, y_true, y_pred, sample_weight, multioutput = (
        _check_reg_targets_with_floating_dtype(
            y_true, y_pred, sample_weight, multioutput, xp=xp
        )
    )
    if xp.any(y_true <= -1) or xp.any(y_pred <= -1):
        raise ValueError(
            "Root Mean Squared Logarithmic Error cannot be used when "
            "targets contain values less than or equal to -1."
        )

    return root_mean_squared_error(
        xp.log1p(y_true),
        xp.log1p(y_pred),
        sample_weight=sample_weight,
        multioutput=multioutput,
    )


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"],
        "sample_weight": ["array-like", None],
    },
    prefer_skip_nested_validation=True,
)
def median_absolute_error(
    y_true, y_pred, *, multioutput="uniform_average", sample_weight=None
):
    """Median absolute error regression loss.

    Median absolute error output is non-negative floating point. The best value
    is 0.0. Read more in the :ref:`User Guide <median_absolute_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values. Array-like value defines
        weights used to average errors.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Errors of all outputs are averaged with uniform weight.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

        .. versionadded:: 0.24

    Returns
    -------
    loss : float or ndarray of floats
        If multioutput is 'raw_values', then the median absolute error is returned
        for each output separately.
        If multioutput is 'uniform_average' or an ndarray of weights, then the
        weighted average of all output errors is returned.

    Examples
    --------
    >>> from sklearn.metrics import median_absolute_error
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> median_absolute_error(y_true, y_pred)
    0.5
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> median_absolute_error(y_true, y_pred)
    0.75
    >>> median_absolute_error(y_true, y_pred, multioutput='raw_values')
    array([0.5, 1. ])
    >>> median_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
    0.85
    """
    _, y_true, y_pred, sample_weight, multioutput = _check_reg_targets(
        y_true, y_pred, sample_weight, multioutput
    )
    if sample_weight is None:
        output_errors = np.median(np.abs(y_pred - y_true), axis=0)
    else:
        output_errors = _weighted_percentile(
            np.abs(y_pred - y_true), sample_weight=sample_weight
        )

    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            return output_errors
        elif multioutput == "uniform_average":
            # pass None as weights to np.average: uniform mean
            multioutput = None

    return float(np.average(output_errors, weights=multioutput))


def _assemble_r2_explained_variance(
    numerator, denominator, n_outputs, multioutput, force_finite, xp, device
):
    """Common part used by explained variance score and :math:`R^2` score."""
    dtype = numerator.dtype

    nonzero_denominator = denominator != 0

    if not force_finite:
        # Standard formula, that may lead to NaN or -Inf
        output_scores = 1 - (numerator / denominator)
    else:
        nonzero_numerator = numerator != 0
        # Default = Zero Numerator = perfect predictions. Set to 1.0
        # (note: even if denominator is zero, thus avoiding NaN scores)
        output_scores = xp.ones([n_outputs], device=device, dtype=dtype)
        # Non-zero Numerator and Non-zero Denominator: use the formula
        valid_score = nonzero_denominator & nonzero_numerator
        output_scores[valid_score] = 1 - (
            numerator[valid_score] / denominator[valid_score]
        )
        # Non-zero Numerator and Zero Denominator:
        # arbitrarily set to 0.0 to avoid -inf scores
        output_scores[nonzero_numerator & ~nonzero_denominator] = 0.0

    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            # return scores individually
            return output_scores
        elif multioutput == "uniform_average":
            # Passing None as weights results in uniform mean
            avg_weights = None
        elif multioutput == "variance_weighted":
            avg_weights = denominator
            if not xp.any(nonzero_denominator):
                # All weights are zero, _average would raise a ZeroDivisionError.
                # This only happens when all y are constant (or 1-element arrays).
                # Since weights are all equal, fall back to uniform weights.
                avg_weights = None
    else:
        avg_weights = multioutput

    result = _average(output_scores, weights=avg_weights)
    if size(result) == 1:
        return float(result)
    return result


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "multioutput": [
            StrOptions({"raw_values", "uniform_average", "variance_weighted"}),
            "array-like",
        ],
        "force_finite": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def explained_variance_score(
    y_true,
    y_pred,
    *,
    sample_weight=None,
    multioutput="uniform_average",
    force_finite=True,
):
    """Explained variance regression score function.

    Best possible score is 1.0, lower values are worse.

    In the particular case when ``y_true`` is constant, the explained variance
    score is not finite: it is either ``NaN`` (perfect predictions) or
    ``-Inf`` (imperfect predictions). To prevent such non-finite numbers from
    polluting higher-level experiments such as a grid search cross-validation,
    by default these cases are replaced with 1.0 (perfect predictions) or 0.0
    (imperfect predictions) respectively. If ``force_finite``
    is set to ``False``, this score falls back on the original :math:`R^2`
    definition.

    .. note::
       The Explained Variance score is similar to the
       :func:`R^2 score <r2_score>`, with the notable difference that it
       does not account for systematic offsets in the prediction. Most often
       the :func:`R^2 score <r2_score>` should be preferred.

    Read more in the :ref:`User Guide <explained_variance_score>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average', 'variance_weighted'} or \
            array-like of shape (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output scores.
        Array-like value defines weights used to average scores.

        'raw_values' :
            Returns a full set of scores in case of multioutput input.

        'uniform_average' :
            Scores of all outputs are averaged with uniform weight.

        'variance_weighted' :
            Scores of all outputs are averaged, weighted by the variances
            of each individual output.

    force_finite : bool, default=True
        Flag indicating if ``NaN`` and ``-Inf`` scores resulting from constant
        data should be replaced with real numbers (``1.0`` if prediction is
        perfect, ``0.0`` otherwise). Default is ``True``, a convenient setting
        for hyperparameters' search procedures (e.g. grid search
        cross-validation).

        .. versionadded:: 1.1

    Returns
    -------
    score : float or ndarray of floats
        The explained variance or ndarray if 'multioutput' is 'raw_values'.

    See Also
    --------
    r2_score :
        Similar metric, but accounting for systematic offsets in
        prediction.

    Notes
    -----
    This is not a symmetric function.

    Examples
    --------
    >>> from sklearn.metrics import explained_variance_score
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> explained_variance_score(y_true, y_pred)
    0.957...
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
    0.983...
    >>> y_true = [-2, -2, -2]
    >>> y_pred = [-2, -2, -2]
    >>> explained_variance_score(y_true, y_pred)
    1.0
    >>> explained_variance_score(y_true, y_pred, force_finite=False)
    nan
    >>> y_true = [-2, -2, -2]
    >>> y_pred = [-2, -2, -2 + 1e-8]
    >>> explained_variance_score(y_true, y_pred)
    0.0
    >>> explained_variance_score(y_true, y_pred, force_finite=False)
    -inf
    """
    xp, _, device_ = get_namespace_and_device(
        y_true, y_pred, sample_weight, multioutput
    )
    _, y_true, y_pred, sample_weight, multioutput = (
        _check_reg_targets_with_floating_dtype(
            y_true, y_pred, sample_weight, multioutput, xp=xp
        )
    )

    y_diff_avg = _average(y_true - y_pred, weights=sample_weight, axis=0)
    numerator = _average(
        (y_true - y_pred - y_diff_avg) ** 2, weights=sample_weight, axis=0
    )

    y_true_avg = _average(y_true, weights=sample_weight, axis=0)
    denominator = _average((y_true - y_true_avg) ** 2, weights=sample_weight, axis=0)

    return _assemble_r2_explained_variance(
        numerator=numerator,
        denominator=denominator,
        n_outputs=y_true.shape[1],
        multioutput=multioutput,
        force_finite=force_finite,
        xp=xp,
        device=device_,
    )


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "multioutput": [
            StrOptions({"raw_values", "uniform_average", "variance_weighted"}),
            "array-like",
            None,
        ],
        "force_finite": ["boolean"],
    },
    prefer_skip_nested_validation=True,
)
def r2_score(
    y_true,
    y_pred,
    *,
    sample_weight=None,
    multioutput="uniform_average",
    force_finite=True,
):
    """:math:`R^2` (coefficient of determination) regression score function.

    Best possible score is 1.0 and it can be negative (because the
    model can be arbitrarily worse). In the general case when the true y is
    non-constant, a constant model that always predicts the average y
    disregarding the input features would get a :math:`R^2` score of 0.0.

    In the particular case when ``y_true`` is constant, the :math:`R^2` score
    is not finite: it is either ``NaN`` (perfect predictions) or ``-Inf``
    (imperfect predictions). To prevent such non-finite numbers from polluting
    higher-level experiments such as a grid search cross-validation, by default
    these cases are replaced with 1.0 (perfect predictions) or 0.0 (imperfect
    predictions) respectively. You can set ``force_finite`` to ``False`` to
    prevent this fix from happening.

    Note: when the prediction residuals have zero mean, the :math:`R^2` score
    is identical to the
    :func:`Explained Variance score <explained_variance_score>`.

    Read more in the :ref:`User Guide <r2_score>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average', 'variance_weighted'}, \
            array-like of shape (n_outputs,) or None, default='uniform_average'

        Defines aggregating of multiple output scores.
        Array-like value defines weights used to average scores.
        Default is "uniform_average".

        'raw_values' :
            Returns a full set of scores in case of multioutput input.

        'uniform_average' :
            Scores of all outputs are averaged with uniform weight.

        'variance_weighted' :
            Scores of all outputs are averaged, weighted by the variances
            of each individual output.

        .. versionchanged:: 0.19
            Default value of multioutput is 'uniform_average'.

    force_finite : bool, default=True
        Flag indicating if ``NaN`` and ``-Inf`` scores resulting from constant
        data should be replaced with real numbers (``1.0`` if prediction is
        perfect, ``0.0`` otherwise). Default is ``True``, a convenient setting
        for hyperparameters' search procedures (e.g. grid search
        cross-validation).

        .. versionadded:: 1.1

    Returns
    -------
    z : float or ndarray of floats
        The :math:`R^2` score or ndarray of scores if 'multioutput' is
        'raw_values'.

    Notes
    -----
    This is not a symmetric function.

    Unlike most other scores, :math:`R^2` score may be negative (it need not
    actually be the square of a quantity R).

    This metric is not well-defined for single samples and will return a NaN
    value if n_samples is less than two.

    References
    ----------
    .. [1] `Wikipedia entry on the Coefficient of determination
            <https://en.wikipedia.org/wiki/Coefficient_of_determination>`_

    Examples
    --------
    >>> from sklearn.metrics import r2_score
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> r2_score(y_true, y_pred)
    0.948...
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> r2_score(y_true, y_pred,
    ...          multioutput='variance_weighted')
    0.938...
    >>> y_true = [1, 2, 3]
    >>> y_pred = [1, 2, 3]
    >>> r2_score(y_true, y_pred)
    1.0
    >>> y_true = [1, 2, 3]
    >>> y_pred = [2, 2, 2]
    >>> r2_score(y_true, y_pred)
    0.0
    >>> y_true = [1, 2, 3]
    >>> y_pred = [3, 2, 1]
    >>> r2_score(y_true, y_pred)
    -3.0
    >>> y_true = [-2, -2, -2]
    >>> y_pred = [-2, -2, -2]
    >>> r2_score(y_true, y_pred)
    1.0
    >>> r2_score(y_true, y_pred, force_finite=False)
    nan
    >>> y_true = [-2, -2, -2]
    >>> y_pred = [-2, -2, -2 + 1e-8]
    >>> r2_score(y_true, y_pred)
    0.0
    >>> r2_score(y_true, y_pred, force_finite=False)
    -inf
    """
    xp, _, device_ = get_namespace_and_device(
        y_true, y_pred, sample_weight, multioutput
    )
    _, y_true, y_pred, sample_weight, multioutput = (
        _check_reg_targets_with_floating_dtype(
            y_true, y_pred, sample_weight, multioutput, xp=xp
        )
    )

    if _num_samples(y_pred) < 2:
        msg = "R^2 score is not well-defined with less than two samples."
        warnings.warn(msg, UndefinedMetricWarning)
        return float("nan")

    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
        weight = sample_weight[:, None]
    else:
        weight = 1.0

    numerator = xp.sum(weight * (y_true - y_pred) ** 2, axis=0)
    denominator = xp.sum(
        weight
        * (y_true - _average(y_true, axis=0, weights=sample_weight, xp=xp)) ** 2,
        axis=0,
    )

    return _assemble_r2_explained_variance(
        numerator=numerator,
        denominator=denominator,
        n_outputs=y_true.shape[1],
        multioutput=multioutput,
        force_finite=force_finite,
        xp=xp,
        device=device_,
    )

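# Editorial worked example (not library code): for a single output, the score
# assembled above is R^2 = 1 - sum(w_i (y_i - yhat_i)^2) / sum(w_i (y_i - ybar)^2).
# Reproducing the first r2_score docstring example by hand:
#
#   >>> import numpy as np
#   >>> y_true = np.array([3.0, -0.5, 2.0, 7.0])
#   >>> y_pred = np.array([2.5, 0.0, 2.0, 8.0])
#   >>> ss_res = np.sum((y_true - y_pred) ** 2)          # 1.5
#   >>> ss_tot = np.sum((y_true - y_true.mean()) ** 2)   # 29.1875
#   >>> 1 - ss_res / ss_tot
#   0.9486081370449679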
@validate_params(
    {"y_true": ["array-like"], "y_pred": ["array-like"]},
    prefer_skip_nested_validation=True,
)
def max_error(y_true, y_pred):
    """
    The max_error metric calculates the maximum residual error.

    Read more in the :ref:`User Guide <max_error>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,)
        Estimated target values.

    Returns
    -------
    max_error : float
        A positive floating point value (the best value is 0.0).

    Examples
    --------
    >>> from sklearn.metrics import max_error
    >>> y_true = [3, 2, 7, 1]
    >>> y_pred = [4, 2, 7, 1]
    >>> max_error(y_true, y_pred)
    1.0
    """
    xp, _ = get_namespace(y_true, y_pred)
    y_type, y_true, y_pred, _, _ = _check_reg_targets_with_floating_dtype(
        y_true, y_pred, sample_weight=None, multioutput=None, xp=xp
    )
    if y_type == "continuous-multioutput":
        raise ValueError("Multioutput not supported in max_error")
    return float(xp.max(xp.abs(y_true - y_pred)))


def _mean_tweedie_deviance(y_true, y_pred, sample_weight, power):
    """Mean Tweedie deviance regression loss."""
    xp, _ = get_namespace(y_true, y_pred)
    p = power
    if p < 0:
        # 'Extreme stable', y any real number, y_pred > 0
        dev = 2 * (
            xp.pow(xp.where(y_true > 0, y_true, 0.0), 2 - p) / ((1 - p) * (2 - p))
            - y_true * xp.pow(y_pred, 1 - p) / (1 - p)
            + xp.pow(y_pred, 2 - p) / (2 - p)
        )
    elif p == 0:
        # Normal distribution, y and y_pred any real number
        dev = (y_true - y_pred) ** 2
    elif p == 1:
        # Poisson distribution
        dev = 2 * (xlogy(y_true, y_true / y_pred) - y_true + y_pred)
    elif p == 2:
        # Gamma distribution
        dev = 2 * (xp.log(y_pred / y_true) + y_true / y_pred - 1)
    else:
        dev = 2 * (
            xp.pow(y_true, 2 - p) / ((1 - p) * (2 - p))
            - y_true * xp.pow(y_pred, 1 - p) / (1 - p)
            + xp.pow(y_pred, 2 - p) / (2 - p)
        )
    return float(_average(dev, weights=sample_weight))


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "power": [
            Interval(Real, None, 0, closed="right"),
            Interval(Real, 1, None, closed="left"),
        ],
    },
    prefer_skip_nested_validation=True,
)
def mean_tweedie_deviance(y_true, y_pred, *, sample_weight=None, power=0):
    """Mean Tweedie deviance regression loss.

    Read more in the :ref:`User Guide <mean_tweedie_deviance>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    power : float, default=0
        Tweedie power parameter. Either power <= 0 or power >= 1.

        The higher `p` the less weight is given to extreme
        deviations between true and predicted targets.

        - power < 0: Extreme stable distribution. Requires: y_pred > 0.
        - power = 0 : Normal distribution, output corresponds to
          mean_squared_error. y_true and y_pred can be any real numbers.
        - power = 1 : Poisson distribution. Requires: y_true >= 0 and
          y_pred > 0.
        - 1 < p < 2 : Compound Poisson distribution. Requires: y_true >= 0
          and y_pred > 0.
        - power = 2 : Gamma distribution. Requires: y_true > 0 and y_pred > 0.
        - power = 3 : Inverse Gaussian distribution. Requires: y_true > 0
          and y_pred > 0.
        - otherwise : Positive stable distribution. Requires: y_true > 0
          and y_pred > 0.

    Returns
    -------
    loss : float
        A non-negative floating point value (the best value is 0.0).

    Examples
    --------
    >>> from sklearn.metrics import mean_tweedie_deviance
    >>> y_true = [2, 0, 1, 4]
    >>> y_pred = [0.5, 0.5, 2., 2.]
    >>> mean_tweedie_deviance(y_true, y_pred, power=1)
    1.4260...
    """
    xp, _ = get_namespace(y_true, y_pred)
    y_type, y_true, y_pred, sample_weight, _ = _check_reg_targets_with_floating_dtype(
        y_true, y_pred, sample_weight, multioutput=None, xp=xp
    )
    if y_type == "continuous-multioutput":
        raise ValueError("Multioutput not supported in mean_tweedie_deviance")

    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
        sample_weight = sample_weight[:, np.newaxis]

    message = f"Mean Tweedie deviance error with power={power} can only be used on "
    if power < 0:
        # 'Extreme stable', y any real number, y_pred > 0
        if xp.any(y_pred <= 0):
            raise ValueError(message + "strictly positive y_pred.")
    elif power == 0:
        # Normal, y and y_pred can be any real number
        pass
    elif 1 <= power < 2:
        # Poisson and compound Poisson distribution, y >= 0, y_pred > 0
        if xp.any(y_true < 0) or xp.any(y_pred <= 0):
            raise ValueError(message + "non-negative y and strictly positive y_pred.")
    elif power >= 2:
        # Gamma and Extreme stable distribution, y and y_pred > 0
        if xp.any(y_true <= 0) or xp.any(y_pred <= 0):
            raise ValueError(message + "strictly positive y and y_pred.")

    return _mean_tweedie_deviance(
        y_true, y_pred, sample_weight=sample_weight, power=power
    )


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
    },
    prefer_skip_nested_validation=True,
)
def mean_poisson_deviance(y_true, y_pred, *, sample_weight=None):
    """Mean Poisson deviance regression loss.

    Poisson deviance is equivalent to the Tweedie deviance with
    the power parameter `power=1`.

    Read more in the :ref:`User Guide <mean_tweedie_deviance>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values. Requires y_true >= 0.

    y_pred : array-like of shape (n_samples,)
        Estimated target values. Requires y_pred > 0.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    loss : float
        A non-negative floating point value (the best value is 0.0).

    Examples
    --------
    >>> from sklearn.metrics import mean_poisson_deviance
    >>> y_true = [2, 0, 1, 4]
    >>> y_pred = [0.5, 0.5, 2., 2.]
    >>> mean_poisson_deviance(y_true, y_pred)
    1.4260...
    """
    return mean_tweedie_deviance(y_true, y_pred, sample_weight=sample_weight, power=1)


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
    },
    prefer_skip_nested_validation=True,
)
def mean_gamma_deviance(y_true, y_pred, *, sample_weight=None):
    """Mean Gamma deviance regression loss.

    Gamma deviance is equivalent to the Tweedie deviance with
    the power parameter `power=2`. It is invariant to scaling of
    the target variable, and measures relative errors.

    Read more in the :ref:`User Guide <mean_tweedie_deviance>`.

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values. Requires y_true > 0.

    y_pred : array-like of shape (n_samples,)
        Estimated target values. Requires y_pred > 0.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    Returns
    -------
    loss : float
        A non-negative floating point value (the best value is 0.0).

    Examples
    --------
    >>> from sklearn.metrics import mean_gamma_deviance
    >>> y_true = [2, 0.5, 1, 4]
    >>> y_pred = [0.5, 0.5, 2., 2.]
    >>> mean_gamma_deviance(y_true, y_pred)
    1.0568...
    """
    return mean_tweedie_deviance(y_true, y_pred, sample_weight=sample_weight, power=2)


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "power": [
            Interval(Real, None, 0, closed="right"),
            Interval(Real, 1, None, closed="left"),
        ],
    },
    prefer_skip_nested_validation=True,
)
def d2_tweedie_score(y_true, y_pred, *, sample_weight=None, power=0):
    """
    :math:`D^2` regression score function, fraction of Tweedie deviance explained.

    Best possible score is 1.0 and it can be negative (because the model can be
    arbitrarily worse). A model that always uses the empirical mean of `y_true` as
    constant prediction, disregarding the input features, gets a D^2 score of 0.0.

    Read more in the :ref:`User Guide <d2_score>`.

    .. versionadded:: 1.0

    Parameters
    ----------
    y_true : array-like of shape (n_samples,)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    power : float, default=0
        Tweedie power parameter. Either power <= 0 or power >= 1.

        The higher `p` the less weight is given to extreme
        deviations between true and predicted targets.

        - power < 0: Extreme stable distribution. Requires: y_pred > 0.
        - power = 0 : Normal distribution, output corresponds to r2_score.
          y_true and y_pred can be any real numbers.
        - power = 1 : Poisson distribution. Requires: y_true >= 0 and
          y_pred > 0.
        - 1 < p < 2 : Compound Poisson distribution. Requires: y_true >= 0
          and y_pred > 0.
        - power = 2 : Gamma distribution. Requires: y_true > 0 and y_pred > 0.
        - power = 3 : Inverse Gaussian distribution. Requires: y_true > 0
          and y_pred > 0.
        - otherwise : Positive stable distribution. Requires: y_true > 0
          and y_pred > 0.

    Returns
    -------
    z : float
        The D^2 score.

    Notes
    -----
    This is not a symmetric function.

    Like R^2, D^2 score may be negative (it need not actually be the square of
    a quantity D).

    This metric is not well-defined for single samples and will return a NaN
    value if n_samples is less than two.

    References
    ----------
    .. [1] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J.
           Wainwright. "Statistical Learning with Sparsity: The Lasso and
           Generalizations." (2015). https://hastie.su.domains/StatLearnSparsity/

    Examples
    --------
    >>> from sklearn.metrics import d2_tweedie_score
    >>> y_true = [0.5, 1, 2.5, 7]
    >>> y_pred = [1, 1, 5, 3.5]
    >>> d2_tweedie_score(y_true, y_pred)
    0.285...
    >>> d2_tweedie_score(y_true, y_pred, power=1)
    0.487...
    >>> d2_tweedie_score(y_true, y_pred, power=2)
    0.630...
    >>> d2_tweedie_score(y_true, y_true, power=2)
    1.0
    """
    xp, _ = get_namespace(y_true, y_pred)
    y_type, y_true, y_pred, sample_weight, _ = _check_reg_targets_with_floating_dtype(
        y_true, y_pred, sample_weight, multioutput=None, xp=xp
    )
    if y_type == "continuous-multioutput":
        raise ValueError("Multioutput not supported in d2_tweedie_score")

    if _num_samples(y_pred) < 2:
        msg = "D^2 score is not well-defined with less than two samples."
        warnings.warn(msg, UndefinedMetricWarning)
        return float("nan")

    y_true, y_pred = xp.squeeze(y_true, axis=1), xp.squeeze(y_pred, axis=1)
    numerator = _mean_tweedie_deviance(
        y_true, y_pred, sample_weight=sample_weight, power=power
    )

    y_avg = _average(y_true, weights=sample_weight, xp=xp)
    denominator = _mean_tweedie_deviance(
        y_true, y_avg, sample_weight=sample_weight, power=power
    )

    return 1 - numerator / denominator


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "alpha": [Interval(Real, 0, 1, closed="both")],
        "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"],
    },
    prefer_skip_nested_validation=True,
)
def d2_pinball_score(
    y_true, y_pred, *, sample_weight=None, alpha=0.5, multioutput="uniform_average"
):
    """
    :math:`D^2` regression score function, fraction of pinball loss explained.

    Best possible score is 1.0 and it can be negative (because the model can be
    arbitrarily worse). A model that always uses the empirical alpha-quantile of
    `y_true` as constant prediction, disregarding the input features,
    gets a :math:`D^2` score of 0.0.

    Read more in the :ref:`User Guide <d2_score>`.

    .. versionadded:: 1.1

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    alpha : float, default=0.5
        Slope of the pinball deviance. It determines the quantile level alpha
        for which the pinball deviance and also D2 are optimal.
        The default `alpha=0.5` is equivalent to `d2_absolute_error_score`.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average scores.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Scores of all outputs are averaged with uniform weight.

    Returns
    -------
    score : float or ndarray of floats
        The :math:`D^2` score with a pinball deviance
        or ndarray of scores if `multioutput='raw_values'`.

    Notes
    -----
    Like :math:`R^2`, :math:`D^2` score may be negative
    (it need not actually be the square of a quantity D).

    This metric is not well-defined for a single point and will return a NaN
    value if n_samples is less than two.

    References
    ----------
    .. [1] Eq. (7) of `Koenker, Roger; Machado, José A. F. (1999).
           "Goodness of Fit and Related Inference Processes for Quantile Regression"
           <https://doi.org/10.1080/01621459.1999.10473882>`_
    .. [2] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J.
           Wainwright. "Statistical Learning with Sparsity: The Lasso and
           Generalizations." (2015). https://hastie.su.domains/StatLearnSparsity/

    Examples
    --------
    >>> from sklearn.metrics import d2_pinball_score
    >>> y_true = [1, 2, 3]
    >>> y_pred = [1, 3, 3]
    >>> d2_pinball_score(y_true, y_pred)
    0.5
    >>> d2_pinball_score(y_true, y_pred, alpha=0.9)
    0.772...
    >>> d2_pinball_score(y_true, y_pred, alpha=0.1)
    -1.045...
    >>> d2_pinball_score(y_true, y_true, alpha=0.1)
    1.0
    """
    _, y_true, y_pred, sample_weight, multioutput = _check_reg_targets(
        y_true, y_pred, sample_weight, multioutput
    )

    if _num_samples(y_pred) < 2:
        msg = "D^2 score is not well-defined with less than two samples."
        warnings.warn(msg, UndefinedMetricWarning)
        return float("nan")

    numerator = mean_pinball_loss(
        y_true,
        y_pred,
        sample_weight=sample_weight,
        alpha=alpha,
        multioutput="raw_values",
    )

    if sample_weight is None:
        y_quantile = np.tile(
            np.percentile(y_true, q=alpha * 100, axis=0), (len(y_true), 1)
        )
    else:
        y_quantile = np.tile(
            _weighted_percentile(
                y_true, sample_weight=sample_weight, percentile_rank=alpha * 100
            ),
            (len(y_true), 1),
        )

    denominator = mean_pinball_loss(
        y_true,
        y_quantile,
        sample_weight=sample_weight,
        alpha=alpha,
        multioutput="raw_values",
    )

    nonzero_numerator = numerator != 0
    nonzero_denominator = denominator != 0
    valid_score = nonzero_numerator & nonzero_denominator
    output_scores = np.ones(y_true.shape[1])
    output_scores[valid_score] = 1 - (
        numerator[valid_score] / denominator[valid_score]
    )
    output_scores[nonzero_numerator & ~nonzero_denominator] = 0.0

    if isinstance(multioutput, str):
        if multioutput == "raw_values":
            # return scores individually
            return output_scores
        else:
            # passing None as weights to np.average results in uniform mean
            avg_weights = None
    else:
        avg_weights = multioutput

    return float(np.average(output_scores, weights=avg_weights))


@validate_params(
    {
        "y_true": ["array-like"],
        "y_pred": ["array-like"],
        "sample_weight": ["array-like", None],
        "multioutput": [StrOptions({"raw_values", "uniform_average"}), "array-like"],
    },
    prefer_skip_nested_validation=True,
)
def d2_absolute_error_score(
    y_true, y_pred, *, sample_weight=None, multioutput="uniform_average"
):
    """
    :math:`D^2` regression score function, fraction of absolute error explained.

    Best possible score is 1.0 and it can be negative (because the model can be
    arbitrarily worse). A model that always uses the empirical median of `y_true`
    as constant prediction, disregarding the input features,
    gets a :math:`D^2` score of 0.0.

    Read more in the :ref:`User Guide <d2_score>`.

    .. versionadded:: 1.1

    Parameters
    ----------
    y_true : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Ground truth (correct) target values.

    y_pred : array-like of shape (n_samples,) or (n_samples, n_outputs)
        Estimated target values.

    sample_weight : array-like of shape (n_samples,), default=None
        Sample weights.

    multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
            (n_outputs,), default='uniform_average'
        Defines aggregating of multiple output values.
        Array-like value defines weights used to average scores.

        'raw_values' :
            Returns a full set of errors in case of multioutput input.

        'uniform_average' :
            Scores of all outputs are averaged with uniform weight.

    Returns
    -------
    score : float or ndarray of floats
        The :math:`D^2` score with an absolute error deviance
        or ndarray of scores if 'multioutput' is 'raw_values'.

    Notes
    -----
    Like :math:`R^2`, :math:`D^2` score may be negative
    (it need not actually be the square of a quantity D).

    This metric is not well-defined for single samples and will return a NaN
    value if n_samples is less than two.

    References
    ----------
    .. [1] Eq. (3.11) of Hastie, Trevor J., Robert Tibshirani and Martin J.
           Wainwright. "Statistical Learning with Sparsity: The Lasso and
           Generalizations." (2015). https://hastie.su.domains/StatLearnSparsity/

    Examples
    --------
    >>> from sklearn.metrics import d2_absolute_error_score
    >>> y_true = [3, -0.5, 2, 7]
    >>> y_pred = [2.5, 0.0, 2, 8]
    >>> d2_absolute_error_score(y_true, y_pred)
    0.764...
    >>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
    >>> y_pred = [[0, 2], [-1, 2], [8, -5]]
    >>> d2_absolute_error_score(y_true, y_pred, multioutput='uniform_average')
    0.691...
    >>> d2_absolute_error_score(y_true, y_pred, multioutput='raw_values')
    array([0.8125    , 0.57142857])
    >>> y_true = [1, 2, 3]
    >>> y_pred = [1, 2, 3]
    >>> d2_absolute_error_score(y_true, y_pred)
    1.0
    >>> y_true = [1, 2, 3]
    >>> y_pred = [2, 2, 2]
    >>> d2_absolute_error_score(y_true, y_pred)
    0.0
    >>> y_true = [1, 2, 3]
    >>> y_pred = [3, 2, 1]
    >>> d2_absolute_error_score(y_true, y_pred)
    -1.0
    """
    return d2_pinball_score(
        y_true, y_pred, sample_weight=sample_weight, alpha=0.5, multioutput=multioutput
    )
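

if __name__ == "__main__":
    # Editorial smoke test, not part of the original module. Because this file
    # uses relative imports, run it as a module, e.g.:
    #     python -m sklearn.metrics._regression
    # Expected values are taken from the docstrings above.
    y_true = [3.0, -0.5, 2.0, 7.0]
    y_pred = [2.5, 0.0, 2.0, 8.0]
    print("MAE :", mean_absolute_error(y_true, y_pred))      # 0.5
    print("MSE :", mean_squared_error(y_true, y_pred))       # 0.375
    print("RMSE:", root_mean_squared_error(y_true, y_pred))  # ~0.612
    print("R^2 :", r2_score(y_true, y_pred))                 # ~0.949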