o
    )iA                     @   s  d dl mZ d dlmZmZmZmZ d dlZd dlm	Z	 d dlm
Z
 d dlmZ ddlmZ dd	lmZmZmZ dd
lmZ ddlmZmZ ddlmZmZmZmZ ddlmZmZmZ g dZ G dd dej!Z"G dd dej#Z$dee% dee% dee de&de&dede$fddZ'deddd d!Z(G d"d# d#eZ)G d$d% d%eZ*G d&d' d'eZ+G d(d) d)eZ,ed*d+ed,d-d. fd/dd0d1d2deee)ef  de&de&dede$f
d3d4Z-ed5d+ed,d6d. fd/dd0d1d2deee*ef  de&de&dede$f
d7d8Z.ed9d+ed,d:d. fd/dd0d1d2deee+ef  de&de&dede$f
d;d<Z/ed=d+ed,d>d. fd/dd0d1d2deee,ef  de&de&dede$f
d?d@Z0dS )A    )partial)AnyListOptionalUnionN)Tensor)shufflenetv2   )ImageClassification   )register_modelWeightsWeightsEnum)_IMAGENET_CATEGORIES)_ovewrite_named_paramhandle_legacy_interface)ShuffleNet_V2_X0_5_WeightsShuffleNet_V2_X1_0_WeightsShuffleNet_V2_X1_5_WeightsShuffleNet_V2_X2_0_Weights   )_fuse_modules_replace_reluquantize_model)	QuantizableShuffleNetV2#ShuffleNet_V2_X0_5_QuantizedWeights#ShuffleNet_V2_X1_0_QuantizedWeights#ShuffleNet_V2_X1_5_QuantizedWeights#ShuffleNet_V2_X2_0_QuantizedWeightsshufflenet_v2_x0_5shufflenet_v2_x1_0shufflenet_v2_x1_5shufflenet_v2_x2_0c                       s<   e Zd Zdededdf fddZdedefdd	Z  ZS )
QuantizableInvertedResidualargskwargsreturnNc                    s"   t  j|i | tj | _d S N)super__init__nnZ	quantizedZFloatFunctionalcatselfr$   r%   	__class__ {/var/www/html/eduruby.in/lip-sync/lip-sync-env/lib/python3.10/site-packages/torchvision/models/quantization/shufflenetv2.pyr)   $   s   z$QuantizableInvertedResidual.__init__xc                 C   sh   | j dkr|jddd\}}| jj|| |gdd}n| jj| || |gdd}t|d}|S )Nr   r   )dim)Zstridechunkr+   branch2branch1r   Zchannel_shuffle)r-   r2   x1Zx2outr0   r0   r1   forward(   s   
 z#QuantizableInvertedResidual.forward)__name__
__module____qualname__r   r)   r   r9   __classcell__r0   r0   r.   r1   r#   #   s    r#   c                       sT   e Zd Zdededdf fddZdedefdd	Zdd
ee ddfddZ	  Z
S )r   r$   r%   r&   Nc                    s6   t  j|dti| tjj | _tjj | _	d S )NZinverted_residual)
r(   r)   r#   torchZaoZquantizationZ	QuantStubquantZDeQuantStubdequantr,   r.   r0   r1   r)   6   s   z QuantizableShuffleNetV2.__init__r2   c                 C   s"   |  |}| |}| |}|S r'   )r?   Z_forward_implr@   )r-   r2   r0   r0   r1   r9   ;   s   


zQuantizableShuffleNetV2.forwardis_qatc                 C   s   | j  D ]\}}|dv r|durt|g dg|dd q|  D ]3}t|tu rTt|jj  dkrBt|jddgg d	g|dd t|jg dd
dgg dg|dd q!dS )aB  Fuse conv/bn/relu modules in shufflenetv2 model

        Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
        Model is modified in place.

        .. note::
            Note that this operation does not change numerics
            and the model after modification is in floating point
        )Zconv1Zconv5N)012T)Zinplacer   rB   rC   )rD   34rE   rF   )567)	Z_modulesitemsr   modulestyper#   lenr6   r5   )r-   rA   namemr0   r0   r1   
fuse_modelA   s    
z"QuantizableShuffleNetV2.fuse_modelr'   )r:   r;   r<   r   r)   r   r9   r   boolrP   r=   r0   r0   r.   r1   r   4   s     r   stages_repeatsstages_out_channelsweightsprogressquantizer%   r&   c                K   s   |d urt |dt|jd  d|jv rt |d|jd  |dd}t| |fi |}t| |r7t|| |d urE||j|dd |S )NZnum_classes
categoriesbackendfbgemmT)rU   Z
check_hash)	r   rM   metapopr   r   r   Zload_state_dictZget_state_dict)rR   rS   rT   rU   rV   r%   rX   modelr0   r0   r1   _shufflenetv2Z   s   	

r]   )r   r   rY   zdhttps://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-modelsz
        These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
        weights listed below.
    )Zmin_sizerW   rX   recipeZ_docsc                
   @   F   e Zd Zedeeddi edejddddid	d
ddZ	e	Z
dS )r   zShttps://download.pytorch.org/models/quantized/shufflenetv2_x0.5_fbgemm-00845098.pth   	crop_sizei ImageNet-1Kg#~jL@gRS@zacc@1zacc@5g{Gz?gjt?
num_paramsunquantized_metrics_ops
_file_sizeurlZ
transformsrZ   N)r:   r;   r<   r   r   r
   _COMMON_METAr   IMAGENET1K_V1IMAGENET1K_FBGEMM_V1DEFAULTr0   r0   r0   r1   r      &    
r   c                
   @   r_   )r   zQhttps://download.pytorch.org/models/quantized/shufflenetv2_x1_fbgemm-1e62bb32.pthr`   ra   i" rc   gףp=
Q@gh|?U@rd   g(\?gy&1@re   rk   N)r:   r;   r<   r   r   r
   rm   r   rn   ro   rp   r0   r0   r0   r1   r      rq   r   c                   @   J   e Zd Zedeedddi eddejddd	d
iddddZ	e	Z
dS )r   zShttps://download.pytorch.org/models/quantized/shufflenetv2_x1_5_fbgemm-d7401f05.pthr`      rb   Zresize_size+https://github.com/pytorch/vision/pull/5906iv5 rc   gSR@g̬V@rd   gl?gK7A`@r^   rf   rg   rh   ri   rj   rk   N)r:   r;   r<   r   r   r
   rm   r   rn   ro   rp   r0   r0   r0   r1   r      (    r   c                   @   rr   )r   zShttps://download.pytorch.org/models/quantized/shufflenetv2_x2_0_fbgemm-5cac526c.pthr`   rs   rt   ru   ip rc   g-R@gZd;W@rd   g-?g|?5@rv   rk   N)r:   r;   r<   r   r   r
   rm   r   rn   ro   rp   r0   r0   r0   r1   r      rw   r   Zquantized_shufflenet_v2_x0_5)rN   Z
pretrainedc                 C      |  ddr	tjS tjS NrV   F)getr   ro   r   rn   r%   r0   r0   r1   <lambda>      
r|   )rT   TFrT   rU   rV   c                 K   4   |rt nt| } tg dg df| ||d|S )aQ  
    Constructs a ShuffleNetV2 with 0.5x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ShuffleNet_V2_X0_5_QuantizedWeights` or :class:`~torchvision.models.ShuffleNet_V2_X0_5_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ShuffleNet_V2_X0_5_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr.
            Default is True.
        quantize (bool, optional): If True, return a quantized version of the model.
            Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.ShuffleNet_V2_X0_5_QuantizedWeights``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ShuffleNet_V2_X0_5_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ShuffleNet_V2_X0_5_Weights
        :members:
        :noindex:
          r   )   0   `         r~   )r   r   verifyr]   rT   rU   rV   r%   r0   r0   r1   r         0r   Zquantized_shufflenet_v2_x1_0c                 C   rx   ry   )rz   r   ro   r   rn   r{   r0   r0   r1   r|     r}   c                 K   r   )aQ  
    Constructs a ShuffleNetV2 with 1.0x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ShuffleNet_V2_X1_0_QuantizedWeights` or :class:`~torchvision.models.ShuffleNet_V2_X1_0_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ShuffleNet_V2_X1_0_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr.
            Default is True.
        quantize (bool, optional): If True, return a quantized version of the model.
            Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.ShuffleNet_V2_X1_0_QuantizedWeights``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ShuffleNet_V2_X1_0_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ShuffleNet_V2_X1_0_Weights
        :members:
        :noindex:
    r   )r   t   rs   i  r   r~   )r   r   r   r]   r   r0   r0   r1   r      r   r    Zquantized_shufflenet_v2_x1_5c                 C   rx   ry   )rz   r   ro   r   rn   r{   r0   r0   r1   r|   F  r}   c                 K   r   )aQ  
    Constructs a ShuffleNetV2 with 1.5x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ShuffleNet_V2_X1_5_QuantizedWeights` or :class:`~torchvision.models.ShuffleNet_V2_X1_5_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ShuffleNet_V2_X1_5_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr.
            Default is True.
        quantize (bool, optional): If True, return a quantized version of the model.
            Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.ShuffleNet_V2_X1_5_QuantizedWeights``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ShuffleNet_V2_X1_5_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ShuffleNet_V2_X1_5_Weights
        :members:
        :noindex:
    r   )r      i`  i  r   r~   )r   r   r   r]   r   r0   r0   r1   r!   B  r   r!   Zquantized_shufflenet_v2_x2_0c                 C   rx   ry   )rz   r   ro   r   rn   r{   r0   r0   r1   r|   |  r}   c                 K   r   )aQ  
    Constructs a ShuffleNetV2 with 2.0x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ShuffleNet_V2_X2_0_QuantizedWeights` or :class:`~torchvision.models.ShuffleNet_V2_X2_0_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ShuffleNet_V2_X2_0_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr.
            Default is True.
        quantize (bool, optional): If True, return a quantized version of the model.
            Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.ShuffleNet_V2_X2_0_QuantizedWeights``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ShuffleNet_V2_X2_0_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ShuffleNet_V2_X2_0_Weights
        :members:
        :noindex:
    r   )r      i  i  i   r~   )r   r   r   r]   r   r0   r0   r1   r"   x  r   r"   )1	functoolsr   typingr   r   r   r   r>   Ztorch.nnr*   r   Ztorchvision.modelsr   Ztransforms._presetsr
   Z_apir   r   r   _metar   _utilsr   r   r   r   r   r   utilsr   r   r   __all__ZInvertedResidualr#   ZShuffleNetV2r   intrQ   r]   rm   r   r   r   r   r   r    r!   r"   r0   r0   r0   r1   <module>   s    &

-
-
-
