from functools import partial
from typing import Any, Callable, List, Optional

import torch
import torch.nn as nn
from torch import Tensor

from ..transforms._presets import ImageClassification
from ..utils import _log_api_usage_once
from ._api import register_model, Weights, WeightsEnum
from ._meta import _IMAGENET_CATEGORIES
from ._utils import _ovewrite_named_param, handle_legacy_interface


__all__ = [
    "ShuffleNetV2",
    "ShuffleNet_V2_X0_5_Weights",
    "ShuffleNet_V2_X1_0_Weights",
    "ShuffleNet_V2_X1_5_Weights",
    "ShuffleNet_V2_X2_0_Weights",
    "shufflenet_v2_x0_5",
    "shufflenet_v2_x1_0",
    "shufflenet_v2_x1_5",
    "shufflenet_v2_x2_0",
]


def channel_shuffle(x: Tensor, groups: int) -> Tensor:
    batchsize, num_channels, height, width = x.size()
    channels_per_group = num_channels // groups

    # reshape to (batch, groups, channels_per_group, height, width)
    x = x.view(batchsize, groups, channels_per_group, height, width)

    x = torch.transpose(x, 1, 2).contiguous()

    # flatten back to (batch, num_channels, height, width)
    x = x.view(batchsize, num_channels, height, width)

    return x


class InvertedResidual(nn.Module):
    def __init__(self, inp: int, oup: int, stride: int) -> None:
        super().__init__()

        if not (1 <= stride <= 3):
            raise ValueError("illegal stride value")
        self.stride = stride

        branch_features = oup // 2
        if (self.stride == 1) and (inp != branch_features << 1):
            raise ValueError(
                f"Invalid combination of stride {stride}, inp {inp} and oup {oup} values. "
                "If stride == 1 then inp should be equal to oup // 2 << 1."
            )

        if self.stride > 1:
            # Downsampling unit: the left branch is a strided depthwise conv followed
            # by a 1x1 pointwise conv.
            self.branch1 = nn.Sequential(
                self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1),
                nn.BatchNorm2d(inp),
                nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(branch_features),
                nn.ReLU(inplace=True),
            )
        else:
            self.branch1 = nn.Sequential()

        self.branch2 = nn.Sequential(
            nn.Conv2d(
                inp if (self.stride > 1) else branch_features,
                branch_features,
                kernel_size=1,
                stride=1,
                padding=0,
                bias=False,
            ),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True),
            self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, padding=1),
            nn.BatchNorm2d(branch_features),
            nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(branch_features),
            nn.ReLU(inplace=True),
        )

    @staticmethod
    def depthwise_conv(
        i: int, o: int, kernel_size: int, stride: int = 1, padding: int = 0, bias: bool = False
    ) -> nn.Conv2d:
        return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)

    def forward(self, x: Tensor) -> Tensor:
        if self.stride == 1:
            # Split the channels in half; only the second half goes through branch2.
            x1, x2 = x.chunk(2, dim=1)
            out = torch.cat((x1, self.branch2(x2)), dim=1)
        else:
            out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)

        out = channel_shuffle(out, 2)

        return out
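
# Worked example of channel_shuffle (illustrative sketch, not part of the public API):
# with 4 channels and groups=2, the channel order [0, 1, 2, 3] becomes [0, 2, 1, 3].
# This interleaving is what lets information cross between the two halves that
# InvertedResidual.forward splits with ``x.chunk(2, dim=1)``.
#
#   >>> t = torch.arange(4.0).reshape(1, 4, 1, 1)
#   >>> channel_shuffle(t, 2).flatten().tolist()
#   [0.0, 2.0, 1.0, 3.0]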


class ShuffleNetV2(nn.Module):
    def __init__(
        self,
        stages_repeats: List[int],
        stages_out_channels: List[int],
        num_classes: int = 1000,
        inverted_residual: Callable[..., nn.Module] = InvertedResidual,
    ) -> None:
        super().__init__()
        _log_api_usage_once(self)

        if len(stages_repeats) != 3:
            raise ValueError("expected stages_repeats as list of 3 positive ints")
        if len(stages_out_channels) != 5:
            raise ValueError("expected stages_out_channels as list of 5 positive ints")
        self._stage_out_channels = stages_out_channels

        input_channels = 3
        output_channels = self._stage_out_channels[0]
        self.conv1 = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True),
        )
        input_channels = output_channels

        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Static annotations for mypy
        self.stage2: nn.Sequential
        self.stage3: nn.Sequential
        self.stage4: nn.Sequential
        stage_names = [f"stage{i}" for i in [2, 3, 4]]
        for name, repeats, output_channels in zip(stage_names, stages_repeats, self._stage_out_channels[1:]):
            # Each stage begins with one stride-2 (downsampling) unit followed by
            # ``repeats - 1`` stride-1 units.
            seq = [inverted_residual(input_channels, output_channels, 2)]
            for i in range(repeats - 1):
                seq.append(inverted_residual(output_channels, output_channels, 1))
            setattr(self, name, nn.Sequential(*seq))
            input_channels = output_channels

        output_channels = self._stage_out_channels[-1]
        self.conv5 = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True),
        )

        self.fc = nn.Linear(output_channels, num_classes)

    def _forward_impl(self, x: Tensor) -> Tensor:
        # See note [TorchScript super()]
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        x = self.conv5(x)
        x = x.mean([2, 3])  # globalpool
        x = self.fc(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)


def _shufflenetv2(
    weights: Optional[WeightsEnum],
    progress: bool,
    *args: Any,
    **kwargs: Any,
) -> ShuffleNetV2:
    if weights is not None:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = ShuffleNetV2(*args, **kwargs)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))

    return model


_COMMON_META = {
    "min_size": (1, 1),
    "categories": _IMAGENET_CATEGORIES,
    "recipe": "https://github.com/ericsun99/Shufflenet-v2-Pytorch",
}
   @   D   e Zd Zedeeddi edddddid	d
dddZeZdS )r   zDhttps://download.pytorch.org/models/shufflenetv2_x0.5-f707e7126e.pth   	crop_sizei ImageNet-1Kg-FN@g9voT@zacc@1zacc@5g{Gz?gT㥛 @VThese weights were trained from scratch to reproduce closely the results of the paper.
num_params_metrics_ops
_file_size_docsurlZ
transformsrk   N	rG   rH   rI   r   r   r	   _COMMON_METAIMAGENET1K_V1DEFAULTr$   r$   r$   r%   r      &    
r   c                
   @   ro   )r   zBhttps://download.pytorch.org/models/shufflenetv2_x1-5666bf0f80.pthrp   rq   i" rs   gI+WQ@gNbX9V@rt   g(\?gE!@ru   rv   r|   Nr~   r$   r$   r$   r%   r      r   r   c                   @   H   e Zd Zedeedddi eddddd	d
idddddZeZdS )r   zBhttps://download.pytorch.org/models/shufflenetv2_x1_5-3c479a10.pthrp      rr   Zresize_size+https://github.com/pytorch/vision/pull/5906iv5 rs   g9v?R@g/$V@rt   gl?gw/+@
                These weights were trained from scratch by using TorchVision's `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            rn   rw   rx   ry   rz   r{   r|   Nr~   r$   r$   r$   r%   r      (    r   c                   @   r   )r   zBhttps://download.pytorch.org/models/shufflenetv2_x2_0-8be3c8ee.pthrp   r   r   r   ip rs   gQS@gMb@W@rt   g-?g+n<@r   r   r|   Nr~   r$   r$   r$   r%   r     r   r   Z
pretrained)rf   T)rf   rg   c                 K   (   t | } t| |g dg dfi |S )a  
    Constructs a ShuffleNetV2 architecture with 0.5x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    Args:
        weights (:class:`~torchvision.models.ShuffleNet_V2_X0_5_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ShuffleNet_V2_X0_5_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ShuffleNet_V2_X0_5_Weights
        :members:
    """
    weights = ShuffleNet_V2_X0_5_Weights.verify(weights)

    return _shufflenetv2(weights, progress, [4, 8, 4], [24, 48, 96, 192, 1024], **kwargs)
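
# A minimal usage sketch (illustrative only; assumes the pretrained checkpoint can be
# downloaded): build the 0.5x model with its ImageNet weights and the matching
# preprocessing transforms. The same pattern applies to the other builders below.
#
#   from torchvision.models import shufflenet_v2_x0_5, ShuffleNet_V2_X0_5_Weights
#
#   weights = ShuffleNet_V2_X0_5_Weights.DEFAULT
#   model = shufflenet_v2_x0_5(weights=weights).eval()
#   preprocess = weights.transforms()
#   batch = preprocess(torch.rand(3, 256, 256)).unsqueeze(0)  # stand-in for a real image
#   probabilities = model(batch).softmax(dim=1)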


@register_model()
@handle_legacy_interface(weights=("pretrained", ShuffleNet_V2_X1_0_Weights.IMAGENET1K_V1))
def shufflenet_v2_x1_0(
    *, weights: Optional[ShuffleNet_V2_X1_0_Weights] = None, progress: bool = True, **kwargs: Any
) -> ShuffleNetV2:
    """
    Constructs a ShuffleNetV2 architecture with 1.0x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    Args:
        weights (:class:`~torchvision.models.ShuffleNet_V2_X1_0_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ShuffleNet_V2_X1_0_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ShuffleNet_V2_X1_0_Weights
        :members:
    """
    weights = ShuffleNet_V2_X1_0_Weights.verify(weights)

    return _shufflenetv2(weights, progress, [4, 8, 4], [24, 116, 232, 464, 1024], **kwargs)


@register_model()
@handle_legacy_interface(weights=("pretrained", ShuffleNet_V2_X1_5_Weights.IMAGENET1K_V1))
def shufflenet_v2_x1_5(
    *, weights: Optional[ShuffleNet_V2_X1_5_Weights] = None, progress: bool = True, **kwargs: Any
) -> ShuffleNetV2:
    """
    Constructs a ShuffleNetV2 architecture with 1.5x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    Args:
        weights (:class:`~torchvision.models.ShuffleNet_V2_X1_5_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ShuffleNet_V2_X1_5_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ShuffleNet_V2_X1_5_Weights
        :members:
    """
    weights = ShuffleNet_V2_X1_5_Weights.verify(weights)

    return _shufflenetv2(weights, progress, [4, 8, 4], [24, 176, 352, 704, 1024], **kwargs)


@register_model()
@handle_legacy_interface(weights=("pretrained", ShuffleNet_V2_X2_0_Weights.IMAGENET1K_V1))
def shufflenet_v2_x2_0(
    *, weights: Optional[ShuffleNet_V2_X2_0_Weights] = None, progress: bool = True, **kwargs: Any
) -> ShuffleNetV2:
    """
    Constructs a ShuffleNetV2 architecture with 2.0x output channels, as described in
    `ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design
    <https://arxiv.org/abs/1807.11164>`__.

    Args:
        weights (:class:`~torchvision.models.ShuffleNet_V2_X2_0_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.ShuffleNet_V2_X2_0_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.shufflenetv2.ShuffleNetV2``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/shufflenetv2.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ShuffleNet_V2_X2_0_Weights
        :members:
    """
    weights = ShuffleNet_V2_X2_0_Weights.verify(weights)

    return _shufflenetv2(weights, progress, [4, 8, 4], [24, 244, 488, 976, 2048], **kwargs)

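
# The four builders above differ only in the stage configuration they hand to the
# ShuffleNetV2 base class. A custom variant can be constructed directly in the same
# way (sketch; the repeat/channel lists below are the 0.5x configuration and
# num_classes=10 is just an example value):
#
#   model = ShuffleNetV2(
#       stages_repeats=[4, 8, 4],
#       stages_out_channels=[24, 48, 96, 192, 1024],
#       num_classes=10,
#   )
#   logits = model(torch.rand(2, 3, 224, 224))  # -> shape (2, 10)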