o
    )iF                     @   s  d dl mZ d dlmZmZmZmZmZ d dlZd dl	m
Z
 d dlmZ d dlmZmZmZmZmZmZmZ ddlmZ dd	lmZmZmZ dd
lmZ ddlmZmZ ddlm Z m!Z!m"Z" g dZ#G dd deZ$G dd deZ%G dd deZ&deee$e%f  dee' dee de(de(dede&fddZ)dedd d!d"Z*G d#d$ d$eZ+G d%d& d&eZ,G d'd( d(eZ-G d)d* d*eZ.ed+d,ed-d.d/ fd0dd1d2d3deee+ef  de(de(dede&f
d4d5Z/ed6d,ed-d7d/ fd0dd1d2d3deee,ef  de(de(dede&f
d8d9Z0ed:d,ed-d;d/ fd0dd1d2d3deee-ef  de(de(dede&f
d<d=Z1ed>d,ed-d?d/ fd0dd1d2d3deee.ef  de(de(dede&f
d@dAZ2dS )B    )partial)AnyListOptionalTypeUnionN)Tensor)
BasicBlock
BottleneckResNetResNet18_WeightsResNet50_WeightsResNeXt101_32X8D_WeightsResNeXt101_64X4D_Weights   )ImageClassification   )register_modelWeightsWeightsEnum)_IMAGENET_CATEGORIES)_ovewrite_named_paramhandle_legacy_interface   )_fuse_modules_replace_reluquantize_model)	QuantizableResNetResNet18_QuantizedWeightsResNet50_QuantizedWeights!ResNeXt101_32X8D_QuantizedWeights!ResNeXt101_64X4D_QuantizedWeightsresnet18resnet50resnext101_32x8dresnext101_64x4dc                       T   e Zd Zdededdf fddZdedefdd	Zdd
ee ddfddZ	  Z
S )QuantizableBasicBlockargskwargsreturnNc                    s$   t  j|i | tjj | _d S N)super__init__torchnn	quantizedFloatFunctionaladd_reluselfr(   r)   	__class__ u/var/www/html/eduruby.in/lip-sync/lip-sync-env/lib/python3.10/site-packages/torchvision/models/quantization/resnet.pyr-   &   s   zQuantizableBasicBlock.__init__xc                 C   s\   |}|  |}| |}| |}| |}| |}| jd ur%| |}| j||}|S r+   )conv1bn1reluconv2bn2
downsampler2   r4   r9   identityoutr7   r7   r8   forward*   s   






zQuantizableBasicBlock.forwardis_qatc                 C   s@   t | g dddgg|dd | jrt | jddg|dd d S d S )Nr:   r;   r<   r=   r>   TZinplace01r   r?   r4   rD   r7   r7   r8   
fuse_model;   s   z QuantizableBasicBlock.fuse_modelr+   __name__
__module____qualname__r   r-   r   rC   r   boolrK   __classcell__r7   r7   r5   r8   r'   %   s     r'   c                       r&   )QuantizableBottleneckr(   r)   r*   Nc                    s>   t  j|i | tj | _tjdd| _tjdd| _d S )NFrF   )	r,   r-   r/   r0   r1   skip_add_reluZReLUrelu1relu2r3   r5   r7   r8   r-   B   s   zQuantizableBottleneck.__init__r9   c                 C   sz   |}|  |}| |}| |}| |}| |}| |}| |}| |}| jd ur4| |}| j	
||}|S r+   )r:   r;   rT   r=   r>   rU   conv3bn3r?   rS   r2   r@   r7   r7   r8   rC   H   s   









zQuantizableBottleneck.forwardrD   c                 C   sF   t | g dg dddgg|dd | jr!t | jddg|dd d S d S )	N)r:   r;   rT   )r=   r>   rU   rV   rW   TrF   rG   rH   rI   rJ   r7   r7   r8   rK   Z   s   z QuantizableBottleneck.fuse_modelr+   rL   r7   r7   r5   r8   rR   A   s     rR   c                       r&   )r   r(   r)   r*   Nc                    s2   t  j|i | tjj | _tjj | _d S r+   )	r,   r-   r.   ZaoZquantizationZ	QuantStubquantZDeQuantStubdequantr3   r5   r7   r8   r-   c   s   zQuantizableResNet.__init__r9   c                 C   s"   |  |}| |}| |}|S r+   )rX   Z_forward_implrY   )r4   r9   r7   r7   r8   rC   i   s   


zQuantizableResNet.forwardrD   c                 C   sH   t | g d|dd |  D ]}t|tu st|tu r!|| qdS )a  Fuse conv/bn/relu modules in resnet models

        Fuse conv+bn+relu/ Conv+relu/conv+Bn modules to prepare for quantization.
        Model is modified in place.  Note that this operation does not change numerics
        and the model after modification is in floating point
        rE   TrF   N)r   modulestyperR   r'   rK   )r4   rD   mr7   r7   r8   rK   r   s   
zQuantizableResNet.fuse_modelr+   rL   r7   r7   r5   r8   r   b   s     	r   blocklayersweightsprogressquantizer)   r*   c                 K   s   |d urt |dt|jd  d|jv rt |d|jd  |dd}t| |fi |}t| |r7t|| |d urE||j|dd |S )NZnum_classes
categoriesbackendfbgemmT)r`   Z
check_hash)	r   lenmetapopr   r   r   Zload_state_dictZget_state_dict)r]   r^   r_   r`   ra   r)   rc   modelr7   r7   r8   _resnet   s   

ri   )r   r   rd   zdhttps://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-modelsz
        These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
        weights listed below.
    )Zmin_sizerb   rc   recipeZ_docsc                
   @   sF   e Zd Zedeeddi edejddddid	d
ddZ	e	Z
dS )r   zJhttps://download.pytorch.org/models/quantized/resnet18_fbgemm_16fa66dd.pth   	crop_sizei(^ ImageNet-1KgV-_Q@g r8V@zacc@1zacc@5g/$?g`"y&@
num_paramsunquantized_metrics_ops
_file_sizeurlZ
transformsrf   N)rM   rN   rO   r   r   r   _COMMON_METAr   IMAGENET1K_V1IMAGENET1K_FBGEMM_V1DEFAULTr7   r7   r7   r8   r      s&    
r   c                
   @   ~   e Zd Zedeeddi edejddddid	d
ddZ	edeedddi edej
ddddid	dddZeZdS )r   zJhttps://download.pytorch.org/models/quantized/resnet50_fbgemm_bf931d71.pthrk   rl   i(rn   g{GR@gjt4W@ro   gB`"[@gM8@rp   rv   zJhttps://download.pytorch.org/models/quantized/resnet50_fbgemm-23753f79.pth   rm   Zresize_sizeg5^IT@gX9vW@g8@N)rM   rN   rO   r   r   r   rx   r   ry   rz   IMAGENET1K_V2IMAGENET1K_FBGEMM_V2r{   r7   r7   r7   r8   r      H    
r   c                
   @   r|   )r    zQhttps://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm_09835ccf.pthrk   rl   i(Jrn   gvS@gQW@ro   gDli0@gV-U@rp   rv   zQhttps://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm-ee16d00c.pthr}   r~   g~jT@g rX@gzGU@N)rM   rN   rO   r   r   r   rx   r   ry   rz   r   r   r{   r7   r7   r7   r8   r       r   r    c                   @   sJ   e Zd Zedeedddi eddejddd	d
iddddZ	e	Z
dS )r!   zRhttps://download.pytorch.org/models/quantized/resnext101_64x4d_fbgemm-605a1cb3.pthrk   r}   r~   i(mz+https://github.com/pytorch/vision/pull/5935rn   gxT@g/X@ro   gQ.@g$cT@)rq   rj   rr   rs   rt   ru   rv   N)rM   rN   rO   r   r   r   rx   r   ry   rz   r{   r7   r7   r7   r8   r!     s(    r!   Zquantized_resnet18)nameZ
pretrainedc                 C      |  ddr	tjS tjS Nra   F)getr   rz   r   ry   r)   r7   r7   r8   <lambda>     
r   )r_   TF)r_   r`   ra   c                 K   .   |rt nt| } ttg d| ||fi |S )a  ResNet-18 model from
    `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`_

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ResNet18_QuantizedWeights` or :class:`~torchvision.models.ResNet18_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ResNet18_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        quantize (bool, optional): If True, return a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ResNet18_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ResNet18_Weights
        :members:
        :noindex:
    )r   r   r   r   )r   r   verifyri   r'   r_   r`   ra   r)   r7   r7   r8   r"        -r"   Zquantized_resnet50c                 C   r   r   )r   r   rz   r   ry   r   r7   r7   r8   r   Q  r   c                 K   r   )a  ResNet-50 model from
    `Deep Residual Learning for Image Recognition <https://arxiv.org/abs/1512.03385>`_

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ResNet50_QuantizedWeights` or :class:`~torchvision.models.ResNet50_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ResNet50_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        quantize (bool, optional): If True, return a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ResNet50_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ResNet50_Weights
        :members:
        :noindex:
    )r         r   )r   r   r   ri   rR   r   r7   r7   r8   r#   M  r   r#   Zquantized_resnext101_32x8dc                 C   r   r   )r   r    rz   r   ry   r   r7   r7   r8   r     r   c                 K   F   |rt nt| } t|dd t|dd ttg d| ||fi |S )a  ResNeXt-101 32x8d model from
    `Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ResNeXt101_32X8D_QuantizedWeights` or :class:`~torchvision.models.ResNeXt101_32X8D_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ResNet101_32X8D_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        quantize (bool, optional): If True, return a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ResNeXt101_32X8D_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ResNeXt101_32X8D_Weights
        :members:
        :noindex:
    groups    width_per_group   r   r      r   )r    r   r   r   ri   rR   r   r7   r7   r8   r$        -r$   Zquantized_resnext101_64x4dc                 C   r   r   )r   r!   rz   r   ry   r   r7   r7   r8   r     r   c                 K   r   )a  ResNeXt-101 64x4d model from
    `Aggregated Residual Transformation for Deep Neural Networks <https://arxiv.org/abs/1611.05431>`_

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.ResNeXt101_64X4D_QuantizedWeights` or :class:`~torchvision.models.ResNeXt101_64X4D_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.ResNet101_64X4D_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        quantize (bool, optional): If True, return a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableResNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/resnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.ResNeXt101_64X4D_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.ResNeXt101_64X4D_Weights
        :members:
        :noindex:
    r   @   r   r   r   )r!   r   r   r   ri   rR   r   r7   r7   r8   r%     r   r%   )3	functoolsr   typingr   r   r   r   r   r.   Ztorch.nnr/   r   Ztorchvision.models.resnetr	   r
   r   r   r   r   r   Ztransforms._presetsr   Z_apir   r   r   _metar   _utilsr   r   utilsr   r   r   __all__r'   rR   r   intrP   ri   rx   r   r   r    r!   r"   r#   r$   r%   r7   r7   r7   r8   <module>   s    $
!
&&
)
)
+
