import warnings
from functools import partial
from typing import Any, List, Optional, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torchvision.models import inception as inception_module
from torchvision.models.inception import Inception_V3_Weights, InceptionOutputs

from ...transforms._presets import ImageClassification
from .._api import register_model, Weights, WeightsEnum
from .._meta import _IMAGENET_CATEGORIES
from .._utils import _ovewrite_named_param, handle_legacy_interface
from .utils import _fuse_modules, _replace_relu, quantize_model


__all__ = [
    "QuantizableInception3",
    "Inception_V3_QuantizedWeights",
    "inception_v3",
]


class QuantizableBasicConv2d(inception_module.BasicConv2d):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        self.relu = nn.ReLU()

    def forward(self, x: Tensor) -> Tensor:
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        _fuse_modules(self, ["conv", "bn", "relu"], is_qat, inplace=True)


class QuantizableInceptionA(inception_module.InceptionA):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        outputs = self._forward(x)
        return self.myop.cat(outputs, 1)


class QuantizableInceptionB(inception_module.InceptionB):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        outputs = self._forward(x)
        return self.myop.cat(outputs, 1)


class QuantizableInceptionC(inception_module.InceptionC):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        outputs = self._forward(x)
        return self.myop.cat(outputs, 1)


class QuantizableInceptionD(inception_module.InceptionD):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, conv_block=QuantizableBasicConv2d, **kwargs)
        self.myop = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        outputs = self._forward(x)
        return self.myop.cat(outputs, 1)


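# QuantizableInceptionE below differs from the A-D blocks above: it concatenates
# in three places (the 3x3 branch, the double-3x3 branch, and the final merge of
# all branches), so it carries three FloatFunctional instances and overrides
# _forward so that each concatenation goes through one of them and can be
# observed and quantized.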
dZ  Z	S )QuantizableInceptionEr   r   r   Nc                    s>   t  j|dti| tj | _tj | _tj | _d S r8   )	r    r!   r   r"   r:   r;   myop1myop2myop3r$   r&   r(   r)   r!   W   s   zQuantizableInceptionE.__init__r*   c                 C   s   |  |}| |}| || |g}| j|d}| |}| |}| || 	|g}| j
|d}tj|dddd}| |}||||g}|S )Nr   r   )Zkernel_sizeZstridepadding)	branch1x1Zbranch3x3_1Zbranch3x3_2aZbranch3x3_2brL   rB   Zbranch3x3dbl_1Zbranch3x3dbl_2Zbranch3x3dbl_3aZbranch3x3dbl_3brM   FZ
avg_pool2dbranch_pool)r%   r*   rP   Z	branch3x3Zbranch3x3dblrR   rD   r(   r(   r)   rA   ]   s   




zQuantizableInceptionE._forwardc                 C   r>   r?   )rA   rN   rB   rC   r(   r(   r)   r-   r   rE   zQuantizableInceptionE.forward)
r0   r1   r2   r   r!   r   r   rA   r-   r4   r(   r(   r&   r)   rK   U   s    rK   c                       s*   e Zd Zdededdf fddZ  ZS )QuantizableInceptionAuxr   r   r   Nc                    s   t  j|dti| d S r8   )r    r!   r   r$   r&   r(   r)   r!   y   s   z QuantizableInceptionAux.__init__)r0   r1   r2   r   r!   r4   r(   r(   r&   r)   rS   w   s    "rS   c                       sT   e Zd Zdededdf fddZdedefdd	Zdd
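# The top-level quantizable model plugs the Quantizable* blocks into the stock
# Inception3 graph and brackets the computation with QuantStub/DeQuantStub, so
# the converted network runs with quantized tensors between the two stubs while
# still accepting and returning ordinary float tensors.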
ee	 ddfddZ
  ZS )r   r   r   r   Nc              
      sD   t  j|dtttttttgi| t	j
j | _t	j
j | _d S )NZinception_blocks)r    r!   r   r6   rH   rI   rJ   rK   rS   torchZaoZquantizationZ	QuantStubquantZDeQuantStubdequantr$   r&   r(   r)   r!   ~   s    zQuantizableInception3.__init__r*   c                 C   sf   |  |}| |}| |\}}| |}| jo| j}tj r-|s(t	
d t||S | ||S )NzIScripted QuantizableInception3 always returns QuantizableInception3 Tuple)Z_transform_inputrU   rA   rV   Ztraining
aux_logitsrT   ZjitZis_scriptingwarningswarnr
   Zeager_outputs)r%   r*   ZauxZaux_definedr(   r(   r)   r-      s   





zQuantizableInception3.forwardr.   c                 C   s(   |   D ]}t|tu r|| qdS )a  Fuse conv/bn/relu modules in inception model

        Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization.
        Model is modified in place.  Note that this operation does not change numerics
        and the model after modification is in floating point
        """
        for m in self.modules():
            if type(m) is QuantizableBasicConv2d:
                m.fuse_model(is_qat)


class Inception_V3_QuantizedWeights(WeightsEnum):
    IMAGENET1K_FBGEMM_V1 = Weights(
        url="https://download.pytorch.org/models/quantized/inception_v3_google_fbgemm-a2837893.pth",
        transforms=partial(ImageClassification, crop_size=299, resize_size=342),
        meta={
            "num_params": 27161264,
            "min_size": (75, 75),
            "categories": _IMAGENET_CATEGORIES,
            "backend": "fbgemm",
            "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#post-training-quantized-models",
            "unquantized": Inception_V3_Weights.IMAGENET1K_V1,
            "_metrics": {
                "ImageNet-1K": {
                    "acc@1": 77.176,
                    "acc@5": 93.354,
                }
            },
            "_ops": 5.713,
            "_file_size": 23.146,
            "_docs": """
                These weights were produced by doing Post Training Quantization (eager mode) on top of the unquantized
                weights listed below.
            """,
        },
    )
    DEFAULT = IMAGENET1K_FBGEMM_V1


@register_model(name="quantized_inception_v3")
@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: Inception_V3_QuantizedWeights.IMAGENET1K_FBGEMM_V1
        if kwargs.get("quantize", False)
        else Inception_V3_Weights.IMAGENET1K_V1,
    )
)
def inception_v3(
    *,
    weights: Optional[Union[Inception_V3_QuantizedWeights, Inception_V3_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableInception3:
    """Inception v3 model architecture from
    `Rethinking the Inception Architecture for Computer Vision <http://arxiv.org/abs/1512.00567>`__.

    .. note::
        **Important**: In contrast to the other models the inception_v3 expects tensors with a size of
        N x 3 x 299 x 299, so ensure your images are sized accordingly.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.Inception_V3_QuantizedWeights` or :class:`~torchvision.models.Inception_V3_Weights`, optional): The pretrained
            weights for the model. See
            :class:`~torchvision.models.quantization.Inception_V3_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr.
            Default is True.
        quantize (bool, optional): If True, return a quantized version of the model.
            Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableInception3``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/inception.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.quantization.Inception_V3_QuantizedWeights
        :members:

    .. autoclass:: torchvision.models.Inception_V3_Weights
        :members:
        :noindex:
    """
    weights = (Inception_V3_QuantizedWeights if quantize else Inception_V3_Weights).verify(weights)

    original_aux_logits = kwargs.get("aux_logits", False)
    if weights is not None:
        if "transform_input" not in kwargs:
            _ovewrite_named_param(kwargs, "transform_input", True)
        _ovewrite_named_param(kwargs, "aux_logits", True)
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))
        if "backend" in weights.meta:
            _ovewrite_named_param(kwargs, "backend", weights.meta["backend"])
    backend = kwargs.pop("backend", "fbgemm")

    model = QuantizableInception3(**kwargs)
    _replace_relu(model)
    if quantize:
        quantize_model(model, backend)

    if weights is not None:
        if quantize and not original_aux_logits:
            model.aux_logits = False
            model.AuxLogits = None
        model.load_state_dict(weights.get_state_dict(progress=progress, check_hash=True))
        if not quantize and not original_aux_logits:
            model.aux_logits = False
            model.AuxLogits = None

    return model
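

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the module's public surface):
# one plausible way to load the post-training-quantized weights and run CPU
# inference, plus a rough outline of quantizing the float model yourself with
# the eager-mode fuse/prepare/calibrate/convert flow. Assumes an x86 build with
# the fbgemm engine available; the random "calibration" tensor stands in for
# real representative data.
if __name__ == "__main__":
    # 1) Ready-made int8 model with pretrained quantized weights.
    qmodel = inception_v3(weights=Inception_V3_QuantizedWeights.DEFAULT, quantize=True)
    qmodel.eval()
    with torch.no_grad():
        out = qmodel(torch.rand(1, 3, 299, 299))  # inception_v3 expects N x 3 x 299 x 299
    print(out.shape)  # torch.Size([1, 1000])

    # 2) Manual eager-mode post-training quantization of the float model (sketch).
    torch.backends.quantized.engine = "fbgemm"
    fmodel = inception_v3(weights=Inception_V3_Weights.IMAGENET1K_V1, quantize=False)
    fmodel.eval()
    fmodel.fuse_model(is_qat=False)  # fold conv+bn+relu, see QuantizableInception3.fuse_model
    fmodel.qconfig = torch.ao.quantization.get_default_qconfig("fbgemm")
    torch.ao.quantization.prepare(fmodel, inplace=True)
    with torch.no_grad():
        fmodel(torch.rand(4, 3, 299, 299))  # calibration pass; use real data in practice
    torch.ao.quantization.convert(fmodel, inplace=True)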