# torch/jit/quantized.py -- deprecated TorchScript dynamic-quantization wrappers.
# Each class below wraps an existing float module, quantizes (or fp16-packs) its
# weights through the fbgemm operators, and warns that the maintained replacements
# live under torch.ao.nn.quantized.dynamic.

import warnings
from typing import List, Optional, Tuple

import torch
from torch import _VF, Tensor
from torch.nn.utils.rnn import PackedSequence
dd Z  ZS )QuantizedLinearscale
zero_pointc                    s   t    td |j| _|j| _t|jj	tj
d \| _| _| _| _tjj| jdd| _tjj| jdd| _|jd usDJ dtjj|jj	tj
d dd| _| dt| jj	tj
d d S )Nztorch.jit.QuantizedLinear is deprecated and will be removed in an upcoming PyTorch release. Please use the torch.ao.nn.quantized.dynamic.Linear instead.Zmemory_formatFZrequires_gradzQuantizedLinear requires a biaspacked_tensor_ptr)super__init__warningswarnin_featuresout_featurestorchfbgemm_linear_quantize_weightweightclonecontiguous_formatfloatcol_offsetsr	   r
   nn	Parameterbiasregister_bufferfbgemm_pack_quantized_matrixselfother	__class__ b/var/www/html/eduruby.in/lip-sync/lip-sync-env/lib/python3.10/site-packages/torch/jit/quantized.pyr      s6   
zQuantizedLinear.__init__c                 C      | j t| j d S N)r   set_r   r   r   r!   r%   r%   r&   _unpack.   s   zQuantizedLinear._unpackc                 C   .   | j tjtjtt g tjd	  d S N)dtype)
r   r)   r   zerosjitannotater   intuint8detachr*   r%   r%   r&   _pack2       zQuantizedLinear._packc              	   C   s2   t | | j| j| j| j| j| j}|	|j
S r(   )r   Z)fbgemm_linear_int8_weight_fp32_activationr   r   r   r   r	   r
   r   tor.   r!   inputoutr%   r%   r&   forward8   s   	zQuantizedLinear.forwardc                 C      dj di | j}|S )Nz^in_features={in_features}, out_features={out_features}, scale={scale}, zero_point={zero_point}r%   format__dict__r!   reprr%   r%   r&   
extra_reprE   s   zQuantizedLinear.extra_repr)__name__
__module____qualname____constants__r   r   r0   script_methodr+   r5   r;   rB   __classcell__r%   r%   r#   r&   r   
   s    !


r   c                       sT   e Zd Z fddZejjdd Zejjdd Zejjdd Z	d	d
 Z
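# Illustrative sketch (not part of the original module): how QuantizedLinear is
# meant to be used -- wrap an existing float torch.nn.Linear so its weight is held
# as int8 while activations stay fp32. The helper name and the shapes below are
# made up for the example, and an fbgemm-capable (x86) build of PyTorch is assumed.
def _example_quantized_linear():
    float_linear = torch.nn.Linear(128, 64)        # ordinary fp32 layer
    qlinear = QuantizedLinear(float_linear)        # weight quantized to int8
    out = qlinear(torch.randn(4, 128))             # fp32 activations in and out
    return out.shape                               # torch.Size([4, 64])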
  ZS )QuantizedLinearFP16c                    s   t    td |j| _|j| _|j| _t	|jj
tjd | _|jd us-J dtjj|jj
tjd dd| _| d| j d S )Nztorch.jit.QuantizedLinearFP16 is deprecated and will be removed in an upcoming PyTorch release. Please use the torch.ao.nn.quantized.dynamic.Linear instead.r   z#QuantizedLinearFP16 requires a biasFr   packed_weight)r   r   r   r   r   r   r   original_weightr   fbgemm_pack_gemm_matrix_fp16r   r   r   r   r   r   r   r    r#   r%   r&   r   O   s    
zQuantizedLinearFP16.__init__c                 C   r'   r(   )rJ   r)   r   rL   rK   r*   r%   r%   r&   r+   b   s   
zQuantizedLinearFP16._unpackc                 C   r,   r-   )
rJ   r)   r   r/   r0   r1   r   r2   r3   r4   r*   r%   r%   r&   r5   h   r6   zQuantizedLinearFP16._packc                 C   s   t | | j| j}|S r(   )r   Z)fbgemm_linear_fp16_weight_fp32_activationr   rJ   r   r8   r%   r%   r&   r;   n   s   zQuantizedLinearFP16.forwardc                 C   r<   )Nz8in_features={in_features}, out_features={out_features}, r%   r=   r@   r%   r%   r&   rB   u   s   zQuantizedLinearFP16.extra_repr)rC   rD   rE   r   r   r0   rG   r+   r5   r;   rB   rH   r%   r%   r#   r&   rI   N   s    


rI   c                
       s   e Zd Zg dZ fddZdd Zejjdd Z	ejj	dd	e
d
e
deddfddZejjdd Zejjdd Z  ZS )QuantizedRNNCellBase)
input_sizehidden_sizer   scale_hhscale_ihzero_point_ihzero_point_hhc                    s.  t    td |j| _|j| _|j| _| jstdt	|j
jtjd \}}| _| _| d| | d| t	|jjtjd \}}| _| _| d| | d| t| j
}| d| t| j}| d	| tjj|jjtjd d
d| _tjj|jjtjd d
d| _d S )Nztorch.jit.QuantizedRNNCellBase is deprecated and will be removed in an upcoming PyTorch release. Please use the torch.ao.nn.quantized.dynamic.RNNCell instead.z&Quantized RNN cells require bias termsr   	weight_ihcol_offsets_ih	weight_hhcol_offsets_hh	packed_ih	packed_hhFr   )r   r   r   r   rN   rO   r   
ValueErrorr   r   rT   r   r   r   rQ   rR   r   rV   rP   rS   r   r   r   bias_ihbias_hh)r!   r"   rT   rU   rV   rW   rX   rY   r#   r%   r&   r      sR   
zQuantizedRNNCellBase.__init__c                 C   sN   d}d| j v r| jdur|d7 }d| j v r| jdkr|d7 }|jdi | j S )	Nz{input_size}, {hidden_size}r   Tz, bias={bias}nonlinearitytanhz, nonlinearity={nonlinearity}r%   )r?   r   r]   r>   )r!   sr%   r%   r&   rB      s   zQuantizedRNNCellBase.extra_reprc                 C   s0   | d| jkrtd| d d| j d S )N   z'input has inconsistent input_size: got , expected )sizerN   RuntimeError)r!   r9   r%   r%   r&   check_forward_input   s
   z(QuantizedRNNCellBase.check_forward_input r9   hxhidden_labelreturnNc              	   C   sp   | d| dkrtd| d d| d| d | d| jkr6td| d| d d| j d S )	Nr   zInput batch size z doesn't match hiddenz batch size r`   hiddenz# has inconsistent hidden_size: got ra   )rb   rc   rO   )r!   r9   rf   rg   r%   r%   r&   check_forward_hidden   s    z)QuantizedRNNCellBase.check_forward_hiddenc                 C   s,   | j t| j | jt| j d S r(   )rX   r)   r   r   rT   rY   rV   r*   r%   r%   r&   r+      s   zQuantizedRNNCellBase._unpackc                 C   sX   | j tjtjtt g tjd	  | j
tjtjtt g tjd	  d S r-   )rX   r)   r   r/   r0   r1   r   r2   r3   r4   rY   r*   r%   r%   r&   r5      s     zQuantizedRNNCellBase._pack)re   )rC   rD   rE   rF   r   rB   r   r0   rG   rd   r   strrj   r+   r5   rH   r%   r%   r#   r&   rM   }   s*    
0

rM   c                       sH   e Zd Zg dZ fddZejjd
dede	e defdd	Z
  ZS )QuantizedRNNCell)rN   rO   r   rP   rQ   rR   rS   r]   c                    s"   t  | td |j| _d S )Nztorch.jit.QuantizedRNNCell is deprecated and will be removed in an upcoming PyTorch release. Please use the torch.ao.nn.quantized.dynamic.RNNCell instead.)r   r   r   r   r]   r    r#   r%   r&   r      s
   zQuantizedRNNCell.__init__Nr9   rf   rh   c                 C   s   |  | |d u rtj|d| j|j|jd}| ||d | jdkrDt	
||| j| j| j| j| j| j| j| j| j| j| j| j}|S | jdkrit	||| j| j| j| j| j| j| j| j| j| j| j| j}|S |}td| j )Nr   r.   devicere   r^   ZreluzUnknown nonlinearity: )rd   r   r/   rb   rO   r.   rn   rj   r]   r   Zquantized_rnn_tanh_cellrT   rV   r[   r\   rX   rY   rU   rW   rQ   rP   rR   rS   Zquantized_rnn_relu_cellrc   )r!   r9   rf   retr%   r%   r&   r;      sX   

$
zQuantizedRNNCell.forwardr(   )rC   rD   rE   rF   r   r   r0   rG   r   r   r;   rH   r%   r%   r#   r&   rl      s
    &rl   c                
       sR   e Zd Z fddZejj	d	dedee	eef  de	eef fddZ
  ZS )
class QuantizedLSTMCell(QuantizedRNNCellBase):
    def __init__(self, other):
        super().__init__(other)
        warnings.warn(
            "torch.jit.QuantizedLSTMCell is deprecated and will be removed in an upcoming "
            "PyTorch release. Please use the torch.ao.nn.quantized.dynamic.LSTMCell instead."
        )

    @torch.jit.script_method
    def forward(
        self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None
    ) -> Tuple[Tensor, Tensor]:
        self.check_forward_input(input)
        if hx is None:
            zeros = torch.zeros(
                input.size(0), self.hidden_size, dtype=input.dtype, device=input.device
            )
            hx = (zeros, zeros)
        self.check_forward_hidden(input, hx[0], "[0]")
        self.check_forward_hidden(input, hx[1], "[1]")
        return _VF.quantized_lstm_cell(
            input, hx,
            self.weight_ih, self.weight_hh, self.bias_ih, self.bias_hh,
            self.packed_ih, self.packed_hh,
            self.col_offsets_ih, self.col_offsets_hh,
            self.scale_ih, self.scale_hh,
            self.zero_point_ih, self.zero_point_hh,
        )
class QuantizedGRUCell(QuantizedRNNCellBase):
    def __init__(self, other):
        super().__init__(other)
        warnings.warn(
            "torch.jit.QuantizedGRUCell is deprecated and will be removed in an upcoming "
            "PyTorch release. Please use the torch.ao.nn.quantized.dynamic.GRUCell instead."
        )

    @torch.jit.script_method
    def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
        self.check_forward_input(input)
        if hx is None:
            hx = torch.zeros(
                input.size(0), self.hidden_size, dtype=input.dtype, device=input.device
            )
        self.check_forward_hidden(input, hx, "")
        return _VF.quantized_gru_cell(
            input, hx,
            self.weight_ih, self.weight_hh, self.bias_ih, self.bias_hh,
            self.packed_ih, self.packed_hh,
            self.col_offsets_ih, self.col_offsets_hh,
            self.scale_ih, self.scale_hh,
            self.zero_point_ih, self.zero_point_hh,
        )


def apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
    return tensor.index_select(dim, permutation)


class QuantizedRNNBase(torch.jit.ScriptModule):
    __constants__ = [
        "mode",
        "input_size",
        "hidden_size",
        "num_layers",
        "bias",
        "batch_first",
        "dropout",
        "bidirectional",
        "dtype",
    ]

    def __init__(self, other, dtype=torch.int8):
        super().__init__()
        warnings.warn(
            "torch.jit.QuantizedRNNBase is deprecated and will be removed in an upcoming "
            "PyTorch release. Please use the torch.ao.nn.quantized.dynamic instead."
        )
        self.mode = other.mode
        self.input_size = other.input_size
        self.hidden_size = other.hidden_size
        self.num_layers = other.num_layers
        self.bias = other.bias
        self.batch_first = other.batch_first
        if self.mode != "GRU":
            assert not self.batch_first
        self.dropout = other.dropout
        self.bidirectional = other.bidirectional
        num_directions = 2 if self.bidirectional else 1
        self.dtype = dtype

        assert self.bias

        if self.mode != "LSTM" and self.mode != "GRU":
            raise RuntimeError("Only LSTM or GRU is supported for QuantizedRNN")

        if dtype != torch.int8 and dtype != torch.float16:
            raise RuntimeError(f"Unsupported dtype: {dtype}")

        # Build one packed cell-parameter object per layer and direction.
        self.all_weights = []
        for layer in range(self.num_layers):
            for direction in range(num_directions):
                layer_input_size = (
                    self.input_size if layer == 0 else self.hidden_size * num_directions
                )
                suffix = "_reverse" if direction == 1 else ""

                def get_weight_bias(ihhh):
                    weight_name = f"weight_{ihhh}_l{layer}{suffix}"
                    bias_name = f"bias_{ihhh}_l{layer}{suffix}"
                    weight = getattr(other, weight_name)
                    bias = getattr(other, bias_name)
                    return weight, bias

                weight_ih, bias_ih = get_weight_bias("ih")
                weight_hh, bias_hh = get_weight_bias("hh")

                if dtype == torch.int8:
                    cell_params = torch.ops.quantized.make_quantized_cell_params(
                        weight_ih, weight_hh, bias_ih, bias_hh
                    )
                else:
                    packed_ih = torch.ops.quantized.linear_prepack_fp16(
                        weight_ih.float(), bias_ih
                    )
                    packed_hh = torch.ops.quantized.linear_prepack_fp16(
                        weight_hh.float(), bias_hh
                    )
                    cell_params = torch.ops.quantized.make_quantized_cell_params_fp16(
                        packed_ih, packed_hh
                    )

                setattr(self, f"cell_params_{layer}_{suffix}", cell_params)
                self.all_weights.append(cell_params)

    @torch.jit.script_method
    def check_input(self, input: Tensor, batch_sizes: Optional[Tensor]) -> None:
        expected_input_dim = 2 if batch_sizes is not None else 3
        if input.dim() != expected_input_dim:
            raise RuntimeError(
                f"input must have {expected_input_dim} dimensions, got {input.dim()}"
            )
        if self.input_size != input.size(-1):
            raise RuntimeError(
                f"input.size(-1) must be equal to input_size. "
                f"Expected {self.input_size}, got {input.size(-1)}"
            )

    @torch.jit.script_method
    def get_expected_hidden_size(
        self, input: Tensor, batch_sizes: Optional[Tensor]
    ) -> Tuple[int, int, int]:
        if batch_sizes is not None:
            mini_batch = int(batch_sizes[0])
        else:
            mini_batch = input.size(0) if self.batch_first else input.size(1)
        num_directions = 2 if self.bidirectional else 1
        expected_hidden_size = (
            self.num_layers * num_directions,
            mini_batch,
            self.hidden_size,
        )
        return expected_hidden_size

    @torch.jit.script_method
    def check_hidden_size(
        self,
        hx: Tensor,
        expected_hidden_size: Tuple[int, int, int],
        msg: str = "Expected hidden size {}, got {}",
    ) -> None:
        if hx.size() != expected_hidden_size:
            raise RuntimeError(msg.format(expected_hidden_size, list(hx.size())))

    @torch.jit.script_method
    def check_forward_args(
        self, input: Tensor, hidden: Tensor, batch_sizes: Optional[Tensor]
    ) -> None:
        self.check_input(input, batch_sizes)
        expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
        self.check_hidden_size(
            hidden, expected_hidden_size, msg="Expected hidden size {}, got {}"
        )

    @torch.jit.script_method
    def permute_hidden(self, hx: Tensor, permutation: Optional[Tensor]) -> Tensor:
        if permutation is None:
            return hx
        return apply_permutation(hx, permutation)


class QuantizedLSTM(QuantizedRNNBase):
    __overloads__ = {"forward": ["forward_packed", "forward_tensor"]}

    def __init__(self, other, dtype):
        super().__init__(other, dtype)
        warnings.warn(
            "torch.jit.QuantizedLSTM is deprecated and will be removed in an upcoming "
            "PyTorch release. Please use the torch.ao.nn.quantized.dynamic.LSTM instead."
        )

    @torch.jit.script_method
    def forward_impl(
        self,
        input: Tensor,
        hx: Optional[Tuple[Tensor, Tensor]],
        batch_sizes: Optional[Tensor],
        max_batch_size: int,
        sorted_indices: Optional[Tensor],
    ) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
        if hx is None:
            num_directions = 2 if self.bidirectional else 1
            zeros = torch.zeros(
                self.num_layers * num_directions,
                max_batch_size,
                self.hidden_size,
                dtype=input.dtype,
                device=input.device,
            )
            hx = (zeros, zeros)
        else:
            # Each batch of the hidden state should match the input sequence
            # the caller passed in.
            hx = self.permute_hidden(hx, sorted_indices)

        self.check_forward_args(input, hx, batch_sizes)
        assert batch_sizes is None
        result = torch.quantized_lstm(
            input, hx, self.all_weights, self.bias, self.num_layers,
            float(self.dropout), self.training, self.bidirectional,
            self.batch_first, dtype=self.dtype, use_dynamic=False,
        )
        output = result[0]
        hidden = result[1:]
        return output, hidden

    @torch.jit.script_method
    def forward_tensor(
        self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None
    ) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
        batch_sizes = None
        max_batch_size = input.size(0) if self.batch_first else input.size(1)
        sorted_indices = None
        unsorted_indices = None

        output, hidden = self.forward_impl(
            input, hx, batch_sizes, max_batch_size, sorted_indices
        )
        return output, self.permute_hidden(hidden, unsorted_indices)

    @torch.jit.script_method
    def forward_packed(
        self, input: PackedSequence, hx: Optional[Tuple[Tensor, Tensor]] = None
    ) -> Tuple[PackedSequence, Tuple[Tensor, Tensor]]:
        input_, batch_sizes, sorted_indices, unsorted_indices = input
        max_batch_size = int(batch_sizes[0])

        output, hidden = self.forward_impl(
            input_, hx, batch_sizes, max_batch_size, sorted_indices
        )
        output = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
        return output, self.permute_hidden(hidden, unsorted_indices)

    @torch.jit.script_method
    def permute_hidden(
        self, hx: Tuple[Tensor, Tensor], permutation: Optional[Tensor]
    ) -> Tuple[Tensor, Tensor]:
        if permutation is None:
            return hx
        return (
            apply_permutation(hx[0], permutation),
            apply_permutation(hx[1], permutation),
        )

    @torch.jit.script_method
    def check_forward_args(
        self,
        input: Tensor,
        hidden: Tuple[Tensor, Tensor],
        batch_sizes: Optional[Tensor],
    ) -> None:
        self.check_input(input, batch_sizes)
        expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
        self.check_hidden_size(
            hidden[0], expected_hidden_size, "Expected hidden[0] size {}, got {}"
        )
        self.check_hidden_size(
            hidden[1], expected_hidden_size, "Expected hidden[1] size {}, got {}"
        )

    def forward(self, input, hx=None):
        if isinstance(input, PackedSequence):
            return self.forward_packed(input, hx)
        else:
            return self.forward_tensor(input, hx)


class QuantizedGRU(QuantizedRNNBase):
    __overloads__ = {"forward": ["forward_packed", "forward_tensor"]}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        warnings.warn(
            "torch.jit.QuantizedGRU is deprecated and will be removed in an upcoming "
            "PyTorch release. Please use the torch.ao.nn.quantized.dynamic.GRU instead."
        )

    @torch.jit.script_method
    def forward_impl(
        self,
        input: Tensor,
        hx: Optional[Tensor],
        batch_sizes: Optional[Tensor],
        max_batch_size: int,
        sorted_indices: Optional[Tensor],
    ) -> Tuple[Tensor, Tensor]:
        if hx is None:
            num_directions = 2 if self.bidirectional else 1
            hx = torch.zeros(
                self.num_layers * num_directions,
                max_batch_size,
                self.hidden_size,
                dtype=input.dtype,
                device=input.device,
            )
        else:
            # Each batch of the hidden state should match the input sequence
            # the caller passed in.
            hx = self.permute_hidden(hx, sorted_indices)

        self.check_forward_args(input, hx, batch_sizes)
        if batch_sizes is None:
            result = torch.quantized_gru(
                input, hx, self.all_weights, self.bias, self.num_layers,
                float(self.dropout), self.training, self.bidirectional,
                self.batch_first,
            )
        else:
            result = torch.quantized_gru(
                input, batch_sizes, hx, self.all_weights, self.bias,
                self.num_layers, float(self.dropout), self.training,
                self.bidirectional,
            )
        output = result[0]
        hidden = result[1]
        return output, hidden

    @torch.jit.script_method
    def forward_tensor(
        self, input: Tensor, hx: Optional[Tensor] = None
    ) -> Tuple[Tensor, Tensor]:
        batch_sizes = None
        max_batch_size = input.size(0) if self.batch_first else input.size(1)
        sorted_indices = None
        unsorted_indices = None

        output, hidden = self.forward_impl(
            input, hx, batch_sizes, max_batch_size, sorted_indices
        )
        return output, self.permute_hidden(hidden, unsorted_indices)

    @torch.jit.script_method
    def forward_packed(
        self, input: PackedSequence, hx: Optional[Tensor] = None
    ) -> Tuple[PackedSequence, Tensor]:
        input_, batch_sizes, sorted_indices, unsorted_indices = input
        max_batch_size = int(batch_sizes[0])
        output, hidden = self.forward_impl(
            input_, hx, batch_sizes, max_batch_size, sorted_indices
        )
        output = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
        return output, self.permute_hidden(hidden, unsorted_indices)

    def forward(self, input, hx=None):
        if isinstance(input, PackedSequence):
            return self.forward_packed(input, hx)
        else:
            return self.forward_tensor(input, hx)


def quantize_rnn_cell_modules(module):
    warnings.warn(
        "quantize_rnn_cell_modules function has been deprecated. "
        "Please use torch.ao.quantization.quantize_dynamic API instead."
    )
    reassign = {}
    for name, mod in module.named_modules():
        if mod is module:
            continue
        new_mod = quantize_rnn_cell_modules(mod)
        if new_mod is not mod:
            reassign[name] = new_mod
    for name, mod in reassign.items():
        setattr(module, name, mod)
    if isinstance(module, torch.nn.LSTMCell):
        return QuantizedLSTMCell(module)
    if isinstance(module, torch.nn.GRUCell):
        return QuantizedGRUCell(module)
    if isinstance(module, torch.nn.RNNCell):
        return QuantizedRNNCell(module)
    return module


def quantize_linear_modules(module, dtype=torch.int8):
    warnings.warn(
        "quantize_linear_modules function has been deprecated. "
        "Please use torch.ao.quantization.quantize_dynamic API instead."
    )
    reassign = {}
    for name, mod in module.named_modules():
        if mod is module:
            continue
        new_mod = quantize_linear_modules(mod, dtype)
        if new_mod is not mod:
            reassign[name] = new_mod
    for name, mod in reassign.items():
        setattr(module, name, mod)
    if isinstance(module, torch.nn.Linear):
        if dtype == torch.int8:
            return QuantizedLinear(module)
        elif dtype == torch.float16:
            return QuantizedLinearFP16(module)
        else:
            raise RuntimeError(f"Unsupported dtype: {dtype}")
    return module


def quantize_rnn_modules(module, dtype=torch.int8):
    warnings.warn(
        "quantize_rnn_modules function has been deprecated. "
        "Please use torch.ao.quantization.quantize_dynamic API instead."
    )
    reassign = {}
    for name, mod in module.named_modules():
        if mod is module:
            continue
        new_mod = quantize_rnn_modules(mod, dtype)
        if new_mod is not mod:
            reassign[name] = new_mod
    for name, mod in reassign.items():
        setattr(module, name, mod)
    if isinstance(module, torch.nn.LSTM):
        if dtype != torch.int8 and dtype != torch.float16:
            raise RuntimeError(f"Unsupported dtype: {dtype}")
        return QuantizedLSTM(module, dtype)
    if isinstance(module, torch.nn.GRU):
        return QuantizedGRU(module, dtype)
    return module