import bisect
import math
import warnings
from fractions import Fraction
from typing import Any, Callable, cast, Dict, List, Optional, Tuple, TypeVar, Union

import torch
from torchvision.io import _probe_video_from_file, _read_video_from_file, read_video, read_video_timestamps

from .utils import tqdm

T = TypeVar("T")


def pts_convert(pts: int, timebase_from: Fraction, timebase_to: Fraction, round_func: Callable = math.floor) -> int:
    """convert pts between different time bases
    Args:
        pts: presentation timestamp, float
        timebase_from: original timebase. Fraction
        timebase_to: new timebase. Fraction
        round_func: rounding function.
    """
    new_pts = Fraction(pts, 1) * timebase_from / timebase_to
    return round_func(new_pts)
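
# Example (illustrative, not part of the original code): a pts of 300 expressed in a
# 1/30 s timebase equals 300 * (1/30) / (1/90000) = 900000 in a 1/90000 s timebase,
# so pts_convert(300, Fraction(1, 30), Fraction(1, 90000)) returns 900000.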


def unfold(tensor: torch.Tensor, size: int, step: int, dilation: int = 1) -> torch.Tensor:
    """
    similar to tensor.unfold, but with the dilation
    and specialized for 1d tensors

    Returns all consecutive windows of `size` elements, with
    `step` between windows. The distance between each element
    in a window is given by `dilation`.
    """
    if tensor.dim() != 1:
        raise ValueError(f"tensor should have 1 dimension instead of {tensor.dim()}")
    o_stride = tensor.stride(0)
    numel = tensor.numel()
    new_stride = (step * o_stride, dilation * o_stride)
    new_size = ((numel - (dilation * (size - 1) + 1)) // step + 1, size)
    if new_size[0] < 1:
        new_size = (0, size)
    return torch.as_strided(tensor, new_size, new_stride)
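
# Example (illustrative): unfold(torch.arange(10), size=3, step=2) returns the windows
# [[0, 1, 2], [2, 3, 4], [4, 5, 6], [6, 7, 8]]; passing dilation=2 instead yields
# [[0, 2, 4], [2, 4, 6], [4, 6, 8]].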


class _VideoTimestampsDataset:
    """
    Dataset used to parallelize the reading of the timestamps
    of a list of videos, given their paths in the filesystem.

    Used in VideoClips and defined at top level, so it can be
    pickled when forking.
    """

    def __init__(self, video_paths: List[str]) -> None:
        self.video_paths = video_paths

    def __len__(self) -> int:
        return len(self.video_paths)

    def __getitem__(self, idx: int) -> Tuple[List[int], Optional[float]]:
        return read_video_timestamps(self.video_paths[idx])


def _collate_fn(x: T) -> T:
    """
    Dummy collate function to be used with _VideoTimestampsDataset
    """
    return x
ee deeee	f  dededededededededdfddZ
d>ddZdeee	f ddfddZedeee	f fddZdee dd fdd Ze	d?d!ejd"ed#ed$ed
ee deejeee ejf f fd%d&Zd?d"ed#ed
ee ddfd'd(Zdefd)d*Zdefd+d,Zdefd-d.Zd/edeeef fd0d1Zed"ed2ed3edeeejf fd4d5Zd/edeejejeee	f ef fd6d7Zdeee	f fd8d9Zd:eee	f ddfd;d<ZdS )@
VideoClipsaC  
    Given a list of video files, computes all consecutive subvideos of size
    `clip_length_in_frames`, where the distance between each subvideo in the
    same video is defined by `frames_between_clips`.
    If `frame_rate` is specified, it will also resample all the videos to have
    the same frame rate, and the clips will refer to this frame rate.

    Creating this instance the first time is time-consuming, as it needs to
    decode all the videos in `video_paths`. It is recommended that you
    cache the results after instantiation of the class.

    Recreating the clips for different clip lengths is fast, and can be done
    with the `compute_clips` method.

    Args:
        video_paths (List[str]): paths to the video files
        clip_length_in_frames (int): size of a clip in number of frames
        frames_between_clips (int): step (in frames) between each clip
        frame_rate (int, optional): if specified, it will resample the video
            so that it has `frame_rate`, and then the clips will be defined
            on the resampled video
        num_workers (int): how many subprocesses to use for data loading.
            0 means that the data will be loaded in the main process. (default: 0)
        output_format (str): The format of the output video tensors. Can be either "THWC" (default) or "TCHW".
       r   Nr   THWCr'   clip_length_in_framesframes_between_clips
frame_rate_precomputed_metadatanum_workers_video_width_video_height_video_min_dimension_video_max_dimension_audio_samples_audio_channelsoutput_formatr   c                 C   s   || _ || _|| _|| _|	| _|
| _|| _|| _| | _	| j	dvr*t
d| d|d u r3|   n| | | ||| d S )N)r?   TCHWz5output_format should be either 'THWC' or 'TCHW', got .)r'   rD   rE   rF   rG   rH   rI   rJ   upperrK   r!   _compute_frame_pts_init_from_metadatacompute_clips)r*   r'   r@   rA   rB   rC   rD   rE   rF   rG   rH   rI   rJ   rK   r   r   r   r+   d   s   



zVideoClips.__init__c                    s   g | _ g | _dd l  jjjt| jd| jt	d}t
t|d0}|D ]$}|d tt| \}} fdd|D }| j | | j| q#W d    d S 1 sSw   Y  d S )Nr   r>   )Z
batch_sizerD   Z
collate_fn)totalr   c                    s   g | ]
} j | jd qS )Zdtype)	as_tensorlong).0cr#   r   r   
<listcomp>   s    z1VideoClips._compute_frame_pts.<locals>.<listcomp>)	video_pts	video_fpsZtorch.utils.datautilsdataZ
DataLoaderr&   r'   rD   r<   r   r/   updatelistzipextend)r*   dlZpbarbatchclipsfpsr   rX   r   rO      s$   
"zVideoClips._compute_frame_ptsmetadatac                 C   sV   |d | _ t| j t|d ksJ |d | _t| j t|d ks$J |d | _d S )Nr'   rZ   r[   )r'   r/   rZ   r[   )r*   rf   r   r   r   rP      s
   

zVideoClips._init_from_metadatac                 C   s   | j | j| jd}|S )Nr'   rZ   r[   rg   )r*   	_metadatar   r   r   rf      s
   zVideoClips.metadataindicesc                    s~    fdd|D } fdd|D } fdd|D }|||d}t  | j j j| j j j j j j	 j
 jdS )Nc                       g | ]} j | qS r   r)   rV   ir0   r   r   rY          z%VideoClips.subset.<locals>.<listcomp>c                    rj   r   )rZ   rk   r0   r   r   rY      rm   c                    rj   r   )r[   rk   r0   r   r   rY      rm   rg   )r@   rA   rB   rC   rD   rE   rF   rG   rH   rI   rJ   rK   )type
num_framesr   rB   rD   rE   rF   rG   rH   rI   rJ   rK   )r*   ri   r'   rZ   r[   rf   r   r0   r   subset   s,   zVideoClips.subsetrZ   ro   r   re   c           	      C   s   |d u rd}|d u r|}t | t||  }ttt|||}| | } t| ||}| s5t	
d t|trE|gt | }||fS t|||}||fS )Nr   zThere aren't enough frames in the current video to get a clip for the given clip length and frames between clips. The video (and potentially others) will be skipped.)r/   r:   r=   _resample_video_idxr9   mathfloorr%   r"   warningswarn
isinstanceslice)	rZ   ro   r   re   rB   Ztotal_framesZ_idxsrd   idxsr   r   r   compute_clips_for_video   s"   
z"VideoClips.compute_clips_for_videoc           	      C   s   || _ || _|| _g | _g | _t| j| jD ]\}}| |||||\}}| j	| | j	| qt
dd | jD }|d | _dS )a  
        Compute all consecutive sequences of clips from video_pts.
        Always returns clips of size `num_frames`, meaning that the
        last few frames in a video can potentially be dropped.

        Args:
            num_frames (int): number of frames for the clip
            step (int): distance between two clips
            frame_rate (int, optional): The frame rate
        c                 S      g | ]}t |qS r   r/   rV   vr   r   r   rY          z,VideoClips.compute_clips.<locals>.<listcomp>r   N)ro   r   rB   rd   resampling_idxsr`   rZ   r[   ry   appendr#   rT   Zcumsumtolistcumulative_sizes)	r*   ro   r   rB   rZ   re   rd   rx   Zclip_lengthsr   r   r   rQ      s   zVideoClips.compute_clipsc                 C   s   |   S r(   )	num_clipsr0   r   r   r   r1      s   zVideoClips.__len__c                 C   r-   r(   r.   r0   r   r   r   
num_videos   r,   zVideoClips.num_videosc                 C   s
   | j d S )zJ
        Number of subclips that are available in the video list.
        )r   r0   r   r   r   r     s   
zVideoClips.num_clipsr2   c                 C   s<   t | j|}|dkr|}||fS || j|d   }||fS )zw
        Converts a flattened representation of the indices into a video_idx, clip_idx
        representation.
        r   r   )bisectbisect_rightr   )r*   r2   	video_idxclip_idxr   r   r   get_clip_location	  s   zVideoClips.get_clip_locationoriginal_fpsnew_fpsc                 C   sP   t || }| rt|}td d |S tj| tjd| }| tj	}|S )NrS   )
r:   
is_integerr9   rw   r#   ZarangeZfloat32rs   toint64)ro   r   r   r   rx   r   r   r   rq     s   zVideoClips._resample_video_idxc                 C   sp  ||   krtd| d|    d| |\}}| j| }| j| | }ddlm} | }|dkr`| jdkr<td| j	dkrEtd| j
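
    # Example (illustrative): _resample_video_idx(4, original_fps=30, new_fps=15)
    # returns slice(None, None, 2) (every other frame), while resampling 30 fps to
    # 12.5 fps gives a non-integer step of 2.4 and returns tensor([0, 2, 4, 7]).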
dkrNtd	| jdkrWtd
| jdkr`td|dkrz|d  }|d  }	t|||	\}
}}ntt|}|j}d}tt|d  }tt|d  }d\}}tdd}t|jj|jj}|jrt|jj|jj}t|||tj}t|||tj}|j}t|| j| j	| j
| j||f|| j| j||f|d\}
}}d|i}|dur||d< | j dur| j!| | }t"|t#j$r||d  }|
| }
| j |d< t%|
| j&ks$J |
j' d| j& | j(dkr2|
)dddd}
|
|||fS )a7  
        Gets a subclip from a list of videos.

        Args:
            idx (int): index of the subclip. Must be between 0 and num_clips().

        Returns:
            video (Tensor)
            audio (Tensor)
            info (Dict)
            video_idx (int): index of the video in `video_paths`
        zIndex z out of range (z number of clips)r   )get_video_backendZpyavz.pyav backend doesn't support _video_width != 0z/pyav backend doesn't support _video_height != 0z6pyav backend doesn't support _video_min_dimension != 0z6pyav backend doesn't support _video_max_dimension != 0z0pyav backend doesn't support _audio_samples != 0r   N)r   r   r   )
Zvideo_widthZvideo_heightZvideo_min_dimensionZvideo_max_dimensionZvideo_pts_rangevideo_timebaseZaudio_samplesZaudio_channelsZaudio_pts_rangeaudio_timebaser[   	audio_fpsz x rL         )*r   
IndexErrorr   r'   rd   Ztorchvisionr   rE   r!   rF   rG   rH   rI   itemr   r   r[   r   r9   r   r   	numeratordenominatorZ	has_audior   r   rr   rs   ceilZaudio_sample_rater   rJ   rB   r   rv   r#   Tensorr/   ro   shaperK   Zpermute)r*   r2   r   r   Z
video_pathZclip_ptsr   backendZ	start_ptsZend_ptsZvideoZaudioinfo_infor[   r   Zvideo_start_ptsZvideo_end_ptsZaudio_start_ptsZaudio_end_ptsr   r   _Zresampling_idxr   r   r   get_clip!  sz   







&zVideoClips.get_clipc                 C   sn   dd | j D }dd | j D }|rt|}| }| j }||d< ||d< |d= |d= |d= d	|d
< |S )Nc                 S   rz   r   r{   r|   r   r   r   rY   z  r~   z+VideoClips.__getstate__.<locals>.<listcomp>c                 S   s   g | ]}| tjqS r   )r   r#   r   )rV   r;   r   r   r   rY     s    video_pts_sizesrZ   rd   r   r   r   _version)rZ   r#   catnumpy__dict__copy)r*   r   rZ   dr   r   r   __getstate__y  s   

zVideoClips.__getstate__r   c                 C   sf   d|vr	|| _ d S tj|d tjd}tj||d dd}|d= ||d< || _ | | j| j| j d S )Nr   rZ   rS   r   r   )r    )	r   r#   rT   r   splitrQ   ro   r   rB   )r*   r   rZ   r   r   r   __setstate__  s   zVideoClips.__setstate__)r>   r   NNr   r   r   r   r   r   r   r?   )r   Nr(   ) r4   r5   r6   r7   r   r8   r9   r	   r   r   r+   rO   rP   propertyrf   rp   staticmethodr#   r   r
   r   rw   ry   rQ   r1   r   r   r   rq   r   r   r   r   r   r   r   r=   I   s    	


% &*Xr=   )r   ) r   rr   rt   	fractionsr   typingr   r   r   r   r   r	   r
   r   r   r#   Ztorchvision.ior   r   r   r   r\   r   r   rs   r9   r   r   r%   r&   r<   r=   r   r   r   r   <module>   s    ,$$
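

# Minimal usage sketch (illustrative only; the two file names below are placeholders,
# not files shipped with this module):
if __name__ == "__main__":
    clips = VideoClips(
        ["video1.mp4", "video2.mp4"],
        clip_length_in_frames=16,
        frames_between_clips=8,
        num_workers=0,
    )
    print("number of clips:", clips.num_clips())
    # fetch the first clip; `video` is THWC by default, `video_idx` indexes video_paths
    video, audio, info, video_idx = clips.get_clip(0)
    print(video.shape, info, video_idx)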