From 1d5ca7ababbd4a06ac97dca32a7c2c8dda293a76 Mon Sep 17 00:00:00 2001
From: Zhicheng Yan
Date: Thu, 3 Dec 2020 01:40:41 -0800
Subject: [PATCH] expose _multiprocessing_context in VideoClips class

Summary:
There are issues with multiprocessing when a custom __getstate__ involving Tensors is combined with the forkserver start method. See details in the issue below:
- https://github.com/pytorch/pytorch/issues/32351

As a temporary mitigation, expose the `_multiprocessing_context` argument.

Differential Revision: D24644136

fbshipit-source-id: ae786f363f5406d268770aa83cd9d86395194ee6
---
 torchvision/datasets/video_utils.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/torchvision/datasets/video_utils.py b/torchvision/datasets/video_utils.py
index d743cc49b13..9cb97503509 100644
--- a/torchvision/datasets/video_utils.py
+++ b/torchvision/datasets/video_utils.py
@@ -101,6 +101,7 @@ def __init__(
         _video_max_dimension=0,
         _audio_samples=0,
         _audio_channels=0,
+        _multiprocessing_context=None,
     ):

         self.video_paths = video_paths
@@ -114,6 +115,8 @@ def __init__(
         self._audio_samples = _audio_samples
         self._audio_channels = _audio_channels

+        self._multiprocessing_context = _multiprocessing_context
+
         if _precomputed_metadata is None:
             self._compute_frame_pts()
         else:
@@ -136,6 +139,7 @@ def _compute_frame_pts(self):
             batch_size=16,
             num_workers=self.num_workers,
             collate_fn=self._collate_fn,
+            multiprocessing_context=self._multiprocessing_context,
         )

         with tqdm(total=len(dl)) as pbar:
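
Usage sketch (not part of the patch itself): one way a caller might pass the new argument. The video paths, clip settings, and worker count below are placeholders, and the choice of a "spawn" context is only one possible workaround for the pickling issue referenced above.

    import multiprocessing

    from torchvision.datasets.video_utils import VideoClips

    # Placeholder paths; any decodable video files work here.
    video_paths = ["/data/videos/a.mp4", "/data/videos/b.mp4"]

    # Use a "spawn" context instead of fork/forkserver to sidestep the
    # Tensor + __getstate__ pickling problem in the linked issue.
    mp_context = multiprocessing.get_context("spawn")

    clips = VideoClips(
        video_paths,
        clip_length_in_frames=16,
        frames_between_clips=16,
        num_workers=4,
        _multiprocessing_context=mp_context,
    )

The context object is forwarded unchanged to torch.utils.data.DataLoader's multiprocessing_context parameter, which also accepts a start-method name such as "spawn" as a plain string.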