2 torch.multiprocessing is a wrapper around the native :mod:`multiprocessing` 3 module. It registers custom reducers, that use shared memory to provide shared 4 views on the same data in different processes. Once the tensor/storage is moved 5 to shared_memory (see :func:`~torch.Tensor.share_memory_`), it will be possible 6 to send it to other processes without making any copies. 8 The API is 100% compatible with the original module - it's enough to change 9 ``import multiprocessing`` to ``import torch.multiprocessing`` to have all the 10 tensors sent through the queues or shared via other mechanisms, moved to shared 13 Because of the similarity of APIs we do not document most of this package 14 contents, and we recommend referring to very good docs of the original module. 18 from .reductions
import init_reductions
19 import multiprocessing
21 __all__ = [
'set_sharing_strategy',
'get_sharing_strategy',
22 'get_all_sharing_strategies']
25 from multiprocessing
import *
28 __all__ += multiprocessing.__all__
33 torch._C._multiprocessing_init()
36 if sys.version_info < (3, 3):
37 """Override basic classes in Python 2.7 and Python 3.3 to use ForkingPickler 38 for serialization. Later versions of Python already use ForkingPickler.""" 39 from .queue
import Queue, SimpleQueue
40 from .pool
import Pool
43 """Add helper function to spawn N processes and wait for completion of any of 44 them. This depends `mp.get_context` which was added in Python 3.4.""" 45 from .spawn
import spawn, SpawnContext
48 if sys.platform ==
'darwin' or sys.platform ==
'win32':
49 _sharing_strategy =
'file_system' 50 _all_sharing_strategies = {
'file_system'}
52 _sharing_strategy =
'file_descriptor' 53 _all_sharing_strategies = {
'file_descriptor',
'file_system'}
57 """Sets the strategy for sharing CPU tensors. 60 new_strategy (str): Name of the selected strategy. Should be one of 61 the values returned by :func:`get_all_sharing_strategies()`. 63 global _sharing_strategy
64 assert new_strategy
in _all_sharing_strategies
65 _sharing_strategy = new_strategy
69 """Returns the current strategy for sharing CPU tensors.""" 70 return _sharing_strategy
74 """Returns a set of sharing strategies supported on a current system.""" 75 return _all_sharing_strategies
def set_sharing_strategy(new_strategy)
def get_sharing_strategy()
def get_all_sharing_strategies()