Public Member Functions:

    def __init__(self, workspace_type=None)
    def add_remote_net(self, net)
    def remote_nets(self)
    def add(self, task)
    def tasks(self)
    def num_registered_tasks(self)
    def used_nodes(self)
    def report_step(self, step=None, node=None, interval_ms=1000)
    def report_net(self, net=None, node=None, report_interval=5)
    def tasks_by_node(self, node_remap=None)
    def to_task(self, node=None)
    def workspace_type(self)
    def __repr__(self)

Context that gathers tasks which will run concurrently, potentially on
multiple nodes. All tasks on the same node share the same workspace and
can therefore share blobs; tasks running on different nodes cannot
directly share data.

All tasks of the task group start concurrently, and the task group
finishes execution when the last task of the group finishes.
Example:
    # suppose that s1 ... s5 are execution steps or nets.
    with TaskGroup() as tg:
        # these tasks go to the default node 'local'
        Task(step=s1)
        Task(step=s2)
        with Node('n2'):
            Task(step=s3)
        with Node('n1'):
            Task(step=s4)
        with Node('n2'):
            Task(step=s5)

    # this runs all steps in parallel:
    # s1 and s2 run on the default node 'local',
    # s3 and s5 run on node 'n2',
    # s4 runs on node 'n1'
    session.run(tg)
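The helpers listed above follow the same pattern. Below is a minimal
sketch, not taken from the original docstring: progress_step is a
hypothetical reporting step, and the values in the comments are
illustrative assumptions inferred from the signatures, not confirmed
output.

    with TaskGroup() as tg:
        Task(step=s1)                  # registered on default node 'local'
        with Node('n2'):
            Task(step=s2)              # registered on node 'n2'
        # presumably re-runs progress_step every interval_ms milliseconds
        # while the group executes (inferred from the signature above)
        tg.report_step(step=progress_step, interval_ms=2000)

    tg.used_nodes()                    # e.g. ['local', 'n2'] (assumption)
    tg.num_registered_tasks()          # e.g. 2 (assumption)
    per_node = tg.tasks_by_node()      # presumably one merged task per node
    single = tg.to_task()              # presumably the whole group as one Task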
Definition at line 166 of file task.py.