Caffe2 - Python API
A deep learning, cross-platform ML framework
preprocess_declarations.py
import re
from copy import deepcopy
from function_wrapper import TYPE_FORMAL_GENERIC
import common_with_cwrap

type_map = {
    'floating_point': [
        'Float',
        'Double',
        'Half',
    ],
    'integral': [
        'Byte',
        'Char',
        'Short',
        'Int',
        'Long',
        'Bool'
    ],
}

all_types = type_map['floating_point'] + type_map['integral']
type_map['all'] = all_types

all_backends = ['CPU', 'CUDA', 'SparseCPU', 'SparseCUDA']
default_backends = ['CPU', 'CUDA']

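For orientation, a quick illustration (not part of the original file) of how the tables above compose: the 'all' alias is simply the concatenation of the floating-point and integral groups, and the default backends are a subset of all backends.

# Example (illustrative only, not part of preprocess_declarations.py):
assert type_map['all'] == ['Float', 'Double', 'Half',
                           'Byte', 'Char', 'Short', 'Int', 'Long', 'Bool']
assert set(default_backends).issubset(all_backends)
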
def process_types_and_backends(option):
    # if specific pairs were not listed, then enumerate them
    # based on the backend and type attributes
    # if backend or type is not defined, it is assumed to be all of them
    if 'backend_type_pairs' not in option:
        backends = option.get('backends', default_backends)
        if isinstance(option.get('type_method_definition_dispatch'), dict):
            backends = option.get('type_method_definition_dispatch').keys()
        backends = set(backends)

        types = option.get('types', all_types)

        pairs = [[p, t] for p in backends for t in types]
    else:
        pairs = option['backend_type_pairs']

    # expand type aliases (integral, floating_point, all)
    def expand(pair):
        p, t = pair
        assert(p in all_backends)
        if t in type_map:
            return [(p, tt) for tt in type_map[t]]
        assert(t in all_types)
        return [(p, t)]
    pairs = set(p for pair in pairs for p in expand(pair))

    # disable CUDA Half if there is a Sparse argument
    for arg in option.get('arguments', []):
        if arg['type'] == 'THSTensor*':
            pairs.discard(('CUDA', 'Half'))

    # special case: remove Half and Bool for CPU unless explicitly enabled
    if not option.get('cpu_half', False):
        pairs.discard(('CPU', 'Half'))

    if not option.get('cpu_bool', False):
        pairs.discard(('CPU', 'Bool'))

    # TODO: remove this hack once support for bool tensors on CUDA is enabled
    pairs.discard(('CUDA', 'Bool'))

    # sort the result for easy reading
    option['backend_type_pairs'] = sorted([p for p in pairs])

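As a concrete illustration, here is what process_types_and_backends does to a hypothetical option dict (a sketch, not taken from the real declarations):

# Example (hypothetical option, illustrative only):
option = {'backends': ['CPU'], 'types': ['floating_point'], 'arguments': []}
process_types_and_backends(option)
# 'floating_point' expands to Float/Double/Half, then ('CPU', 'Half')
# is discarded because 'cpu_half' is not set:
assert option['backend_type_pairs'] == [('CPU', 'Double'), ('CPU', 'Float')]
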
def exclude(declaration):
    return 'only_register' in declaration or declaration.get('name') == 'ndimension'

def add_variants(option):
    option.setdefault('variants', ['method'])

# if we have 'output' arguments, generate a variant where
# we mark outputs as allocate = True, and where the method variant
# is disabled...

def handle_outputs_taken_as_arguments(options):
    new_options = []

    def is_nullable(arg):
        return (arg['type'] in {'THIntegerTensor*', 'THTensor*'} and
                arg.get('default', '') in {None, 'NULL', 'nullptr'})

    def should_generate_out_variant(option):
        if 'function' in option['variants'] and option['mode'] != 'native':
            # don't generate _out variants for in-place functions
            return re.search('(^__i|[^_]_$)', option['api_name']) is None
        return False

    for option in options:
        for arg in option['arguments']:
            # mark arguments which can be null
            if is_nullable(arg):
                arg['is_nullable'] = True

        if any('output' in arg for arg in option['arguments']):
            allocate_option = deepcopy(option)
            # the allocating option needs to be marked
            for arg in allocate_option['arguments']:
                if 'output' in arg:
                    arg['allocate'] = True

            # the original option, which takes arguments for the results,
            # is no longer a method, and has _out added to indicate it takes
            # output arguments
            if should_generate_out_variant(option):
                if 'method' in option['variants']:
                    option['variants'].remove('method')
                option['api_name'] += '_out'
                new_options.append(option)

            new_options.append(allocate_option)
        else:
            new_options.append(option)
    return new_options

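A sketch of the transformation on a hypothetical option with one output argument: the original option becomes the _out variant (dropping its method form), while a deep copy marked allocate=True takes its place. The regex in should_generate_out_variant skips in-place names such as add_ or __iadd__.

# Example (hypothetical option, illustrative only):
opts = [{'api_name': 'add', 'mode': 'TH',
         'variants': ['method', 'function'],
         'arguments': [{'name': 'self', 'type': 'THTensor*'},
                       {'name': 'result', 'type': 'THTensor*', 'output': True}]}]
new = handle_outputs_taken_as_arguments(opts)
# -> 'add_out' (function-only, caller supplies 'result') followed by
#    'add' (the copy whose output argument is marked allocate=True)
assert [o['api_name'] for o in new] == ['add_out', 'add']
assert new[1]['arguments'][1]['allocate'] is True
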
def sanitize_return(option):
    ret = option['return']
    m = re.match(r'argument (\d+(,\d+)*)', ret)
    if m is not None:
        arguments = [int(x) for x in m.group(1).split(',')]
        option['return'] = {'kind': 'arguments', 'arguments': arguments}
    elif ret == 'self':
        option['return'] = {'kind': 'arguments', 'arguments': []}
        for i, x in enumerate(option['arguments']):
            if x['name'] == 'self':
                option['return']['arguments'].append(i)
                break
    else:
        option['return'] = {'kind': 'type', 'type': option['return']}

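A few hypothetical inputs showing the three branches of sanitize_return (sketch only):

# Example (hypothetical options, illustrative only):
opt = {'return': 'argument 0,1'}
sanitize_return(opt)
assert opt['return'] == {'kind': 'arguments', 'arguments': [0, 1]}

opt = {'return': 'self', 'arguments': [{'name': 'self'}, {'name': 'other'}]}
sanitize_return(opt)
assert opt['return'] == {'kind': 'arguments', 'arguments': [0]}

opt = {'return': 'THTensor*'}
sanitize_return(opt)
assert opt['return'] == {'kind': 'type', 'type': 'THTensor*'}
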
def set_mode(option):
    option['mode'] = option.get('mode', 'TH')

# To enable 0-dim support in TH operations,
# we find all places where a function is still valid when a single
# Scalar argument is replaced with a Tensor.
# We then mark the tensor variant with a key zero_dim_dispatch_when_scalar: name,
# where 'name' is the name of the argument that should be a scalar.
# During dispatch, if that argument is marked internally as holding a scalar,
# then the method will dispatch to the scalar-taking function.

def discover_zero_dim_tensor_operations(declaration):
    def exclude(arg):
        return arg.get('ignore_check')

    def signature(option, i=None, value=None):
        elements = [TYPE_FORMAL_GENERIC.get(arg['type'], arg['type'])
                    if i is None or j != i else value
                    for j, arg in enumerate(option['arguments'])
                    if not exclude(arg)]
        return '#'.join(elements)
    signature_to_option = {signature(option): option
                           for option in declaration['options']}

    for option in declaration['options']:
        for i, arg in enumerate(option['arguments']):
            if arg['type'] == 'real':
                signature_of_tensor_version = signature(option, i, 'Tensor &')
                if signature_of_tensor_version in signature_to_option:
                    tensor_version = \
                        signature_to_option[signature_of_tensor_version]
                    names = [arg['name'] for arg in tensor_version['arguments']
                             if not exclude(arg)]
                    tensor_version['zero_dim_dispatch_when_scalar'] = names[i]
                    # print("FOUND "+str(i))
                    # print("Scalar Version ===== ")
                    # print(yaml.dump(option))
                    # print("Tensor Version ===== ")
                    # print(yaml.dump(tensor_version))
                    # print("SHARED "+names[i])

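The signature helper joins the formal types of the non-ignored arguments with '#'; a scalar overload matches a tensor overload when replacing its 'real' argument with 'Tensor &' reproduces the tensor overload's signature. A sketch with a hypothetical declaration, assuming function_wrapper's TYPE_FORMAL_GENERIC maps 'THTensor*' to 'Tensor &':

# Example (hypothetical declaration; assumes TYPE_FORMAL_GENERIC maps
# 'THTensor*' to 'Tensor &'):
decl = {'options': [
    # scalar overload: add(self, value)
    {'arguments': [{'name': 'self', 'type': 'THTensor*'},
                   {'name': 'value', 'type': 'real'}]},
    # tensor overload: add(self, other)
    {'arguments': [{'name': 'self', 'type': 'THTensor*'},
                   {'name': 'other', 'type': 'THTensor*'}]},
]}
discover_zero_dim_tensor_operations(decl)
# the tensor overload now dispatches to the scalar one when 'other'
# holds a 0-dim tensor:
assert decl['options'][1]['zero_dim_dispatch_when_scalar'] == 'other'
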
def discover_sparse_tensor_operations(declaration):
    def exclude(arg):
        return arg.get('ignore_check')

    def signature(option, i=None, value=None):
        elements = [TYPE_FORMAL_GENERIC.get(arg['type'], arg['type'])
                    if i is None or j != i else value
                    for j, arg in enumerate(option['arguments'])
                    if not exclude(arg)]
        return '#'.join(elements)

    # Determine if any options have the 'aten_dense_sparse' flag
    dense_sparse_options = [option
                            for option in declaration['options']
                            if option.get('aten_dense_sparse', False)]
    if len(dense_sparse_options) > 0:
        signature_to_option = {signature(option): option
                               for option in declaration['options']}

        for option in declaration['options']:
            for i, arg in enumerate(option['arguments']):
                if (arg['type'] == 'THSTensor*' and
                        option.get('aten_dense_sparse', False)):
                    signature_of_tensor_version = signature(
                        option, i, 'Tensor &')
                    if signature_of_tensor_version in signature_to_option:
                        tensor_version = \
                            signature_to_option[signature_of_tensor_version]
                        raw_args = len(tensor_version['arguments'])
                        names = [arg['name'] for arg in tensor_version['arguments']
                                 if not exclude(arg)]
                        filtered_args = len(names)
                        tensor_version['when_sparse_dispatch'] = names[i -
                                                                       (raw_args - filtered_args)]

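The same matching trick, restricted to options carrying the aten_dense_sparse flag; the offset (raw_args - filtered_args) corrects the index for arguments removed by ignore_check before names[...] is taken. A sketch under the same mapping assumption as above:

# Example (hypothetical declaration; same TYPE_FORMAL_GENERIC assumption):
decl = {'options': [
    # sparse-dense overload, eligible via aten_dense_sparse
    {'aten_dense_sparse': True,
     'arguments': [{'name': 'mat1', 'type': 'THSTensor*'},
                   {'name': 'mat2', 'type': 'THTensor*'}]},
    # all-dense overload with the same shape
    {'arguments': [{'name': 'mat1', 'type': 'THTensor*'},
                   {'name': 'mat2', 'type': 'THTensor*'}]},
]}
discover_sparse_tensor_operations(decl)
# the dense overload dispatches here when 'mat1' is sparse:
assert decl['options'][1]['when_sparse_dispatch'] == 'mat1'
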
def is_extended_method(option):
    if 'method' in option['variants']:
        return False
    else:
        return True

def run(declarations):
    declarations = [d for d in declarations if not exclude(d)]
    non_extended_methods = set()
    for declaration in declarations:
        common_with_cwrap.set_declaration_defaults(declaration)
        declaration['options'] = [deepcopy(o) for o in declaration['options']]
        declaration['options'] = common_with_cwrap.filter_unique_options(
            declaration['options'],
            allow_kwarg=False,
            type_to_signature=TYPE_FORMAL_GENERIC,
            remove_self=True)

        common_with_cwrap.sort_by_number_of_options(declaration)

        discover_zero_dim_tensor_operations(declaration)
        discover_sparse_tensor_operations(declaration)

        for option in declaration['options']:
            set_mode(option)
            if option['mode'] != 'native':
                sanitize_return(option)
            process_types_and_backends(option)
            add_variants(option)
            if not is_extended_method(option):
                non_extended_methods.add(option['api_name'])
        declaration['options'] = handle_outputs_taken_as_arguments(
            declaration['options'])
    # We (very unfortunately) have overloaded virtual methods. Because
    # of C++'s rules, we cannot move one overload without doing some
    # extra work to make sure that an overload in a superclass and an
    # overload in a subclass resolve together. I've chosen to resolve
    # this problem simply by moving ALL overloads of a method which
    # occurs in Tensor to Type. This is why we have to first compute
    # which method *names* go on Type, and then move ALL overloads
    # of this name to Type.
    for declaration in declarations:
        for option in declaration['options']:
            option['extended_method'] = option['api_name'] not in non_extended_methods
    return declarations
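The final marking step can be seen in isolation; this standalone sketch (hypothetical api_name values) shows why every overload of a name that appears as a method stays non-extended:

# Example (illustrative only): if ANY overload of a name has the method
# variant, ALL overloads of that name are kept non-extended:
options = [{'api_name': 'add', 'variants': ['method']},
           {'api_name': 'add', 'variants': ['function']},
           {'api_name': 'conv2d', 'variants': ['function']}]
non_extended = {o['api_name'] for o in options if not is_extended_method(o)}
for o in options:
    o['extended_method'] = o['api_name'] not in non_extended
assert [o['extended_method'] for o in options] == [False, False, True]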
Helper functions referenced from common_with_cwrap:
def set_declaration_defaults(declaration)
def sort_by_number_of_options(declaration, reverse=True)
def filter_unique_options(options, allow_kwarg, type_to_signature, remove_self)