Caffe2 - Python API
A deep learning, cross-platform ML framework
gen.py
import argparse
import os

import yaml
from collections import OrderedDict

import sys
from os import path
sys.path.append(path.dirname(path.abspath(__file__)))

import cwrap_parser
import nn_parse
import native_parse
import preprocess_declarations
import function_wrapper

from code_template import CodeTemplate


# This file is the top-level entry point for code generation in ATen.
# It takes an arbitrary number of arguments specifying metadata files to
# process (.cwrap, .yaml and .h) and outputs a number of generated header
# and cpp files in ATen/ (see invocations of 'write' for each file that
# is written.) It is invoked from cmake; look for the 'cwrap_files'
# variable for an up-to-date list of files which are passed.

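# An illustrative invocation (the actual argument list is assembled by cmake,
# as noted above; the file names below are examples only):
#
#   python gen.py -s /path/to/aten/src/ATen -d build/ATen \
#       Declarations.cwrap nn.yaml native_functions.yaml
#
# Passing -o <file> makes the script write the list of files it would
# generate into <file> and exit without generating anything.
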
parser = argparse.ArgumentParser(description='Generate ATen source files')
parser.add_argument('files', help='cwrap files', nargs='+')

parser.add_argument(
    '-s',
    '--source-path',
    help='path to source directory for ATen',
    default='.')
parser.add_argument(
    '-o',
    '--output-dependencies',
    help='output a list of dependencies into the given file and exit')
parser.add_argument(
    '-d', '--install_dir', help='output directory', default='ATen')
parser.add_argument(
    '--rocm',
    action='store_true',
    help='reinterpret CUDA as ROCm/HIP and adjust filepaths accordingly')
options = parser.parse_args()
gen_to_source = os.environ.get('GEN_TO_SOURCE')  # update source directly as part of gen
if not gen_to_source:
    core_install_dir = os.path.join(options.install_dir, 'core_tmp') if options.install_dir is not None else None
else:
    core_install_dir = os.path.join(options.source_path, 'core')

if options.install_dir is not None and not os.path.exists(options.install_dir):
    os.makedirs(options.install_dir)
if core_install_dir is not None and not os.path.exists(core_install_dir):
    os.makedirs(core_install_dir)

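# FileManager tracks every file the generators intend to emit. Outputs are
# registered up front with will_write(), written with write() (which only
# touches the file on disk when its contents changed, to keep incremental
# builds fast), and cross-checked at the end with check_all_files_written().
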
class FileManager(object):
    def __init__(self, install_dir=None):
        self.install_dir = install_dir if install_dir else options.install_dir
        self.filenames = set()
        self.outputs_written = False
        self.undeclared_files = []

    def will_write(self, filename):
        filename = '{}/{}'.format(self.install_dir, filename)
        if self.outputs_written:
            raise Exception("'will_write' can only be called before " +
                            "the call to write_outputs, refactor so outputs are registered " +
                            "before running the generators")
        self.filenames.add(filename)

    def _write_if_changed(self, filename, contents):
        try:
            with open(filename, 'r') as f:
                old_contents = f.read()
        except IOError:
            old_contents = None
        if contents != old_contents:
            with open(filename, 'w') as f:
                f.write(contents)

    def write_outputs(self, filename):
        """Write a file containing the list of all outputs which are
        generated by this script."""
        self._write_if_changed(
            filename,
            ''.join(name + ";" for name in sorted(self.filenames)))
        self.outputs_written = True

    def write(self, filename, s, env=None):
        filename = '{}/{}'.format(self.install_dir, filename)
        if isinstance(s, CodeTemplate):
            assert env is not None
            env['generated_comment'] = "@" + "generated by aten/src/ATen/gen.py"
            s = s.substitute(env)
        self._write_if_changed(filename, s)
        if filename not in self.filenames:
            self.undeclared_files.append(filename)
        else:
            self.filenames.remove(filename)

    def check_all_files_written(self):
        if len(self.undeclared_files) > 0:
            raise Exception(
                "trying to write files {} which are not ".format(self.undeclared_files) +
                "in the list of outputs this script produces. " +
                "use will_write to add them.")
        if len(self.filenames) > 0:
            raise Exception("Outputs declared with 'will_write' were " +
                            "never written: {}".format(self.filenames))

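# Code templates live under <source-path>/templates; each is a C++ header or
# source skeleton containing ${...} placeholders that CodeTemplate.substitute()
# fills from an environment dict. A minimal sketch of the substitution, with
# values chosen only for illustration:
#
#   t = CodeTemplate("case Backend::${Backend}: return ${value};")
#   t.substitute({'Backend': 'CPU', 'value': '0'})
#   # -> 'case Backend::CPU: return 0;'
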
TEMPLATE_PATH = options.source_path + "/templates"
GENERATOR_DERIVED = CodeTemplate.from_file(
    TEMPLATE_PATH + "/GeneratorDerived.h")
TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.cpp")
SPARSE_TYPE_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/SparseTypeDerived.cpp")
TYPE_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDerived.h")
TYPE_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Type.h")
TYPE_EXTENDED_INTERFACE_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeExtendedInterface.h")
TYPE_DEFAULT_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDefault.h")
TYPE_DEFAULT_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeDefault.cpp")
TYPE_EXTENSION_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeExtension.h")
TYPE_EXTENSION_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeExtension.cpp")
TYPE_EXTENSION_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeExtensionDerived.h")
TYPE_EXTENSION_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/TypeExtensionDerived.cpp")

LEGACY_TH_DISPATCHER_H = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHDispatcher.h")
LEGACY_TH_DISPATCHER_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHDispatcher.cpp")
LEGACY_TH_DISPATCHER_DERIVED_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHDispatcherDerived.cpp")
LEGACY_TH_DISPATCHER_DERIVED_H = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHDispatcherDerived.h")

REGISTER_CPU_H = CodeTemplate.from_file(TEMPLATE_PATH + "/RegisterCPU.h")
REGISTER_CPU_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/RegisterCPU.cpp")

REGISTER_CUDA_H = CodeTemplate.from_file(TEMPLATE_PATH + "/RegisterCUDA.h")
REGISTER_CUDA_CPP = CodeTemplate.from_file(TEMPLATE_PATH + "/RegisterCUDA.cpp")

TENSOR_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Tensor.h")
TENSOR_METHODS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/TensorMethods.h")

FUNCTIONS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/Functions.h")
LEGACY_TH_FUNCTIONS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/LegacyTHFunctions.h")

NATIVE_FUNCTIONS_H = CodeTemplate.from_file(TEMPLATE_PATH + "/NativeFunctions.h")

EXTENSION_BACKEND_REGISTRATION_H = CodeTemplate.from_file(TEMPLATE_PATH + "/ExtensionBackendRegistration.h")

TYPE_REGISTER = CodeTemplate("""\
context->registerType(Backend::${backend}, ScalarType::${scalar_type}, new ${type_name}());
""")

EXTENSION_BACKEND_REGISTER_SWITCH = CodeTemplate("""\
case Backend::${Backend}:
    ${Type}Dispatch::register_function(schema, fn);
    break;
""")

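# Three output targets: core_file_manager writes the core headers
# (Type.h, Tensor.h, TensorMethods.h) into core_install_dir,
# cuda_file_manager collects the CUDA-only outputs, and file_manager
# handles everything else under the install dir (default: ATen/).
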
core_file_manager = FileManager(core_install_dir)
file_manager = FileManager()
cuda_file_manager = FileManager()

generators = {
    'CPUGenerator.h': {
        'name': 'CPU',
        'th_generator': 'THGenerator * generator;',
        'header': 'TH/TH.h',
    },
    'CUDAGenerator.h': {
        'name': 'CUDA',
        'th_generator': '',
        'header': 'THC/THC.h' if not options.rocm else 'THH/THH.h'
    },
}

backends = ['CPU', 'CUDA']
densities = ['Dense', 'Sparse']
extension_backends = ['MSNPU', 'XLA']

# scalar_name, c_type, accreal, th_scalar_type, is_floating_type
scalar_types = [
    ('Bool', 'uint8_t', 'BoolAccrealNotDefined', 'uint8_t', False),
    ('Byte', 'uint8_t', 'Long', 'uint8_t', False),
    ('Char', 'int8_t', 'Long', 'int8_t', False),
    ('Double', 'double', 'Double', 'double', True),
    ('Float', 'float', 'Double', 'float', True),
    ('Int', 'int', 'Long', 'int32_t', False),
    ('Long', 'int64_t', 'Long', 'int64_t', False),
    ('Short', 'int16_t', 'Long', 'int16_t', False),
    ('Half', 'Half', 'Double', 'at::Half', True),
]

# shared environment for non-derived base classes Type.h Tensor.h Storage.h
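# top_env is filled in by function_wrapper.create_generic() and by the
# per-type generators below, then substituted into the top-level templates
# (Type.h, TypeDefault.*, Functions.h, Register*.*) in generate_outputs().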
top_env = {
    'cpu_type_registrations': [],
    'cpu_type_headers': [],
    'cuda_type_registrations': [],
    'cuda_type_headers': [],
    'pure_virtual_type_method_declarations': [],
    'pure_virtual_extended_type_method_declarations': [],
    'type_method_declarations': [],
    'type_method_definitions': [],
    'tensor_method_declarations': [],
    'tensor_method_definitions': [],
    'function_declarations': [],
    'function_definitions': [],
    'type_ids': [],
    'native_function_declarations': [],
    'extension_backend_headers': [],
    'extension_backend_register_switches': [],
}


def dict_representer(dumper, data):
    return dumper.represent_dict(data.items())


def postprocess_output_declarations(output_declarations):
    # ensure each return has a name associated with it
    for decl in output_declarations:
        has_named_ret = False
        for n, ret in enumerate(decl.returns):
            if 'name' not in ret:
                assert not has_named_ret
                if decl.inplace:
                    ret['name'] = 'self'
                elif len(decl.returns) == 1:
                    ret['name'] = 'out'
                else:
                    ret['name'] = 'out' + str(n)
            else:
                has_named_ret = True

    def remove_key_if_none(dictionary, key):
        if key in dictionary.keys() and dictionary[key] is None:
            del dictionary[key]
        return dictionary

    return [remove_key_if_none(decl._asdict(), 'buffers')
            for decl in output_declarations]


def format_yaml(data):
    if options.output_dependencies:
        # yaml formatting is slow so don't do it if we will ditch it.
        return ""
    noalias_dumper = yaml.dumper.SafeDumper
    noalias_dumper.ignore_aliases = lambda self, data: True
    # Support serializing OrderedDict
    noalias_dumper.add_representer(OrderedDict, dict_representer)
    # Some yaml parsers (e.g. Haskell's) don't understand line breaks.
    # width=float('Inf') turns off optional line breaks and improves
    # the portability of the outputted yaml.
    return yaml.dump(data, default_flow_style=False, Dumper=noalias_dumper, width=float('Inf'))

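# Emits one derived Type per (backend, density, scalar type) combination,
# e.g. CPUFloatType.h/.cpp or SparseCUDALongType.h/.cpp, by filling the
# TypeDerived/SparseTypeDerived templates with a per-type environment, and
# appends the matching TYPE_REGISTER line and header include to top_env.
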
def generate_storage_type_and_tensor(backend, density, scalar_type, declarations):
    scalar_name, c_type, accreal, th_scalar_type, is_floating_type = scalar_type
    env = {}
    density_tag = 'Sparse' if density == 'Sparse' else ''
    env['Density'] = density
    env['ScalarName'] = scalar_name
    env['ScalarType'] = c_type
    env['THScalarType'] = th_scalar_type
    env['AccScalarName'] = accreal
    env['isFloatingType'] = is_floating_type
    env['isIntegralType'] = not is_floating_type
    env['Type'] = "{}{}{}Type".format(density_tag, backend, scalar_name)
    env['DenseTensor'] = "{}{}Tensor".format(backend, scalar_name)
    env['Backend'] = density_tag + backend
    env['DenseBackend'] = backend
    env['storage_tensor_headers'] = []
    if density != 'Sparse':
        env['storage_tensor_headers'] = ['#include <c10/core/TensorImpl.h>']

    # used for generating switch logic for external functions
    tag = density_tag + backend + scalar_name
    env['TypeID'] = 'TypeID::' + tag
    top_env['type_ids'].append(tag + ',')

    if backend == 'CUDA':
        env['extra_cuda_headers'] = []
        env['extra_cuda_headers'].append('#include <ATen/DeviceGuard.h>')
        if options.rocm:
            env['th_headers'] = [
                '#include <THH/THH.h>',
                '#include <THH/THHTensor.hpp>',
                '#include <THHUNN/THHUNN.h>',
                '#undef THNN_',
                '#undef THCIndexTensor_',
            ]
            env['extra_cuda_headers'].append('#include <ATen/hip/ATenHIPGeneral.h>')
            env['extra_cuda_headers'].append('#include <ATen/hip/HIPDevice.h>')
            env['extra_cuda_headers'].append('#include <ATen/hip/HIPTypeDefault.h>')
        else:
            env['th_headers'] = [
                '#include <THC/THC.h>',
                '#include <THC/THCTensor.hpp>',
                '#include <THCUNN/THCUNN.h>',
                '#undef THNN_',
                '#undef THCIndexTensor_',
            ]
            env['extra_cuda_headers'].append('#include <ATen/cuda/ATenCUDAGeneral.h>')
            env['extra_cuda_headers'].append('#include <ATen/cuda/CUDADevice.h>')
            env['extra_cuda_headers'].append('#include <ATen/cuda/CUDATypeDefault.h>')
        sname = '' if scalar_name == "Float" else scalar_name
        env['THType'] = 'Cuda{}'.format(sname)
        env['THStorage'] = 'THCuda{}Storage'.format(sname)
        env['THTensor'] = 'THCuda{}Tensor'.format(sname)
        env['THIndexTensor'] = 'THCudaLongTensor'
        env['state'] = ['globalContext().getTHCState()']
        env['isCUDA'] = 'true'
        env['storage_device'] = 'return storage->device;'
        env['Generator'] = 'CUDAGenerator'
    else:
        env['th_headers'] = [
            '#include <TH/TH.h>',
            '#include <TH/THTensor.hpp>',
            '#include <THNN/THNN.h>',
            '#undef THNN_',
        ]
        env['extra_cuda_headers'] = []
        env['THType'] = scalar_name
        env['THStorage'] = "TH{}Storage".format(scalar_name)
        env['THTensor'] = 'TH{}Tensor'.format(scalar_name)
        env['THIndexTensor'] = 'THLongTensor'
        env['state'] = []
        env['isCUDA'] = 'false'
        env['storage_device'] = 'throw std::runtime_error("CPU storage has no device");'
        env['Generator'] = 'CPUGenerator'
    env['AS_REAL'] = env['ScalarType']
    if scalar_name == "Half":
        env['SparseTensor'] = 'Tensor'
        if backend == "CUDA":
            env['AS_REAL'] = 'convert<at::Half,double>'

    declarations, definitions = function_wrapper.create_derived(
        env, declarations)
    env['type_derived_method_declarations'] = declarations
    env['type_derived_method_definitions'] = definitions

    fm = file_manager
    if env['DenseBackend'] == 'CUDA':
        fm = cuda_file_manager

    if density != 'Sparse':
        fm.write(env['Type'] + ".cpp", TYPE_DERIVED_CPP, env)
    else:
        fm.write(env['Type'] + ".cpp", SPARSE_TYPE_DERIVED_CPP, env)
    fm.write(env['Type'] + ".h", TYPE_DERIVED_H, env)

    type_register = TYPE_REGISTER.substitute(backend=env['Backend'], scalar_type=scalar_name, type_name=env['Type'])
    if env['DenseBackend'] == 'CPU':
        top_env['cpu_type_registrations'].append(type_register)
        top_env['cpu_type_headers'].append(
            '#include "ATen/{}.h"'.format(env['Type']))
    else:
        assert env['DenseBackend'] == 'CUDA'
        top_env['cuda_type_registrations'].append(type_register)
        top_env['cuda_type_headers'].append(
            '#include "ATen/{}.h"'.format(env['Type']))

def generate_type_extension_backend(backend, declarations):
    env = {}
    env['Type'] = "{}Type".format(backend)
    env['Backend'] = backend
    env['DeviceType'] = backend

    declarations, definitions = function_wrapper.create_extension_backend(
        env, declarations)
    env['type_method_declarations'] = declarations
    env['type_method_definitions'] = definitions

    file_manager.write(env['Type'] + ".cpp", TYPE_EXTENSION_CPP, env)
    file_manager.write(env['Type'] + ".h", TYPE_EXTENSION_H, env)

    extension_backend_register_switch = EXTENSION_BACKEND_REGISTER_SWITCH.substitute(env)
    top_env['extension_backend_register_switches'].append(extension_backend_register_switch)
    top_env['extension_backend_headers'].append(
        '#include <ATen/{}.h>'.format(env['Type']))


def generate_type_extension_backend_derived_types(backend):
    env = {}
    env['Backend'] = backend
    for scalar_name, c_type, _, _, _ in scalar_types:
        env['Type'] = "{}{}Type".format(backend, scalar_name)
        env['ScalarName'] = scalar_name
        env['ScalarType'] = c_type
        env['TypeID'] = 'TypeID::' + backend + scalar_name
        top_env['type_ids'].append(backend + scalar_name + ',')

        type_register = TYPE_REGISTER.substitute(backend=env['Backend'], scalar_type=scalar_name, type_name=env['Type'])
        top_env['cpu_type_registrations'].append(type_register)
        file_manager.write(env['Type'] + ".cpp", TYPE_EXTENSION_DERIVED_CPP, env)
        file_manager.write(env['Type'] + ".h", TYPE_EXTENSION_DERIVED_H, env)

        top_env['cpu_type_headers'].append('#include "ATen/{}.h"'.format(env['Type']))


def generate_legacy_th_dispatcher(backend, density, scalar_type, declarations):
    assert density != 'Sparse'
    scalar_name, c_type, accreal, th_scalar_type, is_floating_type = scalar_type
    env = {}
    env['Backend'] = backend
    env['Dispatcher'] = "LegacyTH{}{}Dispatcher".format(backend, scalar_name)

    fm = file_manager
    if backend == 'CUDA':
        fm = cuda_file_manager

    fm.write(env['Dispatcher'] + ".cpp", LEGACY_TH_DISPATCHER_DERIVED_CPP, env)
    fm.write(env['Dispatcher'] + ".h", LEGACY_TH_DISPATCHER_DERIVED_H, env)

    return env

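# The generators walk the full backends x densities x scalar_types grid,
# e.g. ('CPU', 'Dense', Float) -> CPUFloatType and ('CUDA', 'Sparse', Long)
# -> SparseCUDALongType; sparse Half is the one combination that is skipped.
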
def iterate_types():
    for backend in backends:
        for density in densities:
            for scalar_type in scalar_types:
                if density == 'Sparse' and scalar_type[0] == 'Half':
                    # THS does not do half type yet.
                    continue
                yield (backend, density, scalar_type)

###################
# declare what files will be output _before_ we do any work
# so that the script runs quickly when we are just querying the
# outputs
def declare_outputs():
    core_files = ['Type.h', 'Tensor.h', 'TensorMethods.h']
    for f in core_files:
        core_file_manager.will_write(f)
    files = ['Declarations.yaml', 'TypeExtendedInterface.h', 'TypeDefault.cpp', 'TypeDefault.h',
             'LegacyTHDispatcher.h', 'LegacyTHDispatcher.cpp', 'LegacyTHFunctions.h',
             'Functions.h', 'NativeFunctions.h', 'RegisterCPU.cpp', 'RegisterCPU.h', 'ExtensionBackendRegistration.h']
    for f in files:
        file_manager.will_write(f)
    cuda_files = ['RegisterCUDA.cpp', 'RegisterCUDA.h']
    for f in cuda_files:
        cuda_file_manager.will_write(f)
    for fname in sorted(generators.keys()):
        fm = file_manager
        if generators[fname]['name'] == 'CUDA':
            fm = cuda_file_manager
        fm.will_write(fname)
    for backend, density, scalar_type in iterate_types():
        scalar_name = scalar_type[0]
        full_backend = "Sparse" + backend if density == "Sparse" else backend
        fm = file_manager
        if backend == 'CUDA':
            fm = cuda_file_manager
        for kind in ["Type"]:
            if kind != 'Type' and density == "Sparse":
                # No Storage or Tensor for sparse
                continue
            fm.will_write("{}{}{}.h".format(full_backend, scalar_name, kind))
            fm.will_write("{}{}{}.cpp".format(full_backend, scalar_name, kind))
        # output LegacyTHDispatchers
        if density != 'Sparse':
            fm.will_write("{}{}{}{}.h".format('LegacyTH', full_backend, scalar_name, 'Dispatcher'))
            fm.will_write("{}{}{}{}.cpp".format('LegacyTH', full_backend, scalar_name, 'Dispatcher'))
    for backend in extension_backends:
        file_manager.will_write("{}Type.h".format(backend))
        file_manager.will_write("{}Type.cpp".format(backend))
        for scalar_type in scalar_types:
            scalar_name = scalar_type[0]
            file_manager.will_write("{}{}Type.h".format(backend, scalar_name))
            file_manager.will_write("{}{}Type.cpp".format(backend, scalar_name))

def filter_by_extension(files, *extensions):
    filtered_files = []
    for file in files:
        for extension in extensions:
            if file.endswith(extension):
                filtered_files.append(file)
    return filtered_files


# Because EOL may not be LF (\n) in some environments (e.g. Windows),
# normalize EOL from CRLF/CR to LF before comparing the files.
def cmpfiles_with_eol_normalization(a, b, names):
    results = ([], [], [])  # match, mismatch, error
    for x in names:
        try:
            with open(os.path.join(a, x)) as f:
                ax = f.read().replace('\r\n', '\n').replace('\r', '\n')
            with open(os.path.join(b, x)) as f:
                bx = f.read().replace('\r\n', '\n').replace('\r', '\n')
            if ax == bx:
                results[0].append(x)
            else:
                results[1].append(x)
        except OSError:
            results[2].append(x)
    return results

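# The generation pipeline: parse .cwrap / nn.yaml+.h / native_functions.yaml
# into declarations, preprocess them, build the generic Type/Tensor/Functions
# declarations (filling top_env), then write Declarations.yaml, the per-type
# derived files, and the top-level headers and registration files.
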
def generate_outputs():
    cwrap_files = filter_by_extension(options.files, '.cwrap')
    nn_files = filter_by_extension(options.files, 'nn.yaml', '.h')
    native_files = filter_by_extension(options.files, 'native_functions.yaml')

    declarations = [d
                    for file in cwrap_files
                    for d in cwrap_parser.parse(file)]

    declarations += nn_parse.run(nn_files)
    declarations += native_parse.run(native_files)
    declarations = preprocess_declarations.run(declarations)
    for fname, env in generators.items():
        fm = file_manager
        if env['name'] == 'CUDA':
            fm = cuda_file_manager
        fm.write(fname, GENERATOR_DERIVED, env)

    # note: this will fill in top_env['type/tensor_method_declarations/definitions']
    # and modify the declarations to include any information that will be used
    # by function_wrapper.create_derived
    output_declarations = function_wrapper.create_generic(top_env, declarations)
    output_declarations = postprocess_output_declarations(output_declarations)
    file_manager.write("Declarations.yaml", format_yaml(output_declarations))

    for backend, density, scalar_type in iterate_types():
        generate_storage_type_and_tensor(backend, density, scalar_type, declarations)
    for backend in extension_backends:
        generate_type_extension_backend(backend, declarations)
        generate_type_extension_backend_derived_types(backend)

    for backend, density, scalar_type in iterate_types():
        if density != 'Sparse':
            generate_legacy_th_dispatcher(backend, density, scalar_type, [])

    core_files = {
        'Type.h': TYPE_H,
        'Tensor.h': TENSOR_H,
        'TensorMethods.h': TENSOR_METHODS_H
    }

    for core_file, core_template_file in core_files.items():
        core_file_manager.write(core_file, core_template_file, top_env)

    file_manager.write('TypeExtendedInterface.h', TYPE_EXTENDED_INTERFACE_H, top_env)
    file_manager.write('TypeDefault.h', TYPE_DEFAULT_H, top_env)
    file_manager.write('TypeDefault.cpp', TYPE_DEFAULT_CPP, top_env)

    file_manager.write('LegacyTHDispatcher.h', LEGACY_TH_DISPATCHER_H, top_env)
    file_manager.write('LegacyTHDispatcher.cpp', LEGACY_TH_DISPATCHER_CPP, top_env)

    file_manager.write('RegisterCPU.h', REGISTER_CPU_H, top_env)
    file_manager.write('RegisterCPU.cpp', REGISTER_CPU_CPP, top_env)

    cuda_file_manager.write('RegisterCUDA.h', REGISTER_CUDA_H, top_env)
    cuda_file_manager.write('RegisterCUDA.cpp', REGISTER_CUDA_CPP, top_env)

    file_manager.write('Functions.h', FUNCTIONS_H, top_env)
    file_manager.write('LegacyTHFunctions.h', LEGACY_TH_FUNCTIONS_H, top_env)

    file_manager.write('NativeFunctions.h', NATIVE_FUNCTIONS_H, top_env)

    file_manager.write('ExtensionBackendRegistration.h', EXTENSION_BACKEND_REGISTRATION_H, top_env)

    file_manager.check_all_files_written()
    cuda_file_manager.check_all_files_written()

    # check that generated files match source files
    core_source_path = os.path.join(options.source_path, 'core')
    match, mismatch, errors = cmpfiles_with_eol_normalization(core_install_dir, core_source_path, core_files.keys())
    if errors:
        raise RuntimeError("Error while trying to compare source and generated files for {}. "
                           "Source directory: {}. Generated directory: {}."
                           .format(errors, core_source_path, core_install_dir))
    if mismatch:
        file_component = '{}'.format(','.join(mismatch))
        if len(mismatch) > 1:
            file_component = '{' + file_component + '}'
        update_cmd = "cp {}/{} {}".format(core_install_dir, file_component, core_source_path)
        raise RuntimeError("Source files: {} did not match generated files. To update the source files, "
                           "set environment variable GEN_TO_SOURCE or run \"{}\"".format(mismatch, update_cmd))

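# Entry point: all outputs are declared first; with -o/--output-dependencies
# the script only records what it would write (one list per file manager),
# otherwise it runs the full generation.
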
declare_outputs()
if options.output_dependencies is not None:
    file_manager.write_outputs(options.output_dependencies)
    core_file_manager.write_outputs(options.output_dependencies + "-core")
    cuda_file_manager.write_outputs(options.output_dependencies + "-cuda")
else:
    generate_outputs()