Caffe2 - Python API
A deep learning, cross-platform ML framework
setup.py
# Welcome to the PyTorch setup.py.
#
# Environment variables you are probably interested in:
#
#   DEBUG
#     build with -O0 and -g (debug symbols)
#
#   REL_WITH_DEB_INFO
#     build with optimizations and -g (debug symbols)
#
#   MAX_JOBS
#     maximum number of compile jobs we should use to compile your code
#
#   USE_CUDA=0
#     disables CUDA build
#
#   CFLAGS
#     flags to apply to both C and C++ files to be compiled (a quirk of setup.py
#     which we have faithfully adhered to in our build system is that CFLAGS
#     also applies to C++ files, in contrast to the default behavior of autogoo
#     and cmake build systems.)
#
#   CC
#     the C/C++ compiler to use (NB: the CXX flag has no effect for distutils
#     compiles, because distutils always uses CC to compile, even for C++
#     files.)
#
# Environment variables for feature toggles:
#
#   USE_CUDNN=0
#     disables the cuDNN build
#
#   USE_FBGEMM=0
#     disables the FBGEMM build
#
#   BUILD_TEST=0
#     disables the test build
#
#   USE_MIOPEN=0
#     disables the MIOpen build
#
#   USE_MKLDNN=0
#     disables use of MKLDNN
#
#   USE_NNPACK=0
#     disables NNPACK build
#
#   USE_QNNPACK=0
#     disables QNNPACK build (quantized 8-bit operators)
#
#   USE_DISTRIBUTED=0
#     disables distributed (c10d, gloo, mpi, etc.) build
#
#   USE_SYSTEM_NCCL=0
#     disables use of system-wide nccl (we will use our submoduled
#     copy in third_party/nccl)
#
#   BUILD_CAFFE2_OPS=0
#     disables the Caffe2 operators build
#
#   USE_GLOO_IBVERBS
#     toggles features related to distributed support
#
#   USE_OPENCV
#     enables use of OpenCV for additional operators
#
#   USE_FFMPEG
#     enables use of ffmpeg for additional operators
#
#   USE_LEVELDB
#     enables use of LevelDB for storage
#
#   USE_LMDB
#     enables use of LMDB for storage
#
#   BUILD_BINARY
#     enables the additional binaries/ build
#
#   PYTORCH_BUILD_VERSION
#   PYTORCH_BUILD_NUMBER
#     specify the version of PyTorch, rather than the hard-coded version
#     in this file; used when we're building binaries for distribution
#
#   TORCH_CUDA_ARCH_LIST
#     specify which CUDA architectures to build for,
#     e.g. `TORCH_CUDA_ARCH_LIST="6.0;7.0"`
#     These are not CUDA versions; instead, they specify what
#     classes of NVIDIA hardware we should generate PTX for.
#
#   ONNX_NAMESPACE
#     specify a namespace for ONNX built here rather than the hard-coded
#     one in this file; needed to build with other frameworks that share ONNX.
#
#   BLAS
#     BLAS to be used by Caffe2. Can be MKL, Eigen, ATLAS, or OpenBLAS. If set
#     then the build will fail if the requested BLAS is not found, otherwise
#     the BLAS will be chosen based on what is found on your system.
#
#   USE_FBGEMM
#     enables use of FBGEMM
#
#   USE_REDIS
#     whether to use Redis for distributed workflows (Linux only)
#
#   USE_ZSTD
#     enables use of ZSTD, if the libraries are found
#
# Environment variables we respect (these environment variables are
# conventional and are often understood/set by other software.)
#
#   CUDA_HOME (Linux/OS X)
#   CUDA_PATH (Windows)
#     specify where CUDA is installed; usually /usr/local/cuda or
#     /usr/local/cuda-x.y
#   CUDAHOSTCXX
#     specify a different compiler than the system one to use as the CUDA
#     host compiler for nvcc.
#
#   CUDA_NVCC_EXECUTABLE
#     specify an NVCC to use. This is used in our CI to point to a cached nvcc
#
#   CUDNN_LIB_DIR
#   CUDNN_INCLUDE_DIR
#   CUDNN_LIBRARY
#     specify where cuDNN is installed
#
#   MIOPEN_LIB_DIR
#   MIOPEN_INCLUDE_DIR
#   MIOPEN_LIBRARY
#     specify where MIOpen is installed
#
#   NCCL_ROOT_DIR
#   NCCL_LIB_DIR
#   NCCL_INCLUDE_DIR
#     specify where nccl is installed
#
#   NVTOOLSEXT_PATH (Windows only)
#     specify where nvtoolsext is installed
#
#   LIBRARY_PATH
#   LD_LIBRARY_PATH
#     we will search for libraries in these paths
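#
# A minimal usage sketch combining the variables above (illustrative only;
# the specific values shown are assumptions, not project defaults):
#
#   DEBUG=1 MAX_JOBS=8 USE_CUDA=0 python setup.py develop
#   TORCH_CUDA_ARCH_LIST="6.0;7.0" python setup.py install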

from __future__ import print_function
from setuptools import setup, Extension, distutils, Command, find_packages
from distutils import core, dir_util
from distutils.core import Distribution
from distutils.errors import DistutilsArgError
import setuptools.command.build_ext
import setuptools.command.install
import distutils.command.clean
import distutils.sysconfig
import filecmp
import platform
import subprocess
import shutil
import sys
import os
import json
import glob
import importlib

from tools.build_pytorch_libs import build_caffe2
from tools.setup_helpers.env import (IS_WINDOWS, IS_DARWIN, IS_LINUX,
                                     check_env_flag,
                                     DEBUG, REL_WITH_DEB_INFO, USE_MKLDNN)
from tools.setup_helpers.cuda import USE_CUDA, CUDA_HOME, CUDA_VERSION
from tools.setup_helpers.cudnn import USE_CUDNN, CUDNN_LIBRARY, CUDNN_INCLUDE_DIR
from tools.setup_helpers.rocm import USE_ROCM
from tools.setup_helpers.miopen import USE_MIOPEN, MIOPEN_LIBRARY, MIOPEN_INCLUDE_DIR
from tools.setup_helpers.nccl import USE_NCCL, USE_SYSTEM_NCCL, NCCL_SYSTEM_LIB, NCCL_INCLUDE_DIR
from tools.setup_helpers.dist_check import USE_DISTRIBUTED
################################################################################
# Parameters parsed from environment
################################################################################

VERBOSE_SCRIPT = True
RUN_BUILD_DEPS = True
# see if the user passed a quiet flag to setup.py arguments and respect
# that in our parts of the build
EMIT_BUILD_WARNING = False
RERUN_CMAKE = False
filtered_args = []
for i, arg in enumerate(sys.argv):
    if arg == '--cmake':
        RERUN_CMAKE = True
        continue
    if arg == 'rebuild' or arg == 'build':
        arg = 'build'  # rebuild is gone, make it build
        EMIT_BUILD_WARNING = True
    if arg == "--":
        filtered_args += sys.argv[i:]
        break
    if arg == '-q' or arg == '--quiet':
        VERBOSE_SCRIPT = False
    if arg == 'clean':
        RUN_BUILD_DEPS = False
    filtered_args.append(arg)
sys.argv = filtered_args

if VERBOSE_SCRIPT:
    def report(*args):
        print(*args)
else:
    def report(*args):
        pass
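
# For example (illustrative): `python setup.py build --cmake -q` strips
# `--cmake` from sys.argv, sets RERUN_CMAKE, silences report(), and, because
# EMIT_BUILD_WARNING is set for the legacy `build` target, prints the
# "no longer necessary" box after setup() completes.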

# Constant known variables used throughout this file
cwd = os.path.dirname(os.path.abspath(__file__))
lib_path = os.path.join(cwd, "torch", "lib")
third_party_path = os.path.join(cwd, "third_party")
caffe2_build_dir = os.path.join(cwd, "build")
# lib/pythonx.x/site-packages
rel_site_packages = distutils.sysconfig.get_python_lib(prefix='')
# full absolute path to the dir above
full_site_packages = distutils.sysconfig.get_python_lib()
# CMAKE: full path to python library
if IS_WINDOWS:
    cmake_python_library = "{}/libs/python{}.lib".format(
        distutils.sysconfig.get_config_var("prefix"),
        distutils.sysconfig.get_config_var("VERSION"))
else:
    cmake_python_library = "{}/{}".format(
        distutils.sysconfig.get_config_var("LIBDIR"),
        distutils.sysconfig.get_config_var("INSTSONAME"))
cmake_python_include_dir = distutils.sysconfig.get_python_inc()


################################################################################
# Version, create_version_file, and package_name
################################################################################
package_name = os.getenv('TORCH_PACKAGE_NAME', 'torch')
version = '1.1.0a0'
if os.getenv('PYTORCH_BUILD_VERSION'):
    assert os.getenv('PYTORCH_BUILD_NUMBER') is not None
    build_number = int(os.getenv('PYTORCH_BUILD_NUMBER'))
    version = os.getenv('PYTORCH_BUILD_VERSION')
    if build_number > 1:
        version += '.post' + str(build_number)
else:
    try:
        sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()
        version += '+' + sha[:7]
    except Exception:
        pass
report("Building wheel {}-{}".format(package_name, version))
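
# Illustrative examples of the resulting version string (values assumed):
#   developer build: 1.1.0a0+1a2b3c4 (short git SHA appended)
#   binary build:    PYTORCH_BUILD_VERSION=1.1.0 PYTORCH_BUILD_NUMBER=2
#                    gives 1.1.0.post2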


# all the work we need to do _before_ setup runs
def build_deps():
    report('-- Building version ' + version)
    version_path = os.path.join(cwd, 'torch', 'version.py')
    with open(version_path, 'w') as f:
        f.write("__version__ = '{}'\n".format(version))
        # NB: This is not 100% accurate, because you could have built the
        # library code with DEBUG, but csrc without DEBUG (in which case
        # this would claim to be a release build when it's not.)
        f.write("debug = {}\n".format(repr(DEBUG)))
        f.write("cuda = {}\n".format(repr(CUDA_VERSION)))
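
    # For reference, the generated torch/version.py then contains something
    # like (illustrative values, not guaranteed):
    #   __version__ = '1.1.0a0+1a2b3c4'
    #   debug = False
    #   cuda = '10.0'
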
    def check_file(f):
        if not os.path.exists(f):
            report("Could not find {}".format(f))
            report("Did you run 'git submodule update --init --recursive'?")
            sys.exit(1)

    check_file(os.path.join(third_party_path, "gloo", "CMakeLists.txt"))
    check_file(os.path.join(third_party_path, "pybind11", "CMakeLists.txt"))
    check_file(os.path.join(third_party_path, 'cpuinfo', 'CMakeLists.txt'))
    check_file(os.path.join(third_party_path, 'onnx', 'CMakeLists.txt'))
    check_file(os.path.join(third_party_path, 'foxi', 'CMakeLists.txt'))
    check_file(os.path.join(third_party_path, 'QNNPACK', 'CMakeLists.txt'))
    check_file(os.path.join(third_party_path, 'fbgemm', 'CMakeLists.txt'))

    check_pydep('yaml', 'pyyaml')
    check_pydep('typing', 'typing')

    build_caffe2(version=version,
                 cmake_python_library=cmake_python_library,
                 build_python=True,
                 rerun_cmake=RERUN_CMAKE,
                 build_dir='build')

    # Use copies instead of symbolic files.
    # Windows has very poor support for them.
    sym_files = ['tools/shared/cwrap_common.py', 'tools/shared/_utils_internal.py']
    orig_files = ['aten/src/ATen/common_with_cwrap.py', 'torch/_utils_internal.py']
    for sym_file, orig_file in zip(sym_files, orig_files):
        same = False
        if os.path.exists(sym_file):
            if filecmp.cmp(sym_file, orig_file):
                same = True
            else:
                os.remove(sym_file)
        if not same:
            shutil.copyfile(orig_file, sym_file)

    dir_util.copy_tree('third_party/pybind11/include/pybind11/',
                       'torch/include/pybind11')

################################################################################
# Building dependent libraries
################################################################################

missing_pydep = '''
Missing build dependency: Unable to `import {importname}`.
Please install it via `conda install {module}` or `pip install {module}`
'''.strip()


def check_pydep(importname, module):
    try:
        importlib.import_module(importname)
    except ImportError:
        raise RuntimeError(missing_pydep.format(importname=importname, module=module))
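
# For example (illustrative): check_pydep('yaml', 'pyyaml') raises the
# RuntimeError above, with a conda/pip install hint, if `import yaml` fails.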


class build_ext(setuptools.command.build_ext.build_ext):
    def run(self):
        # report build options
        if USE_NUMPY:
            report('-- Building with NumPy bindings')
        else:
            report('-- NumPy not found')
        if USE_CUDNN:
            report('-- Detected cuDNN at ' + CUDNN_LIBRARY + ', ' + CUDNN_INCLUDE_DIR)
        else:
            report('-- Not using cuDNN')
        if USE_MIOPEN:
            report('-- Detected MIOpen at ' + MIOPEN_LIBRARY + ', ' + MIOPEN_INCLUDE_DIR)
        else:
            report('-- Not using MIOpen')
        if USE_CUDA:
            report('-- Detected CUDA at ' + CUDA_HOME)
        else:
            report('-- Not using CUDA')
        if USE_MKLDNN:
            report('-- Using MKLDNN')
        else:
            report('-- Not using MKLDNN')
        if USE_NCCL and USE_SYSTEM_NCCL:
            report('-- Using system provided NCCL library at ' + NCCL_SYSTEM_LIB + ', ' + NCCL_INCLUDE_DIR)
        elif USE_NCCL:
            report('-- Building NCCL library')
        else:
            report('-- Not using NCCL')
        if USE_DISTRIBUTED:
            report('-- Building with THD distributed package ')
            if IS_LINUX:
                report('-- Building with c10d distributed package ')
            else:
                report('-- Building without c10d distributed package')
        else:
            report('-- Building without distributed package')

        # It's an old-style class in Python 2.7...
        setuptools.command.build_ext.build_ext.run(self)

        # Copy the essential export library to compile C++ extensions.
        if IS_WINDOWS:
            build_temp = self.build_temp

            ext_filename = self.get_ext_filename('_C')
            lib_filename = '.'.join(ext_filename.split('.')[:-1]) + '.lib'

            export_lib = os.path.join(
                build_temp, 'torch', 'csrc', lib_filename).replace('\\', '/')

            build_lib = self.build_lib

            target_lib = os.path.join(
                build_lib, 'torch', 'lib', '_C.lib').replace('\\', '/')

            self.copy_file(export_lib, target_lib)

    def build_extensions(self):
        # The caffe2 extensions are created in
        # tmp_install/lib/pythonM.m/site-packages/caffe2/python/
        # and need to be copied to build/lib.linux.... , which will be a
        # platform dependent build folder created by the "build" command of
        # setuptools. Only the contents of this folder are installed in the
        # "install" command by default.
        # We only make this copy for Caffe2's pybind extensions
        caffe2_pybind_exts = [
            'caffe2.python.caffe2_pybind11_state',
            'caffe2.python.caffe2_pybind11_state_gpu',
            'caffe2.python.caffe2_pybind11_state_hip',
        ]
        i = 0
        while i < len(self.extensions):
            ext = self.extensions[i]
            if ext.name not in caffe2_pybind_exts:
                i += 1
                continue
            fullname = self.get_ext_fullname(ext.name)
            filename = self.get_ext_filename(fullname)
            report("\nCopying extension {}".format(ext.name))

            src = os.path.join("torch", rel_site_packages, filename)
            if not os.path.exists(src):
                report("{} does not exist".format(src))
                del self.extensions[i]
            else:
                dst = os.path.join(os.path.realpath(self.build_lib), filename)
                report("Copying {} from {} to {}".format(ext.name, src, dst))
                dst_dir = os.path.dirname(dst)
                if not os.path.exists(dst_dir):
                    os.makedirs(dst_dir)
                self.copy_file(src, dst)
                i += 1
        distutils.command.build_ext.build_ext.build_extensions(self)

    def get_outputs(self):
        outputs = distutils.command.build_ext.build_ext.get_outputs(self)
        outputs.append(os.path.join(self.build_lib, "caffe2"))
        report("setup.py::get_outputs returning {}".format(outputs))
        return outputs

    def create_compile_commands(self):
        def load(filename):
            with open(filename) as f:
                return json.load(f)
        ninja_files = glob.glob('build/*compile_commands.json')
        cmake_files = glob.glob('torch/lib/build/*/compile_commands.json')
        all_commands = [entry
                        for f in ninja_files + cmake_files
                        for entry in load(f)]

        # cquery does not like c++ compiles that start with gcc.
        # It forgets to include the c++ header directories.
        # We can work around this by replacing the gcc calls that python
        # setup.py generates with g++ calls instead
        for command in all_commands:
            if command['command'].startswith("gcc "):
                command['command'] = "g++ " + command['command'][4:]

        new_contents = json.dumps(all_commands, indent=2)
        contents = ''
        if os.path.exists('compile_commands.json'):
            with open('compile_commands.json', 'r') as f:
                contents = f.read()
        if contents != new_contents:
            with open('compile_commands.json', 'w') as f:
                f.write(new_contents)
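
    # The merged compile_commands.json written above is intended for C++
    # tooling such as cquery/clangd; each entry is a dict that looks roughly
    # like (illustrative):
    #   {"directory": "...", "command": "g++ ... -c foo.cpp", "file": "foo.cpp"}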


class install(setuptools.command.install.install):
    def run(self):
        setuptools.command.install.install.run(self)


class clean(distutils.command.clean.clean):
    def run(self):
        import glob
        import re
        with open('.gitignore', 'r') as f:
            ignores = f.read()
            pat = re.compile(r'^#( BEGIN NOT-CLEAN-FILES )?')
            for wildcard in filter(None, ignores.split('\n')):
                match = pat.match(wildcard)
                if match:
                    if match.group(1):
                        # Marker is found and stop reading .gitignore.
                        break
                    # Ignore lines which begin with '#'.
                else:
                    for filename in glob.glob(wildcard):
                        try:
                            os.remove(filename)
                        except OSError:
                            shutil.rmtree(filename, ignore_errors=True)

        # It's an old-style class in Python 2.7...
        distutils.command.clean.clean.run(self)


################################################################################
# Configure compile flags
################################################################################

library_dirs = []

if IS_WINDOWS:
    # /NODEFAULTLIB makes sure we only link to DLL runtime
    # and matches the flags set for protobuf and ONNX
    extra_link_args = ['/NODEFAULTLIB:LIBCMT.LIB']
    # /MD links against DLL runtime
    # and matches the flags set for protobuf and ONNX
    # /Z7 turns on symbolic debugging information in .obj files
    # /EHa is about native C++ catch support for asynchronous
    # structured exception handling (SEH)
    # /DNOMINMAX removes builtin min/max functions
    # /wdXXXX disables warning no. XXXX
    extra_compile_args = ['/MD', '/Z7',
                          '/EHa', '/DNOMINMAX',
                          '/wd4267', '/wd4251', '/wd4522', '/wd4522', '/wd4838',
                          '/wd4305', '/wd4244', '/wd4190', '/wd4101', '/wd4996',
                          '/wd4275']
    if sys.version_info[0] == 2:
        if not check_env_flag('FORCE_PY27_BUILD'):
            report('The support for PyTorch with Python 2.7 on Windows is very experimental.')
            report('Please set the flag `FORCE_PY27_BUILD` to 1 to continue build.')
            sys.exit(1)
        # /bigobj increases number of sections in .obj file, which is needed to link
        # against libraries in Python 2.7 under Windows
        extra_compile_args.append('/bigobj')
else:
    extra_link_args = []
    extra_compile_args = [
        '-std=c++11',
        '-Wall',
        '-Wextra',
        '-Wno-strict-overflow',
        '-Wno-unused-parameter',
        '-Wno-missing-field-initializers',
        '-Wno-write-strings',
        '-Wno-unknown-pragmas',
        # This is required for Python 2 declarations that are deprecated in 3.
        '-Wno-deprecated-declarations',
        # Python 2.6 requires -fno-strict-aliasing, see
        # http://legacy.python.org/dev/peps/pep-3123/
        # We also depend on it in our code (even Python 3).
        '-fno-strict-aliasing',
        # Clang has an unfixed bug leading to spurious missing
        # braces warnings, see
        # https://bugs.llvm.org/show_bug.cgi?id=21629
        '-Wno-missing-braces',
    ]
    if check_env_flag('WERROR'):
        extra_compile_args.append('-Werror')

library_dirs.append(lib_path)

# we specify exact lib names to avoid conflict with lua-torch installs
CAFFE2_LIBS = []
if USE_CUDA:
    CAFFE2_LIBS.extend(['-Wl,--no-as-needed', os.path.join(lib_path, 'libcaffe2_gpu.so'), '-Wl,--as-needed'])
if USE_ROCM:
    CAFFE2_LIBS.extend(['-Wl,--no-as-needed', os.path.join(lib_path, 'libcaffe2_hip.so'), '-Wl,--as-needed'])

# static library only
if IS_DARWIN:
    CAFFE2_LIBS = []
    if USE_CUDA:
        CAFFE2_LIBS.append(os.path.join(lib_path, 'libcaffe2_gpu.dylib'))
    if USE_ROCM:
        CAFFE2_LIBS.append(os.path.join(lib_path, 'libcaffe2_hip.dylib'))

if IS_WINDOWS:
    CAFFE2_LIBS = []
    if USE_CUDA:
        CAFFE2_LIBS.append(os.path.join(lib_path, 'caffe2_gpu.lib'))
    if USE_ROCM:
        CAFFE2_LIBS.append(os.path.join(lib_path, 'caffe2_hip.lib'))

main_compile_args = []
main_libraries = ['shm', 'torch_python']
main_link_args = []
main_sources = ["torch/csrc/stub.cpp"]

# Before the introduction of stub.cpp, _C.so and libcaffe2.so defined
# some of the same symbols, and it was important for _C.so to be
# loaded before libcaffe2.so so that the versions in _C.so got
# used. This happened automatically because we loaded _C.so directly,
# and libcaffe2.so was brought in as a dependency (though I suspect it
# may have been possible to break by importing caffe2 first in the
# same process).
#
# Now, libtorch_python.so and libcaffe2.so define some of the same
# symbols. We directly load the _C.so stub, which brings both of these
# in as dependencies. We have to make sure that symbols continue to be
# looked up in libtorch_python.so first, by making sure it comes
# before libcaffe2.so in the linker command.
main_link_args.extend(CAFFE2_LIBS)

try:
    import numpy as np
    NUMPY_INCLUDE_DIR = np.get_include()
    USE_NUMPY = True
except ImportError:
    USE_NUMPY = False

if USE_CUDA:
    if IS_WINDOWS:
        cuda_lib_path = CUDA_HOME + '/lib/x64/'
    else:
        cuda_lib_dirs = ['lib64', 'lib']
        for lib_dir in cuda_lib_dirs:
            cuda_lib_path = os.path.join(CUDA_HOME, lib_dir)
            if os.path.exists(cuda_lib_path):
                break
    library_dirs.append(cuda_lib_path)

if DEBUG:
    if IS_WINDOWS:
        extra_link_args.append('/DEBUG:FULL')
    else:
        extra_compile_args += ['-O0', '-g']
        extra_link_args += ['-O0', '-g']

if REL_WITH_DEB_INFO:
    if IS_WINDOWS:
        extra_link_args.append('/DEBUG:FULL')
    else:
        extra_compile_args += ['-g']
        extra_link_args += ['-g']


def make_relative_rpath(path):
    if IS_DARWIN:
        return '-Wl,-rpath,@loader_path/' + path
    elif IS_WINDOWS:
        return ''
    else:
        return '-Wl,-rpath,$ORIGIN/' + path
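
# For example (illustrative): make_relative_rpath('lib') yields
# '-Wl,-rpath,$ORIGIN/lib' on Linux and '-Wl,-rpath,@loader_path/lib' on
# macOS, so the _C extension can locate the shared libraries installed next
# to it in torch/lib at runtime.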

################################################################################
# Declare extensions and package
################################################################################

extensions = []
packages = find_packages(exclude=('tools', 'tools.*'))
C = Extension("torch._C",
              libraries=main_libraries,
              sources=main_sources,
              language='c++',
              extra_compile_args=main_compile_args + extra_compile_args,
              include_dirs=[],
              library_dirs=library_dirs,
              extra_link_args=extra_link_args + main_link_args + [make_relative_rpath('lib')],
              )
extensions.append(C)

if not IS_WINDOWS:
    DL = Extension("torch._dl",
                   sources=["torch/csrc/dl.c"],
                   language='c'
                   )
    extensions.append(DL)

# These extensions are built by cmake and copied manually in build_extensions()
# inside the build_ext implementation
extensions.append(
    Extension(
        name=str('caffe2.python.caffe2_pybind11_state'),
        sources=[]),
)
if USE_CUDA:
    extensions.append(
        Extension(
            name=str('caffe2.python.caffe2_pybind11_state_gpu'),
            sources=[]),
    )
if USE_ROCM:
    extensions.append(
        Extension(
            name=str('caffe2.python.caffe2_pybind11_state_hip'),
            sources=[]),
    )

cmdclass = {
    'build_ext': build_ext,
    'clean': clean,
    'install': install,
}

entry_points = {
    'console_scripts': [
        'convert-caffe2-to-onnx = caffe2.python.onnx.bin.conversion:caffe2_to_onnx',
        'convert-onnx-to-caffe2 = caffe2.python.onnx.bin.conversion:onnx_to_caffe2',
    ]
}

# post run, warnings, printed at the end to make them more visible
build_update_message = """
    It is no longer necessary to use the 'build' or 'rebuild' targets

    To install:
      $ python setup.py install
    To develop locally:
      $ python setup.py develop
    To force cmake to re-run (off by default):
      $ python setup.py develop --cmake
"""


def print_box(msg):
    lines = msg.split('\n')
    size = max(len(l) + 1 for l in lines)
    print('-' * (size + 2))
    for l in lines:
        print('|{}{}|'.format(l, ' ' * (size - len(l))))
    print('-' * (size + 2))
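
# For example (illustrative), print_box("hello\nworld") renders:
#   --------
#   |hello |
#   |world |
#   --------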

if __name__ == '__main__':
    # Parse the command line and check the arguments
    # before we proceed with building deps and setup
    dist = Distribution()
    dist.script_name = sys.argv[0]
    dist.script_args = sys.argv[1:]
    try:
        ok = dist.parse_command_line()
    except DistutilsArgError as msg:
        raise SystemExit(core.gen_usage(dist.script_name) + "\nerror: %s" % msg)
    if not ok:
        sys.exit()

    if RUN_BUILD_DEPS:
        build_deps()
    setup(
        name=package_name,
        version=version,
        description=("Tensors and Dynamic neural networks in "
                     "Python with strong GPU acceleration"),
        ext_modules=extensions,
        cmdclass=cmdclass,
        packages=packages,
        entry_points=entry_points,
        package_data={
            'torch': [
                'bin/*',
                'test/*',
                '__init__.pyi',
                'lib/*.so*',
                'lib/*.dylib*',
                'lib/*.dll',
                'lib/*.lib',
                'lib/*.pdb',
                'lib/torch_shm_manager',
                'lib/*.h',
                'include/ATen/*.h',
                'include/ATen/cpu/*.h',
                'include/ATen/cpu/vec256/*.h',
                'include/ATen/core/*.h',
                'include/ATen/cuda/*.cuh',
                'include/ATen/cuda/*.h',
                'include/ATen/cuda/detail/*.cuh',
                'include/ATen/cuda/detail/*.h',
                'include/ATen/cudnn/*.h',
                'include/ATen/detail/*.h',
                'include/caffe2/utils/*.h',
                'include/c10/*.h',
                'include/c10/macros/*.h',
                'include/c10/core/*.h',
                'include/ATen/core/dispatch/*.h',
                'include/c10/core/impl/*.h',
                'include/ATen/core/opschema/*.h',
                'include/c10/util/*.h',
                'include/c10/cuda/*.h',
                'include/c10/cuda/impl/*.h',
                'include/c10/hip/*.h',
                'include/c10/hip/impl/*.h',
                'include/caffe2/**/*.h',
                'include/torch/*.h',
                'include/torch/csrc/*.h',
                'include/torch/csrc/api/include/torch/*.h',
                'include/torch/csrc/api/include/torch/data/*.h',
                'include/torch/csrc/api/include/torch/data/dataloader/*.h',
                'include/torch/csrc/api/include/torch/data/datasets/*.h',
                'include/torch/csrc/api/include/torch/data/detail/*.h',
                'include/torch/csrc/api/include/torch/data/samplers/*.h',
                'include/torch/csrc/api/include/torch/data/transforms/*.h',
                'include/torch/csrc/api/include/torch/detail/*.h',
                'include/torch/csrc/api/include/torch/detail/ordered_dict.h',
                'include/torch/csrc/api/include/torch/nn/*.h',
                'include/torch/csrc/api/include/torch/nn/modules/*.h',
                'include/torch/csrc/api/include/torch/nn/parallel/*.h',
                'include/torch/csrc/api/include/torch/optim/*.h',
                'include/torch/csrc/api/include/torch/serialize/*.h',
                'include/torch/csrc/autograd/*.h',
                'include/torch/csrc/autograd/functions/*.h',
                'include/torch/csrc/autograd/generated/*.h',
                'include/torch/csrc/autograd/utils/*.h',
                'include/torch/csrc/cuda/*.h',
                'include/torch/csrc/jit/*.h',
                'include/torch/csrc/jit/generated/*.h',
                'include/torch/csrc/jit/passes/*.h',
                'include/torch/csrc/jit/script/*.h',
                'include/torch/csrc/jit/testing/*.h',
                'include/torch/csrc/onnx/*.h',
                'include/torch/csrc/utils/*.h',
                'include/pybind11/*.h',
                'include/pybind11/detail/*.h',
                'include/TH/*.h*',
                'include/TH/generic/*.h*',
                'include/THC/*.cuh',
                'include/THC/*.h*',
                'include/THC/generic/*.h',
                'include/THCUNN/*.cuh',
                'include/THCUNN/generic/*.h',
                'include/THNN/*.h',
                'include/THNN/generic/*.h',
                'share/cmake/ATen/*.cmake',
                'share/cmake/Caffe2/*.cmake',
                'share/cmake/Caffe2/public/*.cmake',
                'share/cmake/Caffe2/Modules_CUDA_fix/*.cmake',
                'share/cmake/Caffe2/Modules_CUDA_fix/upstream/*.cmake',
                'share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/*.cmake',
                'share/cmake/Gloo/*.cmake',
                'share/cmake/Torch/*.cmake',
            ],
            'caffe2': [
                'python/serialized_test/data/operator_test/*.zip',
            ]
        },
    )
    if EMIT_BUILD_WARNING:
        print_box(build_update_message)