Caffe2 - Python API
A deep learning, cross-platform ML framework
collect_env.py
1 # This script outputs relevant system environment info
2 # Run it with `python collect_env.py`.
3 from __future__ import absolute_import, division, print_function, unicode_literals
4 import re
5 import subprocess
6 import sys
7 import time
8 import datetime
9 import os
10 from collections import namedtuple
11 
try:
    import torch
    TORCH_AVAILABLE = True
except (ImportError, NameError, AttributeError):
    # NameError/AttributeError are caught too: a broken or partially
    # installed torch can fail at import time with either, not just
    # ImportError. All three cases are treated as "torch unavailable".
    TORCH_AVAILABLE = False

# True when running under Python 3. Used by run() to decide whether
# subprocess output needs decoding, and by get_pip_packages() to decide
# whether to probe for a separate `pip3` executable.
PY3 = sys.version_info >= (3, 0)
19 
# System Environment Information
# Immutable record holding every fact collected by get_env_info(); it is
# rendered into a human-readable report by pretty_str() using env_info_fmt.
# Field names must match the placeholders in env_info_fmt.
SystemEnv = namedtuple('SystemEnv', [
    'torch_version',
    'is_debug_build',
    'cuda_compiled_version',
    'gcc_version',
    'cmake_version',
    'os',
    'python_version',
    'is_cuda_available',
    'cuda_runtime_version',
    'nvidia_driver_version',
    'nvidia_gpu_models',
    'cudnn_version',
    'pip_version',  # 'pip' or 'pip3'
    'pip_packages',
    'conda_packages',
])
38 
39 
def run(command):
    """Run `command` through the shell and return (return-code, stdout, stderr).

    stdout and stderr are returned as text with surrounding whitespace
    stripped. `command` is a trusted, fixed probe string (this module only
    runs hard-coded diagnostics), which is why shell=True is acceptable here.
    """
    p = subprocess.Popen(command, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, shell=True)
    output, err = p.communicate()
    rc = p.returncode
    # On Python 3, communicate() yields bytes that must be decoded. Use
    # errors="replace" so non-UTF-8 output (e.g. localized Windows tools)
    # degrades to replacement characters instead of raising
    # UnicodeDecodeError. On Python 2 the values are already str and the
    # isinstance checks skip the decode, matching the old PY3-guarded path.
    if not isinstance(output, str):
        output = output.decode("utf-8", errors="replace")
    if not isinstance(err, str):
        err = err.decode("utf-8", errors="replace")
    return rc, output.strip(), err.strip()
50 
51 
def run_and_read_all(run_lambda, command):
    """Execute `command` via `run_lambda`; return its whole stdout, or None
    when the command exits non-zero."""
    rc, out, _ = run_lambda(command)
    return out if rc == 0 else None
58 
59 
def run_and_parse_first_match(run_lambda, command, regex):
    """Execute `command` via `run_lambda` and return group 1 of the first
    `regex` match in its stdout, or None on failure / no match."""
    rc, out, _ = run_lambda(command)
    if rc != 0:
        return None
    found = re.search(regex, out)
    return found.group(1) if found else None
69 
70 
def get_conda_packages(run_lambda):
    """Return the torch/mkl/magma/soumith lines of `conda list` output,
    or None when conda is unavailable or the command fails."""
    if get_platform() == 'win32':
        grep_cmd = r'findstr /R "torch soumith mkl magma"'
    else:
        grep_cmd = r'grep "torch\|soumith\|mkl\|magma"'
    # Prefer the exact conda binary driving the active environment.
    conda = os.environ.get('CONDA_EXE', 'conda')
    out = run_and_read_all(run_lambda, conda + ' list | ' + grep_cmd)
    if out is None:
        return out
    # Strip comment lines starting at the beginning of a line. `conda list`
    # emits several leading '#' header lines; re.MULTILINE makes '^' match
    # at every line start so all of them are removed (without the flag only
    # a comment at the very start of the string would be dropped).
    comment_regex = re.compile(r'^#.*\n', re.MULTILINE)
    return re.sub(comment_regex, '', out)
83 
84 
def get_gcc_version(run_lambda):
    """Return the version text reported by `gcc --version`, or None."""
    cmd, pattern = 'gcc --version', r'gcc (.*)'
    return run_and_parse_first_match(run_lambda, cmd, pattern)
87 
88 
def get_cmake_version(run_lambda):
    """Return the version text reported by `cmake --version`, or None."""
    cmd, pattern = 'cmake --version', r'cmake (.*)'
    return run_and_parse_first_match(run_lambda, cmd, pattern)
91 
92 
def get_nvidia_driver_version(run_lambda):
    """Return the installed NVIDIA driver version string, or None."""
    if get_platform() == 'darwin':
        # On macOS the CUDA driver is a kernel extension; its version is
        # embedded in the kextstat listing.
        return run_and_parse_first_match(
            run_lambda, 'kextstat | grep -i cuda',
            r'com[.]nvidia[.]CUDA [(](.*?)[)]')
    return run_and_parse_first_match(
        run_lambda, get_nvidia_smi(), r'Driver Version: (.*?) ')
100 
101 
def get_gpu_info(run_lambda):
    """Return the GPU model listing from `nvidia-smi -L` (UUIDs stripped),
    the current device name on macOS, or None."""
    if get_platform() == 'darwin':
        # nvidia-smi is not available on macOS; fall back to torch itself.
        if TORCH_AVAILABLE and torch.cuda.is_available():
            return torch.cuda.get_device_name(None)
        return None
    rc, listing, _ = run_lambda(get_nvidia_smi() + ' -L')
    if rc != 0:
        return None
    # Anonymize GPUs by removing their UUID suffixes.
    return re.sub(re.compile(r' \(UUID: .+?\)'), '', listing)
114 
115 
def get_running_cuda_version(run_lambda):
    """Return the CUDA toolkit version reported by `nvcc --version`, or None."""
    cmd, pattern = 'nvcc --version', r'V(.*)$'
    return run_and_parse_first_match(run_lambda, cmd, pattern)
118 
119 
def get_cudnn_version(run_lambda):
    """Best-effort search for installed libcudnn files.

    Returns a single resolved path, a multi-line "Probably one of the
    following" list when several candidates exist, or None. It is hard to
    tell from here which library torch is actually using.
    """
    platform = get_platform()
    if platform == 'win32':
        cudnn_cmd = 'where /R "%CUDA_PATH%\\bin" cudnn*.dll'
    elif platform == 'darwin':
        # CUDA libraries and drivers can be found in /usr/local/cuda/. See
        # https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html#install
        # https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html#installmac
        # Use CUDNN_LIBRARY when cudnn library is installed elsewhere.
        cudnn_cmd = 'ls /usr/local/cuda/lib/libcudnn*'
    else:
        cudnn_cmd = 'ldconfig -p | grep libcudnn | rev | cut -d" " -f1 | rev'
    rc, out, _ = run_lambda(cudnn_cmd)
    # The search command exits 1 on permission errors or when nothing is found.
    if len(out) == 0 or rc not in (0, 1):
        # Fall back to an explicitly configured location.
        lib = os.environ.get('CUDNN_LIBRARY')
        if lib is not None and os.path.isfile(lib):
            return os.path.realpath(lib)
        return None
    candidates = set()
    for line in out.split('\n'):
        resolved = os.path.realpath(line)  # eliminate symbolic links
        if os.path.isfile(resolved):
            candidates.add(resolved)
    if not candidates:
        return None
    # Alphabetize the result because the order is non-deterministic otherwise.
    ordered = sorted(candidates)
    if len(ordered) == 1:
        return ordered[0]
    return 'Probably one of the following:\n{}'.format('\n'.join(ordered))
152 
153 
def get_nvidia_smi():
    """Return the command used to invoke nvidia-smi.

    Note: nvidia-smi is currently available only on Windows and Linux; on
    Windows it lives at a fixed path that is usually not on PATH.
    """
    if get_platform() == 'win32':
        return '"C:\\Program Files\\NVIDIA Corporation\\NVSMI\\%s"' % 'nvidia-smi'
    return 'nvidia-smi'
160 
161 
def get_platform():
    """Normalize sys.platform to one of 'linux', 'win32', 'cygwin', 'darwin',
    falling back to the raw sys.platform value for anything else."""
    for known in ('linux', 'win32', 'cygwin', 'darwin'):
        if sys.platform.startswith(known):
            return known
    return sys.platform
173 
174 
def get_mac_version(run_lambda):
    """Return the macOS product version (e.g. '10.13.6'), or None."""
    cmd = 'sw_vers -productVersion'
    return run_and_parse_first_match(run_lambda, cmd, r'(.*)')
177 
178 
def get_windows_version(run_lambda):
    """Return the Windows edition name reported by wmic, or None."""
    cmd = 'wmic os get Caption | findstr /v Caption'
    return run_and_read_all(run_lambda, cmd)
181 
182 
def get_lsb_version(run_lambda):
    """Return the distro description from `lsb_release -a`, or None."""
    cmd = 'lsb_release -a'
    return run_and_parse_first_match(run_lambda, cmd, r'Description:\t(.*)')
185 
186 
def check_release_file(run_lambda):
    """Return PRETTY_NAME from /etc/*-release, or None."""
    cmd = 'cat /etc/*-release'
    return run_and_parse_first_match(run_lambda, cmd, r'PRETTY_NAME="(.*)"')
190 
191 
def get_os(run_lambda):
    """Return a human-readable OS description.

    Falls back to the normalized platform name when no richer description
    can be collected; may return None on macOS/Windows lookup failure.
    """
    platform = get_platform()

    if platform in ('win32', 'cygwin'):
        return get_windows_version(run_lambda)

    if platform == 'darwin':
        version = get_mac_version(run_lambda)
        return 'Mac OSX {}'.format(version) if version is not None else None

    if platform == 'linux':
        # Ubuntu/Debian expose lsb_release; otherwise read /etc/*-release.
        for probe in (get_lsb_version, check_release_file):
            desc = probe(run_lambda)
            if desc is not None:
                return desc
        return platform

    # Unknown platform
    return platform
219 
220 
def get_pip_packages(run_lambda):
    """Return (pip executable name, filtered `pip list` output).

    People generally have pip installed as `pip` or `pip3`; on Python 3 both
    are probed and `pip3` is preferred when both respond.
    """
    def run_with_pip(pip):
        # Only report the packages relevant to torch debugging.
        if get_platform() == 'win32':
            grep_cmd = r'findstr /R "numpy torch"'
        else:
            grep_cmd = r'grep "torch\|numpy"'
        return run_and_read_all(run_lambda, pip + ' list --format=freeze | ' + grep_cmd)

    if not PY3:
        return 'pip', run_with_pip('pip')

    # Try to figure out whether the user drives this interpreter with
    # pip or pip3 by probing both.
    out_pip = run_with_pip('pip')
    out_pip3 = run_with_pip('pip3')

    if out_pip is not None and out_pip3 is None:
        return 'pip', out_pip
    if out_pip3 is not None:
        # Either only pip3 responded, or both did; pip3 is most likely the
        # one associated with Python 3.
        return 'pip3', out_pip3
    # Neither responded.
    return 'pip', out_pip
249 
250 
def get_env_info():
    """Probe torch and the host system, returning a populated SystemEnv."""
    run_lambda = run
    pip_version, pip_list_output = get_pip_packages(run_lambda)

    if TORCH_AVAILABLE:
        torch_version = torch.__version__
        torch_debug = torch.version.debug
        cuda_available = torch.cuda.is_available()
        cuda_compiled = torch.version.cuda
    else:
        # Without torch installed these facts cannot be determined.
        torch_version = torch_debug = cuda_available = cuda_compiled = 'N/A'

    python_version = '{}.{}'.format(sys.version_info[0], sys.version_info[1])

    return SystemEnv(
        torch_version=torch_version,
        is_debug_build=torch_debug,
        python_version=python_version,
        is_cuda_available=cuda_available,
        cuda_compiled_version=cuda_compiled,
        cuda_runtime_version=get_running_cuda_version(run_lambda),
        nvidia_gpu_models=get_gpu_info(run_lambda),
        nvidia_driver_version=get_nvidia_driver_version(run_lambda),
        cudnn_version=get_cudnn_version(run_lambda),
        pip_version=pip_version,
        pip_packages=pip_list_output,
        conda_packages=get_conda_packages(run_lambda),
        os=get_os(run_lambda),
        gcc_version=get_gcc_version(run_lambda),
        cmake_version=get_cmake_version(run_lambda),
    )
280 
# Report template consumed by pretty_str(); each {placeholder} must match a
# SystemEnv field name. The leading/trailing newlines of the literal are
# removed by .strip().
env_info_fmt = """
PyTorch version: {torch_version}
Is debug build: {is_debug_build}
CUDA used to build PyTorch: {cuda_compiled_version}

OS: {os}
GCC version: {gcc_version}
CMake version: {cmake_version}

Python version: {python_version}
Is CUDA available: {is_cuda_available}
CUDA runtime version: {cuda_runtime_version}
GPU models and configuration: {nvidia_gpu_models}
Nvidia driver version: {nvidia_driver_version}
cuDNN version: {cudnn_version}

Versions of relevant libraries:
{pip_packages}
{conda_packages}
""".strip()
301 
302 
def pretty_str(envinfo):
    """Render a SystemEnv namedtuple into the env_info_fmt report.

    Applies a fixed sequence of cosmetic transformations (order matters):
    multiline GPU listing moved to its own lines, CUDA fields collapsed to
    'No CUDA' when CUDA is absent, booleans -> Yes/No, None -> 'Could not
    collect', empty package lists -> 'No relevant packages', and package
    lines tagged with their source ('[pip3] ', '[conda] ').
    """
    def replace_nones(dct, replacement='Could not collect'):
        # Fields that failed to collect are None; make that explicit.
        for key in dct.keys():
            if dct[key] is not None:
                continue
            dct[key] = replacement
        return dct

    def replace_bools(dct, true='Yes', false='No'):
        # Booleans read better as Yes/No in the report.
        for key in dct.keys():
            if dct[key] is True:
                dct[key] = true
            elif dct[key] is False:
                dct[key] = false
        return dct

    def prepend(text, tag='[prepend]'):
        # Prefix every line of `text` with `tag`.
        lines = text.split('\n')
        updated_lines = [tag + line for line in lines]
        return '\n'.join(updated_lines)

    def replace_if_empty(text, replacement='No relevant packages'):
        if text is not None and len(text) == 0:
            return replacement
        return text

    def maybe_start_on_next_line(string):
        # If `string` is multiline, prepend a \n to it.
        if string is not None and len(string.split('\n')) > 1:
            return '\n{}\n'.format(string)
        return string

    # Work on a mutable copy; the namedtuple itself stays untouched.
    mutable_dict = envinfo._asdict()

    # If nvidia_gpu_models is multiline, start on the next line
    mutable_dict['nvidia_gpu_models'] = \
        maybe_start_on_next_line(envinfo.nvidia_gpu_models)

    # If the machine doesn't have CUDA, report some fields as 'No CUDA'
    dynamic_cuda_fields = [
        'cuda_runtime_version',
        'nvidia_gpu_models',
        'nvidia_driver_version',
    ]
    all_cuda_fields = dynamic_cuda_fields + ['cudnn_version']
    all_dynamic_cuda_fields_missing = all(
        mutable_dict[field] is None for field in dynamic_cuda_fields)
    # Only collapse to 'No CUDA' when torch itself confirms CUDA is absent
    # AND every dynamic probe came back empty — avoids masking probe bugs.
    if TORCH_AVAILABLE and not torch.cuda.is_available() and all_dynamic_cuda_fields_missing:
        for field in all_cuda_fields:
            mutable_dict[field] = 'No CUDA'
        if envinfo.cuda_compiled_version is None:
            mutable_dict['cuda_compiled_version'] = 'None'

    # Replace True with Yes, False with No
    mutable_dict = replace_bools(mutable_dict)

    # Replace all None objects with 'Could not collect'
    # (must run after the 'No CUDA' substitution above).
    mutable_dict = replace_nones(mutable_dict)

    # If either of these are '', replace with 'No relevant packages'
    mutable_dict['pip_packages'] = replace_if_empty(mutable_dict['pip_packages'])
    mutable_dict['conda_packages'] = replace_if_empty(mutable_dict['conda_packages'])

    # Tag conda and pip packages with a prefix
    # If they were previously None, they'll show up as ie '[conda] Could not collect'
    if mutable_dict['pip_packages']:
        mutable_dict['pip_packages'] = prepend(mutable_dict['pip_packages'],
                                               '[{}] '.format(envinfo.pip_version))
    if mutable_dict['conda_packages']:
        mutable_dict['conda_packages'] = prepend(mutable_dict['conda_packages'],
                                                 '[conda] ')
    return env_info_fmt.format(**mutable_dict)
375 
376 
def get_pretty_env_info():
    """Collect environment info and render it as a human-readable report."""
    info = get_env_info()
    return pretty_str(info)
379 
380 
def main():
    """Entry point: collect and print the environment report."""
    print("Collecting environment information...")
    print(get_pretty_env_info())
385 
386 
# Allow running this module directly as a standalone script.
if __name__ == '__main__':
    main()
Referenced functions:
- def is_available() — Definition: __init__.py:45
- def get_device_name(device=None) — Definition: __init__.py:268