PyTorch - Python API
torch.autograd legacy Function implementations for tensor type-casting and resizing
tensor.py
1 from functools import reduce
2 import torch
3 import torch._utils
4 from ..function import Function
5 
6 
class Type(Function):
    """Autograd-aware cast of a tensor to ``dest_type``.

    ``forward`` records the input's Python tensor type and its CUDA device
    (``-1`` when the input lives on CPU) so that ``backward`` can cast the
    incoming gradient back to the original type, allocating it on the
    original GPU when there was one.
    """

    @staticmethod
    def forward(ctx, i, dest_type):
        # Stash what we need to undo the cast in backward.
        ctx.input_type = type(i)
        ctx.input_device = i.get_device() if i.is_cuda else -1
        return i.type(dest_type)

    @staticmethod
    def backward(ctx, grad_output):
        # Second forward argument (dest_type) is not a tensor -> gradient None.
        if ctx.input_device != -1:
            # Perform the cast under the original device so the result
            # is allocated on the GPU the input came from.
            with torch.cuda.device(ctx.input_device):
                return grad_output.type(ctx.input_type), None
        return grad_output.type(ctx.input_type), None
22 
23 
24 # TODO: deprecate this
# TODO: deprecate this
class Resize(Function):
    """Autograd-aware reshape that must preserve the element count.

    ``forward`` raises ``RuntimeError`` when ``sizes`` implies a different
    number of elements than ``tensor`` holds; otherwise it returns a
    contiguous view with the requested shape. ``backward`` reshapes the
    gradient back to the input's original sizes (``sizes`` gets no gradient).
    """

    @staticmethod
    def forward(ctx, tensor, sizes):
        ctx.sizes = sizes
        # Total element count implied by the requested shape.
        ctx.numel = reduce(lambda x, y: x * y, sizes, 1)
        if tensor.numel() != ctx.numel:
            raise RuntimeError(("requested resize to {} ({} elements in total), "
                                "but the given tensor has a size of {} ({} elements). "
                                "autograd's resize can only change the shape of a given "
                                "tensor, while preserving the number of elements. ").format(
                'x'.join(map(str, sizes)), ctx.numel,
                'x'.join(map(str, tensor.size())), tensor.numel()))
        ctx.input_sizes = tensor.size()
        if not tensor.is_contiguous():
            return tensor.contiguous().view(*sizes)
        # Contiguous input: go through tensor.new(tensor) first
        # (legacy constructor; presumably shares storage rather than
        # copying — TODO(review): confirm against old Tensor.new semantics).
        return tensor.new(tensor).contiguous().view(*sizes)

    @staticmethod
    def backward(ctx, grad_output):
        assert grad_output.numel() == ctx.numel
        # Restore the input's shape; sizes argument gets no gradient.
        return grad_output.contiguous().view(ctx.input_sizes), None