from functools import reduce

import torch

from ..function import Function


class Type(Function):

    @staticmethod
    def forward(ctx, i, dest_type):
        # Remember the input's type and device so backward can cast the gradient back.
        ctx.input_type = type(i)
        ctx.input_device = -1 if not i.is_cuda else i.get_device()
        return i.type(dest_type)

    @staticmethod
    def backward(ctx, grad_output):
        if ctx.input_device == -1:
            return grad_output.type(ctx.input_type), None
        else:
            # CUDA input: cast the gradient back on the device it came from.
            with torch.cuda.device(ctx.input_device):
                return grad_output.type(ctx.input_type), None
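# A minimal usage sketch (illustrative, not part of the original file): new-style
# autograd Functions are invoked through the `apply` classmethod, e.g.
#
#   y = Type.apply(x, torch.DoubleTensor)
#
# where `x` is a differentiable input; backward() then casts the incoming
# gradient back to x's original type (and, for CUDA inputs, does so on the
# device the input lived on).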


class Resize(Function):

    @staticmethod
    def forward(ctx, tensor, sizes):
        ctx.numel = reduce(lambda x, y: x * y, sizes, 1)
        if tensor.numel() != ctx.numel:
            raise RuntimeError((
                "requested resize to {} ({} elements in total), "
                "but the given tensor has a size of {} ({} elements). "
                "autograd's resize can only change the shape of a given "
                "tensor, while preserving the number of elements. ").format(
                    'x'.join(map(str, sizes)), ctx.numel,
                    'x'.join(map(str, tensor.size())), tensor.numel()))
        ctx.input_sizes = tensor.size()
        if tensor.is_contiguous():
            result = tensor.new(tensor).contiguous().view(*sizes)
            return result
        return tensor.contiguous().view(*sizes)

    @staticmethod
    def backward(ctx, grad_output):
        assert grad_output.numel() == ctx.numel
        # Undo the reshape; the `sizes` argument receives no gradient.
        return grad_output.contiguous().view(ctx.input_sizes), None
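# A minimal usage sketch (illustrative, not part of the original file):
#
#   y = Resize.apply(x, (2, 3))   # x must hold exactly 2 * 3 = 6 elements
#
# forward() only changes the shape while preserving the element count, and
# backward() views the incoming gradient back to x's original size, returning
# None for the non-differentiable `sizes` argument.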