#include <torch/csrc/autograd/functions/tensor.h>

#include <torch/csrc/autograd/function.h>
#include <torch/csrc/autograd/functions/basic_ops.h>
#include <torch/csrc/autograd/functions/utils.h>
#include <torch/csrc/autograd/generated/Functions.h>
#include <torch/csrc/autograd/variable.h>

#include <ATen/ATen.h>

#include <cstddef>
#include <memory>
#include <stdexcept>
#include <utility>

namespace torch {
namespace autograd {
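
// CopyBackwards is the grad_fn recorded when copy_() is called with a source
// whose type or device differs from the destination. Output 0 is the gradient
// w.r.t. the destination, which is zero because the copy overwrites its
// contents; output 1 is the incoming gradient converted back to the source's
// type and device.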
auto CopyBackwards::apply(variable_list&& grads) -> variable_list {
  check_input_variables("CopyBackwards", grads, 1);
  auto& grad = grads[0];
  variable_list grad_inputs(2);
  if (should_compute_output(0)) {
    grad_inputs[0] = at::zeros_like(grad);
  }
  if (should_compute_output(1)) {
    // RAII guard that sets the default device to src_device for the scope of
    // this block and restores the previously active device on destruction.
    at::DeviceGuard device_guard(src_device);
    if (grad.is_cuda() && grad.device() != src_device) {
      grad_inputs[1] = src_type->copy(grad);
    } else {
      grad_inputs[1] = grad.toType(*src_type);
    }
  }
  return grad_inputs;
}
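
// CopySlices handles backward for an in-place operation applied to a *view*
// of another tensor: `fn` is the grad_fn of the in-place op as applied to the
// view, `base` is the tensor the view aliases, and `view` records the view's
// geometry (sizes, strides, storage offset) so apply() can carve the matching
// slice out of the incoming gradient.
//
// A minimal sketch of the Python-level pattern that creates this node
// (variable names are illustrative only):
//
//   base = torch.randn(4, requires_grad=True).clone()
//   v = base[1:3]         # v is a view into base's storage
//   v.mul_(2)             # in-place op on the view
//   # base's grad_fn has been rewritten to a CopySlices node so that
//   # gradients flow through the in-place op and back to base.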
CopySlices::CopySlices(
    const Variable& base_var,
    at::TensorGeometry view_,
    std::shared_ptr<Function> fn_)
    : Function(),
      base(base_var),
      view(std::move(view_)),
      fn(std::move(fn_)) {
  // Take the next_edges of fn as our own, except for index 0 which goes
  // to base instead of the view.
  add_input_metadata(base_var);
  const auto num_outputs = fn->num_outputs();
  next_edges_.reserve(num_outputs);
  add_next_edge(base_var.gradient_edge());
  for (size_t i = 1; i < num_outputs; i++) {
    add_next_edge(fn->next_edge(i));
  }
}
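
// Backward of the view + in-place pattern set up above: rebuild a gradient
// buffer with base's geometry, run fn on the slice corresponding to the view,
// and splice fn's result back into that slice.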
auto CopySlices::apply(variable_list&& inputs) -> variable_list {
  check_input_variables("CopySlices", inputs, 1);
  auto& grad = inputs[0];

  // fn is cleared by release_variables(); a second backward through this
  // node has nothing left to run.
  if (!fn) {
    throw std::runtime_error(ERR_BACKWARD_TWICE);
  }

  // Materialize a buffer with base's geometry holding the incoming gradient,
  // so the gradient itself is never mutated in place.
  auto result = at::empty_strided(base.sizes(), base.strides(), grad.options());
  result.copy_(grad);

  auto offset = view.storage_offset() - base.storage_offset();
  auto grad_slice = result.as_strided(view.sizes(), view.strides(), offset);
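  // grad_slice aliases result's storage at the view's location, so writes
  // through it below mutate result in place. The offset is relative: result
  // was freshly allocated with base's geometry, so base's own storage_offset
  // has to be subtracted out of the view's.
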
  // TODO: We clone grad_slice because we modify it below and "fn" might save
  // it for the backward of res. We might be able to avoid the clone() if
  // double-backprop is disabled.
  auto res = (*fn)({ grad_slice.clone() });

  variable_list grad_inputs(num_outputs());
  for (size_t i = 0; i < res.size(); i++) {
    if (should_compute_output(i)) {
      AT_ASSERT(res[i].defined());
      if (i == 0) {
        // Gradient for base: splice fn's gradient for the view back into the
        // aliased slice, then hand back the whole buffer.
        grad_slice.copy_(res[i]);
        grad_inputs[i] = std::move(result);
      } else {
        grad_inputs[i] = std::move(res[i]);
      }
    }
  }

  return grad_inputs;
}
void CopySlices::release_variables() {
  // Dropping fn frees the saved graph; see the ERR_BACKWARD_TWICE check in
  // apply() above.
  fn = nullptr;
}

}} // namespace torch::autograd