1 #include <torch/csrc/utils/tensor_numpy.h> 3 #include <torch/csrc/utils/numpy_stub.h> 6 namespace torch {
namespace utils {
7 PyObject* tensor_to_numpy(
const at::Tensor& tensor) {
8 throw std::runtime_error(
"PyTorch was compiled without NumPy support");
11 throw std::runtime_error(
"PyTorch was compiled without NumPy support");
13 bool is_numpy_scalar(PyObject* obj) {
14 throw std::runtime_error(
"PyTorch was compiled without NumPy support");
19 #include <torch/csrc/DynamicTypes.h> 20 #include <torch/csrc/Exceptions.h> 21 #include <torch/csrc/autograd/python_variable.h> 23 #include <ATen/ATen.h> 31 namespace torch {
namespace utils {
33 static std::vector<npy_intp> to_numpy_shape(IntArrayRef x) {
35 auto nelem = x.size();
36 auto result = std::vector<npy_intp>(nelem);
37 for (
size_t i = 0; i < nelem; i++) {
38 result[i] =
static_cast<npy_intp
>(x[i]);
43 static std::vector<int64_t> to_aten_shape(
int ndim, npy_intp* values) {
45 auto result = std::vector<int64_t>(ndim);
46 for (
int i = 0; i < ndim; i++) {
47 result[i] =
static_cast<int64_t
>(values[i]);
52 static int aten_to_dtype(
const ScalarType scalar_type);
54 PyObject* tensor_to_numpy(
const at::Tensor& tensor) {
57 "can't convert CUDA tensor to numpy. Use Tensor.cpu() to " 58 "copy the tensor to host memory first.");
62 "can't convert sparse tensor to numpy. Use Tensor.to_dense() to " 63 "convert to a dense tensor first.");
65 if (tensor.type().backend() != Backend::CPU) {
66 throw TypeError(
"NumPy conversion for %s is not supported", tensor.type().toString());
68 auto dtype = aten_to_dtype(tensor.scalar_type());
69 auto sizes = to_numpy_shape(tensor.sizes());
70 auto strides = to_numpy_shape(tensor.strides());
72 auto element_size_in_bytes = tensor.element_size();
73 for (
auto& stride : strides) {
74 stride *= element_size_in_bytes;
85 NPY_ARRAY_ALIGNED | NPY_ARRAY_WRITEABLE,
87 if (!array)
return nullptr;
93 PyObject* py_tensor = THPVariable_Wrap(make_variable(tensor,
false));
95 if (PyArray_SetBaseObject((PyArrayObject*)array.get(), py_tensor) == -1) {
99 tensor.storage().unsafeGetStorageImpl()->set_resizable(
false);
101 return array.release();
105 if (!PyArray_Check(obj)) {
106 throw TypeError(
"expected np.ndarray (got %s)", Py_TYPE(obj)->tp_name);
109 auto array = (PyArrayObject*)obj;
110 int ndim = PyArray_NDIM(array);
111 auto sizes = to_aten_shape(ndim, PyArray_DIMS(array));
112 auto strides = to_aten_shape(ndim, PyArray_STRIDES(array));
114 auto element_size_in_bytes = PyArray_ITEMSIZE(array);
115 for (
auto& stride : strides) {
116 if (stride%element_size_in_bytes != 0) {
118 "given numpy array strides not a multiple of the element byte size. " 119 "Copy the numpy array to reallocate the memory.");
121 stride /= element_size_in_bytes;
124 size_t storage_size = 1;
125 for (
int i = 0; i < ndim; i++) {
126 if (strides[i] < 0) {
128 "some of the strides of a given numpy array are negative. This is " 129 "currently not supported, but will be added in future releases.");
132 storage_size += (sizes[i] - 1) * strides[i];
135 void* data_ptr = PyArray_DATA(array);
136 auto& type = CPU(numpy_dtype_to_aten(PyArray_TYPE(array)));
137 if (!PyArray_EquivByteorders(PyArray_DESCR(array)->byteorder, NPY_NATIVE)) {
139 "given numpy array has byte order different from the native byte order. " 140 "Conversion between byte orders is currently not supported.");
143 return type.tensorFromBlob(data_ptr, sizes, strides, [obj](
void* data) {
149 static int aten_to_dtype(
const ScalarType scalar_type) {
150 switch (scalar_type) {
151 case kDouble:
return NPY_DOUBLE;
152 case kFloat:
return NPY_FLOAT;
153 case kHalf:
return NPY_HALF;
154 case kLong:
return NPY_INT64;
155 case kInt:
return NPY_INT32;
156 case kShort:
return NPY_INT16;
157 case kChar:
return NPY_INT8;
158 case kByte:
return NPY_UINT8;
160 throw ValueError(
"Got unsupported ScalarType ", toString(scalar_type));
164 ScalarType numpy_dtype_to_aten(
int dtype) {
166 case NPY_DOUBLE:
return kDouble;
167 case NPY_FLOAT:
return kFloat;
168 case NPY_HALF:
return kHalf;
169 case NPY_INT32:
return kInt;
170 case NPY_INT16:
return kShort;
171 case NPY_INT8:
return kChar;
172 case NPY_UINT8:
return kByte;
175 if (dtype == NPY_LONGLONG || dtype == NPY_INT64) {
181 auto pytype =
THPObjectPtr(PyArray_TypeObjectFromType(dtype));
184 "can't convert np.ndarray of type %s. The only supported types are: " 185 "float64, float32, float16, int64, int32, int16, int8, and uint8.",
186 ((PyTypeObject*)pytype.get())->tp_name);
189 bool is_numpy_scalar(PyObject* obj) {
190 return (PyArray_IsIntegerScalar(obj) ||
191 PyArray_IsScalar(obj, Floating));
// NOTE(review): the fragments below are stray documentation text (from the
// Tensor API reference) that leaked into this file; preserved as comments.
// bool is_cuda() const — returns whether a Tensor has CUDA backend.
// bool is_sparse() const — returns whether a Tensor has sparse backend.
// Flush-To-Zero and Denormals-Are-Zero mode.