#include <ATen/core/ATenGeneral.h>
#include <ATen/core/Generator.h>
#include <ATen/core/Reduction.h>
#include <ATen/core/SparseTensorRef.h>
#include <c10/core/Allocator.h>
#include <c10/core/Layout.h>
#include <c10/core/Scalar.h>
#include <c10/core/ScalarType.h>
#include <c10/core/TensorOptions.h>
#include <c10/core/TensorTypeIdRegistration.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/Deprecated.h>
#include <c10/util/Half.h>
#include <c10/util/Optional.h>

using TensorList = ArrayRef<Tensor>;
// Deleter that intentionally does nothing. Used as the default deleter for
// blob-backed storages/tensors whose memory remains owned by the caller.
static inline void noop_deleter(void*) {}
109 : type_id_(type_id), is_variable_(is_variable), is_undefined_(is_undefined) {}
112 virtual ScalarType scalarType()
const = 0;
114 virtual Backend backend()
const = 0;
115 Layout
layout()
const noexcept {
return layout_from_backend(backend()); }
116 virtual bool is_cuda()
const = 0;
117 virtual bool is_hip()
const = 0;
118 virtual bool is_sparse()
const = 0;
119 virtual bool is_distributed()
const = 0;
120 bool is_variable()
const noexcept {
return is_variable_; }
121 bool is_undefined()
const noexcept {
return is_undefined_; }
122 virtual Allocator * allocator()
const = 0;
123 virtual Device getDeviceFromPtr(
void * data)
const = 0;
124 virtual Storage storageFromBlob(
void * data, int64_t size,
const std::function<
void(
void*)> & deleter=noop_deleter)
const = 0;
125 virtual Storage storageWithAllocator(int64_t size,
Allocator* allocator)
const = 0;
126 virtual std::unique_ptr<Generator> generator()
const = 0;
127 virtual Tensor unsafeTensorFromTH(
void * th_pointer,
bool retain)
const = 0;
128 virtual Storage unsafeStorageFromTH(
void * th_pointer,
bool retain)
const = 0;
129 virtual const char * toString()
const = 0;
130 virtual Type & toBackend(
Backend b)
const = 0;
131 virtual Type & toScalarType(ScalarType s)
const = 0;
132 Type & toSparse()
const {
133 return this->toBackend(at::toSparse(this->backend()));
135 Type & toDense()
const {
136 return this->toBackend(at::toDense(this->backend()));
139 return this->toBackend(at::backendToCPU(this->backend()));
141 Type & cuda()
const {
142 return this->toBackend(at::backendToCUDA(this->backend()));
145 return this->toBackend(at::backendToHIP(this->backend()));
149 virtual TypeID ID()
const = 0;
155 DeviceType device_type()
const {
156 return backendToDeviceType(backend());
161 bool non_blocking =
false,
163 virtual Tensor & copy_(
Tensor &
self,
const Tensor & src,
bool non_blocking=
false)
const = 0;
165 virtual void backward(
169 bool create_graph)
const = 0;
170 virtual void set_data(
Tensor &
self,
Tensor new_data)
const = 0;
172 virtual Tensor tensorFromBlob(
void * data,
IntArrayRef sizes,
const std::function<
void(
void*)> & deleter=noop_deleter)
const = 0;
173 virtual Tensor tensorFromBlob(
void * data,
IntArrayRef sizes,
IntArrayRef strides,
const std::function<
void(
void*)> & deleter=noop_deleter)
const = 0;
177 bool operator==(
const Type& other)
const {
178 return this == &other;
180 bool operator!=(
const Type& other)
const {
181 return this != &other;
189 .is_variable(is_variable());
195 if (!device_opt.has_value()) {
199 AT_ASSERT(device.
type() == device_type());
200 return options(device.
index());
222 virtual Tensor all(
const Tensor &
self, int64_t dim,
bool keepdim)
const = 0;
223 virtual bool allclose(
const Tensor &
self,
const Tensor & other,
double rtol,
double atol,
bool equal_nan)
const = 0;
224 virtual Tensor any(
const Tensor &
self, int64_t dim,
bool keepdim)
const = 0;
239 virtual Tensor bincount(
const Tensor &
self,
const Tensor & weights, int64_t minlength)
const = 0;
243 virtual std::vector<Tensor> chunk(
const Tensor &
self, int64_t chunks, int64_t dim)
const = 0;
250 virtual Tensor contiguous(
const Tensor &
self)
const = 0;
255 virtual Tensor cumsum(
const Tensor &
self, int64_t dim, ScalarType
dtype)
const = 0;
256 virtual Tensor cumsum(
const Tensor &
self, int64_t dim)
const = 0;
257 virtual Tensor cumprod(
const Tensor &
self, int64_t dim, ScalarType dtype)
const = 0;
258 virtual Tensor cumprod(
const Tensor &
self, int64_t dim)
const = 0;
260 virtual Tensor diag_embed(
const Tensor &
self, int64_t offset, int64_t dim1, int64_t dim2)
const = 0;
261 virtual Tensor diagflat(
const Tensor &
self, int64_t offset)
const = 0;
262 virtual Tensor diagonal(
const Tensor &
self, int64_t offset, int64_t dim1, int64_t dim2)
const = 0;
279 virtual Tensor flatten(
const Tensor &
self, int64_t start_dim, int64_t end_dim)
const = 0;
285 virtual Tensor fft(
const Tensor &
self, int64_t signal_ndim,
bool normalized)
const = 0;
286 virtual Tensor ifft(
const Tensor &
self, int64_t signal_ndim,
bool normalized)
const = 0;
287 virtual Tensor rfft(
const Tensor &
self, int64_t signal_ndim,
bool normalized,
bool onesided)
const = 0;
288 virtual Tensor irfft(
const Tensor &
self, int64_t signal_ndim,
bool normalized,
bool onesided,
IntArrayRef signal_sizes)
const = 0;
295 virtual Tensor isclose(
const Tensor &
self,
const Tensor & other,
double rtol,
double atol,
bool equal_nan)
const = 0;
296 virtual bool is_distributed(
const Tensor &
self)
const = 0;
297 virtual bool is_floating_point(
const Tensor &
self)
const = 0;
298 virtual bool is_complex(
const Tensor &
self)
const = 0;
299 virtual bool is_nonzero(
const Tensor &
self)
const = 0;
300 virtual bool is_same_size(
const Tensor &
self,
const Tensor & other)
const = 0;
301 virtual bool is_signed(
const Tensor &
self)
const = 0;
302 virtual std::tuple<Tensor,Tensor> kthvalue(
const Tensor &
self, int64_t k, int64_t dim,
bool keepdim)
const = 0;
312 virtual Tensor log_softmax(
const Tensor &
self, int64_t dim, ScalarType dtype)
const = 0;
313 virtual Tensor log_softmax(
const Tensor &
self, int64_t dim)
const = 0;
316 virtual Tensor matrix_power(
const Tensor &
self, int64_t n)
const = 0;
317 virtual std::tuple<Tensor,Tensor> max(
const Tensor &
self, int64_t dim,
bool keepdim)
const = 0;
319 virtual Tensor mean(
const Tensor &
self, ScalarType dtype)
const = 0;
324 virtual std::tuple<Tensor,Tensor> median(
const Tensor &
self, int64_t dim,
bool keepdim)
const = 0;
325 virtual std::tuple<Tensor,Tensor> min(
const Tensor &
self, int64_t dim,
bool keepdim)
const = 0;
328 virtual std::tuple<Tensor,Tensor> mode(
const Tensor &
self, int64_t dim,
bool keepdim)
const = 0;
334 virtual Tensor mvlgamma(
const Tensor &
self, int64_t p)
const = 0;
335 virtual Tensor & mvlgamma_(
Tensor &
self, int64_t p)
const = 0;
336 virtual Tensor narrow_copy(
const Tensor &
self, int64_t dim, int64_t start, int64_t length)
const = 0;
337 virtual Tensor narrow(
const Tensor &
self, int64_t dim, int64_t start, int64_t length)
const = 0;
339 virtual Tensor pin_memory(
const Tensor &
self)
const = 0;
340 virtual Tensor pinverse(
const Tensor &
self,
double rcond)
const = 0;
349 virtual std::tuple<Tensor,Tensor> prelu_backward(
const Tensor & grad_output,
const Tensor &
self,
const Tensor & weight)
const = 0;
354 virtual Tensor select(
const Tensor &
self, int64_t dim, int64_t index)
const = 0;
363 virtual int64_t size(
const Tensor &
self, int64_t dim)
const = 0;
364 virtual Tensor slice(
const Tensor &
self, int64_t dim, int64_t start, int64_t end, int64_t step)
const = 0;
365 virtual std::tuple<Tensor,Tensor> slogdet(
const Tensor &
self)
const = 0;
367 virtual Tensor softmax(
const Tensor &
self, int64_t dim, ScalarType dtype)
const = 0;
368 virtual Tensor softmax(
const Tensor &
self, int64_t dim)
const = 0;
369 virtual std::vector<Tensor> split(
const Tensor &
self, int64_t split_size, int64_t dim)
const = 0;
370 virtual std::vector<Tensor> split_with_sizes(
const Tensor &
self,
IntArrayRef split_sizes, int64_t dim)
const = 0;
372 virtual Tensor squeeze(
const Tensor &
self, int64_t dim)
const = 0;
374 virtual Tensor & squeeze_(
Tensor &
self, int64_t dim)
const = 0;
377 virtual int64_t stride(
const Tensor &
self, int64_t dim)
const = 0;
378 virtual Tensor sum(
const Tensor &
self, ScalarType dtype)
const = 0;
388 virtual Tensor prod(
const Tensor &
self, ScalarType dtype)
const = 0;
390 virtual Tensor prod(
const Tensor &
self, int64_t dim,
bool keepdim, ScalarType dtype)
const = 0;
391 virtual Tensor prod(
const Tensor &
self, int64_t dim,
bool keepdim)
const = 0;
392 virtual Tensor prod(
const Tensor &
self, int64_t dim, ScalarType dtype)
const = 0;
399 virtual Tensor transpose(
const Tensor &
self, int64_t dim0, int64_t dim1)
const = 0;
400 virtual Tensor & transpose_(
Tensor &
self, int64_t dim0, int64_t dim1)
const = 0;
407 virtual Tensor unsqueeze(
const Tensor &
self, int64_t dim)
const = 0;
408 virtual Tensor & unsqueeze_(
Tensor &
self, int64_t dim)
const = 0;
409 virtual Tensor var(
const Tensor &
self,
bool unbiased)
const = 0;
427 virtual Tensor & sparse_resize_(
Tensor &
self,
IntArrayRef size, int64_t sparse_dim, int64_t dense_dim)
const = 0;
428 virtual Tensor & sparse_resize_and_clear_(
Tensor &
self,
IntArrayRef size, int64_t sparse_dim, int64_t dense_dim)
const = 0;
431 virtual int64_t sparse_dim(
const Tensor &
self)
const = 0;
432 virtual int64_t _dimI(
const Tensor &
self)
const = 0;
433 virtual int64_t dense_dim(
const Tensor &
self)
const = 0;
434 virtual int64_t _dimV(
const Tensor &
self)
const = 0;
435 virtual int64_t _nnz(
const Tensor &
self)
const = 0;
437 virtual bool is_coalesced(
const Tensor &
self)
const = 0;
440 virtual Tensor & _coalesced_(
Tensor &
self,
bool coalesced)
const = 0;
443 virtual int64_t numel(
const Tensor &
self)
const = 0;
444 virtual std::vector<Tensor> unbind(
const Tensor &
self, int64_t dim)
const = 0;
445 virtual Tensor to_sparse(
const Tensor &
self, int64_t sparse_dim)
const = 0;
446 virtual Tensor to_sparse(
const Tensor &
self)
const = 0;
449 virtual Tensor to(
const Tensor &
self, ScalarType dtype,
bool non_blocking,
bool copy)
const = 0;
450 virtual Tensor to(
const Tensor &
self,
const Tensor & other,
bool non_blocking,
bool copy)
const = 0;
452 virtual void* data_ptr(
const Tensor &
self)
const = 0;
457 virtual bool is_set_to(
const Tensor &
self,
const Tensor & tensor)
const = 0;
512 virtual Tensor & tril_(
Tensor &
self, int64_t diagonal)
const = 0;
513 virtual Tensor & triu_(
Tensor &
self, int64_t diagonal)
const = 0;
515 virtual Tensor & polygamma_(
Tensor &
self, int64_t n)
const = 0;
539 virtual Tensor & cauchy_(
Tensor &
self,
double median,
double sigma,
Generator * generator)
const = 0;
540 virtual Tensor & log_normal_(
Tensor &
self,
double mean,
double std,
Generator * generator)
const = 0;
543 virtual Tensor diag(
const Tensor &
self, int64_t diagonal)
const = 0;
544 virtual Tensor cross(
const Tensor &
self,
const Tensor & other, int64_t dim)
const = 0;
545 virtual Tensor triu(
const Tensor &
self, int64_t diagonal)
const = 0;
546 virtual Tensor tril(
const Tensor &
self, int64_t diagonal)
const = 0;
561 virtual Tensor index_select(
const Tensor &
self, int64_t dim,
const Tensor & index)
const = 0;
564 virtual Tensor gather(
const Tensor &
self, int64_t dim,
const Tensor & index,
bool sparse_grad)
const = 0;
567 virtual std::tuple<Tensor,Tensor> gels(
const Tensor &
self,
const Tensor &
A)
const = 0;
568 virtual std::tuple<Tensor,Tensor> trtrs(
const Tensor &
self,
const Tensor & A,
bool upper,
bool transpose,
bool unitriangular)
const = 0;
569 virtual std::tuple<Tensor,Tensor> symeig(
const Tensor &
self,
bool eigenvectors,
bool upper)
const = 0;
570 virtual std::tuple<Tensor,Tensor> eig(
const Tensor &
self,
bool eigenvectors)
const = 0;
571 virtual std::tuple<Tensor,Tensor,Tensor> svd(
const Tensor &
self,
bool some,
bool compute_uv)
const = 0;
572 virtual Tensor cholesky(
const Tensor &
self,
bool upper)
const = 0;
573 virtual Tensor cholesky_solve(
const Tensor &
self,
const Tensor & input2,
bool upper)
const = 0;
574 virtual std::tuple<Tensor,Tensor> solve(
const Tensor &
self,
const Tensor & A)
const = 0;
575 virtual Tensor potri(
const Tensor &
self,
bool upper)
const = 0;
576 virtual std::tuple<Tensor,Tensor> pstrf(
const Tensor &
self,
bool upper,
Scalar tol)
const = 0;
577 virtual std::tuple<Tensor,Tensor> qr(
const Tensor &
self)
const = 0;
578 virtual std::tuple<Tensor,Tensor> geqrf(
const Tensor &
self)
const = 0;
580 virtual Tensor ormqr(
const Tensor &
self,
const Tensor & input2,
const Tensor & input3,
bool left,
bool transpose)
const = 0;
581 virtual std::tuple<Tensor,Tensor> btrifact(
const Tensor &
self,
bool pivot)
const = 0;
582 virtual std::tuple<Tensor,Tensor,Tensor> btrifact_with_info(
const Tensor &
self,
bool pivot)
const = 0;
584 virtual Tensor multinomial(
const Tensor &
self, int64_t num_samples,
bool replacement,
Generator * generator)
const = 0;
587 virtual Tensor polygamma(int64_t n,
const Tensor &
self)
const = 0;
591 virtual Tensor reciprocal(
const Tensor &
self)
const = 0;
607 virtual std::tuple<Tensor,Tensor> sort(
const Tensor &
self, int64_t dim,
bool descending)
const = 0;
608 virtual Tensor argsort(
const Tensor &
self, int64_t dim,
bool descending)
const = 0;
609 virtual std::tuple<Tensor,Tensor> topk(
const Tensor &
self, int64_t k, int64_t dim,
bool largest,
bool sorted)
const = 0;
613 virtual Tensor unfold(
const Tensor &
self, int64_t dimension, int64_t size, int64_t step)
const = 0;
614 virtual bool equal(
const Tensor &
self,
const Tensor & other)
const = 0;
626 #include <ATen/core/Tensor.h>
TensorOptions device_index(int16_t device_index)
Convenience function that returns a TensorOptions object with the device set to CUDA and the device_i...
Scalar represents a 0-dimensional tensor which contains a single element.
TensorOptions device(Device device)
Convenience function that returns a TensorOptions object with the device set to the given one...
TensorOptions options(int16_t device_index=-1) const
Constructs the TensorOptions from a type and a device_index.
Represents a compute device on which a tensor is located.
Backend
This legacy enum class defines the set of backends supported by old school, code generated Type-based...
Dynamic type ID of a Tensor argument.
TensorOptions(T &&device)
A class to encapsulate construction axes of a Tensor.
TensorOptions layout(Layout layout)
Convenience function that returns a TensorOptions object with the layout set to the given one...
To register your own kernel for an operator, do in one (!) cpp file: C10_REGISTER_KERNEL(OperatorHand...
Flush-To-Zero and Denormals-Are-Zero mode.
DeviceIndex index() const noexcept
Returns the optional index.
TensorOptions options(c10::optional< Device > device_opt) const
Constructs the TensorOptions from a type and a Device.
DeviceType type() const noexcept
Returns the type of device this is.
TensorOptions dtype(caffe2::TypeMeta dtype)
Convenience function that returns a TensorOptions object with the dtype set to the given one...