3 #include <c10/core/Device.h> 4 #include <c10/core/Layout.h> 5 #include <c10/core/Scalar.h> 6 #include <c10/core/ScalarType.h> 7 #include <ATen/core/SparseTensorRef.h> 8 #include <c10/core/Storage.h> 9 #include <ATen/core/TensorAccessor.h> 10 #include <c10/core/TensorImpl.h> 11 #include <c10/core/UndefinedTensorImpl.h> 12 #include <c10/util/Exception.h> 13 #include <c10/util/Optional.h> 14 #include <c10/core/Tensor.h> 15 #include <ATen/core/LegacyTypeDispatch.h> 29 using TensorList = ArrayRef<Tensor>;
// Constructor fragment: takes ownership of the supplied TensorImpl and
// refuses a null impl by throwing std::runtime_error. (NOTE(review): an
// "undefined" tensor presumably uses UndefinedTensorImpl rather than a
// null pointer — confirm against the full header; the include list
// pulls in c10/core/UndefinedTensorImpl.h.)
55 : impl_(std::move(tensor_impl)) {
56 if (impl_.get() ==
nullptr) {
57 throw std::runtime_error(
"TensorImpl with nullptr is not supported");
// Fragment of a static factory: wraps an already-created impl in a
// Tensor and runs enforce_invariants() on it before it is handed out,
// so callers never observe a Tensor that violates class invariants.
67 static Tensor wrap_tensor_impl(
69 Tensor r(std::move(tensor_impl));
70 r.enforce_invariants();
89 int64_t storage_offset()
const {
90 return impl_->storage_offset();
97 return impl_.release();
103 bool defined()
const {
146 impl_ = std::move(x.impl_);
154 bool is_same(
const Tensor& other)
const noexcept {
155 return impl_ == other.impl_;
157 size_t use_count()
const noexcept {
158 return impl_.use_count();
160 size_t weak_use_count()
const noexcept {
161 return impl_.weak_use_count();
164 const char * toString()
const;
167 return impl_->sizes();
170 return impl_->strides();
172 int64_t ndimension()
const {
175 bool is_contiguous()
const {
176 return impl_->is_contiguous();
184 size_t nbytes()
const {
185 return impl_->numel() * impl_->itemsize();
190 size_t itemsize()
const {
191 return impl_->itemsize();
195 size_t element_size()
const {
196 return impl_->itemsize();
199 Type & type()
const {
203 return impl_->type_id();
205 ScalarType scalar_type()
const {
206 return typeMetaToScalarType(impl_->dtype());
208 bool has_storage()
const {
209 return defined() && impl_->has_storage();
211 const Storage& storage()
const {
212 return impl_->storage();
214 bool is_alias_of(
const at::Tensor& other)
const{
215 return impl_->storage().is_alias_of(other.storage());
217 Tensor toType(
const Type & t,
bool non_blocking=
false)
const;
218 Tensor & copy_(
const Tensor & src,
bool non_blocking=
false);
219 Tensor toType(ScalarType t)
const;
224 bool is_variable()
const noexcept;
227 Layout
layout()
const noexcept;
236 int64_t get_device()
const;
239 bool is_cuda()
const;
245 bool is_sparse()
const;
254 template <
typename T>
262 template<
typename T,
size_t N>
264 static_assert(N > 0,
"accessor is used for indexing tensor, for scalars use *data<T>()");
265 AT_CHECK(dim() == N,
"expected ", N,
" dims but tensor has ", dim());
268 template<
typename T,
size_t N>
276 template<
typename T,
size_t N,
template <
typename U>
class PtrTraits =
DefaultPtrTraits,
typename index_t = int64_t>
278 static_assert(N > 0,
"accessor is used for indexing tensor, for scalars use *data<T>()");
279 AT_CHECK(dim() == N,
"expected ", N,
" dims but tensor has ", dim());
282 template<
typename T,
size_t N,
template <
typename U>
class PtrTraits =
DefaultPtrTraits,
typename index_t = int64_t>
296 Tensor operator[](int64_t index)
const;
305 impl_->set_requires_grad(requires_grad);
313 return impl_->grad();
315 const Tensor& grad()
const {
316 return impl_->grad();
319 void set_data(
Tensor new_data);
324 bool keep_graph =
false,
325 bool create_graph =
false);
344 Tensor all(int64_t dim,
bool keepdim=
false)
const;
345 bool allclose(
const Tensor & other,
double rtol=1e-05,
double atol=1e-08,
bool equal_nan=
false)
const;
346 Tensor any(int64_t dim,
bool keepdim=
false)
const;
361 Tensor bincount(
const Tensor & weights={}, int64_t minlength=0)
const;
365 std::vector<Tensor> chunk(int64_t chunks, int64_t dim=0)
const;
372 Tensor contiguous()
const;
377 Tensor cumsum(int64_t dim, ScalarType
dtype)
const;
378 Tensor cumsum(int64_t dim)
const;
379 Tensor cumprod(int64_t dim, ScalarType dtype)
const;
380 Tensor cumprod(int64_t dim)
const;
382 Tensor diag_embed(int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1)
const;
383 Tensor diagflat(int64_t offset=0)
const;
384 Tensor diagonal(int64_t offset=0, int64_t dim1=0, int64_t dim2=1)
const;
401 Tensor flatten(int64_t start_dim=0, int64_t end_dim=-1)
const;
407 Tensor fft(int64_t signal_ndim,
bool normalized=
false)
const;
408 Tensor ifft(int64_t signal_ndim,
bool normalized=
false)
const;
409 Tensor rfft(int64_t signal_ndim,
bool normalized=
false,
bool onesided=
true)
const;
410 Tensor irfft(int64_t signal_ndim,
bool normalized=
false,
bool onesided=
true,
IntArrayRef signal_sizes={})
const;
417 Tensor isclose(
const Tensor & other,
double rtol=1e-05,
double atol=1e-08,
bool equal_nan=
false)
const;
418 bool is_distributed()
const;
419 bool is_floating_point()
const;
420 bool is_complex()
const;
421 bool is_nonzero()
const;
422 bool is_same_size(
const Tensor & other)
const;
423 bool is_signed()
const;
424 std::tuple<Tensor,Tensor> kthvalue(int64_t k, int64_t dim=-1,
bool keepdim=
false)
const;
434 Tensor log_softmax(int64_t dim, ScalarType dtype)
const;
435 Tensor log_softmax(int64_t dim)
const;
438 Tensor matrix_power(int64_t n)
const;
439 std::tuple<Tensor,Tensor> max(int64_t dim,
bool keepdim=
false)
const;
441 Tensor mean(ScalarType dtype)
const;
446 std::tuple<Tensor,Tensor> median(int64_t dim,
bool keepdim=
false)
const;
447 std::tuple<Tensor,Tensor> min(int64_t dim,
bool keepdim=
false)
const;
450 std::tuple<Tensor,Tensor> mode(int64_t dim=-1,
bool keepdim=
false)
const;
456 Tensor mvlgamma(int64_t p)
const;
457 Tensor & mvlgamma_(int64_t p);
458 Tensor narrow_copy(int64_t dim, int64_t start, int64_t length)
const;
459 Tensor narrow(int64_t dim, int64_t start, int64_t length)
const;
461 Tensor pin_memory()
const;
462 Tensor pinverse(
double rcond=1e-15)
const;
471 std::tuple<Tensor,Tensor> prelu_backward(
const Tensor & grad_output,
const Tensor & weight)
const;
476 Tensor select(int64_t dim, int64_t index)
const;
485 int64_t size(int64_t dim)
const;
486 Tensor slice(int64_t dim=0, int64_t start=0, int64_t end=9223372036854775807, int64_t step=1)
const;
487 std::tuple<Tensor,Tensor> slogdet()
const;
489 Tensor softmax(int64_t dim, ScalarType dtype)
const;
490 Tensor softmax(int64_t dim)
const;
491 std::vector<Tensor> split(int64_t split_size, int64_t dim=0)
const;
492 std::vector<Tensor> split_with_sizes(
IntArrayRef split_sizes, int64_t dim=0)
const;
494 Tensor squeeze(int64_t dim)
const;
496 Tensor & squeeze_(int64_t dim);
499 int64_t stride(int64_t dim)
const;
500 Tensor sum(ScalarType dtype)
const;
510 Tensor prod(ScalarType dtype)
const;
512 Tensor prod(int64_t dim,
bool keepdim, ScalarType dtype)
const;
513 Tensor prod(int64_t dim,
bool keepdim=
false)
const;
514 Tensor prod(int64_t dim, ScalarType dtype)
const;
521 Tensor transpose(int64_t dim0, int64_t dim1)
const;
522 Tensor & transpose_(int64_t dim0, int64_t dim1);
529 Tensor unsqueeze(int64_t dim)
const;
530 Tensor & unsqueeze_(int64_t dim);
531 Tensor var(
bool unbiased=
true)
const;
549 Tensor & sparse_resize_(
IntArrayRef size, int64_t sparse_dim, int64_t dense_dim);
550 Tensor & sparse_resize_and_clear_(
IntArrayRef size, int64_t sparse_dim, int64_t dense_dim);
553 int64_t sparse_dim()
const;
554 int64_t _dimI()
const;
555 int64_t dense_dim()
const;
556 int64_t _dimV()
const;
557 int64_t _nnz()
const;
559 bool is_coalesced()
const;
562 Tensor & _coalesced_(
bool coalesced);
565 int64_t numel()
const;
566 std::vector<Tensor> unbind(int64_t dim=0)
const;
567 Tensor to_sparse(int64_t sparse_dim)
const;
570 Tensor to(
Device device, ScalarType dtype,
bool non_blocking=
false,
bool copy=
false)
const;
571 Tensor to(ScalarType dtype,
bool non_blocking=
false,
bool copy=
false)
const;
572 Tensor to(
const Tensor & other,
bool non_blocking=
false,
bool copy=
false)
const;
574 void* data_ptr()
const;
579 bool is_set_to(
const Tensor & tensor)
const;
634 Tensor & tril_(int64_t diagonal=0);
635 Tensor & triu_(int64_t diagonal=0);
637 Tensor & polygamma_(int64_t n);
656 Tensor & random_(int64_t from, int64_t to,
Generator * generator=
nullptr);
659 Tensor & uniform_(
double from=0,
double to=1,
Generator * generator=
nullptr);
660 Tensor & normal_(
double mean=0,
double std=1,
Generator * generator=
nullptr);
661 Tensor & cauchy_(
double median=0,
double sigma=1,
Generator * generator=
nullptr);
662 Tensor & log_normal_(
double mean=1,
double std=2,
Generator * generator=
nullptr);
665 Tensor diag(int64_t diagonal=0)
const;
666 Tensor cross(
const Tensor & other, int64_t dim=-1)
const;
667 Tensor triu(int64_t diagonal=0)
const;
668 Tensor tril(int64_t diagonal=0)
const;
683 Tensor index_select(int64_t dim,
const Tensor & index)
const;
686 Tensor gather(int64_t dim,
const Tensor & index,
bool sparse_grad=
false)
const;
689 std::tuple<Tensor,Tensor> gels(
const Tensor &
A)
const;
690 std::tuple<Tensor,Tensor> trtrs(
const Tensor & A,
bool upper=
true,
bool transpose=
false,
bool unitriangular=
false)
const;
691 std::tuple<Tensor,Tensor> symeig(
bool eigenvectors=
false,
bool upper=
true)
const;
692 std::tuple<Tensor,Tensor> eig(
bool eigenvectors=
false)
const;
693 std::tuple<Tensor,Tensor,Tensor> svd(
bool some=
true,
bool compute_uv=
true)
const;
694 Tensor cholesky(
bool upper=
false)
const;
695 Tensor cholesky_solve(
const Tensor & input2,
bool upper=
false)
const;
696 std::tuple<Tensor,Tensor> solve(
const Tensor & A)
const;
697 Tensor potri(
bool upper=
true)
const;
698 std::tuple<Tensor,Tensor> pstrf(
bool upper=
true,
Scalar tol=-1)
const;
699 std::tuple<Tensor,Tensor> qr()
const;
700 std::tuple<Tensor,Tensor> geqrf()
const;
702 Tensor ormqr(
const Tensor & input2,
const Tensor & input3,
bool left=
true,
bool transpose=
false)
const;
703 std::tuple<Tensor,Tensor> btrifact(
bool pivot=
true)
const;
704 std::tuple<Tensor,Tensor,Tensor> btrifact_with_info(
bool pivot=
true)
const;
706 Tensor multinomial(int64_t num_samples,
bool replacement=
false,
Generator * generator=
nullptr)
const;
709 Tensor polygamma(int64_t n)
const;
713 Tensor reciprocal()
const;
729 std::tuple<Tensor,Tensor> sort(int64_t dim=-1,
bool descending=
false)
const;
730 Tensor argsort(int64_t dim=-1,
bool descending=
false)
const;
731 std::tuple<Tensor,Tensor> topk(int64_t k, int64_t dim=-1,
bool largest=
true,
bool sorted=
true)
const;
735 Tensor unfold(int64_t dimension, int64_t size, int64_t step)
const;
736 bool equal(
const Tensor & other)
const;
746 return this->to(typeMetaToScalarType(type_meta), non_blocking, copy);
749 return this->to(device, typeMetaToScalarType(type_meta), non_blocking, copy);
// Generic "apply" helper: invokes func(*this, params...) with the extra
// arguments perfectly forwarded; the return type is deduced from that
// exact call expression via the trailing decltype, so any callable that
// accepts a Tensor as its first argument works. (Closing brace of the
// body is lost to the extraction.)
752 template <
typename F,
typename... Args>
753 auto m(F func, Args&&... params)
const -> decltype(func(*
this, std::forward<Args>(params)...)) {
754 return func(*
this, std::forward<Args>(params)...);
760 void enforce_invariants();
770 return Tensor(weak_impl_.lock());
773 bool is_same(
const WeakTensor& other)
const noexcept {
774 return weak_impl_ == other.weak_impl_;
777 size_t use_count()
const noexcept {
778 return weak_impl_.use_count();
780 size_t weak_use_count()
const noexcept {
781 return weak_impl_.weak_use_count();
785 return weak_impl_._unsafe_get_target();
// Factory: constructs a TensorImpl subclass T in place via
// c10::make_intrusive, forwarding args to T's constructor, and wraps
// the resulting intrusive_ptr in a Tensor. (Closing brace of the body
// is lost to the extraction.)
796 template <
typename T,
typename... Args>
797 Tensor make_tensor(Args&&... args) {
798 return Tensor(c10::make_intrusive<T>(std::forward<Args>(args)...));
804 #include <ATen/core/TensorMethods.h>
This is a minimal Tensor class for use in c10 code.
Scalar represents a 0-dimensional tensor which contains a single element.
The low-level representation of a tensor, which contains a pointer to a storage (which contains the a...
TensorOptions device(Device device)
Convenience function that returns a TensorOptions object with the device set to the given one...
Represents a compute device on which a tensor is located.
Backend
This legacy enum class defines the set of backends supported by old school, code generated Type-based...
Dynamic type ID of a Tensor argument.
TensorOptions(T &&device)
A class to encapsulate construction axes of a Tensor.
TensorOptions layout(Layout layout)
Convenience function that returns a TensorOptions object with the layout set to the given one...
To register your own kernel for an operator, do in one (!) cpp file: C10_REGISTER_KERNEL(OperatorHand...
Type & legacyTensorType(const TensorImpl &tensor)
Return the Type object corresponding to this Tensor, which we can use to do dynamic dispatch to opera...
TensorOptions requires_grad(bool requires_grad=true)
Convenience function that returns a TensorOptions object with the requires_grad set to the given one...
Flush-To-Zero and Denormals-Are-Zero mode.
C10_NODISCARD TensorOptions requires_grad(c10::optional< bool > requires_grad) const noexcept
Sets the requires_grad property of the TensorOptions.
TensorOptions dtype(caffe2::TypeMeta dtype)
Convenience function that returns a TensorOptions object with the dtype set to the given one...