#include <c10/core/Device.h>
#include <c10/core/Layout.h>
#include <c10/core/Scalar.h>
#include <c10/core/ScalarType.h>
#include <ATen/core/SparseTensorRef.h>
#include <c10/core/Storage.h>
#include <ATen/core/TensorAccessor.h>
#include <c10/core/TensorImpl.h>
#include <c10/core/UndefinedTensorImpl.h>
#include <c10/util/Exception.h>
#include <c10/util/Optional.h>
#include <c10/core/Tensor.h>
#include <ATen/core/LegacyTypeDispatch.h>

using TensorList = ArrayRef<Tensor>;
class CAFFE2_API Tensor {
 public:
  explicit Tensor(
      c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl)
      : impl_(std::move(tensor_impl)) {
    if (impl_.get() == nullptr) {
      throw std::runtime_error("TensorImpl with nullptr is not supported");
    }
  }
  static Tensor wrap_tensor_impl(
      c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> tensor_impl) {
    Tensor r(std::move(tensor_impl));
    r.enforce_invariants();
    return r;
  }
  explicit Tensor(C10Tensor tensor) : impl_(std::move(tensor).impl()) {
    enforce_invariants();
  }

  explicit operator C10Tensor() const & {
    return C10Tensor(impl_);
  }

  explicit operator C10Tensor() && {
    return C10Tensor(std::move(impl_));
  }
  int64_t storage_offset() const {
    return impl_->storage_offset();
  }

  TensorImpl * unsafeGetTensorImpl() const {
    return impl_.get();
  }
  TensorImpl * unsafeReleaseTensorImpl() {
    return impl_.release();
  }

  bool defined() const {
    return impl_;
  }
  Tensor& operator=(Tensor&& x) & {
    impl_ = std::move(x.impl_);
    return *this;
  }

  Tensor& operator=(Scalar v) &&;

  bool is_same(const Tensor& other) const noexcept {
    return impl_ == other.impl_;
  }
  size_t use_count() const noexcept {
    return impl_.use_count();
  }
  size_t weak_use_count() const noexcept {
    return impl_.weak_use_count();
  }
  const char * toString() const;

  IntArrayRef sizes() const {
    return impl_->sizes();
  }
  IntArrayRef strides() const {
    return impl_->strides();
  }
  int64_t ndimension() const {
    return dim();
  }
  bool is_contiguous() const {
    return impl_->is_contiguous();
  }

  // Total bytes consumed by the tensor's elements; does not include metadata.
  size_t nbytes() const {
    return impl_->numel() * impl_->itemsize();
  }

  size_t itemsize() const {
    return impl_->itemsize();
  }
  // Returns the same value as itemsize(): the size in bytes of one element.
  size_t element_size() const {
    return impl_->itemsize();
  }
  Type & type() const {
    return legacyTensorType(*impl_);
  }
  TensorTypeId type_id() const {
    return impl_->type_id();
  }
  ScalarType scalar_type() const {
    return typeMetaToScalarType(impl_->dtype());
  }
  bool has_storage() const {
    return defined() && impl_->has_storage();
  }
  const Storage& storage() const {
    return impl_->storage();
  }
  bool is_alias_of(const at::Tensor& other) const {
    return impl_->storage().is_alias_of(other.storage());
  }
  Tensor toType(const Type & t, bool non_blocking=false) const;
  Tensor & copy_(const Tensor & src, bool non_blocking=false);
  Tensor toType(ScalarType t) const;

  /// Returns true if the Tensor is actually a torch::autograd::Variable.
  bool is_variable() const noexcept;

  /// Returns the Tensor's layout.
  Layout layout() const noexcept;

  /// Returns the Tensor's device index.
  int64_t get_device() const;

  /// Returns true if the Tensor is stored on a CUDA device.
  bool is_cuda() const;

  /// Returns true if the Tensor uses a sparse layout.
  bool is_sparse() const;
  template <typename T>
  T * data() const;

  // Return a TensorAccessor for efficient element access; the scalar type T and
  // the dimensionality N must match the tensor.
  template<typename T, size_t N>
  TensorAccessor<T,N> accessor() const& {
    static_assert(N > 0, "accessor is used for indexing tensor, for scalars use *data<T>()");
    AT_CHECK(dim() == N, "expected ", N, " dims but tensor has ", dim());
    return TensorAccessor<T,N>(data<T>(), sizes().data(), strides().data());
  }
  // Deleted on rvalues: the accessor keeps pointers into this tensor's metadata.
  template<typename T, size_t N>
  TensorAccessor<T,N> accessor() && = delete;
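  // Usage sketch (illustrative, not part of this header; the tensor `t` and its
  // shape are assumptions): an accessor gives cheap multi-dimensional element
  // access on CPU without going through dispatch on every element.
  //
  //   at::Tensor t = at::zeros({4, 8});
  //   auto a = t.accessor<float, 2>();
  //   for (int64_t i = 0; i < t.size(0); ++i) {
  //     for (int64_t j = 0; j < t.size(1); ++j) {
  //       a[i][j] = static_cast<float>(i + j);
  //     }
  //   }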
  // Return a PackedTensorAccessor, which copies sizes and strides by value so it
  // can be passed into CUDA kernels. PtrTraits can be set to RestrictPtrTraits to
  // obtain a __restrict__ data pointer.
  template<typename T, size_t N,
           template <typename U> class PtrTraits = DefaultPtrTraits,
           typename index_t = int64_t>
  PackedTensorAccessor<T,N,PtrTraits,index_t> packed_accessor() const& {
    static_assert(N > 0, "accessor is used for indexing tensor, for scalars use *data<T>()");
    AT_CHECK(dim() == N, "expected ", N, " dims but tensor has ", dim());
    return PackedTensorAccessor<T,N,PtrTraits,index_t>(
        static_cast<typename PtrTraits<T>::PtrType>(data<T>()),
        sizes().data(), strides().data());
  }
  template<typename T, size_t N,
           template <typename U> class PtrTraits = DefaultPtrTraits,
           typename index_t = int64_t>
  PackedTensorAccessor<T,N> packed_accessor() && = delete;
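  // Usage sketch (illustrative; the kernel, tensor, and launch configuration are
  // assumptions): unlike TensorAccessor, a PackedTensorAccessor owns copies of
  // the sizes and strides, so it can be passed by value into a CUDA kernel.
  //
  //   __global__ void scale(at::PackedTensorAccessor<float, 2> a, float s) {
  //     const int i = blockIdx.x;
  //     const int j = threadIdx.x;
  //     a[i][j] *= s;   // grid/block chosen to match the tensor's shape
  //   }
  //   // host side: scale<<<t.size(0), t.size(1)>>>(t.packed_accessor<float, 2>(), 2.0f);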
  Tensor& operator+=(Scalar other);
  Tensor& operator-=(Scalar other);
  Tensor& operator*=(Scalar other);
  Tensor& operator/=(Scalar other);
  Tensor operator[](Scalar index) const;
  Tensor operator[](int64_t index) const;
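  // Usage sketch (illustrative): the compound-assignment operators mutate the
  // tensor in place, and operator[] selects along the first dimension.
  //
  //   at::Tensor t = at::ones({3, 3});
  //   t *= 2;                  // every element is now 2
  //   at::Tensor row = t[0];   // view of the first row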
  Tensor& set_requires_grad(bool requires_grad) {
    impl_->set_requires_grad(requires_grad);
    return *this;
  }

  Tensor& grad() {
    return impl_->grad();
  }
  const Tensor& grad() const {
    return impl_->grad();
  }
  void set_data(Tensor new_data);

  /// Computes the gradient of the current tensor w.r.t. graph leaves.
  void backward(
      c10::optional<Tensor> gradient = c10::nullopt,
      bool keep_graph = false,
      bool create_graph = false);
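  // Usage sketch (illustrative; uses the torch:: C++ frontend, where tensors are
  // Variable-backed): backward() accumulates gradients into grad() on leaves
  // that require grad.
  //
  //   torch::Tensor x = torch::ones({2, 2}, torch::requires_grad());
  //   torch::Tensor y = (x * x).sum();
  //   y.backward();
  //   torch::Tensor g = x.grad();   // equals 2 * x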
  // Placeholder expanded by ATen's code generator with the declarations of all
  // generated Tensor methods.
  ${tensor_method_declarations}

  Tensor to(caffe2::TypeMeta type_meta, bool non_blocking=false, bool copy=false) const {
    return this->to(typeMetaToScalarType(type_meta), non_blocking, copy);
  }
  Tensor to(Device device, caffe2::TypeMeta type_meta, bool non_blocking=false, bool copy=false) const {
    return this->to(device, typeMetaToScalarType(type_meta), non_blocking, copy);
  }
  // Invokes `func(*this, params...)`, allowing a free function to be applied in
  // method-call style.
  template <typename F, typename... Args>
  auto m(F func, Args&&... params) const -> decltype(func(*this, std::forward<Args>(params)...)) {
    return func(*this, std::forward<Args>(params)...);
  }
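  // Usage sketch (illustrative; `my_norm` is a hypothetical free function, not
  // part of this API): m() lets a free function be chained like a method.
  //
  //   double my_norm(const at::Tensor& t, double p);
  //   at::Tensor t = at::rand({10});
  //   double n = t.m(my_norm, 2.0);   // same as my_norm(t, 2.0)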
  friend struct WeakTensor;

 protected:
  void enforce_invariants();
  c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl> impl_;
};
struct CAFFE2_API WeakTensor {
  WeakTensor(const Tensor& t) : weak_impl_(t.impl_) {}

  // Can return an undefined Tensor if the underlying TensorImpl is gone.
  Tensor lock() const {
    return Tensor(weak_impl_.lock());
  }

  bool is_same(const WeakTensor& other) const noexcept {
    return weak_impl_ == other.weak_impl_;
  }

  size_t use_count() const noexcept {
    return weak_impl_.use_count();
  }
  size_t weak_use_count() const noexcept {
    return weak_impl_.weak_use_count();
  }

  TensorImpl* unsafeGetTensorImpl() const {
    return weak_impl_._unsafe_get_target();
  }

 private:
  c10::weak_intrusive_ptr<TensorImpl, UndefinedTensorImpl> weak_impl_;
};
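// Usage sketch (illustrative): a WeakTensor does not keep the TensorImpl alive;
// lock() re-acquires a strong Tensor, which may be undefined if all strong
// references have already been released.
//
//   at::Tensor t = at::zeros({2});
//   at::WeakTensor w(t);
//   at::Tensor t2 = w.lock();
//   if (t2.defined()) { /* the underlying TensorImpl is still alive */ }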
// Helper that constructs a TensorImpl subclass in place and wraps it in a Tensor,
// so callers do not have to spell out the intrusive_ptr type themselves.
template <typename T, typename... Args>
Tensor make_tensor(Args&&... args) {
  return Tensor(c10::make_intrusive<T>(std::forward<Args>(args)...));
}
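// Usage sketch (illustrative; `MyTensorImpl` is a hypothetical TensorImpl
// subclass): make_tensor forwards its arguments to the subclass constructor.
//
//   at::Tensor t = make_tensor<MyTensorImpl>(/* constructor args */);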
#include <ATen/core/TensorMethods.h>