Caffe2 - C++ API
A deep learning, cross-platform ML framework
Namespaces | Data Structures | Typedefs | Enumerations | Functions | Variables
caffe2 Namespace Reference

A global dictionary that holds information about what Caffe2 modules have been loaded in the current runtime, and also utility functions to load modules. More...

Namespaces

 detail
 To make a c10 operator "C10Add" callable from caffe2 as "C2MyAddOpName", just write.
 

Data Structures

struct  _CaffeHighestPreallocatedTypeId
 
struct  AbsFunctor
 
struct  AbsGradientFunctor
 
struct  AbstractLengthsDef
 
class  AbstractLengthsGradientOp
 
class  AbstractLengthsOp
 Segment reduction op with optional fused embedding lookup. More...
 
class  AbstractLengthsWithMainInputAndForwardOutputGradientOp
 
class  AbstractLengthsWithMainInputGradientOp
 
struct  AbstractReduceBackDef
 
struct  AbstractReduceFrontDef
 
class  AbstractReduceFrontOrBackGradientOp
 
class  AbstractReduceFrontOrBackOp
 Simple non-segmented reduction over the first few dimensions of the tensor. More...
 
struct  AbstractSortedSegmentDef
 
class  AbstractSortedSegmentGradientOp
 
class  AbstractSortedSegmentOp
 Segment reduction op with optional fused embedding lookup. More...
 
struct  AbstractSortedSegmentRangeDef
 
class  AbstractSortedSegmentRangeGradientOp
 
class  AbstractSortedSegmentRangeOp
 Base implementation for segment reduction op that leverages continuity of the data. More...
 
struct  AbstractSparseLengthsDef
 
struct  AbstractSparseSortedSegmentDef
 
struct  AbstractSparseUnsortedSegmentDef
 
struct  AbstractUnsortedSegmentDef
 
class  AbstractUnsortedSegmentGradientOp
 
class  AbstractUnsortedSegmentOp
 Unsorted segment reduction op with optional fused embedding lookup. More...
 
class  AccumulateHistogramOp
 
class  AccumulateInputGradientOp
 
class  AccumulateOp
 
class  AccuracyOp
 
struct  AcosFunctor
 
struct  AcosGradientFunctor
 
class  AdadeltaOp
 
class  AdagradOp
 
class  AdamOp
 
class  AddDNNLowPOp
 
struct  AddFunctor
 
class  AddPaddingOp
 
class  AdjustBatchOp
 
class  AffineChannelGradientOp
 
class  AffineChannelOp
 
class  AlgorithmsCache
 
class  AliasOp
 Alias op makes the output and the input share the same underlying storage. More...
 
struct  AlignedDeleter
 
struct  AllocAligned
 
class  AlternateLearningRate
 
class  APMeterOp
 
struct  ArgMaxReducer
 
struct  ArgMinReducer
 
class  ArgOp
 
class  ArgumentHelper
 A helper class to index into arguments. More...
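 
 A minimal usage sketch (assuming the constructor and accessors declared in caffe2/utils/proto_utils.h; the argument names below are hypothetical):
 
    #include "caffe2/utils/proto_utils.h"

    void ReadArgs(const caffe2::OperatorDef& op_def) {
      caffe2::ArgumentHelper helper(op_def);
      // Scalar argument with a default value, plus a presence check.
      const int axis = helper.GetSingleArgument<int>("axis", 1);
      const bool has_pads = helper.HasArgument("pads");
      (void)axis;
      (void)has_pads;
    }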
 
struct  AsinFunctor
 
struct  AsinGradientFunctor
 
class  AssertOp
 
class  AsyncNetBase
 
class  AsyncNetExecutorHelper
 
class  AsyncSchedulingNet
 
class  AsyncTask
 
class  AsyncTaskFuture
 
class  AsyncTaskGraph
 
class  AsyncTaskGraphBase
 
struct  AtanFunctor
 
struct  AtanGradientFunctor
 
class  AtomicIterOp
 
class  AveragedLoss
 
class  AveragedLossGradient
 
struct  AveragePoolFunctor
 
class  AvgExportedStat
 
class  BackendTransformerBase
 
class  BaseInputAccessor
 
class  BaseReducer
 
class  BaseReducerGradient
 
class  BatchBoxCoxOp
 
class  BatchBucketizeOp
 
class  BatchBucketOneHotOp
 
class  BatchDenseToSparseOp
 
class  BatchGatherGradientOp
 
class  BatchGatherOp
 
class  BatchMatMulDNNLowPOp
 
class  BatchMatMulOp
 
class  BatchMomentsGradientOp
 
class  BatchMomentsOp
 
class  BatchOneHotOp
 
class  BatchPermutationDNNLowPOp
 
class  BatchPermutationGradientOp
 
class  BatchPermutationOp
 
class  BatchSparseToDenseOp
 
class  BatchToSpaceOp
 
class  BBoxTransformOp
 
class  BernoulliJSDGradientOp
 
class  BernoulliJSDOp
 
class  BinaryElementwiseDNNLowPOp
 
class  BinaryElementwiseWithArgsGradientOp
 
class  BinaryElementwiseWithArgsGradientOp< NumericTypes, CPUContext, BinaryFunctorWithDefaultCtor< DivFunctor< CPUContext > >, SameTypeAsInput, SameTypeAsInput >
 
class  BinaryElementwiseWithArgsOp
 
struct  BinaryFunctorWithDefaultCtor
 
class  BisectPercentileOp
 
class  Blob
 Blob is a general container that hosts a typed pointer. More...
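 
 A minimal sketch of typed access to a Blob (assuming the Get/GetMutable/IsType accessors from caffe2/core/blob.h):
 
    #include "caffe2/core/blob.h"

    void BlobExample() {
      caffe2::Blob blob;
      *blob.GetMutable<float>() = 3.14f;      // creates a float payload owned by the blob
      if (blob.IsType<float>()) {
        const float& v = blob.Get<float>();   // typed, checked read access
        (void)v;
      }
    }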
 
class  BlobDeserializerBase
 BlobDeserializerBase is an abstract class that deserializes a blob from a BlobProto or a TensorProto. More...
 
class  BlobSerializerBase
 BlobSerializerBase is an abstract class that serializes a blob to a string. More...
 
class  BlobsQueue
 
struct  BlobStatGetter
 
struct  BlobStatRegistry
 
class  BlockingCounter
 
class  BooleanMaskOp
 
class  BooleanUnmaskOp
 
class  BoundShapeInferencer
 
struct  BoundShapeSpec
 
class  BoxWithNMSLimitOp
 
class  BRGNCHWCToPackedInt8BGRAStylizerDeprocessOp
 
class  BufferedTokenizer
 
class  ByteWeightDequantOp
 
class  Caffe2Annotation
 
class  Caffe2ModuleTestDynamicDummyOp
 
struct  CastHelper
 
struct  CastHelper< std::string, SrcType >
 
class  CastOp
 
struct  CbrtFunctor
 
struct  CbrtGradientFunctor
 
class  CeilOp
 
class  ChannelBackpropStatsOp
 
class  ChannelShuffleDNNLowPOp
 
class  ChannelShuffleGradientOp
 
class  ChannelShuffleOp
 
class  ChannelStatsOp
 
struct  CharRange
 
class  CheckCounterDoneOp
 
class  CheckpointOp
 
class  ClipGradientOp
 
class  ClipOp
 
class  ClipTensorByScalingOp
 
class  CloseBlobsQueueOp
 
class  CloseRebatchingQueueOp
 
class  Col2ImOp
 
class  CollectAndDistributeFpnRpnProposalsOp
 
class  CommonSubexpressionEliminationTransform
 Common Subexpression Elimination. More...
 
class  CompositeLearningRate
 
class  CompositeLearningRateItem
 
class  ConcatDNNLowPOp
 
class  ConcatOp
 
class  ConditionalOp
 
class  ConstantFillOp
 
class  ConstantWarmupLearningRate
 
struct  ConvArgs
 
class  ConvDNNLowPAcc16Op
 Quantized Conv operator with 16-bit accumulation. More...
 
class  ConvDNNLowPOp
 
class  ConvDNNLowPPackWeightOp
 Pack a weight matrix that can be used by DNNLOWP Int8Conv operators. More...
 
class  Converter
 
class  ConvGradientOp
 
class  ConvOp
 
class  ConvPoolDNNLowPOpBase
 
class  ConvPoolOpBase
 
class  ConvReluOp
 
class  ConvToNNPackTransform
 
class  ConvTransposeGradientOp
 
class  ConvTransposeOp
 
class  ConvTransposeUnpoolBase
 
class  CopyCPUToIDEEPOp
 
class  CopyIDEEPToCPUOp
 
class  CopyOnDeviceLikeOp
 
class  CopyOp
 
struct  CosFunctor
 
struct  CosGradientFunctor
 
struct  CoshFunctor
 
struct  CoshGradientFunctor
 
class  CosineEmbeddingCriterionGradientOp
 
class  CosineEmbeddingCriterionOp
 
class  CosineSimilarityGradientOp
 
class  CosineSimilarityOp
 
class  CountDownOp
 
class  Counter
 
class  CountUpOp
 
class  CPUContext
 The CPU Context, representing the bare minimum of what a Context class in Caffe2 should implement. More...
 
struct  CPUEventWrapper
 
class  CpuId
 Identification of an Intel CPU. More...
 
class  CPUSparseLengthsReductionOp
 
class  CreateBlobsQueueOp
 
class  CreateCounterOp
 
class  CreateDBOp
 
class  CreateMapOp
 
class  CreateRebatchingQueueOp
 
class  CreateScopeOp
 
class  CreateTextFileReaderOp
 
class  CrossEntropyGradientOp
 
class  CrossEntropyOp
 
class  CTCBeamSearchDecoderOp
 
class  CTCGreedyDecoderOp
 
struct  CubeFunctor
 
struct  CubeGradientFunctor
 
class  CUDAContext
 
struct  CudaDevicePropWrapper
 
struct  CudaEventWrapper
 
class  CUDARecurrentNetworkExecutor
 
class  CudaRTCFunction
 
class  CuDNNActivationGradientOp
 
class  CuDNNActivationGradientOp< CUDNN_ACTIVATION_ELU >
 
class  CuDNNActivationOp
 
class  CuDNNActivationOp< CUDNN_ACTIVATION_ELU >
 
class  CuDNNActivationOpBase
 
class  CudnnConvGradientOp
 
class  CudnnConvOp
 
class  CudnnConvOpBase
 
class  CudnnConvTransposeGradientOp
 
class  CudnnConvTransposeOp
 
class  CudnnConvTransposeOpBase
 
class  cudnnFilterDescWrapper
 
class  CuDNNLRNGradientOp
 
class  CuDNNLRNOp
 
class  CuDNNSoftmaxGradientOp
 
class  CuDNNSoftmaxOp
 
class  CuDNNState
 
class  cudnnTensorDescWrapper
 cudnnTensorDescWrapper is the placeholder that wraps around a cudnnTensorDescriptor_t, allowing us to change the descriptor as needed at runtime. More...
 
class  cudnnTypeWrapper
 cudnnTypeWrapper is a wrapper class that allows us to refer to the cudnn type in a template function. More...
 
class  cudnnTypeWrapper< at::Half >
 
class  cudnnTypeWrapper< double >
 
class  cudnnTypeWrapper< float >
 
class  CuDNNWeightedSumOp
 
struct  CuDNNWorkspace
 CuDNNWorkspace is a wrapper around a raw cuda pointer that holds the cudnn scratch space. More...
 
class  CuDNNWrapper
 CuDNNWrapper is a class that wraps the cudnn handles and cudnn workspaces. More...
 
class  DataCoupleOp
 
class  DBExistsOp
 
class  DecodedFrame
 
class  DefaultEngine
 
class  DeformConvGradientOp
 
class  DeformConvOp
 
class  DeformConvOpBase
 
class  DenseVectorToIdListOp
 
class  DequantizeDNNLowPOp
 
class  DequeueBlobsOp
 
class  DequeueRebatchingQueueOp
 
class  DetailedExportedStat
 
struct  DeviceTypeRegisterer
 
class  DiagonalFillOp
 
struct  DispatchHelper
 
struct  DispatchHelper< FixedValues< FirstVal, Values... >, ExtraArgs... >
 
struct  DispatchHelper< FixedValues<>, ExtraArgs... >
 
struct  DivFunctor
 
class  DNNLowPOp
 A convenient base class for C2 operators with DNNLOWP engine. More...
 
class  DoOp
 
class  DotProductGradientOp
 
class  DotProductOp
 
class  DotProductWithPaddingGradientOp
 
class  DotProductWithPaddingOp
 
class  DropoutGradientOp
 
class  DropoutOp
 
struct  EigenPowFunctor
 
class  ElementwiseLinearDNNLowPOp
 
class  ElementwiseLinearGradientOp
 
class  ElementwiseLinearOp
 
class  ElementwiseRTCOp
 A GPU operator that can generate limited elementwise operations. More...
 
struct  EluFunctor
 
struct  EluGradientFunctor
 
class  EnforceFiniteOp
 
class  EnqueueBlobsOp
 
class  EnqueueRebatchingQueueOp
 
class  EnsureClippedOp
 
class  EnsureCPUOutputOp
 
class  EnsureDenseOp
 Pass inputs to outputs. More...
 
struct  ErfFunctor
 
struct  ErfGradientFunctor
 
class  Event
 
struct  EventCreateFunctionRegisterer
 
struct  EventErrorMessageFunctionRegisterer
 
struct  EventFinishFunctionRegisterer
 
struct  EventQueryFunctionRegisterer
 
struct  EventRecordFunctionRegisterer
 
struct  EventResetFunctionRegisterer
 
struct  EventSetCallbackFunctionRegisterer
 
struct  EventSetFinishedFunctionRegisterer
 
struct  EventWaitFunctionRegisterer
 
struct  ExecutionOptions
 
class  ExecutorHelper
 
class  ExpandDimsOp
 
class  ExpandGradientOp
 
class  ExpandOp
 
struct  ExpFunctor
 
class  ExpLearningRate
 
class  ExportedStat
 
struct  ExportedStatValue
 
class  FailOp
 
class  FeedBlobOp
 
class  FileReader
 
class  FileStoreHandler
 
class  FileStoreHandlerCreateOp
 
class  FillerOp
 
class  FindDuplicateElementsOp
 
class  FindOp
 
class  FixedDivisor
 
class  FixedDivisor< std::int32_t >
 
class  FixedLearningRate
 
struct  FixedType
 
struct  FixedValues
 
class  FlattenOp
 
class  FlattenToVecOp
 
class  FlexibleTopKGradientOp
 
class  FlexibleTopKOp
 
class  Float16ConstantFillOp
 
class  Float16UniformFillOp
 
class  FloatToFused8BitRowwiseQuantizedOp
 
class  FloatToFusedRandRowwiseQuantizedOp
 
class  FloatToHalfOp
 
class  FloatToRowwiseQuantized8BitsOp
 
class  FloorOp
 
struct  ForEach
 ForEach is a unary functor that forwards each element of the input array into the elementwise Functor provided, and gathers the results of each call into the resulting array. More...
 
class  FP16MomentumSGDUpdateOp
 
class  FP32MomentumSGDUpdateOp
 
class  FreeOp
 
class  FtrlOp
 
struct  FtrlParams
 
class  FullyConnectedDecompGradientOp
 
class  FullyConnectedDNNLowPAcc16Op
 Quantized FC operator with 16-bit accumulation. More...
 
class  FullyConnectedDNNLowPOp
 
class  FullyConnectedDNNLowPPackWeightOp
 
class  FullyConnectedFakeLowpFPOp
 
class  FullyConnectedGradientFakeLowpFPOp
 
class  FullyConnectedGradientOp
 
class  FullyConnectedOp
 
class  FullyConnectedOp_SPARSE
 
class  FullyConnectedOpDecomp
 
class  FullyConnectedOpPrune
 
class  FullyConnectedPruneGradientOp
 
class  FunHashGradientOp
 
class  FunHashOp
 
class  Fused8BitRowwiseQuantizedToFloatOp
 
class  FusedRandRowwiseQuantizedToFloatOp
 
class  GatherByKeyOp
 
class  GatherDNNLowPOp
 
class  GatherFused8BitRowwiseOp
 
class  GatherOp
 
class  GatherPaddingOp
 
class  GatherRangesOp
 
class  GatherRangesToDenseOp
 
class  GaussianFillOp
 
class  GenerateProposalsOp
 
struct  GenericTensorImplementation
 
struct  GetAddPaddingGradient
 
class  GetAveragedLossGradient
 
class  GetBatchGatherGradient
 
class  GetBatchPermutationGradient
 
class  GetBatchToSpaceGradient
 
class  GetBernoulliJSDGradient
 
class  GetCastGradient
 
class  GetCol2ImGradient
 
class  GetConvGradient
 
class  GetConvTransposeGradient
 
struct  GetCopyGradient
 
class  GetCosineSimilarityGradient
 
struct  GetCPUToGPUGradient
 
class  GetCrossEntropyGradient
 
class  GetDotProductGradient
 
class  GetDotProductWithPaddingGradient
 
class  GetDropoutGradient
 
struct  GetElementwiseLinearGradient
 
class  GetExpandDimsGradient
 
class  GetFCDecompGradient
 
class  GetFloatToHalfGradient
 
struct  GetGPUToCPUGradient
 
class  GetGroupSpatialSoftmaxGradient
 
class  GetGRUUnitGradient
 
class  GetHalfToFloatGradient
 
class  GetIm2ColGradient
 
class  GetInstanceNormGradient
 
class  GetIntegralImageGradient
 
class  GetL1DistanceGradient
 
class  GetLabelCrossEntropyGradient
 
class  GetLeakyReluGradient
 
class  GetLRNGradient
 
class  GetLSTMUnitGradient
 
struct  GetMakeTwoClassGradient
 
class  GetMatMulGradient
 
class  GetMeanGradient
 
struct  GetNanCheckGradient
 
struct  GetNegateGradientGradient
 
class  GetNormalizeGradient
 
class  GetPackSegmentsGradient
 
class  GetPadImageGradient
 
class  GetPoolGradient
 
class  GetPrependDimGradient
 
struct  GetRecurrentGradient
 
struct  GetRecurrentNetworkGradient
 
class  GetReduceBackMaxGradient
 
class  GetReduceBackSumGradient
 
class  GetReduceFrontMaxGradient
 
class  GetReduceFrontMeanGradient
 
class  GetReduceFrontSumGradient
 
struct  GetRemovePaddingGradient
 
class  GetResizeNearestGradient
 
class  GetReversePackedSegsGradient
 
class  GetRoIPoolGradient
 
class  GetSampleAsGradient
 
class  GetScaleGradient
 
class  GetSelectSmoothL1LossGradient
 
class  GetSeluGradient
 
class  GetSigmoidCrossEntropyLossGradient
 
struct  GetSigmoidCrossEntropyWithLogitsGradient
 
class  GetSigmoidFocalLossGradient
 
class  GetSmoothL1LossGradient
 
class  GetSoftmaxFocalLossGradient
 
class  GetSoftplusGradient
 
class  GetSpaceToBatchGradient
 
class  GetSquaredL2DistanceGradient
 
class  GetSquareRootDivideGradient
 
class  GetSqueezeGradient
 
class  GetSumElementsGradient
 
class  GetTopKGradient
 
class  GetTransposeGradient
 
class  GetUnpackSegmentsGradient
 
class  GetUpsampleBilinearGradient
 
class  GetUpsampleNearestGradient
 
struct  GetWeightedSigmoidCrossEntropyWithLogitsGradient
 
struct  GetZeroGradientOpGradient
 
class  GFtrlOp
 
struct  GFtrlParams
 
class  GivenTensorByteStringToUInt8FillOp
 
class  GivenTensorFillOp
 
class  GlobalInitIsCalledGuard
 
class  GluOp
 
class  GPUFallbackOpEx
 A templated class to allow one to wrap a CPU operator as a CUDA operator. More...
 
class  GradientMakerBase
 
struct  GradientNotImplementedYet
 A helper class to indicate that the gradient mechanism is not ready. More...
 
struct  GradientOpsMeta
 A struct that holds the gradient operators and related gradient maps. More...
 
struct  GradientWrapper
 
class  GroupNormDNNLowPOp
 
class  GroupNormGradientOp
 
class  GroupNormOp
 
class  GroupSpatialSoftmaxGradientOp
 
class  GroupSpatialSoftmaxOp
 
class  GRUUnitGradientOp
 
class  GRUUnitOp
 
class  HalfToFloatOp
 
struct  HardSigmoidFunctor
 
struct  HardSigmoidGradientFunctor
 
class  HasElementsOp
 
class  HasScopeOp
 
class  HeatmapMaxKeypointOp
 
class  HillLearningRate
 
class  HistogramNetObserver
 
class  HistogramObserver
 Given min/max, collect histogram. More...
 
class  HSoftmaxGradientOp
 
class  HSoftmaxOp
 
class  HSoftmaxOpBase
 
class  HSoftmaxSearchOp
 
class  HuffmanTreeHierarchyOp
 
class  IDEEPAdamOp
 
class  IDEEPConcatOp
 
class  IDEEPContext
 
class  IDEEPConvFusionOp
 
class  IDEEPConvGradientOp
 
class  IDEEPConvOp
 
class  IDEEPConvPoolOpBase
 
class  IDEEPConvTransposeGradientOp
 
class  IDEEPConvTransposeOp
 
class  IDEEPConvTransposeUnpoolBase
 
class  IDEEPCopyOp
 
class  IDEEPCreateBlobsQueueOp
 
class  IDEEPDropoutGradientOp
 
class  IDEEPDropoutOp
 
class  IDEEPExpandDimsOp
 
class  IDEEPFallbackOp
 A templated class to allow one to wrap a CPU operator as an IDEEP operator. More...
 
class  IDEEPFullyConnectedGradientOp
 
class  IDEEPFullyConnectedOp
 
class  IDEEPLRNGradientOp
 
class  IDEEPLRNOp
 
class  IDEEPMomentumSGDOp
 
class  IDEEPMomentumSGDUpdateOp
 
class  IDEEPOperator
 
class  IDEEPPoolGradientOp
 
class  IDEEPPoolOp
 
class  IDEEPReluGradientOp
 
class  IDEEPReluOp
 
class  IDEEPReshapeOp
 
class  IDEEPSafeEnqueueBlobsOp
 
class  IDEEPShapeOp
 
class  IDEEPSigmoidGradientOp
 
class  IDEEPSigmoidOp
 
class  IDEEPSpatialBNGradientOp
 
class  IDEEPSpatialBNOp
 
class  IDEEPSplitOp
 
class  IDEEPSqueezeOp
 
class  IDEEPSumOp
 
class  IDEEPWeightedSumOp
 
class  IfOp
 
class  Im2ColOp
 
class  ImageInputOp
 
struct  Index
 
struct  IndexBase
 
class  IndexCreateOp
 
class  IndexDeserializer
 
class  IndexFreezeOp
 
class  IndexGetOp
 
class  IndexHashOp
 
class  IndexLoadOp
 
class  IndexSerializer
 
class  IndexSizeOp
 
class  IndexStoreOp
 
class  InitRegisterer
 
class  InstanceNormGradientOp
 
class  InstanceNormOp
 
struct  Int8ConvDNNLowPPackedWeightBlob
 Packed weight matrix for DNNLOWP Int8Conv operator. More...
 
struct  Int8FCDNNLowPPackedWeightBlob
 Packed weight matrix for DNNLOWP Int8FC operator. More...
 
class  IntegralImageGradientOp
 
class  IntegralImageOp
 
class  InvLearningRate
 
class  IsEmptyOp
 
class  IsMemberOfOp
 
class  IsMemberOfValueHolder
 
class  IsNanOp
 
class  IterOp
 
class  KeySplitOp
 
class  KeyValueToMapOp
 
class  L1DistanceGradientOp
 
class  L1DistanceOp
 
struct  L1Reducer
 
struct  L2Reducer
 
class  LabelCrossEntropyGradientOp
 
class  LabelCrossEntropyOp
 
class  LambdaRankNdcgGradientOp
 
class  LambdaRankNdcgOp
 
class  LarsOp
 
class  LayerNormGradientOp
 
class  LayerNormOp
 
class  LeakyReluGradientOp
 
class  LeakyReluOp
 
class  LearningRateAdaptionOp
 
class  LearningRateFunctor
 
class  LearningRateOp
 
class  LengthsGatherOp
 
struct  LengthsOpGetGradient
 
class  LengthsPadOp
 
class  LengthsPartitionOp
 
class  LengthsRangeFillOp
 
class  LengthsSplitOp
 
class  LengthsTileOp
 
class  LengthsTopKGradientOp
 
class  LengthsTopKOp
 
class  LengthsToRangesOp
 
class  LengthsToSegmentIdsOp
 
class  LengthsToShapeOp
 
class  LengthsToWeightsOp
 
class  LinearWarmupLearningRate
 
class  LoadOp
 
class  LocallyConnectedGradientOp
 
class  LocallyConnectedOp
 
class  LogFatalOp
 
struct  LogFunctor
 
struct  LogitFunctor
 
class  LogitGradientOp
 
class  LogMeanExpRangeReducer
 
class  LogMeanExpRangeReducer< T, CPUContext >
 
struct  LogMeanExpRangeReducerDef
 
class  LogMeanExpRangeReducerGradient
 
class  LogSumExpRangeReducer
 
class  LogSumExpRangeReducer< T, CPUContext >
 
struct  LogSumExpRangeReducerDef
 
class  LogSumExpRangeReducerGradient
 
class  LpNormGradientOp
 
class  LpNormOp
 
struct  LpPoolFunctor
 
class  LRNGradientOp
 
class  LRNOp
 
class  LRNOpBase
 
class  LSTMUnitDNNLowPOp
 
class  LSTMUnitGradientOp
 
class  LSTMUnitOp
 
struct  MakeAligned
 
class  MakeTwoClassGradientOp
 
class  MakeTwoClassOp
 
class  MapDeserializer
 
class  MapSerializer
 
class  MapToKeyValueOp
 
struct  MapTypeTraits
 
class  MarginRankingCriterionGradientOp
 
class  MarginRankingCriterionOp
 
class  MatMulOp
 
class  MaxGradientOp
 
class  MaxOp
 
struct  MaxPoolFunctor
 
class  MaxPoolGradientRTCOp
 
class  MaxPoolRTCOp
 
class  MaxPoolWithIndexGradientOp
 
class  MaxPoolWithIndexOp
 
class  MaxRangeReducer
 
class  MaxRangeReducer< T, CPUContext >
 
struct  MaxRangeReducerDef
 
class  MaxRangeReducerGradient
 
class  MaxReduceDimsGradientOp
 
class  MaxReduceDimsOp
 
class  MaxReducer
 
class  MaxReducer< T, CPUContext >
 
struct  MaxReducerDef
 
class  MaxReducerGradient
 
class  MaxReductionGradientOp
 
class  MaxReductionOp
 
class  MeanGradientOp
 
class  MeanOp
 
class  MeanRangeReducer
 
class  MeanRangeReducer< T, CPUContext >
 
struct  MeanRangeReducerDef
 
class  MeanRangeReducerGradient
 
class  MeanReducer
 
class  MeanReducer< T, CPUContext >
 
struct  MeanReducerDef
 
class  MeanReducerGradient
 
class  MergeDimOp
 
class  MergeIdListsOp
 
class  MergeMultiListFeatureTensorsOp
 
class  MergeMultiListOrMapFeatureTensorsGradientOp
 
class  MergeMultiMapFeatureTensorsOp
 
class  MergeMultiScalarFeatureTensorsGradientOp
 
class  MergeMultiScalarFeatureTensorsOp
 
class  MergeSingleListFeatureTensorsOp
 
class  MergeSingleListOrMapFeatureTensorsGradientOp
 
class  MergeSingleMapFeatureTensorsOp
 
class  MergeSingleScalarFeatureTensorsGradientOp
 
class  MergeSingleScalarFeatureTensorsOp
 
class  MinGradientOp
 
class  MinOp
 
struct  MinReducer
 
class  MIOPENActivationGradientOp
 
class  MIOPENActivationOp
 
class  MIOPENActivationOpBase
 
class  MIOPENState
 
class  miopenTensorDescWrapper
 miopenTensorDescWrapper is the placeholder that wraps around a miopenTensorDescriptor_t, allowing us to change the descriptor as needed at runtime. More...
 
class  miopenTypeWrapper
 miopenTypeWrapper is a wrapper class that allows us to refer to the miopen type in a template function. More...
 
class  miopenTypeWrapper< at::Half >
 
class  miopenTypeWrapper< float >
 
struct  MIOPENWorkspace
 MIOPENWorkspace is a wrapper around a raw cuda pointer that holds the miopen scratch space. More...
 
class  MIOPENWrapper
 MIOPENWrapper is a class that wraps the miopen handles and miopen workspaces. More...
 
class  ModOp
 
class  ModuleSchema
 A module schema that can be used to store specific information about different modules. More...
 
class  MomentsGradientOp
 
class  MomentsOp
 
class  MomentumSGDOp
 
class  MomentumSGDUpdateOp
 
class  MPICommonWorldWrapper
 A simple wrapper over an MPI common world. More...
 
class  MPIDataTypeWrapper
 
struct  MPSCNNContext
 
class  MSRAFillOp
 
class  MulDNNLowPOp
 
struct  MulFunctor
 
class  MultiClassAccuracyOp
 
class  MutexDeserializer
 
class  MutexSerializer
 
class  NanCheckOp
 
class  NCHW2NHWCOp
 
class  NegateGradientOp
 
struct  NegativeFunctor
 
class  NetBase
 
class  NetObserverReporter
 
class  NetObserverReporterPrint
 
class  NGramFromCategoricalOp
 
class  NHWC2NCHWOp
 
class  NNApi
 
class  NNPACKConvOp
 
class  NoDefaultEngineOp
 A helper class to denote that an op does not have a default engine. More...
 
class  NoGradient
 A helper class to indicate that the operator does not need gradient computation. More...
 
class  NormalizeGradientOp
 
class  NormalizeL1Op
 
class  NormalizeOp
 
struct  NotFunctor
 
class  NumpyTileOp
 
class  Observable
 Inherit to make your class observable. More...
 
class  ObserverBase
 Use this to implement an Observer using the Observer Pattern template. More...
 
class  ObserverConfig
 
class  OneHotOp
 
class  OnnxifiOp
 
class  OnnxifiTransformer
 
struct  OnnxifiTransformerOptions
 
class  ONNXWhileOp
 
class  Operator
 
class  OperatorAttachingNetObserver
 
class  OperatorBase
 
class  OpSchema
 A class to record the schema of an op. More...
 
class  OpSchemaRegistry
 A registry to hold all the operator schemas. More...
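 
 Schemas are normally declared with the OPERATOR_SCHEMA macro, which registers them in OpSchemaRegistry. A sketch for a hypothetical operator (the name and settings are illustrative, not from the original text):
 
    OPERATOR_SCHEMA(MyRelu)
        .NumInputs(1)
        .NumOutputs(1)
        .AllowInplace({{0, 0}})
        .IdenticalTypeAndShape()
        .SetDoc("Applies a rectifier elementwise (illustrative schema).");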
 
struct  OpTask
 Data structure for a scheduled task in the task queue. More...
 
class  OptimizationPass
 
class  OpWrapper
 Wrap a floating-point operator with quantized inputs with type T. More...
 
class  OutputMinMaxNetObserver
 
class  OutputMinMaxObserver
 
class  PackedInt8BGRANHWCToNCHWCStylizerPreprocessOp
 
class  PackRNNSequenceOpBase
 
class  PackSegmentsOp
 
class  PadEmptySamplesOp
 
class  PadImageGradientOp
 
class  PadImageOp
 
class  PairWiseLossGradientOp
 
class  PairWiseLossOp
 
class  ParallelNet
 
class  ParallelNetExecutorHelper
 
class  Params
 
class  PartitionOp
 
class  PartitionOpBase
 
class  PatternNetTransform
 PatternNetTransform allows you to create transforms using a simple interface. More...
 
class  PercentileOp
 
class  PerfNetObserver
 
class  PerfOperatorObserver
 
struct  PerformanceInformation
 
class  PerplexityOp
 
class  PiecewiseLinearTransformOp
 
class  PolyLearningRate
 
class  PoolGradientOp
 
class  PoolOp
 
class  PowOp
 
class  Predictor
 
struct  PredictorConfig
 Stores parameters necessary for creating a PredictorInterface object. More...
 
class  PrefetchOperator
 
class  PReluGradientOp
 
class  PReluOp
 
class  PrependDimOp
 
class  PrintOp
 
class  ProfDAGCounters
 A simple wrapper around prof_dag's counters. More...
 
class  ProfDAGReport
 
class  ProfDAGStats
 
class  ProfileCounter
 
class  ProfileObserver
 
class  ProfileOperatorObserver
 
class  PSRoIPoolGradientOp
 
class  PSRoIPoolOp
 
class  QConvOp
 
struct  QConvState
 
class  QTensor
 
class  QTensorDeserializer
 
class  QTensorSerializer
 
class  QuantDecodeGradientOp
 
class  QuantDecodeOp
 
class  QuantDecompZstdOp
 
class  QuantizeDNNLowPOp
 
class  RangeFillOp
 
class  RangeOp
 
class  RebatchingQueue
 
struct  ReciprocalFunctor
 
struct  ReciprocalGradientFunctor
 
class  RecurrentBaseOp
 
class  RecurrentGradientOp
 
class  RecurrentNetworkBlobFetcherOp
 
class  RecurrentNetworkExecutorBase
 RecurrentNetworkExecutor is a specialized runtime for recurrent neural networks (RNNs). More...
 
class  RecurrentNetworkGradientOp
 
class  RecurrentNetworkOp
 
class  RecurrentOp
 
class  RecurrentParamAccessOp
 
class  RedisStoreHandler
 
class  RedisStoreHandlerCreateOp
 
class  ReduceGradientOp
 
class  ReduceOp
 
class  ReduceTailSumOp
 
class  RegisterQuantizationParamsNetObserver
 Set quantization parameters of operators based on min/max collected from OutputMinMaxObserver. More...
 
class  RegisterQuantizationParamsWithHistogramNetObserver
 Set quantization parameters of operators based on min/max collected from OutputMinMaxObserver. More...
 
class  ReluDNNLowPOp
 
struct  ReluFunctor
 
struct  ReluGradientFunctor
 
struct  ReluNFunctor
 
struct  ReluNGradientFunctor
 
class  RemoveDataBlocksOp
 
class  RemovePaddingOp
 
class  ReplaceNaNOp
 
class  ResetCounterOp
 
class  ReshapeOp
 
class  ResizeLikeOp
 
class  ResizeNearestDNNLowPOp
 
class  ResizeNearestGradientOp
 
class  ResizeNearestOp
 
class  RetrieveCountOp
 
class  ReversePackedSegsOp
 
class  RMACRegionsOp
 
class  RmsPropOp
 
class  RNNApplyLinkOp
 
struct  RNNNetOperator
 Struct for an operator in a timestep and its dependencies. More...
 
class  RoIAlignGradientOp
 
class  RoIAlignOp
 
class  RoIAlignRotatedGradientOp
 
class  RoIAlignRotatedOp
 
class  RoIPoolFGradientOp
 
class  RoIPoolFOp
 
class  RoIPoolGradientOp
 
class  RoIPoolOp
 
class  RowMulOp
 
class  Rowwise8BitQuantizedToFloatOp
 
class  RowWiseSparseAdagradOp
 
class  RowWiseSparseAdamOp
 
struct  RsqrtFunctor
 
struct  RsqrtGradientFunctor
 
class  RunCountNetObserver
 
class  RunCountOperatorObserver
 
class  SafeDequeueBlobsOp
 
class  SafeEnqueueBlobsOp
 
struct  SameTypeAsInput
 
class  SampleAsGradientOp
 
class  SampleAsOp
 
struct  SampleInterval
 
class  SaveOp
 
class  ScaleOp
 
class  ScatterAssignOp
 Update slices of the tensor in-place by overriding. More...
 
class  ScatterWeightedSumOp
 Update slices of the tensor in-place with weighted sum. More...
 
class  SegmentIdsToLengthsOp
 
class  SegmentIdsToRangesOp
 
class  SegmentOneHotOp
 
struct  SegmentOpGetGradient
 
class  SelectGradientOpBase
 
class  SelectSmoothL1LossGradientOp
 
class  SelectSmoothL1LossOp
 
class  SeluGradientOp
 
class  SeluOp
 
class  SequenceMaskOp
 
struct  ShapeInfo
 
class  ShapeOp
 
class  SigmoidCrossEntropyLossGradientOp
 
class  SigmoidCrossEntropyLossOp
 
class  SigmoidCrossEntropyWithLogitsGradientOp
 
class  SigmoidCrossEntropyWithLogitsOp
 
class  SigmoidFocalLossGradientOp
 
class  SigmoidFocalLossOp
 
class  SigmoidFunctor
 
struct  SigmoidGradientFunctor
 
class  SignalHandler
 
struct  SignFunctor
 
struct  SimpleArray
 
class  SimpleNet
 
class  SimpleQueue
 
class  SimpleRefCountNet
 
struct  SinFunctor
 
class  SingleOpTransform
 Single Op Transform Base class. More...
 
struct  SinGradientFunctor
 
struct  SinhFunctor
 
struct  SinhGradientFunctor
 
class  SinusoidPositionEncodingOp
 
class  SizeOp
 
class  SkipIndices
 
class  SkipIndices<>
 
class  SliceGradientOp
 
class  SliceOp
 
class  SmartTensorPrinter
 
class  SmoothL1LossGradientOp
 
class  SmoothL1LossOp
 
class  SNPEOp
 
class  SoftmaxFocalLossGradientOp
 
class  SoftmaxFocalLossOp
 
class  SoftmaxGradientOp
 
class  SoftmaxOp
 
class  SoftmaxWithLossGradientOp
 
class  SoftmaxWithLossOp
 
class  SoftplusGradientOp
 
class  SoftplusOp
 
struct  SoftsignFunctor
 
struct  SoftsignGradientFunctor
 
class  SpaceBatchOpBase
 
class  SpaceToBatchOp
 
class  SparseAdadeltaOp
 
class  SparseAdagradOp
 
class  SparseAdamOp
 
class  SparseFtrlOp
 
class  SparseFunHashGradientOp
 
class  SparseFunHashOp
 
class  SparseLengths8BitsRowwiseOp
 
class  SparseLengthsFused8BitRowwiseOp
 
class  SparseMatrixReshapeOp
 
class  SparseMomentumSGDUpdateOp
 
class  SparseNormalizeOp
 
class  SparseToDenseMaskBase
 
class  SparseToDenseMaskGradientOp
 
class  SparseToDenseMaskOp
 
class  SparseToDenseOp
 
class  SparseWngradOp
 
class  SpatialBNDNNLowPOp
 Note this implementation assumes the SCALE, BIAS, EST_MEAN, and EST_VAR inputs are still in fp32, as is the epsilon argument. More...
 
class  SpatialBNGradientOp
 
class  SpatialBNOp
 
class  SpatialNarrowAsGradient
 
class  SpatialNarrowAsGradientOp
 
class  SpatialNarrowAsOp
 
class  SpatialSoftmaxWithLossGradientOp
 
class  SpatialSoftmaxWithLossOp
 
class  SplitByLengthsOp
 
class  SplitOp
 
struct  SqrFunctor
 
struct  SqrtFunctor
 
class  SquaredL2DistanceGradientOp
 
class  SquaredL2DistanceOp
 
class  SquareRootDivideOp
 
class  SqueezeOp
 
struct  Stat
 
struct  StaticLinkingProtector
 
class  StaticStat
 
class  StatRegistry
 Holds a map of atomic counters keyed by name. More...
 
class  StatRegistryCreateOp
 
class  StatRegistryExportOp
 
class  StatRegistryUpdateOp
 
class  StatValue
 
class  StdDevExportedStat
 
class  StepLearningRate
 
class  StopGradientOp
 
struct  StopOnSignal
 
class  StoreAddOp
 
class  StoreGetOp
 
class  StoreHandler
 
struct  StoreHandlerNotAvailableException
 
struct  StoreHandlerTimeoutException
 
class  StoreSetOp
 
class  StoreWaitOp
 
class  StringDeserializer
 StringDeserializer is the deserializer for Strings. More...
 
class  StringJoinOp
 
struct  StringProvider
 
class  StringSerializer
 StringSerializer is the serializer for String. More...
 
class  StumpFuncIndexOp
 
class  StumpFuncOp
 
struct  SubFunctor
 
class  SumDNNLowPOp
 
class  SumElementsGradientOp
 
class  SumElementsIntOp
 
class  SumElementsOp
 
class  SummarizeOp
 
class  SumOp
 
class  SumRangeReducer
 
class  SumRangeReducer< T, CPUContext >
 
struct  SumRangeReducerDef
 
class  SumRangeReducerGradient
 
class  SumReduceDimsGradientOp
 
class  SumReduceDimsOp
 
class  SumReduceLikeOp
 
class  SumReducer
 
class  SumReducer< T, CPUContext >
 
struct  SumReducerDef
 
class  SumReducerGradient
 
class  SumReluOp
 
class  SumSqrElementsOp
 
struct  SwishFunctor
 
class  SwishGradientOp
 
struct  TanFunctor
 
struct  TanGradientFunctor
 
class  TanhFunctor
 
struct  TanhGradientFunctor
 
struct  Task
 
struct  TemplatePutOp
 
class  Tensor
 Tensor class holds a shared pointer to the implementation TensorImpl and redirects API calls to it; copying a Tensor shares the same underlying implementation object. More...
 
class  TensorDeserializer
 TensorDeserializer is the deserializer for Tensors. More...
 
class  TensorFiller
 
class  TensorPrinter
 
class  TensorProtosDBInput
 
class  TensorSerializer
 TensorSerializer is the serializer for Tensors. More...
 
struct  TensorTypes
 
struct  TensorTypes2
 
struct  TextFileReaderInstance
 
class  TextFileReaderReadOp
 
class  ThreadedRecurrentNetworkExecutor
 
class  ThreadLocalCUDAObjects
 A struct to host thread-local cuda objects. More...
 
class  ThreadPool
 
class  ThresholdedReluGradientOp
 
class  ThresholdedReluOp
 
class  ThrowChildThreadExceptionOp
 
class  ThrowExceptionOp
 
struct  ThrowInTheTowelIfGradientIsCalled
 A helper class to indicate that the operator should have no gradient. More...
 
class  TileGradientOp
 
class  TileOp
 
class  TimeCounter
 
class  TimeObserver
 
class  TimeOperatorObserver
 
class  Timer
 A simple timer object for measuring time. More...
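 
 A minimal sketch (assuming the Start/MilliSeconds interface from caffe2/core/timer.h):
 
    #include "caffe2/core/timer.h"

    void TimeSomething() {
      caffe2::Timer timer;                    // starts timing on construction
      // ... work to be measured ...
      const float ms = timer.MilliSeconds();  // elapsed wall time in milliseconds
      (void)ms;
    }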
 
struct  TimerBeginOp
 
struct  TimerEndOp
 
struct  TimerGetAndEndOp
 
struct  TimerGetOp
 
class  TimerInstance
 
struct  Token
 
class  TokenizedString
 
class  Tokenizer
 
class  TopKGradientOp
 
class  TopKOp
 
class  Transform
 The Transform Base Object. More...
 
class  TransposeOp
 
class  TTContractionGradientOp
 
class  TTContractionOp
 
class  TTLinearGradientOp
 
class  TTLinearOp
 
class  TTPadGradientOp
 
class  TTPadOp
 
class  TypeIdentifier
 A type id is a unique id for a given C++ type. More...
 
class  TypeMeta
 TypeMeta is a thin class that allows us to store the type of a container such as a blob, or the data type of a tensor, with a unique run-time id. More...
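 
 A minimal sketch of how TypeMeta is typically queried (assuming the Make/itemsize/name accessors; exact spellings may differ across versions):
 
    #include <c10/util/typeid.h>

    void TypeMetaExample() {
      const caffe2::TypeMeta meta = caffe2::TypeMeta::Make<float>();
      const size_t bytes = meta.itemsize();   // 4 for float
      auto name = meta.name();                // human-readable type name
      (void)bytes;
      (void)name;
    }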
 
struct  TypeNameTraits
 
struct  TypeNameTraits< int32_t >
 
struct  TypeNameTraits< int64_t >
 
class  UnaryElementwiseWithArgsDNNLowPOp
 
class  UnaryElementwiseWithArgsOp
 
struct  UnaryFunctorWithDefaultCtor
 
class  UniformFillOp
 
class  UniqueOp
 Deduplicates the input indices vector and optionally produces a reverse remapping. More...
 
class  UniqueUniformFillOp
 
class  UnpackSegmentsOp
 
class  UnsupportedOperatorFeature
 
class  UpsampleBilinearGradientOp
 
class  UpsampleBilinearOp
 
class  UpsampleNearestGradientOp
 
class  UpsampleNearestOp
 
class  VariableLengthSequencePaddingOp
 
class  VideoDecoder
 
class  VideoInputOp
 
class  VideoIOContext
 
struct  VideoMeta
 
class  WallClockTimeOp
 
class  WeightedMultiSamplingOp
 
class  WeightedSampleDequeueBlobsOp
 
class  WeightedSampleOp
 
class  WeightedSigmoidCrossEntropyWithLogitsGradientOp
 
class  WeightedSigmoidCrossEntropyWithLogitsOp
 
class  WeightedSumGradientOp
 
class  WeightedSumOp
 
class  WeightedSumReducer
 
class  WeightedSumReducer< T, CPUContext >
 
struct  WeightedSumReducerDef
 
class  WeightedSumReducerGradient
 
class  WhereOp
 
class  WhileOp
 
class  WngradOp
 
class  Worker
 
class  WorkersPool
 
class  Workspace
 Workspace is a class that holds all the related objects created during runtime: (1) all blobs, and (2) all instantiated networks. More...
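 
 A minimal sketch of typical Workspace usage (assuming CreateBlob/HasBlob/RunNetOnce from caffe2/core/workspace.h; the blob name and net contents are hypothetical):
 
    #include "caffe2/core/workspace.h"

    void WorkspaceExample(const caffe2::NetDef& net_def) {
      caffe2::Workspace ws;
      caffe2::Blob* data = ws.CreateBlob("data");   // blob is owned by the workspace
      (void)data;
      if (ws.HasBlob("data")) {
        ws.RunNetOnce(net_def);                     // instantiate and run a net once
      }
    }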
 
class  WorkspaceOptimizationPass
 
class  XavierFillOp
 
class  YellowFinOp
 
class  ZeroGradientOp
 
class  ZmqContext
 
class  ZmqMessage
 
class  ZmqSocket
 

Typedefs

template<typename Key , typename Value >
using CaffeMap = std::map< Key, Value >
 
using CUDAGuard = c10::cuda::CUDAGuard
 
using TensorCUDA = Tensor
 
typedef void(* EventCreateFunction) (const DeviceOption &option, Event *)
 
typedef void(* EventRecordFunction) (Event *, const void *, const char *)
 
typedef void(* EventWaitFunction) (const Event *, void *)
 
typedef void(* EventFinishFunction) (const Event *)
 
typedef EventStatus(* EventQueryFunction) (const Event *)
 
typedef const std::string &(* EventErrorMessageFunction) (const Event *)
 
typedef void(* EventSetFinishedFunction) (const Event *, const char *)
 
typedef void(* EventResetFunction) (Event *)
 
typedef std::function< void()> EventCallbackFunction
 
typedef void(* EventSetCallbackFunction) (Event *, EventCallbackFunction)
 
typedef ObserverBase< NetBase > NetObserver
 
typedef std::function< std::unique_ptr< NetObserver >(NetBase *)> NetObserverCreator
 
typedef ObserverBase< OperatorBase > OperatorObserver
 
typedef c10::Registry< std::string, std::unique_ptr< OperatorBase >, const OperatorDef &, Workspace * > *(* RegistryFunction) ()
 
using EnginePrefType = std::vector< std::string >
 
using PerOpEnginePrefType = CaffeMap< DeviceType, CaffeMap< std::string, EnginePrefType >>
 
using GlobalEnginePrefType = CaffeMap< DeviceType, EnginePrefType >
 
typedef std::function< bool(int)> ShouldContinue
 
using ExportedStatList = std::vector< ExportedStatValue >
 Holds names and values of counters exported from a StatRegistry.
 
using ExportedStatMap = std::unordered_map< std::string, int64_t >
 
using StorageImpl = at::StorageImpl
 
using Storage = at::Storage
 
using TensorCPU = Tensor
 
typedef TypeMeta(* TypeCall) (const void *)
 
typedef vector< int64_t >(* TensorInfoCall) (const void *, size_t *capacity, DeviceOption *device)
 
template<typename T >
using deleted_unique_ptr = std::unique_ptr< T, std::function< void(T *)>>
 
using ParallelFor = std::function< void(size_t, std::function< void(size_t)>)>
 
using NumericTypes = TensorTypes< int32_t, int64_t, float, double >
 
using IntTypes = TensorTypes< int32_t, int64_t >
 
using BoolTypes = TensorTypes< bool >
 
using IntBoolTypes = TensorTypes< int32_t, int64_t, bool >
 
template<typename InputTypes , class Context , class Functor , class OutputTypeMap = SameTypeAsInput>
using UnaryElementwiseOp = UnaryElementwiseWithArgsOp< InputTypes, Context, UnaryFunctorWithDefaultCtor< Functor >, OutputTypeMap >
 
template<typename InputTypes , class Context , class Functor , class TypeMap = SameTypeAsInput>
using BinaryElementwiseOp = BinaryElementwiseWithArgsOp< InputTypes, Context, BinaryFunctorWithDefaultCtor< Functor >, TypeMap >
 
template<typename InputTypes , class Context , class Functor , class OutputTypeMap = SameTypeAsInput, class GradientTypeMap = SameTypeAsInput>
using BinaryElementwiseGradientOp = BinaryElementwiseWithArgsGradientOp< InputTypes, Context, BinaryFunctorWithDefaultCtor< Functor >, OutputTypeMap, GradientTypeMap >
 
using SparseLengthsSumOp = CPUSparseLengthsReductionOp< float, TensorTypes< float, at::Half >, 0, 0 >
 
using SparseLengthsWeightedSumOp = CPUSparseLengthsReductionOp< float, TensorTypes< float, at::Half >, 1, 0 >
 
using SparseLengthsMeanOp = CPUSparseLengthsReductionOp< float, TensorTypes< float, at::Half >, 0, 1 >
 
using SparseLengthsSumDef = AbstractSparseLengthsDef< float, int, CPUContext, SumReducerDef, true >
 
using SparseLengthsWeightedSumDef = AbstractSparseLengthsDef< float, int, CPUContext, WeightedSumReducerDef, true >
 
using MapType64To64 = MapTypeTraits< int64_t, int64_t >::MapType
 
using MapType64To32 = MapTypeTraits< int64_t, int32_t >::MapType
 
using MapType32To32 = MapTypeTraits< int32_t, int32_t >::MapType
 
using MapType32To64 = MapTypeTraits< int32_t, int64_t >::MapType
 
using GPUFallbackOp = GPUFallbackOpEx< SkipIndices<>>
 
template<typename ScalarFunctor , typename TypeMap = FixedType<std::string>>
using StringElementwiseOp = UnaryElementwiseWithArgsOp< TensorTypes< std::string >, CPUContext, ForEach< ScalarFunctor >, TypeMap >
 
using ShapeInfoMap = std::unordered_map< std::string, ShapeInfo >
 
using PredictorParameters = std::map< std::string, std::shared_ptr< Blob >>
 
using DeviceType = at::DeviceType
 
using BatchPermutationFP32Op = CopyOp< CPUContext, CPUContext, CPUContext >
 
using ConvFp32Op = ConvOp< float, CPUContext >
 
using AddFp32Op = BinaryElementwiseOp< NumericTypes, CPUContext, AddFunctor< CPUContext >>
 
using ElementwiseLinearFp32Op = ElementwiseLinearOp< float, CPUContext >
 
using MulFp32Op = BinaryElementwiseOp< NumericTypes, CPUContext, MulFunctor< CPUContext >>
 
using FCFp32Op = FullyConnectedOp< CPUContext >
 
using GroupNormFP32Op = GroupNormOp< float, CPUContext >
 
using ResizeNearestFP32Op = ResizeNearestOp< float, CPUContext >
 
using RebatchingQueuePtr = std::unique_ptr< RebatchingQueue >
 
template<typename T >
using EigenMatrixMap = Eigen::Map< Eigen::Matrix< T, Eigen::Dynamic, Eigen::Dynamic >>
 
template<typename T >
using EigenArrayMap = Eigen::Map< Eigen::Array< T, Eigen::Dynamic, Eigen::Dynamic >>
 
template<typename T >
using EigenVectorMap = Eigen::Map< Eigen::Matrix< T, Eigen::Dynamic, 1 >>
 
template<typename T >
using EigenVectorArrayMap = Eigen::Map< Eigen::Array< T, Eigen::Dynamic, 1 >>
 
template<typename T >
using ConstEigenMatrixMap = Eigen::Map< const Eigen::Matrix< T, Eigen::Dynamic, Eigen::Dynamic >>
 
template<typename T >
using ConstEigenArrayMap = Eigen::Map< const Eigen::Array< T, Eigen::Dynamic, Eigen::Dynamic >>
 
template<typename T >
using ConstEigenVectorMap = Eigen::Map< const Eigen::Matrix< T, Eigen::Dynamic, 1 >>
 
template<typename T >
using ConstEigenVectorArrayMap = Eigen::Map< const Eigen::Array< T, Eigen::Dynamic, 1 >>
 
using EigenOuterStride = Eigen::OuterStride< Eigen::Dynamic >
 
using EigenInnerStride = Eigen::InnerStride< Eigen::Dynamic >
 
using EigenStride = Eigen::Stride< Eigen::Dynamic, Eigen::Dynamic >
 
template<typename T >
using EigenOuterStridedMatrixMap = Eigen::Map< Eigen::Matrix< T, Eigen::Dynamic, Eigen::Dynamic >, 0, EigenOuterStride >
 
template<typename T >
using EigenOuterStridedArrayMap = Eigen::Map< Eigen::Array< T, Eigen::Dynamic, Eigen::Dynamic >, 0, EigenOuterStride >
 
template<typename T >
using ConstEigenOuterStridedMatrixMap = Eigen::Map< const Eigen::Matrix< T, Eigen::Dynamic, Eigen::Dynamic >, 0, EigenOuterStride >
 
template<typename T >
using ConstEigenOuterStridedArrayMap = Eigen::Map< const Eigen::Array< T, Eigen::Dynamic, Eigen::Dynamic >, 0, EigenOuterStride >
 
template<typename T >
using EigenStridedMatrixMap = Eigen::Map< Eigen::Matrix< T, Eigen::Dynamic, Eigen::Dynamic >, 0, EigenStride >
 
template<typename T >
using EigenStridedArrayMap = Eigen::Map< Eigen::Array< T, Eigen::Dynamic, Eigen::Dynamic >, 0, EigenStride >
 
template<typename T >
using ConstEigenStridedMatrixMap = Eigen::Map< const Eigen::Matrix< T, Eigen::Dynamic, Eigen::Dynamic >, 0, EigenStride >
 
template<typename T >
using ConstEigenStridedArrayMap = Eigen::Map< const Eigen::Array< T, Eigen::Dynamic, Eigen::Dynamic >, 0, EigenStride >
 
template<typename T >
using EArrXt = Eigen::Array< T, Eigen::Dynamic, 1 >
 
using EArrXf = Eigen::ArrayXf
 
using EArrXd = Eigen::ArrayXd
 
using EArrXi = Eigen::ArrayXi
 
using EArrXb = EArrXt< bool >
 
template<typename T >
using EArrXXt = Eigen::Array< T, Eigen::Dynamic, Eigen::Dynamic >
 
using EArrXXf = Eigen::ArrayXXf
 
template<typename T >
using ERArrXXt = Eigen::Array< T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor >
 
using ERArrXXf = ERArrXXt< float >
 
template<typename T >
using EVecXt = Eigen::Matrix< T, Eigen::Dynamic, 1 >
 
using EVecXd = Eigen::VectorXd
 
using EVecXf = Eigen::VectorXf
 
using ERVecXd = Eigen::RowVectorXd
 
using ERVecXf = Eigen::RowVectorXf
 
template<typename T >
using EMatXt = Eigen::Matrix< T, Eigen::Dynamic, Eigen::Dynamic >
 
using EMatXd = Eigen::MatrixXd
 
using EMatXf = Eigen::MatrixXf
 
template<typename T >
using ERMatXt = Eigen::Matrix< T, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor >
 
using ERMatXd = ERMatXt< double >
 
using ERMatXf = ERMatXt< float >
 

Enumerations

enum  CudaMemoryPoolType { NONE = 0, CUB = 1, THC = 2 }
 
enum  EventStatus { EVENT_INITIALIZED = 0, EVENT_SCHEDULED = 1, EVENT_SUCCESS = 2, EVENT_FAILED = 3 }
 
enum  StorageOrder { UNKNOWN = 0, NHWC = 1, NCHW = 2 }
 
enum  { ALGO_FWD = 0, ALGO_WGRAD = 1, ALGO_DGRAD = 2 }
 
enum  PadMode { CONSTANT = 0, REFLECT = 1, EDGE = 2 }
 
enum  QuantDecodeRunTy { RUN_ALWAYS, RUN_ONCE }
 
enum  RecurrentParamOpMode { SET_PARAM, GET_PARAM }
 
enum  FillerDistribution { FD_UNIFORM, FD_FIXEDSUM, FD_SYNTHETIC }
 
enum  FLowAlgType { FarnebackOpticalFlow = 0, DensePyrLKOpticalFlow = 1, BroxOpticalFlow = 2, OpticalFlowDual_TVL1 = 3 }
 
enum  FlowDataType { Flow2C = 0, Flow3C = 1, FlowWithGray = 2, FlowWithRGB = 3 }
 
enum  SpecialFps { SAMPLE_NO_FRAME = 0, SAMPLE_ALL_FRAMES = -1, SAMPLE_TIMESTAMP_ONLY = -2 }
 
enum  VideoResType { USE_WIDTH_HEIGHT = 0, USE_MINIMAL_WIDTH_HEIGHT = 1, ORIGINAL_RES = 2 }
 
enum  DecodeType { DO_TMP_JITTER = 0, DO_UNIFORM_SMP = 1, USE_START_FRM = 2 }
 

Functions

void swap (Blob &lhs, Blob &rhs)
 
std::ostream & operator<< (std::ostream &out, const Blob &v)
 
void reportTime (std::string type, double ts, std::string metric, std::string unit)
 
void splitSizes (const std::string &arg, int *ptr0, int *ptr1)
 
cv::Mat resizeImage (cv::Mat &img)
 
cv::Mat cropToRec (cv::Mat &img, int *height_ptr, int *width_ptr)
 
std::vector< float > convertToVector (cv::Mat &img)
 
std::vector< float > convertOneImage (std::string &filename, int *height_ptr, int *width_ptr)
 
int getBatchSize (int num_items)
 
TensorProtos writeValues (std::vector< std::vector< std::vector< float >>> &values, std::vector< std::vector< int >> &dims)
 
TensorProtos convertImages (std::string &image_file)
 
template<class TYPE >
vector< TYPE > splitString (std::string &line)
 
TensorProtos convertValues (std::string &file_name)
 
void ConvertToRawDataset (const string &input_db_name, const string &output_db_name)
 
void writeValues (std::vector< std::vector< std::vector< float >>> &values, std::vector< std::vector< int >> &dims, std::string output_file)
 
void convertImages ()
 
void convertValues ()
 
void ReadImage (std::ifstream *file, int *label, char *buffer)
 
void WriteToDB (const string &filename, const int num_items, const int &offset, db::DB *db)
 
void ConvertCIFAR ()
 
void ConvertImageDataset (const string &input_folder, const string &list_filename, const string &output_db_name, const bool)
 
uint32_t swap_endian (uint32_t val)
 
void convert_dataset (const char *image_filename, const char *label_filename, const char *db_path, const int data_limit)
 
void run ()
 
 CAFFE_KNOWN_TYPE (TypeMetaTestFoo)
 
 CAFFE_KNOWN_TYPE (TypeMetaTestBar)
 
 CAFFE_KNOWN_TYPE (ClassAllowAssignment)
 
 CAFFE_KNOWN_TYPE (ClassNoAssignment)
 
template<>
C10_EXPORT const detail::TypeMetaData * TypeMeta::_typeMetaDataInstance< detail::_Uninitialized > () noexcept
 
 CAFFE_DEFINE_PREALLOCATED_KNOWN_TYPE (25, detail::_guard_long_unique< long >)
 
 CAFFE_DEFINE_PREALLOCATED_KNOWN_TYPE (26, detail::_guard_long_unique< std::vector< long >>)
 
bool operator< (TypeIdentifier lhs, TypeIdentifier rhs)
 
std::ostream & operator<< (std::ostream &stream, caffe2::TypeIdentifier typeId)
 
bool operator== (const TypeMeta &lhs, const TypeMeta &rhs) noexcept
 
bool operator!= (const TypeMeta &lhs, const TypeMeta &rhs) noexcept
 
std::ostream & operator<< (std::ostream &stream, caffe2::TypeMeta typeMeta)
 
bool BlobIsTensorType (const Blob &blob, DeviceType device_type)
 
Tensor * BlobSetTensor (Blob *blob, Tensor &&tensor)
 
Tensor GetSizedTensorWithOptions (Tensor &&previous_tensor, at::IntArrayRef dims, at::TensorOptions options)
 
Tensor * BlobGetMutableTensor (Blob *blob, at::IntArrayRef dims, at::TensorOptions options)
 
Tensor XBlobGetMutableTensor (Blob *blob, at::IntArrayRef dims, at::TensorOptions options)
 
Tensor * BlobGetMutableTensor (Blob *blob, DeviceType device_type)
 
const Tensor & BlobGetTensor (const Blob &blob, DeviceType device_type)
 
Tensor BlobGetTensorOrUndefined (const Blob &blob)
 
void SerializeBlob (const Blob &blob, const string &name, BlobSerializerBase::SerializationAcceptor acceptor, int chunk_size=kDefaultChunkSize)
 Serializes the given blob, if possible. More...
 
std::string SerializeBlob (const Blob &blob, const string &name)
 Convenience function to serialize a blob to a string. More...
 
int GetGPUIDForPointer (const void *ptr)
 Gets the GPU id that the current pointer is located at.
 
 C10_DEFINE_TYPED_REGISTRY (BlobSerializerRegistry, TypeIdentifier, BlobSerializerBase, std::unique_ptr)
 
 C10_DEFINE_REGISTRY (BlobDeserializerRegistry, BlobDeserializerBase)
 
void DeserializeBlob (const string &content, Blob *result)
 Deserializes from a string containing either BlobProto or TensorProto. More...
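 
 A minimal round-trip sketch using the convenience overload above (assuming the blob holds a type with a registered serializer, such as std::string via StringSerializer):
 
    #include "caffe2/core/blob_serialization.h"

    void SerializeRoundTrip() {
      caffe2::Blob blob;
      *blob.GetMutable<std::string>() = "payload";
      const std::string bytes = caffe2::SerializeBlob(blob, "my_blob");
      caffe2::Blob restored;
      caffe2::DeserializeBlob(bytes, &restored);    // reconstructs the blob from a BlobProto
    }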
 
void DeserializeBlob (const BlobProto &blob_proto, Blob *result)
 
Tensor EmptyTensorFromProto (const TensorProto &tensor_proto)
 
std::string SerializeAsString_EnforceCheck (const google::protobuf::MessageLite &msg, const char *error_location)
 
std::string SerializeBlobProtoAsString_EnforceCheck (const BlobProto &blob)
 
 C10_DECLARE_TYPED_REGISTRY (BlobSerializerRegistry, TypeIdentifier, BlobSerializerBase, std::unique_ptr)
 
unique_ptr< BlobSerializerBase > CreateSerializer (TypeIdentifier id)
 
 C10_DECLARE_REGISTRY (BlobDeserializerRegistry, BlobDeserializerBase)
 
unique_ptr< BlobDeserializerBase > CreateDeserializer (const string &type)
 
bool HasCudaRuntime ()
 
bool HasHipRuntime ()
 
const std::map< string, string > & GetBuildOptions ()
 
template<typename T , typename... Args>
std::enable_if<!std::is_array< T >::value, std::unique_ptr< T > >::type make_unique (Args &&...args)
 
template<typename T >
std::enable_if< std::is_array< T >::value, std::unique_ptr< T > >::type make_unique (const size_t n)
 
template<typename T , typename... Args>
std::enable_if< std::extent< T >::value!=0, std::unique_ptr< T > >::type make_unique (Args &&...)=delete
 
template<typename Dst , typename Src >
Dst dynamic_cast_if_rtti (Src ptr)
 
size_t cudnnCompiledVersion ()
 
size_t cudnnRuntimeVersion ()
 
void CheckCuDNNVersions ()
 
cudnnTensorFormat_t GetCudnnTensorFormat (const StorageOrder &order)
 A wrapper function to convert the Caffe storage order to cudnn storage order enum values.
 
int NumCudaDevices ()
 Returns the number of devices.
 
void SetDefaultGPUID (const int deviceid)
 
int GetDefaultGPUID ()
 
int CaffeCudaGetDevice ()
 Gets the current GPU id. More...
 
void CaffeCudaSetDevice (const int id)
 Sets the current GPU id. More...
 
const cudaDeviceProp & GetDeviceProperty (const int device)
 Gets the device property for the given device. More...
 
void DeviceQuery (const int deviceid)
 Runs a device query function and prints out the results to LOG(INFO).
 
bool GetCudaPeerAccessPattern (vector< vector< bool > > *pattern)
 
bool TensorCoreAvailable ()
 Return the availability of TensorCores for math.
 
const char * cublasGetErrorString (cublasStatus_t error)
 Return a human readable cublas error string.
 
const char * curandGetErrorString (curandStatus_t error)
 Return a human readable curand error string.
 
int CudaVersion ()
 A runtime function to report the cuda version that Caffe2 is built with.
 
bool HasCudaGPU ()
 Check if the current running session has a cuda gpu present. More...
 
CAFFE2_CUDA_API bool GetCudaPeerAccessPattern (vector< vector< bool >> *pattern)
 Return a peer access pattern by returning a matrix (in the format of a nested vector) of boolean values specifying whether peer access is possible. More...
 
int CAFFE_GET_BLOCKS (const int N)
 Compute the number of blocks needed to run N threads.
 
dim3 CAFFE_GET_BLOCKS_2D (const int N, const int)
 Compute the number of blocks needed to run N threads for a 2D grid.
 
uint32_t RandomNumberSeed ()
 A function to generate a random number seed that is unique on a best-effort basis, using an ever-incrementing seed and the current time.
 
CAFFE2_CUDA_API CudaMemoryPoolType GetCudaMemoryPoolType ()
 Gets the current memory pool type used by Caffe2. More...
 
 CAFFE_KNOWN_TYPE (db::DBReader)
 
 CAFFE_KNOWN_TYPE (db::Cursor)
 
void EventCreateCPU (const DeviceOption &option, Event *event)
 
void EventRecordCPU (Event *event, const void *, const char *err_msg)
 
void EventFinishCPU (const Event *event)
 
void EventWaitCPUCPU (const Event *event, void *)
 
EventStatus EventQueryCPU (const Event *event)
 
const std::string & EventErrorMessageCPU (const Event *event)
 
void EventSetFinishedCPU (const Event *event, const char *err_msg)
 
void EventSetCallbackCPU (Event *event, EventCallbackFunction callback)
 
void EventResetCPU (Event *event)
 
 REGISTER_EVENT_CREATE_FUNCTION (CPU, EventCreateCPU)
 
 REGISTER_EVENT_RECORD_FUNCTION (CPU, EventRecordCPU)
 
 REGISTER_EVENT_WAIT_FUNCTION (CPU, CPU, EventWaitCPUCPU)
 
 REGISTER_EVENT_FINISH_FUNCTION (CPU, EventFinishCPU)
 
 REGISTER_EVENT_QUERY_FUNCTION (CPU, EventQueryCPU)
 
 REGISTER_EVENT_ERROR_MESSAGE_FUNCTION (CPU, EventErrorMessageCPU)
 
 REGISTER_EVENT_SET_FINISHED_FUNCTION (CPU, EventSetFinishedCPU)
 
 REGISTER_EVENT_RESET_FUNCTION (CPU, EventResetCPU)
 
 REGISTER_EVENT_SET_CALLBACK_FUNCTION (CPU, EventSetCallbackCPU)
 
bool EventCanScheduleCPU (const Event *, const Event *)
 
void EventCreateCUDA (const DeviceOption &option, Event *event)
 
void EventRecordCUDA (Event *event, const void *context, const char *err_msg)
 
void EventFinishCUDA (const Event *event)
 
void EventWaitCUDACUDA (const Event *event, void *context)
 
void EventWaitCPUCUDA (const Event *event, void *context)
 
void EventWaitCUDACPU (const Event *event, void *context)
 
EventStatus EventQueryCUDA (const Event *event)
 
const std::string & EventErrorMessageCUDA (const Event *event)
 
void EventSetFinishedCUDA (const Event *event, const char *err_msg)
 
void EventResetCUDA (Event *event)
 
 REGISTER_EVENT_CREATE_FUNCTION (CUDA, EventCreateCUDA)
 
 REGISTER_EVENT_RECORD_FUNCTION (CUDA, EventRecordCUDA)
 
 REGISTER_EVENT_WAIT_FUNCTION (CUDA, CUDA, EventWaitCUDACUDA)
 
 REGISTER_EVENT_WAIT_FUNCTION (CPU, CUDA, EventWaitCPUCUDA)
 
 REGISTER_EVENT_WAIT_FUNCTION (CUDA, CPU, EventWaitCUDACPU)
 
 REGISTER_EVENT_FINISH_FUNCTION (CUDA, EventFinishCUDA)
 
 REGISTER_EVENT_QUERY_FUNCTION (CUDA, EventQueryCUDA)
 
 REGISTER_EVENT_ERROR_MESSAGE_FUNCTION (CUDA, EventErrorMessageCUDA)
 
 REGISTER_EVENT_SET_FINISHED_FUNCTION (CUDA, EventSetFinishedCUDA)
 
 REGISTER_EVENT_RESET_FUNCTION (CUDA, EventResetCUDA)
 
 REGISTER_EVENT_WAIT_FUNCTION (MKLDNN, CUDA, EventWaitCPUCUDA)
 
 REGISTER_EVENT_WAIT_FUNCTION (CUDA, MKLDNN, EventWaitCUDACPU)
 
OperatorDef * AddOp (NetDef *netdef_ptr, string op_type, std::vector< string > inputs, std::vector< string > outputs)
 
bool MatchStrings (string p, string s)
 This allows for the use of * and | to match operator types, engines, or any other property that is represented by strings. More...
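 
 A sketch of the matching semantics implied by the description (the patterns below are illustrative):
 
    caffe2::MatchStrings("*", "Conv");        // wildcard matches any string
    caffe2::MatchStrings("Conv|FC", "FC");    // '|' separates alternatives
    caffe2::MatchStrings("Conv|FC", "Relu");  // no alternative matches -> false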
 
bool MatchArguments (const OperatorDef &p_op, const OperatorDef &g_op)
 This ensures that each named arg that exists in the pattern also exists in g_op and is equal in value.
 
size_t miopenCompiledVersion ()
 
size_t miopenRuntimeVersion ()
 
void CheckMIOPENVersions ()
 
bool GlobalInitAlreadyRun ()
 Determine whether GlobalInit has already been run.
 
bool GlobalInit (int *pargc, char ***argv)
 Initialize the global environment of caffe2. More...
 
bool GlobalInit ()
 Initialize the global environment without command line arguments. More...
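 
 A minimal entry point showing the intended call order (GlobalInit should run before nets or operators are created):
 
    #include "caffe2/core/init.h"

    int main(int argc, char** argv) {
      caffe2::GlobalInit(&argc, &argv);   // parses flags and runs registered init functions
      // ... build workspaces, nets, and operators here ...
      return 0;
    }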
 
bool Caffe2CheckIntrinsicsFeatures (int *, char ***)
 
 REGISTER_CAFFE2_INIT_FUNCTION (Caffe2CheckIntrinsicsFeatures,&Caffe2CheckIntrinsicsFeatures,"Check intrinsics compatibility between the CPU feature and the binary.")
 
const CaffeMap< string, const ModuleSchema * > & CurrentModules ()
 Current Modules present in the Caffe2 runtime. More...
 
bool HasModule (const string &name)
 Checks whether a module is already present in the current binary.
 
void LoadModule (const string &name, const string &filename="")
 Load a module. More...
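 
 A minimal sketch using the declarations above (the module name is purely illustrative):
 
    if (!caffe2::HasModule("my_module")) {
      caffe2::LoadModule("my_module");    // optionally pass a filename to load explicitly
    }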
 
 C10_DEFINE_REGISTRY (NetRegistry, NetBase, const std::shared_ptr< const NetDef > &, Workspace *)
 
void AddGlobalNetObserverCreator (NetObserverCreator creator)
 
void ClearGlobalNetObservers ()
 
unique_ptr< NetBase > CreateNet (const NetDef &net_def, Workspace *ws)
 Creates a network, accessing / creating blobs in the given workspace. More...
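 
 A minimal sketch (assuming a populated NetDef; NetBase::Run executes the instantiated network):
 
    caffe2::Workspace ws;
    caffe2::NetDef net_def;
    // ... fill net_def, e.g. by parsing a serialized network definition ...
    std::unique_ptr<caffe2::NetBase> net = caffe2::CreateNet(net_def, &ws);
    if (net) {
      net->Run();
    }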
 
unique_ptr< NetBase > CreateNet (const std::shared_ptr< const NetDef > &net_def, Workspace *ws)
 
 C10_DECLARE_REGISTRY (NetRegistry, NetBase, const std::shared_ptr< const NetDef > &, Workspace *)
 
template<class TaskThreadPoolImpl , int device_type>
std::shared_ptr< TaskThreadPoolBase > GetAsyncNetThreadPool (int device_id, int pool_size, bool create_new)
 
 REGISTER_NET (async_scheduling, AsyncSchedulingNet)
 
std::shared_ptr< AsyncTaskGraphBase > GetAsyncTaskGraph (ExecutorHelper *helper, const ExecutionOptions &options)
 
 C10_DEFINE_SHARED_REGISTRY (TaskGraphRegistry, AsyncTaskGraphBase, ExecutorHelper *, const ExecutionOptions &)
 
 C10_REGISTER_CREATOR (TaskGraphRegistry, futures, GetAsyncTaskGraph)
 
 REGISTER_NET (parallel, ParallelNet)
 
 C10_DECLARE_SHARED_REGISTRY (TaskGraphRegistry, AsyncTaskGraphBase, ExecutorHelper *, const ExecutionOptions &)
 
 REGISTER_NET (simple, SimpleNet)
 
 REGISTER_NET (simple_refcount, SimpleRefCountNet)
 
const std::string OpRegistryKey (const std::string &op_type, const std::string &engine)
 
void SetPerOpEnginePref (const PerOpEnginePrefType &per_op_engine_pref)
 
void SetGlobalEnginePref (const GlobalEnginePrefType &global_engine_pref)
 
void SetEnginePref (const PerOpEnginePrefType &per_op_engine_pref, const GlobalEnginePrefType &global_engine_pref)
 
void SetOpEnginePref (const std::string &op_type, const CaffeMap< DeviceType, EnginePrefType > &op_pref)
 
unique_ptr< OperatorBaseCreateOperator (const OperatorDef &operator_def, Workspace *ws, int net_position)
 
std::map< DeviceType, OperatorRegistry * > * gDeviceTypeRegistry ()
 
 C10_DEFINE_REGISTRY (CPUOperatorRegistry, OperatorBase, const OperatorDef &, Workspace *)
 
 CAFFE_REGISTER_DEVICE_TYPE (CPU, CPUOperatorRegistry)
 
 C10_DEFINE_REGISTRY (CUDAOperatorRegistry, OperatorBase, const OperatorDef &, Workspace *)
 
 CAFFE_REGISTER_DEVICE_TYPE (CUDA, CUDAOperatorRegistry)
 
 C10_DEFINE_REGISTRY (HIPOperatorRegistry, OperatorBase, const OperatorDef &, Workspace *)
 
 CAFFE_REGISTER_DEVICE_TYPE (HIP, HIPOperatorRegistry)
 
 C10_DEFINE_REGISTRY (GradientRegistry, GradientMakerBase, const OperatorDef &, const vector< GradientWrapper > &)
 
GradientOpsMeta GetGradientForOp (const OperatorDef &def, const vector< GradientWrapper > &g_output)
 Gets the GradientOpsMeta for the given operator def.
 
TensorShapes InferBlobShapesAndTypes (CaffeMap< string, TensorShape > &blob_desc, const vector< NetDef * > &nets)
 
TensorShape GetTensorShapeOfBlob (const Blob *b)
 
TensorShapes InferBlobShapesAndTypesFromWorkspace (Workspace *ws, const vector< NetDef * > &nets)
 
TensorShapes InferBlobShapesAndTypesFromMap (const CaffeMap< std::string, std::vector< int64_t >> &blob_dimensions, const vector< NetDef * > &nets)
 
TensorShapes InferBlobShapesAndTypesFromMap (const CaffeMap< std::string, std::vector< int64_t >> &blob_dimensions, const CaffeMap< std::string, TensorProto_DataType > &blob_types, const vector< NetDef * > &nets)
 
std::map< string, std::pair< DeviceOption, DeviceOption > > ValidateTensorDevices (OperatorBase &op, const OperatorDef &op_def)
 
std::set< std::string > GetRegisteredOperators ()
 
void SetOperatorLogger (std::function< void(const OperatorDef &)> tracer)
 
std::function< void(const OperatorDef &)> GetOperatorLogger ()
 
 C10_DEFINE_TENSOR_TYPES_DISPATCHER (TensorTypes, DoRunWithType, DoRunWithOtherType) C10_DEFINE_TENSOR_TYPES_DISPATCHER(TensorTypes2
 
 C10_DECLARE_REGISTRY (CPUOperatorRegistry, OperatorBase, const OperatorDef &, Workspace *)
 
 C10_DECLARE_REGISTRY (CUDAOperatorRegistry, OperatorBase, const OperatorDef &, Workspace *)
 
 C10_DECLARE_REGISTRY (HIPOperatorRegistry, OperatorBase, const OperatorDef &, Workspace *)
 
 C10_DEFINE_REGISTRY (C10OperatorRegistry, OperatorBase, const OperatorDef &, Workspace *)
 
 C10_DECLARE_REGISTRY (GradientRegistry, GradientMakerBase, const OperatorDef &, const vector< GradientWrapper > &)
 
C10_EXPORT std::ostream & operator<< (std::ostream &out, const OpSchema &schema)
 
template<typename T_I = int>
TensorShape CreateTensorShape (vector< T_I > dims,::caffe2::TensorProto_DataType dt)
 
vector< int64_t > GetDimsVector (const TensorShape &shape)
 
uint64_t nElemFromDim (const TensorShape &X, int dim=0)
 
uint64_t nElemBetweenDim (const TensorShape &X, int start, int stop)
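 A short sketch tying the shape helpers above together; the header path is an assumption.
```
#include <cstdint>
#include <vector>

#include "caffe2/core/operator.h"  // assumed location of these shape helpers

void ShapeHelpersDemo() {
  // Build a TensorShape proto describing a float NCHW batch.
  caffe2::TensorShape shape = caffe2::CreateTensorShape(
      std::vector<int>{8, 3, 224, 224}, caffe2::TensorProto::FLOAT);

  std::vector<int64_t> dims = caffe2::GetDimsVector(shape);  // {8, 3, 224, 224}
  uint64_t total = caffe2::nElemFromDim(shape);        // 8 * 3 * 224 * 224
  uint64_t hw = caffe2::nElemBetweenDim(shape, 2, 4);  // 224 * 224
  (void)dims; (void)total; (void)hw;
}
```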
 
std::pair< std::vector< DeviceOption >, std::vector< DeviceOption > > InferOpInputOutputDevice (const OperatorDef &op)
 
template<uint64_t OpsPerPoint>
OpSchema::Cost PointwiseCostInference (const OperatorDef &, const vector< TensorShape > &inputs)
 
bool RunPlanOnWorkspace (Workspace *ws, const PlanDef &plan, ShouldContinue shouldContinue)
 
 CAFFE_KNOWN_TYPE (QTensor< CPUContext >)
 
template<typename F >
detail::ScopeGuardImplDecay< F > MakeGuard (F &&f) noexcept(noexcept(detail::ScopeGuardImplDecay< F >(static_cast< F && >(f))))
 ScopeGuard is a general implementation of the "Initialization is Resource Acquisition" idiom. More...
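 A minimal sketch of the RAII idiom that MakeGuard implements; the header path is an assumption.
```
#include <cstdio>

#include "caffe2/core/scope_guard.h"  // assumed header location

void WriteWithCleanup(const char* path) {
  std::FILE* f = std::fopen(path, "w");
  if (!f) {
    return;
  }
  // The guard invokes its callable when it goes out of scope, whether the
  // function returns normally or unwinds via an exception.
  auto close_file = caffe2::MakeGuard([f] { std::fclose(f); });

  std::fputs("hello\n", f);
}  // fclose(f) runs here
```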
 
ExportedStatMap toMap (const ExportedStatList &stats)
 
 CAFFE_DEFINE_PREALLOCATED_KNOWN_TYPE (12, Tensor)
 
TypeMeta GetTensorType (const void *c)
 
TypeCall GetTypeCallFunction (TypeIdentifier id)
 
void RegisterTypeCallFunction (TypeIdentifier id, TypeCall c)
 
vector< int64_t > GetTensorInfo (const void *c, size_t *capacity, DeviceOption *device)
 
TensorInfoCall GetTensorInfoFunction (TypeIdentifier id)
 
void RegisterTensorInfoFunction (TypeIdentifier id, TensorInfoCall c)
 
void TensorVectorResize (std::vector< Tensor > &tensors, int size, DeviceType type)
 
Tensor empty (at::IntArrayRef dims, at::TensorOptions options)
 
void ReinitializeTensor (Tensor *t, at::IntArrayRef dims, at::TensorOptions options)
 Reinitialize a Tensor to given dims and options if necessary, note that this will not do anything if the Tensor already has correct size and data type.
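 A sketch of the typical call pattern, assuming the tensor header path and the at::TensorOptions builder style.
```
#include "caffe2/core/tensor.h"  // assumed header location

void FillWithOnes(caffe2::Tensor* t) {
  // Allocates (or reuses) a 4 x 5 float CPU tensor; per the description above
  // this is a no-op when dims and dtype already match.
  caffe2::ReinitializeTensor(t, {4, 5}, at::dtype<float>().device(caffe2::CPU));

  float* data = t->mutable_data<float>();
  for (int64_t i = 0; i < t->numel(); ++i) {
    data[i] = 1.0f;
  }
}
```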
 
void ReinitializeAndCopyFrom (Tensor *t, at::TensorOptions options, const Tensor &src, bool async)
 
template<typename T >
Tensor TensorCPUFromValues (at::IntArrayRef dims, at::ArrayRef< T > values)
 Creates a CPU tensor, and fills its contents with the given values. More...
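 For example, a small constant tensor can be built directly from initializer lists (a sketch; the header path is an assumption).
```
#include "caffe2/core/tensor.h"  // assumed header location

caffe2::Tensor MakeConstants() {
  // 2 x 3 float CPU tensor filled row-major with the given values.
  return caffe2::TensorCPUFromValues<float>(
      {2, 3}, {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f});
}
```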
 
 CAFFE_KNOWN_TYPE (int8::Int8TensorCPU)
 
 C10_DEFINE_REGISTRY (TransformRegistry, Transform)
 
unique_ptr< TransformCreateTransform (string key)
 
NetDef ApplyTransform (const string &key, const NetDef &netdef)
 
double average_net_run_duration (const NetDef &netdef, const NetDef &init_netdef, const int warmup_runs, const int main_runs)
 
NetDef ApplyTransformIfFaster (const string &key, const NetDef &netdef, const NetDef &init_netdef, const int warmup_runs, const int main_runs, const double improvement_threshold)
 
 C10_DECLARE_REGISTRY (TransformRegistry, Transform)
 
TensorProto::DataType TypeMetaToDataType (const TypeMeta &meta)
 
const TypeMetaDataTypeToTypeMeta (const TensorProto::DataType &dt)
 
StorageOrder StringToStorageOrder (const string &str)
 
constexpr char NameScopeSeparator ()
 
template<typename T >
bool fp16_type ()
 
template<>
bool fp16_type< at::Half > ()
 
std::string GetUniqueName ()
 
 REGISTER_CPU_OPERATOR (CreateDB, CreateDBOp< CPUContext >)
 
 OPERATOR_SCHEMA (CreateDB).NumInputs(0).NumOutputs(1)
 
 NO_GRADIENT (CreateDB)
 
 REGISTER_CUDA_OPERATOR (CreateDB, CreateDBOp< CUDAContext >)
 
 REGISTER_CPU_OPERATOR (FileStoreHandlerCreate, FileStoreHandlerCreateOp< CPUContext >)
 
 NumInputs (0).NumOutputs(1).SetDoc(R"DOC( Creates a unique_ptr<StoreHandler> that uses the filesystem as backing store (typically a filesystem shared between many nodes, such as NFS). This store handler is not built to be fast. Its recommended use is for integration tests and prototypes where extra dependencies are cumbersome. Use an ephemeral path to ensure multiple processes or runs don't interfere. )DOC").Arg("path", "base path used by the FileStoreHandler").Arg("prefix", "prefix for all keys used by this store").Output(0, "handler", "unique_ptr<StoreHandler>")
 
 NO_GRADIENT (FileStoreHandlerCreateOp)
 
 REGISTER_CUDA_OPERATOR (FileStoreHandlerCreate, FileStoreHandlerCreateOp< CUDAContext >)
 
 REGISTER_CPU_OPERATOR (RedisStoreHandlerCreate, RedisStoreHandlerCreateOp< CPUContext >)
 
 Arg("host", "host name of Redis server").Arg("port", "port number of Redis server").Arg("prefix", …)
 
 NO_GRADIENT (RedisStoreHandlerCreateOp)
 
 REGISTER_CUDA_OPERATOR (RedisStoreHandlerCreate, RedisStoreHandlerCreateOp< CUDAContext >)
 
 CAFFE_KNOWN_TYPE (std::unique_ptr< StoreHandler >)
 
 REGISTER_CPU_OPERATOR (StoreSet, StoreSetOp)
 
 NumInputs (2).NumOutputs(0).SetDoc(R"DOC( Set a blob in a store. The key is the input blob's name and the value is the data in that blob. The key can be overridden by specifying the 'blob_name' argument. )DOC").Arg("blob_name", "alternative key for the blob (optional)").Input(0, "handler", "unique_ptr<StoreHandler>").Input(1, "data", "data blob")
 
 REGISTER_CPU_OPERATOR (StoreGet, StoreGetOp)
 
 NumInputs (1).NumOutputs(1).SetDoc(R"DOC( Get a blob from a store. The key is the output blob's name. The key can be overridden by specifying the 'blob_name' argument. )DOC").Arg("blob_name", "alternative key for the blob (optional)").Input(0, "handler", "unique_ptr<StoreHandler>").Output(0, "data", "data blob")
 
 REGISTER_CPU_OPERATOR (StoreAdd, StoreAddOp)
 
 SetDoc(R"DOC( … If the key is not set, the store initializes it to 0 and then performs the add operation. The operation returns the resulting counter value. )DOC").Arg("blob_name", "key of the counter (required)").Arg("add_value", "value that is added (optional, default: 1)").Input(0, "handler", "unique_ptr<StoreHandler>").Output(0, "value", "the current value of the counter")
 
 REGISTER_CPU_OPERATOR (StoreWait, StoreWaitOp)
 
 NumInputs (1, 2).NumOutputs(0).SetDoc(R"DOC( Wait for the specified blob names to be set. The blob names can be passed either as an input blob with blob names or as an argument. )DOC").Arg("blob_names", "names of the blobs to wait for (optional)").Input(0, "handler", "unique_ptr<StoreHandler>").Input(1, "names", "names of the blobs to wait for (optional)")
 
 REGISTER_CPU_OPERATOR (FC_Decomp, FullyConnectedOpDecomp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (FCGradient_Decomp, FullyConnectedDecompGradientOp< float, CPUContext >)
 
 OPERATOR_SCHEMA (FC_Decomp).NumInputs(4).NumOutputs(1)
 
 OPERATOR_SCHEMA (FCGradient_Decomp).NumInputs(4).NumOutputs(3
 
 REGISTER_GRADIENT (FC_Decomp, GetFCDecompGradient)
 
 REGISTER_CUDA_OPERATOR (FC_Decomp, FullyConnectedOpDecomp< float, CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (FCGradient_Decomp, FullyConnectedDecompGradientOp< float, CUDAContext >)
 
 REGISTER_CPU_OPERATOR (TTContraction, TTContractionOp< float, CPUContext >)
 
 REGISTER_CUDA_OPERATOR (TTContraction, TTContractionOp< float, CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (TTContractionGradient, TTContractionGradientOp< float, CUDAContext >)
 
void adam_ideep_update (int N, const float *g, const float *m, const float *v, float *ng, float *nm, float *nv, float beta1, float beta2, float eps_hat, float correction, const float *lr)
 
void adam_ideep_compute (int N, const float *w, const float *g, const float *m, const float *v, float *nw, float *nm, float *nv, float beta1, float beta2, float eps_hat, float correction, const float *lr)
 
void adam_ideep_compute_output_grad (int N, const float *w, const float *g, const float *m, const float *v, float *nw, float *nm, float *nv, float *ng, float beta1, float beta2, float eps_hat, float correction, const float *lr)
 
 REGISTER_IDEEP_OPERATOR (Adam, IDEEPAdamOp< float >)
 
 REGISTER_IDEEP_OPERATOR (ChannelShuffle, ChannelShuffleOp)
 
 REGISTER_IDEEP_OPERATOR (ChannelShuffleGradient, ChannelShuffleGradientOp)
 
 REGISTER_IDEEP_OPERATOR (Concat, IDEEPConcatOp)
 
 REGISTER_IDEEP_OPERATOR (Split, IDEEPSplitOp)
 
 REGISTER_IDEEP_OPERATOR (ConvFusion, IDEEPConvFusionOp)
 
std::function< void(OpSchema &)> ConvFusionDocGenerator (const char *dim)
 
 REGISTER_IDEEP_OPERATOR (Conv, IDEEPConvOp)
 
 REGISTER_IDEEP_OPERATOR (ConvGradient, IDEEPConvGradientOp)
 
 REGISTER_IDEEP_OPERATOR (ConvTranspose, IDEEPConvTransposeOp)
 
 REGISTER_IDEEP_OPERATOR (ConvTransposeGradient, IDEEPConvTransposeGradientOp)
 
 REGISTER_IDEEP_OPERATOR (Dropout, IDEEPDropoutOp)
 
 REGISTER_IDEEP_OPERATOR (DropoutGrad, IDEEPDropoutGradientOp)
 
 REGISTER_IDEEP_OPERATOR (Sum, IDEEPSumOp)
 
 REGISTER_IDEEP_OPERATOR (Add, IDEEPSumOp)
 
 REGISTER_IDEEP_OPERATOR (ExpandDims, IDEEPExpandDimsOp)
 
 REGISTER_IDEEP_OPERATOR (Squeeze, IDEEPSqueezeOp)
 
 USE_IDEEP_DEF_ALIASES ()
 
 REGISTER_IDEEP_OPERATOR (FC, IDEEPFullyConnectedOp)
 
 REGISTER_IDEEP_OPERATOR (FCGradient, IDEEPFullyConnectedGradientOp)
 
 REGISTER_IDEEP_OPERATOR (LRN, IDEEPLRNOp)
 
 REGISTER_IDEEP_OPERATOR (LRNGradient, IDEEPLRNGradientOp)
 
void momentum_sgd_update (const int N, const float *g, const float *m, float *ng, float *nm, const float *lr, const float momentum, const bool nesterov, float *param)
 
 REGISTER_IDEEP_OPERATOR (MomentumSGD, IDEEPMomentumSGDOp)
 
 REGISTER_IDEEP_OPERATOR (MomentumSGDUpdate, IDEEPMomentumSGDUpdateOp)
 
 REGISTER_IDEEP_COMPARE_OPERATOR (EQ)
 
 REGISTER_IDEEP_COMPARE_OPERATOR (GT)
 
 REGISTER_IDEEP_COMPARE_OPERATOR (GE)
 
 REGISTER_IDEEP_COMPARE_OPERATOR (LT)
 
 REGISTER_IDEEP_COMPARE_OPERATOR (LE)
 
 REGISTER_IDEEP_COMPARE_OPERATOR (NE)
 
 REGISTER_IDEEP_OPERATOR (Softmax, IDEEPFallbackOp< SoftmaxOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (LabelCrossEntropy, IDEEPFallbackOp< LabelCrossEntropyOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (AveragedLoss, IDEEPFallbackOp< AveragedLoss< float, CPUContext >, SkipIndices< 0 >>)
 
 REGISTER_IDEEP_OPERATOR (Flatten, IDEEPFallbackOp< FlattenOp< CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (ResizeLike, IDEEPFallbackOp< ResizeLikeOp< CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (Transpose, IDEEPFallbackOp< TransposeOp< CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (Slice, IDEEPFallbackOp< SliceOp< CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (Clip, IDEEPFallbackOp< ClipOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (ScatterAssign, IDEEPFallbackOp< ScatterAssignOp< CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (Cast, IDEEPFallbackOp< CastOp< CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (XavierFill, IDEEPFallbackOp< XavierFillOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (ConstantFill, IDEEPFallbackOp< ConstantFillOp< CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (GaussianFill, IDEEPFallbackOp< GaussianFillOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (MSRAFill, IDEEPFallbackOp< MSRAFillOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (GivenTensorFill, IDEEPFallbackOp< GivenTensorFillOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (GivenTensorDoubleFill, IDEEPFallbackOp< GivenTensorFillOp< double, CPUContext >, SkipIndices< 0 >>)
 
 REGISTER_IDEEP_OPERATOR (GivenTensorBoolFill, IDEEPFallbackOp< GivenTensorFillOp< bool, CPUContext >, SkipIndices< 0 >>)
 
 REGISTER_IDEEP_OPERATOR (GivenTensorIntFill, IDEEPFallbackOp< GivenTensorFillOp< int, CPUContext >, SkipIndices< 0 >>)
 
 REGISTER_IDEEP_OPERATOR (GivenTensorInt64Fill, IDEEPFallbackOp< GivenTensorFillOp< int64_t, CPUContext >, SkipIndices< 0 >>)
 
 REGISTER_IDEEP_OPERATOR (GivenTensorStringFill, IDEEPFallbackOp< GivenTensorFillOp< std::string, CPUContext >, SkipIndices< 0 >>)
 
 REGISTER_IDEEP_OPERATOR (Load, IDEEPFallbackOp< LoadOp< CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (Save, IDEEPFallbackOp< SaveOp< CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (RMACRegions, IDEEPFallbackOp< RMACRegionsOp< CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (RoIPool, IDEEPFallbackOp< RoIPoolOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (RoIAlign, IDEEPFallbackOp< RoIAlignOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (RoIAlignRotated, IDEEPFallbackOp< RoIAlignRotatedOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (GenerateProposals, IDEEPFallbackOp< GenerateProposalsOp< CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (GenerateProposalsCPP, IDEEPFallbackOp< GenerateProposalsOp< CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (CollectAndDistributeFpnRpnProposals, IDEEPFallbackOp< CollectAndDistributeFpnRpnProposalsOp< CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (BoxWithNMSLimit, IDEEPFallbackOp< BoxWithNMSLimitOp< CPUContext >, SkipIndices< 0, 1, 2 >>)
 
 REGISTER_IDEEP_OPERATOR (BBoxTransform, IDEEPFallbackOp< BBoxTransformOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (AffineChannel, IDEEPFallbackOp< AffineChannelOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (StopGradient, IDEEPFallbackOp< StopGradientOp< CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (PadImage, IDEEPFallbackOp< PadImageOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (PRelu, IDEEPFallbackOp< PReluOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (CTCGreedyDecoder, IDEEPFallbackOp< CTCGreedyDecoderOp< CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (CTCBeamSearchDecoder, IDEEPFallbackOp< CTCBeamSearchDecoderOp< CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (AveragedLossGradient, IDEEPFallbackOp< AveragedLossGradient< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (LabelCrossEntropyGradient, IDEEPFallbackOp< LabelCrossEntropyGradientOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (SoftmaxGradient, IDEEPFallbackOp< SoftmaxGradientOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (Iter, IDEEPFallbackOp< IterOp< CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (LearningRate, IDEEPFallbackOp< LearningRateOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (Abs, IDEEPFallbackOp< UnaryElementwiseOp< TensorTypes< float >, CPUContext, AbsFunctor< CPUContext >>>)
 
 REGISTER_IDEEP_OPERATOR (Atan, IDEEPFallbackOp< UnaryElementwiseOp< TensorTypes< float >, CPUContext, AtanFunctor< CPUContext >>>)
 
 REGISTER_IDEEP_OPERATOR (Sqrt, IDEEPFallbackOp< UnaryElementwiseOp< TensorTypes< float >, CPUContext, SqrtFunctor< CPUContext >>>)
 
 REGISTER_IDEEP_OPERATOR (Div, IDEEPFallbackOp< BinaryElementwiseOp< NumericTypes, CPUContext, DivFunctor< CPUContext >>>)
 
 REGISTER_IDEEP_OPERATOR (Mul, IDEEPFallbackOp< BinaryElementwiseOp< NumericTypes, CPUContext, MulFunctor< CPUContext >>>)
 
 REGISTER_IDEEP_OPERATOR (Sub, IDEEPFallbackOp< BinaryElementwiseOp< NumericTypes, CPUContext, SubFunctor< CPUContext >>>)
 
 REGISTER_IDEEP_OPERATOR (Tanh, IDEEPFallbackOp< UnaryElementwiseOp< TensorTypes< float >, CPUContext, TanhFunctor< CPUContext >>>)
 
 REGISTER_IDEEP_OPERATOR (L1Distance, IDEEPFallbackOp< L1DistanceOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (Scale, IDEEPFallbackOp< ScaleOp< CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (Accuracy, IDEEPFallbackOp< AccuracyOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (AddGradient, IDEEPFallbackOp< BinaryElementwiseGradientOp< NumericTypes, CPUContext, AddFunctor< CPUContext >>>)
 
 REGISTER_IDEEP_OPERATOR (TanhGradient, IDEEPFallbackOp< BinaryElementwiseOp< TensorTypes< float >, CPUContext, TanhGradientFunctor< CPUContext >>>)
 
 REGISTER_IDEEP_OPERATOR (MulGradient, IDEEPFallbackOp< BinaryElementwiseGradientOp< NumericTypes, CPUContext, MulFunctor< CPUContext >>>)
 
 REGISTER_IDEEP_OPERATOR (TensorProtosDBInput, IDEEPFallbackOp< TensorProtosDBInput< CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (CloseBlobsQueue, IDEEPFallbackOp< CloseBlobsQueueOp< CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (SoftmaxWithLoss, IDEEPFallbackOp< SoftmaxWithLossOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (SoftmaxWithLossGradient, IDEEPFallbackOp< SoftmaxWithLossGradientOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (NHWC2NCHW, IDEEPFallbackOp< NHWC2NCHWOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (NCHW2NHWC, IDEEPFallbackOp< NCHW2NHWCOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (Expand, IDEEPFallbackOp< ExpandOp< TensorTypes< std::int32_t, std::int64_t, float, double >, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (Gather, IDEEPFallbackOp< GatherOp< CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (Normalize, IDEEPFallbackOp< NormalizeOp< float, CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (ReduceL2, IDEEPFallbackOp< ReduceOp< TensorTypes< float >, CPUContext, L2Reducer< CPUContext >>>)
 
 REGISTER_IDEEP_OPERATOR (ReduceSum, IDEEPFallbackOp< ReduceOp< TensorTypes< std::int32_t, std::int64_t, float, double >, CPUContext, SumReducer< CPUContext >>>)
 
 REGISTER_IDEEP_OPERATOR (ReduceMean, IDEEPFallbackOp< ReduceOp< TensorTypes< float >, CPUContext, MeanReducer< CPUContext >>>)
 
 REGISTER_IDEEP_OPERATOR (BatchMatMul, IDEEPFallbackOp< BatchMatMulOp< CPUContext >>)
 
 REGISTER_IDEEP_OPERATOR (MaxPool, IDEEPPoolOp)
 
 REGISTER_IDEEP_OPERATOR (MaxPoolGradient, IDEEPPoolGradientOp)
 
 REGISTER_IDEEP_OPERATOR (AveragePool, IDEEPPoolOp)
 
 REGISTER_IDEEP_OPERATOR (AveragePoolGradient, IDEEPPoolGradientOp)
 
 REGISTER_IDEEP_OPERATOR (CreateBlobsQueue, IDEEPCreateBlobsQueueOp)
 
 SHOULD_NOT_DO_GRADIENT (IDEEPCreateBlobsQueueOp)
 
 REGISTER_IDEEP_OPERATOR (SafeEnqueueBlobs, IDEEPSafeEnqueueBlobsOp)
 
 SHOULD_NOT_DO_GRADIENT (IDEEPSafeEnqueueBlobsOp)
 
 REGISTER_IDEEP_OPERATOR (Relu, IDEEPReluOp)
 
 REGISTER_IDEEP_OPERATOR (ReluGradient, IDEEPReluGradientOp)
 
 REGISTER_IDEEP_OPERATOR (LeakyRelu, IDEEPReluOp)
 
 REGISTER_IDEEP_OPERATOR (LeakyReluGradient, IDEEPReluGradientOp)
 
 REGISTER_IDEEP_OPERATOR (Reshape, IDEEPReshapeOp)
 
 REGISTER_IDEEP_OPERATOR (Shape, IDEEPShapeOp)
 
 REGISTER_IDEEP_OPERATOR (Sigmoid, IDEEPSigmoidOp)
 
 REGISTER_IDEEP_OPERATOR (SigmoidGradient, IDEEPSigmoidGradientOp)
 
 REGISTER_IDEEP_OPERATOR (SpatialBN, IDEEPSpatialBNOp)
 
 REGISTER_IDEEP_OPERATOR (CopyCPUToIDEEP, CopyCPUToIDEEPOp)
 
 REGISTER_IDEEP_OPERATOR (CopyIDEEPToCPU, CopyIDEEPToCPUOp)
 
 REGISTER_IDEEP_OPERATOR (Copy, IDEEPCopyOp)
 
 REGISTER_IDEEP_OPERATOR (WeightedSum, IDEEPWeightedSumOp)
 
 Input(0, …, "The input TensorCPU to copy").Output(0, "ideep_blob", "The output IDEEP tensor to copy to")
 
 Input(0, …, "The input IDEEP tensor to copy").Output(0, "cpu_blob", "The output TensorCPU to copy to")
 
 C10_DECLARE_REGISTRY (IDEEPOperatorRegistry, OperatorBase, const OperatorDef &, Workspace *)
 
 CAFFE_KNOWN_TYPE (ideep::tensor)
 
 C10_DEFINE_REGISTRY (IDEEPOperatorRegistry, OperatorBase, const OperatorDef &, Workspace *)
 
 CAFFE_REGISTER_DEVICE_TYPE (DeviceType::IDEEP, IDEEPOperatorRegistry)
 
 REGISTER_EVENT_CREATE_FUNCTION (IDEEP, EventCreateCPU)
 
 REGISTER_EVENT_RECORD_FUNCTION (IDEEP, EventRecordCPU)
 
 REGISTER_EVENT_WAIT_FUNCTION (IDEEP, IDEEP, EventWaitCPUCPU)
 
 REGISTER_EVENT_WAIT_FUNCTION (IDEEP, CPU, EventWaitCPUCPU)
 
 REGISTER_EVENT_WAIT_FUNCTION (CPU, IDEEP, EventWaitCPUCPU)
 
 REGISTER_EVENT_FINISH_FUNCTION (IDEEP, EventFinishCPU)
 
 REGISTER_EVENT_QUERY_FUNCTION (IDEEP, EventQueryCPU)
 
 REGISTER_EVENT_ERROR_MESSAGE_FUNCTION (IDEEP, EventErrorMessageCPU)
 
 REGISTER_EVENT_SET_FINISHED_FUNCTION (IDEEP, EventSetFinishedCPU)
 
 REGISTER_EVENT_RESET_FUNCTION (IDEEP, EventResetCPU)
 
 REGISTER_CPU_OPERATOR (ImageInput, ImageInputOp< CPUContext >)
 
 NumInputs (0, 1).NumOutputs(2, INT_MAX).TensorInferenceFunction([](const OperatorDef &def, const vector< TensorShape > &) { vector< TensorShape > out(2); ArgumentHelper helper(def); int batch_size = helper.GetSingleArgument< int >("batch_size", 0); int crop = helper.GetSingleArgument< int >("crop", -1); int color = helper.GetSingleArgument< int >("color", 1); CHECK_GT(crop, 0); out[0] = CreateTensorShape(vector< int >{batch_size, crop, crop, color ? 3 : 1}, TensorProto::FLOAT); out[1] = CreateTensorShape(vector< int >{1, batch_size}, TensorProto::INT32); return out; }).SetDoc(R"DOC( Imports and processes images from a database. For each run of the operator, batch_size images will be processed. GPUs can optionally be used for part of the processing. The following transformations are applied to the image: a bounding box is applied to the initial image (optional); the image is rescaled either up or down (with the scale argument) or just up (with the minsize argument); the image is randomly cropped (crop size is passed as an argument, but the location of the crop is random, except if is_test is passed, in which case the image is cropped at the center); the image is normalized, and each of its color channels can have separate normalization values. The dimension of the output image will always be crop x crop. )DOC")
 
 Arguments (condensed): batch_size (number of images to output for each run of the operator; must be 1 or greater), color (number of color channels, 1 or 3; defaults to 1), color_jitter (whether or not to do color jitter), img_saturation (image saturation scale used in color jittering; defaults to 0.4), img_brightness (image brightness scale used in color jittering), img_contrast (image contrast scale used in color jittering; defaults to 0.4), color_lighting (whether or not to do color lighting), color_lighting_std (std of the normal distribution from which the color-lighting scaling factor is sampled; defaults to 0.1), scale_jitter_type, scale (scale the size of the smallest dimension of the image to this; scale and minsize are mutually exclusive; must be larger than crop), minsize (scale the size of the smallest dimension of the image to this only if the size is initially smaller; scale and minsize are mutually exclusive; must be larger than crop), warp (the other dimension is proportionally scaled), crop (size to crop the image to; must be provided), mirror (whether or not to mirror the image), mean (mean by which to normalize color channels; defaults to 0), mean_per_channel (vector of means per color channel, 1 or 3 elements; defaults to the mean argument; channel order is BGR), std (standard deviation by which to normalize color channels), std_per_channel (vector of standard deviations per color channel, 1 or 3 elements; defaults to the std argument; channel order is BGR), bounding_ymin and bounding_xmin (bounding box coordinates; default to none), a flag indicating whether the input is in Caffe format, use_gpu_transform (1 if GPU acceleration should be used; defaults to 0; can only be 1 in a CUDAContext), decode_threads (number of CPU decode/transform threads), output_type (if gpu_transform, can be set to FLOAT or FLOAT16), db (name of the database, if not passed as input), db_type (type of the database), an argument giving the sizes of any outputs besides the data and label (should have a number of elements equal to the number of additional outputs), and random_scale (shortest side desired for image resize; defaults to [-1, -1], i.e. no random resize). Input(0, "reader", "The input reader (a db::DBReader)").Output(0, …, "Tensor containing the images").Output(1, "label", "Tensor containing the labels").Output(2, …)
 
 NO_GRADIENT (ImageInput)
 
template<class Context >
bool RandomSizedCropping (cv::Mat *img, const int crop, std::mt19937 *randgen)
 
template<class Context >
void Saturation (float *img, const int img_size, const float alpha_rand, std::mt19937 *randgen)
 
template<class Context >
void Brightness (float *img, const int img_size, const float alpha_rand, std::mt19937 *randgen)
 
template<class Context >
void Contrast (float *img, const int img_size, const float alpha_rand, std::mt19937 *randgen)
 
template<class Context >
void ColorJitter (float *img, const int img_size, const float saturation, const float brightness, const float contrast, std::mt19937 *randgen)
 
template<class Context >
void ColorLighting (float *img, const int img_size, const float alpha_std, const std::vector< std::vector< float >> &eigvecs, const std::vector< float > &eigvals, std::mt19937 *randgen)
 
template<class Context >
void ColorNormalization (float *img, const int img_size, const int channels, const std::vector< float > &mean, const std::vector< float > &std)
 
template<class Context >
void TransformImage (const cv::Mat &scaled_img, const int channels, float *image_data, const bool color_jitter, const float saturation, const float brightness, const float contrast, const bool color_lighting, const float color_lighting_std, const std::vector< std::vector< float >> &color_lighting_eigvecs, const std::vector< float > &color_lighting_eigvals, const int crop, const bool mirror, const std::vector< float > &mean, const std::vector< float > &std, std::mt19937 *randgen, std::bernoulli_distribution *mirror_this_image, bool is_test=false)
 
template<class Context >
void CropTransposeImage (const cv::Mat &scaled_img, const int channels, uint8_t *cropped_data, const int crop, const bool mirror, std::mt19937 *randgen, std::bernoulli_distribution *mirror_this_image, bool is_test=false)
 
 REGISTER_CUDA_OPERATOR (ImageInput, ImageInputOp< CUDAContext >)
 
template<typename T_IN , typename T_OUT , class Context >
bool TransformOnGPU (Tensor &X, Tensor *Y, Tensor &mean, Tensor &std, Context *context)
 
bool tryConvertToMPSCNN (const NetDef &initNet, const NetDef &predictNet, NetDef *mpscnnPredictNet)
 
NetDef annotateDefWithReadCounts (const NetDef &net)
 
NetDef rewriteForMetal (const NetDef &net)
 
NetDef runMPSCNNFusion (const NetDef &net)
 
void dumpDef (const NetDef &d)
 
void mpscnnRecordExecutionFinish ()
 
MPSCNNContextgetMPSCNNContext ()
 
bool tryConvertToMPSCNNIntermediateCopies (const NetDef &initNet, const NetDef &predictNet, NetDef *mpscnnPredictNet)
 
NetDef setSpecialArgs (const NetDef &def)
 
void testMPSCNN ()
 
void compareModels (const NetDef &initNet, NetDef predictNet)
 
void verifyRewrite (const NetDef &initNet, const NetDef &net, std::vector< int > inputDims)
 
std::string & gSNPELocation ()
 
 REGISTER_CPU_OPERATOR (SNPE, SNPEOp)
 
void uniformQuantize2b1b (const TensorCPU &X, const std::vector< std::unique_ptr< TensorCPU >> &XQ, float offset, float inter_center_distance)
 
void qconv (const ConvArgs &args, const TensorCPU &X, const TensorCPU &W, const TensorCPU *b, TensorCPU *Y)
 
void qpad_zero (const ConvArgs &args, const TensorCPU &X, TensorCPU *Y)
 
void signQuantize (const TensorCPU &X, TensorCPU *XQ)
 
void filterNormalization11 (const TensorCPU &WQ, TensorCPU *WQN)
 
void filterNormalizationL1 (const TensorCPU &W, TensorCPU *WL1)
 
void qim2col (const ConvArgs &args, const TensorCPU &XQ, const TensorCPU &WQ, TensorCPU *XQcol)
 
std::unique_ptr< QConvStatecreate2b1bConvState (Workspace *ws, const TensorCPU &W, const TensorCPU *b)
 
void run2b1bConvGeneric (QConvState *state, const ConvArgs &args, const TensorCPU &X, TensorCPU *Y)
 
void run2b1bUnification (QConvState *state, size_t N, size_t C, const float *WQNVdata, const float *YQs0Vdata, const float *YQs1Vdata, size_t YQstride, float *Ydata, size_t Ystride, const float *bias)
 
 REGISTER_CPU_OPERATOR (QConv, QConvOp)
 
size_t divRoundUp (size_t x, size_t d)
 
bool run2b1bConvNeon (QConvState *state, const ConvArgs &args, const TensorCPU &X, TensorCPU *Y)
 
 CAFFE_KNOWN_TYPE (MPICommonWorldWrapper)
 
std::mutex & MPIMutex ()
 
MPI_Comm GlobalMPIComm ()
 Gets the global MPI communicator used by Caffe2. More...
 
void SetGlobalMPIComm (MPI_Comm new_comm)
 Sets the global MPI communicator. More...
 
int MPICommSize (MPI_Comm comm)
 A helper function to return the size of the given communicator.
 
int MPICommRank (MPI_Comm comm)
 A helper function to return the rank of the given communicator.
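 A sketch of querying the global communicator with the helpers above; the header path and the helper function name are assumptions, and MPI (and the global communicator) must already be initialized, for example via MPISetupPeers below.
```
#include <iostream>

#include "caffe2/mpi/mpi_common.h"  // assumed header location

void ReportRank() {
  // Requires MPI to be initialized and the global communicator to be set.
  MPI_Comm comm = caffe2::GlobalMPIComm();
  std::cout << "rank " << caffe2::MPICommRank(comm) << " of "
            << caffe2::MPICommSize(comm) << std::endl;
}
```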
 
void MPISetupPeers (const int replicas, const string &role, const string &job_path)
 A function used to perform peer setup so one does not need to use mpirun / mpiexec to run the binary. More...
 
void CheckInitializedMPI ()
 
 REGISTER_CPU_OPERATOR (Abs, UnaryElementwiseOp< TensorTypes< float >, CPUContext, AbsFunctor< CPUContext >>)
 
 REGISTER_CPU_OPERATOR (AbsGradient, BinaryElementwiseOp< TensorTypes< float >, CPUContext, AbsGradientFunctor< CPUContext >>)
 
 SetDoc(R"DOC( Calculates the absolute value of the given input tensor, element-wise. A Python example (using workspace.FeedBlob) is available on GitHub. )DOC")
 
 OPERATOR_SCHEMA (AbsGradient).NumInputs(2).NumOutputs(1).IdenticalTypeAndShape()
 
 REGISTER_GRADIENT (Abs, GetAbsGradient)
 
 REGISTER_CPU_OPERATOR (Accumulate, AccumulateOp< float, CPUContext >)
 
 SetDoc(R"DOC( … we first initialize the output tensor to all zeros and then do accumulation. Any further calls to the operator, given that no one else fiddles with the output in the interim, will do simple accumulations. Accumulation is done using the Axpby operation as shown: Y = 1 * X + gamma * Y, where X is the input tensor, Y is the output tensor, and gamma is the multiplier argument. )DOC").Arg("gamma", "(float, default 1.0) Accumulation multiplier").Input(0, "input", "The input tensor that has to be accumulated to the output tensor. If the output size is not the same as the input size, the output tensor is first reshaped and initialized to zero, and only then is the accumulation done.").Output(0, "output", "Accumulated output tensor")
 
 SHOULD_NOT_DO_GRADIENT (Accumulate)
 
 REGISTER_CPU_OPERATOR (Accuracy, AccuracyOp< float, CPUContext >)
 
 NumInputs(2).NumOutputs(1).ScalarType(TensorProto::FLOAT)
 
 SHOULD_NOT_DO_GRADIENT (Accuracy)
 
 REGISTER_CPU_OPERATOR (Acos, UnaryElementwiseOp< TensorTypes< float >, CPUContext, AcosFunctor< CPUContext >>)
 
 REGISTER_CPU_OPERATOR (AcosGradient, BinaryElementwiseOp< TensorTypes< float >, CPUContext, AcosGradientFunctor< CPUContext >>)
 
 SetDoc(R"DOC( Calculates the arccosine of the given input tensor, element-wise. )DOC").Input(0, "input", "Input tensor").Output(0, …)
 
 REGISTER_GRADIENT (Acos, GetAcosGradient)
 
 REGISTER_CPU_OPERATOR (AdjustBatch, AdjustBatchOp< CPUContext >)
 
 Input (0,"Input","Input data").Input(1
 
Real batch size Output (0,"Output","Data with Adjusted batch size").Output(1
 
Real batch size Real batah size Arg ("max_batch_size","(*int*): max batch size").SetDoc(R"DOC( Adjust the batch size of `input` tensor. When we only have 1 input
 
 REGISTER_CPU_OPERATOR (AffineChannel, AffineChannelOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (AffineChannelGradient, AffineChannelGradientOp< float, CPUContext >)
 
 NumInputs (3).NumOutputs(1).AllowInplace({{0, 0}}).SetDoc(R"DOC( Applies a separate affine transformation to each channel of the input. Useful for replacing spatial batch norm with its equivalent fixed transformation. )DOC").Input(0, …, "Feature map input with order NCHW or NHWC.").Input(1, "scale", "1D input of shape (C); the c-th element is the scale factor of the affine transformation for the c-th channel of the input.").Input(2, …, "1D input of shape (C); the c-th element is the bias of the affine transformation for the c-th channel of the input.").Output(0, "Y", "Output with the same order of Input.")
 
 NumInputs ({2, 3}).NumOutputs(…).AllowInplace({{0, 0}})
 
 REGISTER_GRADIENT (AffineChannel, GetAffineChannelGradient)
 
 REGISTER_CPU_OPERATOR (ArgMax, ArgOp< CPUContext, ArgMaxReducer< CPUContext >>)
 
 REGISTER_CPU_OPERATOR (ArgMin, ArgOp< CPUContext, ArgMinReducer< CPUContext >>)
 
 REGISTER_CPU_OPERATOR (Asin, UnaryElementwiseOp< TensorTypes< float >, CPUContext, AsinFunctor< CPUContext >>)
 
 REGISTER_CPU_OPERATOR (AsinGradient, BinaryElementwiseOp< TensorTypes< float >, CPUContext, AsinGradientFunctor< CPUContext >>)
 
 REGISTER_GRADIENT (Asin, GetAsinGradient)
 
 REGISTER_CPU_OPERATOR (Assert, AssertOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (Atan, UnaryElementwiseOp< TensorTypes< float >, CPUContext, AtanFunctor< CPUContext >>)
 
 REGISTER_CPU_OPERATOR (AtanGradient, BinaryElementwiseOp< TensorTypes< float >, CPUContext, AtanGradientFunctor< CPUContext >>)
 
 REGISTER_GRADIENT (Atan, GetAtanGradient)
 
 REGISTER_CPU_OPERATOR (BatchBucketize, BatchBucketizeOp< CPUContext >)
 
 NumInputs (4).NumOutputs(1).SetDoc(R"DOC( Bucketize the float_features into sparse features. The float_features is an N * D tensor, where N is the batch_size and D is the feature_dim. The indices is a tensor containing the indices of the features that need to be bucketized. The lengths is a tensor that splits the following boundaries argument. The boundaries is a tensor containing the border list for each feature. Within each indices there should be no duplicates, and the number of elements in indices should be less than or equal to D. Each element in the lengths vector (lengths[i]) represents the number of boundaries in the sub border list. The sum of all elements in lengths must be equal to the size of boundaries. … )DOC")
 
 REGISTER_CPU_OPERATOR (BatchGather, BatchGatherOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (BatchGatherGradient, BatchGatherGradientOp< CPUContext >)
 
 SetDoc (R"DOC( Batch gather operation, first dimension in DATA is the batch size. Given DATA tensor of rank r >= 2, and INDICES tensor of rank q >= 1, gather entries of the second outer dimension (axis == 1) of DATA indexed by INDICES, and concatenate them in an output tensor of rank q + (r - 1). Example: DATA = [ [1.0, 1.2, 2.4, 4.5], [2.3, 3.4, 3.6, 2.3], [4.5, 5.7, 1.2, 4.5], ] INDICES = [0, 2] OUTPUT = [ [1.0, 2.4], [2.3, 3.6], [4.5, 1.2], ] )DOC").Input(0
 
Tensor of rank of any rank q Output (0,"OUTPUT","Tensor of rank q + (r - 1).").InheritOnnxSchema()
 
 OPERATOR_SCHEMA (BatchGatherGradient).NumInputs(3).NumOutputs(1)
 
 REGISTER_GRADIENT (BatchGather, GetBatchGatherGradient)
 
 REGISTER_CPU_OPERATOR (BatchMatMul, BatchMatMulOp< CPUContext >)
 
vector< TensorShape > TensorInferenceForBatchMatMul (const OperatorDef &def, const vector< TensorShape > &in)
 
OpSchema::Cost CostInferenceForBatchMatMul (const OperatorDef &def, const vector< TensorShape > &in)
 
 SetDoc fragment: batch matrix multiplication, Y_i = A_i * B_i, where A has shape (dim0, dim1, ... M, K) and B has shape (dim0, dim1, ... K, N).
 
 REGISTER_CPU_OPERATOR (BatchMoments, BatchMomentsOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (BatchMomentsGradient, BatchMomentsGradientOp< float, CPUContext >)
 
 OPERATOR_SCHEMA (BatchMoments).NumInputs(1).NumOutputs(2)
 
 OPERATOR_SCHEMA (BatchMomentsGradient).NumInputs(3).NumOutputs(1)
 
 REGISTER_GRADIENT (BatchMoments, GetBatchMomentsGradient)
 
 REGISTER_CPU_OPERATOR (BatchSparseToDense, BatchSparseToDenseOp< float, CPUContext >)
 
 NumInputs (3, 4).NumOutputs(1).DisallowInputFillers().SetDoc(R"DOC( Convert sparse matrix representation into dense matrix. A sparse matrix is represented by `lengths` vector
 
 REGISTER_CPU_OPERATOR (BisectPercentile, BisectPercentileOp< CPUContext >)
 
 SetDoc fragment: … with the size of (batch_size, num_feature), where we also need additional information regarding the feature value distribution. There are several vectors to keep the data-to-percentile mapping information as arguments (context); the interpolation is applied by (R[t], R[t+1]) and (U[t], L[t]). As there are F features (F > …) …
 
 REGISTER_CPU_OPERATOR (BooleanMask, BooleanMaskOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (BooleanMaskLengths, BooleanMaskLengthsOp< CPUContext >)
 
 SetDoc (R"DOC( Given a 1D `data` tensor and a boolean `mask` tensor of the same shape, returns a `masked_data` tensor containing only the elements corresponding to positions where the `mask` is True, and a `masked_indices` tensor containing the indices of the True elements. Github Links: - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/boolean_mask_ops.cc <details> <summary> <b>Example</b> </summary> **Code** ``` workspace.ResetWorkspace() op = core.CreateOperator( "BooleanMask", ["data", "mask"], ["masked_data", "masked_indices"] ) workspace.FeedBlob("data", np.array([1,2,3,4,5,6])) workspace.FeedBlob("mask", np.array([True,False,False,True,True,False])) print("data:", workspace.FetchBlob("data")) print("mask:", workspace.FetchBlob("mask")) workspace.RunOperatorOnce(op) print("masked_data:", workspace.FetchBlob("masked_data")) print("masked_indices:", workspace.FetchBlob("masked_indices")) ``` **Result** ``` data: [1 2 3 4 5 6] mask: [ True False False True True False] masked_data: [1 4 5] masked_indices: [0 3 4] ``` </details> )DOC").Input(0
 
 Input(1, "mask", "(*Tensor*): boolean mask of the same shape as `data`").Output(0, "masked_data", "(*Tensor*): 1D tensor of same type as `data` input that contains the masked input tensor").Output(1, "masked_indices", …)
 
 SetDoc fragment (BooleanMaskLengths): return the segment lengths of the corresponding segmented tensor after **BooleanMask** is applied. If the lengths tensor is $[a_1, a_2, ..., a_n]$, then the length of the mask tensor must be $a_1 + a_2 + ... + a_n$. A Python example (using workspace.FeedBlob) is available on GitHub.
 
 NO_GRADIENT (BooleanMaskLengths)
 
template<typename Functor >
void MaskWithFunctor (size_t N, size_t M, int B, const float *in, Functor fn, float fill_val, float *out)
 
template<typename Functor >
void RepeatedMaskWithFunctor (size_t N, size_t M, int D, const float *in, Functor fn, float fill_val, float *out)
 
 REGISTER_CPU_OPERATOR (SequenceMask, SequenceMaskOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (BooleanUnmask, BooleanUnmaskOp< CPUContext >)
 
 NumInputs ([](int n){ return n > 0 && n % 2 == 0; }).NumOutputs(1).SetDoc(R"DOC( Given a series of masks and values, reconstruct values together according to masks. … Note that for each mask there must be at least one True; if a mask position is not True, we accept the first value and no longer expect a value for that position. Note also that data and mask inputs alternate. A Python example (using workspace.FeedBlob) is available on GitHub. )DOC")
 
 REGISTER_CPU_OPERATOR (ByteWeightDequant, ByteWeightDequantOp< CPUContext >)
 
 OPERATOR_SCHEMA (ByteWeightDequant).NumInputs(1).NumOutputs(1)
 
 REGISTER_CPU_OPERATOR (Cast, CastOp< CPUContext >)
 
 TensorInferenceFunction fragment: out.push_back(in[0]); out[0].set_data_type(cast::GetCastDataType(helper, "to"));
 
 SetDoc (R"DOC( Casts the elements of a given input tensor to a data type specified by the `to` argument and returns an output tensor of the same size in the converted type. The `to` argument must be one of the data types specified in the *DataType* enum field in the TensorProto message (see below). If the `to` argument is not provided or is not one of the enumerated types in *DataType*, Caffe2 throws an Enforce error. NOTE: Casting from strings is not supported, and casting to strings is only supported on CPU. TensorProto *DataType* field: ``` message TensorProto { ... enum DataType { UNDEFINED = 0; FLOAT = 1; // float INT32 = 2; // int BYTE = 3; // BYTE, when deserialized, is going to be restored as uint8. STRING = 4; // string BOOL = 5; // bool UINT8 = 6; // uint8_t INT8 = 7; // int8_t UINT16 = 8; // uint16_t INT16 = 9; // int16_t INT64 = 10; // int64_t FLOAT16 = 12; // at::Half DOUBLE = 13; // double } ``` Github Links: - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/cast_op.cc <details> <summary> <b>Example</b> </summary> **Code** ``` workspace.ResetWorkspace() op = core.CreateOperator( "Cast", ["X"], ["Y"], to=2 ) workspace.FeedBlob("X", (np.random.rand(3,3)).astype(np.float32)*10) print("X:", workspace.FetchBlob("X")) workspace.RunOperatorOnce(op) print("Y:", workspace.FetchBlob("Y")) ``` **Result** ``` X: [[9.436466 5.8529844 0.54932857] [1.1583444 2.9936118 0.22950427] [3.9143739 3.4040766 8.905341 ]] Y: [[9 5 0] [1 2 0] [3 3 8]] ``` </details> )DOC").Arg("to"
 
 REGISTER_GRADIENT (Cast, GetCastGradient)
 
 REGISTER_CPU_OPERATOR (Cbrt, UnaryElementwiseOp< TensorTypes< float >, CPUContext, CbrtFunctor< CPUContext >>)
 
 REGISTER_CPU_OPERATOR (CbrtGradient, BinaryElementwiseOp< TensorTypes< float >, CPUContext, CbrtGradientFunctor< CPUContext >>)
 
 IdenticalTypeAndShape ().Input(0
 
 REGISTER_GRADIENT (Cbrt, GetCbrtGradient)
 
 REGISTER_CPU_OPERATOR (Ceil, CeilOp< float, CPUContext >)
 
 SetDoc (R"DOC( Element-wise application of the ceil function ($y=ceil(x)$) to the input tensor `X`. Output tensor shape is the same as the input tensor. Github Link: - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/ceil_op.cc <details> <summary> <b>Example</b> </summary> **Code** ``` workspace.ResetWorkspace() op = core.CreateOperator( "Ceil", ["X"], ["X"], ) workspace.FeedBlob("X", (np.random.uniform(-10, 10, (5,5))).astype(np.float32)) print("X before running op:", workspace.FetchBlob("X")) workspace.RunOperatorOnce(op) print("X after running op:", workspace.FetchBlob("X")) ``` **Result** ``` X before running op: [[ 8.44598 -6.5098248 -2.2993476 -7.6859694 0.58566964] [-7.846551 -0.03689406 6.9362907 -4.0521703 4.4969673 ] [ 0.33355865 -7.895527 -8.393201 9.374202 -2.3930092 ] [-6.3061996 3.1403487 3.782099 -8.516556 -2.8387244 ] [-2.0164998 4.7663913 -3.422966 0.3636999 8.75713 ]] X after running op: [[ 9. -6. -2. -7. 1.] [-7. -0. 7. -4. 5.] [ 1. -7. -8. 10. -2.] [-6. 4. 4. -8. -2.] [-2. 5. -3. 1. 9.]] ``` </details> )DOC").Input(0
 
 GRADIENT_NOT_IMPLEMENTED_YET (Ceil)
 
 REGISTER_CPU_OPERATOR (ChannelBackpropStats, ChannelBackpropStatsOp< CPUContext >)
 
 SetDoc fragment: given the gradient for the output of SpatialBN and the per-channel mean and inverse std var vectors for the input, computes the per-channel bias and scale gradient to be used during the backward pass for subsequent spatial batch normalization gradient calculation. The results of this op are subsequently reduced over multiple devices to obtain statistics over a larger batch size in cases where the batch size for a single model copy is too low to yield the full benefit of batch normalization. The resulting bias and scale can then be plugged back into SpatialBNGradient to get results over the larger batch size. Input(0, "X", "The input 4-dimensional tensor of shape NCHW").Input(1, …, "The mean saved from the forward pass as a 1-dimensional tensor of size C.").Input(2, "inv_std", "The saved inverse standard deviation as a 1-dimensional tensor of size C.").Input(3, …, "Gradient for the output layer, here used as input because we are on the backward pass").Output(0, "scale_grad", "Gradient for the scale vector").Output(1, …)
 
 SHOULD_NOT_DO_GRADIENT (ChannelBackpropStats)
 
 REGISTER_CPU_OPERATOR (ChannelShuffle, ChannelShuffleOp< float, CPUContext >)
 
 REGISTER_CPU_GRADIENT_OPERATOR (ChannelShuffleGradient, ChannelShuffleGradientOp< float, CPUContext >)
 
 REGISTER_GRADIENT (ChannelShuffle, GetChannelShuffleGradient)
 
 REGISTER_CPU_OPERATOR (ChannelStats, ChannelStatsOp< CPUContext >)
 
 SetDoc fragment: computes the sum of all elements per channel and the sum of all elements squared per channel. These values can be reduced across multiple batches and used to obtain the mean and variance across the full set of batches. Using the new mean and variance as input to SpatialBN has the effect of changing the batch size over which SpatialBN is applied. Output(0, …, "The output 1-dimensional tensor of size C containing the sum of elements of X per channel.").Output(1, "sumsq", "The output 1-dimensional tensor of size C containing the sum of elements squared per channel.")
 
 SHOULD_NOT_DO_GRADIENT (ChannelStats)
 
 REGISTER_CPU_OPERATOR (Clip, ClipOp< float, CPUContext >)
 
 REGISTER_CPU_GRADIENT_OPERATOR (ClipGradient, ClipGradientOp< float, CPUContext >)
 
 CreateCommonWorld: Input(0, …, "Key/value handler for rendezvous (optional).").Output(0, "comm_world", "A common world for collective operations.").Arg("size", "(int) size of the common world.").Arg("rank", …)
 
 CloneCommonWorld: Input(0, …, "Existing common world to clone.").Output(0, "comm_world", "A common world for collective operations.")
 
 DestroyCommonWorld: SetDoc("Closes all connections managed by a common world.").Input(0, …, "The common world to be closed.")
 
 Broadcast: NumInputsOutputs([](int in, int out){ return in >= 2 && out == (in - 1); }).EnforceInplace(…).InputsCanCrossDevices().IdenticalTypeAndShapeOfInput(0).SetDoc(R"DOC( Does a broadcast operation from the root node to every other node. The tensor on each node should have been pre-created with the same shape and data type. )DOC").Input(0, …, "The common world.").Input(1, "X", "A tensor to be broadcasted.").Output(0, …, "In place as input.").Arg("root", "(int, default 0) the root to run broadcast from.")
 
 Reduce: Input(0, …, "The common world.").Input(1, "X", "A tensor to be reduced.").Output(0, …, "The reduced result on the root node; not set for other nodes.").Arg("root", "(int, default 0) the root to run reduce into.")
 
 Allreduce: IdenticalTypeAndShapeOfInput(0).InputsCanCrossDevices().SetDoc(R"DOC( Does an allreduce operation among the nodes. Currently only Sum is supported. )DOC").Input(0, …, "The common world.").Input(1, "X", "A tensor to be allreduced.").Output(0, …)
 
 ReduceScatter: Input(0, …, "The common world.").Input(1, "X", "A tensor to be reduce-scattered.").Output(0, …)
 
 Allgather: NumInputs(2, INT_MAX).NumOutputs(1).InputsCanCrossDevices().SetDoc(R"DOC( Does an allgather operation among the nodes. )DOC").Input(0, …, "The common world.").Input(1, "X", "A tensor to be allgathered.").Output(0, …)
 
 SendTensor: NumInputs({2, 4}).NumOutputs(0).SetDoc(R"DOC( Sends the tensor to another node. )DOC").Input(0, …, "The common world.").Input(2, …, "An int CPUTensor of size 1 specifying the rank to send to. If given, this overrides the 'dst' argument of the op.").Input(3, "tag", "An int CPUTensor of size 1 specifying the tag to send the tensor with. This overrides the 'tag' argument of the op.").Arg("dst", "The rank to send the tensor to.").Arg("tag", "(int) a tag to send the tensor with.").Arg("raw_buffer", …)
 
 ReceiveTensor: AllowInplace({{2, 1}, {3, 2}}).SetDoc(R"DOC( Receives the tensor from another node. )DOC").Input(0, …, "The common world.").Input(1, "Y", "In-place output. If raw_buffer is specified, Y should have pre-allocated data and type.").Input(2, …, "An int CPUTensor of size 1 specifying the rank to receive from. If given, this overrides the 'src' argument of the op.").Output(0, …, "The received tensor.").Output(1, "src", "The sender that sent the message as a CPUTensor of size 1 and of type int.").Output(2, …, "The tag that the message is sent with as a CPUTensor of size 1 and of type int.").Arg("src", "(int) the rank to receive the tensor from.").Arg("tag", "(int) a tag to receive the tensor with.").Arg("raw_buffer", "(bool) if set, only send the content and assume that the receiver has already known the tensor's shape and information.")
 
 SHOULD_NOT_DO_GRADIENT (CreateCommonWorld)
 
 SHOULD_NOT_DO_GRADIENT (CloneCommonWorld)
 
 SHOULD_NOT_DO_GRADIENT (DestroyCommonWorld)
 
 SHOULD_NOT_DO_GRADIENT (Broadcast)
 
 SHOULD_NOT_DO_GRADIENT (Reduce)
 
 SHOULD_NOT_DO_GRADIENT (Allgather)
 
 SHOULD_NOT_DO_GRADIENT (Allreduce)
 
 SHOULD_NOT_DO_GRADIENT (ReduceScatter)
 
 SHOULD_NOT_DO_GRADIENT (Barrier)
 
 SHOULD_NOT_DO_GRADIENT (SendTensor)
 
 SHOULD_NOT_DO_GRADIENT (ReceiveTensor)
 
 REGISTER_CPU_OPERATOR (CreateCommonWorld, NoDefaultEngineOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (CloneCommonWorld, NoDefaultEngineOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (DestroyCommonWorld, NoDefaultEngineOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (Broadcast, NoDefaultEngineOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (Reduce, NoDefaultEngineOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (Allgather, NoDefaultEngineOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (Allreduce, NoDefaultEngineOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (ReduceScatter, NoDefaultEngineOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (Barrier, NoDefaultEngineOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (SendTensor, NoDefaultEngineOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (ReceiveTensor, NoDefaultEngineOp< CPUContext >)
 
 REGISTER_CUDA_OPERATOR (CreateCommonWorld, NoDefaultEngineOp< CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (CloneCommonWorld, NoDefaultEngineOp< CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (Broadcast, NoDefaultEngineOp< CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (Reduce, NoDefaultEngineOp< CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (Allgather, NoDefaultEngineOp< CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (Allreduce, NoDefaultEngineOp< CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (SendTensor, NoDefaultEngineOp< CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (ReceiveTensor, NoDefaultEngineOp< CUDAContext >)
 
 REGISTER_CPU_OPERATOR (Split, SplitOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (SplitByLengths, SplitByLengthsOp< CPUContext >)
 
NumOutputs (1, INT_MAX).Input(0,"input","(*Tensor*): tensor to split").Input(1,"split","(*Tensor`<int>`*): [OPTIONAL] list of output lengths (see also arg `split`)").Arg("split","(*Tuple(int)*): length of each output").Arg("order","(*string*): order of dimensions of input and output blobs; either \"NCHW\" or \"NHWC\"").Output(0,"[output_0, output_1, ...]","(*Tensor*): output tensor").DeviceInferenceFunction(splitOpDevInfer).SetDoc(R"DOC( Split an `input` tensor into a list of tensors, along the axis specified by the `axis` dimension. The lengths of the split can be specified using argument `split` or the optional second input blob to the operator. Otherwise, the tensor is split into equal sized parts. Github Links: - https: <details><summary> <b>Example</b> </summary> **Code** ``` workspace.ResetWorkspace() op = core.CreateOperator( "Split", ["input"], ["output_0","output_1","output_2"], split=(3,2,4), axis=0 ) workspace.FeedBlob("input", np.random.randint(10, size=(9))) print("input:", workspace.FetchBlob("input")) workspace.RunOperatorOnce(op) print("output_0:", workspace.FetchBlob("output_0")) print("output_1:", workspace.FetchBlob("output_1")) print("output_2:", workspace.FetchBlob("output_2")) ``` **Result** ``` input: [2 2 6 6 6 0 5 7 4] output_0: [2 2 6] output_1: [6 6] output_2: [0 5 7 4] ``` </details> )DOC").InheritOnnxSchema()
 
NumOutputs (1, INT_MAX).Input(0,"input","The tensor to split").Input(1,"lengths","The tensor `l_i` indicates the logic block of the input").Arg("axis","Which axis to split on").Arg("order","Either NHWC or NCHW; will split on the C axis, defaults to NCHW").DeviceInferenceFunction([](const OperatorDef &def){auto op_device=def.has_device_option()?def.device_option():DeviceOption();vector< DeviceOption > in_dev(def.input_size(), op_device);vector< DeviceOption > out_dev(def.output_size(), op_device);in_dev[1]=DeviceOption();return std::make_pair(in_dev, out_dev);}).SetDoc(R"DOC( Split a tensor into a list of tensors, given a lengths input, along the specified axis.
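A minimal Python sketch of SplitByLengths, assuming the standard caffe2.python core/workspace helpers, a 1-D int32 `lengths` blob whose entries sum to the size of the split axis, and illustrative blob names:

```
import numpy as np
from caffe2.python import core, workspace

workspace.ResetWorkspace()
# Split a length-9 vector into pieces of length 2, 3 and 4 along axis 0.
op = core.CreateOperator("SplitByLengths", ["input", "lengths"],
                         ["out0", "out1", "out2"], axis=0)
workspace.FeedBlob("input", np.arange(9).astype(np.float32))
workspace.FeedBlob("lengths", np.array([2, 3, 4], dtype=np.int32))
workspace.RunOperatorOnce(op)
print(workspace.FetchBlob("out0"))  # [0. 1.]
print(workspace.FetchBlob("out1"))  # [2. 3. 4.]
print(workspace.FetchBlob("out2"))  # [5. 6. 7. 8.]
```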
 
OpSchema::Cost CostInferenceForConcat (const OperatorDef &def, const std::vector< TensorShape > &in)
 
std::vector< TensorShape > TensorInferenceForConcat (const OperatorDef &def, const std::vector< TensorShape > &in)
 
 REGISTER_CUDA_OPERATOR (Split, SplitOp< CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (Concat, ConcatOp< CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (DepthSplit, SplitOp< CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (DepthConcat, ConcatOp< CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (SplitByLengths, SplitByLengthsOp< CUDAContext >)
 
 REGISTER_CPU_OPERATOR (Conditional, ConditionalOp< CPUContext >)
 
SetDoc (R"DOC( Given a 1-D tensor of boolean values, apply the conditional operator along the first dimension of DataT and DataF and return DataO. Note, DataT and DataF must have the exact same shape and type. )DOC").Input(0,"Condition","Boolean tensor to select DataT or DataF").Input(1,"DataT","Data to use when True").Input(2,"DataF","Data to use when False").Output(0,"DataO","Output data after applying ConditionalOp").IdenticalTypeAndShapeOfInput(1)
 
 NO_GRADIENT (Conditional)
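A minimal sketch of row-wise selection with Conditional, assuming the standard caffe2.python core/workspace helpers and illustrative blob names:

```
import numpy as np
from caffe2.python import core, workspace

workspace.ResetWorkspace()
op = core.CreateOperator("Conditional", ["condition", "DataT", "DataF"], ["DataO"])
# Rows where condition is True come from DataT, the others from DataF.
workspace.FeedBlob("condition", np.array([True, False, True]))
workspace.FeedBlob("DataT", np.ones((3, 2), dtype=np.float32))
workspace.FeedBlob("DataF", np.zeros((3, 2), dtype=np.float32))
workspace.RunOperatorOnce(op)
print(workspace.FetchBlob("DataO"))  # [[1. 1.] [0. 0.] [1. 1.]]
```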
 
std::vector< TensorShape > TensorInferenceForConvGradient (const OperatorDef &def, const std::vector< TensorShape > &in)
 
OpSchema::Cost CostInferenceForConvGradient (const OperatorDef &def, const vector< TensorShape > &inputs)
 
 REGISTER_CPU_OPERATOR (ConvGradient, ConvGradientOp< float, CPUContext >)
 
 NumInputs (2, 3).NumOutputs(1
 
 TensorInferenceFunction (TensorInferenceForConvGradient).CostInferenceFunction(CostInferenceForConvGradient)
 
 REGISTER_CPU_OPERATOR (Conv1DGradient, ConvGradientOp< float, CPUContext >)
 
 OPERATOR_SCHEMA (Conv1DGradient).NumInputs(2
 
 NumOutputs (1, 3)
 
 REGISTER_CPU_OPERATOR (Conv2DGradient, ConvGradientOp< float, CPUContext >)
 
 OPERATOR_SCHEMA (Conv2DGradient).NumInputs(2
 
 REGISTER_CPU_OPERATOR (Conv3DGradient, ConvGradientOp< float, CPUContext >)
 
 OPERATOR_SCHEMA (Conv3DGradient).NumInputs(2
 
 REGISTER_GRADIENT (Conv, GetConvGradient)
 
 REGISTER_GRADIENT (Conv1D, GetConvGradient)
 
 REGISTER_GRADIENT (Conv2D, GetConvGradient)
 
 REGISTER_GRADIENT (Conv3D, GetConvGradient)
 
std::function< void(OpSchema &)> ConvDocGenerator (const char *dim)
 
 REGISTER_CPU_OPERATOR (Conv, ConvOp< float, CPUContext >)
 
NumInputs(2, 3).NumOutputs(1).TensorInferenceFunction(ConvPoolOpBase< CPUContextREGISTER_CPU_OPERATOR (Conv1D, ConvOp< float, CPUContext >)
 
NumInputs(2, 3).NumOutputs(1).TensorInferenceFunction(ConvPoolOpBase< CPUContextREGISTER_CPU_OPERATOR (Conv2D, ConvOp< float, CPUContext >)
 
NumInputs(2, 3).NumOutputs(1).CostInferenceFunction(OpSchema REGISTER_CPU_OPERATOR (Conv3D, ConvOp< float, CPUContext >)
 
 REGISTER_CUDNN_OPERATOR (Conv, CudnnConvOp)
 
 REGISTER_CUDNN_OPERATOR (ConvGradient, CudnnConvGradientOp)
 
 REGISTER_CUDNN_OPERATOR (Conv1D, CudnnConvOp)
 
 REGISTER_CUDNN_OPERATOR (Conv1DGradient, CudnnConvGradientOp)
 
 REGISTER_CUDNN_OPERATOR (Conv2D, CudnnConvOp)
 
 REGISTER_CUDNN_OPERATOR (Conv2DGradient, CudnnConvGradientOp)
 
 REGISTER_CUDNN_OPERATOR (Conv3D, CudnnConvOp)
 
 REGISTER_CUDNN_OPERATOR (Conv3DGradient, CudnnConvGradientOp)
 
 REGISTER_CUDA_OPERATOR (Conv, ConvOp< float, CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (ConvGradient, ConvGradientOp< float, CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (Conv1D, ConvOp< float, CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (Conv1DGradient, ConvGradientOp< float, CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (Conv2D, ConvOp< float, CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (Conv2DGradient, ConvGradientOp< float, CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (Conv3D, ConvOp< float, CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (Conv3DGradient, ConvGradientOp< float, CUDAContext >)
 
template<>
void createSharedBuffer< CPUContext > (Workspace *ws)
 
template<>
void runWithSharedBuffer< CPUContext > (Workspace *ws, std::function< void(Tensor *buffer)> f)
 
template<typename Context >
void createSharedBuffer (Workspace *ws)
 Creates a mutex and shared buffer in the workspace. More...
 
template<typename Context >
void runWithSharedBuffer (Workspace *ws, std::function< void(Tensor *buffer)> f)
 Thread-safe, can be invoked from RunOnDevice() to serialize access to shared buffer.
 
template<>
void createSharedBuffer< CUDAContext > (Workspace *ws)
 
template<>
void runWithSharedBuffer< CUDAContext > (Workspace *ws, std::function< void(Tensor *buffer)> f)
 
 REGISTER_CPU_OPERATOR (ConvTransposeGradient, ConvTransposeGradientOp< float, CPUContext >)
 
 OPERATOR_SCHEMA (ConvTransposeGradient).NumInputs(3).NumOutputs(1
 
 REGISTER_GRADIENT (ConvTranspose, GetConvTransposeGradient)
 
 REGISTER_CPU_OPERATOR (ConvTranspose, ConvTransposeOp< float, CPUContext >)
 
The ConvTranspose op takes an input data tensor $X$, an input weight tensor $filter$, and optionally an input bias tensor $bias$. It then computes the transposed convolution, sometimes referred to as deconvolution, and produces a single output tensor $Y$. The hyperparameters of the op such as kernel size, stride, and padding are specified as args. At each stride, the filter is deconvolved with a subset of $X$ and the $bias$ is added. This is done throughout the input data until the output computation is complete. The output shapes are computed as follows: the number of channels in the output feature map is the number of kernels specified in the filter blob, and the spatial height and width are computed from the input size, stride, padding, and kernel size. In the implementation this operator inherits from the *ConvTransposeUnpoolOpBase* operator, which is why they are separate files. Github Links: -https: -https: -https: <details><summary><b>Example</b></summary> **Code** ```workspace.ResetWorkspace() op
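A minimal runnable ConvTranspose sketch, assuming the standard caffe2.python core/workspace helpers, an NCHW input, and a filter laid out as (input_channels, output_channels, kH, kW); blob names and shapes are illustrative:

```
import numpy as np
from caffe2.python import core, workspace

workspace.ResetWorkspace()
op = core.CreateOperator("ConvTranspose", ["X", "filter", "bias"], ["Y"],
                         kernel=2, stride=2)
workspace.FeedBlob("X", np.random.rand(1, 2, 4, 4).astype(np.float32))
workspace.FeedBlob("filter", np.random.rand(2, 1, 2, 2).astype(np.float32))
workspace.FeedBlob("bias", np.zeros(1, dtype=np.float32))
workspace.RunOperatorOnce(op)
print(workspace.FetchBlob("Y").shape)  # expected (1, 1, 8, 8)
```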
 
 REGISTER_CUDNN_OPERATOR (ConvTranspose, CudnnConvTransposeOp< float >)
 
 REGISTER_CUDNN_OPERATOR (ConvTransposeGradient, CudnnConvTransposeGradientOp< float >)
 
 REGISTER_CUDA_OPERATOR (ConvTranspose, ConvTransposeOp< float, CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (ConvTransposeGradient, ConvTransposeGradientOp< float, CUDAContext >)
 
 REGISTER_CPU_OPERATOR (CopyFromCPUInput, CopyOp< CPUContext, CPUContext, CPUContext >)
 
 REGISTER_CPU_OPERATOR (CopyOnDeviceLike, CopyOnDeviceLikeOp< CPUContext, CPUContext, CPUContext >)
 
 REGISTER_CPU_OPERATOR (Copy, CopyOp< CPUContext, CPUContext, CPUContext >)
 
 SetDoc (R"DOC( Copy tensor for GPU to CPU context. Must be run under GPU device option. )DOC").Input(0
 
The input tensor. Output (0,"output","Tensor that will contain a copy of the input.")
 
 SetDoc (R"DOC( Copy tensor for CPU to GPU context. Must be run under GPU device option. )DOC").Input(0
 
 SetDoc (R"DOC( Take a CPU input tensor and copy it to an output in the current Context (GPU or CPU). This may involves cross-device MemCpy. )DOC").Input(0
 
The input CPU tensor. Output (0,"output","either a TensorCUDA or a TensorCPU")
 
The input tensor Input (1,"dst","Tensor, on which device the copy will be performed.").Output(0
 
 REGISTER_GRADIENT (Copy, GetCopyGradient)
 
 REGISTER_GRADIENT (CopyGPUToCPU, GetGPUToCPUGradient)
 
 REGISTER_GRADIENT (CopyCPUToGPU, GetCPUToGPUGradient)
 
 REGISTER_CPU_OPERATOR (Cos, UnaryElementwiseOp< TensorTypes< float >, CPUContext, CosFunctor< CPUContext >>)
 
 REGISTER_CPU_OPERATOR (CosGradient, BinaryElementwiseOp< TensorTypes< float >, CPUContext, CosGradientFunctor< CPUContext >>)
 
Calculates the cosine of the given input tensor, element-wise. OPERATOR_SCHEMA (CosGradient).NumInputs(2).NumOutputs(1).IdenticalTypeAndShape()
 
 REGISTER_GRADIENT (Cos, GetCosGradient)
 
 REGISTER_CPU_OPERATOR (Cosh, UnaryElementwiseOp< TensorTypes< float >, CPUContext, CoshFunctor< CPUContext >>)
 
 REGISTER_CPU_OPERATOR (CoshGradient, BinaryElementwiseOp< TensorTypes< float >, CPUContext, CoshGradientFunctor< CPUContext >>)
 
 REGISTER_GRADIENT (Cosh, GetCoshGradient)
 
 REGISTER_CPU_OPERATOR (CosineEmbeddingCriterion, CosineEmbeddingCriterionOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (CosineEmbeddingCriterionGradient, CosineEmbeddingCriterionGradientOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (CreateCounter, CreateCounterOp< int64_t, CPUContext >)
 
 REGISTER_CPU_OPERATOR (ResetCounter, ResetCounterOp< int64_t, CPUContext >)
 
 REGISTER_CPU_OPERATOR (CountDown, CountDownOp< int64_t, CPUContext >)
 
 REGISTER_CPU_OPERATOR (CheckCounterDone, CheckCounterDoneOp< int64_t, CPUContext >)
 
 REGISTER_CPU_OPERATOR (CountUp, CountUpOp< int64_t, CPUContext >)
 
 REGISTER_CPU_OPERATOR (RetrieveCount, RetrieveCountOp< int64_t, CPUContext >)
 
NumInputs(1).NumOutputs(1).ScalarType(TensorProto SHOULD_NOT_DO_GRADIENT (CreateCounter)
 
 SHOULD_NOT_DO_GRADIENT (ResetCounter)
 
 SHOULD_NOT_DO_GRADIENT (CountDown)
 
 SHOULD_NOT_DO_GRADIENT (CountUp)
 
 SHOULD_NOT_DO_GRADIENT (RetrieveCount)
 
 CAFFE_KNOWN_TYPE (std::unique_ptr< Counter< int64_t >>)
 
 REGISTER_BLOB_SERIALIZER ((TypeMeta::Id< std::unique_ptr< Counter< int64_t >>>()), CounterSerializer)
 
 REGISTER_BLOB_DESERIALIZER (std::unique_ptr< Counter< int64_t >>, CounterDeserializer)
 
 REGISTER_CUDA_OPERATOR (CreateCounter, CreateCounterOp< int64_t, CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (ResetCounter, ResetCounterOp< int64_t, CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (CountDown, CountDownOp< int64_t, CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (CheckCounterDone, CheckCounterDoneOp< int64_t, CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (CountUp, CountUpOp< int64_t, CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (RetrieveCount, RetrieveCountOp< int64_t, CUDAContext >)
 
 CAFFE_KNOWN_TYPE (detail::WorkspaceStack)
 
 REGISTER_CPU_OPERATOR (CreateScope, CreateScopeOp< CPUContext >)
 
 SHOULD_NOT_DO_GRADIENT (CreateScope)
 
 OPERATOR_SCHEMA (CreateScope).NumInputs(0).NumOutputs(1).SetDoc(R"DOC( 'CreateScope' operator initializes and outputs empty scope that is used by Do operator to store local blobs )DOC")
 
 REGISTER_CPU_OPERATOR (HasScope, HasScopeOp< CPUContext >)
 
 SHOULD_NOT_DO_GRADIENT (HasScope)
 
 OPERATOR_SCHEMA (HasScope).NumInputs(1).NumOutputs(1).SetDoc(R"DOC( Checks whether scope blob has any saved scopes left )DOC")
 
 REGISTER_CPU_OPERATOR (LabelCrossEntropy, LabelCrossEntropyOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (LabelCrossEntropyGradient, LabelCrossEntropyGradientOp< float, CPUContext >)
 
 SetDoc (R"DOC( This operator computes the cross entropy between a $NxD$ dimensional input data tensor $X$ and a one dimensional input label tensor $label$. The op produces a single length $N$ output tensor $Y$. Here, $N$ is considered the batch size and $D$ is the size of each element in the batch. In practice, it is most commonly used at the end of models as a part of the loss computation, after the SoftMax operator and before the AveragedLoss operator. The cross entropy operation is defined as follows $$Y_i = -log(X_{ij})$$ where ($i$, $j$) is the classifier's prediction of the $j$th class (the correct one), and $i$ is the batch size. Each log has a lower limit for numerical stability. The difference between *LabelCrossEntropy* and *CrossEntropy* is how the labels are specified. Here, the labels are a length $N$ list of integers, whereas in CrossEntropy the labels are a $NxD$ dimensional matrix of one hot label vectors. However, the results of computation should be the same, as shown in the two examples where ($i$, $j$) is the classifier's prediction of the $j$th class (the correct one), and $i$ is the batch size. Each log has a lower limit for numerical stability. Github Links: - https://github.com/caffe2/caffe2/blob/master/caffe2/operators/cross_entropy_op.h - https://github.com/caffe2/caffe2/blob/master/caffe2/operators/cross_entropy_op.cc <details> <summary> <b>Example</b> </summary> **Code** ``` workspace.ResetWorkspace() op = core.CreateOperator( "LabelCrossEntropy", ["X", "label"], ["Y"] ) // Create X: Sample softmax output for 5-class model X = np.array([[.01, .05, .02, .02, .9],[.03, .1, .42, .05, .4]]) print("X:\n",X) // Create label: Sample 1-hot ground truth label vectors label = np.array([4,2]) print("label:\n",label) // Feed X & label into workspace workspace.FeedBlob("X", X.astype(np.float32)) workspace.FeedBlob("label", label.astype(np.int32)) // Run op workspace.RunOperatorOnce(op) // Collect Output print("Y:\n", workspace.FetchBlob("Y")) ``` **Result** ``` X: [[0.01 0.05 0.02 0.02 0.9 ] [0.03 0.1 0.42 0.05 0.4 ]] label: [4 2] Y: [0.10536055 0.8675006 ] ``` </details> )DOC").Input(0
 
Input tensor which is almost always the result of a softmax operation. $X$ is a 2D array of size $NxD$, where $N$ is the batch size and $D$ is the number of classes. Input (1,"label","Blob containing the labels used to compare the input. $label$ is a length $N$ list of integers, where each element is the integer label for the $n$th element of the batch.").Output(0
 
 REGISTER_GRADIENT (LabelCrossEntropy, GetLabelCrossEntropyGradient)
 
 REGISTER_CPU_OPERATOR (MakeTwoClass, MakeTwoClassOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (MakeTwoClassGradient, MakeTwoClassGradientOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (SigmoidCrossEntropyWithLogits, SigmoidCrossEntropyWithLogitsOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (SigmoidCrossEntropyWithLogitsGradient, SigmoidCrossEntropyWithLogitsGradientOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (WeightedSigmoidCrossEntropyWithLogits, WeightedSigmoidCrossEntropyWithLogitsOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (WeightedSigmoidCrossEntropyWithLogitsGradient, WeightedSigmoidCrossEntropyWithLogitsGradientOp< float, CPUContext >)
 
out[0] add_dims (in[0].dims(0))
 
out[0] add_dims (2)
 
 SetDoc (R"DOC( Given a vector of probabilities, this operator transforms this into a 2-column matrix with complimentary probabilities for binary classification. In explicit terms, given the vector X, the output Y is vstack(1 - X, X). )DOC").Input(0
 
Input vector of probabilities Output (0,"Y","2-column matrix with complimentary probabilities of X for ""binary classification")
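A minimal MakeTwoClass sketch, assuming the standard caffe2.python core/workspace helpers and illustrative blob names:

```
import numpy as np
from caffe2.python import core, workspace

workspace.ResetWorkspace()
op = core.CreateOperator("MakeTwoClass", ["X"], ["Y"])
workspace.FeedBlob("X", np.array([0.1, 0.7], dtype=np.float32))
workspace.RunOperatorOnce(op)
# Each probability p becomes the row [1 - p, p].
print(workspace.FetchBlob("Y"))  # [[0.9 0.1] [0.3 0.7]]
```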
 
 Arg ("log_D_trick", R"DOC( default is false; if enabled, will use the log d trick to avoid the vanishing gradients early on; see Goodfellow et. al (2014) )DOC").Arg("unjoined_lr_loss"
 
R"DOC( default is false; if enabled, the model will be allowed to train on an unjoined dataset, where some examples might be false negatives and might appear in the dataset later as (true) positive examples. )DOC").NumInputs(2).NumOutputs(1).IdenticalTypeAndShapeOfInputDim(0
 
SetDoc (R"DOC( Given two matrices logits and targets, of same shape, (batch_size, num_classes), computes the sigmoid cross entropy between the two. Returns a tensor of shape (batch_size,) of losses for each example. )DOC").Input(0
 
Matrix of logits for each example and class. Input (1,"targets","matrix of targets, same shape as logits.").Output(0
 
 SetDoc (R"DOC( Given three matrices: logits, targets, weights, all of the same shape, (batch_size, num_classes), computes the weighted sigmoid cross entropy between logits and targets. Specifically, at each position r,c, this computes weights[r, c] * crossentropy(sigmoid(logits[r, c]), targets[r, c]), and then averages over each row. Returns a tensor of shape (batch_size,) of losses for each example. )DOC").Input(0
 
Matrix of logits for each example and class; targets and weights are matrices of the same shape as logits. Output (0,"xentropy","Vector with the total xentropy for each example.")
 
 REGISTER_GRADIENT (MakeTwoClass, GetMakeTwoClassGradient)
 
 REGISTER_GRADIENT (SigmoidCrossEntropyWithLogits, GetSigmoidCrossEntropyWithLogitsGradient)
 
 REGISTER_GRADIENT (WeightedSigmoidCrossEntropyWithLogits, GetWeightedSigmoidCrossEntropyWithLogitsGradient)
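A minimal sketch of SigmoidCrossEntropyWithLogits, assuming the standard caffe2.python core/workspace helpers; blob names are illustrative:

```
import numpy as np
from caffe2.python import core, workspace

workspace.ResetWorkspace()
op = core.CreateOperator("SigmoidCrossEntropyWithLogits",
                         ["logits", "targets"], ["xentropy"])
workspace.FeedBlob("logits", np.array([[0.0, 2.0], [-1.0, 1.0]], dtype=np.float32))
workspace.FeedBlob("targets", np.array([[0.0, 1.0], [1.0, 0.0]], dtype=np.float32))
workspace.RunOperatorOnce(op)
# One loss value per row of the (batch_size, num_classes) inputs.
print(workspace.FetchBlob("xentropy").shape)  # (2,)
```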
 
 REGISTER_CPU_OPERATOR (CrossEntropy, CrossEntropyOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (CrossEntropyGradient, CrossEntropyGradientOp< float, CPUContext >)
 
 SetDoc (R"DOC( This operator computes the cross entropy between a $NxD$ dimensional input data tensor $X$ and a $NxD$ dimensional input label tensor $label$. The op produces a single length $N$ output tensor $Y$. Here, $N$ is considered the batch size and $D$ is the size of each element in the batch. In practice, it is most commonly used at the end of models as a part of the loss computation, after the SoftMax operator and before the AveragedLoss operator. The cross entropy operation is defined as follows $$Y_i = \sum_j (label_{ij} * log(X_{ij}))$$ where ($i$, $j$) is the classifier's prediction of the $j$th class (the correct one), and $i$ is the batch size. Each log has a lower limit for numerical stability. Github Links: - https://github.com/caffe2/caffe2/blob/master/caffe2/operators/cross_entropy_op.h - https://github.com/caffe2/caffe2/blob/master/caffe2/operators/cross_entropy_op.cc <details> <summary> <b>Example</b> </summary> **Code** ``` workspace.ResetWorkspace() op = core.CreateOperator( "CrossEntropy", ["X", "label"], ["Y"] ) // Create X: Sample softmax output for 5-class model X = np.array([[.01, .05, .02, .02, .9],[.03, .1, .42, .05, .4]]) print("X:\n",X) // Create label: Sample 1-hot ground truth label vectors label = np.array([[0.,0.,0.,0.,1.],[0.,0.,1.,0.,0.]]) print("label:\n",label) // Feed X & label into workspace workspace.FeedBlob("X", X.astype(np.float32)) workspace.FeedBlob("label", label.astype(np.float32)) // Run op workspace.RunOperatorOnce(op) // Collect Output print("Y:\n", workspace.FetchBlob("Y")) ``` **Result** ``` X: [[0.01 0.05 0.02 0.02 0.9 ] [0.03 0.1 0.42 0.05 0.4 ]] label: [[0. 0. 0. 0. 1.] [0. 0. 1. 0. 0.]] Y: [0.10536055 0.8675006 ] ``` </details> )DOC").Input(0
 
Input tensor which is almost always the result of a softmax operation. $X$ is a 2D array of size $NxD$, where $N$ is the batch size and $D$ is the number of classes. Input (1,"label","Blob containing the labels used to compare the input. $label$ is the same shape as $X$.").Output(0
 
 REGISTER_GRADIENT (CrossEntropy, GetCrossEntropyGradient)
 
 REGISTER_CPU_OPERATOR (CTCBeamSearchDecoder, CTCBeamSearchDecoderOp< CPUContext >)
 
 Arg ("beam_width","Maximum number of candidates to carry over to the next activation step.").Arg("prune_threshold","Probability threshold below which outputs are ignored.").Input(0,"INPUTS","3D float Tensor sized [max_activation_length, batch_size, alphabet_size] of network logits (before softmax application).").Input(1,"SEQ_LEN","(optional) 1D int vector containing sequence lengths, having size [batch_size]; seq_len will be set to max_time if not provided.").Output(0,"OUTPUT_LEN","Output_len matrix of size (batch_size). Each index stores the final output length of its corresponding batch item.").Output(1,"VALUES","Values vector of size (total_decoded_outputs). The flattened vector of final output sequences, in batch order.").InheritOnnxSchema()
 
 SHOULD_NOT_DO_GRADIENT (CTCBeamSearchDecoder)
 
 REGISTER_CPU_OPERATOR (CTCGreedyDecoder, CTCGreedyDecoderOp< CPUContext >)
 
 Arg ("merge_repeated","When merge_repeated is true, merge repeated classes in output.").SetDoc("Greedy decoder for connectionist temporal classification.").Input(0,"INPUTS","3D float Tensor sized [max_time, batch_size, num_classes]").Input(1,"SEQ_LEN","(optional) 1D int vector containing sequence lengths, having size [batch_size]; seq_len will be set to max_time if not provided").Output(0
 
 REGISTER_CPU_OPERATOR (Cube, UnaryElementwiseOp< NumericTypes, CPUContext, CubeFunctor< CPUContext >>)
 
 REGISTER_CPU_OPERATOR (CubeGradient, BinaryElementwiseOp< NumericTypes, CPUContext, CubeGradientFunctor< CPUContext >>)
 
 REGISTER_GRADIENT (Cube, GetCubeGradient)
 
 REGISTER_CPU_OPERATOR (DataCouple, DataCoupleOp< CPUContext >)
 
 EnforceOneToOneInplace ().SetDoc(R"DOC( A one to one operator that takes an arbitrary number of input and output blobs such that each input blob is inplace with its matching output blob. It then proceeds to do nothing with each of these operators. This serves two purposes. It can make it appear as if a blob has been written to
 
 CAFFE_KNOWN_TYPE (std::unique_ptr< dataset_ops::TreeCursor >)
 
 CAFFE_KNOWN_TYPE (dataset_ops::TensorVectorPtr)
 
 CAFFE_KNOWN_TYPE (dataset_ops::SharedTensorVectorPtr)
 
 OPERATOR_SCHEMA (DeformConvGradient).NumInputs(4
 
 NumOutputs (2, 4)
 
vector< TensorShape > TensorInferenceForDotProduct (const OperatorDef &, const vector< TensorShape > &in)
 
OpSchema::Cost CostInferenceForDotProduct (const OperatorDef &def, const vector< TensorShape > &in)
 
 REGISTER_CPU_OPERATOR (SquaredL2Distance, SquaredL2DistanceOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (SquaredL2DistanceGradient, SquaredL2DistanceGradientOp< float, CPUContext >)
 
 SetDoc (R"DOC( Given two input float tensors X, Y, and produces one output float tensor of the L2 difference between X and Y that is computed as ||(X - Y)^2 / 2||. )DOC").Input(0
 
or input tensor Input (1,"Y","1D or 2D input tensor (must have the same shape as X)").Output(0
 
 OPERATOR_SCHEMA (SquaredL2DistanceGradient).NumInputs(3).NumOutputs(2)
 
 REGISTER_GRADIENT (SquaredL2Distance, GetSquaredL2DistanceGradient)
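A minimal SquaredL2Distance sketch, assuming the standard caffe2.python core/workspace helpers; blob names are illustrative:

```
import numpy as np
from caffe2.python import core, workspace

workspace.ResetWorkspace()
op = core.CreateOperator("SquaredL2Distance", ["X", "Y"], ["Z"])
workspace.FeedBlob("X", np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))
workspace.FeedBlob("Y", np.array([[1.0, 0.0], [0.0, 4.0]], dtype=np.float32))
workspace.RunOperatorOnce(op)
# Row-wise 0.5 * sum((X - Y)^2): [2.0, 4.5]
print(workspace.FetchBlob("Z"))
```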
 
 REGISTER_CPU_OPERATOR (L1Distance, L1DistanceOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (L1DistanceGradient, L1DistanceGradientOp< float, CPUContext >)
 
 SetDoc (R"DOC( Computes the row-wise L1 Distance between the two input tensors $X$ and $Y$, which is defined as $$L1Distance(\mathbf{x},\mathbf{y}) = \sum_{i}\mid x_i - y_i\mid$$ Note, both inputs must either be 1-dimensional or 2-dimensional and both must have the same shape. The output $Z$ will be 1-dimensional regardless and its length will equal the number of rows in the inputs. Github Links: - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/distance_op.h - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/distance_op.cc <details> <summary> <b>Example</b> </summary> **Code** ``` workspace.ResetWorkspace() op = core.CreateOperator( "L1Distance", ["X", "Y"], ["Z"] ) // Create X X = 5*np.ones((1, 4)) print("X:\n",X) // Create Y Y = np.ones((1, 4)) print("Y:\n",Y) // Feed X & Y into workspace workspace.FeedBlob("X", X.astype(np.float32)) workspace.FeedBlob("Y", Y.astype(np.float32)) // Run op workspace.RunOperatorOnce(op) // Collect Output print("Z:\n", workspace.FetchBlob("Z")) ``` **Result** ``` X: [[5. 5. 5. 5.]] Y: [[1. 1. 1. 1.]] Z: [16.] ``` </details> )DOC").Input(0
 
Input (0,"X","First input tensor (1D or 2D)").Input(1,"Y","Second input tensor (must have the same shape as $X$)").Output(0
 
 OPERATOR_SCHEMA (L1DistanceGradient).NumInputs(3).NumOutputs(2)
 
 REGISTER_GRADIENT (L1Distance, GetL1DistanceGradient)
 
 REGISTER_CPU_OPERATOR (DotProduct, DotProductOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (DotProductGradient, DotProductGradientOp< float, CPUContext >)
 
 SetDoc (R"DOC( Computes and outputs the dot product of the two input float tensors `X` and `Y`. Note that `X` and `Y` must be either 1D or 2D, and they must be the same shape. The output tensor is 1D, which represents either the product of each element in a respective dimension if the inputs are 1D, or the sum of the products in a given dimension if the inputs are 2D matrices. Note that the actual dot product is a scalar value, which is effectively the sum of the elements in the 1D output tensor. For 1D inputs: Given two vectors $X = [x_0, x_1, x_2]$ and $Y = [y_0, y_1, y_2]$; $Z = [x_0 * y_0, x_1 * y_1, x_2 * y_2]$ For 2D inputs: Given two matrices: $$X = [[x_0^0, x_1^0, x_2^0], \\ [x_0^1, x_1^1, x_2^1], \\ [x_0^2, x_1^2, x_2^2], \\ ..., \\ [x_0^n, x_1^n, x_2^n]]$$ and $$Y = [[y_0^0, y_1^0, y_2^0], \\ [y_0^1, y_1^1, y_2^1], \\ [y_0^2, y_1^2, y_2^2], \\ ..., \\ [y_0^n, y_1^n, y_2^n]]$$ then $$Z = \biggl[\Big((x_0^0 * y_0^0) + (x_1^0 * y_1^0) + (x_2^0 * y_2^0)\Big), \\ \Big((x_0^1 * y_0^1) + (x_1^1 * y_1^1) + (x_2^1 * y_2^1)\Big), \\ \Big((x_0^2 * y_0^2) + (x_1^2 * y_1^2) + (x_2^2 * y_2^2)\Big), \\ ..., \\ \Big((x_0^n * y_0^n) + (x_1^n * y_1^n) + (x_2^n * y_2^n)\Big)\biggr]$$ Github Link: - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/distance_op.cc <details> <summary> <b>Example</b> </summary> **Code** ``` workspace.ResetWorkspace() op = core.CreateOperator( "DotProduct", ["X", "Y"], ["Z"] ) workspace.FeedBlob("X", np.random.randint(20, size=(5)).astype(np.float32)) workspace.FeedBlob("Y", np.random.randint(20, size=(5)).astype(np.float32)) print("X:\n", workspace.FetchBlob("X")) print("Y:\n", workspace.FetchBlob("Y")) workspace.RunOperatorOnce(op) print("Z:\n", workspace.FetchBlob("X")) workspace.ResetWorkspace() workspace.FeedBlob("X", np.random.randint(10, size=(3,3)).astype(np.float32)) workspace.FeedBlob("Y", np.random.randint(10, size=(3,3)).astype(np.float32)) print("X:\n", workspace.FetchBlob("X")) print("Y:\n", workspace.FetchBlob("Y")) workspace.RunOperatorOnce(op) print("Z:\n", workspace.FetchBlob("Z")) ``` **Result** ``` X: [ 2. 15. 2. 7. 12.] Y: [ 3. 12. 9. 3. 18.] Z: [ 2. 15. 2. 7. 12.] X: [[2. 0. 4.] [7. 7. 4.] [7. 9. 9.]] Y: [[2. 0. 8.] [9. 6. 1.] [7. 8. 0.]] Z: [ 36. 109. 121.] ``` </details> )DOC").Input(0
 
 OPERATOR_SCHEMA (DotProductGradient).NumInputs(3).NumOutputs(2)
 
 REGISTER_GRADIENT (DotProduct, GetDotProductGradient)
 
 REGISTER_CPU_OPERATOR (CosineSimilarity, CosineSimilarityOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (CosineSimilarityGradient, CosineSimilarityGradientOp< float, CPUContext >)
 
 SetDoc (R"DOC( This op takes two input float tensors of the same size, $X$ and $Y$, and produces one output float tensor , $Z$, calculated as the cosine similarity between $X$ and $Y$. Recall, the cosine similarity between two tensors $X$ and $Y$ is defined as: $$\mathbf{Z}=CosineSimilarity(\mathbf{X},\mathbf{Y}) = \frac{\mathbf{X}\cdot\mathbf{Y}}{\|\mathbf{X}\|\|\mathbf{Y}\|} = \frac{\sum_n^{i=1}X_iY_i}{\sqrt{\sum_n^{i=1}X_i^2}\sqrt{\sum_n^{i=1}Y_i^2}}$$ Github Links: - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/distance_op.h - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/distance_op.cc <details> <summary> <b>Example</b> </summary> **Code** ``` workspace.ResetWorkspace() op = core.CreateOperator( "CosineSimilarity", ["X", "Y"], ["Z"] ) // Create X X = np.random.randn(3, 3) print("X:\n",X) // Create Y Y = np.random.randn(3, 3) print("Y:\n",Y) // Feed X & Y into workspace workspace.FeedBlob("X", X.astype(np.float32)) workspace.FeedBlob("Y", Y.astype(np.float32)) // Run op workspace.RunOperatorOnce(op) // Collect Output print("Z:\n", workspace.FetchBlob("Z")) ``` **Result** ``` X: [[-0.42635564 -0.23831588 -0.25515547] [ 1.43914719 -1.05613228 1.01717373] [ 0.06883105 0.33386519 -1.46648334]] Y: [[-0.90648691 -0.14241514 -1.1070837 ] [ 0.92152729 -0.28115511 -0.17756722] [-0.88394254 1.34654037 -0.80080998]] Z: [-1.7849885e-23 1.7849885e-23 -1.0842022e-07] ``` </details> )DOC").Input(0
 
 OPERATOR_SCHEMA (CosineSimilarityGradient).NumInputs(3).NumOutputs(2)
 
 REGISTER_GRADIENT (CosineSimilarity, GetCosineSimilarityGradient)
 
 REGISTER_CPU_OPERATOR (DotProductWithPadding, DotProductWithPaddingOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (DotProductWithPaddingGradient, DotProductWithPaddingGradientOp< float, CPUContext >)
 
 SetDoc (R"DOC( Given two input float tensors X and Y with different shapes, produces one output float tensor of the dot product between X and Y. We currently support two kinds of strategies to achieve this: 1) pad the smaller tensor (using pad_value) to the same shape as the other one before doing the normal dot_product; 2) replicate the smaller tensor to the same shape as the other one. Note the first dimensions of X and Y must be equal. Only the second dimension of X or Y can be padded. )DOC").Input(0,"X","1D or 2D input tensor").Input(1,"Y","1D or 2D input tensor").Output(0,"Z","1D output tensor").IdenticalTypeAndShapeOfInputDim(0, 0).Arg("pad_value","the padding value for tensors with smaller dimension").Arg("replicate","whether to replicate the smaller tensor or not")
 
 OPERATOR_SCHEMA (DotProductWithPaddingGradient).NumInputs(3).NumOutputs(2)
 
 REGISTER_GRADIENT (DotProductWithPadding, GetDotProductWithPaddingGradient)
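A minimal DotProductWithPadding sketch, assuming the standard caffe2.python core/workspace helpers and the `pad_value` argument shown above; blob names are illustrative:

```
import numpy as np
from caffe2.python import core, workspace

workspace.ResetWorkspace()
op = core.CreateOperator("DotProductWithPadding", ["X", "Y"], ["Z"], pad_value=0.0)
# Same number of rows; Y's shorter rows are padded with pad_value before the row-wise dot product.
workspace.FeedBlob("X", np.array([[1.0, 2.0, 3.0, 4.0], [1.0, 1.0, 1.0, 1.0]], dtype=np.float32))
workspace.FeedBlob("Y", np.array([[1.0, 1.0], [2.0, 2.0]], dtype=np.float32))
workspace.RunOperatorOnce(op)
print(workspace.FetchBlob("Z"))  # [3. 4.]
```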
 
 REGISTER_CPU_OPERATOR (Do, DoOp< CPUContext >)
 
 NumInputs (1, INT_MAX).NumOutputs(1, INT_MAX).SetDoc(R"DOC( 'Do' control operator, executes a subnet in a separate workspace. Last blobs in the input and output lists should be the same blob created with CreateScope op. Arguments 'inner_blobs' and 'outer_blobs_idx' provide a mapping between selected inner blob names and corresponding outer blob indices. )DOC").Arg("net","Subnet with blob bindings").Arg("inner_blobs","List of inner net blob names to bind to outer workspace").Arg("outer_blobs_idx","Indices of corresponding outer workspace blobs, in order: operator inputs, operator outputs (skipping workspace blobs)").Arg("saved_fwd_blobs","List of blobs from the forward Do operator workspace needed in backward pass, used in gradient Do operator").Arg("reuse_workspace","Whether to reuse workspace or create a new one in a given scope").AllowInplace([](int in
 
 REGISTER_CUDA_OPERATOR (Do, DoOp< CUDAContext >)
 
 REGISTER_CPU_OPERATOR (Dropout, DropoutOp< float, CPUContext >)
 
 REGISTER_CPU_GRADIENT_OPERATOR (DropoutGrad, DropoutGradientOp< float, CPUContext >)
 
 if (def.output().size()==2)
 
 SetDoc (R"DOC( `Dropout` takes one input data tensor (`X`) and produces two tensor outputs, `Y` and `mask`. If the `is_test` argument is zero (default=0), the output `Y` will be the input with random elements zeroed. The probability that a given element is zeroed is determined by the `ratio` argument. If the `is_test` argument is set to non-zero, the output `Y` is exactly the same as the input `X`. Note that outputs are scaled by a factor of $\frac{1}{1-ratio}$ during training, so that during test time, we can simply compute an identity function. This scaling is important because we want the output at test time to equal the expected value at training time. Dropout has been proven to be an effective regularization technique to prevent overfitting during training. Github Links: - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/dropout_op.h - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/dropout_op.cc <details> <summary> <b>Example</b> </summary> **Code** ``` workspace.ResetWorkspace() op = core.CreateOperator( "Dropout", ["X"], ["Y"] + ["mask"], ratio=0.5, is_test=0 ) workspace.FeedBlob("X", np.random.randint(10, size=(5, 5)).astype(np.float32)) print("X:", workspace.FetchBlob("X")) workspace.RunOperatorOnce(op) print("Y:", workspace.FetchBlob("Y")) print("mask:", workspace.FetchBlob("mask")) ``` **Result** ``` X: [[5. 4. 3. 6. 9.] [2. 1. 8. 0. 9.] [7. 3. 0. 6. 3.] [1. 8. 2. 6. 4.] [6. 2. 6. 4. 0.]] Y: [[ 0. 0. 0. 12. 18.] [ 0. 0. 16. 0. 0.] [ 0. 0. 0. 12. 6.] [ 0. 0. 4. 0. 0.] [12. 0. 0. 0. 0.]] mask: [[False False False True True] [False False True True False] [False False True True True] [False False True False False] [ True False False False False]] ``` </details> )DOC").Arg("ratio"
 
*(float; default 0.5)* probability of an element to be zeroed. Arg ("is_test","*(int; default 0)* if zero (train mode), perform dropout; if non-zero (test mode), Y = X.")
 
 REGISTER_GRADIENT (Dropout, GetDropoutGradient)
 
 REGISTER_CPU_OPERATOR (AddGradient, BinaryElementwiseGradientOp< NumericTypes, CPUContext, AddFunctor< CPUContext >>)
 
 REGISTER_GRADIENT (Add, GetAddGradient)
 
 REGISTER_CPU_OPERATOR (Add, BinaryElementwiseOp< NumericTypes, CPUContext, AddFunctor< CPUContext >>)
 
 REGISTER_CUDA_OPERATOR (Add, BinaryElementwiseOp< NumericTypes, CUDAContext, AddFunctor< CUDAContext >>)
 
 REGISTER_CUDA_OPERATOR (AddGradient, BinaryElementwiseGradientOp< NumericTypes, CUDAContext, AddFunctor< CUDAContext >>)
 
 REGISTER_CPU_OPERATOR (DivGradient, BinaryElementwiseGradientOp< NumericTypes, CPUContext, DivFunctor< CPUContext >>)
 
 REGISTER_GRADIENT (Div, GetDivGradient)
 
 REGISTER_CPU_OPERATOR (Div, BinaryElementwiseOp< NumericTypes, CPUContext, DivFunctor< CPUContext >>)
 
 REGISTER_CPU_OPERATOR (ElementwiseLinear, ElementwiseLinearOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (ElementwiseLinearGradient, ElementwiseLinearGradientOp< float, CPUContext >)
 
 REGISTER_GRADIENT (ElementwiseLinear, GetElementwiseLinearGradient)
 
 REGISTER_CPU_OPERATOR (MulGradient, BinaryElementwiseGradientOp< NumericTypes, CPUContext, MulFunctor< CPUContext >>)
 
 REGISTER_GRADIENT (Mul, GetMulGradient)
 
 REGISTER_CPU_OPERATOR (Mul, BinaryElementwiseOp< NumericTypes, CPUContext, MulFunctor< CPUContext >>)
 
 REGISTER_CPU_OPERATOR (Not, UnaryElementwiseOp< BoolTypes, CPUContext, NotFunctor< CPUContext >>)
 
 REGISTER_CPU_OPERATOR (Sign, UnaryElementwiseOp< NumericTypes, CPUContext, SignFunctor< CPUContext >>)
 
 REGISTER_CPU_COMPARE_OPERATOR (EQ)
 
 REGISTER_CPU_COMPARE_OPERATOR (NE)
 
 REGISTER_CPU_COMPARE_OPERATOR (LT)
 
 REGISTER_CPU_COMPARE_OPERATOR (LE)
 
 REGISTER_CPU_COMPARE_OPERATOR (GT)
 
 REGISTER_CPU_COMPARE_OPERATOR (GE)
 
 REGISTER_CPU_LOGICAL_BINARY_OPERATOR (And)
 
 REGISTER_CPU_LOGICAL_BINARY_OPERATOR (Or)
 
 REGISTER_CPU_LOGICAL_BINARY_OPERATOR (Xor)
 
 REGISTER_CPU_BITWISE_BINARY_OPERATOR (BitwiseAnd)
 
 REGISTER_CPU_BITWISE_BINARY_OPERATOR (BitwiseOr)
 
 REGISTER_CPU_BITWISE_BINARY_OPERATOR (BitwiseXor)
 
 REGISTER_CPU_OPERATOR (SumReduceLike, SumReduceLikeOp< CPUContext >)
 
 C10_DECLARE_FOWARD_ONLY_BINARY_FUNCTOR (EQ)
 
 C10_DECLARE_FOWARD_ONLY_BINARY_FUNCTOR (NE)
 
 C10_DECLARE_FOWARD_ONLY_BINARY_FUNCTOR (LT)
 
 C10_DECLARE_FOWARD_ONLY_BINARY_FUNCTOR (LE)
 
 C10_DECLARE_FOWARD_ONLY_BINARY_FUNCTOR (GT)
 
 C10_DECLARE_FOWARD_ONLY_BINARY_FUNCTOR (GE)
 
 C10_DECLARE_FOWARD_ONLY_BINARY_FUNCTOR (And)
 
 C10_DECLARE_FOWARD_ONLY_BINARY_FUNCTOR (Or)
 
 C10_DECLARE_FOWARD_ONLY_BINARY_FUNCTOR (Xor)
 
 C10_DECLARE_FOWARD_ONLY_BINARY_FUNCTOR (BitwiseAnd)
 
 C10_DECLARE_FOWARD_ONLY_BINARY_FUNCTOR (BitwiseOr)
 
 C10_DECLARE_FOWARD_ONLY_BINARY_FUNCTOR (BitwiseXor)
 
 CostInferenceFunction (PointwiseCostInference< 1 >).TensorInferenceFunction(ElementwiseOpShapeInference).FillUsing(MathDocGenerator("addition"
 
and the dimensions of the second input are a contiguous subset of the dimensions of the first. For example, the following tensor shapes are supported: shape(A) = (2, 3, 4, 5) with shape(B) = (,), i.e. B is a scalar; shape(B) = (5,); shape(B) = (4, 5); shape(B) = (3, 4) with axis=1; shape(B) = (2,) with axis=0.
 
 REGISTER_CPU_OPERATOR (SubGradient, BinaryElementwiseGradientOp< NumericTypes, CPUContext, SubFunctor< CPUContext >>)
 
 REGISTER_GRADIENT (Sub, GetSubGradient)
 
 REGISTER_CPU_OPERATOR (Sub, BinaryElementwiseOp< NumericTypes, CPUContext, SubFunctor< CPUContext >>)
 
 REGISTER_CUDA_OPERATOR (Sub, BinaryElementwiseOp< NumericTypes, CUDAContext, SubFunctor< CUDAContext >>)
 
 REGISTER_CUDA_OPERATOR (SubGradient, BinaryElementwiseGradientOp< NumericTypes, CUDAContext, SubFunctor< CUDAContext >>)
 
 REGISTER_CPU_OPERATOR (Sum, SumOp< CPUContext >)
 
 CostInferenceFunction (CostInferenceForSum).InputsCanCrossDevices().IdenticalTypeAndShapeOfInput(0).SetDoc(R"DOC( Element-wise sum of each of the input tensors. The first input tensor can be used in-place as the output tensor, in which case the sum will be done in place and results will be accumulated in the first input tensor. All inputs and outputs must have the same shape and data type. )DOC")
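A minimal Sum sketch, assuming the standard caffe2.python core/workspace helpers; blob names are illustrative:

```
import numpy as np
from caffe2.python import core, workspace

workspace.ResetWorkspace()
op = core.CreateOperator("Sum", ["A", "B"], ["C"])
workspace.FeedBlob("A", np.array([[1, 2], [3, 4]], dtype=np.float32))
workspace.FeedBlob("B", np.array([[5, 6], [7, 8]], dtype=np.float32))
workspace.RunOperatorOnce(op)
print(workspace.FetchBlob("C"))  # [[ 6.  8.] [10. 12.]]
```

Reusing "A" as the output blob would perform the sum in place, as described above.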
 
 REGISTER_CPU_OPERATOR (Elu, UnaryElementwiseWithArgsOp< TensorTypes< float >, CPUContext, EluFunctor< CPUContext >>)
 
 REGISTER_CPU_GRADIENT_OPERATOR (EluGradient, BinaryElementwiseWithArgsOp< TensorTypes< float >, CPUContext, EluGradientFunctor< CPUContext >>)
 
 REGISTER_CUDNN_OPERATOR (Elu, CuDNNActivationOp< CUDNN_ACTIVATION_ELU >)
 
 REGISTER_CUDNN_OPERATOR (EluGradient, CuDNNActivationGradientOp< CUDNN_ACTIVATION_ELU >)
 
 REGISTER_CPU_OPERATOR (EnforceFinite, EnforceFiniteOp< CPUContext >)
 
 SHOULD_NOT_DO_GRADIENT (EnforceFinite)
 
 REGISTER_CPU_OPERATOR (EnsureClipped, EnsureClippedOp< float, CPUContext >)
 
 NumInputs (1, 3).NumOutputs(1).Input(0,"param","Parameters to be normalized").Input(1,"indices","Sparse indices, only needed for sparse param").Input(2,"grad","Gradient computed, only needed for sparse param").Output(0,"output_param","param ensured to be clipped within range").AllowInplace(
 
 SetDoc (R"DOC( Given a tensor, apply clip after gradient is applied; when the param is sparse as indicated by valid indices and grad, in-place is required )DOC")
 
 SHOULD_NOT_DO_GRADIENT (EnsureClipped)
 
 REGISTER_CPU_OPERATOR (EnsureCPUOutput, EnsureCPUOutputOp< CPUContext >)
 
 SetDoc (R"DOC( This Op always create TensorCPU output, and may involves cross-device MemCpy. Under CPU Context, this Op takes TensorCPU as input. Under the CUDA Context, this Op accepts either CUDA or CPU Tensor input. )DOC").Input(0
 
The input CUDA or CPU tensor. Output (0,"output","TensorCPU that is a copy of the input.")
 
 NO_GRADIENT (EnsureCPUOutput)
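A minimal EnsureCPUOutput sketch, assuming the standard caffe2.python core/workspace helpers; blob names are illustrative:

```
import numpy as np
from caffe2.python import core, workspace

workspace.ResetWorkspace()
# Under a CPU context this is effectively a copy; under CUDA it copies a GPU tensor to a TensorCPU.
op = core.CreateOperator("EnsureCPUOutput", ["data"], ["data_cpu"])
workspace.FeedBlob("data", np.array([1.0, 2.0, 3.0], dtype=np.float32))
workspace.RunOperatorOnce(op)
print(workspace.FetchBlob("data_cpu"))  # [1. 2. 3.]
```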
 
 REGISTER_CPU_OPERATOR (Erf, UnaryElementwiseOp< TensorTypes< float >, CPUContext, ErfFunctor< CPUContext >>)
 
 REGISTER_CPU_OPERATOR (ErfGradient, BinaryElementwiseOp< TensorTypes< float >, CPUContext, ErfGradientFunctor< CPUContext >>)
 
 REGISTER_GRADIENT (Erf, GetErfGradient)
 
 REGISTER_CPU_OPERATOR (Exp, UnaryElementwiseOp< TensorTypes< float >, CPUContext, ExpFunctor< CPUContext >>)
 
 REGISTER_GRADIENT (Exp, GetExpGradient)
 
 REGISTER_CUDA_OPERATOR (Exp, UnaryElementwiseOp< TensorTypes< float >, CUDAContext, ExpFunctor< CUDAContext >>)
 
 REGISTER_CPU_OPERATOR (Expand, ExpandOp< TensorTypes< std::int32_t, std::int64_t, float, double >, CPUContext >)
 
 REGISTER_CPU_OPERATOR (ExpandGradient, ExpandGradientOp< TensorTypes< std::int32_t, std::int64_t, float, double >, CPUContext >)
 
NumInputs(2).NumOutputs(1).SetDoc(R"DOC( Broadcast the input tensor to a materialized new tensor using the given shape. The broadcast rule is similar to "numpy.array(input) * numpy.ones(shape)": two corresponding dimensions must either be equal or one of them must equal 1. In order to align with PyTorch's `expand`, `shape` is allowed to have entries equal to -1, which means to preserve the size of the corresponding dimension in X (so it's actually equivalent to equal to 1). )DOC").Input(0
 
 OPERATOR_SCHEMA (ExpandGradient).NumInputs(2).NumOutputs(1)
 
 REGISTER_GRADIENT (Expand, GetExpandGradient)
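A minimal Expand sketch, assuming the standard caffe2.python core/workspace helpers; the int64 dtype for the `shape` blob and the blob names are assumptions for illustration:

```
import numpy as np
from caffe2.python import core, workspace

workspace.ResetWorkspace()
op = core.CreateOperator("Expand", ["X", "shape"], ["Y"])
workspace.FeedBlob("X", np.array([[1.0], [2.0], [3.0]], dtype=np.float32))  # shape (3, 1)
workspace.FeedBlob("shape", np.array([3, 4], dtype=np.int64))               # broadcast target
workspace.RunOperatorOnce(op)
print(workspace.FetchBlob("Y").shape)  # (3, 4)
```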
 
 REGISTER_CUDA_OPERATOR (Expand, ExpandOp< TensorTypes< std::int32_t, std::int64_t, float, double >, CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (ExpandGradient, ExpandGradientOp< TensorTypes< std::int32_t, std::int64_t, float, double >, CUDAContext >)
 
 REGISTER_CPU_OPERATOR (ExpandDims, ExpandDimsOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (Squeeze, SqueezeOp< CPUContext >)
 
 TensorInferenceFunction ([](const OperatorDef &def, const vector< TensorShape > &in){ArgumentHelper helper(def);auto dims=helper.template GetRepeatedArgument< int >("dims");auto originalSize=dims.size();CAFFE_ENFORCE(originalSize > 0,"Parameter `dims` must be provided.");std::sort(dims.begin(), dims.end());dims.erase(std::unique(dims.begin(), dims.end()), dims.end());if(dims.size()< originalSize){LOG(WARNING)<< "Parameter `dims` has repeated dimensions.";}CAFFE_ENFORCE(dims.front() >=0,"Dimension ids must be non-negative.");CAFFE_ENFORCE_GE(in[0].dims_size()+dims.size(), dims.back()+1,"Input needs at least ",(1+dims.back()-dims.size())," dimensions given `dims`.");vector< TensorShape > out(1);int cur_pos=0;int idx=0;for(const auto new_dim:dims){for(int i=cur_pos;i< new_dim;i++){out[0].add_dims(in[0].dims(idx++));}out[0].add_dims(1);cur_pos=new_dim+1;}for(;idx< in[0].dims_size();idx++){out[0].add_dims(in[0].dims(idx));}out[0].set_data_type(in[0].data_type());return out;}).SetDoc(R"DOC( The *ExpandDims* op inserts single-dimensional entries into the shape of the input tensor *data
 
 SetDoc (R"DOC( The *Squeeze* op removes single-dimensional entries from the shape of the input tensor *data,* and produces a single output tensor *squeezed*. The op also takes an argument *dims* with a list of dimensions to squeeze. If the same blob is provided as input and output, the operation is copy-free. This is the exact inverse operation of *ExpandDims* given the same *dims* argument. Github Links: - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/expand_squeeze_dims_op.h - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/expand_squeeze_dims_op.cc <details> <summary> <b>Example</b> </summary> **Code** ``` workspace.ResetWorkspace() op = core.CreateOperator( "Squeeze", ["data"], ["squeezed"], dims=[0,1], ) workspace.FeedBlob("data", np.zeros((1,1,100,100)).astype(np.float32)) print("data.shape:", workspace.FetchBlob("data").shape) workspace.RunOperatorOnce(op) print("squeezed.shape:", workspace.FetchBlob("squeezed").shape) ``` **Result** ``` data.shape: (1, 1, 100, 100) squeezed.shape: (100, 100) ``` </details> )DOC").Input(0
 
Input tensor of data to be operated on. Output (0,"squeezed","Reshaped tensor with same data as input.").Arg("dims","List of dimensions of *data* to squeeze out.")
 
dims erase (std::unique(dims.begin(), dims.end()), dims.end())
 
 if (dims.size()< originalSize)
 
 CAFFE_ENFORCE (dims.front() >=0,"Dimension ids must be non-negative.")
 
vector< TensorShape > out (1)
 
 REGISTER_GRADIENT (Squeeze, GetSqueezeGradient)
 
 REGISTER_GRADIENT (ExpandDims, GetExpandDimsGradient)
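A minimal ExpandDims sketch (the inverse of the Squeeze example above), assuming the standard caffe2.python core/workspace helpers; blob names are illustrative:

```
import numpy as np
from caffe2.python import core, workspace

workspace.ResetWorkspace()
op = core.CreateOperator("ExpandDims", ["data"], ["expanded"], dims=[0, 1])
workspace.FeedBlob("data", np.zeros((100, 100), dtype=np.float32))
workspace.RunOperatorOnce(op)
print(workspace.FetchBlob("expanded").shape)  # (1, 1, 100, 100)
```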
 
 REGISTER_CUDA_OPERATOR (Squeeze, SqueezeOp< CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (ExpandDims, ExpandDimsOp< CUDAContext >)
 
std::vector< TensorShape > FCShapeInference (const OperatorDef &def, const vector< TensorShape > &in, bool pretransposed_weight)
 
OpSchema::Cost CostInferenceForFC (const OperatorDef &def, const vector< TensorShape > &in, bool pretransposed_weight)
 
 REGISTER_CPU_OPERATOR (FeedBlob, FeedBlobOp< CPUContext >)
 
 SHOULD_NOT_DO_GRADIENT (FeedBlob)
 
 NumInputs (0, 0).NumOutputs(1
 
 SetDoc (R"DOC( FeedBlobs the content of the blobs. The input and output blobs should be one-to-one inplace.)DOC").Arg("value"
 
 REGISTER_CPU_OPERATOR (UniformFill, UniformFillOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (UniformIntFill, UniformFillOp< int, CPUContext >)
 
 REGISTER_CPU_OPERATOR (UniqueUniformFill, UniqueUniformFillOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (ConstantFill, ConstantFillOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (DiagonalFill, DiagonalFillOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (GaussianFill, GaussianFillOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (XavierFill, XavierFillOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (MSRAFill, MSRAFillOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (RangeFill, RangeFillOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (LengthsRangeFill, LengthsRangeFillOp< CPUContext >)
 
 TensorInferenceFunction (FillerTensorInference<>).SetDoc(R"DOC( This operator fills the elements of the output tensor with a const ant value specified by the `value` argument. - The data type is specified by the `dtype` argument - Currently
 
the data types supported are *float *int32 *int64 and *bool *If the dtype argument is not the data type of value is used The output tensor shape is either specified by the shape argument or will match the shape of the input tensor if one is provided (if an input tensor is provided, a shape argument should not be set)-Optional additional dimensions can be appended at the end as specified by`extra_shape`argument-If`input_as_shape`is set to True
 
the data types supported are *float *int32 *int64 and *bool *If the dtype argument is not the data type of value is used The output tensor shape is either specified by the shape argument or will match the shape of the input tensor if one is the input should be a tensor containing the desired output shape (the dimensions specified in`extra_shape`will also be appended) When specifying`dtype`argument
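A minimal ConstantFill sketch, assuming the standard caffe2.python core/workspace helpers; the output blob name is illustrative:

```
from caffe2.python import core, workspace

workspace.ResetWorkspace()
# No input: the output shape comes from the `shape` argument, the dtype from `value`.
op = core.CreateOperator("ConstantFill", [], ["output"], shape=[2, 3], value=1.5)
workspace.RunOperatorOnce(op)
print(workspace.FetchBlob("output"))  # 2x3 float tensor filled with 1.5
```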
 
shape input must be in CPU context Input (0,"shape","(*Tensor`<int>`*): 1-D tensor of the shape of the output, must be used with `input_as_shape` argument").Input(1
 
shape input must be in CPU context inclusive Input (2,"max","(*Tensor`<float>`*): scalar tensor containing maximum value, inclusive").Output(0
 
 NumInputs ({0, 1, 3}).NumOutputs(1).AllowInplace(
 
 TensorInferenceFunction (FillerTensorInference< TensorProto_DataType_INT32 >).SetDoc(R"DOC( Fill the output tensor with int32 samples from uniform distribution [`min`
 
`max`]. The range can be defined either by arguments or input blobs; `min` and `max` are inclusive. - If the range is given by input blobs, you also need to give the shape as input. - When the range is given as arguments, this operator enforces min <= max. When the range is given as inputs, the constraint is not enforced. - When the range is given as inputs and max < min, the first dimension of the output is set to 0. This behavior is allowed so that dynamically sampling indices into a dynamically sized tensor is possible. - The shape of the output can be given as argument or input. Github Links: -https: <details><summary><b>Example</b></summary> **Code** ```workspace.ResetWorkspace() op_1
 
Input (0,"shape","(*Tensor`<int>`*): 1-D tensor of the shape of the output, must be used with `input_as_shape` argument; the shape input must be in CPU context").Input(1,"min","(*Tensor`<int>`*): scalar tensor containing minimum value, inclusive").Input(2,"max","(*Tensor`<int>`*): scalar tensor containing maximum value, inclusive").Output(0
 
 NumInputs (0, 2).NumOutputs(1).AllowInplace(
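A minimal UniformIntFill sketch, assuming the standard caffe2.python core/workspace helpers; the output blob name is illustrative:

```
from caffe2.python import core, workspace

workspace.ResetWorkspace()
op = core.CreateOperator("UniformIntFill", [], ["output"], shape=[4], min=0, max=9)
workspace.RunOperatorOnce(op)
print(workspace.FetchBlob("output"))  # e.g. [3 7 0 9], int32 samples in [0, 9]
```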
 
If the second input is provided, its elements will be excluded from uniform sampling; using the second input will require you to provide shape via the first input. )DOC").Arg("min","Minimum value, inclusive").Arg("max","Maximum value, inclusive").Arg("dtype","The data type for the elements of the output tensor. Strictly must be one of the types from the DataType enum in TensorProto. This only supports INT32 and INT64 now. If not set, assume INT32").Arg("shape","The shape of the output tensor. Cannot set the shape argument and pass in an input at the same time").Arg("extra_shape","The additional dimensions appended at the end of the shape indicated by the input blob. Cannot set the extra_shape argument when there is no input blob.").Arg("input_as_shape","1D tensor containing the desired output shape. First input must be in CPU context.").Input(0,"input","Input tensor to provide shape information").Input(1,"avoid","(optional) Avoid elements in this tensor. Elements must be unique.").Output(0,"output","Output tensor of unique uniform samples")
 
If *input_as_shape* is set to *true*, then the *input* should be a 1D tensor containing the desired output shape (the dimensions specified in *extra_shape* will also be appended). In this case, the *shape* argument should not be set.
 
template<int VALUE_TYPE = TensorProto_DataType_FLOAT>
std::vector< TensorShape > FillerTensorInference (const OperatorDef &def, const vector< TensorShape > &in)
 
 Index (integers)") .Input(1
 
Needles query Output (0,"query_indices","Indices of the needles in index or 'missing value'").Arg("missing_value"
 
Needles query Placeholder for items that are not found SetDoc (R"DOC( Finds elements of second input from first input, outputting the last (max) index for each query. If query not find, inserts missing_value. See IndexGet() for a version that modifies the index when values are not found. )DOC")
 
 REGISTER_CPU_OPERATOR (Flatten, FlattenOp< CPUContext >)
 
std::vector< TensorShape > TensorInferenceForFlatten (const OperatorDef &def, const std::vector< TensorShape > &in)
 
 REGISTER_CPU_OPERATOR (FlexibleTopK, FlexibleTopKOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (FlexibleTopKGradient, FlexibleTopKGradientOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (Floor, FloorOp< float, CPUContext >)
 
 SetDoc (R"DOC( Element-wise application of the floor function ($y=floor(x)$) to the input tensor `X`. Output tensor shape is the same as the input tensor. This operator can be used in an in-place fashion by using the same input blob as the output blob. Github Link: - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/floor_op.cc <details> <summary> <b>Example</b> </summary> **Code** ``` workspace.ResetWorkspace() op = core.CreateOperator( "Floor", ["X"], ["X"], ) workspace.FeedBlob("X", (np.random.uniform(-10, 10, (5,5))).astype(np.float32)) print("X before running op:", workspace.FetchBlob("X")) workspace.RunOperatorOnce(op) print("X after running op:", workspace.FetchBlob("X")) ``` **Result** ``` X before running op: [[ 3.813361 -1.319647 5.2089314 -4.931328 0.6218652 ] [ 7.2757645 5.5552588 5.785643 -2.4790506 -0.41400087] [ 1.1541046 -6.933266 3.3754056 1.6569928 -1.7670316 ] [-3.4932013 4.891472 1.5530115 -3.2443287 -4.605099 ] [-4.574543 -7.360948 5.91305 -8.196495 -5.357458 ]] X after running op: [[ 3. -2. 5. -5. 0.] [ 7. 5. 5. -3. -1.] [ 1. -7. 3. 1. -2.] [-4. 4. 1. -4. -5.] [-5. -8. 5. -9. -6.]] ``` </details> )DOC").Input(0
 
 GRADIENT_NOT_IMPLEMENTED_YET (Floor)
 
 REGISTER_CPU_OPERATOR (Free, FreeOp< CPUContext >)
 
 SHOULD_NOT_DO_GRADIENT (Free)
 
INT_MAX SameNumberOfOutput ().EnforceOneToOneInplace().SetDoc(R"DOC( Frees the content of the blobs. The input and output blobs should be one-to-one inplace.)DOC")
 
 REGISTER_CUDA_OPERATOR (Free, FreeOp< CUDAContext >)
 
 REGISTER_CPU_OPERATOR (FC, FullyConnectedOp< CPUContext >)
 
 REGISTER_CPU_GRADIENT_OPERATOR (FCGradient, FullyConnectedGradientOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (FCTransposed, FullyConnectedOp< CPUContext, DefaultEngine, false >)
 
 REGISTER_CPU_GRADIENT_OPERATOR (FCTransposedGradient, FullyConnectedGradientOp< CPUContext, DefaultEngine, false >)
 
 REGISTER_CUDA_OPERATOR (FC, FullyConnectedOp< CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (FCGradient, FullyConnectedGradientOp< CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (FCTransposed, FullyConnectedOp< CUDAContext, DefaultEngine, false >)
 
 REGISTER_CUDA_OPERATOR (FCTransposedGradient, FullyConnectedGradientOp< CUDAContext, DefaultEngine, false >)
 
 REGISTER_CPU_OPERATOR (FloatToFused8BitRowwiseQuantized, FloatToFused8BitRowwiseQuantizedOp< float, convertfp32fp32, CPUContext >)
 
set_dims (1, X.dims(1)+8)
 
out push_back (std::move(X))
 
out[0] set_data_type (TensorProto_DataType_UINT8)
 
 SetDoc (R"DOC( Applies 8-bit row-wise quantization by determining the range (maximum - minimum) and offset (minimum value) of each row in the input matrix, and then scaling each element to an 8-bit number between 0 and 255. To later de-quantize values, the scale (range / 255) and offset (bias) are stored alongside the data. More precisely, the first 4 bytes of each row in the output matrix are a 32-bit float storing the scale, the next 4 bytes store the bias as a 32-bit float, and all remaining bytes in the row encode single quantized values.) )DOC").Input(0
 
Float32 input data Output (0,"output","Fused scale, bias and quantized data")
 
 NO_GRADIENT (FloatToFused8BitRowwiseQuantized)
 
 REGISTER_CPU_OPERATOR (HalfFloatToFused8BitRowwiseQuantized, FloatToFused8BitRowwiseQuantizedOp< at::Half, convertfp16fp32, CPUContext >)
 
 NO_GRADIENT (HalfFloatToFused8BitRowwiseQuantized)
 
 REGISTER_CPU_OPERATOR (Fused8BitRowwiseQuantizedToFloat, Fused8BitRowwiseQuantizedToFloatOp< float, convertfp32fp32, CPUContext >)
 
set_dims (1, X.dims(1)-8)
 
out[0] set_data_type (TensorProto_DataType_FLOAT)
 
 SetDoc (R"DOC( De-quantizes the result of the FloatToFused8BitRowwiseQuantized operator. The input is expected to encode the scale as a 32-bit float in the second to the last 4 bytes of each row, followed by the bias as a 32-bit float in the next 4 bytes, and the quantized values in the preceding bytes of the row. The output is a matrix containing only the values, but de-quantized. De-quantization is performed by multiplying each value by its row's scale and bias parameters. The de-quantized values will thus not be exactly equal to the original, un-quantized floating point values. )DOC").Input(0
 
Fused bias and quantized data Output (0,"float_output","Float32 data")
 
 NO_GRADIENT (Fused8BitRowwiseQuantizedToFloat)
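A minimal quantize/de-quantize round trip using the two operators above, assuming the standard caffe2.python core/workspace helpers; blob names are illustrative:

```
import numpy as np
from caffe2.python import core, workspace

workspace.ResetWorkspace()
X = np.random.rand(3, 8).astype(np.float32)
workspace.FeedBlob("X", X)
# Quantize each row to uint8 with its scale and bias fused into the row ...
workspace.RunOperatorOnce(core.CreateOperator(
    "FloatToFused8BitRowwiseQuantized", ["X"], ["X_q"]))
# ... then de-quantize; the result approximates the original rows.
workspace.RunOperatorOnce(core.CreateOperator(
    "Fused8BitRowwiseQuantizedToFloat", ["X_q"], ["X_dq"]))
print(np.max(np.abs(workspace.FetchBlob("X_dq") - X)))  # small quantization error
```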
 
 REGISTER_CPU_OPERATOR (Fused8BitRowwiseQuantizedToHalfFloat, Fused8BitRowwiseQuantizedToFloatOp< at::Half, convertfp32fp16, CPUContext >)
 
out[0] set_data_type (TensorProto_DataType_FLOAT16)
 
 SetDoc (R"DOC( De-quantizes the result of the HalfFloatToFused8BitRowwiseQuantized operator. The input is expected to encode the scale as a 32-bit float in the second to the last 4 bytes of each row, followed by the bias as a 32-bit float in the next 4 bytes, and the quantized values in the preceding bytes of the row. The output is a matrix containing only the values, but de-quantized. De-quantization is performed by multiplying each value by its row's scale and bias parameters. The de-quantized values will thus not be exactly equal to the original, un-quantized floating point values. )DOC").Input(0
 
Fused bias and quantized data Output (0,"float16_output","Float16 data")
 
 NO_GRADIENT (Fused8BitRowwiseQuantizedToHalfFloat)
 
 REGISTER_CPU_OPERATOR (FloatToFusedRandRowwiseQuantized, FloatToFusedRandRowwiseQuantizedOp< CPUContext >)
 
Shape inference for FloatToFusedRandRowwiseQuantized: out[0].set_dims(1, 10 + (X.dims(1) + data_per_byte - 1) / data_per_byte). Each output row holds a 10-byte header (bitwidth, tail, min, max) plus ceil(X.dims(1) / data_per_byte) bytes of packed quantized data.
 
 SetDoc (R"DOC( Applies row-wise stochastic/random quantization by determining the range of each row in the input matrix, and then quantizing each element to one of the two closest discrete levels by drawing from a Bernoulli distribution. The method extends TernGrad [1], which randomly quantizes gradients to three levels to reduce communication in distributed training. The format of each row (x) in the output matrix is [bitwidth][tail][min][max][data]: bitwidth [1 byte]: bitwidth per data value (1, 2, 4 or 8); tail [1 byte]: the number of unused buckets [1-8] (one byte is split into 8/bitwidth buckets and each bucket stores one low-precision value in bitwidth bits); min [4 bytes]: the minimum floating-point value min(x); max [4 bytes]: the maximum floating-point value max(x); data: the quantized data. The quantization levels are uniform, q = min + (max - min)/(2^bitwidth - 1) * [0:1:2^bitwidth]. During stochastic/random quantization x' = Quantize(x), for q_j < x_i <= q_{j+1} the quantized value x'_i is drawn from a Bernoulli distribution with P(x'_i = q_{j+1}) = (x_i - q_j)/(q_{j+1} - q_j) and P(x'_i = q_j) = (q_{j+1} - x_i)/(q_{j+1} - q_j). [1] proved E{x'_i} = x_i, i.e. the quantization is an unbiased approximation; more details are in the paper. For example, suppose the targeted bitwidth is 2 and x = [0.3, -1.4, -0.6, 0.9, 1.0]; then tail = 3, min = -1.4, max = 1.0 and q = [-1.4, -0.6, 0.2, 1.0]. x_1 = 0.3 will be quantized to x'_1 = 0.2 with probability 7/8 and to x'_1 = 1.0 with probability 1/8. The storage format of the quantized data is [x'_1|x'_3|x'_5|xxx]-[x'_2|x'_4|xxx|xxx]. In general, an input row is split into multiple segments: one segment is a contiguous subarray of the row, and its length is the number of bytes storing quantized data in the output matrix; the b-th bucket of the i-th byte stores the i-th value of the b-th segment of the input row. [1] Wen, Wei, Cong Xu, Feng Yan, Chunpeng Wu, Yandan Wang, Yiran Chen, and Hai Li. "TernGrad: Ternary gradients to reduce communication in distributed deep learning." In Advances in Neural Information Processing Systems, pp. 1508-1518. 2017. )DOC")
 
 Input (0, "input", "Float32 input data"). Output (0, "output", "Fused bitwidth, tail, min, max and quantized data"). Arg ("bitwidth", "How many bits to quantize per data value (defaults to 8)."). Arg ("random", "Whether to quantize stochastically (default true); false is set up for unit tests.")
 
 NO_GRADIENT (FloatToFusedRandRowwiseQuantized)
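To make the Bernoulli rounding rule concrete, here is a small NumPy sketch. It is a hypothetical helper, not the operator's implementation; it ignores the packed byte format and only reproduces the uniform levels and the stochastic draw described above:

```
import numpy as np

def stochastic_quantize(x, bitwidth=2, rng=np.random.default_rng(0)):
    # Uniform levels q = min + (max - min)/(2**bitwidth - 1) * [0, 1, ..., 2**bitwidth - 1]
    lo, hi = x.min(), x.max()
    levels = lo + (hi - lo) / (2 ** bitwidth - 1) * np.arange(2 ** bitwidth)
    # For q_j < x_i <= q_{j+1}, round up with probability (x_i - q_j) / (q_{j+1} - q_j).
    j = np.clip(np.searchsorted(levels, x, side="left") - 1, 0, len(levels) - 2)
    p_up = (x - levels[j]) / (levels[j + 1] - levels[j])
    return np.where(rng.random(x.shape) < p_up, levels[j + 1], levels[j])

x = np.array([0.3, -1.4, -0.6, 0.9, 1.0])
# With bitwidth=2: levels = [-1.4, -0.6, 0.2, 1.0]; 0.3 maps to 0.2 w.p. 7/8 and to 1.0 w.p. 1/8.
print(stochastic_quantize(x))
```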
 
 REGISTER_CPU_OPERATOR (FusedRandRowwiseQuantizedToFloat, FusedRandRowwiseQuantizedToFloatOp< CPUContext >)
 
Shape inference for FusedRandRowwiseQuantizedToFloat loops over the operator's outputs (for (int i = 0; i < def.output_size(); i++)) to fill in their shapes.
 
 SetDoc (R"DOC( De-quantizes the result of the FloatToFusedRandRowwiseQuantized operator. Refer to the FloatToFusedRandRowwiseQuantized operator for details. )DOC")
 
 Input (0): fused bitwidth, tail, min, max and quantized data. Output (0, "float_input", "Float32 data")
 
 NO_GRADIENT (FusedRandRowwiseQuantizedToFloat)
 
SetDoc for GatherFused8BitRowwise (continued): performs the same operation as Gather, but operating on 8-bit rowwise quantized matrices with fused storage (where each row stores the quantized values, and then the scale and offset). DATA needs to have rank 2 and INDICES needs to have rank 1.
 
 Input (0, "DATA", "uint8 tensor with rank 2 obtained with operator FloatToFused8BitRowwiseQuantized"). Input (1, "INDICES", "Integer vector containing indices of the first dimension of DATA for the rows that are being gathered"). Output (0): the gathered rows, still in fused 8-bit rowwise quantized form.
 
 TensorInferenceFunction ([](const OperatorDef& def, const vector<TensorShape>& in) { vector<TensorShape> out(1); for (auto d : in[1].dims()) { out[0].add_dims(d); } for (int i = 1; i < in[0].dims_size(); ++i) { out[0].add_dims(in[0].dims(i)); } out[0].set_data_type(in[0].data_type()); return out; }). The output takes INDICES' dims followed by DATA's trailing dims, with DATA's data type.
 
 REGISTER_CPU_OPERATOR (GatherFused8BitRowwise, GatherFused8BitRowwiseOp< CPUContext >)
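A quick way to sanity-check GatherFused8BitRowwise is to quantize a matrix, gather a few rows, and de-quantize; the blob names and sizes below are arbitrary:

```
from caffe2.python import core, workspace
import numpy as np

workspace.ResetWorkspace()
X = np.random.rand(5, 8).astype(np.float32)
workspace.FeedBlob("X", X)
workspace.FeedBlob("idx", np.array([2, 0], dtype=np.int64))
workspace.RunOperatorOnce(core.CreateOperator(
    "FloatToFused8BitRowwiseQuantized", ["X"], ["X_q"]))
# Gather rows 2 and 0 of the fused/quantized matrix.
workspace.RunOperatorOnce(core.CreateOperator(
    "GatherFused8BitRowwise", ["X_q", "idx"], ["rows_q"]))
workspace.RunOperatorOnce(core.CreateOperator(
    "Fused8BitRowwiseQuantizedToFloat", ["rows_q"], ["rows"]))
print(np.abs(workspace.FetchBlob("rows") - X[[2, 0]]).max())  # small quantization error
```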
 
 REGISTER_CPU_OPERATOR (Gather, GatherOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (GenerateProposals, GenerateProposalsOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (GenerateProposalsCPP, GenerateProposalsOp< CPUContext >)
 
SetDoc for GenerateProposals: generate bounding box proposals for Faster R-CNN. The proposals are generated for a list of images based on image scores, bounding box regression results (deltas) as well as predefined bounding box shapes (anchors). Greedy non-maximum suppression is applied to generate the final bounding boxes.
 
 Arg ("spatial_scale", "(float) spatial scale"). Arg ("pre_nms_topN", "(int) RPN_PRE_NMS_TOP_N"). Arg ("post_nms_topN", "(int) RPN_POST_NMS_TOP_N"). Arg ("nms_thresh", "(float) RPN_NMS_THRESH"). Arg ("min_size", "(float) RPN_MIN_SIZE"). Arg ("angle_bound_on", "bool (default true). If set, for rotated boxes, angle is normalized to be within [angle_bound_lo, angle_bound_hi]."). Arg ("angle_bound_lo", "int (default -90 degrees). If set, for rotated boxes, angle is normalized to be within [angle_bound_lo, angle_bound_hi]."). Arg ("angle_bound_hi", "int (default 90 degrees). If set, for rotated boxes, angle is normalized to be within [angle_bound_lo, angle_bound_hi]."). Arg ("clip_angle_thresh", "float (default 1.0 degrees). For RRPN, clip almost horizontal boxes within this threshold of tolerance for backward compatibility. Set to negative value for no clipping.")
 
 Input (0): scores from the conv layer, size (img_count, A, H, W). Input (1): bounding box deltas from the conv layer, size (img_count, 4 * A, H, W). Input (2): image info, size (img_count, 3), format (height, width, scale). Input (3): bounding box anchors, size (A, 4).
 
 Output (0): proposals, size (n x 5), format (image_index, x1, y1, x2, y2). Output (1): scores of the proposals, size (n).
 
 OPERATOR_SCHEMA (GenerateProposalsCPP).NumInputs(4).NumOutputs(2)
 
 SHOULD_NOT_DO_GRADIENT (GenerateProposals)
 
 SHOULD_NOT_DO_GRADIENT (GenerateProposalsCPP)
 
 REGISTER_CPU_OPERATOR (GivenTensorByteStringToUInt8Fill, GivenTensorByteStringToUInt8FillOp< CPUContext >)
 
 NO_GRADIENT (GivenTensorByteStringToUInt8Fill)
 
 SetDoc (R"DOC( This op fills a uint8 output tensor with the data specified by the *value* argument. The data must previously be serialized as a byte string. The output tensor shape is specified by the *shape* argument. Beware, when using this argument *value* should have a value for every element of the *output*, as missing values will not be initialized automatically. If *input_as_shape* is set to *true*, then the *input* should be a 1D tensor containing the desired output shape (the dimensions specified in *extra_shape* will also be appended). In this case, the *shape* argument should **not** be set. This op allows us to write uint8 tensors to Protobuf as byte strings and read them back as uint8 tensors in order to avoid the Protobuf uint32_t varint encoding size penalty. <details> <summary> <b>Example</b> </summary> **Code** ``` workspace.ResetWorkspace() val = np.array([1, 2, 3], dtype=np.uint8) op = core.CreateOperator( "GivenTensorByteStringToUInt8Fill", [], ["out"], values=[val.tobytes()], shape=val.shape, ) workspace.RunOperatorOnce(op) print("Out:\n", workspace.FetchBlob("out")) ``` **Result** ``` Out: [1 2 3] ``` </details> )DOC").Arg("values"
 
Arg ("values", "The value for the elements of the output tensor."). Arg ("shape", "The shape of the output tensor. Cannot set the shape argument and pass in an input at the same time."). Arg ("extra_shape", "The additional dimensions appended at the end of the shape indicated by the input blob. Cannot set the extra_shape argument when there is no input blob."). Arg ("input_as_shape", "1D tensor containing the desired output shape. First input must be in CPU context."). TensorInferenceFunction (FillerTensorInference< TensorProto_DataType_STRING >)
 
 REGISTER_CPU_OPERATOR (GivenTensorFill, GivenTensorFillOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (GivenTensorDoubleFill, GivenTensorFillOp< double, CPUContext >)
 
 REGISTER_CPU_OPERATOR (GivenTensorBoolFill, GivenTensorFillOp< bool, CPUContext >)
 
 REGISTER_CPU_OPERATOR (GivenTensorIntFill, GivenTensorFillOp< int, CPUContext >)
 
 REGISTER_CPU_OPERATOR (GivenTensorInt64Fill, GivenTensorFillOp< int64_t, CPUContext >)
 
 REGISTER_CPU_OPERATOR (GivenTensorStringFill, GivenTensorFillOp< std::string, CPUContext >)
 
 NO_GRADIENT (GivenTensorFill)
 
 NO_GRADIENT (GivenTensorDoubleFill)
 
 NO_GRADIENT (GivenTensorBoolFill)
 
 NO_GRADIENT (GivenTensorIntFill)
 
 NO_GRADIENT (GivenTensorInt64Fill)
 
 NO_GRADIENT (GivenTensorStringFill)
 
 SetDoc (R"DOC( This op fills an output tensor with the data specified by the *value* and *dtype* arguments. The output tensor shape is specified by the *shape* argument. Beware, when using this argument *value* should have a value for every element of the *output*, as missing values will not be initialized automatically. If *input_as_shape* is set to *true*, then the *input* should be a 1D tensor containing the desired output shape (the dimensions specified in *extra_shape* will also be appended). In this case, the *shape* argument should **not** be set. *Note: Do not set the shape argument and pass in an input at the same time.* Github Links: - https://github.com/caffe2/caffe2/blob/master/caffe2/operators/given_tensor_fill_op.h - https://github.com/caffe2/caffe2/blob/master/caffe2/operators/given_tensor_fill_op.cc <details> <summary> <b>Example</b> </summary> **Code** ``` workspace.ResetWorkspace() op = core.CreateOperator( "GivenTensorFill", [], ["out"], values=[1., 2., 3.], shape=[3], ) workspace.RunOperatorOnce(op) print("Out:\n", workspace.FetchBlob("out")) ``` **Result** ``` Out: [1. 2. 3.] ``` </details> )DOC").Arg("values"
 
 REGISTER_CPU_OPERATOR (GroupNorm, GroupNormOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (GroupNormGradient, GroupNormGradientOp< float, CPUContext >)
 
 SetDoc (R"DOC( Group Normalization (GN) operation: https://arxiv.org/abs/1803.08494 )DOC").Arg("num_groups"
 
Arg ("num_groups", "number of groups used by GN"). Arg ("epsilon", "(float) default 1e-5; small constant added to var."). Input (0
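For orientation, this is the usual Group Normalization computation described in the referenced paper, as a NumPy sketch; the operator's exact input/output signature (e.g. how gamma and beta are passed) is not spelled out in this listing:

```
import numpy as np

def group_norm_nchw(x, gamma, beta, num_groups, epsilon=1e-5):
    """x: (N, C, H, W); gamma, beta: (C,). Normalizes each group of C/num_groups channels."""
    n, c, h, w = x.shape
    g = num_groups
    xg = x.reshape(n, g, c // g, h, w)
    mean = xg.mean(axis=(2, 3, 4), keepdims=True)
    var = xg.var(axis=(2, 3, 4), keepdims=True)
    xg = (xg - mean) / np.sqrt(var + epsilon)
    return xg.reshape(n, c, h, w) * gamma.reshape(1, c, 1, 1) + beta.reshape(1, c, 1, 1)
```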
 
 REGISTER_CPU_OPERATOR (GRUUnit, GRUUnitOp< float, CPUContext >)
 
SetDoc for GRUUnit (continued): computes the GRU activations in a sequence-length aware fashion, given the (fused) inputs X (TxNxD), the previous hidden state (NxD), and the sequence lengths (N), avoiding computation if the input is invalid (as in, the value at X[t][n] >= seqLengths[n]).
 
 Arg ("drop_states", "Bool to determine if hidden state is zeroes or passed along for timesteps past the given sequence_length."). Arg ("sequence_lengths", "When false, the sequence lengths input is left out, and all following inputs are shifted left by one."). Output (0
 
 REGISTER_CPU_OPERATOR (GRUUnitGradient, GRUUnitGradientOp< float, CPUContext >)
 
 NumInputs (5, 6).NumOutputs(2).Arg("sequence_lengths"
 
 REGISTER_GRADIENT (GRUUnit, GetGRUUnitGradient)
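The sequence-length masking described above can be pictured with a schematic NumPy step. The gate layout of the fused input and the exact update equations below are assumptions (a generic GRU convention), not the operator's verified implementation; only the drop_states behaviour follows the argument description above:

```
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def gru_unit_step(h_prev, gates, t, seq_lengths, drop_states=False):
    """One timestep of a generic GRU with sequence-length masking.

    h_prev:      (N, D) previous hidden state
    gates:       (N, 3*D) fused pre-activations at time t, assumed ordered [reset, update, new]
    seq_lengths: (N,) valid lengths; timesteps with t >= seq_lengths[n] are invalid
    """
    D = h_prev.shape[1]
    r = sigmoid(gates[:, 0 * D:1 * D])
    z = sigmoid(gates[:, 1 * D:2 * D])
    # Simplification: apply the reset gate to the fused candidate pre-activation.
    n = np.tanh(gates[:, 2 * D:3 * D] * r)
    h = (1.0 - z) * n + z * h_prev
    valid = (t < seq_lengths)[:, None]
    # Past the end of a sequence, either zero the state or carry the previous one through.
    return np.where(valid, h, 0.0 if drop_states else h_prev)
```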
 
 REGISTER_CPU_OPERATOR (FloatToHalf, FloatToHalfOp< CPUContext >)
 
 REGISTER_CPU_OPERATOR (HalfToFloat, HalfToFloatOp< CPUContext >)
 
Shape inference for FloatToHalf / HalfToFloat: out.push_back(X), i.e. the output has the same shape as the input.
 
 REGISTER_CPU_OPERATOR (Float16ConstantFill, Float16ConstantFillOp)
 
 REGISTER_CPU_OPERATOR (Float16UniformFill, Float16UniformFillOp)
 
max Arg ("shape","Shape of the tensor").Arg("min"
 
max Minimim value to generate Arg ("max","Maximum value to generate")
 
 NO_GRADIENT (Float16UniformFill)
 
Arg ("value", "The value for the elements of the output tensor."). Arg ("shape", "The shape of the output tensor."). Output (0
 
 REGISTER_GRADIENT (FloatToHalf, GetFloatToHalfGradient)
 
 REGISTER_GRADIENT (HalfToFloat, GetHalfToFloatGradient)
 
 NO_GRADIENT (Float16ConstantFill)
 
std::vector< TensorShape > Float16FillerTensorInference (const OperatorDef &def, const vector< TensorShape > &in)
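A minimal round trip through the half-precision conversion ops (blob names arbitrary):

```
from caffe2.python import core, workspace
import numpy as np

workspace.ResetWorkspace()
X = np.random.rand(3, 4).astype(np.float32)
workspace.FeedBlob("X", X)
workspace.RunOperatorOnce(core.CreateOperator("FloatToHalf", ["X"], ["X_h"]))
workspace.RunOperatorOnce(core.CreateOperator("HalfToFloat", ["X_h"], ["X_f"]))
print(np.abs(workspace.FetchBlob("X_f") - X).max())  # bounded by fp16 rounding error
```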
 
 REGISTER_CPU_OPERATOR (HardSigmoid, UnaryElementwiseWithArgsOp< TensorTypes< float >, CPUContext, HardSigmoidFunctor< CPUContext >>)
 
 REGISTER_CPU_OPERATOR (HardSigmoidGradient, BinaryElementwiseWithArgsOp< TensorTypes< float >, CPUContext, HardSigmoidGradientFunctor< CPUContext >>)
 
 CostInferenceFunction (CostInferenceForHardSigmoid).IdenticalTypeAndShape().SetDoc(R"DOC( Applies hard sigmoid operation to the input data element-wise. The HardSigmoid operation takes one input $X$
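The doc string above is truncated in this listing. As a reference point, the hard sigmoid is commonly defined as a clipped affine function of the input; the alpha/beta defaults shown here are assumptions matching the common ONNX-style convention, not necessarily this operator's defaults:

```
import numpy as np

def hard_sigmoid(x, alpha=0.2, beta=0.5):
    """Element-wise Y = max(0, min(1, alpha * X + beta))."""
    return np.clip(alpha * x + beta, 0.0, 1.0)

print(hard_sigmoid(np.array([-5.0, -1.0, 0.0, 1.0, 5.0])))  # [0.  0.3 0.5 0.7 1. ]
```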
 
 REGISTER_CPU_OPERATOR (If, IfOp< CPUContext >)
 
NumInputs (1, INT_MAX). SetDoc (R"DOC( 'If' control operator, first input is a scalar boolean blob that stores condition value. Accepts 'then_net' (required) and 'else_net' (optional) arguments for 'then' and 'else' subnets respectively. Subnets are executed in the same workspace as 'If'. )DOC"). Arg ("then_net", "Net executed when condition is true"). Arg ("else_net", "Net executed when condition is false (optional)"). Input (0): scalar boolean condition. AllowInplace ([](int in, int out) -> bool { return true; })
 
 REGISTER_CUDA_OPERATOR (If, IfOp< CUDAContext >)
 
 REGISTER_CPU_OPERATOR (Im2Col, Im2ColOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (Col2Im, Col2ImOp< float, CPUContext >)
 
 REGISTER_GRADIENT (Im2Col, GetIm2ColGradient)
 
 REGISTER_GRADIENT (Col2Im, GetCol2ImGradient)
 
Shape inference for Im2Col switches on the storage order (switch (order)) and checks that the dilated kernel fits inside the image: CAFFE_ENFORCE(H >= dkernel_h); CAFFE_ENFORCE(W >= dkernel_w).
 
 Input (0, "X", "4-tensor in NCHW or NHWC."). Output (0
 
 OPERATOR_SCHEMA (Col2Im).NumInputs(2).NumOutputs(1)
 
 REGISTER_CUDA_OPERATOR (Im2Col, Im2ColOp< float, CUDAContext >)
 
 REGISTER_CUDA_OPERATOR (Col2Im, Col2ImOp< float, CUDAContext >)
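For readers unfamiliar with the transform, this is the classic im2col unfolding on a single NCHW image, as a NumPy sketch with simplified stride/pad handling; the operator's actual argument names, dilation support, and output layout are not shown here:

```
import numpy as np

def im2col_nchw(x, kh, kw, stride=1, pad=0):
    """Unfold a single image (C, H, W) into columns of kh*kw patches."""
    c, h, w = x.shape
    xp = np.pad(x, ((0, 0), (pad, pad), (pad, pad)))
    out_h = (h + 2 * pad - kh) // stride + 1
    out_w = (w + 2 * pad - kw) // stride + 1
    cols = np.empty((c * kh * kw, out_h * out_w), dtype=x.dtype)
    idx = 0
    for i in range(out_h):
        for j in range(out_w):
            patch = xp[:, i * stride:i * stride + kh, j * stride:j * stride + kw]
            cols[:, idx] = patch.reshape(-1)
            idx += 1
    return cols
```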
 
 REGISTER_CPU_OPERATOR (IntIndexCreate, IndexCreateOp< int32_t >)
 
 REGISTER_CPU_OPERATOR (LongIndexCreate, IndexCreateOp< int64_t >)
 
 REGISTER_CPU_OPERATOR (StringIndexCreate, IndexCreateOp< std::string >)
 
 REGISTER_CPU_OPERATOR (IndexGet, IndexGetOp)
 
 REGISTER_CPU_OPERATOR (IndexLoad, IndexLoadOp)
 
 REGISTER_CPU_OPERATOR (IndexStore, IndexStoreOp)
 
 REGISTER_CPU_OPERATOR (IndexFreeze, IndexFreezeOp)
 
 REGISTER_CPU_OPERATOR (IndexSize, IndexSizeOp)
 
IndexCreate ops (IntIndexCreate / LongIndexCreate / StringIndexCreate): Arg ("max_elements", "Max number of elements, including the zero entry."). Output (0, "handler", "Pointer to an Index instance."). ScalarType (TensorProto_DataType_UNDEFINED). One variant names the output "handle" instead of "handler".
 
 IndexGet: given an index handle and a tensor of keys, returns an Int tensor of the same shape containing the indices for each of the keys. If the index is frozen, unknown entries are given index 0; otherwise, new entries are added into the index. If an insert is necessary but max_elements has been reached, the op will fail. Input (0, "handle", "Pointer to an Index instance."). Input (1, "keys", "Tensor of keys to be looked up."). Output (0, "indices", "Indices for each of the keys.")
 
 IndexFreeze: freezes the given index, disallowing creation of new index entries. Should not be called concurrently with IndexGet. Input (0): the input handle. EnforceInplace ({{0, 0}}). ScalarType (TensorProto_DataType_UNDEFINED)
 
 IndexLoad: Input (0, "handle", "Pointer to an Index instance."). Input (1, "items", "1-D tensor with elements starting with index 1."). Output (0): the input handle. If the corresponding argument is set, the op skips the first entry of the tensor; this allows loading tensors that are aligned with an embedding, where the first entry corresponds to the default index entry. ScalarType (TensorProto_DataType_UNDEFINED)
 
 IndexSize: Input (0): pointer to an Index instance. Output (0, "items", "Scalar int64 tensor with number of entries.")
 
 NO_GRADIENT (IndexGetOp)
 
 NO_GRADIENT (IntIndexCreate)
 
 NO_GRADIENT (LongIndexCreate)
 
 NO_GRADIENT (StringIndexCreate)
 
 SHOULD_NOT_DO_GRADIENT (IndexFreeze)
 
 SHOULD_NOT_DO_GRADIENT (IndexLoad)
 
 SHOULD_NOT_DO_GRADIENT (IndexStore)
 
 SHOULD_NOT_DO_GRADIENT (IndexSize)
 
 CAFFE_KNOWN_TYPE (std::unique_ptr< caffe2::IndexBase >)
 
 REGISTER_BLOB_SERIALIZER ((TypeMeta::Id< std::unique_ptr< caffe2::IndexBase >>()), IndexSerializer)
 
 REGISTER_BLOB_DESERIALIZER (std::unique_ptr< caffe2::IndexBase >, IndexDeserializer)
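A typical index workflow, sketched with the Python bindings (blob names arbitrary; per the schema above, entry ids start at 1, with 0 reserved for the default entry):

```
from caffe2.python import core, workspace
import numpy as np

workspace.ResetWorkspace()
# Create an int64-keyed index with room for up to 10 entries (including the zero entry).
workspace.RunOperatorOnce(core.CreateOperator(
    "LongIndexCreate", [], ["index"], max_elements=10))
# Look up keys; unseen keys are assigned fresh ids.
workspace.FeedBlob("keys", np.array([100, 200, 100], dtype=np.int64))
workspace.RunOperatorOnce(core.CreateOperator("IndexGet", ["index", "keys"], ["ids"]))
print(workspace.FetchBlob("ids"))   # e.g. [1 2 1]
# Freeze the index so later lookups no longer create new entries.
workspace.RunOperatorOnce(core.CreateOperator("IndexFreeze", ["index"], ["index"]))
workspace.RunOperatorOnce(core.CreateOperator("IndexSize", ["index"], ["size"]))
print(workspace.FetchBlob("size"))  # number of entries, including the zero entry
```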
 
 REGISTER_CPU_OPERATOR (InstanceNormGradient, InstanceNormGradientOp< float, CPUContext >)
 
 OPERATOR_SCHEMA (InstanceNormGradient).NumInputs(4
 
 NumOutputs (3)
 
 REGISTER_GRADIENT (InstanceNorm, GetInstanceNormGradient)
 
 REGISTER_CPU_OPERATOR (InstanceNorm, InstanceNormOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (IntegralImage, IntegralImageOp< float, CPUContext >)
 
 REGISTER_CPU_OPERATOR (IntegralImageGradient, IntegralImageGradientOp< float, CPUContext >)
 
SetDoc for IntegralImage (continued): computes an integral image, which contains the sum of pixel values within an image vertically and horizontally. This integral image can then be used with other detection and tracking techniques.
 
 Input (0, "X", "Images tensor of the form (N, C, H, W)"). Output (0): integrated image of the form (N, C, H+1, W+1).
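The output shape above follows the standard integral-image construction; a NumPy sketch, assuming the extra leading row and column hold the zero prefix sums:

```
import numpy as np

def integral_image(x):
    """x: (N, C, H, W) -> (N, C, H+1, W+1) cumulative sums down and across each image."""
    n, c, h, w = x.shape
    out = np.zeros((n, c, h + 1, w + 1), dtype=np.float32)
    out[:, :, 1:, 1:] = x.cumsum(axis=2).cumsum(axis=3)
    return out
```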