#include "caffe2/operators/elementwise_ops.h"

#include "caffe2/core/operator_gradient.h"
#include "caffe2/utils/proto_utils.h"

namespace caffe2 {

const char kBroadcastDoc[] = R"DOC(
If necessary the right-hand-side argument will be broadcast to match the
shape of the left-hand-side argument. When broadcasting is specified, the
second tensor can either be of size 1 (a scalar value), or have its shape as a
contiguous subset of the first tensor's shape. The start of the mutually
equal shape is specified by the argument "axis"; if it is not set, suffix
matching is assumed. 1-dim expansion doesn't work yet.

For example, the following tensor shapes are supported (with broadcast=1):

```
  shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar
  shape(A) = (2, 3, 4, 5), shape(B) = (5,)
  shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)
  shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1
  shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0
```

Argument `broadcast=1` needs to be passed to enable broadcasting.

Github Links:

- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_op_schema.cc

)DOC";
"DOC( 37 <summary> <b>Example</b> </summary> 43 workspace.ResetWorkspace() 45 op = core.CreateOperator( 51 workspace.FeedBlob("A", np.array([[1,2],[3,4]])) 52 workspace.FeedBlob("B", np.array([[5,6],[7,8]])) 53 print("A:", workspace.FetchBlob("A")) 54 print("B:", workspace.FetchBlob("B")) 55 workspace.RunOperatorOnce(op) 56 print("C:", workspace.FetchBlob("C")) 80 const char kSubExample[] = R
"DOC( 83 <summary> <b>Example</b> </summary> 89 workspace.ResetWorkspace() 91 op = core.CreateOperator( 97 workspace.FeedBlob("A", np.array([[10,12],[4,14]])) 98 workspace.FeedBlob("B", np.array([[5,16],[1,19]])) 99 print("A:", workspace.FetchBlob("A")) 100 print("B:", workspace.FetchBlob("B")) 101 workspace.RunOperatorOnce(op) 102 print("C:", workspace.FetchBlob("C")) 126 const char kMulExample[] = R
"DOC( 129 <summary> <b>Example</b> </summary> 135 workspace.ResetWorkspace() 137 op = core.CreateOperator( 143 workspace.FeedBlob("A", np.array([[1,2],[3,4]])) 144 workspace.FeedBlob("B", np.array([[5,6],[7,8]])) 145 print("A:", workspace.FetchBlob("A")) 146 print("B:", workspace.FetchBlob("B")) 147 workspace.RunOperatorOnce(op) 148 print("C:", workspace.FetchBlob("C")) 172 const char kDivExample[] = R
"DOC( 175 <summary> <b>Example</b> </summary> 181 workspace.ResetWorkspace() 183 op = core.CreateOperator( 189 workspace.FeedBlob("A", np.array([[18,8],[2,9]])) 190 workspace.FeedBlob("B", np.array([[9,2],[3,2]])) 191 print("A:", workspace.FetchBlob("A")) 192 print("B:", workspace.FetchBlob("B")) 193 workspace.RunOperatorOnce(op) 194 print("C:", workspace.FetchBlob("C")) 217 std::function<void(OpSchema&)> MathDocGenerator(const char* name,
const char* extra) {
218 return [=](OpSchema& schema) {
220 Performs element-wise binary {name} (with limited broadcast support). 225 c10::ReplaceAll(doc, "{name}", name);
226 c10::ReplaceAll(doc,
"{broadcast_doc}", kBroadcastDoc);
227 c10::ReplaceAll(doc,
"{extra}", extra);
229 schema.Arg(
"broadcast",
"*(type: int; default: 0)* Pass 1 to enable broadcasting");
232 "*(type: int; default: -1)* Axis to concatenate on.");
236 "*(type: Tensor`<float>`)* First operand, should share the type with the second operand.");
240 "*(type: Tensor`<float>`)* Second operand. With broadcasting can be of smaller size than A. " 241 "If broadcasting is disabled it should be of the same size as A.");
242 schema.Output(0,
"C",
"*(type: Tensor`<float>`)* Output tensor with same dimensions and type as A.");
std::vector<TensorShape> ElementwiseOpShapeInference(
    const OperatorDef& def,
    const std::vector<TensorShape>& in) {
  std::vector<TensorShape> out(1);
  out[0].set_data_type(in[0].data_type());
  ArgumentHelper helper(def);
  const bool broadcast = helper.GetSingleArgument<bool>("broadcast", false);
  if (broadcast) {
    out[0].mutable_dims()->CopyFrom(in[0].dims());
  } else {
    const std::vector<int> A_dims(in[0].dims().begin(), in[0].dims().end());
    const std::vector<int> B_dims(in[1].dims().begin(), in[1].dims().end());
    const std::vector<int> C_dims =
        elementwise_ops_utils::ComputeBinaryBroadcastForwardDims(
            A_dims, B_dims);
    for (const int dim : C_dims) {
      out[0].add_dims(dim);
    }
  }
  return out;
}
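// Worked example of the inference above (shapes illustrative): with
// broadcast=1 the output simply inherits A's shape, so A = (2, 3, 4, 5) with
// B = (4, 5) infers C = (2, 3, 4, 5); without broadcast the two shapes are
// merged numpy-style by ComputeBinaryBroadcastForwardDims.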
OPERATOR_SCHEMA(Add)
    .NumInputs(2)
    .NumOutputs(1)
    .AllowInplace({{0, 0}, {1, 0}})
    .CostInferenceFunction(PointwiseCostInference<1>)
    .TensorInferenceFunction(ElementwiseOpShapeInference)
    .FillUsing(MathDocGenerator("addition", kAddExample))
    .InheritOnnxSchema();
OPERATOR_SCHEMA(AddGradient)
    .NumInputs(3)
    .NumOutputs(2)
    .AllowInplace({{0, 0}, {0, 1}});
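// AllowInplace above means the output may alias an input blob. A minimal
// Python sketch (assuming the usual caffe2.python core import): writing the
// sum back into the first operand's blob:
//
//   op = core.CreateOperator("Add", ["A", "B"], ["A"])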
OPERATOR_SCHEMA(Sub)
    .NumInputs(2)
    .NumOutputs(1)
    .AllowInplace({{0, 0}, {1, 0}})
    .CostInferenceFunction(PointwiseCostInference<1>)
    .TensorInferenceFunction(ElementwiseOpShapeInference)
    .FillUsing(MathDocGenerator("subtraction", kSubExample))
    .InheritOnnxSchema();
OPERATOR_SCHEMA(SubGradient)
    .NumInputs(3)
    .NumOutputs(2)
    .AllowInplace({{0, 0}, {0, 1}});
OPERATOR_SCHEMA(Mul)
    .NumInputs(2)
    .NumOutputs(1)
    .AllowInplace({{0, 0}, {1, 0}})
    .CostInferenceFunction(PointwiseCostInference<1>)
    .TensorInferenceFunction(ElementwiseOpShapeInference)
    .FillUsing(MathDocGenerator("multiplication", kMulExample))
    .InheritOnnxSchema();
OPERATOR_SCHEMA(MulGradient)
    .NumInputs(3)
    .NumOutputs(2)
    .AllowInplace({{0, 0}, {0, 1}});
OPERATOR_SCHEMA(Div)
    .NumInputs(2)
    .NumOutputs(1)
    .AllowInplace({{0, 0}})
    .CostInferenceFunction(PointwiseCostInference<1>)
    .TensorInferenceFunction(ElementwiseOpShapeInference)
    .FillUsing(MathDocGenerator("division", kDivExample))
    .InheritOnnxSchema();
OPERATOR_SCHEMA(DivGradient)
    .NumInputs(3, 4)
    .NumOutputs(2)
    .AllowInplace({{0, 0}});
OPERATOR_SCHEMA(SumReduceLike)
    .NumInputs(2)
    .NumOutputs(1)
    .IdenticalTypeAndShapeOfInput(0)
    .SetDoc(R"DOC(
SumReduceLike operator takes 2 tensors as input. It performs a reduce sum on
the first input so that the output looks like the second one.
It assumes that the first input has more dimensions than the second, and that
the dimensions of the second input are a contiguous subset of the dimensions
of the first. For example, the following tensor shapes are supported:

  shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)
  shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar
  shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1
  shape(A) = (2, 3, 2, 5), shape(B) = (2), with axis=0
)DOC")
    .Arg(
        "axis",
        "If set, defines the starting dimension for reduction. Args `axis` and "
        "`axis_str` cannot be used simultaneously.")
    .Arg(
        "axis_str",
        "If set, it could only be N or C or H or W. `order` arg should also be "
        "provided. It defines the reduction dimensions on NCHW or NHWC. Args "
        "`axis` and `axis_str` cannot be used simultaneously.")
    .Arg("order", "Either NHWC or NCHW")
    .Input(
        0,
        "A",
        "First operand, should share the type with the second operand.")
    .Input(
        1,
        "B",
        "Second operand. With broadcasting can be of smaller size than A. "
        "If broadcasting is disabled it should be of the same size.")
    .Output(0, "C", "Result, has same dimensions and type as B");
const char kLTExample[] = R"DOC(
<details>

<summary> <b>Example</b> </summary>

**Code**

```
workspace.ResetWorkspace()

op = core.CreateOperator(
    "LT",
    ["A", "B"],
    ["C"],
)

workspace.FeedBlob("A", np.array([1, 5, 2, 9, 12, 3]))
workspace.FeedBlob("B", np.array([1, 3, 4, 9, 12, 8]))
print("A:", workspace.FetchBlob("A"))
print("B:", workspace.FetchBlob("B"))
workspace.RunOperatorOnce(op)
print("C:", workspace.FetchBlob("C"))

```

**Result**

```
A: [ 1  5  2  9 12  3]
B: [ 1  3  4  9 12  8]
C: [False False  True False False  True]

```

</details>

)DOC";
"DOC( 402 <summary> <b>Example</b> </summary> 408 workspace.ResetWorkspace() 410 op = core.CreateOperator( 416 workspace.FeedBlob("A", np.array([1, 5, 2, 9, 12, 3])) 417 workspace.FeedBlob("B", np.array([1, 3, 4, 9, 12, 8])) 418 print("A:", workspace.FetchBlob("A")) 419 print("B:", workspace.FetchBlob("B")) 420 workspace.RunOperatorOnce(op) 421 print("C:", workspace.FetchBlob("C")) 431 C: [ True False True True True True] 438 const char kGTExample[] = R
"DOC( 441 <summary> <b>Example</b> </summary> 447 workspace.ResetWorkspace() 449 op = core.CreateOperator( 455 workspace.FeedBlob("A", np.array([1, 5, 2, 9, 12, 3])) 456 workspace.FeedBlob("B", np.array([1, 3, 4, 9, 12, 8])) 457 print("A:", workspace.FetchBlob("A")) 458 print("B:", workspace.FetchBlob("B")) 459 workspace.RunOperatorOnce(op) 460 print("C:", workspace.FetchBlob("C")) 470 C: [False True False False False False] 477 const char kGEExample[] = R
"DOC( 480 <summary> <b>Example</b> </summary> 486 workspace.ResetWorkspace() 488 op = core.CreateOperator( 494 workspace.FeedBlob("A", np.array([1, 5, 2, 9, 12, 3])) 495 workspace.FeedBlob("B", np.array([1, 3, 4, 9, 12, 8])) 496 print("A:", workspace.FetchBlob("A")) 497 print("B:", workspace.FetchBlob("B")) 498 workspace.RunOperatorOnce(op) 499 print("C:", workspace.FetchBlob("C")) 509 C: [ True True False True True False] 516 const char kEQExample[] = R
"DOC( 519 <summary> <b>Example</b> </summary> 525 workspace.ResetWorkspace() 527 op = core.CreateOperator( 533 workspace.FeedBlob("A", np.array([1, 5, 2, 9, 12, 3])) 534 workspace.FeedBlob("B", np.array([1, 3, 4, 9, 12, 8])) 535 print("A:", workspace.FetchBlob("A")) 536 print("B:", workspace.FetchBlob("B")) 537 workspace.RunOperatorOnce(op) 538 print("C:", workspace.FetchBlob("C")) 547 C: [ True False False True True False] 553 const char kNEExample[] = R
"DOC( 556 <summary> <b>Example</b> </summary> 561 workspace.ResetWorkspace() 563 op = core.CreateOperator( 569 workspace.FeedBlob("A", np.array([1, 5, 2, 9, 12, 3])) 570 workspace.FeedBlob("B", np.array([1, 3, 4, 9, 12, 8])) 571 print("A:", workspace.FetchBlob("A")) 572 print("B:", workspace.FetchBlob("B")) 573 workspace.RunOperatorOnce(op) 574 print("C:", workspace.FetchBlob("C")) 583 C: [False True True False False True] 589 std::function<void(OpSchema&)> ComparisonDocGenerator( 593 return [=](OpSchema& schema) {
Performs element-wise {desc} comparison **{name}** (with limited broadcast support).

{broadcast_doc}

{extra}
)DOC";
    c10::ReplaceAll(doc, "{name}", name);
    c10::ReplaceAll(doc, "{desc}", desc);
    c10::ReplaceAll(doc, "{broadcast_doc}", kBroadcastDoc);
    c10::ReplaceAll(doc, "{extra}", extra);
    schema.SetDoc(doc);
    schema.Arg(
        "broadcast",
        "*(type: int; default: 0)* Pass 1 to enable broadcasting.");
    schema.Arg(
        "axis",
        "*(type: int; default: -1)* Axis to concatenate on. If set, defines the broadcast dimensions.");
    schema.Input(
        0,
        "A",
        "*(type: Tensor`<bool>`)* First operand, should share the type with the second operand.");
    schema.Input(
        1,
        "B",
        "*(type: Tensor`<bool>`)* Second operand. With broadcasting can be of smaller size than `A`. "
        "If broadcasting is disabled it should be of the same size.");
    schema.Output(
        0,
        "C",
        "*(type: Tensor`<bool>`)* Output tensor with same dimensions as `A`.");
  };
}
#define CAFFE2_SCHEMA_FOR_BINARY_COMPARISON_OP(name, symbol, desc, extra)   \
  OPERATOR_SCHEMA(name)                                                     \
      .NumInputs(2)                                                         \
      .NumOutputs(1)                                                        \
      .TensorInferenceFunction(                                             \
          [](const OperatorDef& def, const vector<TensorShape>& in) {       \
            ArgumentHelper helper(def);                                     \
            const auto broadcasted =                                        \
                helper.GetSingleArgument<bool>("broadcast", false);         \
            if (!broadcasted) {                                             \
              CAFFE_ENFORCE_EQ(in[0].dims().size(), in[1].dims().size());   \
              for (int i = 0; i < in[0].dims().size(); ++i) {               \
                CAFFE_ENFORCE_EQ(in[0].dims(i), in[1].dims(i));             \
              }                                                             \
            }                                                               \
            auto output_dims =                                              \
                std::vector<int64_t>(in[0].dims().begin(), in[0].dims().end()); \
            return vector<TensorShape>{                                     \
                CreateTensorShape(output_dims, TensorProto::BOOL)};         \
          })                                                                \
      .FillUsing(ComparisonDocGenerator(symbol, desc, extra));              \
  SHOULD_NOT_DO_GRADIENT(name)

CAFFE2_SCHEMA_FOR_BINARY_COMPARISON_OP(EQ, "==", "equal to", kEQExample);
CAFFE2_SCHEMA_FOR_BINARY_COMPARISON_OP(NE, "!=", "not equal to", kNEExample);
CAFFE2_SCHEMA_FOR_BINARY_COMPARISON_OP(LT, "<", "less than", kLTExample);
CAFFE2_SCHEMA_FOR_BINARY_COMPARISON_OP(LE, "<=", "less than or equal to", kLEExample);
CAFFE2_SCHEMA_FOR_BINARY_COMPARISON_OP(GT, ">", "greater than", kGTExample);
CAFFE2_SCHEMA_FOR_BINARY_COMPARISON_OP(GE, ">=", "greater than or equal to", kGEExample);

#undef CAFFE2_SCHEMA_FOR_BINARY_COMPARISON_OP
const char kAndExample[] = R"DOC(
<details>

<summary> <b>Example</b> </summary>

**Code**

```
workspace.ResetWorkspace()

op = core.CreateOperator(
    "And",
    ["A", "B"],
    ["C"],
)

workspace.FeedBlob("A", (np.random.rand(3, 3) > 0.5))
workspace.FeedBlob("B", (np.random.rand(3, 3) > 0.5))
print("A:", workspace.FetchBlob("A"))
print("B:", workspace.FetchBlob("B"))
workspace.RunOperatorOnce(op)
print("C:", workspace.FetchBlob("C"))

```

</details>

)DOC";
"DOC( 704 <summary> <b>Example</b> </summary> 710 workspace.ResetWorkspace() 712 op = core.CreateOperator( 718 workspace.FeedBlob("A", (np.random.rand(3, 3) > 0.5)) 719 workspace.FeedBlob("B", (np.random.rand(3, 3) > 0.5)) 720 print("A:", workspace.FetchBlob("A")) 721 print("B:", workspace.FetchBlob("B")) 722 workspace.RunOperatorOnce(op) 723 print("C:", workspace.FetchBlob("C")) 749 const char kXorExample[] = R
"DOC( 752 <summary> <b>Example</b> </summary> 758 workspace.ResetWorkspace() 760 op = core.CreateOperator( 766 workspace.FeedBlob("A", (np.random.rand(3, 3) > 0.5)) 767 workspace.FeedBlob("B", (np.random.rand(3, 3) > 0.5)) 768 print("A:", workspace.FetchBlob("A")) 769 print("B:", workspace.FetchBlob("B")) 770 workspace.RunOperatorOnce(op) 771 print("C:", workspace.FetchBlob("C")) 797 std::function<void(OpSchema&)> LogicalDocGenerator(const char* name,
const char* extra) {
798 return [=](OpSchema& schema) {
Performs element-wise logical operation **{name}** (with limited broadcast support).
Both input operands should be of type `bool`.

{broadcast_doc}

{extra}
)DOC";
    c10::ReplaceAll(doc, "{name}", name);
    c10::ReplaceAll(doc, "{broadcast_doc}", kBroadcastDoc);
    c10::ReplaceAll(doc, "{extra}", extra);
    schema.SetDoc(doc);
    schema.Arg(
        "broadcast",
        "*(type: int; default: 0)* Pass 1 to enable broadcasting.");
    schema.Arg(
        "axis",
        "*(type: int; default: -1)* Axis to concatenate on. If set, defines the broadcast dimensions.");
    schema.Input(0, "A", "*(type: Tensor`<bool>`)* First operand.");
    schema.Input(
        1,
        "B",
        "*(type: Tensor`<bool>`)* Second operand. With broadcasting can be of smaller size than `A`. "
        "If broadcasting is disabled it should be of the same size.");
    schema.Output(
        0,
        "C",
        "*(type: Tensor`<bool>`)* Output tensor of booleans. Has same dimensions as input `A`.");
  };
}
#define CAFFE2_SCHEMA_FOR_BINARY_LOGICAL_OP(name, symbol, onnx_schema, extra) \
  OPERATOR_SCHEMA(name)                                                       \
      .NumInputs(2)                                                           \
      .NumOutputs(1)                                                          \
      .AllowInplace({{0, 0}})                                                 \
      .FillUsing(LogicalDocGenerator(symbol, extra))                          \
      .TensorInferenceFunction(ElementwiseOpShapeInference)                   \
      .InheritOnnxSchema(onnx_schema);                                        \
  SHOULD_NOT_DO_GRADIENT(name)

CAFFE2_SCHEMA_FOR_BINARY_LOGICAL_OP(Or, "or", "Or", kOrExample);
CAFFE2_SCHEMA_FOR_BINARY_LOGICAL_OP(And, "and", "And", kAndExample);
CAFFE2_SCHEMA_FOR_BINARY_LOGICAL_OP(Xor, "xor", "Xor", kXorExample);
#undef CAFFE2_SCHEMA_FOR_BINARY_LOGICAL_OP

std::function<void(OpSchema&)> BitwiseDocGenerator(const char* name) {
  return [=](OpSchema& schema) {
    string doc = R"DOC(
Performs element-wise bitwise operation `{name}` (with limited broadcast support).
Both input operands should be of an integral type.
{broadcast_doc})DOC";
    c10::ReplaceAll(doc, "{name}", name);
    c10::ReplaceAll(doc, "{broadcast_doc}", kBroadcastDoc);
    schema.SetDoc(doc);
    schema.Arg(
        "broadcast",
        "*(type: int; default: 0)* Pass 1 to enable broadcasting.");
    schema.Arg(
        "axis",
        "*(type: int; default: -1)* Axis to concatenate on. If set, defines the broadcast dimensions.");
    schema.Input(0, "A", "*(type: Tensor)* First operand.");
    schema.Input(
        1,
        "B",
        "*(type: Tensor)* Second operand. With broadcasting can be of smaller size than `A`. "
        "If broadcasting is disabled it should be of the same size.");
    schema.Output(
        0,
        "C",
        "*(type: Tensor)* Output tensor. Has same dimensions as input `A`.");
  };
}
#define CAFFE2_SCHEMA_FOR_BINARY_BITWISE_OP(name, symbol) \
  OPERATOR_SCHEMA(name)                                   \
      .NumInputs(2)                                       \
      .NumOutputs(1)                                      \
      .AllowInplace({{0, 0}})                             \
      .FillUsing(BitwiseDocGenerator(symbol));            \
  SHOULD_NOT_DO_GRADIENT(name)

CAFFE2_SCHEMA_FOR_BINARY_BITWISE_OP(BitwiseOr, "bitwise_or");
CAFFE2_SCHEMA_FOR_BINARY_BITWISE_OP(BitwiseAnd, "bitwise_and");
CAFFE2_SCHEMA_FOR_BINARY_BITWISE_OP(BitwiseXor, "bitwise_xor");
#undef CAFFE2_SCHEMA_FOR_BINARY_BITWISE_OP

OPERATOR_SCHEMA(Not)
    .NumInputs(1)
    .NumOutputs(1)
    .SetDoc(R"DOC(
Performs element-wise negation on input tensor `X`.

Github Links:

- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_op_schema.cc

<details>

<summary> <b>Example</b> </summary>

**Code**

```
workspace.ResetWorkspace()

op = core.CreateOperator(
    "Not",
    ["X"],
    ["Y"],
)

workspace.FeedBlob("X", (np.random.rand(3, 3) > 0.5))
print("X:", workspace.FetchBlob("X"))
workspace.RunOperatorOnce(op)
print("Y:", workspace.FetchBlob("Y"))

```

</details>

)DOC")
    .Input(0, "X", "*(Tensor`<bool>`)* Input tensor.")
    .Output(0, "Y", "*(Tensor`<bool>`)* Negated output tensor.")
    .InheritOnnxSchema();
SHOULD_NOT_DO_GRADIENT(Not);
OPERATOR_SCHEMA(Sign)
    .NumInputs(1)
    .NumOutputs(1)
    .SetDoc(R"DOC(
Computes sign for each element of the input: -1, 0 or 1.

Github Links:

- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_op_schema.cc

<details>

<summary> <b>Example</b> </summary>

**Code**

```
workspace.ResetWorkspace()

op = core.CreateOperator(
    "Sign",
    ["X"],
    ["Y"],
)

workspace.FeedBlob("X", (np.random.rand(3, 3).astype(np.float32) - np.random.rand(3, 3).astype(np.float32)))
print("X:", workspace.FetchBlob("X"))
workspace.RunOperatorOnce(op)
print("Y:", workspace.FetchBlob("Y"))

```

**Result**

```
X:
[[ 0.02816287  0.22408086 -0.30342305]
 [-0.18481976  0.03948995  0.39698976]
 [-0.63304734 -0.6919183  -0.31524038]]
Y:
[[ 1.  1. -1.]
 [-1.  1.  1.]
 [-1. -1. -1.]]

```

</details>

)DOC")
    .Input(0, "X", "*(type: Tensor`<float>`)* Input data tensor.")
    .Output(0, "Y", "*(type: Tensor`<float>`)* Output tensor.");
SHOULD_NOT_DO_GRADIENT(Sign);
} // namespace caffe2