diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/NDArrayAPIBase.scala b/scala-package/core/src/main/scala/org/apache/mxnet/NDArrayAPIBase.scala new file mode 100644 index 000000000..57c7747e1 --- /dev/null +++ b/scala-package/core/src/main/scala/org/apache/mxnet/NDArrayAPIBase.scala @@ -0,0 +1,9925 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.mxnet + +import org.apache.mxnet.annotation.Experimental + +// scalastyle:off +abstract class NDArrayAPIBase { + /** + * + * {{{ + * + * Applies an activation function element-wise to the input. + * + * The following activation functions are supported: + * + * - `relu`: Rectified Linear Unit, :math:`y = max(x, 0)` + * - `sigmoid`: :math:`y = \frac{1}{1 + exp(-x)}` + * - `tanh`: Hyperbolic tangent, :math:`y = \frac{exp(x) - exp(-x)}{exp(x) + exp(-x)}` + * - `softrelu`: Soft ReLU, or SoftPlus, :math:`y = log(1 + exp(x))` + * - `softsign`: :math:`y = \frac{x}{1 + abs(x)}` + * + * + * + * Defined in src/operator/nn/activation.cc:L168 + * }}} + * + * @param data The input array. + * @param act_type Activation function to be applied. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def Activation (data : org.apache.mxnet.NDArray, act_type : String, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Batch normalization. + * + * Normalizes a data batch by mean and variance, and applies a scale ``gamma`` as + * well as offset ``beta``. + * + * Assume the input has more than one dimension and we normalize along axis 1. + * We first compute the mean and variance along this axis: + * + * .. math:: + * + * data\_mean[i] = mean(data[:,i,:,...]) \\ + * data\_var[i] = var(data[:,i,:,...]) + * + * Then compute the normalized output, which has the same shape as input, as following: + * + * .. math:: + * + * out[:,i,:,...] = \frac{data[:,i,:,...] - data\_mean[i]}{\sqrt{data\_var[i]+\epsilon}} * gamma[i] + beta[i] + * + * Both *mean* and *var* returns a scalar by treating the input as a vector. + * + * Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` + * have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and + * the inverse of ``data_var``, which are needed for the backward pass. Note that gradient of these + * two outputs are blocked. + * + * Besides the inputs and the outputs, this operator accepts two auxiliary + * states, ``moving_mean`` and ``moving_var``, which are *k*-length + * vectors. 
They are global statistics for the whole dataset, which are updated + * by:: + * + * moving_mean = moving_mean * momentum + data_mean * (1 - momentum) + * moving_var = moving_var * momentum + data_var * (1 - momentum) + * + * If ``use_global_stats`` is set to be true, then ``moving_mean`` and + * ``moving_var`` are used instead of ``data_mean`` and ``data_var`` to compute + * the output. It is often used during inference. + * + * The parameter ``axis`` specifies which axis of the input shape denotes + * the 'channel' (separately normalized groups). The default is 1. Specifying -1 sets the channel + * axis to be the last item in the input shape. + * + * Both ``gamma`` and ``beta`` are learnable parameters. But if ``fix_gamma`` is true, + * then set ``gamma`` to 1 and its gradient to 0. + * + * .. Note:: + * When ``fix_gamma`` is set to True, no sparse support is provided. If ``fix_gamma is`` set to False, + * the sparse tensors will fallback. + * + * + * + * Defined in src/operator/nn/batch_norm.cc:L571 + * }}} + * + * @param data Input data to batch normalization + * @param gamma gamma array + * @param beta beta array + * @param moving_mean running mean of input + * @param moving_var running variance of input + * @param eps Epsilon to prevent div 0. Must be no less than CUDNN_BN_MIN_EPSILON defined in cudnn.h when using cudnn (usually 1e-5) + * @param momentum Momentum for moving average + * @param fix_gamma Fix gamma while training + * @param use_global_stats Whether use global moving statistics instead of local batch-norm. This will force change batch-norm into a scale shift operator. + * @param output_mean_var Output the mean and inverse std + * @param axis Specify which shape axis the channel is specified + * @param cudnn_off Do not select CUDNN operator, if available + * @param min_calib_range The minimum scalar value in the form of float32 obtained through calibration. If present, it will be used to by quantized batch norm op to calculate primitive scale.Note: this calib_range is to calib bn output. + * @param max_calib_range The maximum scalar value in the form of float32 obtained through calibration. If present, it will be used to by quantized batch norm op to calculate primitive scale.Note: this calib_range is to calib bn output. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def BatchNorm (data : org.apache.mxnet.NDArray, gamma : org.apache.mxnet.NDArray, beta : org.apache.mxnet.NDArray, moving_mean : org.apache.mxnet.NDArray, moving_var : org.apache.mxnet.NDArray, eps : Option[Double] = None, momentum : Option[Float] = None, fix_gamma : Option[Boolean] = None, use_global_stats : Option[Boolean] = None, output_mean_var : Option[Boolean] = None, axis : Option[Int] = None, cudnn_off : Option[Boolean] = None, min_calib_range : Option[Float] = None, max_calib_range : Option[Float] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Batch normalization. + * + * This operator is DEPRECATED. Perform BatchNorm on the input. + * + * Normalizes a data batch by mean and variance, and applies a scale ``gamma`` as + * well as offset ``beta``. + * + * Assume the input has more than one dimension and we normalize along axis 1. + * We first compute the mean and variance along this axis: + * + * .. math:: + * + * data\_mean[i] = mean(data[:,i,:,...]) \\ + * data\_var[i] = var(data[:,i,:,...]) + * + * Then compute the normalized output, which has the same shape as input, as following: + * + * .. math:: + * + * out[:,i,:,...] 
= \frac{data[:,i,:,...] - data\_mean[i]}{\sqrt{data\_var[i]+\epsilon}} * gamma[i] + beta[i] + * + * Both *mean* and *var* returns a scalar by treating the input as a vector. + * + * Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` + * have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and + * ``data_var`` as well, which are needed for the backward pass. + * + * Besides the inputs and the outputs, this operator accepts two auxiliary + * states, ``moving_mean`` and ``moving_var``, which are *k*-length + * vectors. They are global statistics for the whole dataset, which are updated + * by:: + * + * moving_mean = moving_mean * momentum + data_mean * (1 - momentum) + * moving_var = moving_var * momentum + data_var * (1 - momentum) + * + * If ``use_global_stats`` is set to be true, then ``moving_mean`` and + * ``moving_var`` are used instead of ``data_mean`` and ``data_var`` to compute + * the output. It is often used during inference. + * + * Both ``gamma`` and ``beta`` are learnable parameters. But if ``fix_gamma`` is true, + * then set ``gamma`` to 1 and its gradient to 0. + * + * There's no sparse support for this operator, and it will exhibit problematic behavior if used with + * sparse tensors. + * + * + * + * Defined in src/operator/batch_norm_v1.cc:L95 + * }}} + * + * @param data Input data to batch normalization + * @param gamma gamma array + * @param beta beta array + * @param eps Epsilon to prevent div 0 + * @param momentum Momentum for moving average + * @param fix_gamma Fix gamma while training + * @param use_global_stats Whether use global moving statistics instead of local batch-norm. This will force change batch-norm into a scale shift operator. + * @param output_mean_var Output All,normal mean and var + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def BatchNorm_v1 (data : org.apache.mxnet.NDArray, gamma : org.apache.mxnet.NDArray, beta : org.apache.mxnet.NDArray, eps : Option[Float] = None, momentum : Option[Float] = None, fix_gamma : Option[Boolean] = None, use_global_stats : Option[Boolean] = None, output_mean_var : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Applies bilinear sampling to input feature map. + * + * Bilinear Sampling is the key of [NIPS2015] \"Spatial Transformer Networks\". The usage of the operator is very similar to remap function in OpenCV, + * except that the operator has the backward pass. + * + * Given :math:`data` and :math:`grid`, then the output is computed by + * + * .. math:: + * x_{src} = grid[batch, 0, y_{dst}, x_{dst}] \\ + * y_{src} = grid[batch, 1, y_{dst}, x_{dst}] \\ + * output[batch, channel, y_{dst}, x_{dst}] = G(data[batch, channel, y_{src}, x_{src}) + * + * :math:`x_{dst}`, :math:`y_{dst}` enumerate all spatial locations in :math:`output`, and :math:`G()` denotes the bilinear interpolation kernel. + * The out-boundary points will be padded with zeros.The shape of the output will be (data.shape[0], data.shape[1], grid.shape[2], grid.shape[3]). + * + * The operator assumes that :math:`data` has 'NCHW' layout and :math:`grid` has been normalized to [-1, 1]. + * + * BilinearSampler often cooperates with GridGenerator which generates sampling grids for BilinearSampler. + * GridGenerator supports two kinds of transformation: ``affine`` and ``warp``. + * If users want to design a CustomOp to manipulate :math:`grid`, please firstly refer to the code of GridGenerator. 
+ * + * Example 1:: + * + * ## Zoom out data two times + * data = array(`[ [`[ [1, 4, 3, 6], + * [1, 8, 8, 9], + * [0, 4, 1, 5], + * [1, 0, 1, 3] ] ] ]) + * + * affine_matrix = array(`[ [2, 0, 0], + * [0, 2, 0] ]) + * + * affine_matrix = reshape(affine_matrix, shape=(1, 6)) + * + * grid = GridGenerator(data=affine_matrix, transform_type='affine', target_shape=(4, 4)) + * + * out = BilinearSampler(data, grid) + * + * out + * `[ [`[ [ 0, 0, 0, 0], + * [ 0, 3.5, 6.5, 0], + * [ 0, 1.25, 2.5, 0], + * [ 0, 0, 0, 0] ] ] + * + * + * Example 2:: + * + * ## shift data horizontally by -1 pixel + * + * data = array(`[ [`[ [1, 4, 3, 6], + * [1, 8, 8, 9], + * [0, 4, 1, 5], + * [1, 0, 1, 3] ] ] ]) + * + * warp_maxtrix = array(`[ [`[ [1, 1, 1, 1], + * [1, 1, 1, 1], + * [1, 1, 1, 1], + * [1, 1, 1, 1] ], + * `[ [0, 0, 0, 0], + * [0, 0, 0, 0], + * [0, 0, 0, 0], + * [0, 0, 0, 0] ] ] ]) + * + * grid = GridGenerator(data=warp_matrix, transform_type='warp') + * out = BilinearSampler(data, grid) + * + * out + * `[ [`[ [ 4, 3, 6, 0], + * [ 8, 8, 9, 0], + * [ 4, 1, 5, 0], + * [ 0, 1, 3, 0] ] ] + * + * + * Defined in src/operator/bilinear_sampler.cc:L256 + * }}} + * + * @param data Input data to the BilinearsamplerOp. + * @param grid Input grid to the BilinearsamplerOp.grid has two channels: x_src, y_src + * @param cudnn_off whether to turn cudnn off + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def BilinearSampler (data : org.apache.mxnet.NDArray, grid : org.apache.mxnet.NDArray, cudnn_off : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Stops gradient computation. + * + * Stops the accumulated gradient of the inputs from flowing through this operator + * in the backward direction. In other words, this operator prevents the contribution + * of its inputs to be taken into account for computing gradients. + * + * Example:: + * + * v1 = [1, 2] + * v2 = [0, 1] + * a = Variable('a') + * b = Variable('b') + * b_stop_grad = stop_gradient(3 * b) + * loss = MakeLoss(b_stop_grad + a) + * + * executor = loss.simple_bind(ctx=cpu(), a=(1,2), b=(1,2)) + * executor.forward(is_train=True, a=v1, b=v2) + * executor.outputs + * [ 1. 5.] + * + * executor.backward() + * executor.grad_arrays + * [ 0. 0.] + * [ 1. 1.] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L327 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def BlockGrad (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Connectionist Temporal Classification Loss. + * + * .. note:: The existing alias ``contrib_CTCLoss`` is deprecated. + * + * The shapes of the inputs and outputs: + * + * - **data**: `(sequence_length, batch_size, alphabet_size)` + * - **label**: `(batch_size, label_sequence_length)` + * - **out**: `(batch_size)` + * + * The `data` tensor consists of sequences of activation vectors (without applying softmax), + * with i-th channel in the last dimension corresponding to i-th label + * for i between 0 and alphabet_size-1 (i.e always 0-indexed). + * Alphabet size should include one additional value reserved for blank label. + * When `blank_label` is ``"first"``, the ``0``-th channel is be reserved for + * activation of blank label, or otherwise if it is "last", ``(alphabet_size-1)``-th channel should be + * reserved for blank label. + * + * ``label`` is an index matrix of integers. 
When `blank_label` is ``"first"``,
 + * the value 0 is then reserved for blank label, and should not be passed in this matrix. Otherwise,
 + * when `blank_label` is ``"last"``, the value `(alphabet_size-1)` is reserved for blank label.
 + *
 + * If a sequence of labels is shorter than *label_sequence_length*, use the special
 + * padding value at the end of the sequence to conform it to the correct
 + * length. The padding value is `0` when `blank_label` is ``"first"``, and `-1` otherwise.
 + *
 + * For example, suppose the vocabulary is `[a, b, c]`, and in one batch we have three sequences
 + * 'ba', 'cbb', and 'abac'. When `blank_label` is ``"first"``, we can index the labels as
 + * `{'a': 1, 'b': 2, 'c': 3}`, and we reserve the 0-th channel for blank label in data tensor.
 + * The resulting `label` tensor should be padded to be::
 + *
 + * `[ [2, 1, 0, 0], [3, 2, 2, 0], [1, 2, 1, 3] ]
 + *
 + * When `blank_label` is ``"last"``, we can index the labels as
 + * `{'a': 0, 'b': 1, 'c': 2}`, and we reserve the channel index 3 for blank label in data tensor.
 + * The resulting `label` tensor should be padded to be::
 + *
 + * `[ [1, 0, -1, -1], [2, 1, 1, -1], [0, 1, 0, 2] ]
 + *
 + * ``out`` is a list of CTC loss values, one per example in the batch.
 + *
 + * See *Connectionist Temporal Classification: Labelling Unsegmented
 + * Sequence Data with Recurrent Neural Networks*, A. Graves *et al*. for more
 + * information on the definition and the algorithm.
 + *
 + *
 + *
 + * Defined in src/operator/nn/ctc_loss.cc:L100
 + * }}}
 + *
 + * @param data Input ndarray
 + * @param label Ground-truth labels for the loss.
 + * @param data_lengths Lengths of data for each of the samples. Only required when use_data_lengths is true.
 + * @param label_lengths Lengths of labels for each of the samples. Only required when use_label_lengths is true.
 + * @param use_data_lengths Whether the data lengths are decided by `data_lengths`. If false, the lengths are equal to the max sequence length.
 + * @param use_label_lengths Whether the label lengths are decided by `label_lengths`, or derived from `padding_mask`. If false, the lengths are derived from the first occurrence of the value of `padding_mask`. The value of `padding_mask` is ``0`` when first CTC label is reserved for blank, and ``-1`` when last label is reserved for blank. See `blank_label`.
 + * @param blank_label Set the label that is reserved for blank label. If "first", 0-th label is reserved, and label values for tokens in the vocabulary are between ``1`` and ``alphabet_size-1``, and the padding mask is ``-1``. If "last", last label value ``alphabet_size-1`` is reserved for blank label instead, and label values for tokens in the vocabulary are between ``0`` and ``alphabet_size-2``, and the padding mask is ``0``.
 + * @return org.apache.mxnet.NDArrayFuncReturn
 + */
 +@Experimental
 +def CTCLoss (data : org.apache.mxnet.NDArray, label : org.apache.mxnet.NDArray, data_lengths : org.apache.mxnet.NDArray, label_lengths : org.apache.mxnet.NDArray, use_data_lengths : Option[Boolean] = None, use_label_lengths : Option[Boolean] = None, blank_label : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn
 + /**
 + *
 + * {{{
 + *
 + * Casts all elements of the input to a new type.
 + *
 + * .. note:: ``Cast`` is deprecated. Use ``cast`` instead.
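+ *
+ * A one-line Scala sketch, assuming the concrete `NDArray.api` object that implements
+ * this base class (`head` picks the first output NDArray)::
+ *
+ *   import org.apache.mxnet.{NDArray, Shape}
+ *   val x = NDArray.array(Array(0.9f, 1.3f), Shape(2))
+ *   val y = NDArray.api.Cast(x, "int32").head   // [0, 1]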
+ * + * Example:: + * + * cast([0.9, 1.3], dtype='int32') = [0, 1] + * cast([1e20, 11.1], dtype='float16') = [inf, 11.09375] + * cast([300, 11.1, 10.9, -1, -3], dtype='uint8') = [44, 11, 10, 255, 253] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L665 + * }}} + * + * @param data The input. + * @param dtype Output data type. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def Cast (data : org.apache.mxnet.NDArray, dtype : String, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Joins input arrays along a given axis. + * + * .. note:: `Concat` is deprecated. Use `concat` instead. + * + * The dimensions of the input arrays should be the same except the axis along + * which they will be concatenated. + * The dimension of the output array along the concatenated axis will be equal + * to the sum of the corresponding dimensions of the input arrays. + * + * The storage type of ``concat`` output depends on storage types of inputs + * + * - concat(csr, csr, ..., csr, dim=0) = csr + * - otherwise, ``concat`` generates output with default storage + * + * Example:: + * + * x = `[ [1,1],[2,2] ] + * y = `[ [3,3],[4,4],[5,5] ] + * z = `[ [6,6], [7,7],[8,8] ] + * + * concat(x,y,z,dim=0) = `[ [ 1., 1.], + * [ 2., 2.], + * [ 3., 3.], + * [ 4., 4.], + * [ 5., 5.], + * [ 6., 6.], + * [ 7., 7.], + * [ 8., 8.] ] + * + * Note that you cannot concat x,y,z along dimension 1 since dimension + * 0 is not the same for all the input arrays. + * + * concat(y,z,dim=1) = `[ [ 3., 3., 6., 6.], + * [ 4., 4., 7., 7.], + * [ 5., 5., 8., 8.] ] + * + * + * + * Defined in src/operator/nn/concat.cc:L383 + * }}} + * + * @param data List of arrays to concatenate + * @param num_args Number of inputs to be concated. + * @param dim the dimension to be concated. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def Concat (data : Array[org.apache.mxnet.NDArray], num_args : Int, dim : Option[Int] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Compute *N*-D convolution on *(N+2)*-D input. + * + * In the 2-D convolution, given input data with shape *(batch_size, + * channel, height, width)*, the output is computed by + * + * .. math:: + * + * out[n,i,:,:] = bias[i] + \sum_{j=0}^{channel} data[n,j,:,:] \star + * weight[i,j,:,:] + * + * where :math:`\star` is the 2-D cross-correlation operator. + * + * For general 2-D convolution, the shapes are + * + * - **data**: *(batch_size, channel, height, width)* + * - **weight**: *(num_filter, channel, kernel[0], kernel[1])* + * - **bias**: *(num_filter,)* + * - **out**: *(batch_size, num_filter, out_height, out_width)*. + * + * Define:: + * + * f(x,k,p,s,d) = floor((x+2*p-d*(k-1)-1)/s)+1 + * + * then we have:: + * + * out_height=f(height, kernel[0], pad[0], stride[0], dilate[0]) + * out_width=f(width, kernel[1], pad[1], stride[1], dilate[1]) + * + * If ``no_bias`` is set to be true, then the ``bias`` term is ignored. + * + * The default data ``layout`` is *NCHW*, namely *(batch_size, channel, height, + * width)*. We can choose other layouts such as *NWC*. + * + * If ``num_group`` is larger than 1, denoted by *g*, then split the input ``data`` + * evenly into *g* parts along the channel axis, and also evenly split ``weight`` + * along the first dimension. Next compute the convolution on the *i*-th part of + * the data with the *i*-th weight part. The output is obtained by concatenating all + * the *g* results. 
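+ *
+ * For reference, a hypothetical 2-D call through the Scala bindings generated from this
+ * class might look as follows (shapes are illustrative; real weights are learned)::
+ *
+ *   import org.apache.mxnet.{NDArray, Shape}
+ *   val data   = NDArray.ones(Shape(1, 3, 32, 32))   // NCHW input
+ *   val weight = NDArray.ones(Shape(8, 3, 3, 3))     // num_filter x channel x kH x kW
+ *   val bias   = NDArray.zeros(Shape(8))
+ *   val out = NDArray.api.Convolution(data, weight, bias,
+ *     kernel = Shape(3, 3), num_filter = 8).head     // shape (1, 8, 30, 30)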
+ * + * 1-D convolution does not have *height* dimension but only *width* in space. + * + * - **data**: *(batch_size, channel, width)* + * - **weight**: *(num_filter, channel, kernel[0])* + * - **bias**: *(num_filter,)* + * - **out**: *(batch_size, num_filter, out_width)*. + * + * 3-D convolution adds an additional *depth* dimension besides *height* and + * *width*. The shapes are + * + * - **data**: *(batch_size, channel, depth, height, width)* + * - **weight**: *(num_filter, channel, kernel[0], kernel[1], kernel[2])* + * - **bias**: *(num_filter,)* + * - **out**: *(batch_size, num_filter, out_depth, out_height, out_width)*. + * + * Both ``weight`` and ``bias`` are learnable parameters. + * + * There are other options to tune the performance. + * + * - **cudnn_tune**: enable this option leads to higher startup time but may give + * faster speed. Options are + * + * - **off**: no tuning + * - **limited_workspace**:run test and pick the fastest algorithm that doesn't + * exceed workspace limit. + * - **fastest**: pick the fastest algorithm and ignore workspace limit. + * - **None** (default): the behavior is determined by environment variable + * ``MXNET_CUDNN_AUTOTUNE_DEFAULT``. 0 for off, 1 for limited workspace + * (default), 2 for fastest. + * + * - **workspace**: A large number leads to more (GPU) memory usage but may improve + * the performance. + * + * + * + * Defined in src/operator/nn/convolution.cc:L473 + * }}} + * + * @param data Input data to the ConvolutionOp. + * @param weight Weight matrix. + * @param bias Bias parameter. + * @param kernel Convolution kernel size: (w,), (h, w) or (d, h, w) + * @param stride Convolution stride: (w,), (h, w) or (d, h, w). Defaults to 1 for each dimension. + * @param dilate Convolution dilate: (w,), (h, w) or (d, h, w). Defaults to 1 for each dimension. + * @param pad Zero pad for convolution: (w,), (h, w) or (d, h, w). Defaults to no padding. + * @param num_filter Convolution filter(channel) number + * @param num_group Number of group partitions. + * @param workspace Maximum temporary workspace allowed (MB) in convolution.This parameter has two usages. When CUDNN is not used, it determines the effective batch size of the convolution kernel. When CUDNN is used, it controls the maximum temporary storage used for tuning the best CUDNN kernel when `limited_workspace` strategy is used. + * @param no_bias Whether to disable bias parameter. + * @param cudnn_tune Whether to pick convolution algo by running performance test. + * @param cudnn_off Turn off cudnn for this layer. + * @param layout Set layout for input, output and weight. Empty for + default layout: NCW for 1d, NCHW for 2d and NCDHW for 3d.NHWC and NDHWC are only supported on GPU. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def Convolution (data : org.apache.mxnet.NDArray, weight : org.apache.mxnet.NDArray, bias : org.apache.mxnet.NDArray, kernel : org.apache.mxnet.Shape, stride : Option[org.apache.mxnet.Shape] = None, dilate : Option[org.apache.mxnet.Shape] = None, pad : Option[org.apache.mxnet.Shape] = None, num_filter : Int, num_group : Option[Int] = None, workspace : Option[Long] = None, no_bias : Option[Boolean] = None, cudnn_tune : Option[String] = None, cudnn_off : Option[Boolean] = None, layout : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * This operator is DEPRECATED. Apply convolution to input then add a bias. + * }}} + * + * @param data Input data to the ConvolutionV1Op. 
+ * @param weight Weight matrix.
 + * @param bias Bias parameter.
 + * @param kernel convolution kernel size: (h, w) or (d, h, w)
 + * @param stride convolution stride: (h, w) or (d, h, w)
 + * @param dilate convolution dilate: (h, w) or (d, h, w)
 + * @param pad pad for convolution: (h, w) or (d, h, w)
 + * @param num_filter convolution filter(channel) number
 + * @param num_group Number of group partitions. Equivalent to slicing input into num_group
 + partitions, apply convolution on each, then concatenate the results
 + * @param workspace Maximum temporary workspace allowed for convolution (MB). This parameter determines the effective batch size of the convolution kernel, which may be smaller than the given batch size. Also, the workspace will be automatically enlarged to make sure that we can run the kernel with batch_size=1
 + * @param no_bias Whether to disable bias parameter.
 + * @param cudnn_tune Whether to pick convolution algo by running performance test.
 + Leads to higher startup time but may give faster speed. Options are:
 + 'off': no tuning
 + 'limited_workspace': run test and pick the fastest algorithm that doesn't exceed workspace limit.
 + 'fastest': pick the fastest algorithm and ignore workspace limit.
 + If set to None (default), behavior is determined by environment
 + variable MXNET_CUDNN_AUTOTUNE_DEFAULT: 0 for off,
 + 1 for limited workspace (default), 2 for fastest.
 + * @param cudnn_off Turn off cudnn for this layer.
 + * @param layout Set layout for input, output and weight. Empty for
 + default layout: NCHW for 2d and NCDHW for 3d.
 + * @return org.apache.mxnet.NDArrayFuncReturn
 + */
 +@Experimental
 +def Convolution_v1 (data : org.apache.mxnet.NDArray, weight : org.apache.mxnet.NDArray, bias : org.apache.mxnet.NDArray, kernel : org.apache.mxnet.Shape, stride : Option[org.apache.mxnet.Shape] = None, dilate : Option[org.apache.mxnet.Shape] = None, pad : Option[org.apache.mxnet.Shape] = None, num_filter : Int, num_group : Option[Int] = None, workspace : Option[Long] = None, no_bias : Option[Boolean] = None, cudnn_tune : Option[String] = None, cudnn_off : Option[Boolean] = None, layout : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn
 + /**
 + *
 + * {{{
 + *
 + * Applies correlation to inputs.
 + *
 + * The correlation layer performs multiplicative patch comparisons between two feature maps.
 + *
 + * Given two multi-channel feature maps :math:`f_{1}, f_{2}`, with :math:`w`, :math:`h`, and :math:`c` being their width, height, and number of channels,
 + * the correlation layer lets the network compare each patch from :math:`f_{1}` with each patch from :math:`f_{2}`.
 + *
 + * For now we consider only a single comparison of two patches. The 'correlation' of two patches centered at :math:`x_{1}` in the first map and
 + * :math:`x_{2}` in the second map is then defined as:
 + *
 + * .. math::
 + *
 + * c(x_{1}, x_{2}) = \sum_{o \in [-k,k] \times [-k,k]} \langle f_{1}(x_{1} + o), f_{2}(x_{2} + o) \rangle
 + *
 + * for a square patch of size :math:`K:=2k+1`.
 + *
 + * Note that the equation above is identical to one step of a convolution in neural networks, but instead of convolving data with a filter, it convolves data with other
 + * data. For this reason, it has no training weights.
 + *
 + * Computing :math:`c(x_{1}, x_{2})` involves :math:`c * K^{2}` multiplications. Comparing all patch combinations involves :math:`w^{2}*h^{2}` such computations.
+ * + * Given a maximum displacement :math:`d`, for each location :math:`x_{1}` it computes correlations :math:`c(x_{1}, x_{2})` only in a neighborhood of size :math:`D:=2d+1`, + * by limiting the range of :math:`x_{2}`. We use strides :math:`s_{1}, s_{2}`, to quantize :math:`x_{1}` globally and to quantize :math:`x_{2}` within the neighborhood + * centered around :math:`x_{1}`. + * + * The final output is defined by the following expression: + * + * .. math:: + * out[n, q, i, j] = c(x_{i, j}, x_{q}) + * + * where :math:`i` and :math:`j` enumerate spatial locations in :math:`f_{1}`, and :math:`q` denotes the :math:`q^{th}` neighborhood of :math:`x_{i,j}`. + * + * + * Defined in src/operator/correlation.cc:L198 + * }}} + * + * @param data1 Input data1 to the correlation. + * @param data2 Input data2 to the correlation. + * @param kernel_size kernel size for Correlation must be an odd number + * @param max_displacement Max displacement of Correlation + * @param stride1 stride1 quantize data1 globally + * @param stride2 stride2 quantize data2 within the neighborhood centered around data1 + * @param pad_size pad for Correlation + * @param is_multiply operation type is either multiplication or subduction + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def Correlation (data1 : org.apache.mxnet.NDArray, data2 : org.apache.mxnet.NDArray, kernel_size : Option[Int] = None, max_displacement : Option[Int] = None, stride1 : Option[Int] = None, stride2 : Option[Int] = None, pad_size : Option[Int] = None, is_multiply : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * + * + * .. note:: `Crop` is deprecated. Use `slice` instead. + * + * Crop the 2nd and 3rd dim of input data, with the corresponding size of h_w or + * with width and height of the second input symbol, i.e., with one input, we need h_w to + * specify the crop height and width, otherwise the second input symbol's size will be used + * + * + * Defined in src/operator/crop.cc:L50 + * }}} + * + * @param data Tensor or List of Tensors, the second input will be used as crop_like shape reference + * @param num_args Number of inputs for crop, if equals one, then we will use the h_wfor crop height and width, else if equals two, then we will use the heightand width of the second input symbol, we name crop_like here + * @param offset crop offset coordinate: (y, x) + * @param h_w crop height and width: (h, w) + * @param center_crop If set to true, then it will use be the center_crop,or it will crop using the shape of crop_like + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def Crop (data : Array[org.apache.mxnet.NDArray], num_args : Int, offset : Option[org.apache.mxnet.Shape] = None, h_w : Option[org.apache.mxnet.Shape] = None, center_crop : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes 1D or 2D transposed convolution (aka fractionally strided convolution) of the input tensor. This operation can be seen as the gradient of Convolution operation with respect to its input. Convolution usually reduces the size of the input. Transposed convolution works the other way, going from a smaller input to a larger output while preserving the connectivity pattern. + * }}} + * + * @param data Input tensor to the deconvolution operation. + * @param weight Weights representing the kernel. + * @param bias Bias added to the result after the deconvolution operation. 
+ * @param kernel Deconvolution kernel size: (w,), (h, w) or (d, h, w). This is same as the kernel size used for the corresponding convolution + * @param stride The stride used for the corresponding convolution: (w,), (h, w) or (d, h, w). Defaults to 1 for each dimension. + * @param dilate Dilation factor for each dimension of the input: (w,), (h, w) or (d, h, w). Defaults to 1 for each dimension. + * @param pad The amount of implicit zero padding added during convolution for each dimension of the input: (w,), (h, w) or (d, h, w). ``(kernel-1)/2`` is usually a good choice. If `target_shape` is set, `pad` will be ignored and a padding that will generate the target shape will be used. Defaults to no padding. + * @param adj Adjustment for output shape: (w,), (h, w) or (d, h, w). If `target_shape` is set, `adj` will be ignored and computed accordingly. + * @param target_shape Shape of the output tensor: (w,), (h, w) or (d, h, w). + * @param num_filter Number of output filters. + * @param num_group Number of groups partition. + * @param workspace Maximum temporary workspace allowed (MB) in deconvolution.This parameter has two usages. When CUDNN is not used, it determines the effective batch size of the deconvolution kernel. When CUDNN is used, it controls the maximum temporary storage used for tuning the best CUDNN kernel when `limited_workspace` strategy is used. + * @param no_bias Whether to disable bias parameter. + * @param cudnn_tune Whether to pick convolution algorithm by running performance test. + * @param cudnn_off Turn off cudnn for this layer. + * @param layout Set layout for input, output and weight. Empty for default layout, NCW for 1d, NCHW for 2d and NCDHW for 3d.NHWC and NDHWC are only supported on GPU. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def Deconvolution (data : org.apache.mxnet.NDArray, weight : org.apache.mxnet.NDArray, bias : org.apache.mxnet.NDArray, kernel : org.apache.mxnet.Shape, stride : Option[org.apache.mxnet.Shape] = None, dilate : Option[org.apache.mxnet.Shape] = None, pad : Option[org.apache.mxnet.Shape] = None, adj : Option[org.apache.mxnet.Shape] = None, target_shape : Option[org.apache.mxnet.Shape] = None, num_filter : Int, num_group : Option[Int] = None, workspace : Option[Long] = None, no_bias : Option[Boolean] = None, cudnn_tune : Option[String] = None, cudnn_off : Option[Boolean] = None, layout : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Applies dropout operation to input array. + * + * - During training, each element of the input is set to zero with probability p. + * The whole array is rescaled by :math:`1/(1-p)` to keep the expected + * sum of the input unchanged. + * + * - During testing, this operator does not change the input if mode is 'training'. + * If mode is 'always', the same computaion as during training will be applied. + * + * Example:: + * + * random.seed(998) + * input_array = array(`[ [3., 0.5, -0.5, 2., 7.], + * [2., -0.4, 7., 3., 0.2] ]) + * a = symbol.Variable('a') + * dropout = symbol.Dropout(a, p = 0.2) + * executor = dropout.simple_bind(a = input_array.shape) + * + * ## If training + * executor.forward(is_train = True, a = input_array) + * executor.outputs + * `[ [ 3.75 0.625 -0. 2.5 8.75 ] + * [ 2.5 -0.5 8.75 3.75 0. ] ] + * + * ## If testing + * executor.forward(is_train = False, a = input_array) + * executor.outputs + * `[ [ 3. 0.5 -0.5 2. 7. ] + * [ 2. -0.4 7. 3. 
0.2 ] ] + * + * + * Defined in src/operator/nn/dropout.cc:L96 + * }}} + * + * @param data Input array to which dropout will be applied. + * @param p Fraction of the input that gets dropped out during training time. + * @param mode Whether to only turn on dropout during training or to also turn on for inference. + * @param axes Axes for variational dropout kernel. + * @param cudnn_off Whether to turn off cudnn in dropout operator. This option is ignored if axes is specified. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def Dropout (data : org.apache.mxnet.NDArray, p : Option[Float] = None, mode : Option[String] = None, axes : Option[org.apache.mxnet.Shape] = None, cudnn_off : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Adds all input arguments element-wise. + * + * .. math:: + * add\_n(a_1, a_2, ..., a_n) = a_1 + a_2 + ... + a_n + * + * ``add_n`` is potentially more efficient than calling ``add`` by `n` times. + * + * The storage type of ``add_n`` output depends on storage types of inputs + * + * - add_n(row_sparse, row_sparse, ..) = row_sparse + * - add_n(default, csr, default) = default + * - add_n(any input combinations longer than 4 (>4) with at least one default type) = default + * - otherwise, ``add_n`` falls all inputs back to default storage and generates default storage + * + * + * + * Defined in src/operator/tensor/elemwise_sum.cc:L155 + * }}} + * + * @param args Positional input arguments + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def ElementWiseSum (args : Array[org.apache.mxnet.NDArray], out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Maps integer indices to vector representations (embeddings). + * + * This operator maps words to real-valued vectors in a high-dimensional space, + * called word embeddings. These embeddings can capture semantic and syntactic properties of the words. + * For example, it has been noted that in the learned embedding spaces, similar words tend + * to be close to each other and dissimilar words far apart. + * + * For an input array of shape (d1, ..., dK), + * the shape of an output array is (d1, ..., dK, output_dim). + * All the input values should be integers in the range [0, input_dim). + * + * If the input_dim is ip0 and output_dim is op0, then shape of the embedding weight matrix must be + * (ip0, op0). + * + * When "sparse_grad" is False, if any index mentioned is too large, it is replaced by the index that + * addresses the last vector in an embedding matrix. + * When "sparse_grad" is True, an error will be raised if invalid indices are found. + * + * Examples:: + * + * input_dim = 4 + * output_dim = 5 + * + * // Each row in weight matrix y represents a word. So, y = (w0,w1,w2,w3) + * y = `[ [ 0., 1., 2., 3., 4.], + * [ 5., 6., 7., 8., 9.], + * [ 10., 11., 12., 13., 14.], + * [ 15., 16., 17., 18., 19.] ] + * + * // Input array x represents n-grams(2-gram). So, x = [(w1,w3), (w0,w2)] + * x = `[ [ 1., 3.], + * [ 0., 2.] ] + * + * // Mapped input x to its vector representation y. + * Embedding(x, y, 4, 5) = `[ `[ [ 5., 6., 7., 8., 9.], + * [ 15., 16., 17., 18., 19.] ], + * + * `[ [ 0., 1., 2., 3., 4.], + * [ 10., 11., 12., 13., 14.] ] ] + * + * + * The storage type of weight can be either row_sparse or default. + * + * .. Note:: + * + * If "sparse_grad" is set to True, the storage type of gradient w.r.t weights will be + * "row_sparse". 
Only a subset of optimizers support sparse gradients, including SGD, AdaGrad + * and Adam. Note that by default lazy updates is turned on, which may perform differently + * from standard updates. For more details, please check the Optimization API at: + * https://mxnet.incubator.apache.org/api/python/optimization/optimization.html + * + * + * + * Defined in src/operator/tensor/indexing_op.cc:L539 + * }}} + * + * @param data The input array to the embedding operator. + * @param weight The embedding weight matrix. + * @param input_dim Vocabulary size of the input indices. + * @param output_dim Dimension of the embedding vectors. + * @param dtype Data type of weight. + * @param sparse_grad Compute row sparse gradient in the backward calculation. If set to True, the grad's storage type is row_sparse. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def Embedding (data : org.apache.mxnet.NDArray, weight : org.apache.mxnet.NDArray, input_dim : Int, output_dim : Int, dtype : Option[String] = None, sparse_grad : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Flattens the input array into a 2-D array by collapsing the higher dimensions. + * .. note:: `Flatten` is deprecated. Use `flatten` instead. + * For an input array with shape ``(d1, d2, ..., dk)``, `flatten` operation reshapes + * the input array into an output array of shape ``(d1, d2*...*dk)``. + * Note that the behavior of this function is different from numpy.ndarray.flatten, + * which behaves similar to mxnet.ndarray.reshape((-1,)). + * Example:: + * x = `[ [ + * [1,2,3], + * [4,5,6], + * [7,8,9] + * ], + * [ [1,2,3], + * [4,5,6], + * [7,8,9] + * ] ], + * flatten(x) = `[ [ 1., 2., 3., 4., 5., 6., 7., 8., 9.], + * [ 1., 2., 3., 4., 5., 6., 7., 8., 9.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L250 + * }}} + * + * @param data Input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def Flatten (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Applies a linear transformation: :math:`Y = XW^T + b`. + * + * If ``flatten`` is set to be true, then the shapes are: + * + * - **data**: `(batch_size, x1, x2, ..., xn)` + * - **weight**: `(num_hidden, x1 * x2 * ... * xn)` + * - **bias**: `(num_hidden,)` + * - **out**: `(batch_size, num_hidden)` + * + * If ``flatten`` is set to be false, then the shapes are: + * + * - **data**: `(x1, x2, ..., xn, input_dim)` + * - **weight**: `(num_hidden, input_dim)` + * - **bias**: `(num_hidden,)` + * - **out**: `(x1, x2, ..., xn, num_hidden)` + * + * The learnable parameters include both ``weight`` and ``bias``. + * + * If ``no_bias`` is set to be true, then the ``bias`` term is ignored. + * + * .. Note:: + * + * The sparse support for FullyConnected is limited to forward evaluation with `row_sparse` + * weight and bias, where the length of `weight.indices` and `bias.indices` must be equal + * to `num_hidden`. This could be useful for model inference with `row_sparse` weights + * trained with importance sampling or noise contrastive estimation. + * + * To compute linear transformation with 'csr' sparse data, sparse.dot is recommended instead + * of sparse.FullyConnected. + * + * + * + * Defined in src/operator/nn/fully_connected.cc:L291 + * }}} + * + * @param data Input data. + * @param weight Weight matrix. + * @param bias Bias parameter. + * @param num_hidden Number of hidden nodes of the output. 
+ * @param no_bias Whether to disable bias parameter. + * @param flatten Whether to collapse all but the first axis of the input data tensor. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def FullyConnected (data : org.apache.mxnet.NDArray, weight : org.apache.mxnet.NDArray, bias : org.apache.mxnet.NDArray, num_hidden : Int, no_bias : Option[Boolean] = None, flatten : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Generates 2D sampling grid for bilinear sampling. + * }}} + * + * @param data Input data to the function. + * @param transform_type The type of transformation. For `affine`, input data should be an affine matrix of size (batch, 6). For `warp`, input data should be an optical flow of size (batch, 2, h, w). + * @param target_shape Specifies the output shape (H, W). This is required if transformation type is `affine`. If transformation type is `warp`, this parameter is ignored. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def GridGenerator (data : org.apache.mxnet.NDArray, transform_type : String, target_shape : Option[org.apache.mxnet.Shape] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Group normalization. + * + * The input channels are separated into ``num_groups`` groups, each containing ``num_channels / num_groups`` channels. + * The mean and standard-deviation are calculated separately over the each group. + * + * .. math:: + * + * data = data.reshape((N, num_groups, C // num_groups, ...)) + * out = \frac{data - mean(data, axis)}{\sqrt{var(data, axis) + \epsilon}} * gamma + beta + * + * Both ``gamma`` and ``beta`` are learnable parameters. + * + * + * + * Defined in src/operator/nn/group_norm.cc:L77 + * }}} + * + * @param data Input data + * @param gamma gamma array + * @param beta beta array + * @param num_groups Total number of groups. + * @param eps An `epsilon` parameter to prevent division by 0. + * @param output_mean_var Output the mean and std calculated along the given axis. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def GroupNorm (data : org.apache.mxnet.NDArray, gamma : org.apache.mxnet.NDArray, beta : org.apache.mxnet.NDArray, num_groups : Option[Int] = None, eps : Option[Float] = None, output_mean_var : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Apply a sparse regularization to the output a sigmoid activation function. + * }}} + * + * @param data Input data. + * @param sparseness_target The sparseness target + * @param penalty The tradeoff parameter for the sparseness penalty + * @param momentum The momentum for running average + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def IdentityAttachKLSparseReg (data : org.apache.mxnet.NDArray, sparseness_target : Option[Float] = None, penalty : Option[Float] = None, momentum : Option[Float] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Applies instance normalization to the n-dimensional input array. + * + * This operator takes an n-dimensional input array where (n>2) and normalizes + * the input using the following formula: + * + * .. 
math:: + * + * out = \frac{x - mean[data]}{ \sqrt{Var[data]} + \epsilon} * gamma + beta + * + * This layer is similar to batch normalization layer (`BatchNorm`) + * with two differences: first, the normalization is + * carried out per example (instance), not over a batch. Second, the + * same normalization is applied both at test and train time. This + * operation is also known as `contrast normalization`. + * + * If the input data is of shape [batch, channel, spacial_dim1, spacial_dim2, ...], + * `gamma` and `beta` parameters must be vectors of shape [channel]. + * + * This implementation is based on this paper [1]_ + * + * .. [1] Instance Normalization: The Missing Ingredient for Fast Stylization, + * D. Ulyanov, A. Vedaldi, V. Lempitsky, 2016 (arXiv:1607.08022v2). + * + * Examples:: + * + * // Input of shape (2,1,2) + * x = `[ `[ [ 1.1, 2.2] ], + * `[ [ 3.3, 4.4] ] ] + * + * // gamma parameter of length 1 + * gamma = [1.5] + * + * // beta parameter of length 1 + * beta = [0.5] + * + * // Instance normalization is calculated with the above formula + * InstanceNorm(x,gamma,beta) = `[ `[ [-0.997527 , 1.99752665] ], + * `[ [-0.99752653, 1.99752724] ] ] + * + * + * + * Defined in src/operator/instance_norm.cc:L95 + * }}} + * + * @param data An n-dimensional input array (n > 2) of the form [batch, channel, spatial_dim1, spatial_dim2, ...]. + * @param gamma A vector of length 'channel', which multiplies the normalized input. + * @param beta A vector of length 'channel', which is added to the product of the normalized input and the weight. + * @param eps An `epsilon` parameter to prevent division by 0. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def InstanceNorm (data : org.apache.mxnet.NDArray, gamma : org.apache.mxnet.NDArray, beta : org.apache.mxnet.NDArray, eps : Option[Float] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Normalize the input array using the L2 norm. + * + * For 1-D NDArray, it computes:: + * + * out = data / sqrt(sum(data ** 2) + eps) + * + * For N-D NDArray, if the input array has shape (N, N, ..., N), + * + * with ``mode`` = ``instance``, it normalizes each instance in the multidimensional + * array by its L2 norm.:: + * + * for i in 0...N + * out[i,:,:,...,:] = data[i,:,:,...,:] / sqrt(sum(data[i,:,:,...,:] ** 2) + eps) + * + * with ``mode`` = ``channel``, it normalizes each channel in the array by its L2 norm.:: + * + * for i in 0...N + * out[:,i,:,...,:] = data[:,i,:,...,:] / sqrt(sum(data[:,i,:,...,:] ** 2) + eps) + * + * with ``mode`` = ``spatial``, it normalizes the cross channel norm for each position + * in the array by its L2 norm.:: + * + * for dim in 2...N + * for i in 0...N + * out[.....,i,...] 
= take(out, indices=i, axis=dim) / sqrt(sum(take(out, indices=i, axis=dim) ** 2) + eps) + * -dim- + * + * Example:: + * + * x = `[ `[ [1,2], + * [3,4] ], + * `[ [2,2], + * [5,6] ] ] + * + * L2Normalization(x, mode='instance') + * =`[ `[ [ 0.18257418 0.36514837] + * [ 0.54772252 0.73029673] ] + * `[ [ 0.24077171 0.24077171] + * [ 0.60192931 0.72231513] ] ] + * + * L2Normalization(x, mode='channel') + * =`[ `[ [ 0.31622776 0.44721359] + * [ 0.94868326 0.89442718] ] + * `[ [ 0.37139067 0.31622776] + * [ 0.92847669 0.94868326] ] ] + * + * L2Normalization(x, mode='spatial') + * =`[ `[ [ 0.44721359 0.89442718] + * [ 0.60000002 0.80000001] ] + * `[ [ 0.70710677 0.70710677] + * [ 0.6401844 0.76822126] ] ] + * + * + * + * Defined in src/operator/l2_normalization.cc:L196 + * }}} + * + * @param data Input array to normalize. + * @param eps A small constant for numerical stability. + * @param mode Specify the dimension along which to compute L2 norm. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def L2Normalization (data : org.apache.mxnet.NDArray, eps : Option[Float] = None, mode : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Applies local response normalization to the input. + * + * The local response normalization layer performs "lateral inhibition" by normalizing + * over local input regions. + * + * If :math:`a_{x,y}^{i}` is the activity of a neuron computed by applying kernel :math:`i` at position + * :math:`(x, y)` and then applying the ReLU nonlinearity, the response-normalized + * activity :math:`b_{x,y}^{i}` is given by the expression: + * + * .. math:: + * b_{x,y}^{i} = \frac{a_{x,y}^{i}}{\Bigg({k + \frac{\alpha}{n} \sum_{j=max(0, i-\frac{n}{2})}^{min(N-1, i+\frac{n}{2})} (a_{x,y}^{j})^{2}}\Bigg)^{\beta}} + * + * where the sum runs over :math:`n` "adjacent" kernel maps at the same spatial position, and :math:`N` is the total + * number of kernels in the layer. + * + * + * + * Defined in src/operator/nn/lrn.cc:L164 + * }}} + * + * @param data Input data to LRN + * @param alpha The variance scaling parameter :math:`lpha` in the LRN expression. + * @param beta The power parameter :math:`eta` in the LRN expression. + * @param knorm The parameter :math:`k` in the LRN expression. + * @param nsize normalization window width in elements. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def LRN (data : org.apache.mxnet.NDArray, alpha : Option[Float] = None, beta : Option[Float] = None, knorm : Option[Float] = None, nsize : Int, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Layer normalization. + * + * Normalizes the channels of the input tensor by mean and variance, and applies a scale ``gamma`` as + * well as offset ``beta``. + * + * Assume the input has more than one dimension and we normalize along axis 1. + * We first compute the mean and variance along this axis and then + * compute the normalized output, which has the same shape as input, as following: + * + * .. math:: + * + * out = \frac{data - mean(data, axis)}{\sqrt{var(data, axis) + \epsilon}} * gamma + beta + * + * Both ``gamma`` and ``beta`` are learnable parameters. + * + * Unlike BatchNorm and InstanceNorm, the *mean* and *var* are computed along the channel dimension. + * + * Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` + * have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and + * ``data_std``. 
Note that no gradient will be passed through these two outputs. + * + * The parameter ``axis`` specifies which axis of the input shape denotes + * the 'channel' (separately normalized groups). The default is -1, which sets the channel + * axis to be the last item in the input shape. + * + * + * + * Defined in src/operator/nn/layer_norm.cc:L156 + * }}} + * + * @param data Input data to layer normalization + * @param gamma gamma array + * @param beta beta array + * @param axis The axis to perform layer normalization. Usually, this should be be axis of the channel dimension. Negative values means indexing from right to left. + * @param eps An `epsilon` parameter to prevent division by 0. + * @param output_mean_var Output the mean and std calculated along the given axis. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def LayerNorm (data : org.apache.mxnet.NDArray, gamma : org.apache.mxnet.NDArray, beta : org.apache.mxnet.NDArray, axis : Option[Int] = None, eps : Option[Float] = None, output_mean_var : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Applies Leaky rectified linear unit activation element-wise to the input. + * + * Leaky ReLUs attempt to fix the "dying ReLU" problem by allowing a small `slope` + * when the input is negative and has a slope of one when input is positive. + * + * The following modified ReLU Activation functions are supported: + * + * - *elu*: Exponential Linear Unit. `y = x > 0 ? x : slope * (exp(x)-1)` + * - *selu*: Scaled Exponential Linear Unit. `y = lambda * (x > 0 ? x : alpha * (exp(x) - 1))` where + * *lambda = 1.0507009873554804934193349852946* and *alpha = 1.6732632423543772848170429916717*. + * - *leaky*: Leaky ReLU. `y = x > 0 ? x : slope * x` + * - *prelu*: Parametric ReLU. This is same as *leaky* except that `slope` is learnt during training. + * - *rrelu*: Randomized ReLU. same as *leaky* but the `slope` is uniformly and randomly chosen from + * *[lower_bound, upper_bound)* for training, while fixed to be + * *(lower_bound+upper_bound)/2* for inference. + * + * + * + * Defined in src/operator/leaky_relu.cc:L161 + * }}} + * + * @param data Input data to activation function. + * @param gamma Input data to activation function. + * @param act_type Activation function to be applied. + * @param slope Init slope for the activation. (For leaky and elu only) + * @param lower_bound Lower bound of random slope. (For rrelu only) + * @param upper_bound Upper bound of random slope. (For rrelu only) + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def LeakyReLU (data : org.apache.mxnet.NDArray, gamma : org.apache.mxnet.NDArray, act_type : Option[String] = None, slope : Option[Float] = None, lower_bound : Option[Float] = None, upper_bound : Option[Float] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes and optimizes for squared loss during backward propagation. + * Just outputs ``data`` during forward propagation. + * + * If :math:`\hat{y}_i` is the predicted value of the i-th sample, and :math:`y_i` is the corresponding target value, + * then the squared loss estimated over :math:`n` samples is defined as + * + * :math:`\text{SquaredLoss}(\textbf{Y}, \hat{\textbf{Y}} ) = \frac{1}{n} \sum_{i=0}^{n-1} \lVert \textbf{y}_i - \hat{\textbf{y}}_i \rVert_2` + * + * .. note:: + * Use the LinearRegressionOutput as the final output layer of a net. 
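+ *
+ * A small Scala sketch, assuming the concrete `NDArray.api` object that implements this
+ * base class (the forward pass simply returns ``data``; the squared-loss gradient only
+ * matters during training)::
+ *
+ *   import org.apache.mxnet.{NDArray, Shape}
+ *   val pred  = NDArray.array(Array(0.2f, 0.7f, 1.1f), Shape(3, 1))
+ *   val label = NDArray.array(Array(0f, 1f, 1f), Shape(3, 1))
+ *   val out   = NDArray.api.LinearRegressionOutput(pred, label).head   // same values as pred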
+ * + * The storage type of ``label`` can be ``default`` or ``csr`` + * + * - LinearRegressionOutput(default, default) = default + * - LinearRegressionOutput(default, csr) = default + * + * By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. + * The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. + * + * + * + * Defined in src/operator/regression_output.cc:L92 + * }}} + * + * @param data Input data to the function. + * @param label Input label to the function. + * @param grad_scale Scale the gradient by a float factor + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def LinearRegressionOutput (data : org.apache.mxnet.NDArray, label : org.apache.mxnet.NDArray, grad_scale : Option[Float] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Applies a logistic function to the input. + * + * The logistic function, also known as the sigmoid function, is computed as + * :math:`\frac{1}{1+exp(-\textbf{x})}`. + * + * Commonly, the sigmoid is used to squash the real-valued output of a linear model + * :math:`wTx+b` into the [0,1] range so that it can be interpreted as a probability. + * It is suitable for binary classification or probability prediction tasks. + * + * .. note:: + * Use the LogisticRegressionOutput as the final output layer of a net. + * + * The storage type of ``label`` can be ``default`` or ``csr`` + * + * - LogisticRegressionOutput(default, default) = default + * - LogisticRegressionOutput(default, csr) = default + * + * The loss function used is the Binary Cross Entropy Loss: + * + * :math:`-{(y\log(p) + (1 - y)\log(1 - p))}` + * + * Where `y` is the ground truth probability of positive outcome for a given example, and `p` the probability predicted by the model. By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. + * The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. + * + * + * + * Defined in src/operator/regression_output.cc:L152 + * }}} + * + * @param data Input data to the function. + * @param label Input label to the function. + * @param grad_scale Scale the gradient by a float factor + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def LogisticRegressionOutput (data : org.apache.mxnet.NDArray, label : org.apache.mxnet.NDArray, grad_scale : Option[Float] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes mean absolute error of the input. + * + * MAE is a risk metric corresponding to the expected value of the absolute error. + * + * If :math:`\hat{y}_i` is the predicted value of the i-th sample, and :math:`y_i` is the corresponding target value, + * then the mean absolute error (MAE) estimated over :math:`n` samples is defined as + * + * :math:`\text{MAE}(\textbf{Y}, \hat{\textbf{Y}} ) = \frac{1}{n} \sum_{i=0}^{n-1} \lVert \textbf{y}_i - \hat{\textbf{y}}_i \rVert_1` + * + * .. note:: + * Use the MAERegressionOutput as the final output layer of a net. + * + * The storage type of ``label`` can be ``default`` or ``csr`` + * + * - MAERegressionOutput(default, default) = default + * - MAERegressionOutput(default, csr) = default + * + * By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. 
+ * The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. + * + * + * + * Defined in src/operator/regression_output.cc:L120 + * }}} + * + * @param data Input data to the function. + * @param label Input label to the function. + * @param grad_scale Scale the gradient by a float factor + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def MAERegressionOutput (data : org.apache.mxnet.NDArray, label : org.apache.mxnet.NDArray, grad_scale : Option[Float] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Make your own loss function in network construction. + * + * This operator accepts a customized loss function symbol as a terminal loss and + * the symbol should be an operator with no backward dependency. + * The output of this function is the gradient of loss with respect to the input data. + * + * For example, if you are a making a cross entropy loss function. Assume ``out`` is the + * predicted output and ``label`` is the true label, then the cross entropy can be defined as:: + * + * cross_entropy = label * log(out) + (1 - label) * log(1 - out) + * loss = MakeLoss(cross_entropy) + * + * We will need to use ``MakeLoss`` when we are creating our own loss function or we want to + * combine multiple loss functions. Also we may want to stop some variables' gradients + * from backpropagation. See more detail in ``BlockGrad`` or ``stop_gradient``. + * + * In addition, we can give a scale to the loss by setting ``grad_scale``, + * so that the gradient of the loss will be rescaled in the backpropagation. + * + * .. note:: This operator should be used as a Symbol instead of NDArray. + * + * + * + * Defined in src/operator/make_loss.cc:L71 + * }}} + * + * @param data Input array. + * @param grad_scale Gradient scale as a supplement to unary and binary operators + * @param valid_thresh clip each element in the array to 0 when it is less than ``valid_thresh``. This is used when ``normalization`` is set to ``'valid'``. + * @param normalization If this is set to null, the output gradient will not be normalized. If this is set to batch, the output gradient will be divided by the batch size. If this is set to valid, the output gradient will be divided by the number of valid input elements. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def MakeLoss (data : org.apache.mxnet.NDArray, grad_scale : Option[Float] = None, valid_thresh : Option[Float] = None, normalization : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Pads an input array with a constant or edge values of the array. + * + * .. note:: `Pad` is deprecated. Use `pad` instead. + * + * .. note:: Current implementation only supports 4D and 5D input arrays with padding applied + * only on axes 1, 2 and 3. Expects axes 4 and 5 in `pad_width` to be zero. + * + * This operation pads an input array with either a `constant_value` or edge values + * along each axis of the input array. The amount of padding is specified by `pad_width`. + * + * `pad_width` is a tuple of integer padding widths for each axis of the format + * ``(before_1, after_1, ... , before_N, after_N)``. The `pad_width` should be of length ``2*N`` + * where ``N`` is the number of dimensions of the array. + * + * For dimension ``N`` of the input array, ``before_N`` and ``after_N`` indicates how many values + * to add before and after the elements of the array along dimension ``N``. 
+ * The widths of the higher two dimensions ``before_1``, ``after_1``, ``before_2``, + * ``after_2`` must be 0. + * + * Example:: + * + * x = `[ [`[ [ 1. 2. 3.] + * [ 4. 5. 6.] ] + * + * `[ [ 7. 8. 9.] + * [ 10. 11. 12.] ] ] + * + * + * `[ `[ [ 11. 12. 13.] + * [ 14. 15. 16.] ] + * + * `[ [ 17. 18. 19.] + * [ 20. 21. 22.] ] ] ] + * + * pad(x,mode="edge", pad_width=(0,0,0,0,1,1,1,1)) = + * + * `[ [`[ [ 1. 1. 2. 3. 3.] + * [ 1. 1. 2. 3. 3.] + * [ 4. 4. 5. 6. 6.] + * [ 4. 4. 5. 6. 6.] ] + * + * `[ [ 7. 7. 8. 9. 9.] + * [ 7. 7. 8. 9. 9.] + * [ 10. 10. 11. 12. 12.] + * [ 10. 10. 11. 12. 12.] ] ] + * + * + * `[ `[ [ 11. 11. 12. 13. 13.] + * [ 11. 11. 12. 13. 13.] + * [ 14. 14. 15. 16. 16.] + * [ 14. 14. 15. 16. 16.] ] + * + * `[ [ 17. 17. 18. 19. 19.] + * [ 17. 17. 18. 19. 19.] + * [ 20. 20. 21. 22. 22.] + * [ 20. 20. 21. 22. 22.] ] ] ] + * + * pad(x, mode="constant", constant_value=0, pad_width=(0,0,0,0,1,1,1,1)) = + * + * `[ [`[ [ 0. 0. 0. 0. 0.] + * [ 0. 1. 2. 3. 0.] + * [ 0. 4. 5. 6. 0.] + * [ 0. 0. 0. 0. 0.] ] + * + * `[ [ 0. 0. 0. 0. 0.] + * [ 0. 7. 8. 9. 0.] + * [ 0. 10. 11. 12. 0.] + * [ 0. 0. 0. 0. 0.] ] ] + * + * + * `[ `[ [ 0. 0. 0. 0. 0.] + * [ 0. 11. 12. 13. 0.] + * [ 0. 14. 15. 16. 0.] + * [ 0. 0. 0. 0. 0.] ] + * + * `[ [ 0. 0. 0. 0. 0.] + * [ 0. 17. 18. 19. 0.] + * [ 0. 20. 21. 22. 0.] + * [ 0. 0. 0. 0. 0.] ] ] ] + * + * + * + * + * Defined in src/operator/pad.cc:L766 + * }}} + * + * @param data An n-dimensional input array. + * @param mode Padding type to use. "constant" pads with `constant_value` "edge" pads using the edge values of the input array "reflect" pads by reflecting values with respect to the edges. + * @param pad_width Widths of the padding regions applied to the edges of each axis. It is a tuple of integer padding widths for each axis of the format ``(before_1, after_1, ... , before_N, after_N)``. It should be of length ``2*N`` where ``N`` is the number of dimensions of the array.This is equivalent to pad_width in numpy.pad, but flattened. + * @param constant_value The value used for padding when `mode` is "constant". + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def Pad (data : org.apache.mxnet.NDArray, mode : String, pad_width : org.apache.mxnet.Shape, constant_value : Option[Double] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Performs pooling on the input. + * + * The shapes for 1-D pooling are + * + * - **data** and **out**: *(batch_size, channel, width)* (NCW layout) or + * *(batch_size, width, channel)* (NWC layout), + * + * The shapes for 2-D pooling are + * + * - **data** and **out**: *(batch_size, channel, height, width)* (NCHW layout) or + * *(batch_size, height, width, channel)* (NHWC layout), + * + * out_height = f(height, kernel[0], pad[0], stride[0]) + * out_width = f(width, kernel[1], pad[1], stride[1]) + * + * The definition of *f* depends on ``pooling_convention``, which has two options: + * + * - **valid** (default):: + * + * f(x, k, p, s) = floor((x+2*p-k)/s)+1 + * + * - **full**, which is compatible with Caffe:: + * + * f(x, k, p, s) = ceil((x+2*p-k)/s)+1 + * + * When ``global_pool`` is set to be true, then global pooling is performed. It will reset + * ``kernel=(height, width)`` and set the appropiate padding to 0. 
+ * + * Four pooling options are supported by ``pool_type``: + * + * - **avg**: average pooling + * - **max**: max pooling + * - **sum**: sum pooling + * - **lp**: Lp pooling + * + * For 3-D pooling, an additional *depth* dimension is added before + * *height*. Namely the input data and output will have shape *(batch_size, channel, depth, + * height, width)* (NCDHW layout) or *(batch_size, depth, height, width, channel)* (NDHWC layout). + * + * Notes on Lp pooling: + * + * Lp pooling was first introduced by this paper: https://arxiv.org/pdf/1204.3968.pdf. + * L-1 pooling is simply sum pooling, while L-inf pooling is simply max pooling. + * We can see that Lp pooling stands between those two; in practice, the most common value for p is 2. + * + * For each window ``X``, the mathematical expression for Lp pooling is: + * + * :math:`f(X) = \sqrt[p]{\sum_{x}^{X} x^p}` + * + * + * + * Defined in src/operator/nn/pooling.cc:L417 + * }}} + * + * @param data Input data to the pooling operator. + * @param kernel Pooling kernel size: (y, x) or (d, y, x) + * @param pool_type Pooling type to be applied. + * @param global_pool Ignore kernel size, do global pooling based on current input feature map. + * @param cudnn_off Turn off cudnn pooling and use MXNet pooling operator. + * @param pooling_convention Pooling convention to be applied. + * @param stride Stride: for pooling (y, x) or (d, y, x). Defaults to 1 for each dimension. + * @param pad Pad for pooling: (y, x) or (d, y, x). Defaults to no padding. + * @param p_value Value of p for Lp pooling, can be 1 or 2, required for Lp Pooling. + * @param count_include_pad Only used for AvgPool; specifies whether to count padding elements for the average calculation. For example, with a 5*5 kernel on a 3*3 corner of an image, the sum of the 9 valid elements will be divided by 25 if this is set to true, or by 9 if this is set to false. Defaults to true. + * @param layout Set layout for input and output. Empty for default layout: NCW for 1d, NCHW for 2d and NCDHW for 3d. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def Pooling (data : org.apache.mxnet.NDArray, kernel : Option[org.apache.mxnet.Shape] = None, pool_type : Option[String] = None, global_pool : Option[Boolean] = None, cudnn_off : Option[Boolean] = None, pooling_convention : Option[String] = None, stride : Option[org.apache.mxnet.Shape] = None, pad : Option[org.apache.mxnet.Shape] = None, p_value : Option[Int] = None, count_include_pad : Option[Boolean] = None, layout : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * This operator is DEPRECATED. + * Perform pooling on the input. + * + * The shapes for 2-D pooling are + * + * - **data**: *(batch_size, channel, height, width)* + * - **out**: *(batch_size, num_filter, out_height, out_width)*, with:: + * + * out_height = f(height, kernel[0], pad[0], stride[0]) + * out_width = f(width, kernel[1], pad[1], stride[1]) + * + * The definition of *f* depends on ``pooling_convention``, which has two options: + * + * - **valid** (default):: + * + * f(x, k, p, s) = floor((x+2*p-k)/s)+1 + * + * - **full**, which is compatible with Caffe:: + * + * f(x, k, p, s) = ceil((x+2*p-k)/s)+1 + * + * But if ``global_pool`` is set to true, then global pooling is performed, namely resetting + * ``kernel=(height, width)``.
+ * + * Three pooling options are supported by ``pool_type``: + * + * - **avg**: average pooling + * - **max**: max pooling + * - **sum**: sum pooling + * + * 1-D pooling is a special case of 2-D pooling with *width=1* and + * *kernel[1]=1*. + * + * For 3-D pooling, an additional *depth* dimension is added before + * *height*. Namely the input data will have shape *(batch_size, channel, depth, + * height, width)*. + * + * + * + * Defined in src/operator/pooling_v1.cc:L104 + * }}} + * + * @param data Input data to the pooling operator. + * @param kernel pooling kernel size: (y, x) or (d, y, x) + * @param pool_type Pooling type to be applied. + * @param global_pool Ignore kernel size, do global pooling based on current input feature map. + * @param pooling_convention Pooling convention to be applied. + * @param stride stride: for pooling (y, x) or (d, y, x) + * @param pad pad for pooling: (y, x) or (d, y, x) + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def Pooling_v1 (data : org.apache.mxnet.NDArray, kernel : Option[org.apache.mxnet.Shape] = None, pool_type : Option[String] = None, global_pool : Option[Boolean] = None, pooling_convention : Option[String] = None, stride : Option[org.apache.mxnet.Shape] = None, pad : Option[org.apache.mxnet.Shape] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Applies recurrent layers to input data. Currently, vanilla RNN, LSTM and GRU are + * implemented, with both multi-layer and bidirectional support. + * + * When the input data is of type float32 and the environment variables MXNET_CUDA_ALLOW_TENSOR_CORE + * and MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION are set to 1, this operator will try to use + * pseudo-float16 precision (float32 math with float16 I/O) in order to use + * Tensor Cores on suitable NVIDIA GPUs. This can sometimes give significant speedups. + * + * **Vanilla RNN** + * + * Applies a single-gate recurrent layer to input X. Two kinds of activation function are supported: + * ReLU and Tanh. + * + * With ReLU activation function: + * + * .. math:: + * h_t = relu(W_{ih} * x_t + b_{ih} + W_{hh} * h_{(t-1)} + b_{hh}) + * + * With Tanh activation function: + * + * .. math:: + * h_t = \tanh(W_{ih} * x_t + b_{ih} + W_{hh} * h_{(t-1)} + b_{hh}) + * + * Reference paper: Finding structure in time - Elman, 1988. + * https://crl.ucsd.edu/~elman/Papers/fsit.pdf + * + * **LSTM** + * + * Long Short-Term Memory - Hochreiter, 1997. http://www.bioinf.jku.at/publications/older/2604.pdf + * + * .. math:: + * \begin{array}{ll} + * i_t = \mathrm{sigmoid}(W_{ii} x_t + b_{ii} + W_{hi} h_{(t-1)} + b_{hi}) \\ + * f_t = \mathrm{sigmoid}(W_{if} x_t + b_{if} + W_{hf} h_{(t-1)} + b_{hf}) \\ + * g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hc} h_{(t-1)} + b_{hg}) \\ + * o_t = \mathrm{sigmoid}(W_{io} x_t + b_{io} + W_{ho} h_{(t-1)} + b_{ho}) \\ + * c_t = f_t * c_{(t-1)} + i_t * g_t \\ + * h_t = o_t * \tanh(c_t) + * \end{array} + * + * **GRU** + * + * Gated Recurrent Unit - Cho et al. 2014. http://arxiv.org/abs/1406.1078 + * + * The definition of GRU here is slightly different from the paper but compatible with CUDNN. + * + * .. 
math:: + * \begin{array}{ll} + * r_t = \mathrm{sigmoid}(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\ + * z_t = \mathrm{sigmoid}(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\ + * n_t = \tanh(W_{in} x_t + b_{in} + r_t * (W_{hn} h_{(t-1)}+ b_{hn})) \\ + * h_t = (1 - z_t) * n_t + z_t * h_{(t-1)} \\ + * \end{array} + * + * + * Defined in src/operator/rnn.cc:L354 + * }}} + * + * @param data Input data to RNN + * @param parameters Vector of all RNN trainable parameters concatenated + * @param state initial hidden state of the RNN + * @param state_cell initial cell state for LSTM networks (only for LSTM) + * @param sequence_length Vector of valid sequence lengths for each element in batch. (Only used if use_sequence_length kwarg is True) + * @param state_size size of the state for each layer + * @param num_layers number of stacked layers + * @param bidirectional whether to use bidirectional recurrent layers + * @param mode the type of RNN to compute + * @param p drop rate of the dropout on the outputs of each RNN layer, except the last layer. + * @param state_outputs Whether to have the states as symbol outputs. + * @param projection_size size of project size + * @param lstm_state_clip_min Minimum clip value of LSTM states. This option must be used together with lstm_state_clip_max. + * @param lstm_state_clip_max Maximum clip value of LSTM states. This option must be used together with lstm_state_clip_min. + * @param lstm_state_clip_nan Whether to stop NaN from propagating in state by clipping it to min/max. If clipping range is not specified, this option is ignored. + * @param use_sequence_length If set to true, this layer takes in an extra input parameter `sequence_length` to specify variable length sequence + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def RNN (data : org.apache.mxnet.NDArray, parameters : org.apache.mxnet.NDArray, state : org.apache.mxnet.NDArray, state_cell : org.apache.mxnet.NDArray, sequence_length : org.apache.mxnet.NDArray, state_size : Int, num_layers : Int, bidirectional : Option[Boolean] = None, mode : String, p : Option[Float] = None, state_outputs : Option[Boolean] = None, projection_size : Option[Int] = None, lstm_state_clip_min : Option[Double] = None, lstm_state_clip_max : Option[Double] = None, lstm_state_clip_nan : Option[Boolean] = None, use_sequence_length : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Performs region of interest(ROI) pooling on the input array. + * + * ROI pooling is a variant of a max pooling layer, in which the output size is fixed and + * region of interest is a parameter. Its purpose is to perform max pooling on the inputs + * of non-uniform sizes to obtain fixed-size feature maps. ROI pooling is a neural-net + * layer mostly used in training a `Fast R-CNN` network for object detection. + * + * This operator takes a 4D feature map as an input array and region proposals as `rois`, + * then it pools over sub-regions of input and produces a fixed-sized output array + * regardless of the ROI size. + * + * To crop the feature map accordingly, you can resize the bounding box coordinates + * by changing the parameters `rois` and `spatial_scale`. + * + * The cropped feature maps are pooled by standard max pooling operation to a fixed size output + * indicated by a `pooled_size` parameter. batch_size will change to the number of region + * bounding boxes after `ROIPooling`. 
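+ *
+ *        A rough call sketch through the generated Scala API (same ``NDArray.api`` / ``head``
+ *        assumptions as the sketch given for LinearRegressionOutput; the feature map and ROI
+ *        values below are purely illustrative)::
+ *
+ *          // 1 image, 1 channel, 6x6 feature map
+ *          val feat = NDArray.ones(Shape(1, 1, 6, 6))
+ *          // one ROI per row: [batch_index, x1, y1, x2, y2]
+ *          val rois = NDArray.array(Array(0f, 0f, 0f, 4f, 4f), Shape(1, 5))
+ *          val pooled = NDArray.api.ROIPooling(feat, rois, pooled_size = Shape(2, 2), spatial_scale = 1f).head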
+ * + * The size of each region of interest doesn't have to be perfectly divisible by + * the number of pooling sections(`pooled_size`). + * + * Example:: + * + * x = `[ [`[ [ 0., 1., 2., 3., 4., 5.], + * [ 6., 7., 8., 9., 10., 11.], + * [ 12., 13., 14., 15., 16., 17.], + * [ 18., 19., 20., 21., 22., 23.], + * [ 24., 25., 26., 27., 28., 29.], + * [ 30., 31., 32., 33., 34., 35.], + * [ 36., 37., 38., 39., 40., 41.], + * [ 42., 43., 44., 45., 46., 47.] ] ] ] + * + * // region of interest i.e. bounding box coordinates. + * y = `[ [0,0,0,4,4] ] + * + * // returns array of shape (2,2) according to the given roi with max pooling. + * ROIPooling(x, y, (2,2), 1.0) = `[ [`[ [ 14., 16.], + * [ 26., 28.] ] ] ] + * + * // region of interest is changed due to the change in `spacial_scale` parameter. + * ROIPooling(x, y, (2,2), 0.7) = `[ [`[ [ 7., 9.], + * [ 19., 21.] ] ] ] + * + * + * + * Defined in src/operator/roi_pooling.cc:L225 + * }}} + * + * @param data The input array to the pooling operator, a 4D Feature maps + * @param rois Bounding box coordinates, a 2D array of `[ [batch_index, x1, y1, x2, y2] ], where (x1, y1) and (x2, y2) are top left and bottom right corners of designated region of interest. `batch_index` indicates the index of corresponding image in the input array + * @param pooled_size ROI pooling output shape (h,w) + * @param spatial_scale Ratio of input feature map height (or w) to raw image height (or w). Equals the reciprocal of total stride in convolutional layers + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def ROIPooling (data : org.apache.mxnet.NDArray, rois : org.apache.mxnet.NDArray, pooled_size : org.apache.mxnet.Shape, spatial_scale : Float, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Reshapes the input array. + * .. note:: ``Reshape`` is deprecated, use ``reshape`` + * Given an array and a shape, this function returns a copy of the array in the new shape. + * The shape is a tuple of integers such as (2,3,4). The size of the new shape should be same as the size of the input array. + * Example:: + * reshape([1,2,3,4], shape=(2,2)) = `[ [1,2], [3,4] ] + * Some dimensions of the shape can take special values from the set {0, -1, -2, -3, -4}. The significance of each is explained below: + * - ``0`` copy this dimension from the input to the output shape. + * Example:: + * - input shape = (2,3,4), shape = (4,0,2), output shape = (4,3,2) + * - input shape = (2,3,4), shape = (2,0,0), output shape = (2,3,4) + * - ``-1`` infers the dimension of the output shape by using the remainder of the input dimensions + * keeping the size of the new array same as that of the input array. + * At most one dimension of shape can be -1. + * Example:: + * - input shape = (2,3,4), shape = (6,1,-1), output shape = (6,1,4) + * - input shape = (2,3,4), shape = (3,-1,8), output shape = (3,1,8) + * - input shape = (2,3,4), shape=(-1,), output shape = (24,) + * - ``-2`` copy all/remainder of the input dimensions to the output shape. + * Example:: + * - input shape = (2,3,4), shape = (-2,), output shape = (2,3,4) + * - input shape = (2,3,4), shape = (2,-2), output shape = (2,3,4) + * - input shape = (2,3,4), shape = (-2,1,1), output shape = (2,3,4,1,1) + * - ``-3`` use the product of two consecutive dimensions of the input shape as the output dimension. 
+ * Example:: + * - input shape = (2,3,4), shape = (-3,4), output shape = (6,4) + * - input shape = (2,3,4,5), shape = (-3,-3), output shape = (6,20) + * - input shape = (2,3,4), shape = (0,-3), output shape = (2,12) + * - input shape = (2,3,4), shape = (-3,-2), output shape = (6,4) + * - ``-4`` split one dimension of the input into two dimensions passed subsequent to -4 in shape (can contain -1). + * Example:: + * - input shape = (2,3,4), shape = (-4,1,2,-2), output shape =(1,2,3,4) + * - input shape = (2,3,4), shape = (2,-4,-1,3,-2), output shape = (2,1,3,4) + * If the argument `reverse` is set to 1, then the special values are inferred from right to left. + * Example:: + * - without reverse=1, for input shape = (10,5,4), shape = (-1,0), output shape would be (40,5) + * - with reverse=1, output shape will be (50,4). + * + * + * Defined in src/operator/tensor/matrix_op.cc:L175 + * }}} + * + * @param data Input data to reshape. + * @param shape The target shape + * @param reverse If true then the special values are inferred from right to left + * @param target_shape (Deprecated! Use ``shape`` instead.) Target new shape. One and only one dim can be 0, in which case it will be inferred from the rest of dims + * @param keep_highest (Deprecated! Use ``shape`` instead.) Whether keep the highest dim unchanged.If set to true, then the first dim in target_shape is ignored,and always fixed as input + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def Reshape (data : org.apache.mxnet.NDArray, shape : Option[org.apache.mxnet.Shape] = None, reverse : Option[Boolean] = None, target_shape : Option[org.apache.mxnet.Shape] = None, keep_highest : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes support vector machine based transformation of the input. + * + * This tutorial demonstrates using SVM as output layer for classification instead of softmax: + * https://github.com/dmlc/mxnet/tree/master/example/svm_mnist. + * }}} + * + * @param data Input data for SVM transformation. + * @param label Class label for the input data. + * @param margin The loss function penalizes outputs that lie outside this margin. Default margin is 1. + * @param regularization_coefficient Regularization parameter for the SVM. This balances the tradeoff between coefficient size and error. + * @param use_linear Whether to use L1-SVM objective. L2-SVM objective is used by default. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def SVMOutput (data : org.apache.mxnet.NDArray, label : org.apache.mxnet.NDArray, margin : Option[Float] = None, regularization_coefficient : Option[Float] = None, use_linear : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Takes the last element of a sequence. + * + * This function takes an n-dimensional input array of the form + * [max_sequence_length, batch_size, other_feature_dims] and returns a (n-1)-dimensional array + * of the form [batch_size, other_feature_dims]. + * + * Parameter `sequence_length` is used to handle variable-length sequences. `sequence_length` should be + * an input array of positive ints of dimension [batch_size]. To use this parameter, + * set `use_sequence_length` to `True`, otherwise each example in the batch is assumed + * to have the max sequence length. + * + * .. note:: Alternatively, you can also use `take` operator. 
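+ *
+ *        A rough Scala-API version of the variable-length example below (same ``NDArray.api`` /
+ *        ``head`` assumptions as in the earlier sketches)::
+ *
+ *          val x   = NDArray.array((1 to 27).map(_.toFloat).toArray, Shape(3, 3, 3))
+ *          val len = NDArray.array(Array(1f, 2f, 3f), Shape(3))
+ *          // for batch element i, picks time step len(i) - 1
+ *          val last = NDArray.api.SequenceLast(x, len, use_sequence_length = Some(true)).head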
+ * + * Example:: + * + * x = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.], + * [ 7., 8., 9.] ], + * + * `[ [ 10., 11., 12.], + * [ 13., 14., 15.], + * [ 16., 17., 18.] ], + * + * `[ [ 19., 20., 21.], + * [ 22., 23., 24.], + * [ 25., 26., 27.] ] ] + * + * // returns last sequence when sequence_length parameter is not used + * SequenceLast(x) = `[ [ 19., 20., 21.], + * [ 22., 23., 24.], + * [ 25., 26., 27.] ] + * + * // sequence_length is used + * SequenceLast(x, sequence_length=[1,1,1], use_sequence_length=True) = + * `[ [ 1., 2., 3.], + * [ 4., 5., 6.], + * [ 7., 8., 9.] ] + * + * // sequence_length is used + * SequenceLast(x, sequence_length=[1,2,3], use_sequence_length=True) = + * `[ [ 1., 2., 3.], + * [ 13., 14., 15.], + * [ 25., 26., 27.] ] + * + * + * + * Defined in src/operator/sequence_last.cc:L106 + * }}} + * + * @param data n-dimensional input array of the form [max_sequence_length, batch_size, other_feature_dims] where n>2 + * @param sequence_length vector of sequence lengths of the form [batch_size] + * @param use_sequence_length If set to true, this layer takes in an extra input parameter `sequence_length` to specify variable length sequence + * @param axis The sequence axis. Only values of 0 and 1 are currently supported. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def SequenceLast (data : org.apache.mxnet.NDArray, sequence_length : org.apache.mxnet.NDArray, use_sequence_length : Option[Boolean] = None, axis : Option[Int] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Sets all elements outside the sequence to a constant value. + * + * This function takes an n-dimensional input array of the form + * [max_sequence_length, batch_size, other_feature_dims] and returns an array of the same shape. + * + * Parameter `sequence_length` is used to handle variable-length sequences. `sequence_length` + * should be an input array of positive ints of dimension [batch_size]. + * To use this parameter, set `use_sequence_length` to `True`, + * otherwise each example in the batch is assumed to have the max sequence length and + * this operator works as the `identity` operator. + * + * Example:: + * + * x = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // Batch 1 + * B1 = `[ [ 1., 2., 3.], + * [ 7., 8., 9.], + * [ 13., 14., 15.] ] + * + * // Batch 2 + * B2 = `[ [ 4., 5., 6.], + * [ 10., 11., 12.], + * [ 16., 17., 18.] ] + * + * // works as identity operator when sequence_length parameter is not used + * SequenceMask(x) = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // sequence_length [1,1] means 1 of each batch will be kept + * // and other rows are masked with default mask value = 0 + * SequenceMask(x, sequence_length=[1,1], use_sequence_length=True) = + * `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ], + * + * `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] ] + * + * // sequence_length [2,3] means 2 of batch B1 and 3 of batch B2 will be kept + * // and other rows are masked with value = 1 + * SequenceMask(x, sequence_length=[2,3], use_sequence_length=True, value=1) = + * `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 1., 1., 1.], + * [ 16., 17., 18.] 
] ] + * + * + * + * Defined in src/operator/sequence_mask.cc:L186 + * }}} + * + * @param data n-dimensional input array of the form [max_sequence_length, batch_size, other_feature_dims] where n>2 + * @param sequence_length vector of sequence lengths of the form [batch_size] + * @param use_sequence_length If set to true, this layer takes in an extra input parameter `sequence_length` to specify variable length sequence + * @param value The value to be used as a mask. + * @param axis The sequence axis. Only values of 0 and 1 are currently supported. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def SequenceMask (data : org.apache.mxnet.NDArray, sequence_length : org.apache.mxnet.NDArray, use_sequence_length : Option[Boolean] = None, value : Option[Float] = None, axis : Option[Int] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Reverses the elements of each sequence. + * + * This function takes an n-dimensional input array of the form [max_sequence_length, batch_size, other_feature_dims] + * and returns an array of the same shape. + * + * Parameter `sequence_length` is used to handle variable-length sequences. + * `sequence_length` should be an input array of positive ints of dimension [batch_size]. + * To use this parameter, set `use_sequence_length` to `True`, + * otherwise each example in the batch is assumed to have the max sequence length. + * + * Example:: + * + * x = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // Batch 1 + * B1 = `[ [ 1., 2., 3.], + * [ 7., 8., 9.], + * [ 13., 14., 15.] ] + * + * // Batch 2 + * B2 = `[ [ 4., 5., 6.], + * [ 10., 11., 12.], + * [ 16., 17., 18.] ] + * + * // returns reverse sequence when sequence_length parameter is not used + * SequenceReverse(x) = `[ `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ] ] + * + * // sequence_length [2,2] means 2 rows of + * // both batch B1 and B2 will be reversed. + * SequenceReverse(x, sequence_length=[2,2], use_sequence_length=True) = + * `[ `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // sequence_length [2,3] means 2 of batch B2 and 3 of batch B3 + * // will be reversed. + * SequenceReverse(x, sequence_length=[2,3], use_sequence_length=True) = + * `[ `[ [ 7., 8., 9.], + * [ 16., 17., 18.] ], + * + * `[ [ 1., 2., 3.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14, 15.], + * [ 4., 5., 6.] ] ] + * + * + * + * Defined in src/operator/sequence_reverse.cc:L122 + * }}} + * + * @param data n-dimensional input array of the form [max_sequence_length, batch_size, other dims] where n>2 + * @param sequence_length vector of sequence lengths of the form [batch_size] + * @param use_sequence_length If set to true, this layer takes in an extra input parameter `sequence_length` to specify variable length sequence + * @param axis The sequence axis. Only 0 is currently supported. 
+ * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def SequenceReverse (data : org.apache.mxnet.NDArray, sequence_length : org.apache.mxnet.NDArray, use_sequence_length : Option[Boolean] = None, axis : Option[Int] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Splits an array along a particular axis into multiple sub-arrays. + * + * .. note:: ``SliceChannel`` is deprecated. Use ``split`` instead. + * + * **Note** that `num_outputs` should evenly divide the length of the axis + * along which to split the array. + * + * Example:: + * + * x = `[ `[ [ 1.] + * [ 2.] ] + * `[ [ 3.] + * [ 4.] ] + * `[ [ 5.] + * [ 6.] ] ] + * x.shape = (3, 2, 1) + * + * y = split(x, axis=1, num_outputs=2) // a list of 2 arrays with shape (3, 1, 1) + * y = `[ `[ [ 1.] ] + * `[ [ 3.] ] + * `[ [ 5.] ] ] + * + * `[ `[ [ 2.] ] + * `[ [ 4.] ] + * `[ [ 6.] ] ] + * + * y[0].shape = (3, 1, 1) + * + * z = split(x, axis=0, num_outputs=3) // a list of 3 arrays with shape (1, 2, 1) + * z = `[ `[ [ 1.] + * [ 2.] ] ] + * + * `[ `[ [ 3.] + * [ 4.] ] ] + * + * `[ `[ [ 5.] + * [ 6.] ] ] + * + * z[0].shape = (1, 2, 1) + * + * `squeeze_axis=1` removes the axis with length 1 from the shapes of the output arrays. + * **Note** that setting `squeeze_axis` to ``1`` removes axis with length 1 only + * along the `axis` which it is split. + * Also `squeeze_axis` can be set to true only if ``input.shape[axis] == num_outputs``. + * + * Example:: + * + * z = split(x, axis=0, num_outputs=3, squeeze_axis=1) // a list of 3 arrays with shape (2, 1) + * z = `[ [ 1.] + * [ 2.] ] + * + * `[ [ 3.] + * [ 4.] ] + * + * `[ [ 5.] + * [ 6.] ] + * z[0].shape = (2 ,1 ) + * + * + * + * Defined in src/operator/slice_channel.cc:L107 + * }}} + * + * @param data The input + * @param num_outputs Number of splits. Note that this should evenly divide the length of the `axis`. + * @param axis Axis along which to split. + * @param squeeze_axis If true, Removes the axis with length 1 from the shapes of the output arrays. **Note** that setting `squeeze_axis` to ``true`` removes axis with length 1 only along the `axis` which it is split. Also `squeeze_axis` can be set to ``true`` only if ``input.shape[axis] == num_outputs``. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def SliceChannel (data : org.apache.mxnet.NDArray, num_outputs : Int, axis : Option[Int] = None, squeeze_axis : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the gradient of cross entropy loss with respect to softmax output. + * + * - This operator computes the gradient in two steps. + * The cross entropy loss does not actually need to be computed. + * + * - Applies softmax function on the input array. + * - Computes and returns the gradient of cross entropy loss w.r.t. the softmax output. + * + * - The softmax function, cross entropy loss and gradient is given by: + * + * - Softmax Function: + * + * .. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)} + * + * - Cross Entropy Function: + * + * .. math:: \text{CE(label, output)} = - \sum_i \text{label}_i \log(\text{output}_i) + * + * - The gradient of cross entropy loss w.r.t softmax output: + * + * .. math:: \text{gradient} = \text{output} - \text{label} + * + * - During forward propagation, the softmax function is computed for each instance in the input array. + * + * For general *N*-D input arrays with shape :math:`(d_1, d_2, ..., d_n)`. 
The size is + * :math:`s = d_1 \cdot d_2 \cdots d_n`. We can use the parameters `preserve_shape` + * and `multi_output` to specify the way to compute softmax: + * + * - By default, `preserve_shape` is ``false``. This operator will reshape the input array + * into a 2-D array with shape :math:`(d_1, \frac{s}{d_1})` and then compute the softmax function for + * each row in the reshaped array, and afterwards reshape it back to the original shape + * :math:`(d_1, d_2, ..., d_n)`. + * - If `preserve_shape` is ``true``, the softmax function will be computed along + * the last axis (`axis` = ``-1``). + * - If `multi_output` is ``true``, the softmax function will be computed along + * the second axis (`axis` = ``1``). + * + * - During backward propagation, the gradient of cross-entropy loss w.r.t. the softmax output array is computed. + * The provided label can be a one-hot label array or a probability label array. + * + * - If the parameter `use_ignore` is ``true``, `ignore_label` can specify input instances + * with a particular label to be ignored during backward propagation. **This has no effect when + * softmax `output` has the same shape as `label`**. + * + * Example:: + * + * data = `[ [1,2,3,4],[2,2,2,2],[3,3,3,3],[4,4,4,4] ] + * label = [1,0,2,3] + * ignore_label = 1 + * SoftmaxOutput(data=data, label = label,\ + * multi_output=true, use_ignore=true,\ + * ignore_label=ignore_label) + * ## forward softmax output + * `[ [ 0.0320586 0.08714432 0.23688284 0.64391428] + * [ 0.25 0.25 0.25 0.25 ] + * [ 0.25 0.25 0.25 0.25 ] + * [ 0.25 0.25 0.25 0.25 ] ] + * ## backward gradient output + * `[ [ 0. 0. 0. 0. ] + * [-0.75 0.25 0.25 0.25] + * [ 0.25 0.25 -0.75 0.25] + * [ 0.25 0.25 0.25 -0.75] ] + * ## notice that the first row is all 0 because label[0] is 1, which is equal to ignore_label. + * + * - The parameter `grad_scale` can be used to rescale the gradient, which is often used to + * give each loss function different weights. + * + * - This operator also supports various ways to normalize the gradient by `normalization`. + * The `normalization` is applied if the softmax output has a different shape than the labels. + * The `normalization` mode can be set to the following: + * + * - ``'null'``: do nothing. + * - ``'batch'``: divide the gradient by the batch size. + * - ``'valid'``: divide the gradient by the number of instances which are not ignored. + * + * + * + * Defined in src/operator/softmax_output.cc:L231 + * }}} + * + * @param data Input array. + * @param label Ground truth label. + * @param grad_scale Scales the gradient by a float factor. + * @param ignore_label The instances whose `labels` == `ignore_label` will be ignored during backward, if `use_ignore` is set to ``true``. + * @param multi_output If set to ``true``, the softmax function will be computed along axis ``1``. This is applied when the shape of input array differs from the shape of label array. + * @param use_ignore If set to ``true``, the `ignore_label` value will not contribute to the backward gradient. + * @param preserve_shape If set to ``true``, the softmax function will be computed along the last axis (``-1``). + * @param normalization Normalizes the gradient. + * @param out_grad Multiplies gradient with output gradient element-wise. + * @param smooth_alpha Constant for computing a label smoothed version of cross-entropy for the backwards pass. This constant gets subtracted from the one-hot encoding of the gold label and distributed uniformly to all other labels.
+ * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def Softmax (data : org.apache.mxnet.NDArray, label : org.apache.mxnet.NDArray, grad_scale : Option[Float] = None, ignore_label : Option[Float] = None, multi_output : Option[Boolean] = None, use_ignore : Option[Boolean] = None, preserve_shape : Option[Boolean] = None, normalization : Option[String] = None, out_grad : Option[Boolean] = None, smooth_alpha : Option[Float] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Applies softmax activation to input. This is intended for internal layers. + * + * .. note:: + * + * This operator has been deprecated, please use `softmax`. + * + * If `mode` = ``instance``, this operator will compute a softmax for each instance in the batch. + * This is the default mode. + * + * If `mode` = ``channel``, this operator will compute a k-class softmax at each position + * of each instance, where `k` = ``num_channel``. This mode can only be used when the input array + * has at least 3 dimensions. + * This can be used for `fully convolutional network`, `image segmentation`, etc. + * + * Example:: + * + * >>> input_array = mx.nd.array(`[ [3., 0.5, -0.5, 2., 7.], + * >>> [2., -.4, 7., 3., 0.2] ]) + * >>> softmax_act = mx.nd.SoftmaxActivation(input_array) + * >>> print softmax_act.asnumpy() + * `[ [ 1.78322066e-02 1.46375655e-03 5.38485940e-04 6.56010211e-03 9.73605454e-01] + * [ 6.56221947e-03 5.95310994e-04 9.73919690e-01 1.78379621e-02 1.08472735e-03] ] + * + * + * + * Defined in src/operator/nn/softmax_activation.cc:L59 + * }}} + * + * @param data The input array. + * @param mode Specifies how to compute the softmax. If set to ``instance``, it computes softmax for each instance. If set to ``channel``, It computes cross channel softmax for each position of each instance. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def SoftmaxActivation (data : org.apache.mxnet.NDArray, mode : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the gradient of cross entropy loss with respect to softmax output. + * + * - This operator computes the gradient in two steps. + * The cross entropy loss does not actually need to be computed. + * + * - Applies softmax function on the input array. + * - Computes and returns the gradient of cross entropy loss w.r.t. the softmax output. + * + * - The softmax function, cross entropy loss and gradient is given by: + * + * - Softmax Function: + * + * .. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)} + * + * - Cross Entropy Function: + * + * .. math:: \text{CE(label, output)} = - \sum_i \text{label}_i \log(\text{output}_i) + * + * - The gradient of cross entropy loss w.r.t softmax output: + * + * .. math:: \text{gradient} = \text{output} - \text{label} + * + * - During forward propagation, the softmax function is computed for each instance in the input array. + * + * For general *N*-D input arrays with shape :math:`(d_1, d_2, ..., d_n)`. The size is + * :math:`s=d_1 \cdot d_2 \cdot \cdot \cdot d_n`. We can use the parameters `preserve_shape` + * and `multi_output` to specify the way to compute softmax: + * + * - By default, `preserve_shape` is ``false``. This operator will reshape the input array + * into a 2-D array with shape :math:`(d_1, \frac{s}{d_1})` and then compute the softmax function for + * each row in the reshaped array, and afterwards reshape it back to the original shape + * :math:`(d_1, d_2, ..., d_n)`. 
+ * - If `preserve_shape` is ``true``, the softmax function will be computed along + * the last axis (`axis` = ``-1``). + * - If `multi_output` is ``true``, the softmax function will be computed along + * the second axis (`axis` = ``1``). + * + * - During backward propagation, the gradient of cross-entropy loss w.r.t. the softmax output array is computed. + * The provided label can be a one-hot label array or a probability label array. + * + * - If the parameter `use_ignore` is ``true``, `ignore_label` can specify input instances + * with a particular label to be ignored during backward propagation. **This has no effect when + * softmax `output` has the same shape as `label`**. + * + * Example:: + * + * data = `[ [1,2,3,4],[2,2,2,2],[3,3,3,3],[4,4,4,4] ] + * label = [1,0,2,3] + * ignore_label = 1 + * SoftmaxOutput(data=data, label = label,\ + * multi_output=true, use_ignore=true,\ + * ignore_label=ignore_label) + * ## forward softmax output + * `[ [ 0.0320586 0.08714432 0.23688284 0.64391428] + * [ 0.25 0.25 0.25 0.25 ] + * [ 0.25 0.25 0.25 0.25 ] + * [ 0.25 0.25 0.25 0.25 ] ] + * ## backward gradient output + * `[ [ 0. 0. 0. 0. ] + * [-0.75 0.25 0.25 0.25] + * [ 0.25 0.25 -0.75 0.25] + * [ 0.25 0.25 0.25 -0.75] ] + * ## notice that the first row is all 0 because label[0] is 1, which is equal to ignore_label. + * + * - The parameter `grad_scale` can be used to rescale the gradient, which is often used to + * give each loss function different weights. + * + * - This operator also supports various ways to normalize the gradient by `normalization`. + * The `normalization` is applied if the softmax output has a different shape than the labels. + * The `normalization` mode can be set to the following: + * + * - ``'null'``: do nothing. + * - ``'batch'``: divide the gradient by the batch size. + * - ``'valid'``: divide the gradient by the number of instances which are not ignored. + * + * + * + * Defined in src/operator/softmax_output.cc:L231 + * }}} + * + * @param data Input array. + * @param label Ground truth label. + * @param grad_scale Scales the gradient by a float factor. + * @param ignore_label The instances whose `labels` == `ignore_label` will be ignored during backward, if `use_ignore` is set to ``true``. + * @param multi_output If set to ``true``, the softmax function will be computed along axis ``1``. This is applied when the shape of input array differs from the shape of label array. + * @param use_ignore If set to ``true``, the `ignore_label` value will not contribute to the backward gradient. + * @param preserve_shape If set to ``true``, the softmax function will be computed along the last axis (``-1``). + * @param normalization Normalizes the gradient. + * @param out_grad Multiplies gradient with output gradient element-wise. + * @param smooth_alpha Constant for computing a label smoothed version of cross-entropy for the backwards pass. This constant gets subtracted from the one-hot encoding of the gold label and distributed uniformly to all other labels.
+ * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def SoftmaxOutput (data : org.apache.mxnet.NDArray, label : org.apache.mxnet.NDArray, grad_scale : Option[Float] = None, ignore_label : Option[Float] = None, multi_output : Option[Boolean] = None, use_ignore : Option[Boolean] = None, preserve_shape : Option[Boolean] = None, normalization : Option[String] = None, out_grad : Option[Boolean] = None, smooth_alpha : Option[Float] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Applies a spatial transformer to the input feature map. + * }}} + * + * @param data Input data to the SpatialTransformerOp. + * @param loc Localisation net; the output dim should be 6 when transform_type is affine. You should initialize the weight and bias with the identity transform. + * @param target_shape output shape (h, w) of spatial transformer: (y, x) + * @param transform_type transformation type + * @param sampler_type sampling type + * @param cudnn_off whether to turn cudnn off + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def SpatialTransformer (data : org.apache.mxnet.NDArray, loc : org.apache.mxnet.NDArray, target_shape : Option[org.apache.mxnet.Shape] = None, transform_type : String, sampler_type : String, cudnn_off : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Interchanges two axes of an array. + * + * Examples:: + * + * x = `[ [1, 2, 3] ] + * swapaxes(x, 0, 1) = `[ [ 1], + * [ 2], + * [ 3] ] + * + * x = `[ `[ [ 0, 1], + * [ 2, 3] ], + * `[ [ 4, 5], + * [ 6, 7] ] ] // (2,2,2) array + * + * swapaxes(x, 0, 2) = `[ `[ [ 0, 4], + * [ 2, 6] ], + * `[ [ 1, 5], + * [ 3, 7] ] ] + * + * + * Defined in src/operator/swapaxis.cc:L70 + * }}} + * + * @param data Input array. + * @param dim1 the first axis to be swapped. + * @param dim2 the second axis to be swapped. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def SwapAxis (data : org.apache.mxnet.NDArray, dim1 : Option[Int] = None, dim2 : Option[Int] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Upsamples the given input data. + * + * Two algorithms (``sample_type``) are available for upsampling: + * + * - Nearest Neighbor + * - Bilinear + * + * **Nearest Neighbor Upsampling** + * + * Input data is expected to be NCHW. + * + * Example:: + * + * x = `[ [`[ [1. 1. 1.] + * [1. 1. 1.] + * [1. 1. 1.] ] ] ] + * + * UpSampling(x, scale=2, sample_type='nearest') = `[ [`[ [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] ] ] ] + * + * **Bilinear Upsampling** + * + * Uses the `deconvolution` algorithm under the hood. You need to provide both the input data and the kernel. + * + * Input data is expected to be NCHW. + * + * `num_filter` is expected to be the same as the number of channels. + * + * Example:: + * + * x = `[ [`[ [1. 1. 1.] + * [1. 1. 1.] + * [1. 1. 1.] ] ] ] + * + * w = `[ [`[ [1. 1. 1. 1.] + * [1. 1. 1. 1.] + * [1. 1. 1. 1.] + * [1. 1. 1. 1.] ] ] ] + * + * UpSampling(x, w, scale=2, sample_type='bilinear', num_filter=1) = `[ [`[ [1. 2. 2. 2. 2. 1.] + * [2. 4. 4. 4. 4. 2.] + * [2. 4. 4. 4. 4. 2.] + * [2. 4. 4. 4. 4. 2.] + * [2. 4. 4. 4. 4. 2.] + * [1. 2. 2. 2. 2. 1.] ] ] ] + * + * + * Defined in src/operator/nn/upsampling.cc:L173 + * }}} + * + * @param data Array of tensors to upsample. For bilinear upsampling, there should be 2 inputs - 1 data and 1 weight.
+ * @param scale Up sampling scale + * @param num_filter Input filter. Only used by bilinear sample_type.Since bilinear upsampling uses deconvolution, num_filters is set to the number of channels. + * @param sample_type upsampling method + * @param multi_input_mode How to handle multiple input. concat means concatenate upsampled images along the channel dimension. sum means add all images together, only available for nearest neighbor upsampling. + * @param num_args Number of inputs to be upsampled. For nearest neighbor upsampling, this can be 1-N; the size of output will be(scale*h_0,scale*w_0) and all other inputs will be upsampled to thesame size. For bilinear upsampling this must be 2; 1 input and 1 weight. + * @param workspace Tmp workspace for deconvolution (MB) + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def UpSampling (data : Array[org.apache.mxnet.NDArray], scale : Int, num_filter : Option[Int] = None, sample_type : String, multi_input_mode : Option[String] = None, num_args : Int, workspace : Option[Long] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise absolute value of the input. + * + * Example:: + * + * abs([-2, 0, 3]) = [2, 0, 3] + * + * The storage type of ``abs`` output depends upon the input storage type: + * + * - abs(default) = default + * - abs(row_sparse) = row_sparse + * - abs(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L721 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def abs (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Update function for Adam optimizer. Adam is seen as a generalization + * of AdaGrad. + * + * Adam update consists of the following steps, where g represents gradient and m, v + * are 1st and 2nd order moment estimates (mean and variance). + * + * .. math:: + * + * g_t = \nabla J(W_{t-1})\\ + * m_t = \beta_1 m_{t-1} + (1 - \beta_1) g_t\\ + * v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2\\ + * W_t = W_{t-1} - \alpha \frac{ m_t }{ \sqrt{ v_t } + \epsilon } + * + * It updates the weights using:: + * + * m = beta1*m + (1-beta1)*grad + * v = beta2*v + (1-beta2)*(grad**2) + * w += - learning_rate * m / (sqrt(v) + epsilon) + * + * However, if grad's storage type is ``row_sparse``, ``lazy_update`` is True and the storage + * type of weight is the same as those of m and v, + * only the row slices whose indices appear in grad.indices are updated (for w, m and v):: + * + * for row in grad.indices: + * m[row] = beta1*m[row] + (1-beta1)*grad[row] + * v[row] = beta2*v[row] + (1-beta2)*(grad[row]**2) + * w[row] += - learning_rate * m[row] / (sqrt(v[row]) + epsilon) + * + * + * + * Defined in src/operator/optimizer_op.cc:L688 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param mean Moving mean + * @param vari Moving variance + * @param lr Learning rate + * @param beta1 The decay rate for the 1st moment estimates. + * @param beta2 The decay rate for the 2nd moment estimates. + * @param epsilon A small constant for numerical stability. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. 
+ * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param lazy_update If true, lazy updates are applied if gradient's stype is row_sparse and all of w, m and v have the same stype + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def adam_update (weight : org.apache.mxnet.NDArray, grad : org.apache.mxnet.NDArray, mean : org.apache.mxnet.NDArray, vari : org.apache.mxnet.NDArray, lr : Float, beta1 : Option[Float] = None, beta2 : Option[Float] = None, epsilon : Option[Float] = None, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, lazy_update : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Adds all input arguments element-wise. + * + * .. math:: + * add\_n(a_1, a_2, ..., a_n) = a_1 + a_2 + ... + a_n + * + * ``add_n`` is potentially more efficient than calling ``add`` by `n` times. + * + * The storage type of ``add_n`` output depends on storage types of inputs + * + * - add_n(row_sparse, row_sparse, ..) = row_sparse + * - add_n(default, csr, default) = default + * - add_n(any input combinations longer than 4 (>4) with at least one default type) = default + * - otherwise, ``add_n`` falls all inputs back to default storage and generates default storage + * + * + * + * Defined in src/operator/tensor/elemwise_sum.cc:L155 + * }}} + * + * @param args Positional input arguments + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def add_n (args : Array[org.apache.mxnet.NDArray], out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Check if all the float numbers in the array are finite (used for AMP) + * + * + * Defined in src/operator/contrib/all_finite.cc:L101 + * }}} + * + * @param data Array + * @param init_output Initialize output to 1. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def all_finite (data : org.apache.mxnet.NDArray, init_output : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Cast function between low precision float/FP32 used by AMP. + * + * It casts only between low precision float/FP32 and does not do anything for other types. + * + * + * Defined in src/operator/tensor/amp_cast.cc:L37 + * }}} + * + * @param data The input. + * @param dtype Output data type. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def amp_cast (data : org.apache.mxnet.NDArray, dtype : String, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Cast function used by AMP, that casts its inputs to the common widest type. + * + * It casts only between low precision float/FP32 and does not do anything for other types. + * + * + * + * Defined in src/operator/tensor/amp_cast.cc:L71 + * }}} + * + * @param data Weights + * @param num_outputs Number of input/output pairs to be casted to the widest type. + * @param cast_narrow Whether to cast to the narrowest type + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def amp_multicast (data : Array[org.apache.mxnet.NDArray], num_outputs : Int, cast_narrow : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise inverse cosine of the input array. 
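+ *
+ *        (A quick element-wise call through the generated Scala API, under the same
+ *        ``NDArray.api`` / ``head`` assumptions used in the sketches above)::
+ *
+ *          val y = NDArray.api.arccos(NDArray.array(Array(-1f, 0f, 1f), Shape(3))).head  // ~ [pi, pi/2, 0]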
+ * + * The input should be in range `[-1, 1]`. + * The output is in the closed interval :math:`[0, \pi]` + * + * .. math:: + * arccos([-1, -.707, 0, .707, 1]) = [\pi, 3\pi/4, \pi/2, \pi/4, 0] + * + * The storage type of ``arccos`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L206 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def arccos (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns the element-wise inverse hyperbolic cosine of the input array, \ + * computed element-wise. + * + * The storage type of ``arccosh`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L474 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def arccosh (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise inverse sine of the input array. + * + * The input should be in the range `[-1, 1]`. + * The output is in the closed interval of [:math:`-\pi/2`, :math:`\pi/2`]. + * + * .. math:: + * arcsin([-1, -.707, 0, .707, 1]) = [-\pi/2, -\pi/4, 0, \pi/4, \pi/2] + * + * The storage type of ``arcsin`` output depends upon the input storage type: + * + * - arcsin(default) = default + * - arcsin(row_sparse) = row_sparse + * - arcsin(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L187 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def arcsin (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns the element-wise inverse hyperbolic sine of the input array, \ + * computed element-wise. + * + * The storage type of ``arcsinh`` output depends upon the input storage type: + * + * - arcsinh(default) = default + * - arcsinh(row_sparse) = row_sparse + * - arcsinh(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L436 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def arcsinh (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise inverse tangent of the input array. + * + * The output is in the closed interval :math:`[-\pi/2, \pi/2]` + * + * .. math:: + * arctan([-1, 0, 1]) = [-\pi/4, 0, \pi/4] + * + * The storage type of ``arctan`` output depends upon the input storage type: + * + * - arctan(default) = default + * - arctan(row_sparse) = row_sparse + * - arctan(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L227 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def arctan (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns the element-wise inverse hyperbolic tangent of the input array, \ + * computed element-wise. 
+ * + * The storage type of ``arctanh`` output depends upon the input storage type: + * + * - arctanh(default) = default + * - arctanh(row_sparse) = row_sparse + * - arctanh(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L515 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def arctanh (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns indices of the maximum values along an axis. + * + * In the case of multiple occurrences of maximum values, the indices corresponding to the first occurrence + * are returned. + * + * Examples:: + * + * x = `[ [ 0., 1., 2.], + * [ 3., 4., 5.] ] + * + * // argmax along axis 0 + * argmax(x, axis=0) = [ 1., 1., 1.] + * + * // argmax along axis 1 + * argmax(x, axis=1) = [ 2., 2.] + * + * // argmax along axis 1 keeping same dims as an input array + * argmax(x, axis=1, keepdims=True) = `[ [ 2.], + * [ 2.] ] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L52 + * }}} + * + * @param data The input + * @param axis The axis along which to perform the reduction. Negative values means indexing from right to left. ``Requires axis to be set as int, because global reduction is not supported yet.`` + * @param keepdims If this is set to `True`, the reduced axis is left in the result as dimension with size one. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def argmax (data : org.apache.mxnet.NDArray, axis : Option[Int] = None, keepdims : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns argmax indices of each channel from the input array. + * + * The result will be an NDArray of shape (num_channel,). + * + * In case of multiple occurrences of the maximum values, the indices corresponding to the first occurrence + * are returned. + * + * Examples:: + * + * x = `[ [ 0., 1., 2.], + * [ 3., 4., 5.] ] + * + * argmax_channel(x) = [ 2., 2.] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L97 + * }}} + * + * @param data The input array + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def argmax_channel (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns indices of the minimum values along an axis. + * + * In the case of multiple occurrences of minimum values, the indices corresponding to the first occurrence + * are returned. + * + * Examples:: + * + * x = `[ [ 0., 1., 2.], + * [ 3., 4., 5.] ] + * + * // argmin along axis 0 + * argmin(x, axis=0) = [ 0., 0., 0.] + * + * // argmin along axis 1 + * argmin(x, axis=1) = [ 0., 0.] + * + * // argmin along axis 1 keeping same dims as an input array + * argmin(x, axis=1, keepdims=True) = `[ [ 0.], + * [ 0.] ] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L77 + * }}} + * + * @param data The input + * @param axis The axis along which to perform the reduction. Negative values means indexing from right to left. ``Requires axis to be set as int, because global reduction is not supported yet.`` + * @param keepdims If this is set to `True`, the reduced axis is left in the result as dimension with size one. 
+ * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def argmin (data : org.apache.mxnet.NDArray, axis : Option[Int] = None, keepdims : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns the indices that would sort an input array along the given axis. + * + * This function performs sorting along the given axis and returns an array of indices having same shape + * as an input array that index data in sorted order. + * + * Examples:: + * + * x = `[ [ 0.3, 0.2, 0.4], + * [ 0.1, 0.3, 0.2] ] + * + * // sort along axis -1 + * argsort(x) = `[ [ 1., 0., 2.], + * [ 0., 2., 1.] ] + * + * // sort along axis 0 + * argsort(x, axis=0) = `[ [ 1., 0., 1.] + * [ 0., 1., 0.] ] + * + * // flatten and then sort + * argsort(x, axis=None) = [ 3., 1., 5., 0., 4., 2.] + * + * + * Defined in src/operator/tensor/ordering_op.cc:L183 + * }}} + * + * @param data The input array + * @param axis Axis along which to sort the input tensor. If not given, the flattened array is used. Default is -1. + * @param is_ascend Whether to sort in ascending or descending order. + * @param dtype DType of the output indices. It is only valid when ret_typ is "indices" or "both". An error will be raised if the selected data type cannot precisely represent the indices. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def argsort (data : org.apache.mxnet.NDArray, axis : Option[Int] = None, is_ascend : Option[Boolean] = None, dtype : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Batchwise dot product. + * + * ``batch_dot`` is used to compute dot product of ``x`` and ``y`` when ``x`` and + * ``y`` are data in batch, namely N-D (N >= 3) arrays in shape of `(B0, ..., B_i, :, :)`. + * + * For example, given ``x`` with shape `(B_0, ..., B_i, N, M)` and ``y`` with shape + * `(B_0, ..., B_i, M, K)`, the result array will have shape `(B_0, ..., B_i, N, K)`, + * which is computed by:: + * + * batch_dot(x,y)[b_0, ..., b_i, :, :] = dot(x[b_0, ..., b_i, :, :], y[b_0, ..., b_i, :, :]) + * + * + * + * Defined in src/operator/tensor/dot.cc:L127 + * }}} + * + * @param lhs The first input + * @param rhs The second input + * @param transpose_a If true then transpose the first input before dot. + * @param transpose_b If true then transpose the second input before dot. + * @param forward_stype The desired storage type of the forward output given by user, if thecombination of input storage types and this hint does not matchany implemented ones, the dot operator will perform fallback operationand still produce an output of the desired storage type. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def batch_dot (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, transpose_a : Option[Boolean] = None, transpose_b : Option[Boolean] = None, forward_stype : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Takes elements from a data batch. + * + * .. note:: + * `batch_take` is deprecated. Use `pick` instead. + * + * Given an input array of shape ``(d0, d1)`` and indices of shape ``(i0,)``, the result will be + * an output array of shape ``(i0,)`` with:: + * + * output[i] = input[i, indices[i] ] + * + * Examples:: + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] ] + * + * // takes elements with specified indices + * batch_take(x, [0,1,0]) = [ 1. 4. 5.] 
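+ *
+ * A minimal Scala sketch of the example above (an illustration only, assuming
+ * `org.apache.mxnet.{NDArray, Shape}` are in scope and the generated bindings are
+ * exposed as `NDArray.api`; `.head` unwraps the returned NDArrayFuncReturn)::
+ *
+ *   val x   = NDArray.array(Array(1f, 2f, 3f, 4f, 5f, 6f), Shape(3, 2))
+ *   val idx = NDArray.array(Array(0f, 1f, 0f), Shape(3))
+ *   val res = NDArray.api.batch_take(x, idx).head   // [1., 4., 5.]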
+ * + * + * + * Defined in src/operator/tensor/indexing_op.cc:L777 + * }}} + * + * @param a The input array + * @param indices The index array + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def batch_take (a : org.apache.mxnet.NDArray, indices : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise sum of the input arrays with broadcasting. + * + * `broadcast_plus` is an alias to the function `broadcast_add`. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_add(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] + * + * broadcast_plus(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] + * + * Supported sparse operations: + * + * broadcast_add(csr, dense(1D)) = dense + * broadcast_add(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L58 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_add (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Broadcasts the input array over particular axes. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. + * + * `broadcast_axes` is an alias to the function `broadcast_axis`. + * + * Example:: + * + * // given x of shape (1,2,1) + * x = `[ `[ [ 1.], + * [ 2.] ] ] + * + * // broadcast x on on axis 2 + * broadcast_axis(x, axis=2, size=3) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] ] + * // broadcast x on on axes 0 and 2 + * broadcast_axis(x, axis=(0,2), size=(2,3)) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ], + * `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] ] + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L58 + * }}} + * + * @param data The input + * @param axis The axes to perform the broadcasting. + * @param size Target sizes of the broadcasting axes. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_axes (data : org.apache.mxnet.NDArray, axis : Option[org.apache.mxnet.Shape] = None, size : Option[org.apache.mxnet.Shape] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Broadcasts the input array over particular axes. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. + * + * `broadcast_axes` is an alias to the function `broadcast_axis`. + * + * Example:: + * + * // given x of shape (1,2,1) + * x = `[ `[ [ 1.], + * [ 2.] ] ] + * + * // broadcast x on on axis 2 + * broadcast_axis(x, axis=2, size=3) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] ] + * // broadcast x on on axes 0 and 2 + * broadcast_axis(x, axis=(0,2), size=(2,3)) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ], + * `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] ] + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L58 + * }}} + * + * @param data The input + * @param axis The axes to perform the broadcasting. + * @param size Target sizes of the broadcasting axes. 
+ * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_axis (data : org.apache.mxnet.NDArray, axis : Option[org.apache.mxnet.Shape] = None, size : Option[org.apache.mxnet.Shape] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise division of the input arrays with broadcasting. + * + * Example:: + * + * x = `[ [ 6., 6., 6.], + * [ 6., 6., 6.] ] + * + * y = `[ [ 2.], + * [ 3.] ] + * + * broadcast_div(x, y) = `[ [ 3., 3., 3.], + * [ 2., 2., 2.] ] + * + * Supported sparse operations: + * + * broadcast_div(csr, dense(1D)) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L187 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_div (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns the result of element-wise **equal to** (==) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_equal(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L46 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_equal (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns the result of element-wise **greater than** (>) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_greater(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L82 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_greater (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns the result of element-wise **greater than or equal to** (>=) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_greater_equal(x, y) = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L100 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_greater_equal (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns the hypotenuse of a right angled triangle, given its "legs" + * with broadcasting. + * + * It is equivalent to doing :math:`sqrt(x_1^2 + x_2^2)`. + * + * Example:: + * + * x = `[ [ 3., 3., 3.] ] + * + * y = `[ [ 4.], + * [ 4.] ] + * + * broadcast_hypot(x, y) = `[ [ 5., 5., 5.], + * [ 5., 5., 5.] ] + * + * z = `[ [ 0.], + * [ 4.] 
] + * + * broadcast_hypot(x, z) = `[ [ 3., 3., 3.], + * [ 5., 5., 5.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L158 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_hypot (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns the result of element-wise **lesser than** (<) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_lesser(x, y) = `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L118 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_lesser (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns the result of element-wise **lesser than or equal to** (<=) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_lesser_equal(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L136 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_lesser_equal (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Broadcasts lhs to have the same shape as rhs. + * + * Broadcasting is a mechanism that allows NDArrays to perform arithmetic operations + * with arrays of different shapes efficiently without creating multiple copies of arrays. + * Also see, `Broadcasting `_ for more explanation. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. + * + * For example:: + * + * broadcast_like(`[ [1,2,3] ], `[ [5,6,7],[7,8,9] ]) = `[ [ 1., 2., 3.], + * [ 1., 2., 3.] ]) + * + * broadcast_like([9], [1,2,3,4,5], lhs_axes=(0,), rhs_axes=(-1,)) = [9,9,9,9,9] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L135 + * }}} + * + * @param lhs First input. + * @param rhs Second input. + * @param lhs_axes Axes to perform broadcast on in the first input array + * @param rhs_axes Axes to copy from the second input array + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_like (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, lhs_axes : Option[org.apache.mxnet.Shape] = None, rhs_axes : Option[org.apache.mxnet.Shape] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns the result of element-wise **logical and** with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_logical_and(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] 
] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L154 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_logical_and (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns the result of element-wise **logical or** with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 0.], + * [ 1., 1., 0.] ] + * + * y = `[ [ 1.], + * [ 0.] ] + * + * broadcast_logical_or(x, y) = `[ [ 1., 1., 1.], + * [ 1., 1., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L172 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_logical_or (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns the result of element-wise **logical xor** with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 0.], + * [ 1., 1., 0.] ] + * + * y = `[ [ 1.], + * [ 0.] ] + * + * broadcast_logical_xor(x, y) = `[ [ 0., 0., 1.], + * [ 1., 1., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L190 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_logical_xor (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise maximum of the input arrays with broadcasting. + * + * This function compares two input arrays and returns a new array having the element-wise maxima. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_maximum(x, y) = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L81 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_maximum (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise minimum of the input arrays with broadcasting. + * + * This function compares two input arrays and returns a new array having the element-wise minima. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_maximum(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L117 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_minimum (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise difference of the input arrays with broadcasting. + * + * `broadcast_minus` is an alias to the function `broadcast_sub`. 
+ * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_sub(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * broadcast_minus(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * Supported sparse operations: + * + * broadcast_sub/minus(csr, dense(1D)) = dense + * broadcast_sub/minus(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L106 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_minus (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise modulo of the input arrays with broadcasting. + * + * Example:: + * + * x = `[ [ 8., 8., 8.], + * [ 8., 8., 8.] ] + * + * y = `[ [ 2.], + * [ 3.] ] + * + * broadcast_mod(x, y) = `[ [ 0., 0., 0.], + * [ 2., 2., 2.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L222 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_mod (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise product of the input arrays with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_mul(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * Supported sparse operations: + * + * broadcast_mul(csr, dense(1D)) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L146 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_mul (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns the result of element-wise **not equal to** (!=) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_not_equal(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L64 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_not_equal (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise sum of the input arrays with broadcasting. + * + * `broadcast_plus` is an alias to the function `broadcast_add`. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_add(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] + * + * broadcast_plus(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] 
] + * + * Supported sparse operations: + * + * broadcast_add(csr, dense(1D)) = dense + * broadcast_add(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L58 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_plus (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns result of first array elements raised to powers from second array, element-wise with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_power(x, y) = `[ [ 2., 2., 2.], + * [ 4., 4., 4.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L45 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_power (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise difference of the input arrays with broadcasting. + * + * `broadcast_minus` is an alias to the function `broadcast_sub`. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_sub(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * broadcast_minus(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * Supported sparse operations: + * + * broadcast_sub/minus(csr, dense(1D)) = dense + * broadcast_sub/minus(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L106 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_sub (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Broadcasts the input array to a new shape. + * + * Broadcasting is a mechanism that allows NDArrays to perform arithmetic operations + * with arrays of different shapes efficiently without creating multiple copies of arrays. + * Also see, `Broadcasting `_ for more explanation. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. + * + * For example:: + * + * broadcast_to(`[ [1,2,3] ], shape=(2,3)) = `[ [ 1., 2., 3.], + * [ 1., 2., 3.] ]) + * + * The dimension which you do not want to change can also be kept as `0` which means copy the original value. + * So with `shape=(2,0)`, we will obtain the same result as in the above example. + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L82 + * }}} + * + * @param data The input + * @param shape The shape of the desired array. We can set the dim to zero if it's same as the original. E.g `A = broadcast_to(B, shape=(10, 0, 0))` has the same meaning as `A = broadcast_axis(B, axis=0, size=10)`. 
+ * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def broadcast_to (data : org.apache.mxnet.NDArray, shape : Option[org.apache.mxnet.Shape] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Casts all elements of the input to a new type. + * + * .. note:: ``Cast`` is deprecated. Use ``cast`` instead. + * + * Example:: + * + * cast([0.9, 1.3], dtype='int32') = [0, 1] + * cast([1e20, 11.1], dtype='float16') = [inf, 11.09375] + * cast([300, 11.1, 10.9, -1, -3], dtype='uint8') = [44, 11, 10, 255, 253] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L665 + * }}} + * + * @param data The input. + * @param dtype Output data type. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def cast (data : org.apache.mxnet.NDArray, dtype : String, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Casts tensor storage type to the new type. + * + * When an NDArray with default storage type is cast to csr or row_sparse storage, + * the result is compact, which means: + * + * - for csr, zero values will not be retained + * - for row_sparse, row slices of all zeros will not be retained + * + * The storage type of ``cast_storage`` output depends on stype parameter: + * + * - cast_storage(csr, 'default') = default + * - cast_storage(row_sparse, 'default') = default + * - cast_storage(default, 'csr') = csr + * - cast_storage(default, 'row_sparse') = row_sparse + * - cast_storage(csr, 'csr') = csr + * - cast_storage(row_sparse, 'row_sparse') = row_sparse + * + * Example:: + * + * dense = `[ [ 0., 1., 0.], + * [ 2., 0., 3.], + * [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * + * # cast to row_sparse storage type + * rsp = cast_storage(dense, 'row_sparse') + * rsp.indices = [0, 1] + * rsp.values = `[ [ 0., 1., 0.], + * [ 2., 0., 3.] ] + * + * # cast to csr storage type + * csr = cast_storage(dense, 'csr') + * csr.indices = [1, 0, 2] + * csr.values = [ 1., 2., 3.] + * csr.indptr = [0, 1, 3, 3, 3] + * + * + * + * Defined in src/operator/tensor/cast_storage.cc:L71 + * }}} + * + * @param data The input. + * @param stype Output storage type. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def cast_storage (data : org.apache.mxnet.NDArray, stype : String, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise cube-root value of the input. + * + * .. math:: + * cbrt(x) = \sqrt[3]{x} + * + * Example:: + * + * cbrt([1, 8, -125]) = [1, 2, -5] + * + * The storage type of ``cbrt`` output depends upon the input storage type: + * + * - cbrt(default) = default + * - cbrt(row_sparse) = row_sparse + * - cbrt(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L216 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def cbrt (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise ceiling of the input. + * + * The ceil of the scalar x is the smallest integer i, such that i >= x. + * + * Example:: + * + * ceil([-2.1, -1.9, 1.5, 1.9, 2.1]) = [-2., -1., 2., 2., 3.] 
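+ *
+ * The same call sketched in Scala (an illustration only, assuming the generated
+ * bindings are exposed as `NDArray.api` and `.head` unwraps the result)::
+ *
+ *   val x = NDArray.array(Array(-2.1f, -1.9f, 1.5f, 1.9f, 2.1f), Shape(5))
+ *   val y = NDArray.api.ceil(x).head   // [-2., -1., 2., 2., 3.]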
+ * + * The storage type of ``ceil`` output depends upon the input storage type: + * + * - ceil(default) = default + * - ceil(row_sparse) = row_sparse + * - ceil(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L818 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def ceil (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Picks elements from an input array according to the input indices along the given axis. + * + * Given an input array of shape ``(d0, d1)`` and indices of shape ``(i0,)``, the result will be + * an output array of shape ``(i0,)`` with:: + * + * output[i] = input[i, indices[i] ] + * + * By default, if any index mentioned is too large, it is replaced by the index that addresses + * the last element along an axis (the `clip` mode). + * + * This function supports n-dimensional input and (n-1)-dimensional indices arrays. + * + * Examples:: + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] ] + * + * // picks elements with specified indices along axis 0 + * pick(x, y=[0,1], 0) = [ 1., 4.] + * + * // picks elements with specified indices along axis 1 + * pick(x, y=[0,1,0], 1) = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] ] + * + * // picks elements with specified indices along axis 1 using 'wrap' mode + * // to place indices that would normally be out of bounds + * pick(x, y=[2,-1,-2], 1, mode='wrap') = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] ] + * + * // picks elements with specified indices along axis 1 and dims are maintained + * pick(x,y, 1, keepdims=True) = `[ [ 2.], + * [ 3.], + * [ 6.] ] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L155 + * }}} + * + * @param data The input array + * @param index The index array + * @param axis int or None. The axis along which to pick the elements. Negative values mean indexing from right to left. If it is `None`, the elements in the index w.r.t the flattened input will be picked. + * @param keepdims If true, the axis where we pick the elements is left in the result as dimension with size one. + * @param mode Specify how out-of-bound indices behave. Default is "clip". "clip" means clip to the range. So, if all indices mentioned are too large, they are replaced by the index that addresses the last element along an axis. "wrap" means to wrap around. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def choose_element_0index (data : org.apache.mxnet.NDArray, index : org.apache.mxnet.NDArray, axis : Option[Int] = None, keepdims : Option[Boolean] = None, mode : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Clips (limits) the values in an array. + * Given an interval, values outside the interval are clipped to the interval edges. + * Clipping ``x`` between `a_min` and `a_max` would be:: + * .. math:: + * clip(x, a_min, a_max) = \max(\min(x, a_max), a_min) + * Example:: + * x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + * clip(x,1,8) = [ 1., 1., 2., 3., 4., 5., 6., 7., 8., 8.]
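+ * A Scala sketch of the same clipping call (an illustration only, assuming the
+ * generated bindings are exposed as `NDArray.api`)::
+ *   val x = NDArray.array((0 until 10).map(_.toFloat).toArray, Shape(10))
+ *   val y = NDArray.api.clip(x, 1f, 8f).head   // [1., 1., 2., 3., 4., 5., 6., 7., 8., 8.]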
+ * The storage type of ``clip`` output depends on storage types of inputs and the a_min, a_max \ + * parameter values: + * - clip(default) = default + * - clip(row_sparse, a_min <= 0, a_max >= 0) = row_sparse + * - clip(csr, a_min <= 0, a_max >= 0) = csr + * - clip(row_sparse, a_min < 0, a_max < 0) = default + * - clip(row_sparse, a_min > 0, a_max > 0) = default + * - clip(csr, a_min < 0, a_max < 0) = csr + * - clip(csr, a_min > 0, a_max > 0) = csr + * + * + * Defined in src/operator/tensor/matrix_op.cc:L677 + * }}} + * + * @param data Input array. + * @param a_min Minimum value + * @param a_max Maximum value + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def clip (data : org.apache.mxnet.NDArray, a_min : Float, a_max : Float, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Joins input arrays along a given axis. + * + * .. note:: `Concat` is deprecated. Use `concat` instead. + * + * The dimensions of the input arrays should be the same except the axis along + * which they will be concatenated. + * The dimension of the output array along the concatenated axis will be equal + * to the sum of the corresponding dimensions of the input arrays. + * + * The storage type of ``concat`` output depends on storage types of inputs + * + * - concat(csr, csr, ..., csr, dim=0) = csr + * - otherwise, ``concat`` generates output with default storage + * + * Example:: + * + * x = `[ [1,1],[2,2] ] + * y = `[ [3,3],[4,4],[5,5] ] + * z = `[ [6,6], [7,7],[8,8] ] + * + * concat(x,y,z,dim=0) = `[ [ 1., 1.], + * [ 2., 2.], + * [ 3., 3.], + * [ 4., 4.], + * [ 5., 5.], + * [ 6., 6.], + * [ 7., 7.], + * [ 8., 8.] ] + * + * Note that you cannot concat x,y,z along dimension 1 since dimension + * 0 is not the same for all the input arrays. + * + * concat(y,z,dim=1) = `[ [ 3., 3., 6., 6.], + * [ 4., 4., 7., 7.], + * [ 5., 5., 8., 8.] ] + * + * + * + * Defined in src/operator/nn/concat.cc:L383 + * }}} + * + * @param data List of arrays to concatenate + * @param num_args Number of inputs to be concated. + * @param dim the dimension to be concated. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def concat (data : Array[org.apache.mxnet.NDArray], num_args : Int, dim : Option[Int] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the element-wise cosine of the input array. + * + * The input should be in radians (:math:`2\pi` rad equals 360 degrees). + * + * .. math:: + * cos([0, \pi/4, \pi/2]) = [1, 0.707, 0] + * + * The storage type of ``cos`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L90 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def cos (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns the hyperbolic cosine of the input array, computed element-wise. + * + * .. math:: + * cosh(x) = 0.5\times(exp(x) + exp(-x)) + * + * The storage type of ``cosh`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L351 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def cosh (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Slices a region of the array. + * .. note:: ``crop`` is deprecated. Use ``slice`` instead. 
+ * This function returns a sliced array between the indices given + * by `begin` and `end` with the corresponding `step`. + * For an input array of ``shape=(d_0, d_1, ..., d_n-1)``, + * slice operation with ``begin=(b_0, b_1...b_m-1)``, + * ``end=(e_0, e_1, ..., e_m-1)``, and ``step=(s_0, s_1, ..., s_m-1)``, + * where m <= n, results in an array with the shape + * ``(|e_0-b_0|/|s_0|, ..., |e_m-1-b_m-1|/|s_m-1|, d_m, ..., d_n-1)``. + * The resulting array's *k*-th dimension contains elements + * from the *k*-th dimension of the input array starting + * from index ``b_k`` (inclusive) with step ``s_k`` + * until reaching ``e_k`` (exclusive). + * If the *k*-th elements are `None` in the sequence of `begin`, `end`, + * and `step`, the following rule will be used to set default values. + * If `s_k` is `None`, set `s_k=1`. If `s_k > 0`, set `b_k=0`, `e_k=d_k`; + * else, set `b_k=d_k-1`, `e_k=-1`. + * The storage type of ``slice`` output depends on storage types of inputs + * - slice(csr) = csr + * - otherwise, ``slice`` generates output with default storage + * .. note:: When input data storage type is csr, it only supports + * step=(), or step=(None,), or step=(1,) to generate a csr output. + * For other step parameter values, it falls back to slicing + * a dense tensor. + * Example:: + * x = `[ [ 1., 2., 3., 4.], + * [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice(x, begin=(0,1), end=(2,4)) = `[ [ 2., 3., 4.], + * [ 6., 7., 8.] ] + * slice(x, begin=(None, 0), end=(None, 3), step=(-1, 2)) = `[ [9., 11.], + * [5., 7.], + * [1., 3.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L482 + * }}} + * + * @param data Source input + * @param begin starting indices for the slice operation, supports negative indices. + * @param end ending indices for the slice operation, supports negative indices. + * @param step step for the slice operation, supports negative values. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def crop (data : org.apache.mxnet.NDArray, begin : org.apache.mxnet.Shape, end : org.apache.mxnet.Shape, step : Option[org.apache.mxnet.Shape] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Connectionist Temporal Classification Loss. + * + * .. note:: The existing alias ``contrib_CTCLoss`` is deprecated. + * + * The shapes of the inputs and outputs: + * + * - **data**: `(sequence_length, batch_size, alphabet_size)` + * - **label**: `(batch_size, label_sequence_length)` + * - **out**: `(batch_size)` + * + * The `data` tensor consists of sequences of activation vectors (without applying softmax), + * with i-th channel in the last dimension corresponding to i-th label + * for i between 0 and alphabet_size-1 (i.e always 0-indexed). + * Alphabet size should include one additional value reserved for blank label. + * When `blank_label` is ``"first"``, the ``0``-th channel is be reserved for + * activation of blank label, or otherwise if it is "last", ``(alphabet_size-1)``-th channel should be + * reserved for blank label. + * + * ``label`` is an index matrix of integers. When `blank_label` is ``"first"``, + * the value 0 is then reserved for blank label, and should not be passed in this matrix. Otherwise, + * when `blank_label` is ``"last"``, the value `(alphabet_size-1)` is reserved for blank label. + * + * If a sequence of labels is shorter than *label_sequence_length*, use the special + * padding value at the end of the sequence to conform it to the correct + * length. 
The padding value is `0` when `blank_label` is ``"first"``, and `-1` otherwise. + * + * For example, suppose the vocabulary is `[a, b, c]`, and in one batch we have three sequences + * 'ba', 'cbb', and 'abac'. When `blank_label` is ``"first"``, we can index the labels as + * `{'a': 1, 'b': 2, 'c': 3}`, and we reserve the 0-th channel for blank label in data tensor. + * The resulting `label` tensor should be padded to be:: + * + * `[ [2, 1, 0, 0], [3, 2, 2, 0], [1, 2, 1, 3] ] + * + * When `blank_label` is ``"last"``, we can index the labels as + * `{'a': 0, 'b': 1, 'c': 2}`, and we reserve the channel index 3 for blank label in data tensor. + * The resulting `label` tensor should be padded to be:: + * + * `[ [1, 0, -1, -1], [2, 1, 1, -1], [0, 1, 0, 2] ] + * + * ``out`` is a list of CTC loss values, one per example in the batch. + * + * See *Connectionist Temporal Classification: Labelling Unsegmented + * Sequence Data with Recurrent Neural Networks*, A. Graves *et al*. for more + * information on the definition and the algorithm. + * + * + * + * Defined in src/operator/nn/ctc_loss.cc:L100 + * }}} + * + * @param data Input ndarray + * @param label Ground-truth labels for the loss. + * @param data_lengths Lengths of data for each of the samples. Only required when use_data_lengths is true. + * @param label_lengths Lengths of labels for each of the samples. Only required when use_label_lengths is true. + * @param use_data_lengths Whether the data lengths are decided by `data_lengths`. If false, the lengths are equal to the max sequence length. + * @param use_label_lengths Whether the label lengths are decided by `label_lengths`, or derived from `padding_mask`. If false, the lengths are derived from the first occurrence of the value of `padding_mask`. The value of `padding_mask` is ``0`` when the first CTC label is reserved for blank, and ``-1`` when the last label is reserved for blank. See `blank_label`. + * @param blank_label Set the label that is reserved for blank label. If "first", the 0-th label is reserved, and label values for tokens in the vocabulary are between ``1`` and ``alphabet_size-1``, and the padding mask is ``-1``. If "last", the last label value ``alphabet_size-1`` is reserved for blank label instead, and label values for tokens in the vocabulary are between ``0`` and ``alphabet_size-2``, and the padding mask is ``0``. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def ctc_loss (data : org.apache.mxnet.NDArray, label : org.apache.mxnet.NDArray, data_lengths : org.apache.mxnet.NDArray, label_lengths : org.apache.mxnet.NDArray, use_data_lengths : Option[Boolean] = None, use_label_lengths : Option[Boolean] = None, blank_label : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Return the cumulative sum of the elements along a given axis. + * + * Defined in src/operator/numpy/np_cumsum.cc:L70 + * }}} + * + * @param a Input ndarray + * @param axis Axis along which the cumulative sum is computed. The default (None) is to compute the cumsum over the flattened array. + * @param dtype Type of the returned array and of the accumulator in which the elements are summed. If dtype is not specified, it defaults to the dtype of a, unless a has an integer dtype with a precision less than that of the default platform integer. In that case, the default platform integer is used.
+ * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def cumsum (a : org.apache.mxnet.NDArray, axis : Option[Int] = None, dtype : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Converts each element of the input array from radians to degrees. + * + * .. math:: + * degrees([0, \pi/2, \pi, 3\pi/2, 2\pi]) = [0, 90, 180, 270, 360] + * + * The storage type of ``degrees`` output depends upon the input storage type: + * + * - degrees(default) = default + * - degrees(row_sparse) = row_sparse + * - degrees(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L274 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def degrees (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Rearranges(permutes) data from depth into blocks of spatial data. + * Similar to ONNX DepthToSpace operator: + * https://github.com/onnx/onnx/blob/master/docs/Operators.md#DepthToSpace. + * The output is a new tensor where the values from depth dimension are moved in spatial blocks + * to height and width dimension. The reverse of this operation is ``space_to_depth``. + * .. math:: + * \begin{gather*} + * x \prime = reshape(x, [N, block\_size, block\_size, C / (block\_size ^ 2), H * block\_size, W * block\_size]) \\ + * x \prime \prime = transpose(x \prime, [0, 3, 4, 1, 5, 2]) \\ + * y = reshape(x \prime \prime, [N, C / (block\_size ^ 2), H * block\_size, W * block\_size]) + * \end{gather*} + * where :math:`x` is an input tensor with default layout as :math:`[N, C, H, W]`: [batch, channels, height, width] + * and :math:`y` is the output tensor of layout :math:`[N, C / (block\_size ^ 2), H * block\_size, W * block\_size]` + * Example:: + * x = `[ [`[ [0, 1, 2], + * [3, 4, 5] ], + * `[ [6, 7, 8], + * [9, 10, 11] ], + * `[ [12, 13, 14], + * [15, 16, 17] ], + * `[ [18, 19, 20], + * [21, 22, 23] ] ] ] + * depth_to_space(x, 2) = `[ [`[ [0, 6, 1, 7, 2, 8], + * [12, 18, 13, 19, 14, 20], + * [3, 9, 4, 10, 5, 11], + * [15, 21, 16, 22, 17, 23] ] ] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L972 + * }}} + * + * @param data Input ndarray + * @param block_size Blocks of [block_size. block_size] are moved + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def depth_to_space (data : org.apache.mxnet.NDArray, block_size : Int, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Extracts a diagonal or constructs a diagonal array. + * + * ``diag``'s behavior depends on the input array dimensions: + * + * - 1-D arrays: constructs a 2-D array with the input as its diagonal, all other elements are zero. + * - N-D arrays: extracts the diagonals of the sub-arrays with axes specified by ``axis1`` and ``axis2``. + * The output shape would be decided by removing the axes numbered ``axis1`` and ``axis2`` from the + * input shape and appending to the result a new axis with the size of the diagonals in question. + * + * For example, when the input shape is `(2, 3, 4, 5)`, ``axis1`` and ``axis2`` are 0 and 2 + * respectively and ``k`` is 0, the resulting shape would be `(3, 5, 2)`. 
+ * + * Examples:: + * + * x = `[ [1, 2, 3], + * [4, 5, 6] ] + * + * diag(x) = [1, 5] + * + * diag(x, k=1) = [2, 6] + * + * diag(x, k=-1) = [4] + * + * x = [1, 2, 3] + * + * diag(x) = `[ [1, 0, 0], + * [0, 2, 0], + * [0, 0, 3] ] + * + * diag(x, k=1) = `[ [0, 1, 0], + * [0, 0, 2], + * [0, 0, 0] ] + * + * diag(x, k=-1) = `[ [0, 0, 0], + * [1, 0, 0], + * [0, 2, 0] ] + * + * x = `[ `[ [1, 2], + * [3, 4] ], + * + * `[ [5, 6], + * [7, 8] ] ] + * + * diag(x) = `[ [1, 7], + * [2, 8] ] + * + * diag(x, k=1) = `[ [3], + * [4] ] + * + * diag(x, axis1=-2, axis2=-1) = `[ [1, 4], + * [5, 8] ] + * + * + * + * Defined in src/operator/tensor/diag_op.cc:L87 + * }}} + * + * @param data Input ndarray + * @param k Diagonal in question. The default is 0. Use k>0 for diagonals above the main diagonal, and k<0 for diagonals below the main diagonal. If input has shape (S0 S1) k must be between -S0 and S1 + * @param axis1 The first axis of the sub-arrays of interest. Ignored when the input is a 1-D array. + * @param axis2 The second axis of the sub-arrays of interest. Ignored when the input is a 1-D array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def diag (data : org.apache.mxnet.NDArray, k : Option[Int] = None, axis1 : Option[Int] = None, axis2 : Option[Int] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Dot product of two arrays. + * + * ``dot``'s behavior depends on the input array dimensions: + * + * - 1-D arrays: inner product of vectors + * - 2-D arrays: matrix multiplication + * - N-D arrays: a sum product over the last axis of the first input and the first + * axis of the second input + * + * For example, given 3-D ``x`` with shape `(n,m,k)` and ``y`` with shape `(k,r,s)`, the + * result array will have shape `(n,m,r,s)`. It is computed by:: + * + * dot(x,y)[i,j,a,b] = sum(x[i,j,:]*y[:,a,b]) + * + * Example:: + * + * x = reshape([0,1,2,3,4,5,6,7], shape=(2,2,2)) + * y = reshape([7,6,5,4,3,2,1,0], shape=(2,2,2)) + * dot(x,y)[0,0,1,1] = 0 + * sum(x[0,0,:]*y[:,1,1]) = 0 + * + * The storage type of ``dot`` output depends on storage types of inputs, transpose option and + * forward_stype option for output storage type. Implemented sparse operations include: + * + * - dot(default, default, transpose_a=True/False, transpose_b=True/False) = default + * - dot(csr, default, transpose_a=True) = default + * - dot(csr, default, transpose_a=True) = row_sparse + * - dot(csr, default) = default + * - dot(csr, row_sparse) = default + * - dot(default, csr) = csr (CPU only) + * - dot(default, csr, forward_stype='default') = default + * - dot(default, csr, transpose_b=True, forward_stype='default') = default + * + * If the combination of input storage types and forward_stype does not match any of the + * above patterns, ``dot`` will fallback and generate output with default storage. + * + * .. Note:: + * + * If the storage type of the lhs is "csr", the storage type of gradient w.r.t rhs will be + * "row_sparse". Only a subset of optimizers support sparse gradients, including SGD, AdaGrad + * and Adam. Note that by default lazy updates is turned on, which may perform differently + * from standard updates. For more details, please check the Optimization API at: + * https://mxnet.incubator.apache.org/api/python/optimization/optimization.html + * + * + * + * Defined in src/operator/tensor/dot.cc:L77 + * }}} + * + * @param lhs The first input + * @param rhs The second input + * @param transpose_a If true then transpose the first input before dot. 
+ * @param transpose_b If true then transpose the second input before dot. + * @param forward_stype The desired storage type of the forward output given by user, if thecombination of input storage types and this hint does not matchany implemented ones, the dot operator will perform fallback operationand still produce an output of the desired storage type. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def dot (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, transpose_a : Option[Boolean] = None, transpose_b : Option[Boolean] = None, forward_stype : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Adds arguments element-wise. + * + * The storage type of ``elemwise_add`` output depends on storage types of inputs + * + * - elemwise_add(row_sparse, row_sparse) = row_sparse + * - elemwise_add(csr, csr) = csr + * - elemwise_add(default, csr) = default + * - elemwise_add(csr, default) = default + * - elemwise_add(default, rsp) = default + * - elemwise_add(rsp, default) = default + * - otherwise, ``elemwise_add`` generates output with default storage + * }}} + * + * @param lhs first input + * @param rhs second input + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def elemwise_add (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Divides arguments element-wise. + * + * The storage type of ``elemwise_div`` output is always dense + * }}} + * + * @param lhs first input + * @param rhs second input + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def elemwise_div (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Multiplies arguments element-wise. + * + * The storage type of ``elemwise_mul`` output depends on storage types of inputs + * + * - elemwise_mul(default, default) = default + * - elemwise_mul(row_sparse, row_sparse) = row_sparse + * - elemwise_mul(default, row_sparse) = row_sparse + * - elemwise_mul(row_sparse, default) = row_sparse + * - elemwise_mul(csr, csr) = csr + * - otherwise, ``elemwise_mul`` generates output with default storage + * }}} + * + * @param lhs first input + * @param rhs second input + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def elemwise_mul (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Subtracts arguments element-wise. + * + * The storage type of ``elemwise_sub`` output depends on storage types of inputs + * + * - elemwise_sub(row_sparse, row_sparse) = row_sparse + * - elemwise_sub(csr, csr) = csr + * - elemwise_sub(default, csr) = default + * - elemwise_sub(csr, default) = default + * - elemwise_sub(default, rsp) = default + * - elemwise_sub(rsp, default) = default + * - otherwise, ``elemwise_sub`` generates output with default storage + * }}} + * + * @param lhs first input + * @param rhs second input + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def elemwise_sub (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise gauss error function of the input. + * + * Example:: + * + * erf([0, -1., 10.]) = [0., -0.8427, 1.] 
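+ *
+ * Sketched in Scala (an illustration only, assuming the generated bindings are
+ * exposed as `NDArray.api` and `.head` unwraps the result)::
+ *
+ *   val x = NDArray.array(Array(0f, -1f, 10f), Shape(3))
+ *   val y = NDArray.api.erf(x).head   // approximately [0., -0.8427, 1.]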
+ * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L886 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def erf (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise inverse gauss error function of the input. + * + * Example:: + * + * erfinv([0, 0.5., -1.]) = [0., 0.4769, -inf] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L907 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def erfinv (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise exponential value of the input. + * + * .. math:: + * exp(x) = e^x \approx 2.718^x + * + * Example:: + * + * exp([0, 1, 2]) = [1., 2.71828175, 7.38905621] + * + * The storage type of ``exp`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L63 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def exp (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Inserts a new axis of size 1 into the array shape + * For example, given ``x`` with shape ``(2,3,4)``, then ``expand_dims(x, axis=1)`` + * will return a new array with shape ``(2,1,3,4)``. + * + * + * Defined in src/operator/tensor/matrix_op.cc:L395 + * }}} + * + * @param data Source input + * @param axis Position where new axis is to be inserted. Suppose that the input `NDArray`'s dimension is `ndim`, the range of the inserted axis is `[-ndim, ndim]` + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def expand_dims (data : org.apache.mxnet.NDArray, axis : Int, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns ``exp(x) - 1`` computed element-wise on the input. + * + * This function provides greater precision than ``exp(x) - 1`` for small values of ``x``. + * + * The storage type of ``expm1`` output depends upon the input storage type: + * + * - expm1(default) = default + * - expm1(row_sparse) = row_sparse + * - expm1(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L224 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def expm1 (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Fill one element of each line(row for python, column for R/Julia) in lhs according to index indicated by rhs and values indicated by mhs. This function assume rhs uses 0-based index. + * }}} + * + * @param lhs Left operand to the function. + * @param mhs Middle operand to the function. + * @param rhs Right operand to the function. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def fill_element_0index (lhs : org.apache.mxnet.NDArray, mhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise rounded value to the nearest \ + * integer towards zero of the input. + * + * Example:: + * + * fix([-2.1, -1.9, 1.9, 2.1]) = [-2., -1., 1., 2.] 
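+ *
+ * A corresponding Scala sketch (an illustration only, assuming the generated
+ * bindings are exposed as `NDArray.api`)::
+ *
+ *   val x = NDArray.array(Array(-2.1f, -1.9f, 1.9f, 2.1f), Shape(4))
+ *   val y = NDArray.api.fix(x).head   // [-2., -1., 1., 2.]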
+ * + * The storage type of ``fix`` output depends upon the input storage type: + * + * - fix(default) = default + * - fix(row_sparse) = row_sparse + * - fix(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L875 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def fix (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Flattens the input array into a 2-D array by collapsing the higher dimensions. + * .. note:: `Flatten` is deprecated. Use `flatten` instead. + * For an input array with shape ``(d1, d2, ..., dk)``, `flatten` operation reshapes + * the input array into an output array of shape ``(d1, d2*...*dk)``. + * Note that the behavior of this function is different from numpy.ndarray.flatten, + * which behaves similar to mxnet.ndarray.reshape((-1,)). + * Example:: + * x = `[ [ + * [1,2,3], + * [4,5,6], + * [7,8,9] + * ], + * [ [1,2,3], + * [4,5,6], + * [7,8,9] + * ] ], + * flatten(x) = `[ [ 1., 2., 3., 4., 5., 6., 7., 8., 9.], + * [ 1., 2., 3., 4., 5., 6., 7., 8., 9.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L250 + * }}} + * + * @param data Input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def flatten (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Reverses the order of elements along given axis while preserving array shape. + * Note: reverse and flip are equivalent. We use reverse in the following examples. + * Examples:: + * x = `[ [ 0., 1., 2., 3., 4.], + * [ 5., 6., 7., 8., 9.] ] + * reverse(x, axis=0) = `[ [ 5., 6., 7., 8., 9.], + * [ 0., 1., 2., 3., 4.] ] + * reverse(x, axis=1) = `[ [ 4., 3., 2., 1., 0.], + * [ 9., 8., 7., 6., 5.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L832 + * }}} + * + * @param data Input data array + * @param axis The axis which to reverse elements. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def flip (data : org.apache.mxnet.NDArray, axis : org.apache.mxnet.Shape, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise floor of the input. + * + * The floor of the scalar x is the largest integer i, such that i <= x. + * + * Example:: + * + * floor([-2.1, -1.9, 1.5, 1.9, 2.1]) = [-3., -2., 1., 1., 2.] + * + * The storage type of ``floor`` output depends upon the input storage type: + * + * - floor(default) = default + * - floor(row_sparse) = row_sparse + * - floor(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L837 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def floor (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * The FTML optimizer described in + * *FTML - Follow the Moving Leader in Deep Learning*, + * available at http://proceedings.mlr.press/v70/zheng17a/zheng17a.pdf. + * + * .. 
math:: + * + * g_t = \nabla J(W_{t-1})\\ + * v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2\\ + * d_t = \frac{ 1 - \beta_1^t }{ \eta_t } (\sqrt{ \frac{ v_t }{ 1 - \beta_2^t } } + \epsilon) + * \sigma_t = d_t - \beta_1 d_{t-1} + * z_t = \beta_1 z_{ t-1 } + (1 - \beta_1^t) g_t - \sigma_t W_{t-1} + * W_t = - \frac{ z_t }{ d_t } + * + * + * + * Defined in src/operator/optimizer_op.cc:L640 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param d Internal state ``d_t`` + * @param v Internal state ``v_t`` + * @param z Internal state ``z_t`` + * @param lr Learning rate. + * @param beta1 Generally close to 0.5. + * @param beta2 Generally close to 1. + * @param epsilon Epsilon to prevent div 0. + * @param t Number of update. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_grad Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def ftml_update (weight : org.apache.mxnet.NDArray, grad : org.apache.mxnet.NDArray, d : org.apache.mxnet.NDArray, v : org.apache.mxnet.NDArray, z : org.apache.mxnet.NDArray, lr : Float, beta1 : Option[Float] = None, beta2 : Option[Float] = None, epsilon : Option[Double] = None, t : Int, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_grad : Option[Float] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Update function for Ftrl optimizer. + * Referenced from *Ad Click Prediction: a View from the Trenches*, available at + * http://dl.acm.org/citation.cfm?id=2488200. + * + * It updates the weights using:: + * + * rescaled_grad = clip(grad * rescale_grad, clip_gradient) + * z += rescaled_grad - (sqrt(n + rescaled_grad**2) - sqrt(n)) * weight / learning_rate + * n += rescaled_grad**2 + * w = (sign(z) * lamda1 - z) / ((beta + sqrt(n)) / learning_rate + wd) * (abs(z) > lamda1) + * + * If w, z and n are all of ``row_sparse`` storage type, + * only the row slices whose indices appear in grad.indices are updated (for w, z and n):: + * + * for row in grad.indices: + * rescaled_grad[row] = clip(grad[row] * rescale_grad, clip_gradient) + * z[row] += rescaled_grad[row] - (sqrt(n[row] + rescaled_grad[row]**2) - sqrt(n[row])) * weight[row] / learning_rate + * n[row] += rescaled_grad[row]**2 + * w[row] = (sign(z[row]) * lamda1 - z[row]) / ((beta + sqrt(n[row])) / learning_rate + wd) * (abs(z[row]) > lamda1) + * + * + * + * Defined in src/operator/optimizer_op.cc:L876 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param z z + * @param n Square of grad + * @param lr Learning rate + * @param lamda1 The L1 regularization coefficient. + * @param beta Per-Coordinate Learning Rate beta. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). 
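+ * @note A minimal Scala usage sketch (hypothetical, not part of the generated docs; it assumes
+ *       `import org.apache.mxnet.{NDArray, Shape}` and the concrete `NDArray.api` object that
+ *       implements this base class):
+ *       {{{
+ *       val weight = NDArray.ones(Shape(2, 2))
+ *       val grad   = NDArray.ones(Shape(2, 2))
+ *       val z      = NDArray.zeros(Shape(2, 2))
+ *       val n      = NDArray.zeros(Shape(2, 2))
+ *       // writing the result into `weight` via `out` applies the update in place
+ *       NDArray.api.ftrl_update(weight, grad, z, n, lr = 0.1f, out = Some(weight))
+ *       }}}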
+ * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def ftrl_update (weight : org.apache.mxnet.NDArray, grad : org.apache.mxnet.NDArray, z : org.apache.mxnet.NDArray, n : org.apache.mxnet.NDArray, lr : Float, lamda1 : Option[Float] = None, beta : Option[Float] = None, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns the gamma function (extension of the factorial function \ + * to the reals), computed element-wise on the input array. + * + * The storage type of ``gamma`` output is always dense + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def gamma (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise log of the absolute value of the gamma function \ + * of the input. + * + * The storage type of ``gammaln`` output is always dense + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def gammaln (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Gather elements or slices from `data` and store to a tensor whose + * shape is defined by `indices`. + * + * Given `data` with shape `(X_0, X_1, ..., X_{N-1})` and indices with shape + * `(M, Y_0, ..., Y_{K-1})`, the output will have shape `(Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1})`, + * where `M <= N`. If `M == N`, output shape will simply be `(Y_0, ..., Y_{K-1})`. + * + * The elements in output is defined as follows:: + * + * output[y_0, ..., y_{K-1}, x_M, ..., x_{N-1}] = data[indices[0, y_0, ..., y_{K-1}], + * ..., + * indices[M-1, y_0, ..., y_{K-1}], + * x_M, ..., x_{N-1}] + * + * Examples:: + * + * data = `[ [0, 1], [2, 3] ] + * indices = `[ [1, 1, 0], [0, 1, 0] ] + * gather_nd(data, indices) = [2, 3, 0] + * + * data = `[ `[ [1, 2], [3, 4] ], `[ [5, 6], [7, 8] ] ] + * indices = `[ [0, 1], [1, 0] ] + * gather_nd(data, indices) = `[ [3, 4], [5, 6] ] + * }}} + * + * @param data data + * @param indices indices + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def gather_nd (data : org.apache.mxnet.NDArray, indices : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes hard sigmoid of x element-wise. + * + * .. math:: + * y = max(0, min(1, alpha * x + beta)) + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L161 + * }}} + * + * @param data The input array. + * @param alpha Slope of hard sigmoid + * @param beta Bias of hard sigmoid. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def hard_sigmoid (data : org.apache.mxnet.NDArray, alpha : Option[Float] = None, beta : Option[Float] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns a copy of the input. + * + * From:src/operator/tensor/elemwise_unary_op_basic.cc:246 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def identity (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the Khatri-Rao product of the input matrices. + * + * Given a collection of :math:`n` input matrices, + * + * .. 
math:: + * A_1 \in \mathbb{R}^{M_1 \times M}, \ldots, A_n \in \mathbb{R}^{M_n \times N}, + * + * the (column-wise) Khatri-Rao product is defined as the matrix, + * + * .. math:: + * X = A_1 \otimes \cdots \otimes A_n \in \mathbb{R}^{(M_1 \cdots M_n) \times N}, + * + * where the :math:`k` th column is equal to the column-wise outer product + * :math:`{A_1}_k \otimes \cdots \otimes {A_n}_k` where :math:`{A_i}_k` is the kth + * column of the ith matrix. + * + * Example:: + * + * >>> A = mx.nd.array(`[ [1, -1], + * >>> [2, -3] ]) + * >>> B = mx.nd.array(`[ [1, 4], + * >>> [2, 5], + * >>> [3, 6] ]) + * >>> C = mx.nd.khatri_rao(A, B) + * >>> print(C.asnumpy()) + * `[ [ 1. -4.] + * [ 2. -5.] + * [ 3. -6.] + * [ 2. -12.] + * [ 4. -15.] + * [ 6. -18.] ] + * + * + * + * Defined in src/operator/contrib/krprod.cc:L108 + * }}} + * + * @param args Positional input matrices + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def khatri_rao (args : Array[org.apache.mxnet.NDArray], out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Phase I of lamb update it performs the following operations and returns g:. + * + * Link to paper: https://arxiv.org/pdf/1904.00962.pdf + * + * .. math:: + * \begin{gather*} + * grad = grad * rescale_grad + * if (grad < -clip_gradient) + * then + * grad = -clip_gradient + * if (grad > clip_gradient) + * then + * grad = clip_gradient + * + * mean = beta1 * mean + (1 - beta1) * grad; + * variance = beta2 * variance + (1. - beta2) * grad ^ 2; + * + * if (bias_correction) + * then + * mean_hat = mean / (1. - beta1^t); + * var_hat = var / (1 - beta2^t); + * g = mean_hat / (var_hat^(1/2) + epsilon) + wd * weight; + * else + * g = mean / (var_data^(1/2) + epsilon) + wd * weight; + * \end{gather*} + * + * + * + * Defined in src/operator/optimizer_op.cc:L953 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param mean Moving mean + * @param vari Moving variance + * @param beta1 The decay rate for the 1st moment estimates. + * @param beta2 The decay rate for the 2nd moment estimates. + * @param epsilon A small constant for numerical stability. + * @param t Index update count. + * @param bias_correction Whether to use bias correction. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def lamb_update_phase1 (weight : org.apache.mxnet.NDArray, grad : org.apache.mxnet.NDArray, mean : org.apache.mxnet.NDArray, vari : org.apache.mxnet.NDArray, beta1 : Option[Float] = None, beta2 : Option[Float] = None, epsilon : Option[Float] = None, t : Int, bias_correction : Option[Boolean] = None, wd : Float, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Phase II of lamb update it performs the following operations and updates grad. + * + * Link to paper: https://arxiv.org/pdf/1904.00962.pdf + * + * .. 
math:: + * \begin{gather*} + * if (lower_bound >= 0) + * then + * r1 = max(r1, lower_bound) + * if (upper_bound >= 0) + * then + * r1 = max(r1, upper_bound) + * + * if (r1 == 0 or r2 == 0) + * then + * lr = lr + * else + * lr = lr * (r1/r2) + * weight = weight - lr * g + * \end{gather*} + * + * + * + * Defined in src/operator/optimizer_op.cc:L992 + * }}} + * + * @param weight Weight + * @param g Output of lamb_update_phase 1 + * @param r1 r1 + * @param r2 r2 + * @param lr Learning rate + * @param lower_bound Lower limit of norm of weight. If lower_bound <= 0, Lower limit is not set + * @param upper_bound Upper limit of norm of weight. If upper_bound <= 0, Upper limit is not set + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def lamb_update_phase2 (weight : org.apache.mxnet.NDArray, g : org.apache.mxnet.NDArray, r1 : org.apache.mxnet.NDArray, r2 : org.apache.mxnet.NDArray, lr : Float, lower_bound : Option[Float] = None, upper_bound : Option[Float] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Compute the determinant of a matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* is a square matrix. We compute: + * + * *out* = *det(A)* + * + * If *n>2*, *det* is performed separately on the trailing two dimensions + * for all inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * .. note:: There is no gradient backwarded when A is non-invertible (which is + * equivalent to det(A) = 0) because zero is rarely hit upon in float + * point computation and the Jacobi's formula on determinant gradient + * is not computationally efficient when A is non-invertible. + * + * Examples:: + * + * Single matrix determinant + * A = `[ [1., 4.], [2., 3.] ] + * det(A) = [-5.] + * + * Batch matrix determinant + * A = `[ `[ [1., 4.], [2., 3.] ], + * `[ [2., 3.], [1., 4.] ] ] + * det(A) = [-5., 5.] + * + * + * Defined in src/operator/tensor/la_op.cc:L973 + * }}} + * + * @param A Tensor of square matrix + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def linalg_det (A : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Extracts the diagonal entries of a square matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, then *A* represents a single square matrix which diagonal elements get extracted as a 1-dimensional tensor. + * + * If *n>2*, then *A* represents a batch of square matrices on the trailing two dimensions. The extracted diagonals are returned as an *n-1*-dimensional tensor. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix diagonal extraction + * A = `[ [1.0, 2.0], + * [3.0, 4.0] ] + * + * extractdiag(A) = [1.0, 4.0] + * + * extractdiag(A, 1) = [2.0] + * + * Batch matrix diagonal extraction + * A = `[ `[ [1.0, 2.0], + * [3.0, 4.0] ], + * `[ [5.0, 6.0], + * [7.0, 8.0] ] ] + * + * extractdiag(A) = `[ [1.0, 4.0], + * [5.0, 8.0] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L495 + * }}} + * + * @param A Tensor of square matrices + * @param offset Offset of the diagonal versus the main diagonal. 0 corresponds to the main diagonal, a negative/positive value to diagonals below/above the main diagonal. 
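+ * @note A minimal Scala usage sketch (hypothetical, not part of the generated docs; it assumes
+ *       `import org.apache.mxnet.{NDArray, Shape}` and the concrete `NDArray.api` object that
+ *       implements this base class):
+ *       {{{
+ *       val a = NDArray.array(Array(1f, 2f, 3f, 4f), shape = Shape(2, 2))
+ *       // main diagonal of the 2x2 matrix above; expected result: [1.0, 4.0]
+ *       val d = NDArray.api.linalg_extractdiag(a).head
+ *       }}}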
+ * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def linalg_extractdiag (A : org.apache.mxnet.NDArray, offset : Option[Int] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Extracts a triangular sub-matrix from a square matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, then *A* represents a single square matrix from which a triangular sub-matrix is extracted as a 1-dimensional tensor. + * + * If *n>2*, then *A* represents a batch of square matrices on the trailing two dimensions. The extracted triangular sub-matrices are returned as an *n-1*-dimensional tensor. + * + * The *offset* and *lower* parameters determine the triangle to be extracted: + * + * - When *offset = 0* either the lower or upper triangle with respect to the main diagonal is extracted depending on the value of parameter *lower*. + * - When *offset = k > 0* the upper triangle with respect to the k-th diagonal above the main diagonal is extracted. + * - When *offset = k < 0* the lower triangle with respect to the k-th diagonal below the main diagonal is extracted. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single triagonal extraction + * A = `[ [1.0, 2.0], + * [3.0, 4.0] ] + * + * extracttrian(A) = [1.0, 3.0, 4.0] + * extracttrian(A, lower=False) = [1.0, 2.0, 4.0] + * extracttrian(A, 1) = [2.0] + * extracttrian(A, -1) = [3.0] + * + * Batch triagonal extraction + * A = `[ `[ [1.0, 2.0], + * [3.0, 4.0] ], + * `[ [5.0, 6.0], + * [7.0, 8.0] ] ] + * + * extracttrian(A) = `[ [1.0, 3.0, 4.0], + * [5.0, 7.0, 8.0] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L605 + * }}} + * + * @param A Tensor of square matrices + * @param offset Offset of the diagonal versus the main diagonal. 0 corresponds to the main diagonal, a negative/positive value to diagonals below/above the main diagonal. + * @param lower Refer to the lower triangular matrix if lower=true, refer to the upper otherwise. Only relevant when offset=0 + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def linalg_extracttrian (A : org.apache.mxnet.NDArray, offset : Option[Int] = None, lower : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * LQ factorization for general matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, we compute the LQ factorization (LAPACK *gelqf*, followed by *orglq*). *A* + * must have shape *(x, y)* with *x <= y*, and must have full rank *=x*. The LQ + * factorization consists of *L* with shape *(x, x)* and *Q* with shape *(x, y)*, so + * that: + * + * *A* = *L* \* *Q* + * + * Here, *L* is lower triangular (upper triangle equal to zero) with nonzero diagonal, + * and *Q* is row-orthonormal, meaning that + * + * *Q* \* *Q*\ :sup:`T` + * + * is equal to the identity matrix of shape *(x, x)*. + * + * If *n>2*, *gelqf* is performed separately on the trailing two dimensions for all + * inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single LQ factorization + * A = `[ [1., 2., 3.], [4., 5., 6.] ] + * Q, L = gelqf(A) + * Q = `[ [-0.26726124, -0.53452248, -0.80178373], + * [0.87287156, 0.21821789, -0.43643578] ] + * L = `[ [-3.74165739, 0.], + * [-8.55235974, 1.96396101] ] + * + * Batch LQ factorization + * A = `[ `[ [1., 2., 3.], [4., 5., 6.] ], + * `[ [7., 8., 9.], [10., 11., 12.] 
] ] + * Q, L = gelqf(A) + * Q = `[ `[ [-0.26726124, -0.53452248, -0.80178373], + * [0.87287156, 0.21821789, -0.43643578] ], + * `[ [-0.50257071, -0.57436653, -0.64616234], + * [0.7620735, 0.05862104, -0.64483142] ] ] + * L = `[ `[ [-3.74165739, 0.], + * [-8.55235974, 1.96396101] ], + * `[ [-13.92838828, 0.], + * [-19.09768702, 0.52758934] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L798 + * }}} + * + * @param A Tensor of input matrices to be factorized + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def linalg_gelqf (A : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Performs general matrix multiplication and accumulation. + * Input are tensors *A*, *B*, *C*, each of dimension *n >= 2* and having the same shape + * on the leading *n-2* dimensions. + * + * If *n=2*, the BLAS3 function *gemm* is performed: + * + * *out* = *alpha* \* *op*\ (*A*) \* *op*\ (*B*) + *beta* \* *C* + * + * Here, *alpha* and *beta* are scalar parameters, and *op()* is either the identity or + * matrix transposition (depending on *transpose_a*, *transpose_b*). + * + * If *n>2*, *gemm* is performed separately for a batch of matrices. The column indices of the matrices + * are given by the last dimensions of the tensors, the row indices by the axis specified with the *axis* + * parameter. By default, the trailing two dimensions will be used for matrix encoding. + * + * For a non-default axis parameter, the operation performed is equivalent to a series of swapaxes/gemm/swapaxes + * calls. For example let *A*, *B*, *C* be 5 dimensional tensors. Then gemm(*A*, *B*, *C*, axis=1) is equivalent + * to the following without the overhead of the additional swapaxis operations:: + * + * A1 = swapaxes(A, dim1=1, dim2=3) + * B1 = swapaxes(B, dim1=1, dim2=3) + * C = swapaxes(C, dim1=1, dim2=3) + * C = gemm(A1, B1, C) + * C = swapaxis(C, dim1=1, dim2=3) + * + * When the input data is of type float32 and the environment variables MXNET_CUDA_ALLOW_TENSOR_CORE + * and MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION are set to 1, this operator will try to use + * pseudo-float16 precision (float32 math with float16 I/O) precision in order to use + * Tensor Cores on suitable NVIDIA GPUs. This can sometimes give significant speedups. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix multiply-add + * A = `[ [1.0, 1.0], [1.0, 1.0] ] + * B = `[ [1.0, 1.0], [1.0, 1.0], [1.0, 1.0] ] + * C = `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ] + * gemm(A, B, C, transpose_b=True, alpha=2.0, beta=10.0) + * = `[ [14.0, 14.0, 14.0], [14.0, 14.0, 14.0] ] + * + * Batch matrix multiply-add + * A = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * B = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * C = `[ `[ [10.0] ], `[ [0.01] ] ] + * gemm(A, B, C, transpose_b=True, alpha=2.0 , beta=10.0) + * = `[ `[ [104.0] ], `[ [0.14] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L89 + * }}} + * + * @param A Tensor of input matrices + * @param B Tensor of input matrices + * @param C Tensor of input matrices + * @param transpose_a Multiply with transposed of first input (A). + * @param transpose_b Multiply with transposed of second input (B). + * @param alpha Scalar factor multiplied with A*B. + * @param beta Scalar factor multiplied with C. + * @param axis Axis corresponding to the matrix rows. 
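+ * @note A minimal Scala usage sketch (hypothetical, not part of the generated docs; it assumes
+ *       `import org.apache.mxnet.{NDArray, Shape}` and the concrete `NDArray.api` object that
+ *       implements this base class):
+ *       {{{
+ *       // mirrors the single matrix multiply-add example above
+ *       val a = NDArray.ones(Shape(2, 2))
+ *       val b = NDArray.ones(Shape(3, 2))
+ *       val c = NDArray.ones(Shape(2, 3))
+ *       val r = NDArray.api.linalg_gemm(a, b, c, transpose_b = Some(true),
+ *                                       alpha = Some(2.0), beta = Some(10.0)).head
+ *       // expected: a 2x3 matrix filled with 14.0
+ *       }}}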
+ * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def linalg_gemm (A : org.apache.mxnet.NDArray, B : org.apache.mxnet.NDArray, C : org.apache.mxnet.NDArray, transpose_a : Option[Boolean] = None, transpose_b : Option[Boolean] = None, alpha : Option[Double] = None, beta : Option[Double] = None, axis : Option[Int] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Performs general matrix multiplication. + * Input are tensors *A*, *B*, each of dimension *n >= 2* and having the same shape + * on the leading *n-2* dimensions. + * + * If *n=2*, the BLAS3 function *gemm* is performed: + * + * *out* = *alpha* \* *op*\ (*A*) \* *op*\ (*B*) + * + * Here *alpha* is a scalar parameter and *op()* is either the identity or the matrix + * transposition (depending on *transpose_a*, *transpose_b*). + * + * If *n>2*, *gemm* is performed separately for a batch of matrices. The column indices of the matrices + * are given by the last dimensions of the tensors, the row indices by the axis specified with the *axis* + * parameter. By default, the trailing two dimensions will be used for matrix encoding. + * + * For a non-default axis parameter, the operation performed is equivalent to a series of swapaxes/gemm/swapaxes + * calls. For example let *A*, *B* be 5 dimensional tensors. Then gemm(*A*, *B*, axis=1) is equivalent to + * the following without the overhead of the additional swapaxis operations:: + * + * A1 = swapaxes(A, dim1=1, dim2=3) + * B1 = swapaxes(B, dim1=1, dim2=3) + * C = gemm2(A1, B1) + * C = swapaxis(C, dim1=1, dim2=3) + * + * When the input data is of type float32 and the environment variables MXNET_CUDA_ALLOW_TENSOR_CORE + * and MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION are set to 1, this operator will try to use + * pseudo-float16 precision (float32 math with float16 I/O) precision in order to use + * Tensor Cores on suitable NVIDIA GPUs. This can sometimes give significant speedups. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix multiply + * A = `[ [1.0, 1.0], [1.0, 1.0] ] + * B = `[ [1.0, 1.0], [1.0, 1.0], [1.0, 1.0] ] + * gemm2(A, B, transpose_b=True, alpha=2.0) + * = `[ [4.0, 4.0, 4.0], [4.0, 4.0, 4.0] ] + * + * Batch matrix multiply + * A = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * B = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * gemm2(A, B, transpose_b=True, alpha=2.0) + * = `[ `[ [4.0] ], `[ [0.04 ] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L163 + * }}} + * + * @param A Tensor of input matrices + * @param B Tensor of input matrices + * @param transpose_a Multiply with transposed of first input (A). + * @param transpose_b Multiply with transposed of second input (B). + * @param alpha Scalar factor multiplied with A*B. + * @param axis Axis corresponding to the matrix row indices. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def linalg_gemm2 (A : org.apache.mxnet.NDArray, B : org.apache.mxnet.NDArray, transpose_a : Option[Boolean] = None, transpose_b : Option[Boolean] = None, alpha : Option[Double] = None, axis : Option[Int] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Compute the inverse of a matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* is a square matrix. We compute: + * + * *out* = *A*\ :sup:`-1` + * + * If *n>2*, *inverse* is performed separately on the trailing two dimensions + * for all inputs (batch mode). + * + * .. 
note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix inverse + * A = `[ [1., 4.], [2., 3.] ] + * inverse(A) = `[ [-0.6, 0.8], [0.4, -0.2] ] + * + * Batch matrix inverse + * A = `[ `[ [1., 4.], [2., 3.] ], + * `[ [1., 3.], [2., 4.] ] ] + * inverse(A) = `[ `[ [-0.6, 0.8], [0.4, -0.2] ], + * `[ [-2., 1.5], [1., -0.5] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L919 + * }}} + * + * @param A Tensor of square matrix + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def linalg_inverse (A : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Constructs a square matrix with the input as diagonal. + * Input is a tensor *A* of dimension *n >= 1*. + * + * If *n=1*, then *A* represents the diagonal entries of a single square matrix. This matrix will be returned as a 2-dimensional tensor. + * If *n>1*, then *A* represents a batch of diagonals of square matrices. The batch of diagonal matrices will be returned as an *n+1*-dimensional tensor. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single diagonal matrix construction + * A = [1.0, 2.0] + * + * makediag(A) = `[ [1.0, 0.0], + * [0.0, 2.0] ] + * + * makediag(A, 1) = `[ [0.0, 1.0, 0.0], + * [0.0, 0.0, 2.0], + * [0.0, 0.0, 0.0] ] + * + * Batch diagonal matrix construction + * A = `[ [1.0, 2.0], + * [3.0, 4.0] ] + * + * makediag(A) = `[ `[ [1.0, 0.0], + * [0.0, 2.0] ], + * `[ [3.0, 0.0], + * [0.0, 4.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L547 + * }}} + * + * @param A Tensor of diagonal entries + * @param offset Offset of the diagonal versus the main diagonal. 0 corresponds to the main diagonal, a negative/positive value to diagonals below/above the main diagonal. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def linalg_makediag (A : org.apache.mxnet.NDArray, offset : Option[Int] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Constructs a square matrix with the input representing a specific triangular sub-matrix. + * This is basically the inverse of *linalg.extracttrian*. Input is a tensor *A* of dimension *n >= 1*. + * + * If *n=1*, then *A* represents the entries of a triangular matrix which is lower triangular if *offset<0* or *offset=0*, *lower=true*. The resulting matrix is derived by first constructing the square + * matrix with the entries outside the triangle set to zero and then adding *offset*-times an additional + * diagonal with zero entries to the square matrix. + * + * If *n>1*, then *A* represents a batch of triangular sub-matrices. The batch of corresponding square matrices is returned as an *n+1*-dimensional tensor. + * + * .. note:: The operator supports float32 and float64 data types only. 
+ * + * Examples:: + * + * Single matrix construction + * A = [1.0, 2.0, 3.0] + * + * maketrian(A) = `[ [1.0, 0.0], + * [2.0, 3.0] ] + * + * maketrian(A, lower=false) = `[ [1.0, 2.0], + * [0.0, 3.0] ] + * + * maketrian(A, offset=1) = `[ [0.0, 1.0, 2.0], + * [0.0, 0.0, 3.0], + * [0.0, 0.0, 0.0] ] + * maketrian(A, offset=-1) = `[ [0.0, 0.0, 0.0], + * [1.0, 0.0, 0.0], + * [2.0, 3.0, 0.0] ] + * + * Batch matrix construction + * A = `[ [1.0, 2.0, 3.0], + * [4.0, 5.0, 6.0] ] + * + * maketrian(A) = `[ `[ [1.0, 0.0], + * [2.0, 3.0] ], + * `[ [4.0, 0.0], + * [5.0, 6.0] ] ] + * + * maketrian(A, offset=1) = `[ `[ [0.0, 1.0, 2.0], + * [0.0, 0.0, 3.0], + * [0.0, 0.0, 0.0] ], + * `[ [0.0, 4.0, 5.0], + * [0.0, 0.0, 6.0], + * [0.0, 0.0, 0.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L673 + * }}} + * + * @param A Tensor of triangular matrices stored as vectors + * @param offset Offset of the diagonal versus the main diagonal. 0 corresponds to the main diagonal, a negative/positive value to diagonals below/above the main diagonal. + * @param lower Refer to the lower triangular matrix if lower=true, refer to the upper otherwise. Only relevant when offset=0 + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def linalg_maketrian (A : org.apache.mxnet.NDArray, offset : Option[Int] = None, lower : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Performs Cholesky factorization of a symmetric positive-definite matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, the Cholesky factor *B* of the symmetric, positive definite matrix *A* is + * computed. *B* is triangular (entries of upper or lower triangle are all zero), has + * positive diagonal entries, and: + * + * *A* = *B* \* *B*\ :sup:`T` if *lower* = *true* + * *A* = *B*\ :sup:`T` \* *B* if *lower* = *false* + * + * If *n>2*, *potrf* is performed separately on the trailing two dimensions for all inputs + * (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix factorization + * A = `[ [4.0, 1.0], [1.0, 4.25] ] + * potrf(A) = `[ [2.0, 0], [0.5, 2.0] ] + * + * Batch matrix factorization + * A = `[ `[ [4.0, 1.0], [1.0, 4.25] ], `[ [16.0, 4.0], [4.0, 17.0] ] ] + * potrf(A) = `[ `[ [2.0, 0], [0.5, 2.0] ], `[ [4.0, 0], [1.0, 4.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L214 + * }}} + * + * @param A Tensor of input matrices to be decomposed + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def linalg_potrf (A : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Performs matrix inversion from a Cholesky factorization. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* is a triangular matrix (entries of upper or lower triangle are all zero) + * with positive diagonal. We compute: + * + * *out* = *A*\ :sup:`-T` \* *A*\ :sup:`-1` if *lower* = *true* + * *out* = *A*\ :sup:`-1` \* *A*\ :sup:`-T` if *lower* = *false* + * + * In other words, if *A* is the Cholesky factor of a symmetric positive definite matrix + * *B* (obtained by *potrf*), then + * + * *out* = *B*\ :sup:`-1` + * + * If *n>2*, *potri* is performed separately on the trailing two dimensions for all inputs + * (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * .. 
note:: Use this operator only if you are certain you need the inverse of *B*, and + * cannot use the Cholesky factor *A* (*potrf*), together with backsubstitution + * (*trsm*). The latter is numerically much safer, and also cheaper. + * + * Examples:: + * + * Single matrix inverse + * A = `[ [2.0, 0], [0.5, 2.0] ] + * potri(A) = `[ [0.26563, -0.0625], [-0.0625, 0.25] ] + * + * Batch matrix inverse + * A = `[ `[ [2.0, 0], [0.5, 2.0] ], `[ [4.0, 0], [1.0, 4.0] ] ] + * potri(A) = `[ `[ [0.26563, -0.0625], [-0.0625, 0.25] ], + * `[ [0.06641, -0.01562], [-0.01562, 0.0625] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L275 + * }}} + * + * @param A Tensor of lower triangular matrices + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def linalg_potri (A : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Compute the sign and log of the determinant of a matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* is a square matrix. We compute: + * + * *sign* = *sign(det(A))* + * *logabsdet* = *log(abs(det(A)))* + * + * If *n>2*, *slogdet* is performed separately on the trailing two dimensions + * for all inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * .. note:: The gradient is not properly defined on sign, so the gradient of + * it is not backwarded. + * .. note:: No gradient is backwarded when A is non-invertible. Please see + * the docs of operator det for details. + * + * Examples:: + * + * Single matrix signed log determinant + * A = `[ [2., 3.], [1., 4.] ] + * sign, logabsdet = slogdet(A) + * sign = [1.] + * logabsdet = [1.609438] + * + * Batch matrix signed log determinant + * A = `[ `[ [2., 3.], [1., 4.] ], + * `[ [1., 2.], [2., 4.] ], + * `[ [1., 2.], [4., 3.] ] ] + * sign, logabsdet = slogdet(A) + * sign = [1., 0., -1.] + * logabsdet = [1.609438, -inf, 1.609438] + * + * + * Defined in src/operator/tensor/la_op.cc:L1031 + * }}} + * + * @param A Tensor of square matrix + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def linalg_slogdet (A : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the sum of the logarithms of the diagonal elements of a square matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* must be square with positive diagonal entries. We sum the natural + * logarithms of the diagonal elements, the result has shape (1,). + * + * If *n>2*, *sumlogdiag* is performed separately on the trailing two dimensions for all + * inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix reduction + * A = `[ [1.0, 1.0], [1.0, 7.0] ] + * sumlogdiag(A) = [1.9459] + * + * Batch matrix reduction + * A = `[ `[ [1.0, 1.0], [1.0, 7.0] ], `[ [3.0, 0], [0, 17.0] ] ] + * sumlogdiag(A) = [1.9459, 3.9318] + * + * + * Defined in src/operator/tensor/la_op.cc:L445 + * }}} + * + * @param A Tensor of square matrices + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def linalg_sumlogdiag (A : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Multiplication of matrix with its transpose. + * Input is a tensor *A* of dimension *n >= 2*.
+ * + * If *n=2*, the operator performs the BLAS3 function *syrk*: + * + * *out* = *alpha* \* *A* \* *A*\ :sup:`T` + * + * if *transpose=False*, or + * + * *out* = *alpha* \* *A*\ :sup:`T` \ \* *A* + * + * if *transpose=True*. + * + * If *n>2*, *syrk* is performed separately on the trailing two dimensions for all + * inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix multiply + * A = `[ [1., 2., 3.], [4., 5., 6.] ] + * syrk(A, alpha=1., transpose=False) + * = `[ [14., 32.], + * [32., 77.] ] + * syrk(A, alpha=1., transpose=True) + * = `[ [17., 22., 27.], + * [22., 29., 36.], + * [27., 36., 45.] ] + * + * Batch matrix multiply + * A = `[ `[ [1., 1.] ], `[ [0.1, 0.1] ] ] + * syrk(A, alpha=2., transpose=False) = `[ `[ [4.] ], `[ [0.04] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L730 + * }}} + * + * @param A Tensor of input matrices + * @param transpose Use transpose of input matrix. + * @param alpha Scalar factor to be applied to the result. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def linalg_syrk (A : org.apache.mxnet.NDArray, transpose : Option[Boolean] = None, alpha : Option[Double] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Performs multiplication with a lower triangular matrix. + * Input are tensors *A*, *B*, each of dimension *n >= 2* and having the same shape + * on the leading *n-2* dimensions. + * + * If *n=2*, *A* must be triangular. The operator performs the BLAS3 function + * *trmm*: + * + * *out* = *alpha* \* *op*\ (*A*) \* *B* + * + * if *rightside=False*, or + * + * *out* = *alpha* \* *B* \* *op*\ (*A*) + * + * if *rightside=True*. Here, *alpha* is a scalar parameter, and *op()* is either the + * identity or the matrix transposition (depending on *transpose*). + * + * If *n>2*, *trmm* is performed separately on the trailing two dimensions for all inputs + * (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single triangular matrix multiply + * A = `[ [1.0, 0], [1.0, 1.0] ] + * B = `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ] + * trmm(A, B, alpha=2.0) = `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ] + * + * Batch triangular matrix multiply + * A = `[ `[ [1.0, 0], [1.0, 1.0] ], `[ [1.0, 0], [1.0, 1.0] ] ] + * B = `[ `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ], `[ [0.5, 0.5, 0.5], [0.5, 0.5, 0.5] ] ] + * trmm(A, B, alpha=2.0) = `[ `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ], + * `[ [1.0, 1.0, 1.0], [2.0, 2.0, 2.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L333 + * }}} + * + * @param A Tensor of lower triangular matrices + * @param B Tensor of matrices + * @param transpose Use transposed of the triangular matrix + * @param rightside Multiply triangular matrix from the right to non-triangular one. + * @param lower True if the triangular matrix is lower triangular, false if it is upper triangular. + * @param alpha Scalar factor to be applied to the result. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def linalg_trmm (A : org.apache.mxnet.NDArray, B : org.apache.mxnet.NDArray, transpose : Option[Boolean] = None, rightside : Option[Boolean] = None, lower : Option[Boolean] = None, alpha : Option[Double] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Solves matrix equation involving a lower triangular matrix. 
+ * Input are tensors *A*, *B*, each of dimension *n >= 2* and having the same shape + * on the leading *n-2* dimensions. + * + * If *n=2*, *A* must be triangular. The operator performs the BLAS3 function + * *trsm*, solving for *out* in: + * + * *op*\ (*A*) \* *out* = *alpha* \* *B* + * + * if *rightside=False*, or + * + * *out* \* *op*\ (*A*) = *alpha* \* *B* + * + * if *rightside=True*. Here, *alpha* is a scalar parameter, and *op()* is either the + * identity or the matrix transposition (depending on *transpose*). + * + * If *n>2*, *trsm* is performed separately on the trailing two dimensions for all inputs + * (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix solve + * A = `[ [1.0, 0], [1.0, 1.0] ] + * B = `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ] + * trsm(A, B, alpha=0.5) = `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ] + * + * Batch matrix solve + * A = `[ `[ [1.0, 0], [1.0, 1.0] ], `[ [1.0, 0], [1.0, 1.0] ] ] + * B = `[ `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ], + * `[ [4.0, 4.0, 4.0], [8.0, 8.0, 8.0] ] ] + * trsm(A, B, alpha=0.5) = `[ `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ], + * `[ [2.0, 2.0, 2.0], [2.0, 2.0, 2.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L396 + * }}} + * + * @param A Tensor of lower triangular matrices + * @param B Tensor of matrices + * @param transpose Use transposed of the triangular matrix + * @param rightside Multiply triangular matrix from the right to non-triangular one. + * @param lower True if the triangular matrix is lower triangular, false if it is upper triangular. + * @param alpha Scalar factor to be applied to the result. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def linalg_trsm (A : org.apache.mxnet.NDArray, B : org.apache.mxnet.NDArray, transpose : Option[Boolean] = None, rightside : Option[Boolean] = None, lower : Option[Boolean] = None, alpha : Option[Double] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise Natural logarithmic value of the input. + * + * The natural logarithm is logarithm in base *e*, so that ``log(exp(x)) = x`` + * + * The storage type of ``log`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L76 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def log (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise Base-10 logarithmic value of the input. + * + * ``10**log10(x) = x`` + * + * The storage type of ``log10`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L93 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def log10 (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise ``log(1 + x)`` value of the input. + * + * This function is more accurate than ``log(1 + x)`` for small ``x`` so that + * :math:`1+x\approx 1` + * + * The storage type of ``log1p`` output depends upon the input storage type: + * + * - log1p(default) = default + * - log1p(row_sparse) = row_sparse + * - log1p(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L206 + * }}} + * + * @param data The input array. 
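+ * @note A minimal Scala usage sketch (hypothetical, not part of the generated docs; it assumes
+ *       `import org.apache.mxnet.{NDArray, Shape}` and the concrete `NDArray.api` object that
+ *       implements this base class):
+ *       {{{
+ *       val x = NDArray.array(Array(0f, 1e-4f, 1f), shape = Shape(3))
+ *       val y = NDArray.api.log1p(x).head   // more accurate than log(1 + x) for small x
+ *       }}}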
+ * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def log1p (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise Base-2 logarithmic value of the input. + * + * ``2**log2(x) = x`` + * + * The storage type of ``log2`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L105 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def log2 (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the log softmax of the input. + * This is equivalent to computing softmax followed by log. + * + * Examples:: + * + * >>> x = mx.nd.array([1, 2, .1]) + * >>> mx.nd.log_softmax(x).asnumpy() + * array([-1.41702998, -0.41702995, -2.31702995], dtype=float32) + * + * >>> x = mx.nd.array( `[ [1, 2, .1],[.1, 2, 1] ] ) + * >>> mx.nd.log_softmax(x, axis=0).asnumpy() + * array(`[ [-0.34115392, -0.69314718, -1.24115396], + * [-1.24115396, -0.69314718, -0.34115392] ], dtype=float32) + * }}} + * + * @param data The input array. + * @param axis The axis along which to compute softmax. + * @param temperature Temperature parameter in softmax + * @param dtype DType of the output in case this can't be inferred. Defaults to the same as input's dtype if not defined (dtype=None). + * @param use_length Whether to use the length input as a mask over the data input. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def log_softmax (data : org.apache.mxnet.NDArray, axis : Option[Int] = None, temperature : Option[Double] = None, dtype : Option[String] = None, use_length : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns the result of logical NOT (!) function + * + * Example: + * logical_not([-2., 0., 1.]) = [0., 1., 0.] + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def logical_not (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Make your own loss function in network construction. + * + * This operator accepts a customized loss function symbol as a terminal loss and + * the symbol should be an operator with no backward dependency. + * The output of this function is the gradient of loss with respect to the input data. + * + * For example, if you are a making a cross entropy loss function. Assume ``out`` is the + * predicted output and ``label`` is the true label, then the cross entropy can be defined as:: + * + * cross_entropy = label * log(out) + (1 - label) * log(1 - out) + * loss = make_loss(cross_entropy) + * + * We will need to use ``make_loss`` when we are creating our own loss function or we want to + * combine multiple loss functions. Also we may want to stop some variables' gradients + * from backpropagation. See more detail in ``BlockGrad`` or ``stop_gradient``. + * + * The storage type of ``make_loss`` output depends upon the input storage type: + * + * - make_loss(default) = default + * - make_loss(row_sparse) = row_sparse + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L360 + * }}} + * + * @param data The input array. 
+ * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def make_loss (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the max of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L32 + * }}} + * + * @param data The input + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def max (data : org.apache.mxnet.NDArray, axis : Option[org.apache.mxnet.Shape] = None, keepdims : Option[Boolean] = None, exclude : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the max of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L32 + * }}} + * + * @param data The input + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def max_axis (data : org.apache.mxnet.NDArray, axis : Option[org.apache.mxnet.Shape] = None, keepdims : Option[Boolean] = None, exclude : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the mean of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L84 + * }}} + * + * @param data The input + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. 
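+ * @note A minimal Scala usage sketch (hypothetical, not part of the generated docs; it assumes
+ *       `import org.apache.mxnet.{NDArray, Shape}` and the concrete `NDArray.api` object that
+ *       implements this base class):
+ *       {{{
+ *       val x = NDArray.array(Array(1f, 2f, 3f, 4f, 5f, 6f), shape = Shape(2, 3))
+ *       // reduce over axis 1, keeping the reduced dimension; expected values: [[2.0], [5.0]]
+ *       val m = NDArray.api.mean(x, axis = Some(Shape(1)), keepdims = Some(true)).head
+ *       }}}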
+ * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def mean (data : org.apache.mxnet.NDArray, axis : Option[org.apache.mxnet.Shape] = None, keepdims : Option[Boolean] = None, exclude : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the min of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L47 + * }}} + * + * @param data The input + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def min (data : org.apache.mxnet.NDArray, axis : Option[org.apache.mxnet.Shape] = None, keepdims : Option[Boolean] = None, exclude : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the min of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L47 + * }}} + * + * @param data The input + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def min_axis (data : org.apache.mxnet.NDArray, axis : Option[org.apache.mxnet.Shape] = None, keepdims : Option[Boolean] = None, exclude : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * + * Calculate the mean and variance of `data`. + * + * The mean and variance are calculated by aggregating the contents of data across axes. + * If x is 1-D and axes = [0] this is just the mean and variance of a vector. + * + * Example: + * + * x = `[ [1, 2, 3], [4, 5, 6] ] + * mean, var = moments(data=x, axes=[0]) + * mean = [2.5, 3.5, 4.5] + * var = [2.25, 2.25, 2.25] + * mean, var = moments(data=x, axes=[1]) + * mean = [2.0, 5.0] + * var = [0.66666667, 0.66666667] + * mean, var = moments(data=x, axes=[0, 1]) + * mean = [3.5] + * var = [2.9166667] + * + * + * + * Defined in src/operator/nn/moments.cc:L54 + * }}} + * + * @param data Input ndarray + * @param axes Array of ints. Axes along which to compute mean and variance. + * @param keepdims produce moments with the same dimensionality as the input.
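+ * @note A minimal Scala usage sketch (hypothetical, not part of the generated docs; it assumes
+ *       `import org.apache.mxnet.{NDArray, Shape}` and the concrete `NDArray.api` object that
+ *       implements this base class):
+ *       {{{
+ *       val x = NDArray.array(Array(1f, 2f, 3f, 4f, 5f, 6f), shape = Shape(2, 3))
+ *       val ret = NDArray.api.moments(x, axes = Some(Shape(0)))
+ *       // two outputs: mean [2.5, 3.5, 4.5] and variance [2.25, 2.25, 2.25]
+ *       val (mean, variance) = (ret(0), ret(1))
+ *       }}}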
+ * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def moments (data : org.apache.mxnet.NDArray, axes : Option[org.apache.mxnet.Shape] = None, keepdims : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Mixed Precision version of Phase I of lamb update + * it performs the following operations and returns g:. + * + * Link to paper: https://arxiv.org/pdf/1904.00962.pdf + * + * .. math:: + * \begin{gather*} + * grad32 = grad(float16) * rescale_grad + * if (grad < -clip_gradient) + * then + * grad = -clip_gradient + * if (grad > clip_gradient) + * then + * grad = clip_gradient + * + * mean = beta1 * mean + (1 - beta1) * grad; + * variance = beta2 * variance + (1. - beta2) * grad ^ 2; + * + * if (bias_correction) + * then + * mean_hat = mean / (1. - beta1^t); + * var_hat = var / (1 - beta2^t); + * g = mean_hat / (var_hat^(1/2) + epsilon) + wd * weight32; + * else + * g = mean / (var_data^(1/2) + epsilon) + wd * weight32; + * \end{gather*} + * + * + * + * Defined in src/operator/optimizer_op.cc:L1033 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param mean Moving mean + * @param vari Moving variance + * @param weight32 Weight32 + * @param beta1 The decay rate for the 1st moment estimates. + * @param beta2 The decay rate for the 2nd moment estimates. + * @param epsilon A small constant for numerical stability. + * @param t Index update count. + * @param bias_correction Whether to use bias correction. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def mp_lamb_update_phase1 (weight : org.apache.mxnet.NDArray, grad : org.apache.mxnet.NDArray, mean : org.apache.mxnet.NDArray, vari : org.apache.mxnet.NDArray, weight32 : org.apache.mxnet.NDArray, beta1 : Option[Float] = None, beta2 : Option[Float] = None, epsilon : Option[Float] = None, t : Int, bias_correction : Option[Boolean] = None, wd : Float, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Mixed Precision version Phase II of lamb update + * it performs the following operations and updates grad. + * + * Link to paper: https://arxiv.org/pdf/1904.00962.pdf + * + * .. math:: + * \begin{gather*} + * if (lower_bound >= 0) + * then + * r1 = max(r1, lower_bound) + * if (upper_bound >= 0) + * then + * r1 = max(r1, upper_bound) + * + * if (r1 == 0 or r2 == 0) + * then + * lr = lr + * else + * lr = lr * (r1/r2) + * weight32 = weight32 - lr * g + * weight(float16) = weight32 + * \end{gather*} + * + * + * + * Defined in src/operator/optimizer_op.cc:L1075 + * }}} + * + * @param weight Weight + * @param g Output of mp_lamb_update_phase 1 + * @param r1 r1 + * @param r2 r2 + * @param weight32 Weight32 + * @param lr Learning rate + * @param lower_bound Lower limit of norm of weight. If lower_bound <= 0, Lower limit is not set + * @param upper_bound Upper limit of norm of weight. 
If upper_bound <= 0, Upper limit is not set + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def mp_lamb_update_phase2 (weight : org.apache.mxnet.NDArray, g : org.apache.mxnet.NDArray, r1 : org.apache.mxnet.NDArray, r2 : org.apache.mxnet.NDArray, weight32 : org.apache.mxnet.NDArray, lr : Float, lower_bound : Option[Float] = None, upper_bound : Option[Float] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Update function for multi-precision Nesterov Accelerated Gradient( NAG) optimizer. + * + * + * Defined in src/operator/optimizer_op.cc:L745 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param mom Momentum + * @param weight32 Weight32 + * @param lr Learning rate + * @param momentum The decay rate of momentum estimates at each epoch. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def mp_nag_mom_update (weight : org.apache.mxnet.NDArray, grad : org.apache.mxnet.NDArray, mom : org.apache.mxnet.NDArray, weight32 : org.apache.mxnet.NDArray, lr : Float, momentum : Option[Float] = None, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Updater function for multi-precision sgd optimizer + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param mom Momentum + * @param weight32 Weight32 + * @param lr Learning rate + * @param momentum The decay rate of momentum estimates at each epoch. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param lazy_update If true, lazy updates are applied if gradient's stype is row_sparse and both weight and momentum have the same stype + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def mp_sgd_mom_update (weight : org.apache.mxnet.NDArray, grad : org.apache.mxnet.NDArray, mom : org.apache.mxnet.NDArray, weight32 : org.apache.mxnet.NDArray, lr : Float, momentum : Option[Float] = None, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, lazy_update : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Updater function for multi-precision sgd optimizer + * }}} + * + * @param weight Weight + * @param grad gradient + * @param weight32 Weight32 + * @param lr Learning rate + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. 
+ * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param lazy_update If true, lazy updates are applied if gradient's stype is row_sparse. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def mp_sgd_update (weight : org.apache.mxnet.NDArray, grad : org.apache.mxnet.NDArray, weight32 : org.apache.mxnet.NDArray, lr : Float, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, lazy_update : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Check if all the float numbers in all the arrays are finite (used for AMP) + * + * + * Defined in src/operator/contrib/all_finite.cc:L133 + * }}} + * + * @param data Arrays + * @param num_arrays Number of arrays. + * @param init_output Initialize output to 1. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def multi_all_finite (data : Array[org.apache.mxnet.NDArray], num_arrays : Option[Int] = None, init_output : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Compute the LARS coefficients of multiple weights and grads from their sums of square" + * + * + * Defined in src/operator/contrib/multi_lars.cc:L37 + * }}} + * + * @param lrs Learning rates to scale by LARS coefficient + * @param weights_sum_sq sum of square of weights arrays + * @param grads_sum_sq sum of square of gradients arrays + * @param wds weight decays + * @param eta LARS eta + * @param eps LARS eps + * @param rescale_grad Gradient rescaling factor + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def multi_lars (lrs : org.apache.mxnet.NDArray, weights_sum_sq : org.apache.mxnet.NDArray, grads_sum_sq : org.apache.mxnet.NDArray, wds : org.apache.mxnet.NDArray, eta : Float, eps : Float, rescale_grad : Option[Float] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Momentum update function for multi-precision Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * + * + * Defined in src/operator/optimizer_op.cc:L472 + * }}} + * + * @param data Weights + * @param lrs Learning rates. + * @param wds Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param momentum The decay rate of momentum estimates at each epoch. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param num_weights Number of updated weights. 
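+ * @note A scalar sketch of one step of the ``v``/``weight`` recursion above (plain Scala,
+ *       illustrative only, not a call to this operator):
+ *       {{{
+ *       var (weight, v)          = (1.0f, 0.0f)
+ *       val (lr, momentum, grad) = (0.1f, 0.9f, 0.5f)
+ *       v = momentum * v - lr * grad   // v_1 = -0.05
+ *       weight += v                    // W_1 = 0.95
+ *       }}}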
+ * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def multi_mp_sgd_mom_update (data : Array[org.apache.mxnet.NDArray], lrs : Any, wds : Any, momentum : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, num_weights : Option[Int] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Update function for multi-precision Stochastic Gradient Descent (SDG) optimizer. + * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * + * + * Defined in src/operator/optimizer_op.cc:L417 + * }}} + * + * @param data Weights + * @param lrs Learning rates. + * @param wds Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param num_weights Number of updated weights. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def multi_mp_sgd_update (data : Array[org.apache.mxnet.NDArray], lrs : Any, wds : Any, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, num_weights : Option[Int] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Momentum update function for Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * + * + * Defined in src/operator/optimizer_op.cc:L374 + * }}} + * + * @param data Weights, gradients and momentum + * @param lrs Learning rates. + * @param wds Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param momentum The decay rate of momentum estimates at each epoch. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param num_weights Number of updated weights. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def multi_sgd_mom_update (data : Array[org.apache.mxnet.NDArray], lrs : Any, wds : Any, momentum : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, num_weights : Option[Int] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Update function for Stochastic Gradient Descent (SDG) optimizer. + * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * + * + * Defined in src/operator/optimizer_op.cc:L329 + * }}} + * + * @param data Weights + * @param lrs Learning rates. 
+ * @param wds Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param num_weights Number of updated weights. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def multi_sgd_update (data : Array[org.apache.mxnet.NDArray], lrs : Any, wds : Any, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, num_weights : Option[Int] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Compute the sums of squares of multiple arrays + * + * + * Defined in src/operator/contrib/multi_sum_sq.cc:L36 + * }}} + * + * @param data Arrays + * @param num_arrays number of input arrays. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def multi_sum_sq (data : Array[org.apache.mxnet.NDArray], num_arrays : Int, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Update function for Nesterov Accelerated Gradient( NAG) optimizer. + * It updates the weights using the following formula, + * + * .. math:: + * v_t = \gamma v_{t-1} + \eta * \nabla J(W_{t-1} - \gamma v_{t-1})\\ + * W_t = W_{t-1} - v_t + * + * Where + * :math:`\eta` is the learning rate of the optimizer + * :math:`\gamma` is the decay rate of the momentum estimate + * :math:`\v_t` is the update vector at time step `t` + * :math:`\W_t` is the weight vector at time step `t` + * + * + * + * Defined in src/operator/optimizer_op.cc:L726 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param mom Momentum + * @param lr Learning rate + * @param momentum The decay rate of momentum estimates at each epoch. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def nag_mom_update (weight : org.apache.mxnet.NDArray, grad : org.apache.mxnet.NDArray, mom : org.apache.mxnet.NDArray, lr : Float, momentum : Option[Float] = None, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the product of array elements over given axes treating Not a Numbers (``NaN``) as one. + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_prod_value.cc:L47 + * }}} + * + * @param data The input + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. 
+ + Negative values means indexing from right to left. + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def nanprod (data : org.apache.mxnet.NDArray, axis : Option[org.apache.mxnet.Shape] = None, keepdims : Option[Boolean] = None, exclude : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the sum of array elements over given axes treating Not a Numbers (``NaN``) as zero. + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_sum_value.cc:L102 + * }}} + * + * @param data The input + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def nansum (data : org.apache.mxnet.NDArray, axis : Option[org.apache.mxnet.Shape] = None, keepdims : Option[Boolean] = None, exclude : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Numerical negative of the argument, element-wise. + * + * The storage type of ``negative`` output depends upon the input storage type: + * + * - negative(default) = default + * - negative(row_sparse) = row_sparse + * - negative(csr) = csr + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def negative (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the norm on an NDArray. + * + * This operator computes the norm on an NDArray with the specified axis, depending + * on the value of the ord parameter. By default, it computes the L2 norm on the entire + * array. Currently only ord=2 supports sparse ndarrays. + * + * Examples:: + * + * x = `[ `[ [1, 2], + * [3, 4] ], + * `[ [2, 2], + * [5, 6] ] ] + * + * norm(x, ord=2, axis=1) = `[ [3.1622777 4.472136 ] + * [5.3851647 6.3245554] ] + * + * norm(x, ord=1, axis=1) = `[ [4., 6.], + * [7., 8.] ] + * + * rsp = x.cast_storage('row_sparse') + * + * norm(rsp) = [5.47722578] + * + * csr = x.cast_storage('csr') + * + * norm(csr) = [5.47722578] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_norm_value.cc:L89 + * }}} + * + * @param data The input + * @param ord Order of the norm. Currently ord=1 and ord=2 is supported. + * @param axis The axis or axes along which to perform the reduction. + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + If `axis` is int, a reduction is performed on a particular axis. + If `axis` is a 2-tuple, it specifies the axes that hold 2-D matrices, + and the matrix norms of these matrices are computed. + * @param out_dtype The data type of the output. 
+ * @param keepdims If this is set to `True`, the reduced axis is left in the result as dimension with size one. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def norm (data : org.apache.mxnet.NDArray, ord : Option[Int] = None, axis : Option[org.apache.mxnet.Shape] = None, out_dtype : Option[String] = None, keepdims : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Draw random samples from a normal (Gaussian) distribution. + * + * .. note:: The existing alias ``normal`` is deprecated. + * + * Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale* + * (standard deviation). + * + * Example:: + * + * normal(loc=0, scale=1, shape=(2,2)) = `[ [ 1.89171135, -1.16881478], + * [-1.23474145, 1.55807114] ] + * + * + * Defined in src/operator/random/sample_op.cc:L113 + * }}} + * + * @param loc Mean of the distribution. + * @param scale Standard deviation of the distribution. + * @param shape Shape of the output. + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def normal (loc : Option[Float] = None, scale : Option[Float] = None, shape : Option[org.apache.mxnet.Shape] = None, ctx : Option[String] = None, dtype : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns a one-hot array. + * + * The locations represented by `indices` take value `on_value`, while all + * other locations take value `off_value`. + * + * `one_hot` operation with `indices` of shape ``(i0, i1)`` and `depth` of ``d`` would result + * in an output array of shape ``(i0, i1, d)`` with:: + * + * output[i,j,:] = off_value + * output[i,j,indices[i,j] ] = on_value + * + * Examples:: + * + * one_hot([1,0,2,0], 3) = `[ [ 0. 1. 0.] + * [ 1. 0. 0.] + * [ 0. 0. 1.] + * [ 1. 0. 0.] ] + * + * one_hot([1,0,2,0], 3, on_value=8, off_value=1, + * dtype='int32') = `[ [1 8 1] + * [8 1 1] + * [1 1 8] + * [8 1 1] ] + * + * one_hot(`[ [1,0],[1,0],[2,0] ], 3) = `[ `[ [ 0. 1. 0.] + * [ 1. 0. 0.] ] + * + * `[ [ 0. 1. 0.] + * [ 1. 0. 0.] ] + * + * `[ [ 0. 0. 1.] + * [ 1. 0. 0.] ] ] + * + * + * Defined in src/operator/tensor/indexing_op.cc:L824 + * }}} + * + * @param indices array of locations where to set on_value + * @param depth Depth of the one hot dimension. + * @param on_value The value assigned to the locations represented by indices. + * @param off_value The value assigned to the locations not represented by indices. + * @param dtype DType of the output + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def one_hot (indices : org.apache.mxnet.NDArray, depth : Int, on_value : Option[Double] = None, off_value : Option[Double] = None, dtype : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Return an array of ones with the same shape and type + * as the input array. + * + * Examples:: + * + * x = `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * + * ones_like(x) = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] 
] + * }}} + * + * @param data The input + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def ones_like (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Pads an input array with a constant or edge values of the array. + * + * .. note:: `Pad` is deprecated. Use `pad` instead. + * + * .. note:: Current implementation only supports 4D and 5D input arrays with padding applied + * only on axes 1, 2 and 3. Expects axes 4 and 5 in `pad_width` to be zero. + * + * This operation pads an input array with either a `constant_value` or edge values + * along each axis of the input array. The amount of padding is specified by `pad_width`. + * + * `pad_width` is a tuple of integer padding widths for each axis of the format + * ``(before_1, after_1, ... , before_N, after_N)``. The `pad_width` should be of length ``2*N`` + * where ``N`` is the number of dimensions of the array. + * + * For dimension ``N`` of the input array, ``before_N`` and ``after_N`` indicates how many values + * to add before and after the elements of the array along dimension ``N``. + * The widths of the higher two dimensions ``before_1``, ``after_1``, ``before_2``, + * ``after_2`` must be 0. + * + * Example:: + * + * x = `[ [`[ [ 1. 2. 3.] + * [ 4. 5. 6.] ] + * + * `[ [ 7. 8. 9.] + * [ 10. 11. 12.] ] ] + * + * + * `[ `[ [ 11. 12. 13.] + * [ 14. 15. 16.] ] + * + * `[ [ 17. 18. 19.] + * [ 20. 21. 22.] ] ] ] + * + * pad(x,mode="edge", pad_width=(0,0,0,0,1,1,1,1)) = + * + * `[ [`[ [ 1. 1. 2. 3. 3.] + * [ 1. 1. 2. 3. 3.] + * [ 4. 4. 5. 6. 6.] + * [ 4. 4. 5. 6. 6.] ] + * + * `[ [ 7. 7. 8. 9. 9.] + * [ 7. 7. 8. 9. 9.] + * [ 10. 10. 11. 12. 12.] + * [ 10. 10. 11. 12. 12.] ] ] + * + * + * `[ `[ [ 11. 11. 12. 13. 13.] + * [ 11. 11. 12. 13. 13.] + * [ 14. 14. 15. 16. 16.] + * [ 14. 14. 15. 16. 16.] ] + * + * `[ [ 17. 17. 18. 19. 19.] + * [ 17. 17. 18. 19. 19.] + * [ 20. 20. 21. 22. 22.] + * [ 20. 20. 21. 22. 22.] ] ] ] + * + * pad(x, mode="constant", constant_value=0, pad_width=(0,0,0,0,1,1,1,1)) = + * + * `[ [`[ [ 0. 0. 0. 0. 0.] + * [ 0. 1. 2. 3. 0.] + * [ 0. 4. 5. 6. 0.] + * [ 0. 0. 0. 0. 0.] ] + * + * `[ [ 0. 0. 0. 0. 0.] + * [ 0. 7. 8. 9. 0.] + * [ 0. 10. 11. 12. 0.] + * [ 0. 0. 0. 0. 0.] ] ] + * + * + * `[ `[ [ 0. 0. 0. 0. 0.] + * [ 0. 11. 12. 13. 0.] + * [ 0. 14. 15. 16. 0.] + * [ 0. 0. 0. 0. 0.] ] + * + * `[ [ 0. 0. 0. 0. 0.] + * [ 0. 17. 18. 19. 0.] + * [ 0. 20. 21. 22. 0.] + * [ 0. 0. 0. 0. 0.] ] ] ] + * + * + * + * + * Defined in src/operator/pad.cc:L766 + * }}} + * + * @param data An n-dimensional input array. + * @param mode Padding type to use. "constant" pads with `constant_value` "edge" pads using the edge values of the input array "reflect" pads by reflecting values with respect to the edges. + * @param pad_width Widths of the padding regions applied to the edges of each axis. It is a tuple of integer padding widths for each axis of the format ``(before_1, after_1, ... , before_N, after_N)``. It should be of length ``2*N`` where ``N`` is the number of dimensions of the array.This is equivalent to pad_width in numpy.pad, but flattened. + * @param constant_value The value used for padding when `mode` is "constant". 
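+ * @note A minimal usage sketch (illustrative only; assumes the concrete `NDArray.api` object
+ *       that implements this base class):
+ *       {{{
+ *       // 4-D input of shape (1, 1, 2, 3); padding is applied on axes 2 and 3 only
+ *       val x = NDArray.array(Array(1f, 2f, 3f, 4f, 5f, 6f), Shape(1, 1, 2, 3))
+ *       val padded = NDArray.api.pad(x, mode = "constant",
+ *                                    pad_width = Shape(0, 0, 0, 0, 1, 1, 1, 1),
+ *                                    constant_value = Some(0.0)).head
+ *       // padded.shape is (1, 1, 4, 5)
+ *       }}}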
+ * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def pad (data : org.apache.mxnet.NDArray, mode : String, pad_width : org.apache.mxnet.Shape, constant_value : Option[Double] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Picks elements from an input array according to the input indices along the given axis. + * + * Given an input array of shape ``(d0, d1)`` and indices of shape ``(i0,)``, the result will be + * an output array of shape ``(i0,)`` with:: + * + * output[i] = input[i, indices[i] ] + * + * By default, if any index mentioned is too large, it is replaced by the index that addresses + * the last element along an axis (the `clip` mode). + * + * This function supports n-dimensional input and (n-1)-dimensional indices arrays. + * + * Examples:: + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] ] + * + * // picks elements with specified indices along axis 0 + * pick(x, y=[0,1], 0) = [ 1., 4.] + * + * // picks elements with specified indices along axis 1 + * pick(x, y=[0,1,0], 1) = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] ] + * + * // picks elements with specified indices along axis 1 using 'wrap' mode + * // to place indicies that would normally be out of bounds + * pick(x, y=[2,-1,-2], 1, mode='wrap') = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] ] + * + * // picks elements with specified indices along axis 1 and dims are maintained + * pick(x,y, 1, keepdims=True) = `[ [ 2.], + * [ 3.], + * [ 6.] ] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L155 + * }}} + * + * @param data The input array + * @param index The index array + * @param axis int or None. The axis to picking the elements. Negative values means indexing from right to left. If is `None`, the elements in the index w.r.t the flattened input will be picked. + * @param keepdims If true, the axis where we pick the elements is left in the result as dimension with size one. + * @param mode Specify how out-of-bound indices behave. Default is "clip". "clip" means clip to the range. So, if all indices mentioned are too large, they are replaced by the index that addresses the last element along an axis. "wrap" means to wrap around. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def pick (data : org.apache.mxnet.NDArray, index : org.apache.mxnet.NDArray, axis : Option[Int] = None, keepdims : Option[Boolean] = None, mode : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Momentum update function for multi-precision Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * + * + * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L200 + * }}} + * + * @param data Weights, gradients, momentums, learning rates and weight decays + * @param momentum The decay rate of momentum estimates at each epoch. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. 
+ * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param num_weights Number of updated weights. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def preloaded_multi_mp_sgd_mom_update (data : Array[org.apache.mxnet.NDArray], momentum : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, num_weights : Option[Int] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Update function for multi-precision Stochastic Gradient Descent (SDG) optimizer. + * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * + * + * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L140 + * }}} + * + * @param data Weights, gradients, learning rates and weight decays + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param num_weights Number of updated weights. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def preloaded_multi_mp_sgd_update (data : Array[org.apache.mxnet.NDArray], rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, num_weights : Option[Int] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Momentum update function for Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * + * + * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L91 + * }}} + * + * @param data Weights, gradients, momentum, learning rates and weight decays + * @param momentum The decay rate of momentum estimates at each epoch. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param num_weights Number of updated weights. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def preloaded_multi_sgd_mom_update (data : Array[org.apache.mxnet.NDArray], momentum : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, num_weights : Option[Int] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Update function for Stochastic Gradient Descent (SDG) optimizer. + * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * + * + * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L42 + * }}} + * + * @param data Weights, gradients, learning rates and weight decays + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. 
+ * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param num_weights Number of updated weights. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def preloaded_multi_sgd_update (data : Array[org.apache.mxnet.NDArray], rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, num_weights : Option[Int] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the product of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L31 + * }}} + * + * @param data The input + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def prod (data : org.apache.mxnet.NDArray, axis : Option[org.apache.mxnet.Shape] = None, keepdims : Option[Boolean] = None, exclude : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Converts each element of the input array from degrees to radians. + * + * .. math:: + * radians([0, 90, 180, 270, 360]) = [0, \pi/2, \pi, 3\pi/2, 2\pi] + * + * The storage type of ``radians`` output depends upon the input storage type: + * + * - radians(default) = default + * - radians(row_sparse) = row_sparse + * - radians(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L293 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def radians (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Draw random samples from an exponential distribution. + * + * Samples are distributed according to an exponential distribution parametrized by *lambda* (rate). + * + * Example:: + * + * exponential(lam=4, shape=(2,2)) = `[ [ 0.0097189 , 0.08999364], + * [ 0.04146638, 0.31715935] ] + * + * + * Defined in src/operator/random/sample_op.cc:L137 + * }}} + * + * @param lam Lambda parameter (rate) of the exponential distribution. + * @param shape Shape of the output. + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def random_exponential (lam : Option[Float] = None, shape : Option[org.apache.mxnet.Shape] = None, ctx : Option[String] = None, dtype : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Draw random samples from a gamma distribution. 
+ * + * Samples are distributed according to a gamma distribution parametrized by *alpha* (shape) and *beta* (scale). + * + * Example:: + * + * gamma(alpha=9, beta=0.5, shape=(2,2)) = `[ [ 7.10486984, 3.37695289], + * [ 3.91697288, 3.65933681] ] + * + * + * Defined in src/operator/random/sample_op.cc:L125 + * }}} + * + * @param alpha Alpha parameter (shape) of the gamma distribution. + * @param beta Beta parameter (scale) of the gamma distribution. + * @param shape Shape of the output. + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def random_gamma (alpha : Option[Float] = None, beta : Option[Float] = None, shape : Option[org.apache.mxnet.Shape] = None, ctx : Option[String] = None, dtype : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Draw random samples from a generalized negative binomial distribution. + * + * Samples are distributed according to a generalized negative binomial distribution parametrized by + * *mu* (mean) and *alpha* (dispersion). *alpha* is defined as *1/k* where *k* is the failure limit of the + * number of unsuccessful experiments (generalized to real numbers). + * Samples will always be returned as a floating point data type. + * + * Example:: + * + * generalized_negative_binomial(mu=2.0, alpha=0.3, shape=(2,2)) = `[ [ 2., 1.], + * [ 6., 4.] ] + * + * + * Defined in src/operator/random/sample_op.cc:L179 + * }}} + * + * @param mu Mean of the negative binomial distribution. + * @param alpha Alpha (dispersion) parameter of the negative binomial distribution. + * @param shape Shape of the output. + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def random_generalized_negative_binomial (mu : Option[Float] = None, alpha : Option[Float] = None, shape : Option[org.apache.mxnet.Shape] = None, ctx : Option[String] = None, dtype : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Draw random samples from a negative binomial distribution. + * + * Samples are distributed according to a negative binomial distribution parametrized by + * *k* (limit of unsuccessful experiments) and *p* (failure probability in each experiment). + * Samples will always be returned as a floating point data type. + * + * Example:: + * + * negative_binomial(k=3, p=0.4, shape=(2,2)) = `[ [ 4., 7.], + * [ 2., 5.] ] + * + * + * Defined in src/operator/random/sample_op.cc:L164 + * }}} + * + * @param k Limit of unsuccessful experiments. + * @param p Failure probability in each experiment. + * @param shape Shape of the output. + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). 
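+ * @note A minimal usage sketch (illustrative only; assumes the concrete `NDArray.api` object
+ *       that implements this base class):
+ *       {{{
+ *       val samples = NDArray.api.random_negative_binomial(
+ *         k = Some(3), p = Some(0.4f), shape = Some(Shape(2, 2))).head
+ *       // a 2x2 array of non-negative counts; values vary between runs
+ *       }}}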
+ * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def random_negative_binomial (k : Option[Int] = None, p : Option[Float] = None, shape : Option[org.apache.mxnet.Shape] = None, ctx : Option[String] = None, dtype : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Draw random samples from a normal (Gaussian) distribution. + * + * .. note:: The existing alias ``normal`` is deprecated. + * + * Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale* + * (standard deviation). + * + * Example:: + * + * normal(loc=0, scale=1, shape=(2,2)) = `[ [ 1.89171135, -1.16881478], + * [-1.23474145, 1.55807114] ] + * + * + * Defined in src/operator/random/sample_op.cc:L113 + * }}} + * + * @param loc Mean of the distribution. + * @param scale Standard deviation of the distribution. + * @param shape Shape of the output. + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def random_normal (loc : Option[Float] = None, scale : Option[Float] = None, shape : Option[org.apache.mxnet.Shape] = None, ctx : Option[String] = None, dtype : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * Dirichlet distributions with parameter *alpha*. + * + * The shape of *alpha* must match the leftmost subshape of *sample*. That is, *sample* + * can have the same shape as *alpha*, in which case the output contains one density per + * distribution, or *sample* can be a tensor of tensors with that shape, in which case + * the output is a tensor of densities such that the densities at index *i* in the output + * are given by the samples at index *i* in *sample* parameterized by the value of *alpha* + * at index *i*. + * + * Examples:: + * + * random_pdf_dirichlet(sample=`[ [1,2],[2,3],[3,4] ], alpha=[2.5, 2.5]) = + * [38.413498, 199.60245, 564.56085] + * + * sample = `[ `[ [1, 2, 3], [10, 20, 30], [100, 200, 300] ], + * `[ [0.1, 0.2, 0.3], [0.01, 0.02, 0.03], [0.001, 0.002, 0.003] ] ] + * + * random_pdf_dirichlet(sample=sample, alpha=[0.1, 0.4, 0.9]) = + * `[ [2.3257459e-02, 5.8420084e-04, 1.4674458e-05], + * [9.2589635e-01, 3.6860607e+01, 1.4674468e+03] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L315 + * }}} + * + * @param sample Samples from the distributions. + * @param alpha Concentration parameters of the distributions. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def random_pdf_dirichlet (sample : org.apache.mxnet.NDArray, alpha : org.apache.mxnet.NDArray, is_log : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * exponential distributions with parameters *lam* (rate). + * + * The shape of *lam* must match the leftmost subshape of *sample*. 
That is, *sample* + * can have the same shape as *lam*, in which case the output contains one density per + * distribution, or *sample* can be a tensor of tensors with that shape, in which case + * the output is a tensor of densities such that the densities at index *i* in the output + * are given by the samples at index *i* in *sample* parameterized by the value of *lam* + * at index *i*. + * + * Examples:: + * + * random_pdf_exponential(sample=`[ [1, 2, 3] ], lam=[1]) = + * `[ [0.36787945, 0.13533528, 0.04978707] ] + * + * sample = `[ [1,2,3], + * [1,2,3], + * [1,2,3] ] + * + * random_pdf_exponential(sample=sample, lam=[1,0.5,0.25]) = + * `[ [0.36787945, 0.13533528, 0.04978707], + * [0.30326533, 0.18393973, 0.11156508], + * [0.1947002, 0.15163267, 0.11809164] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L304 + * }}} + * + * @param sample Samples from the distributions. + * @param lam Lambda (rate) parameters of the distributions. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def random_pdf_exponential (sample : org.apache.mxnet.NDArray, lam : org.apache.mxnet.NDArray, is_log : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * gamma distributions with parameters *alpha* (shape) and *beta* (rate). + * + * *alpha* and *beta* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *alpha* and *beta*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *alpha* and *beta* at index *i*. + * + * Examples:: + * + * random_pdf_gamma(sample=`[ [1,2,3,4,5] ], alpha=[5], beta=[1]) = + * `[ [0.01532831, 0.09022352, 0.16803136, 0.19536681, 0.17546739] ] + * + * sample = `[ [1, 2, 3, 4, 5], + * [2, 3, 4, 5, 6], + * [3, 4, 5, 6, 7] ] + * + * random_pdf_gamma(sample=sample, alpha=[5,6,7], beta=[1,1,1]) = + * `[ [0.01532831, 0.09022352, 0.16803136, 0.19536681, 0.17546739], + * [0.03608941, 0.10081882, 0.15629345, 0.17546739, 0.16062315], + * [0.05040941, 0.10419563, 0.14622283, 0.16062315, 0.14900276] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L301 + * }}} + * + * @param sample Samples from the distributions. + * @param alpha Alpha (shape) parameters of the distributions. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @param beta Beta (scale) parameters of the distributions. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def random_pdf_gamma (sample : org.apache.mxnet.NDArray, alpha : org.apache.mxnet.NDArray, is_log : Option[Boolean] = None, beta : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * generalized negative binomial distributions with parameters *mu* (mean) + * and *alpha* (dispersion). This can be understood as a reparameterization of + * the negative binomial, where *k* = *1 / alpha* and *p* = *1 / (mu \* alpha + 1)*. + * + * *mu* and *alpha* must have the same shape, which must match the leftmost subshape + * of *sample*. 
That is, *sample* can have the same shape as *mu* and *alpha*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *mu* and *alpha* at index *i*. + * + * Examples:: + * + * random_pdf_generalized_negative_binomial(sample=`[ [1, 2, 3, 4] ], alpha=[1], mu=[1]) = + * `[ [0.25, 0.125, 0.0625, 0.03125] ] + * + * sample = `[ [1,2,3,4], + * [1,2,3,4] ] + * random_pdf_generalized_negative_binomial(sample=sample, alpha=[1, 0.6666], mu=[1, 1.5]) = + * `[ [0.25, 0.125, 0.0625, 0.03125 ], + * [0.26517063, 0.16573331, 0.09667706, 0.05437994] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L311 + * }}} + * + * @param sample Samples from the distributions. + * @param mu Means of the distributions. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @param alpha Alpha (dispersion) parameters of the distributions. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def random_pdf_generalized_negative_binomial (sample : org.apache.mxnet.NDArray, mu : org.apache.mxnet.NDArray, is_log : Option[Boolean] = None, alpha : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the value of the PDF of samples of + * negative binomial distributions with parameters *k* (failure limit) and *p* (failure probability). + * + * *k* and *p* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *k* and *p*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *k* and *p* at index *i*. + * + * Examples:: + * + * random_pdf_negative_binomial(sample=`[ [1,2,3,4] ], k=[1], p=a[0.5]) = + * `[ [0.25, 0.125, 0.0625, 0.03125] ] + * + * # Note that k may be real-valued + * sample = `[ [1,2,3,4], + * [1,2,3,4] ] + * random_pdf_negative_binomial(sample=sample, k=[1, 1.5], p=[0.5, 0.5]) = + * `[ [0.25, 0.125, 0.0625, 0.03125 ], + * [0.26516506, 0.16572815, 0.09667476, 0.05437956] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L308 + * }}} + * + * @param sample Samples from the distributions. + * @param k Limits of unsuccessful experiments. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @param p Failure probabilities in each experiment. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def random_pdf_negative_binomial (sample : org.apache.mxnet.NDArray, k : org.apache.mxnet.NDArray, is_log : Option[Boolean] = None, p : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * normal distributions with parameters *mu* (mean) and *sigma* (standard deviation). + * + * *mu* and *sigma* must have the same shape, which must match the leftmost subshape + * of *sample*. 
That is, *sample* can have the same shape as *mu* and *sigma*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *mu* and *sigma* at index *i*. + * + * Examples:: + * + * sample = `[ [-2, -1, 0, 1, 2] ] + * random_pdf_normal(sample=sample, mu=[0], sigma=[1]) = + * `[ [0.05399097, 0.24197073, 0.3989423, 0.24197073, 0.05399097] ] + * + * random_pdf_normal(sample=sample*2, mu=[0,0], sigma=[1,2]) = + * `[ [0.05399097, 0.24197073, 0.3989423, 0.24197073, 0.05399097], + * [0.12098537, 0.17603266, 0.19947115, 0.17603266, 0.12098537] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L299 + * }}} + * + * @param sample Samples from the distributions. + * @param mu Means of the distributions. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @param sigma Standard deviations of the distributions. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def random_pdf_normal (sample : org.apache.mxnet.NDArray, mu : org.apache.mxnet.NDArray, is_log : Option[Boolean] = None, sigma : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * Poisson distributions with parameters *lam* (rate). + * + * The shape of *lam* must match the leftmost subshape of *sample*. That is, *sample* + * can have the same shape as *lam*, in which case the output contains one density per + * distribution, or *sample* can be a tensor of tensors with that shape, in which case + * the output is a tensor of densities such that the densities at index *i* in the output + * are given by the samples at index *i* in *sample* parameterized by the value of *lam* + * at index *i*. + * + * Examples:: + * + * random_pdf_poisson(sample=`[ [0,1,2,3] ], lam=[1]) = + * `[ [0.36787945, 0.36787945, 0.18393973, 0.06131324] ] + * + * sample = `[ [0,1,2,3], + * [0,1,2,3], + * [0,1,2,3] ] + * + * random_pdf_poisson(sample=sample, lam=[1,2,3]) = + * `[ [0.36787945, 0.36787945, 0.18393973, 0.06131324], + * [0.13533528, 0.27067056, 0.27067056, 0.18044704], + * [0.04978707, 0.14936121, 0.22404182, 0.22404182] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L306 + * }}} + * + * @param sample Samples from the distributions. + * @param lam Lambda (rate) parameters of the distributions. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def random_pdf_poisson (sample : org.apache.mxnet.NDArray, lam : org.apache.mxnet.NDArray, is_log : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * uniform distributions on the intervals given by *[low,high)*. + * + * *low* and *high* must have the same shape, which must match the leftmost subshape + * of *sample*. 
That is, *sample* can have the same shape as *low* and *high*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *low* and *high* at index *i*. + * + * Examples:: + * + * random_pdf_uniform(sample=`[ [1,2,3,4] ], low=[0], high=[10]) = [0.1, 0.1, 0.1, 0.1] + * + * sample = `[ `[ [1, 2, 3], + * [1, 2, 3] ], + * `[ [1, 2, 3], + * [1, 2, 3] ] ] + * low = `[ [0, 0], + * [0, 0] ] + * high = `[ [ 5, 10], + * [15, 20] ] + * random_pdf_uniform(sample=sample, low=low, high=high) = + * `[ `[ [0.2, 0.2, 0.2 ], + * [0.1, 0.1, 0.1 ] ], + * `[ [0.06667, 0.06667, 0.06667], + * [0.05, 0.05, 0.05 ] ] ] + * + * + * + * Defined in src/operator/random/pdf_op.cc:L297 + * }}} + * + * @param sample Samples from the distributions. + * @param low Lower bounds of the distributions. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @param high Upper bounds of the distributions. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def random_pdf_uniform (sample : org.apache.mxnet.NDArray, low : org.apache.mxnet.NDArray, is_log : Option[Boolean] = None, high : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Draw random samples from a Poisson distribution. + * + * Samples are distributed according to a Poisson distribution parametrized by *lambda* (rate). + * Samples will always be returned as a floating point data type. + * + * Example:: + * + * poisson(lam=4, shape=(2,2)) = `[ [ 5., 2.], + * [ 4., 6.] ] + * + * + * Defined in src/operator/random/sample_op.cc:L150 + * }}} + * + * @param lam Lambda parameter (rate) of the Poisson distribution. + * @param shape Shape of the output. + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def random_poisson (lam : Option[Float] = None, shape : Option[org.apache.mxnet.Shape] = None, ctx : Option[String] = None, dtype : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Draw random samples from a discrete uniform distribution. + * + * Samples are uniformly distributed over the half-open interval *[low, high)* + * (includes *low*, but excludes *high*). + * + * Example:: + * + * randint(low=0, high=5, shape=(2,2)) = `[ [ 0, 2], + * [ 3, 1] ] + * + * + * + * Defined in src/operator/random/sample_op.cc:L194 + * }}} + * + * @param low Lower bound of the distribution. + * @param high Upper bound of the distribution. + * @param shape Shape of the output. + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + * @param dtype DType of the output in case this can't be inferred. Defaults to int32 if not defined (dtype=None). 
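+ * @note A minimal usage sketch (illustrative only; assumes the concrete `NDArray.api` object
+ *       that implements this base class):
+ *       {{{
+ *       val r = NDArray.api.random_randint(low = 0L, high = 5L, shape = Some(Shape(2, 2))).head
+ *       // a 2x2 int32 array with values drawn from [0, 5); values vary between runs
+ *       }}}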
+ * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def random_randint (low : Long, high : Long, shape : Option[org.apache.mxnet.Shape] = None, ctx : Option[String] = None, dtype : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Draw random samples from a uniform distribution. + * + * .. note:: The existing alias ``uniform`` is deprecated. + * + * Samples are uniformly distributed over the half-open interval *[low, high)* + * (includes *low*, but excludes *high*). + * + * Example:: + * + * uniform(low=0, high=1, shape=(2,2)) = `[ [ 0.60276335, 0.85794562], + * [ 0.54488319, 0.84725171] ] + * + * + * + * Defined in src/operator/random/sample_op.cc:L96 + * }}} + * + * @param low Lower bound of the distribution. + * @param high Upper bound of the distribution. + * @param shape Shape of the output. + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def random_uniform (low : Option[Float] = None, high : Option[Float] = None, shape : Option[org.apache.mxnet.Shape] = None, ctx : Option[String] = None, dtype : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Converts a batch of index arrays into an array of flat indices. The operator follows numpy conventions so a single multi index is given by a column of the input matrix. The leading dimension may be left unspecified by using -1 as placeholder. + * + * Examples:: + * + * A = `[ [3,6,6],[4,5,1] ] + * ravel(A, shape=(7,6)) = [22,41,37] + * ravel(A, shape=(-1,6)) = [22,41,37] + * + * + * + * Defined in src/operator/tensor/ravel.cc:L42 + * }}} + * + * @param data Batch of multi-indices + * @param shape Shape of the array into which the multi-indices apply. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def ravel_multi_index (data : org.apache.mxnet.NDArray, shape : Option[org.apache.mxnet.Shape] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise inverse cube-root value of the input. + * + * .. math:: + * rcbrt(x) = 1/\sqrt[3]{x} + * + * Example:: + * + * rcbrt([1,8,-125]) = [1.0, 0.5, -0.2] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L269 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def rcbrt (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns the reciprocal of the argument, element-wise. + * + * Calculates 1/x. + * + * Example:: + * + * reciprocal([-2, 1, 3, 1.6, 0.2]) = [-0.5, 1.0, 0.33333334, 0.625, 5.0] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L42 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def reciprocal (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes rectified linear activation. + * + * .. 
math:: + * max(features, 0) + * + * The storage type of ``relu`` output depends upon the input storage type: + * + * - relu(default) = default + * - relu(row_sparse) = row_sparse + * - relu(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L85 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def relu (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Repeats elements of an array. + * By default, ``repeat`` flattens the input array into 1-D and then repeats the + * elements:: + * x = `[ [ 1, 2], + * [ 3, 4] ] + * repeat(x, repeats=2) = [ 1., 1., 2., 2., 3., 3., 4., 4.] + * The parameter ``axis`` specifies the axis along which to perform repeat:: + * repeat(x, repeats=2, axis=1) = `[ [ 1., 1., 2., 2.], + * [ 3., 3., 4., 4.] ] + * repeat(x, repeats=2, axis=0) = `[ [ 1., 2.], + * [ 1., 2.], + * [ 3., 4.], + * [ 3., 4.] ] + * repeat(x, repeats=2, axis=-1) = `[ [ 1., 1., 2., 2.], + * [ 3., 3., 4., 4.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L744 + * }}} + * + * @param data Input data array + * @param repeats The number of repetitions for each element. + * @param axis The axis along which to repeat values. The negative numbers are interpreted counting from the backward. By default, use the flattened input array, and return a flat output array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def repeat (data : org.apache.mxnet.NDArray, repeats : Int, axis : Option[Int] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Set to zero multiple arrays + * + * + * Defined in src/operator/contrib/reset_arrays.cc:L36 + * }}} + * + * @param data Arrays + * @param num_arrays number of input arrays. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def reset_arrays (data : Array[org.apache.mxnet.NDArray], num_arrays : Int, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Reshapes the input array. + * .. note:: ``Reshape`` is deprecated, use ``reshape`` + * Given an array and a shape, this function returns a copy of the array in the new shape. + * The shape is a tuple of integers such as (2,3,4). The size of the new shape should be same as the size of the input array. + * Example:: + * reshape([1,2,3,4], shape=(2,2)) = `[ [1,2], [3,4] ] + * Some dimensions of the shape can take special values from the set {0, -1, -2, -3, -4}. The significance of each is explained below: + * - ``0`` copy this dimension from the input to the output shape. + * Example:: + * - input shape = (2,3,4), shape = (4,0,2), output shape = (4,3,2) + * - input shape = (2,3,4), shape = (2,0,0), output shape = (2,3,4) + * - ``-1`` infers the dimension of the output shape by using the remainder of the input dimensions + * keeping the size of the new array same as that of the input array. + * At most one dimension of shape can be -1. + * Example:: + * - input shape = (2,3,4), shape = (6,1,-1), output shape = (6,1,4) + * - input shape = (2,3,4), shape = (3,-1,8), output shape = (3,1,8) + * - input shape = (2,3,4), shape=(-1,), output shape = (24,) + * - ``-2`` copy all/remainder of the input dimensions to the output shape. 
+ * Example:: + * - input shape = (2,3,4), shape = (-2,), output shape = (2,3,4) + * - input shape = (2,3,4), shape = (2,-2), output shape = (2,3,4) + * - input shape = (2,3,4), shape = (-2,1,1), output shape = (2,3,4,1,1) + * - ``-3`` use the product of two consecutive dimensions of the input shape as the output dimension. + * Example:: + * - input shape = (2,3,4), shape = (-3,4), output shape = (6,4) + * - input shape = (2,3,4,5), shape = (-3,-3), output shape = (6,20) + * - input shape = (2,3,4), shape = (0,-3), output shape = (2,12) + * - input shape = (2,3,4), shape = (-3,-2), output shape = (6,4) + * - ``-4`` split one dimension of the input into two dimensions passed subsequent to -4 in shape (can contain -1). + * Example:: + * - input shape = (2,3,4), shape = (-4,1,2,-2), output shape =(1,2,3,4) + * - input shape = (2,3,4), shape = (2,-4,-1,3,-2), output shape = (2,1,3,4) + * If the argument `reverse` is set to 1, then the special values are inferred from right to left. + * Example:: + * - without reverse=1, for input shape = (10,5,4), shape = (-1,0), output shape would be (40,5) + * - with reverse=1, output shape will be (50,4). + * + * + * Defined in src/operator/tensor/matrix_op.cc:L175 + * }}} + * + * @param data Input data to reshape. + * @param shape The target shape + * @param reverse If true then the special values are inferred from right to left + * @param target_shape (Deprecated! Use ``shape`` instead.) Target new shape. One and only one dim can be 0, in which case it will be inferred from the rest of dims + * @param keep_highest (Deprecated! Use ``shape`` instead.) Whether keep the highest dim unchanged.If set to true, then the first dim in target_shape is ignored,and always fixed as input + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def reshape (data : org.apache.mxnet.NDArray, shape : Option[org.apache.mxnet.Shape] = None, reverse : Option[Boolean] = None, target_shape : Option[org.apache.mxnet.Shape] = None, keep_highest : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Reshape some or all dimensions of `lhs` to have the same shape as some or all dimensions of `rhs`. + * + * Returns a **view** of the `lhs` array with a new shape without altering any data. + * + * Example:: + * + * x = [1, 2, 3, 4, 5, 6] + * y = `[ [0, -4], [3, 2], [2, 2] ] + * reshape_like(x, y) = `[ [1, 2], [3, 4], [5, 6] ] + * + * More precise control over how dimensions are inherited is achieved by specifying \ + * slices over the `lhs` and `rhs` array dimensions. Only the sliced `lhs` dimensions \ + * are reshaped to the `rhs` sliced dimensions, with the non-sliced `lhs` dimensions staying the same. + * + * Examples:: + * + * - lhs shape = (30,7), rhs shape = (15,2,4), lhs_begin=0, lhs_end=1, rhs_begin=0, rhs_end=2, output shape = (15,2,7) + * - lhs shape = (3, 5), rhs shape = (1,15,4), lhs_begin=0, lhs_end=2, rhs_begin=1, rhs_end=2, output shape = (15) + * + * Negative indices are supported, and `None` can be used for either `lhs_end` or `rhs_end` to indicate the end of the range. + * + * Example:: + * + * - lhs shape = (30, 12), rhs shape = (4, 2, 2, 3), lhs_begin=-1, lhs_end=None, rhs_begin=1, rhs_end=None, output shape = (30, 2, 2, 3) + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L513 + * }}} + * + * @param lhs First input. + * @param rhs Second input. + * @param lhs_begin Defaults to 0. The beginning index along which the lhs dimensions are to be reshaped. 
Supports negative indices. + * @param lhs_end Defaults to None. The ending index along which the lhs dimensions are to be used for reshaping. Supports negative indices. + * @param rhs_begin Defaults to 0. The beginning index along which the rhs dimensions are to be used for reshaping. Supports negative indices. + * @param rhs_end Defaults to None. The ending index along which the rhs dimensions are to be used for reshaping. Supports negative indices. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def reshape_like (lhs : org.apache.mxnet.NDArray, rhs : org.apache.mxnet.NDArray, lhs_begin : Option[Int] = None, lhs_end : Option[Int] = None, rhs_begin : Option[Int] = None, rhs_end : Option[Int] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Reverses the order of elements along given axis while preserving array shape. + * Note: reverse and flip are equivalent. We use reverse in the following examples. + * Examples:: + * x = `[ [ 0., 1., 2., 3., 4.], + * [ 5., 6., 7., 8., 9.] ] + * reverse(x, axis=0) = `[ [ 5., 6., 7., 8., 9.], + * [ 0., 1., 2., 3., 4.] ] + * reverse(x, axis=1) = `[ [ 4., 3., 2., 1., 0.], + * [ 9., 8., 7., 6., 5.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L832 + * }}} + * + * @param data Input data array + * @param axis The axis which to reverse elements. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def reverse (data : org.apache.mxnet.NDArray, axis : org.apache.mxnet.Shape, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise rounded value to the nearest integer of the input. + * + * .. note:: + * - For input ``n.5`` ``rint`` returns ``n`` while ``round`` returns ``n+1``. + * - For input ``-n.5`` both ``rint`` and ``round`` returns ``-n-1``. + * + * Example:: + * + * rint([-1.5, 1.5, -1.9, 1.9, 2.1]) = [-2., 1., -2., 2., 2.] + * + * The storage type of ``rint`` output depends upon the input storage type: + * + * - rint(default) = default + * - rint(row_sparse) = row_sparse + * - rint(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L799 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def rint (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Update function for `RMSProp` optimizer. + * + * `RMSprop` is a variant of stochastic gradient descent where the gradients are + * divided by a cache which grows with the sum of squares of recent gradients? + * + * `RMSProp` is similar to `AdaGrad`, a popular variant of `SGD` which adaptively + * tunes the learning rate of each parameter. `AdaGrad` lowers the learning rate for + * each parameter monotonically over the course of training. + * While this is analytically motivated for convex optimizations, it may not be ideal + * for non-convex problems. `RMSProp` deals with this heuristically by allowing the + * learning rates to rebound as the denominator decays over time. + * + * Define the Root Mean Square (RMS) error criterion of the gradient as + * :math:`RMS[g]_t = \sqrt{E[g^2]_t + \epsilon}`, where :math:`g` represents + * gradient and :math:`E[g^2]_t` is the decaying average over past squared gradient. + * + * The :math:`E[g^2]_t` is given by: + * + * .. math:: + * E[g^2]_t = \gamma * E[g^2]_{t-1} + (1-\gamma) * g_t^2 + * + * The update step is + * + * .. 
math:: + * \theta_{t+1} = \theta_t - \frac{\eta}{RMS[g]_t} g_t + * + * The RMSProp code follows the version in + * http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf + * Tieleman & Hinton, 2012. + * + * Hinton suggests the momentum term :math:`\gamma` to be 0.9 and the learning rate + * :math:`\eta` to be 0.001. + * + * + * + * Defined in src/operator/optimizer_op.cc:L797 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param n n + * @param lr Learning rate + * @param gamma1 The decay rate of momentum estimates. + * @param epsilon A small constant for numerical stability. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param clip_weights Clip weights to the range of [-clip_weights, clip_weights] If clip_weights <= 0, weight clipping is turned off. weights = max(min(weights, clip_weights), -clip_weights). + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def rmsprop_update (weight : org.apache.mxnet.NDArray, grad : org.apache.mxnet.NDArray, n : org.apache.mxnet.NDArray, lr : Float, gamma1 : Option[Float] = None, epsilon : Option[Float] = None, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, clip_weights : Option[Float] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Update function for RMSPropAlex optimizer. + * + * `RMSPropAlex` is non-centered version of `RMSProp`. + * + * Define :math:`E[g^2]_t` is the decaying average over past squared gradient and + * :math:`E[g]_t` is the decaying average over past gradient. + * + * .. math:: + * E[g^2]_t = \gamma_1 * E[g^2]_{t-1} + (1 - \gamma_1) * g_t^2\\ + * E[g]_t = \gamma_1 * E[g]_{t-1} + (1 - \gamma_1) * g_t\\ + * \Delta_t = \gamma_2 * \Delta_{t-1} - \frac{\eta}{\sqrt{E[g^2]_t - E[g]_t^2 + \epsilon}} g_t\\ + * + * The update step is + * + * .. math:: + * \theta_{t+1} = \theta_t + \Delta_t + * + * The RMSPropAlex code follows the version in + * http://arxiv.org/pdf/1308.0850v5.pdf Eq(38) - Eq(45) by Alex Graves, 2013. + * + * Graves suggests the momentum term :math:`\gamma_1` to be 0.95, :math:`\gamma_2` + * to be 0.9 and the learning rate :math:`\eta` to be 0.0001. + * + * + * Defined in src/operator/optimizer_op.cc:L836 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param n n + * @param g g + * @param delta delta + * @param lr Learning rate + * @param gamma1 Decay rate. + * @param gamma2 Decay rate. + * @param epsilon A small constant for numerical stability. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param clip_weights Clip weights to the range of [-clip_weights, clip_weights] If clip_weights <= 0, weight clipping is turned off. 
weights = max(min(weights, clip_weights), -clip_weights). + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def rmspropalex_update (weight : org.apache.mxnet.NDArray, grad : org.apache.mxnet.NDArray, n : org.apache.mxnet.NDArray, g : org.apache.mxnet.NDArray, delta : org.apache.mxnet.NDArray, lr : Float, gamma1 : Option[Float] = None, gamma2 : Option[Float] = None, epsilon : Option[Float] = None, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, clip_weights : Option[Float] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise rounded value to the nearest integer of the input. + * + * Example:: + * + * round([-1.5, 1.5, -1.9, 1.9, 2.1]) = [-2., 2., -2., 2., 2.] + * + * The storage type of ``round`` output depends upon the input storage type: + * + * - round(default) = default + * - round(row_sparse) = row_sparse + * - round(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L778 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def round (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise inverse square-root value of the input. + * + * .. math:: + * rsqrt(x) = 1/\sqrt{x} + * + * Example:: + * + * rsqrt([4,9,16]) = [0.5, 0.33333334, 0.25] + * + * The storage type of ``rsqrt`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L193 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def rsqrt (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Concurrent sampling from multiple + * exponential distributions with parameters lambda (rate). + * + * The parameters of the distributions are provided as an input array. + * Let *[s]* be the shape of the input array, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input array, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input value at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input array. + * + * Examples:: + * + * lam = [ 1.0, 8.5 ] + * + * // Draw a single sample for each distribution + * sample_exponential(lam) = [ 0.51837951, 0.09994757] + * + * // Draw a vector containing two samples for each distribution + * sample_exponential(lam, shape=(2)) = `[ [ 0.51837951, 0.19866663], + * [ 0.09994757, 0.50447971] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L283 + * }}} + * + * @param lam Lambda (rate) parameters of the distributions. + * @param shape Shape to be sampled from each random distribution. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). 
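+ * @note A minimal usage sketch (illustration only; assumes the generated API object is available
+ *       as `NDArray.api` and that `.head` extracts the first output):
+ * {{{
+ * // one rate parameter per distribution; draw two samples from each
+ * val lam = NDArray.array(Array(1.0f, 8.5f), shape = Shape(2))
+ * val samples = NDArray.api.sample_exponential(lam, shape = Some(Shape(2))).head
+ * }}}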
+ * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def sample_exponential (lam : org.apache.mxnet.NDArray, shape : Option[org.apache.mxnet.Shape] = None, dtype : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Concurrent sampling from multiple + * gamma distributions with parameters *alpha* (shape) and *beta* (scale). + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Examples:: + * + * alpha = [ 0.0, 2.5 ] + * beta = [ 1.0, 0.7 ] + * + * // Draw a single sample for each distribution + * sample_gamma(alpha, beta) = [ 0. , 2.25797319] + * + * // Draw a vector containing two samples for each distribution + * sample_gamma(alpha, beta, shape=(2)) = `[ [ 0. , 0. ], + * [ 2.25797319, 1.70734084] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L280 + * }}} + * + * @param alpha Alpha (shape) parameters of the distributions. + * @param shape Shape to be sampled from each random distribution. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @param beta Beta (scale) parameters of the distributions. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def sample_gamma (alpha : org.apache.mxnet.NDArray, shape : Option[org.apache.mxnet.Shape] = None, dtype : Option[String] = None, beta : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Concurrent sampling from multiple + * generalized negative binomial distributions with parameters *mu* (mean) and *alpha* (dispersion). + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Samples will always be returned as a floating point data type. + * + * Examples:: + * + * mu = [ 2.0, 2.5 ] + * alpha = [ 1.0, 0.1 ] + * + * // Draw a single sample for each distribution + * sample_generalized_negative_binomial(mu, alpha) = [ 0., 3.] + * + * // Draw a vector containing two samples for each distribution + * sample_generalized_negative_binomial(mu, alpha, shape=(2)) = `[ [ 0., 3.], + * [ 3., 1.] 
] + * + * + * Defined in src/operator/random/multisample_op.cc:L290 + * }}} + * + * @param mu Means of the distributions. + * @param shape Shape to be sampled from each random distribution. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @param alpha Alpha (dispersion) parameters of the distributions. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def sample_generalized_negative_binomial (mu : org.apache.mxnet.NDArray, shape : Option[org.apache.mxnet.Shape] = None, dtype : Option[String] = None, alpha : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Concurrent sampling from multiple multinomial distributions. + * + * *data* is an *n* dimensional array whose last dimension has length *k*, where + * *k* is the number of possible outcomes of each multinomial distribution. This + * operator will draw *shape* samples from each distribution. If shape is empty + * one sample will be drawn from each distribution. + * + * If *get_prob* is true, a second array containing log likelihood of the drawn + * samples will also be returned. This is usually used for reinforcement learning + * where you can provide reward as head gradient for this array to estimate + * gradient. + * + * Note that the input distribution must be normalized, i.e. *data* must sum to + * 1 along its last axis. + * + * Examples:: + * + * probs = `[ [0, 0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1, 0] ] + * + * // Draw a single sample for each distribution + * sample_multinomial(probs) = [3, 0] + * + * // Draw a vector containing two samples for each distribution + * sample_multinomial(probs, shape=(2)) = `[ [4, 2], + * [0, 0] ] + * + * // requests log likelihood + * sample_multinomial(probs, get_prob=True) = [2, 1], [0.2, 0.3] + * }}} + * + * @param data Distribution probabilities. Must sum to one on the last axis. + * @param shape Shape to be sampled from each random distribution. + * @param get_prob Whether to also return the log probability of sampled result. This is usually used for differentiating through stochastic variables, e.g. in reinforcement learning. + * @param dtype DType of the output in case this can't be inferred. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def sample_multinomial (data : org.apache.mxnet.NDArray, shape : Option[org.apache.mxnet.Shape] = None, get_prob : Option[Boolean] = None, dtype : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Concurrent sampling from multiple + * negative binomial distributions with parameters *k* (failure limit) and *p* (failure probability). + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. 
+ * + * Samples will always be returned as a floating point data type. + * + * Examples:: + * + * k = [ 20, 49 ] + * p = [ 0.4 , 0.77 ] + * + * // Draw a single sample for each distribution + * sample_negative_binomial(k, p) = [ 15., 16.] + * + * // Draw a vector containing two samples for each distribution + * sample_negative_binomial(k, p, shape=(2)) = `[ [ 15., 50.], + * [ 16., 12.] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L287 + * }}} + * + * @param k Limits of unsuccessful experiments. + * @param shape Shape to be sampled from each random distribution. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @param p Failure probabilities in each experiment. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def sample_negative_binomial (k : org.apache.mxnet.NDArray, shape : Option[org.apache.mxnet.Shape] = None, dtype : Option[String] = None, p : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Concurrent sampling from multiple + * normal distributions with parameters *mu* (mean) and *sigma* (standard deviation). + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Examples:: + * + * mu = [ 0.0, 2.5 ] + * sigma = [ 1.0, 3.7 ] + * + * // Draw a single sample for each distribution + * sample_normal(mu, sigma) = [-0.56410581, 0.95934606] + * + * // Draw a vector containing two samples for each distribution + * sample_normal(mu, sigma, shape=(2)) = `[ [-0.56410581, 0.2928229 ], + * [ 0.95934606, 4.48287058] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L278 + * }}} + * + * @param mu Means of the distributions. + * @param shape Shape to be sampled from each random distribution. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @param sigma Standard deviations of the distributions. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def sample_normal (mu : org.apache.mxnet.NDArray, shape : Option[org.apache.mxnet.Shape] = None, dtype : Option[String] = None, sigma : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Concurrent sampling from multiple + * Poisson distributions with parameters lambda (rate). + * + * The parameters of the distributions are provided as an input array. + * Let *[s]* be the shape of the input array, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. 
+ * + * For any valid *n*-dimensional index *i* with respect to the input array, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input value at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input array. + * + * Samples will always be returned as a floating point data type. + * + * Examples:: + * + * lam = [ 1.0, 8.5 ] + * + * // Draw a single sample for each distribution + * sample_poisson(lam) = [ 0., 13.] + * + * // Draw a vector containing two samples for each distribution + * sample_poisson(lam, shape=(2)) = `[ [ 0., 4.], + * [ 13., 8.] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L285 + * }}} + * + * @param lam Lambda (rate) parameters of the distributions. + * @param shape Shape to be sampled from each random distribution. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def sample_poisson (lam : org.apache.mxnet.NDArray, shape : Option[org.apache.mxnet.Shape] = None, dtype : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Concurrent sampling from multiple + * uniform distributions on the intervals given by *[low,high)*. + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Examples:: + * + * low = [ 0.0, 2.5 ] + * high = [ 1.0, 3.7 ] + * + * // Draw a single sample for each distribution + * sample_uniform(low, high) = [ 0.40451524, 3.18687344] + * + * // Draw a vector containing two samples for each distribution + * sample_uniform(low, high, shape=(2)) = `[ [ 0.40451524, 0.18017688], + * [ 3.18687344, 3.68352246] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L276 + * }}} + * + * @param low Lower bounds of the distributions. + * @param shape Shape to be sampled from each random distribution. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @param high Upper bounds of the distributions. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def sample_uniform (low : org.apache.mxnet.NDArray, shape : Option[org.apache.mxnet.Shape] = None, dtype : Option[String] = None, high : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Scatters data into a new tensor according to indices. + * + * Given `data` with shape `(Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1})` and indices with shape + * `(M, Y_0, ..., Y_{K-1})`, the output will have shape `(X_0, X_1, ..., X_{N-1})`, + * where `M <= N`. 
If `M == N`, data shape should simply be `(Y_0, ..., Y_{K-1})`. + * + * The elements in output is defined as follows:: + * + * output[indices[0, y_0, ..., y_{K-1}], + * ..., + * indices[M-1, y_0, ..., y_{K-1}], + * x_M, ..., x_{N-1}] = data[y_0, ..., y_{K-1}, x_M, ..., x_{N-1}] + * + * all other entries in output are 0. + * + * .. warning:: + * + * If the indices have duplicates, the result will be non-deterministic and + * the gradient of `scatter_nd` will not be correct!! + * + * + * Examples:: + * + * data = [2, 3, 0] + * indices = `[ [1, 1, 0], [0, 1, 0] ] + * shape = (2, 2) + * scatter_nd(data, indices, shape) = `[ [0, 0], [2, 3] ] + * + * data = `[ `[ [1, 2], [3, 4] ], `[ [5, 6], [7, 8] ] ] + * indices = `[ [0, 1], [1, 1] ] + * shape = (2, 2, 2, 2) + * scatter_nd(data, indices, shape) = `[ [`[ [0, 0], + * [0, 0] ], + * + * `[ [1, 2], + * [3, 4] ] ], + * + * `[ `[ [0, 0], + * [0, 0] ], + * + * `[ [5, 6], + * [7, 8] ] ] ] + * }}} + * + * @param data data + * @param indices indices + * @param shape Shape of output. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def scatter_nd (data : org.apache.mxnet.NDArray, indices : org.apache.mxnet.NDArray, shape : org.apache.mxnet.Shape, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Momentum update function for Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * However, if grad's storage type is ``row_sparse``, ``lazy_update`` is True and weight's storage + * type is the same as momentum's storage type, + * only the row slices whose indices appear in grad.indices are updated (for both weight and momentum):: + * + * for row in gradient.indices: + * v[row] = momentum[row] * v[row] - learning_rate * gradient[row] + * weight[row] += v[row] + * + * + * + * Defined in src/operator/optimizer_op.cc:L565 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param mom Momentum + * @param lr Learning rate + * @param momentum The decay rate of momentum estimates at each epoch. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). 
+ * @param lazy_update If true, lazy updates are applied if gradient's stype is row_sparse and both weight and momentum have the same stype + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def sgd_mom_update (weight : org.apache.mxnet.NDArray, grad : org.apache.mxnet.NDArray, mom : org.apache.mxnet.NDArray, lr : Float, momentum : Option[Float] = None, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, lazy_update : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Update function for Stochastic Gradient Descent (SGD) optimizer. + * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * However, if gradient is of ``row_sparse`` storage type and ``lazy_update`` is True, + * only the row slices whose indices appear in grad.indices are updated:: + * + * for row in gradient.indices: + * weight[row] = weight[row] - learning_rate * (gradient[row] + wd * weight[row]) + * + * + * + * Defined in src/operator/optimizer_op.cc:L524 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param lr Learning rate + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param lazy_update If true, lazy updates are applied if gradient's stype is row_sparse. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def sgd_update (weight : org.apache.mxnet.NDArray, grad : org.apache.mxnet.NDArray, lr : Float, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, lazy_update : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns a 1D int64 array containing the shape of data. + * + * Example:: + * + * shape_array(`[ [1,2,3,4], [5,6,7,8] ]) = [2,4] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L574 + * }}} + * + * @param data Input Array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def shape_array (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Randomly shuffle the elements. + * + * This shuffles the array along the first axis. + * The order of the elements in each subarray does not change. + * For example, if a 2D array is given, the order of the rows randomly changes, + * but the order of the elements in each row does not change. + * }}} + * + * @param data Data to be shuffled. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def shuffle (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes sigmoid of x element-wise. + * + * .. math:: + * y = 1 / (1 + exp(-x)) + * + * The storage type of ``sigmoid`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L119 + * }}} + * + * @param data The input array. 
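+ * @note A minimal usage sketch (illustration only; assumes `NDArray.api` exposes this generated method):
+ * {{{
+ * val x = NDArray.array(Array(-1f, 0f, 1f), shape = Shape(3))
+ * val y = NDArray.api.sigmoid(x).head   // approximately [0.269, 0.5, 0.731]
+ * }}}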
+ * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def sigmoid (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise sign of the input. + * + * Example:: + * + * sign([-2, 0, 3]) = [-1, 0, 1] + * + * The storage type of ``sign`` output depends upon the input storage type: + * + * - sign(default) = default + * - sign(row_sparse) = row_sparse + * - sign(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L759 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def sign (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Update function for SignSGD optimizer. + * + * .. math:: + * + * g_t = \nabla J(W_{t-1})\\ + * W_t = W_{t-1} - \eta_t \text{sign}(g_t) + * + * It updates the weights using:: + * + * weight = weight - learning_rate * sign(gradient) + * + * .. note:: + * - sparse ndarray not supported for this optimizer yet. + * + * + * Defined in src/operator/optimizer_op.cc:L63 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param lr Learning rate + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def signsgd_update (weight : org.apache.mxnet.NDArray, grad : org.apache.mxnet.NDArray, lr : Float, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * SIGN momentUM (Signum) optimizer. + * + * .. math:: + * + * g_t = \nabla J(W_{t-1})\\ + * m_t = \beta m_{t-1} + (1 - \beta) g_t\\ + * W_t = W_{t-1} - \eta_t \text{sign}(m_t) + * + * It updates the weights using:: + * state = momentum * state + (1-momentum) * gradient + * weight = weight - learning_rate * sign(state) + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * .. note:: + * - sparse ndarray not supported for this optimizer yet. + * + * + * Defined in src/operator/optimizer_op.cc:L92 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param mom Momentum + * @param lr Learning rate + * @param momentum The decay rate of momentum estimates at each epoch. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param wd_lh The amount of weight decay that is not folded into the gradient/momentum calculations, but is instead applied to the weights directly.
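+ * @note A minimal usage sketch of the update rule above (illustration only; it assumes the
+ *       generated API object `NDArray.api` and that passing `out = Some(weight)` writes the
+ *       updated weights back in place):
+ * {{{
+ * val weight = NDArray.ones(Shape(2, 2))
+ * val grad   = NDArray.ones(Shape(2, 2))
+ * val mom    = NDArray.zeros(Shape(2, 2))
+ * // state = momentum * state + (1 - momentum) * grad; weight -= lr * sign(state)
+ * NDArray.api.signum_update(weight, grad, mom, lr = 0.01f,
+ *   momentum = Some(0.9f), out = Some(weight))
+ * }}}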
+ * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def signum_update (weight : org.apache.mxnet.NDArray, grad : org.apache.mxnet.NDArray, mom : org.apache.mxnet.NDArray, lr : Float, momentum : Option[Float] = None, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, wd_lh : Option[Float] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the element-wise sine of the input array. + * + * The input should be in radians (:math:`2\pi` rad equals 360 degrees). + * + * .. math:: + * sin([0, \pi/4, \pi/2]) = [0, 0.707, 1] + * + * The storage type of ``sin`` output depends upon the input storage type: + * + * - sin(default) = default + * - sin(row_sparse) = row_sparse + * - sin(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L47 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def sin (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns the hyperbolic sine of the input array, computed element-wise. + * + * .. math:: + * sinh(x) = 0.5\times(exp(x) - exp(-x)) + * + * The storage type of ``sinh`` output depends upon the input storage type: + * + * - sinh(default) = default + * - sinh(row_sparse) = row_sparse + * - sinh(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L313 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def sinh (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns a 1D int64 array containing the size of data. + * + * Example:: + * + * size_array(`[ [1,2,3,4], [5,6,7,8] ]) = [8] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L625 + * }}} + * + * @param data Input Array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def size_array (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Slices a region of the array. + * .. note:: ``crop`` is deprecated. Use ``slice`` instead. + * This function returns a sliced array between the indices given + * by `begin` and `end` with the corresponding `step`. + * For an input array of ``shape=(d_0, d_1, ..., d_n-1)``, + * slice operation with ``begin=(b_0, b_1...b_m-1)``, + * ``end=(e_0, e_1, ..., e_m-1)``, and ``step=(s_0, s_1, ..., s_m-1)``, + * where m <= n, results in an array with the shape + * ``(|e_0-b_0|/|s_0|, ..., |e_m-1-b_m-1|/|s_m-1|, d_m, ..., d_n-1)``. + * The resulting array's *k*-th dimension contains elements + * from the *k*-th dimension of the input array starting + * from index ``b_k`` (inclusive) with step ``s_k`` + * until reaching ``e_k`` (exclusive). + * If the *k*-th elements are `None` in the sequence of `begin`, `end`, + * and `step`, the following rule will be used to set default values. + * If `s_k` is `None`, set `s_k=1`. If `s_k > 0`, set `b_k=0`, `e_k=d_k`; + * else, set `b_k=d_k-1`, `e_k=-1`. + * The storage type of ``slice`` output depends on storage types of inputs + * - slice(csr) = csr + * - otherwise, ``slice`` generates output with default storage + * .. note:: When input data storage type is csr, it only supports + * step=(), or step=(None,), or step=(1,) to generate a csr output. 
+ * For other step parameter values, it falls back to slicing + * a dense tensor. + * Example:: + * x = `[ [ 1., 2., 3., 4.], + * [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice(x, begin=(0,1), end=(2,4)) = `[ [ 2., 3., 4.], + * [ 6., 7., 8.] ] + * slice(x, begin=(None, 0), end=(None, 3), step=(-1, 2)) = `[ [9., 11.], + * [5., 7.], + * [1., 3.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L482 + * }}} + * + * @param data Source input + * @param begin starting indices for the slice operation, supports negative indices. + * @param end ending indices for the slice operation, supports negative indices. + * @param step step for the slice operation, supports negative values. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def slice (data : org.apache.mxnet.NDArray, begin : org.apache.mxnet.Shape, end : org.apache.mxnet.Shape, step : Option[org.apache.mxnet.Shape] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Slices along a given axis. + * Returns an array slice along a given `axis` starting from the `begin` index + * to the `end` index. + * Examples:: + * x = `[ [ 1., 2., 3., 4.], + * [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice_axis(x, axis=0, begin=1, end=3) = `[ [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice_axis(x, axis=1, begin=0, end=2) = `[ [ 1., 2.], + * [ 5., 6.], + * [ 9., 10.] ] + * slice_axis(x, axis=1, begin=-3, end=-1) = `[ [ 2., 3.], + * [ 6., 7.], + * [ 10., 11.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L571 + * }}} + * + * @param data Source input + * @param axis Axis along which to be sliced, supports negative indexes. + * @param begin The beginning index along the axis to be sliced, supports negative indexes. + * @param end The ending index along the axis to be sliced, supports negative indexes. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def slice_axis (data : org.apache.mxnet.NDArray, axis : Int, begin : Int, end : Int, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Slices a region of the array like the shape of another array. + * This function is similar to ``slice``, however, the `begin` indices are always `0`s + * and `end` of specific axes are inferred from the second input `shape_like`. + * Given the second `shape_like` input of ``shape=(d_0, d_1, ..., d_n-1)``, + * a ``slice_like`` operator with default empty `axes`, it performs the + * following operation: + * `` out = slice(input, begin=(0, 0, ..., 0), end=(d_0, d_1, ..., d_n-1))``. + * When `axes` is not empty, it is used to specify which axes are being sliced. + * Given a 4-d input data, ``slice_like`` operator with ``axes=(0, 2, -1)`` + * will perform the following operation: + * `` out = slice(input, begin=(0, 0, 0, 0), end=(d_0, None, d_2, d_3))``. + * Note that it is allowed to have first and second input with different dimensions, + * however, you have to make sure the `axes` are specified and do not exceed the + * dimension limits. + * For example, given `input_1` with ``shape=(2,3,4,5)`` and `input_2` with + * ``shape=(1,2,3)``, it is not allowed to use: + * `` out = slice_like(a, b)`` because ndim of `input_1` is 4, and ndim of `input_2` + * is 3. + * The following is allowed in this situation: + * `` out = slice_like(a, b, axes=(0, 2))`` + * Example:: + * x = `[ [ 1., 2., 3., 4.], + * [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * y = `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * slice_like(x, y) = `[ [ 1., 2., 3.]
+ * [ 5., 6., 7.] ] + * slice_like(x, y, axes=(0, 1)) = `[ [ 1., 2., 3.] + * [ 5., 6., 7.] ] + * slice_like(x, y, axes=(0)) = `[ [ 1., 2., 3., 4.] + * [ 5., 6., 7., 8.] ] + * slice_like(x, y, axes=(-1)) = `[ [ 1., 2., 3.] + * [ 5., 6., 7.] + * [ 9., 10., 11.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L625 + * }}} + * + * @param data Source input + * @param shape_like Shape like input + * @param axes List of axes on which input data will be sliced according to the corresponding size of the second input. By default will slice on all axes. Negative axes are supported. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def slice_like (data : org.apache.mxnet.NDArray, shape_like : org.apache.mxnet.NDArray, axes : Option[org.apache.mxnet.Shape] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Calculate Smooth L1 Loss(lhs, scalar) by summing + * + * .. math:: + * + * f(x) = + * \begin{cases} + * (\sigma x)^2/2,& \text{if }x < 1/\sigma^2\\ + * |x|-0.5/\sigma^2,& \text{otherwise} + * \end{cases} + * + * where :math:`x` is an element of the tensor *lhs* and :math:`\sigma` is the scalar. + * + * Example:: + * + * smooth_l1([1, 2, 3, 4]) = [0.5, 1.5, 2.5, 3.5] + * smooth_l1([1, 2, 3, 4], scalar=1) = [0.5, 1.5, 2.5, 3.5] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_scalar_op_extended.cc:L108 + * }}} + * + * @param data source input + * @param scalar scalar input + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def smooth_l1 (data : org.apache.mxnet.NDArray, scalar : Float, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Applies the softmax function. + * + * The resulting array contains elements in the range (0,1) and the elements along the given axis sum up to 1. + * + * .. math:: + * softmax(\mathbf{z/t})_j = \frac{e^{z_j/t}}{\sum_{k=1}^K e^{z_k/t}} + * + * for :math:`j = 1, ..., K` + * + * t is the temperature parameter in softmax function. By default, t equals 1.0 + * + * Example:: + * + * x = `[ [ 1. 1. 1.] + * [ 1. 1. 1.] ] + * + * softmax(x,axis=0) = `[ [ 0.5 0.5 0.5] + * [ 0.5 0.5 0.5] ] + * + * softmax(x,axis=1) = `[ [ 0.33333334, 0.33333334, 0.33333334], + * [ 0.33333334, 0.33333334, 0.33333334] ] + * + * + * + * Defined in src/operator/nn/softmax.cc:L103 + * }}} + * + * @param data The input array. + * @param length The length array. + * @param axis The axis along which to compute softmax. + * @param temperature Temperature parameter in softmax + * @param dtype DType of the output in case this can't be inferred. Defaults to the same as input's dtype if not defined (dtype=None). + * @param use_length Whether to use the length input as a mask over the data input. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def softmax (data : org.apache.mxnet.NDArray, length : org.apache.mxnet.NDArray, axis : Option[Int] = None, temperature : Option[Double] = None, dtype : Option[String] = None, use_length : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Calculate cross entropy of softmax output and one-hot label. + * + * - This operator computes the cross entropy in two steps: + * - Applies softmax function on the input array. + * - Computes and returns the cross entropy loss between the softmax output and the labels. + * + * - The softmax function and cross entropy loss is given by: + * + * - Softmax Function: + * + * .. 
math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)} + * + * - Cross Entropy Function: + * + * .. math:: \text{CE(label, output)} = - \sum_i \text{label}_i \log(\text{output}_i) + * + * Example:: + * + * x = `[ [1, 2, 3], + * [11, 7, 5] ] + * + * label = [2, 0] + * + * softmax(x) = `[ [0.09003057, 0.24472848, 0.66524094], + * [0.97962922, 0.01794253, 0.00242826] ] + * + * softmax_cross_entropy(data, label) = - log(0.66524094) - log(0.97962922) = 0.4281871 + * + * + * + * Defined in src/operator/loss_binary_op.cc:L59 + * }}} + * + * @param data Input data + * @param label Input label + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def softmax_cross_entropy (data : org.apache.mxnet.NDArray, label : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Applies the softmin function. + * + * The resulting array contains elements in the range (0,1) and the elements along the given axis sum + * up to 1. + * + * .. math:: + * softmin(\mathbf{z/t})_j = \frac{e^{-z_j/t}}{\sum_{k=1}^K e^{-z_k/t}} + * + * for :math:`j = 1, ..., K` + * + * t is the temperature parameter in softmax function. By default, t equals 1.0 + * + * Example:: + * + * x = `[ [ 1. 2. 3.] + * [ 3. 2. 1.] ] + * + * softmin(x,axis=0) = `[ [ 0.88079703, 0.5, 0.11920292], + * [ 0.11920292, 0.5, 0.88079703] ] + * + * softmin(x,axis=1) = `[ [ 0.66524094, 0.24472848, 0.09003057], + * [ 0.09003057, 0.24472848, 0.66524094] ] + * + * + * + * Defined in src/operator/nn/softmin.cc:L57 + * }}} + * + * @param data The input array. + * @param axis The axis along which to compute softmax. + * @param temperature Temperature parameter in softmax + * @param dtype DType of the output in case this can't be inferred. Defaults to the same as input's dtype if not defined (dtype=None). + * @param use_length Whether to use the length input as a mask over the data input. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def softmin (data : org.apache.mxnet.NDArray, axis : Option[Int] = None, temperature : Option[Double] = None, dtype : Option[String] = None, use_length : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes softsign of x element-wise. + * + * .. math:: + * y = x / (1 + abs(x)) + * + * The storage type of ``softsign`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L191 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def softsign (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns a sorted copy of an input array along the given axis. + * + * Examples:: + * + * x = `[ [ 1, 4], + * [ 3, 1] ] + * + * // sorts along the last axis + * sort(x) = `[ [ 1., 4.], + * [ 1., 3.] ] + * + * // flattens and then sorts + * sort(x, axis=None) = [ 1., 1., 3., 4.] + * + * // sorts along the first axis + * sort(x, axis=0) = `[ [ 1., 1.], + * [ 3., 4.] ] + * + * // in descending order + * sort(x, is_ascend=0) = `[ [ 4., 1.], + * [ 3., 1.] ] + * + * + * + * Defined in src/operator/tensor/ordering_op.cc:L132 + * }}} + * + * @param data The input array + * @param axis Axis along which to sort the input tensor. If not given, the flattened array is used. Default is -1. + * @param is_ascend Whether to sort in ascending or descending order.
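+ * @note A minimal usage sketch (illustration only; assumes `NDArray.api` exposes this generated method):
+ * {{{
+ * val x = NDArray.array(Array(1f, 4f, 3f, 1f), shape = Shape(2, 2))
+ * val ascendingRows  = NDArray.api.sort(x).head                           // sorts along the last axis
+ * val descendingRows = NDArray.api.sort(x, is_ascend = Some(false)).head  // descending order
+ * }}}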
+ * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def sort (data : org.apache.mxnet.NDArray, axis : Option[Int] = None, is_ascend : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Rearranges (permutes) blocks of spatial data into depth. + * Similar to ONNX SpaceToDepth operator: + * https://github.com/onnx/onnx/blob/master/docs/Operators.md#SpaceToDepth + * The output is a new tensor where the values from height and width dimension are + * moved to the depth dimension. The reverse of this operation is ``depth_to_space``. + * .. math:: + * \begin{gather*} + * x \prime = reshape(x, [N, C, H / block\_size, block\_size, W / block\_size, block\_size]) \\ + * x \prime \prime = transpose(x \prime, [0, 3, 5, 1, 2, 4]) \\ + * y = reshape(x \prime \prime, [N, C * (block\_size ^ 2), H / block\_size, W / block\_size]) + * \end{gather*} + * where :math:`x` is an input tensor with default layout as :math:`[N, C, H, W]`: [batch, channels, height, width] + * and :math:`y` is the output tensor of layout :math:`[N, C * (block\_size ^ 2), H / block\_size, W / block\_size]` + * Example:: + * x = `[ [`[ [0, 6, 1, 7, 2, 8], + * [12, 18, 13, 19, 14, 20], + * [3, 9, 4, 10, 5, 11], + * [15, 21, 16, 22, 17, 23] ] ] ] + * space_to_depth(x, 2) = `[ [`[ [0, 1, 2], + * [3, 4, 5] ], + * `[ [6, 7, 8], + * [9, 10, 11] ], + * `[ [12, 13, 14], + * [15, 16, 17] ], + * `[ [18, 19, 20], + * [21, 22, 23] ] ] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L1019 + * }}} + * + * @param data Input ndarray + * @param block_size Blocks of [block_size, block_size] are moved + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def space_to_depth (data : org.apache.mxnet.NDArray, block_size : Int, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Splits an array along a particular axis into multiple sub-arrays. + * + * .. note:: ``SliceChannel`` is deprecated. Use ``split`` instead. + * + * **Note** that `num_outputs` should evenly divide the length of the axis + * along which to split the array. + * + * Example:: + * + * x = `[ `[ [ 1.] + * [ 2.] ] + * `[ [ 3.] + * [ 4.] ] + * `[ [ 5.] + * [ 6.] ] ] + * x.shape = (3, 2, 1) + * + * y = split(x, axis=1, num_outputs=2) // a list of 2 arrays with shape (3, 1, 1) + * y = `[ `[ [ 1.] ] + * `[ [ 3.] ] + * `[ [ 5.] ] ] + * + * `[ `[ [ 2.] ] + * `[ [ 4.] ] + * `[ [ 6.] ] ] + * + * y[0].shape = (3, 1, 1) + * + * z = split(x, axis=0, num_outputs=3) // a list of 3 arrays with shape (1, 2, 1) + * z = `[ `[ [ 1.] + * [ 2.] ] ] + * + * `[ `[ [ 3.] + * [ 4.] ] ] + * + * `[ `[ [ 5.] + * [ 6.] ] ] + * + * z[0].shape = (1, 2, 1) + * + * `squeeze_axis=1` removes the axis with length 1 from the shapes of the output arrays. + * **Note** that setting `squeeze_axis` to ``1`` removes the axis with length 1 only + * along the `axis` on which it is split. + * Also `squeeze_axis` can be set to true only if ``input.shape[axis] == num_outputs``. + * + * Example:: + * + * z = split(x, axis=0, num_outputs=3, squeeze_axis=1) // a list of 3 arrays with shape (2, 1) + * z = `[ [ 1.] + * [ 2.] ] + * + * `[ [ 3.] + * [ 4.] ] + * + * `[ [ 5.] + * [ 6.] ] + * z[0].shape = (2, 1) + * + * + * + * Defined in src/operator/slice_channel.cc:L107 + * }}} + * + * @param data The input + * @param num_outputs Number of splits. Note that this should evenly divide the length of the `axis`. + * @param axis Axis along which to split.
+ * @param squeeze_axis If true, Removes the axis with length 1 from the shapes of the output arrays. **Note** that setting `squeeze_axis` to ``true`` removes axis with length 1 only along the `axis` which it is split. Also `squeeze_axis` can be set to ``true`` only if ``input.shape[axis] == num_outputs``. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def split (data : org.apache.mxnet.NDArray, num_outputs : Int, axis : Option[Int] = None, squeeze_axis : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise square-root value of the input. + * + * .. math:: + * \textrm{sqrt}(x) = \sqrt{x} + * + * Example:: + * + * sqrt([4, 9, 16]) = [2, 3, 4] + * + * The storage type of ``sqrt`` output depends upon the input storage type: + * + * - sqrt(default) = default + * - sqrt(row_sparse) = row_sparse + * - sqrt(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L142 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def sqrt (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Returns element-wise squared value of the input. + * + * .. math:: + * square(x) = x^2 + * + * Example:: + * + * square([2, 3, 4]) = [4, 9, 16] + * + * The storage type of ``square`` output depends upon the input storage type: + * + * - square(default) = default + * - square(row_sparse) = row_sparse + * - square(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L118 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def square (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Remove single-dimensional entries from the shape of an array. + * Same behavior of defining the output tensor shape as numpy.squeeze for the most of cases. + * See the following note for exception. + * Examples:: + * data = `[ `[ [0], [1], [2] ] ] + * squeeze(data) = [0, 1, 2] + * squeeze(data, axis=0) = `[ [0], [1], [2] ] + * squeeze(data, axis=2) = `[ [0, 1, 2] ] + * squeeze(data, axis=(0, 2)) = [0, 1, 2] + * .. Note:: + * The output of this operator will keep at least one dimension not removed. For example, + * squeeze(`[ `[ [4] ] ]) = [4], while in numpy.squeeze, the output will become a scalar. + * }}} + * + * @param data data to squeeze + * @param axis Selects a subset of the single-dimensional entries in the shape. If an axis is selected with shape entry greater than one, an error is raised. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def squeeze (data : org.apache.mxnet.NDArray, axis : Option[org.apache.mxnet.Shape] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Join a sequence of arrays along a new axis. + * The axis parameter specifies the index of the new axis in the dimensions of the + * result. For example, if axis=0 it will be the first dimension and if axis=-1 it + * will be the last dimension. + * Examples:: + * x = [1, 2] + * y = [3, 4] + * stack(x, y) = `[ [1, 2], + * [3, 4] ] + * stack(x, y, axis=1) = `[ [1, 3], + * [2, 4] ] + * }}} + * + * @param data List of arrays to stack + * @param axis The axis in the result array along which the input arrays are stacked. + * @param num_args Number of inputs to be stacked. 
+ * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def stack (data : Array[org.apache.mxnet.NDArray], axis : Option[Int] = None, num_args : Int, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Stops gradient computation. + * + * Stops the accumulated gradient of the inputs from flowing through this operator + * in the backward direction. In other words, this operator prevents the contribution + * of its inputs to be taken into account for computing gradients. + * + * Example:: + * + * v1 = [1, 2] + * v2 = [0, 1] + * a = Variable('a') + * b = Variable('b') + * b_stop_grad = stop_gradient(3 * b) + * loss = MakeLoss(b_stop_grad + a) + * + * executor = loss.simple_bind(ctx=cpu(), a=(1,2), b=(1,2)) + * executor.forward(is_train=True, a=v1, b=v2) + * executor.outputs + * [ 1. 5.] + * + * executor.backward() + * executor.grad_arrays + * [ 0. 0.] + * [ 1. 1.] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L327 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def stop_gradient (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the sum of array elements over given axes. + * + * .. Note:: + * + * `sum` and `sum_axis` are equivalent. + * For ndarray of csr storage type summation along axis 0 and axis 1 is supported. + * Setting keepdims or exclude to True will cause a fallback to dense operator. + * + * Example:: + * + * data = `[ `[ [1, 2], [2, 3], [1, 3] ], + * `[ [1, 4], [4, 3], [5, 2] ], + * `[ [7, 1], [7, 2], [7, 3] ] ] + * + * sum(data, axis=1) + * `[ [ 4. 8.] + * [ 10. 9.] + * [ 21. 6.] ] + * + * sum(data, axis=[1,2]) + * [ 12. 19. 27.] + * + * data = `[ [1, 2, 0], + * [3, 0, 1], + * [4, 1, 0] ] + * + * csr = cast_storage(data, 'csr') + * + * sum(csr, axis=0) + * [ 8. 3. 1.] + * + * sum(csr, axis=1) + * [ 3. 4. 5.] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_sum_value.cc:L67 + * }}} + * + * @param data The input + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def sum (data : org.apache.mxnet.NDArray, axis : Option[org.apache.mxnet.Shape] = None, keepdims : Option[Boolean] = None, exclude : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Computes the sum of array elements over given axes. + * + * .. Note:: + * + * `sum` and `sum_axis` are equivalent. + * For ndarray of csr storage type summation along axis 0 and axis 1 is supported. + * Setting keepdims or exclude to True will cause a fallback to dense operator. 
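+ *
+ * As a minimal Scala usage sketch (assuming, as elsewhere in the Scala package, that these
+ * generated methods are exposed through ``NDArray.api`` and that ``NDArrayFuncReturn.head``
+ * returns the first output array):
+ *
+ * {{{
+ * import org.apache.mxnet.{NDArray, Shape}
+ *
+ * // 2 x 3 input; summing over axis 1 yields a length-2 result
+ * val data = NDArray.array(Array(1f, 2f, 3f, 4f, 5f, 6f), shape = Shape(2, 3))
+ * val summed = NDArray.api.sum(data = data, axis = Some(Shape(1))).head
+ * // summed.toArray is expected to be Array(6f, 15f)
+ * }}}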
+ * + * Example:: + * + * data = `[ `[ [1, 2], [2, 3], [1, 3] ], + * `[ [1, 4], [4, 3], [5, 2] ], + * `[ [7, 1], [7, 2], [7, 3] ] ] + * + * sum(data, axis=1) + * `[ [ 4. 8.] + * [ 10. 9.] + * [ 21. 6.] ] + * + * sum(data, axis=[1,2]) + * [ 12. 19. 27.] + * + * data = `[ [1, 2, 0], + * [3, 0, 1], + * [4, 1, 0] ] + * + * csr = cast_storage(data, 'csr') + * + * sum(csr, axis=0) + * [ 8. 3. 1.] + * + * sum(csr, axis=1) + * [ 3. 4. 5.] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_sum_value.cc:L67 + * }}} + * + * @param data The input + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def sum_axis (data : org.apache.mxnet.NDArray, axis : Option[org.apache.mxnet.Shape] = None, keepdims : Option[Boolean] = None, exclude : Option[Boolean] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Interchanges two axes of an array. + * + * Examples:: + * + * x = `[ [1, 2, 3] ]) + * swapaxes(x, 0, 1) = `[ [ 1], + * [ 2], + * [ 3] ] + * + * x = `[ `[ [ 0, 1], + * [ 2, 3] ], + * `[ [ 4, 5], + * [ 6, 7] ] ] // (2,2,2) array + * + * swapaxes(x, 0, 2) = `[ `[ [ 0, 4], + * [ 2, 6] ], + * `[ [ 1, 5], + * [ 3, 7] ] ] + * + * + * Defined in src/operator/swapaxis.cc:L70 + * }}} + * + * @param data Input array. + * @param dim1 the first axis to be swapped. + * @param dim2 the second axis to be swapped. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def swapaxes (data : org.apache.mxnet.NDArray, dim1 : Option[Int] = None, dim2 : Option[Int] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Takes elements from an input array along the given axis. + * + * This function slices the input array along a particular axis with the provided indices. + * + * Given data tensor of rank r >= 1, and indices tensor of rank q, gather entries of the axis + * dimension of data (by default outer-most one as axis=0) indexed by indices, and concatenates them + * in an output tensor of rank q + (r - 1). + * + * Examples:: + * + * x = [4. 5. 6.] + * + * // Trivial case, take the second element along the first axis. + * + * take(x, [1]) = [ 5. ] + * + * // The other trivial case, axis=-1, take the third element along the first axis + * + * take(x, [3], axis=-1, mode='clip') = [ 6. ] + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] ] + * + * // In this case we will get rows 0 and 1, then 1 and 2. Along axis 0 + * + * take(x, `[ [0,1],[1,2] ]) = `[ `[ [ 1., 2.], + * [ 3., 4.] ], + * + * `[ [ 3., 4.], + * [ 5., 6.] ] ] + * + * // In this case we will get rows 0 and 1, then 1 and 2 (calculated by wrapping around). + * // Along axis 1 + * + * take(x, `[ [0, 3], [-1, -2] ], axis=1, mode='wrap') = `[ `[ [ 1. 2.] + * [ 2. 1.] ] + * + * `[ [ 3. 4.] + * [ 4. 3.] ] + * + * `[ [ 5. 6.] + * [ 6. 5.] 
] ]
+ *
+ * The storage type of ``take`` output depends upon the input storage type:
+ *
+ * - take(default, default) = default
+ * - take(csr, default, axis=0) = csr
+ *
+ *
+ *
+ * Defined in src/operator/tensor/indexing_op.cc:L718
+ * }}}
+ *
+ * @param a The input array.
+ * @param indices The indices of the values to be extracted.
+ * @param axis The axis of the input array to be taken. For an input tensor of rank r, it could be in the range of [-r, r-1]
+ * @param mode Specify how out-of-bound indices behave. Default is "clip". "clip" means clip to the range. So, if all indices mentioned are too large, they are replaced by the index that addresses the last element along an axis. "wrap" means to wrap around. "raise" means to raise an error when an index is out of range.
+ * @return org.apache.mxnet.NDArrayFuncReturn
+ */
+@Experimental
+def take (a : org.apache.mxnet.NDArray, indices : org.apache.mxnet.NDArray, axis : Option[Int] = None, mode : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn
+ /**
+ *
+ * {{{
+ *
+ * Computes the element-wise tangent of the input array.
+ *
+ * The input should be in radians (:math:`2\pi` rad equals 360 degrees).
+ *
+ * .. math::
+ * tan([0, \pi/4, \pi/2]) = [0, 1, -inf]
+ *
+ * The storage type of ``tan`` output depends upon the input storage type:
+ *
+ * - tan(default) = default
+ * - tan(row_sparse) = row_sparse
+ * - tan(csr) = csr
+ *
+ *
+ *
+ * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L140
+ * }}}
+ *
+ * @param data The input array.
+ * @return org.apache.mxnet.NDArrayFuncReturn
+ */
+@Experimental
+def tan (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn
+ /**
+ *
+ * {{{
+ *
+ * Returns the hyperbolic tangent of the input array, computed element-wise.
+ *
+ * .. math::
+ * tanh(x) = sinh(x) / cosh(x)
+ *
+ * The storage type of ``tanh`` output depends upon the input storage type:
+ *
+ * - tanh(default) = default
+ * - tanh(row_sparse) = row_sparse
+ * - tanh(csr) = csr
+ *
+ *
+ *
+ * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L393
+ * }}}
+ *
+ * @param data The input array.
+ * @return org.apache.mxnet.NDArrayFuncReturn
+ */
+@Experimental
+def tanh (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn
+ /**
+ *
+ * {{{
+ *
+ * Repeats the whole array multiple times.
+ * If ``reps`` has length *d* and the input array has dimension *n*, there are
+ * three cases:
+ * - **n=d**. Repeat *i*-th dimension of the input by ``reps[i]`` times::
+ * x = `[ [1, 2],
+ * [3, 4] ]
+ * tile(x, reps=(2,3)) = `[ [ 1., 2., 1., 2., 1., 2.],
+ * [ 3., 4., 3., 4., 3., 4.],
+ * [ 1., 2., 1., 2., 1., 2.],
+ * [ 3., 4., 3., 4., 3., 4.] ]
+ * - **n>d**. ``reps`` is promoted to length *n* by pre-pending 1's to it. Thus for
+ * an input shape ``(2,3)``, ``reps=(2,)`` is treated as ``(1,2)``::
+ * tile(x, reps=(2,)) = `[ [ 1., 2., 1., 2.],
+ * [ 3., 4., 3., 4.] ]
+ * - **n<d**. The input is promoted to be *d*-dimensional by prepending new axes.
+ * }}}
+ *
+ * @param data Input data array
+ * @param reps The number of times to repeat the array along each dimension. If reps has length d and the input has n dimensions, the result has max(d, n) dimensions: if n < d, the input is promoted to be d-dimensional by prepending new axes; if n > d, reps is promoted to length n by pre-pending 1's to it.
+ * @return org.apache.mxnet.NDArrayFuncReturn
+ */
+@Experimental
+def tile (data : org.apache.mxnet.NDArray, reps : org.apache.mxnet.Shape, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn
+ /**
+ *
+ * {{{
+ *
+ * Returns the indices of the top *k* elements in an input array along the given
+ * axis (by default).
+ * If ret_type is set to 'value' returns the value of top *k* elements (instead of indices).
+ * In case of ret_type = 'both', both value and index would be returned. + * The returned elements will be sorted. + * + * Examples:: + * + * x = `[ [ 0.3, 0.2, 0.4], + * [ 0.1, 0.3, 0.2] ] + * + * // returns an index of the largest element on last axis + * topk(x) = `[ [ 2.], + * [ 1.] ] + * + * // returns the value of top-2 largest elements on last axis + * topk(x, ret_typ='value', k=2) = `[ [ 0.4, 0.3], + * [ 0.3, 0.2] ] + * + * // returns the value of top-2 smallest elements on last axis + * topk(x, ret_typ='value', k=2, is_ascend=1) = `[ [ 0.2 , 0.3], + * [ 0.1 , 0.2] ] + * + * // returns the value of top-2 largest elements on axis 0 + * topk(x, axis=0, ret_typ='value', k=2) = `[ [ 0.3, 0.3, 0.4], + * [ 0.1, 0.2, 0.2] ] + * + * // flattens and then returns list of both values and indices + * topk(x, ret_typ='both', k=2) = `[ `[ [ 0.4, 0.3], [ 0.3, 0.2] ] , `[ [ 2., 0.], [ 1., 2.] ] ] + * + * + * + * Defined in src/operator/tensor/ordering_op.cc:L68 + * }}} + * + * @param data The input array + * @param axis Axis along which to choose the top k indices. If not given, the flattened array is used. Default is -1. + * @param k Number of top elements to select, should be always smaller than or equal to the element number in the given axis. A global sort is performed if set k < 1. + * @param ret_typ The return type. + "value" means to return the top k values, "indices" means to return the indices of the top k values, "mask" means to return a mask array containing 0 and 1. 1 means the top k values. "both" means to return a list of both values and indices of top k elements. + * @param is_ascend Whether to choose k largest or k smallest elements. Top K largest elements will be chosen if set to false. + * @param dtype DType of the output indices when ret_typ is "indices" or "both". An error will be raised if the selected data type cannot precisely represent the indices. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def topk (data : org.apache.mxnet.NDArray, axis : Option[Int] = None, k : Option[Int] = None, ret_typ : Option[String] = None, is_ascend : Option[Boolean] = None, dtype : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Permutes the dimensions of an array. + * Examples:: + * x = `[ [ 1, 2], + * [ 3, 4] ] + * transpose(x) = `[ [ 1., 3.], + * [ 2., 4.] ] + * x = `[ `[ [ 1., 2.], + * [ 3., 4.] ], + * `[ [ 5., 6.], + * [ 7., 8.] ] ] + * transpose(x) = `[ `[ [ 1., 5.], + * [ 3., 7.] ], + * `[ [ 2., 6.], + * [ 4., 8.] ] ] + * transpose(x, axes=(1,0,2)) = `[ `[ [ 1., 2.], + * [ 5., 6.] ], + * `[ [ 3., 4.], + * [ 7., 8.] ] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L328 + * }}} + * + * @param data Source input + * @param axes Target axis order. By default the axes will be inverted. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def transpose (data : org.apache.mxnet.NDArray, axes : Option[org.apache.mxnet.Shape] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Return the element-wise truncated value of the input. + * + * The truncated value of the scalar x is the nearest integer i which is closer to + * zero than x is. In short, the fractional part of the signed number x is discarded. + * + * Example:: + * + * trunc([-2.1, -1.9, 1.5, 1.9, 2.1]) = [-2., -1., 1., 1., 2.] 
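+ *
+ * A minimal Scala sketch of the same call through the typed binding below (assuming the
+ * generated methods are exposed through ``NDArray.api`` and that ``NDArrayFuncReturn.head``
+ * returns the first output array):
+ *
+ * {{{
+ * import org.apache.mxnet.{NDArray, Shape}
+ *
+ * val x = NDArray.array(Array(-2.1f, -1.9f, 1.5f, 1.9f, 2.1f), shape = Shape(5))
+ * val truncated = NDArray.api.trunc(data = x).head
+ * // truncated.toArray is expected to be Array(-2f, -1f, 1f, 1f, 2f)
+ * }}}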
+ * + * The storage type of ``trunc`` output depends upon the input storage type: + * + * - trunc(default) = default + * - trunc(row_sparse) = row_sparse + * - trunc(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L857 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def trunc (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Draw random samples from a uniform distribution. + * + * .. note:: The existing alias ``uniform`` is deprecated. + * + * Samples are uniformly distributed over the half-open interval *[low, high)* + * (includes *low*, but excludes *high*). + * + * Example:: + * + * uniform(low=0, high=1, shape=(2,2)) = `[ [ 0.60276335, 0.85794562], + * [ 0.54488319, 0.84725171] ] + * + * + * + * Defined in src/operator/random/sample_op.cc:L96 + * }}} + * + * @param low Lower bound of the distribution. + * @param high Upper bound of the distribution. + * @param shape Shape of the output. + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def uniform (low : Option[Float] = None, high : Option[Float] = None, shape : Option[org.apache.mxnet.Shape] = None, ctx : Option[String] = None, dtype : Option[String] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Converts an array of flat indices into a batch of index arrays. The operator follows numpy conventions so a single multi index is given by a column of the output matrix. The leading dimension may be left unspecified by using -1 as placeholder. + * + * Examples:: + * + * A = [22,41,37] + * unravel(A, shape=(7,6)) = `[ [3,6,6],[4,5,1] ] + * unravel(A, shape=(-1,6)) = `[ [3,6,6],[4,5,1] ] + * + * + * + * Defined in src/operator/tensor/ravel.cc:L68 + * }}} + * + * @param data Array of flat indices + * @param shape Shape of the array into which the multi-indices apply. + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def unravel_index (data : org.apache.mxnet.NDArray, shape : Option[org.apache.mxnet.Shape] = None, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Return the elements, either from x or y, depending on the condition. + * + * Given three ndarrays, condition, x, and y, return an ndarray with the elements from x or y, + * depending on the elements from condition are true or false. x and y must have the same shape. + * If condition has the same shape as x, each element in the output array is from x if the + * corresponding element in the condition is true, and from y if false. + * + * If condition does not have the same shape as x, it must be a 1D array whose size is + * the same as x's first dimension size. Each row of the output array is from x's row + * if the corresponding element from condition is true, and from y's row if false. + * + * Note that all non-zero values are interpreted as ``True`` in condition. 
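+ *
+ * A Scala-side sketch mirroring the first example below (assuming the generated methods are
+ * exposed through ``NDArray.api`` and that ``NDArrayFuncReturn.head`` returns the first
+ * output array):
+ *
+ * {{{
+ * import org.apache.mxnet.{NDArray, Shape}
+ *
+ * val x    = NDArray.array(Array(1f, 2f, 3f, 4f), shape = Shape(2, 2))
+ * val y    = NDArray.array(Array(5f, 6f, 7f, 8f), shape = Shape(2, 2))
+ * val cond = NDArray.array(Array(0f, 1f, -1f, 0f), shape = Shape(2, 2))
+ *
+ * val res = NDArray.api.where(condition = cond, x = x, y = y).head
+ * // res.toArray is expected to be Array(5f, 2f, 3f, 8f)
+ * }}}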
+ * + * Examples:: + * + * x = `[ [1, 2], [3, 4] ] + * y = `[ [5, 6], [7, 8] ] + * cond = `[ [0, 1], [-1, 0] ] + * + * where(cond, x, y) = `[ [5, 2], [3, 8] ] + * + * csr_cond = cast_storage(cond, 'csr') + * + * where(csr_cond, x, y) = `[ [5, 2], [3, 8] ] + * + * + * + * Defined in src/operator/tensor/control_flow_op.cc:L57 + * }}} + * + * @param condition condition array + * @param x + * @param y + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def where (condition : org.apache.mxnet.NDArray, x : org.apache.mxnet.NDArray, y : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn + /** + * + * {{{ + * + * Return an array of zeros with the same shape, type and storage type + * as the input array. + * + * The storage type of ``zeros_like`` output depends on the storage type of the input + * + * - zeros_like(row_sparse) = row_sparse + * - zeros_like(csr) = csr + * - zeros_like(default) = default + * + * Examples:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * zeros_like(x) = `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * }}} + * + * @param data The input + * @return org.apache.mxnet.NDArrayFuncReturn + */ +@Experimental +def zeros_like (data : org.apache.mxnet.NDArray, out : Option[NDArray] = None): org.apache.mxnet.NDArrayFuncReturn +} + diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/NDArrayBase.scala b/scala-package/core/src/main/scala/org/apache/mxnet/NDArrayBase.scala new file mode 100644 index 000000000..6b87a9928 --- /dev/null +++ b/scala-package/core/src/main/scala/org/apache/mxnet/NDArrayBase.scala @@ -0,0 +1,17858 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.mxnet + +import org.apache.mxnet.annotation.Experimental + +// scalastyle:off +abstract class NDArrayBase { + /** + * + * {{{ + * + * Applies an activation function element-wise to the input. + * + * The following activation functions are supported: + * + * - `relu`: Rectified Linear Unit, :math:`y = max(x, 0)` + * - `sigmoid`: :math:`y = \frac{1}{1 + exp(-x)}` + * - `tanh`: Hyperbolic tangent, :math:`y = \frac{exp(x) - exp(-x)}{exp(x) + exp(-x)}` + * - `softrelu`: Soft ReLU, or SoftPlus, :math:`y = log(1 + exp(x))` + * - `softsign`: :math:`y = \frac{x}{1 + abs(x)}` + * + * + * + * Defined in src/operator/nn/activation.cc:L168 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Activation(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies an activation function element-wise to the input. 
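+ *
+ * For example, applying a ``relu`` activation through the kwargs-based overload defined above
+ * might look as follows (a sketch that assumes the concrete ``NDArray`` object implements this
+ * class, that NDArray inputs are passed positionally, that operator attributes go into the
+ * kwargs map, and that ``NDArrayFuncReturn.head`` returns the first output array):
+ *
+ * {{{
+ * import org.apache.mxnet.{NDArray, Shape}
+ *
+ * val x = NDArray.array(Array(-1f, 0f, 2f), shape = Shape(3))
+ * val y = NDArray.Activation(Map("act_type" -> "relu"))(x).head
+ * // y.toArray is expected to be Array(0f, 0f, 2f)
+ * }}}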
+ * + * The following activation functions are supported: + * + * - `relu`: Rectified Linear Unit, :math:`y = max(x, 0)` + * - `sigmoid`: :math:`y = \frac{1}{1 + exp(-x)}` + * - `tanh`: Hyperbolic tangent, :math:`y = \frac{exp(x) - exp(-x)}{exp(x) + exp(-x)}` + * - `softrelu`: Soft ReLU, or SoftPlus, :math:`y = log(1 + exp(x))` + * - `softsign`: :math:`y = \frac{x}{1 + abs(x)}` + * + * + * + * Defined in src/operator/nn/activation.cc:L168 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Activation(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Batch normalization. + * + * Normalizes a data batch by mean and variance, and applies a scale ``gamma`` as + * well as offset ``beta``. + * + * Assume the input has more than one dimension and we normalize along axis 1. + * We first compute the mean and variance along this axis: + * + * .. math:: + * + * data\_mean[i] = mean(data[:,i,:,...]) \\ + * data\_var[i] = var(data[:,i,:,...]) + * + * Then compute the normalized output, which has the same shape as input, as following: + * + * .. math:: + * + * out[:,i,:,...] = \frac{data[:,i,:,...] - data\_mean[i]}{\sqrt{data\_var[i]+\epsilon}} * gamma[i] + beta[i] + * + * Both *mean* and *var* returns a scalar by treating the input as a vector. + * + * Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` + * have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and + * the inverse of ``data_var``, which are needed for the backward pass. Note that gradient of these + * two outputs are blocked. + * + * Besides the inputs and the outputs, this operator accepts two auxiliary + * states, ``moving_mean`` and ``moving_var``, which are *k*-length + * vectors. They are global statistics for the whole dataset, which are updated + * by:: + * + * moving_mean = moving_mean * momentum + data_mean * (1 - momentum) + * moving_var = moving_var * momentum + data_var * (1 - momentum) + * + * If ``use_global_stats`` is set to be true, then ``moving_mean`` and + * ``moving_var`` are used instead of ``data_mean`` and ``data_var`` to compute + * the output. It is often used during inference. + * + * The parameter ``axis`` specifies which axis of the input shape denotes + * the 'channel' (separately normalized groups). The default is 1. Specifying -1 sets the channel + * axis to be the last item in the input shape. + * + * Both ``gamma`` and ``beta`` are learnable parameters. But if ``fix_gamma`` is true, + * then set ``gamma`` to 1 and its gradient to 0. + * + * .. Note:: + * When ``fix_gamma`` is set to True, no sparse support is provided. If ``fix_gamma is`` set to False, + * the sparse tensors will fallback. + * + * + * + * Defined in src/operator/nn/batch_norm.cc:L571 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def BatchNorm(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Batch normalization. + * + * Normalizes a data batch by mean and variance, and applies a scale ``gamma`` as + * well as offset ``beta``. + * + * Assume the input has more than one dimension and we normalize along axis 1. + * We first compute the mean and variance along this axis: + * + * .. math:: + * + * data\_mean[i] = mean(data[:,i,:,...]) \\ + * data\_var[i] = var(data[:,i,:,...]) + * + * Then compute the normalized output, which has the same shape as input, as following: + * + * .. math:: + * + * out[:,i,:,...] = \frac{data[:,i,:,...] 
- data\_mean[i]}{\sqrt{data\_var[i]+\epsilon}} * gamma[i] + beta[i] + * + * Both *mean* and *var* returns a scalar by treating the input as a vector. + * + * Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` + * have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and + * the inverse of ``data_var``, which are needed for the backward pass. Note that gradient of these + * two outputs are blocked. + * + * Besides the inputs and the outputs, this operator accepts two auxiliary + * states, ``moving_mean`` and ``moving_var``, which are *k*-length + * vectors. They are global statistics for the whole dataset, which are updated + * by:: + * + * moving_mean = moving_mean * momentum + data_mean * (1 - momentum) + * moving_var = moving_var * momentum + data_var * (1 - momentum) + * + * If ``use_global_stats`` is set to be true, then ``moving_mean`` and + * ``moving_var`` are used instead of ``data_mean`` and ``data_var`` to compute + * the output. It is often used during inference. + * + * The parameter ``axis`` specifies which axis of the input shape denotes + * the 'channel' (separately normalized groups). The default is 1. Specifying -1 sets the channel + * axis to be the last item in the input shape. + * + * Both ``gamma`` and ``beta`` are learnable parameters. But if ``fix_gamma`` is true, + * then set ``gamma`` to 1 and its gradient to 0. + * + * .. Note:: + * When ``fix_gamma`` is set to True, no sparse support is provided. If ``fix_gamma is`` set to False, + * the sparse tensors will fallback. + * + * + * + * Defined in src/operator/nn/batch_norm.cc:L571 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def BatchNorm(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Batch normalization. + * + * This operator is DEPRECATED. Perform BatchNorm on the input. + * + * Normalizes a data batch by mean and variance, and applies a scale ``gamma`` as + * well as offset ``beta``. + * + * Assume the input has more than one dimension and we normalize along axis 1. + * We first compute the mean and variance along this axis: + * + * .. math:: + * + * data\_mean[i] = mean(data[:,i,:,...]) \\ + * data\_var[i] = var(data[:,i,:,...]) + * + * Then compute the normalized output, which has the same shape as input, as following: + * + * .. math:: + * + * out[:,i,:,...] = \frac{data[:,i,:,...] - data\_mean[i]}{\sqrt{data\_var[i]+\epsilon}} * gamma[i] + beta[i] + * + * Both *mean* and *var* returns a scalar by treating the input as a vector. + * + * Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` + * have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and + * ``data_var`` as well, which are needed for the backward pass. + * + * Besides the inputs and the outputs, this operator accepts two auxiliary + * states, ``moving_mean`` and ``moving_var``, which are *k*-length + * vectors. They are global statistics for the whole dataset, which are updated + * by:: + * + * moving_mean = moving_mean * momentum + data_mean * (1 - momentum) + * moving_var = moving_var * momentum + data_var * (1 - momentum) + * + * If ``use_global_stats`` is set to be true, then ``moving_mean`` and + * ``moving_var`` are used instead of ``data_mean`` and ``data_var`` to compute + * the output. It is often used during inference. + * + * Both ``gamma`` and ``beta`` are learnable parameters. But if ``fix_gamma`` is true, + * then set ``gamma`` to 1 and its gradient to 0. 
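+ *
+ * As a rough sketch of the untyped ``(kwargs)(args)`` calling convention used throughout this
+ * class (assuming the concrete ``NDArray`` object implements these methods, that the NDArray
+ * inputs are passed positionally in the order data, gamma, beta, and that attributes go into
+ * the kwargs map):
+ *
+ * {{{
+ * import org.apache.mxnet.{NDArray, Shape}
+ *
+ * val data  = NDArray.ones(Shape(2, 3, 4, 4))  // (batch, channel, height, width)
+ * val gamma = NDArray.ones(Shape(3))
+ * val beta  = NDArray.zeros(Shape(3))
+ *
+ * val out = NDArray.BatchNorm_v1(Map("eps" -> 0.001f, "momentum" -> 0.9f))(
+ *   data, gamma, beta).head
+ * }}}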
+ * + * There's no sparse support for this operator, and it will exhibit problematic behavior if used with + * sparse tensors. + * + * + * + * Defined in src/operator/batch_norm_v1.cc:L95 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def BatchNorm_v1(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Batch normalization. + * + * This operator is DEPRECATED. Perform BatchNorm on the input. + * + * Normalizes a data batch by mean and variance, and applies a scale ``gamma`` as + * well as offset ``beta``. + * + * Assume the input has more than one dimension and we normalize along axis 1. + * We first compute the mean and variance along this axis: + * + * .. math:: + * + * data\_mean[i] = mean(data[:,i,:,...]) \\ + * data\_var[i] = var(data[:,i,:,...]) + * + * Then compute the normalized output, which has the same shape as input, as following: + * + * .. math:: + * + * out[:,i,:,...] = \frac{data[:,i,:,...] - data\_mean[i]}{\sqrt{data\_var[i]+\epsilon}} * gamma[i] + beta[i] + * + * Both *mean* and *var* returns a scalar by treating the input as a vector. + * + * Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` + * have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and + * ``data_var`` as well, which are needed for the backward pass. + * + * Besides the inputs and the outputs, this operator accepts two auxiliary + * states, ``moving_mean`` and ``moving_var``, which are *k*-length + * vectors. They are global statistics for the whole dataset, which are updated + * by:: + * + * moving_mean = moving_mean * momentum + data_mean * (1 - momentum) + * moving_var = moving_var * momentum + data_var * (1 - momentum) + * + * If ``use_global_stats`` is set to be true, then ``moving_mean`` and + * ``moving_var`` are used instead of ``data_mean`` and ``data_var`` to compute + * the output. It is often used during inference. + * + * Both ``gamma`` and ``beta`` are learnable parameters. But if ``fix_gamma`` is true, + * then set ``gamma`` to 1 and its gradient to 0. + * + * There's no sparse support for this operator, and it will exhibit problematic behavior if used with + * sparse tensors. + * + * + * + * Defined in src/operator/batch_norm_v1.cc:L95 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def BatchNorm_v1(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies bilinear sampling to input feature map. + * + * Bilinear Sampling is the key of [NIPS2015] \"Spatial Transformer Networks\". The usage of the operator is very similar to remap function in OpenCV, + * except that the operator has the backward pass. + * + * Given :math:`data` and :math:`grid`, then the output is computed by + * + * .. math:: + * x_{src} = grid[batch, 0, y_{dst}, x_{dst}] \\ + * y_{src} = grid[batch, 1, y_{dst}, x_{dst}] \\ + * output[batch, channel, y_{dst}, x_{dst}] = G(data[batch, channel, y_{src}, x_{src}) + * + * :math:`x_{dst}`, :math:`y_{dst}` enumerate all spatial locations in :math:`output`, and :math:`G()` denotes the bilinear interpolation kernel. + * The out-boundary points will be padded with zeros.The shape of the output will be (data.shape[0], data.shape[1], grid.shape[2], grid.shape[3]). + * + * The operator assumes that :math:`data` has 'NCHW' layout and :math:`grid` has been normalized to [-1, 1]. + * + * BilinearSampler often cooperates with GridGenerator which generates sampling grids for BilinearSampler. 
+ * GridGenerator supports two kinds of transformation: ``affine`` and ``warp``. + * If users want to design a CustomOp to manipulate :math:`grid`, please firstly refer to the code of GridGenerator. + * + * Example 1:: + * + * ## Zoom out data two times + * data = array(`[ [`[ [1, 4, 3, 6], + * [1, 8, 8, 9], + * [0, 4, 1, 5], + * [1, 0, 1, 3] ] ] ]) + * + * affine_matrix = array(`[ [2, 0, 0], + * [0, 2, 0] ]) + * + * affine_matrix = reshape(affine_matrix, shape=(1, 6)) + * + * grid = GridGenerator(data=affine_matrix, transform_type='affine', target_shape=(4, 4)) + * + * out = BilinearSampler(data, grid) + * + * out + * `[ [`[ [ 0, 0, 0, 0], + * [ 0, 3.5, 6.5, 0], + * [ 0, 1.25, 2.5, 0], + * [ 0, 0, 0, 0] ] ] + * + * + * Example 2:: + * + * ## shift data horizontally by -1 pixel + * + * data = array(`[ [`[ [1, 4, 3, 6], + * [1, 8, 8, 9], + * [0, 4, 1, 5], + * [1, 0, 1, 3] ] ] ]) + * + * warp_maxtrix = array(`[ [`[ [1, 1, 1, 1], + * [1, 1, 1, 1], + * [1, 1, 1, 1], + * [1, 1, 1, 1] ], + * `[ [0, 0, 0, 0], + * [0, 0, 0, 0], + * [0, 0, 0, 0], + * [0, 0, 0, 0] ] ] ]) + * + * grid = GridGenerator(data=warp_matrix, transform_type='warp') + * out = BilinearSampler(data, grid) + * + * out + * `[ [`[ [ 4, 3, 6, 0], + * [ 8, 8, 9, 0], + * [ 4, 1, 5, 0], + * [ 0, 1, 3, 0] ] ] + * + * + * Defined in src/operator/bilinear_sampler.cc:L256 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def BilinearSampler(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies bilinear sampling to input feature map. + * + * Bilinear Sampling is the key of [NIPS2015] \"Spatial Transformer Networks\". The usage of the operator is very similar to remap function in OpenCV, + * except that the operator has the backward pass. + * + * Given :math:`data` and :math:`grid`, then the output is computed by + * + * .. math:: + * x_{src} = grid[batch, 0, y_{dst}, x_{dst}] \\ + * y_{src} = grid[batch, 1, y_{dst}, x_{dst}] \\ + * output[batch, channel, y_{dst}, x_{dst}] = G(data[batch, channel, y_{src}, x_{src}) + * + * :math:`x_{dst}`, :math:`y_{dst}` enumerate all spatial locations in :math:`output`, and :math:`G()` denotes the bilinear interpolation kernel. + * The out-boundary points will be padded with zeros.The shape of the output will be (data.shape[0], data.shape[1], grid.shape[2], grid.shape[3]). + * + * The operator assumes that :math:`data` has 'NCHW' layout and :math:`grid` has been normalized to [-1, 1]. + * + * BilinearSampler often cooperates with GridGenerator which generates sampling grids for BilinearSampler. + * GridGenerator supports two kinds of transformation: ``affine`` and ``warp``. + * If users want to design a CustomOp to manipulate :math:`grid`, please firstly refer to the code of GridGenerator. 
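+ *
+ * A rough Scala sketch of the zoom-out case in Example 1 below, using the untyped bindings in
+ * this class (this assumes the concrete ``NDArray`` object implements these methods, that a
+ * ``GridGenerator`` binding is generated alongside this one, that NDArray inputs are passed
+ * positionally with attributes in the kwargs map, and that a ``Shape`` value is accepted for
+ * ``target_shape``):
+ *
+ * {{{
+ * import org.apache.mxnet.{NDArray, Shape}
+ *
+ * val data = NDArray.array(Array(
+ *   1f, 4f, 3f, 6f,
+ *   1f, 8f, 8f, 9f,
+ *   0f, 4f, 1f, 5f,
+ *   1f, 0f, 1f, 3f), shape = Shape(1, 1, 4, 4))
+ *
+ * // 2x zoom-out affine transform, flattened to shape (1, 6)
+ * val affine = NDArray.array(Array(2f, 0f, 0f, 0f, 2f, 0f), shape = Shape(1, 6))
+ *
+ * val grid = NDArray.GridGenerator(
+ *   Map("transform_type" -> "affine", "target_shape" -> Shape(4, 4)))(affine).head
+ * val out = NDArray.BilinearSampler(data, grid).head
+ * // out is expected to match the 4 x 4 result shown in Example 1 below
+ * }}}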
+ * + * Example 1:: + * + * ## Zoom out data two times + * data = array(`[ [`[ [1, 4, 3, 6], + * [1, 8, 8, 9], + * [0, 4, 1, 5], + * [1, 0, 1, 3] ] ] ]) + * + * affine_matrix = array(`[ [2, 0, 0], + * [0, 2, 0] ]) + * + * affine_matrix = reshape(affine_matrix, shape=(1, 6)) + * + * grid = GridGenerator(data=affine_matrix, transform_type='affine', target_shape=(4, 4)) + * + * out = BilinearSampler(data, grid) + * + * out + * `[ [`[ [ 0, 0, 0, 0], + * [ 0, 3.5, 6.5, 0], + * [ 0, 1.25, 2.5, 0], + * [ 0, 0, 0, 0] ] ] + * + * + * Example 2:: + * + * ## shift data horizontally by -1 pixel + * + * data = array(`[ [`[ [1, 4, 3, 6], + * [1, 8, 8, 9], + * [0, 4, 1, 5], + * [1, 0, 1, 3] ] ] ]) + * + * warp_maxtrix = array(`[ [`[ [1, 1, 1, 1], + * [1, 1, 1, 1], + * [1, 1, 1, 1], + * [1, 1, 1, 1] ], + * `[ [0, 0, 0, 0], + * [0, 0, 0, 0], + * [0, 0, 0, 0], + * [0, 0, 0, 0] ] ] ]) + * + * grid = GridGenerator(data=warp_matrix, transform_type='warp') + * out = BilinearSampler(data, grid) + * + * out + * `[ [`[ [ 4, 3, 6, 0], + * [ 8, 8, 9, 0], + * [ 4, 1, 5, 0], + * [ 0, 1, 3, 0] ] ] + * + * + * Defined in src/operator/bilinear_sampler.cc:L256 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def BilinearSampler(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Stops gradient computation. + * + * Stops the accumulated gradient of the inputs from flowing through this operator + * in the backward direction. In other words, this operator prevents the contribution + * of its inputs to be taken into account for computing gradients. + * + * Example:: + * + * v1 = [1, 2] + * v2 = [0, 1] + * a = Variable('a') + * b = Variable('b') + * b_stop_grad = stop_gradient(3 * b) + * loss = MakeLoss(b_stop_grad + a) + * + * executor = loss.simple_bind(ctx=cpu(), a=(1,2), b=(1,2)) + * executor.forward(is_train=True, a=v1, b=v2) + * executor.outputs + * [ 1. 5.] + * + * executor.backward() + * executor.grad_arrays + * [ 0. 0.] + * [ 1. 1.] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L327 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def BlockGrad(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Stops gradient computation. + * + * Stops the accumulated gradient of the inputs from flowing through this operator + * in the backward direction. In other words, this operator prevents the contribution + * of its inputs to be taken into account for computing gradients. + * + * Example:: + * + * v1 = [1, 2] + * v2 = [0, 1] + * a = Variable('a') + * b = Variable('b') + * b_stop_grad = stop_gradient(3 * b) + * loss = MakeLoss(b_stop_grad + a) + * + * executor = loss.simple_bind(ctx=cpu(), a=(1,2), b=(1,2)) + * executor.forward(is_train=True, a=v1, b=v2) + * executor.outputs + * [ 1. 5.] + * + * executor.backward() + * executor.grad_arrays + * [ 0. 0.] + * [ 1. 1.] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L327 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def BlockGrad(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Connectionist Temporal Classification Loss. + * + * .. note:: The existing alias ``contrib_CTCLoss`` is deprecated. 
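+ *
+ * A rough Scala invocation sketch following the shape conventions documented below (it assumes
+ * the concrete ``NDArray`` object implements these methods, that ``data`` and ``label`` are
+ * passed positionally, and that attributes go into the kwargs map):
+ *
+ * {{{
+ * import org.apache.mxnet.{NDArray, Shape}
+ *
+ * // sequence_length = 4, batch_size = 2, alphabet_size = 4 (index 0 reserved for blank)
+ * val data  = NDArray.ones(Shape(4, 2, 4))
+ * // two label sequences, 'ba' -> [2, 1] and 'cb' -> [3, 2], using 1-based label indices
+ * val label = NDArray.array(Array(2f, 1f, 3f, 2f), shape = Shape(2, 2))
+ *
+ * val loss = NDArray.CTCLoss(Map("blank_label" -> "first"))(data, label).head
+ * // loss has shape (2,), one CTC loss value per example in the batch
+ * }}}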
+ * + * The shapes of the inputs and outputs: + * + * - **data**: `(sequence_length, batch_size, alphabet_size)` + * - **label**: `(batch_size, label_sequence_length)` + * - **out**: `(batch_size)` + * + * The `data` tensor consists of sequences of activation vectors (without applying softmax), + * with i-th channel in the last dimension corresponding to i-th label + * for i between 0 and alphabet_size-1 (i.e always 0-indexed). + * Alphabet size should include one additional value reserved for blank label. + * When `blank_label` is ``"first"``, the ``0``-th channel is be reserved for + * activation of blank label, or otherwise if it is "last", ``(alphabet_size-1)``-th channel should be + * reserved for blank label. + * + * ``label`` is an index matrix of integers. When `blank_label` is ``"first"``, + * the value 0 is then reserved for blank label, and should not be passed in this matrix. Otherwise, + * when `blank_label` is ``"last"``, the value `(alphabet_size-1)` is reserved for blank label. + * + * If a sequence of labels is shorter than *label_sequence_length*, use the special + * padding value at the end of the sequence to conform it to the correct + * length. The padding value is `0` when `blank_label` is ``"first"``, and `-1` otherwise. + * + * For example, suppose the vocabulary is `[a, b, c]`, and in one batch we have three sequences + * 'ba', 'cbb', and 'abac'. When `blank_label` is ``"first"``, we can index the labels as + * `{'a': 1, 'b': 2, 'c': 3}`, and we reserve the 0-th channel for blank label in data tensor. + * The resulting `label` tensor should be padded to be:: + * + * `[ [2, 1, 0, 0], [3, 2, 2, 0], [1, 2, 1, 3] ] + * + * When `blank_label` is ``"last"``, we can index the labels as + * `{'a': 0, 'b': 1, 'c': 2}`, and we reserve the channel index 3 for blank label in data tensor. + * The resulting `label` tensor should be padded to be:: + * + * `[ [1, 0, -1, -1], [2, 1, 1, -1], [0, 1, 0, 2] ] + * + * ``out`` is a list of CTC loss values, one per example in the batch. + * + * See *Connectionist Temporal Classification: Labelling Unsegmented + * Sequence Data with Recurrent Neural Networks*, A. Graves *et al*. for more + * information on the definition and the algorithm. + * + * + * + * Defined in src/operator/nn/ctc_loss.cc:L100 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def CTCLoss(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Connectionist Temporal Classification Loss. + * + * .. note:: The existing alias ``contrib_CTCLoss`` is deprecated. + * + * The shapes of the inputs and outputs: + * + * - **data**: `(sequence_length, batch_size, alphabet_size)` + * - **label**: `(batch_size, label_sequence_length)` + * - **out**: `(batch_size)` + * + * The `data` tensor consists of sequences of activation vectors (without applying softmax), + * with i-th channel in the last dimension corresponding to i-th label + * for i between 0 and alphabet_size-1 (i.e always 0-indexed). + * Alphabet size should include one additional value reserved for blank label. + * When `blank_label` is ``"first"``, the ``0``-th channel is be reserved for + * activation of blank label, or otherwise if it is "last", ``(alphabet_size-1)``-th channel should be + * reserved for blank label. + * + * ``label`` is an index matrix of integers. When `blank_label` is ``"first"``, + * the value 0 is then reserved for blank label, and should not be passed in this matrix. 
Otherwise, + * when `blank_label` is ``"last"``, the value `(alphabet_size-1)` is reserved for blank label. + * + * If a sequence of labels is shorter than *label_sequence_length*, use the special + * padding value at the end of the sequence to conform it to the correct + * length. The padding value is `0` when `blank_label` is ``"first"``, and `-1` otherwise. + * + * For example, suppose the vocabulary is `[a, b, c]`, and in one batch we have three sequences + * 'ba', 'cbb', and 'abac'. When `blank_label` is ``"first"``, we can index the labels as + * `{'a': 1, 'b': 2, 'c': 3}`, and we reserve the 0-th channel for blank label in data tensor. + * The resulting `label` tensor should be padded to be:: + * + * `[ [2, 1, 0, 0], [3, 2, 2, 0], [1, 2, 1, 3] ] + * + * When `blank_label` is ``"last"``, we can index the labels as + * `{'a': 0, 'b': 1, 'c': 2}`, and we reserve the channel index 3 for blank label in data tensor. + * The resulting `label` tensor should be padded to be:: + * + * `[ [1, 0, -1, -1], [2, 1, 1, -1], [0, 1, 0, 2] ] + * + * ``out`` is a list of CTC loss values, one per example in the batch. + * + * See *Connectionist Temporal Classification: Labelling Unsegmented + * Sequence Data with Recurrent Neural Networks*, A. Graves *et al*. for more + * information on the definition and the algorithm. + * + * + * + * Defined in src/operator/nn/ctc_loss.cc:L100 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def CTCLoss(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Casts all elements of the input to a new type. + * + * .. note:: ``Cast`` is deprecated. Use ``cast`` instead. + * + * Example:: + * + * cast([0.9, 1.3], dtype='int32') = [0, 1] + * cast([1e20, 11.1], dtype='float16') = [inf, 11.09375] + * cast([300, 11.1, 10.9, -1, -3], dtype='uint8') = [44, 11, 10, 255, 253] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L665 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Cast(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Casts all elements of the input to a new type. + * + * .. note:: ``Cast`` is deprecated. Use ``cast`` instead. + * + * Example:: + * + * cast([0.9, 1.3], dtype='int32') = [0, 1] + * cast([1e20, 11.1], dtype='float16') = [inf, 11.09375] + * cast([300, 11.1, 10.9, -1, -3], dtype='uint8') = [44, 11, 10, 255, 253] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L665 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Cast(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Joins input arrays along a given axis. + * + * .. note:: `Concat` is deprecated. Use `concat` instead. + * + * The dimensions of the input arrays should be the same except the axis along + * which they will be concatenated. + * The dimension of the output array along the concatenated axis will be equal + * to the sum of the corresponding dimensions of the input arrays. + * + * The storage type of ``concat`` output depends on storage types of inputs + * + * - concat(csr, csr, ..., csr, dim=0) = csr + * - otherwise, ``concat`` generates output with default storage + * + * Example:: + * + * x = `[ [1,1],[2,2] ] + * y = `[ [3,3],[4,4],[5,5] ] + * z = `[ [6,6], [7,7],[8,8] ] + * + * concat(x,y,z,dim=0) = `[ [ 1., 1.], + * [ 2., 2.], + * [ 3., 3.], + * [ 4., 4.], + * [ 5., 5.], + * [ 6., 6.], + * [ 7., 7.], + * [ 8., 8.] 
] + * + * Note that you cannot concat x,y,z along dimension 1 since dimension + * 0 is not the same for all the input arrays. + * + * concat(y,z,dim=1) = `[ [ 3., 3., 6., 6.], + * [ 4., 4., 7., 7.], + * [ 5., 5., 8., 8.] ] + * + * + * + * Defined in src/operator/nn/concat.cc:L383 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Concat(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Joins input arrays along a given axis. + * + * .. note:: `Concat` is deprecated. Use `concat` instead. + * + * The dimensions of the input arrays should be the same except the axis along + * which they will be concatenated. + * The dimension of the output array along the concatenated axis will be equal + * to the sum of the corresponding dimensions of the input arrays. + * + * The storage type of ``concat`` output depends on storage types of inputs + * + * - concat(csr, csr, ..., csr, dim=0) = csr + * - otherwise, ``concat`` generates output with default storage + * + * Example:: + * + * x = `[ [1,1],[2,2] ] + * y = `[ [3,3],[4,4],[5,5] ] + * z = `[ [6,6], [7,7],[8,8] ] + * + * concat(x,y,z,dim=0) = `[ [ 1., 1.], + * [ 2., 2.], + * [ 3., 3.], + * [ 4., 4.], + * [ 5., 5.], + * [ 6., 6.], + * [ 7., 7.], + * [ 8., 8.] ] + * + * Note that you cannot concat x,y,z along dimension 1 since dimension + * 0 is not the same for all the input arrays. + * + * concat(y,z,dim=1) = `[ [ 3., 3., 6., 6.], + * [ 4., 4., 7., 7.], + * [ 5., 5., 8., 8.] ] + * + * + * + * Defined in src/operator/nn/concat.cc:L383 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Concat(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Compute *N*-D convolution on *(N+2)*-D input. + * + * In the 2-D convolution, given input data with shape *(batch_size, + * channel, height, width)*, the output is computed by + * + * .. math:: + * + * out[n,i,:,:] = bias[i] + \sum_{j=0}^{channel} data[n,j,:,:] \star + * weight[i,j,:,:] + * + * where :math:`\star` is the 2-D cross-correlation operator. + * + * For general 2-D convolution, the shapes are + * + * - **data**: *(batch_size, channel, height, width)* + * - **weight**: *(num_filter, channel, kernel[0], kernel[1])* + * - **bias**: *(num_filter,)* + * - **out**: *(batch_size, num_filter, out_height, out_width)*. + * + * Define:: + * + * f(x,k,p,s,d) = floor((x+2*p-d*(k-1)-1)/s)+1 + * + * then we have:: + * + * out_height=f(height, kernel[0], pad[0], stride[0], dilate[0]) + * out_width=f(width, kernel[1], pad[1], stride[1], dilate[1]) + * + * If ``no_bias`` is set to be true, then the ``bias`` term is ignored. + * + * The default data ``layout`` is *NCHW*, namely *(batch_size, channel, height, + * width)*. We can choose other layouts such as *NWC*. + * + * If ``num_group`` is larger than 1, denoted by *g*, then split the input ``data`` + * evenly into *g* parts along the channel axis, and also evenly split ``weight`` + * along the first dimension. Next compute the convolution on the *i*-th part of + * the data with the *i*-th weight part. The output is obtained by concatenating all + * the *g* results. + * + * 1-D convolution does not have *height* dimension but only *width* in space. + * + * - **data**: *(batch_size, channel, width)* + * - **weight**: *(num_filter, channel, kernel[0])* + * - **bias**: *(num_filter,)* + * - **out**: *(batch_size, num_filter, out_width)*. + * + * 3-D convolution adds an additional *depth* dimension besides *height* and + * *width*. 
The shapes are + * + * - **data**: *(batch_size, channel, depth, height, width)* + * - **weight**: *(num_filter, channel, kernel[0], kernel[1], kernel[2])* + * - **bias**: *(num_filter,)* + * - **out**: *(batch_size, num_filter, out_depth, out_height, out_width)*. + * + * Both ``weight`` and ``bias`` are learnable parameters. + * + * There are other options to tune the performance. + * + * - **cudnn_tune**: enable this option leads to higher startup time but may give + * faster speed. Options are + * + * - **off**: no tuning + * - **limited_workspace**:run test and pick the fastest algorithm that doesn't + * exceed workspace limit. + * - **fastest**: pick the fastest algorithm and ignore workspace limit. + * - **None** (default): the behavior is determined by environment variable + * ``MXNET_CUDNN_AUTOTUNE_DEFAULT``. 0 for off, 1 for limited workspace + * (default), 2 for fastest. + * + * - **workspace**: A large number leads to more (GPU) memory usage but may improve + * the performance. + * + * + * + * Defined in src/operator/nn/convolution.cc:L473 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Convolution(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Compute *N*-D convolution on *(N+2)*-D input. + * + * In the 2-D convolution, given input data with shape *(batch_size, + * channel, height, width)*, the output is computed by + * + * .. math:: + * + * out[n,i,:,:] = bias[i] + \sum_{j=0}^{channel} data[n,j,:,:] \star + * weight[i,j,:,:] + * + * where :math:`\star` is the 2-D cross-correlation operator. + * + * For general 2-D convolution, the shapes are + * + * - **data**: *(batch_size, channel, height, width)* + * - **weight**: *(num_filter, channel, kernel[0], kernel[1])* + * - **bias**: *(num_filter,)* + * - **out**: *(batch_size, num_filter, out_height, out_width)*. + * + * Define:: + * + * f(x,k,p,s,d) = floor((x+2*p-d*(k-1)-1)/s)+1 + * + * then we have:: + * + * out_height=f(height, kernel[0], pad[0], stride[0], dilate[0]) + * out_width=f(width, kernel[1], pad[1], stride[1], dilate[1]) + * + * If ``no_bias`` is set to be true, then the ``bias`` term is ignored. + * + * The default data ``layout`` is *NCHW*, namely *(batch_size, channel, height, + * width)*. We can choose other layouts such as *NWC*. + * + * If ``num_group`` is larger than 1, denoted by *g*, then split the input ``data`` + * evenly into *g* parts along the channel axis, and also evenly split ``weight`` + * along the first dimension. Next compute the convolution on the *i*-th part of + * the data with the *i*-th weight part. The output is obtained by concatenating all + * the *g* results. + * + * 1-D convolution does not have *height* dimension but only *width* in space. + * + * - **data**: *(batch_size, channel, width)* + * - **weight**: *(num_filter, channel, kernel[0])* + * - **bias**: *(num_filter,)* + * - **out**: *(batch_size, num_filter, out_width)*. + * + * 3-D convolution adds an additional *depth* dimension besides *height* and + * *width*. The shapes are + * + * - **data**: *(batch_size, channel, depth, height, width)* + * - **weight**: *(num_filter, channel, kernel[0], kernel[1], kernel[2])* + * - **bias**: *(num_filter,)* + * - **out**: *(batch_size, num_filter, out_depth, out_height, out_width)*. + * + * Both ``weight`` and ``bias`` are learnable parameters. + * + * There are other options to tune the performance. + * + * - **cudnn_tune**: enable this option leads to higher startup time but may give + * faster speed. 
Options are + * + * - **off**: no tuning + * - **limited_workspace**:run test and pick the fastest algorithm that doesn't + * exceed workspace limit. + * - **fastest**: pick the fastest algorithm and ignore workspace limit. + * - **None** (default): the behavior is determined by environment variable + * ``MXNET_CUDNN_AUTOTUNE_DEFAULT``. 0 for off, 1 for limited workspace + * (default), 2 for fastest. + * + * - **workspace**: A large number leads to more (GPU) memory usage but may improve + * the performance. + * + * + * + * Defined in src/operator/nn/convolution.cc:L473 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Convolution(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * This operator is DEPRECATED. Apply convolution to input then add a bias. + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Convolution_v1(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * This operator is DEPRECATED. Apply convolution to input then add a bias. + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Convolution_v1(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies correlation to inputs. + * + * The correlation layer performs multiplicative patch comparisons between two feature maps. + * + * Given two multi-channel feature maps :math:`f_{1}, f_{2}`, with :math:`w`, :math:`h`, and :math:`c` being their width, height, and number of channels, + * the correlation layer lets the network compare each patch from :math:`f_{1}` with each patch from :math:`f_{2}`. + * + * For now we consider only a single comparison of two patches. The 'correlation' of two patches centered at :math:`x_{1}` in the first map and + * :math:`x_{2}` in the second map is then defined as: + * + * .. math:: + * + * c(x_{1}, x_{2}) = \sum_{o \in [-k,k] \times [-k,k]} + * + * for a square patch of size :math:`K:=2k+1`. + * + * Note that the equation above is identical to one step of a convolution in neural networks, but instead of convolving data with a filter, it convolves data with other + * data. For this reason, it has no training weights. + * + * Computing :math:`c(x_{1}, x_{2})` involves :math:`c * K^{2}` multiplications. Comparing all patch combinations involves :math:`w^{2}*h^{2}` such computations. + * + * Given a maximum displacement :math:`d`, for each location :math:`x_{1}` it computes correlations :math:`c(x_{1}, x_{2})` only in a neighborhood of size :math:`D:=2d+1`, + * by limiting the range of :math:`x_{2}`. We use strides :math:`s_{1}, s_{2}`, to quantize :math:`x_{1}` globally and to quantize :math:`x_{2}` within the neighborhood + * centered around :math:`x_{1}`. + * + * The final output is defined by the following expression: + * + * .. math:: + * out[n, q, i, j] = c(x_{i, j}, x_{q}) + * + * where :math:`i` and :math:`j` enumerate spatial locations in :math:`f_{1}`, and :math:`q` denotes the :math:`q^{th}` neighborhood of :math:`x_{i,j}`. + * + * + * Defined in src/operator/correlation.cc:L198 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Correlation(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies correlation to inputs. + * + * The correlation layer performs multiplicative patch comparisons between two feature maps. 
+ * + * Given two multi-channel feature maps :math:`f_{1}, f_{2}`, with :math:`w`, :math:`h`, and :math:`c` being their width, height, and number of channels, + * the correlation layer lets the network compare each patch from :math:`f_{1}` with each patch from :math:`f_{2}`. + * + * For now we consider only a single comparison of two patches. The 'correlation' of two patches centered at :math:`x_{1}` in the first map and + * :math:`x_{2}` in the second map is then defined as: + * + * .. math:: + * + * c(x_{1}, x_{2}) = \sum_{o \in [-k,k] \times [-k,k]} + * + * for a square patch of size :math:`K:=2k+1`. + * + * Note that the equation above is identical to one step of a convolution in neural networks, but instead of convolving data with a filter, it convolves data with other + * data. For this reason, it has no training weights. + * + * Computing :math:`c(x_{1}, x_{2})` involves :math:`c * K^{2}` multiplications. Comparing all patch combinations involves :math:`w^{2}*h^{2}` such computations. + * + * Given a maximum displacement :math:`d`, for each location :math:`x_{1}` it computes correlations :math:`c(x_{1}, x_{2})` only in a neighborhood of size :math:`D:=2d+1`, + * by limiting the range of :math:`x_{2}`. We use strides :math:`s_{1}, s_{2}`, to quantize :math:`x_{1}` globally and to quantize :math:`x_{2}` within the neighborhood + * centered around :math:`x_{1}`. + * + * The final output is defined by the following expression: + * + * .. math:: + * out[n, q, i, j] = c(x_{i, j}, x_{q}) + * + * where :math:`i` and :math:`j` enumerate spatial locations in :math:`f_{1}`, and :math:`q` denotes the :math:`q^{th}` neighborhood of :math:`x_{i,j}`. + * + * + * Defined in src/operator/correlation.cc:L198 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Correlation(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * + * + * .. note:: `Crop` is deprecated. Use `slice` instead. + * + * Crop the 2nd and 3rd dim of input data, with the corresponding size of h_w or + * with width and height of the second input symbol, i.e., with one input, we need h_w to + * specify the crop height and width, otherwise the second input symbol's size will be used + * + * + * Defined in src/operator/crop.cc:L50 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Crop(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * + * + * .. note:: `Crop` is deprecated. Use `slice` instead. + * + * Crop the 2nd and 3rd dim of input data, with the corresponding size of h_w or + * with width and height of the second input symbol, i.e., with one input, we need h_w to + * specify the crop height and width, otherwise the second input symbol's size will be used + * + * + * Defined in src/operator/crop.cc:L50 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Crop(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Apply a custom operator implemented in a frontend language (like Python). + * + * Custom operators should override required methods like `forward` and `backward`. + * The custom operator must be registered before it can be used. 
+ * Please check the tutorial here: https://mxnet.incubator.apache.org/api/faq/new_op + * + * + * + * Defined in src/operator/custom/custom.cc:L546 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Custom(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Apply a custom operator implemented in a frontend language (like Python). + * + * Custom operators should override required methods like `forward` and `backward`. + * The custom operator must be registered before it can be used. + * Please check the tutorial here: https://mxnet.incubator.apache.org/api/faq/new_op + * + * + * + * Defined in src/operator/custom/custom.cc:L546 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Custom(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes 1D or 2D transposed convolution (aka fractionally strided convolution) of the input tensor. This operation can be seen as the gradient of Convolution operation with respect to its input. Convolution usually reduces the size of the input. Transposed convolution works the other way, going from a smaller input to a larger output while preserving the connectivity pattern. + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Deconvolution(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes 1D or 2D transposed convolution (aka fractionally strided convolution) of the input tensor. This operation can be seen as the gradient of Convolution operation with respect to its input. Convolution usually reduces the size of the input. Transposed convolution works the other way, going from a smaller input to a larger output while preserving the connectivity pattern. + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Deconvolution(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies dropout operation to input array. + * + * - During training, each element of the input is set to zero with probability p. + * The whole array is rescaled by :math:`1/(1-p)` to keep the expected + * sum of the input unchanged. + * + * - During testing, this operator does not change the input if mode is 'training'. + * If mode is 'always', the same computaion as during training will be applied. + * + * Example:: + * + * random.seed(998) + * input_array = array(`[ [3., 0.5, -0.5, 2., 7.], + * [2., -0.4, 7., 3., 0.2] ]) + * a = symbol.Variable('a') + * dropout = symbol.Dropout(a, p = 0.2) + * executor = dropout.simple_bind(a = input_array.shape) + * + * ## If training + * executor.forward(is_train = True, a = input_array) + * executor.outputs + * `[ [ 3.75 0.625 -0. 2.5 8.75 ] + * [ 2.5 -0.5 8.75 3.75 0. ] ] + * + * ## If testing + * executor.forward(is_train = False, a = input_array) + * executor.outputs + * `[ [ 3. 0.5 -0.5 2. 7. ] + * [ 2. -0.4 7. 3. 0.2 ] ] + * + * + * Defined in src/operator/nn/dropout.cc:L96 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Dropout(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies dropout operation to input array. + * + * - During training, each element of the input is set to zero with probability p. + * The whole array is rescaled by :math:`1/(1-p)` to keep the expected + * sum of the input unchanged. + * + * - During testing, this operator does not change the input if mode is 'training'. 
+ * If mode is 'always', the same computaion as during training will be applied. + * + * Example:: + * + * random.seed(998) + * input_array = array(`[ [3., 0.5, -0.5, 2., 7.], + * [2., -0.4, 7., 3., 0.2] ]) + * a = symbol.Variable('a') + * dropout = symbol.Dropout(a, p = 0.2) + * executor = dropout.simple_bind(a = input_array.shape) + * + * ## If training + * executor.forward(is_train = True, a = input_array) + * executor.outputs + * `[ [ 3.75 0.625 -0. 2.5 8.75 ] + * [ 2.5 -0.5 8.75 3.75 0. ] ] + * + * ## If testing + * executor.forward(is_train = False, a = input_array) + * executor.outputs + * `[ [ 3. 0.5 -0.5 2. 7. ] + * [ 2. -0.4 7. 3. 0.2 ] ] + * + * + * Defined in src/operator/nn/dropout.cc:L96 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Dropout(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Adds all input arguments element-wise. + * + * .. math:: + * add\_n(a_1, a_2, ..., a_n) = a_1 + a_2 + ... + a_n + * + * ``add_n`` is potentially more efficient than calling ``add`` by `n` times. + * + * The storage type of ``add_n`` output depends on storage types of inputs + * + * - add_n(row_sparse, row_sparse, ..) = row_sparse + * - add_n(default, csr, default) = default + * - add_n(any input combinations longer than 4 (>4) with at least one default type) = default + * - otherwise, ``add_n`` falls all inputs back to default storage and generates default storage + * + * + * + * Defined in src/operator/tensor/elemwise_sum.cc:L155 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def ElementWiseSum(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Adds all input arguments element-wise. + * + * .. math:: + * add\_n(a_1, a_2, ..., a_n) = a_1 + a_2 + ... + a_n + * + * ``add_n`` is potentially more efficient than calling ``add`` by `n` times. + * + * The storage type of ``add_n`` output depends on storage types of inputs + * + * - add_n(row_sparse, row_sparse, ..) = row_sparse + * - add_n(default, csr, default) = default + * - add_n(any input combinations longer than 4 (>4) with at least one default type) = default + * - otherwise, ``add_n`` falls all inputs back to default storage and generates default storage + * + * + * + * Defined in src/operator/tensor/elemwise_sum.cc:L155 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def ElementWiseSum(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Maps integer indices to vector representations (embeddings). + * + * This operator maps words to real-valued vectors in a high-dimensional space, + * called word embeddings. These embeddings can capture semantic and syntactic properties of the words. + * For example, it has been noted that in the learned embedding spaces, similar words tend + * to be close to each other and dissimilar words far apart. + * + * For an input array of shape (d1, ..., dK), + * the shape of an output array is (d1, ..., dK, output_dim). + * All the input values should be integers in the range [0, input_dim). + * + * If the input_dim is ip0 and output_dim is op0, then shape of the embedding weight matrix must be + * (ip0, op0). + * + * When "sparse_grad" is False, if any index mentioned is too large, it is replaced by the index that + * addresses the last vector in an embedding matrix. + * When "sparse_grad" is True, an error will be raised if invalid indices are found. 
+ * + * Examples:: + * + * input_dim = 4 + * output_dim = 5 + * + * // Each row in weight matrix y represents a word. So, y = (w0,w1,w2,w3) + * y = `[ [ 0., 1., 2., 3., 4.], + * [ 5., 6., 7., 8., 9.], + * [ 10., 11., 12., 13., 14.], + * [ 15., 16., 17., 18., 19.] ] + * + * // Input array x represents n-grams(2-gram). So, x = [(w1,w3), (w0,w2)] + * x = `[ [ 1., 3.], + * [ 0., 2.] ] + * + * // Mapped input x to its vector representation y. + * Embedding(x, y, 4, 5) = `[ `[ [ 5., 6., 7., 8., 9.], + * [ 15., 16., 17., 18., 19.] ], + * + * `[ [ 0., 1., 2., 3., 4.], + * [ 10., 11., 12., 13., 14.] ] ] + * + * + * The storage type of weight can be either row_sparse or default. + * + * .. Note:: + * + * If "sparse_grad" is set to True, the storage type of gradient w.r.t weights will be + * "row_sparse". Only a subset of optimizers support sparse gradients, including SGD, AdaGrad + * and Adam. Note that by default lazy updates is turned on, which may perform differently + * from standard updates. For more details, please check the Optimization API at: + * https://mxnet.incubator.apache.org/api/python/optimization/optimization.html + * + * + * + * Defined in src/operator/tensor/indexing_op.cc:L539 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Embedding(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Maps integer indices to vector representations (embeddings). + * + * This operator maps words to real-valued vectors in a high-dimensional space, + * called word embeddings. These embeddings can capture semantic and syntactic properties of the words. + * For example, it has been noted that in the learned embedding spaces, similar words tend + * to be close to each other and dissimilar words far apart. + * + * For an input array of shape (d1, ..., dK), + * the shape of an output array is (d1, ..., dK, output_dim). + * All the input values should be integers in the range [0, input_dim). + * + * If the input_dim is ip0 and output_dim is op0, then shape of the embedding weight matrix must be + * (ip0, op0). + * + * When "sparse_grad" is False, if any index mentioned is too large, it is replaced by the index that + * addresses the last vector in an embedding matrix. + * When "sparse_grad" is True, an error will be raised if invalid indices are found. + * + * Examples:: + * + * input_dim = 4 + * output_dim = 5 + * + * // Each row in weight matrix y represents a word. So, y = (w0,w1,w2,w3) + * y = `[ [ 0., 1., 2., 3., 4.], + * [ 5., 6., 7., 8., 9.], + * [ 10., 11., 12., 13., 14.], + * [ 15., 16., 17., 18., 19.] ] + * + * // Input array x represents n-grams(2-gram). So, x = [(w1,w3), (w0,w2)] + * x = `[ [ 1., 3.], + * [ 0., 2.] ] + * + * // Mapped input x to its vector representation y. + * Embedding(x, y, 4, 5) = `[ `[ [ 5., 6., 7., 8., 9.], + * [ 15., 16., 17., 18., 19.] ], + * + * `[ [ 0., 1., 2., 3., 4.], + * [ 10., 11., 12., 13., 14.] ] ] + * + * + * The storage type of weight can be either row_sparse or default. + * + * .. Note:: + * + * If "sparse_grad" is set to True, the storage type of gradient w.r.t weights will be + * "row_sparse". Only a subset of optimizers support sparse gradients, including SGD, AdaGrad + * and Adam. Note that by default lazy updates is turned on, which may perform differently + * from standard updates. 
For more details, please check the Optimization API at: + * https://mxnet.incubator.apache.org/api/python/optimization/optimization.html + * + * + * + * Defined in src/operator/tensor/indexing_op.cc:L539 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Embedding(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Flattens the input array into a 2-D array by collapsing the higher dimensions. + * .. note:: `Flatten` is deprecated. Use `flatten` instead. + * For an input array with shape ``(d1, d2, ..., dk)``, `flatten` operation reshapes + * the input array into an output array of shape ``(d1, d2*...*dk)``. + * Note that the behavior of this function is different from numpy.ndarray.flatten, + * which behaves similar to mxnet.ndarray.reshape((-1,)). + * Example:: + * x = `[ [ + * [1,2,3], + * [4,5,6], + * [7,8,9] + * ], + * [ [1,2,3], + * [4,5,6], + * [7,8,9] + * ] ], + * flatten(x) = `[ [ 1., 2., 3., 4., 5., 6., 7., 8., 9.], + * [ 1., 2., 3., 4., 5., 6., 7., 8., 9.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L250 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Flatten(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Flattens the input array into a 2-D array by collapsing the higher dimensions. + * .. note:: `Flatten` is deprecated. Use `flatten` instead. + * For an input array with shape ``(d1, d2, ..., dk)``, `flatten` operation reshapes + * the input array into an output array of shape ``(d1, d2*...*dk)``. + * Note that the behavior of this function is different from numpy.ndarray.flatten, + * which behaves similar to mxnet.ndarray.reshape((-1,)). + * Example:: + * x = `[ [ + * [1,2,3], + * [4,5,6], + * [7,8,9] + * ], + * [ [1,2,3], + * [4,5,6], + * [7,8,9] + * ] ], + * flatten(x) = `[ [ 1., 2., 3., 4., 5., 6., 7., 8., 9.], + * [ 1., 2., 3., 4., 5., 6., 7., 8., 9.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L250 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Flatten(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies a linear transformation: :math:`Y = XW^T + b`. + * + * If ``flatten`` is set to be true, then the shapes are: + * + * - **data**: `(batch_size, x1, x2, ..., xn)` + * - **weight**: `(num_hidden, x1 * x2 * ... * xn)` + * - **bias**: `(num_hidden,)` + * - **out**: `(batch_size, num_hidden)` + * + * If ``flatten`` is set to be false, then the shapes are: + * + * - **data**: `(x1, x2, ..., xn, input_dim)` + * - **weight**: `(num_hidden, input_dim)` + * - **bias**: `(num_hidden,)` + * - **out**: `(x1, x2, ..., xn, num_hidden)` + * + * The learnable parameters include both ``weight`` and ``bias``. + * + * If ``no_bias`` is set to be true, then the ``bias`` term is ignored. + * + * .. Note:: + * + * The sparse support for FullyConnected is limited to forward evaluation with `row_sparse` + * weight and bias, where the length of `weight.indices` and `bias.indices` must be equal + * to `num_hidden`. This could be useful for model inference with `row_sparse` weights + * trained with importance sampling or noise contrastive estimation. + * + * To compute linear transformation with 'csr' sparse data, sparse.dot is recommended instead + * of sparse.FullyConnected. 
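+ *
+ * Example (a minimal Scala usage sketch; the `data`, `weight` and `bias`
+ * NDArrays and the hidden size are assumed, they are not part of the operator docs)::
+ *
+ *   // data: (batch_size, 256), weight: (128, 256), bias: (128,)
+ *   val out = FullyConnected(Map("num_hidden" -> 128))(data, weight, bias)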
+ * + * + * + * Defined in src/operator/nn/fully_connected.cc:L291 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def FullyConnected(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies a linear transformation: :math:`Y = XW^T + b`. + * + * If ``flatten`` is set to be true, then the shapes are: + * + * - **data**: `(batch_size, x1, x2, ..., xn)` + * - **weight**: `(num_hidden, x1 * x2 * ... * xn)` + * - **bias**: `(num_hidden,)` + * - **out**: `(batch_size, num_hidden)` + * + * If ``flatten`` is set to be false, then the shapes are: + * + * - **data**: `(x1, x2, ..., xn, input_dim)` + * - **weight**: `(num_hidden, input_dim)` + * - **bias**: `(num_hidden,)` + * - **out**: `(x1, x2, ..., xn, num_hidden)` + * + * The learnable parameters include both ``weight`` and ``bias``. + * + * If ``no_bias`` is set to be true, then the ``bias`` term is ignored. + * + * .. Note:: + * + * The sparse support for FullyConnected is limited to forward evaluation with `row_sparse` + * weight and bias, where the length of `weight.indices` and `bias.indices` must be equal + * to `num_hidden`. This could be useful for model inference with `row_sparse` weights + * trained with importance sampling or noise contrastive estimation. + * + * To compute linear transformation with 'csr' sparse data, sparse.dot is recommended instead + * of sparse.FullyConnected. + * + * + * + * Defined in src/operator/nn/fully_connected.cc:L291 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def FullyConnected(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Generates 2D sampling grid for bilinear sampling. + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def GridGenerator(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Generates 2D sampling grid for bilinear sampling. + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def GridGenerator(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Group normalization. + * + * The input channels are separated into ``num_groups`` groups, each containing ``num_channels / num_groups`` channels. + * The mean and standard-deviation are calculated separately over the each group. + * + * .. math:: + * + * data = data.reshape((N, num_groups, C // num_groups, ...)) + * out = \frac{data - mean(data, axis)}{\sqrt{var(data, axis) + \epsilon}} * gamma + beta + * + * Both ``gamma`` and ``beta`` are learnable parameters. + * + * + * + * Defined in src/operator/nn/group_norm.cc:L77 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def GroupNorm(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Group normalization. + * + * The input channels are separated into ``num_groups`` groups, each containing ``num_channels / num_groups`` channels. + * The mean and standard-deviation are calculated separately over the each group. + * + * .. math:: + * + * data = data.reshape((N, num_groups, C // num_groups, ...)) + * out = \frac{data - mean(data, axis)}{\sqrt{var(data, axis) + \epsilon}} * gamma + beta + * + * Both ``gamma`` and ``beta`` are learnable parameters. 
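+ *
+ * Example (a minimal Scala usage sketch; the `data`, `gamma` and `beta`
+ * NDArrays are assumed, using the operator's default number of groups)::
+ *
+ *   // data: (N, C, H, W); gamma and beta are the learnable scale and shift
+ *   val out = GroupNorm(data, gamma, beta)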
+ *
+ *
+ * Defined in src/operator/nn/group_norm.cc:L77
+ * }}}
+ *
+ * @return org.apache.mxnet.NDArrayFuncReturn
+ */
+def GroupNorm(args: Any*): org.apache.mxnet.NDArrayFuncReturn
+
+ /**
+ *
+ * {{{
+ *
+ * Apply a sparse regularization to the output of a sigmoid activation function.
+ * }}}
+ *
+ * @return org.apache.mxnet.NDArrayFuncReturn
+ */
+def IdentityAttachKLSparseReg(kwargs: Map[String, Any] = null)
+ (args: Any*): org.apache.mxnet.NDArrayFuncReturn
+
+ /**
+ *
+ * {{{
+ *
+ * Apply a sparse regularization to the output of a sigmoid activation function.
+ * }}}
+ *
+ * @return org.apache.mxnet.NDArrayFuncReturn
+ */
+def IdentityAttachKLSparseReg(args: Any*): org.apache.mxnet.NDArrayFuncReturn
+
+ /**
+ *
+ * {{{
+ *
+ * Applies instance normalization to the n-dimensional input array.
+ *
+ * This operator takes an n-dimensional input array (where n > 2) and normalizes
+ * the input using the following formula:
+ *
+ * .. math::
+ *
+ * out = \frac{x - mean[data]}{ \sqrt{Var[data]} + \epsilon} * gamma + beta
+ *
+ * This layer is similar to batch normalization layer (`BatchNorm`)
+ * with two differences: first, the normalization is
+ * carried out per example (instance), not over a batch. Second, the
+ * same normalization is applied both at test and train time. This
+ * operation is also known as `contrast normalization`.
+ *
+ * If the input data is of shape [batch, channel, spatial_dim1, spatial_dim2, ...],
+ * `gamma` and `beta` parameters must be vectors of shape [channel].
+ *
+ * This implementation is based on this paper [1]_
+ *
+ * .. [1] Instance Normalization: The Missing Ingredient for Fast Stylization,
+ * D. Ulyanov, A. Vedaldi, V. Lempitsky, 2016 (arXiv:1607.08022v2).
+ *
+ * Examples::
+ *
+ * // Input of shape (2,1,2)
+ * x = `[ `[ [ 1.1, 2.2] ],
+ * `[ [ 3.3, 4.4] ] ]
+ *
+ * // gamma parameter of length 1
+ * gamma = [1.5]
+ *
+ * // beta parameter of length 1
+ * beta = [0.5]
+ *
+ * // Instance normalization is calculated with the above formula
+ * InstanceNorm(x,gamma,beta) = `[ `[ [-0.997527 , 1.99752665] ],
+ * `[ [-0.99752653, 1.99752724] ] ]
+ *
+ *
+ *
+ * Defined in src/operator/instance_norm.cc:L95
+ * }}}
+ *
+ * @return org.apache.mxnet.NDArrayFuncReturn
+ */
+def InstanceNorm(kwargs: Map[String, Any] = null)
+ (args: Any*): org.apache.mxnet.NDArrayFuncReturn
+
+ /**
+ *
+ * {{{
+ *
+ * Applies instance normalization to the n-dimensional input array.
+ *
+ * This operator takes an n-dimensional input array (where n > 2) and normalizes
+ * the input using the following formula:
+ *
+ * .. math::
+ *
+ * out = \frac{x - mean[data]}{ \sqrt{Var[data]} + \epsilon} * gamma + beta
+ *
+ * This layer is similar to batch normalization layer (`BatchNorm`)
+ * with two differences: first, the normalization is
+ * carried out per example (instance), not over a batch. Second, the
+ * same normalization is applied both at test and train time. This
+ * operation is also known as `contrast normalization`.
+ *
+ * If the input data is of shape [batch, channel, spatial_dim1, spatial_dim2, ...],
+ * `gamma` and `beta` parameters must be vectors of shape [channel].
+ *
+ * This implementation is based on this paper [1]_
+ *
+ * .. [1] Instance Normalization: The Missing Ingredient for Fast Stylization,
+ * D. Ulyanov, A. Vedaldi, V. Lempitsky, 2016 (arXiv:1607.08022v2).
+ * + * Examples:: + * + * // Input of shape (2,1,2) + * x = `[ `[ [ 1.1, 2.2] ], + * `[ [ 3.3, 4.4] ] ] + * + * // gamma parameter of length 1 + * gamma = [1.5] + * + * // beta parameter of length 1 + * beta = [0.5] + * + * // Instance normalization is calculated with the above formula + * InstanceNorm(x,gamma,beta) = `[ `[ [-0.997527 , 1.99752665] ], + * `[ [-0.99752653, 1.99752724] ] ] + * + * + * + * Defined in src/operator/instance_norm.cc:L95 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def InstanceNorm(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Normalize the input array using the L2 norm. + * + * For 1-D NDArray, it computes:: + * + * out = data / sqrt(sum(data ** 2) + eps) + * + * For N-D NDArray, if the input array has shape (N, N, ..., N), + * + * with ``mode`` = ``instance``, it normalizes each instance in the multidimensional + * array by its L2 norm.:: + * + * for i in 0...N + * out[i,:,:,...,:] = data[i,:,:,...,:] / sqrt(sum(data[i,:,:,...,:] ** 2) + eps) + * + * with ``mode`` = ``channel``, it normalizes each channel in the array by its L2 norm.:: + * + * for i in 0...N + * out[:,i,:,...,:] = data[:,i,:,...,:] / sqrt(sum(data[:,i,:,...,:] ** 2) + eps) + * + * with ``mode`` = ``spatial``, it normalizes the cross channel norm for each position + * in the array by its L2 norm.:: + * + * for dim in 2...N + * for i in 0...N + * out[.....,i,...] = take(out, indices=i, axis=dim) / sqrt(sum(take(out, indices=i, axis=dim) ** 2) + eps) + * -dim- + * + * Example:: + * + * x = `[ `[ [1,2], + * [3,4] ], + * `[ [2,2], + * [5,6] ] ] + * + * L2Normalization(x, mode='instance') + * =`[ `[ [ 0.18257418 0.36514837] + * [ 0.54772252 0.73029673] ] + * `[ [ 0.24077171 0.24077171] + * [ 0.60192931 0.72231513] ] ] + * + * L2Normalization(x, mode='channel') + * =`[ `[ [ 0.31622776 0.44721359] + * [ 0.94868326 0.89442718] ] + * `[ [ 0.37139067 0.31622776] + * [ 0.92847669 0.94868326] ] ] + * + * L2Normalization(x, mode='spatial') + * =`[ `[ [ 0.44721359 0.89442718] + * [ 0.60000002 0.80000001] ] + * `[ [ 0.70710677 0.70710677] + * [ 0.6401844 0.76822126] ] ] + * + * + * + * Defined in src/operator/l2_normalization.cc:L196 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def L2Normalization(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Normalize the input array using the L2 norm. + * + * For 1-D NDArray, it computes:: + * + * out = data / sqrt(sum(data ** 2) + eps) + * + * For N-D NDArray, if the input array has shape (N, N, ..., N), + * + * with ``mode`` = ``instance``, it normalizes each instance in the multidimensional + * array by its L2 norm.:: + * + * for i in 0...N + * out[i,:,:,...,:] = data[i,:,:,...,:] / sqrt(sum(data[i,:,:,...,:] ** 2) + eps) + * + * with ``mode`` = ``channel``, it normalizes each channel in the array by its L2 norm.:: + * + * for i in 0...N + * out[:,i,:,...,:] = data[:,i,:,...,:] / sqrt(sum(data[:,i,:,...,:] ** 2) + eps) + * + * with ``mode`` = ``spatial``, it normalizes the cross channel norm for each position + * in the array by its L2 norm.:: + * + * for dim in 2...N + * for i in 0...N + * out[.....,i,...] 
= take(out, indices=i, axis=dim) / sqrt(sum(take(out, indices=i, axis=dim) ** 2) + eps) + * -dim- + * + * Example:: + * + * x = `[ `[ [1,2], + * [3,4] ], + * `[ [2,2], + * [5,6] ] ] + * + * L2Normalization(x, mode='instance') + * =`[ `[ [ 0.18257418 0.36514837] + * [ 0.54772252 0.73029673] ] + * `[ [ 0.24077171 0.24077171] + * [ 0.60192931 0.72231513] ] ] + * + * L2Normalization(x, mode='channel') + * =`[ `[ [ 0.31622776 0.44721359] + * [ 0.94868326 0.89442718] ] + * `[ [ 0.37139067 0.31622776] + * [ 0.92847669 0.94868326] ] ] + * + * L2Normalization(x, mode='spatial') + * =`[ `[ [ 0.44721359 0.89442718] + * [ 0.60000002 0.80000001] ] + * `[ [ 0.70710677 0.70710677] + * [ 0.6401844 0.76822126] ] ] + * + * + * + * Defined in src/operator/l2_normalization.cc:L196 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def L2Normalization(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies local response normalization to the input. + * + * The local response normalization layer performs "lateral inhibition" by normalizing + * over local input regions. + * + * If :math:`a_{x,y}^{i}` is the activity of a neuron computed by applying kernel :math:`i` at position + * :math:`(x, y)` and then applying the ReLU nonlinearity, the response-normalized + * activity :math:`b_{x,y}^{i}` is given by the expression: + * + * .. math:: + * b_{x,y}^{i} = \frac{a_{x,y}^{i}}{\Bigg({k + \frac{\alpha}{n} \sum_{j=max(0, i-\frac{n}{2})}^{min(N-1, i+\frac{n}{2})} (a_{x,y}^{j})^{2}}\Bigg)^{\beta}} + * + * where the sum runs over :math:`n` "adjacent" kernel maps at the same spatial position, and :math:`N` is the total + * number of kernels in the layer. + * + * + * + * Defined in src/operator/nn/lrn.cc:L164 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def LRN(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies local response normalization to the input. + * + * The local response normalization layer performs "lateral inhibition" by normalizing + * over local input regions. + * + * If :math:`a_{x,y}^{i}` is the activity of a neuron computed by applying kernel :math:`i` at position + * :math:`(x, y)` and then applying the ReLU nonlinearity, the response-normalized + * activity :math:`b_{x,y}^{i}` is given by the expression: + * + * .. math:: + * b_{x,y}^{i} = \frac{a_{x,y}^{i}}{\Bigg({k + \frac{\alpha}{n} \sum_{j=max(0, i-\frac{n}{2})}^{min(N-1, i+\frac{n}{2})} (a_{x,y}^{j})^{2}}\Bigg)^{\beta}} + * + * where the sum runs over :math:`n` "adjacent" kernel maps at the same spatial position, and :math:`N` is the total + * number of kernels in the layer. + * + * + * + * Defined in src/operator/nn/lrn.cc:L164 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def LRN(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Layer normalization. + * + * Normalizes the channels of the input tensor by mean and variance, and applies a scale ``gamma`` as + * well as offset ``beta``. + * + * Assume the input has more than one dimension and we normalize along axis 1. + * We first compute the mean and variance along this axis and then + * compute the normalized output, which has the same shape as input, as following: + * + * .. math:: + * + * out = \frac{data - mean(data, axis)}{\sqrt{var(data, axis) + \epsilon}} * gamma + beta + * + * Both ``gamma`` and ``beta`` are learnable parameters. 
+ * + * Unlike BatchNorm and InstanceNorm, the *mean* and *var* are computed along the channel dimension. + * + * Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` + * have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and + * ``data_std``. Note that no gradient will be passed through these two outputs. + * + * The parameter ``axis`` specifies which axis of the input shape denotes + * the 'channel' (separately normalized groups). The default is -1, which sets the channel + * axis to be the last item in the input shape. + * + * + * + * Defined in src/operator/nn/layer_norm.cc:L156 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def LayerNorm(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Layer normalization. + * + * Normalizes the channels of the input tensor by mean and variance, and applies a scale ``gamma`` as + * well as offset ``beta``. + * + * Assume the input has more than one dimension and we normalize along axis 1. + * We first compute the mean and variance along this axis and then + * compute the normalized output, which has the same shape as input, as following: + * + * .. math:: + * + * out = \frac{data - mean(data, axis)}{\sqrt{var(data, axis) + \epsilon}} * gamma + beta + * + * Both ``gamma`` and ``beta`` are learnable parameters. + * + * Unlike BatchNorm and InstanceNorm, the *mean* and *var* are computed along the channel dimension. + * + * Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` + * have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and + * ``data_std``. Note that no gradient will be passed through these two outputs. + * + * The parameter ``axis`` specifies which axis of the input shape denotes + * the 'channel' (separately normalized groups). The default is -1, which sets the channel + * axis to be the last item in the input shape. + * + * + * + * Defined in src/operator/nn/layer_norm.cc:L156 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def LayerNorm(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies Leaky rectified linear unit activation element-wise to the input. + * + * Leaky ReLUs attempt to fix the "dying ReLU" problem by allowing a small `slope` + * when the input is negative and has a slope of one when input is positive. + * + * The following modified ReLU Activation functions are supported: + * + * - *elu*: Exponential Linear Unit. `y = x > 0 ? x : slope * (exp(x)-1)` + * - *selu*: Scaled Exponential Linear Unit. `y = lambda * (x > 0 ? x : alpha * (exp(x) - 1))` where + * *lambda = 1.0507009873554804934193349852946* and *alpha = 1.6732632423543772848170429916717*. + * - *leaky*: Leaky ReLU. `y = x > 0 ? x : slope * x` + * - *prelu*: Parametric ReLU. This is same as *leaky* except that `slope` is learnt during training. + * - *rrelu*: Randomized ReLU. same as *leaky* but the `slope` is uniformly and randomly chosen from + * *[lower_bound, upper_bound)* for training, while fixed to be + * *(lower_bound+upper_bound)/2* for inference. + * + * + * + * Defined in src/operator/leaky_relu.cc:L161 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def LeakyReLU(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies Leaky rectified linear unit activation element-wise to the input. 
+ * + * Leaky ReLUs attempt to fix the "dying ReLU" problem by allowing a small `slope` + * when the input is negative and has a slope of one when input is positive. + * + * The following modified ReLU Activation functions are supported: + * + * - *elu*: Exponential Linear Unit. `y = x > 0 ? x : slope * (exp(x)-1)` + * - *selu*: Scaled Exponential Linear Unit. `y = lambda * (x > 0 ? x : alpha * (exp(x) - 1))` where + * *lambda = 1.0507009873554804934193349852946* and *alpha = 1.6732632423543772848170429916717*. + * - *leaky*: Leaky ReLU. `y = x > 0 ? x : slope * x` + * - *prelu*: Parametric ReLU. This is same as *leaky* except that `slope` is learnt during training. + * - *rrelu*: Randomized ReLU. same as *leaky* but the `slope` is uniformly and randomly chosen from + * *[lower_bound, upper_bound)* for training, while fixed to be + * *(lower_bound+upper_bound)/2* for inference. + * + * + * + * Defined in src/operator/leaky_relu.cc:L161 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def LeakyReLU(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes and optimizes for squared loss during backward propagation. + * Just outputs ``data`` during forward propagation. + * + * If :math:`\hat{y}_i` is the predicted value of the i-th sample, and :math:`y_i` is the corresponding target value, + * then the squared loss estimated over :math:`n` samples is defined as + * + * :math:`\text{SquaredLoss}(\textbf{Y}, \hat{\textbf{Y}} ) = \frac{1}{n} \sum_{i=0}^{n-1} \lVert \textbf{y}_i - \hat{\textbf{y}}_i \rVert_2` + * + * .. note:: + * Use the LinearRegressionOutput as the final output layer of a net. + * + * The storage type of ``label`` can be ``default`` or ``csr`` + * + * - LinearRegressionOutput(default, default) = default + * - LinearRegressionOutput(default, csr) = default + * + * By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. + * The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. + * + * + * + * Defined in src/operator/regression_output.cc:L92 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def LinearRegressionOutput(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes and optimizes for squared loss during backward propagation. + * Just outputs ``data`` during forward propagation. + * + * If :math:`\hat{y}_i` is the predicted value of the i-th sample, and :math:`y_i` is the corresponding target value, + * then the squared loss estimated over :math:`n` samples is defined as + * + * :math:`\text{SquaredLoss}(\textbf{Y}, \hat{\textbf{Y}} ) = \frac{1}{n} \sum_{i=0}^{n-1} \lVert \textbf{y}_i - \hat{\textbf{y}}_i \rVert_2` + * + * .. note:: + * Use the LinearRegressionOutput as the final output layer of a net. + * + * The storage type of ``label`` can be ``default`` or ``csr`` + * + * - LinearRegressionOutput(default, default) = default + * - LinearRegressionOutput(default, csr) = default + * + * By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. + * The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. 
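+ *
+ * Example (a minimal Scala usage sketch; the `pred` and `label` NDArrays and
+ * the gradient scale are assumed, they are not part of the operator docs)::
+ *
+ *   // forward simply returns `pred`; the squared-loss gradient applies on backward
+ *   val out = LinearRegressionOutput(Map("grad_scale" -> 1.0f))(pred, label)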
+ * + * + * + * Defined in src/operator/regression_output.cc:L92 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def LinearRegressionOutput(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies a logistic function to the input. + * + * The logistic function, also known as the sigmoid function, is computed as + * :math:`\frac{1}{1+exp(-\textbf{x})}`. + * + * Commonly, the sigmoid is used to squash the real-valued output of a linear model + * :math:`wTx+b` into the [0,1] range so that it can be interpreted as a probability. + * It is suitable for binary classification or probability prediction tasks. + * + * .. note:: + * Use the LogisticRegressionOutput as the final output layer of a net. + * + * The storage type of ``label`` can be ``default`` or ``csr`` + * + * - LogisticRegressionOutput(default, default) = default + * - LogisticRegressionOutput(default, csr) = default + * + * The loss function used is the Binary Cross Entropy Loss: + * + * :math:`-{(y\log(p) + (1 - y)\log(1 - p))}` + * + * Where `y` is the ground truth probability of positive outcome for a given example, and `p` the probability predicted by the model. By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. + * The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. + * + * + * + * Defined in src/operator/regression_output.cc:L152 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def LogisticRegressionOutput(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies a logistic function to the input. + * + * The logistic function, also known as the sigmoid function, is computed as + * :math:`\frac{1}{1+exp(-\textbf{x})}`. + * + * Commonly, the sigmoid is used to squash the real-valued output of a linear model + * :math:`wTx+b` into the [0,1] range so that it can be interpreted as a probability. + * It is suitable for binary classification or probability prediction tasks. + * + * .. note:: + * Use the LogisticRegressionOutput as the final output layer of a net. + * + * The storage type of ``label`` can be ``default`` or ``csr`` + * + * - LogisticRegressionOutput(default, default) = default + * - LogisticRegressionOutput(default, csr) = default + * + * The loss function used is the Binary Cross Entropy Loss: + * + * :math:`-{(y\log(p) + (1 - y)\log(1 - p))}` + * + * Where `y` is the ground truth probability of positive outcome for a given example, and `p` the probability predicted by the model. By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. + * The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. + * + * + * + * Defined in src/operator/regression_output.cc:L152 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def LogisticRegressionOutput(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes mean absolute error of the input. + * + * MAE is a risk metric corresponding to the expected value of the absolute error. 
+ * + * If :math:`\hat{y}_i` is the predicted value of the i-th sample, and :math:`y_i` is the corresponding target value, + * then the mean absolute error (MAE) estimated over :math:`n` samples is defined as + * + * :math:`\text{MAE}(\textbf{Y}, \hat{\textbf{Y}} ) = \frac{1}{n} \sum_{i=0}^{n-1} \lVert \textbf{y}_i - \hat{\textbf{y}}_i \rVert_1` + * + * .. note:: + * Use the MAERegressionOutput as the final output layer of a net. + * + * The storage type of ``label`` can be ``default`` or ``csr`` + * + * - MAERegressionOutput(default, default) = default + * - MAERegressionOutput(default, csr) = default + * + * By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. + * The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. + * + * + * + * Defined in src/operator/regression_output.cc:L120 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def MAERegressionOutput(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes mean absolute error of the input. + * + * MAE is a risk metric corresponding to the expected value of the absolute error. + * + * If :math:`\hat{y}_i` is the predicted value of the i-th sample, and :math:`y_i` is the corresponding target value, + * then the mean absolute error (MAE) estimated over :math:`n` samples is defined as + * + * :math:`\text{MAE}(\textbf{Y}, \hat{\textbf{Y}} ) = \frac{1}{n} \sum_{i=0}^{n-1} \lVert \textbf{y}_i - \hat{\textbf{y}}_i \rVert_1` + * + * .. note:: + * Use the MAERegressionOutput as the final output layer of a net. + * + * The storage type of ``label`` can be ``default`` or ``csr`` + * + * - MAERegressionOutput(default, default) = default + * - MAERegressionOutput(default, csr) = default + * + * By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. + * The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. + * + * + * + * Defined in src/operator/regression_output.cc:L120 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def MAERegressionOutput(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Make your own loss function in network construction. + * + * This operator accepts a customized loss function symbol as a terminal loss and + * the symbol should be an operator with no backward dependency. + * The output of this function is the gradient of loss with respect to the input data. + * + * For example, if you are a making a cross entropy loss function. Assume ``out`` is the + * predicted output and ``label`` is the true label, then the cross entropy can be defined as:: + * + * cross_entropy = label * log(out) + (1 - label) * log(1 - out) + * loss = MakeLoss(cross_entropy) + * + * We will need to use ``MakeLoss`` when we are creating our own loss function or we want to + * combine multiple loss functions. Also we may want to stop some variables' gradients + * from backpropagation. See more detail in ``BlockGrad`` or ``stop_gradient``. + * + * In addition, we can give a scale to the loss by setting ``grad_scale``, + * so that the gradient of the loss will be rescaled in the backpropagation. + * + * .. note:: This operator should be used as a Symbol instead of NDArray. 
+ * + * + * + * Defined in src/operator/make_loss.cc:L71 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def MakeLoss(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Make your own loss function in network construction. + * + * This operator accepts a customized loss function symbol as a terminal loss and + * the symbol should be an operator with no backward dependency. + * The output of this function is the gradient of loss with respect to the input data. + * + * For example, if you are a making a cross entropy loss function. Assume ``out`` is the + * predicted output and ``label`` is the true label, then the cross entropy can be defined as:: + * + * cross_entropy = label * log(out) + (1 - label) * log(1 - out) + * loss = MakeLoss(cross_entropy) + * + * We will need to use ``MakeLoss`` when we are creating our own loss function or we want to + * combine multiple loss functions. Also we may want to stop some variables' gradients + * from backpropagation. See more detail in ``BlockGrad`` or ``stop_gradient``. + * + * In addition, we can give a scale to the loss by setting ``grad_scale``, + * so that the gradient of the loss will be rescaled in the backpropagation. + * + * .. note:: This operator should be used as a Symbol instead of NDArray. + * + * + * + * Defined in src/operator/make_loss.cc:L71 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def MakeLoss(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Pads an input array with a constant or edge values of the array. + * + * .. note:: `Pad` is deprecated. Use `pad` instead. + * + * .. note:: Current implementation only supports 4D and 5D input arrays with padding applied + * only on axes 1, 2 and 3. Expects axes 4 and 5 in `pad_width` to be zero. + * + * This operation pads an input array with either a `constant_value` or edge values + * along each axis of the input array. The amount of padding is specified by `pad_width`. + * + * `pad_width` is a tuple of integer padding widths for each axis of the format + * ``(before_1, after_1, ... , before_N, after_N)``. The `pad_width` should be of length ``2*N`` + * where ``N`` is the number of dimensions of the array. + * + * For dimension ``N`` of the input array, ``before_N`` and ``after_N`` indicates how many values + * to add before and after the elements of the array along dimension ``N``. + * The widths of the higher two dimensions ``before_1``, ``after_1``, ``before_2``, + * ``after_2`` must be 0. + * + * Example:: + * + * x = `[ [`[ [ 1. 2. 3.] + * [ 4. 5. 6.] ] + * + * `[ [ 7. 8. 9.] + * [ 10. 11. 12.] ] ] + * + * + * `[ `[ [ 11. 12. 13.] + * [ 14. 15. 16.] ] + * + * `[ [ 17. 18. 19.] + * [ 20. 21. 22.] ] ] ] + * + * pad(x,mode="edge", pad_width=(0,0,0,0,1,1,1,1)) = + * + * `[ [`[ [ 1. 1. 2. 3. 3.] + * [ 1. 1. 2. 3. 3.] + * [ 4. 4. 5. 6. 6.] + * [ 4. 4. 5. 6. 6.] ] + * + * `[ [ 7. 7. 8. 9. 9.] + * [ 7. 7. 8. 9. 9.] + * [ 10. 10. 11. 12. 12.] + * [ 10. 10. 11. 12. 12.] ] ] + * + * + * `[ `[ [ 11. 11. 12. 13. 13.] + * [ 11. 11. 12. 13. 13.] + * [ 14. 14. 15. 16. 16.] + * [ 14. 14. 15. 16. 16.] ] + * + * `[ [ 17. 17. 18. 19. 19.] + * [ 17. 17. 18. 19. 19.] + * [ 20. 20. 21. 22. 22.] + * [ 20. 20. 21. 22. 22.] ] ] ] + * + * pad(x, mode="constant", constant_value=0, pad_width=(0,0,0,0,1,1,1,1)) = + * + * `[ [`[ [ 0. 0. 0. 0. 0.] + * [ 0. 1. 2. 3. 0.] + * [ 0. 4. 5. 6. 0.] + * [ 0. 0. 0. 0. 0.] ] + * + * `[ [ 0. 0. 0. 0. 0.] + * [ 0. 7. 8. 9. 0.] + * [ 0. 10. 11. 12. 0.] 
+ * [ 0. 0. 0. 0. 0.] ] ] + * + * + * `[ `[ [ 0. 0. 0. 0. 0.] + * [ 0. 11. 12. 13. 0.] + * [ 0. 14. 15. 16. 0.] + * [ 0. 0. 0. 0. 0.] ] + * + * `[ [ 0. 0. 0. 0. 0.] + * [ 0. 17. 18. 19. 0.] + * [ 0. 20. 21. 22. 0.] + * [ 0. 0. 0. 0. 0.] ] ] ] + * + * + * + * + * Defined in src/operator/pad.cc:L766 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Pad(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Pads an input array with a constant or edge values of the array. + * + * .. note:: `Pad` is deprecated. Use `pad` instead. + * + * .. note:: Current implementation only supports 4D and 5D input arrays with padding applied + * only on axes 1, 2 and 3. Expects axes 4 and 5 in `pad_width` to be zero. + * + * This operation pads an input array with either a `constant_value` or edge values + * along each axis of the input array. The amount of padding is specified by `pad_width`. + * + * `pad_width` is a tuple of integer padding widths for each axis of the format + * ``(before_1, after_1, ... , before_N, after_N)``. The `pad_width` should be of length ``2*N`` + * where ``N`` is the number of dimensions of the array. + * + * For dimension ``N`` of the input array, ``before_N`` and ``after_N`` indicates how many values + * to add before and after the elements of the array along dimension ``N``. + * The widths of the higher two dimensions ``before_1``, ``after_1``, ``before_2``, + * ``after_2`` must be 0. + * + * Example:: + * + * x = `[ [`[ [ 1. 2. 3.] + * [ 4. 5. 6.] ] + * + * `[ [ 7. 8. 9.] + * [ 10. 11. 12.] ] ] + * + * + * `[ `[ [ 11. 12. 13.] + * [ 14. 15. 16.] ] + * + * `[ [ 17. 18. 19.] + * [ 20. 21. 22.] ] ] ] + * + * pad(x,mode="edge", pad_width=(0,0,0,0,1,1,1,1)) = + * + * `[ [`[ [ 1. 1. 2. 3. 3.] + * [ 1. 1. 2. 3. 3.] + * [ 4. 4. 5. 6. 6.] + * [ 4. 4. 5. 6. 6.] ] + * + * `[ [ 7. 7. 8. 9. 9.] + * [ 7. 7. 8. 9. 9.] + * [ 10. 10. 11. 12. 12.] + * [ 10. 10. 11. 12. 12.] ] ] + * + * + * `[ `[ [ 11. 11. 12. 13. 13.] + * [ 11. 11. 12. 13. 13.] + * [ 14. 14. 15. 16. 16.] + * [ 14. 14. 15. 16. 16.] ] + * + * `[ [ 17. 17. 18. 19. 19.] + * [ 17. 17. 18. 19. 19.] + * [ 20. 20. 21. 22. 22.] + * [ 20. 20. 21. 22. 22.] ] ] ] + * + * pad(x, mode="constant", constant_value=0, pad_width=(0,0,0,0,1,1,1,1)) = + * + * `[ [`[ [ 0. 0. 0. 0. 0.] + * [ 0. 1. 2. 3. 0.] + * [ 0. 4. 5. 6. 0.] + * [ 0. 0. 0. 0. 0.] ] + * + * `[ [ 0. 0. 0. 0. 0.] + * [ 0. 7. 8. 9. 0.] + * [ 0. 10. 11. 12. 0.] + * [ 0. 0. 0. 0. 0.] ] ] + * + * + * `[ `[ [ 0. 0. 0. 0. 0.] + * [ 0. 11. 12. 13. 0.] + * [ 0. 14. 15. 16. 0.] + * [ 0. 0. 0. 0. 0.] ] + * + * `[ [ 0. 0. 0. 0. 0.] + * [ 0. 17. 18. 19. 0.] + * [ 0. 20. 21. 22. 0.] + * [ 0. 0. 0. 0. 0.] ] ] ] + * + * + * + * + * Defined in src/operator/pad.cc:L766 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Pad(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Performs pooling on the input. 
+ *
+ * The shapes for 1-D pooling are
+ *
+ * - **data** and **out**: *(batch_size, channel, width)* (NCW layout) or
+ * *(batch_size, width, channel)* (NWC layout),
+ *
+ * The shapes for 2-D pooling are
+ *
+ * - **data** and **out**: *(batch_size, channel, height, width)* (NCHW layout) or
+ * *(batch_size, height, width, channel)* (NHWC layout),
+ *
+ * out_height = f(height, kernel[0], pad[0], stride[0])
+ * out_width = f(width, kernel[1], pad[1], stride[1])
+ *
+ * The definition of *f* depends on ``pooling_convention``, which has two options:
+ *
+ * - **valid** (default)::
+ *
+ * f(x, k, p, s) = floor((x+2*p-k)/s)+1
+ *
+ * - **full**, which is compatible with Caffe::
+ *
+ * f(x, k, p, s) = ceil((x+2*p-k)/s)+1
+ *
+ * When ``global_pool`` is set to be true, then global pooling is performed. It will reset
+ * ``kernel=(height, width)`` and set the appropriate padding to 0.
+ *
+ * Four pooling options are supported by ``pool_type``:
+ *
+ * - **avg**: average pooling
+ * - **max**: max pooling
+ * - **sum**: sum pooling
+ * - **lp**: Lp pooling
+ *
+ * For 3-D pooling, an additional *depth* dimension is added before
+ * *height*. Namely the input data and output will have shape *(batch_size, channel, depth,
+ * height, width)* (NCDHW layout) or *(batch_size, depth, height, width, channel)* (NDHWC layout).
+ *
+ * Notes on Lp pooling:
+ *
+ * Lp pooling was first introduced by this paper: https://arxiv.org/pdf/1204.3968.pdf.
+ * L-1 pooling is simply sum pooling, while L-inf pooling is simply max pooling.
+ * We can see that Lp pooling stands between those two, in practice the most common value for p is 2.
+ *
+ * For each window ``X``, the mathematical expression for Lp pooling is:
+ *
+ * :math:`f(X) = \sqrt[p]{\sum_{x}^{X} x^p}`
+ *
+ *
+ *
+ * Defined in src/operator/nn/pooling.cc:L417
+ * }}}
+ *
+ * @return org.apache.mxnet.NDArrayFuncReturn
+ */
+def Pooling(kwargs: Map[String, Any] = null)
+ (args: Any*): org.apache.mxnet.NDArrayFuncReturn
+
+ /**
+ *
+ * {{{
+ *
+ * Performs pooling on the input.
+ *
+ * The shapes for 1-D pooling are
+ *
+ * - **data** and **out**: *(batch_size, channel, width)* (NCW layout) or
+ * *(batch_size, width, channel)* (NWC layout),
+ *
+ * The shapes for 2-D pooling are
+ *
+ * - **data** and **out**: *(batch_size, channel, height, width)* (NCHW layout) or
+ * *(batch_size, height, width, channel)* (NHWC layout),
+ *
+ * out_height = f(height, kernel[0], pad[0], stride[0])
+ * out_width = f(width, kernel[1], pad[1], stride[1])
+ *
+ * The definition of *f* depends on ``pooling_convention``, which has two options:
+ *
+ * - **valid** (default)::
+ *
+ * f(x, k, p, s) = floor((x+2*p-k)/s)+1
+ *
+ * - **full**, which is compatible with Caffe::
+ *
+ * f(x, k, p, s) = ceil((x+2*p-k)/s)+1
+ *
+ * When ``global_pool`` is set to be true, then global pooling is performed. It will reset
+ * ``kernel=(height, width)`` and set the appropriate padding to 0.
+ *
+ * Four pooling options are supported by ``pool_type``:
+ *
+ * - **avg**: average pooling
+ * - **max**: max pooling
+ * - **sum**: sum pooling
+ * - **lp**: Lp pooling
+ *
+ * For 3-D pooling, an additional *depth* dimension is added before
+ * *height*. Namely the input data and output will have shape *(batch_size, channel, depth,
+ * height, width)* (NCDHW layout) or *(batch_size, depth, height, width, channel)* (NDHWC layout).
+ *
+ * Notes on Lp pooling:
+ *
+ * Lp pooling was first introduced by this paper: https://arxiv.org/pdf/1204.3968.pdf.
+ * L-1 pooling is simply sum pooling, while L-inf pooling is simply max pooling. + * We can see that Lp pooling stands between those two, in practice the most common value for p is 2. + * + * For each window ``X``, the mathematical expression for Lp pooling is: + * + * :math:`f(X) = \sqrt[p]{\sum_{x}^{X} x^p}` + * + * + * + * Defined in src/operator/nn/pooling.cc:L417 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Pooling(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * This operator is DEPRECATED. + * Perform pooling on the input. + * + * The shapes for 2-D pooling is + * + * - **data**: *(batch_size, channel, height, width)* + * - **out**: *(batch_size, num_filter, out_height, out_width)*, with:: + * + * out_height = f(height, kernel[0], pad[0], stride[0]) + * out_width = f(width, kernel[1], pad[1], stride[1]) + * + * The definition of *f* depends on ``pooling_convention``, which has two options: + * + * - **valid** (default):: + * + * f(x, k, p, s) = floor((x+2*p-k)/s)+1 + * + * - **full**, which is compatible with Caffe:: + * + * f(x, k, p, s) = ceil((x+2*p-k)/s)+1 + * + * But ``global_pool`` is set to be true, then do a global pooling, namely reset + * ``kernel=(height, width)``. + * + * Three pooling options are supported by ``pool_type``: + * + * - **avg**: average pooling + * - **max**: max pooling + * - **sum**: sum pooling + * + * 1-D pooling is special case of 2-D pooling with *weight=1* and + * *kernel[1]=1*. + * + * For 3-D pooling, an additional *depth* dimension is added before + * *height*. Namely the input data will have shape *(batch_size, channel, depth, + * height, width)*. + * + * + * + * Defined in src/operator/pooling_v1.cc:L104 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Pooling_v1(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * This operator is DEPRECATED. + * Perform pooling on the input. + * + * The shapes for 2-D pooling is + * + * - **data**: *(batch_size, channel, height, width)* + * - **out**: *(batch_size, num_filter, out_height, out_width)*, with:: + * + * out_height = f(height, kernel[0], pad[0], stride[0]) + * out_width = f(width, kernel[1], pad[1], stride[1]) + * + * The definition of *f* depends on ``pooling_convention``, which has two options: + * + * - **valid** (default):: + * + * f(x, k, p, s) = floor((x+2*p-k)/s)+1 + * + * - **full**, which is compatible with Caffe:: + * + * f(x, k, p, s) = ceil((x+2*p-k)/s)+1 + * + * But ``global_pool`` is set to be true, then do a global pooling, namely reset + * ``kernel=(height, width)``. + * + * Three pooling options are supported by ``pool_type``: + * + * - **avg**: average pooling + * - **max**: max pooling + * - **sum**: sum pooling + * + * 1-D pooling is special case of 2-D pooling with *weight=1* and + * *kernel[1]=1*. + * + * For 3-D pooling, an additional *depth* dimension is added before + * *height*. Namely the input data will have shape *(batch_size, channel, depth, + * height, width)*. + * + * + * + * Defined in src/operator/pooling_v1.cc:L104 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Pooling_v1(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies recurrent layers to input data. Currently, vanilla RNN, LSTM and GRU are + * implemented, with both multi-layer and bidirectional support. 
+ * + * When the input data is of type float32 and the environment variables MXNET_CUDA_ALLOW_TENSOR_CORE + * and MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION are set to 1, this operator will try to use + * pseudo-float16 precision (float32 math with float16 I/O) precision in order to use + * Tensor Cores on suitable NVIDIA GPUs. This can sometimes give significant speedups. + * + * **Vanilla RNN** + * + * Applies a single-gate recurrent layer to input X. Two kinds of activation function are supported: + * ReLU and Tanh. + * + * With ReLU activation function: + * + * .. math:: + * h_t = relu(W_{ih} * x_t + b_{ih} + W_{hh} * h_{(t-1)} + b_{hh}) + * + * With Tanh activtion function: + * + * .. math:: + * h_t = \tanh(W_{ih} * x_t + b_{ih} + W_{hh} * h_{(t-1)} + b_{hh}) + * + * Reference paper: Finding structure in time - Elman, 1988. + * https://crl.ucsd.edu/~elman/Papers/fsit.pdf + * + * **LSTM** + * + * Long Short-Term Memory - Hochreiter, 1997. http://www.bioinf.jku.at/publications/older/2604.pdf + * + * .. math:: + * \begin{array}{ll} + * i_t = \mathrm{sigmoid}(W_{ii} x_t + b_{ii} + W_{hi} h_{(t-1)} + b_{hi}) \\ + * f_t = \mathrm{sigmoid}(W_{if} x_t + b_{if} + W_{hf} h_{(t-1)} + b_{hf}) \\ + * g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hc} h_{(t-1)} + b_{hg}) \\ + * o_t = \mathrm{sigmoid}(W_{io} x_t + b_{io} + W_{ho} h_{(t-1)} + b_{ho}) \\ + * c_t = f_t * c_{(t-1)} + i_t * g_t \\ + * h_t = o_t * \tanh(c_t) + * \end{array} + * + * **GRU** + * + * Gated Recurrent Unit - Cho et al. 2014. http://arxiv.org/abs/1406.1078 + * + * The definition of GRU here is slightly different from paper but compatible with CUDNN. + * + * .. math:: + * \begin{array}{ll} + * r_t = \mathrm{sigmoid}(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\ + * z_t = \mathrm{sigmoid}(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\ + * n_t = \tanh(W_{in} x_t + b_{in} + r_t * (W_{hn} h_{(t-1)}+ b_{hn})) \\ + * h_t = (1 - z_t) * n_t + z_t * h_{(t-1)} \\ + * \end{array} + * + * + * Defined in src/operator/rnn.cc:L354 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def RNN(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies recurrent layers to input data. Currently, vanilla RNN, LSTM and GRU are + * implemented, with both multi-layer and bidirectional support. + * + * When the input data is of type float32 and the environment variables MXNET_CUDA_ALLOW_TENSOR_CORE + * and MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION are set to 1, this operator will try to use + * pseudo-float16 precision (float32 math with float16 I/O) precision in order to use + * Tensor Cores on suitable NVIDIA GPUs. This can sometimes give significant speedups. + * + * **Vanilla RNN** + * + * Applies a single-gate recurrent layer to input X. Two kinds of activation function are supported: + * ReLU and Tanh. + * + * With ReLU activation function: + * + * .. math:: + * h_t = relu(W_{ih} * x_t + b_{ih} + W_{hh} * h_{(t-1)} + b_{hh}) + * + * With Tanh activtion function: + * + * .. math:: + * h_t = \tanh(W_{ih} * x_t + b_{ih} + W_{hh} * h_{(t-1)} + b_{hh}) + * + * Reference paper: Finding structure in time - Elman, 1988. + * https://crl.ucsd.edu/~elman/Papers/fsit.pdf + * + * **LSTM** + * + * Long Short-Term Memory - Hochreiter, 1997. http://www.bioinf.jku.at/publications/older/2604.pdf + * + * .. 
math:: + * \begin{array}{ll} + * i_t = \mathrm{sigmoid}(W_{ii} x_t + b_{ii} + W_{hi} h_{(t-1)} + b_{hi}) \\ + * f_t = \mathrm{sigmoid}(W_{if} x_t + b_{if} + W_{hf} h_{(t-1)} + b_{hf}) \\ + * g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hc} h_{(t-1)} + b_{hg}) \\ + * o_t = \mathrm{sigmoid}(W_{io} x_t + b_{io} + W_{ho} h_{(t-1)} + b_{ho}) \\ + * c_t = f_t * c_{(t-1)} + i_t * g_t \\ + * h_t = o_t * \tanh(c_t) + * \end{array} + * + * **GRU** + * + * Gated Recurrent Unit - Cho et al. 2014. http://arxiv.org/abs/1406.1078 + * + * The definition of GRU here is slightly different from paper but compatible with CUDNN. + * + * .. math:: + * \begin{array}{ll} + * r_t = \mathrm{sigmoid}(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\ + * z_t = \mathrm{sigmoid}(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\ + * n_t = \tanh(W_{in} x_t + b_{in} + r_t * (W_{hn} h_{(t-1)}+ b_{hn})) \\ + * h_t = (1 - z_t) * n_t + z_t * h_{(t-1)} \\ + * \end{array} + * + * + * Defined in src/operator/rnn.cc:L354 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def RNN(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Performs region of interest(ROI) pooling on the input array. + * + * ROI pooling is a variant of a max pooling layer, in which the output size is fixed and + * region of interest is a parameter. Its purpose is to perform max pooling on the inputs + * of non-uniform sizes to obtain fixed-size feature maps. ROI pooling is a neural-net + * layer mostly used in training a `Fast R-CNN` network for object detection. + * + * This operator takes a 4D feature map as an input array and region proposals as `rois`, + * then it pools over sub-regions of input and produces a fixed-sized output array + * regardless of the ROI size. + * + * To crop the feature map accordingly, you can resize the bounding box coordinates + * by changing the parameters `rois` and `spatial_scale`. + * + * The cropped feature maps are pooled by standard max pooling operation to a fixed size output + * indicated by a `pooled_size` parameter. batch_size will change to the number of region + * bounding boxes after `ROIPooling`. + * + * The size of each region of interest doesn't have to be perfectly divisible by + * the number of pooling sections(`pooled_size`). + * + * Example:: + * + * x = `[ [`[ [ 0., 1., 2., 3., 4., 5.], + * [ 6., 7., 8., 9., 10., 11.], + * [ 12., 13., 14., 15., 16., 17.], + * [ 18., 19., 20., 21., 22., 23.], + * [ 24., 25., 26., 27., 28., 29.], + * [ 30., 31., 32., 33., 34., 35.], + * [ 36., 37., 38., 39., 40., 41.], + * [ 42., 43., 44., 45., 46., 47.] ] ] ] + * + * // region of interest i.e. bounding box coordinates. + * y = `[ [0,0,0,4,4] ] + * + * // returns array of shape (2,2) according to the given roi with max pooling. + * ROIPooling(x, y, (2,2), 1.0) = `[ [`[ [ 14., 16.], + * [ 26., 28.] ] ] ] + * + * // region of interest is changed due to the change in `spacial_scale` parameter. + * ROIPooling(x, y, (2,2), 0.7) = `[ [`[ [ 7., 9.], + * [ 19., 21.] ] ] ] + * + * + * + * Defined in src/operator/roi_pooling.cc:L225 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def ROIPooling(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Performs region of interest(ROI) pooling on the input array. + * + * ROI pooling is a variant of a max pooling layer, in which the output size is fixed and + * region of interest is a parameter. 
Its purpose is to perform max pooling on the inputs + * of non-uniform sizes to obtain fixed-size feature maps. ROI pooling is a neural-net + * layer mostly used in training a `Fast R-CNN` network for object detection. + * + * This operator takes a 4D feature map as an input array and region proposals as `rois`, + * then it pools over sub-regions of input and produces a fixed-sized output array + * regardless of the ROI size. + * + * To crop the feature map accordingly, you can resize the bounding box coordinates + * by changing the parameters `rois` and `spatial_scale`. + * + * The cropped feature maps are pooled by standard max pooling operation to a fixed size output + * indicated by a `pooled_size` parameter. batch_size will change to the number of region + * bounding boxes after `ROIPooling`. + * + * The size of each region of interest doesn't have to be perfectly divisible by + * the number of pooling sections(`pooled_size`). + * + * Example:: + * + * x = `[ [`[ [ 0., 1., 2., 3., 4., 5.], + * [ 6., 7., 8., 9., 10., 11.], + * [ 12., 13., 14., 15., 16., 17.], + * [ 18., 19., 20., 21., 22., 23.], + * [ 24., 25., 26., 27., 28., 29.], + * [ 30., 31., 32., 33., 34., 35.], + * [ 36., 37., 38., 39., 40., 41.], + * [ 42., 43., 44., 45., 46., 47.] ] ] ] + * + * // region of interest i.e. bounding box coordinates. + * y = `[ [0,0,0,4,4] ] + * + * // returns array of shape (2,2) according to the given roi with max pooling. + * ROIPooling(x, y, (2,2), 1.0) = `[ [`[ [ 14., 16.], + * [ 26., 28.] ] ] ] + * + * // region of interest is changed due to the change in `spacial_scale` parameter. + * ROIPooling(x, y, (2,2), 0.7) = `[ [`[ [ 7., 9.], + * [ 19., 21.] ] ] ] + * + * + * + * Defined in src/operator/roi_pooling.cc:L225 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def ROIPooling(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Reshapes the input array. + * .. note:: ``Reshape`` is deprecated, use ``reshape`` + * Given an array and a shape, this function returns a copy of the array in the new shape. + * The shape is a tuple of integers such as (2,3,4). The size of the new shape should be same as the size of the input array. + * Example:: + * reshape([1,2,3,4], shape=(2,2)) = `[ [1,2], [3,4] ] + * Some dimensions of the shape can take special values from the set {0, -1, -2, -3, -4}. The significance of each is explained below: + * - ``0`` copy this dimension from the input to the output shape. + * Example:: + * - input shape = (2,3,4), shape = (4,0,2), output shape = (4,3,2) + * - input shape = (2,3,4), shape = (2,0,0), output shape = (2,3,4) + * - ``-1`` infers the dimension of the output shape by using the remainder of the input dimensions + * keeping the size of the new array same as that of the input array. + * At most one dimension of shape can be -1. + * Example:: + * - input shape = (2,3,4), shape = (6,1,-1), output shape = (6,1,4) + * - input shape = (2,3,4), shape = (3,-1,8), output shape = (3,1,8) + * - input shape = (2,3,4), shape=(-1,), output shape = (24,) + * - ``-2`` copy all/remainder of the input dimensions to the output shape. + * Example:: + * - input shape = (2,3,4), shape = (-2,), output shape = (2,3,4) + * - input shape = (2,3,4), shape = (2,-2), output shape = (2,3,4) + * - input shape = (2,3,4), shape = (-2,1,1), output shape = (2,3,4,1,1) + * - ``-3`` use the product of two consecutive dimensions of the input shape as the output dimension. 
+ * Example:: + * - input shape = (2,3,4), shape = (-3,4), output shape = (6,4) + * - input shape = (2,3,4,5), shape = (-3,-3), output shape = (6,20) + * - input shape = (2,3,4), shape = (0,-3), output shape = (2,12) + * - input shape = (2,3,4), shape = (-3,-2), output shape = (6,4) + * - ``-4`` split one dimension of the input into two dimensions passed subsequent to -4 in shape (can contain -1). + * Example:: + * - input shape = (2,3,4), shape = (-4,1,2,-2), output shape =(1,2,3,4) + * - input shape = (2,3,4), shape = (2,-4,-1,3,-2), output shape = (2,1,3,4) + * If the argument `reverse` is set to 1, then the special values are inferred from right to left. + * Example:: + * - without reverse=1, for input shape = (10,5,4), shape = (-1,0), output shape would be (40,5) + * - with reverse=1, output shape will be (50,4). + * + * + * Defined in src/operator/tensor/matrix_op.cc:L175 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Reshape(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Reshapes the input array. + * .. note:: ``Reshape`` is deprecated, use ``reshape`` + * Given an array and a shape, this function returns a copy of the array in the new shape. + * The shape is a tuple of integers such as (2,3,4). The size of the new shape should be same as the size of the input array. + * Example:: + * reshape([1,2,3,4], shape=(2,2)) = `[ [1,2], [3,4] ] + * Some dimensions of the shape can take special values from the set {0, -1, -2, -3, -4}. The significance of each is explained below: + * - ``0`` copy this dimension from the input to the output shape. + * Example:: + * - input shape = (2,3,4), shape = (4,0,2), output shape = (4,3,2) + * - input shape = (2,3,4), shape = (2,0,0), output shape = (2,3,4) + * - ``-1`` infers the dimension of the output shape by using the remainder of the input dimensions + * keeping the size of the new array same as that of the input array. + * At most one dimension of shape can be -1. + * Example:: + * - input shape = (2,3,4), shape = (6,1,-1), output shape = (6,1,4) + * - input shape = (2,3,4), shape = (3,-1,8), output shape = (3,1,8) + * - input shape = (2,3,4), shape=(-1,), output shape = (24,) + * - ``-2`` copy all/remainder of the input dimensions to the output shape. + * Example:: + * - input shape = (2,3,4), shape = (-2,), output shape = (2,3,4) + * - input shape = (2,3,4), shape = (2,-2), output shape = (2,3,4) + * - input shape = (2,3,4), shape = (-2,1,1), output shape = (2,3,4,1,1) + * - ``-3`` use the product of two consecutive dimensions of the input shape as the output dimension. + * Example:: + * - input shape = (2,3,4), shape = (-3,4), output shape = (6,4) + * - input shape = (2,3,4,5), shape = (-3,-3), output shape = (6,20) + * - input shape = (2,3,4), shape = (0,-3), output shape = (2,12) + * - input shape = (2,3,4), shape = (-3,-2), output shape = (6,4) + * - ``-4`` split one dimension of the input into two dimensions passed subsequent to -4 in shape (can contain -1). + * Example:: + * - input shape = (2,3,4), shape = (-4,1,2,-2), output shape =(1,2,3,4) + * - input shape = (2,3,4), shape = (2,-4,-1,3,-2), output shape = (2,1,3,4) + * If the argument `reverse` is set to 1, then the special values are inferred from right to left. + * Example:: + * - without reverse=1, for input shape = (10,5,4), shape = (-1,0), output shape would be (40,5) + * - with reverse=1, output shape will be (50,4). 
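+ *
+ *   A hedged Scala sketch (assumptions, not part of this file: a concrete implementation of this
+ *   abstract class, written here as `NDArray`, and an input `x` of shape (2,3,4)):
+ *
+ *     // the target shape is a scalar parameter, so it goes through the kwargs overload above
+ *     val r = NDArray.Reshape(Map("shape" -> "(6, -1)"))(x)  // result shape: (6, 4)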
+ * + * + * Defined in src/operator/tensor/matrix_op.cc:L175 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Reshape(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes support vector machine based transformation of the input. + * + * This tutorial demonstrates using SVM as output layer for classification instead of softmax: + * https://github.com/dmlc/mxnet/tree/master/example/svm_mnist. + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def SVMOutput(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes support vector machine based transformation of the input. + * + * This tutorial demonstrates using SVM as output layer for classification instead of softmax: + * https://github.com/dmlc/mxnet/tree/master/example/svm_mnist. + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def SVMOutput(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Takes the last element of a sequence. + * + * This function takes an n-dimensional input array of the form + * [max_sequence_length, batch_size, other_feature_dims] and returns a (n-1)-dimensional array + * of the form [batch_size, other_feature_dims]. + * + * Parameter `sequence_length` is used to handle variable-length sequences. `sequence_length` should be + * an input array of positive ints of dimension [batch_size]. To use this parameter, + * set `use_sequence_length` to `True`, otherwise each example in the batch is assumed + * to have the max sequence length. + * + * .. note:: Alternatively, you can also use `take` operator. + * + * Example:: + * + * x = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.], + * [ 7., 8., 9.] ], + * + * `[ [ 10., 11., 12.], + * [ 13., 14., 15.], + * [ 16., 17., 18.] ], + * + * `[ [ 19., 20., 21.], + * [ 22., 23., 24.], + * [ 25., 26., 27.] ] ] + * + * // returns last sequence when sequence_length parameter is not used + * SequenceLast(x) = `[ [ 19., 20., 21.], + * [ 22., 23., 24.], + * [ 25., 26., 27.] ] + * + * // sequence_length is used + * SequenceLast(x, sequence_length=[1,1,1], use_sequence_length=True) = + * `[ [ 1., 2., 3.], + * [ 4., 5., 6.], + * [ 7., 8., 9.] ] + * + * // sequence_length is used + * SequenceLast(x, sequence_length=[1,2,3], use_sequence_length=True) = + * `[ [ 1., 2., 3.], + * [ 13., 14., 15.], + * [ 25., 26., 27.] ] + * + * + * + * Defined in src/operator/sequence_last.cc:L106 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def SequenceLast(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Takes the last element of a sequence. + * + * This function takes an n-dimensional input array of the form + * [max_sequence_length, batch_size, other_feature_dims] and returns a (n-1)-dimensional array + * of the form [batch_size, other_feature_dims]. + * + * Parameter `sequence_length` is used to handle variable-length sequences. `sequence_length` should be + * an input array of positive ints of dimension [batch_size]. To use this parameter, + * set `use_sequence_length` to `True`, otherwise each example in the batch is assumed + * to have the max sequence length. + * + * .. note:: Alternatively, you can also use `take` operator. + * + * Example:: + * + * x = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.], + * [ 7., 8., 9.] ], + * + * `[ [ 10., 11., 12.], + * [ 13., 14., 15.], + * [ 16., 17., 18.] ], + * + * `[ [ 19., 20., 21.], + * [ 22., 23., 24.], + * [ 25., 26., 27.] 
] ] + * + * // returns last sequence when sequence_length parameter is not used + * SequenceLast(x) = `[ [ 19., 20., 21.], + * [ 22., 23., 24.], + * [ 25., 26., 27.] ] + * + * // sequence_length is used + * SequenceLast(x, sequence_length=[1,1,1], use_sequence_length=True) = + * `[ [ 1., 2., 3.], + * [ 4., 5., 6.], + * [ 7., 8., 9.] ] + * + * // sequence_length is used + * SequenceLast(x, sequence_length=[1,2,3], use_sequence_length=True) = + * `[ [ 1., 2., 3.], + * [ 13., 14., 15.], + * [ 25., 26., 27.] ] + * + * + * + * Defined in src/operator/sequence_last.cc:L106 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def SequenceLast(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Sets all elements outside the sequence to a constant value. + * + * This function takes an n-dimensional input array of the form + * [max_sequence_length, batch_size, other_feature_dims] and returns an array of the same shape. + * + * Parameter `sequence_length` is used to handle variable-length sequences. `sequence_length` + * should be an input array of positive ints of dimension [batch_size]. + * To use this parameter, set `use_sequence_length` to `True`, + * otherwise each example in the batch is assumed to have the max sequence length and + * this operator works as the `identity` operator. + * + * Example:: + * + * x = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // Batch 1 + * B1 = `[ [ 1., 2., 3.], + * [ 7., 8., 9.], + * [ 13., 14., 15.] ] + * + * // Batch 2 + * B2 = `[ [ 4., 5., 6.], + * [ 10., 11., 12.], + * [ 16., 17., 18.] ] + * + * // works as identity operator when sequence_length parameter is not used + * SequenceMask(x) = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // sequence_length [1,1] means 1 of each batch will be kept + * // and other rows are masked with default mask value = 0 + * SequenceMask(x, sequence_length=[1,1], use_sequence_length=True) = + * `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ], + * + * `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] ] + * + * // sequence_length [2,3] means 2 of batch B1 and 3 of batch B2 will be kept + * // and other rows are masked with value = 1 + * SequenceMask(x, sequence_length=[2,3], use_sequence_length=True, value=1) = + * `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 1., 1., 1.], + * [ 16., 17., 18.] ] ] + * + * + * + * Defined in src/operator/sequence_mask.cc:L186 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def SequenceMask(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Sets all elements outside the sequence to a constant value. + * + * This function takes an n-dimensional input array of the form + * [max_sequence_length, batch_size, other_feature_dims] and returns an array of the same shape. + * + * Parameter `sequence_length` is used to handle variable-length sequences. `sequence_length` + * should be an input array of positive ints of dimension [batch_size]. + * To use this parameter, set `use_sequence_length` to `True`, + * otherwise each example in the batch is assumed to have the max sequence length and + * this operator works as the `identity` operator. 
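+ *
+ *   A hedged Scala sketch mirroring the Example below (assumptions, not part of this file: a
+ *   concrete implementation of this abstract class, written here as `NDArray`, plus pre-built
+ *   NDArrays `x` of shape (max_sequence_length, batch_size, ...) and `lengths` of shape (batch_size,)):
+ *
+ *     // data and sequence_length are positional; scalar options go through the kwargs overload above
+ *     val masked = NDArray.SequenceMask(Map("use_sequence_length" -> true, "value" -> 0f))(x, lengths)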
+ * + * Example:: + * + * x = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // Batch 1 + * B1 = `[ [ 1., 2., 3.], + * [ 7., 8., 9.], + * [ 13., 14., 15.] ] + * + * // Batch 2 + * B2 = `[ [ 4., 5., 6.], + * [ 10., 11., 12.], + * [ 16., 17., 18.] ] + * + * // works as identity operator when sequence_length parameter is not used + * SequenceMask(x) = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // sequence_length [1,1] means 1 of each batch will be kept + * // and other rows are masked with default mask value = 0 + * SequenceMask(x, sequence_length=[1,1], use_sequence_length=True) = + * `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ], + * + * `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] ] + * + * // sequence_length [2,3] means 2 of batch B1 and 3 of batch B2 will be kept + * // and other rows are masked with value = 1 + * SequenceMask(x, sequence_length=[2,3], use_sequence_length=True, value=1) = + * `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 1., 1., 1.], + * [ 16., 17., 18.] ] ] + * + * + * + * Defined in src/operator/sequence_mask.cc:L186 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def SequenceMask(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Reverses the elements of each sequence. + * + * This function takes an n-dimensional input array of the form [max_sequence_length, batch_size, other_feature_dims] + * and returns an array of the same shape. + * + * Parameter `sequence_length` is used to handle variable-length sequences. + * `sequence_length` should be an input array of positive ints of dimension [batch_size]. + * To use this parameter, set `use_sequence_length` to `True`, + * otherwise each example in the batch is assumed to have the max sequence length. + * + * Example:: + * + * x = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // Batch 1 + * B1 = `[ [ 1., 2., 3.], + * [ 7., 8., 9.], + * [ 13., 14., 15.] ] + * + * // Batch 2 + * B2 = `[ [ 4., 5., 6.], + * [ 10., 11., 12.], + * [ 16., 17., 18.] ] + * + * // returns reverse sequence when sequence_length parameter is not used + * SequenceReverse(x) = `[ `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ] ] + * + * // sequence_length [2,2] means 2 rows of + * // both batch B1 and B2 will be reversed. + * SequenceReverse(x, sequence_length=[2,2], use_sequence_length=True) = + * `[ `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // sequence_length [2,3] means 2 of batch B2 and 3 of batch B3 + * // will be reversed. + * SequenceReverse(x, sequence_length=[2,3], use_sequence_length=True) = + * `[ `[ [ 7., 8., 9.], + * [ 16., 17., 18.] ], + * + * `[ [ 1., 2., 3.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14, 15.], + * [ 4., 5., 6.] 
] ] + * + * + * + * Defined in src/operator/sequence_reverse.cc:L122 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def SequenceReverse(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Reverses the elements of each sequence. + * + * This function takes an n-dimensional input array of the form [max_sequence_length, batch_size, other_feature_dims] + * and returns an array of the same shape. + * + * Parameter `sequence_length` is used to handle variable-length sequences. + * `sequence_length` should be an input array of positive ints of dimension [batch_size]. + * To use this parameter, set `use_sequence_length` to `True`, + * otherwise each example in the batch is assumed to have the max sequence length. + * + * Example:: + * + * x = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // Batch 1 + * B1 = `[ [ 1., 2., 3.], + * [ 7., 8., 9.], + * [ 13., 14., 15.] ] + * + * // Batch 2 + * B2 = `[ [ 4., 5., 6.], + * [ 10., 11., 12.], + * [ 16., 17., 18.] ] + * + * // returns reverse sequence when sequence_length parameter is not used + * SequenceReverse(x) = `[ `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ] ] + * + * // sequence_length [2,2] means 2 rows of + * // both batch B1 and B2 will be reversed. + * SequenceReverse(x, sequence_length=[2,2], use_sequence_length=True) = + * `[ `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // sequence_length [2,3] means 2 of batch B2 and 3 of batch B3 + * // will be reversed. + * SequenceReverse(x, sequence_length=[2,3], use_sequence_length=True) = + * `[ `[ [ 7., 8., 9.], + * [ 16., 17., 18.] ], + * + * `[ [ 1., 2., 3.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14, 15.], + * [ 4., 5., 6.] ] ] + * + * + * + * Defined in src/operator/sequence_reverse.cc:L122 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def SequenceReverse(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Splits an array along a particular axis into multiple sub-arrays. + * + * .. note:: ``SliceChannel`` is deprecated. Use ``split`` instead. + * + * **Note** that `num_outputs` should evenly divide the length of the axis + * along which to split the array. + * + * Example:: + * + * x = `[ `[ [ 1.] + * [ 2.] ] + * `[ [ 3.] + * [ 4.] ] + * `[ [ 5.] + * [ 6.] ] ] + * x.shape = (3, 2, 1) + * + * y = split(x, axis=1, num_outputs=2) // a list of 2 arrays with shape (3, 1, 1) + * y = `[ `[ [ 1.] ] + * `[ [ 3.] ] + * `[ [ 5.] ] ] + * + * `[ `[ [ 2.] ] + * `[ [ 4.] ] + * `[ [ 6.] ] ] + * + * y[0].shape = (3, 1, 1) + * + * z = split(x, axis=0, num_outputs=3) // a list of 3 arrays with shape (1, 2, 1) + * z = `[ `[ [ 1.] + * [ 2.] ] ] + * + * `[ `[ [ 3.] + * [ 4.] ] ] + * + * `[ `[ [ 5.] + * [ 6.] ] ] + * + * z[0].shape = (1, 2, 1) + * + * `squeeze_axis=1` removes the axis with length 1 from the shapes of the output arrays. + * **Note** that setting `squeeze_axis` to ``1`` removes axis with length 1 only + * along the `axis` which it is split. + * Also `squeeze_axis` can be set to true only if ``input.shape[axis] == num_outputs``. + * + * Example:: + * + * z = split(x, axis=0, num_outputs=3, squeeze_axis=1) // a list of 3 arrays with shape (2, 1) + * z = `[ [ 1.] + * [ 2.] 
] + * + * `[ [ 3.] + * [ 4.] ] + * + * `[ [ 5.] + * [ 6.] ] + * z[0].shape = (2 ,1 ) + * + * + * + * Defined in src/operator/slice_channel.cc:L107 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def SliceChannel(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Splits an array along a particular axis into multiple sub-arrays. + * + * .. note:: ``SliceChannel`` is deprecated. Use ``split`` instead. + * + * **Note** that `num_outputs` should evenly divide the length of the axis + * along which to split the array. + * + * Example:: + * + * x = `[ `[ [ 1.] + * [ 2.] ] + * `[ [ 3.] + * [ 4.] ] + * `[ [ 5.] + * [ 6.] ] ] + * x.shape = (3, 2, 1) + * + * y = split(x, axis=1, num_outputs=2) // a list of 2 arrays with shape (3, 1, 1) + * y = `[ `[ [ 1.] ] + * `[ [ 3.] ] + * `[ [ 5.] ] ] + * + * `[ `[ [ 2.] ] + * `[ [ 4.] ] + * `[ [ 6.] ] ] + * + * y[0].shape = (3, 1, 1) + * + * z = split(x, axis=0, num_outputs=3) // a list of 3 arrays with shape (1, 2, 1) + * z = `[ `[ [ 1.] + * [ 2.] ] ] + * + * `[ `[ [ 3.] + * [ 4.] ] ] + * + * `[ `[ [ 5.] + * [ 6.] ] ] + * + * z[0].shape = (1, 2, 1) + * + * `squeeze_axis=1` removes the axis with length 1 from the shapes of the output arrays. + * **Note** that setting `squeeze_axis` to ``1`` removes axis with length 1 only + * along the `axis` which it is split. + * Also `squeeze_axis` can be set to true only if ``input.shape[axis] == num_outputs``. + * + * Example:: + * + * z = split(x, axis=0, num_outputs=3, squeeze_axis=1) // a list of 3 arrays with shape (2, 1) + * z = `[ [ 1.] + * [ 2.] ] + * + * `[ [ 3.] + * [ 4.] ] + * + * `[ [ 5.] + * [ 6.] ] + * z[0].shape = (2 ,1 ) + * + * + * + * Defined in src/operator/slice_channel.cc:L107 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def SliceChannel(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the gradient of cross entropy loss with respect to softmax output. + * + * - This operator computes the gradient in two steps. + * The cross entropy loss does not actually need to be computed. + * + * - Applies softmax function on the input array. + * - Computes and returns the gradient of cross entropy loss w.r.t. the softmax output. + * + * - The softmax function, cross entropy loss and gradient is given by: + * + * - Softmax Function: + * + * .. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)} + * + * - Cross Entropy Function: + * + * .. math:: \text{CE(label, output)} = - \sum_i \text{label}_i \log(\text{output}_i) + * + * - The gradient of cross entropy loss w.r.t softmax output: + * + * .. math:: \text{gradient} = \text{output} - \text{label} + * + * - During forward propagation, the softmax function is computed for each instance in the input array. + * + * For general *N*-D input arrays with shape :math:`(d_1, d_2, ..., d_n)`. The size is + * :math:`s=d_1 \cdot d_2 \cdot \cdot \cdot d_n`. We can use the parameters `preserve_shape` + * and `multi_output` to specify the way to compute softmax: + * + * - By default, `preserve_shape` is ``false``. This operator will reshape the input array + * into a 2-D array with shape :math:`(d_1, \frac{s}{d_1})` and then compute the softmax function for + * each row in the reshaped array, and afterwards reshape it back to the original shape + * :math:`(d_1, d_2, ..., d_n)`. + * - If `preserve_shape` is ``true``, the softmax function will be computed along + * the last axis (`axis` = ``-1``). 
+ * - If `multi_output` is ``true``, the softmax function will be computed along + * the second axis (`axis` = ``1``). + * + * - During backward propagation, the gradient of cross-entropy loss w.r.t softmax output array is computed. + * The provided label can be a one-hot label array or a probability label array. + * + * - If the parameter `use_ignore` is ``true``, `ignore_label` can specify input instances + * with a particular label to be ignored during backward propagation. **This has no effect when + * softmax `output` has same shape as `label`**. + * + * Example:: + * + * data = `[ [1,2,3,4],[2,2,2,2],[3,3,3,3],[4,4,4,4] ] + * label = [1,0,2,3] + * ignore_label = 1 + * SoftmaxOutput(data=data, label = label,\ + * multi_output=true, use_ignore=true,\ + * ignore_label=ignore_label) + * ## forward softmax output + * `[ [ 0.0320586 0.08714432 0.23688284 0.64391428] + * [ 0.25 0.25 0.25 0.25 ] + * [ 0.25 0.25 0.25 0.25 ] + * [ 0.25 0.25 0.25 0.25 ] ] + * ## backward gradient output + * `[ [ 0. 0. 0. 0. ] + * [-0.75 0.25 0.25 0.25] + * [ 0.25 0.25 -0.75 0.25] + * [ 0.25 0.25 0.25 -0.75] ] + * ## notice that the first row is all 0 because label[0] is 1, which is equal to ignore_label. + * + * - The parameter `grad_scale` can be used to rescale the gradient, which is often used to + * give each loss function different weights. + * + * - This operator also supports various ways to normalize the gradient by `normalization`, + * The `normalization` is applied if softmax output has different shape than the labels. + * The `normalization` mode can be set to the followings: + * + * - ``'null'``: do nothing. + * - ``'batch'``: divide the gradient by the batch size. + * - ``'valid'``: divide the gradient by the number of instances which are not ignored. + * + * + * + * Defined in src/operator/softmax_output.cc:L231 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Softmax(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the gradient of cross entropy loss with respect to softmax output. + * + * - This operator computes the gradient in two steps. + * The cross entropy loss does not actually need to be computed. + * + * - Applies softmax function on the input array. + * - Computes and returns the gradient of cross entropy loss w.r.t. the softmax output. + * + * - The softmax function, cross entropy loss and gradient is given by: + * + * - Softmax Function: + * + * .. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)} + * + * - Cross Entropy Function: + * + * .. math:: \text{CE(label, output)} = - \sum_i \text{label}_i \log(\text{output}_i) + * + * - The gradient of cross entropy loss w.r.t softmax output: + * + * .. math:: \text{gradient} = \text{output} - \text{label} + * + * - During forward propagation, the softmax function is computed for each instance in the input array. + * + * For general *N*-D input arrays with shape :math:`(d_1, d_2, ..., d_n)`. The size is + * :math:`s=d_1 \cdot d_2 \cdot \cdot \cdot d_n`. We can use the parameters `preserve_shape` + * and `multi_output` to specify the way to compute softmax: + * + * - By default, `preserve_shape` is ``false``. This operator will reshape the input array + * into a 2-D array with shape :math:`(d_1, \frac{s}{d_1})` and then compute the softmax function for + * each row in the reshaped array, and afterwards reshape it back to the original shape + * :math:`(d_1, d_2, ..., d_n)`. 
+ * - If `preserve_shape` is ``true``, the softmax function will be computed along + * the last axis (`axis` = ``-1``). + * - If `multi_output` is ``true``, the softmax function will be computed along + * the second axis (`axis` = ``1``). + * + * - During backward propagation, the gradient of cross-entropy loss w.r.t softmax output array is computed. + * The provided label can be a one-hot label array or a probability label array. + * + * - If the parameter `use_ignore` is ``true``, `ignore_label` can specify input instances + * with a particular label to be ignored during backward propagation. **This has no effect when + * softmax `output` has same shape as `label`**. + * + * Example:: + * + * data = `[ [1,2,3,4],[2,2,2,2],[3,3,3,3],[4,4,4,4] ] + * label = [1,0,2,3] + * ignore_label = 1 + * SoftmaxOutput(data=data, label = label,\ + * multi_output=true, use_ignore=true,\ + * ignore_label=ignore_label) + * ## forward softmax output + * `[ [ 0.0320586 0.08714432 0.23688284 0.64391428] + * [ 0.25 0.25 0.25 0.25 ] + * [ 0.25 0.25 0.25 0.25 ] + * [ 0.25 0.25 0.25 0.25 ] ] + * ## backward gradient output + * `[ [ 0. 0. 0. 0. ] + * [-0.75 0.25 0.25 0.25] + * [ 0.25 0.25 -0.75 0.25] + * [ 0.25 0.25 0.25 -0.75] ] + * ## notice that the first row is all 0 because label[0] is 1, which is equal to ignore_label. + * + * - The parameter `grad_scale` can be used to rescale the gradient, which is often used to + * give each loss function different weights. + * + * - This operator also supports various ways to normalize the gradient by `normalization`, + * The `normalization` is applied if softmax output has different shape than the labels. + * The `normalization` mode can be set to the followings: + * + * - ``'null'``: do nothing. + * - ``'batch'``: divide the gradient by the batch size. + * - ``'valid'``: divide the gradient by the number of instances which are not ignored. + * + * + * + * Defined in src/operator/softmax_output.cc:L231 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def Softmax(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies softmax activation to input. This is intended for internal layers. + * + * .. note:: + * + * This operator has been deprecated, please use `softmax`. + * + * If `mode` = ``instance``, this operator will compute a softmax for each instance in the batch. + * This is the default mode. + * + * If `mode` = ``channel``, this operator will compute a k-class softmax at each position + * of each instance, where `k` = ``num_channel``. This mode can only be used when the input array + * has at least 3 dimensions. + * This can be used for `fully convolutional network`, `image segmentation`, etc. + * + * Example:: + * + * >>> input_array = mx.nd.array(`[ [3., 0.5, -0.5, 2., 7.], + * >>> [2., -.4, 7., 3., 0.2] ]) + * >>> softmax_act = mx.nd.SoftmaxActivation(input_array) + * >>> print softmax_act.asnumpy() + * `[ [ 1.78322066e-02 1.46375655e-03 5.38485940e-04 6.56010211e-03 9.73605454e-01] + * [ 6.56221947e-03 5.95310994e-04 9.73919690e-01 1.78379621e-02 1.08472735e-03] ] + * + * + * + * Defined in src/operator/nn/softmax_activation.cc:L59 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def SoftmaxActivation(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies softmax activation to input. This is intended for internal layers. + * + * .. note:: + * + * This operator has been deprecated, please use `softmax`. 
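+ *
+ *   A hedged Scala sketch (kept brief since the operator is deprecated; the `NDArray` receiver and
+ *   the input `x`, which needs at least 3 dimensions for channel mode, are assumptions, not part
+ *   of this file):
+ *
+ *     val act = NDArray.SoftmaxActivation(Map("mode" -> "channel"))(x)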
+ * + * If `mode` = ``instance``, this operator will compute a softmax for each instance in the batch. + * This is the default mode. + * + * If `mode` = ``channel``, this operator will compute a k-class softmax at each position + * of each instance, where `k` = ``num_channel``. This mode can only be used when the input array + * has at least 3 dimensions. + * This can be used for `fully convolutional network`, `image segmentation`, etc. + * + * Example:: + * + * >>> input_array = mx.nd.array(`[ [3., 0.5, -0.5, 2., 7.], + * >>> [2., -.4, 7., 3., 0.2] ]) + * >>> softmax_act = mx.nd.SoftmaxActivation(input_array) + * >>> print softmax_act.asnumpy() + * `[ [ 1.78322066e-02 1.46375655e-03 5.38485940e-04 6.56010211e-03 9.73605454e-01] + * [ 6.56221947e-03 5.95310994e-04 9.73919690e-01 1.78379621e-02 1.08472735e-03] ] + * + * + * + * Defined in src/operator/nn/softmax_activation.cc:L59 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def SoftmaxActivation(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the gradient of cross entropy loss with respect to softmax output. + * + * - This operator computes the gradient in two steps. + * The cross entropy loss does not actually need to be computed. + * + * - Applies softmax function on the input array. + * - Computes and returns the gradient of cross entropy loss w.r.t. the softmax output. + * + * - The softmax function, cross entropy loss and gradient is given by: + * + * - Softmax Function: + * + * .. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)} + * + * - Cross Entropy Function: + * + * .. math:: \text{CE(label, output)} = - \sum_i \text{label}_i \log(\text{output}_i) + * + * - The gradient of cross entropy loss w.r.t softmax output: + * + * .. math:: \text{gradient} = \text{output} - \text{label} + * + * - During forward propagation, the softmax function is computed for each instance in the input array. + * + * For general *N*-D input arrays with shape :math:`(d_1, d_2, ..., d_n)`. The size is + * :math:`s=d_1 \cdot d_2 \cdot \cdot \cdot d_n`. We can use the parameters `preserve_shape` + * and `multi_output` to specify the way to compute softmax: + * + * - By default, `preserve_shape` is ``false``. This operator will reshape the input array + * into a 2-D array with shape :math:`(d_1, \frac{s}{d_1})` and then compute the softmax function for + * each row in the reshaped array, and afterwards reshape it back to the original shape + * :math:`(d_1, d_2, ..., d_n)`. + * - If `preserve_shape` is ``true``, the softmax function will be computed along + * the last axis (`axis` = ``-1``). + * - If `multi_output` is ``true``, the softmax function will be computed along + * the second axis (`axis` = ``1``). + * + * - During backward propagation, the gradient of cross-entropy loss w.r.t softmax output array is computed. + * The provided label can be a one-hot label array or a probability label array. + * + * - If the parameter `use_ignore` is ``true``, `ignore_label` can specify input instances + * with a particular label to be ignored during backward propagation. **This has no effect when + * softmax `output` has same shape as `label`**. 
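+ *
+ *   A hedged Scala sketch mirroring the Example just below (assumptions, not part of this file: a
+ *   concrete implementation of this abstract class, written here as `NDArray`, and pre-built
+ *   NDArrays `data` and `label`):
+ *
+ *     val grad = NDArray.SoftmaxOutput(Map("multi_output" -> true, "use_ignore" -> true,
+ *       "ignore_label" -> 1f))(data, label)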
+ * + * Example:: + * + * data = `[ [1,2,3,4],[2,2,2,2],[3,3,3,3],[4,4,4,4] ] + * label = [1,0,2,3] + * ignore_label = 1 + * SoftmaxOutput(data=data, label = label,\ + * multi_output=true, use_ignore=true,\ + * ignore_label=ignore_label) + * ## forward softmax output + * `[ [ 0.0320586 0.08714432 0.23688284 0.64391428] + * [ 0.25 0.25 0.25 0.25 ] + * [ 0.25 0.25 0.25 0.25 ] + * [ 0.25 0.25 0.25 0.25 ] ] + * ## backward gradient output + * `[ [ 0. 0. 0. 0. ] + * [-0.75 0.25 0.25 0.25] + * [ 0.25 0.25 -0.75 0.25] + * [ 0.25 0.25 0.25 -0.75] ] + * ## notice that the first row is all 0 because label[0] is 1, which is equal to ignore_label. + * + * - The parameter `grad_scale` can be used to rescale the gradient, which is often used to + * give each loss function different weights. + * + * - This operator also supports various ways to normalize the gradient by `normalization`, + * The `normalization` is applied if softmax output has different shape than the labels. + * The `normalization` mode can be set to the followings: + * + * - ``'null'``: do nothing. + * - ``'batch'``: divide the gradient by the batch size. + * - ``'valid'``: divide the gradient by the number of instances which are not ignored. + * + * + * + * Defined in src/operator/softmax_output.cc:L231 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def SoftmaxOutput(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the gradient of cross entropy loss with respect to softmax output. + * + * - This operator computes the gradient in two steps. + * The cross entropy loss does not actually need to be computed. + * + * - Applies softmax function on the input array. + * - Computes and returns the gradient of cross entropy loss w.r.t. the softmax output. + * + * - The softmax function, cross entropy loss and gradient is given by: + * + * - Softmax Function: + * + * .. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)} + * + * - Cross Entropy Function: + * + * .. math:: \text{CE(label, output)} = - \sum_i \text{label}_i \log(\text{output}_i) + * + * - The gradient of cross entropy loss w.r.t softmax output: + * + * .. math:: \text{gradient} = \text{output} - \text{label} + * + * - During forward propagation, the softmax function is computed for each instance in the input array. + * + * For general *N*-D input arrays with shape :math:`(d_1, d_2, ..., d_n)`. The size is + * :math:`s=d_1 \cdot d_2 \cdot \cdot \cdot d_n`. We can use the parameters `preserve_shape` + * and `multi_output` to specify the way to compute softmax: + * + * - By default, `preserve_shape` is ``false``. This operator will reshape the input array + * into a 2-D array with shape :math:`(d_1, \frac{s}{d_1})` and then compute the softmax function for + * each row in the reshaped array, and afterwards reshape it back to the original shape + * :math:`(d_1, d_2, ..., d_n)`. + * - If `preserve_shape` is ``true``, the softmax function will be computed along + * the last axis (`axis` = ``-1``). + * - If `multi_output` is ``true``, the softmax function will be computed along + * the second axis (`axis` = ``1``). + * + * - During backward propagation, the gradient of cross-entropy loss w.r.t softmax output array is computed. + * The provided label can be a one-hot label array or a probability label array. + * + * - If the parameter `use_ignore` is ``true``, `ignore_label` can specify input instances + * with a particular label to be ignored during backward propagation. 
**This has no effect when + * softmax `output` has same shape as `label`**. + * + * Example:: + * + * data = `[ [1,2,3,4],[2,2,2,2],[3,3,3,3],[4,4,4,4] ] + * label = [1,0,2,3] + * ignore_label = 1 + * SoftmaxOutput(data=data, label = label,\ + * multi_output=true, use_ignore=true,\ + * ignore_label=ignore_label) + * ## forward softmax output + * `[ [ 0.0320586 0.08714432 0.23688284 0.64391428] + * [ 0.25 0.25 0.25 0.25 ] + * [ 0.25 0.25 0.25 0.25 ] + * [ 0.25 0.25 0.25 0.25 ] ] + * ## backward gradient output + * `[ [ 0. 0. 0. 0. ] + * [-0.75 0.25 0.25 0.25] + * [ 0.25 0.25 -0.75 0.25] + * [ 0.25 0.25 0.25 -0.75] ] + * ## notice that the first row is all 0 because label[0] is 1, which is equal to ignore_label. + * + * - The parameter `grad_scale` can be used to rescale the gradient, which is often used to + * give each loss function different weights. + * + * - This operator also supports various ways to normalize the gradient by `normalization`, + * The `normalization` is applied if softmax output has different shape than the labels. + * The `normalization` mode can be set to the followings: + * + * - ``'null'``: do nothing. + * - ``'batch'``: divide the gradient by the batch size. + * - ``'valid'``: divide the gradient by the number of instances which are not ignored. + * + * + * + * Defined in src/operator/softmax_output.cc:L231 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def SoftmaxOutput(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies a spatial transformer to input feature map. + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def SpatialTransformer(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies a spatial transformer to input feature map. + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def SpatialTransformer(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Interchanges two axes of an array. + * + * Examples:: + * + * x = `[ [1, 2, 3] ]) + * swapaxes(x, 0, 1) = `[ [ 1], + * [ 2], + * [ 3] ] + * + * x = `[ `[ [ 0, 1], + * [ 2, 3] ], + * `[ [ 4, 5], + * [ 6, 7] ] ] // (2,2,2) array + * + * swapaxes(x, 0, 2) = `[ `[ [ 0, 4], + * [ 2, 6] ], + * `[ [ 1, 5], + * [ 3, 7] ] ] + * + * + * Defined in src/operator/swapaxis.cc:L70 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def SwapAxis(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Interchanges two axes of an array. + * + * Examples:: + * + * x = `[ [1, 2, 3] ]) + * swapaxes(x, 0, 1) = `[ [ 1], + * [ 2], + * [ 3] ] + * + * x = `[ `[ [ 0, 1], + * [ 2, 3] ], + * `[ [ 4, 5], + * [ 6, 7] ] ] // (2,2,2) array + * + * swapaxes(x, 0, 2) = `[ `[ [ 0, 4], + * [ 2, 6] ], + * `[ [ 1, 5], + * [ 3, 7] ] ] + * + * + * Defined in src/operator/swapaxis.cc:L70 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def SwapAxis(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Upsamples the given input data. + * + * Two algorithms (``sample_type``) are available for upsampling: + * + * - Nearest Neighbor + * - Bilinear + * + * **Nearest Neighbor Upsampling** + * + * Input data is expected to be NCHW. + * + * Example:: + * + * x = `[ [`[ [1. 1. 1.] + * [1. 1. 1.] + * [1. 1. 1.] ] ] ] + * + * UpSampling(x, scale=2, sample_type='nearest') = `[ [`[ [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] 
+ * [1. 1. 1. 1. 1. 1.] ] ] ] + * + * **Bilinear Upsampling** + * + * Uses `deconvolution` algorithm under the hood. You need provide both input data and the kernel. + * + * Input data is expected to be NCHW. + * + * `num_filter` is expected to be same as the number of channels. + * + * Example:: + * + * x = `[ [`[ [1. 1. 1.] + * [1. 1. 1.] + * [1. 1. 1.] ] ] ] + * + * w = `[ [`[ [1. 1. 1. 1.] + * [1. 1. 1. 1.] + * [1. 1. 1. 1.] + * [1. 1. 1. 1.] ] ] ] + * + * UpSampling(x, w, scale=2, sample_type='bilinear', num_filter=1) = `[ [`[ [1. 2. 2. 2. 2. 1.] + * [2. 4. 4. 4. 4. 2.] + * [2. 4. 4. 4. 4. 2.] + * [2. 4. 4. 4. 4. 2.] + * [2. 4. 4. 4. 4. 2.] + * [1. 2. 2. 2. 2. 1.] ] ] ] + * + * + * Defined in src/operator/nn/upsampling.cc:L173 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def UpSampling(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Upsamples the given input data. + * + * Two algorithms (``sample_type``) are available for upsampling: + * + * - Nearest Neighbor + * - Bilinear + * + * **Nearest Neighbor Upsampling** + * + * Input data is expected to be NCHW. + * + * Example:: + * + * x = `[ [`[ [1. 1. 1.] + * [1. 1. 1.] + * [1. 1. 1.] ] ] ] + * + * UpSampling(x, scale=2, sample_type='nearest') = `[ [`[ [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] ] ] ] + * + * **Bilinear Upsampling** + * + * Uses `deconvolution` algorithm under the hood. You need provide both input data and the kernel. + * + * Input data is expected to be NCHW. + * + * `num_filter` is expected to be same as the number of channels. + * + * Example:: + * + * x = `[ [`[ [1. 1. 1.] + * [1. 1. 1.] + * [1. 1. 1.] ] ] ] + * + * w = `[ [`[ [1. 1. 1. 1.] + * [1. 1. 1. 1.] + * [1. 1. 1. 1.] + * [1. 1. 1. 1.] ] ] ] + * + * UpSampling(x, w, scale=2, sample_type='bilinear', num_filter=1) = `[ [`[ [1. 2. 2. 2. 2. 1.] + * [2. 4. 4. 4. 4. 2.] + * [2. 4. 4. 4. 4. 2.] + * [2. 4. 4. 4. 4. 2.] + * [2. 4. 4. 4. 4. 2.] + * [1. 2. 2. 2. 2. 1.] ] ] ] + * + * + * Defined in src/operator/nn/upsampling.cc:L173 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def UpSampling(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise absolute value of the input. + * + * Example:: + * + * abs([-2, 0, 3]) = [2, 0, 3] + * + * The storage type of ``abs`` output depends upon the input storage type: + * + * - abs(default) = default + * - abs(row_sparse) = row_sparse + * - abs(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L721 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def abs(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise absolute value of the input. + * + * Example:: + * + * abs([-2, 0, 3]) = [2, 0, 3] + * + * The storage type of ``abs`` output depends upon the input storage type: + * + * - abs(default) = default + * - abs(row_sparse) = row_sparse + * - abs(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L721 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def abs(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for Adam optimizer. Adam is seen as a generalization + * of AdaGrad. 
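+ *
+ *   A hedged Scala sketch of a call corresponding to the update rule spelled out below
+ *   (assumptions, not part of this file: a concrete implementation of this abstract class,
+ *   written here as `NDArray`, and pre-built NDArrays `weight`, `grad`, `mean` and `variance`
+ *   holding the weights, gradients and the m/v moment state):
+ *
+ *     // positional inputs: weight, grad, mean (m), var (v); lr is required
+ *     val updated = NDArray.adam_update(Map("lr" -> 0.001f, "beta1" -> 0.9f,
+ *       "beta2" -> 0.999f, "epsilon" -> 1e-8))(weight, grad, mean, variance)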
+ * + * Adam update consists of the following steps, where g represents gradient and m, v + * are 1st and 2nd order moment estimates (mean and variance). + * + * .. math:: + * + * g_t = \nabla J(W_{t-1})\\ + * m_t = \beta_1 m_{t-1} + (1 - \beta_1) g_t\\ + * v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2\\ + * W_t = W_{t-1} - \alpha \frac{ m_t }{ \sqrt{ v_t } + \epsilon } + * + * It updates the weights using:: + * + * m = beta1*m + (1-beta1)*grad + * v = beta2*v + (1-beta2)*(grad**2) + * w += - learning_rate * m / (sqrt(v) + epsilon) + * + * However, if grad's storage type is ``row_sparse``, ``lazy_update`` is True and the storage + * type of weight is the same as those of m and v, + * only the row slices whose indices appear in grad.indices are updated (for w, m and v):: + * + * for row in grad.indices: + * m[row] = beta1*m[row] + (1-beta1)*grad[row] + * v[row] = beta2*v[row] + (1-beta2)*(grad[row]**2) + * w[row] += - learning_rate * m[row] / (sqrt(v[row]) + epsilon) + * + * + * + * Defined in src/operator/optimizer_op.cc:L688 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def adam_update(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for Adam optimizer. Adam is seen as a generalization + * of AdaGrad. + * + * Adam update consists of the following steps, where g represents gradient and m, v + * are 1st and 2nd order moment estimates (mean and variance). + * + * .. math:: + * + * g_t = \nabla J(W_{t-1})\\ + * m_t = \beta_1 m_{t-1} + (1 - \beta_1) g_t\\ + * v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2\\ + * W_t = W_{t-1} - \alpha \frac{ m_t }{ \sqrt{ v_t } + \epsilon } + * + * It updates the weights using:: + * + * m = beta1*m + (1-beta1)*grad + * v = beta2*v + (1-beta2)*(grad**2) + * w += - learning_rate * m / (sqrt(v) + epsilon) + * + * However, if grad's storage type is ``row_sparse``, ``lazy_update`` is True and the storage + * type of weight is the same as those of m and v, + * only the row slices whose indices appear in grad.indices are updated (for w, m and v):: + * + * for row in grad.indices: + * m[row] = beta1*m[row] + (1-beta1)*grad[row] + * v[row] = beta2*v[row] + (1-beta2)*(grad[row]**2) + * w[row] += - learning_rate * m[row] / (sqrt(v[row]) + epsilon) + * + * + * + * Defined in src/operator/optimizer_op.cc:L688 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def adam_update(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Adds all input arguments element-wise. + * + * .. math:: + * add\_n(a_1, a_2, ..., a_n) = a_1 + a_2 + ... + a_n + * + * ``add_n`` is potentially more efficient than calling ``add`` by `n` times. + * + * The storage type of ``add_n`` output depends on storage types of inputs + * + * - add_n(row_sparse, row_sparse, ..) = row_sparse + * - add_n(default, csr, default) = default + * - add_n(any input combinations longer than 4 (>4) with at least one default type) = default + * - otherwise, ``add_n`` falls all inputs back to default storage and generates default storage + * + * + * + * Defined in src/operator/tensor/elemwise_sum.cc:L155 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def add_n(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Adds all input arguments element-wise. + * + * .. math:: + * add\_n(a_1, a_2, ..., a_n) = a_1 + a_2 + ... + a_n + * + * ``add_n`` is potentially more efficient than calling ``add`` by `n` times. 
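+ *
+ *   A hedged Scala sketch of the varargs form declared below (assumptions, not part of this file:
+ *   a concrete implementation of this abstract class, written here as `NDArray`, and three
+ *   pre-built arrays `a`, `b`, `c` of identical shape):
+ *
+ *     val total = NDArray.add_n(a, b, c)  // element-wise a + b + c in a single operator call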
+ * + * The storage type of ``add_n`` output depends on storage types of inputs + * + * - add_n(row_sparse, row_sparse, ..) = row_sparse + * - add_n(default, csr, default) = default + * - add_n(any input combinations longer than 4 (>4) with at least one default type) = default + * - otherwise, ``add_n`` falls all inputs back to default storage and generates default storage + * + * + * + * Defined in src/operator/tensor/elemwise_sum.cc:L155 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def add_n(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Check if all the float numbers in the array are finite (used for AMP) + * + * + * Defined in src/operator/contrib/all_finite.cc:L101 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def all_finite(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Check if all the float numbers in the array are finite (used for AMP) + * + * + * Defined in src/operator/contrib/all_finite.cc:L101 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def all_finite(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Cast function between low precision float/FP32 used by AMP. + * + * It casts only between low precision float/FP32 and does not do anything for other types. + * + * + * Defined in src/operator/tensor/amp_cast.cc:L37 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def amp_cast(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Cast function between low precision float/FP32 used by AMP. + * + * It casts only between low precision float/FP32 and does not do anything for other types. + * + * + * Defined in src/operator/tensor/amp_cast.cc:L37 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def amp_cast(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Cast function used by AMP, that casts its inputs to the common widest type. + * + * It casts only between low precision float/FP32 and does not do anything for other types. + * + * + * + * Defined in src/operator/tensor/amp_cast.cc:L71 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def amp_multicast(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Cast function used by AMP, that casts its inputs to the common widest type. + * + * It casts only between low precision float/FP32 and does not do anything for other types. + * + * + * + * Defined in src/operator/tensor/amp_cast.cc:L71 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def amp_multicast(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise inverse cosine of the input array. + * + * The input should be in range `[-1, 1]`. + * The output is in the closed interval :math:`[0, \pi]` + * + * .. math:: + * arccos([-1, -.707, 0, .707, 1]) = [\pi, 3\pi/4, \pi/2, \pi/4, 0] + * + * The storage type of ``arccos`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L206 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def arccos(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise inverse cosine of the input array. + * + * The input should be in range `[-1, 1]`. 
+ * The output is in the closed interval :math:`[0, \pi]` + * + * .. math:: + * arccos([-1, -.707, 0, .707, 1]) = [\pi, 3\pi/4, \pi/2, \pi/4, 0] + * + * The storage type of ``arccos`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L206 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def arccos(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the element-wise inverse hyperbolic cosine of the input array, \ + * computed element-wise. + * + * The storage type of ``arccosh`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L474 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def arccosh(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the element-wise inverse hyperbolic cosine of the input array, \ + * computed element-wise. + * + * The storage type of ``arccosh`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L474 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def arccosh(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise inverse sine of the input array. + * + * The input should be in the range `[-1, 1]`. + * The output is in the closed interval of [:math:`-\pi/2`, :math:`\pi/2`]. + * + * .. math:: + * arcsin([-1, -.707, 0, .707, 1]) = [-\pi/2, -\pi/4, 0, \pi/4, \pi/2] + * + * The storage type of ``arcsin`` output depends upon the input storage type: + * + * - arcsin(default) = default + * - arcsin(row_sparse) = row_sparse + * - arcsin(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L187 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def arcsin(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise inverse sine of the input array. + * + * The input should be in the range `[-1, 1]`. + * The output is in the closed interval of [:math:`-\pi/2`, :math:`\pi/2`]. + * + * .. math:: + * arcsin([-1, -.707, 0, .707, 1]) = [-\pi/2, -\pi/4, 0, \pi/4, \pi/2] + * + * The storage type of ``arcsin`` output depends upon the input storage type: + * + * - arcsin(default) = default + * - arcsin(row_sparse) = row_sparse + * - arcsin(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L187 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def arcsin(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the element-wise inverse hyperbolic sine of the input array, \ + * computed element-wise. + * + * The storage type of ``arcsinh`` output depends upon the input storage type: + * + * - arcsinh(default) = default + * - arcsinh(row_sparse) = row_sparse + * - arcsinh(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L436 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def arcsinh(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the element-wise inverse hyperbolic sine of the input array, \ + * computed element-wise. 
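+ *
+ * Scala usage (a sketch only, assuming the positional overload takes the input
+ * NDArray directly and the first output is unwrapped with ``.head``)::
+ *
+ *   val x = NDArray.array(Array(0f, 1f, -1f), shape = Shape(3))
+ *   // inverse hyperbolic sine of each element
+ *   val y = NDArray.arcsinh(x).head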
+ * + * The storage type of ``arcsinh`` output depends upon the input storage type: + * + * - arcsinh(default) = default + * - arcsinh(row_sparse) = row_sparse + * - arcsinh(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L436 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def arcsinh(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise inverse tangent of the input array. + * + * The output is in the closed interval :math:`[-\pi/2, \pi/2]` + * + * .. math:: + * arctan([-1, 0, 1]) = [-\pi/4, 0, \pi/4] + * + * The storage type of ``arctan`` output depends upon the input storage type: + * + * - arctan(default) = default + * - arctan(row_sparse) = row_sparse + * - arctan(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L227 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def arctan(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise inverse tangent of the input array. + * + * The output is in the closed interval :math:`[-\pi/2, \pi/2]` + * + * .. math:: + * arctan([-1, 0, 1]) = [-\pi/4, 0, \pi/4] + * + * The storage type of ``arctan`` output depends upon the input storage type: + * + * - arctan(default) = default + * - arctan(row_sparse) = row_sparse + * - arctan(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L227 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def arctan(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the element-wise inverse hyperbolic tangent of the input array, \ + * computed element-wise. + * + * The storage type of ``arctanh`` output depends upon the input storage type: + * + * - arctanh(default) = default + * - arctanh(row_sparse) = row_sparse + * - arctanh(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L515 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def arctanh(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the element-wise inverse hyperbolic tangent of the input array, \ + * computed element-wise. + * + * The storage type of ``arctanh`` output depends upon the input storage type: + * + * - arctanh(default) = default + * - arctanh(row_sparse) = row_sparse + * - arctanh(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L515 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def arctanh(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns indices of the maximum values along an axis. + * + * In the case of multiple occurrences of maximum values, the indices corresponding to the first occurrence + * are returned. + * + * Examples:: + * + * x = `[ [ 0., 1., 2.], + * [ 3., 4., 5.] ] + * + * // argmax along axis 0 + * argmax(x, axis=0) = [ 1., 1., 1.] + * + * // argmax along axis 1 + * argmax(x, axis=1) = [ 2., 2.] + * + * // argmax along axis 1 keeping same dims as an input array + * argmax(x, axis=1, keepdims=True) = `[ [ 2.], + * [ 2.] 
] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L52 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def argmax(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns indices of the maximum values along an axis. + * + * In the case of multiple occurrences of maximum values, the indices corresponding to the first occurrence + * are returned. + * + * Examples:: + * + * x = `[ [ 0., 1., 2.], + * [ 3., 4., 5.] ] + * + * // argmax along axis 0 + * argmax(x, axis=0) = [ 1., 1., 1.] + * + * // argmax along axis 1 + * argmax(x, axis=1) = [ 2., 2.] + * + * // argmax along axis 1 keeping same dims as an input array + * argmax(x, axis=1, keepdims=True) = `[ [ 2.], + * [ 2.] ] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L52 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def argmax(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns argmax indices of each channel from the input array. + * + * The result will be an NDArray of shape (num_channel,). + * + * In case of multiple occurrences of the maximum values, the indices corresponding to the first occurrence + * are returned. + * + * Examples:: + * + * x = `[ [ 0., 1., 2.], + * [ 3., 4., 5.] ] + * + * argmax_channel(x) = [ 2., 2.] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L97 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def argmax_channel(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns argmax indices of each channel from the input array. + * + * The result will be an NDArray of shape (num_channel,). + * + * In case of multiple occurrences of the maximum values, the indices corresponding to the first occurrence + * are returned. + * + * Examples:: + * + * x = `[ [ 0., 1., 2.], + * [ 3., 4., 5.] ] + * + * argmax_channel(x) = [ 2., 2.] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L97 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def argmax_channel(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns indices of the minimum values along an axis. + * + * In the case of multiple occurrences of minimum values, the indices corresponding to the first occurrence + * are returned. + * + * Examples:: + * + * x = `[ [ 0., 1., 2.], + * [ 3., 4., 5.] ] + * + * // argmin along axis 0 + * argmin(x, axis=0) = [ 0., 0., 0.] + * + * // argmin along axis 1 + * argmin(x, axis=1) = [ 0., 0.] + * + * // argmin along axis 1 keeping same dims as an input array + * argmin(x, axis=1, keepdims=True) = `[ [ 0.], + * [ 0.] ] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L77 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def argmin(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns indices of the minimum values along an axis. + * + * In the case of multiple occurrences of minimum values, the indices corresponding to the first occurrence + * are returned. + * + * Examples:: + * + * x = `[ [ 0., 1., 2.], + * [ 3., 4., 5.] ] + * + * // argmin along axis 0 + * argmin(x, axis=0) = [ 0., 0., 0.] + * + * // argmin along axis 1 + * argmin(x, axis=1) = [ 0., 0.] + * + * // argmin along axis 1 keeping same dims as an input array + * argmin(x, axis=1, keepdims=True) = `[ [ 0.], + * [ 0.] 
] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L77 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def argmin(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the indices that would sort an input array along the given axis. + * + * This function performs sorting along the given axis and returns an array of indices having same shape + * as an input array that index data in sorted order. + * + * Examples:: + * + * x = `[ [ 0.3, 0.2, 0.4], + * [ 0.1, 0.3, 0.2] ] + * + * // sort along axis -1 + * argsort(x) = `[ [ 1., 0., 2.], + * [ 0., 2., 1.] ] + * + * // sort along axis 0 + * argsort(x, axis=0) = `[ [ 1., 0., 1.] + * [ 0., 1., 0.] ] + * + * // flatten and then sort + * argsort(x, axis=None) = [ 3., 1., 5., 0., 4., 2.] + * + * + * Defined in src/operator/tensor/ordering_op.cc:L183 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def argsort(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the indices that would sort an input array along the given axis. + * + * This function performs sorting along the given axis and returns an array of indices having same shape + * as an input array that index data in sorted order. + * + * Examples:: + * + * x = `[ [ 0.3, 0.2, 0.4], + * [ 0.1, 0.3, 0.2] ] + * + * // sort along axis -1 + * argsort(x) = `[ [ 1., 0., 2.], + * [ 0., 2., 1.] ] + * + * // sort along axis 0 + * argsort(x, axis=0) = `[ [ 1., 0., 1.] + * [ 0., 1., 0.] ] + * + * // flatten and then sort + * argsort(x, axis=None) = [ 3., 1., 5., 0., 4., 2.] + * + * + * Defined in src/operator/tensor/ordering_op.cc:L183 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def argsort(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Batchwise dot product. + * + * ``batch_dot`` is used to compute dot product of ``x`` and ``y`` when ``x`` and + * ``y`` are data in batch, namely N-D (N >= 3) arrays in shape of `(B0, ..., B_i, :, :)`. + * + * For example, given ``x`` with shape `(B_0, ..., B_i, N, M)` and ``y`` with shape + * `(B_0, ..., B_i, M, K)`, the result array will have shape `(B_0, ..., B_i, N, K)`, + * which is computed by:: + * + * batch_dot(x,y)[b_0, ..., b_i, :, :] = dot(x[b_0, ..., b_i, :, :], y[b_0, ..., b_i, :, :]) + * + * + * + * Defined in src/operator/tensor/dot.cc:L127 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def batch_dot(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Batchwise dot product. + * + * ``batch_dot`` is used to compute dot product of ``x`` and ``y`` when ``x`` and + * ``y`` are data in batch, namely N-D (N >= 3) arrays in shape of `(B0, ..., B_i, :, :)`. + * + * For example, given ``x`` with shape `(B_0, ..., B_i, N, M)` and ``y`` with shape + * `(B_0, ..., B_i, M, K)`, the result array will have shape `(B_0, ..., B_i, N, K)`, + * which is computed by:: + * + * batch_dot(x,y)[b_0, ..., b_i, :, :] = dot(x[b_0, ..., b_i, :, :], y[b_0, ..., b_i, :, :]) + * + * + * + * Defined in src/operator/tensor/dot.cc:L127 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def batch_dot(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Takes elements from a data batch. + * + * .. note:: + * `batch_take` is deprecated. Use `pick` instead. 
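+ *
+ * Scala usage (a sketch only, assuming the positional overload takes the data batch
+ * first and the index NDArray second, mirroring the example below)::
+ *
+ *   val x   = NDArray.array(Array(1f, 2f, 3f, 4f, 5f, 6f), shape = Shape(3, 2))
+ *   val idx = NDArray.array(Array(0f, 1f, 0f), shape = Shape(3))
+ *   // expected result: [ 1., 4., 5.]
+ *   val out = NDArray.batch_take(x, idx).head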
+ * + * Given an input array of shape ``(d0, d1)`` and indices of shape ``(i0,)``, the result will be + * an output array of shape ``(i0,)`` with:: + * + * output[i] = input[i, indices[i] ] + * + * Examples:: + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] ] + * + * // takes elements with specified indices + * batch_take(x, [0,1,0]) = [ 1. 4. 5.] + * + * + * + * Defined in src/operator/tensor/indexing_op.cc:L777 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def batch_take(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Takes elements from a data batch. + * + * .. note:: + * `batch_take` is deprecated. Use `pick` instead. + * + * Given an input array of shape ``(d0, d1)`` and indices of shape ``(i0,)``, the result will be + * an output array of shape ``(i0,)`` with:: + * + * output[i] = input[i, indices[i] ] + * + * Examples:: + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] ] + * + * // takes elements with specified indices + * batch_take(x, [0,1,0]) = [ 1. 4. 5.] + * + * + * + * Defined in src/operator/tensor/indexing_op.cc:L777 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def batch_take(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise sum of the input arrays with broadcasting. + * + * `broadcast_plus` is an alias to the function `broadcast_add`. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_add(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] + * + * broadcast_plus(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] + * + * Supported sparse operations: + * + * broadcast_add(csr, dense(1D)) = dense + * broadcast_add(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L58 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_add(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise sum of the input arrays with broadcasting. + * + * `broadcast_plus` is an alias to the function `broadcast_add`. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_add(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] + * + * broadcast_plus(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] + * + * Supported sparse operations: + * + * broadcast_add(csr, dense(1D)) = dense + * broadcast_add(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L58 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_add(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Broadcasts the input array over particular axes. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. + * + * `broadcast_axes` is an alias to the function `broadcast_axis`. + * + * Example:: + * + * // given x of shape (1,2,1) + * x = `[ `[ [ 1.], + * [ 2.] ] ] + * + * // broadcast x on on axis 2 + * broadcast_axis(x, axis=2, size=3) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] ] + * // broadcast x on on axes 0 and 2 + * broadcast_axis(x, axis=(0,2), size=(2,3)) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ], + * `[ [ 1., 1., 1.], + * [ 2., 2., 2.] 
] ] + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L58 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_axes(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Broadcasts the input array over particular axes. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. + * + * `broadcast_axes` is an alias to the function `broadcast_axis`. + * + * Example:: + * + * // given x of shape (1,2,1) + * x = `[ `[ [ 1.], + * [ 2.] ] ] + * + * // broadcast x on on axis 2 + * broadcast_axis(x, axis=2, size=3) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] ] + * // broadcast x on on axes 0 and 2 + * broadcast_axis(x, axis=(0,2), size=(2,3)) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ], + * `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] ] + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L58 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_axes(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Broadcasts the input array over particular axes. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. + * + * `broadcast_axes` is an alias to the function `broadcast_axis`. + * + * Example:: + * + * // given x of shape (1,2,1) + * x = `[ `[ [ 1.], + * [ 2.] ] ] + * + * // broadcast x on on axis 2 + * broadcast_axis(x, axis=2, size=3) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] ] + * // broadcast x on on axes 0 and 2 + * broadcast_axis(x, axis=(0,2), size=(2,3)) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ], + * `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] ] + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L58 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_axis(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Broadcasts the input array over particular axes. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. + * + * `broadcast_axes` is an alias to the function `broadcast_axis`. + * + * Example:: + * + * // given x of shape (1,2,1) + * x = `[ `[ [ 1.], + * [ 2.] ] ] + * + * // broadcast x on on axis 2 + * broadcast_axis(x, axis=2, size=3) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] ] + * // broadcast x on on axes 0 and 2 + * broadcast_axis(x, axis=(0,2), size=(2,3)) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ], + * `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] ] + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L58 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_axis(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise division of the input arrays with broadcasting. + * + * Example:: + * + * x = `[ [ 6., 6., 6.], + * [ 6., 6., 6.] ] + * + * y = `[ [ 2.], + * [ 3.] ] + * + * broadcast_div(x, y) = `[ [ 3., 3., 3.], + * [ 2., 2., 2.] 
] + * + * Supported sparse operations: + * + * broadcast_div(csr, dense(1D)) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L187 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_div(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise division of the input arrays with broadcasting. + * + * Example:: + * + * x = `[ [ 6., 6., 6.], + * [ 6., 6., 6.] ] + * + * y = `[ [ 2.], + * [ 3.] ] + * + * broadcast_div(x, y) = `[ [ 3., 3., 3.], + * [ 2., 2., 2.] ] + * + * Supported sparse operations: + * + * broadcast_div(csr, dense(1D)) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L187 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_div(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the result of element-wise **equal to** (==) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_equal(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L46 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_equal(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the result of element-wise **equal to** (==) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_equal(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L46 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_equal(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the result of element-wise **greater than** (>) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_greater(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L82 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_greater(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the result of element-wise **greater than** (>) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_greater(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L82 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_greater(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the result of element-wise **greater than or equal to** (>=) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_greater_equal(x, y) = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] 
] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L100 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_greater_equal(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the result of element-wise **greater than or equal to** (>=) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_greater_equal(x, y) = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L100 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_greater_equal(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the hypotenuse of a right angled triangle, given its "legs" + * with broadcasting. + * + * It is equivalent to doing :math:`sqrt(x_1^2 + x_2^2)`. + * + * Example:: + * + * x = `[ [ 3., 3., 3.] ] + * + * y = `[ [ 4.], + * [ 4.] ] + * + * broadcast_hypot(x, y) = `[ [ 5., 5., 5.], + * [ 5., 5., 5.] ] + * + * z = `[ [ 0.], + * [ 4.] ] + * + * broadcast_hypot(x, z) = `[ [ 3., 3., 3.], + * [ 5., 5., 5.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L158 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_hypot(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the hypotenuse of a right angled triangle, given its "legs" + * with broadcasting. + * + * It is equivalent to doing :math:`sqrt(x_1^2 + x_2^2)`. + * + * Example:: + * + * x = `[ [ 3., 3., 3.] ] + * + * y = `[ [ 4.], + * [ 4.] ] + * + * broadcast_hypot(x, y) = `[ [ 5., 5., 5.], + * [ 5., 5., 5.] ] + * + * z = `[ [ 0.], + * [ 4.] ] + * + * broadcast_hypot(x, z) = `[ [ 3., 3., 3.], + * [ 5., 5., 5.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L158 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_hypot(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the result of element-wise **lesser than** (<) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_lesser(x, y) = `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L118 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_lesser(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the result of element-wise **lesser than** (<) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_lesser(x, y) = `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L118 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_lesser(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the result of element-wise **lesser than or equal to** (<=) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] 
] + * + * broadcast_lesser_equal(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L136 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_lesser_equal(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the result of element-wise **lesser than or equal to** (<=) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_lesser_equal(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L136 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_lesser_equal(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Broadcasts lhs to have the same shape as rhs. + * + * Broadcasting is a mechanism that allows NDArrays to perform arithmetic operations + * with arrays of different shapes efficiently without creating multiple copies of arrays. + * Also see, `Broadcasting `_ for more explanation. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. + * + * For example:: + * + * broadcast_like(`[ [1,2,3] ], `[ [5,6,7],[7,8,9] ]) = `[ [ 1., 2., 3.], + * [ 1., 2., 3.] ]) + * + * broadcast_like([9], [1,2,3,4,5], lhs_axes=(0,), rhs_axes=(-1,)) = [9,9,9,9,9] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L135 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_like(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Broadcasts lhs to have the same shape as rhs. + * + * Broadcasting is a mechanism that allows NDArrays to perform arithmetic operations + * with arrays of different shapes efficiently without creating multiple copies of arrays. + * Also see, `Broadcasting `_ for more explanation. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. + * + * For example:: + * + * broadcast_like(`[ [1,2,3] ], `[ [5,6,7],[7,8,9] ]) = `[ [ 1., 2., 3.], + * [ 1., 2., 3.] ]) + * + * broadcast_like([9], [1,2,3,4,5], lhs_axes=(0,), rhs_axes=(-1,)) = [9,9,9,9,9] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L135 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_like(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the result of element-wise **logical and** with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_logical_and(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L154 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_logical_and(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the result of element-wise **logical and** with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_logical_and(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] 
] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L154 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_logical_and(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the result of element-wise **logical or** with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 0.], + * [ 1., 1., 0.] ] + * + * y = `[ [ 1.], + * [ 0.] ] + * + * broadcast_logical_or(x, y) = `[ [ 1., 1., 1.], + * [ 1., 1., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L172 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_logical_or(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the result of element-wise **logical or** with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 0.], + * [ 1., 1., 0.] ] + * + * y = `[ [ 1.], + * [ 0.] ] + * + * broadcast_logical_or(x, y) = `[ [ 1., 1., 1.], + * [ 1., 1., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L172 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_logical_or(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the result of element-wise **logical xor** with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 0.], + * [ 1., 1., 0.] ] + * + * y = `[ [ 1.], + * [ 0.] ] + * + * broadcast_logical_xor(x, y) = `[ [ 0., 0., 1.], + * [ 1., 1., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L190 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_logical_xor(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the result of element-wise **logical xor** with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 0.], + * [ 1., 1., 0.] ] + * + * y = `[ [ 1.], + * [ 0.] ] + * + * broadcast_logical_xor(x, y) = `[ [ 0., 0., 1.], + * [ 1., 1., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L190 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_logical_xor(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise maximum of the input arrays with broadcasting. + * + * This function compares two input arrays and returns a new array having the element-wise maxima. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_maximum(x, y) = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L81 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_maximum(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise maximum of the input arrays with broadcasting. + * + * This function compares two input arrays and returns a new array having the element-wise maxima. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_maximum(x, y) = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] 
] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L81 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_maximum(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise minimum of the input arrays with broadcasting. + * + * This function compares two input arrays and returns a new array having the element-wise minima. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_maximum(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L117 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_minimum(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise minimum of the input arrays with broadcasting. + * + * This function compares two input arrays and returns a new array having the element-wise minima. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_maximum(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L117 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_minimum(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise difference of the input arrays with broadcasting. + * + * `broadcast_minus` is an alias to the function `broadcast_sub`. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_sub(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * broadcast_minus(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * Supported sparse operations: + * + * broadcast_sub/minus(csr, dense(1D)) = dense + * broadcast_sub/minus(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L106 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_minus(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise difference of the input arrays with broadcasting. + * + * `broadcast_minus` is an alias to the function `broadcast_sub`. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_sub(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * broadcast_minus(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * Supported sparse operations: + * + * broadcast_sub/minus(csr, dense(1D)) = dense + * broadcast_sub/minus(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L106 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_minus(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise modulo of the input arrays with broadcasting. + * + * Example:: + * + * x = `[ [ 8., 8., 8.], + * [ 8., 8., 8.] ] + * + * y = `[ [ 2.], + * [ 3.] ] + * + * broadcast_mod(x, y) = `[ [ 0., 0., 0.], + * [ 2., 2., 2.] 
] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L222 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_mod(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise modulo of the input arrays with broadcasting. + * + * Example:: + * + * x = `[ [ 8., 8., 8.], + * [ 8., 8., 8.] ] + * + * y = `[ [ 2.], + * [ 3.] ] + * + * broadcast_mod(x, y) = `[ [ 0., 0., 0.], + * [ 2., 2., 2.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L222 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_mod(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise product of the input arrays with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_mul(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * Supported sparse operations: + * + * broadcast_mul(csr, dense(1D)) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L146 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_mul(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise product of the input arrays with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_mul(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * Supported sparse operations: + * + * broadcast_mul(csr, dense(1D)) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L146 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_mul(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the result of element-wise **not equal to** (!=) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_not_equal(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L64 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_not_equal(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the result of element-wise **not equal to** (!=) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_not_equal(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L64 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_not_equal(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise sum of the input arrays with broadcasting. + * + * `broadcast_plus` is an alias to the function `broadcast_add`. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_add(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] + * + * broadcast_plus(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] 
] + * + * Supported sparse operations: + * + * broadcast_add(csr, dense(1D)) = dense + * broadcast_add(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L58 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_plus(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise sum of the input arrays with broadcasting. + * + * `broadcast_plus` is an alias to the function `broadcast_add`. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_add(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] + * + * broadcast_plus(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] + * + * Supported sparse operations: + * + * broadcast_add(csr, dense(1D)) = dense + * broadcast_add(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L58 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_plus(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns result of first array elements raised to powers from second array, element-wise with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_power(x, y) = `[ [ 2., 2., 2.], + * [ 4., 4., 4.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L45 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_power(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns result of first array elements raised to powers from second array, element-wise with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_power(x, y) = `[ [ 2., 2., 2.], + * [ 4., 4., 4.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L45 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_power(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise difference of the input arrays with broadcasting. + * + * `broadcast_minus` is an alias to the function `broadcast_sub`. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_sub(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * broadcast_minus(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * Supported sparse operations: + * + * broadcast_sub/minus(csr, dense(1D)) = dense + * broadcast_sub/minus(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L106 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_sub(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise difference of the input arrays with broadcasting. + * + * `broadcast_minus` is an alias to the function `broadcast_sub`. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_sub(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * broadcast_minus(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] 
] + * + * Supported sparse operations: + * + * broadcast_sub/minus(csr, dense(1D)) = dense + * broadcast_sub/minus(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L106 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_sub(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Broadcasts the input array to a new shape. + * + * Broadcasting is a mechanism that allows NDArrays to perform arithmetic operations + * with arrays of different shapes efficiently without creating multiple copies of arrays. + * Also see, `Broadcasting `_ for more explanation. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. + * + * For example:: + * + * broadcast_to(`[ [1,2,3] ], shape=(2,3)) = `[ [ 1., 2., 3.], + * [ 1., 2., 3.] ]) + * + * The dimension which you do not want to change can also be kept as `0` which means copy the original value. + * So with `shape=(2,0)`, we will obtain the same result as in the above example. + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L82 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_to(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Broadcasts the input array to a new shape. + * + * Broadcasting is a mechanism that allows NDArrays to perform arithmetic operations + * with arrays of different shapes efficiently without creating multiple copies of arrays. + * Also see, `Broadcasting `_ for more explanation. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. + * + * For example:: + * + * broadcast_to(`[ [1,2,3] ], shape=(2,3)) = `[ [ 1., 2., 3.], + * [ 1., 2., 3.] ]) + * + * The dimension which you do not want to change can also be kept as `0` which means copy the original value. + * So with `shape=(2,0)`, we will obtain the same result as in the above example. + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L82 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def broadcast_to(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Casts all elements of the input to a new type. + * + * .. note:: ``Cast`` is deprecated. Use ``cast`` instead. + * + * Example:: + * + * cast([0.9, 1.3], dtype='int32') = [0, 1] + * cast([1e20, 11.1], dtype='float16') = [inf, 11.09375] + * cast([300, 11.1, 10.9, -1, -3], dtype='uint8') = [44, 11, 10, 255, 253] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L665 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def cast(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Casts all elements of the input to a new type. + * + * .. note:: ``Cast`` is deprecated. Use ``cast`` instead. + * + * Example:: + * + * cast([0.9, 1.3], dtype='int32') = [0, 1] + * cast([1e20, 11.1], dtype='float16') = [inf, 11.09375] + * cast([300, 11.1, 10.9, -1, -3], dtype='uint8') = [44, 11, 10, 255, 253] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L665 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def cast(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Casts tensor storage type to the new type. 
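+ *
+ * Scala usage (a sketch only, assuming the target storage type is passed through the
+ * kwargs overload as the ``stype`` parameter with the input NDArray as the positional
+ * argument; the storage-type rules are spelled out below)::
+ *
+ *   val dense = NDArray.array(Array(0f, 1f, 0f, 2f, 0f, 3f), shape = Shape(2, 3))
+ *   // cast the dense input to compressed sparse row storage
+ *   val csr = NDArray.cast_storage(Map("stype" -> "csr"))(dense).head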
+ * + * When an NDArray with default storage type is cast to csr or row_sparse storage, + * the result is compact, which means: + * + * - for csr, zero values will not be retained + * - for row_sparse, row slices of all zeros will not be retained + * + * The storage type of ``cast_storage`` output depends on stype parameter: + * + * - cast_storage(csr, 'default') = default + * - cast_storage(row_sparse, 'default') = default + * - cast_storage(default, 'csr') = csr + * - cast_storage(default, 'row_sparse') = row_sparse + * - cast_storage(csr, 'csr') = csr + * - cast_storage(row_sparse, 'row_sparse') = row_sparse + * + * Example:: + * + * dense = `[ [ 0., 1., 0.], + * [ 2., 0., 3.], + * [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * + * # cast to row_sparse storage type + * rsp = cast_storage(dense, 'row_sparse') + * rsp.indices = [0, 1] + * rsp.values = `[ [ 0., 1., 0.], + * [ 2., 0., 3.] ] + * + * # cast to csr storage type + * csr = cast_storage(dense, 'csr') + * csr.indices = [1, 0, 2] + * csr.values = [ 1., 2., 3.] + * csr.indptr = [0, 1, 3, 3, 3] + * + * + * + * Defined in src/operator/tensor/cast_storage.cc:L71 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def cast_storage(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Casts tensor storage type to the new type. + * + * When an NDArray with default storage type is cast to csr or row_sparse storage, + * the result is compact, which means: + * + * - for csr, zero values will not be retained + * - for row_sparse, row slices of all zeros will not be retained + * + * The storage type of ``cast_storage`` output depends on stype parameter: + * + * - cast_storage(csr, 'default') = default + * - cast_storage(row_sparse, 'default') = default + * - cast_storage(default, 'csr') = csr + * - cast_storage(default, 'row_sparse') = row_sparse + * - cast_storage(csr, 'csr') = csr + * - cast_storage(row_sparse, 'row_sparse') = row_sparse + * + * Example:: + * + * dense = `[ [ 0., 1., 0.], + * [ 2., 0., 3.], + * [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * + * # cast to row_sparse storage type + * rsp = cast_storage(dense, 'row_sparse') + * rsp.indices = [0, 1] + * rsp.values = `[ [ 0., 1., 0.], + * [ 2., 0., 3.] ] + * + * # cast to csr storage type + * csr = cast_storage(dense, 'csr') + * csr.indices = [1, 0, 2] + * csr.values = [ 1., 2., 3.] + * csr.indptr = [0, 1, 3, 3, 3] + * + * + * + * Defined in src/operator/tensor/cast_storage.cc:L71 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def cast_storage(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise cube-root value of the input. + * + * .. math:: + * cbrt(x) = \sqrt[3]{x} + * + * Example:: + * + * cbrt([1, 8, -125]) = [1, 2, -5] + * + * The storage type of ``cbrt`` output depends upon the input storage type: + * + * - cbrt(default) = default + * - cbrt(row_sparse) = row_sparse + * - cbrt(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L216 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def cbrt(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise cube-root value of the input. + * + * .. 
math:: + * cbrt(x) = \sqrt[3]{x} + * + * Example:: + * + * cbrt([1, 8, -125]) = [1, 2, -5] + * + * The storage type of ``cbrt`` output depends upon the input storage type: + * + * - cbrt(default) = default + * - cbrt(row_sparse) = row_sparse + * - cbrt(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L216 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def cbrt(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise ceiling of the input. + * + * The ceil of the scalar x is the smallest integer i, such that i >= x. + * + * Example:: + * + * ceil([-2.1, -1.9, 1.5, 1.9, 2.1]) = [-2., -1., 2., 2., 3.] + * + * The storage type of ``ceil`` output depends upon the input storage type: + * + * - ceil(default) = default + * - ceil(row_sparse) = row_sparse + * - ceil(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L818 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def ceil(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise ceiling of the input. + * + * The ceil of the scalar x is the smallest integer i, such that i >= x. + * + * Example:: + * + * ceil([-2.1, -1.9, 1.5, 1.9, 2.1]) = [-2., -1., 2., 2., 3.] + * + * The storage type of ``ceil`` output depends upon the input storage type: + * + * - ceil(default) = default + * - ceil(row_sparse) = row_sparse + * - ceil(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L818 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def ceil(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Picks elements from an input array according to the input indices along the given axis. + * + * Given an input array of shape ``(d0, d1)`` and indices of shape ``(i0,)``, the result will be + * an output array of shape ``(i0,)`` with:: + * + * output[i] = input[i, indices[i] ] + * + * By default, if any index mentioned is too large, it is replaced by the index that addresses + * the last element along an axis (the `clip` mode). + * + * This function supports n-dimensional input and (n-1)-dimensional indices arrays. + * + * Examples:: + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] ] + * + * // picks elements with specified indices along axis 0 + * pick(x, y=[0,1], 0) = [ 1., 4.] + * + * // picks elements with specified indices along axis 1 + * pick(x, y=[0,1,0], 1) = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] ] + * + * // picks elements with specified indices along axis 1 using 'wrap' mode + * // to place indicies that would normally be out of bounds + * pick(x, y=[2,-1,-2], 1, mode='wrap') = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] ] + * + * // picks elements with specified indices along axis 1 and dims are maintained + * pick(x,y, 1, keepdims=True) = `[ [ 2.], + * [ 3.], + * [ 6.] ] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L155 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def choose_element_0index(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Picks elements from an input array according to the input indices along the given axis. 
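+ *
+ * Scala usage (a sketch only, assuming ``axis`` is passed through the kwargs overload
+ * with the data and index NDArrays as positional arguments)::
+ *
+ *   val x = NDArray.array(Array(1f, 2f, 3f, 4f, 5f, 6f), shape = Shape(3, 2))
+ *   val y = NDArray.array(Array(0f, 1f, 0f), shape = Shape(3))
+ *   // picks one element per row along axis 1; expected result: [ 1., 4., 5.]
+ *   val picked = NDArray.choose_element_0index(Map("axis" -> 1))(x, y).head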
+ * + * Given an input array of shape ``(d0, d1)`` and indices of shape ``(i0,)``, the result will be + * an output array of shape ``(i0,)`` with:: + * + * output[i] = input[i, indices[i] ] + * + * By default, if any index mentioned is too large, it is replaced by the index that addresses + * the last element along an axis (the `clip` mode). + * + * This function supports n-dimensional input and (n-1)-dimensional indices arrays. + * + * Examples:: + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] ] + * + * // picks elements with specified indices along axis 0 + * pick(x, y=[0,1], 0) = [ 1., 4.] + * + * // picks elements with specified indices along axis 1 + * pick(x, y=[0,1,0], 1) = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] ] + * + * // picks elements with specified indices along axis 1 using 'wrap' mode + * // to place indicies that would normally be out of bounds + * pick(x, y=[2,-1,-2], 1, mode='wrap') = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] ] + * + * // picks elements with specified indices along axis 1 and dims are maintained + * pick(x,y, 1, keepdims=True) = `[ [ 2.], + * [ 3.], + * [ 6.] ] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L155 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def choose_element_0index(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Clips (limits) the values in an array. + * Given an interval, values outside the interval are clipped to the interval edges. + * Clipping ``x`` between `a_min` and `a_max` would be:: + * .. math:: + * clip(x, a_min, a_max) = \max(\min(x, a_max), a_min)) + * Example:: + * x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + * clip(x,1,8) = [ 1., 1., 2., 3., 4., 5., 6., 7., 8., 8.] + * The storage type of ``clip`` output depends on storage types of inputs and the a_min, a_max \ + * parameter values: + * - clip(default) = default + * - clip(row_sparse, a_min <= 0, a_max >= 0) = row_sparse + * - clip(csr, a_min <= 0, a_max >= 0) = csr + * - clip(row_sparse, a_min < 0, a_max < 0) = default + * - clip(row_sparse, a_min > 0, a_max > 0) = default + * - clip(csr, a_min < 0, a_max < 0) = csr + * - clip(csr, a_min > 0, a_max > 0) = csr + * + * + * Defined in src/operator/tensor/matrix_op.cc:L677 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def clip(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Clips (limits) the values in an array. + * Given an interval, values outside the interval are clipped to the interval edges. + * Clipping ``x`` between `a_min` and `a_max` would be:: + * .. math:: + * clip(x, a_min, a_max) = \max(\min(x, a_max), a_min)) + * Example:: + * x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + * clip(x,1,8) = [ 1., 1., 2., 3., 4., 5., 6., 7., 8., 8.] + * The storage type of ``clip`` output depends on storage types of inputs and the a_min, a_max \ + * parameter values: + * - clip(default) = default + * - clip(row_sparse, a_min <= 0, a_max >= 0) = row_sparse + * - clip(csr, a_min <= 0, a_max >= 0) = csr + * - clip(row_sparse, a_min < 0, a_max < 0) = default + * - clip(row_sparse, a_min > 0, a_max > 0) = default + * - clip(csr, a_min < 0, a_max < 0) = csr + * - clip(csr, a_min > 0, a_max > 0) = csr + * + * + * Defined in src/operator/tensor/matrix_op.cc:L677 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def clip(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Joins input arrays along a given axis. + * + * .. 
note:: `Concat` is deprecated. Use `concat` instead. + * + * The dimensions of the input arrays should be the same except the axis along + * which they will be concatenated. + * The dimension of the output array along the concatenated axis will be equal + * to the sum of the corresponding dimensions of the input arrays. + * + * The storage type of ``concat`` output depends on storage types of inputs + * + * - concat(csr, csr, ..., csr, dim=0) = csr + * - otherwise, ``concat`` generates output with default storage + * + * Example:: + * + * x = `[ [1,1],[2,2] ] + * y = `[ [3,3],[4,4],[5,5] ] + * z = `[ [6,6], [7,7],[8,8] ] + * + * concat(x,y,z,dim=0) = `[ [ 1., 1.], + * [ 2., 2.], + * [ 3., 3.], + * [ 4., 4.], + * [ 5., 5.], + * [ 6., 6.], + * [ 7., 7.], + * [ 8., 8.] ] + * + * Note that you cannot concat x,y,z along dimension 1 since dimension + * 0 is not the same for all the input arrays. + * + * concat(y,z,dim=1) = `[ [ 3., 3., 6., 6.], + * [ 4., 4., 7., 7.], + * [ 5., 5., 8., 8.] ] + * + * + * + * Defined in src/operator/nn/concat.cc:L383 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def concat(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Joins input arrays along a given axis. + * + * .. note:: `Concat` is deprecated. Use `concat` instead. + * + * The dimensions of the input arrays should be the same except the axis along + * which they will be concatenated. + * The dimension of the output array along the concatenated axis will be equal + * to the sum of the corresponding dimensions of the input arrays. + * + * The storage type of ``concat`` output depends on storage types of inputs + * + * - concat(csr, csr, ..., csr, dim=0) = csr + * - otherwise, ``concat`` generates output with default storage + * + * Example:: + * + * x = `[ [1,1],[2,2] ] + * y = `[ [3,3],[4,4],[5,5] ] + * z = `[ [6,6], [7,7],[8,8] ] + * + * concat(x,y,z,dim=0) = `[ [ 1., 1.], + * [ 2., 2.], + * [ 3., 3.], + * [ 4., 4.], + * [ 5., 5.], + * [ 6., 6.], + * [ 7., 7.], + * [ 8., 8.] ] + * + * Note that you cannot concat x,y,z along dimension 1 since dimension + * 0 is not the same for all the input arrays. + * + * concat(y,z,dim=1) = `[ [ 3., 3., 6., 6.], + * [ 4., 4., 7., 7.], + * [ 5., 5., 8., 8.] ] + * + * + * + * Defined in src/operator/nn/concat.cc:L383 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def concat(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the element-wise cosine of the input array. + * + * The input should be in radians (:math:`2\pi` rad equals 360 degrees). + * + * .. math:: + * cos([0, \pi/4, \pi/2]) = [1, 0.707, 0] + * + * The storage type of ``cos`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L90 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def cos(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the element-wise cosine of the input array. + * + * The input should be in radians (:math:`2\pi` rad equals 360 degrees). + * + * .. 
math:: + * cos([0, \pi/4, \pi/2]) = [1, 0.707, 0] + * + * The storage type of ``cos`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L90 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def cos(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the hyperbolic cosine of the input array, computed element-wise. + * + * .. math:: + * cosh(x) = 0.5\times(exp(x) + exp(-x)) + * + * The storage type of ``cosh`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L351 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def cosh(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the hyperbolic cosine of the input array, computed element-wise. + * + * .. math:: + * cosh(x) = 0.5\times(exp(x) + exp(-x)) + * + * The storage type of ``cosh`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L351 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def cosh(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Slices a region of the array. + * .. note:: ``crop`` is deprecated. Use ``slice`` instead. + * This function returns a sliced array between the indices given + * by `begin` and `end` with the corresponding `step`. + * For an input array of ``shape=(d_0, d_1, ..., d_n-1)``, + * slice operation with ``begin=(b_0, b_1...b_m-1)``, + * ``end=(e_0, e_1, ..., e_m-1)``, and ``step=(s_0, s_1, ..., s_m-1)``, + * where m <= n, results in an array with the shape + * ``(|e_0-b_0|/|s_0|, ..., |e_m-1-b_m-1|/|s_m-1|, d_m, ..., d_n-1)``. + * The resulting array's *k*-th dimension contains elements + * from the *k*-th dimension of the input array starting + * from index ``b_k`` (inclusive) with step ``s_k`` + * until reaching ``e_k`` (exclusive). + * If the *k*-th elements are `None` in the sequence of `begin`, `end`, + * and `step`, the following rule will be used to set default values. + * If `s_k` is `None`, set `s_k=1`. If `s_k > 0`, set `b_k=0`, `e_k=d_k`; + * else, set `b_k=d_k-1`, `e_k=-1`. + * The storage type of ``slice`` output depends on storage types of inputs + * - slice(csr) = csr + * - otherwise, ``slice`` generates output with default storage + * .. note:: When input data storage type is csr, it only supports + * step=(), or step=(None,), or step=(1,) to generate a csr output. + * For other step parameter values, it falls back to slicing + * a dense tensor. + * Example:: + * x = `[ [ 1., 2., 3., 4.], + * [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice(x, begin=(0,1), end=(2,4)) = `[ [ 2., 3., 4.], + * [ 6., 7., 8.] ] + * slice(x, begin=(None, 0), end=(None, 3), step=(-1, 2)) = `[ [9., 11.], + * [5., 7.], + * [1., 3.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L482 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def crop(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Slices a region of the array. + * .. note:: ``crop`` is deprecated. Use ``slice`` instead. + * This function returns a sliced array between the indices given + * by `begin` and `end` with the corresponding `step`. 
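+ *
+ * A plain-Scala sketch of the per-axis rule described below (illustration only; ``slice1d`` is a
+ * hypothetical helper covering a single axis with a positive step)::
+ *
+ *   // keep elements from `begin` (inclusive) to `end` (exclusive) with stride `step`
+ *   def slice1d(a: Array[Double], begin: Int, end: Int, step: Int = 1): Array[Double] =
+ *     (begin until end by step).map(i => a(i)).toArray
+ *
+ *   slice1d(Array(1.0, 2.0, 3.0, 4.0), begin = 1, end = 4)   // => Array(2.0, 3.0, 4.0)
+ *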
+ * For an input array of ``shape=(d_0, d_1, ..., d_n-1)``, + * slice operation with ``begin=(b_0, b_1...b_m-1)``, + * ``end=(e_0, e_1, ..., e_m-1)``, and ``step=(s_0, s_1, ..., s_m-1)``, + * where m <= n, results in an array with the shape + * ``(|e_0-b_0|/|s_0|, ..., |e_m-1-b_m-1|/|s_m-1|, d_m, ..., d_n-1)``. + * The resulting array's *k*-th dimension contains elements + * from the *k*-th dimension of the input array starting + * from index ``b_k`` (inclusive) with step ``s_k`` + * until reaching ``e_k`` (exclusive). + * If the *k*-th elements are `None` in the sequence of `begin`, `end`, + * and `step`, the following rule will be used to set default values. + * If `s_k` is `None`, set `s_k=1`. If `s_k > 0`, set `b_k=0`, `e_k=d_k`; + * else, set `b_k=d_k-1`, `e_k=-1`. + * The storage type of ``slice`` output depends on storage types of inputs + * - slice(csr) = csr + * - otherwise, ``slice`` generates output with default storage + * .. note:: When input data storage type is csr, it only supports + * step=(), or step=(None,), or step=(1,) to generate a csr output. + * For other step parameter values, it falls back to slicing + * a dense tensor. + * Example:: + * x = `[ [ 1., 2., 3., 4.], + * [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice(x, begin=(0,1), end=(2,4)) = `[ [ 2., 3., 4.], + * [ 6., 7., 8.] ] + * slice(x, begin=(None, 0), end=(None, 3), step=(-1, 2)) = `[ [9., 11.], + * [5., 7.], + * [1., 3.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L482 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def crop(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Connectionist Temporal Classification Loss. + * + * .. note:: The existing alias ``contrib_CTCLoss`` is deprecated. + * + * The shapes of the inputs and outputs: + * + * - **data**: `(sequence_length, batch_size, alphabet_size)` + * - **label**: `(batch_size, label_sequence_length)` + * - **out**: `(batch_size)` + * + * The `data` tensor consists of sequences of activation vectors (without applying softmax), + * with i-th channel in the last dimension corresponding to i-th label + * for i between 0 and alphabet_size-1 (i.e always 0-indexed). + * Alphabet size should include one additional value reserved for blank label. + * When `blank_label` is ``"first"``, the ``0``-th channel is be reserved for + * activation of blank label, or otherwise if it is "last", ``(alphabet_size-1)``-th channel should be + * reserved for blank label. + * + * ``label`` is an index matrix of integers. When `blank_label` is ``"first"``, + * the value 0 is then reserved for blank label, and should not be passed in this matrix. Otherwise, + * when `blank_label` is ``"last"``, the value `(alphabet_size-1)` is reserved for blank label. + * + * If a sequence of labels is shorter than *label_sequence_length*, use the special + * padding value at the end of the sequence to conform it to the correct + * length. The padding value is `0` when `blank_label` is ``"first"``, and `-1` otherwise. + * + * For example, suppose the vocabulary is `[a, b, c]`, and in one batch we have three sequences + * 'ba', 'cbb', and 'abac'. When `blank_label` is ``"first"``, we can index the labels as + * `{'a': 1, 'b': 2, 'c': 3}`, and we reserve the 0-th channel for blank label in data tensor. 
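+ *
+ * A plain-Scala sketch of that encoding and padding (illustration only; ``encodeLabels`` is a
+ * hypothetical helper, with 0 used as the padding value because `blank_label` is "first")::
+ *
+ *   // 'a' -> 1, 'b' -> 2, 'c' -> 3; shorter sequences are padded with 0 up to maxLen
+ *   def encodeLabels(seqs: Seq[String], maxLen: Int): Seq[Seq[Int]] =
+ *     seqs.map(s => s.map(_ - 'a' + 1).padTo(maxLen, 0))
+ *
+ *   encodeLabels(Seq("ba", "cbb", "abac"), maxLen = 4)
+ *   // => Seq(Seq(2, 1, 0, 0), Seq(3, 2, 2, 0), Seq(1, 2, 1, 3))
+ *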
+ * The resulting `label` tensor should be padded to be:: + * + * `[ [2, 1, 0, 0], [3, 2, 2, 0], [1, 2, 1, 3] ] + * + * When `blank_label` is ``"last"``, we can index the labels as + * `{'a': 0, 'b': 1, 'c': 2}`, and we reserve the channel index 3 for blank label in data tensor. + * The resulting `label` tensor should be padded to be:: + * + * `[ [1, 0, -1, -1], [2, 1, 1, -1], [0, 1, 0, 2] ] + * + * ``out`` is a list of CTC loss values, one per example in the batch. + * + * See *Connectionist Temporal Classification: Labelling Unsegmented + * Sequence Data with Recurrent Neural Networks*, A. Graves *et al*. for more + * information on the definition and the algorithm. + * + * + * + * Defined in src/operator/nn/ctc_loss.cc:L100 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def ctc_loss(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Connectionist Temporal Classification Loss. + * + * .. note:: The existing alias ``contrib_CTCLoss`` is deprecated. + * + * The shapes of the inputs and outputs: + * + * - **data**: `(sequence_length, batch_size, alphabet_size)` + * - **label**: `(batch_size, label_sequence_length)` + * - **out**: `(batch_size)` + * + * The `data` tensor consists of sequences of activation vectors (without applying softmax), + * with i-th channel in the last dimension corresponding to i-th label + * for i between 0 and alphabet_size-1 (i.e always 0-indexed). + * Alphabet size should include one additional value reserved for blank label. + * When `blank_label` is ``"first"``, the ``0``-th channel is be reserved for + * activation of blank label, or otherwise if it is "last", ``(alphabet_size-1)``-th channel should be + * reserved for blank label. + * + * ``label`` is an index matrix of integers. When `blank_label` is ``"first"``, + * the value 0 is then reserved for blank label, and should not be passed in this matrix. Otherwise, + * when `blank_label` is ``"last"``, the value `(alphabet_size-1)` is reserved for blank label. + * + * If a sequence of labels is shorter than *label_sequence_length*, use the special + * padding value at the end of the sequence to conform it to the correct + * length. The padding value is `0` when `blank_label` is ``"first"``, and `-1` otherwise. + * + * For example, suppose the vocabulary is `[a, b, c]`, and in one batch we have three sequences + * 'ba', 'cbb', and 'abac'. When `blank_label` is ``"first"``, we can index the labels as + * `{'a': 1, 'b': 2, 'c': 3}`, and we reserve the 0-th channel for blank label in data tensor. + * The resulting `label` tensor should be padded to be:: + * + * `[ [2, 1, 0, 0], [3, 2, 2, 0], [1, 2, 1, 3] ] + * + * When `blank_label` is ``"last"``, we can index the labels as + * `{'a': 0, 'b': 1, 'c': 2}`, and we reserve the channel index 3 for blank label in data tensor. + * The resulting `label` tensor should be padded to be:: + * + * `[ [1, 0, -1, -1], [2, 1, 1, -1], [0, 1, 0, 2] ] + * + * ``out`` is a list of CTC loss values, one per example in the batch. + * + * See *Connectionist Temporal Classification: Labelling Unsegmented + * Sequence Data with Recurrent Neural Networks*, A. Graves *et al*. for more + * information on the definition and the algorithm. + * + * + * + * Defined in src/operator/nn/ctc_loss.cc:L100 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def ctc_loss(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Return the cumulative sum of the elements along a given axis. 
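+ *
+ * For a 1-D input this is the usual prefix sum; a plain-Scala equivalent (illustration only)::
+ *
+ *   val x = Array(1.0, 2.0, 3.0, 4.0)
+ *   val prefix = x.scanLeft(0.0)(_ + _).tail   // => Array(1.0, 3.0, 6.0, 10.0)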
+ * + * Defined in src/operator/numpy/np_cumsum.cc:L70 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def cumsum(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Return the cumulative sum of the elements along a given axis. + * + * Defined in src/operator/numpy/np_cumsum.cc:L70 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def cumsum(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Converts each element of the input array from radians to degrees. + * + * .. math:: + * degrees([0, \pi/2, \pi, 3\pi/2, 2\pi]) = [0, 90, 180, 270, 360] + * + * The storage type of ``degrees`` output depends upon the input storage type: + * + * - degrees(default) = default + * - degrees(row_sparse) = row_sparse + * - degrees(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L274 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def degrees(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Converts each element of the input array from radians to degrees. + * + * .. math:: + * degrees([0, \pi/2, \pi, 3\pi/2, 2\pi]) = [0, 90, 180, 270, 360] + * + * The storage type of ``degrees`` output depends upon the input storage type: + * + * - degrees(default) = default + * - degrees(row_sparse) = row_sparse + * - degrees(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L274 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def degrees(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Rearranges(permutes) data from depth into blocks of spatial data. + * Similar to ONNX DepthToSpace operator: + * https://github.com/onnx/onnx/blob/master/docs/Operators.md#DepthToSpace. + * The output is a new tensor where the values from depth dimension are moved in spatial blocks + * to height and width dimension. The reverse of this operation is ``space_to_depth``. + * .. math:: + * \begin{gather*} + * x \prime = reshape(x, [N, block\_size, block\_size, C / (block\_size ^ 2), H * block\_size, W * block\_size]) \\ + * x \prime \prime = transpose(x \prime, [0, 3, 4, 1, 5, 2]) \\ + * y = reshape(x \prime \prime, [N, C / (block\_size ^ 2), H * block\_size, W * block\_size]) + * \end{gather*} + * where :math:`x` is an input tensor with default layout as :math:`[N, C, H, W]`: [batch, channels, height, width] + * and :math:`y` is the output tensor of layout :math:`[N, C / (block\_size ^ 2), H * block\_size, W * block\_size]` + * Example:: + * x = `[ [`[ [0, 1, 2], + * [3, 4, 5] ], + * `[ [6, 7, 8], + * [9, 10, 11] ], + * `[ [12, 13, 14], + * [15, 16, 17] ], + * `[ [18, 19, 20], + * [21, 22, 23] ] ] ] + * depth_to_space(x, 2) = `[ [`[ [0, 6, 1, 7, 2, 8], + * [12, 18, 13, 19, 14, 20], + * [3, 9, 4, 10, 5, 11], + * [15, 21, 16, 22, 17, 23] ] ] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L972 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def depth_to_space(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Rearranges(permutes) data from depth into blocks of spatial data. + * Similar to ONNX DepthToSpace operator: + * https://github.com/onnx/onnx/blob/master/docs/Operators.md#DepthToSpace. + * The output is a new tensor where the values from depth dimension are moved in spatial blocks + * to height and width dimension. 
The reverse of this operation is ``space_to_depth``. + * .. math:: + * \begin{gather*} + * x \prime = reshape(x, [N, block\_size, block\_size, C / (block\_size ^ 2), H * block\_size, W * block\_size]) \\ + * x \prime \prime = transpose(x \prime, [0, 3, 4, 1, 5, 2]) \\ + * y = reshape(x \prime \prime, [N, C / (block\_size ^ 2), H * block\_size, W * block\_size]) + * \end{gather*} + * where :math:`x` is an input tensor with default layout as :math:`[N, C, H, W]`: [batch, channels, height, width] + * and :math:`y` is the output tensor of layout :math:`[N, C / (block\_size ^ 2), H * block\_size, W * block\_size]` + * Example:: + * x = `[ [`[ [0, 1, 2], + * [3, 4, 5] ], + * `[ [6, 7, 8], + * [9, 10, 11] ], + * `[ [12, 13, 14], + * [15, 16, 17] ], + * `[ [18, 19, 20], + * [21, 22, 23] ] ] ] + * depth_to_space(x, 2) = `[ [`[ [0, 6, 1, 7, 2, 8], + * [12, 18, 13, 19, 14, 20], + * [3, 9, 4, 10, 5, 11], + * [15, 21, 16, 22, 17, 23] ] ] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L972 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def depth_to_space(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Extracts a diagonal or constructs a diagonal array. + * + * ``diag``'s behavior depends on the input array dimensions: + * + * - 1-D arrays: constructs a 2-D array with the input as its diagonal, all other elements are zero. + * - N-D arrays: extracts the diagonals of the sub-arrays with axes specified by ``axis1`` and ``axis2``. + * The output shape would be decided by removing the axes numbered ``axis1`` and ``axis2`` from the + * input shape and appending to the result a new axis with the size of the diagonals in question. + * + * For example, when the input shape is `(2, 3, 4, 5)`, ``axis1`` and ``axis2`` are 0 and 2 + * respectively and ``k`` is 0, the resulting shape would be `(3, 5, 2)`. + * + * Examples:: + * + * x = `[ [1, 2, 3], + * [4, 5, 6] ] + * + * diag(x) = [1, 5] + * + * diag(x, k=1) = [2, 6] + * + * diag(x, k=-1) = [4] + * + * x = [1, 2, 3] + * + * diag(x) = `[ [1, 0, 0], + * [0, 2, 0], + * [0, 0, 3] ] + * + * diag(x, k=1) = `[ [0, 1, 0], + * [0, 0, 2], + * [0, 0, 0] ] + * + * diag(x, k=-1) = `[ [0, 0, 0], + * [1, 0, 0], + * [0, 2, 0] ] + * + * x = `[ `[ [1, 2], + * [3, 4] ], + * + * `[ [5, 6], + * [7, 8] ] ] + * + * diag(x) = `[ [1, 7], + * [2, 8] ] + * + * diag(x, k=1) = `[ [3], + * [4] ] + * + * diag(x, axis1=-2, axis2=-1) = `[ [1, 4], + * [5, 8] ] + * + * + * + * Defined in src/operator/tensor/diag_op.cc:L87 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def diag(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Extracts a diagonal or constructs a diagonal array. + * + * ``diag``'s behavior depends on the input array dimensions: + * + * - 1-D arrays: constructs a 2-D array with the input as its diagonal, all other elements are zero. + * - N-D arrays: extracts the diagonals of the sub-arrays with axes specified by ``axis1`` and ``axis2``. + * The output shape would be decided by removing the axes numbered ``axis1`` and ``axis2`` from the + * input shape and appending to the result a new axis with the size of the diagonals in question. + * + * For example, when the input shape is `(2, 3, 4, 5)`, ``axis1`` and ``axis2`` are 0 and 2 + * respectively and ``k`` is 0, the resulting shape would be `(3, 5, 2)`. 
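+ *
+ * A plain-Scala sketch of the 2-D case, extracting the ``k``-th diagonal (illustration only;
+ * ``diag2d`` is a hypothetical helper and does not model the N-D `axis1`/`axis2` behaviour)::
+ *
+ *   def diag2d(a: Array[Array[Int]], k: Int = 0): Array[Int] =
+ *     a.indices.collect { case i if i + k >= 0 && i + k < a(i).length => a(i)(i + k) }.toArray
+ *
+ *   diag2d(Array(Array(1, 2, 3), Array(4, 5, 6)))          // => Array(1, 5)
+ *   diag2d(Array(Array(1, 2, 3), Array(4, 5, 6)), k = 1)   // => Array(2, 6)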
+ * + * Examples:: + * + * x = `[ [1, 2, 3], + * [4, 5, 6] ] + * + * diag(x) = [1, 5] + * + * diag(x, k=1) = [2, 6] + * + * diag(x, k=-1) = [4] + * + * x = [1, 2, 3] + * + * diag(x) = `[ [1, 0, 0], + * [0, 2, 0], + * [0, 0, 3] ] + * + * diag(x, k=1) = `[ [0, 1, 0], + * [0, 0, 2], + * [0, 0, 0] ] + * + * diag(x, k=-1) = `[ [0, 0, 0], + * [1, 0, 0], + * [0, 2, 0] ] + * + * x = `[ `[ [1, 2], + * [3, 4] ], + * + * `[ [5, 6], + * [7, 8] ] ] + * + * diag(x) = `[ [1, 7], + * [2, 8] ] + * + * diag(x, k=1) = `[ [3], + * [4] ] + * + * diag(x, axis1=-2, axis2=-1) = `[ [1, 4], + * [5, 8] ] + * + * + * + * Defined in src/operator/tensor/diag_op.cc:L87 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def diag(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Dot product of two arrays. + * + * ``dot``'s behavior depends on the input array dimensions: + * + * - 1-D arrays: inner product of vectors + * - 2-D arrays: matrix multiplication + * - N-D arrays: a sum product over the last axis of the first input and the first + * axis of the second input + * + * For example, given 3-D ``x`` with shape `(n,m,k)` and ``y`` with shape `(k,r,s)`, the + * result array will have shape `(n,m,r,s)`. It is computed by:: + * + * dot(x,y)[i,j,a,b] = sum(x[i,j,:]*y[:,a,b]) + * + * Example:: + * + * x = reshape([0,1,2,3,4,5,6,7], shape=(2,2,2)) + * y = reshape([7,6,5,4,3,2,1,0], shape=(2,2,2)) + * dot(x,y)[0,0,1,1] = 0 + * sum(x[0,0,:]*y[:,1,1]) = 0 + * + * The storage type of ``dot`` output depends on storage types of inputs, transpose option and + * forward_stype option for output storage type. Implemented sparse operations include: + * + * - dot(default, default, transpose_a=True/False, transpose_b=True/False) = default + * - dot(csr, default, transpose_a=True) = default + * - dot(csr, default, transpose_a=True) = row_sparse + * - dot(csr, default) = default + * - dot(csr, row_sparse) = default + * - dot(default, csr) = csr (CPU only) + * - dot(default, csr, forward_stype='default') = default + * - dot(default, csr, transpose_b=True, forward_stype='default') = default + * + * If the combination of input storage types and forward_stype does not match any of the + * above patterns, ``dot`` will fallback and generate output with default storage. + * + * .. Note:: + * + * If the storage type of the lhs is "csr", the storage type of gradient w.r.t rhs will be + * "row_sparse". Only a subset of optimizers support sparse gradients, including SGD, AdaGrad + * and Adam. Note that by default lazy updates is turned on, which may perform differently + * from standard updates. For more details, please check the Optimization API at: + * https://mxnet.incubator.apache.org/api/python/optimization/optimization.html + * + * + * + * Defined in src/operator/tensor/dot.cc:L77 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def dot(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Dot product of two arrays. + * + * ``dot``'s behavior depends on the input array dimensions: + * + * - 1-D arrays: inner product of vectors + * - 2-D arrays: matrix multiplication + * - N-D arrays: a sum product over the last axis of the first input and the first + * axis of the second input + * + * For example, given 3-D ``x`` with shape `(n,m,k)` and ``y`` with shape `(k,r,s)`, the + * result array will have shape `(n,m,r,s)`. 
It is computed by:: + * + * dot(x,y)[i,j,a,b] = sum(x[i,j,:]*y[:,a,b]) + * + * Example:: + * + * x = reshape([0,1,2,3,4,5,6,7], shape=(2,2,2)) + * y = reshape([7,6,5,4,3,2,1,0], shape=(2,2,2)) + * dot(x,y)[0,0,1,1] = 0 + * sum(x[0,0,:]*y[:,1,1]) = 0 + * + * The storage type of ``dot`` output depends on storage types of inputs, transpose option and + * forward_stype option for output storage type. Implemented sparse operations include: + * + * - dot(default, default, transpose_a=True/False, transpose_b=True/False) = default + * - dot(csr, default, transpose_a=True) = default + * - dot(csr, default, transpose_a=True) = row_sparse + * - dot(csr, default) = default + * - dot(csr, row_sparse) = default + * - dot(default, csr) = csr (CPU only) + * - dot(default, csr, forward_stype='default') = default + * - dot(default, csr, transpose_b=True, forward_stype='default') = default + * + * If the combination of input storage types and forward_stype does not match any of the + * above patterns, ``dot`` will fallback and generate output with default storage. + * + * .. Note:: + * + * If the storage type of the lhs is "csr", the storage type of gradient w.r.t rhs will be + * "row_sparse". Only a subset of optimizers support sparse gradients, including SGD, AdaGrad + * and Adam. Note that by default lazy updates is turned on, which may perform differently + * from standard updates. For more details, please check the Optimization API at: + * https://mxnet.incubator.apache.org/api/python/optimization/optimization.html + * + * + * + * Defined in src/operator/tensor/dot.cc:L77 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def dot(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Adds arguments element-wise. + * + * The storage type of ``elemwise_add`` output depends on storage types of inputs + * + * - elemwise_add(row_sparse, row_sparse) = row_sparse + * - elemwise_add(csr, csr) = csr + * - elemwise_add(default, csr) = default + * - elemwise_add(csr, default) = default + * - elemwise_add(default, rsp) = default + * - elemwise_add(rsp, default) = default + * - otherwise, ``elemwise_add`` generates output with default storage + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def elemwise_add(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Adds arguments element-wise. + * + * The storage type of ``elemwise_add`` output depends on storage types of inputs + * + * - elemwise_add(row_sparse, row_sparse) = row_sparse + * - elemwise_add(csr, csr) = csr + * - elemwise_add(default, csr) = default + * - elemwise_add(csr, default) = default + * - elemwise_add(default, rsp) = default + * - elemwise_add(rsp, default) = default + * - otherwise, ``elemwise_add`` generates output with default storage + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def elemwise_add(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Divides arguments element-wise. + * + * The storage type of ``elemwise_div`` output is always dense + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def elemwise_div(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Divides arguments element-wise. 
+ * + * The storage type of ``elemwise_div`` output is always dense + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def elemwise_div(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Multiplies arguments element-wise. + * + * The storage type of ``elemwise_mul`` output depends on storage types of inputs + * + * - elemwise_mul(default, default) = default + * - elemwise_mul(row_sparse, row_sparse) = row_sparse + * - elemwise_mul(default, row_sparse) = row_sparse + * - elemwise_mul(row_sparse, default) = row_sparse + * - elemwise_mul(csr, csr) = csr + * - otherwise, ``elemwise_mul`` generates output with default storage + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def elemwise_mul(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Multiplies arguments element-wise. + * + * The storage type of ``elemwise_mul`` output depends on storage types of inputs + * + * - elemwise_mul(default, default) = default + * - elemwise_mul(row_sparse, row_sparse) = row_sparse + * - elemwise_mul(default, row_sparse) = row_sparse + * - elemwise_mul(row_sparse, default) = row_sparse + * - elemwise_mul(csr, csr) = csr + * - otherwise, ``elemwise_mul`` generates output with default storage + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def elemwise_mul(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Subtracts arguments element-wise. + * + * The storage type of ``elemwise_sub`` output depends on storage types of inputs + * + * - elemwise_sub(row_sparse, row_sparse) = row_sparse + * - elemwise_sub(csr, csr) = csr + * - elemwise_sub(default, csr) = default + * - elemwise_sub(csr, default) = default + * - elemwise_sub(default, rsp) = default + * - elemwise_sub(rsp, default) = default + * - otherwise, ``elemwise_sub`` generates output with default storage + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def elemwise_sub(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Subtracts arguments element-wise. + * + * The storage type of ``elemwise_sub`` output depends on storage types of inputs + * + * - elemwise_sub(row_sparse, row_sparse) = row_sparse + * - elemwise_sub(csr, csr) = csr + * - elemwise_sub(default, csr) = default + * - elemwise_sub(csr, default) = default + * - elemwise_sub(default, rsp) = default + * - elemwise_sub(rsp, default) = default + * - otherwise, ``elemwise_sub`` generates output with default storage + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def elemwise_sub(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise gauss error function of the input. + * + * Example:: + * + * erf([0, -1., 10.]) = [0., -0.8427, 1.] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L886 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def erf(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise gauss error function of the input. + * + * Example:: + * + * erf([0, -1., 10.]) = [0., -0.8427, 1.] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L886 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def erf(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise inverse gauss error function of the input. 
+ * + * Example:: + * + * erfinv([0, 0.5., -1.]) = [0., 0.4769, -inf] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L907 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def erfinv(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise inverse gauss error function of the input. + * + * Example:: + * + * erfinv([0, 0.5., -1.]) = [0., 0.4769, -inf] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L907 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def erfinv(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise exponential value of the input. + * + * .. math:: + * exp(x) = e^x \approx 2.718^x + * + * Example:: + * + * exp([0, 1, 2]) = [1., 2.71828175, 7.38905621] + * + * The storage type of ``exp`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L63 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def exp(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise exponential value of the input. + * + * .. math:: + * exp(x) = e^x \approx 2.718^x + * + * Example:: + * + * exp([0, 1, 2]) = [1., 2.71828175, 7.38905621] + * + * The storage type of ``exp`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L63 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def exp(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Inserts a new axis of size 1 into the array shape + * For example, given ``x`` with shape ``(2,3,4)``, then ``expand_dims(x, axis=1)`` + * will return a new array with shape ``(2,1,3,4)``. + * + * + * Defined in src/operator/tensor/matrix_op.cc:L395 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def expand_dims(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Inserts a new axis of size 1 into the array shape + * For example, given ``x`` with shape ``(2,3,4)``, then ``expand_dims(x, axis=1)`` + * will return a new array with shape ``(2,1,3,4)``. + * + * + * Defined in src/operator/tensor/matrix_op.cc:L395 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def expand_dims(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns ``exp(x) - 1`` computed element-wise on the input. + * + * This function provides greater precision than ``exp(x) - 1`` for small values of ``x``. + * + * The storage type of ``expm1`` output depends upon the input storage type: + * + * - expm1(default) = default + * - expm1(row_sparse) = row_sparse + * - expm1(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L224 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def expm1(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns ``exp(x) - 1`` computed element-wise on the input. + * + * This function provides greater precision than ``exp(x) - 1`` for small values of ``x``. 
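+ *
+ * A quick plain-Scala check of that precision claim, using the JVM's ``math.expm1`` (illustration only)::
+ *
+ *   val x = 1e-12
+ *   math.exp(x) - 1.0   // => roughly 1.00009e-12, accuracy lost to cancellation near 1.0
+ *   math.expm1(x)       // => roughly 1.00000e-12, accurate to machine precision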
+ * + * The storage type of ``expm1`` output depends upon the input storage type: + * + * - expm1(default) = default + * - expm1(row_sparse) = row_sparse + * - expm1(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L224 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def expm1(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Fill one element of each line(row for python, column for R/Julia) in lhs according to index indicated by rhs and values indicated by mhs. This function assume rhs uses 0-based index. + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def fill_element_0index(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Fill one element of each line(row for python, column for R/Julia) in lhs according to index indicated by rhs and values indicated by mhs. This function assume rhs uses 0-based index. + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def fill_element_0index(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise rounded value to the nearest \ + * integer towards zero of the input. + * + * Example:: + * + * fix([-2.1, -1.9, 1.9, 2.1]) = [-2., -1., 1., 2.] + * + * The storage type of ``fix`` output depends upon the input storage type: + * + * - fix(default) = default + * - fix(row_sparse) = row_sparse + * - fix(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L875 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def fix(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise rounded value to the nearest \ + * integer towards zero of the input. + * + * Example:: + * + * fix([-2.1, -1.9, 1.9, 2.1]) = [-2., -1., 1., 2.] + * + * The storage type of ``fix`` output depends upon the input storage type: + * + * - fix(default) = default + * - fix(row_sparse) = row_sparse + * - fix(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L875 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def fix(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Flattens the input array into a 2-D array by collapsing the higher dimensions. + * .. note:: `Flatten` is deprecated. Use `flatten` instead. + * For an input array with shape ``(d1, d2, ..., dk)``, `flatten` operation reshapes + * the input array into an output array of shape ``(d1, d2*...*dk)``. + * Note that the behavior of this function is different from numpy.ndarray.flatten, + * which behaves similar to mxnet.ndarray.reshape((-1,)). + * Example:: + * x = `[ [ + * [1,2,3], + * [4,5,6], + * [7,8,9] + * ], + * [ [1,2,3], + * [4,5,6], + * [7,8,9] + * ] ], + * flatten(x) = `[ [ 1., 2., 3., 4., 5., 6., 7., 8., 9.], + * [ 1., 2., 3., 4., 5., 6., 7., 8., 9.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L250 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def flatten(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Flattens the input array into a 2-D array by collapsing the higher dimensions. + * .. note:: `Flatten` is deprecated. Use `flatten` instead. + * For an input array with shape ``(d1, d2, ..., dk)``, `flatten` operation reshapes + * the input array into an output array of shape ``(d1, d2*...*dk)``. 
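+ *
+ * In other words, the leading dimension is kept and all remaining dimensions are collapsed into
+ * one; a plain-Scala sketch of the shape computation (illustration only)::
+ *
+ *   def flattenedShape(shape: Seq[Int]): Seq[Int] = Seq(shape.head, shape.tail.product)
+ *
+ *   flattenedShape(Seq(2, 3, 3))   // => Seq(2, 9), matching the example further below
+ *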
+ * Note that the behavior of this function is different from numpy.ndarray.flatten, + * which behaves similar to mxnet.ndarray.reshape((-1,)). + * Example:: + * x = `[ [ + * [1,2,3], + * [4,5,6], + * [7,8,9] + * ], + * [ [1,2,3], + * [4,5,6], + * [7,8,9] + * ] ], + * flatten(x) = `[ [ 1., 2., 3., 4., 5., 6., 7., 8., 9.], + * [ 1., 2., 3., 4., 5., 6., 7., 8., 9.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L250 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def flatten(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Reverses the order of elements along given axis while preserving array shape. + * Note: reverse and flip are equivalent. We use reverse in the following examples. + * Examples:: + * x = `[ [ 0., 1., 2., 3., 4.], + * [ 5., 6., 7., 8., 9.] ] + * reverse(x, axis=0) = `[ [ 5., 6., 7., 8., 9.], + * [ 0., 1., 2., 3., 4.] ] + * reverse(x, axis=1) = `[ [ 4., 3., 2., 1., 0.], + * [ 9., 8., 7., 6., 5.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L832 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def flip(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Reverses the order of elements along given axis while preserving array shape. + * Note: reverse and flip are equivalent. We use reverse in the following examples. + * Examples:: + * x = `[ [ 0., 1., 2., 3., 4.], + * [ 5., 6., 7., 8., 9.] ] + * reverse(x, axis=0) = `[ [ 5., 6., 7., 8., 9.], + * [ 0., 1., 2., 3., 4.] ] + * reverse(x, axis=1) = `[ [ 4., 3., 2., 1., 0.], + * [ 9., 8., 7., 6., 5.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L832 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def flip(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise floor of the input. + * + * The floor of the scalar x is the largest integer i, such that i <= x. + * + * Example:: + * + * floor([-2.1, -1.9, 1.5, 1.9, 2.1]) = [-3., -2., 1., 1., 2.] + * + * The storage type of ``floor`` output depends upon the input storage type: + * + * - floor(default) = default + * - floor(row_sparse) = row_sparse + * - floor(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L837 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def floor(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise floor of the input. + * + * The floor of the scalar x is the largest integer i, such that i <= x. + * + * Example:: + * + * floor([-2.1, -1.9, 1.5, 1.9, 2.1]) = [-3., -2., 1., 1., 2.] + * + * The storage type of ``floor`` output depends upon the input storage type: + * + * - floor(default) = default + * - floor(row_sparse) = row_sparse + * - floor(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L837 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def floor(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * The FTML optimizer described in + * *FTML - Follow the Moving Leader in Deep Learning*, + * available at http://proceedings.mlr.press/v70/zheng17a/zheng17a.pdf. + * + * .. 
math:: + * + * g_t = \nabla J(W_{t-1})\\ + * v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2\\ + * d_t = \frac{ 1 - \beta_1^t }{ \eta_t } (\sqrt{ \frac{ v_t }{ 1 - \beta_2^t } } + \epsilon) + * \sigma_t = d_t - \beta_1 d_{t-1} + * z_t = \beta_1 z_{ t-1 } + (1 - \beta_1^t) g_t - \sigma_t W_{t-1} + * W_t = - \frac{ z_t }{ d_t } + * + * + * + * Defined in src/operator/optimizer_op.cc:L640 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def ftml_update(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * The FTML optimizer described in + * *FTML - Follow the Moving Leader in Deep Learning*, + * available at http://proceedings.mlr.press/v70/zheng17a/zheng17a.pdf. + * + * .. math:: + * + * g_t = \nabla J(W_{t-1})\\ + * v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2\\ + * d_t = \frac{ 1 - \beta_1^t }{ \eta_t } (\sqrt{ \frac{ v_t }{ 1 - \beta_2^t } } + \epsilon) + * \sigma_t = d_t - \beta_1 d_{t-1} + * z_t = \beta_1 z_{ t-1 } + (1 - \beta_1^t) g_t - \sigma_t W_{t-1} + * W_t = - \frac{ z_t }{ d_t } + * + * + * + * Defined in src/operator/optimizer_op.cc:L640 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def ftml_update(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for Ftrl optimizer. + * Referenced from *Ad Click Prediction: a View from the Trenches*, available at + * http://dl.acm.org/citation.cfm?id=2488200. + * + * It updates the weights using:: + * + * rescaled_grad = clip(grad * rescale_grad, clip_gradient) + * z += rescaled_grad - (sqrt(n + rescaled_grad**2) - sqrt(n)) * weight / learning_rate + * n += rescaled_grad**2 + * w = (sign(z) * lamda1 - z) / ((beta + sqrt(n)) / learning_rate + wd) * (abs(z) > lamda1) + * + * If w, z and n are all of ``row_sparse`` storage type, + * only the row slices whose indices appear in grad.indices are updated (for w, z and n):: + * + * for row in grad.indices: + * rescaled_grad[row] = clip(grad[row] * rescale_grad, clip_gradient) + * z[row] += rescaled_grad[row] - (sqrt(n[row] + rescaled_grad[row]**2) - sqrt(n[row])) * weight[row] / learning_rate + * n[row] += rescaled_grad[row]**2 + * w[row] = (sign(z[row]) * lamda1 - z[row]) / ((beta + sqrt(n[row])) / learning_rate + wd) * (abs(z[row]) > lamda1) + * + * + * + * Defined in src/operator/optimizer_op.cc:L876 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def ftrl_update(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for Ftrl optimizer. + * Referenced from *Ad Click Prediction: a View from the Trenches*, available at + * http://dl.acm.org/citation.cfm?id=2488200. 
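+ *
+ * A minimal plain-Scala sketch of one dense step for a single scalar weight, following the update
+ * rule spelled out below (illustration only; gradient rescaling/clipping and the ``row_sparse``
+ * path are omitted, and ``ftrlStep`` is a hypothetical helper)::
+ *
+ *   def ftrlStep(w: Double, z: Double, n: Double, grad: Double,
+ *                lr: Double, lamda1: Double, beta: Double, wd: Double): (Double, Double, Double) = {
+ *     val zNew = z + grad - (math.sqrt(n + grad * grad) - math.sqrt(n)) * w / lr
+ *     val nNew = n + grad * grad
+ *     val wNew =
+ *       if (math.abs(zNew) > lamda1)
+ *         (math.signum(zNew) * lamda1 - zNew) / ((beta + math.sqrt(nNew)) / lr + wd)
+ *       else 0.0
+ *     (wNew, zNew, nNew)   // updated weight, z and n states
+ *   }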
+ * + * It updates the weights using:: + * + * rescaled_grad = clip(grad * rescale_grad, clip_gradient) + * z += rescaled_grad - (sqrt(n + rescaled_grad**2) - sqrt(n)) * weight / learning_rate + * n += rescaled_grad**2 + * w = (sign(z) * lamda1 - z) / ((beta + sqrt(n)) / learning_rate + wd) * (abs(z) > lamda1) + * + * If w, z and n are all of ``row_sparse`` storage type, + * only the row slices whose indices appear in grad.indices are updated (for w, z and n):: + * + * for row in grad.indices: + * rescaled_grad[row] = clip(grad[row] * rescale_grad, clip_gradient) + * z[row] += rescaled_grad[row] - (sqrt(n[row] + rescaled_grad[row]**2) - sqrt(n[row])) * weight[row] / learning_rate + * n[row] += rescaled_grad[row]**2 + * w[row] = (sign(z[row]) * lamda1 - z[row]) / ((beta + sqrt(n[row])) / learning_rate + wd) * (abs(z[row]) > lamda1) + * + * + * + * Defined in src/operator/optimizer_op.cc:L876 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def ftrl_update(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the gamma function (extension of the factorial function \ + * to the reals), computed element-wise on the input array. + * + * The storage type of ``gamma`` output is always dense + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def gamma(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the gamma function (extension of the factorial function \ + * to the reals), computed element-wise on the input array. + * + * The storage type of ``gamma`` output is always dense + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def gamma(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise log of the absolute value of the gamma function \ + * of the input. + * + * The storage type of ``gammaln`` output is always dense + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def gammaln(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise log of the absolute value of the gamma function \ + * of the input. + * + * The storage type of ``gammaln`` output is always dense + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def gammaln(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Gather elements or slices from `data` and store to a tensor whose + * shape is defined by `indices`. + * + * Given `data` with shape `(X_0, X_1, ..., X_{N-1})` and indices with shape + * `(M, Y_0, ..., Y_{K-1})`, the output will have shape `(Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1})`, + * where `M <= N`. If `M == N`, output shape will simply be `(Y_0, ..., Y_{K-1})`. 
+ * + * The elements in output is defined as follows:: + * + * output[y_0, ..., y_{K-1}, x_M, ..., x_{N-1}] = data[indices[0, y_0, ..., y_{K-1}], + * ..., + * indices[M-1, y_0, ..., y_{K-1}], + * x_M, ..., x_{N-1}] + * + * Examples:: + * + * data = `[ [0, 1], [2, 3] ] + * indices = `[ [1, 1, 0], [0, 1, 0] ] + * gather_nd(data, indices) = [2, 3, 0] + * + * data = `[ `[ [1, 2], [3, 4] ], `[ [5, 6], [7, 8] ] ] + * indices = `[ [0, 1], [1, 0] ] + * gather_nd(data, indices) = `[ [3, 4], [5, 6] ] + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def gather_nd(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Gather elements or slices from `data` and store to a tensor whose + * shape is defined by `indices`. + * + * Given `data` with shape `(X_0, X_1, ..., X_{N-1})` and indices with shape + * `(M, Y_0, ..., Y_{K-1})`, the output will have shape `(Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1})`, + * where `M <= N`. If `M == N`, output shape will simply be `(Y_0, ..., Y_{K-1})`. + * + * The elements in output is defined as follows:: + * + * output[y_0, ..., y_{K-1}, x_M, ..., x_{N-1}] = data[indices[0, y_0, ..., y_{K-1}], + * ..., + * indices[M-1, y_0, ..., y_{K-1}], + * x_M, ..., x_{N-1}] + * + * Examples:: + * + * data = `[ [0, 1], [2, 3] ] + * indices = `[ [1, 1, 0], [0, 1, 0] ] + * gather_nd(data, indices) = [2, 3, 0] + * + * data = `[ `[ [1, 2], [3, 4] ], `[ [5, 6], [7, 8] ] ] + * indices = `[ [0, 1], [1, 0] ] + * gather_nd(data, indices) = `[ [3, 4], [5, 6] ] + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def gather_nd(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes hard sigmoid of x element-wise. + * + * .. math:: + * y = max(0, min(1, alpha * x + beta)) + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L161 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def hard_sigmoid(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes hard sigmoid of x element-wise. + * + * .. math:: + * y = max(0, min(1, alpha * x + beta)) + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L161 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def hard_sigmoid(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns a copy of the input. + * + * From:src/operator/tensor/elemwise_unary_op_basic.cc:246 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def identity(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns a copy of the input. + * + * From:src/operator/tensor/elemwise_unary_op_basic.cc:246 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def identity(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the Khatri-Rao product of the input matrices. + * + * Given a collection of :math:`n` input matrices, + * + * .. math:: + * A_1 \in \mathbb{R}^{M_1 \times M}, \ldots, A_n \in \mathbb{R}^{M_n \times N}, + * + * the (column-wise) Khatri-Rao product is defined as the matrix, + * + * .. math:: + * X = A_1 \otimes \cdots \otimes A_n \in \mathbb{R}^{(M_1 \cdots M_n) \times N}, + * + * where the :math:`k` th column is equal to the column-wise outer product + * :math:`{A_1}_k \otimes \cdots \otimes {A_n}_k` where :math:`{A_i}_k` is the kth + * column of the ith matrix. 
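+ *
+ * A plain-Scala sketch for two input matrices (illustration only; ``khatriRao`` is a hypothetical
+ * helper over nested arrays in row-major layout)::
+ *
+ *   def khatriRao(a: Array[Array[Double]], b: Array[Array[Double]]): Array[Array[Double]] = {
+ *     val n = a(0).length                        // both inputs must have the same number of columns
+ *     for (ai <- a; bj <- b) yield               // output row index is i * rows(B) + j
+ *       Array.tabulate(n)(k => ai(k) * bj(k))    // entry (i * rows(B) + j, k) = A(i)(k) * B(j)(k)
+ *   }
+ *
+ *   khatriRao(Array(Array(1.0, -1.0), Array(2.0, -3.0)),
+ *             Array(Array(1.0, 4.0), Array(2.0, 5.0), Array(3.0, 6.0)))
+ *   // => the 6 x 2 matrix shown in the example below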
+ * + * Example:: + * + * >>> A = mx.nd.array(`[ [1, -1], + * >>> [2, -3] ]) + * >>> B = mx.nd.array(`[ [1, 4], + * >>> [2, 5], + * >>> [3, 6] ]) + * >>> C = mx.nd.khatri_rao(A, B) + * >>> print(C.asnumpy()) + * `[ [ 1. -4.] + * [ 2. -5.] + * [ 3. -6.] + * [ 2. -12.] + * [ 4. -15.] + * [ 6. -18.] ] + * + * + * + * Defined in src/operator/contrib/krprod.cc:L108 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def khatri_rao(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the Khatri-Rao product of the input matrices. + * + * Given a collection of :math:`n` input matrices, + * + * .. math:: + * A_1 \in \mathbb{R}^{M_1 \times M}, \ldots, A_n \in \mathbb{R}^{M_n \times N}, + * + * the (column-wise) Khatri-Rao product is defined as the matrix, + * + * .. math:: + * X = A_1 \otimes \cdots \otimes A_n \in \mathbb{R}^{(M_1 \cdots M_n) \times N}, + * + * where the :math:`k` th column is equal to the column-wise outer product + * :math:`{A_1}_k \otimes \cdots \otimes {A_n}_k` where :math:`{A_i}_k` is the kth + * column of the ith matrix. + * + * Example:: + * + * >>> A = mx.nd.array(`[ [1, -1], + * >>> [2, -3] ]) + * >>> B = mx.nd.array(`[ [1, 4], + * >>> [2, 5], + * >>> [3, 6] ]) + * >>> C = mx.nd.khatri_rao(A, B) + * >>> print(C.asnumpy()) + * `[ [ 1. -4.] + * [ 2. -5.] + * [ 3. -6.] + * [ 2. -12.] + * [ 4. -15.] + * [ 6. -18.] ] + * + * + * + * Defined in src/operator/contrib/krprod.cc:L108 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def khatri_rao(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Phase I of lamb update it performs the following operations and returns g:. + * + * Link to paper: https://arxiv.org/pdf/1904.00962.pdf + * + * .. math:: + * \begin{gather*} + * grad = grad * rescale_grad + * if (grad < -clip_gradient) + * then + * grad = -clip_gradient + * if (grad > clip_gradient) + * then + * grad = clip_gradient + * + * mean = beta1 * mean + (1 - beta1) * grad; + * variance = beta2 * variance + (1. - beta2) * grad ^ 2; + * + * if (bias_correction) + * then + * mean_hat = mean / (1. - beta1^t); + * var_hat = var / (1 - beta2^t); + * g = mean_hat / (var_hat^(1/2) + epsilon) + wd * weight; + * else + * g = mean / (var_data^(1/2) + epsilon) + wd * weight; + * \end{gather*} + * + * + * + * Defined in src/operator/optimizer_op.cc:L953 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def lamb_update_phase1(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Phase I of lamb update it performs the following operations and returns g:. + * + * Link to paper: https://arxiv.org/pdf/1904.00962.pdf + * + * .. math:: + * \begin{gather*} + * grad = grad * rescale_grad + * if (grad < -clip_gradient) + * then + * grad = -clip_gradient + * if (grad > clip_gradient) + * then + * grad = clip_gradient + * + * mean = beta1 * mean + (1 - beta1) * grad; + * variance = beta2 * variance + (1. - beta2) * grad ^ 2; + * + * if (bias_correction) + * then + * mean_hat = mean / (1. 
- beta1^t); + * var_hat = var / (1 - beta2^t); + * g = mean_hat / (var_hat^(1/2) + epsilon) + wd * weight; + * else + * g = mean / (var_data^(1/2) + epsilon) + wd * weight; + * \end{gather*} + * + * + * + * Defined in src/operator/optimizer_op.cc:L953 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def lamb_update_phase1(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Phase II of lamb update it performs the following operations and updates grad. + * + * Link to paper: https://arxiv.org/pdf/1904.00962.pdf + * + * .. math:: + * \begin{gather*} + * if (lower_bound >= 0) + * then + * r1 = max(r1, lower_bound) + * if (upper_bound >= 0) + * then + * r1 = max(r1, upper_bound) + * + * if (r1 == 0 or r2 == 0) + * then + * lr = lr + * else + * lr = lr * (r1/r2) + * weight = weight - lr * g + * \end{gather*} + * + * + * + * Defined in src/operator/optimizer_op.cc:L992 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def lamb_update_phase2(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Phase II of lamb update it performs the following operations and updates grad. + * + * Link to paper: https://arxiv.org/pdf/1904.00962.pdf + * + * .. math:: + * \begin{gather*} + * if (lower_bound >= 0) + * then + * r1 = max(r1, lower_bound) + * if (upper_bound >= 0) + * then + * r1 = max(r1, upper_bound) + * + * if (r1 == 0 or r2 == 0) + * then + * lr = lr + * else + * lr = lr * (r1/r2) + * weight = weight - lr * g + * \end{gather*} + * + * + * + * Defined in src/operator/optimizer_op.cc:L992 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def lamb_update_phase2(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Compute the determinant of a matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* is a square matrix. We compute: + * + * *out* = *det(A)* + * + * If *n>2*, *det* is performed separately on the trailing two dimensions + * for all inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * .. note:: There is no gradient backwarded when A is non-invertible (which is + * equivalent to det(A) = 0) because zero is rarely hit upon in float + * point computation and the Jacobi's formula on determinant gradient + * is not computationally efficient when A is non-invertible. + * + * Examples:: + * + * Single matrix determinant + * A = `[ [1., 4.], [2., 3.] ] + * det(A) = [-5.] + * + * Batch matrix determinant + * A = `[ `[ [1., 4.], [2., 3.] ], + * `[ [2., 3.], [1., 4.] ] ] + * det(A) = [-5., 5.] + * + * + * Defined in src/operator/tensor/la_op.cc:L973 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_det(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Compute the determinant of a matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* is a square matrix. We compute: + * + * *out* = *det(A)* + * + * If *n>2*, *det* is performed separately on the trailing two dimensions + * for all inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * .. note:: There is no gradient backwarded when A is non-invertible (which is + * equivalent to det(A) = 0) because zero is rarely hit upon in float + * point computation and the Jacobi's formula on determinant gradient + * is not computationally efficient when A is non-invertible. 
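+ *
+ * For a single 2 x 2 matrix the determinant reduces to ``a*d - b*c``; a plain-Scala sketch
+ * (illustration only; ``det2x2`` is a hypothetical helper)::
+ *
+ *   def det2x2(m: Array[Array[Double]]): Double = m(0)(0) * m(1)(1) - m(0)(1) * m(1)(0)
+ *
+ *   det2x2(Array(Array(1.0, 4.0), Array(2.0, 3.0)))   // => -5.0, as in the example below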
+ * + * Examples:: + * + * Single matrix determinant + * A = `[ [1., 4.], [2., 3.] ] + * det(A) = [-5.] + * + * Batch matrix determinant + * A = `[ `[ [1., 4.], [2., 3.] ], + * `[ [2., 3.], [1., 4.] ] ] + * det(A) = [-5., 5.] + * + * + * Defined in src/operator/tensor/la_op.cc:L973 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_det(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Extracts the diagonal entries of a square matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, then *A* represents a single square matrix which diagonal elements get extracted as a 1-dimensional tensor. + * + * If *n>2*, then *A* represents a batch of square matrices on the trailing two dimensions. The extracted diagonals are returned as an *n-1*-dimensional tensor. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix diagonal extraction + * A = `[ [1.0, 2.0], + * [3.0, 4.0] ] + * + * extractdiag(A) = [1.0, 4.0] + * + * extractdiag(A, 1) = [2.0] + * + * Batch matrix diagonal extraction + * A = `[ `[ [1.0, 2.0], + * [3.0, 4.0] ], + * `[ [5.0, 6.0], + * [7.0, 8.0] ] ] + * + * extractdiag(A) = `[ [1.0, 4.0], + * [5.0, 8.0] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L495 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_extractdiag(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Extracts the diagonal entries of a square matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, then *A* represents a single square matrix which diagonal elements get extracted as a 1-dimensional tensor. + * + * If *n>2*, then *A* represents a batch of square matrices on the trailing two dimensions. The extracted diagonals are returned as an *n-1*-dimensional tensor. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix diagonal extraction + * A = `[ [1.0, 2.0], + * [3.0, 4.0] ] + * + * extractdiag(A) = [1.0, 4.0] + * + * extractdiag(A, 1) = [2.0] + * + * Batch matrix diagonal extraction + * A = `[ `[ [1.0, 2.0], + * [3.0, 4.0] ], + * `[ [5.0, 6.0], + * [7.0, 8.0] ] ] + * + * extractdiag(A) = `[ [1.0, 4.0], + * [5.0, 8.0] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L495 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_extractdiag(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Extracts a triangular sub-matrix from a square matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, then *A* represents a single square matrix from which a triangular sub-matrix is extracted as a 1-dimensional tensor. + * + * If *n>2*, then *A* represents a batch of square matrices on the trailing two dimensions. The extracted triangular sub-matrices are returned as an *n-1*-dimensional tensor. + * + * The *offset* and *lower* parameters determine the triangle to be extracted: + * + * - When *offset = 0* either the lower or upper triangle with respect to the main diagonal is extracted depending on the value of parameter *lower*. + * - When *offset = k > 0* the upper triangle with respect to the k-th diagonal above the main diagonal is extracted. + * - When *offset = k < 0* the lower triangle with respect to the k-th diagonal below the main diagonal is extracted. + * + * .. note:: The operator supports float32 and float64 data types only. 
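+ *
+ * A plain-Scala sketch of the ``offset = 0`` case (illustration only; ``extractTrian`` is a
+ * hypothetical helper that returns the triangle entries in row-major order)::
+ *
+ *   def extractTrian(a: Array[Array[Double]], lower: Boolean = true): Array[Double] =
+ *     (for {
+ *       i <- a.indices
+ *       j <- a(i).indices
+ *       if (lower && j <= i) || (!lower && j >= i)
+ *     } yield a(i)(j)).toArray
+ *
+ *   extractTrian(Array(Array(1.0, 2.0), Array(3.0, 4.0)))                  // => Array(1.0, 3.0, 4.0)
+ *   extractTrian(Array(Array(1.0, 2.0), Array(3.0, 4.0)), lower = false)   // => Array(1.0, 2.0, 4.0)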
+ * + * Examples:: + * + * Single triagonal extraction + * A = `[ [1.0, 2.0], + * [3.0, 4.0] ] + * + * extracttrian(A) = [1.0, 3.0, 4.0] + * extracttrian(A, lower=False) = [1.0, 2.0, 4.0] + * extracttrian(A, 1) = [2.0] + * extracttrian(A, -1) = [3.0] + * + * Batch triagonal extraction + * A = `[ `[ [1.0, 2.0], + * [3.0, 4.0] ], + * `[ [5.0, 6.0], + * [7.0, 8.0] ] ] + * + * extracttrian(A) = `[ [1.0, 3.0, 4.0], + * [5.0, 7.0, 8.0] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L605 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_extracttrian(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Extracts a triangular sub-matrix from a square matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, then *A* represents a single square matrix from which a triangular sub-matrix is extracted as a 1-dimensional tensor. + * + * If *n>2*, then *A* represents a batch of square matrices on the trailing two dimensions. The extracted triangular sub-matrices are returned as an *n-1*-dimensional tensor. + * + * The *offset* and *lower* parameters determine the triangle to be extracted: + * + * - When *offset = 0* either the lower or upper triangle with respect to the main diagonal is extracted depending on the value of parameter *lower*. + * - When *offset = k > 0* the upper triangle with respect to the k-th diagonal above the main diagonal is extracted. + * - When *offset = k < 0* the lower triangle with respect to the k-th diagonal below the main diagonal is extracted. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single triagonal extraction + * A = `[ [1.0, 2.0], + * [3.0, 4.0] ] + * + * extracttrian(A) = [1.0, 3.0, 4.0] + * extracttrian(A, lower=False) = [1.0, 2.0, 4.0] + * extracttrian(A, 1) = [2.0] + * extracttrian(A, -1) = [3.0] + * + * Batch triagonal extraction + * A = `[ `[ [1.0, 2.0], + * [3.0, 4.0] ], + * `[ [5.0, 6.0], + * [7.0, 8.0] ] ] + * + * extracttrian(A) = `[ [1.0, 3.0, 4.0], + * [5.0, 7.0, 8.0] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L605 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_extracttrian(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * LQ factorization for general matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, we compute the LQ factorization (LAPACK *gelqf*, followed by *orglq*). *A* + * must have shape *(x, y)* with *x <= y*, and must have full rank *=x*. The LQ + * factorization consists of *L* with shape *(x, x)* and *Q* with shape *(x, y)*, so + * that: + * + * *A* = *L* \* *Q* + * + * Here, *L* is lower triangular (upper triangle equal to zero) with nonzero diagonal, + * and *Q* is row-orthonormal, meaning that + * + * *Q* \* *Q*\ :sup:`T` + * + * is equal to the identity matrix of shape *(x, x)*. + * + * If *n>2*, *gelqf* is performed separately on the trailing two dimensions for all + * inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single LQ factorization + * A = `[ [1., 2., 3.], [4., 5., 6.] ] + * Q, L = gelqf(A) + * Q = `[ [-0.26726124, -0.53452248, -0.80178373], + * [0.87287156, 0.21821789, -0.43643578] ] + * L = `[ [-3.74165739, 0.], + * [-8.55235974, 1.96396101] ] + * + * Batch LQ factorization + * A = `[ `[ [1., 2., 3.], [4., 5., 6.] ], + * `[ [7., 8., 9.], [10., 11., 12.] 
] ] + * Q, L = gelqf(A) + * Q = `[ `[ [-0.26726124, -0.53452248, -0.80178373], + * [0.87287156, 0.21821789, -0.43643578] ], + * `[ [-0.50257071, -0.57436653, -0.64616234], + * [0.7620735, 0.05862104, -0.64483142] ] ] + * L = `[ `[ [-3.74165739, 0.], + * [-8.55235974, 1.96396101] ], + * `[ [-13.92838828, 0.], + * [-19.09768702, 0.52758934] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L798 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_gelqf(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * LQ factorization for general matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, we compute the LQ factorization (LAPACK *gelqf*, followed by *orglq*). *A* + * must have shape *(x, y)* with *x <= y*, and must have full rank *=x*. The LQ + * factorization consists of *L* with shape *(x, x)* and *Q* with shape *(x, y)*, so + * that: + * + * *A* = *L* \* *Q* + * + * Here, *L* is lower triangular (upper triangle equal to zero) with nonzero diagonal, + * and *Q* is row-orthonormal, meaning that + * + * *Q* \* *Q*\ :sup:`T` + * + * is equal to the identity matrix of shape *(x, x)*. + * + * If *n>2*, *gelqf* is performed separately on the trailing two dimensions for all + * inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single LQ factorization + * A = `[ [1., 2., 3.], [4., 5., 6.] ] + * Q, L = gelqf(A) + * Q = `[ [-0.26726124, -0.53452248, -0.80178373], + * [0.87287156, 0.21821789, -0.43643578] ] + * L = `[ [-3.74165739, 0.], + * [-8.55235974, 1.96396101] ] + * + * Batch LQ factorization + * A = `[ `[ [1., 2., 3.], [4., 5., 6.] ], + * `[ [7., 8., 9.], [10., 11., 12.] ] ] + * Q, L = gelqf(A) + * Q = `[ `[ [-0.26726124, -0.53452248, -0.80178373], + * [0.87287156, 0.21821789, -0.43643578] ], + * `[ [-0.50257071, -0.57436653, -0.64616234], + * [0.7620735, 0.05862104, -0.64483142] ] ] + * L = `[ `[ [-3.74165739, 0.], + * [-8.55235974, 1.96396101] ], + * `[ [-13.92838828, 0.], + * [-19.09768702, 0.52758934] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L798 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_gelqf(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Performs general matrix multiplication and accumulation. + * Input are tensors *A*, *B*, *C*, each of dimension *n >= 2* and having the same shape + * on the leading *n-2* dimensions. + * + * If *n=2*, the BLAS3 function *gemm* is performed: + * + * *out* = *alpha* \* *op*\ (*A*) \* *op*\ (*B*) + *beta* \* *C* + * + * Here, *alpha* and *beta* are scalar parameters, and *op()* is either the identity or + * matrix transposition (depending on *transpose_a*, *transpose_b*). + * + * If *n>2*, *gemm* is performed separately for a batch of matrices. The column indices of the matrices + * are given by the last dimensions of the tensors, the row indices by the axis specified with the *axis* + * parameter. By default, the trailing two dimensions will be used for matrix encoding. + * + * For a non-default axis parameter, the operation performed is equivalent to a series of swapaxes/gemm/swapaxes + * calls. For example let *A*, *B*, *C* be 5 dimensional tensors. 
Then gemm(*A*, *B*, *C*, axis=1) is equivalent + * to the following without the overhead of the additional swapaxis operations:: + * + * A1 = swapaxes(A, dim1=1, dim2=3) + * B1 = swapaxes(B, dim1=1, dim2=3) + * C = swapaxes(C, dim1=1, dim2=3) + * C = gemm(A1, B1, C) + * C = swapaxis(C, dim1=1, dim2=3) + * + * When the input data is of type float32 and the environment variables MXNET_CUDA_ALLOW_TENSOR_CORE + * and MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION are set to 1, this operator will try to use + * pseudo-float16 precision (float32 math with float16 I/O) precision in order to use + * Tensor Cores on suitable NVIDIA GPUs. This can sometimes give significant speedups. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix multiply-add + * A = `[ [1.0, 1.0], [1.0, 1.0] ] + * B = `[ [1.0, 1.0], [1.0, 1.0], [1.0, 1.0] ] + * C = `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ] + * gemm(A, B, C, transpose_b=True, alpha=2.0, beta=10.0) + * = `[ [14.0, 14.0, 14.0], [14.0, 14.0, 14.0] ] + * + * Batch matrix multiply-add + * A = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * B = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * C = `[ `[ [10.0] ], `[ [0.01] ] ] + * gemm(A, B, C, transpose_b=True, alpha=2.0 , beta=10.0) + * = `[ `[ [104.0] ], `[ [0.14] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L89 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_gemm(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Performs general matrix multiplication and accumulation. + * Input are tensors *A*, *B*, *C*, each of dimension *n >= 2* and having the same shape + * on the leading *n-2* dimensions. + * + * If *n=2*, the BLAS3 function *gemm* is performed: + * + * *out* = *alpha* \* *op*\ (*A*) \* *op*\ (*B*) + *beta* \* *C* + * + * Here, *alpha* and *beta* are scalar parameters, and *op()* is either the identity or + * matrix transposition (depending on *transpose_a*, *transpose_b*). + * + * If *n>2*, *gemm* is performed separately for a batch of matrices. The column indices of the matrices + * are given by the last dimensions of the tensors, the row indices by the axis specified with the *axis* + * parameter. By default, the trailing two dimensions will be used for matrix encoding. + * + * For a non-default axis parameter, the operation performed is equivalent to a series of swapaxes/gemm/swapaxes + * calls. For example let *A*, *B*, *C* be 5 dimensional tensors. Then gemm(*A*, *B*, *C*, axis=1) is equivalent + * to the following without the overhead of the additional swapaxis operations:: + * + * A1 = swapaxes(A, dim1=1, dim2=3) + * B1 = swapaxes(B, dim1=1, dim2=3) + * C = swapaxes(C, dim1=1, dim2=3) + * C = gemm(A1, B1, C) + * C = swapaxis(C, dim1=1, dim2=3) + * + * When the input data is of type float32 and the environment variables MXNET_CUDA_ALLOW_TENSOR_CORE + * and MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION are set to 1, this operator will try to use + * pseudo-float16 precision (float32 math with float16 I/O) precision in order to use + * Tensor Cores on suitable NVIDIA GPUs. This can sometimes give significant speedups. + * + * .. note:: The operator supports float32 and float64 data types only. 
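+ *
+ * The single-matrix example below can be reproduced through the generated Scala
+ * wrappers (scalar parameters go into the ``kwargs`` map, tensors into the positional
+ * argument list; exposing the wrappers on the ``NDArray`` companion object is assumed)::
+ *
+ *     import org.apache.mxnet.{NDArray, Shape}
+ *
+ *     val a = NDArray.ones(Shape(2, 2))
+ *     val b = NDArray.ones(Shape(3, 2))
+ *     val c = NDArray.ones(Shape(2, 3))
+ *     val out = NDArray.linalg_gemm(
+ *       Map("transpose_b" -> true, "alpha" -> 2.0, "beta" -> 10.0))(a, b, c).head
+ *     // out = [[14, 14, 14], [14, 14, 14]]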
+ * + * Examples:: + * + * Single matrix multiply-add + * A = `[ [1.0, 1.0], [1.0, 1.0] ] + * B = `[ [1.0, 1.0], [1.0, 1.0], [1.0, 1.0] ] + * C = `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ] + * gemm(A, B, C, transpose_b=True, alpha=2.0, beta=10.0) + * = `[ [14.0, 14.0, 14.0], [14.0, 14.0, 14.0] ] + * + * Batch matrix multiply-add + * A = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * B = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * C = `[ `[ [10.0] ], `[ [0.01] ] ] + * gemm(A, B, C, transpose_b=True, alpha=2.0 , beta=10.0) + * = `[ `[ [104.0] ], `[ [0.14] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L89 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_gemm(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Performs general matrix multiplication. + * Input are tensors *A*, *B*, each of dimension *n >= 2* and having the same shape + * on the leading *n-2* dimensions. + * + * If *n=2*, the BLAS3 function *gemm* is performed: + * + * *out* = *alpha* \* *op*\ (*A*) \* *op*\ (*B*) + * + * Here *alpha* is a scalar parameter and *op()* is either the identity or the matrix + * transposition (depending on *transpose_a*, *transpose_b*). + * + * If *n>2*, *gemm* is performed separately for a batch of matrices. The column indices of the matrices + * are given by the last dimensions of the tensors, the row indices by the axis specified with the *axis* + * parameter. By default, the trailing two dimensions will be used for matrix encoding. + * + * For a non-default axis parameter, the operation performed is equivalent to a series of swapaxes/gemm/swapaxes + * calls. For example let *A*, *B* be 5 dimensional tensors. Then gemm(*A*, *B*, axis=1) is equivalent to + * the following without the overhead of the additional swapaxis operations:: + * + * A1 = swapaxes(A, dim1=1, dim2=3) + * B1 = swapaxes(B, dim1=1, dim2=3) + * C = gemm2(A1, B1) + * C = swapaxis(C, dim1=1, dim2=3) + * + * When the input data is of type float32 and the environment variables MXNET_CUDA_ALLOW_TENSOR_CORE + * and MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION are set to 1, this operator will try to use + * pseudo-float16 precision (float32 math with float16 I/O) precision in order to use + * Tensor Cores on suitable NVIDIA GPUs. This can sometimes give significant speedups. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix multiply + * A = `[ [1.0, 1.0], [1.0, 1.0] ] + * B = `[ [1.0, 1.0], [1.0, 1.0], [1.0, 1.0] ] + * gemm2(A, B, transpose_b=True, alpha=2.0) + * = `[ [4.0, 4.0, 4.0], [4.0, 4.0, 4.0] ] + * + * Batch matrix multiply + * A = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * B = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * gemm2(A, B, transpose_b=True, alpha=2.0) + * = `[ `[ [4.0] ], `[ [0.04 ] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L163 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_gemm2(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Performs general matrix multiplication. + * Input are tensors *A*, *B*, each of dimension *n >= 2* and having the same shape + * on the leading *n-2* dimensions. + * + * If *n=2*, the BLAS3 function *gemm* is performed: + * + * *out* = *alpha* \* *op*\ (*A*) \* *op*\ (*B*) + * + * Here *alpha* is a scalar parameter and *op()* is either the identity or the matrix + * transposition (depending on *transpose_a*, *transpose_b*). 
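+ *
+ * For the *n=2* case this corresponds to a call such as the following sketch
+ * (the ``NDArray`` companion object entry point and the kwargs/args split are
+ * assumptions of the example)::
+ *
+ *     import org.apache.mxnet.{NDArray, Shape}
+ *
+ *     val a = NDArray.ones(Shape(2, 2))
+ *     val b = NDArray.ones(Shape(3, 2))
+ *     val out = NDArray.linalg_gemm2(Map("transpose_b" -> true, "alpha" -> 2.0))(a, b).head
+ *     // out = [[4, 4, 4], [4, 4, 4]]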
+ * + * If *n>2*, *gemm* is performed separately for a batch of matrices. The column indices of the matrices + * are given by the last dimensions of the tensors, the row indices by the axis specified with the *axis* + * parameter. By default, the trailing two dimensions will be used for matrix encoding. + * + * For a non-default axis parameter, the operation performed is equivalent to a series of swapaxes/gemm/swapaxes + * calls. For example let *A*, *B* be 5 dimensional tensors. Then gemm(*A*, *B*, axis=1) is equivalent to + * the following without the overhead of the additional swapaxis operations:: + * + * A1 = swapaxes(A, dim1=1, dim2=3) + * B1 = swapaxes(B, dim1=1, dim2=3) + * C = gemm2(A1, B1) + * C = swapaxis(C, dim1=1, dim2=3) + * + * When the input data is of type float32 and the environment variables MXNET_CUDA_ALLOW_TENSOR_CORE + * and MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION are set to 1, this operator will try to use + * pseudo-float16 precision (float32 math with float16 I/O) precision in order to use + * Tensor Cores on suitable NVIDIA GPUs. This can sometimes give significant speedups. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix multiply + * A = `[ [1.0, 1.0], [1.0, 1.0] ] + * B = `[ [1.0, 1.0], [1.0, 1.0], [1.0, 1.0] ] + * gemm2(A, B, transpose_b=True, alpha=2.0) + * = `[ [4.0, 4.0, 4.0], [4.0, 4.0, 4.0] ] + * + * Batch matrix multiply + * A = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * B = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * gemm2(A, B, transpose_b=True, alpha=2.0) + * = `[ `[ [4.0] ], `[ [0.04 ] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L163 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_gemm2(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Compute the inverse of a matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* is a square matrix. We compute: + * + * *out* = *A*\ :sup:`-1` + * + * If *n>2*, *inverse* is performed separately on the trailing two dimensions + * for all inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix inverse + * A = `[ [1., 4.], [2., 3.] ] + * inverse(A) = `[ [-0.6, 0.8], [0.4, -0.2] ] + * + * Batch matrix inverse + * A = `[ `[ [1., 4.], [2., 3.] ], + * `[ [1., 3.], [2., 4.] ] ] + * inverse(A) = `[ `[ [-0.6, 0.8], [0.4, -0.2] ], + * `[ [-2., 1.5], [1., -0.5] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L919 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_inverse(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Compute the inverse of a matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* is a square matrix. We compute: + * + * *out* = *A*\ :sup:`-1` + * + * If *n>2*, *inverse* is performed separately on the trailing two dimensions + * for all inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix inverse + * A = `[ [1., 4.], [2., 3.] ] + * inverse(A) = `[ [-0.6, 0.8], [0.4, -0.2] ] + * + * Batch matrix inverse + * A = `[ `[ [1., 4.], [2., 3.] ], + * `[ [1., 3.], [2., 4.] 
] ] + * inverse(A) = `[ `[ [-0.6, 0.8], [0.4, -0.2] ], + * `[ [-2., 1.5], [1., -0.5] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L919 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_inverse(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Constructs a square matrix with the input as diagonal. + * Input is a tensor *A* of dimension *n >= 1*. + * + * If *n=1*, then *A* represents the diagonal entries of a single square matrix. This matrix will be returned as a 2-dimensional tensor. + * If *n>1*, then *A* represents a batch of diagonals of square matrices. The batch of diagonal matrices will be returned as an *n+1*-dimensional tensor. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single diagonal matrix construction + * A = [1.0, 2.0] + * + * makediag(A) = `[ [1.0, 0.0], + * [0.0, 2.0] ] + * + * makediag(A, 1) = `[ [0.0, 1.0, 0.0], + * [0.0, 0.0, 2.0], + * [0.0, 0.0, 0.0] ] + * + * Batch diagonal matrix construction + * A = `[ [1.0, 2.0], + * [3.0, 4.0] ] + * + * makediag(A) = `[ `[ [1.0, 0.0], + * [0.0, 2.0] ], + * `[ [3.0, 0.0], + * [0.0, 4.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L547 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_makediag(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Constructs a square matrix with the input as diagonal. + * Input is a tensor *A* of dimension *n >= 1*. + * + * If *n=1*, then *A* represents the diagonal entries of a single square matrix. This matrix will be returned as a 2-dimensional tensor. + * If *n>1*, then *A* represents a batch of diagonals of square matrices. The batch of diagonal matrices will be returned as an *n+1*-dimensional tensor. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single diagonal matrix construction + * A = [1.0, 2.0] + * + * makediag(A) = `[ [1.0, 0.0], + * [0.0, 2.0] ] + * + * makediag(A, 1) = `[ [0.0, 1.0, 0.0], + * [0.0, 0.0, 2.0], + * [0.0, 0.0, 0.0] ] + * + * Batch diagonal matrix construction + * A = `[ [1.0, 2.0], + * [3.0, 4.0] ] + * + * makediag(A) = `[ `[ [1.0, 0.0], + * [0.0, 2.0] ], + * `[ [3.0, 0.0], + * [0.0, 4.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L547 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_makediag(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Constructs a square matrix with the input representing a specific triangular sub-matrix. + * This is basically the inverse of *linalg.extracttrian*. Input is a tensor *A* of dimension *n >= 1*. + * + * If *n=1*, then *A* represents the entries of a triangular matrix which is lower triangular if *offset<0* or *offset=0*, *lower=true*. The resulting matrix is derived by first constructing the square + * matrix with the entries outside the triangle set to zero and then adding *offset*-times an additional + * diagonal with zero entries to the square matrix. + * + * If *n>1*, then *A* represents a batch of triangular sub-matrices. The batch of corresponding square matrices is returned as an *n+1*-dimensional tensor. + * + * .. note:: The operator supports float32 and float64 data types only. 
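+ *
+ * A short Scala sketch of the default (lower-triangular) case, assuming the wrappers
+ * are reachable on the ``NDArray`` companion object::
+ *
+ *     import org.apache.mxnet.{NDArray, Shape}
+ *
+ *     val a = NDArray.array(Array(1f, 2f, 3f), Shape(3))
+ *     val m = NDArray.linalg_maketrian(a).head   // [[1, 0], [2, 3]]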
+ * + * Examples:: + * + * Single matrix construction + * A = [1.0, 2.0, 3.0] + * + * maketrian(A) = `[ [1.0, 0.0], + * [2.0, 3.0] ] + * + * maketrian(A, lower=false) = `[ [1.0, 2.0], + * [0.0, 3.0] ] + * + * maketrian(A, offset=1) = `[ [0.0, 1.0, 2.0], + * [0.0, 0.0, 3.0], + * [0.0, 0.0, 0.0] ] + * maketrian(A, offset=-1) = `[ [0.0, 0.0, 0.0], + * [1.0, 0.0, 0.0], + * [2.0, 3.0, 0.0] ] + * + * Batch matrix construction + * A = `[ [1.0, 2.0, 3.0], + * [4.0, 5.0, 6.0] ] + * + * maketrian(A) = `[ `[ [1.0, 0.0], + * [2.0, 3.0] ], + * `[ [4.0, 0.0], + * [5.0, 6.0] ] ] + * + * maketrian(A, offset=1) = `[ `[ [0.0, 1.0, 2.0], + * [0.0, 0.0, 3.0], + * [0.0, 0.0, 0.0] ], + * `[ [0.0, 4.0, 5.0], + * [0.0, 0.0, 6.0], + * [0.0, 0.0, 0.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L673 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_maketrian(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Constructs a square matrix with the input representing a specific triangular sub-matrix. + * This is basically the inverse of *linalg.extracttrian*. Input is a tensor *A* of dimension *n >= 1*. + * + * If *n=1*, then *A* represents the entries of a triangular matrix which is lower triangular if *offset<0* or *offset=0*, *lower=true*. The resulting matrix is derived by first constructing the square + * matrix with the entries outside the triangle set to zero and then adding *offset*-times an additional + * diagonal with zero entries to the square matrix. + * + * If *n>1*, then *A* represents a batch of triangular sub-matrices. The batch of corresponding square matrices is returned as an *n+1*-dimensional tensor. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix construction + * A = [1.0, 2.0, 3.0] + * + * maketrian(A) = `[ [1.0, 0.0], + * [2.0, 3.0] ] + * + * maketrian(A, lower=false) = `[ [1.0, 2.0], + * [0.0, 3.0] ] + * + * maketrian(A, offset=1) = `[ [0.0, 1.0, 2.0], + * [0.0, 0.0, 3.0], + * [0.0, 0.0, 0.0] ] + * maketrian(A, offset=-1) = `[ [0.0, 0.0, 0.0], + * [1.0, 0.0, 0.0], + * [2.0, 3.0, 0.0] ] + * + * Batch matrix construction + * A = `[ [1.0, 2.0, 3.0], + * [4.0, 5.0, 6.0] ] + * + * maketrian(A) = `[ `[ [1.0, 0.0], + * [2.0, 3.0] ], + * `[ [4.0, 0.0], + * [5.0, 6.0] ] ] + * + * maketrian(A, offset=1) = `[ `[ [0.0, 1.0, 2.0], + * [0.0, 0.0, 3.0], + * [0.0, 0.0, 0.0] ], + * `[ [0.0, 4.0, 5.0], + * [0.0, 0.0, 6.0], + * [0.0, 0.0, 0.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L673 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_maketrian(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Performs Cholesky factorization of a symmetric positive-definite matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, the Cholesky factor *B* of the symmetric, positive definite matrix *A* is + * computed. *B* is triangular (entries of upper or lower triangle are all zero), has + * positive diagonal entries, and: + * + * *A* = *B* \* *B*\ :sup:`T` if *lower* = *true* + * *A* = *B*\ :sup:`T` \* *B* if *lower* = *false* + * + * If *n>2*, *potrf* is performed separately on the trailing two dimensions for all inputs + * (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. 
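+ *
+ * The single-matrix factorization below corresponds to a call like this Scala sketch
+ * (the ``NDArray`` companion object entry point is assumed)::
+ *
+ *     import org.apache.mxnet.{NDArray, Shape}
+ *
+ *     val a = NDArray.array(Array(4f, 1f, 1f, 4.25f), Shape(2, 2))
+ *     val l = NDArray.linalg_potrf(a).head   // [[2, 0], [0.5, 2]]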
+ * + * Examples:: + * + * Single matrix factorization + * A = `[ [4.0, 1.0], [1.0, 4.25] ] + * potrf(A) = `[ [2.0, 0], [0.5, 2.0] ] + * + * Batch matrix factorization + * A = `[ `[ [4.0, 1.0], [1.0, 4.25] ], `[ [16.0, 4.0], [4.0, 17.0] ] ] + * potrf(A) = `[ `[ [2.0, 0], [0.5, 2.0] ], `[ [4.0, 0], [1.0, 4.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L214 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_potrf(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Performs Cholesky factorization of a symmetric positive-definite matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, the Cholesky factor *B* of the symmetric, positive definite matrix *A* is + * computed. *B* is triangular (entries of upper or lower triangle are all zero), has + * positive diagonal entries, and: + * + * *A* = *B* \* *B*\ :sup:`T` if *lower* = *true* + * *A* = *B*\ :sup:`T` \* *B* if *lower* = *false* + * + * If *n>2*, *potrf* is performed separately on the trailing two dimensions for all inputs + * (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix factorization + * A = `[ [4.0, 1.0], [1.0, 4.25] ] + * potrf(A) = `[ [2.0, 0], [0.5, 2.0] ] + * + * Batch matrix factorization + * A = `[ `[ [4.0, 1.0], [1.0, 4.25] ], `[ [16.0, 4.0], [4.0, 17.0] ] ] + * potrf(A) = `[ `[ [2.0, 0], [0.5, 2.0] ], `[ [4.0, 0], [1.0, 4.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L214 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_potrf(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Performs matrix inversion from a Cholesky factorization. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* is a triangular matrix (entries of upper or lower triangle are all zero) + * with positive diagonal. We compute: + * + * *out* = *A*\ :sup:`-T` \* *A*\ :sup:`-1` if *lower* = *true* + * *out* = *A*\ :sup:`-1` \* *A*\ :sup:`-T` if *lower* = *false* + * + * In other words, if *A* is the Cholesky factor of a symmetric positive definite matrix + * *B* (obtained by *potrf*), then + * + * *out* = *B*\ :sup:`-1` + * + * If *n>2*, *potri* is performed separately on the trailing two dimensions for all inputs + * (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * .. note:: Use this operator only if you are certain you need the inverse of *B*, and + * cannot use the Cholesky factor *A* (*potrf*), together with backsubstitution + * (*trsm*). The latter is numerically much safer, and also cheaper. + * + * Examples:: + * + * Single matrix inverse + * A = `[ [2.0, 0], [0.5, 2.0] ] + * potri(A) = `[ [0.26563, -0.0625], [-0.0625, 0.25] ] + * + * Batch matrix inverse + * A = `[ `[ [2.0, 0], [0.5, 2.0] ], `[ [4.0, 0], [1.0, 4.0] ] ] + * potri(A) = `[ `[ [0.26563, -0.0625], [-0.0625, 0.25] ], + * `[ [0.06641, -0.01562], [-0.01562, 0,0625] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L275 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_potri(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Performs matrix inversion from a Cholesky factorization. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* is a triangular matrix (entries of upper or lower triangle are all zero) + * with positive diagonal. 
We compute: + * + * *out* = *A*\ :sup:`-T` \* *A*\ :sup:`-1` if *lower* = *true* + * *out* = *A*\ :sup:`-1` \* *A*\ :sup:`-T` if *lower* = *false* + * + * In other words, if *A* is the Cholesky factor of a symmetric positive definite matrix + * *B* (obtained by *potrf*), then + * + * *out* = *B*\ :sup:`-1` + * + * If *n>2*, *potri* is performed separately on the trailing two dimensions for all inputs + * (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * .. note:: Use this operator only if you are certain you need the inverse of *B*, and + * cannot use the Cholesky factor *A* (*potrf*), together with backsubstitution + * (*trsm*). The latter is numerically much safer, and also cheaper. + * + * Examples:: + * + * Single matrix inverse + * A = `[ [2.0, 0], [0.5, 2.0] ] + * potri(A) = `[ [0.26563, -0.0625], [-0.0625, 0.25] ] + * + * Batch matrix inverse + * A = `[ `[ [2.0, 0], [0.5, 2.0] ], `[ [4.0, 0], [1.0, 4.0] ] ] + * potri(A) = `[ `[ [0.26563, -0.0625], [-0.0625, 0.25] ], + * `[ [0.06641, -0.01562], [-0.01562, 0,0625] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L275 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_potri(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Compute the sign and log of the determinant of a matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* is a square matrix. We compute: + * + * *sign* = *sign(det(A))* + * *logabsdet* = *log(abs(det(A)))* + * + * If *n>2*, *slogdet* is performed separately on the trailing two dimensions + * for all inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * .. note:: The gradient is not properly defined on sign, so the gradient of + * it is not backwarded. + * .. note:: No gradient is backwarded when A is non-invertible. Please see + * the docs of operator det for detail. + * + * Examples:: + * + * Single matrix signed log determinant + * A = `[ [2., 3.], [1., 4.] ] + * sign, logabsdet = slogdet(A) + * sign = [1.] + * logabsdet = [1.609438] + * + * Batch matrix signed log determinant + * A = `[ `[ [2., 3.], [1., 4.] ], + * `[ [1., 2.], [2., 4.] ], + * `[ [1., 2.], [4., 3.] ] ] + * sign, logabsdet = slogdet(A) + * sign = [1., 0., -1.] + * logabsdet = [1.609438, -inf, 1.609438] + * + * + * Defined in src/operator/tensor/la_op.cc:L1031 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_slogdet(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Compute the sign and log of the determinant of a matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* is a square matrix. We compute: + * + * *sign* = *sign(det(A))* + * *logabsdet* = *log(abs(det(A)))* + * + * If *n>2*, *slogdet* is performed separately on the trailing two dimensions + * for all inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * .. note:: The gradient is not properly defined on sign, so the gradient of + * it is not backwarded. + * .. note:: No gradient is backwarded when A is non-invertible. Please see + * the docs of operator det for detail. + * + * Examples:: + * + * Single matrix signed log determinant + * A = `[ [2., 3.], [1., 4.] ] + * sign, logabsdet = slogdet(A) + * sign = [1.] + * logabsdet = [1.609438] + * + * Batch matrix signed log determinant + * A = `[ `[ [2., 3.], [1., 4.] ], + * `[ [1., 2.], [2., 4.] 
], + * `[ [1., 2.], [4., 3.] ] ] + * sign, logabsdet = slogdet(A) + * sign = [1., 0., -1.] + * logabsdet = [1.609438, -inf, 1.609438] + * + * + * Defined in src/operator/tensor/la_op.cc:L1031 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_slogdet(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the sum of the logarithms of the diagonal elements of a square matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* must be square with positive diagonal entries. We sum the natural + * logarithms of the diagonal elements, the result has shape (1,). + * + * If *n>2*, *sumlogdiag* is performed separately on the trailing two dimensions for all + * inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix reduction + * A = `[ [1.0, 1.0], [1.0, 7.0] ] + * sumlogdiag(A) = [1.9459] + * + * Batch matrix reduction + * A = `[ `[ [1.0, 1.0], [1.0, 7.0] ], `[ [3.0, 0], [0, 17.0] ] ] + * sumlogdiag(A) = [1.9459, 3.9318] + * + * + * Defined in src/operator/tensor/la_op.cc:L445 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_sumlogdiag(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the sum of the logarithms of the diagonal elements of a square matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* must be square with positive diagonal entries. We sum the natural + * logarithms of the diagonal elements, the result has shape (1,). + * + * If *n>2*, *sumlogdiag* is performed separately on the trailing two dimensions for all + * inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix reduction + * A = `[ [1.0, 1.0], [1.0, 7.0] ] + * sumlogdiag(A) = [1.9459] + * + * Batch matrix reduction + * A = `[ `[ [1.0, 1.0], [1.0, 7.0] ], `[ [3.0, 0], [0, 17.0] ] ] + * sumlogdiag(A) = [1.9459, 3.9318] + * + * + * Defined in src/operator/tensor/la_op.cc:L445 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_sumlogdiag(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Multiplication of matrix with its transpose. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, the operator performs the BLAS3 function *syrk*: + * + * *out* = *alpha* \* *A* \* *A*\ :sup:`T` + * + * if *transpose=False*, or + * + * *out* = *alpha* \* *A*\ :sup:`T` \ \* *A* + * + * if *transpose=True*. + * + * If *n>2*, *syrk* is performed separately on the trailing two dimensions for all + * inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix multiply + * A = `[ [1., 2., 3.], [4., 5., 6.] ] + * syrk(A, alpha=1., transpose=False) + * = `[ [14., 32.], + * [32., 77.] ] + * syrk(A, alpha=1., transpose=True) + * = `[ [17., 22., 27.], + * [22., 29., 36.], + * [27., 36., 45.] ] + * + * Batch matrix multiply + * A = `[ `[ [1., 1.] ], `[ [0.1, 0.1] ] ] + * syrk(A, alpha=2., transpose=False) = `[ `[ [4.] ], `[ [0.04] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L730 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_syrk(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Multiplication of matrix with its transpose. 
+ * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, the operator performs the BLAS3 function *syrk*: + * + * *out* = *alpha* \* *A* \* *A*\ :sup:`T` + * + * if *transpose=False*, or + * + * *out* = *alpha* \* *A*\ :sup:`T` \ \* *A* + * + * if *transpose=True*. + * + * If *n>2*, *syrk* is performed separately on the trailing two dimensions for all + * inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix multiply + * A = `[ [1., 2., 3.], [4., 5., 6.] ] + * syrk(A, alpha=1., transpose=False) + * = `[ [14., 32.], + * [32., 77.] ] + * syrk(A, alpha=1., transpose=True) + * = `[ [17., 22., 27.], + * [22., 29., 36.], + * [27., 36., 45.] ] + * + * Batch matrix multiply + * A = `[ `[ [1., 1.] ], `[ [0.1, 0.1] ] ] + * syrk(A, alpha=2., transpose=False) = `[ `[ [4.] ], `[ [0.04] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L730 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_syrk(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Performs multiplication with a lower triangular matrix. + * Input are tensors *A*, *B*, each of dimension *n >= 2* and having the same shape + * on the leading *n-2* dimensions. + * + * If *n=2*, *A* must be triangular. The operator performs the BLAS3 function + * *trmm*: + * + * *out* = *alpha* \* *op*\ (*A*) \* *B* + * + * if *rightside=False*, or + * + * *out* = *alpha* \* *B* \* *op*\ (*A*) + * + * if *rightside=True*. Here, *alpha* is a scalar parameter, and *op()* is either the + * identity or the matrix transposition (depending on *transpose*). + * + * If *n>2*, *trmm* is performed separately on the trailing two dimensions for all inputs + * (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single triangular matrix multiply + * A = `[ [1.0, 0], [1.0, 1.0] ] + * B = `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ] + * trmm(A, B, alpha=2.0) = `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ] + * + * Batch triangular matrix multiply + * A = `[ `[ [1.0, 0], [1.0, 1.0] ], `[ [1.0, 0], [1.0, 1.0] ] ] + * B = `[ `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ], `[ [0.5, 0.5, 0.5], [0.5, 0.5, 0.5] ] ] + * trmm(A, B, alpha=2.0) = `[ `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ], + * `[ [1.0, 1.0, 1.0], [2.0, 2.0, 2.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L333 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_trmm(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Performs multiplication with a lower triangular matrix. + * Input are tensors *A*, *B*, each of dimension *n >= 2* and having the same shape + * on the leading *n-2* dimensions. + * + * If *n=2*, *A* must be triangular. The operator performs the BLAS3 function + * *trmm*: + * + * *out* = *alpha* \* *op*\ (*A*) \* *B* + * + * if *rightside=False*, or + * + * *out* = *alpha* \* *B* \* *op*\ (*A*) + * + * if *rightside=True*. Here, *alpha* is a scalar parameter, and *op()* is either the + * identity or the matrix transposition (depending on *transpose*). + * + * If *n>2*, *trmm* is performed separately on the trailing two dimensions for all inputs + * (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. 
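+ *
+ * As a Scala sketch (hyper-parameters in the ``kwargs`` map, tensors as positional
+ * arguments; the ``NDArray`` companion object entry point is an assumption)::
+ *
+ *     import org.apache.mxnet.{NDArray, Shape}
+ *
+ *     val a = NDArray.array(Array(1f, 0f, 1f, 1f), Shape(2, 2))   // lower triangular
+ *     val b = NDArray.ones(Shape(2, 3))
+ *     val out = NDArray.linalg_trmm(Map("alpha" -> 2.0))(a, b).head
+ *     // out = [[2, 2, 2], [4, 4, 4]]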
+ * + * Examples:: + * + * Single triangular matrix multiply + * A = `[ [1.0, 0], [1.0, 1.0] ] + * B = `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ] + * trmm(A, B, alpha=2.0) = `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ] + * + * Batch triangular matrix multiply + * A = `[ `[ [1.0, 0], [1.0, 1.0] ], `[ [1.0, 0], [1.0, 1.0] ] ] + * B = `[ `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ], `[ [0.5, 0.5, 0.5], [0.5, 0.5, 0.5] ] ] + * trmm(A, B, alpha=2.0) = `[ `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ], + * `[ [1.0, 1.0, 1.0], [2.0, 2.0, 2.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L333 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_trmm(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Solves matrix equation involving a lower triangular matrix. + * Input are tensors *A*, *B*, each of dimension *n >= 2* and having the same shape + * on the leading *n-2* dimensions. + * + * If *n=2*, *A* must be triangular. The operator performs the BLAS3 function + * *trsm*, solving for *out* in: + * + * *op*\ (*A*) \* *out* = *alpha* \* *B* + * + * if *rightside=False*, or + * + * *out* \* *op*\ (*A*) = *alpha* \* *B* + * + * if *rightside=True*. Here, *alpha* is a scalar parameter, and *op()* is either the + * identity or the matrix transposition (depending on *transpose*). + * + * If *n>2*, *trsm* is performed separately on the trailing two dimensions for all inputs + * (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix solve + * A = `[ [1.0, 0], [1.0, 1.0] ] + * B = `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ] + * trsm(A, B, alpha=0.5) = `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ] + * + * Batch matrix solve + * A = `[ `[ [1.0, 0], [1.0, 1.0] ], `[ [1.0, 0], [1.0, 1.0] ] ] + * B = `[ `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ], + * `[ [4.0, 4.0, 4.0], [8.0, 8.0, 8.0] ] ] + * trsm(A, B, alpha=0.5) = `[ `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ], + * `[ [2.0, 2.0, 2.0], [2.0, 2.0, 2.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L396 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_trsm(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Solves matrix equation involving a lower triangular matrix. + * Input are tensors *A*, *B*, each of dimension *n >= 2* and having the same shape + * on the leading *n-2* dimensions. + * + * If *n=2*, *A* must be triangular. The operator performs the BLAS3 function + * *trsm*, solving for *out* in: + * + * *op*\ (*A*) \* *out* = *alpha* \* *B* + * + * if *rightside=False*, or + * + * *out* \* *op*\ (*A*) = *alpha* \* *B* + * + * if *rightside=True*. Here, *alpha* is a scalar parameter, and *op()* is either the + * identity or the matrix transposition (depending on *transpose*). + * + * If *n>2*, *trsm* is performed separately on the trailing two dimensions for all inputs + * (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. 
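+ *
+ * A corresponding Scala sketch (same hedged conventions: ``kwargs`` for scalar
+ * parameters, positional tensors, and the ``NDArray`` companion object as entry point)::
+ *
+ *     import org.apache.mxnet.{NDArray, Shape}
+ *
+ *     val a = NDArray.array(Array(1f, 0f, 1f, 1f), Shape(2, 2))          // triangular A
+ *     val b = NDArray.array(Array(2f, 2f, 2f, 4f, 4f, 4f), Shape(2, 3))
+ *     val x = NDArray.linalg_trsm(Map("alpha" -> 0.5))(a, b).head        // [[1, 1, 1], [1, 1, 1]]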
+ * + * Examples:: + * + * Single matrix solve + * A = `[ [1.0, 0], [1.0, 1.0] ] + * B = `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ] + * trsm(A, B, alpha=0.5) = `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ] + * + * Batch matrix solve + * A = `[ `[ [1.0, 0], [1.0, 1.0] ], `[ [1.0, 0], [1.0, 1.0] ] ] + * B = `[ `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ], + * `[ [4.0, 4.0, 4.0], [8.0, 8.0, 8.0] ] ] + * trsm(A, B, alpha=0.5) = `[ `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ], + * `[ [2.0, 2.0, 2.0], [2.0, 2.0, 2.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L396 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def linalg_trsm(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise Natural logarithmic value of the input. + * + * The natural logarithm is logarithm in base *e*, so that ``log(exp(x)) = x`` + * + * The storage type of ``log`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L76 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def log(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise Natural logarithmic value of the input. + * + * The natural logarithm is logarithm in base *e*, so that ``log(exp(x)) = x`` + * + * The storage type of ``log`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L76 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def log(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise Base-10 logarithmic value of the input. + * + * ``10**log10(x) = x`` + * + * The storage type of ``log10`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L93 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def log10(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise Base-10 logarithmic value of the input. + * + * ``10**log10(x) = x`` + * + * The storage type of ``log10`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L93 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def log10(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise ``log(1 + x)`` value of the input. + * + * This function is more accurate than ``log(1 + x)`` for small ``x`` so that + * :math:`1+x\approx 1` + * + * The storage type of ``log1p`` output depends upon the input storage type: + * + * - log1p(default) = default + * - log1p(row_sparse) = row_sparse + * - log1p(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L206 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def log1p(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise ``log(1 + x)`` value of the input. 
+ * + * This function is more accurate than ``log(1 + x)`` for small ``x`` so that + * :math:`1+x\approx 1` + * + * The storage type of ``log1p`` output depends upon the input storage type: + * + * - log1p(default) = default + * - log1p(row_sparse) = row_sparse + * - log1p(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L206 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def log1p(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise Base-2 logarithmic value of the input. + * + * ``2**log2(x) = x`` + * + * The storage type of ``log2`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L105 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def log2(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise Base-2 logarithmic value of the input. + * + * ``2**log2(x) = x`` + * + * The storage type of ``log2`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L105 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def log2(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the log softmax of the input. + * This is equivalent to computing softmax followed by log. + * + * Examples:: + * + * >>> x = mx.nd.array([1, 2, .1]) + * >>> mx.nd.log_softmax(x).asnumpy() + * array([-1.41702998, -0.41702995, -2.31702995], dtype=float32) + * + * >>> x = mx.nd.array( `[ [1, 2, .1],[.1, 2, 1] ] ) + * >>> mx.nd.log_softmax(x, axis=0).asnumpy() + * array(`[ [-0.34115392, -0.69314718, -1.24115396], + * [-1.24115396, -0.69314718, -0.34115392] ], dtype=float32) + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def log_softmax(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the log softmax of the input. + * This is equivalent to computing softmax followed by log. + * + * Examples:: + * + * >>> x = mx.nd.array([1, 2, .1]) + * >>> mx.nd.log_softmax(x).asnumpy() + * array([-1.41702998, -0.41702995, -2.31702995], dtype=float32) + * + * >>> x = mx.nd.array( `[ [1, 2, .1],[.1, 2, 1] ] ) + * >>> mx.nd.log_softmax(x, axis=0).asnumpy() + * array(`[ [-0.34115392, -0.69314718, -1.24115396], + * [-1.24115396, -0.69314718, -0.34115392] ], dtype=float32) + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def log_softmax(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the result of logical NOT (!) function + * + * Example: + * logical_not([-2., 0., 1.]) = [0., 1., 0.] + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def logical_not(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the result of logical NOT (!) function + * + * Example: + * logical_not([-2., 0., 1.]) = [0., 1., 0.] + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def logical_not(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Make your own loss function in network construction. + * + * This operator accepts a customized loss function symbol as a terminal loss and + * the symbol should be an operator with no backward dependency. + * The output of this function is the gradient of loss with respect to the input data. 
+ * + * For example, if you are a making a cross entropy loss function. Assume ``out`` is the + * predicted output and ``label`` is the true label, then the cross entropy can be defined as:: + * + * cross_entropy = label * log(out) + (1 - label) * log(1 - out) + * loss = make_loss(cross_entropy) + * + * We will need to use ``make_loss`` when we are creating our own loss function or we want to + * combine multiple loss functions. Also we may want to stop some variables' gradients + * from backpropagation. See more detail in ``BlockGrad`` or ``stop_gradient``. + * + * The storage type of ``make_loss`` output depends upon the input storage type: + * + * - make_loss(default) = default + * - make_loss(row_sparse) = row_sparse + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L360 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def make_loss(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Make your own loss function in network construction. + * + * This operator accepts a customized loss function symbol as a terminal loss and + * the symbol should be an operator with no backward dependency. + * The output of this function is the gradient of loss with respect to the input data. + * + * For example, if you are a making a cross entropy loss function. Assume ``out`` is the + * predicted output and ``label`` is the true label, then the cross entropy can be defined as:: + * + * cross_entropy = label * log(out) + (1 - label) * log(1 - out) + * loss = make_loss(cross_entropy) + * + * We will need to use ``make_loss`` when we are creating our own loss function or we want to + * combine multiple loss functions. Also we may want to stop some variables' gradients + * from backpropagation. See more detail in ``BlockGrad`` or ``stop_gradient``. + * + * The storage type of ``make_loss`` output depends upon the input storage type: + * + * - make_loss(default) = default + * - make_loss(row_sparse) = row_sparse + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L360 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def make_loss(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the max of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L32 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def max(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the max of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L32 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def max(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the max of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L32 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def max_axis(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the max of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L32 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def max_axis(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the mean of array elements over given axes. 
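+ *
+ * For instance (Scala sketch; reaching the wrapper through the ``NDArray`` companion
+ * object and the ``axis`` keyword value are assumptions of the example)::
+ *
+ *     import org.apache.mxnet.{NDArray, Shape}
+ *
+ *     val x = NDArray.array(Array(1f, 2f, 3f, 4f, 5f, 6f), Shape(2, 3))
+ *     val m = NDArray.mean(Map("axis" -> 0))(x).head   // per-column mean: [2.5, 3.5, 4.5]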
+ * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L84 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def mean(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the mean of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L84 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def mean(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the min of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L47 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def min(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the min of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L47 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def min(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the min of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L47 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def min_axis(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the min of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L47 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def min_axis(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * + * Calculate the mean and variance of `data`. + * + * The mean and variance are calculated by aggregating the contents of data across axes. + * If x is 1-D and axes = [0] this is just the mean and variance of a vector. + * + * Example: + * + * x = `[ [1, 2, 3], [4, 5, 6] ] + * mean, var = moments(data=x, axes=[0]) + * mean = [2.5, 3.5, 4.5] + * var = [2.25, 2.25, 2.25] + * mean, var = moments(data=x, axes=[1]) + * mean = [2.0, 5.0] + * var = [0.66666667, 0.66666667] + * mean, var = moments(data=x, axis=[0, 1]) + * mean = [3.5] + * var = [2.9166667] + * + * + * + * Defined in src/operator/nn/moments.cc:L54 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def moments(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * + * Calculate the mean and variance of `data`. + * + * The mean and variance are calculated by aggregating the contents of data across axes. + * If x is 1-D and axes = [0] this is just the mean and variance of a vector. + * + * Example: + * + * x = `[ [1, 2, 3], [4, 5, 6] ] + * mean, var = moments(data=x, axes=[0]) + * mean = [2.5, 3.5, 4.5] + * var = [2.25, 2.25, 2.25] + * mean, var = moments(data=x, axes=[1]) + * mean = [2.0, 5.0] + * var = [0.66666667, 0.66666667] + * mean, var = moments(data=x, axis=[0, 1]) + * mean = [3.5] + * var = [2.9166667] + * + * + * + * Defined in src/operator/nn/moments.cc:L54 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def moments(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Mixed Precision version of Phase I of lamb update + * it performs the following operations and returns g:. + * + * Link to paper: https://arxiv.org/pdf/1904.00962.pdf + * + * .. 
math:: + * \begin{gather*} + * grad32 = grad(float16) * rescale_grad + * if (grad < -clip_gradient) + * then + * grad = -clip_gradient + * if (grad > clip_gradient) + * then + * grad = clip_gradient + * + * mean = beta1 * mean + (1 - beta1) * grad; + * variance = beta2 * variance + (1. - beta2) * grad ^ 2; + * + * if (bias_correction) + * then + * mean_hat = mean / (1. - beta1^t); + * var_hat = var / (1 - beta2^t); + * g = mean_hat / (var_hat^(1/2) + epsilon) + wd * weight32; + * else + * g = mean / (var_data^(1/2) + epsilon) + wd * weight32; + * \end{gather*} + * + * + * + * Defined in src/operator/optimizer_op.cc:L1033 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def mp_lamb_update_phase1(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Mixed Precision version of Phase I of lamb update + * it performs the following operations and returns g:. + * + * Link to paper: https://arxiv.org/pdf/1904.00962.pdf + * + * .. math:: + * \begin{gather*} + * grad32 = grad(float16) * rescale_grad + * if (grad < -clip_gradient) + * then + * grad = -clip_gradient + * if (grad > clip_gradient) + * then + * grad = clip_gradient + * + * mean = beta1 * mean + (1 - beta1) * grad; + * variance = beta2 * variance + (1. - beta2) * grad ^ 2; + * + * if (bias_correction) + * then + * mean_hat = mean / (1. - beta1^t); + * var_hat = var / (1 - beta2^t); + * g = mean_hat / (var_hat^(1/2) + epsilon) + wd * weight32; + * else + * g = mean / (var_data^(1/2) + epsilon) + wd * weight32; + * \end{gather*} + * + * + * + * Defined in src/operator/optimizer_op.cc:L1033 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def mp_lamb_update_phase1(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Mixed Precision version Phase II of lamb update + * it performs the following operations and updates grad. + * + * Link to paper: https://arxiv.org/pdf/1904.00962.pdf + * + * .. math:: + * \begin{gather*} + * if (lower_bound >= 0) + * then + * r1 = max(r1, lower_bound) + * if (upper_bound >= 0) + * then + * r1 = max(r1, upper_bound) + * + * if (r1 == 0 or r2 == 0) + * then + * lr = lr + * else + * lr = lr * (r1/r2) + * weight32 = weight32 - lr * g + * weight(float16) = weight32 + * \end{gather*} + * + * + * + * Defined in src/operator/optimizer_op.cc:L1075 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def mp_lamb_update_phase2(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Mixed Precision version Phase II of lamb update + * it performs the following operations and updates grad. + * + * Link to paper: https://arxiv.org/pdf/1904.00962.pdf + * + * .. math:: + * \begin{gather*} + * if (lower_bound >= 0) + * then + * r1 = max(r1, lower_bound) + * if (upper_bound >= 0) + * then + * r1 = max(r1, upper_bound) + * + * if (r1 == 0 or r2 == 0) + * then + * lr = lr + * else + * lr = lr * (r1/r2) + * weight32 = weight32 - lr * g + * weight(float16) = weight32 + * \end{gather*} + * + * + * + * Defined in src/operator/optimizer_op.cc:L1075 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def mp_lamb_update_phase2(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for multi-precision Nesterov Accelerated Gradient( NAG) optimizer. 
+ * + * + * Defined in src/operator/optimizer_op.cc:L745 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def mp_nag_mom_update(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for multi-precision Nesterov Accelerated Gradient( NAG) optimizer. + * + * + * Defined in src/operator/optimizer_op.cc:L745 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def mp_nag_mom_update(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Updater function for multi-precision sgd optimizer + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def mp_sgd_mom_update(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Updater function for multi-precision sgd optimizer + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def mp_sgd_mom_update(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Updater function for multi-precision sgd optimizer + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def mp_sgd_update(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Updater function for multi-precision sgd optimizer + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def mp_sgd_update(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Check if all the float numbers in all the arrays are finite (used for AMP) + * + * + * Defined in src/operator/contrib/all_finite.cc:L133 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def multi_all_finite(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Check if all the float numbers in all the arrays are finite (used for AMP) + * + * + * Defined in src/operator/contrib/all_finite.cc:L133 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def multi_all_finite(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Compute the LARS coefficients of multiple weights and grads from their sums of square" + * + * + * Defined in src/operator/contrib/multi_lars.cc:L37 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def multi_lars(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Compute the LARS coefficients of multiple weights and grads from their sums of square" + * + * + * Defined in src/operator/contrib/multi_lars.cc:L37 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def multi_lars(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Momentum update function for multi-precision Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. 
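+ *
+ *        A scalar illustration of the update rule above, with made-up values (plain Scala,
+ *        not a call into this operator)::
+ *
+ *          var (weight, v) = (1.0f, 0.0f)
+ *          val (momentum, learningRate, gradient) = (0.9f, 0.1f, 0.5f)
+ *          v = momentum * v - learningRate * gradient   // v becomes -0.05f
+ *          weight += v                                  // weight becomes 0.95f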
+ * + * + * + * Defined in src/operator/optimizer_op.cc:L472 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def multi_mp_sgd_mom_update(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Momentum update function for multi-precision Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * + * + * Defined in src/operator/optimizer_op.cc:L472 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def multi_mp_sgd_mom_update(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for multi-precision Stochastic Gradient Descent (SDG) optimizer. + * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * + * + * Defined in src/operator/optimizer_op.cc:L417 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def multi_mp_sgd_update(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for multi-precision Stochastic Gradient Descent (SDG) optimizer. + * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * + * + * Defined in src/operator/optimizer_op.cc:L417 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def multi_mp_sgd_update(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Momentum update function for Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * + * + * Defined in src/operator/optimizer_op.cc:L374 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def multi_sgd_mom_update(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Momentum update function for Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * + * + * Defined in src/operator/optimizer_op.cc:L374 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def multi_sgd_mom_update(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for Stochastic Gradient Descent (SDG) optimizer. 
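+ *
+ *        A scalar sketch of the rule quoted just below, with made-up values (``wd`` is the
+ *        weight-decay coefficient named in the rule)::
+ *
+ *          val (learningRate, wd) = (0.1f, 0.01f)
+ *          var weight = 1.0f
+ *          val gradient = 0.5f
+ *          weight -= learningRate * (gradient + wd * weight)   // 1.0f - 0.1f * 0.51f = 0.949f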
+ * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * + * + * Defined in src/operator/optimizer_op.cc:L329 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def multi_sgd_update(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for Stochastic Gradient Descent (SDG) optimizer. + * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * + * + * Defined in src/operator/optimizer_op.cc:L329 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def multi_sgd_update(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Compute the sums of squares of multiple arrays + * + * + * Defined in src/operator/contrib/multi_sum_sq.cc:L36 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def multi_sum_sq(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Compute the sums of squares of multiple arrays + * + * + * Defined in src/operator/contrib/multi_sum_sq.cc:L36 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def multi_sum_sq(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for Nesterov Accelerated Gradient( NAG) optimizer. + * It updates the weights using the following formula, + * + * .. math:: + * v_t = \gamma v_{t-1} + \eta * \nabla J(W_{t-1} - \gamma v_{t-1})\\ + * W_t = W_{t-1} - v_t + * + * Where + * :math:`\eta` is the learning rate of the optimizer + * :math:`\gamma` is the decay rate of the momentum estimate + * :math:`\v_t` is the update vector at time step `t` + * :math:`\W_t` is the weight vector at time step `t` + * + * + * + * Defined in src/operator/optimizer_op.cc:L726 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def nag_mom_update(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for Nesterov Accelerated Gradient( NAG) optimizer. + * It updates the weights using the following formula, + * + * .. math:: + * v_t = \gamma v_{t-1} + \eta * \nabla J(W_{t-1} - \gamma v_{t-1})\\ + * W_t = W_{t-1} - v_t + * + * Where + * :math:`\eta` is the learning rate of the optimizer + * :math:`\gamma` is the decay rate of the momentum estimate + * :math:`\v_t` is the update vector at time step `t` + * :math:`\W_t` is the weight vector at time step `t` + * + * + * + * Defined in src/operator/optimizer_op.cc:L726 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def nag_mom_update(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the product of array elements over given axes treating Not a Numbers (``NaN``) as one. + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_prod_value.cc:L47 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def nanprod(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the product of array elements over given axes treating Not a Numbers (``NaN``) as one. 
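+ *
+ *        A one-dimensional Scala illustration of the NaN-as-one convention described above::
+ *
+ *          val xs = Seq(2.0, Double.NaN, 3.0)
+ *          xs.map(v => if (v.isNaN) 1.0 else v).product   // 6.0 -- the NaN contributes a factor of 1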
+ * + * + * + * Defined in src/operator/tensor/broadcast_reduce_prod_value.cc:L47 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def nanprod(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the sum of array elements over given axes treating Not a Numbers (``NaN``) as zero. + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_sum_value.cc:L102 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def nansum(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the sum of array elements over given axes treating Not a Numbers (``NaN``) as zero. + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_sum_value.cc:L102 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def nansum(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Numerical negative of the argument, element-wise. + * + * The storage type of ``negative`` output depends upon the input storage type: + * + * - negative(default) = default + * - negative(row_sparse) = row_sparse + * - negative(csr) = csr + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def negative(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Numerical negative of the argument, element-wise. + * + * The storage type of ``negative`` output depends upon the input storage type: + * + * - negative(default) = default + * - negative(row_sparse) = row_sparse + * - negative(csr) = csr + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def negative(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the norm on an NDArray. + * + * This operator computes the norm on an NDArray with the specified axis, depending + * on the value of the ord parameter. By default, it computes the L2 norm on the entire + * array. Currently only ord=2 supports sparse ndarrays. + * + * Examples:: + * + * x = `[ `[ [1, 2], + * [3, 4] ], + * `[ [2, 2], + * [5, 6] ] ] + * + * norm(x, ord=2, axis=1) = `[ [3.1622777 4.472136 ] + * [5.3851647 6.3245554] ] + * + * norm(x, ord=1, axis=1) = `[ [4., 6.], + * [7., 8.] ] + * + * rsp = x.cast_storage('row_sparse') + * + * norm(rsp) = [5.47722578] + * + * csr = x.cast_storage('csr') + * + * norm(csr) = [5.47722578] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_norm_value.cc:L89 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def norm(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the norm on an NDArray. + * + * This operator computes the norm on an NDArray with the specified axis, depending + * on the value of the ord parameter. By default, it computes the L2 norm on the entire + * array. Currently only ord=2 supports sparse ndarrays. + * + * Examples:: + * + * x = `[ `[ [1, 2], + * [3, 4] ], + * `[ [2, 2], + * [5, 6] ] ] + * + * norm(x, ord=2, axis=1) = `[ [3.1622777 4.472136 ] + * [5.3851647 6.3245554] ] + * + * norm(x, ord=1, axis=1) = `[ [4., 6.], + * [7., 8.] 
] + * + * rsp = x.cast_storage('row_sparse') + * + * norm(rsp) = [5.47722578] + * + * csr = x.cast_storage('csr') + * + * norm(csr) = [5.47722578] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_norm_value.cc:L89 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def norm(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Draw random samples from a normal (Gaussian) distribution. + * + * .. note:: The existing alias ``normal`` is deprecated. + * + * Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale* + * (standard deviation). + * + * Example:: + * + * normal(loc=0, scale=1, shape=(2,2)) = `[ [ 1.89171135, -1.16881478], + * [-1.23474145, 1.55807114] ] + * + * + * Defined in src/operator/random/sample_op.cc:L113 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def normal(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Draw random samples from a normal (Gaussian) distribution. + * + * .. note:: The existing alias ``normal`` is deprecated. + * + * Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale* + * (standard deviation). + * + * Example:: + * + * normal(loc=0, scale=1, shape=(2,2)) = `[ [ 1.89171135, -1.16881478], + * [-1.23474145, 1.55807114] ] + * + * + * Defined in src/operator/random/sample_op.cc:L113 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def normal(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns a one-hot array. + * + * The locations represented by `indices` take value `on_value`, while all + * other locations take value `off_value`. + * + * `one_hot` operation with `indices` of shape ``(i0, i1)`` and `depth` of ``d`` would result + * in an output array of shape ``(i0, i1, d)`` with:: + * + * output[i,j,:] = off_value + * output[i,j,indices[i,j] ] = on_value + * + * Examples:: + * + * one_hot([1,0,2,0], 3) = `[ [ 0. 1. 0.] + * [ 1. 0. 0.] + * [ 0. 0. 1.] + * [ 1. 0. 0.] ] + * + * one_hot([1,0,2,0], 3, on_value=8, off_value=1, + * dtype='int32') = `[ [1 8 1] + * [8 1 1] + * [1 1 8] + * [8 1 1] ] + * + * one_hot(`[ [1,0],[1,0],[2,0] ], 3) = `[ `[ [ 0. 1. 0.] + * [ 1. 0. 0.] ] + * + * `[ [ 0. 1. 0.] + * [ 1. 0. 0.] ] + * + * `[ [ 0. 0. 1.] + * [ 1. 0. 0.] ] ] + * + * + * Defined in src/operator/tensor/indexing_op.cc:L824 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def one_hot(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns a one-hot array. + * + * The locations represented by `indices` take value `on_value`, while all + * other locations take value `off_value`. + * + * `one_hot` operation with `indices` of shape ``(i0, i1)`` and `depth` of ``d`` would result + * in an output array of shape ``(i0, i1, d)`` with:: + * + * output[i,j,:] = off_value + * output[i,j,indices[i,j] ] = on_value + * + * Examples:: + * + * one_hot([1,0,2,0], 3) = `[ [ 0. 1. 0.] + * [ 1. 0. 0.] + * [ 0. 0. 1.] + * [ 1. 0. 0.] ] + * + * one_hot([1,0,2,0], 3, on_value=8, off_value=1, + * dtype='int32') = `[ [1 8 1] + * [8 1 1] + * [1 1 8] + * [8 1 1] ] + * + * one_hot(`[ [1,0],[1,0],[2,0] ], 3) = `[ `[ [ 0. 1. 0.] + * [ 1. 0. 0.] ] + * + * `[ [ 0. 1. 0.] + * [ 1. 0. 0.] ] + * + * `[ [ 0. 0. 1.] + * [ 1. 0. 0.] 
] ] + * + * + * Defined in src/operator/tensor/indexing_op.cc:L824 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def one_hot(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Return an array of ones with the same shape and type + * as the input array. + * + * Examples:: + * + * x = `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * + * ones_like(x) = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def ones_like(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Return an array of ones with the same shape and type + * as the input array. + * + * Examples:: + * + * x = `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * + * ones_like(x) = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def ones_like(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Pads an input array with a constant or edge values of the array. + * + * .. note:: `Pad` is deprecated. Use `pad` instead. + * + * .. note:: Current implementation only supports 4D and 5D input arrays with padding applied + * only on axes 1, 2 and 3. Expects axes 4 and 5 in `pad_width` to be zero. + * + * This operation pads an input array with either a `constant_value` or edge values + * along each axis of the input array. The amount of padding is specified by `pad_width`. + * + * `pad_width` is a tuple of integer padding widths for each axis of the format + * ``(before_1, after_1, ... , before_N, after_N)``. The `pad_width` should be of length ``2*N`` + * where ``N`` is the number of dimensions of the array. + * + * For dimension ``N`` of the input array, ``before_N`` and ``after_N`` indicates how many values + * to add before and after the elements of the array along dimension ``N``. + * The widths of the higher two dimensions ``before_1``, ``after_1``, ``before_2``, + * ``after_2`` must be 0. + * + * Example:: + * + * x = `[ [`[ [ 1. 2. 3.] + * [ 4. 5. 6.] ] + * + * `[ [ 7. 8. 9.] + * [ 10. 11. 12.] ] ] + * + * + * `[ `[ [ 11. 12. 13.] + * [ 14. 15. 16.] ] + * + * `[ [ 17. 18. 19.] + * [ 20. 21. 22.] ] ] ] + * + * pad(x,mode="edge", pad_width=(0,0,0,0,1,1,1,1)) = + * + * `[ [`[ [ 1. 1. 2. 3. 3.] + * [ 1. 1. 2. 3. 3.] + * [ 4. 4. 5. 6. 6.] + * [ 4. 4. 5. 6. 6.] ] + * + * `[ [ 7. 7. 8. 9. 9.] + * [ 7. 7. 8. 9. 9.] + * [ 10. 10. 11. 12. 12.] + * [ 10. 10. 11. 12. 12.] ] ] + * + * + * `[ `[ [ 11. 11. 12. 13. 13.] + * [ 11. 11. 12. 13. 13.] + * [ 14. 14. 15. 16. 16.] + * [ 14. 14. 15. 16. 16.] ] + * + * `[ [ 17. 17. 18. 19. 19.] + * [ 17. 17. 18. 19. 19.] + * [ 20. 20. 21. 22. 22.] + * [ 20. 20. 21. 22. 22.] ] ] ] + * + * pad(x, mode="constant", constant_value=0, pad_width=(0,0,0,0,1,1,1,1)) = + * + * `[ [`[ [ 0. 0. 0. 0. 0.] + * [ 0. 1. 2. 3. 0.] + * [ 0. 4. 5. 6. 0.] + * [ 0. 0. 0. 0. 0.] ] + * + * `[ [ 0. 0. 0. 0. 0.] + * [ 0. 7. 8. 9. 0.] + * [ 0. 10. 11. 12. 0.] + * [ 0. 0. 0. 0. 0.] ] ] + * + * + * `[ `[ [ 0. 0. 0. 0. 0.] + * [ 0. 11. 12. 13. 0.] + * [ 0. 14. 15. 16. 0.] + * [ 0. 0. 0. 0. 0.] ] + * + * `[ [ 0. 0. 0. 0. 0.] + * [ 0. 17. 18. 19. 0.] + * [ 0. 20. 21. 22. 0.] + * [ 0. 0. 0. 0. 0.] ] ] ] + * + * + * + * + * Defined in src/operator/pad.cc:L766 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def pad(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Pads an input array with a constant or edge values of the array. + * + * .. 
note:: `Pad` is deprecated. Use `pad` instead. + * + * .. note:: Current implementation only supports 4D and 5D input arrays with padding applied + * only on axes 1, 2 and 3. Expects axes 4 and 5 in `pad_width` to be zero. + * + * This operation pads an input array with either a `constant_value` or edge values + * along each axis of the input array. The amount of padding is specified by `pad_width`. + * + * `pad_width` is a tuple of integer padding widths for each axis of the format + * ``(before_1, after_1, ... , before_N, after_N)``. The `pad_width` should be of length ``2*N`` + * where ``N`` is the number of dimensions of the array. + * + * For dimension ``N`` of the input array, ``before_N`` and ``after_N`` indicates how many values + * to add before and after the elements of the array along dimension ``N``. + * The widths of the higher two dimensions ``before_1``, ``after_1``, ``before_2``, + * ``after_2`` must be 0. + * + * Example:: + * + * x = `[ [`[ [ 1. 2. 3.] + * [ 4. 5. 6.] ] + * + * `[ [ 7. 8. 9.] + * [ 10. 11. 12.] ] ] + * + * + * `[ `[ [ 11. 12. 13.] + * [ 14. 15. 16.] ] + * + * `[ [ 17. 18. 19.] + * [ 20. 21. 22.] ] ] ] + * + * pad(x,mode="edge", pad_width=(0,0,0,0,1,1,1,1)) = + * + * `[ [`[ [ 1. 1. 2. 3. 3.] + * [ 1. 1. 2. 3. 3.] + * [ 4. 4. 5. 6. 6.] + * [ 4. 4. 5. 6. 6.] ] + * + * `[ [ 7. 7. 8. 9. 9.] + * [ 7. 7. 8. 9. 9.] + * [ 10. 10. 11. 12. 12.] + * [ 10. 10. 11. 12. 12.] ] ] + * + * + * `[ `[ [ 11. 11. 12. 13. 13.] + * [ 11. 11. 12. 13. 13.] + * [ 14. 14. 15. 16. 16.] + * [ 14. 14. 15. 16. 16.] ] + * + * `[ [ 17. 17. 18. 19. 19.] + * [ 17. 17. 18. 19. 19.] + * [ 20. 20. 21. 22. 22.] + * [ 20. 20. 21. 22. 22.] ] ] ] + * + * pad(x, mode="constant", constant_value=0, pad_width=(0,0,0,0,1,1,1,1)) = + * + * `[ [`[ [ 0. 0. 0. 0. 0.] + * [ 0. 1. 2. 3. 0.] + * [ 0. 4. 5. 6. 0.] + * [ 0. 0. 0. 0. 0.] ] + * + * `[ [ 0. 0. 0. 0. 0.] + * [ 0. 7. 8. 9. 0.] + * [ 0. 10. 11. 12. 0.] + * [ 0. 0. 0. 0. 0.] ] ] + * + * + * `[ `[ [ 0. 0. 0. 0. 0.] + * [ 0. 11. 12. 13. 0.] + * [ 0. 14. 15. 16. 0.] + * [ 0. 0. 0. 0. 0.] ] + * + * `[ [ 0. 0. 0. 0. 0.] + * [ 0. 17. 18. 19. 0.] + * [ 0. 20. 21. 22. 0.] + * [ 0. 0. 0. 0. 0.] ] ] ] + * + * + * + * + * Defined in src/operator/pad.cc:L766 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def pad(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Picks elements from an input array according to the input indices along the given axis. + * + * Given an input array of shape ``(d0, d1)`` and indices of shape ``(i0,)``, the result will be + * an output array of shape ``(i0,)`` with:: + * + * output[i] = input[i, indices[i] ] + * + * By default, if any index mentioned is too large, it is replaced by the index that addresses + * the last element along an axis (the `clip` mode). + * + * This function supports n-dimensional input and (n-1)-dimensional indices arrays. + * + * Examples:: + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] ] + * + * // picks elements with specified indices along axis 0 + * pick(x, y=[0,1], 0) = [ 1., 4.] + * + * // picks elements with specified indices along axis 1 + * pick(x, y=[0,1,0], 1) = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] ] + * + * // picks elements with specified indices along axis 1 using 'wrap' mode + * // to place indicies that would normally be out of bounds + * pick(x, y=[2,-1,-2], 1, mode='wrap') = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] 
] + * + * // picks elements with specified indices along axis 1 and dims are maintained + * pick(x,y, 1, keepdims=True) = `[ [ 2.], + * [ 3.], + * [ 6.] ] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L155 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def pick(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Picks elements from an input array according to the input indices along the given axis. + * + * Given an input array of shape ``(d0, d1)`` and indices of shape ``(i0,)``, the result will be + * an output array of shape ``(i0,)`` with:: + * + * output[i] = input[i, indices[i] ] + * + * By default, if any index mentioned is too large, it is replaced by the index that addresses + * the last element along an axis (the `clip` mode). + * + * This function supports n-dimensional input and (n-1)-dimensional indices arrays. + * + * Examples:: + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] ] + * + * // picks elements with specified indices along axis 0 + * pick(x, y=[0,1], 0) = [ 1., 4.] + * + * // picks elements with specified indices along axis 1 + * pick(x, y=[0,1,0], 1) = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] ] + * + * // picks elements with specified indices along axis 1 using 'wrap' mode + * // to place indicies that would normally be out of bounds + * pick(x, y=[2,-1,-2], 1, mode='wrap') = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] ] + * + * // picks elements with specified indices along axis 1 and dims are maintained + * pick(x,y, 1, keepdims=True) = `[ [ 2.], + * [ 3.], + * [ 6.] ] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L155 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def pick(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Momentum update function for multi-precision Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * + * + * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L200 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def preloaded_multi_mp_sgd_mom_update(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Momentum update function for multi-precision Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. 
+ * + * + * + * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L200 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def preloaded_multi_mp_sgd_mom_update(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for multi-precision Stochastic Gradient Descent (SDG) optimizer. + * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * + * + * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L140 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def preloaded_multi_mp_sgd_update(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for multi-precision Stochastic Gradient Descent (SDG) optimizer. + * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * + * + * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L140 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def preloaded_multi_mp_sgd_update(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Momentum update function for Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * + * + * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L91 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def preloaded_multi_sgd_mom_update(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Momentum update function for Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * + * + * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L91 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def preloaded_multi_sgd_mom_update(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for Stochastic Gradient Descent (SDG) optimizer. + * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * + * + * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L42 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def preloaded_multi_sgd_update(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for Stochastic Gradient Descent (SDG) optimizer. 
+ * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * + * + * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L42 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def preloaded_multi_sgd_update(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the product of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L31 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def prod(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the product of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L31 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def prod(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Converts each element of the input array from degrees to radians. + * + * .. math:: + * radians([0, 90, 180, 270, 360]) = [0, \pi/2, \pi, 3\pi/2, 2\pi] + * + * The storage type of ``radians`` output depends upon the input storage type: + * + * - radians(default) = default + * - radians(row_sparse) = row_sparse + * - radians(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L293 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def radians(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Converts each element of the input array from degrees to radians. + * + * .. math:: + * radians([0, 90, 180, 270, 360]) = [0, \pi/2, \pi, 3\pi/2, 2\pi] + * + * The storage type of ``radians`` output depends upon the input storage type: + * + * - radians(default) = default + * - radians(row_sparse) = row_sparse + * - radians(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L293 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def radians(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Draw random samples from an exponential distribution. + * + * Samples are distributed according to an exponential distribution parametrized by *lambda* (rate). + * + * Example:: + * + * exponential(lam=4, shape=(2,2)) = `[ [ 0.0097189 , 0.08999364], + * [ 0.04146638, 0.31715935] ] + * + * + * Defined in src/operator/random/sample_op.cc:L137 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_exponential(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Draw random samples from an exponential distribution. + * + * Samples are distributed according to an exponential distribution parametrized by *lambda* (rate). + * + * Example:: + * + * exponential(lam=4, shape=(2,2)) = `[ [ 0.0097189 , 0.08999364], + * [ 0.04146638, 0.31715935] ] + * + * + * Defined in src/operator/random/sample_op.cc:L137 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_exponential(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Draw random samples from a gamma distribution. + * + * Samples are distributed according to a gamma distribution parametrized by *alpha* (shape) and *beta* (scale). 
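+ *
+ *        For the shape/scale parametrization above, the draws have mean ``alpha * beta`` and
+ *        variance ``alpha * beta^2`` -- a quick sanity check for the example below::
+ *
+ *          val (alpha, beta) = (9.0, 0.5)
+ *          val mean     = alpha * beta          // 4.5
+ *          val variance = alpha * beta * beta   // 2.25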
+ * + * Example:: + * + * gamma(alpha=9, beta=0.5, shape=(2,2)) = `[ [ 7.10486984, 3.37695289], + * [ 3.91697288, 3.65933681] ] + * + * + * Defined in src/operator/random/sample_op.cc:L125 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_gamma(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Draw random samples from a gamma distribution. + * + * Samples are distributed according to a gamma distribution parametrized by *alpha* (shape) and *beta* (scale). + * + * Example:: + * + * gamma(alpha=9, beta=0.5, shape=(2,2)) = `[ [ 7.10486984, 3.37695289], + * [ 3.91697288, 3.65933681] ] + * + * + * Defined in src/operator/random/sample_op.cc:L125 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_gamma(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Draw random samples from a generalized negative binomial distribution. + * + * Samples are distributed according to a generalized negative binomial distribution parametrized by + * *mu* (mean) and *alpha* (dispersion). *alpha* is defined as *1/k* where *k* is the failure limit of the + * number of unsuccessful experiments (generalized to real numbers). + * Samples will always be returned as a floating point data type. + * + * Example:: + * + * generalized_negative_binomial(mu=2.0, alpha=0.3, shape=(2,2)) = `[ [ 2., 1.], + * [ 6., 4.] ] + * + * + * Defined in src/operator/random/sample_op.cc:L179 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_generalized_negative_binomial(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Draw random samples from a generalized negative binomial distribution. + * + * Samples are distributed according to a generalized negative binomial distribution parametrized by + * *mu* (mean) and *alpha* (dispersion). *alpha* is defined as *1/k* where *k* is the failure limit of the + * number of unsuccessful experiments (generalized to real numbers). + * Samples will always be returned as a floating point data type. + * + * Example:: + * + * generalized_negative_binomial(mu=2.0, alpha=0.3, shape=(2,2)) = `[ [ 2., 1.], + * [ 6., 4.] ] + * + * + * Defined in src/operator/random/sample_op.cc:L179 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_generalized_negative_binomial(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Draw random samples from a negative binomial distribution. + * + * Samples are distributed according to a negative binomial distribution parametrized by + * *k* (limit of unsuccessful experiments) and *p* (failure probability in each experiment). + * Samples will always be returned as a floating point data type. + * + * Example:: + * + * negative_binomial(k=3, p=0.4, shape=(2,2)) = `[ [ 4., 7.], + * [ 2., 5.] ] + * + * + * Defined in src/operator/random/sample_op.cc:L164 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_negative_binomial(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Draw random samples from a negative binomial distribution. + * + * Samples are distributed according to a negative binomial distribution parametrized by + * *k* (limit of unsuccessful experiments) and *p* (failure probability in each experiment). + * Samples will always be returned as a floating point data type. 
+ * + * Example:: + * + * negative_binomial(k=3, p=0.4, shape=(2,2)) = `[ [ 4., 7.], + * [ 2., 5.] ] + * + * + * Defined in src/operator/random/sample_op.cc:L164 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_negative_binomial(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Draw random samples from a normal (Gaussian) distribution. + * + * .. note:: The existing alias ``normal`` is deprecated. + * + * Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale* + * (standard deviation). + * + * Example:: + * + * normal(loc=0, scale=1, shape=(2,2)) = `[ [ 1.89171135, -1.16881478], + * [-1.23474145, 1.55807114] ] + * + * + * Defined in src/operator/random/sample_op.cc:L113 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_normal(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Draw random samples from a normal (Gaussian) distribution. + * + * .. note:: The existing alias ``normal`` is deprecated. + * + * Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale* + * (standard deviation). + * + * Example:: + * + * normal(loc=0, scale=1, shape=(2,2)) = `[ [ 1.89171135, -1.16881478], + * [-1.23474145, 1.55807114] ] + * + * + * Defined in src/operator/random/sample_op.cc:L113 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_normal(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * Dirichlet distributions with parameter *alpha*. + * + * The shape of *alpha* must match the leftmost subshape of *sample*. That is, *sample* + * can have the same shape as *alpha*, in which case the output contains one density per + * distribution, or *sample* can be a tensor of tensors with that shape, in which case + * the output is a tensor of densities such that the densities at index *i* in the output + * are given by the samples at index *i* in *sample* parameterized by the value of *alpha* + * at index *i*. + * + * Examples:: + * + * random_pdf_dirichlet(sample=`[ [1,2],[2,3],[3,4] ], alpha=[2.5, 2.5]) = + * [38.413498, 199.60245, 564.56085] + * + * sample = `[ `[ [1, 2, 3], [10, 20, 30], [100, 200, 300] ], + * `[ [0.1, 0.2, 0.3], [0.01, 0.02, 0.03], [0.001, 0.002, 0.003] ] ] + * + * random_pdf_dirichlet(sample=sample, alpha=[0.1, 0.4, 0.9]) = + * `[ [2.3257459e-02, 5.8420084e-04, 1.4674458e-05], + * [9.2589635e-01, 3.6860607e+01, 1.4674468e+03] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L315 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_pdf_dirichlet(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * Dirichlet distributions with parameter *alpha*. + * + * The shape of *alpha* must match the leftmost subshape of *sample*. That is, *sample* + * can have the same shape as *alpha*, in which case the output contains one density per + * distribution, or *sample* can be a tensor of tensors with that shape, in which case + * the output is a tensor of densities such that the densities at index *i* in the output + * are given by the samples at index *i* in *sample* parameterized by the value of *alpha* + * at index *i*. 
+ * + * Examples:: + * + * random_pdf_dirichlet(sample=`[ [1,2],[2,3],[3,4] ], alpha=[2.5, 2.5]) = + * [38.413498, 199.60245, 564.56085] + * + * sample = `[ `[ [1, 2, 3], [10, 20, 30], [100, 200, 300] ], + * `[ [0.1, 0.2, 0.3], [0.01, 0.02, 0.03], [0.001, 0.002, 0.003] ] ] + * + * random_pdf_dirichlet(sample=sample, alpha=[0.1, 0.4, 0.9]) = + * `[ [2.3257459e-02, 5.8420084e-04, 1.4674458e-05], + * [9.2589635e-01, 3.6860607e+01, 1.4674468e+03] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L315 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_pdf_dirichlet(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * exponential distributions with parameters *lam* (rate). + * + * The shape of *lam* must match the leftmost subshape of *sample*. That is, *sample* + * can have the same shape as *lam*, in which case the output contains one density per + * distribution, or *sample* can be a tensor of tensors with that shape, in which case + * the output is a tensor of densities such that the densities at index *i* in the output + * are given by the samples at index *i* in *sample* parameterized by the value of *lam* + * at index *i*. + * + * Examples:: + * + * random_pdf_exponential(sample=`[ [1, 2, 3] ], lam=[1]) = + * `[ [0.36787945, 0.13533528, 0.04978707] ] + * + * sample = `[ [1,2,3], + * [1,2,3], + * [1,2,3] ] + * + * random_pdf_exponential(sample=sample, lam=[1,0.5,0.25]) = + * `[ [0.36787945, 0.13533528, 0.04978707], + * [0.30326533, 0.18393973, 0.11156508], + * [0.1947002, 0.15163267, 0.11809164] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L304 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_pdf_exponential(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * exponential distributions with parameters *lam* (rate). + * + * The shape of *lam* must match the leftmost subshape of *sample*. That is, *sample* + * can have the same shape as *lam*, in which case the output contains one density per + * distribution, or *sample* can be a tensor of tensors with that shape, in which case + * the output is a tensor of densities such that the densities at index *i* in the output + * are given by the samples at index *i* in *sample* parameterized by the value of *lam* + * at index *i*. + * + * Examples:: + * + * random_pdf_exponential(sample=`[ [1, 2, 3] ], lam=[1]) = + * `[ [0.36787945, 0.13533528, 0.04978707] ] + * + * sample = `[ [1,2,3], + * [1,2,3], + * [1,2,3] ] + * + * random_pdf_exponential(sample=sample, lam=[1,0.5,0.25]) = + * `[ [0.36787945, 0.13533528, 0.04978707], + * [0.30326533, 0.18393973, 0.11156508], + * [0.1947002, 0.15163267, 0.11809164] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L304 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_pdf_exponential(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * gamma distributions with parameters *alpha* (shape) and *beta* (rate). + * + * *alpha* and *beta* must have the same shape, which must match the leftmost subshape + * of *sample*. 
That is, *sample* can have the same shape as *alpha* and *beta*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *alpha* and *beta* at index *i*. + * + * Examples:: + * + * random_pdf_gamma(sample=`[ [1,2,3,4,5] ], alpha=[5], beta=[1]) = + * `[ [0.01532831, 0.09022352, 0.16803136, 0.19536681, 0.17546739] ] + * + * sample = `[ [1, 2, 3, 4, 5], + * [2, 3, 4, 5, 6], + * [3, 4, 5, 6, 7] ] + * + * random_pdf_gamma(sample=sample, alpha=[5,6,7], beta=[1,1,1]) = + * `[ [0.01532831, 0.09022352, 0.16803136, 0.19536681, 0.17546739], + * [0.03608941, 0.10081882, 0.15629345, 0.17546739, 0.16062315], + * [0.05040941, 0.10419563, 0.14622283, 0.16062315, 0.14900276] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L301 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_pdf_gamma(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * gamma distributions with parameters *alpha* (shape) and *beta* (rate). + * + * *alpha* and *beta* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *alpha* and *beta*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *alpha* and *beta* at index *i*. + * + * Examples:: + * + * random_pdf_gamma(sample=`[ [1,2,3,4,5] ], alpha=[5], beta=[1]) = + * `[ [0.01532831, 0.09022352, 0.16803136, 0.19536681, 0.17546739] ] + * + * sample = `[ [1, 2, 3, 4, 5], + * [2, 3, 4, 5, 6], + * [3, 4, 5, 6, 7] ] + * + * random_pdf_gamma(sample=sample, alpha=[5,6,7], beta=[1,1,1]) = + * `[ [0.01532831, 0.09022352, 0.16803136, 0.19536681, 0.17546739], + * [0.03608941, 0.10081882, 0.15629345, 0.17546739, 0.16062315], + * [0.05040941, 0.10419563, 0.14622283, 0.16062315, 0.14900276] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L301 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_pdf_gamma(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * generalized negative binomial distributions with parameters *mu* (mean) + * and *alpha* (dispersion). This can be understood as a reparameterization of + * the negative binomial, where *k* = *1 / alpha* and *p* = *1 / (mu \* alpha + 1)*. + * + * *mu* and *alpha* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *mu* and *alpha*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *mu* and *alpha* at index *i*. 
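+ *
+ *        The reparameterization stated above, applied in Scala to the first example below
+ *        (alpha=1, mu=1)::
+ *
+ *          val (alpha, mu) = (1.0, 1.0)
+ *          val k = 1.0 / alpha                 // 1.0
+ *          val p = 1.0 / (mu * alpha + 1.0)    // 0.5
+ *
+ *        These are exactly the (k, p) used in the ``random_pdf_negative_binomial`` example
+ *        further below, which is why both examples list the densities 0.25, 0.125, 0.0625, 0.03125.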
+ * + * Examples:: + * + * random_pdf_generalized_negative_binomial(sample=`[ [1, 2, 3, 4] ], alpha=[1], mu=[1]) = + * `[ [0.25, 0.125, 0.0625, 0.03125] ] + * + * sample = `[ [1,2,3,4], + * [1,2,3,4] ] + * random_pdf_generalized_negative_binomial(sample=sample, alpha=[1, 0.6666], mu=[1, 1.5]) = + * `[ [0.25, 0.125, 0.0625, 0.03125 ], + * [0.26517063, 0.16573331, 0.09667706, 0.05437994] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L311 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_pdf_generalized_negative_binomial(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * generalized negative binomial distributions with parameters *mu* (mean) + * and *alpha* (dispersion). This can be understood as a reparameterization of + * the negative binomial, where *k* = *1 / alpha* and *p* = *1 / (mu \* alpha + 1)*. + * + * *mu* and *alpha* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *mu* and *alpha*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *mu* and *alpha* at index *i*. + * + * Examples:: + * + * random_pdf_generalized_negative_binomial(sample=`[ [1, 2, 3, 4] ], alpha=[1], mu=[1]) = + * `[ [0.25, 0.125, 0.0625, 0.03125] ] + * + * sample = `[ [1,2,3,4], + * [1,2,3,4] ] + * random_pdf_generalized_negative_binomial(sample=sample, alpha=[1, 0.6666], mu=[1, 1.5]) = + * `[ [0.25, 0.125, 0.0625, 0.03125 ], + * [0.26517063, 0.16573331, 0.09667706, 0.05437994] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L311 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_pdf_generalized_negative_binomial(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the value of the PDF of samples of + * negative binomial distributions with parameters *k* (failure limit) and *p* (failure probability). + * + * *k* and *p* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *k* and *p*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *k* and *p* at index *i*. 
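+ *
+ *        For integer *k* the densities are the usual negative-binomial mass function; the small
+ *        Scala helper below (an illustration only -- the operator itself also accepts
+ *        real-valued *k*) reproduces the k=1, p=0.5 example that follows::
+ *
+ *          def negBinPmf(x: Int, k: Int, p: Double): Double = {
+ *            def choose(n: Int, r: Int): Double =
+ *              (1 to r).map(i => (n - r + i).toDouble / i).product
+ *            choose(x + k - 1, x) * math.pow(p, k) * math.pow(1 - p, x)
+ *          }
+ *          negBinPmf(1, 1, 0.5)   // 0.25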
+ * + * Examples:: + * + * random_pdf_negative_binomial(sample=`[ [1,2,3,4] ], k=[1], p=a[0.5]) = + * `[ [0.25, 0.125, 0.0625, 0.03125] ] + * + * # Note that k may be real-valued + * sample = `[ [1,2,3,4], + * [1,2,3,4] ] + * random_pdf_negative_binomial(sample=sample, k=[1, 1.5], p=[0.5, 0.5]) = + * `[ [0.25, 0.125, 0.0625, 0.03125 ], + * [0.26516506, 0.16572815, 0.09667476, 0.05437956] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L308 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_pdf_negative_binomial(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the value of the PDF of samples of + * negative binomial distributions with parameters *k* (failure limit) and *p* (failure probability). + * + * *k* and *p* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *k* and *p*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *k* and *p* at index *i*. + * + * Examples:: + * + * random_pdf_negative_binomial(sample=`[ [1,2,3,4] ], k=[1], p=a[0.5]) = + * `[ [0.25, 0.125, 0.0625, 0.03125] ] + * + * # Note that k may be real-valued + * sample = `[ [1,2,3,4], + * [1,2,3,4] ] + * random_pdf_negative_binomial(sample=sample, k=[1, 1.5], p=[0.5, 0.5]) = + * `[ [0.25, 0.125, 0.0625, 0.03125 ], + * [0.26516506, 0.16572815, 0.09667476, 0.05437956] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L308 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_pdf_negative_binomial(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * normal distributions with parameters *mu* (mean) and *sigma* (standard deviation). + * + * *mu* and *sigma* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *mu* and *sigma*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *mu* and *sigma* at index *i*. + * + * Examples:: + * + * sample = `[ [-2, -1, 0, 1, 2] ] + * random_pdf_normal(sample=sample, mu=[0], sigma=[1]) = + * `[ [0.05399097, 0.24197073, 0.3989423, 0.24197073, 0.05399097] ] + * + * random_pdf_normal(sample=sample*2, mu=[0,0], sigma=[1,2]) = + * `[ [0.05399097, 0.24197073, 0.3989423, 0.24197073, 0.05399097], + * [0.12098537, 0.17603266, 0.19947115, 0.17603266, 0.12098537] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L299 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_pdf_normal(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * normal distributions with parameters *mu* (mean) and *sigma* (standard deviation). + * + * *mu* and *sigma* must have the same shape, which must match the leftmost subshape + * of *sample*. 
That is, *sample* can have the same shape as *mu* and *sigma*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *mu* and *sigma* at index *i*. + * + * Examples:: + * + * sample = `[ [-2, -1, 0, 1, 2] ] + * random_pdf_normal(sample=sample, mu=[0], sigma=[1]) = + * `[ [0.05399097, 0.24197073, 0.3989423, 0.24197073, 0.05399097] ] + * + * random_pdf_normal(sample=sample*2, mu=[0,0], sigma=[1,2]) = + * `[ [0.05399097, 0.24197073, 0.3989423, 0.24197073, 0.05399097], + * [0.12098537, 0.17603266, 0.19947115, 0.17603266, 0.12098537] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L299 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_pdf_normal(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * Poisson distributions with parameters *lam* (rate). + * + * The shape of *lam* must match the leftmost subshape of *sample*. That is, *sample* + * can have the same shape as *lam*, in which case the output contains one density per + * distribution, or *sample* can be a tensor of tensors with that shape, in which case + * the output is a tensor of densities such that the densities at index *i* in the output + * are given by the samples at index *i* in *sample* parameterized by the value of *lam* + * at index *i*. + * + * Examples:: + * + * random_pdf_poisson(sample=`[ [0,1,2,3] ], lam=[1]) = + * `[ [0.36787945, 0.36787945, 0.18393973, 0.06131324] ] + * + * sample = `[ [0,1,2,3], + * [0,1,2,3], + * [0,1,2,3] ] + * + * random_pdf_poisson(sample=sample, lam=[1,2,3]) = + * `[ [0.36787945, 0.36787945, 0.18393973, 0.06131324], + * [0.13533528, 0.27067056, 0.27067056, 0.18044704], + * [0.04978707, 0.14936121, 0.22404182, 0.22404182] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L306 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_pdf_poisson(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * Poisson distributions with parameters *lam* (rate). + * + * The shape of *lam* must match the leftmost subshape of *sample*. That is, *sample* + * can have the same shape as *lam*, in which case the output contains one density per + * distribution, or *sample* can be a tensor of tensors with that shape, in which case + * the output is a tensor of densities such that the densities at index *i* in the output + * are given by the samples at index *i* in *sample* parameterized by the value of *lam* + * at index *i*. 
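+ *
+ *        The densities follow the Poisson mass function ``lam^x * exp(-lam) / x!``; a small
+ *        Scala check against the first entry of the examples below::
+ *
+ *          def poissonPmf(x: Int, lam: Double): Double = {
+ *            val xFactorial = (1 to x).map(_.toDouble).product   // empty product = 1 when x = 0
+ *            math.pow(lam, x) * math.exp(-lam) / xFactorial
+ *          }
+ *          poissonPmf(0, 1.0)   // ~= 0.36787945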
+ * + * Examples:: + * + * random_pdf_poisson(sample=`[ [0,1,2,3] ], lam=[1]) = + * `[ [0.36787945, 0.36787945, 0.18393973, 0.06131324] ] + * + * sample = `[ [0,1,2,3], + * [0,1,2,3], + * [0,1,2,3] ] + * + * random_pdf_poisson(sample=sample, lam=[1,2,3]) = + * `[ [0.36787945, 0.36787945, 0.18393973, 0.06131324], + * [0.13533528, 0.27067056, 0.27067056, 0.18044704], + * [0.04978707, 0.14936121, 0.22404182, 0.22404182] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L306 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_pdf_poisson(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * uniform distributions on the intervals given by *[low,high)*. + * + * *low* and *high* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *low* and *high*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *low* and *high* at index *i*. + * + * Examples:: + * + * random_pdf_uniform(sample=`[ [1,2,3,4] ], low=[0], high=[10]) = [0.1, 0.1, 0.1, 0.1] + * + * sample = `[ `[ [1, 2, 3], + * [1, 2, 3] ], + * `[ [1, 2, 3], + * [1, 2, 3] ] ] + * low = `[ [0, 0], + * [0, 0] ] + * high = `[ [ 5, 10], + * [15, 20] ] + * random_pdf_uniform(sample=sample, low=low, high=high) = + * `[ `[ [0.2, 0.2, 0.2 ], + * [0.1, 0.1, 0.1 ] ], + * `[ [0.06667, 0.06667, 0.06667], + * [0.05, 0.05, 0.05 ] ] ] + * + * + * + * Defined in src/operator/random/pdf_op.cc:L297 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_pdf_uniform(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * uniform distributions on the intervals given by *[low,high)*. + * + * *low* and *high* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *low* and *high*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *low* and *high* at index *i*. + * + * Examples:: + * + * random_pdf_uniform(sample=`[ [1,2,3,4] ], low=[0], high=[10]) = [0.1, 0.1, 0.1, 0.1] + * + * sample = `[ `[ [1, 2, 3], + * [1, 2, 3] ], + * `[ [1, 2, 3], + * [1, 2, 3] ] ] + * low = `[ [0, 0], + * [0, 0] ] + * high = `[ [ 5, 10], + * [15, 20] ] + * random_pdf_uniform(sample=sample, low=low, high=high) = + * `[ `[ [0.2, 0.2, 0.2 ], + * [0.1, 0.1, 0.1 ] ], + * `[ [0.06667, 0.06667, 0.06667], + * [0.05, 0.05, 0.05 ] ] ] + * + * + * + * Defined in src/operator/random/pdf_op.cc:L297 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_pdf_uniform(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Draw random samples from a Poisson distribution. + * + * Samples are distributed according to a Poisson distribution parametrized by *lambda* (rate). + * Samples will always be returned as a floating point data type. 
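+ *
+ *        For reference, one classical way to draw a single Poisson(lam) sample on the CPU is
+ *        Knuth's product-of-uniforms method -- shown here only as a sketch of the distribution
+ *        being targeted, not as MXNet's actual implementation::
+ *
+ *          def drawPoisson(lam: Double, rng: scala.util.Random): Int = {
+ *            val limit = math.exp(-lam)
+ *            var (k, p) = (0, 1.0)
+ *            do { k += 1; p *= rng.nextDouble() } while (p > limit)
+ *            k - 1
+ *          }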
+ * + * Example:: + * + * poisson(lam=4, shape=(2,2)) = `[ [ 5., 2.], + * [ 4., 6.] ] + * + * + * Defined in src/operator/random/sample_op.cc:L150 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_poisson(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Draw random samples from a Poisson distribution. + * + * Samples are distributed according to a Poisson distribution parametrized by *lambda* (rate). + * Samples will always be returned as a floating point data type. + * + * Example:: + * + * poisson(lam=4, shape=(2,2)) = `[ [ 5., 2.], + * [ 4., 6.] ] + * + * + * Defined in src/operator/random/sample_op.cc:L150 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_poisson(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Draw random samples from a discrete uniform distribution. + * + * Samples are uniformly distributed over the half-open interval *[low, high)* + * (includes *low*, but excludes *high*). + * + * Example:: + * + * randint(low=0, high=5, shape=(2,2)) = `[ [ 0, 2], + * [ 3, 1] ] + * + * + * + * Defined in src/operator/random/sample_op.cc:L194 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_randint(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Draw random samples from a discrete uniform distribution. + * + * Samples are uniformly distributed over the half-open interval *[low, high)* + * (includes *low*, but excludes *high*). + * + * Example:: + * + * randint(low=0, high=5, shape=(2,2)) = `[ [ 0, 2], + * [ 3, 1] ] + * + * + * + * Defined in src/operator/random/sample_op.cc:L194 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_randint(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Draw random samples from a uniform distribution. + * + * .. note:: The existing alias ``uniform`` is deprecated. + * + * Samples are uniformly distributed over the half-open interval *[low, high)* + * (includes *low*, but excludes *high*). + * + * Example:: + * + * uniform(low=0, high=1, shape=(2,2)) = `[ [ 0.60276335, 0.85794562], + * [ 0.54488319, 0.84725171] ] + * + * + * + * Defined in src/operator/random/sample_op.cc:L96 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_uniform(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Draw random samples from a uniform distribution. + * + * .. note:: The existing alias ``uniform`` is deprecated. + * + * Samples are uniformly distributed over the half-open interval *[low, high)* + * (includes *low*, but excludes *high*). + * + * Example:: + * + * uniform(low=0, high=1, shape=(2,2)) = `[ [ 0.60276335, 0.85794562], + * [ 0.54488319, 0.84725171] ] + * + * + * + * Defined in src/operator/random/sample_op.cc:L96 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def random_uniform(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Converts a batch of index arrays into an array of flat indices. The operator follows numpy conventions so a single multi index is given by a column of the input matrix. The leading dimension may be left unspecified by using -1 as placeholder. 
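+ *
+ * A hedged Scala-side sketch of the keyword-style overload above (the `NDArray` receiver
+ * and the `indices` handle are illustrative assumptions only)::
+ *
+ *    // converts the 2-D multi-indices in `indices` into flat indices for a (7,6) array
+ *    val flat = NDArray.ravel_multi_index(Map("data" -> indices, "shape" -> "(7,6)"))()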
+ * + * Examples:: + * + * A = `[ [3,6,6],[4,5,1] ] + * ravel(A, shape=(7,6)) = [22,41,37] + * ravel(A, shape=(-1,6)) = [22,41,37] + * + * + * + * Defined in src/operator/tensor/ravel.cc:L42 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def ravel_multi_index(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Converts a batch of index arrays into an array of flat indices. The operator follows numpy conventions so a single multi index is given by a column of the input matrix. The leading dimension may be left unspecified by using -1 as placeholder. + * + * Examples:: + * + * A = `[ [3,6,6],[4,5,1] ] + * ravel(A, shape=(7,6)) = [22,41,37] + * ravel(A, shape=(-1,6)) = [22,41,37] + * + * + * + * Defined in src/operator/tensor/ravel.cc:L42 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def ravel_multi_index(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise inverse cube-root value of the input. + * + * .. math:: + * rcbrt(x) = 1/\sqrt[3]{x} + * + * Example:: + * + * rcbrt([1,8,-125]) = [1.0, 0.5, -0.2] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L269 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def rcbrt(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise inverse cube-root value of the input. + * + * .. math:: + * rcbrt(x) = 1/\sqrt[3]{x} + * + * Example:: + * + * rcbrt([1,8,-125]) = [1.0, 0.5, -0.2] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L269 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def rcbrt(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the reciprocal of the argument, element-wise. + * + * Calculates 1/x. + * + * Example:: + * + * reciprocal([-2, 1, 3, 1.6, 0.2]) = [-0.5, 1.0, 0.33333334, 0.625, 5.0] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L42 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def reciprocal(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the reciprocal of the argument, element-wise. + * + * Calculates 1/x. + * + * Example:: + * + * reciprocal([-2, 1, 3, 1.6, 0.2]) = [-0.5, 1.0, 0.33333334, 0.625, 5.0] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L42 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def reciprocal(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes rectified linear activation. + * + * .. math:: + * max(features, 0) + * + * The storage type of ``relu`` output depends upon the input storage type: + * + * - relu(default) = default + * - relu(row_sparse) = row_sparse + * - relu(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L85 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def relu(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes rectified linear activation. + * + * .. 
math:: + * max(features, 0) + * + * The storage type of ``relu`` output depends upon the input storage type: + * + * - relu(default) = default + * - relu(row_sparse) = row_sparse + * - relu(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L85 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def relu(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Repeats elements of an array. + * By default, ``repeat`` flattens the input array into 1-D and then repeats the + * elements:: + * x = `[ [ 1, 2], + * [ 3, 4] ] + * repeat(x, repeats=2) = [ 1., 1., 2., 2., 3., 3., 4., 4.] + * The parameter ``axis`` specifies the axis along which to perform repeat:: + * repeat(x, repeats=2, axis=1) = `[ [ 1., 1., 2., 2.], + * [ 3., 3., 4., 4.] ] + * repeat(x, repeats=2, axis=0) = `[ [ 1., 2.], + * [ 1., 2.], + * [ 3., 4.], + * [ 3., 4.] ] + * repeat(x, repeats=2, axis=-1) = `[ [ 1., 1., 2., 2.], + * [ 3., 3., 4., 4.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L744 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def repeat(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Repeats elements of an array. + * By default, ``repeat`` flattens the input array into 1-D and then repeats the + * elements:: + * x = `[ [ 1, 2], + * [ 3, 4] ] + * repeat(x, repeats=2) = [ 1., 1., 2., 2., 3., 3., 4., 4.] + * The parameter ``axis`` specifies the axis along which to perform repeat:: + * repeat(x, repeats=2, axis=1) = `[ [ 1., 1., 2., 2.], + * [ 3., 3., 4., 4.] ] + * repeat(x, repeats=2, axis=0) = `[ [ 1., 2.], + * [ 1., 2.], + * [ 3., 4.], + * [ 3., 4.] ] + * repeat(x, repeats=2, axis=-1) = `[ [ 1., 1., 2., 2.], + * [ 3., 3., 4., 4.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L744 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def repeat(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Set to zero multiple arrays + * + * + * Defined in src/operator/contrib/reset_arrays.cc:L36 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def reset_arrays(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Set to zero multiple arrays + * + * + * Defined in src/operator/contrib/reset_arrays.cc:L36 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def reset_arrays(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Reshapes the input array. + * .. note:: ``Reshape`` is deprecated, use ``reshape`` + * Given an array and a shape, this function returns a copy of the array in the new shape. + * The shape is a tuple of integers such as (2,3,4). The size of the new shape should be same as the size of the input array. + * Example:: + * reshape([1,2,3,4], shape=(2,2)) = `[ [1,2], [3,4] ] + * Some dimensions of the shape can take special values from the set {0, -1, -2, -3, -4}. The significance of each is explained below: + * - ``0`` copy this dimension from the input to the output shape. + * Example:: + * - input shape = (2,3,4), shape = (4,0,2), output shape = (4,3,2) + * - input shape = (2,3,4), shape = (2,0,0), output shape = (2,3,4) + * - ``-1`` infers the dimension of the output shape by using the remainder of the input dimensions + * keeping the size of the new array same as that of the input array. + * At most one dimension of shape can be -1. 
+ * Example:: + * - input shape = (2,3,4), shape = (6,1,-1), output shape = (6,1,4) + * - input shape = (2,3,4), shape = (3,-1,8), output shape = (3,1,8) + * - input shape = (2,3,4), shape=(-1,), output shape = (24,) + * - ``-2`` copy all/remainder of the input dimensions to the output shape. + * Example:: + * - input shape = (2,3,4), shape = (-2,), output shape = (2,3,4) + * - input shape = (2,3,4), shape = (2,-2), output shape = (2,3,4) + * - input shape = (2,3,4), shape = (-2,1,1), output shape = (2,3,4,1,1) + * - ``-3`` use the product of two consecutive dimensions of the input shape as the output dimension. + * Example:: + * - input shape = (2,3,4), shape = (-3,4), output shape = (6,4) + * - input shape = (2,3,4,5), shape = (-3,-3), output shape = (6,20) + * - input shape = (2,3,4), shape = (0,-3), output shape = (2,12) + * - input shape = (2,3,4), shape = (-3,-2), output shape = (6,4) + * - ``-4`` split one dimension of the input into two dimensions passed subsequent to -4 in shape (can contain -1). + * Example:: + * - input shape = (2,3,4), shape = (-4,1,2,-2), output shape =(1,2,3,4) + * - input shape = (2,3,4), shape = (2,-4,-1,3,-2), output shape = (2,1,3,4) + * If the argument `reverse` is set to 1, then the special values are inferred from right to left. + * Example:: + * - without reverse=1, for input shape = (10,5,4), shape = (-1,0), output shape would be (40,5) + * - with reverse=1, output shape will be (50,4). + * + * + * Defined in src/operator/tensor/matrix_op.cc:L175 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def reshape(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Reshapes the input array. + * .. note:: ``Reshape`` is deprecated, use ``reshape`` + * Given an array and a shape, this function returns a copy of the array in the new shape. + * The shape is a tuple of integers such as (2,3,4). The size of the new shape should be same as the size of the input array. + * Example:: + * reshape([1,2,3,4], shape=(2,2)) = `[ [1,2], [3,4] ] + * Some dimensions of the shape can take special values from the set {0, -1, -2, -3, -4}. The significance of each is explained below: + * - ``0`` copy this dimension from the input to the output shape. + * Example:: + * - input shape = (2,3,4), shape = (4,0,2), output shape = (4,3,2) + * - input shape = (2,3,4), shape = (2,0,0), output shape = (2,3,4) + * - ``-1`` infers the dimension of the output shape by using the remainder of the input dimensions + * keeping the size of the new array same as that of the input array. + * At most one dimension of shape can be -1. + * Example:: + * - input shape = (2,3,4), shape = (6,1,-1), output shape = (6,1,4) + * - input shape = (2,3,4), shape = (3,-1,8), output shape = (3,1,8) + * - input shape = (2,3,4), shape=(-1,), output shape = (24,) + * - ``-2`` copy all/remainder of the input dimensions to the output shape. + * Example:: + * - input shape = (2,3,4), shape = (-2,), output shape = (2,3,4) + * - input shape = (2,3,4), shape = (2,-2), output shape = (2,3,4) + * - input shape = (2,3,4), shape = (-2,1,1), output shape = (2,3,4,1,1) + * - ``-3`` use the product of two consecutive dimensions of the input shape as the output dimension. 
+ * Example:: + * - input shape = (2,3,4), shape = (-3,4), output shape = (6,4) + * - input shape = (2,3,4,5), shape = (-3,-3), output shape = (6,20) + * - input shape = (2,3,4), shape = (0,-3), output shape = (2,12) + * - input shape = (2,3,4), shape = (-3,-2), output shape = (6,4) + * - ``-4`` split one dimension of the input into two dimensions passed subsequent to -4 in shape (can contain -1). + * Example:: + * - input shape = (2,3,4), shape = (-4,1,2,-2), output shape =(1,2,3,4) + * - input shape = (2,3,4), shape = (2,-4,-1,3,-2), output shape = (2,1,3,4) + * If the argument `reverse` is set to 1, then the special values are inferred from right to left. + * Example:: + * - without reverse=1, for input shape = (10,5,4), shape = (-1,0), output shape would be (40,5) + * - with reverse=1, output shape will be (50,4). + * + * + * Defined in src/operator/tensor/matrix_op.cc:L175 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def reshape(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Reshape some or all dimensions of `lhs` to have the same shape as some or all dimensions of `rhs`. + * + * Returns a **view** of the `lhs` array with a new shape without altering any data. + * + * Example:: + * + * x = [1, 2, 3, 4, 5, 6] + * y = `[ [0, -4], [3, 2], [2, 2] ] + * reshape_like(x, y) = `[ [1, 2], [3, 4], [5, 6] ] + * + * More precise control over how dimensions are inherited is achieved by specifying \ + * slices over the `lhs` and `rhs` array dimensions. Only the sliced `lhs` dimensions \ + * are reshaped to the `rhs` sliced dimensions, with the non-sliced `lhs` dimensions staying the same. + * + * Examples:: + * + * - lhs shape = (30,7), rhs shape = (15,2,4), lhs_begin=0, lhs_end=1, rhs_begin=0, rhs_end=2, output shape = (15,2,7) + * - lhs shape = (3, 5), rhs shape = (1,15,4), lhs_begin=0, lhs_end=2, rhs_begin=1, rhs_end=2, output shape = (15) + * + * Negative indices are supported, and `None` can be used for either `lhs_end` or `rhs_end` to indicate the end of the range. + * + * Example:: + * + * - lhs shape = (30, 12), rhs shape = (4, 2, 2, 3), lhs_begin=-1, lhs_end=None, rhs_begin=1, rhs_end=None, output shape = (30, 2, 2, 3) + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L513 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def reshape_like(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Reshape some or all dimensions of `lhs` to have the same shape as some or all dimensions of `rhs`. + * + * Returns a **view** of the `lhs` array with a new shape without altering any data. + * + * Example:: + * + * x = [1, 2, 3, 4, 5, 6] + * y = `[ [0, -4], [3, 2], [2, 2] ] + * reshape_like(x, y) = `[ [1, 2], [3, 4], [5, 6] ] + * + * More precise control over how dimensions are inherited is achieved by specifying \ + * slices over the `lhs` and `rhs` array dimensions. Only the sliced `lhs` dimensions \ + * are reshaped to the `rhs` sliced dimensions, with the non-sliced `lhs` dimensions staying the same. + * + * Examples:: + * + * - lhs shape = (30,7), rhs shape = (15,2,4), lhs_begin=0, lhs_end=1, rhs_begin=0, rhs_end=2, output shape = (15,2,7) + * - lhs shape = (3, 5), rhs shape = (1,15,4), lhs_begin=0, lhs_end=2, rhs_begin=1, rhs_end=2, output shape = (15) + * + * Negative indices are supported, and `None` can be used for either `lhs_end` or `rhs_end` to indicate the end of the range. 
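+ *
+ * A hedged Scala-side sketch of the positional overload (the `NDArray` receiver and the
+ * `x`/`y` handles are illustrative assumptions only)::
+ *
+ *    // returns a view of x reshaped to the shape of y
+ *    val z = NDArray.reshape_like(x, y)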
+ * + * Example:: + * + * - lhs shape = (30, 12), rhs shape = (4, 2, 2, 3), lhs_begin=-1, lhs_end=None, rhs_begin=1, rhs_end=None, output shape = (30, 2, 2, 3) + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L513 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def reshape_like(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Reverses the order of elements along given axis while preserving array shape. + * Note: reverse and flip are equivalent. We use reverse in the following examples. + * Examples:: + * x = `[ [ 0., 1., 2., 3., 4.], + * [ 5., 6., 7., 8., 9.] ] + * reverse(x, axis=0) = `[ [ 5., 6., 7., 8., 9.], + * [ 0., 1., 2., 3., 4.] ] + * reverse(x, axis=1) = `[ [ 4., 3., 2., 1., 0.], + * [ 9., 8., 7., 6., 5.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L832 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def reverse(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Reverses the order of elements along given axis while preserving array shape. + * Note: reverse and flip are equivalent. We use reverse in the following examples. + * Examples:: + * x = `[ [ 0., 1., 2., 3., 4.], + * [ 5., 6., 7., 8., 9.] ] + * reverse(x, axis=0) = `[ [ 5., 6., 7., 8., 9.], + * [ 0., 1., 2., 3., 4.] ] + * reverse(x, axis=1) = `[ [ 4., 3., 2., 1., 0.], + * [ 9., 8., 7., 6., 5.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L832 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def reverse(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise rounded value to the nearest integer of the input. + * + * .. note:: + * - For input ``n.5`` ``rint`` returns ``n`` while ``round`` returns ``n+1``. + * - For input ``-n.5`` both ``rint`` and ``round`` returns ``-n-1``. + * + * Example:: + * + * rint([-1.5, 1.5, -1.9, 1.9, 2.1]) = [-2., 1., -2., 2., 2.] + * + * The storage type of ``rint`` output depends upon the input storage type: + * + * - rint(default) = default + * - rint(row_sparse) = row_sparse + * - rint(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L799 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def rint(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise rounded value to the nearest integer of the input. + * + * .. note:: + * - For input ``n.5`` ``rint`` returns ``n`` while ``round`` returns ``n+1``. + * - For input ``-n.5`` both ``rint`` and ``round`` returns ``-n-1``. + * + * Example:: + * + * rint([-1.5, 1.5, -1.9, 1.9, 2.1]) = [-2., 1., -2., 2., 2.] + * + * The storage type of ``rint`` output depends upon the input storage type: + * + * - rint(default) = default + * - rint(row_sparse) = row_sparse + * - rint(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L799 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def rint(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for `RMSProp` optimizer. + * + * `RMSprop` is a variant of stochastic gradient descent where the gradients are + * divided by a cache which grows with the sum of squares of recent gradients? + * + * `RMSProp` is similar to `AdaGrad`, a popular variant of `SGD` which adaptively + * tunes the learning rate of each parameter. 
`AdaGrad` lowers the learning rate for + * each parameter monotonically over the course of training. + * While this is analytically motivated for convex optimizations, it may not be ideal + * for non-convex problems. `RMSProp` deals with this heuristically by allowing the + * learning rates to rebound as the denominator decays over time. + * + * Define the Root Mean Square (RMS) error criterion of the gradient as + * :math:`RMS[g]_t = \sqrt{E[g^2]_t + \epsilon}`, where :math:`g` represents + * gradient and :math:`E[g^2]_t` is the decaying average over past squared gradient. + * + * The :math:`E[g^2]_t` is given by: + * + * .. math:: + * E[g^2]_t = \gamma * E[g^2]_{t-1} + (1-\gamma) * g_t^2 + * + * The update step is + * + * .. math:: + * \theta_{t+1} = \theta_t - \frac{\eta}{RMS[g]_t} g_t + * + * The RMSProp code follows the version in + * http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf + * Tieleman & Hinton, 2012. + * + * Hinton suggests the momentum term :math:`\gamma` to be 0.9 and the learning rate + * :math:`\eta` to be 0.001. + * + * + * + * Defined in src/operator/optimizer_op.cc:L797 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def rmsprop_update(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for `RMSProp` optimizer. + * + * `RMSprop` is a variant of stochastic gradient descent where the gradients are + * divided by a cache which grows with the sum of squares of recent gradients? + * + * `RMSProp` is similar to `AdaGrad`, a popular variant of `SGD` which adaptively + * tunes the learning rate of each parameter. `AdaGrad` lowers the learning rate for + * each parameter monotonically over the course of training. + * While this is analytically motivated for convex optimizations, it may not be ideal + * for non-convex problems. `RMSProp` deals with this heuristically by allowing the + * learning rates to rebound as the denominator decays over time. + * + * Define the Root Mean Square (RMS) error criterion of the gradient as + * :math:`RMS[g]_t = \sqrt{E[g^2]_t + \epsilon}`, where :math:`g` represents + * gradient and :math:`E[g^2]_t` is the decaying average over past squared gradient. + * + * The :math:`E[g^2]_t` is given by: + * + * .. math:: + * E[g^2]_t = \gamma * E[g^2]_{t-1} + (1-\gamma) * g_t^2 + * + * The update step is + * + * .. math:: + * \theta_{t+1} = \theta_t - \frac{\eta}{RMS[g]_t} g_t + * + * The RMSProp code follows the version in + * http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf + * Tieleman & Hinton, 2012. + * + * Hinton suggests the momentum term :math:`\gamma` to be 0.9 and the learning rate + * :math:`\eta` to be 0.001. + * + * + * + * Defined in src/operator/optimizer_op.cc:L797 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def rmsprop_update(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for RMSPropAlex optimizer. + * + * `RMSPropAlex` is non-centered version of `RMSProp`. + * + * Define :math:`E[g^2]_t` is the decaying average over past squared gradient and + * :math:`E[g]_t` is the decaying average over past gradient. + * + * .. math:: + * E[g^2]_t = \gamma_1 * E[g^2]_{t-1} + (1 - \gamma_1) * g_t^2\\ + * E[g]_t = \gamma_1 * E[g]_{t-1} + (1 - \gamma_1) * g_t\\ + * \Delta_t = \gamma_2 * \Delta_{t-1} - \frac{\eta}{\sqrt{E[g^2]_t - E[g]_t^2 + \epsilon}} g_t\\ + * + * The update step is + * + * .. 
math:: + * \theta_{t+1} = \theta_t + \Delta_t + * + * The RMSPropAlex code follows the version in + * http://arxiv.org/pdf/1308.0850v5.pdf Eq(38) - Eq(45) by Alex Graves, 2013. + * + * Graves suggests the momentum term :math:`\gamma_1` to be 0.95, :math:`\gamma_2` + * to be 0.9 and the learning rate :math:`\eta` to be 0.0001. + * + * + * Defined in src/operator/optimizer_op.cc:L836 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def rmspropalex_update(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for RMSPropAlex optimizer. + * + * `RMSPropAlex` is non-centered version of `RMSProp`. + * + * Define :math:`E[g^2]_t` is the decaying average over past squared gradient and + * :math:`E[g]_t` is the decaying average over past gradient. + * + * .. math:: + * E[g^2]_t = \gamma_1 * E[g^2]_{t-1} + (1 - \gamma_1) * g_t^2\\ + * E[g]_t = \gamma_1 * E[g]_{t-1} + (1 - \gamma_1) * g_t\\ + * \Delta_t = \gamma_2 * \Delta_{t-1} - \frac{\eta}{\sqrt{E[g^2]_t - E[g]_t^2 + \epsilon}} g_t\\ + * + * The update step is + * + * .. math:: + * \theta_{t+1} = \theta_t + \Delta_t + * + * The RMSPropAlex code follows the version in + * http://arxiv.org/pdf/1308.0850v5.pdf Eq(38) - Eq(45) by Alex Graves, 2013. + * + * Graves suggests the momentum term :math:`\gamma_1` to be 0.95, :math:`\gamma_2` + * to be 0.9 and the learning rate :math:`\eta` to be 0.0001. + * + * + * Defined in src/operator/optimizer_op.cc:L836 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def rmspropalex_update(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise rounded value to the nearest integer of the input. + * + * Example:: + * + * round([-1.5, 1.5, -1.9, 1.9, 2.1]) = [-2., 2., -2., 2., 2.] + * + * The storage type of ``round`` output depends upon the input storage type: + * + * - round(default) = default + * - round(row_sparse) = row_sparse + * - round(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L778 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def round(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise rounded value to the nearest integer of the input. + * + * Example:: + * + * round([-1.5, 1.5, -1.9, 1.9, 2.1]) = [-2., 2., -2., 2., 2.] + * + * The storage type of ``round`` output depends upon the input storage type: + * + * - round(default) = default + * - round(row_sparse) = row_sparse + * - round(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L778 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def round(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise inverse square-root value of the input. + * + * .. math:: + * rsqrt(x) = 1/\sqrt{x} + * + * Example:: + * + * rsqrt([4,9,16]) = [0.5, 0.33333334, 0.25] + * + * The storage type of ``rsqrt`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L193 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def rsqrt(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise inverse square-root value of the input. + * + * .. 
math:: + * rsqrt(x) = 1/\sqrt{x} + * + * Example:: + * + * rsqrt([4,9,16]) = [0.5, 0.33333334, 0.25] + * + * The storage type of ``rsqrt`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L193 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def rsqrt(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * exponential distributions with parameters lambda (rate). + * + * The parameters of the distributions are provided as an input array. + * Let *[s]* be the shape of the input array, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input array, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input value at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input array. + * + * Examples:: + * + * lam = [ 1.0, 8.5 ] + * + * // Draw a single sample for each distribution + * sample_exponential(lam) = [ 0.51837951, 0.09994757] + * + * // Draw a vector containing two samples for each distribution + * sample_exponential(lam, shape=(2)) = `[ [ 0.51837951, 0.19866663], + * [ 0.09994757, 0.50447971] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L283 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sample_exponential(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * exponential distributions with parameters lambda (rate). + * + * The parameters of the distributions are provided as an input array. + * Let *[s]* be the shape of the input array, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input array, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input value at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input array. + * + * Examples:: + * + * lam = [ 1.0, 8.5 ] + * + * // Draw a single sample for each distribution + * sample_exponential(lam) = [ 0.51837951, 0.09994757] + * + * // Draw a vector containing two samples for each distribution + * sample_exponential(lam, shape=(2)) = `[ [ 0.51837951, 0.19866663], + * [ 0.09994757, 0.50447971] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L283 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sample_exponential(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * gamma distributions with parameters *alpha* (shape) and *beta* (scale). + * + * The parameters of the distributions are provided as input arrays. 
+ * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Examples:: + * + * alpha = [ 0.0, 2.5 ] + * beta = [ 1.0, 0.7 ] + * + * // Draw a single sample for each distribution + * sample_gamma(alpha, beta) = [ 0. , 2.25797319] + * + * // Draw a vector containing two samples for each distribution + * sample_gamma(alpha, beta, shape=(2)) = `[ [ 0. , 0. ], + * [ 2.25797319, 1.70734084] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L280 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sample_gamma(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * gamma distributions with parameters *alpha* (shape) and *beta* (scale). + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Examples:: + * + * alpha = [ 0.0, 2.5 ] + * beta = [ 1.0, 0.7 ] + * + * // Draw a single sample for each distribution + * sample_gamma(alpha, beta) = [ 0. , 2.25797319] + * + * // Draw a vector containing two samples for each distribution + * sample_gamma(alpha, beta, shape=(2)) = `[ [ 0. , 0. ], + * [ 2.25797319, 1.70734084] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L280 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sample_gamma(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * generalized negative binomial distributions with parameters *mu* (mean) and *alpha* (dispersion). + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. 
If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Samples will always be returned as a floating point data type. + * + * Examples:: + * + * mu = [ 2.0, 2.5 ] + * alpha = [ 1.0, 0.1 ] + * + * // Draw a single sample for each distribution + * sample_generalized_negative_binomial(mu, alpha) = [ 0., 3.] + * + * // Draw a vector containing two samples for each distribution + * sample_generalized_negative_binomial(mu, alpha, shape=(2)) = `[ [ 0., 3.], + * [ 3., 1.] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L290 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sample_generalized_negative_binomial(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * generalized negative binomial distributions with parameters *mu* (mean) and *alpha* (dispersion). + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Samples will always be returned as a floating point data type. + * + * Examples:: + * + * mu = [ 2.0, 2.5 ] + * alpha = [ 1.0, 0.1 ] + * + * // Draw a single sample for each distribution + * sample_generalized_negative_binomial(mu, alpha) = [ 0., 3.] + * + * // Draw a vector containing two samples for each distribution + * sample_generalized_negative_binomial(mu, alpha, shape=(2)) = `[ [ 0., 3.], + * [ 3., 1.] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L290 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sample_generalized_negative_binomial(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Concurrent sampling from multiple multinomial distributions. + * + * *data* is an *n* dimensional array whose last dimension has length *k*, where + * *k* is the number of possible outcomes of each multinomial distribution. This + * operator will draw *shape* samples from each distribution. If shape is empty + * one sample will be drawn from each distribution. + * + * If *get_prob* is true, a second array containing log likelihood of the drawn + * samples will also be returned. This is usually used for reinforcement learning + * where you can provide reward as head gradient for this array to estimate + * gradient. + * + * Note that the input distribution must be normalized, i.e. *data* must sum to + * 1 along its last axis. 
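+ *
+ * A hedged Scala-side sketch of the keyword-style overload above (the `NDArray` receiver
+ * and the `probs` handle are illustrative assumptions only)::
+ *
+ *    // draws one outcome index per row of the normalized distribution array `probs`
+ *    val draws = NDArray.sample_multinomial(Map("data" -> probs))()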
+ * + * Examples:: + * + * probs = `[ [0, 0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1, 0] ] + * + * // Draw a single sample for each distribution + * sample_multinomial(probs) = [3, 0] + * + * // Draw a vector containing two samples for each distribution + * sample_multinomial(probs, shape=(2)) = `[ [4, 2], + * [0, 0] ] + * + * // requests log likelihood + * sample_multinomial(probs, get_prob=True) = [2, 1], [0.2, 0.3] + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sample_multinomial(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Concurrent sampling from multiple multinomial distributions. + * + * *data* is an *n* dimensional array whose last dimension has length *k*, where + * *k* is the number of possible outcomes of each multinomial distribution. This + * operator will draw *shape* samples from each distribution. If shape is empty + * one sample will be drawn from each distribution. + * + * If *get_prob* is true, a second array containing log likelihood of the drawn + * samples will also be returned. This is usually used for reinforcement learning + * where you can provide reward as head gradient for this array to estimate + * gradient. + * + * Note that the input distribution must be normalized, i.e. *data* must sum to + * 1 along its last axis. + * + * Examples:: + * + * probs = `[ [0, 0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1, 0] ] + * + * // Draw a single sample for each distribution + * sample_multinomial(probs) = [3, 0] + * + * // Draw a vector containing two samples for each distribution + * sample_multinomial(probs, shape=(2)) = `[ [4, 2], + * [0, 0] ] + * + * // requests log likelihood + * sample_multinomial(probs, get_prob=True) = [2, 1], [0.2, 0.3] + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sample_multinomial(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * negative binomial distributions with parameters *k* (failure limit) and *p* (failure probability). + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Samples will always be returned as a floating point data type. + * + * Examples:: + * + * k = [ 20, 49 ] + * p = [ 0.4 , 0.77 ] + * + * // Draw a single sample for each distribution + * sample_negative_binomial(k, p) = [ 15., 16.] + * + * // Draw a vector containing two samples for each distribution + * sample_negative_binomial(k, p, shape=(2)) = `[ [ 15., 50.], + * [ 16., 12.] 
] + * + * + * Defined in src/operator/random/multisample_op.cc:L287 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sample_negative_binomial(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * negative binomial distributions with parameters *k* (failure limit) and *p* (failure probability). + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Samples will always be returned as a floating point data type. + * + * Examples:: + * + * k = [ 20, 49 ] + * p = [ 0.4 , 0.77 ] + * + * // Draw a single sample for each distribution + * sample_negative_binomial(k, p) = [ 15., 16.] + * + * // Draw a vector containing two samples for each distribution + * sample_negative_binomial(k, p, shape=(2)) = `[ [ 15., 50.], + * [ 16., 12.] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L287 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sample_negative_binomial(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * normal distributions with parameters *mu* (mean) and *sigma* (standard deviation). + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Examples:: + * + * mu = [ 0.0, 2.5 ] + * sigma = [ 1.0, 3.7 ] + * + * // Draw a single sample for each distribution + * sample_normal(mu, sigma) = [-0.56410581, 0.95934606] + * + * // Draw a vector containing two samples for each distribution + * sample_normal(mu, sigma, shape=(2)) = `[ [-0.56410581, 0.2928229 ], + * [ 0.95934606, 4.48287058] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L278 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sample_normal(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * normal distributions with parameters *mu* (mean) and *sigma* (standard deviation). + * + * The parameters of the distributions are provided as input arrays. 
+ * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Examples:: + * + * mu = [ 0.0, 2.5 ] + * sigma = [ 1.0, 3.7 ] + * + * // Draw a single sample for each distribution + * sample_normal(mu, sigma) = [-0.56410581, 0.95934606] + * + * // Draw a vector containing two samples for each distribution + * sample_normal(mu, sigma, shape=(2)) = `[ [-0.56410581, 0.2928229 ], + * [ 0.95934606, 4.48287058] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L278 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sample_normal(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * Poisson distributions with parameters lambda (rate). + * + * The parameters of the distributions are provided as an input array. + * Let *[s]* be the shape of the input array, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input array, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input value at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input array. + * + * Samples will always be returned as a floating point data type. + * + * Examples:: + * + * lam = [ 1.0, 8.5 ] + * + * // Draw a single sample for each distribution + * sample_poisson(lam) = [ 0., 13.] + * + * // Draw a vector containing two samples for each distribution + * sample_poisson(lam, shape=(2)) = `[ [ 0., 4.], + * [ 13., 8.] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L285 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sample_poisson(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * Poisson distributions with parameters lambda (rate). + * + * The parameters of the distributions are provided as an input array. + * Let *[s]* be the shape of the input array, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input array, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input value at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input array. 
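+ *
+ * A hedged Scala-side sketch of the positional overload (the `NDArray` receiver, the
+ * `NDArray.array` construction and the literal rates are illustrative assumptions only)::
+ *
+ *    val lam     = NDArray.array(Array(1.0f, 8.5f), shape = Shape(2))
+ *    val samples = NDArray.sample_poisson(lam)   // one draw per rate in `lam`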
+ * + * Samples will always be returned as a floating point data type. + * + * Examples:: + * + * lam = [ 1.0, 8.5 ] + * + * // Draw a single sample for each distribution + * sample_poisson(lam) = [ 0., 13.] + * + * // Draw a vector containing two samples for each distribution + * sample_poisson(lam, shape=(2)) = `[ [ 0., 4.], + * [ 13., 8.] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L285 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sample_poisson(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * uniform distributions on the intervals given by *[low,high)*. + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Examples:: + * + * low = [ 0.0, 2.5 ] + * high = [ 1.0, 3.7 ] + * + * // Draw a single sample for each distribution + * sample_uniform(low, high) = [ 0.40451524, 3.18687344] + * + * // Draw a vector containing two samples for each distribution + * sample_uniform(low, high, shape=(2)) = `[ [ 0.40451524, 0.18017688], + * [ 3.18687344, 3.68352246] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L276 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sample_uniform(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * uniform distributions on the intervals given by *[low,high)*. + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Examples:: + * + * low = [ 0.0, 2.5 ] + * high = [ 1.0, 3.7 ] + * + * // Draw a single sample for each distribution + * sample_uniform(low, high) = [ 0.40451524, 3.18687344] + * + * // Draw a vector containing two samples for each distribution + * sample_uniform(low, high, shape=(2)) = `[ [ 0.40451524, 0.18017688], + * [ 3.18687344, 3.68352246] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L276 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sample_uniform(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Scatters data into a new tensor according to indices. 
+ * + * Given `data` with shape `(Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1})` and indices with shape + * `(M, Y_0, ..., Y_{K-1})`, the output will have shape `(X_0, X_1, ..., X_{N-1})`, + * where `M <= N`. If `M == N`, data shape should simply be `(Y_0, ..., Y_{K-1})`. + * + * The elements in output is defined as follows:: + * + * output[indices[0, y_0, ..., y_{K-1}], + * ..., + * indices[M-1, y_0, ..., y_{K-1}], + * x_M, ..., x_{N-1}] = data[y_0, ..., y_{K-1}, x_M, ..., x_{N-1}] + * + * all other entries in output are 0. + * + * .. warning:: + * + * If the indices have duplicates, the result will be non-deterministic and + * the gradient of `scatter_nd` will not be correct!! + * + * + * Examples:: + * + * data = [2, 3, 0] + * indices = `[ [1, 1, 0], [0, 1, 0] ] + * shape = (2, 2) + * scatter_nd(data, indices, shape) = `[ [0, 0], [2, 3] ] + * + * data = `[ `[ [1, 2], [3, 4] ], `[ [5, 6], [7, 8] ] ] + * indices = `[ [0, 1], [1, 1] ] + * shape = (2, 2, 2, 2) + * scatter_nd(data, indices, shape) = `[ [`[ [0, 0], + * [0, 0] ], + * + * `[ [1, 2], + * [3, 4] ] ], + * + * `[ `[ [0, 0], + * [0, 0] ], + * + * `[ [5, 6], + * [7, 8] ] ] ] + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def scatter_nd(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Scatters data into a new tensor according to indices. + * + * Given `data` with shape `(Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1})` and indices with shape + * `(M, Y_0, ..., Y_{K-1})`, the output will have shape `(X_0, X_1, ..., X_{N-1})`, + * where `M <= N`. If `M == N`, data shape should simply be `(Y_0, ..., Y_{K-1})`. + * + * The elements in output is defined as follows:: + * + * output[indices[0, y_0, ..., y_{K-1}], + * ..., + * indices[M-1, y_0, ..., y_{K-1}], + * x_M, ..., x_{N-1}] = data[y_0, ..., y_{K-1}, x_M, ..., x_{N-1}] + * + * all other entries in output are 0. + * + * .. warning:: + * + * If the indices have duplicates, the result will be non-deterministic and + * the gradient of `scatter_nd` will not be correct!! + * + * + * Examples:: + * + * data = [2, 3, 0] + * indices = `[ [1, 1, 0], [0, 1, 0] ] + * shape = (2, 2) + * scatter_nd(data, indices, shape) = `[ [0, 0], [2, 3] ] + * + * data = `[ `[ [1, 2], [3, 4] ], `[ [5, 6], [7, 8] ] ] + * indices = `[ [0, 1], [1, 1] ] + * shape = (2, 2, 2, 2) + * scatter_nd(data, indices, shape) = `[ [`[ [0, 0], + * [0, 0] ], + * + * `[ [1, 2], + * [3, 4] ] ], + * + * `[ `[ [0, 0], + * [0, 0] ], + * + * `[ [5, 6], + * [7, 8] ] ] ] + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def scatter_nd(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Momentum update function for Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. 
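+ *
+ * As an illustrative (hedged) numerical walk-through of the dense update above, assuming
+ * momentum = 0.9, learning_rate = 0.1, an initial v = 0 and a constant gradient of 1.0::
+ *
+ *    step 1: v = 0.9 *  0.0  - 0.1 * 1.0 = -0.10 ; weight += -0.10
+ *    step 2: v = 0.9 * -0.10 - 0.1 * 1.0 = -0.19 ; weight += -0.19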
+ * + * However, if grad's storage type is ``row_sparse``, ``lazy_update`` is True and weight's storage + * type is the same as momentum's storage type, + * only the row slices whose indices appear in grad.indices are updated (for both weight and momentum):: + * + * for row in gradient.indices: + * v[row] = momentum[row] * v[row] - learning_rate * gradient[row] + * weight[row] += v[row] + * + * + * + * Defined in src/operator/optimizer_op.cc:L565 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sgd_mom_update(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Momentum update function for Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * However, if grad's storage type is ``row_sparse``, ``lazy_update`` is True and weight's storage + * type is the same as momentum's storage type, + * only the row slices whose indices appear in grad.indices are updated (for both weight and momentum):: + * + * for row in gradient.indices: + * v[row] = momentum[row] * v[row] - learning_rate * gradient[row] + * weight[row] += v[row] + * + * + * + * Defined in src/operator/optimizer_op.cc:L565 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sgd_mom_update(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for Stochastic Gradient Descent (SGD) optimizer. + * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * However, if gradient is of ``row_sparse`` storage type and ``lazy_update`` is True, + * only the row slices whose indices appear in grad.indices are updated:: + * + * for row in gradient.indices: + * weight[row] = weight[row] - learning_rate * (gradient[row] + wd * weight[row]) + * + * + * + * Defined in src/operator/optimizer_op.cc:L524 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sgd_update(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for Stochastic Gradient Descent (SGD) optimizer. + * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * However, if gradient is of ``row_sparse`` storage type and ``lazy_update`` is True, + * only the row slices whose indices appear in grad.indices are updated:: + * + * for row in gradient.indices: + * weight[row] = weight[row] - learning_rate * (gradient[row] + wd * weight[row]) + * + * + * + * Defined in src/operator/optimizer_op.cc:L524 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sgd_update(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns a 1D int64 array containing the shape of data. 
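+ *
+ * A hedged Scala-side sketch of the keyword-style overload above (the `NDArray` receiver
+ * and the `x` handle are illustrative assumptions only)::
+ *
+ *    // returns the shape of x as a 1-D int64 array wrapped in an NDArrayFuncReturn
+ *    val s = NDArray.shape_array(Map("data" -> x))()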
+ * + * Example:: + * + * shape_array(`[ [1,2,3,4], [5,6,7,8] ]) = [2,4] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L574 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def shape_array(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns a 1D int64 array containing the shape of data. + * + * Example:: + * + * shape_array(`[ [1,2,3,4], [5,6,7,8] ]) = [2,4] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L574 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def shape_array(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Randomly shuffle the elements. + * + * This shuffles the array along the first axis. + * The order of the elements in each subarray does not change. + * For example, if a 2D array is given, the order of the rows randomly changes, + * but the order of the elements in each row does not change. + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def shuffle(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Randomly shuffle the elements. + * + * This shuffles the array along the first axis. + * The order of the elements in each subarray does not change. + * For example, if a 2D array is given, the order of the rows randomly changes, + * but the order of the elements in each row does not change. + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def shuffle(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes sigmoid of x element-wise. + * + * .. math:: + * y = 1 / (1 + exp(-x)) + * + * The storage type of ``sigmoid`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L119 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sigmoid(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes sigmoid of x element-wise. + * + * .. math:: + * y = 1 / (1 + exp(-x)) + * + * The storage type of ``sigmoid`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L119 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sigmoid(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise sign of the input. + * + * Example:: + * + * sign([-2, 0, 3]) = [-1, 0, 1] + * + * The storage type of ``sign`` output depends upon the input storage type: + * + * - sign(default) = default + * - sign(row_sparse) = row_sparse + * - sign(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L759 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sign(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise sign of the input. + * + * Example:: + * + * sign([-2, 0, 3]) = [-1, 0, 1] + * + * The storage type of ``sign`` output depends upon the input storage type: + * + * - sign(default) = default + * - sign(row_sparse) = row_sparse + * - sign(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L759 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sign(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for SignSGD optimizer. + * + * .. 
math:: + * + * g_t = \nabla J(W_{t-1})\\ + * W_t = W_{t-1} - \eta_t \text{sign}(g_t) + * + * It updates the weights using:: + * + * weight = weight - learning_rate * sign(gradient) + * + * .. note:: + * - sparse ndarray not supported for this optimizer yet. + * + * + * Defined in src/operator/optimizer_op.cc:L63 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def signsgd_update(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Update function for SignSGD optimizer. + * + * .. math:: + * + * g_t = \nabla J(W_{t-1})\\ + * W_t = W_{t-1} - \eta_t \text{sign}(g_t) + * + * It updates the weights using:: + * + * weight = weight - learning_rate * sign(gradient) + * + * .. note:: + * - sparse ndarray not supported for this optimizer yet. + * + * + * Defined in src/operator/optimizer_op.cc:L63 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def signsgd_update(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * SIGN momentUM (Signum) optimizer. + * + * .. math:: + * + * g_t = \nabla J(W_{t-1})\\ + * m_t = \beta m_{t-1} + (1 - \beta) g_t\\ + * W_t = W_{t-1} - \eta_t \text{sign}(m_t) + * + * It updates the weights using:: + * state = momentum * state + (1-momentum) * gradient + * weight = weight - learning_rate * sign(state) + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * .. note:: + * - sparse ndarray not supported for this optimizer yet. + * + * + * Defined in src/operator/optimizer_op.cc:L92 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def signum_update(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * SIGN momentUM (Signum) optimizer. + * + * .. math:: + * + * g_t = \nabla J(W_{t-1})\\ + * m_t = \beta m_{t-1} + (1 - \beta) g_t\\ + * W_t = W_{t-1} - \eta_t \text{sign}(m_t) + * + * It updates the weights using:: + * state = momentum * state + (1-momentum) * gradient + * weight = weight - learning_rate * sign(state) + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * .. note:: + * - sparse ndarray not supported for this optimizer yet. + * + * + * Defined in src/operator/optimizer_op.cc:L92 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def signum_update(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the element-wise sine of the input array. + * + * The input should be in radians (:math:`2\pi` rad equals 360 degrees). + * + * .. math:: + * sin([0, \pi/4, \pi/2]) = [0, 0.707, 1] + * + * The storage type of ``sin`` output depends upon the input storage type: + * + * - sin(default) = default + * - sin(row_sparse) = row_sparse + * - sin(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L47 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sin(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the element-wise sine of the input array. + * + * The input should be in radians (:math:`2\pi` rad equals 360 degrees). + * + * .. 
math:: + * sin([0, \pi/4, \pi/2]) = [0, 0.707, 1] + * + * The storage type of ``sin`` output depends upon the input storage type: + * + * - sin(default) = default + * - sin(row_sparse) = row_sparse + * - sin(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L47 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sin(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the hyperbolic sine of the input array, computed element-wise. + * + * .. math:: + * sinh(x) = 0.5\times(exp(x) - exp(-x)) + * + * The storage type of ``sinh`` output depends upon the input storage type: + * + * - sinh(default) = default + * - sinh(row_sparse) = row_sparse + * - sinh(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L313 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sinh(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the hyperbolic sine of the input array, computed element-wise. + * + * .. math:: + * sinh(x) = 0.5\times(exp(x) - exp(-x)) + * + * The storage type of ``sinh`` output depends upon the input storage type: + * + * - sinh(default) = default + * - sinh(row_sparse) = row_sparse + * - sinh(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L313 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sinh(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns a 1D int64 array containing the size of data. + * + * Example:: + * + * size_array(`[ [1,2,3,4], [5,6,7,8] ]) = [8] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L625 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def size_array(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns a 1D int64 array containing the size of data. + * + * Example:: + * + * size_array(`[ [1,2,3,4], [5,6,7,8] ]) = [8] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L625 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def size_array(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Slices a region of the array. + * .. note:: ``crop`` is deprecated. Use ``slice`` instead. + * This function returns a sliced array between the indices given + * by `begin` and `end` with the corresponding `step`. + * For an input array of ``shape=(d_0, d_1, ..., d_n-1)``, + * slice operation with ``begin=(b_0, b_1...b_m-1)``, + * ``end=(e_0, e_1, ..., e_m-1)``, and ``step=(s_0, s_1, ..., s_m-1)``, + * where m <= n, results in an array with the shape + * ``(|e_0-b_0|/|s_0|, ..., |e_m-1-b_m-1|/|s_m-1|, d_m, ..., d_n-1)``. + * The resulting array's *k*-th dimension contains elements + * from the *k*-th dimension of the input array starting + * from index ``b_k`` (inclusive) with step ``s_k`` + * until reaching ``e_k`` (exclusive). + * If the *k*-th elements are `None` in the sequence of `begin`, `end`, + * and `step`, the following rule will be used to set default values. + * If `s_k` is `None`, set `s_k=1`. If `s_k > 0`, set `b_k=0`, `e_k=d_k`; + * else, set `b_k=d_k-1`, `e_k=-1`. + * The storage type of ``slice`` output depends on storage types of inputs + * - slice(csr) = csr + * - otherwise, ``slice`` generates output with default storage + * .. 
note:: When input data storage type is csr, it only supports + * step=(), or step=(None,), or step=(1,) to generate a csr output. + * For other step parameter values, it falls back to slicing + * a dense tensor. + * Example:: + * x = `[ [ 1., 2., 3., 4.], + * [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice(x, begin=(0,1), end=(2,4)) = `[ [ 2., 3., 4.], + * [ 6., 7., 8.] ] + * slice(x, begin=(None, 0), end=(None, 3), step=(-1, 2)) = `[ [9., 11.], + * [5., 7.], + * [1., 3.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L482 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def slice(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Slices a region of the array. + * .. note:: ``crop`` is deprecated. Use ``slice`` instead. + * This function returns a sliced array between the indices given + * by `begin` and `end` with the corresponding `step`. + * For an input array of ``shape=(d_0, d_1, ..., d_n-1)``, + * slice operation with ``begin=(b_0, b_1...b_m-1)``, + * ``end=(e_0, e_1, ..., e_m-1)``, and ``step=(s_0, s_1, ..., s_m-1)``, + * where m <= n, results in an array with the shape + * ``(|e_0-b_0|/|s_0|, ..., |e_m-1-b_m-1|/|s_m-1|, d_m, ..., d_n-1)``. + * The resulting array's *k*-th dimension contains elements + * from the *k*-th dimension of the input array starting + * from index ``b_k`` (inclusive) with step ``s_k`` + * until reaching ``e_k`` (exclusive). + * If the *k*-th elements are `None` in the sequence of `begin`, `end`, + * and `step`, the following rule will be used to set default values. + * If `s_k` is `None`, set `s_k=1`. If `s_k > 0`, set `b_k=0`, `e_k=d_k`; + * else, set `b_k=d_k-1`, `e_k=-1`. + * The storage type of ``slice`` output depends on storage types of inputs + * - slice(csr) = csr + * - otherwise, ``slice`` generates output with default storage + * .. note:: When input data storage type is csr, it only supports + * step=(), or step=(None,), or step=(1,) to generate a csr output. + * For other step parameter values, it falls back to slicing + * a dense tensor. + * Example:: + * x = `[ [ 1., 2., 3., 4.], + * [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice(x, begin=(0,1), end=(2,4)) = `[ [ 2., 3., 4.], + * [ 6., 7., 8.] ] + * slice(x, begin=(None, 0), end=(None, 3), step=(-1, 2)) = `[ [9., 11.], + * [5., 7.], + * [1., 3.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L482 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def slice(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Slices along a given axis. + * Returns an array slice along a given `axis` starting from the `begin` index + * to the `end` index. + * Examples:: + * x = `[ [ 1., 2., 3., 4.], + * [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice_axis(x, axis=0, begin=1, end=3) = `[ [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice_axis(x, axis=1, begin=0, end=2) = `[ [ 1., 2.], + * [ 5., 6.], + * [ 9., 10.] ] + * slice_axis(x, axis=1, begin=-3, end=-1) = `[ [ 2., 3.], + * [ 6., 7.], + * [ 10., 11.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L571 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def slice_axis(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Slices along a given axis. + * Returns an array slice along a given `axis` starting from the `begin` index + * to the `end` index. 
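+ *
+ * A minimal Scala sketch of the same call (values hypothetical; assuming these generated
+ * members are exposed on the `NDArray` companion object)::
+ *
+ * val x = NDArray.array(Array(1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f, 9f, 10f, 11f, 12f), Shape(3, 4))
+ * // keeps rows 1 and 2, matching slice_axis(x, axis=0, begin=1, end=3) in the examples below
+ * val rows = NDArray.slice_axis(Map("axis" -> 0, "begin" -> 1, "end" -> 3))(x)
+ *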
+ * Examples:: + * x = `[ [ 1., 2., 3., 4.], + * [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice_axis(x, axis=0, begin=1, end=3) = `[ [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice_axis(x, axis=1, begin=0, end=2) = `[ [ 1., 2.], + * [ 5., 6.], + * [ 9., 10.] ] + * slice_axis(x, axis=1, begin=-3, end=-1) = `[ [ 2., 3.], + * [ 6., 7.], + * [ 10., 11.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L571 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def slice_axis(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Slices a region of the array like the shape of another array. + * This function is similar to ``slice``, however, the `begin` are always `0`s + * and `end` of specific axes are inferred from the second input `shape_like`. + * Given the second `shape_like` input of ``shape=(d_0, d_1, ..., d_n-1)``, + * a ``slice_like`` operator with default empty `axes`, it performs the + * following operation: + * `` out = slice(input, begin=(0, 0, ..., 0), end=(d_0, d_1, ..., d_n-1))``. + * When `axes` is not empty, it is used to speficy which axes are being sliced. + * Given a 4-d input data, ``slice_like`` operator with ``axes=(0, 2, -1)`` + * will perform the following operation: + * `` out = slice(input, begin=(0, 0, 0, 0), end=(d_0, None, d_2, d_3))``. + * Note that it is allowed to have first and second input with different dimensions, + * however, you have to make sure the `axes` are specified and not exceeding the + * dimension limits. + * For example, given `input_1` with ``shape=(2,3,4,5)`` and `input_2` with + * ``shape=(1,2,3)``, it is not allowed to use: + * `` out = slice_like(a, b)`` because ndim of `input_1` is 4, and ndim of `input_2` + * is 3. + * The following is allowed in this situation: + * `` out = slice_like(a, b, axes=(0, 2))`` + * Example:: + * x = `[ [ 1., 2., 3., 4.], + * [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * y = `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * slice_like(x, y) = `[ [ 1., 2., 3.] + * [ 5., 6., 7.] ] + * slice_like(x, y, axes=(0, 1)) = `[ [ 1., 2., 3.] + * [ 5., 6., 7.] ] + * slice_like(x, y, axes=(0)) = `[ [ 1., 2., 3., 4.] + * [ 5., 6., 7., 8.] ] + * slice_like(x, y, axes=(-1)) = `[ [ 1., 2., 3.] + * [ 5., 6., 7.] + * [ 9., 10., 11.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L625 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def slice_like(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Slices a region of the array like the shape of another array. + * This function is similar to ``slice``, however, the `begin` are always `0`s + * and `end` of specific axes are inferred from the second input `shape_like`. + * Given the second `shape_like` input of ``shape=(d_0, d_1, ..., d_n-1)``, + * a ``slice_like`` operator with default empty `axes`, it performs the + * following operation: + * `` out = slice(input, begin=(0, 0, ..., 0), end=(d_0, d_1, ..., d_n-1))``. + * When `axes` is not empty, it is used to speficy which axes are being sliced. + * Given a 4-d input data, ``slice_like`` operator with ``axes=(0, 2, -1)`` + * will perform the following operation: + * `` out = slice(input, begin=(0, 0, 0, 0), end=(d_0, None, d_2, d_3))``. + * Note that it is allowed to have first and second input with different dimensions, + * however, you have to make sure the `axes` are specified and not exceeding the + * dimension limits. 
+ * For example, given `input_1` with ``shape=(2,3,4,5)`` and `input_2` with + * ``shape=(1,2,3)``, it is not allowed to use: + * `` out = slice_like(a, b)`` because ndim of `input_1` is 4, and ndim of `input_2` + * is 3. + * The following is allowed in this situation: + * `` out = slice_like(a, b, axes=(0, 2))`` + * Example:: + * x = `[ [ 1., 2., 3., 4.], + * [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * y = `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * slice_like(x, y) = `[ [ 1., 2., 3.] + * [ 5., 6., 7.] ] + * slice_like(x, y, axes=(0, 1)) = `[ [ 1., 2., 3.] + * [ 5., 6., 7.] ] + * slice_like(x, y, axes=(0)) = `[ [ 1., 2., 3., 4.] + * [ 5., 6., 7., 8.] ] + * slice_like(x, y, axes=(-1)) = `[ [ 1., 2., 3.] + * [ 5., 6., 7.] + * [ 9., 10., 11.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L625 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def slice_like(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Calculate Smooth L1 Loss(lhs, scalar) by summing + * + * .. math:: + * + * f(x) = + * \begin{cases} + * (\sigma x)^2/2,& \text{if }x < 1/\sigma^2\\ + * |x|-0.5/\sigma^2,& \text{otherwise} + * \end{cases} + * + * where :math:`x` is an element of the tensor *lhs* and :math:`\sigma` is the scalar. + * + * Example:: + * + * smooth_l1([1, 2, 3, 4]) = [0.5, 1.5, 2.5, 3.5] + * smooth_l1([1, 2, 3, 4], scalar=1) = [0.5, 1.5, 2.5, 3.5] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_scalar_op_extended.cc:L108 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def smooth_l1(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Calculate Smooth L1 Loss(lhs, scalar) by summing + * + * .. math:: + * + * f(x) = + * \begin{cases} + * (\sigma x)^2/2,& \text{if }x < 1/\sigma^2\\ + * |x|-0.5/\sigma^2,& \text{otherwise} + * \end{cases} + * + * where :math:`x` is an element of the tensor *lhs* and :math:`\sigma` is the scalar. + * + * Example:: + * + * smooth_l1([1, 2, 3, 4]) = [0.5, 1.5, 2.5, 3.5] + * smooth_l1([1, 2, 3, 4], scalar=1) = [0.5, 1.5, 2.5, 3.5] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_scalar_op_extended.cc:L108 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def smooth_l1(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies the softmax function. + * + * The resulting array contains elements in the range (0,1) and the elements along the given axis sum up to 1. + * + * .. math:: + * softmax(\mathbf{z/t})_j = \frac{e^{z_j/t}}{\sum_{k=1}^K e^{z_k/t}} + * + * for :math:`j = 1, ..., K` + * + * t is the temperature parameter in softmax function. By default, t equals 1.0 + * + * Example:: + * + * x = `[ [ 1. 1. 1.] + * [ 1. 1. 1.] ] + * + * softmax(x,axis=0) = `[ [ 0.5 0.5 0.5] + * [ 0.5 0.5 0.5] ] + * + * softmax(x,axis=1) = `[ [ 0.33333334, 0.33333334, 0.33333334], + * [ 0.33333334, 0.33333334, 0.33333334] ] + * + * + * + * Defined in src/operator/nn/softmax.cc:L103 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def softmax(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies the softmax function. + * + * The resulting array contains elements in the range (0,1) and the elements along the given axis sum up to 1. + * + * .. math:: + * softmax(\mathbf{z/t})_j = \frac{e^{z_j/t}}{\sum_{k=1}^K e^{z_k/t}} + * + * for :math:`j = 1, ..., K` + * + * t is the temperature parameter in softmax function. 
By default, t equals 1.0 + * + * Example:: + * + * x = `[ [ 1. 1. 1.] + * [ 1. 1. 1.] ] + * + * softmax(x,axis=0) = `[ [ 0.5 0.5 0.5] + * [ 0.5 0.5 0.5] ] + * + * softmax(x,axis=1) = `[ [ 0.33333334, 0.33333334, 0.33333334], + * [ 0.33333334, 0.33333334, 0.33333334] ] + * + * + * + * Defined in src/operator/nn/softmax.cc:L103 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def softmax(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Calculate cross entropy of softmax output and one-hot label. + * + * - This operator computes the cross entropy in two steps: + * - Applies softmax function on the input array. + * - Computes and returns the cross entropy loss between the softmax output and the labels. + * + * - The softmax function and cross entropy loss is given by: + * + * - Softmax Function: + * + * .. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)} + * + * - Cross Entropy Function: + * + * .. math:: \text{CE(label, output)} = - \sum_i \text{label}_i \log(\text{output}_i) + * + * Example:: + * + * x = `[ [1, 2, 3], + * [11, 7, 5] ] + * + * label = [2, 0] + * + * softmax(x) = `[ [0.09003057, 0.24472848, 0.66524094], + * [0.97962922, 0.01794253, 0.00242826] ] + * + * softmax_cross_entropy(data, label) = - log(0.66524084) - log(0.97962922) = 0.4281871 + * + * + * + * Defined in src/operator/loss_binary_op.cc:L59 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def softmax_cross_entropy(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Calculate cross entropy of softmax output and one-hot label. + * + * - This operator computes the cross entropy in two steps: + * - Applies softmax function on the input array. + * - Computes and returns the cross entropy loss between the softmax output and the labels. + * + * - The softmax function and cross entropy loss is given by: + * + * - Softmax Function: + * + * .. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)} + * + * - Cross Entropy Function: + * + * .. math:: \text{CE(label, output)} = - \sum_i \text{label}_i \log(\text{output}_i) + * + * Example:: + * + * x = `[ [1, 2, 3], + * [11, 7, 5] ] + * + * label = [2, 0] + * + * softmax(x) = `[ [0.09003057, 0.24472848, 0.66524094], + * [0.97962922, 0.01794253, 0.00242826] ] + * + * softmax_cross_entropy(data, label) = - log(0.66524084) - log(0.97962922) = 0.4281871 + * + * + * + * Defined in src/operator/loss_binary_op.cc:L59 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def softmax_cross_entropy(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies the softmin function. + * + * The resulting array contains elements in the range (0,1) and the elements along the given axis sum + * up to 1. + * + * .. math:: + * softmin(\mathbf{z/t})_j = \frac{e^{-z_j/t}}{\sum_{k=1}^K e^{-z_k/t}} + * + * for :math:`j = 1, ..., K` + * + * t is the temperature parameter in softmax function. By default, t equals 1.0 + * + * Example:: + * + * x = `[ [ 1. 2. 3.] + * [ 3. 2. 1.] 
] + * + * softmin(x,axis=0) = `[ [ 0.88079703, 0.5, 0.11920292], + * [ 0.11920292, 0.5, 0.88079703] ] + * + * softmin(x,axis=1) = `[ [ 0.66524094, 0.24472848, 0.09003057], + * [ 0.09003057, 0.24472848, 0.66524094] ] + * + * + * + * Defined in src/operator/nn/softmin.cc:L57 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def softmin(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Applies the softmin function. + * + * The resulting array contains elements in the range (0,1) and the elements along the given axis sum + * up to 1. + * + * .. math:: + * softmin(\mathbf{z/t})_j = \frac{e^{-z_j/t}}{\sum_{k=1}^K e^{-z_k/t}} + * + * for :math:`j = 1, ..., K` + * + * t is the temperature parameter in softmax function. By default, t equals 1.0 + * + * Example:: + * + * x = `[ [ 1. 2. 3.] + * [ 3. 2. 1.] ] + * + * softmin(x,axis=0) = `[ [ 0.88079703, 0.5, 0.11920292], + * [ 0.11920292, 0.5, 0.88079703] ] + * + * softmin(x,axis=1) = `[ [ 0.66524094, 0.24472848, 0.09003057], + * [ 0.09003057, 0.24472848, 0.66524094] ] + * + * + * + * Defined in src/operator/nn/softmin.cc:L57 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def softmin(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes softsign of x element-wise. + * + * .. math:: + * y = x / (1 + abs(x)) + * + * The storage type of ``softsign`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L191 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def softsign(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes softsign of x element-wise. + * + * .. math:: + * y = x / (1 + abs(x)) + * + * The storage type of ``softsign`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L191 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def softsign(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns a sorted copy of an input array along the given axis. + * + * Examples:: + * + * x = `[ [ 1, 4], + * [ 3, 1] ] + * + * // sorts along the last axis + * sort(x) = `[ [ 1., 4.], + * [ 1., 3.] ] + * + * // flattens and then sorts + * sort(x, axis=None) = [ 1., 1., 3., 4.] + * + * // sorts along the first axis + * sort(x, axis=0) = `[ [ 1., 1.], + * [ 3., 4.] ] + * + * // in a descend order + * sort(x, is_ascend=0) = `[ [ 4., 1.], + * [ 3., 1.] ] + * + * + * + * Defined in src/operator/tensor/ordering_op.cc:L132 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sort(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns a sorted copy of an input array along the given axis. + * + * Examples:: + * + * x = `[ [ 1, 4], + * [ 3, 1] ] + * + * // sorts along the last axis + * sort(x) = `[ [ 1., 4.], + * [ 1., 3.] ] + * + * // flattens and then sorts + * sort(x, axis=None) = [ 1., 1., 3., 4.] + * + * // sorts along the first axis + * sort(x, axis=0) = `[ [ 1., 1.], + * [ 3., 4.] ] + * + * // in a descend order + * sort(x, is_ascend=0) = `[ [ 4., 1.], + * [ 3., 1.] ] + * + * + * + * Defined in src/operator/tensor/ordering_op.cc:L132 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sort(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Rearranges(permutes) blocks of spatial data into depth. 
+ * Similar to ONNX SpaceToDepth operator: + * https://github.com/onnx/onnx/blob/master/docs/Operators.md#SpaceToDepth + * The output is a new tensor where the values from height and width dimension are + * moved to the depth dimension. The reverse of this operation is ``depth_to_space``. + * .. math:: + * \begin{gather*} + * x \prime = reshape(x, [N, C, H / block\_size, block\_size, W / block\_size, block\_size]) \\ + * x \prime \prime = transpose(x \prime, [0, 3, 5, 1, 2, 4]) \\ + * y = reshape(x \prime \prime, [N, C * (block\_size ^ 2), H / block\_size, W / block\_size]) + * \end{gather*} + * where :math:`x` is an input tensor with default layout as :math:`[N, C, H, W]`: [batch, channels, height, width] + * and :math:`y` is the output tensor of layout :math:`[N, C * (block\_size ^ 2), H / block\_size, W / block\_size]` + * Example:: + * x = `[ [`[ [0, 6, 1, 7, 2, 8], + * [12, 18, 13, 19, 14, 20], + * [3, 9, 4, 10, 5, 11], + * [15, 21, 16, 22, 17, 23] ] ] ] + * space_to_depth(x, 2) = `[ [`[ [0, 1, 2], + * [3, 4, 5] ], + * `[ [6, 7, 8], + * [9, 10, 11] ], + * `[ [12, 13, 14], + * [15, 16, 17] ], + * `[ [18, 19, 20], + * [21, 22, 23] ] ] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L1019 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def space_to_depth(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Rearranges(permutes) blocks of spatial data into depth. + * Similar to ONNX SpaceToDepth operator: + * https://github.com/onnx/onnx/blob/master/docs/Operators.md#SpaceToDepth + * The output is a new tensor where the values from height and width dimension are + * moved to the depth dimension. The reverse of this operation is ``depth_to_space``. + * .. math:: + * \begin{gather*} + * x \prime = reshape(x, [N, C, H / block\_size, block\_size, W / block\_size, block\_size]) \\ + * x \prime \prime = transpose(x \prime, [0, 3, 5, 1, 2, 4]) \\ + * y = reshape(x \prime \prime, [N, C * (block\_size ^ 2), H / block\_size, W / block\_size]) + * \end{gather*} + * where :math:`x` is an input tensor with default layout as :math:`[N, C, H, W]`: [batch, channels, height, width] + * and :math:`y` is the output tensor of layout :math:`[N, C * (block\_size ^ 2), H / block\_size, W / block\_size]` + * Example:: + * x = `[ [`[ [0, 6, 1, 7, 2, 8], + * [12, 18, 13, 19, 14, 20], + * [3, 9, 4, 10, 5, 11], + * [15, 21, 16, 22, 17, 23] ] ] ] + * space_to_depth(x, 2) = `[ [`[ [0, 1, 2], + * [3, 4, 5] ], + * `[ [6, 7, 8], + * [9, 10, 11] ], + * `[ [12, 13, 14], + * [15, 16, 17] ], + * `[ [18, 19, 20], + * [21, 22, 23] ] ] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L1019 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def space_to_depth(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Splits an array along a particular axis into multiple sub-arrays. + * + * .. note:: ``SliceChannel`` is deprecated. Use ``split`` instead. + * + * **Note** that `num_outputs` should evenly divide the length of the axis + * along which to split the array. + * + * Example:: + * + * x = `[ `[ [ 1.] + * [ 2.] ] + * `[ [ 3.] + * [ 4.] ] + * `[ [ 5.] + * [ 6.] ] ] + * x.shape = (3, 2, 1) + * + * y = split(x, axis=1, num_outputs=2) // a list of 2 arrays with shape (3, 1, 1) + * y = `[ `[ [ 1.] ] + * `[ [ 3.] ] + * `[ [ 5.] ] ] + * + * `[ `[ [ 2.] ] + * `[ [ 4.] ] + * `[ [ 6.] 
] ] + * + * y[0].shape = (3, 1, 1) + * + * z = split(x, axis=0, num_outputs=3) // a list of 3 arrays with shape (1, 2, 1) + * z = `[ `[ [ 1.] + * [ 2.] ] ] + * + * `[ `[ [ 3.] + * [ 4.] ] ] + * + * `[ `[ [ 5.] + * [ 6.] ] ] + * + * z[0].shape = (1, 2, 1) + * + * `squeeze_axis=1` removes the axis with length 1 from the shapes of the output arrays. + * **Note** that setting `squeeze_axis` to ``1`` removes axis with length 1 only + * along the `axis` which it is split. + * Also `squeeze_axis` can be set to true only if ``input.shape[axis] == num_outputs``. + * + * Example:: + * + * z = split(x, axis=0, num_outputs=3, squeeze_axis=1) // a list of 3 arrays with shape (2, 1) + * z = `[ [ 1.] + * [ 2.] ] + * + * `[ [ 3.] + * [ 4.] ] + * + * `[ [ 5.] + * [ 6.] ] + * z[0].shape = (2 ,1 ) + * + * + * + * Defined in src/operator/slice_channel.cc:L107 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def split(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Splits an array along a particular axis into multiple sub-arrays. + * + * .. note:: ``SliceChannel`` is deprecated. Use ``split`` instead. + * + * **Note** that `num_outputs` should evenly divide the length of the axis + * along which to split the array. + * + * Example:: + * + * x = `[ `[ [ 1.] + * [ 2.] ] + * `[ [ 3.] + * [ 4.] ] + * `[ [ 5.] + * [ 6.] ] ] + * x.shape = (3, 2, 1) + * + * y = split(x, axis=1, num_outputs=2) // a list of 2 arrays with shape (3, 1, 1) + * y = `[ `[ [ 1.] ] + * `[ [ 3.] ] + * `[ [ 5.] ] ] + * + * `[ `[ [ 2.] ] + * `[ [ 4.] ] + * `[ [ 6.] ] ] + * + * y[0].shape = (3, 1, 1) + * + * z = split(x, axis=0, num_outputs=3) // a list of 3 arrays with shape (1, 2, 1) + * z = `[ `[ [ 1.] + * [ 2.] ] ] + * + * `[ `[ [ 3.] + * [ 4.] ] ] + * + * `[ `[ [ 5.] + * [ 6.] ] ] + * + * z[0].shape = (1, 2, 1) + * + * `squeeze_axis=1` removes the axis with length 1 from the shapes of the output arrays. + * **Note** that setting `squeeze_axis` to ``1`` removes axis with length 1 only + * along the `axis` which it is split. + * Also `squeeze_axis` can be set to true only if ``input.shape[axis] == num_outputs``. + * + * Example:: + * + * z = split(x, axis=0, num_outputs=3, squeeze_axis=1) // a list of 3 arrays with shape (2, 1) + * z = `[ [ 1.] + * [ 2.] ] + * + * `[ [ 3.] + * [ 4.] ] + * + * `[ [ 5.] + * [ 6.] ] + * z[0].shape = (2 ,1 ) + * + * + * + * Defined in src/operator/slice_channel.cc:L107 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def split(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise square-root value of the input. + * + * .. math:: + * \textrm{sqrt}(x) = \sqrt{x} + * + * Example:: + * + * sqrt([4, 9, 16]) = [2, 3, 4] + * + * The storage type of ``sqrt`` output depends upon the input storage type: + * + * - sqrt(default) = default + * - sqrt(row_sparse) = row_sparse + * - sqrt(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L142 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sqrt(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise square-root value of the input. + * + * .. 
math:: + * \textrm{sqrt}(x) = \sqrt{x} + * + * Example:: + * + * sqrt([4, 9, 16]) = [2, 3, 4] + * + * The storage type of ``sqrt`` output depends upon the input storage type: + * + * - sqrt(default) = default + * - sqrt(row_sparse) = row_sparse + * - sqrt(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L142 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sqrt(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise squared value of the input. + * + * .. math:: + * square(x) = x^2 + * + * Example:: + * + * square([2, 3, 4]) = [4, 9, 16] + * + * The storage type of ``square`` output depends upon the input storage type: + * + * - square(default) = default + * - square(row_sparse) = row_sparse + * - square(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L118 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def square(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns element-wise squared value of the input. + * + * .. math:: + * square(x) = x^2 + * + * Example:: + * + * square([2, 3, 4]) = [4, 9, 16] + * + * The storage type of ``square`` output depends upon the input storage type: + * + * - square(default) = default + * - square(row_sparse) = row_sparse + * - square(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L118 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def square(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Remove single-dimensional entries from the shape of an array. + * Same behavior of defining the output tensor shape as numpy.squeeze for the most of cases. + * See the following note for exception. + * Examples:: + * data = `[ `[ [0], [1], [2] ] ] + * squeeze(data) = [0, 1, 2] + * squeeze(data, axis=0) = `[ [0], [1], [2] ] + * squeeze(data, axis=2) = `[ [0, 1, 2] ] + * squeeze(data, axis=(0, 2)) = [0, 1, 2] + * .. Note:: + * The output of this operator will keep at least one dimension not removed. For example, + * squeeze(`[ `[ [4] ] ]) = [4], while in numpy.squeeze, the output will become a scalar. + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def squeeze(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Remove single-dimensional entries from the shape of an array. + * Same behavior of defining the output tensor shape as numpy.squeeze for the most of cases. + * See the following note for exception. + * Examples:: + * data = `[ `[ [0], [1], [2] ] ] + * squeeze(data) = [0, 1, 2] + * squeeze(data, axis=0) = `[ [0], [1], [2] ] + * squeeze(data, axis=2) = `[ [0, 1, 2] ] + * squeeze(data, axis=(0, 2)) = [0, 1, 2] + * .. Note:: + * The output of this operator will keep at least one dimension not removed. For example, + * squeeze(`[ `[ [4] ] ]) = [4], while in numpy.squeeze, the output will become a scalar. + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def squeeze(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Join a sequence of arrays along a new axis. + * The axis parameter specifies the index of the new axis in the dimensions of the + * result. For example, if axis=0 it will be the first dimension and if axis=-1 it + * will be the last dimension. 
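+ *
+ * A hedged Scala sketch (assuming these generated members are exposed on the `NDArray`
+ * companion object; whether the operator's "num_args" attribute must be passed explicitly
+ * through this untyped call is an assumption)::
+ *
+ * val x = NDArray.array(Array(1f, 2f), Shape(2))
+ * val y = NDArray.array(Array(3f, 4f), Shape(2))
+ * // stacks x and y along a new leading axis, giving shape (2, 2), as in the examples below
+ * val stacked = NDArray.stack(Map("axis" -> 0, "num_args" -> 2))(x, y)
+ *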
+ * Examples:: + * x = [1, 2] + * y = [3, 4] + * stack(x, y) = `[ [1, 2], + * [3, 4] ] + * stack(x, y, axis=1) = `[ [1, 3], + * [2, 4] ] + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def stack(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Join a sequence of arrays along a new axis. + * The axis parameter specifies the index of the new axis in the dimensions of the + * result. For example, if axis=0 it will be the first dimension and if axis=-1 it + * will be the last dimension. + * Examples:: + * x = [1, 2] + * y = [3, 4] + * stack(x, y) = `[ [1, 2], + * [3, 4] ] + * stack(x, y, axis=1) = `[ [1, 3], + * [2, 4] ] + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def stack(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Stops gradient computation. + * + * Stops the accumulated gradient of the inputs from flowing through this operator + * in the backward direction. In other words, this operator prevents the contribution + * of its inputs to be taken into account for computing gradients. + * + * Example:: + * + * v1 = [1, 2] + * v2 = [0, 1] + * a = Variable('a') + * b = Variable('b') + * b_stop_grad = stop_gradient(3 * b) + * loss = MakeLoss(b_stop_grad + a) + * + * executor = loss.simple_bind(ctx=cpu(), a=(1,2), b=(1,2)) + * executor.forward(is_train=True, a=v1, b=v2) + * executor.outputs + * [ 1. 5.] + * + * executor.backward() + * executor.grad_arrays + * [ 0. 0.] + * [ 1. 1.] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L327 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def stop_gradient(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Stops gradient computation. + * + * Stops the accumulated gradient of the inputs from flowing through this operator + * in the backward direction. In other words, this operator prevents the contribution + * of its inputs to be taken into account for computing gradients. + * + * Example:: + * + * v1 = [1, 2] + * v2 = [0, 1] + * a = Variable('a') + * b = Variable('b') + * b_stop_grad = stop_gradient(3 * b) + * loss = MakeLoss(b_stop_grad + a) + * + * executor = loss.simple_bind(ctx=cpu(), a=(1,2), b=(1,2)) + * executor.forward(is_train=True, a=v1, b=v2) + * executor.outputs + * [ 1. 5.] + * + * executor.backward() + * executor.grad_arrays + * [ 0. 0.] + * [ 1. 1.] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L327 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def stop_gradient(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the sum of array elements over given axes. + * + * .. Note:: + * + * `sum` and `sum_axis` are equivalent. + * For ndarray of csr storage type summation along axis 0 and axis 1 is supported. + * Setting keepdims or exclude to True will cause a fallback to dense operator. + * + * Example:: + * + * data = `[ `[ [1, 2], [2, 3], [1, 3] ], + * `[ [1, 4], [4, 3], [5, 2] ], + * `[ [7, 1], [7, 2], [7, 3] ] ] + * + * sum(data, axis=1) + * `[ [ 4. 8.] + * [ 10. 9.] + * [ 21. 6.] ] + * + * sum(data, axis=[1,2]) + * [ 12. 19. 27.] + * + * data = `[ [1, 2, 0], + * [3, 0, 1], + * [4, 1, 0] ] + * + * csr = cast_storage(data, 'csr') + * + * sum(csr, axis=0) + * [ 8. 3. 1.] + * + * sum(csr, axis=1) + * [ 3. 4. 5.] 
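+ *
+ * A minimal Scala sketch of the dense reduction above (assuming these generated members
+ * are exposed on the `NDArray` companion object)::
+ *
+ * val data = NDArray.ones(Shape(3, 3, 2))
+ * // reduces over axis 1 only, producing an array of shape (3, 2)
+ * val s = NDArray.sum(Map("axis" -> 1))(data)
+ *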
+ * + * + * + * Defined in src/operator/tensor/broadcast_reduce_sum_value.cc:L67 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sum(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the sum of array elements over given axes. + * + * .. Note:: + * + * `sum` and `sum_axis` are equivalent. + * For ndarray of csr storage type summation along axis 0 and axis 1 is supported. + * Setting keepdims or exclude to True will cause a fallback to dense operator. + * + * Example:: + * + * data = `[ `[ [1, 2], [2, 3], [1, 3] ], + * `[ [1, 4], [4, 3], [5, 2] ], + * `[ [7, 1], [7, 2], [7, 3] ] ] + * + * sum(data, axis=1) + * `[ [ 4. 8.] + * [ 10. 9.] + * [ 21. 6.] ] + * + * sum(data, axis=[1,2]) + * [ 12. 19. 27.] + * + * data = `[ [1, 2, 0], + * [3, 0, 1], + * [4, 1, 0] ] + * + * csr = cast_storage(data, 'csr') + * + * sum(csr, axis=0) + * [ 8. 3. 1.] + * + * sum(csr, axis=1) + * [ 3. 4. 5.] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_sum_value.cc:L67 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sum(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the sum of array elements over given axes. + * + * .. Note:: + * + * `sum` and `sum_axis` are equivalent. + * For ndarray of csr storage type summation along axis 0 and axis 1 is supported. + * Setting keepdims or exclude to True will cause a fallback to dense operator. + * + * Example:: + * + * data = `[ `[ [1, 2], [2, 3], [1, 3] ], + * `[ [1, 4], [4, 3], [5, 2] ], + * `[ [7, 1], [7, 2], [7, 3] ] ] + * + * sum(data, axis=1) + * `[ [ 4. 8.] + * [ 10. 9.] + * [ 21. 6.] ] + * + * sum(data, axis=[1,2]) + * [ 12. 19. 27.] + * + * data = `[ [1, 2, 0], + * [3, 0, 1], + * [4, 1, 0] ] + * + * csr = cast_storage(data, 'csr') + * + * sum(csr, axis=0) + * [ 8. 3. 1.] + * + * sum(csr, axis=1) + * [ 3. 4. 5.] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_sum_value.cc:L67 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sum_axis(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the sum of array elements over given axes. + * + * .. Note:: + * + * `sum` and `sum_axis` are equivalent. + * For ndarray of csr storage type summation along axis 0 and axis 1 is supported. + * Setting keepdims or exclude to True will cause a fallback to dense operator. + * + * Example:: + * + * data = `[ `[ [1, 2], [2, 3], [1, 3] ], + * `[ [1, 4], [4, 3], [5, 2] ], + * `[ [7, 1], [7, 2], [7, 3] ] ] + * + * sum(data, axis=1) + * `[ [ 4. 8.] + * [ 10. 9.] + * [ 21. 6.] ] + * + * sum(data, axis=[1,2]) + * [ 12. 19. 27.] + * + * data = `[ [1, 2, 0], + * [3, 0, 1], + * [4, 1, 0] ] + * + * csr = cast_storage(data, 'csr') + * + * sum(csr, axis=0) + * [ 8. 3. 1.] + * + * sum(csr, axis=1) + * [ 3. 4. 5.] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_sum_value.cc:L67 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def sum_axis(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Interchanges two axes of an array. 
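+ *
+ * A hedged Scala sketch (assuming these generated members are exposed on the `NDArray`
+ * companion object, and that the two axes are given by the operator's "dim1"/"dim2" arguments)::
+ *
+ * val x = NDArray.array(Array(1f, 2f, 3f), Shape(1, 3))
+ * // same as swapaxes(x, 0, 1) in the examples below: the result has shape (3, 1)
+ * val xt = NDArray.swapaxes(Map("dim1" -> 0, "dim2" -> 1))(x)
+ *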
+ * + * Examples:: + * + * x = `[ [1, 2, 3] ]) + * swapaxes(x, 0, 1) = `[ [ 1], + * [ 2], + * [ 3] ] + * + * x = `[ `[ [ 0, 1], + * [ 2, 3] ], + * `[ [ 4, 5], + * [ 6, 7] ] ] // (2,2,2) array + * + * swapaxes(x, 0, 2) = `[ `[ [ 0, 4], + * [ 2, 6] ], + * `[ [ 1, 5], + * [ 3, 7] ] ] + * + * + * Defined in src/operator/swapaxis.cc:L70 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def swapaxes(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Interchanges two axes of an array. + * + * Examples:: + * + * x = `[ [1, 2, 3] ]) + * swapaxes(x, 0, 1) = `[ [ 1], + * [ 2], + * [ 3] ] + * + * x = `[ `[ [ 0, 1], + * [ 2, 3] ], + * `[ [ 4, 5], + * [ 6, 7] ] ] // (2,2,2) array + * + * swapaxes(x, 0, 2) = `[ `[ [ 0, 4], + * [ 2, 6] ], + * `[ [ 1, 5], + * [ 3, 7] ] ] + * + * + * Defined in src/operator/swapaxis.cc:L70 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def swapaxes(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Takes elements from an input array along the given axis. + * + * This function slices the input array along a particular axis with the provided indices. + * + * Given data tensor of rank r >= 1, and indices tensor of rank q, gather entries of the axis + * dimension of data (by default outer-most one as axis=0) indexed by indices, and concatenates them + * in an output tensor of rank q + (r - 1). + * + * Examples:: + * + * x = [4. 5. 6.] + * + * // Trivial case, take the second element along the first axis. + * + * take(x, [1]) = [ 5. ] + * + * // The other trivial case, axis=-1, take the third element along the first axis + * + * take(x, [3], axis=-1, mode='clip') = [ 6. ] + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] ] + * + * // In this case we will get rows 0 and 1, then 1 and 2. Along axis 0 + * + * take(x, `[ [0,1],[1,2] ]) = `[ `[ [ 1., 2.], + * [ 3., 4.] ], + * + * `[ [ 3., 4.], + * [ 5., 6.] ] ] + * + * // In this case we will get rows 0 and 1, then 1 and 2 (calculated by wrapping around). + * // Along axis 1 + * + * take(x, `[ [0, 3], [-1, -2] ], axis=1, mode='wrap') = `[ `[ [ 1. 2.] + * [ 2. 1.] ] + * + * `[ [ 3. 4.] + * [ 4. 3.] ] + * + * `[ [ 5. 6.] + * [ 6. 5.] ] ] + * + * The storage type of ``take`` output depends upon the input storage type: + * + * - take(default, default) = default + * - take(csr, default, axis=0) = csr + * + * + * + * Defined in src/operator/tensor/indexing_op.cc:L718 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def take(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Takes elements from an input array along the given axis. + * + * This function slices the input array along a particular axis with the provided indices. + * + * Given data tensor of rank r >= 1, and indices tensor of rank q, gather entries of the axis + * dimension of data (by default outer-most one as axis=0) indexed by indices, and concatenates them + * in an output tensor of rank q + (r - 1). + * + * Examples:: + * + * x = [4. 5. 6.] + * + * // Trivial case, take the second element along the first axis. + * + * take(x, [1]) = [ 5. ] + * + * // The other trivial case, axis=-1, take the third element along the first axis + * + * take(x, [3], axis=-1, mode='clip') = [ 6. ] + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] ] + * + * // In this case we will get rows 0 and 1, then 1 and 2. Along axis 0 + * + * take(x, `[ [0,1],[1,2] ]) = `[ `[ [ 1., 2.], + * [ 3., 4.] 
], + * + * `[ [ 3., 4.], + * [ 5., 6.] ] ] + * + * // In this case we will get rows 0 and 1, then 1 and 2 (calculated by wrapping around). + * // Along axis 1 + * + * take(x, `[ [0, 3], [-1, -2] ], axis=1, mode='wrap') = `[ `[ [ 1. 2.] + * [ 2. 1.] ] + * + * `[ [ 3. 4.] + * [ 4. 3.] ] + * + * `[ [ 5. 6.] + * [ 6. 5.] ] ] + * + * The storage type of ``take`` output depends upon the input storage type: + * + * - take(default, default) = default + * - take(csr, default, axis=0) = csr + * + * + * + * Defined in src/operator/tensor/indexing_op.cc:L718 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def take(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the element-wise tangent of the input array. + * + * The input should be in radians (:math:`2\pi` rad equals 360 degrees). + * + * .. math:: + * tan([0, \pi/4, \pi/2]) = [0, 1, -inf] + * + * The storage type of ``tan`` output depends upon the input storage type: + * + * - tan(default) = default + * - tan(row_sparse) = row_sparse + * - tan(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L140 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def tan(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Computes the element-wise tangent of the input array. + * + * The input should be in radians (:math:`2\pi` rad equals 360 degrees). + * + * .. math:: + * tan([0, \pi/4, \pi/2]) = [0, 1, -inf] + * + * The storage type of ``tan`` output depends upon the input storage type: + * + * - tan(default) = default + * - tan(row_sparse) = row_sparse + * - tan(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L140 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def tan(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the hyperbolic tangent of the input array, computed element-wise. + * + * .. math:: + * tanh(x) = sinh(x) / cosh(x) + * + * The storage type of ``tanh`` output depends upon the input storage type: + * + * - tanh(default) = default + * - tanh(row_sparse) = row_sparse + * - tanh(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L393 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def tanh(kwargs: Map[String, Any] = null) + (args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Returns the hyperbolic tangent of the input array, computed element-wise. + * + * .. math:: + * tanh(x) = sinh(x) / cosh(x) + * + * The storage type of ``tanh`` output depends upon the input storage type: + * + * - tanh(default) = default + * - tanh(row_sparse) = row_sparse + * - tanh(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L393 + * }}} + * + * @return org.apache.mxnet.NDArrayFuncReturn + */ +def tanh(args: Any*): org.apache.mxnet.NDArrayFuncReturn + + /** + * + * {{{ + * + * Repeats the whole array multiple times. + * If ``reps`` has length *d*, and input array has dimension of *n*. There are + * three cases: + * - **n=d**. Repeat *i*-th dimension of the input by ``reps[i]`` times:: + * x = `[ [1, 2], + * [3, 4] ] + * tile(x, reps=(2,3)) = `[ [ 1., 2., 1., 2., 1., 2.], + * [ 3., 4., 3., 4., 3., 4.], + * [ 1., 2., 1., 2., 1., 2.], + * [ 3., 4., 3., 4., 3., 4.] ] + * - **n>d**. ``reps`` is promoted to length *n* by pre-pending 1's to it. 
Thus for
+ * an input shape ``(2,3)``, ``reps=(2,)`` is treated as ``(1,2)``::
+ * tile(x, reps=(2,)) = `[ [ 1., 2., 1., 2.],
+ * [ 3., 4., 3., 4.] ]
+ * - **n<d**. The input is promoted to be d-dimensional by prepending new axes. So a
+ * shape ``(2,2)`` array is promoted to ``(1,2,2)`` for 3-D replication::
+ * tile(x, reps=(2,2,3)) = `[ `[ [ 1., 2., 1., 2., 1., 2.],
+ * [ 3., 4., 3., 4., 3., 4.],
+ * [ 1., 2., 1., 2., 1., 2.],
+ * [ 3., 4., 3., 4., 3., 4.] ],
+ * `[ [ 1., 2., 1., 2., 1., 2.],
+ * [ 3., 4., 3., 4., 3., 4.],
+ * [ 1., 2., 1., 2., 1., 2.],
+ * [ 3., 4., 3., 4., 3., 4.] ] ]
+ *
+ * for a square patch of size :math:`K:=2k+1`.
+ *
+ * Note that the equation above is identical to one step of a convolution in neural networks, but instead of convolving data with a filter, it convolves data with other
+ * data. For this reason, it has no training weights.
+ *
+ * Computing :math:`c(x_{1}, x_{2})` involves :math:`c * K^{2}` multiplications. Comparing all patch combinations involves :math:`w^{2}*h^{2}` such computations.
+ *
+ * Given a maximum displacement :math:`d`, for each location :math:`x_{1}` it computes correlations :math:`c(x_{1}, x_{2})` only in a neighborhood of size :math:`D:=2d+1`,
+ * by limiting the range of :math:`x_{2}`. We use strides :math:`s_{1}, s_{2}`, to quantize :math:`x_{1}` globally and to quantize :math:`x_{2}` within the neighborhood
+ * centered around :math:`x_{1}`.
+ *
+ * The final output is defined by the following expression:
+ *
+ * .. math::
+ * out[n, q, i, j] = c(x_{i, j}, x_{q})
+ *
+ * where :math:`i` and :math:`j` enumerate spatial locations in :math:`f_{1}`, and :math:`q` denotes the :math:`q^{th}` neighborhood of :math:`x_{i,j}`.
+ *
+ *
+ * Defined in src/operator/correlation.cc:L198
+ * }}}
+ *
+ * @param data1 Input data1 to the correlation.
+ * @param data2 Input data2 to the correlation.
+ * @param kernel_size kernel size for Correlation must be an odd number
+ * @param max_displacement Max displacement of Correlation
+ * @param stride1 stride1 quantize data1 globally
+ * @param stride2 stride2 quantize data2 within the neighborhood centered around data1
+ * @param pad_size pad for Correlation
+ * @param is_multiply operation type is either multiplication or subtraction
+ * @return org.apache.mxnet.Symbol
+ */
+@Experimental
+def Correlation (data1 : Option[org.apache.mxnet.Symbol] = None, data2 : Option[org.apache.mxnet.Symbol] = None, kernel_size : Option[Int] = None, max_displacement : Option[Int] = None, stride1 : Option[Int] = None, stride2 : Option[Int] = None, pad_size : Option[Int] = None, is_multiply : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol
+ /**
+ *
+ * {{{
+ *
+ *
+ *
+ * .. note:: `Crop` is deprecated. Use `slice` instead.
+ *
+ * Crop the 2nd and 3rd dim of input data, with the corresponding size of h_w or
+ * with width and height of the second input symbol, i.e., with one input, we need h_w to
+ * specify the crop height and width; otherwise the second input symbol's size will be used
+ *
+ *
+ * Defined in src/operator/crop.cc:L50
+ * }}}
+ *
+ * @param data Tensor or List of Tensors, the second input will be used as crop_like shape reference
+ * @param num_args Number of inputs for crop, if equals one, then we will use the h_w for crop height and width, else if equals two, then we will use the height and width of the second input symbol, we name crop_like here
+ * @param offset crop offset coordinate: (y, x)
+ * @param h_w crop height and width: (h, w)
+ * @param center_crop If set to true, then it will use the center_crop, or it will crop using the shape of crop_like
+ * @return org.apache.mxnet.Symbol
+ */
+@Experimental
+def Crop (data : Array[org.apache.mxnet.Symbol], num_args : Int, offset : Option[org.apache.mxnet.Shape] = None, h_w : Option[org.apache.mxnet.Shape] = None, center_crop : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol
+ /**
+ *
+ * {{{
+ *
+ * Computes 1D or 2D transposed convolution (aka fractionally strided convolution) of the input tensor. This operation can be seen as the gradient of Convolution operation with respect to its input. Convolution usually reduces the size of the input. Transposed convolution works the other way, going from a smaller input to a larger output while preserving the connectivity pattern.
+ * }}}
+ *
+ * @param data Input tensor to the deconvolution operation.
+ * @param weight Weights representing the kernel.
+ * @param bias Bias added to the result after the deconvolution operation.
+ * @param kernel Deconvolution kernel size: (w,), (h, w) or (d, h, w). This is the same as the kernel size used for the corresponding convolution
+ * @param stride The stride used for the corresponding convolution: (w,), (h, w) or (d, h, w). Defaults to 1 for each dimension.
+ * @param dilate Dilation factor for each dimension of the input: (w,), (h, w) or (d, h, w). Defaults to 1 for each dimension.
+ * @param pad The amount of implicit zero padding added during convolution for each dimension of the input: (w,), (h, w) or (d, h, w). ``(kernel-1)/2`` is usually a good choice. If `target_shape` is set, `pad` will be ignored and a padding that will generate the target shape will be used. Defaults to no padding.
+ * @param adj Adjustment for output shape: (w,), (h, w) or (d, h, w). If `target_shape` is set, `adj` will be ignored and computed accordingly.
+ * @param target_shape Shape of the output tensor: (w,), (h, w) or (d, h, w).
+ * @param num_filter Number of output filters.
+ * @param num_group Number of groups partition.
+ * @param workspace Maximum temporary workspace allowed (MB) in deconvolution. This parameter has two usages. When CUDNN is not used, it determines the effective batch size of the deconvolution kernel. When CUDNN is used, it controls the maximum temporary storage used for tuning the best CUDNN kernel when `limited_workspace` strategy is used.
+ * @param no_bias Whether to disable bias parameter.
+ * @param cudnn_tune Whether to pick convolution algorithm by running performance test.
+ * @param cudnn_off Turn off cudnn for this layer.
+ * @param layout Set layout for input, output and weight. Empty for default layout, NCW for 1d, NCHW for 2d and NCDHW for 3d. NHWC and NDHWC are only supported on GPU.
+ * @return org.apache.mxnet.Symbol + */ +@Experimental +def Deconvolution (data : Option[org.apache.mxnet.Symbol] = None, weight : Option[org.apache.mxnet.Symbol] = None, bias : Option[org.apache.mxnet.Symbol] = None, kernel : org.apache.mxnet.Shape, stride : Option[org.apache.mxnet.Shape] = None, dilate : Option[org.apache.mxnet.Shape] = None, pad : Option[org.apache.mxnet.Shape] = None, adj : Option[org.apache.mxnet.Shape] = None, target_shape : Option[org.apache.mxnet.Shape] = None, num_filter : Int, num_group : Option[Int] = None, workspace : Option[Long] = None, no_bias : Option[Boolean] = None, cudnn_tune : Option[String] = None, cudnn_off : Option[Boolean] = None, layout : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Applies dropout operation to input array. + * + * - During training, each element of the input is set to zero with probability p. + * The whole array is rescaled by :math:`1/(1-p)` to keep the expected + * sum of the input unchanged. + * + * - During testing, this operator does not change the input if mode is 'training'. + * If mode is 'always', the same computaion as during training will be applied. + * + * Example:: + * + * random.seed(998) + * input_array = array(`[ [3., 0.5, -0.5, 2., 7.], + * [2., -0.4, 7., 3., 0.2] ]) + * a = symbol.Variable('a') + * dropout = symbol.Dropout(a, p = 0.2) + * executor = dropout.simple_bind(a = input_array.shape) + * + * ## If training + * executor.forward(is_train = True, a = input_array) + * executor.outputs + * `[ [ 3.75 0.625 -0. 2.5 8.75 ] + * [ 2.5 -0.5 8.75 3.75 0. ] ] + * + * ## If testing + * executor.forward(is_train = False, a = input_array) + * executor.outputs + * `[ [ 3. 0.5 -0.5 2. 7. ] + * [ 2. -0.4 7. 3. 0.2 ] ] + * + * + * Defined in src/operator/nn/dropout.cc:L96 + * }}} + * + * @param data Input array to which dropout will be applied. + * @param p Fraction of the input that gets dropped out during training time. + * @param mode Whether to only turn on dropout during training or to also turn on for inference. + * @param axes Axes for variational dropout kernel. + * @param cudnn_off Whether to turn off cudnn in dropout operator. This option is ignored if axes is specified. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def Dropout (data : Option[org.apache.mxnet.Symbol] = None, p : Option[Float] = None, mode : Option[String] = None, axes : Option[org.apache.mxnet.Shape] = None, cudnn_off : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Adds all input arguments element-wise. + * + * .. math:: + * add\_n(a_1, a_2, ..., a_n) = a_1 + a_2 + ... + a_n + * + * ``add_n`` is potentially more efficient than calling ``add`` by `n` times. + * + * The storage type of ``add_n`` output depends on storage types of inputs + * + * - add_n(row_sparse, row_sparse, ..) 
= row_sparse + * - add_n(default, csr, default) = default + * - add_n(any input combinations longer than 4 (>4) with at least one default type) = default + * - otherwise, ``add_n`` falls all inputs back to default storage and generates default storage + * + * + * + * Defined in src/operator/tensor/elemwise_sum.cc:L155 + * }}} + * + * @param args Positional input arguments + * @return org.apache.mxnet.Symbol + */ +@Experimental +def ElementWiseSum (args : Array[org.apache.mxnet.Symbol], name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Maps integer indices to vector representations (embeddings). + * + * This operator maps words to real-valued vectors in a high-dimensional space, + * called word embeddings. These embeddings can capture semantic and syntactic properties of the words. + * For example, it has been noted that in the learned embedding spaces, similar words tend + * to be close to each other and dissimilar words far apart. + * + * For an input array of shape (d1, ..., dK), + * the shape of an output array is (d1, ..., dK, output_dim). + * All the input values should be integers in the range [0, input_dim). + * + * If the input_dim is ip0 and output_dim is op0, then shape of the embedding weight matrix must be + * (ip0, op0). + * + * When "sparse_grad" is False, if any index mentioned is too large, it is replaced by the index that + * addresses the last vector in an embedding matrix. + * When "sparse_grad" is True, an error will be raised if invalid indices are found. + * + * Examples:: + * + * input_dim = 4 + * output_dim = 5 + * + * // Each row in weight matrix y represents a word. So, y = (w0,w1,w2,w3) + * y = `[ [ 0., 1., 2., 3., 4.], + * [ 5., 6., 7., 8., 9.], + * [ 10., 11., 12., 13., 14.], + * [ 15., 16., 17., 18., 19.] ] + * + * // Input array x represents n-grams(2-gram). So, x = [(w1,w3), (w0,w2)] + * x = `[ [ 1., 3.], + * [ 0., 2.] ] + * + * // Mapped input x to its vector representation y. + * Embedding(x, y, 4, 5) = `[ `[ [ 5., 6., 7., 8., 9.], + * [ 15., 16., 17., 18., 19.] ], + * + * `[ [ 0., 1., 2., 3., 4.], + * [ 10., 11., 12., 13., 14.] ] ] + * + * + * The storage type of weight can be either row_sparse or default. + * + * .. Note:: + * + * If "sparse_grad" is set to True, the storage type of gradient w.r.t weights will be + * "row_sparse". Only a subset of optimizers support sparse gradients, including SGD, AdaGrad + * and Adam. Note that by default lazy updates is turned on, which may perform differently + * from standard updates. For more details, please check the Optimization API at: + * https://mxnet.incubator.apache.org/api/python/optimization/optimization.html + * + * + * + * Defined in src/operator/tensor/indexing_op.cc:L539 + * }}} + * + * @param data The input array to the embedding operator. + * @param weight The embedding weight matrix. + * @param input_dim Vocabulary size of the input indices. + * @param output_dim Dimension of the embedding vectors. + * @param dtype Data type of weight. + * @param sparse_grad Compute row sparse gradient in the backward calculation. If set to True, the grad's storage type is row_sparse. 
+ * @return org.apache.mxnet.Symbol + */ +@Experimental +def Embedding (data : Option[org.apache.mxnet.Symbol] = None, weight : Option[org.apache.mxnet.Symbol] = None, input_dim : Int, output_dim : Int, dtype : Option[String] = None, sparse_grad : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Flattens the input array into a 2-D array by collapsing the higher dimensions. + * .. note:: `Flatten` is deprecated. Use `flatten` instead. + * For an input array with shape ``(d1, d2, ..., dk)``, `flatten` operation reshapes + * the input array into an output array of shape ``(d1, d2*...*dk)``. + * Note that the behavior of this function is different from numpy.ndarray.flatten, + * which behaves similar to mxnet.ndarray.reshape((-1,)). + * Example:: + * x = `[ [ + * [1,2,3], + * [4,5,6], + * [7,8,9] + * ], + * [ [1,2,3], + * [4,5,6], + * [7,8,9] + * ] ], + * flatten(x) = `[ [ 1., 2., 3., 4., 5., 6., 7., 8., 9.], + * [ 1., 2., 3., 4., 5., 6., 7., 8., 9.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L250 + * }}} + * + * @param data Input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def Flatten (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Applies a linear transformation: :math:`Y = XW^T + b`. + * + * If ``flatten`` is set to be true, then the shapes are: + * + * - **data**: `(batch_size, x1, x2, ..., xn)` + * - **weight**: `(num_hidden, x1 * x2 * ... * xn)` + * - **bias**: `(num_hidden,)` + * - **out**: `(batch_size, num_hidden)` + * + * If ``flatten`` is set to be false, then the shapes are: + * + * - **data**: `(x1, x2, ..., xn, input_dim)` + * - **weight**: `(num_hidden, input_dim)` + * - **bias**: `(num_hidden,)` + * - **out**: `(x1, x2, ..., xn, num_hidden)` + * + * The learnable parameters include both ``weight`` and ``bias``. + * + * If ``no_bias`` is set to be true, then the ``bias`` term is ignored. + * + * .. Note:: + * + * The sparse support for FullyConnected is limited to forward evaluation with `row_sparse` + * weight and bias, where the length of `weight.indices` and `bias.indices` must be equal + * to `num_hidden`. This could be useful for model inference with `row_sparse` weights + * trained with importance sampling or noise contrastive estimation. + * + * To compute linear transformation with 'csr' sparse data, sparse.dot is recommended instead + * of sparse.FullyConnected. + * + * + * + * Defined in src/operator/nn/fully_connected.cc:L291 + * }}} + * + * @param data Input data. + * @param weight Weight matrix. + * @param bias Bias parameter. + * @param num_hidden Number of hidden nodes of the output. + * @param no_bias Whether to disable bias parameter. + * @param flatten Whether to collapse all but the first axis of the input data tensor. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def FullyConnected (data : Option[org.apache.mxnet.Symbol] = None, weight : Option[org.apache.mxnet.Symbol] = None, bias : Option[org.apache.mxnet.Symbol] = None, num_hidden : Int, no_bias : Option[Boolean] = None, flatten : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Generates 2D sampling grid for bilinear sampling. + * }}} + * + * @param data Input data to the function. + * @param transform_type The type of transformation. 
For `affine`, input data should be an affine matrix of size (batch, 6). For `warp`, input data should be an optical flow of size (batch, 2, h, w). + * @param target_shape Specifies the output shape (H, W). This is required if transformation type is `affine`. If transformation type is `warp`, this parameter is ignored. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def GridGenerator (data : Option[org.apache.mxnet.Symbol] = None, transform_type : String, target_shape : Option[org.apache.mxnet.Shape] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Group normalization. + * + * The input channels are separated into ``num_groups`` groups, each containing ``num_channels / num_groups`` channels. + * The mean and standard-deviation are calculated separately over each group. + * + * .. math:: + * + * data = data.reshape((N, num_groups, C // num_groups, ...)) + * out = \frac{data - mean(data, axis)}{\sqrt{var(data, axis) + \epsilon}} * gamma + beta + * + * Both ``gamma`` and ``beta`` are learnable parameters. + * + * + * + * Defined in src/operator/nn/group_norm.cc:L77 + * }}} + * + * @param data Input data + * @param gamma gamma array + * @param beta beta array + * @param num_groups Total number of groups. + * @param eps An `epsilon` parameter to prevent division by 0. + * @param output_mean_var Output the mean and std calculated along the given axis. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def GroupNorm (data : Option[org.apache.mxnet.Symbol] = None, gamma : Option[org.apache.mxnet.Symbol] = None, beta : Option[org.apache.mxnet.Symbol] = None, num_groups : Option[Int] = None, eps : Option[Float] = None, output_mean_var : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Apply a sparse regularization to the output of a sigmoid activation function. + * }}} + * + * @param data Input data. + * @param sparseness_target The sparseness target + * @param penalty The tradeoff parameter for the sparseness penalty + * @param momentum The momentum for running average + * @return org.apache.mxnet.Symbol + */ +@Experimental +def IdentityAttachKLSparseReg (data : Option[org.apache.mxnet.Symbol] = None, sparseness_target : Option[Float] = None, penalty : Option[Float] = None, momentum : Option[Float] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Applies instance normalization to the n-dimensional input array. + * + * This operator takes an n-dimensional input array where (n>2) and normalizes + * the input using the following formula: + * + * .. math:: + * + * out = \frac{x - mean[data]}{ \sqrt{Var[data]} + \epsilon} * gamma + beta + * + * This layer is similar to batch normalization layer (`BatchNorm`) + * with two differences: first, the normalization is + * carried out per example (instance), not over a batch. Second, the + * same normalization is applied both at test and train time. This + * operation is also known as `contrast normalization`. + * + * If the input data is of shape [batch, channel, spatial_dim1, spatial_dim2, ...], + * `gamma` and `beta` parameters must be vectors of shape [channel]. + * + * This implementation is based on this paper [1]_ + * + * .. [1] Instance Normalization: The Missing Ingredient for Fast Stylization, + * D. Ulyanov, A. Vedaldi, V. Lempitsky, 2016 (arXiv:1607.08022v2).
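+ *
+ * A minimal Scala call sketch (hypothetical variable names; this assumes the operator is
+ * reached through ``Symbol.api`` once this generated class is wired into the package)::
+ *
+ *   val x = Symbol.Variable("x")          // input of shape (batch, channel, ...)
+ *   val gamma = Symbol.Variable("gamma")  // vector of length 'channel'
+ *   val beta = Symbol.Variable("beta")    // vector of length 'channel'
+ *   val out = Symbol.api.InstanceNorm(data = Some(x), gamma = Some(gamma),
+ *                                     beta = Some(beta), eps = Some(0.001f))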
+ * + * Examples:: + * + * // Input of shape (2,1,2) + * x = `[ `[ [ 1.1, 2.2] ], + * `[ [ 3.3, 4.4] ] ] + * + * // gamma parameter of length 1 + * gamma = [1.5] + * + * // beta parameter of length 1 + * beta = [0.5] + * + * // Instance normalization is calculated with the above formula + * InstanceNorm(x,gamma,beta) = `[ `[ [-0.997527 , 1.99752665] ], + * `[ [-0.99752653, 1.99752724] ] ] + * + * + * + * Defined in src/operator/instance_norm.cc:L95 + * }}} + * + * @param data An n-dimensional input array (n > 2) of the form [batch, channel, spatial_dim1, spatial_dim2, ...]. + * @param gamma A vector of length 'channel', which multiplies the normalized input. + * @param beta A vector of length 'channel', which is added to the product of the normalized input and the weight. + * @param eps An `epsilon` parameter to prevent division by 0. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def InstanceNorm (data : Option[org.apache.mxnet.Symbol] = None, gamma : Option[org.apache.mxnet.Symbol] = None, beta : Option[org.apache.mxnet.Symbol] = None, eps : Option[Float] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Normalize the input array using the L2 norm. + * + * For 1-D NDArray, it computes:: + * + * out = data / sqrt(sum(data ** 2) + eps) + * + * For N-D NDArray, if the input array has shape (N, N, ..., N), + * + * with ``mode`` = ``instance``, it normalizes each instance in the multidimensional + * array by its L2 norm.:: + * + * for i in 0...N + * out[i,:,:,...,:] = data[i,:,:,...,:] / sqrt(sum(data[i,:,:,...,:] ** 2) + eps) + * + * with ``mode`` = ``channel``, it normalizes each channel in the array by its L2 norm.:: + * + * for i in 0...N + * out[:,i,:,...,:] = data[:,i,:,...,:] / sqrt(sum(data[:,i,:,...,:] ** 2) + eps) + * + * with ``mode`` = ``spatial``, it normalizes the cross channel norm for each position + * in the array by its L2 norm.:: + * + * for dim in 2...N + * for i in 0...N + * out[.....,i,...] = take(out, indices=i, axis=dim) / sqrt(sum(take(out, indices=i, axis=dim) ** 2) + eps) + * -dim- + * + * Example:: + * + * x = `[ `[ [1,2], + * [3,4] ], + * `[ [2,2], + * [5,6] ] ] + * + * L2Normalization(x, mode='instance') + * =`[ `[ [ 0.18257418 0.36514837] + * [ 0.54772252 0.73029673] ] + * `[ [ 0.24077171 0.24077171] + * [ 0.60192931 0.72231513] ] ] + * + * L2Normalization(x, mode='channel') + * =`[ `[ [ 0.31622776 0.44721359] + * [ 0.94868326 0.89442718] ] + * `[ [ 0.37139067 0.31622776] + * [ 0.92847669 0.94868326] ] ] + * + * L2Normalization(x, mode='spatial') + * =`[ `[ [ 0.44721359 0.89442718] + * [ 0.60000002 0.80000001] ] + * `[ [ 0.70710677 0.70710677] + * [ 0.6401844 0.76822126] ] ] + * + * + * + * Defined in src/operator/l2_normalization.cc:L196 + * }}} + * + * @param data Input array to normalize. + * @param eps A small constant for numerical stability. + * @param mode Specify the dimension along which to compute L2 norm. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def L2Normalization (data : Option[org.apache.mxnet.Symbol] = None, eps : Option[Float] = None, mode : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Applies local response normalization to the input. + * + * The local response normalization layer performs "lateral inhibition" by normalizing + * over local input regions. 
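+ *
+ * A rough Scala call sketch (hypothetical names; assumes the operator is reached through
+ * ``Symbol.api`` like the other generated operators in this file; the normalization it
+ * computes is spelled out below)::
+ *
+ *   val conv = Symbol.Variable("conv_out")
+ *   val lrn = Symbol.api.LRN(data = Some(conv), nsize = 5, alpha = Some(0.0001f),
+ *                            beta = Some(0.75f), knorm = Some(2f))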
+ * + * If :math:`a_{x,y}^{i}` is the activity of a neuron computed by applying kernel :math:`i` at position + * :math:`(x, y)` and then applying the ReLU nonlinearity, the response-normalized + * activity :math:`b_{x,y}^{i}` is given by the expression: + * + * .. math:: + * b_{x,y}^{i} = \frac{a_{x,y}^{i}}{\Bigg({k + \frac{\alpha}{n} \sum_{j=max(0, i-\frac{n}{2})}^{min(N-1, i+\frac{n}{2})} (a_{x,y}^{j})^{2}}\Bigg)^{\beta}} + * + * where the sum runs over :math:`n` "adjacent" kernel maps at the same spatial position, and :math:`N` is the total + * number of kernels in the layer. + * + * + * + * Defined in src/operator/nn/lrn.cc:L164 + * }}} + * + * @param data Input data to LRN + * @param alpha The variance scaling parameter :math:`\alpha` in the LRN expression. + * @param beta The power parameter :math:`\beta` in the LRN expression. + * @param knorm The parameter :math:`k` in the LRN expression. + * @param nsize normalization window width in elements. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def LRN (data : Option[org.apache.mxnet.Symbol] = None, alpha : Option[Float] = None, beta : Option[Float] = None, knorm : Option[Float] = None, nsize : Int, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Layer normalization. + * + * Normalizes the channels of the input tensor by mean and variance, and applies a scale ``gamma`` as + * well as offset ``beta``. + * + * Assume the input has more than one dimension and we normalize along axis 1. + * We first compute the mean and variance along this axis and then + * compute the normalized output, which has the same shape as input, as following: + * + * .. math:: + * + * out = \frac{data - mean(data, axis)}{\sqrt{var(data, axis) + \epsilon}} * gamma + beta + * + * Both ``gamma`` and ``beta`` are learnable parameters. + * + * Unlike BatchNorm and InstanceNorm, the *mean* and *var* are computed along the channel dimension. + * + * Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` + * have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and + * ``data_std``. Note that no gradient will be passed through these two outputs. + * + * The parameter ``axis`` specifies which axis of the input shape denotes + * the 'channel' (separately normalized groups). The default is -1, which sets the channel + * axis to be the last item in the input shape. + * + * + * + * Defined in src/operator/nn/layer_norm.cc:L156 + * }}} + * + * @param data Input data to layer normalization + * @param gamma gamma array + * @param beta beta array + * @param axis The axis to perform layer normalization. Usually, this should be the axis of the channel dimension. Negative values mean indexing from right to left. + * @param eps An `epsilon` parameter to prevent division by 0. + * @param output_mean_var Output the mean and std calculated along the given axis. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def LayerNorm (data : Option[org.apache.mxnet.Symbol] = None, gamma : Option[org.apache.mxnet.Symbol] = None, beta : Option[org.apache.mxnet.Symbol] = None, axis : Option[Int] = None, eps : Option[Float] = None, output_mean_var : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Applies Leaky rectified linear unit activation element-wise to the input.
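+ *
+ * A minimal Scala call sketch (hypothetical names; assumes access through ``Symbol.api``;
+ * the supported variants are listed below)::
+ *
+ *   val x = Symbol.Variable("x")
+ *   // "elu" with slope 0.25; for "prelu" a learnable `gamma` symbol would be passed instead
+ *   val y = Symbol.api.LeakyReLU(data = Some(x), act_type = Some("elu"), slope = Some(0.25f))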
+ * + * Leaky ReLUs attempt to fix the "dying ReLU" problem by allowing a small `slope` + * when the input is negative and has a slope of one when input is positive. + * + * The following modified ReLU Activation functions are supported: + * + * - *elu*: Exponential Linear Unit. `y = x > 0 ? x : slope * (exp(x)-1)` + * - *selu*: Scaled Exponential Linear Unit. `y = lambda * (x > 0 ? x : alpha * (exp(x) - 1))` where + * *lambda = 1.0507009873554804934193349852946* and *alpha = 1.6732632423543772848170429916717*. + * - *leaky*: Leaky ReLU. `y = x > 0 ? x : slope * x` + * - *prelu*: Parametric ReLU. This is same as *leaky* except that `slope` is learnt during training. + * - *rrelu*: Randomized ReLU. same as *leaky* but the `slope` is uniformly and randomly chosen from + * *[lower_bound, upper_bound)* for training, while fixed to be + * *(lower_bound+upper_bound)/2* for inference. + * + * + * + * Defined in src/operator/leaky_relu.cc:L161 + * }}} + * + * @param data Input data to activation function. + * @param gamma Input data to activation function. + * @param act_type Activation function to be applied. + * @param slope Init slope for the activation. (For leaky and elu only) + * @param lower_bound Lower bound of random slope. (For rrelu only) + * @param upper_bound Upper bound of random slope. (For rrelu only) + * @return org.apache.mxnet.Symbol + */ +@Experimental +def LeakyReLU (data : Option[org.apache.mxnet.Symbol] = None, gamma : Option[org.apache.mxnet.Symbol] = None, act_type : Option[String] = None, slope : Option[Float] = None, lower_bound : Option[Float] = None, upper_bound : Option[Float] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes and optimizes for squared loss during backward propagation. + * Just outputs ``data`` during forward propagation. + * + * If :math:`\hat{y}_i` is the predicted value of the i-th sample, and :math:`y_i` is the corresponding target value, + * then the squared loss estimated over :math:`n` samples is defined as + * + * :math:`\text{SquaredLoss}(\textbf{Y}, \hat{\textbf{Y}} ) = \frac{1}{n} \sum_{i=0}^{n-1} \lVert \textbf{y}_i - \hat{\textbf{y}}_i \rVert_2` + * + * .. note:: + * Use the LinearRegressionOutput as the final output layer of a net. + * + * The storage type of ``label`` can be ``default`` or ``csr`` + * + * - LinearRegressionOutput(default, default) = default + * - LinearRegressionOutput(default, csr) = default + * + * By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. + * The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. + * + * + * + * Defined in src/operator/regression_output.cc:L92 + * }}} + * + * @param data Input data to the function. + * @param label Input label to the function. + * @param grad_scale Scale the gradient by a float factor + * @return org.apache.mxnet.Symbol + */ +@Experimental +def LinearRegressionOutput (data : Option[org.apache.mxnet.Symbol] = None, label : Option[org.apache.mxnet.Symbol] = None, grad_scale : Option[Float] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Applies a logistic function to the input. + * + * The logistic function, also known as the sigmoid function, is computed as + * :math:`\frac{1}{1+exp(-\textbf{x})}`. 
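+ *
+ * A minimal Scala call sketch (hypothetical names; assumes access through ``Symbol.api``)::
+ *
+ *   val fc = Symbol.api.FullyConnected(data = Some(Symbol.Variable("data")), num_hidden = 1)
+ *   val label = Symbol.Variable("label")
+ *   val loss = Symbol.api.LogisticRegressionOutput(data = Some(fc), label = Some(label))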
+ * + * Commonly, the sigmoid is used to squash the real-valued output of a linear model + * :math:`w^T x + b` into the [0,1] range so that it can be interpreted as a probability. + * It is suitable for binary classification or probability prediction tasks. + * + * .. note:: + * Use the LogisticRegressionOutput as the final output layer of a net. + * + * The storage type of ``label`` can be ``default`` or ``csr`` + * + * - LogisticRegressionOutput(default, default) = default + * - LogisticRegressionOutput(default, csr) = default + * + * The loss function used is the Binary Cross Entropy Loss: + * + * :math:`-{(y\log(p) + (1 - y)\log(1 - p))}` + * + * Where `y` is the ground truth probability of positive outcome for a given example, and `p` the probability predicted by the model. By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. + * The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. + * + * + * + * Defined in src/operator/regression_output.cc:L152 + * }}} + * + * @param data Input data to the function. + * @param label Input label to the function. + * @param grad_scale Scale the gradient by a float factor + * @return org.apache.mxnet.Symbol + */ +@Experimental +def LogisticRegressionOutput (data : Option[org.apache.mxnet.Symbol] = None, label : Option[org.apache.mxnet.Symbol] = None, grad_scale : Option[Float] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes mean absolute error of the input. + * + * MAE is a risk metric corresponding to the expected value of the absolute error. + * + * If :math:`\hat{y}_i` is the predicted value of the i-th sample, and :math:`y_i` is the corresponding target value, + * then the mean absolute error (MAE) estimated over :math:`n` samples is defined as + * + * :math:`\text{MAE}(\textbf{Y}, \hat{\textbf{Y}} ) = \frac{1}{n} \sum_{i=0}^{n-1} \lVert \textbf{y}_i - \hat{\textbf{y}}_i \rVert_1` + * + * .. note:: + * Use the MAERegressionOutput as the final output layer of a net. + * + * The storage type of ``label`` can be ``default`` or ``csr`` + * + * - MAERegressionOutput(default, default) = default + * - MAERegressionOutput(default, csr) = default + * + * By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. + * The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. + * + * + * + * Defined in src/operator/regression_output.cc:L120 + * }}} + * + * @param data Input data to the function. + * @param label Input label to the function. + * @param grad_scale Scale the gradient by a float factor + * @return org.apache.mxnet.Symbol + */ +@Experimental +def MAERegressionOutput (data : Option[org.apache.mxnet.Symbol] = None, label : Option[org.apache.mxnet.Symbol] = None, grad_scale : Option[Float] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Make your own loss function in network construction. + * + * This operator accepts a customized loss function symbol as a terminal loss and + * the symbol should be an operator with no backward dependency. + * The output of this function is the gradient of loss with respect to the input data. + * + * For example, if you are making a cross entropy loss function.
Assume ``out`` is the + * predicted output and ``label`` is the true label, then the cross entropy can be defined as:: + * + * cross_entropy = label * log(out) + (1 - label) * log(1 - out) + * loss = MakeLoss(cross_entropy) + * + * We will need to use ``MakeLoss`` when we are creating our own loss function or we want to + * combine multiple loss functions. Also we may want to stop some variables' gradients + * from backpropagation. See more detail in ``BlockGrad`` or ``stop_gradient``. + * + * In addition, we can give a scale to the loss by setting ``grad_scale``, + * so that the gradient of the loss will be rescaled in the backpropagation. + * + * .. note:: This operator should be used as a Symbol instead of NDArray. + * + * + * + * Defined in src/operator/make_loss.cc:L71 + * }}} + * + * @param data Input array. + * @param grad_scale Gradient scale as a supplement to unary and binary operators + * @param valid_thresh clip each element in the array to 0 when it is less than ``valid_thresh``. This is used when ``normalization`` is set to ``'valid'``. + * @param normalization If this is set to null, the output gradient will not be normalized. If this is set to batch, the output gradient will be divided by the batch size. If this is set to valid, the output gradient will be divided by the number of valid input elements. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def MakeLoss (data : Option[org.apache.mxnet.Symbol] = None, grad_scale : Option[Float] = None, valid_thresh : Option[Float] = None, normalization : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Pads an input array with a constant or edge values of the array. + * + * .. note:: `Pad` is deprecated. Use `pad` instead. + * + * .. note:: Current implementation only supports 4D and 5D input arrays with padding applied + * only on axes 1, 2 and 3. Expects axes 4 and 5 in `pad_width` to be zero. + * + * This operation pads an input array with either a `constant_value` or edge values + * along each axis of the input array. The amount of padding is specified by `pad_width`. + * + * `pad_width` is a tuple of integer padding widths for each axis of the format + * ``(before_1, after_1, ... , before_N, after_N)``. The `pad_width` should be of length ``2*N`` + * where ``N`` is the number of dimensions of the array. + * + * For dimension ``N`` of the input array, ``before_N`` and ``after_N`` indicates how many values + * to add before and after the elements of the array along dimension ``N``. + * The widths of the higher two dimensions ``before_1``, ``after_1``, ``before_2``, + * ``after_2`` must be 0. + * + * Example:: + * + * x = `[ [`[ [ 1. 2. 3.] + * [ 4. 5. 6.] ] + * + * `[ [ 7. 8. 9.] + * [ 10. 11. 12.] ] ] + * + * + * `[ `[ [ 11. 12. 13.] + * [ 14. 15. 16.] ] + * + * `[ [ 17. 18. 19.] + * [ 20. 21. 22.] ] ] ] + * + * pad(x,mode="edge", pad_width=(0,0,0,0,1,1,1,1)) = + * + * `[ [`[ [ 1. 1. 2. 3. 3.] + * [ 1. 1. 2. 3. 3.] + * [ 4. 4. 5. 6. 6.] + * [ 4. 4. 5. 6. 6.] ] + * + * `[ [ 7. 7. 8. 9. 9.] + * [ 7. 7. 8. 9. 9.] + * [ 10. 10. 11. 12. 12.] + * [ 10. 10. 11. 12. 12.] ] ] + * + * + * `[ `[ [ 11. 11. 12. 13. 13.] + * [ 11. 11. 12. 13. 13.] + * [ 14. 14. 15. 16. 16.] + * [ 14. 14. 15. 16. 16.] ] + * + * `[ [ 17. 17. 18. 19. 19.] + * [ 17. 17. 18. 19. 19.] + * [ 20. 20. 21. 22. 22.] + * [ 20. 20. 21. 22. 22.] ] ] ] + * + * pad(x, mode="constant", constant_value=0, pad_width=(0,0,0,0,1,1,1,1)) = + * + * `[ [`[ [ 0. 0. 0. 0. 0.] + * [ 0. 1. 2. 3. 
0.] + * [ 0. 4. 5. 6. 0.] + * [ 0. 0. 0. 0. 0.] ] + * + * `[ [ 0. 0. 0. 0. 0.] + * [ 0. 7. 8. 9. 0.] + * [ 0. 10. 11. 12. 0.] + * [ 0. 0. 0. 0. 0.] ] ] + * + * + * `[ `[ [ 0. 0. 0. 0. 0.] + * [ 0. 11. 12. 13. 0.] + * [ 0. 14. 15. 16. 0.] + * [ 0. 0. 0. 0. 0.] ] + * + * `[ [ 0. 0. 0. 0. 0.] + * [ 0. 17. 18. 19. 0.] + * [ 0. 20. 21. 22. 0.] + * [ 0. 0. 0. 0. 0.] ] ] ] + * + * + * + * + * Defined in src/operator/pad.cc:L766 + * }}} + * + * @param data An n-dimensional input array. + * @param mode Padding type to use. "constant" pads with `constant_value` "edge" pads using the edge values of the input array "reflect" pads by reflecting values with respect to the edges. + * @param pad_width Widths of the padding regions applied to the edges of each axis. It is a tuple of integer padding widths for each axis of the format ``(before_1, after_1, ... , before_N, after_N)``. It should be of length ``2*N`` where ``N`` is the number of dimensions of the array.This is equivalent to pad_width in numpy.pad, but flattened. + * @param constant_value The value used for padding when `mode` is "constant". + * @return org.apache.mxnet.Symbol + */ +@Experimental +def Pad (data : Option[org.apache.mxnet.Symbol] = None, mode : String, pad_width : org.apache.mxnet.Shape, constant_value : Option[Double] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Performs pooling on the input. + * + * The shapes for 1-D pooling are + * + * - **data** and **out**: *(batch_size, channel, width)* (NCW layout) or + * *(batch_size, width, channel)* (NWC layout), + * + * The shapes for 2-D pooling are + * + * - **data** and **out**: *(batch_size, channel, height, width)* (NCHW layout) or + * *(batch_size, height, width, channel)* (NHWC layout), + * + * out_height = f(height, kernel[0], pad[0], stride[0]) + * out_width = f(width, kernel[1], pad[1], stride[1]) + * + * The definition of *f* depends on ``pooling_convention``, which has two options: + * + * - **valid** (default):: + * + * f(x, k, p, s) = floor((x+2*p-k)/s)+1 + * + * - **full**, which is compatible with Caffe:: + * + * f(x, k, p, s) = ceil((x+2*p-k)/s)+1 + * + * When ``global_pool`` is set to be true, then global pooling is performed. It will reset + * ``kernel=(height, width)`` and set the appropiate padding to 0. + * + * Three pooling options are supported by ``pool_type``: + * + * - **avg**: average pooling + * - **max**: max pooling + * - **sum**: sum pooling + * - **lp**: Lp pooling + * + * For 3-D pooling, an additional *depth* dimension is added before + * *height*. Namely the input data and output will have shape *(batch_size, channel, depth, + * height, width)* (NCDHW layout) or *(batch_size, depth, height, width, channel)* (NDHWC layout). + * + * Notes on Lp pooling: + * + * Lp pooling was first introduced by this paper: https://arxiv.org/pdf/1204.3968.pdf. + * L-1 pooling is simply sum pooling, while L-inf pooling is simply max pooling. + * We can see that Lp pooling stands between those two, in practice the most common value for p is 2. + * + * For each window ``X``, the mathematical expression for Lp pooling is: + * + * :math:`f(X) = \sqrt[p]{\sum_{x}^{X} x^p}` + * + * + * + * Defined in src/operator/nn/pooling.cc:L417 + * }}} + * + * @param data Input data to the pooling operator. + * @param kernel Pooling kernel size: (y, x) or (d, y, x) + * @param pool_type Pooling type to be applied. + * @param global_pool Ignore kernel size, do global pooling based on current input feature map. 
+ * @param cudnn_off Turn off cudnn pooling and use MXNet pooling operator. + * @param pooling_convention Pooling convention to be applied. + * @param stride Stride: for pooling (y, x) or (d, y, x). Defaults to 1 for each dimension. + * @param pad Pad for pooling: (y, x) or (d, y, x). Defaults to no padding. + * @param p_value Value of p for Lp pooling, can be 1 or 2, required for Lp Pooling. + * @param count_include_pad Only used for AvgPool, specify whether to count padding elements for average calculation. For example, with a 5*5 kernel on a 3*3 corner of an image, the sum of the 9 valid elements will be divided by 25 if this is set to true, or it will be divided by 9 if this is set to false. Defaults to true. + * @param layout Set layout for input and output. Empty for + default layout: NCW for 1d, NCHW for 2d and NCDHW for 3d. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def Pooling (data : Option[org.apache.mxnet.Symbol] = None, kernel : Option[org.apache.mxnet.Shape] = None, pool_type : Option[String] = None, global_pool : Option[Boolean] = None, cudnn_off : Option[Boolean] = None, pooling_convention : Option[String] = None, stride : Option[org.apache.mxnet.Shape] = None, pad : Option[org.apache.mxnet.Shape] = None, p_value : Option[Int] = None, count_include_pad : Option[Boolean] = None, layout : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * This operator is DEPRECATED. + * Perform pooling on the input. + * + * The shapes for 2-D pooling are + * + * - **data**: *(batch_size, channel, height, width)* + * - **out**: *(batch_size, num_filter, out_height, out_width)*, with:: + * + * out_height = f(height, kernel[0], pad[0], stride[0]) + * out_width = f(width, kernel[1], pad[1], stride[1]) + * + * The definition of *f* depends on ``pooling_convention``, which has two options: + * + * - **valid** (default):: + * + * f(x, k, p, s) = floor((x+2*p-k)/s)+1 + * + * - **full**, which is compatible with Caffe:: + * + * f(x, k, p, s) = ceil((x+2*p-k)/s)+1 + * + * When ``global_pool`` is set to be true, then global pooling is performed, namely resetting + * ``kernel=(height, width)``. + * + * Three pooling options are supported by ``pool_type``: + * + * - **avg**: average pooling + * - **max**: max pooling + * - **sum**: sum pooling + * + * 1-D pooling is a special case of 2-D pooling with *width=1* and + * *kernel[1]=1*. + * + * For 3-D pooling, an additional *depth* dimension is added before + * *height*. Namely the input data will have shape *(batch_size, channel, depth, + * height, width)*. + * + * + * + * Defined in src/operator/pooling_v1.cc:L104 + * }}} + * + * @param data Input data to the pooling operator. + * @param kernel pooling kernel size: (y, x) or (d, y, x) + * @param pool_type Pooling type to be applied. + * @param global_pool Ignore kernel size, do global pooling based on current input feature map. + * @param pooling_convention Pooling convention to be applied.
+ * @param stride stride: for pooling (y, x) or (d, y, x) + * @param pad pad for pooling: (y, x) or (d, y, x) + * @return org.apache.mxnet.Symbol + */ +@Experimental +def Pooling_v1 (data : Option[org.apache.mxnet.Symbol] = None, kernel : Option[org.apache.mxnet.Shape] = None, pool_type : Option[String] = None, global_pool : Option[Boolean] = None, pooling_convention : Option[String] = None, stride : Option[org.apache.mxnet.Shape] = None, pad : Option[org.apache.mxnet.Shape] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Applies recurrent layers to input data. Currently, vanilla RNN, LSTM and GRU are + * implemented, with both multi-layer and bidirectional support. + * + * When the input data is of type float32 and the environment variables MXNET_CUDA_ALLOW_TENSOR_CORE + * and MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION are set to 1, this operator will try to use + * pseudo-float16 precision (float32 math with float16 I/O) precision in order to use + * Tensor Cores on suitable NVIDIA GPUs. This can sometimes give significant speedups. + * + * **Vanilla RNN** + * + * Applies a single-gate recurrent layer to input X. Two kinds of activation function are supported: + * ReLU and Tanh. + * + * With ReLU activation function: + * + * .. math:: + * h_t = relu(W_{ih} * x_t + b_{ih} + W_{hh} * h_{(t-1)} + b_{hh}) + * + * With Tanh activtion function: + * + * .. math:: + * h_t = \tanh(W_{ih} * x_t + b_{ih} + W_{hh} * h_{(t-1)} + b_{hh}) + * + * Reference paper: Finding structure in time - Elman, 1988. + * https://crl.ucsd.edu/~elman/Papers/fsit.pdf + * + * **LSTM** + * + * Long Short-Term Memory - Hochreiter, 1997. http://www.bioinf.jku.at/publications/older/2604.pdf + * + * .. math:: + * \begin{array}{ll} + * i_t = \mathrm{sigmoid}(W_{ii} x_t + b_{ii} + W_{hi} h_{(t-1)} + b_{hi}) \\ + * f_t = \mathrm{sigmoid}(W_{if} x_t + b_{if} + W_{hf} h_{(t-1)} + b_{hf}) \\ + * g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hc} h_{(t-1)} + b_{hg}) \\ + * o_t = \mathrm{sigmoid}(W_{io} x_t + b_{io} + W_{ho} h_{(t-1)} + b_{ho}) \\ + * c_t = f_t * c_{(t-1)} + i_t * g_t \\ + * h_t = o_t * \tanh(c_t) + * \end{array} + * + * **GRU** + * + * Gated Recurrent Unit - Cho et al. 2014. http://arxiv.org/abs/1406.1078 + * + * The definition of GRU here is slightly different from paper but compatible with CUDNN. + * + * .. math:: + * \begin{array}{ll} + * r_t = \mathrm{sigmoid}(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\ + * z_t = \mathrm{sigmoid}(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\ + * n_t = \tanh(W_{in} x_t + b_{in} + r_t * (W_{hn} h_{(t-1)}+ b_{hn})) \\ + * h_t = (1 - z_t) * n_t + z_t * h_{(t-1)} \\ + * \end{array} + * + * + * Defined in src/operator/rnn.cc:L354 + * }}} + * + * @param data Input data to RNN + * @param parameters Vector of all RNN trainable parameters concatenated + * @param state initial hidden state of the RNN + * @param state_cell initial cell state for LSTM networks (only for LSTM) + * @param sequence_length Vector of valid sequence lengths for each element in batch. (Only used if use_sequence_length kwarg is True) + * @param state_size size of the state for each layer + * @param num_layers number of stacked layers + * @param bidirectional whether to use bidirectional recurrent layers + * @param mode the type of RNN to compute + * @param p drop rate of the dropout on the outputs of each RNN layer, except the last layer. + * @param state_outputs Whether to have the states as symbol outputs. 
+ * @param projection_size size of the LSTM projection + * @param lstm_state_clip_min Minimum clip value of LSTM states. This option must be used together with lstm_state_clip_max. + * @param lstm_state_clip_max Maximum clip value of LSTM states. This option must be used together with lstm_state_clip_min. + * @param lstm_state_clip_nan Whether to stop NaN from propagating in state by clipping it to min/max. If clipping range is not specified, this option is ignored. + * @param use_sequence_length If set to true, this layer takes in an extra input parameter `sequence_length` to specify variable length sequence + * @return org.apache.mxnet.Symbol + */ +@Experimental +def RNN (data : Option[org.apache.mxnet.Symbol] = None, parameters : Option[org.apache.mxnet.Symbol] = None, state : Option[org.apache.mxnet.Symbol] = None, state_cell : Option[org.apache.mxnet.Symbol] = None, sequence_length : Option[org.apache.mxnet.Symbol] = None, state_size : Int, num_layers : Int, bidirectional : Option[Boolean] = None, mode : String, p : Option[Float] = None, state_outputs : Option[Boolean] = None, projection_size : Option[Int] = None, lstm_state_clip_min : Option[Double] = None, lstm_state_clip_max : Option[Double] = None, lstm_state_clip_nan : Option[Boolean] = None, use_sequence_length : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Performs region of interest (ROI) pooling on the input array. + * + * ROI pooling is a variant of a max pooling layer, in which the output size is fixed and + * region of interest is a parameter. Its purpose is to perform max pooling on the inputs + * of non-uniform sizes to obtain fixed-size feature maps. ROI pooling is a neural-net + * layer mostly used in training a `Fast R-CNN` network for object detection. + * + * This operator takes a 4D feature map as an input array and region proposals as `rois`, + * then it pools over sub-regions of input and produces a fixed-sized output array + * regardless of the ROI size. + * + * To crop the feature map accordingly, you can resize the bounding box coordinates + * by changing the parameters `rois` and `spatial_scale`. + * + * The cropped feature maps are pooled by standard max pooling operation to a fixed size output + * indicated by a `pooled_size` parameter. batch_size will change to the number of region + * bounding boxes after `ROIPooling`. + * + * The size of each region of interest doesn't have to be perfectly divisible by + * the number of pooling sections (`pooled_size`). + * + * Example:: + * + * x = `[ [`[ [ 0., 1., 2., 3., 4., 5.], + * [ 6., 7., 8., 9., 10., 11.], + * [ 12., 13., 14., 15., 16., 17.], + * [ 18., 19., 20., 21., 22., 23.], + * [ 24., 25., 26., 27., 28., 29.], + * [ 30., 31., 32., 33., 34., 35.], + * [ 36., 37., 38., 39., 40., 41.], + * [ 42., 43., 44., 45., 46., 47.] ] ] ] + * + * // region of interest i.e. bounding box coordinates. + * y = `[ [0,0,0,4,4] ] + * + * // returns array of shape (2,2) according to the given roi with max pooling. + * ROIPooling(x, y, (2,2), 1.0) = `[ [`[ [ 14., 16.], + * [ 26., 28.] ] ] ] + * + * // region of interest is changed due to the change in `spatial_scale` parameter. + * ROIPooling(x, y, (2,2), 0.7) = `[ [`[ [ 7., 9.], + * [ 19., 21.]
] ] ] + * + * + * + * Defined in src/operator/roi_pooling.cc:L225 + * }}} + * + * @param data The input array to the pooling operator, a 4D Feature maps + * @param rois Bounding box coordinates, a 2D array of `[ [batch_index, x1, y1, x2, y2] ], where (x1, y1) and (x2, y2) are top left and bottom right corners of designated region of interest. `batch_index` indicates the index of corresponding image in the input array + * @param pooled_size ROI pooling output shape (h,w) + * @param spatial_scale Ratio of input feature map height (or w) to raw image height (or w). Equals the reciprocal of total stride in convolutional layers + * @return org.apache.mxnet.Symbol + */ +@Experimental +def ROIPooling (data : Option[org.apache.mxnet.Symbol] = None, rois : Option[org.apache.mxnet.Symbol] = None, pooled_size : org.apache.mxnet.Shape, spatial_scale : Float, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Reshapes the input array. + * .. note:: ``Reshape`` is deprecated, use ``reshape`` + * Given an array and a shape, this function returns a copy of the array in the new shape. + * The shape is a tuple of integers such as (2,3,4). The size of the new shape should be same as the size of the input array. + * Example:: + * reshape([1,2,3,4], shape=(2,2)) = `[ [1,2], [3,4] ] + * Some dimensions of the shape can take special values from the set {0, -1, -2, -3, -4}. The significance of each is explained below: + * - ``0`` copy this dimension from the input to the output shape. + * Example:: + * - input shape = (2,3,4), shape = (4,0,2), output shape = (4,3,2) + * - input shape = (2,3,4), shape = (2,0,0), output shape = (2,3,4) + * - ``-1`` infers the dimension of the output shape by using the remainder of the input dimensions + * keeping the size of the new array same as that of the input array. + * At most one dimension of shape can be -1. + * Example:: + * - input shape = (2,3,4), shape = (6,1,-1), output shape = (6,1,4) + * - input shape = (2,3,4), shape = (3,-1,8), output shape = (3,1,8) + * - input shape = (2,3,4), shape=(-1,), output shape = (24,) + * - ``-2`` copy all/remainder of the input dimensions to the output shape. + * Example:: + * - input shape = (2,3,4), shape = (-2,), output shape = (2,3,4) + * - input shape = (2,3,4), shape = (2,-2), output shape = (2,3,4) + * - input shape = (2,3,4), shape = (-2,1,1), output shape = (2,3,4,1,1) + * - ``-3`` use the product of two consecutive dimensions of the input shape as the output dimension. + * Example:: + * - input shape = (2,3,4), shape = (-3,4), output shape = (6,4) + * - input shape = (2,3,4,5), shape = (-3,-3), output shape = (6,20) + * - input shape = (2,3,4), shape = (0,-3), output shape = (2,12) + * - input shape = (2,3,4), shape = (-3,-2), output shape = (6,4) + * - ``-4`` split one dimension of the input into two dimensions passed subsequent to -4 in shape (can contain -1). + * Example:: + * - input shape = (2,3,4), shape = (-4,1,2,-2), output shape =(1,2,3,4) + * - input shape = (2,3,4), shape = (2,-4,-1,3,-2), output shape = (2,1,3,4) + * If the argument `reverse` is set to 1, then the special values are inferred from right to left. + * Example:: + * - without reverse=1, for input shape = (10,5,4), shape = (-1,0), output shape would be (40,5) + * - with reverse=1, output shape will be (50,4). + * + * + * Defined in src/operator/tensor/matrix_op.cc:L175 + * }}} + * + * @param data Input data to reshape. 
+ * @param shape The target shape + * @param reverse If true then the special values are inferred from right to left + * @param target_shape (Deprecated! Use ``shape`` instead.) Target new shape. One and only one dim can be 0, in which case it will be inferred from the rest of dims + * @param keep_highest (Deprecated! Use ``shape`` instead.) Whether keep the highest dim unchanged.If set to true, then the first dim in target_shape is ignored,and always fixed as input + * @return org.apache.mxnet.Symbol + */ +@Experimental +def Reshape (data : Option[org.apache.mxnet.Symbol] = None, shape : Option[org.apache.mxnet.Shape] = None, reverse : Option[Boolean] = None, target_shape : Option[org.apache.mxnet.Shape] = None, keep_highest : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes support vector machine based transformation of the input. + * + * This tutorial demonstrates using SVM as output layer for classification instead of softmax: + * https://github.com/dmlc/mxnet/tree/master/example/svm_mnist. + * }}} + * + * @param data Input data for SVM transformation. + * @param label Class label for the input data. + * @param margin The loss function penalizes outputs that lie outside this margin. Default margin is 1. + * @param regularization_coefficient Regularization parameter for the SVM. This balances the tradeoff between coefficient size and error. + * @param use_linear Whether to use L1-SVM objective. L2-SVM objective is used by default. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def SVMOutput (data : Option[org.apache.mxnet.Symbol] = None, label : Option[org.apache.mxnet.Symbol] = None, margin : Option[Float] = None, regularization_coefficient : Option[Float] = None, use_linear : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Takes the last element of a sequence. + * + * This function takes an n-dimensional input array of the form + * [max_sequence_length, batch_size, other_feature_dims] and returns a (n-1)-dimensional array + * of the form [batch_size, other_feature_dims]. + * + * Parameter `sequence_length` is used to handle variable-length sequences. `sequence_length` should be + * an input array of positive ints of dimension [batch_size]. To use this parameter, + * set `use_sequence_length` to `True`, otherwise each example in the batch is assumed + * to have the max sequence length. + * + * .. note:: Alternatively, you can also use `take` operator. + * + * Example:: + * + * x = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.], + * [ 7., 8., 9.] ], + * + * `[ [ 10., 11., 12.], + * [ 13., 14., 15.], + * [ 16., 17., 18.] ], + * + * `[ [ 19., 20., 21.], + * [ 22., 23., 24.], + * [ 25., 26., 27.] ] ] + * + * // returns last sequence when sequence_length parameter is not used + * SequenceLast(x) = `[ [ 19., 20., 21.], + * [ 22., 23., 24.], + * [ 25., 26., 27.] ] + * + * // sequence_length is used + * SequenceLast(x, sequence_length=[1,1,1], use_sequence_length=True) = + * `[ [ 1., 2., 3.], + * [ 4., 5., 6.], + * [ 7., 8., 9.] ] + * + * // sequence_length is used + * SequenceLast(x, sequence_length=[1,2,3], use_sequence_length=True) = + * `[ [ 1., 2., 3.], + * [ 13., 14., 15.], + * [ 25., 26., 27.] 
] + * + * + * + * Defined in src/operator/sequence_last.cc:L106 + * }}} + * + * @param data n-dimensional input array of the form [max_sequence_length, batch_size, other_feature_dims] where n>2 + * @param sequence_length vector of sequence lengths of the form [batch_size] + * @param use_sequence_length If set to true, this layer takes in an extra input parameter `sequence_length` to specify variable length sequence + * @param axis The sequence axis. Only values of 0 and 1 are currently supported. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def SequenceLast (data : Option[org.apache.mxnet.Symbol] = None, sequence_length : Option[org.apache.mxnet.Symbol] = None, use_sequence_length : Option[Boolean] = None, axis : Option[Int] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Sets all elements outside the sequence to a constant value. + * + * This function takes an n-dimensional input array of the form + * [max_sequence_length, batch_size, other_feature_dims] and returns an array of the same shape. + * + * Parameter `sequence_length` is used to handle variable-length sequences. `sequence_length` + * should be an input array of positive ints of dimension [batch_size]. + * To use this parameter, set `use_sequence_length` to `True`, + * otherwise each example in the batch is assumed to have the max sequence length and + * this operator works as the `identity` operator. + * + * Example:: + * + * x = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // Batch 1 + * B1 = `[ [ 1., 2., 3.], + * [ 7., 8., 9.], + * [ 13., 14., 15.] ] + * + * // Batch 2 + * B2 = `[ [ 4., 5., 6.], + * [ 10., 11., 12.], + * [ 16., 17., 18.] ] + * + * // works as identity operator when sequence_length parameter is not used + * SequenceMask(x) = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // sequence_length [1,1] means 1 of each batch will be kept + * // and other rows are masked with default mask value = 0 + * SequenceMask(x, sequence_length=[1,1], use_sequence_length=True) = + * `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ], + * + * `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] ] + * + * // sequence_length [2,3] means 2 of batch B1 and 3 of batch B2 will be kept + * // and other rows are masked with value = 1 + * SequenceMask(x, sequence_length=[2,3], use_sequence_length=True, value=1) = + * `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 1., 1., 1.], + * [ 16., 17., 18.] ] ] + * + * + * + * Defined in src/operator/sequence_mask.cc:L186 + * }}} + * + * @param data n-dimensional input array of the form [max_sequence_length, batch_size, other_feature_dims] where n>2 + * @param sequence_length vector of sequence lengths of the form [batch_size] + * @param use_sequence_length If set to true, this layer takes in an extra input parameter `sequence_length` to specify variable length sequence + * @param value The value to be used as a mask. + * @param axis The sequence axis. Only values of 0 and 1 are currently supported. 
+ * @return org.apache.mxnet.Symbol + */ +@Experimental +def SequenceMask (data : Option[org.apache.mxnet.Symbol] = None, sequence_length : Option[org.apache.mxnet.Symbol] = None, use_sequence_length : Option[Boolean] = None, value : Option[Float] = None, axis : Option[Int] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Reverses the elements of each sequence. + * + * This function takes an n-dimensional input array of the form [max_sequence_length, batch_size, other_feature_dims] + * and returns an array of the same shape. + * + * Parameter `sequence_length` is used to handle variable-length sequences. + * `sequence_length` should be an input array of positive ints of dimension [batch_size]. + * To use this parameter, set `use_sequence_length` to `True`, + * otherwise each example in the batch is assumed to have the max sequence length. + * + * Example:: + * + * x = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // Batch 1 + * B1 = `[ [ 1., 2., 3.], + * [ 7., 8., 9.], + * [ 13., 14., 15.] ] + * + * // Batch 2 + * B2 = `[ [ 4., 5., 6.], + * [ 10., 11., 12.], + * [ 16., 17., 18.] ] + * + * // returns reverse sequence when sequence_length parameter is not used + * SequenceReverse(x) = `[ `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ] ] + * + * // sequence_length [2,2] means 2 rows of + * // both batch B1 and B2 will be reversed. + * SequenceReverse(x, sequence_length=[2,2], use_sequence_length=True) = + * `[ `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // sequence_length [2,3] means 2 of batch B1 and 3 of batch B2 + * // will be reversed. + * SequenceReverse(x, sequence_length=[2,3], use_sequence_length=True) = + * `[ `[ [ 7., 8., 9.], + * [ 16., 17., 18.] ], + * + * `[ [ 1., 2., 3.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14., 15.], + * [ 4., 5., 6.] ] ] + * + * + * + * Defined in src/operator/sequence_reverse.cc:L122 + * }}} + * + * @param data n-dimensional input array of the form [max_sequence_length, batch_size, other dims] where n>2 + * @param sequence_length vector of sequence lengths of the form [batch_size] + * @param use_sequence_length If set to true, this layer takes in an extra input parameter `sequence_length` to specify variable length sequence + * @param axis The sequence axis. Only 0 is currently supported. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def SequenceReverse (data : Option[org.apache.mxnet.Symbol] = None, sequence_length : Option[org.apache.mxnet.Symbol] = None, use_sequence_length : Option[Boolean] = None, axis : Option[Int] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Splits an array along a particular axis into multiple sub-arrays. + * + * .. note:: ``SliceChannel`` is deprecated. Use ``split`` instead. + * + * **Note** that `num_outputs` should evenly divide the length of the axis + * along which to split the array. + * + * Example:: + * + * x = `[ `[ [ 1.] + * [ 2.] ] + * `[ [ 3.] + * [ 4.] ] + * `[ [ 5.] + * [ 6.] ] ] + * x.shape = (3, 2, 1) + * + * y = split(x, axis=1, num_outputs=2) // a list of 2 arrays with shape (3, 1, 1) + * y = `[ `[ [ 1.] ] + * `[ [ 3.] ] + * `[ [ 5.] ] ] + * + * `[ `[ [ 2.]
] + * `[ [ 4.] ] + * `[ [ 6.] ] ] + * + * y[0].shape = (3, 1, 1) + * + * z = split(x, axis=0, num_outputs=3) // a list of 3 arrays with shape (1, 2, 1) + * z = `[ `[ [ 1.] + * [ 2.] ] ] + * + * `[ `[ [ 3.] + * [ 4.] ] ] + * + * `[ `[ [ 5.] + * [ 6.] ] ] + * + * z[0].shape = (1, 2, 1) + * + * `squeeze_axis=1` removes the axis with length 1 from the shapes of the output arrays. + * **Note** that setting `squeeze_axis` to ``1`` removes axis with length 1 only + * along the `axis` which it is split. + * Also `squeeze_axis` can be set to true only if ``input.shape[axis] == num_outputs``. + * + * Example:: + * + * z = split(x, axis=0, num_outputs=3, squeeze_axis=1) // a list of 3 arrays with shape (2, 1) + * z = `[ [ 1.] + * [ 2.] ] + * + * `[ [ 3.] + * [ 4.] ] + * + * `[ [ 5.] + * [ 6.] ] + * z[0].shape = (2 ,1 ) + * + * + * + * Defined in src/operator/slice_channel.cc:L107 + * }}} + * + * @param data The input + * @param num_outputs Number of splits. Note that this should evenly divide the length of the `axis`. + * @param axis Axis along which to split. + * @param squeeze_axis If true, Removes the axis with length 1 from the shapes of the output arrays. **Note** that setting `squeeze_axis` to ``true`` removes axis with length 1 only along the `axis` which it is split. Also `squeeze_axis` can be set to ``true`` only if ``input.shape[axis] == num_outputs``. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def SliceChannel (data : Option[org.apache.mxnet.Symbol] = None, num_outputs : Int, axis : Option[Int] = None, squeeze_axis : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the gradient of cross entropy loss with respect to softmax output. + * + * - This operator computes the gradient in two steps. + * The cross entropy loss does not actually need to be computed. + * + * - Applies softmax function on the input array. + * - Computes and returns the gradient of cross entropy loss w.r.t. the softmax output. + * + * - The softmax function, cross entropy loss and gradient is given by: + * + * - Softmax Function: + * + * .. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)} + * + * - Cross Entropy Function: + * + * .. math:: \text{CE(label, output)} = - \sum_i \text{label}_i \log(\text{output}_i) + * + * - The gradient of cross entropy loss w.r.t softmax output: + * + * .. math:: \text{gradient} = \text{output} - \text{label} + * + * - During forward propagation, the softmax function is computed for each instance in the input array. + * + * For general *N*-D input arrays with shape :math:`(d_1, d_2, ..., d_n)`. The size is + * :math:`s=d_1 \cdot d_2 \cdot \cdot \cdot d_n`. We can use the parameters `preserve_shape` + * and `multi_output` to specify the way to compute softmax: + * + * - By default, `preserve_shape` is ``false``. This operator will reshape the input array + * into a 2-D array with shape :math:`(d_1, \frac{s}{d_1})` and then compute the softmax function for + * each row in the reshaped array, and afterwards reshape it back to the original shape + * :math:`(d_1, d_2, ..., d_n)`. + * - If `preserve_shape` is ``true``, the softmax function will be computed along + * the last axis (`axis` = ``-1``). + * - If `multi_output` is ``true``, the softmax function will be computed along + * the second axis (`axis` = ``1``). + * + * - During backward propagation, the gradient of cross-entropy loss w.r.t softmax output array is computed. 
+ * The provided label can be a one-hot label array or a probability label array. + * + * - If the parameter `use_ignore` is ``true``, `ignore_label` can specify input instances + * with a particular label to be ignored during backward propagation. **This has no effect when + * softmax `output` has same shape as `label`**. + * + * Example:: + * + * data = `[ [1,2,3,4],[2,2,2,2],[3,3,3,3],[4,4,4,4] ] + * label = [1,0,2,3] + * ignore_label = 1 + * SoftmaxOutput(data=data, label = label,\ + * multi_output=true, use_ignore=true,\ + * ignore_label=ignore_label) + * ## forward softmax output + * `[ [ 0.0320586 0.08714432 0.23688284 0.64391428] + * [ 0.25 0.25 0.25 0.25 ] + * [ 0.25 0.25 0.25 0.25 ] + * [ 0.25 0.25 0.25 0.25 ] ] + * ## backward gradient output + * `[ [ 0. 0. 0. 0. ] + * [-0.75 0.25 0.25 0.25] + * [ 0.25 0.25 -0.75 0.25] + * [ 0.25 0.25 0.25 -0.75] ] + * ## notice that the first row is all 0 because label[0] is 1, which is equal to ignore_label. + * + * - The parameter `grad_scale` can be used to rescale the gradient, which is often used to + * give each loss function different weights. + * + * - This operator also supports various ways to normalize the gradient by `normalization`. + * The `normalization` is applied if softmax output has different shape than the labels. + * The `normalization` mode can be set to the following: + * + * - ``'null'``: do nothing. + * - ``'batch'``: divide the gradient by the batch size. + * - ``'valid'``: divide the gradient by the number of instances which are not ignored. + * + * + * + * Defined in src/operator/softmax_output.cc:L231 + * }}} + * + * @param data Input array. + * @param label Ground truth label. + * @param grad_scale Scales the gradient by a float factor. + * @param ignore_label The instances whose `labels` == `ignore_label` will be ignored during backward, if `use_ignore` is set to ``true``. + * @param multi_output If set to ``true``, the softmax function will be computed along axis ``1``. This is applied when the shape of input array differs from the shape of label array. + * @param use_ignore If set to ``true``, the `ignore_label` value will not contribute to the backward gradient. + * @param preserve_shape If set to ``true``, the softmax function will be computed along the last axis (``-1``). + * @param normalization Normalizes the gradient. + * @param out_grad Multiplies gradient with output gradient element-wise. + * @param smooth_alpha Constant for computing a label smoothed version of cross-entropy for the backwards pass. This constant gets subtracted from the one-hot encoding of the gold label and distributed uniformly to all other labels. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def Softmax (data : Option[org.apache.mxnet.Symbol] = None, label : Option[org.apache.mxnet.Symbol] = None, grad_scale : Option[Float] = None, ignore_label : Option[Float] = None, multi_output : Option[Boolean] = None, use_ignore : Option[Boolean] = None, preserve_shape : Option[Boolean] = None, normalization : Option[String] = None, out_grad : Option[Boolean] = None, smooth_alpha : Option[Float] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Applies softmax activation to input. This is intended for internal layers. + * + * .. note:: + * + * This operator has been deprecated, please use `softmax`. + * + * If `mode` = ``instance``, this operator will compute a softmax for each instance in the batch. + * This is the default mode.
+ * + * If `mode` = ``channel``, this operator will compute a k-class softmax at each position + * of each instance, where `k` = ``num_channel``. This mode can only be used when the input array + * has at least 3 dimensions. + * This can be used for `fully convolutional network`, `image segmentation`, etc. + * + * Example:: + * + * >>> input_array = mx.nd.array(`[ [3., 0.5, -0.5, 2., 7.], + * >>> [2., -.4, 7., 3., 0.2] ]) + * >>> softmax_act = mx.nd.SoftmaxActivation(input_array) + * >>> print softmax_act.asnumpy() + * `[ [ 1.78322066e-02 1.46375655e-03 5.38485940e-04 6.56010211e-03 9.73605454e-01] + * [ 6.56221947e-03 5.95310994e-04 9.73919690e-01 1.78379621e-02 1.08472735e-03] ] + * + * + * + * Defined in src/operator/nn/softmax_activation.cc:L59 + * }}} + * + * @param data The input array. + * @param mode Specifies how to compute the softmax. If set to ``instance``, it computes softmax for each instance. If set to ``channel``, It computes cross channel softmax for each position of each instance. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def SoftmaxActivation (data : Option[org.apache.mxnet.Symbol] = None, mode : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the gradient of cross entropy loss with respect to softmax output. + * + * - This operator computes the gradient in two steps. + * The cross entropy loss does not actually need to be computed. + * + * - Applies softmax function on the input array. + * - Computes and returns the gradient of cross entropy loss w.r.t. the softmax output. + * + * - The softmax function, cross entropy loss and gradient is given by: + * + * - Softmax Function: + * + * .. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)} + * + * - Cross Entropy Function: + * + * .. math:: \text{CE(label, output)} = - \sum_i \text{label}_i \log(\text{output}_i) + * + * - The gradient of cross entropy loss w.r.t softmax output: + * + * .. math:: \text{gradient} = \text{output} - \text{label} + * + * - During forward propagation, the softmax function is computed for each instance in the input array. + * + * For general *N*-D input arrays with shape :math:`(d_1, d_2, ..., d_n)`. The size is + * :math:`s=d_1 \cdot d_2 \cdot \cdot \cdot d_n`. We can use the parameters `preserve_shape` + * and `multi_output` to specify the way to compute softmax: + * + * - By default, `preserve_shape` is ``false``. This operator will reshape the input array + * into a 2-D array with shape :math:`(d_1, \frac{s}{d_1})` and then compute the softmax function for + * each row in the reshaped array, and afterwards reshape it back to the original shape + * :math:`(d_1, d_2, ..., d_n)`. + * - If `preserve_shape` is ``true``, the softmax function will be computed along + * the last axis (`axis` = ``-1``). + * - If `multi_output` is ``true``, the softmax function will be computed along + * the second axis (`axis` = ``1``). + * + * - During backward propagation, the gradient of cross-entropy loss w.r.t softmax output array is computed. + * The provided label can be a one-hot label array or a probability label array. + * + * - If the parameter `use_ignore` is ``true``, `ignore_label` can specify input instances + * with a particular label to be ignored during backward propagation. **This has no effect when + * softmax `output` has same shape as `label`**. 
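+ *
+ * A minimal Scala call sketch matching the example below (hypothetical; it assumes
+ * the generated `Symbol.api` facade of the MXNet Scala package exposes this method)::
+ *
+ *   import org.apache.mxnet.Symbol
+ *
+ *   // hypothetical placeholder symbols to be bound to the data and label arrays below
+ *   val data = Symbol.Variable("data")
+ *   val label = Symbol.Variable("label")
+ *   val out = Symbol.api.SoftmaxOutput(data = Some(data), label = Some(label),
+ *     multi_output = Some(true), use_ignore = Some(true), ignore_label = Some(1f))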
+ * + * Example:: + * + * data = `[ [1,2,3,4],[2,2,2,2],[3,3,3,3],[4,4,4,4] ] + * label = [1,0,2,3] + * ignore_label = 1 + * SoftmaxOutput(data=data, label = label,\ + * multi_output=true, use_ignore=true,\ + * ignore_label=ignore_label) + * ## forward softmax output + * `[ [ 0.0320586 0.08714432 0.23688284 0.64391428] + * [ 0.25 0.25 0.25 0.25 ] + * [ 0.25 0.25 0.25 0.25 ] + * [ 0.25 0.25 0.25 0.25 ] ] + * ## backward gradient output + * `[ [ 0. 0. 0. 0. ] + * [-0.75 0.25 0.25 0.25] + * [ 0.25 0.25 -0.75 0.25] + * [ 0.25 0.25 0.25 -0.75] ] + * ## notice that the first row is all 0 because label[0] is 1, which is equal to ignore_label. + * + * - The parameter `grad_scale` can be used to rescale the gradient, which is often used to + * give each loss function different weights. + * + * - This operator also supports various ways to normalize the gradient by `normalization`. + * The `normalization` is applied if softmax output has different shape than the labels. + * The `normalization` mode can be set to the following: + * + * - ``'null'``: do nothing. + * - ``'batch'``: divide the gradient by the batch size. + * - ``'valid'``: divide the gradient by the number of instances which are not ignored. + * + * + * + * Defined in src/operator/softmax_output.cc:L231 + * }}} + * + * @param data Input array. + * @param label Ground truth label. + * @param grad_scale Scales the gradient by a float factor. + * @param ignore_label The instances whose `labels` == `ignore_label` will be ignored during backward, if `use_ignore` is set to ``true``. + * @param multi_output If set to ``true``, the softmax function will be computed along axis ``1``. This is applied when the shape of input array differs from the shape of label array. + * @param use_ignore If set to ``true``, the `ignore_label` value will not contribute to the backward gradient. + * @param preserve_shape If set to ``true``, the softmax function will be computed along the last axis (``-1``). + * @param normalization Normalizes the gradient. + * @param out_grad Multiplies gradient with output gradient element-wise. + * @param smooth_alpha Constant for computing a label smoothed version of cross-entropy for the backwards pass. This constant gets subtracted from the one-hot encoding of the gold label and distributed uniformly to all other labels. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def SoftmaxOutput (data : Option[org.apache.mxnet.Symbol] = None, label : Option[org.apache.mxnet.Symbol] = None, grad_scale : Option[Float] = None, ignore_label : Option[Float] = None, multi_output : Option[Boolean] = None, use_ignore : Option[Boolean] = None, preserve_shape : Option[Boolean] = None, normalization : Option[String] = None, out_grad : Option[Boolean] = None, smooth_alpha : Option[Float] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Applies a spatial transformer to input feature map. + * }}} + * + * @param data Input data to the SpatialTransformerOp. + * @param loc localisation net, the output dim should be 6 when transform_type is affine. You should initialize the weight and bias with identity transform.
+ * @param target_shape output shape (h, w) of spatial transformer: (y, x) + * @param transform_type transformation type + * @param sampler_type sampling type + * @param cudnn_off whether to turn cudnn off + * @return org.apache.mxnet.Symbol + */ +@Experimental +def SpatialTransformer (data : Option[org.apache.mxnet.Symbol] = None, loc : Option[org.apache.mxnet.Symbol] = None, target_shape : Option[org.apache.mxnet.Shape] = None, transform_type : String, sampler_type : String, cudnn_off : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Interchanges two axes of an array. + * + * Examples:: + * + * x = `[ [1, 2, 3] ] + * swapaxes(x, 0, 1) = `[ [ 1], + * [ 2], + * [ 3] ] + * + * x = `[ `[ [ 0, 1], + * [ 2, 3] ], + * `[ [ 4, 5], + * [ 6, 7] ] ] // (2,2,2) array + * + * swapaxes(x, 0, 2) = `[ `[ [ 0, 4], + * [ 2, 6] ], + * `[ [ 1, 5], + * [ 3, 7] ] ] + * + * + * Defined in src/operator/swapaxis.cc:L70 + * }}} + * + * @param data Input array. + * @param dim1 the first axis to be swapped. + * @param dim2 the second axis to be swapped. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def SwapAxis (data : Option[org.apache.mxnet.Symbol] = None, dim1 : Option[Int] = None, dim2 : Option[Int] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Upsamples the given input data. + * + * Two algorithms (``sample_type``) are available for upsampling: + * + * - Nearest Neighbor + * - Bilinear + * + * **Nearest Neighbor Upsampling** + * + * Input data is expected to be NCHW. + * + * Example:: + * + * x = `[ [`[ [1. 1. 1.] + * [1. 1. 1.] + * [1. 1. 1.] ] ] ] + * + * UpSampling(x, scale=2, sample_type='nearest') = `[ [`[ [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] ] ] ] + * + * **Bilinear Upsampling** + * + * Uses `deconvolution` algorithm under the hood. You need to provide both input data and the kernel. + * + * Input data is expected to be NCHW. + * + * `num_filter` is expected to be same as the number of channels. + * + * Example:: + * + * x = `[ [`[ [1. 1. 1.] + * [1. 1. 1.] + * [1. 1. 1.] ] ] ] + * + * w = `[ [`[ [1. 1. 1. 1.] + * [1. 1. 1. 1.] + * [1. 1. 1. 1.] + * [1. 1. 1. 1.] ] ] ] + * + * UpSampling(x, w, scale=2, sample_type='bilinear', num_filter=1) = `[ [`[ [1. 2. 2. 2. 2. 1.] + * [2. 4. 4. 4. 4. 2.] + * [2. 4. 4. 4. 4. 2.] + * [2. 4. 4. 4. 4. 2.] + * [2. 4. 4. 4. 4. 2.] + * [1. 2. 2. 2. 2. 1.] ] ] ] + * + * + * Defined in src/operator/nn/upsampling.cc:L173 + * }}} + * + * @param data Array of tensors to upsample. For bilinear upsampling, there should be 2 inputs - 1 data and 1 weight. + * @param scale Up sampling scale + * @param num_filter Input filter. Only used by bilinear sample_type. Since bilinear upsampling uses deconvolution, num_filter is set to the number of channels. + * @param sample_type upsampling method + * @param multi_input_mode How to handle multiple input. concat means concatenate upsampled images along the channel dimension. sum means add all images together, only available for nearest neighbor upsampling. + * @param num_args Number of inputs to be upsampled. For nearest neighbor upsampling, this can be 1-N; the size of output will be (scale*h_0, scale*w_0) and all other inputs will be upsampled to the same size. For bilinear upsampling this must be 2; 1 input and 1 weight.
+ * @param workspace Tmp workspace for deconvolution (MB) + * @return org.apache.mxnet.Symbol + */ +@Experimental +def UpSampling (data : Array[org.apache.mxnet.Symbol], scale : Int, num_filter : Option[Int] = None, sample_type : String, multi_input_mode : Option[String] = None, num_args : Int, workspace : Option[Long] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise absolute value of the input. + * + * Example:: + * + * abs([-2, 0, 3]) = [2, 0, 3] + * + * The storage type of ``abs`` output depends upon the input storage type: + * + * - abs(default) = default + * - abs(row_sparse) = row_sparse + * - abs(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L721 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def abs (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Update function for Adam optimizer. Adam is seen as a generalization + * of AdaGrad. + * + * Adam update consists of the following steps, where g represents gradient and m, v + * are 1st and 2nd order moment estimates (mean and variance). + * + * .. math:: + * + * g_t = \nabla J(W_{t-1})\\ + * m_t = \beta_1 m_{t-1} + (1 - \beta_1) g_t\\ + * v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2\\ + * W_t = W_{t-1} - \alpha \frac{ m_t }{ \sqrt{ v_t } + \epsilon } + * + * It updates the weights using:: + * + * m = beta1*m + (1-beta1)*grad + * v = beta2*v + (1-beta2)*(grad**2) + * w += - learning_rate * m / (sqrt(v) + epsilon) + * + * However, if grad's storage type is ``row_sparse``, ``lazy_update`` is True and the storage + * type of weight is the same as those of m and v, + * only the row slices whose indices appear in grad.indices are updated (for w, m and v):: + * + * for row in grad.indices: + * m[row] = beta1*m[row] + (1-beta1)*grad[row] + * v[row] = beta2*v[row] + (1-beta2)*(grad[row]**2) + * w[row] += - learning_rate * m[row] / (sqrt(v[row]) + epsilon) + * + * + * + * Defined in src/operator/optimizer_op.cc:L688 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param mean Moving mean + * @param vari Moving variance + * @param lr Learning rate + * @param beta1 The decay rate for the 1st moment estimates. + * @param beta2 The decay rate for the 2nd moment estimates. + * @param epsilon A small constant for numerical stability. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). 
+ * @param lazy_update If true, lazy updates are applied if gradient's stype is row_sparse and all of w, m and v have the same stype + * @return org.apache.mxnet.Symbol + */ +@Experimental +def adam_update (weight : Option[org.apache.mxnet.Symbol] = None, grad : Option[org.apache.mxnet.Symbol] = None, mean : Option[org.apache.mxnet.Symbol] = None, vari : Option[org.apache.mxnet.Symbol] = None, lr : Float, beta1 : Option[Float] = None, beta2 : Option[Float] = None, epsilon : Option[Float] = None, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, lazy_update : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Adds all input arguments element-wise. + * + * .. math:: + * add\_n(a_1, a_2, ..., a_n) = a_1 + a_2 + ... + a_n + * + * ``add_n`` is potentially more efficient than calling ``add`` by `n` times. + * + * The storage type of ``add_n`` output depends on storage types of inputs + * + * - add_n(row_sparse, row_sparse, ..) = row_sparse + * - add_n(default, csr, default) = default + * - add_n(any input combinations longer than 4 (>4) with at least one default type) = default + * - otherwise, ``add_n`` falls all inputs back to default storage and generates default storage + * + * + * + * Defined in src/operator/tensor/elemwise_sum.cc:L155 + * }}} + * + * @param args Positional input arguments + * @return org.apache.mxnet.Symbol + */ +@Experimental +def add_n (args : Array[org.apache.mxnet.Symbol], name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Check if all the float numbers in the array are finite (used for AMP) + * + * + * Defined in src/operator/contrib/all_finite.cc:L101 + * }}} + * + * @param data Array + * @param init_output Initialize output to 1. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def all_finite (data : Option[org.apache.mxnet.Symbol] = None, init_output : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Cast function between low precision float/FP32 used by AMP. + * + * It casts only between low precision float/FP32 and does not do anything for other types. + * + * + * Defined in src/operator/tensor/amp_cast.cc:L37 + * }}} + * + * @param data The input. + * @param dtype Output data type. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def amp_cast (data : Option[org.apache.mxnet.Symbol] = None, dtype : String, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Cast function used by AMP, that casts its inputs to the common widest type. + * + * It casts only between low precision float/FP32 and does not do anything for other types. + * + * + * + * Defined in src/operator/tensor/amp_cast.cc:L71 + * }}} + * + * @param data Weights + * @param num_outputs Number of input/output pairs to be casted to the widest type. + * @param cast_narrow Whether to cast to the narrowest type + * @return org.apache.mxnet.Symbol + */ +@Experimental +def amp_multicast (data : Array[org.apache.mxnet.Symbol], num_outputs : Int, cast_narrow : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise inverse cosine of the input array. + * + * The input should be in range `[-1, 1]`. + * The output is in the closed interval :math:`[0, \pi]` + * + * .. 
math:: + * arccos([-1, -.707, 0, .707, 1]) = [\pi, 3\pi/4, \pi/2, \pi/4, 0] + * + * The storage type of ``arccos`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L206 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def arccos (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns the element-wise inverse hyperbolic cosine of the input array, \ + * computed element-wise. + * + * The storage type of ``arccosh`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L474 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def arccosh (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise inverse sine of the input array. + * + * The input should be in the range `[-1, 1]`. + * The output is in the closed interval of [:math:`-\pi/2`, :math:`\pi/2`]. + * + * .. math:: + * arcsin([-1, -.707, 0, .707, 1]) = [-\pi/2, -\pi/4, 0, \pi/4, \pi/2] + * + * The storage type of ``arcsin`` output depends upon the input storage type: + * + * - arcsin(default) = default + * - arcsin(row_sparse) = row_sparse + * - arcsin(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L187 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def arcsin (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns the element-wise inverse hyperbolic sine of the input array, \ + * computed element-wise. + * + * The storage type of ``arcsinh`` output depends upon the input storage type: + * + * - arcsinh(default) = default + * - arcsinh(row_sparse) = row_sparse + * - arcsinh(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L436 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def arcsinh (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise inverse tangent of the input array. + * + * The output is in the closed interval :math:`[-\pi/2, \pi/2]` + * + * .. math:: + * arctan([-1, 0, 1]) = [-\pi/4, 0, \pi/4] + * + * The storage type of ``arctan`` output depends upon the input storage type: + * + * - arctan(default) = default + * - arctan(row_sparse) = row_sparse + * - arctan(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L227 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def arctan (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns the element-wise inverse hyperbolic tangent of the input array, \ + * computed element-wise. + * + * The storage type of ``arctanh`` output depends upon the input storage type: + * + * - arctanh(default) = default + * - arctanh(row_sparse) = row_sparse + * - arctanh(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L515 + * }}} + * + * @param data The input array. 
+ * @return org.apache.mxnet.Symbol + */ +@Experimental +def arctanh (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns indices of the maximum values along an axis. + * + * In the case of multiple occurrences of maximum values, the indices corresponding to the first occurrence + * are returned. + * + * Examples:: + * + * x = `[ [ 0., 1., 2.], + * [ 3., 4., 5.] ] + * + * // argmax along axis 0 + * argmax(x, axis=0) = [ 1., 1., 1.] + * + * // argmax along axis 1 + * argmax(x, axis=1) = [ 2., 2.] + * + * // argmax along axis 1 keeping same dims as an input array + * argmax(x, axis=1, keepdims=True) = `[ [ 2.], + * [ 2.] ] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L52 + * }}} + * + * @param data The input + * @param axis The axis along which to perform the reduction. Negative values means indexing from right to left. ``Requires axis to be set as int, because global reduction is not supported yet.`` + * @param keepdims If this is set to `True`, the reduced axis is left in the result as dimension with size one. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def argmax (data : Option[org.apache.mxnet.Symbol] = None, axis : Option[Int] = None, keepdims : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns argmax indices of each channel from the input array. + * + * The result will be an NDArray of shape (num_channel,). + * + * In case of multiple occurrences of the maximum values, the indices corresponding to the first occurrence + * are returned. + * + * Examples:: + * + * x = `[ [ 0., 1., 2.], + * [ 3., 4., 5.] ] + * + * argmax_channel(x) = [ 2., 2.] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L97 + * }}} + * + * @param data The input array + * @return org.apache.mxnet.Symbol + */ +@Experimental +def argmax_channel (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns indices of the minimum values along an axis. + * + * In the case of multiple occurrences of minimum values, the indices corresponding to the first occurrence + * are returned. + * + * Examples:: + * + * x = `[ [ 0., 1., 2.], + * [ 3., 4., 5.] ] + * + * // argmin along axis 0 + * argmin(x, axis=0) = [ 0., 0., 0.] + * + * // argmin along axis 1 + * argmin(x, axis=1) = [ 0., 0.] + * + * // argmin along axis 1 keeping same dims as an input array + * argmin(x, axis=1, keepdims=True) = `[ [ 0.], + * [ 0.] ] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L77 + * }}} + * + * @param data The input + * @param axis The axis along which to perform the reduction. Negative values means indexing from right to left. ``Requires axis to be set as int, because global reduction is not supported yet.`` + * @param keepdims If this is set to `True`, the reduced axis is left in the result as dimension with size one. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def argmin (data : Option[org.apache.mxnet.Symbol] = None, axis : Option[Int] = None, keepdims : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns the indices that would sort an input array along the given axis. 
+ * + * This function performs sorting along the given axis and returns an array of indices having same shape + * as an input array that index data in sorted order. + * + * Examples:: + * + * x = `[ [ 0.3, 0.2, 0.4], + * [ 0.1, 0.3, 0.2] ] + * + * // sort along axis -1 + * argsort(x) = `[ [ 1., 0., 2.], + * [ 0., 2., 1.] ] + * + * // sort along axis 0 + * argsort(x, axis=0) = `[ [ 1., 0., 1.] + * [ 0., 1., 0.] ] + * + * // flatten and then sort + * argsort(x, axis=None) = [ 3., 1., 5., 0., 4., 2.] + * + * + * Defined in src/operator/tensor/ordering_op.cc:L183 + * }}} + * + * @param data The input array + * @param axis Axis along which to sort the input tensor. If not given, the flattened array is used. Default is -1. + * @param is_ascend Whether to sort in ascending or descending order. + * @param dtype DType of the output indices. It is only valid when ret_typ is "indices" or "both". An error will be raised if the selected data type cannot precisely represent the indices. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def argsort (data : Option[org.apache.mxnet.Symbol] = None, axis : Option[Int] = None, is_ascend : Option[Boolean] = None, dtype : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Batchwise dot product. + * + * ``batch_dot`` is used to compute dot product of ``x`` and ``y`` when ``x`` and + * ``y`` are data in batch, namely N-D (N >= 3) arrays in shape of `(B0, ..., B_i, :, :)`. + * + * For example, given ``x`` with shape `(B_0, ..., B_i, N, M)` and ``y`` with shape + * `(B_0, ..., B_i, M, K)`, the result array will have shape `(B_0, ..., B_i, N, K)`, + * which is computed by:: + * + * batch_dot(x,y)[b_0, ..., b_i, :, :] = dot(x[b_0, ..., b_i, :, :], y[b_0, ..., b_i, :, :]) + * + * + * + * Defined in src/operator/tensor/dot.cc:L127 + * }}} + * + * @param lhs The first input + * @param rhs The second input + * @param transpose_a If true then transpose the first input before dot. + * @param transpose_b If true then transpose the second input before dot. + * @param forward_stype The desired storage type of the forward output given by user, if thecombination of input storage types and this hint does not matchany implemented ones, the dot operator will perform fallback operationand still produce an output of the desired storage type. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def batch_dot (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, transpose_a : Option[Boolean] = None, transpose_b : Option[Boolean] = None, forward_stype : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Takes elements from a data batch. + * + * .. note:: + * `batch_take` is deprecated. Use `pick` instead. + * + * Given an input array of shape ``(d0, d1)`` and indices of shape ``(i0,)``, the result will be + * an output array of shape ``(i0,)`` with:: + * + * output[i] = input[i, indices[i] ] + * + * Examples:: + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] ] + * + * // takes elements with specified indices + * batch_take(x, [0,1,0]) = [ 1. 4. 5.] 
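+ *
+ * A minimal Scala call sketch of the same example (hypothetical; it assumes the
+ * generated `Symbol.api` facade of the MXNet Scala package exposes this method)::
+ *
+ *   import org.apache.mxnet.Symbol
+ *
+ *   val x = Symbol.Variable("x")           // bound to the (3, 2) array above
+ *   val idx = Symbol.Variable("indices")   // bound to [0, 1, 0]
+ *   val taken = Symbol.api.batch_take(a = Some(x), indices = Some(idx))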
+ * + * + * + * Defined in src/operator/tensor/indexing_op.cc:L777 + * }}} + * + * @param a The input array + * @param indices The index array + * @return org.apache.mxnet.Symbol + */ +@Experimental +def batch_take (a : Option[org.apache.mxnet.Symbol] = None, indices : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise sum of the input arrays with broadcasting. + * + * `broadcast_plus` is an alias to the function `broadcast_add`. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_add(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] + * + * broadcast_plus(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] + * + * Supported sparse operations: + * + * broadcast_add(csr, dense(1D)) = dense + * broadcast_add(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L58 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_add (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Broadcasts the input array over particular axes. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. + * + * `broadcast_axes` is an alias to the function `broadcast_axis`. + * + * Example:: + * + * // given x of shape (1,2,1) + * x = `[ `[ [ 1.], + * [ 2.] ] ] + * + * // broadcast x on on axis 2 + * broadcast_axis(x, axis=2, size=3) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] ] + * // broadcast x on on axes 0 and 2 + * broadcast_axis(x, axis=(0,2), size=(2,3)) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ], + * `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] ] + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L58 + * }}} + * + * @param data The input + * @param axis The axes to perform the broadcasting. + * @param size Target sizes of the broadcasting axes. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_axes (data : Option[org.apache.mxnet.Symbol] = None, axis : Option[org.apache.mxnet.Shape] = None, size : Option[org.apache.mxnet.Shape] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Broadcasts the input array over particular axes. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. + * + * `broadcast_axes` is an alias to the function `broadcast_axis`. + * + * Example:: + * + * // given x of shape (1,2,1) + * x = `[ `[ [ 1.], + * [ 2.] ] ] + * + * // broadcast x on on axis 2 + * broadcast_axis(x, axis=2, size=3) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] ] + * // broadcast x on on axes 0 and 2 + * broadcast_axis(x, axis=(0,2), size=(2,3)) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ], + * `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] ] + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L58 + * }}} + * + * @param data The input + * @param axis The axes to perform the broadcasting. + * @param size Target sizes of the broadcasting axes. 
+ * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_axis (data : Option[org.apache.mxnet.Symbol] = None, axis : Option[org.apache.mxnet.Shape] = None, size : Option[org.apache.mxnet.Shape] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise division of the input arrays with broadcasting. + * + * Example:: + * + * x = `[ [ 6., 6., 6.], + * [ 6., 6., 6.] ] + * + * y = `[ [ 2.], + * [ 3.] ] + * + * broadcast_div(x, y) = `[ [ 3., 3., 3.], + * [ 2., 2., 2.] ] + * + * Supported sparse operations: + * + * broadcast_div(csr, dense(1D)) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L187 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_div (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns the result of element-wise **equal to** (==) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_equal(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L46 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_equal (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns the result of element-wise **greater than** (>) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_greater(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L82 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_greater (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns the result of element-wise **greater than or equal to** (>=) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_greater_equal(x, y) = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L100 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_greater_equal (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns the hypotenuse of a right angled triangle, given its "legs" + * with broadcasting. + * + * It is equivalent to doing :math:`sqrt(x_1^2 + x_2^2)`. + * + * Example:: + * + * x = `[ [ 3., 3., 3.] 
] + * + * y = `[ [ 4.], + * [ 4.] ] + * + * broadcast_hypot(x, y) = `[ [ 5., 5., 5.], + * [ 5., 5., 5.] ] + * + * z = `[ [ 0.], + * [ 4.] ] + * + * broadcast_hypot(x, z) = `[ [ 3., 3., 3.], + * [ 5., 5., 5.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L158 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_hypot (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns the result of element-wise **lesser than** (<) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_lesser(x, y) = `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L118 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_lesser (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns the result of element-wise **lesser than or equal to** (<=) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_lesser_equal(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L136 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_lesser_equal (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Broadcasts lhs to have the same shape as rhs. + * + * Broadcasting is a mechanism that allows NDArrays to perform arithmetic operations + * with arrays of different shapes efficiently without creating multiple copies of arrays. + * Also see, `Broadcasting `_ for more explanation. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. + * + * For example:: + * + * broadcast_like(`[ [1,2,3] ], `[ [5,6,7],[7,8,9] ]) = `[ [ 1., 2., 3.], + * [ 1., 2., 3.] ]) + * + * broadcast_like([9], [1,2,3,4,5], lhs_axes=(0,), rhs_axes=(-1,)) = [9,9,9,9,9] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L135 + * }}} + * + * @param lhs First input. + * @param rhs Second input. + * @param lhs_axes Axes to perform broadcast on in the first input array + * @param rhs_axes Axes to copy from the second input array + * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_like (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, lhs_axes : Option[org.apache.mxnet.Shape] = None, rhs_axes : Option[org.apache.mxnet.Shape] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns the result of element-wise **logical and** with broadcasting. 
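+ *
+ * A minimal Scala call sketch (hypothetical; it assumes the generated `Symbol.api`
+ * facade of the MXNet Scala package exposes this method; the numeric example follows below)::
+ *
+ *   import org.apache.mxnet.Symbol
+ *
+ *   val lhs = Symbol.Variable("lhs")
+ *   val rhs = Symbol.Variable("rhs")
+ *   // 1 where both inputs are non-zero, 0 elsewhere, with shape broadcasting
+ *   val res = Symbol.api.broadcast_logical_and(lhs = Some(lhs), rhs = Some(rhs))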
+ * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_logical_and(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L154 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_logical_and (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns the result of element-wise **logical or** with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 0.], + * [ 1., 1., 0.] ] + * + * y = `[ [ 1.], + * [ 0.] ] + * + * broadcast_logical_or(x, y) = `[ [ 1., 1., 1.], + * [ 1., 1., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L172 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_logical_or (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns the result of element-wise **logical xor** with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 0.], + * [ 1., 1., 0.] ] + * + * y = `[ [ 1.], + * [ 0.] ] + * + * broadcast_logical_xor(x, y) = `[ [ 0., 0., 1.], + * [ 1., 1., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L190 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_logical_xor (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise maximum of the input arrays with broadcasting. + * + * This function compares two input arrays and returns a new array having the element-wise maxima. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_maximum(x, y) = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L81 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_maximum (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise minimum of the input arrays with broadcasting. + * + * This function compares two input arrays and returns a new array having the element-wise minima. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_minimum(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.]
] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L117 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_minimum (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise difference of the input arrays with broadcasting. + * + * `broadcast_minus` is an alias to the function `broadcast_sub`. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_sub(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * broadcast_minus(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * Supported sparse operations: + * + * broadcast_sub/minus(csr, dense(1D)) = dense + * broadcast_sub/minus(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L106 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_minus (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise modulo of the input arrays with broadcasting. + * + * Example:: + * + * x = `[ [ 8., 8., 8.], + * [ 8., 8., 8.] ] + * + * y = `[ [ 2.], + * [ 3.] ] + * + * broadcast_mod(x, y) = `[ [ 0., 0., 0.], + * [ 2., 2., 2.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L222 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_mod (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise product of the input arrays with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_mul(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * Supported sparse operations: + * + * broadcast_mul(csr, dense(1D)) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L146 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_mul (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns the result of element-wise **not equal to** (!=) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_not_equal(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] 
] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L64 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_not_equal (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise sum of the input arrays with broadcasting. + * + * `broadcast_plus` is an alias to the function `broadcast_add`. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_add(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] + * + * broadcast_plus(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] + * + * Supported sparse operations: + * + * broadcast_add(csr, dense(1D)) = dense + * broadcast_add(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L58 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_plus (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns result of first array elements raised to powers from second array, element-wise with broadcasting. + * + * Example:: + * + * x = `[ [ 2., 2., 2.], + * [ 2., 2., 2.] ] + * + * y = `[ [ 1.], + * [ 2.] ] + * + * broadcast_power(x, y) = `[ [ 2., 2., 2.], + * [ 4., 4., 4.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L45 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_power (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise difference of the input arrays with broadcasting. + * + * `broadcast_minus` is an alias to the function `broadcast_sub`. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_sub(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * broadcast_minus(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * Supported sparse operations: + * + * broadcast_sub/minus(csr, dense(1D)) = dense + * broadcast_sub/minus(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L106 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_sub (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Broadcasts the input array to a new shape. + * + * Broadcasting is a mechanism that allows NDArrays to perform arithmetic operations + * with arrays of different shapes efficiently without creating multiple copies of arrays. + * Also see, `Broadcasting `_ for more explanation. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`.
Elements will be duplicated on the broadcasted axes. + * + * For example:: + * + * broadcast_to(`[ [1,2,3] ], shape=(2,3)) = `[ [ 1., 2., 3.], + * [ 1., 2., 3.] ]) + * + * The dimension which you do not want to change can also be kept as `0` which means copy the original value. + * So with `shape=(2,0)`, we will obtain the same result as in the above example. + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L82 + * }}} + * + * @param data The input + * @param shape The shape of the desired array. We can set the dim to zero if it's same as the original. E.g `A = broadcast_to(B, shape=(10, 0, 0))` has the same meaning as `A = broadcast_axis(B, axis=0, size=10)`. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def broadcast_to (data : Option[org.apache.mxnet.Symbol] = None, shape : Option[org.apache.mxnet.Shape] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Casts all elements of the input to a new type. + * + * .. note:: ``Cast`` is deprecated. Use ``cast`` instead. + * + * Example:: + * + * cast([0.9, 1.3], dtype='int32') = [0, 1] + * cast([1e20, 11.1], dtype='float16') = [inf, 11.09375] + * cast([300, 11.1, 10.9, -1, -3], dtype='uint8') = [44, 11, 10, 255, 253] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L665 + * }}} + * + * @param data The input. + * @param dtype Output data type. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def cast (data : Option[org.apache.mxnet.Symbol] = None, dtype : String, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Casts tensor storage type to the new type. + * + * When an NDArray with default storage type is cast to csr or row_sparse storage, + * the result is compact, which means: + * + * - for csr, zero values will not be retained + * - for row_sparse, row slices of all zeros will not be retained + * + * The storage type of ``cast_storage`` output depends on stype parameter: + * + * - cast_storage(csr, 'default') = default + * - cast_storage(row_sparse, 'default') = default + * - cast_storage(default, 'csr') = csr + * - cast_storage(default, 'row_sparse') = row_sparse + * - cast_storage(csr, 'csr') = csr + * - cast_storage(row_sparse, 'row_sparse') = row_sparse + * + * Example:: + * + * dense = `[ [ 0., 1., 0.], + * [ 2., 0., 3.], + * [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * + * # cast to row_sparse storage type + * rsp = cast_storage(dense, 'row_sparse') + * rsp.indices = [0, 1] + * rsp.values = `[ [ 0., 1., 0.], + * [ 2., 0., 3.] ] + * + * # cast to csr storage type + * csr = cast_storage(dense, 'csr') + * csr.indices = [1, 0, 2] + * csr.values = [ 1., 2., 3.] + * csr.indptr = [0, 1, 3, 3, 3] + * + * + * + * Defined in src/operator/tensor/cast_storage.cc:L71 + * }}} + * + * @param data The input. + * @param stype Output storage type. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def cast_storage (data : Option[org.apache.mxnet.Symbol] = None, stype : String, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise cube-root value of the input. + * + * .. 
math:: + * cbrt(x) = \sqrt[3]{x} + * + * Example:: + * + * cbrt([1, 8, -125]) = [1, 2, -5] + * + * The storage type of ``cbrt`` output depends upon the input storage type: + * + * - cbrt(default) = default + * - cbrt(row_sparse) = row_sparse + * - cbrt(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L216 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def cbrt (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise ceiling of the input. + * + * The ceil of the scalar x is the smallest integer i, such that i >= x. + * + * Example:: + * + * ceil([-2.1, -1.9, 1.5, 1.9, 2.1]) = [-2., -1., 2., 2., 3.] + * + * The storage type of ``ceil`` output depends upon the input storage type: + * + * - ceil(default) = default + * - ceil(row_sparse) = row_sparse + * - ceil(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L818 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def ceil (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Picks elements from an input array according to the input indices along the given axis. + * + * Given an input array of shape ``(d0, d1)`` and indices of shape ``(i0,)``, the result will be + * an output array of shape ``(i0,)`` with:: + * + * output[i] = input[i, indices[i] ] + * + * By default, if any index mentioned is too large, it is replaced by the index that addresses + * the last element along an axis (the `clip` mode). + * + * This function supports n-dimensional input and (n-1)-dimensional indices arrays. + * + * Examples:: + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] ] + * + * // picks elements with specified indices along axis 0 + * pick(x, y=[0,1], 0) = [ 1., 4.] + * + * // picks elements with specified indices along axis 1 + * pick(x, y=[0,1,0], 1) = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] ] + * + * // picks elements with specified indices along axis 1 using 'wrap' mode + * // to place indices that would normally be out of bounds + * pick(x, y=[2,-1,-2], 1, mode='wrap') = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] ] + * + * // picks elements with specified indices along axis 1 and dims are maintained + * pick(x,y, 1, keepdims=True) = `[ [ 2.], + * [ 3.], + * [ 6.] ] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L155 + * }}} + * + * @param data The input array + * @param index The index array + * @param axis int or None. The axis along which to pick the elements. Negative values mean indexing from right to left. If `None`, the elements in the index w.r.t. the flattened input will be picked. + * @param keepdims If true, the axis where we pick the elements is left in the result as a dimension with size one. + * @param mode Specify how out-of-bound indices behave. Default is "clip". "clip" means clip to the range. So, if all indices mentioned are too large, they are replaced by the index that addresses the last element along an axis. "wrap" means to wrap around. 
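+ *
+ * @example A minimal Scala sketch of the picking behaviour described above, assuming the
+ *          generated bindings are exposed through `Symbol.api` as in other MXNet Scala
+ *          releases; the variables below are illustrative placeholders.
+ * {{{
+ * val x   = Symbol.Variable("x")   // e.g. shape (3, 2)
+ * val idx = Symbol.Variable("idx") // e.g. shape (3,), one column index per row
+ * // pick one element per row of x along axis 1, keeping the picked axis as size 1
+ * val picked = Symbol.api.choose_element_0index(data = Some(x), index = Some(idx),
+ *   axis = Some(1), keepdims = Some(true))
+ * }}}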
+ * @return org.apache.mxnet.Symbol + */ +@Experimental +def choose_element_0index (data : Option[org.apache.mxnet.Symbol] = None, index : Option[org.apache.mxnet.Symbol] = None, axis : Option[Int] = None, keepdims : Option[Boolean] = None, mode : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Clips (limits) the values in an array. + * Given an interval, values outside the interval are clipped to the interval edges. + * Clipping ``x`` between `a_min` and `a_max` would be:: + * .. math:: + * clip(x, a_min, a_max) = \max(\min(x, a_max), a_min)) + * Example:: + * x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + * clip(x,1,8) = [ 1., 1., 2., 3., 4., 5., 6., 7., 8., 8.] + * The storage type of ``clip`` output depends on storage types of inputs and the a_min, a_max \ + * parameter values: + * - clip(default) = default + * - clip(row_sparse, a_min <= 0, a_max >= 0) = row_sparse + * - clip(csr, a_min <= 0, a_max >= 0) = csr + * - clip(row_sparse, a_min < 0, a_max < 0) = default + * - clip(row_sparse, a_min > 0, a_max > 0) = default + * - clip(csr, a_min < 0, a_max < 0) = csr + * - clip(csr, a_min > 0, a_max > 0) = csr + * + * + * Defined in src/operator/tensor/matrix_op.cc:L677 + * }}} + * + * @param data Input array. + * @param a_min Minimum value + * @param a_max Maximum value + * @return org.apache.mxnet.Symbol + */ +@Experimental +def clip (data : Option[org.apache.mxnet.Symbol] = None, a_min : Float, a_max : Float, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Joins input arrays along a given axis. + * + * .. note:: `Concat` is deprecated. Use `concat` instead. + * + * The dimensions of the input arrays should be the same except the axis along + * which they will be concatenated. + * The dimension of the output array along the concatenated axis will be equal + * to the sum of the corresponding dimensions of the input arrays. + * + * The storage type of ``concat`` output depends on storage types of inputs + * + * - concat(csr, csr, ..., csr, dim=0) = csr + * - otherwise, ``concat`` generates output with default storage + * + * Example:: + * + * x = `[ [1,1],[2,2] ] + * y = `[ [3,3],[4,4],[5,5] ] + * z = `[ [6,6], [7,7],[8,8] ] + * + * concat(x,y,z,dim=0) = `[ [ 1., 1.], + * [ 2., 2.], + * [ 3., 3.], + * [ 4., 4.], + * [ 5., 5.], + * [ 6., 6.], + * [ 7., 7.], + * [ 8., 8.] ] + * + * Note that you cannot concat x,y,z along dimension 1 since dimension + * 0 is not the same for all the input arrays. + * + * concat(y,z,dim=1) = `[ [ 3., 3., 6., 6.], + * [ 4., 4., 7., 7.], + * [ 5., 5., 8., 8.] ] + * + * + * + * Defined in src/operator/nn/concat.cc:L383 + * }}} + * + * @param data List of arrays to concatenate + * @param num_args Number of inputs to be concated. + * @param dim the dimension to be concated. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def concat (data : Array[org.apache.mxnet.Symbol], num_args : Int, dim : Option[Int] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the element-wise cosine of the input array. + * + * The input should be in radians (:math:`2\pi` rad equals 360 degrees). + * + * .. math:: + * cos([0, \pi/4, \pi/2]) = [1, 0.707, 0] + * + * The storage type of ``cos`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L90 + * }}} + * + * @param data The input array. 
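+ *
+ * @example A minimal sketch, assuming the generated bindings are exposed through
+ *          `Symbol.api`; the input is interpreted as radians and the symbol name
+ *          is an illustrative placeholder.
+ * {{{
+ * val theta = Symbol.Variable("theta")
+ * // element-wise cosine of theta
+ * val c = Symbol.api.cos(data = Some(theta))
+ * }}}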
+ * @return org.apache.mxnet.Symbol + */ +@Experimental +def cos (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns the hyperbolic cosine of the input array, computed element-wise. + * + * .. math:: + * cosh(x) = 0.5\times(exp(x) + exp(-x)) + * + * The storage type of ``cosh`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L351 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def cosh (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Slices a region of the array. + * .. note:: ``crop`` is deprecated. Use ``slice`` instead. + * This function returns a sliced array between the indices given + * by `begin` and `end` with the corresponding `step`. + * For an input array of ``shape=(d_0, d_1, ..., d_n-1)``, + * slice operation with ``begin=(b_0, b_1...b_m-1)``, + * ``end=(e_0, e_1, ..., e_m-1)``, and ``step=(s_0, s_1, ..., s_m-1)``, + * where m <= n, results in an array with the shape + * ``(|e_0-b_0|/|s_0|, ..., |e_m-1-b_m-1|/|s_m-1|, d_m, ..., d_n-1)``. + * The resulting array's *k*-th dimension contains elements + * from the *k*-th dimension of the input array starting + * from index ``b_k`` (inclusive) with step ``s_k`` + * until reaching ``e_k`` (exclusive). + * If the *k*-th elements are `None` in the sequence of `begin`, `end`, + * and `step`, the following rule will be used to set default values. + * If `s_k` is `None`, set `s_k=1`. If `s_k > 0`, set `b_k=0`, `e_k=d_k`; + * else, set `b_k=d_k-1`, `e_k=-1`. + * The storage type of ``slice`` output depends on storage types of inputs + * - slice(csr) = csr + * - otherwise, ``slice`` generates output with default storage + * .. note:: When input data storage type is csr, it only supports + * step=(), or step=(None,), or step=(1,) to generate a csr output. + * For other step parameter values, it falls back to slicing + * a dense tensor. + * Example:: + * x = `[ [ 1., 2., 3., 4.], + * [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice(x, begin=(0,1), end=(2,4)) = `[ [ 2., 3., 4.], + * [ 6., 7., 8.] ] + * slice(x, begin=(None, 0), end=(None, 3), step=(-1, 2)) = `[ [9., 11.], + * [5., 7.], + * [1., 3.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L482 + * }}} + * + * @param data Source input + * @param begin starting indices for the slice operation, supports negative indices. + * @param end ending indices for the slice operation, supports negative indices. + * @param step step for the slice operation, supports negative values. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def crop (data : Option[org.apache.mxnet.Symbol] = None, begin : org.apache.mxnet.Shape, end : org.apache.mxnet.Shape, step : Option[org.apache.mxnet.Shape] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Connectionist Temporal Classification Loss. + * + * .. note:: The existing alias ``contrib_CTCLoss`` is deprecated. 
+ * + * The shapes of the inputs and outputs: + * + * - **data**: `(sequence_length, batch_size, alphabet_size)` + * - **label**: `(batch_size, label_sequence_length)` + * - **out**: `(batch_size)` + * + * The `data` tensor consists of sequences of activation vectors (without applying softmax), + * with the i-th channel in the last dimension corresponding to the i-th label + * for i between 0 and alphabet_size-1 (i.e. always 0-indexed). + * Alphabet size should include one additional value reserved for the blank label. + * When `blank_label` is ``"first"``, the ``0``-th channel is reserved for + * activation of the blank label; otherwise, if it is ``"last"``, the ``(alphabet_size-1)``-th channel should be + * reserved for the blank label. + * + * ``label`` is an index matrix of integers. When `blank_label` is ``"first"``, + * the value 0 is then reserved for blank label, and should not be passed in this matrix. Otherwise, + * when `blank_label` is ``"last"``, the value `(alphabet_size-1)` is reserved for blank label. + * + * If a sequence of labels is shorter than *label_sequence_length*, use the special + * padding value at the end of the sequence to conform it to the correct + * length. The padding value is `0` when `blank_label` is ``"first"``, and `-1` otherwise. + * + * For example, suppose the vocabulary is `[a, b, c]`, and in one batch we have three sequences + * 'ba', 'cbb', and 'abac'. When `blank_label` is ``"first"``, we can index the labels as + * `{'a': 1, 'b': 2, 'c': 3}`, and we reserve the 0-th channel for blank label in data tensor. + * The resulting `label` tensor should be padded to be:: + * + * `[ [2, 1, 0, 0], [3, 2, 2, 0], [1, 2, 1, 3] ] + * + * When `blank_label` is ``"last"``, we can index the labels as + * `{'a': 0, 'b': 1, 'c': 2}`, and we reserve the channel index 3 for blank label in data tensor. + * The resulting `label` tensor should be padded to be:: + * + * `[ [1, 0, -1, -1], [2, 1, 1, -1], [0, 1, 0, 2] ] + * + * ``out`` is a list of CTC loss values, one per example in the batch. + * + * See *Connectionist Temporal Classification: Labelling Unsegmented + * Sequence Data with Recurrent Neural Networks*, A. Graves *et al*. for more + * information on the definition and the algorithm. + * + * + * + * Defined in src/operator/nn/ctc_loss.cc:L100 + * }}} + * + * @param data Input ndarray + * @param label Ground-truth labels for the loss. + * @param data_lengths Lengths of data for each of the samples. Only required when use_data_lengths is true. + * @param label_lengths Lengths of labels for each of the samples. Only required when use_label_lengths is true. + * @param use_data_lengths Whether the data lengths are decided by `data_lengths`. If false, the lengths are equal to the max sequence length. + * @param use_label_lengths Whether the label lengths are decided by `label_lengths`, or derived from `padding_mask`. If false, the lengths are derived from the first occurrence of the value of `padding_mask`. The value of `padding_mask` is ``0`` when first CTC label is reserved for blank, and ``-1`` when last label is reserved for blank. See `blank_label`. + * @param blank_label Set the label that is reserved for blank label. If "first", 0-th label is reserved, and label values for tokens in the vocabulary are between ``1`` and ``alphabet_size-1``, and the padding mask is ``-1``. If "last", last label value ``alphabet_size-1`` is reserved for blank label instead, and label values for tokens in the vocabulary are between ``0`` and ``alphabet_size-2``, and the padding mask is ``0``. 
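+ *
+ * @example A minimal sketch, assuming the generated bindings are exposed through
+ *          `Symbol.api`; shapes follow the convention above and the symbol names
+ *          are illustrative placeholders.
+ * {{{
+ * val data  = Symbol.Variable("data")  // (sequence_length, batch_size, alphabet_size)
+ * val label = Symbol.Variable("label") // (batch_size, label_sequence_length)
+ * // per-example CTC loss, with channel 0 reserved for the blank label
+ * val loss = Symbol.api.ctc_loss(data = Some(data), label = Some(label),
+ *   blank_label = Some("first"))
+ * }}}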
+ * @return org.apache.mxnet.Symbol + */ +@Experimental +def ctc_loss (data : Option[org.apache.mxnet.Symbol] = None, label : Option[org.apache.mxnet.Symbol] = None, data_lengths : Option[org.apache.mxnet.Symbol] = None, label_lengths : Option[org.apache.mxnet.Symbol] = None, use_data_lengths : Option[Boolean] = None, use_label_lengths : Option[Boolean] = None, blank_label : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Return the cumulative sum of the elements along a given axis. + * + * Defined in src/operator/numpy/np_cumsum.cc:L70 + * }}} + * + * @param a Input ndarray + * @param axis Axis along which the cumulative sum is computed. The default (None) is to compute the cumsum over the flattened array. + * @param dtype Type of the returned array and of the accumulator in which the elements are summed. If dtype is not specified, it defaults to the dtype of a, unless a has an integer dtype with a precision less than that of the default platform integer. In that case, the default platform integer is used. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def cumsum (a : Option[org.apache.mxnet.Symbol] = None, axis : Option[Int] = None, dtype : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Converts each element of the input array from radians to degrees. + * + * .. math:: + * degrees([0, \pi/2, \pi, 3\pi/2, 2\pi]) = [0, 90, 180, 270, 360] + * + * The storage type of ``degrees`` output depends upon the input storage type: + * + * - degrees(default) = default + * - degrees(row_sparse) = row_sparse + * - degrees(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L274 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def degrees (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Rearranges(permutes) data from depth into blocks of spatial data. + * Similar to ONNX DepthToSpace operator: + * https://github.com/onnx/onnx/blob/master/docs/Operators.md#DepthToSpace. + * The output is a new tensor where the values from depth dimension are moved in spatial blocks + * to height and width dimension. The reverse of this operation is ``space_to_depth``. + * .. math:: + * \begin{gather*} + * x \prime = reshape(x, [N, block\_size, block\_size, C / (block\_size ^ 2), H * block\_size, W * block\_size]) \\ + * x \prime \prime = transpose(x \prime, [0, 3, 4, 1, 5, 2]) \\ + * y = reshape(x \prime \prime, [N, C / (block\_size ^ 2), H * block\_size, W * block\_size]) + * \end{gather*} + * where :math:`x` is an input tensor with default layout as :math:`[N, C, H, W]`: [batch, channels, height, width] + * and :math:`y` is the output tensor of layout :math:`[N, C / (block\_size ^ 2), H * block\_size, W * block\_size]` + * Example:: + * x = `[ [`[ [0, 1, 2], + * [3, 4, 5] ], + * `[ [6, 7, 8], + * [9, 10, 11] ], + * `[ [12, 13, 14], + * [15, 16, 17] ], + * `[ [18, 19, 20], + * [21, 22, 23] ] ] ] + * depth_to_space(x, 2) = `[ [`[ [0, 6, 1, 7, 2, 8], + * [12, 18, 13, 19, 14, 20], + * [3, 9, 4, 10, 5, 11], + * [15, 21, 16, 22, 17, 23] ] ] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L972 + * }}} + * + * @param data Input ndarray + * @param block_size Blocks of [block_size. 
block_size] are moved + * @return org.apache.mxnet.Symbol + */ +@Experimental +def depth_to_space (data : Option[org.apache.mxnet.Symbol] = None, block_size : Int, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Extracts a diagonal or constructs a diagonal array. + * + * ``diag``'s behavior depends on the input array dimensions: + * + * - 1-D arrays: constructs a 2-D array with the input as its diagonal, all other elements are zero. + * - N-D arrays: extracts the diagonals of the sub-arrays with axes specified by ``axis1`` and ``axis2``. + * The output shape would be decided by removing the axes numbered ``axis1`` and ``axis2`` from the + * input shape and appending to the result a new axis with the size of the diagonals in question. + * + * For example, when the input shape is `(2, 3, 4, 5)`, ``axis1`` and ``axis2`` are 0 and 2 + * respectively and ``k`` is 0, the resulting shape would be `(3, 5, 2)`. + * + * Examples:: + * + * x = `[ [1, 2, 3], + * [4, 5, 6] ] + * + * diag(x) = [1, 5] + * + * diag(x, k=1) = [2, 6] + * + * diag(x, k=-1) = [4] + * + * x = [1, 2, 3] + * + * diag(x) = `[ [1, 0, 0], + * [0, 2, 0], + * [0, 0, 3] ] + * + * diag(x, k=1) = `[ [0, 1, 0], + * [0, 0, 2], + * [0, 0, 0] ] + * + * diag(x, k=-1) = `[ [0, 0, 0], + * [1, 0, 0], + * [0, 2, 0] ] + * + * x = `[ `[ [1, 2], + * [3, 4] ], + * + * `[ [5, 6], + * [7, 8] ] ] + * + * diag(x) = `[ [1, 7], + * [2, 8] ] + * + * diag(x, k=1) = `[ [3], + * [4] ] + * + * diag(x, axis1=-2, axis2=-1) = `[ [1, 4], + * [5, 8] ] + * + * + * + * Defined in src/operator/tensor/diag_op.cc:L87 + * }}} + * + * @param data Input ndarray + * @param k Diagonal in question. The default is 0. Use k>0 for diagonals above the main diagonal, and k<0 for diagonals below the main diagonal. If input has shape (S0 S1) k must be between -S0 and S1 + * @param axis1 The first axis of the sub-arrays of interest. Ignored when the input is a 1-D array. + * @param axis2 The second axis of the sub-arrays of interest. Ignored when the input is a 1-D array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def diag (data : Option[org.apache.mxnet.Symbol] = None, k : Option[Int] = None, axis1 : Option[Int] = None, axis2 : Option[Int] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Dot product of two arrays. + * + * ``dot``'s behavior depends on the input array dimensions: + * + * - 1-D arrays: inner product of vectors + * - 2-D arrays: matrix multiplication + * - N-D arrays: a sum product over the last axis of the first input and the first + * axis of the second input + * + * For example, given 3-D ``x`` with shape `(n,m,k)` and ``y`` with shape `(k,r,s)`, the + * result array will have shape `(n,m,r,s)`. It is computed by:: + * + * dot(x,y)[i,j,a,b] = sum(x[i,j,:]*y[:,a,b]) + * + * Example:: + * + * x = reshape([0,1,2,3,4,5,6,7], shape=(2,2,2)) + * y = reshape([7,6,5,4,3,2,1,0], shape=(2,2,2)) + * dot(x,y)[0,0,1,1] = 0 + * sum(x[0,0,:]*y[:,1,1]) = 0 + * + * The storage type of ``dot`` output depends on storage types of inputs, transpose option and + * forward_stype option for output storage type. 
Implemented sparse operations include: + * + * - dot(default, default, transpose_a=True/False, transpose_b=True/False) = default + * - dot(csr, default, transpose_a=True) = default + * - dot(csr, default, transpose_a=True) = row_sparse + * - dot(csr, default) = default + * - dot(csr, row_sparse) = default + * - dot(default, csr) = csr (CPU only) + * - dot(default, csr, forward_stype='default') = default + * - dot(default, csr, transpose_b=True, forward_stype='default') = default + * + * If the combination of input storage types and forward_stype does not match any of the + * above patterns, ``dot`` will fallback and generate output with default storage. + * + * .. Note:: + * + * If the storage type of the lhs is "csr", the storage type of gradient w.r.t rhs will be + * "row_sparse". Only a subset of optimizers support sparse gradients, including SGD, AdaGrad + * and Adam. Note that by default lazy updates is turned on, which may perform differently + * from standard updates. For more details, please check the Optimization API at: + * https://mxnet.incubator.apache.org/api/python/optimization/optimization.html + * + * + * + * Defined in src/operator/tensor/dot.cc:L77 + * }}} + * + * @param lhs The first input + * @param rhs The second input + * @param transpose_a If true then transpose the first input before dot. + * @param transpose_b If true then transpose the second input before dot. + * @param forward_stype The desired storage type of the forward output given by user, if thecombination of input storage types and this hint does not matchany implemented ones, the dot operator will perform fallback operationand still produce an output of the desired storage type. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def dot (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, transpose_a : Option[Boolean] = None, transpose_b : Option[Boolean] = None, forward_stype : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Adds arguments element-wise. + * + * The storage type of ``elemwise_add`` output depends on storage types of inputs + * + * - elemwise_add(row_sparse, row_sparse) = row_sparse + * - elemwise_add(csr, csr) = csr + * - elemwise_add(default, csr) = default + * - elemwise_add(csr, default) = default + * - elemwise_add(default, rsp) = default + * - elemwise_add(rsp, default) = default + * - otherwise, ``elemwise_add`` generates output with default storage + * }}} + * + * @param lhs first input + * @param rhs second input + * @return org.apache.mxnet.Symbol + */ +@Experimental +def elemwise_add (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Divides arguments element-wise. + * + * The storage type of ``elemwise_div`` output is always dense + * }}} + * + * @param lhs first input + * @param rhs second input + * @return org.apache.mxnet.Symbol + */ +@Experimental +def elemwise_div (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Multiplies arguments element-wise. 
+ * + * The storage type of ``elemwise_mul`` output depends on storage types of inputs + * + * - elemwise_mul(default, default) = default + * - elemwise_mul(row_sparse, row_sparse) = row_sparse + * - elemwise_mul(default, row_sparse) = row_sparse + * - elemwise_mul(row_sparse, default) = row_sparse + * - elemwise_mul(csr, csr) = csr + * - otherwise, ``elemwise_mul`` generates output with default storage + * }}} + * + * @param lhs first input + * @param rhs second input + * @return org.apache.mxnet.Symbol + */ +@Experimental +def elemwise_mul (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Subtracts arguments element-wise. + * + * The storage type of ``elemwise_sub`` output depends on storage types of inputs + * + * - elemwise_sub(row_sparse, row_sparse) = row_sparse + * - elemwise_sub(csr, csr) = csr + * - elemwise_sub(default, csr) = default + * - elemwise_sub(csr, default) = default + * - elemwise_sub(default, rsp) = default + * - elemwise_sub(rsp, default) = default + * - otherwise, ``elemwise_sub`` generates output with default storage + * }}} + * + * @param lhs first input + * @param rhs second input + * @return org.apache.mxnet.Symbol + */ +@Experimental +def elemwise_sub (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise gauss error function of the input. + * + * Example:: + * + * erf([0, -1., 10.]) = [0., -0.8427, 1.] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L886 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def erf (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise inverse gauss error function of the input. + * + * Example:: + * + * erfinv([0, 0.5., -1.]) = [0., 0.4769, -inf] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L907 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def erfinv (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise exponential value of the input. + * + * .. math:: + * exp(x) = e^x \approx 2.718^x + * + * Example:: + * + * exp([0, 1, 2]) = [1., 2.71828175, 7.38905621] + * + * The storage type of ``exp`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L63 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def exp (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Inserts a new axis of size 1 into the array shape + * For example, given ``x`` with shape ``(2,3,4)``, then ``expand_dims(x, axis=1)`` + * will return a new array with shape ``(2,1,3,4)``. + * + * + * Defined in src/operator/tensor/matrix_op.cc:L395 + * }}} + * + * @param data Source input + * @param axis Position where new axis is to be inserted. 
Suppose that the input `NDArray`'s dimension is `ndim`, the range of the inserted axis is `[-ndim, ndim]` + * @return org.apache.mxnet.Symbol + */ +@Experimental +def expand_dims (data : Option[org.apache.mxnet.Symbol] = None, axis : Int, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns ``exp(x) - 1`` computed element-wise on the input. + * + * This function provides greater precision than ``exp(x) - 1`` for small values of ``x``. + * + * The storage type of ``expm1`` output depends upon the input storage type: + * + * - expm1(default) = default + * - expm1(row_sparse) = row_sparse + * - expm1(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L224 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def expm1 (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Fill one element of each line(row for python, column for R/Julia) in lhs according to index indicated by rhs and values indicated by mhs. This function assume rhs uses 0-based index. + * }}} + * + * @param lhs Left operand to the function. + * @param mhs Middle operand to the function. + * @param rhs Right operand to the function. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def fill_element_0index (lhs : Option[org.apache.mxnet.Symbol] = None, mhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise rounded value to the nearest \ + * integer towards zero of the input. + * + * Example:: + * + * fix([-2.1, -1.9, 1.9, 2.1]) = [-2., -1., 1., 2.] + * + * The storage type of ``fix`` output depends upon the input storage type: + * + * - fix(default) = default + * - fix(row_sparse) = row_sparse + * - fix(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L875 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def fix (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Flattens the input array into a 2-D array by collapsing the higher dimensions. + * .. note:: `Flatten` is deprecated. Use `flatten` instead. + * For an input array with shape ``(d1, d2, ..., dk)``, `flatten` operation reshapes + * the input array into an output array of shape ``(d1, d2*...*dk)``. + * Note that the behavior of this function is different from numpy.ndarray.flatten, + * which behaves similar to mxnet.ndarray.reshape((-1,)). + * Example:: + * x = `[ [ + * [1,2,3], + * [4,5,6], + * [7,8,9] + * ], + * [ [1,2,3], + * [4,5,6], + * [7,8,9] + * ] ], + * flatten(x) = `[ [ 1., 2., 3., 4., 5., 6., 7., 8., 9.], + * [ 1., 2., 3., 4., 5., 6., 7., 8., 9.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L250 + * }}} + * + * @param data Input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def flatten (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Reverses the order of elements along given axis while preserving array shape. + * Note: reverse and flip are equivalent. We use reverse in the following examples. 
+ * Examples:: + * x = `[ [ 0., 1., 2., 3., 4.], + * [ 5., 6., 7., 8., 9.] ] + * reverse(x, axis=0) = `[ [ 5., 6., 7., 8., 9.], + * [ 0., 1., 2., 3., 4.] ] + * reverse(x, axis=1) = `[ [ 4., 3., 2., 1., 0.], + * [ 9., 8., 7., 6., 5.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L832 + * }}} + * + * @param data Input data array + * @param axis The axis which to reverse elements. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def flip (data : Option[org.apache.mxnet.Symbol] = None, axis : org.apache.mxnet.Shape, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise floor of the input. + * + * The floor of the scalar x is the largest integer i, such that i <= x. + * + * Example:: + * + * floor([-2.1, -1.9, 1.5, 1.9, 2.1]) = [-3., -2., 1., 1., 2.] + * + * The storage type of ``floor`` output depends upon the input storage type: + * + * - floor(default) = default + * - floor(row_sparse) = row_sparse + * - floor(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L837 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def floor (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * The FTML optimizer described in + * *FTML - Follow the Moving Leader in Deep Learning*, + * available at http://proceedings.mlr.press/v70/zheng17a/zheng17a.pdf. + * + * .. math:: + * + * g_t = \nabla J(W_{t-1})\\ + * v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2\\ + * d_t = \frac{ 1 - \beta_1^t }{ \eta_t } (\sqrt{ \frac{ v_t }{ 1 - \beta_2^t } } + \epsilon) + * \sigma_t = d_t - \beta_1 d_{t-1} + * z_t = \beta_1 z_{ t-1 } + (1 - \beta_1^t) g_t - \sigma_t W_{t-1} + * W_t = - \frac{ z_t }{ d_t } + * + * + * + * Defined in src/operator/optimizer_op.cc:L640 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param d Internal state ``d_t`` + * @param v Internal state ``v_t`` + * @param z Internal state ``z_t`` + * @param lr Learning rate. + * @param beta1 Generally close to 0.5. + * @param beta2 Generally close to 1. + * @param epsilon Epsilon to prevent div 0. + * @param t Number of update. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_grad Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @return org.apache.mxnet.Symbol + */ +@Experimental +def ftml_update (weight : Option[org.apache.mxnet.Symbol] = None, grad : Option[org.apache.mxnet.Symbol] = None, d : Option[org.apache.mxnet.Symbol] = None, v : Option[org.apache.mxnet.Symbol] = None, z : Option[org.apache.mxnet.Symbol] = None, lr : Float, beta1 : Option[Float] = None, beta2 : Option[Float] = None, epsilon : Option[Double] = None, t : Int, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_grad : Option[Float] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Update function for Ftrl optimizer. + * Referenced from *Ad Click Prediction: a View from the Trenches*, available at + * http://dl.acm.org/citation.cfm?id=2488200. 
+ * + * It updates the weights using:: + * + * rescaled_grad = clip(grad * rescale_grad, clip_gradient) + * z += rescaled_grad - (sqrt(n + rescaled_grad**2) - sqrt(n)) * weight / learning_rate + * n += rescaled_grad**2 + * w = (sign(z) * lamda1 - z) / ((beta + sqrt(n)) / learning_rate + wd) * (abs(z) > lamda1) + * + * If w, z and n are all of ``row_sparse`` storage type, + * only the row slices whose indices appear in grad.indices are updated (for w, z and n):: + * + * for row in grad.indices: + * rescaled_grad[row] = clip(grad[row] * rescale_grad, clip_gradient) + * z[row] += rescaled_grad[row] - (sqrt(n[row] + rescaled_grad[row]**2) - sqrt(n[row])) * weight[row] / learning_rate + * n[row] += rescaled_grad[row]**2 + * w[row] = (sign(z[row]) * lamda1 - z[row]) / ((beta + sqrt(n[row])) / learning_rate + wd) * (abs(z[row]) > lamda1) + * + * + * + * Defined in src/operator/optimizer_op.cc:L876 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param z z + * @param n Square of grad + * @param lr Learning rate + * @param lamda1 The L1 regularization coefficient. + * @param beta Per-Coordinate Learning Rate beta. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @return org.apache.mxnet.Symbol + */ +@Experimental +def ftrl_update (weight : Option[org.apache.mxnet.Symbol] = None, grad : Option[org.apache.mxnet.Symbol] = None, z : Option[org.apache.mxnet.Symbol] = None, n : Option[org.apache.mxnet.Symbol] = None, lr : Float, lamda1 : Option[Float] = None, beta : Option[Float] = None, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns the gamma function (extension of the factorial function \ + * to the reals), computed element-wise on the input array. + * + * The storage type of ``gamma`` output is always dense + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def gamma (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise log of the absolute value of the gamma function \ + * of the input. + * + * The storage type of ``gammaln`` output is always dense + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def gammaln (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Gather elements or slices from `data` and store to a tensor whose + * shape is defined by `indices`. + * + * Given `data` with shape `(X_0, X_1, ..., X_{N-1})` and indices with shape + * `(M, Y_0, ..., Y_{K-1})`, the output will have shape `(Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1})`, + * where `M <= N`. If `M == N`, output shape will simply be `(Y_0, ..., Y_{K-1})`. 
+ * + * The elements in output is defined as follows:: + * + * output[y_0, ..., y_{K-1}, x_M, ..., x_{N-1}] = data[indices[0, y_0, ..., y_{K-1}], + * ..., + * indices[M-1, y_0, ..., y_{K-1}], + * x_M, ..., x_{N-1}] + * + * Examples:: + * + * data = `[ [0, 1], [2, 3] ] + * indices = `[ [1, 1, 0], [0, 1, 0] ] + * gather_nd(data, indices) = [2, 3, 0] + * + * data = `[ `[ [1, 2], [3, 4] ], `[ [5, 6], [7, 8] ] ] + * indices = `[ [0, 1], [1, 0] ] + * gather_nd(data, indices) = `[ [3, 4], [5, 6] ] + * }}} + * + * @param data data + * @param indices indices + * @return org.apache.mxnet.Symbol + */ +@Experimental +def gather_nd (data : Option[org.apache.mxnet.Symbol] = None, indices : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes hard sigmoid of x element-wise. + * + * .. math:: + * y = max(0, min(1, alpha * x + beta)) + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L161 + * }}} + * + * @param data The input array. + * @param alpha Slope of hard sigmoid + * @param beta Bias of hard sigmoid. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def hard_sigmoid (data : Option[org.apache.mxnet.Symbol] = None, alpha : Option[Float] = None, beta : Option[Float] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns a copy of the input. + * + * From:src/operator/tensor/elemwise_unary_op_basic.cc:246 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def identity (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the Khatri-Rao product of the input matrices. + * + * Given a collection of :math:`n` input matrices, + * + * .. math:: + * A_1 \in \mathbb{R}^{M_1 \times M}, \ldots, A_n \in \mathbb{R}^{M_n \times N}, + * + * the (column-wise) Khatri-Rao product is defined as the matrix, + * + * .. math:: + * X = A_1 \otimes \cdots \otimes A_n \in \mathbb{R}^{(M_1 \cdots M_n) \times N}, + * + * where the :math:`k` th column is equal to the column-wise outer product + * :math:`{A_1}_k \otimes \cdots \otimes {A_n}_k` where :math:`{A_i}_k` is the kth + * column of the ith matrix. + * + * Example:: + * + * >>> A = mx.nd.array(`[ [1, -1], + * >>> [2, -3] ]) + * >>> B = mx.nd.array(`[ [1, 4], + * >>> [2, 5], + * >>> [3, 6] ]) + * >>> C = mx.nd.khatri_rao(A, B) + * >>> print(C.asnumpy()) + * `[ [ 1. -4.] + * [ 2. -5.] + * [ 3. -6.] + * [ 2. -12.] + * [ 4. -15.] + * [ 6. -18.] ] + * + * + * + * Defined in src/operator/contrib/krprod.cc:L108 + * }}} + * + * @param args Positional input matrices + * @return org.apache.mxnet.Symbol + */ +@Experimental +def khatri_rao (args : Array[org.apache.mxnet.Symbol], name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Phase I of lamb update it performs the following operations and returns g:. + * + * Link to paper: https://arxiv.org/pdf/1904.00962.pdf + * + * .. math:: + * \begin{gather*} + * grad = grad * rescale_grad + * if (grad < -clip_gradient) + * then + * grad = -clip_gradient + * if (grad > clip_gradient) + * then + * grad = clip_gradient + * + * mean = beta1 * mean + (1 - beta1) * grad; + * variance = beta2 * variance + (1. - beta2) * grad ^ 2; + * + * if (bias_correction) + * then + * mean_hat = mean / (1. 
- beta1^t); + * var_hat = var / (1 - beta2^t); + * g = mean_hat / (var_hat^(1/2) + epsilon) + wd * weight; + * else + * g = mean / (var_data^(1/2) + epsilon) + wd * weight; + * \end{gather*} + * + * + * + * Defined in src/operator/optimizer_op.cc:L953 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param mean Moving mean + * @param vari Moving variance + * @param beta1 The decay rate for the 1st moment estimates. + * @param beta2 The decay rate for the 2nd moment estimates. + * @param epsilon A small constant for numerical stability. + * @param t Index update count. + * @param bias_correction Whether to use bias correction. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @return org.apache.mxnet.Symbol + */ +@Experimental +def lamb_update_phase1 (weight : Option[org.apache.mxnet.Symbol] = None, grad : Option[org.apache.mxnet.Symbol] = None, mean : Option[org.apache.mxnet.Symbol] = None, vari : Option[org.apache.mxnet.Symbol] = None, beta1 : Option[Float] = None, beta2 : Option[Float] = None, epsilon : Option[Float] = None, t : Int, bias_correction : Option[Boolean] = None, wd : Float, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Phase II of lamb update it performs the following operations and updates grad. + * + * Link to paper: https://arxiv.org/pdf/1904.00962.pdf + * + * .. math:: + * \begin{gather*} + * if (lower_bound >= 0) + * then + * r1 = max(r1, lower_bound) + * if (upper_bound >= 0) + * then + * r1 = max(r1, upper_bound) + * + * if (r1 == 0 or r2 == 0) + * then + * lr = lr + * else + * lr = lr * (r1/r2) + * weight = weight - lr * g + * \end{gather*} + * + * + * + * Defined in src/operator/optimizer_op.cc:L992 + * }}} + * + * @param weight Weight + * @param g Output of lamb_update_phase 1 + * @param r1 r1 + * @param r2 r2 + * @param lr Learning rate + * @param lower_bound Lower limit of norm of weight. If lower_bound <= 0, Lower limit is not set + * @param upper_bound Upper limit of norm of weight. If upper_bound <= 0, Upper limit is not set + * @return org.apache.mxnet.Symbol + */ +@Experimental +def lamb_update_phase2 (weight : Option[org.apache.mxnet.Symbol] = None, g : Option[org.apache.mxnet.Symbol] = None, r1 : Option[org.apache.mxnet.Symbol] = None, r2 : Option[org.apache.mxnet.Symbol] = None, lr : Float, lower_bound : Option[Float] = None, upper_bound : Option[Float] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Compute the determinant of a matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* is a square matrix. We compute: + * + * *out* = *det(A)* + * + * If *n>2*, *det* is performed separately on the trailing two dimensions + * for all inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * .. 
note:: There is no gradient backwarded when A is non-invertible (which is + * equivalent to det(A) = 0) because zero is rarely hit upon in float + * point computation and the Jacobi's formula on determinant gradient + * is not computationally efficient when A is non-invertible. + * + * Examples:: + * + * Single matrix determinant + * A = `[ [1., 4.], [2., 3.] ] + * det(A) = [-5.] + * + * Batch matrix determinant + * A = `[ `[ [1., 4.], [2., 3.] ], + * `[ [2., 3.], [1., 4.] ] ] + * det(A) = [-5., 5.] + * + * + * Defined in src/operator/tensor/la_op.cc:L973 + * }}} + * + * @param A Tensor of square matrix + * @return org.apache.mxnet.Symbol + */ +@Experimental +def linalg_det (A : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Extracts the diagonal entries of a square matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, then *A* represents a single square matrix which diagonal elements get extracted as a 1-dimensional tensor. + * + * If *n>2*, then *A* represents a batch of square matrices on the trailing two dimensions. The extracted diagonals are returned as an *n-1*-dimensional tensor. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix diagonal extraction + * A = `[ [1.0, 2.0], + * [3.0, 4.0] ] + * + * extractdiag(A) = [1.0, 4.0] + * + * extractdiag(A, 1) = [2.0] + * + * Batch matrix diagonal extraction + * A = `[ `[ [1.0, 2.0], + * [3.0, 4.0] ], + * `[ [5.0, 6.0], + * [7.0, 8.0] ] ] + * + * extractdiag(A) = `[ [1.0, 4.0], + * [5.0, 8.0] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L495 + * }}} + * + * @param A Tensor of square matrices + * @param offset Offset of the diagonal versus the main diagonal. 0 corresponds to the main diagonal, a negative/positive value to diagonals below/above the main diagonal. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def linalg_extractdiag (A : Option[org.apache.mxnet.Symbol] = None, offset : Option[Int] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Extracts a triangular sub-matrix from a square matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, then *A* represents a single square matrix from which a triangular sub-matrix is extracted as a 1-dimensional tensor. + * + * If *n>2*, then *A* represents a batch of square matrices on the trailing two dimensions. The extracted triangular sub-matrices are returned as an *n-1*-dimensional tensor. + * + * The *offset* and *lower* parameters determine the triangle to be extracted: + * + * - When *offset = 0* either the lower or upper triangle with respect to the main diagonal is extracted depending on the value of parameter *lower*. + * - When *offset = k > 0* the upper triangle with respect to the k-th diagonal above the main diagonal is extracted. + * - When *offset = k < 0* the lower triangle with respect to the k-th diagonal below the main diagonal is extracted. + * + * .. note:: The operator supports float32 and float64 data types only. 
+ * + * Examples:: + * + * Single triagonal extraction + * A = `[ [1.0, 2.0], + * [3.0, 4.0] ] + * + * extracttrian(A) = [1.0, 3.0, 4.0] + * extracttrian(A, lower=False) = [1.0, 2.0, 4.0] + * extracttrian(A, 1) = [2.0] + * extracttrian(A, -1) = [3.0] + * + * Batch triagonal extraction + * A = `[ `[ [1.0, 2.0], + * [3.0, 4.0] ], + * `[ [5.0, 6.0], + * [7.0, 8.0] ] ] + * + * extracttrian(A) = `[ [1.0, 3.0, 4.0], + * [5.0, 7.0, 8.0] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L605 + * }}} + * + * @param A Tensor of square matrices + * @param offset Offset of the diagonal versus the main diagonal. 0 corresponds to the main diagonal, a negative/positive value to diagonals below/above the main diagonal. + * @param lower Refer to the lower triangular matrix if lower=true, refer to the upper otherwise. Only relevant when offset=0 + * @return org.apache.mxnet.Symbol + */ +@Experimental +def linalg_extracttrian (A : Option[org.apache.mxnet.Symbol] = None, offset : Option[Int] = None, lower : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * LQ factorization for general matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, we compute the LQ factorization (LAPACK *gelqf*, followed by *orglq*). *A* + * must have shape *(x, y)* with *x <= y*, and must have full rank *=x*. The LQ + * factorization consists of *L* with shape *(x, x)* and *Q* with shape *(x, y)*, so + * that: + * + * *A* = *L* \* *Q* + * + * Here, *L* is lower triangular (upper triangle equal to zero) with nonzero diagonal, + * and *Q* is row-orthonormal, meaning that + * + * *Q* \* *Q*\ :sup:`T` + * + * is equal to the identity matrix of shape *(x, x)*. + * + * If *n>2*, *gelqf* is performed separately on the trailing two dimensions for all + * inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single LQ factorization + * A = `[ [1., 2., 3.], [4., 5., 6.] ] + * Q, L = gelqf(A) + * Q = `[ [-0.26726124, -0.53452248, -0.80178373], + * [0.87287156, 0.21821789, -0.43643578] ] + * L = `[ [-3.74165739, 0.], + * [-8.55235974, 1.96396101] ] + * + * Batch LQ factorization + * A = `[ `[ [1., 2., 3.], [4., 5., 6.] ], + * `[ [7., 8., 9.], [10., 11., 12.] ] ] + * Q, L = gelqf(A) + * Q = `[ `[ [-0.26726124, -0.53452248, -0.80178373], + * [0.87287156, 0.21821789, -0.43643578] ], + * `[ [-0.50257071, -0.57436653, -0.64616234], + * [0.7620735, 0.05862104, -0.64483142] ] ] + * L = `[ `[ [-3.74165739, 0.], + * [-8.55235974, 1.96396101] ], + * `[ [-13.92838828, 0.], + * [-19.09768702, 0.52758934] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L798 + * }}} + * + * @param A Tensor of input matrices to be factorized + * @return org.apache.mxnet.Symbol + */ +@Experimental +def linalg_gelqf (A : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Performs general matrix multiplication and accumulation. + * Input are tensors *A*, *B*, *C*, each of dimension *n >= 2* and having the same shape + * on the leading *n-2* dimensions. + * + * If *n=2*, the BLAS3 function *gemm* is performed: + * + * *out* = *alpha* \* *op*\ (*A*) \* *op*\ (*B*) + *beta* \* *C* + * + * Here, *alpha* and *beta* are scalar parameters, and *op()* is either the identity or + * matrix transposition (depending on *transpose_a*, *transpose_b*). 
+ * + * If *n>2*, *gemm* is performed separately for a batch of matrices. The column indices of the matrices + * are given by the last dimensions of the tensors, the row indices by the axis specified with the *axis* + * parameter. By default, the trailing two dimensions will be used for matrix encoding. + * + * For a non-default axis parameter, the operation performed is equivalent to a series of swapaxes/gemm/swapaxes + * calls. For example let *A*, *B*, *C* be 5 dimensional tensors. Then gemm(*A*, *B*, *C*, axis=1) is equivalent + * to the following without the overhead of the additional swapaxis operations:: + * + * A1 = swapaxes(A, dim1=1, dim2=3) + * B1 = swapaxes(B, dim1=1, dim2=3) + * C = swapaxes(C, dim1=1, dim2=3) + * C = gemm(A1, B1, C) + * C = swapaxis(C, dim1=1, dim2=3) + * + * When the input data is of type float32 and the environment variables MXNET_CUDA_ALLOW_TENSOR_CORE + * and MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION are set to 1, this operator will try to use + * pseudo-float16 precision (float32 math with float16 I/O) precision in order to use + * Tensor Cores on suitable NVIDIA GPUs. This can sometimes give significant speedups. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix multiply-add + * A = `[ [1.0, 1.0], [1.0, 1.0] ] + * B = `[ [1.0, 1.0], [1.0, 1.0], [1.0, 1.0] ] + * C = `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ] + * gemm(A, B, C, transpose_b=True, alpha=2.0, beta=10.0) + * = `[ [14.0, 14.0, 14.0], [14.0, 14.0, 14.0] ] + * + * Batch matrix multiply-add + * A = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * B = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * C = `[ `[ [10.0] ], `[ [0.01] ] ] + * gemm(A, B, C, transpose_b=True, alpha=2.0 , beta=10.0) + * = `[ `[ [104.0] ], `[ [0.14] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L89 + * }}} + * + * @param A Tensor of input matrices + * @param B Tensor of input matrices + * @param C Tensor of input matrices + * @param transpose_a Multiply with transposed of first input (A). + * @param transpose_b Multiply with transposed of second input (B). + * @param alpha Scalar factor multiplied with A*B. + * @param beta Scalar factor multiplied with C. + * @param axis Axis corresponding to the matrix rows. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def linalg_gemm (A : Option[org.apache.mxnet.Symbol] = None, B : Option[org.apache.mxnet.Symbol] = None, C : Option[org.apache.mxnet.Symbol] = None, transpose_a : Option[Boolean] = None, transpose_b : Option[Boolean] = None, alpha : Option[Double] = None, beta : Option[Double] = None, axis : Option[Int] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Performs general matrix multiplication. + * Input are tensors *A*, *B*, each of dimension *n >= 2* and having the same shape + * on the leading *n-2* dimensions. + * + * If *n=2*, the BLAS3 function *gemm* is performed: + * + * *out* = *alpha* \* *op*\ (*A*) \* *op*\ (*B*) + * + * Here *alpha* is a scalar parameter and *op()* is either the identity or the matrix + * transposition (depending on *transpose_a*, *transpose_b*). + * + * If *n>2*, *gemm* is performed separately for a batch of matrices. The column indices of the matrices + * are given by the last dimensions of the tensors, the row indices by the axis specified with the *axis* + * parameter. By default, the trailing two dimensions will be used for matrix encoding. 
+ * + * For a non-default axis parameter, the operation performed is equivalent to a series of swapaxes/gemm/swapaxes + * calls. For example let *A*, *B* be 5 dimensional tensors. Then gemm(*A*, *B*, axis=1) is equivalent to + * the following without the overhead of the additional swapaxis operations:: + * + * A1 = swapaxes(A, dim1=1, dim2=3) + * B1 = swapaxes(B, dim1=1, dim2=3) + * C = gemm2(A1, B1) + * C = swapaxis(C, dim1=1, dim2=3) + * + * When the input data is of type float32 and the environment variables MXNET_CUDA_ALLOW_TENSOR_CORE + * and MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION are set to 1, this operator will try to use + * pseudo-float16 precision (float32 math with float16 I/O) precision in order to use + * Tensor Cores on suitable NVIDIA GPUs. This can sometimes give significant speedups. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix multiply + * A = `[ [1.0, 1.0], [1.0, 1.0] ] + * B = `[ [1.0, 1.0], [1.0, 1.0], [1.0, 1.0] ] + * gemm2(A, B, transpose_b=True, alpha=2.0) + * = `[ [4.0, 4.0, 4.0], [4.0, 4.0, 4.0] ] + * + * Batch matrix multiply + * A = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * B = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * gemm2(A, B, transpose_b=True, alpha=2.0) + * = `[ `[ [4.0] ], `[ [0.04 ] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L163 + * }}} + * + * @param A Tensor of input matrices + * @param B Tensor of input matrices + * @param transpose_a Multiply with transposed of first input (A). + * @param transpose_b Multiply with transposed of second input (B). + * @param alpha Scalar factor multiplied with A*B. + * @param axis Axis corresponding to the matrix row indices. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def linalg_gemm2 (A : Option[org.apache.mxnet.Symbol] = None, B : Option[org.apache.mxnet.Symbol] = None, transpose_a : Option[Boolean] = None, transpose_b : Option[Boolean] = None, alpha : Option[Double] = None, axis : Option[Int] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Compute the inverse of a matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* is a square matrix. We compute: + * + * *out* = *A*\ :sup:`-1` + * + * If *n>2*, *inverse* is performed separately on the trailing two dimensions + * for all inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix inverse + * A = `[ [1., 4.], [2., 3.] ] + * inverse(A) = `[ [-0.6, 0.8], [0.4, -0.2] ] + * + * Batch matrix inverse + * A = `[ `[ [1., 4.], [2., 3.] ], + * `[ [1., 3.], [2., 4.] ] ] + * inverse(A) = `[ `[ [-0.6, 0.8], [0.4, -0.2] ], + * `[ [-2., 1.5], [1., -0.5] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L919 + * }}} + * + * @param A Tensor of square matrix + * @return org.apache.mxnet.Symbol + */ +@Experimental +def linalg_inverse (A : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Constructs a square matrix with the input as diagonal. + * Input is a tensor *A* of dimension *n >= 1*. + * + * If *n=1*, then *A* represents the diagonal entries of a single square matrix. This matrix will be returned as a 2-dimensional tensor. + * If *n>1*, then *A* represents a batch of diagonals of square matrices. The batch of diagonal matrices will be returned as an *n+1*-dimensional tensor. + * + * .. 
note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single diagonal matrix construction + * A = [1.0, 2.0] + * + * makediag(A) = `[ [1.0, 0.0], + * [0.0, 2.0] ] + * + * makediag(A, 1) = `[ [0.0, 1.0, 0.0], + * [0.0, 0.0, 2.0], + * [0.0, 0.0, 0.0] ] + * + * Batch diagonal matrix construction + * A = `[ [1.0, 2.0], + * [3.0, 4.0] ] + * + * makediag(A) = `[ `[ [1.0, 0.0], + * [0.0, 2.0] ], + * `[ [3.0, 0.0], + * [0.0, 4.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L547 + * }}} + * + * @param A Tensor of diagonal entries + * @param offset Offset of the diagonal versus the main diagonal. 0 corresponds to the main diagonal, a negative/positive value to diagonals below/above the main diagonal. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def linalg_makediag (A : Option[org.apache.mxnet.Symbol] = None, offset : Option[Int] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Constructs a square matrix with the input representing a specific triangular sub-matrix. + * This is basically the inverse of *linalg.extracttrian*. Input is a tensor *A* of dimension *n >= 1*. + * + * If *n=1*, then *A* represents the entries of a triangular matrix which is lower triangular if *offset<0* or *offset=0*, *lower=true*. The resulting matrix is derived by first constructing the square + * matrix with the entries outside the triangle set to zero and then adding *offset*-times an additional + * diagonal with zero entries to the square matrix. + * + * If *n>1*, then *A* represents a batch of triangular sub-matrices. The batch of corresponding square matrices is returned as an *n+1*-dimensional tensor. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix construction + * A = [1.0, 2.0, 3.0] + * + * maketrian(A) = `[ [1.0, 0.0], + * [2.0, 3.0] ] + * + * maketrian(A, lower=false) = `[ [1.0, 2.0], + * [0.0, 3.0] ] + * + * maketrian(A, offset=1) = `[ [0.0, 1.0, 2.0], + * [0.0, 0.0, 3.0], + * [0.0, 0.0, 0.0] ] + * maketrian(A, offset=-1) = `[ [0.0, 0.0, 0.0], + * [1.0, 0.0, 0.0], + * [2.0, 3.0, 0.0] ] + * + * Batch matrix construction + * A = `[ [1.0, 2.0, 3.0], + * [4.0, 5.0, 6.0] ] + * + * maketrian(A) = `[ `[ [1.0, 0.0], + * [2.0, 3.0] ], + * `[ [4.0, 0.0], + * [5.0, 6.0] ] ] + * + * maketrian(A, offset=1) = `[ `[ [0.0, 1.0, 2.0], + * [0.0, 0.0, 3.0], + * [0.0, 0.0, 0.0] ], + * `[ [0.0, 4.0, 5.0], + * [0.0, 0.0, 6.0], + * [0.0, 0.0, 0.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L673 + * }}} + * + * @param A Tensor of triangular matrices stored as vectors + * @param offset Offset of the diagonal versus the main diagonal. 0 corresponds to the main diagonal, a negative/positive value to diagonals below/above the main diagonal. + * @param lower Refer to the lower triangular matrix if lower=true, refer to the upper otherwise. Only relevant when offset=0 + * @return org.apache.mxnet.Symbol + */ +@Experimental +def linalg_maketrian (A : Option[org.apache.mxnet.Symbol] = None, offset : Option[Int] = None, lower : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Performs Cholesky factorization of a symmetric positive-definite matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, the Cholesky factor *B* of the symmetric, positive definite matrix *A* is + * computed. 
*B* is triangular (entries of upper or lower triangle are all zero), has
+ * positive diagonal entries, and:
+ *
+ * *A* = *B* \* *B*\ :sup:`T`  if *lower* = *true*
+ * *A* = *B*\ :sup:`T` \* *B*  if *lower* = *false*
+ *
+ * If *n>2*, *potrf* is performed separately on the trailing two dimensions for all inputs
+ * (batch mode).
+ *
+ * .. note:: The operator supports float32 and float64 data types only.
+ *
+ * Examples::
+ *
+ * Single matrix factorization
+ * A = `[ [4.0, 1.0], [1.0, 4.25] ]
+ * potrf(A) = `[ [2.0, 0], [0.5, 2.0] ]
+ *
+ * Batch matrix factorization
+ * A = `[ `[ [4.0, 1.0], [1.0, 4.25] ], `[ [16.0, 4.0], [4.0, 17.0] ] ]
+ * potrf(A) = `[ `[ [2.0, 0], [0.5, 2.0] ], `[ [4.0, 0], [1.0, 4.0] ] ]
+ *
+ *
+ * Defined in src/operator/tensor/la_op.cc:L214
+ * }}}
+ *
+ * @param A Tensor of input matrices to be decomposed
+ * @return org.apache.mxnet.Symbol
+ */
+@Experimental
+def linalg_potrf (A : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol
+ /**
+ *
+ * {{{
+ *
+ * Performs matrix inversion from a Cholesky factorization.
+ * Input is a tensor *A* of dimension *n >= 2*.
+ *
+ * If *n=2*, *A* is a triangular matrix (entries of upper or lower triangle are all zero)
+ * with positive diagonal. We compute:
+ *
+ * *out* = *A*\ :sup:`-T` \* *A*\ :sup:`-1` if *lower* = *true*
+ * *out* = *A*\ :sup:`-1` \* *A*\ :sup:`-T` if *lower* = *false*
+ *
+ * In other words, if *A* is the Cholesky factor of a symmetric positive definite matrix
+ * *B* (obtained by *potrf*), then
+ *
+ * *out* = *B*\ :sup:`-1`
+ *
+ * If *n>2*, *potri* is performed separately on the trailing two dimensions for all inputs
+ * (batch mode).
+ *
+ * .. note:: The operator supports float32 and float64 data types only.
+ *
+ * .. note:: Use this operator only if you are certain you need the inverse of *B*, and
+ * cannot use the Cholesky factor *A* (*potrf*), together with backsubstitution
+ * (*trsm*). The latter is numerically much safer, and also cheaper.
+ *
+ * Examples::
+ *
+ * Single matrix inverse
+ * A = `[ [2.0, 0], [0.5, 2.0] ]
+ * potri(A) = `[ [0.26563, -0.0625], [-0.0625, 0.25] ]
+ *
+ * Batch matrix inverse
+ * A = `[ `[ [2.0, 0], [0.5, 2.0] ], `[ [4.0, 0], [1.0, 4.0] ] ]
+ * potri(A) = `[ `[ [0.26563, -0.0625], [-0.0625, 0.25] ],
+ * `[ [0.06641, -0.01562], [-0.01562, 0.0625] ] ]
+ *
+ *
+ * Defined in src/operator/tensor/la_op.cc:L275
+ * }}}
+ *
+ * @param A Tensor of lower triangular matrices
+ * @return org.apache.mxnet.Symbol
+ */
+@Experimental
+def linalg_potri (A : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol
+ /**
+ *
+ * {{{
+ *
+ * Compute the sign and log of the determinant of a matrix.
+ * Input is a tensor *A* of dimension *n >= 2*.
+ *
+ * If *n=2*, *A* is a square matrix. We compute:
+ *
+ * *sign* = *sign(det(A))*
+ * *logabsdet* = *log(abs(det(A)))*
+ *
+ * If *n>2*, *slogdet* is performed separately on the trailing two dimensions
+ * for all inputs (batch mode).
+ *
+ * .. note:: The operator supports float32 and float64 data types only.
+ * .. note:: The gradient is not properly defined on sign, so the gradient of
+ * it is not backwarded.
+ * .. note:: No gradient is backwarded when A is non-invertible. Please see
+ * the docs of operator det for detail.
+ *
+ * Examples::
+ *
+ * Single matrix signed log determinant
+ * A = `[ [2., 3.], [1., 4.] ]
+ * sign, logabsdet = slogdet(A)
+ * sign = [1.]
+ * logabsdet = [1.609438] + * + * Batch matrix signed log determinant + * A = `[ `[ [2., 3.], [1., 4.] ], + * `[ [1., 2.], [2., 4.] ], + * `[ [1., 2.], [4., 3.] ] ] + * sign, logabsdet = slogdet(A) + * sign = [1., 0., -1.] + * logabsdet = [1.609438, -inf, 1.609438] + * + * + * Defined in src/operator/tensor/la_op.cc:L1031 + * }}} + * + * @param A Tensor of square matrix + * @return org.apache.mxnet.Symbol + */ +@Experimental +def linalg_slogdet (A : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the sum of the logarithms of the diagonal elements of a square matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* must be square with positive diagonal entries. We sum the natural + * logarithms of the diagonal elements, the result has shape (1,). + * + * If *n>2*, *sumlogdiag* is performed separately on the trailing two dimensions for all + * inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix reduction + * A = `[ [1.0, 1.0], [1.0, 7.0] ] + * sumlogdiag(A) = [1.9459] + * + * Batch matrix reduction + * A = `[ `[ [1.0, 1.0], [1.0, 7.0] ], `[ [3.0, 0], [0, 17.0] ] ] + * sumlogdiag(A) = [1.9459, 3.9318] + * + * + * Defined in src/operator/tensor/la_op.cc:L445 + * }}} + * + * @param A Tensor of square matrices + * @return org.apache.mxnet.Symbol + */ +@Experimental +def linalg_sumlogdiag (A : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Multiplication of matrix with its transpose. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, the operator performs the BLAS3 function *syrk*: + * + * *out* = *alpha* \* *A* \* *A*\ :sup:`T` + * + * if *transpose=False*, or + * + * *out* = *alpha* \* *A*\ :sup:`T` \ \* *A* + * + * if *transpose=True*. + * + * If *n>2*, *syrk* is performed separately on the trailing two dimensions for all + * inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix multiply + * A = `[ [1., 2., 3.], [4., 5., 6.] ] + * syrk(A, alpha=1., transpose=False) + * = `[ [14., 32.], + * [32., 77.] ] + * syrk(A, alpha=1., transpose=True) + * = `[ [17., 22., 27.], + * [22., 29., 36.], + * [27., 36., 45.] ] + * + * Batch matrix multiply + * A = `[ `[ [1., 1.] ], `[ [0.1, 0.1] ] ] + * syrk(A, alpha=2., transpose=False) = `[ `[ [4.] ], `[ [0.04] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L730 + * }}} + * + * @param A Tensor of input matrices + * @param transpose Use transpose of input matrix. + * @param alpha Scalar factor to be applied to the result. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def linalg_syrk (A : Option[org.apache.mxnet.Symbol] = None, transpose : Option[Boolean] = None, alpha : Option[Double] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Performs multiplication with a lower triangular matrix. + * Input are tensors *A*, *B*, each of dimension *n >= 2* and having the same shape + * on the leading *n-2* dimensions. + * + * If *n=2*, *A* must be triangular. The operator performs the BLAS3 function + * *trmm*: + * + * *out* = *alpha* \* *op*\ (*A*) \* *B* + * + * if *rightside=False*, or + * + * *out* = *alpha* \* *B* \* *op*\ (*A*) + * + * if *rightside=True*. 
Here, *alpha* is a scalar parameter, and *op()* is either the + * identity or the matrix transposition (depending on *transpose*). + * + * If *n>2*, *trmm* is performed separately on the trailing two dimensions for all inputs + * (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single triangular matrix multiply + * A = `[ [1.0, 0], [1.0, 1.0] ] + * B = `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ] + * trmm(A, B, alpha=2.0) = `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ] + * + * Batch triangular matrix multiply + * A = `[ `[ [1.0, 0], [1.0, 1.0] ], `[ [1.0, 0], [1.0, 1.0] ] ] + * B = `[ `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ], `[ [0.5, 0.5, 0.5], [0.5, 0.5, 0.5] ] ] + * trmm(A, B, alpha=2.0) = `[ `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ], + * `[ [1.0, 1.0, 1.0], [2.0, 2.0, 2.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L333 + * }}} + * + * @param A Tensor of lower triangular matrices + * @param B Tensor of matrices + * @param transpose Use transposed of the triangular matrix + * @param rightside Multiply triangular matrix from the right to non-triangular one. + * @param lower True if the triangular matrix is lower triangular, false if it is upper triangular. + * @param alpha Scalar factor to be applied to the result. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def linalg_trmm (A : Option[org.apache.mxnet.Symbol] = None, B : Option[org.apache.mxnet.Symbol] = None, transpose : Option[Boolean] = None, rightside : Option[Boolean] = None, lower : Option[Boolean] = None, alpha : Option[Double] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Solves matrix equation involving a lower triangular matrix. + * Input are tensors *A*, *B*, each of dimension *n >= 2* and having the same shape + * on the leading *n-2* dimensions. + * + * If *n=2*, *A* must be triangular. The operator performs the BLAS3 function + * *trsm*, solving for *out* in: + * + * *op*\ (*A*) \* *out* = *alpha* \* *B* + * + * if *rightside=False*, or + * + * *out* \* *op*\ (*A*) = *alpha* \* *B* + * + * if *rightside=True*. Here, *alpha* is a scalar parameter, and *op()* is either the + * identity or the matrix transposition (depending on *transpose*). + * + * If *n>2*, *trsm* is performed separately on the trailing two dimensions for all inputs + * (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix solve + * A = `[ [1.0, 0], [1.0, 1.0] ] + * B = `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ] + * trsm(A, B, alpha=0.5) = `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ] + * + * Batch matrix solve + * A = `[ `[ [1.0, 0], [1.0, 1.0] ], `[ [1.0, 0], [1.0, 1.0] ] ] + * B = `[ `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ], + * `[ [4.0, 4.0, 4.0], [8.0, 8.0, 8.0] ] ] + * trsm(A, B, alpha=0.5) = `[ `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ], + * `[ [2.0, 2.0, 2.0], [2.0, 2.0, 2.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L396 + * }}} + * + * @param A Tensor of lower triangular matrices + * @param B Tensor of matrices + * @param transpose Use transposed of the triangular matrix + * @param rightside Multiply triangular matrix from the right to non-triangular one. + * @param lower True if the triangular matrix is lower triangular, false if it is upper triangular. + * @param alpha Scalar factor to be applied to the result. 
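+ * @example Hedged Scala sketch (assumes these generated bindings are exposed via `Symbol.api`,
+ *          as elsewhere in the Scala package; the variable names are illustrative):
+ *          {{{
+ *          val a = Symbol.Variable("A")   // lower-triangular factor, e.g. produced by linalg_potrf
+ *          val b = Symbol.Variable("B")
+ *          val x = Symbol.api.linalg_trsm(A = Some(a), B = Some(b), alpha = Some(0.5))
+ *          }}}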
+ * @return org.apache.mxnet.Symbol + */ +@Experimental +def linalg_trsm (A : Option[org.apache.mxnet.Symbol] = None, B : Option[org.apache.mxnet.Symbol] = None, transpose : Option[Boolean] = None, rightside : Option[Boolean] = None, lower : Option[Boolean] = None, alpha : Option[Double] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise Natural logarithmic value of the input. + * + * The natural logarithm is logarithm in base *e*, so that ``log(exp(x)) = x`` + * + * The storage type of ``log`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L76 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def log (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise Base-10 logarithmic value of the input. + * + * ``10**log10(x) = x`` + * + * The storage type of ``log10`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L93 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def log10 (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise ``log(1 + x)`` value of the input. + * + * This function is more accurate than ``log(1 + x)`` for small ``x`` so that + * :math:`1+x\approx 1` + * + * The storage type of ``log1p`` output depends upon the input storage type: + * + * - log1p(default) = default + * - log1p(row_sparse) = row_sparse + * - log1p(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L206 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def log1p (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise Base-2 logarithmic value of the input. + * + * ``2**log2(x) = x`` + * + * The storage type of ``log2`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L105 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def log2 (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the log softmax of the input. + * This is equivalent to computing softmax followed by log. + * + * Examples:: + * + * >>> x = mx.nd.array([1, 2, .1]) + * >>> mx.nd.log_softmax(x).asnumpy() + * array([-1.41702998, -0.41702995, -2.31702995], dtype=float32) + * + * >>> x = mx.nd.array( `[ [1, 2, .1],[.1, 2, 1] ] ) + * >>> mx.nd.log_softmax(x, axis=0).asnumpy() + * array(`[ [-0.34115392, -0.69314718, -1.24115396], + * [-1.24115396, -0.69314718, -0.34115392] ], dtype=float32) + * }}} + * + * @param data The input array. + * @param axis The axis along which to compute softmax. + * @param temperature Temperature parameter in softmax + * @param dtype DType of the output in case this can't be inferred. Defaults to the same as input's dtype if not defined (dtype=None). + * @param use_length Whether to use the length input as a mask over the data input. 
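+ * @example Hedged Scala sketch (assumes the bindings are reachable as `Symbol.api`; names are illustrative):
+ *          {{{
+ *          val x        = Symbol.Variable("data")
+ *          val logProbs = Symbol.api.log_softmax(data = Some(x), axis = Some(-1))
+ *          }}}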
+ * @return org.apache.mxnet.Symbol + */ +@Experimental +def log_softmax (data : Option[org.apache.mxnet.Symbol] = None, axis : Option[Int] = None, temperature : Option[Double] = None, dtype : Option[String] = None, use_length : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns the result of logical NOT (!) function + * + * Example: + * logical_not([-2., 0., 1.]) = [0., 1., 0.] + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def logical_not (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Make your own loss function in network construction. + * + * This operator accepts a customized loss function symbol as a terminal loss and + * the symbol should be an operator with no backward dependency. + * The output of this function is the gradient of loss with respect to the input data. + * + * For example, if you are a making a cross entropy loss function. Assume ``out`` is the + * predicted output and ``label`` is the true label, then the cross entropy can be defined as:: + * + * cross_entropy = label * log(out) + (1 - label) * log(1 - out) + * loss = make_loss(cross_entropy) + * + * We will need to use ``make_loss`` when we are creating our own loss function or we want to + * combine multiple loss functions. Also we may want to stop some variables' gradients + * from backpropagation. See more detail in ``BlockGrad`` or ``stop_gradient``. + * + * The storage type of ``make_loss`` output depends upon the input storage type: + * + * - make_loss(default) = default + * - make_loss(row_sparse) = row_sparse + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L360 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def make_loss (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the max of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L32 + * }}} + * + * @param data The input + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def max (data : Option[org.apache.mxnet.Symbol] = None, axis : Option[org.apache.mxnet.Shape] = None, keepdims : Option[Boolean] = None, exclude : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the max of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L32 + * }}} + * + * @param data The input + * @param axis The axis or axes along which to perform the reduction. 
+ + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def max_axis (data : Option[org.apache.mxnet.Symbol] = None, axis : Option[org.apache.mxnet.Shape] = None, keepdims : Option[Boolean] = None, exclude : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the mean of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L84 + * }}} + * + * @param data The input + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def mean (data : Option[org.apache.mxnet.Symbol] = None, axis : Option[org.apache.mxnet.Shape] = None, keepdims : Option[Boolean] = None, exclude : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the min of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L47 + * }}} + * + * @param data The input + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def min (data : Option[org.apache.mxnet.Symbol] = None, axis : Option[org.apache.mxnet.Shape] = None, keepdims : Option[Boolean] = None, exclude : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the min of array elements over given axes. 
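+ *
+ * Example (illustrative)::
+ *
+ * x = `[ [1., 2.], [3., 4.] ]
+ *
+ * min(x, axis=0) = [1., 2.]
+ * min(x, axis=1) = [1., 3.]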
+ *
+ * Defined in src/operator/tensor/./broadcast_reduce_op.h:L47
+ * }}}
+ *
+ * @param data The input
+ * @param axis The axis or axes along which to perform the reduction.
+
+ The default, `axis=()`, will compute over all elements into a
+ scalar array with shape `(1,)`.
+
+ If `axis` is int, a reduction is performed on a particular axis.
+
+ If `axis` is a tuple of ints, a reduction is performed on all the axes
+ specified in the tuple.
+
+ If `exclude` is true, reduction will be performed on the axes that are
+ NOT in axis instead.
+
+ Negative values means indexing from right to left.
+ * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one.
+ * @param exclude Whether to perform reduction on axis that are NOT in axis instead.
+ * @return org.apache.mxnet.Symbol
+ */
+@Experimental
+def min_axis (data : Option[org.apache.mxnet.Symbol] = None, axis : Option[org.apache.mxnet.Shape] = None, keepdims : Option[Boolean] = None, exclude : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol
+ /**
+ *
+ * {{{
+ *
+ *
+ * Calculate the mean and variance of `data`.
+ *
+ * The mean and variance are calculated by aggregating the contents of data across axes.
+ * If x is 1-D and axes = [0] this is just the mean and variance of a vector.
+ *
+ * Example:
+ *
+ * x = `[ [1, 2, 3], [4, 5, 6] ]
+ * mean, var = moments(data=x, axes=[0])
+ * mean = [2.5, 3.5, 4.5]
+ * var = [2.25, 2.25, 2.25]
+ * mean, var = moments(data=x, axes=[1])
+ * mean = [2.0, 5.0]
+ * var = [0.66666667, 0.66666667]
+ * mean, var = moments(data=x, axes=[0, 1])
+ * mean = [3.5]
+ * var = [2.9166667]
+ *
+ *
+ *
+ * Defined in src/operator/nn/moments.cc:L54
+ * }}}
+ *
+ * @param data Input ndarray
+ * @param axes Array of ints. Axes along which to compute mean and variance.
+ * @param keepdims produce moments with the same dimensionality as the input.
+ * @return org.apache.mxnet.Symbol
+ */
+@Experimental
+def moments (data : Option[org.apache.mxnet.Symbol] = None, axes : Option[org.apache.mxnet.Shape] = None, keepdims : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol
+ /**
+ *
+ * {{{
+ *
+ * Mixed Precision version of Phase I of lamb update
+ * it performs the following operations and returns g:
+ *
+ * Link to paper: https://arxiv.org/pdf/1904.00962.pdf
+ *
+ * .. math::
+ * \begin{gather*}
+ * grad32 = grad(float16) * rescale_grad
+ * if (grad < -clip_gradient)
+ * then
+ * grad = -clip_gradient
+ * if (grad > clip_gradient)
+ * then
+ * grad = clip_gradient
+ *
+ * mean = beta1 * mean + (1 - beta1) * grad;
+ * variance = beta2 * variance + (1. - beta2) * grad ^ 2;
+ *
+ * if (bias_correction)
+ * then
+ * mean_hat = mean / (1. - beta1^t);
+ * var_hat = var / (1 - beta2^t);
+ * g = mean_hat / (var_hat^(1/2) + epsilon) + wd * weight32;
+ * else
+ * g = mean / (var_data^(1/2) + epsilon) + wd * weight32;
+ * \end{gather*}
+ *
+ *
+ *
+ * Defined in src/operator/optimizer_op.cc:L1033
+ * }}}
+ *
+ * @param weight Weight
+ * @param grad Gradient
+ * @param mean Moving mean
+ * @param vari Moving variance
+ * @param weight32 Weight32
+ * @param beta1 The decay rate for the 1st moment estimates.
+ * @param beta2 The decay rate for the 2nd moment estimates.
+ * @param epsilon A small constant for numerical stability.
+ * @param t Index update count.
+ * @param bias_correction Whether to use bias correction.
+ * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @return org.apache.mxnet.Symbol + */ +@Experimental +def mp_lamb_update_phase1 (weight : Option[org.apache.mxnet.Symbol] = None, grad : Option[org.apache.mxnet.Symbol] = None, mean : Option[org.apache.mxnet.Symbol] = None, vari : Option[org.apache.mxnet.Symbol] = None, weight32 : Option[org.apache.mxnet.Symbol] = None, beta1 : Option[Float] = None, beta2 : Option[Float] = None, epsilon : Option[Float] = None, t : Int, bias_correction : Option[Boolean] = None, wd : Float, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Mixed Precision version Phase II of lamb update + * it performs the following operations and updates grad. + * + * Link to paper: https://arxiv.org/pdf/1904.00962.pdf + * + * .. math:: + * \begin{gather*} + * if (lower_bound >= 0) + * then + * r1 = max(r1, lower_bound) + * if (upper_bound >= 0) + * then + * r1 = max(r1, upper_bound) + * + * if (r1 == 0 or r2 == 0) + * then + * lr = lr + * else + * lr = lr * (r1/r2) + * weight32 = weight32 - lr * g + * weight(float16) = weight32 + * \end{gather*} + * + * + * + * Defined in src/operator/optimizer_op.cc:L1075 + * }}} + * + * @param weight Weight + * @param g Output of mp_lamb_update_phase 1 + * @param r1 r1 + * @param r2 r2 + * @param weight32 Weight32 + * @param lr Learning rate + * @param lower_bound Lower limit of norm of weight. If lower_bound <= 0, Lower limit is not set + * @param upper_bound Upper limit of norm of weight. If upper_bound <= 0, Upper limit is not set + * @return org.apache.mxnet.Symbol + */ +@Experimental +def mp_lamb_update_phase2 (weight : Option[org.apache.mxnet.Symbol] = None, g : Option[org.apache.mxnet.Symbol] = None, r1 : Option[org.apache.mxnet.Symbol] = None, r2 : Option[org.apache.mxnet.Symbol] = None, weight32 : Option[org.apache.mxnet.Symbol] = None, lr : Float, lower_bound : Option[Float] = None, upper_bound : Option[Float] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Update function for multi-precision Nesterov Accelerated Gradient( NAG) optimizer. + * + * + * Defined in src/operator/optimizer_op.cc:L745 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param mom Momentum + * @param weight32 Weight32 + * @param lr Learning rate + * @param momentum The decay rate of momentum estimates at each epoch. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). 
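+ * @example Hedged Scala sketch (assumes `Symbol.api` exposes these bindings; all variable names are illustrative):
+ *          {{{
+ *          val w   = Symbol.Variable("weight")     // float16 weight
+ *          val g   = Symbol.Variable("grad")
+ *          val m   = Symbol.Variable("mom")
+ *          val w32 = Symbol.Variable("weight32")   // float32 master copy of the weight
+ *          val upd = Symbol.api.mp_nag_mom_update(weight = Some(w), grad = Some(g), mom = Some(m),
+ *                                                 weight32 = Some(w32), lr = 0.01f, momentum = Some(0.9f))
+ *          }}}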
+ * @return org.apache.mxnet.Symbol + */ +@Experimental +def mp_nag_mom_update (weight : Option[org.apache.mxnet.Symbol] = None, grad : Option[org.apache.mxnet.Symbol] = None, mom : Option[org.apache.mxnet.Symbol] = None, weight32 : Option[org.apache.mxnet.Symbol] = None, lr : Float, momentum : Option[Float] = None, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Updater function for multi-precision sgd optimizer + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param mom Momentum + * @param weight32 Weight32 + * @param lr Learning rate + * @param momentum The decay rate of momentum estimates at each epoch. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param lazy_update If true, lazy updates are applied if gradient's stype is row_sparse and both weight and momentum have the same stype + * @return org.apache.mxnet.Symbol + */ +@Experimental +def mp_sgd_mom_update (weight : Option[org.apache.mxnet.Symbol] = None, grad : Option[org.apache.mxnet.Symbol] = None, mom : Option[org.apache.mxnet.Symbol] = None, weight32 : Option[org.apache.mxnet.Symbol] = None, lr : Float, momentum : Option[Float] = None, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, lazy_update : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Updater function for multi-precision sgd optimizer + * }}} + * + * @param weight Weight + * @param grad gradient + * @param weight32 Weight32 + * @param lr Learning rate + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param lazy_update If true, lazy updates are applied if gradient's stype is row_sparse. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def mp_sgd_update (weight : Option[org.apache.mxnet.Symbol] = None, grad : Option[org.apache.mxnet.Symbol] = None, weight32 : Option[org.apache.mxnet.Symbol] = None, lr : Float, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, lazy_update : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Check if all the float numbers in all the arrays are finite (used for AMP) + * + * + * Defined in src/operator/contrib/all_finite.cc:L133 + * }}} + * + * @param data Arrays + * @param num_arrays Number of arrays. + * @param init_output Initialize output to 1. 
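+ * @example Hedged Scala sketch (assumes `Symbol.api` exposure; names are illustrative):
+ *          {{{
+ *          val arrays = Array(Symbol.Variable("w0"), Symbol.Variable("w1"))
+ *          val allOk  = Symbol.api.multi_all_finite(data = arrays, num_arrays = Some(2))
+ *          }}}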
+ * @return org.apache.mxnet.Symbol + */ +@Experimental +def multi_all_finite (data : Array[org.apache.mxnet.Symbol], num_arrays : Option[Int] = None, init_output : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Compute the LARS coefficients of multiple weights and grads from their sums of square" + * + * + * Defined in src/operator/contrib/multi_lars.cc:L37 + * }}} + * + * @param lrs Learning rates to scale by LARS coefficient + * @param weights_sum_sq sum of square of weights arrays + * @param grads_sum_sq sum of square of gradients arrays + * @param wds weight decays + * @param eta LARS eta + * @param eps LARS eps + * @param rescale_grad Gradient rescaling factor + * @return org.apache.mxnet.Symbol + */ +@Experimental +def multi_lars (lrs : Option[org.apache.mxnet.Symbol] = None, weights_sum_sq : Option[org.apache.mxnet.Symbol] = None, grads_sum_sq : Option[org.apache.mxnet.Symbol] = None, wds : Option[org.apache.mxnet.Symbol] = None, eta : Float, eps : Float, rescale_grad : Option[Float] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Momentum update function for multi-precision Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * + * + * Defined in src/operator/optimizer_op.cc:L472 + * }}} + * + * @param data Weights + * @param lrs Learning rates. + * @param wds Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param momentum The decay rate of momentum estimates at each epoch. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param num_weights Number of updated weights. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def multi_mp_sgd_mom_update (data : Array[org.apache.mxnet.Symbol], lrs : Any, wds : Any, momentum : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, num_weights : Option[Int] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Update function for multi-precision Stochastic Gradient Descent (SDG) optimizer. + * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * + * + * Defined in src/operator/optimizer_op.cc:L417 + * }}} + * + * @param data Weights + * @param lrs Learning rates. + * @param wds Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. 
+ * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param num_weights Number of updated weights. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def multi_mp_sgd_update (data : Array[org.apache.mxnet.Symbol], lrs : Any, wds : Any, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, num_weights : Option[Int] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Momentum update function for Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * + * + * Defined in src/operator/optimizer_op.cc:L374 + * }}} + * + * @param data Weights, gradients and momentum + * @param lrs Learning rates. + * @param wds Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param momentum The decay rate of momentum estimates at each epoch. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param num_weights Number of updated weights. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def multi_sgd_mom_update (data : Array[org.apache.mxnet.Symbol], lrs : Any, wds : Any, momentum : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, num_weights : Option[Int] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Update function for Stochastic Gradient Descent (SDG) optimizer. + * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * + * + * Defined in src/operator/optimizer_op.cc:L329 + * }}} + * + * @param data Weights + * @param lrs Learning rates. + * @param wds Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param num_weights Number of updated weights. 
+ *
+ * @return org.apache.mxnet.Symbol
+ */
+@Experimental
+def multi_sgd_update (data : Array[org.apache.mxnet.Symbol], lrs : Any, wds : Any, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, num_weights : Option[Int] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol
+ /**
+ *
+ * {{{
+ *
+ * Compute the sums of squares of multiple arrays
+ *
+ *
+ * Defined in src/operator/contrib/multi_sum_sq.cc:L36
+ * }}}
+ *
+ * @param data Arrays
+ * @param num_arrays number of input arrays.
+ * @return org.apache.mxnet.Symbol
+ */
+@Experimental
+def multi_sum_sq (data : Array[org.apache.mxnet.Symbol], num_arrays : Int, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol
+ /**
+ *
+ * {{{
+ *
+ * Update function for Nesterov Accelerated Gradient (NAG) optimizer.
+ * It updates the weights using the following formula,
+ *
+ * .. math::
+ * v_t = \gamma v_{t-1} + \eta * \nabla J(W_{t-1} - \gamma v_{t-1})\\
+ * W_t = W_{t-1} - v_t
+ *
+ * Where
+ * :math:`\eta` is the learning rate of the optimizer
+ * :math:`\gamma` is the decay rate of the momentum estimate
+ * :math:`v_t` is the update vector at time step `t`
+ * :math:`W_t` is the weight vector at time step `t`
+ *
+ *
+ *
+ * Defined in src/operator/optimizer_op.cc:L726
+ * }}}
+ *
+ * @param weight Weight
+ * @param grad Gradient
+ * @param mom Momentum
+ * @param lr Learning rate
+ * @param momentum The decay rate of momentum estimates at each epoch.
+ * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight.
+ * @param rescale_grad Rescale gradient to grad = rescale_grad*grad.
+ * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient).
+ * @return org.apache.mxnet.Symbol
+ */
+@Experimental
+def nag_mom_update (weight : Option[org.apache.mxnet.Symbol] = None, grad : Option[org.apache.mxnet.Symbol] = None, mom : Option[org.apache.mxnet.Symbol] = None, lr : Float, momentum : Option[Float] = None, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol
+ /**
+ *
+ * {{{
+ *
+ * Computes the product of array elements over given axes treating Not a Numbers (``NaN``) as one.
+ *
+ *
+ *
+ * Defined in src/operator/tensor/broadcast_reduce_prod_value.cc:L47
+ * }}}
+ *
+ * @param data The input
+ * @param axis The axis or axes along which to perform the reduction.
+
+ The default, `axis=()`, will compute over all elements into a
+ scalar array with shape `(1,)`.
+
+ If `axis` is int, a reduction is performed on a particular axis.
+
+ If `axis` is a tuple of ints, a reduction is performed on all the axes
+ specified in the tuple.
+
+ If `exclude` is true, reduction will be performed on the axes that are
+ NOT in axis instead.
+
+ Negative values means indexing from right to left.
+ * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one.
+ * @param exclude Whether to perform reduction on axis that are NOT in axis instead.
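+ * @example Hedged Scala sketch (assumes `Symbol.api` exposure and that the axis tuple is passed as a `Shape`; names are illustrative):
+ *          {{{
+ *          val x = Symbol.Variable("data")
+ *          val p = Symbol.api.nanprod(data = Some(x), axis = Some(Shape(1)), keepdims = Some(false))
+ *          }}}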
+ * @return org.apache.mxnet.Symbol + */ +@Experimental +def nanprod (data : Option[org.apache.mxnet.Symbol] = None, axis : Option[org.apache.mxnet.Shape] = None, keepdims : Option[Boolean] = None, exclude : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the sum of array elements over given axes treating Not a Numbers (``NaN``) as zero. + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_sum_value.cc:L102 + * }}} + * + * @param data The input + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def nansum (data : Option[org.apache.mxnet.Symbol] = None, axis : Option[org.apache.mxnet.Shape] = None, keepdims : Option[Boolean] = None, exclude : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Numerical negative of the argument, element-wise. + * + * The storage type of ``negative`` output depends upon the input storage type: + * + * - negative(default) = default + * - negative(row_sparse) = row_sparse + * - negative(csr) = csr + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def negative (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the norm on an NDArray. + * + * This operator computes the norm on an NDArray with the specified axis, depending + * on the value of the ord parameter. By default, it computes the L2 norm on the entire + * array. Currently only ord=2 supports sparse ndarrays. + * + * Examples:: + * + * x = `[ `[ [1, 2], + * [3, 4] ], + * `[ [2, 2], + * [5, 6] ] ] + * + * norm(x, ord=2, axis=1) = `[ [3.1622777 4.472136 ] + * [5.3851647 6.3245554] ] + * + * norm(x, ord=1, axis=1) = `[ [4., 6.], + * [7., 8.] ] + * + * rsp = x.cast_storage('row_sparse') + * + * norm(rsp) = [5.47722578] + * + * csr = x.cast_storage('csr') + * + * norm(csr) = [5.47722578] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_norm_value.cc:L89 + * }}} + * + * @param data The input + * @param ord Order of the norm. Currently ord=1 and ord=2 is supported. + * @param axis The axis or axes along which to perform the reduction. + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + If `axis` is int, a reduction is performed on a particular axis. + If `axis` is a 2-tuple, it specifies the axes that hold 2-D matrices, + and the matrix norms of these matrices are computed. + * @param out_dtype The data type of the output. + * @param keepdims If this is set to `True`, the reduced axis is left in the result as dimension with size one. 
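+ * @example Hedged Scala sketch (assumes `Symbol.api` exposure; names are illustrative):
+ *          {{{
+ *          val x  = Symbol.Variable("data")
+ *          val l2 = Symbol.api.norm(data = Some(x), ord = Some(2), axis = Some(Shape(1)))
+ *          }}}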
+ * @return org.apache.mxnet.Symbol + */ +@Experimental +def norm (data : Option[org.apache.mxnet.Symbol] = None, ord : Option[Int] = None, axis : Option[org.apache.mxnet.Shape] = None, out_dtype : Option[String] = None, keepdims : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Draw random samples from a normal (Gaussian) distribution. + * + * .. note:: The existing alias ``normal`` is deprecated. + * + * Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale* + * (standard deviation). + * + * Example:: + * + * normal(loc=0, scale=1, shape=(2,2)) = `[ [ 1.89171135, -1.16881478], + * [-1.23474145, 1.55807114] ] + * + * + * Defined in src/operator/random/sample_op.cc:L113 + * }}} + * + * @param loc Mean of the distribution. + * @param scale Standard deviation of the distribution. + * @param shape Shape of the output. + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @return org.apache.mxnet.Symbol + */ +@Experimental +def normal (loc : Option[Float] = None, scale : Option[Float] = None, shape : Option[org.apache.mxnet.Shape] = None, ctx : Option[String] = None, dtype : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns a one-hot array. + * + * The locations represented by `indices` take value `on_value`, while all + * other locations take value `off_value`. + * + * `one_hot` operation with `indices` of shape ``(i0, i1)`` and `depth` of ``d`` would result + * in an output array of shape ``(i0, i1, d)`` with:: + * + * output[i,j,:] = off_value + * output[i,j,indices[i,j] ] = on_value + * + * Examples:: + * + * one_hot([1,0,2,0], 3) = `[ [ 0. 1. 0.] + * [ 1. 0. 0.] + * [ 0. 0. 1.] + * [ 1. 0. 0.] ] + * + * one_hot([1,0,2,0], 3, on_value=8, off_value=1, + * dtype='int32') = `[ [1 8 1] + * [8 1 1] + * [1 1 8] + * [8 1 1] ] + * + * one_hot(`[ [1,0],[1,0],[2,0] ], 3) = `[ `[ [ 0. 1. 0.] + * [ 1. 0. 0.] ] + * + * `[ [ 0. 1. 0.] + * [ 1. 0. 0.] ] + * + * `[ [ 0. 0. 1.] + * [ 1. 0. 0.] ] ] + * + * + * Defined in src/operator/tensor/indexing_op.cc:L824 + * }}} + * + * @param indices array of locations where to set on_value + * @param depth Depth of the one hot dimension. + * @param on_value The value assigned to the locations represented by indices. + * @param off_value The value assigned to the locations not represented by indices. + * @param dtype DType of the output + * @return org.apache.mxnet.Symbol + */ +@Experimental +def one_hot (indices : Option[org.apache.mxnet.Symbol] = None, depth : Int, on_value : Option[Double] = None, off_value : Option[Double] = None, dtype : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Return an array of ones with the same shape and type + * as the input array. + * + * Examples:: + * + * x = `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * + * ones_like(x) = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] 
] + * }}} + * + * @param data The input + * @return org.apache.mxnet.Symbol + */ +@Experimental +def ones_like (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Pads an input array with a constant or edge values of the array. + * + * .. note:: `Pad` is deprecated. Use `pad` instead. + * + * .. note:: Current implementation only supports 4D and 5D input arrays with padding applied + * only on axes 1, 2 and 3. Expects axes 4 and 5 in `pad_width` to be zero. + * + * This operation pads an input array with either a `constant_value` or edge values + * along each axis of the input array. The amount of padding is specified by `pad_width`. + * + * `pad_width` is a tuple of integer padding widths for each axis of the format + * ``(before_1, after_1, ... , before_N, after_N)``. The `pad_width` should be of length ``2*N`` + * where ``N`` is the number of dimensions of the array. + * + * For dimension ``N`` of the input array, ``before_N`` and ``after_N`` indicates how many values + * to add before and after the elements of the array along dimension ``N``. + * The widths of the higher two dimensions ``before_1``, ``after_1``, ``before_2``, + * ``after_2`` must be 0. + * + * Example:: + * + * x = `[ [`[ [ 1. 2. 3.] + * [ 4. 5. 6.] ] + * + * `[ [ 7. 8. 9.] + * [ 10. 11. 12.] ] ] + * + * + * `[ `[ [ 11. 12. 13.] + * [ 14. 15. 16.] ] + * + * `[ [ 17. 18. 19.] + * [ 20. 21. 22.] ] ] ] + * + * pad(x,mode="edge", pad_width=(0,0,0,0,1,1,1,1)) = + * + * `[ [`[ [ 1. 1. 2. 3. 3.] + * [ 1. 1. 2. 3. 3.] + * [ 4. 4. 5. 6. 6.] + * [ 4. 4. 5. 6. 6.] ] + * + * `[ [ 7. 7. 8. 9. 9.] + * [ 7. 7. 8. 9. 9.] + * [ 10. 10. 11. 12. 12.] + * [ 10. 10. 11. 12. 12.] ] ] + * + * + * `[ `[ [ 11. 11. 12. 13. 13.] + * [ 11. 11. 12. 13. 13.] + * [ 14. 14. 15. 16. 16.] + * [ 14. 14. 15. 16. 16.] ] + * + * `[ [ 17. 17. 18. 19. 19.] + * [ 17. 17. 18. 19. 19.] + * [ 20. 20. 21. 22. 22.] + * [ 20. 20. 21. 22. 22.] ] ] ] + * + * pad(x, mode="constant", constant_value=0, pad_width=(0,0,0,0,1,1,1,1)) = + * + * `[ [`[ [ 0. 0. 0. 0. 0.] + * [ 0. 1. 2. 3. 0.] + * [ 0. 4. 5. 6. 0.] + * [ 0. 0. 0. 0. 0.] ] + * + * `[ [ 0. 0. 0. 0. 0.] + * [ 0. 7. 8. 9. 0.] + * [ 0. 10. 11. 12. 0.] + * [ 0. 0. 0. 0. 0.] ] ] + * + * + * `[ `[ [ 0. 0. 0. 0. 0.] + * [ 0. 11. 12. 13. 0.] + * [ 0. 14. 15. 16. 0.] + * [ 0. 0. 0. 0. 0.] ] + * + * `[ [ 0. 0. 0. 0. 0.] + * [ 0. 17. 18. 19. 0.] + * [ 0. 20. 21. 22. 0.] + * [ 0. 0. 0. 0. 0.] ] ] ] + * + * + * + * + * Defined in src/operator/pad.cc:L766 + * }}} + * + * @param data An n-dimensional input array. + * @param mode Padding type to use. "constant" pads with `constant_value` "edge" pads using the edge values of the input array "reflect" pads by reflecting values with respect to the edges. + * @param pad_width Widths of the padding regions applied to the edges of each axis. It is a tuple of integer padding widths for each axis of the format ``(before_1, after_1, ... , before_N, after_N)``. It should be of length ``2*N`` where ``N`` is the number of dimensions of the array.This is equivalent to pad_width in numpy.pad, but flattened. + * @param constant_value The value used for padding when `mode` is "constant". 
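+ * @example Hedged Scala sketch (assumes `Symbol.api` exposure; `"data"` is an illustrative name and must be 4-D or 5-D at bind time):
+ *          {{{
+ *          val x      = Symbol.Variable("data")
+ *          val padded = Symbol.api.pad(data = Some(x), mode = "constant",
+ *                                      pad_width = Shape(0, 0, 0, 0, 1, 1, 1, 1),
+ *                                      constant_value = Some(0.0))
+ *          }}}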
+ * @return org.apache.mxnet.Symbol + */ +@Experimental +def pad (data : Option[org.apache.mxnet.Symbol] = None, mode : String, pad_width : org.apache.mxnet.Shape, constant_value : Option[Double] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Picks elements from an input array according to the input indices along the given axis. + * + * Given an input array of shape ``(d0, d1)`` and indices of shape ``(i0,)``, the result will be + * an output array of shape ``(i0,)`` with:: + * + * output[i] = input[i, indices[i] ] + * + * By default, if any index mentioned is too large, it is replaced by the index that addresses + * the last element along an axis (the `clip` mode). + * + * This function supports n-dimensional input and (n-1)-dimensional indices arrays. + * + * Examples:: + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] ] + * + * // picks elements with specified indices along axis 0 + * pick(x, y=[0,1], 0) = [ 1., 4.] + * + * // picks elements with specified indices along axis 1 + * pick(x, y=[0,1,0], 1) = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] ] + * + * // picks elements with specified indices along axis 1 using 'wrap' mode + * // to place indicies that would normally be out of bounds + * pick(x, y=[2,-1,-2], 1, mode='wrap') = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] ] + * + * // picks elements with specified indices along axis 1 and dims are maintained + * pick(x,y, 1, keepdims=True) = `[ [ 2.], + * [ 3.], + * [ 6.] ] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L155 + * }}} + * + * @param data The input array + * @param index The index array + * @param axis int or None. The axis to picking the elements. Negative values means indexing from right to left. If is `None`, the elements in the index w.r.t the flattened input will be picked. + * @param keepdims If true, the axis where we pick the elements is left in the result as dimension with size one. + * @param mode Specify how out-of-bound indices behave. Default is "clip". "clip" means clip to the range. So, if all indices mentioned are too large, they are replaced by the index that addresses the last element along an axis. "wrap" means to wrap around. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def pick (data : Option[org.apache.mxnet.Symbol] = None, index : Option[org.apache.mxnet.Symbol] = None, axis : Option[Int] = None, keepdims : Option[Boolean] = None, mode : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Momentum update function for multi-precision Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * + * + * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L200 + * }}} + * + * @param data Weights, gradients, momentums, learning rates and weight decays + * @param momentum The decay rate of momentum estimates at each epoch. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. 
+ * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient).
+ * @param num_weights Number of updated weights.
+ * @return org.apache.mxnet.Symbol
+ */
+@Experimental
+def preloaded_multi_mp_sgd_mom_update (data : Array[org.apache.mxnet.Symbol], momentum : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, num_weights : Option[Int] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol
+ /**
+ *
+ * {{{
+ *
+ * Update function for multi-precision Stochastic Gradient Descent (SGD) optimizer.
+ *
+ * It updates the weights using::
+ *
+ *   weight = weight - learning_rate * (gradient + wd * weight)
+ *
+ *
+ *
+ * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L140
+ * }}}
+ *
+ * @param data Weights, gradients, learning rates and weight decays
+ * @param rescale_grad Rescale gradient to grad = rescale_grad*grad.
+ * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient).
+ * @param num_weights Number of updated weights.
+ * @return org.apache.mxnet.Symbol
+ */
+@Experimental
+def preloaded_multi_mp_sgd_update (data : Array[org.apache.mxnet.Symbol], rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, num_weights : Option[Int] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol
+ /**
+ *
+ * {{{
+ *
+ * Momentum update function for Stochastic Gradient Descent (SGD) optimizer.
+ *
+ * Momentum update has better convergence rates on neural networks. Mathematically it looks
+ * like below:
+ *
+ * .. math::
+ *
+ *   v_1 = \alpha * \nabla J(W_0)\\
+ *   v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\
+ *   W_t = W_{t-1} + v_t
+ *
+ * It updates the weights using::
+ *
+ *   v = momentum * v - learning_rate * gradient
+ *   weight += v
+ *
+ * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch.
+ *
+ *
+ *
+ * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L91
+ * }}}
+ *
+ * @param data Weights, gradients, momentum, learning rates and weight decays
+ * @param momentum The decay rate of momentum estimates at each epoch.
+ * @param rescale_grad Rescale gradient to grad = rescale_grad*grad.
+ * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient).
+ * @param num_weights Number of updated weights.
+ * @return org.apache.mxnet.Symbol
+ */
+@Experimental
+def preloaded_multi_sgd_mom_update (data : Array[org.apache.mxnet.Symbol], momentum : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, num_weights : Option[Int] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol
+ /**
+ *
+ * {{{
+ *
+ * Update function for Stochastic Gradient Descent (SGD) optimizer.
+ *
+ * It updates the weights using::
+ *
+ *   weight = weight - learning_rate * (gradient + wd * weight)
+ *
+ *
+ *
+ * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L42
+ * }}}
+ *
+ * @param data Weights, gradients, learning rates and weight decays
+ * @param rescale_grad Rescale gradient to grad = rescale_grad*grad.
+ * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param num_weights Number of updated weights. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def preloaded_multi_sgd_update (data : Array[org.apache.mxnet.Symbol], rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, num_weights : Option[Int] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the product of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L31 + * }}} + * + * @param data The input + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def prod (data : Option[org.apache.mxnet.Symbol] = None, axis : Option[org.apache.mxnet.Shape] = None, keepdims : Option[Boolean] = None, exclude : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Converts each element of the input array from degrees to radians. + * + * .. math:: + * radians([0, 90, 180, 270, 360]) = [0, \pi/2, \pi, 3\pi/2, 2\pi] + * + * The storage type of ``radians`` output depends upon the input storage type: + * + * - radians(default) = default + * - radians(row_sparse) = row_sparse + * - radians(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L293 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def radians (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Draw random samples from an exponential distribution. + * + * Samples are distributed according to an exponential distribution parametrized by *lambda* (rate). + * + * Example:: + * + * exponential(lam=4, shape=(2,2)) = `[ [ 0.0097189 , 0.08999364], + * [ 0.04146638, 0.31715935] ] + * + * + * Defined in src/operator/random/sample_op.cc:L137 + * }}} + * + * @param lam Lambda parameter (rate) of the exponential distribution. + * @param shape Shape of the output. + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @return org.apache.mxnet.Symbol + */ +@Experimental +def random_exponential (lam : Option[Float] = None, shape : Option[org.apache.mxnet.Shape] = None, ctx : Option[String] = None, dtype : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Draw random samples from a gamma distribution. 
+ * + * Samples are distributed according to a gamma distribution parametrized by *alpha* (shape) and *beta* (scale). + * + * Example:: + * + * gamma(alpha=9, beta=0.5, shape=(2,2)) = `[ [ 7.10486984, 3.37695289], + * [ 3.91697288, 3.65933681] ] + * + * + * Defined in src/operator/random/sample_op.cc:L125 + * }}} + * + * @param alpha Alpha parameter (shape) of the gamma distribution. + * @param beta Beta parameter (scale) of the gamma distribution. + * @param shape Shape of the output. + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @return org.apache.mxnet.Symbol + */ +@Experimental +def random_gamma (alpha : Option[Float] = None, beta : Option[Float] = None, shape : Option[org.apache.mxnet.Shape] = None, ctx : Option[String] = None, dtype : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Draw random samples from a generalized negative binomial distribution. + * + * Samples are distributed according to a generalized negative binomial distribution parametrized by + * *mu* (mean) and *alpha* (dispersion). *alpha* is defined as *1/k* where *k* is the failure limit of the + * number of unsuccessful experiments (generalized to real numbers). + * Samples will always be returned as a floating point data type. + * + * Example:: + * + * generalized_negative_binomial(mu=2.0, alpha=0.3, shape=(2,2)) = `[ [ 2., 1.], + * [ 6., 4.] ] + * + * + * Defined in src/operator/random/sample_op.cc:L179 + * }}} + * + * @param mu Mean of the negative binomial distribution. + * @param alpha Alpha (dispersion) parameter of the negative binomial distribution. + * @param shape Shape of the output. + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @return org.apache.mxnet.Symbol + */ +@Experimental +def random_generalized_negative_binomial (mu : Option[Float] = None, alpha : Option[Float] = None, shape : Option[org.apache.mxnet.Shape] = None, ctx : Option[String] = None, dtype : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Draw random samples from a negative binomial distribution. + * + * Samples are distributed according to a negative binomial distribution parametrized by + * *k* (limit of unsuccessful experiments) and *p* (failure probability in each experiment). + * Samples will always be returned as a floating point data type. + * + * Example:: + * + * negative_binomial(k=3, p=0.4, shape=(2,2)) = `[ [ 4., 7.], + * [ 2., 5.] ] + * + * + * Defined in src/operator/random/sample_op.cc:L164 + * }}} + * + * @param k Limit of unsuccessful experiments. + * @param p Failure probability in each experiment. + * @param shape Shape of the output. + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). 
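+ * @note A hedged usage sketch (assuming the concrete `SymbolAPI` object generated from this base class; the argument values are illustrative)::
+ *
+ *   import org.apache.mxnet.{Shape, SymbolAPI}
+ *
+ *   // 2x2 draws from NB(k=3, p=0.4), as in the example above
+ *   val s = SymbolAPI.random_negative_binomial(k = Some(3), p = Some(0.4f), shape = Some(Shape(2, 2)))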
+ * @return org.apache.mxnet.Symbol + */ +@Experimental +def random_negative_binomial (k : Option[Int] = None, p : Option[Float] = None, shape : Option[org.apache.mxnet.Shape] = None, ctx : Option[String] = None, dtype : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Draw random samples from a normal (Gaussian) distribution. + * + * .. note:: The existing alias ``normal`` is deprecated. + * + * Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale* + * (standard deviation). + * + * Example:: + * + * normal(loc=0, scale=1, shape=(2,2)) = `[ [ 1.89171135, -1.16881478], + * [-1.23474145, 1.55807114] ] + * + * + * Defined in src/operator/random/sample_op.cc:L113 + * }}} + * + * @param loc Mean of the distribution. + * @param scale Standard deviation of the distribution. + * @param shape Shape of the output. + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @return org.apache.mxnet.Symbol + */ +@Experimental +def random_normal (loc : Option[Float] = None, scale : Option[Float] = None, shape : Option[org.apache.mxnet.Shape] = None, ctx : Option[String] = None, dtype : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * Dirichlet distributions with parameter *alpha*. + * + * The shape of *alpha* must match the leftmost subshape of *sample*. That is, *sample* + * can have the same shape as *alpha*, in which case the output contains one density per + * distribution, or *sample* can be a tensor of tensors with that shape, in which case + * the output is a tensor of densities such that the densities at index *i* in the output + * are given by the samples at index *i* in *sample* parameterized by the value of *alpha* + * at index *i*. + * + * Examples:: + * + * random_pdf_dirichlet(sample=`[ [1,2],[2,3],[3,4] ], alpha=[2.5, 2.5]) = + * [38.413498, 199.60245, 564.56085] + * + * sample = `[ `[ [1, 2, 3], [10, 20, 30], [100, 200, 300] ], + * `[ [0.1, 0.2, 0.3], [0.01, 0.02, 0.03], [0.001, 0.002, 0.003] ] ] + * + * random_pdf_dirichlet(sample=sample, alpha=[0.1, 0.4, 0.9]) = + * `[ [2.3257459e-02, 5.8420084e-04, 1.4674458e-05], + * [9.2589635e-01, 3.6860607e+01, 1.4674468e+03] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L315 + * }}} + * + * @param sample Samples from the distributions. + * @param alpha Concentration parameters of the distributions. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def random_pdf_dirichlet (sample : Option[org.apache.mxnet.Symbol] = None, alpha : Option[org.apache.mxnet.Symbol] = None, is_log : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * exponential distributions with parameters *lam* (rate). + * + * The shape of *lam* must match the leftmost subshape of *sample*. 
That is, *sample* + * can have the same shape as *lam*, in which case the output contains one density per + * distribution, or *sample* can be a tensor of tensors with that shape, in which case + * the output is a tensor of densities such that the densities at index *i* in the output + * are given by the samples at index *i* in *sample* parameterized by the value of *lam* + * at index *i*. + * + * Examples:: + * + * random_pdf_exponential(sample=`[ [1, 2, 3] ], lam=[1]) = + * `[ [0.36787945, 0.13533528, 0.04978707] ] + * + * sample = `[ [1,2,3], + * [1,2,3], + * [1,2,3] ] + * + * random_pdf_exponential(sample=sample, lam=[1,0.5,0.25]) = + * `[ [0.36787945, 0.13533528, 0.04978707], + * [0.30326533, 0.18393973, 0.11156508], + * [0.1947002, 0.15163267, 0.11809164] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L304 + * }}} + * + * @param sample Samples from the distributions. + * @param lam Lambda (rate) parameters of the distributions. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def random_pdf_exponential (sample : Option[org.apache.mxnet.Symbol] = None, lam : Option[org.apache.mxnet.Symbol] = None, is_log : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * gamma distributions with parameters *alpha* (shape) and *beta* (rate). + * + * *alpha* and *beta* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *alpha* and *beta*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *alpha* and *beta* at index *i*. + * + * Examples:: + * + * random_pdf_gamma(sample=`[ [1,2,3,4,5] ], alpha=[5], beta=[1]) = + * `[ [0.01532831, 0.09022352, 0.16803136, 0.19536681, 0.17546739] ] + * + * sample = `[ [1, 2, 3, 4, 5], + * [2, 3, 4, 5, 6], + * [3, 4, 5, 6, 7] ] + * + * random_pdf_gamma(sample=sample, alpha=[5,6,7], beta=[1,1,1]) = + * `[ [0.01532831, 0.09022352, 0.16803136, 0.19536681, 0.17546739], + * [0.03608941, 0.10081882, 0.15629345, 0.17546739, 0.16062315], + * [0.05040941, 0.10419563, 0.14622283, 0.16062315, 0.14900276] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L301 + * }}} + * + * @param sample Samples from the distributions. + * @param alpha Alpha (shape) parameters of the distributions. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @param beta Beta (scale) parameters of the distributions. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def random_pdf_gamma (sample : Option[org.apache.mxnet.Symbol] = None, alpha : Option[org.apache.mxnet.Symbol] = None, is_log : Option[Boolean] = None, beta : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * generalized negative binomial distributions with parameters *mu* (mean) + * and *alpha* (dispersion). This can be understood as a reparameterization of + * the negative binomial, where *k* = *1 / alpha* and *p* = *1 / (mu \* alpha + 1)*. 
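+ * For example, *mu* = 1 and *alpha* = 1 correspond to *k* = 1 and *p* = 0.5, so the densities at
+ * the samples 1, 2, 3 and 4 are 0.25, 0.125, 0.0625 and 0.03125, as in the first example below.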
+ * + * *mu* and *alpha* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *mu* and *alpha*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *mu* and *alpha* at index *i*. + * + * Examples:: + * + * random_pdf_generalized_negative_binomial(sample=`[ [1, 2, 3, 4] ], alpha=[1], mu=[1]) = + * `[ [0.25, 0.125, 0.0625, 0.03125] ] + * + * sample = `[ [1,2,3,4], + * [1,2,3,4] ] + * random_pdf_generalized_negative_binomial(sample=sample, alpha=[1, 0.6666], mu=[1, 1.5]) = + * `[ [0.25, 0.125, 0.0625, 0.03125 ], + * [0.26517063, 0.16573331, 0.09667706, 0.05437994] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L311 + * }}} + * + * @param sample Samples from the distributions. + * @param mu Means of the distributions. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @param alpha Alpha (dispersion) parameters of the distributions. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def random_pdf_generalized_negative_binomial (sample : Option[org.apache.mxnet.Symbol] = None, mu : Option[org.apache.mxnet.Symbol] = None, is_log : Option[Boolean] = None, alpha : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the value of the PDF of samples of + * negative binomial distributions with parameters *k* (failure limit) and *p* (failure probability). + * + * *k* and *p* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *k* and *p*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *k* and *p* at index *i*. + * + * Examples:: + * + * random_pdf_negative_binomial(sample=`[ [1,2,3,4] ], k=[1], p=a[0.5]) = + * `[ [0.25, 0.125, 0.0625, 0.03125] ] + * + * # Note that k may be real-valued + * sample = `[ [1,2,3,4], + * [1,2,3,4] ] + * random_pdf_negative_binomial(sample=sample, k=[1, 1.5], p=[0.5, 0.5]) = + * `[ [0.25, 0.125, 0.0625, 0.03125 ], + * [0.26516506, 0.16572815, 0.09667476, 0.05437956] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L308 + * }}} + * + * @param sample Samples from the distributions. + * @param k Limits of unsuccessful experiments. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @param p Failure probabilities in each experiment. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def random_pdf_negative_binomial (sample : Option[org.apache.mxnet.Symbol] = None, k : Option[org.apache.mxnet.Symbol] = None, is_log : Option[Boolean] = None, p : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * normal distributions with parameters *mu* (mean) and *sigma* (standard deviation). 
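+ *
+ * Concretely, each output element is the normal density
+ * :math:`\frac{1}{\sqrt{2\pi}\,\sigma}\exp\left(-\frac{(x-\mu)^2}{2\sigma^2}\right)`
+ * evaluated at the corresponding sample.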
+ * + * *mu* and *sigma* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *mu* and *sigma*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *mu* and *sigma* at index *i*. + * + * Examples:: + * + * sample = `[ [-2, -1, 0, 1, 2] ] + * random_pdf_normal(sample=sample, mu=[0], sigma=[1]) = + * `[ [0.05399097, 0.24197073, 0.3989423, 0.24197073, 0.05399097] ] + * + * random_pdf_normal(sample=sample*2, mu=[0,0], sigma=[1,2]) = + * `[ [0.05399097, 0.24197073, 0.3989423, 0.24197073, 0.05399097], + * [0.12098537, 0.17603266, 0.19947115, 0.17603266, 0.12098537] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L299 + * }}} + * + * @param sample Samples from the distributions. + * @param mu Means of the distributions. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @param sigma Standard deviations of the distributions. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def random_pdf_normal (sample : Option[org.apache.mxnet.Symbol] = None, mu : Option[org.apache.mxnet.Symbol] = None, is_log : Option[Boolean] = None, sigma : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * Poisson distributions with parameters *lam* (rate). + * + * The shape of *lam* must match the leftmost subshape of *sample*. That is, *sample* + * can have the same shape as *lam*, in which case the output contains one density per + * distribution, or *sample* can be a tensor of tensors with that shape, in which case + * the output is a tensor of densities such that the densities at index *i* in the output + * are given by the samples at index *i* in *sample* parameterized by the value of *lam* + * at index *i*. + * + * Examples:: + * + * random_pdf_poisson(sample=`[ [0,1,2,3] ], lam=[1]) = + * `[ [0.36787945, 0.36787945, 0.18393973, 0.06131324] ] + * + * sample = `[ [0,1,2,3], + * [0,1,2,3], + * [0,1,2,3] ] + * + * random_pdf_poisson(sample=sample, lam=[1,2,3]) = + * `[ [0.36787945, 0.36787945, 0.18393973, 0.06131324], + * [0.13533528, 0.27067056, 0.27067056, 0.18044704], + * [0.04978707, 0.14936121, 0.22404182, 0.22404182] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L306 + * }}} + * + * @param sample Samples from the distributions. + * @param lam Lambda (rate) parameters of the distributions. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def random_pdf_poisson (sample : Option[org.apache.mxnet.Symbol] = None, lam : Option[org.apache.mxnet.Symbol] = None, is_log : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * uniform distributions on the intervals given by *[low,high)*. + * + * *low* and *high* must have the same shape, which must match the leftmost subshape + * of *sample*. 
That is, *sample* can have the same shape as *low* and *high*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *low* and *high* at index *i*. + * + * Examples:: + * + * random_pdf_uniform(sample=`[ [1,2,3,4] ], low=[0], high=[10]) = [0.1, 0.1, 0.1, 0.1] + * + * sample = `[ `[ [1, 2, 3], + * [1, 2, 3] ], + * `[ [1, 2, 3], + * [1, 2, 3] ] ] + * low = `[ [0, 0], + * [0, 0] ] + * high = `[ [ 5, 10], + * [15, 20] ] + * random_pdf_uniform(sample=sample, low=low, high=high) = + * `[ `[ [0.2, 0.2, 0.2 ], + * [0.1, 0.1, 0.1 ] ], + * `[ [0.06667, 0.06667, 0.06667], + * [0.05, 0.05, 0.05 ] ] ] + * + * + * + * Defined in src/operator/random/pdf_op.cc:L297 + * }}} + * + * @param sample Samples from the distributions. + * @param low Lower bounds of the distributions. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @param high Upper bounds of the distributions. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def random_pdf_uniform (sample : Option[org.apache.mxnet.Symbol] = None, low : Option[org.apache.mxnet.Symbol] = None, is_log : Option[Boolean] = None, high : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Draw random samples from a Poisson distribution. + * + * Samples are distributed according to a Poisson distribution parametrized by *lambda* (rate). + * Samples will always be returned as a floating point data type. + * + * Example:: + * + * poisson(lam=4, shape=(2,2)) = `[ [ 5., 2.], + * [ 4., 6.] ] + * + * + * Defined in src/operator/random/sample_op.cc:L150 + * }}} + * + * @param lam Lambda parameter (rate) of the Poisson distribution. + * @param shape Shape of the output. + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @return org.apache.mxnet.Symbol + */ +@Experimental +def random_poisson (lam : Option[Float] = None, shape : Option[org.apache.mxnet.Shape] = None, ctx : Option[String] = None, dtype : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Draw random samples from a discrete uniform distribution. + * + * Samples are uniformly distributed over the half-open interval *[low, high)* + * (includes *low*, but excludes *high*). + * + * Example:: + * + * randint(low=0, high=5, shape=(2,2)) = `[ [ 0, 2], + * [ 3, 1] ] + * + * + * + * Defined in src/operator/random/sample_op.cc:L194 + * }}} + * + * @param low Lower bound of the distribution. + * @param high Upper bound of the distribution. + * @param shape Shape of the output. + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + * @param dtype DType of the output in case this can't be inferred. Defaults to int32 if not defined (dtype=None). 
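+ * @note A hedged usage sketch (assuming the concrete `SymbolAPI` object generated from this base class; bounds and shape are illustrative)::
+ *
+ *   import org.apache.mxnet.{Shape, SymbolAPI}
+ *
+ *   // 2x2 integers drawn uniformly from [0, 5)
+ *   val r = SymbolAPI.random_randint(low = 0L, high = 5L, shape = Some(Shape(2, 2)))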
+ * @return org.apache.mxnet.Symbol + */ +@Experimental +def random_randint (low : Long, high : Long, shape : Option[org.apache.mxnet.Shape] = None, ctx : Option[String] = None, dtype : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Draw random samples from a uniform distribution. + * + * .. note:: The existing alias ``uniform`` is deprecated. + * + * Samples are uniformly distributed over the half-open interval *[low, high)* + * (includes *low*, but excludes *high*). + * + * Example:: + * + * uniform(low=0, high=1, shape=(2,2)) = `[ [ 0.60276335, 0.85794562], + * [ 0.54488319, 0.84725171] ] + * + * + * + * Defined in src/operator/random/sample_op.cc:L96 + * }}} + * + * @param low Lower bound of the distribution. + * @param high Upper bound of the distribution. + * @param shape Shape of the output. + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @return org.apache.mxnet.Symbol + */ +@Experimental +def random_uniform (low : Option[Float] = None, high : Option[Float] = None, shape : Option[org.apache.mxnet.Shape] = None, ctx : Option[String] = None, dtype : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Converts a batch of index arrays into an array of flat indices. The operator follows numpy conventions so a single multi index is given by a column of the input matrix. The leading dimension may be left unspecified by using -1 as placeholder. + * + * Examples:: + * + * A = `[ [3,6,6],[4,5,1] ] + * ravel(A, shape=(7,6)) = [22,41,37] + * ravel(A, shape=(-1,6)) = [22,41,37] + * + * + * + * Defined in src/operator/tensor/ravel.cc:L42 + * }}} + * + * @param data Batch of multi-indices + * @param shape Shape of the array into which the multi-indices apply. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def ravel_multi_index (data : Option[org.apache.mxnet.Symbol] = None, shape : Option[org.apache.mxnet.Shape] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise inverse cube-root value of the input. + * + * .. math:: + * rcbrt(x) = 1/\sqrt[3]{x} + * + * Example:: + * + * rcbrt([1,8,-125]) = [1.0, 0.5, -0.2] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L269 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def rcbrt (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns the reciprocal of the argument, element-wise. + * + * Calculates 1/x. + * + * Example:: + * + * reciprocal([-2, 1, 3, 1.6, 0.2]) = [-0.5, 1.0, 0.33333334, 0.625, 5.0] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L42 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def reciprocal (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes rectified linear activation. + * + * .. 
math:: + * max(features, 0) + * + * The storage type of ``relu`` output depends upon the input storage type: + * + * - relu(default) = default + * - relu(row_sparse) = row_sparse + * - relu(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L85 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def relu (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Repeats elements of an array. + * By default, ``repeat`` flattens the input array into 1-D and then repeats the + * elements:: + * x = `[ [ 1, 2], + * [ 3, 4] ] + * repeat(x, repeats=2) = [ 1., 1., 2., 2., 3., 3., 4., 4.] + * The parameter ``axis`` specifies the axis along which to perform repeat:: + * repeat(x, repeats=2, axis=1) = `[ [ 1., 1., 2., 2.], + * [ 3., 3., 4., 4.] ] + * repeat(x, repeats=2, axis=0) = `[ [ 1., 2.], + * [ 1., 2.], + * [ 3., 4.], + * [ 3., 4.] ] + * repeat(x, repeats=2, axis=-1) = `[ [ 1., 1., 2., 2.], + * [ 3., 3., 4., 4.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L744 + * }}} + * + * @param data Input data array + * @param repeats The number of repetitions for each element. + * @param axis The axis along which to repeat values. The negative numbers are interpreted counting from the backward. By default, use the flattened input array, and return a flat output array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def repeat (data : Option[org.apache.mxnet.Symbol] = None, repeats : Int, axis : Option[Int] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Set to zero multiple arrays + * + * + * Defined in src/operator/contrib/reset_arrays.cc:L36 + * }}} + * + * @param data Arrays + * @param num_arrays number of input arrays. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def reset_arrays (data : Array[org.apache.mxnet.Symbol], num_arrays : Int, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Reshapes the input array. + * .. note:: ``Reshape`` is deprecated, use ``reshape`` + * Given an array and a shape, this function returns a copy of the array in the new shape. + * The shape is a tuple of integers such as (2,3,4). The size of the new shape should be same as the size of the input array. + * Example:: + * reshape([1,2,3,4], shape=(2,2)) = `[ [1,2], [3,4] ] + * Some dimensions of the shape can take special values from the set {0, -1, -2, -3, -4}. The significance of each is explained below: + * - ``0`` copy this dimension from the input to the output shape. + * Example:: + * - input shape = (2,3,4), shape = (4,0,2), output shape = (4,3,2) + * - input shape = (2,3,4), shape = (2,0,0), output shape = (2,3,4) + * - ``-1`` infers the dimension of the output shape by using the remainder of the input dimensions + * keeping the size of the new array same as that of the input array. + * At most one dimension of shape can be -1. + * Example:: + * - input shape = (2,3,4), shape = (6,1,-1), output shape = (6,1,4) + * - input shape = (2,3,4), shape = (3,-1,8), output shape = (3,1,8) + * - input shape = (2,3,4), shape=(-1,), output shape = (24,) + * - ``-2`` copy all/remainder of the input dimensions to the output shape. 
+ * Example:: + * - input shape = (2,3,4), shape = (-2,), output shape = (2,3,4) + * - input shape = (2,3,4), shape = (2,-2), output shape = (2,3,4) + * - input shape = (2,3,4), shape = (-2,1,1), output shape = (2,3,4,1,1) + * - ``-3`` use the product of two consecutive dimensions of the input shape as the output dimension. + * Example:: + * - input shape = (2,3,4), shape = (-3,4), output shape = (6,4) + * - input shape = (2,3,4,5), shape = (-3,-3), output shape = (6,20) + * - input shape = (2,3,4), shape = (0,-3), output shape = (2,12) + * - input shape = (2,3,4), shape = (-3,-2), output shape = (6,4) + * - ``-4`` split one dimension of the input into two dimensions passed subsequent to -4 in shape (can contain -1). + * Example:: + * - input shape = (2,3,4), shape = (-4,1,2,-2), output shape =(1,2,3,4) + * - input shape = (2,3,4), shape = (2,-4,-1,3,-2), output shape = (2,1,3,4) + * If the argument `reverse` is set to 1, then the special values are inferred from right to left. + * Example:: + * - without reverse=1, for input shape = (10,5,4), shape = (-1,0), output shape would be (40,5) + * - with reverse=1, output shape will be (50,4). + * + * + * Defined in src/operator/tensor/matrix_op.cc:L175 + * }}} + * + * @param data Input data to reshape. + * @param shape The target shape + * @param reverse If true then the special values are inferred from right to left + * @param target_shape (Deprecated! Use ``shape`` instead.) Target new shape. One and only one dim can be 0, in which case it will be inferred from the rest of dims + * @param keep_highest (Deprecated! Use ``shape`` instead.) Whether keep the highest dim unchanged.If set to true, then the first dim in target_shape is ignored,and always fixed as input + * @return org.apache.mxnet.Symbol + */ +@Experimental +def reshape (data : Option[org.apache.mxnet.Symbol] = None, shape : Option[org.apache.mxnet.Shape] = None, reverse : Option[Boolean] = None, target_shape : Option[org.apache.mxnet.Shape] = None, keep_highest : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Reshape some or all dimensions of `lhs` to have the same shape as some or all dimensions of `rhs`. + * + * Returns a **view** of the `lhs` array with a new shape without altering any data. + * + * Example:: + * + * x = [1, 2, 3, 4, 5, 6] + * y = `[ [0, -4], [3, 2], [2, 2] ] + * reshape_like(x, y) = `[ [1, 2], [3, 4], [5, 6] ] + * + * More precise control over how dimensions are inherited is achieved by specifying \ + * slices over the `lhs` and `rhs` array dimensions. Only the sliced `lhs` dimensions \ + * are reshaped to the `rhs` sliced dimensions, with the non-sliced `lhs` dimensions staying the same. + * + * Examples:: + * + * - lhs shape = (30,7), rhs shape = (15,2,4), lhs_begin=0, lhs_end=1, rhs_begin=0, rhs_end=2, output shape = (15,2,7) + * - lhs shape = (3, 5), rhs shape = (1,15,4), lhs_begin=0, lhs_end=2, rhs_begin=1, rhs_end=2, output shape = (15) + * + * Negative indices are supported, and `None` can be used for either `lhs_end` or `rhs_end` to indicate the end of the range. + * + * Example:: + * + * - lhs shape = (30, 12), rhs shape = (4, 2, 2, 3), lhs_begin=-1, lhs_end=None, rhs_begin=1, rhs_end=None, output shape = (30, 2, 2, 3) + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L513 + * }}} + * + * @param lhs First input. + * @param rhs Second input. + * @param lhs_begin Defaults to 0. The beginning index along which the lhs dimensions are to be reshaped. 
Supports negative indices. + * @param lhs_end Defaults to None. The ending index along which the lhs dimensions are to be used for reshaping. Supports negative indices. + * @param rhs_begin Defaults to 0. The beginning index along which the rhs dimensions are to be used for reshaping. Supports negative indices. + * @param rhs_end Defaults to None. The ending index along which the rhs dimensions are to be used for reshaping. Supports negative indices. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def reshape_like (lhs : Option[org.apache.mxnet.Symbol] = None, rhs : Option[org.apache.mxnet.Symbol] = None, lhs_begin : Option[Int] = None, lhs_end : Option[Int] = None, rhs_begin : Option[Int] = None, rhs_end : Option[Int] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Reverses the order of elements along given axis while preserving array shape. + * Note: reverse and flip are equivalent. We use reverse in the following examples. + * Examples:: + * x = `[ [ 0., 1., 2., 3., 4.], + * [ 5., 6., 7., 8., 9.] ] + * reverse(x, axis=0) = `[ [ 5., 6., 7., 8., 9.], + * [ 0., 1., 2., 3., 4.] ] + * reverse(x, axis=1) = `[ [ 4., 3., 2., 1., 0.], + * [ 9., 8., 7., 6., 5.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L832 + * }}} + * + * @param data Input data array + * @param axis The axis which to reverse elements. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def reverse (data : Option[org.apache.mxnet.Symbol] = None, axis : org.apache.mxnet.Shape, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise rounded value to the nearest integer of the input. + * + * .. note:: + * - For input ``n.5`` ``rint`` returns ``n`` while ``round`` returns ``n+1``. + * - For input ``-n.5`` both ``rint`` and ``round`` returns ``-n-1``. + * + * Example:: + * + * rint([-1.5, 1.5, -1.9, 1.9, 2.1]) = [-2., 1., -2., 2., 2.] + * + * The storage type of ``rint`` output depends upon the input storage type: + * + * - rint(default) = default + * - rint(row_sparse) = row_sparse + * - rint(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L799 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def rint (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Update function for `RMSProp` optimizer. + * + * `RMSprop` is a variant of stochastic gradient descent where the gradients are + * divided by a cache which grows with the sum of squares of recent gradients? + * + * `RMSProp` is similar to `AdaGrad`, a popular variant of `SGD` which adaptively + * tunes the learning rate of each parameter. `AdaGrad` lowers the learning rate for + * each parameter monotonically over the course of training. + * While this is analytically motivated for convex optimizations, it may not be ideal + * for non-convex problems. `RMSProp` deals with this heuristically by allowing the + * learning rates to rebound as the denominator decays over time. + * + * Define the Root Mean Square (RMS) error criterion of the gradient as + * :math:`RMS[g]_t = \sqrt{E[g^2]_t + \epsilon}`, where :math:`g` represents + * gradient and :math:`E[g^2]_t` is the decaying average over past squared gradient. + * + * The :math:`E[g^2]_t` is given by: + * + * .. 
math:: + * E[g^2]_t = \gamma * E[g^2]_{t-1} + (1-\gamma) * g_t^2 + * + * The update step is + * + * .. math:: + * \theta_{t+1} = \theta_t - \frac{\eta}{RMS[g]_t} g_t + * + * The RMSProp code follows the version in + * http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf + * Tieleman & Hinton, 2012. + * + * Hinton suggests the momentum term :math:`\gamma` to be 0.9 and the learning rate + * :math:`\eta` to be 0.001. + * + * + * + * Defined in src/operator/optimizer_op.cc:L797 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param n n + * @param lr Learning rate + * @param gamma1 The decay rate of momentum estimates. + * @param epsilon A small constant for numerical stability. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param clip_weights Clip weights to the range of [-clip_weights, clip_weights] If clip_weights <= 0, weight clipping is turned off. weights = max(min(weights, clip_weights), -clip_weights). + * @return org.apache.mxnet.Symbol + */ +@Experimental +def rmsprop_update (weight : Option[org.apache.mxnet.Symbol] = None, grad : Option[org.apache.mxnet.Symbol] = None, n : Option[org.apache.mxnet.Symbol] = None, lr : Float, gamma1 : Option[Float] = None, epsilon : Option[Float] = None, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, clip_weights : Option[Float] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Update function for RMSPropAlex optimizer. + * + * `RMSPropAlex` is non-centered version of `RMSProp`. + * + * Define :math:`E[g^2]_t` is the decaying average over past squared gradient and + * :math:`E[g]_t` is the decaying average over past gradient. + * + * .. math:: + * E[g^2]_t = \gamma_1 * E[g^2]_{t-1} + (1 - \gamma_1) * g_t^2\\ + * E[g]_t = \gamma_1 * E[g]_{t-1} + (1 - \gamma_1) * g_t\\ + * \Delta_t = \gamma_2 * \Delta_{t-1} - \frac{\eta}{\sqrt{E[g^2]_t - E[g]_t^2 + \epsilon}} g_t\\ + * + * The update step is + * + * .. math:: + * \theta_{t+1} = \theta_t + \Delta_t + * + * The RMSPropAlex code follows the version in + * http://arxiv.org/pdf/1308.0850v5.pdf Eq(38) - Eq(45) by Alex Graves, 2013. + * + * Graves suggests the momentum term :math:`\gamma_1` to be 0.95, :math:`\gamma_2` + * to be 0.9 and the learning rate :math:`\eta` to be 0.0001. + * + * + * Defined in src/operator/optimizer_op.cc:L836 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param n n + * @param g g + * @param delta delta + * @param lr Learning rate + * @param gamma1 Decay rate. + * @param gamma2 Decay rate. + * @param epsilon A small constant for numerical stability. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). 
+ * @param clip_weights Clip weights to the range of [-clip_weights, clip_weights] If clip_weights <= 0, weight clipping is turned off. weights = max(min(weights, clip_weights), -clip_weights). + * @return org.apache.mxnet.Symbol + */ +@Experimental +def rmspropalex_update (weight : Option[org.apache.mxnet.Symbol] = None, grad : Option[org.apache.mxnet.Symbol] = None, n : Option[org.apache.mxnet.Symbol] = None, g : Option[org.apache.mxnet.Symbol] = None, delta : Option[org.apache.mxnet.Symbol] = None, lr : Float, gamma1 : Option[Float] = None, gamma2 : Option[Float] = None, epsilon : Option[Float] = None, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, clip_weights : Option[Float] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise rounded value to the nearest integer of the input. + * + * Example:: + * + * round([-1.5, 1.5, -1.9, 1.9, 2.1]) = [-2., 2., -2., 2., 2.] + * + * The storage type of ``round`` output depends upon the input storage type: + * + * - round(default) = default + * - round(row_sparse) = row_sparse + * - round(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L778 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def round (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise inverse square-root value of the input. + * + * .. math:: + * rsqrt(x) = 1/\sqrt{x} + * + * Example:: + * + * rsqrt([4,9,16]) = [0.5, 0.33333334, 0.25] + * + * The storage type of ``rsqrt`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L193 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def rsqrt (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Concurrent sampling from multiple + * exponential distributions with parameters lambda (rate). + * + * The parameters of the distributions are provided as an input array. + * Let *[s]* be the shape of the input array, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input array, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input value at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input array. + * + * Examples:: + * + * lam = [ 1.0, 8.5 ] + * + * // Draw a single sample for each distribution + * sample_exponential(lam) = [ 0.51837951, 0.09994757] + * + * // Draw a vector containing two samples for each distribution + * sample_exponential(lam, shape=(2)) = `[ [ 0.51837951, 0.19866663], + * [ 0.09994757, 0.50447971] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L283 + * }}} + * + * @param lam Lambda (rate) parameters of the distributions. + * @param shape Shape to be sampled from each random distribution. 
+ * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @return org.apache.mxnet.Symbol + */ +@Experimental +def sample_exponential (lam : Option[org.apache.mxnet.Symbol] = None, shape : Option[org.apache.mxnet.Shape] = None, dtype : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Concurrent sampling from multiple + * gamma distributions with parameters *alpha* (shape) and *beta* (scale). + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Examples:: + * + * alpha = [ 0.0, 2.5 ] + * beta = [ 1.0, 0.7 ] + * + * // Draw a single sample for each distribution + * sample_gamma(alpha, beta) = [ 0. , 2.25797319] + * + * // Draw a vector containing two samples for each distribution + * sample_gamma(alpha, beta, shape=(2)) = `[ [ 0. , 0. ], + * [ 2.25797319, 1.70734084] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L280 + * }}} + * + * @param alpha Alpha (shape) parameters of the distributions. + * @param shape Shape to be sampled from each random distribution. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @param beta Beta (scale) parameters of the distributions. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def sample_gamma (alpha : Option[org.apache.mxnet.Symbol] = None, shape : Option[org.apache.mxnet.Shape] = None, dtype : Option[String] = None, beta : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Concurrent sampling from multiple + * generalized negative binomial distributions with parameters *mu* (mean) and *alpha* (dispersion). + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Samples will always be returned as a floating point data type. + * + * Examples:: + * + * mu = [ 2.0, 2.5 ] + * alpha = [ 1.0, 0.1 ] + * + * // Draw a single sample for each distribution + * sample_generalized_negative_binomial(mu, alpha) = [ 0., 3.] 
+ * + * // Draw a vector containing two samples for each distribution + * sample_generalized_negative_binomial(mu, alpha, shape=(2)) = `[ [ 0., 3.], + * [ 3., 1.] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L290 + * }}} + * + * @param mu Means of the distributions. + * @param shape Shape to be sampled from each random distribution. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @param alpha Alpha (dispersion) parameters of the distributions. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def sample_generalized_negative_binomial (mu : Option[org.apache.mxnet.Symbol] = None, shape : Option[org.apache.mxnet.Shape] = None, dtype : Option[String] = None, alpha : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Concurrent sampling from multiple multinomial distributions. + * + * *data* is an *n* dimensional array whose last dimension has length *k*, where + * *k* is the number of possible outcomes of each multinomial distribution. This + * operator will draw *shape* samples from each distribution. If shape is empty + * one sample will be drawn from each distribution. + * + * If *get_prob* is true, a second array containing log likelihood of the drawn + * samples will also be returned. This is usually used for reinforcement learning + * where you can provide reward as head gradient for this array to estimate + * gradient. + * + * Note that the input distribution must be normalized, i.e. *data* must sum to + * 1 along its last axis. + * + * Examples:: + * + * probs = `[ [0, 0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1, 0] ] + * + * // Draw a single sample for each distribution + * sample_multinomial(probs) = [3, 0] + * + * // Draw a vector containing two samples for each distribution + * sample_multinomial(probs, shape=(2)) = `[ [4, 2], + * [0, 0] ] + * + * // requests log likelihood + * sample_multinomial(probs, get_prob=True) = [2, 1], [0.2, 0.3] + * }}} + * + * @param data Distribution probabilities. Must sum to one on the last axis. + * @param shape Shape to be sampled from each random distribution. + * @param get_prob Whether to also return the log probability of sampled result. This is usually used for differentiating through stochastic variables, e.g. in reinforcement learning. + * @param dtype DType of the output in case this can't be inferred. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def sample_multinomial (data : Option[org.apache.mxnet.Symbol] = None, shape : Option[org.apache.mxnet.Shape] = None, get_prob : Option[Boolean] = None, dtype : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Concurrent sampling from multiple + * negative binomial distributions with parameters *k* (failure limit) and *p* (failure probability). + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. 
If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Samples will always be returned as a floating point data type. + * + * Examples:: + * + * k = [ 20, 49 ] + * p = [ 0.4 , 0.77 ] + * + * // Draw a single sample for each distribution + * sample_negative_binomial(k, p) = [ 15., 16.] + * + * // Draw a vector containing two samples for each distribution + * sample_negative_binomial(k, p, shape=(2)) = `[ [ 15., 50.], + * [ 16., 12.] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L287 + * }}} + * + * @param k Limits of unsuccessful experiments. + * @param shape Shape to be sampled from each random distribution. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @param p Failure probabilities in each experiment. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def sample_negative_binomial (k : Option[org.apache.mxnet.Symbol] = None, shape : Option[org.apache.mxnet.Shape] = None, dtype : Option[String] = None, p : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Concurrent sampling from multiple + * normal distributions with parameters *mu* (mean) and *sigma* (standard deviation). + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Examples:: + * + * mu = [ 0.0, 2.5 ] + * sigma = [ 1.0, 3.7 ] + * + * // Draw a single sample for each distribution + * sample_normal(mu, sigma) = [-0.56410581, 0.95934606] + * + * // Draw a vector containing two samples for each distribution + * sample_normal(mu, sigma, shape=(2)) = `[ [-0.56410581, 0.2928229 ], + * [ 0.95934606, 4.48287058] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L278 + * }}} + * + * @param mu Means of the distributions. + * @param shape Shape to be sampled from each random distribution. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @param sigma Standard deviations of the distributions. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def sample_normal (mu : Option[org.apache.mxnet.Symbol] = None, shape : Option[org.apache.mxnet.Shape] = None, dtype : Option[String] = None, sigma : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Concurrent sampling from multiple + * Poisson distributions with parameters lambda (rate). + * + * The parameters of the distributions are provided as an input array. 
+ * Let *[s]* be the shape of the input array, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input array, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input value at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input array. + * + * Samples will always be returned as a floating point data type. + * + * Examples:: + * + * lam = [ 1.0, 8.5 ] + * + * // Draw a single sample for each distribution + * sample_poisson(lam) = [ 0., 13.] + * + * // Draw a vector containing two samples for each distribution + * sample_poisson(lam, shape=(2)) = `[ [ 0., 4.], + * [ 13., 8.] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L285 + * }}} + * + * @param lam Lambda (rate) parameters of the distributions. + * @param shape Shape to be sampled from each random distribution. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @return org.apache.mxnet.Symbol + */ +@Experimental +def sample_poisson (lam : Option[org.apache.mxnet.Symbol] = None, shape : Option[org.apache.mxnet.Shape] = None, dtype : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Concurrent sampling from multiple + * uniform distributions on the intervals given by *[low,high)*. + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Examples:: + * + * low = [ 0.0, 2.5 ] + * high = [ 1.0, 3.7 ] + * + * // Draw a single sample for each distribution + * sample_uniform(low, high) = [ 0.40451524, 3.18687344] + * + * // Draw a vector containing two samples for each distribution + * sample_uniform(low, high, shape=(2)) = `[ [ 0.40451524, 0.18017688], + * [ 3.18687344, 3.68352246] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L276 + * }}} + * + * @param low Lower bounds of the distributions. + * @param shape Shape to be sampled from each random distribution. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @param high Upper bounds of the distributions. 
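+ *
+ *        A rough plain-Scala illustration of the `[s]x[t]` output-shape rule described
+ *        above (not the generated API; the concrete sizes are assumptions)::
+ *
+ *          val inputShape  = Vector(2)                 // [s]: two (low, high) pairs
+ *          val sampleShape = Vector(3)                 // [t]: three draws per distribution
+ *          val outputShape = inputShape ++ sampleShape // [s]x[t] = Vector(2, 3)
+ *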
+ * @return org.apache.mxnet.Symbol + */ +@Experimental +def sample_uniform (low : Option[org.apache.mxnet.Symbol] = None, shape : Option[org.apache.mxnet.Shape] = None, dtype : Option[String] = None, high : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Scatters data into a new tensor according to indices. + * + * Given `data` with shape `(Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1})` and indices with shape + * `(M, Y_0, ..., Y_{K-1})`, the output will have shape `(X_0, X_1, ..., X_{N-1})`, + * where `M <= N`. If `M == N`, data shape should simply be `(Y_0, ..., Y_{K-1})`. + * + * The elements in output is defined as follows:: + * + * output[indices[0, y_0, ..., y_{K-1}], + * ..., + * indices[M-1, y_0, ..., y_{K-1}], + * x_M, ..., x_{N-1}] = data[y_0, ..., y_{K-1}, x_M, ..., x_{N-1}] + * + * all other entries in output are 0. + * + * .. warning:: + * + * If the indices have duplicates, the result will be non-deterministic and + * the gradient of `scatter_nd` will not be correct!! + * + * + * Examples:: + * + * data = [2, 3, 0] + * indices = `[ [1, 1, 0], [0, 1, 0] ] + * shape = (2, 2) + * scatter_nd(data, indices, shape) = `[ [0, 0], [2, 3] ] + * + * data = `[ `[ [1, 2], [3, 4] ], `[ [5, 6], [7, 8] ] ] + * indices = `[ [0, 1], [1, 1] ] + * shape = (2, 2, 2, 2) + * scatter_nd(data, indices, shape) = `[ [`[ [0, 0], + * [0, 0] ], + * + * `[ [1, 2], + * [3, 4] ] ], + * + * `[ `[ [0, 0], + * [0, 0] ], + * + * `[ [5, 6], + * [7, 8] ] ] ] + * }}} + * + * @param data data + * @param indices indices + * @param shape Shape of output. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def scatter_nd (data : Option[org.apache.mxnet.Symbol] = None, indices : Option[org.apache.mxnet.Symbol] = None, shape : org.apache.mxnet.Shape, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Momentum update function for Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * However, if grad's storage type is ``row_sparse``, ``lazy_update`` is True and weight's storage + * type is the same as momentum's storage type, + * only the row slices whose indices appear in grad.indices are updated (for both weight and momentum):: + * + * for row in gradient.indices: + * v[row] = momentum[row] * v[row] - learning_rate * gradient[row] + * weight[row] += v[row] + * + * + * + * Defined in src/operator/optimizer_op.cc:L565 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param mom Momentum + * @param lr Learning rate + * @param momentum The decay rate of momentum estimates at each epoch. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. 
grad = max(min(grad, clip_gradient), -clip_gradient). + * @param lazy_update If true, lazy updates are applied if gradient's stype is row_sparse and both weight and momentum have the same stype + * @return org.apache.mxnet.Symbol + */ +@Experimental +def sgd_mom_update (weight : Option[org.apache.mxnet.Symbol] = None, grad : Option[org.apache.mxnet.Symbol] = None, mom : Option[org.apache.mxnet.Symbol] = None, lr : Float, momentum : Option[Float] = None, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, lazy_update : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Update function for Stochastic Gradient Descent (SGD) optimizer. + * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * However, if gradient is of ``row_sparse`` storage type and ``lazy_update`` is True, + * only the row slices whose indices appear in grad.indices are updated:: + * + * for row in gradient.indices: + * weight[row] = weight[row] - learning_rate * (gradient[row] + wd * weight[row]) + * + * + * + * Defined in src/operator/optimizer_op.cc:L524 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param lr Learning rate + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @param lazy_update If true, lazy updates are applied if gradient's stype is row_sparse. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def sgd_update (weight : Option[org.apache.mxnet.Symbol] = None, grad : Option[org.apache.mxnet.Symbol] = None, lr : Float, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, lazy_update : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns a 1D int64 array containing the shape of data. + * + * Example:: + * + * shape_array(`[ [1,2,3,4], [5,6,7,8] ]) = [2,4] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L574 + * }}} + * + * @param data Input Array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def shape_array (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Randomly shuffle the elements. + * + * This shuffles the array along the first axis. + * The order of the elements in each subarray does not change. + * For example, if a 2D array is given, the order of the rows randomly changes, + * but the order of the elements in each row does not change. + * }}} + * + * @param data Data to be shuffled. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def shuffle (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes sigmoid of x element-wise. + * + * .. 
math:: + * y = 1 / (1 + exp(-x)) + * + * The storage type of ``sigmoid`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L119 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def sigmoid (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise sign of the input. + * + * Example:: + * + * sign([-2, 0, 3]) = [-1, 0, 1] + * + * The storage type of ``sign`` output depends upon the input storage type: + * + * - sign(default) = default + * - sign(row_sparse) = row_sparse + * - sign(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L759 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def sign (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Update function for SignSGD optimizer. + * + * .. math:: + * + * g_t = \nabla J(W_{t-1})\\ + * W_t = W_{t-1} - \eta_t \text{sign}(g_t) + * + * It updates the weights using:: + * + * weight = weight - learning_rate * sign(gradient) + * + * .. note:: + * - sparse ndarray not supported for this optimizer yet. + * + * + * Defined in src/operator/optimizer_op.cc:L63 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param lr Learning rate + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + * @return org.apache.mxnet.Symbol + */ +@Experimental +def signsgd_update (weight : Option[org.apache.mxnet.Symbol] = None, grad : Option[org.apache.mxnet.Symbol] = None, lr : Float, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * SIGN momentUM (Signum) optimizer. + * + * .. math:: + * + * g_t = \nabla J(W_{t-1})\\ + * m_t = \beta m_{t-1} + (1 - \beta) g_t\\ + * W_t = W_{t-1} - \eta_t \text{sign}(m_t) + * + * It updates the weights using:: + * state = momentum * state + (1-momentum) * gradient + * weight = weight - learning_rate * sign(state) + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * .. note:: + * - sparse ndarray not supported for this optimizer yet. + * + * + * Defined in src/operator/optimizer_op.cc:L92 + * }}} + * + * @param weight Weight + * @param grad Gradient + * @param mom Momentum + * @param lr Learning rate + * @param momentum The decay rate of momentum estimates at each epoch. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). 
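+ *
+ *        A plain-Scala sketch of the Signum update rule documented above (an
+ *        illustration of the arithmetic only, not the MXNet kernel; all values
+ *        are made up)::
+ *
+ *          var weight = 0.5f; var state = 0.0f
+ *          val grad = 0.2f; val lr = 0.01f; val momentum = 0.9f
+ *          state = momentum * state + (1 - momentum) * grad   // state = momentum * state + (1-momentum) * gradient
+ *          weight -= lr * math.signum(state)                   // weight = weight - learning_rate * sign(state)
+ *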
+ * @param wd_lh The amount of weight decay that does not go into gradient/momentum calculationsotherwise do weight decay algorithmically only. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def signum_update (weight : Option[org.apache.mxnet.Symbol] = None, grad : Option[org.apache.mxnet.Symbol] = None, mom : Option[org.apache.mxnet.Symbol] = None, lr : Float, momentum : Option[Float] = None, wd : Option[Float] = None, rescale_grad : Option[Float] = None, clip_gradient : Option[Float] = None, wd_lh : Option[Float] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the element-wise sine of the input array. + * + * The input should be in radians (:math:`2\pi` rad equals 360 degrees). + * + * .. math:: + * sin([0, \pi/4, \pi/2]) = [0, 0.707, 1] + * + * The storage type of ``sin`` output depends upon the input storage type: + * + * - sin(default) = default + * - sin(row_sparse) = row_sparse + * - sin(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L47 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def sin (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns the hyperbolic sine of the input array, computed element-wise. + * + * .. math:: + * sinh(x) = 0.5\times(exp(x) - exp(-x)) + * + * The storage type of ``sinh`` output depends upon the input storage type: + * + * - sinh(default) = default + * - sinh(row_sparse) = row_sparse + * - sinh(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L313 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def sinh (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns a 1D int64 array containing the size of data. + * + * Example:: + * + * size_array(`[ [1,2,3,4], [5,6,7,8] ]) = [8] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L625 + * }}} + * + * @param data Input Array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def size_array (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Slices a region of the array. + * .. note:: ``crop`` is deprecated. Use ``slice`` instead. + * This function returns a sliced array between the indices given + * by `begin` and `end` with the corresponding `step`. + * For an input array of ``shape=(d_0, d_1, ..., d_n-1)``, + * slice operation with ``begin=(b_0, b_1...b_m-1)``, + * ``end=(e_0, e_1, ..., e_m-1)``, and ``step=(s_0, s_1, ..., s_m-1)``, + * where m <= n, results in an array with the shape + * ``(|e_0-b_0|/|s_0|, ..., |e_m-1-b_m-1|/|s_m-1|, d_m, ..., d_n-1)``. + * The resulting array's *k*-th dimension contains elements + * from the *k*-th dimension of the input array starting + * from index ``b_k`` (inclusive) with step ``s_k`` + * until reaching ``e_k`` (exclusive). + * If the *k*-th elements are `None` in the sequence of `begin`, `end`, + * and `step`, the following rule will be used to set default values. + * If `s_k` is `None`, set `s_k=1`. If `s_k > 0`, set `b_k=0`, `e_k=d_k`; + * else, set `b_k=d_k-1`, `e_k=-1`. 
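+ * A small Scala sketch of the per-axis output length given above (an illustration
+ * only; the division rounds up when the step does not divide the range evenly)::
+ *   def sliceLen(b: Int, e: Int, s: Int): Int =
+ *     math.ceil(math.abs(e - b).toDouble / math.abs(s)).toInt
+ *   sliceLen(0, 3, 2)    // 2 columns for begin=0, end=3, step=2 (cf. the stepped example below)
+ *   sliceLen(2, -1, -1)  // 3 rows when stepping backwards with the `None` defaults on axis 0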
+ * The storage type of ``slice`` output depends on storage types of inputs + * - slice(csr) = csr + * - otherwise, ``slice`` generates output with default storage + * .. note:: When input data storage type is csr, it only supports + * step=(), or step=(None,), or step=(1,) to generate a csr output. + * For other step parameter values, it falls back to slicing + * a dense tensor. + * Example:: + * x = `[ [ 1., 2., 3., 4.], + * [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice(x, begin=(0,1), end=(2,4)) = `[ [ 2., 3., 4.], + * [ 6., 7., 8.] ] + * slice(x, begin=(None, 0), end=(None, 3), step=(-1, 2)) = `[ [9., 11.], + * [5., 7.], + * [1., 3.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L482 + * }}} + * + * @param data Source input + * @param begin starting indices for the slice operation, supports negative indices. + * @param end ending indices for the slice operation, supports negative indices. + * @param step step for the slice operation, supports negative values. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def slice (data : Option[org.apache.mxnet.Symbol] = None, begin : org.apache.mxnet.Shape, end : org.apache.mxnet.Shape, step : Option[org.apache.mxnet.Shape] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Slices along a given axis. + * Returns an array slice along a given `axis` starting from the `begin` index + * to the `end` index. + * Examples:: + * x = `[ [ 1., 2., 3., 4.], + * [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice_axis(x, axis=0, begin=1, end=3) = `[ [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice_axis(x, axis=1, begin=0, end=2) = `[ [ 1., 2.], + * [ 5., 6.], + * [ 9., 10.] ] + * slice_axis(x, axis=1, begin=-3, end=-1) = `[ [ 2., 3.], + * [ 6., 7.], + * [ 10., 11.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L571 + * }}} + * + * @param data Source input + * @param axis Axis along which to be sliced, supports negative indexes. + * @param begin The beginning index along the axis to be sliced, supports negative indexes. + * @param end The ending index along the axis to be sliced, supports negative indexes. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def slice_axis (data : Option[org.apache.mxnet.Symbol] = None, axis : Int, begin : Int, end : Int, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Slices a region of the array like the shape of another array. + * This function is similar to ``slice``, however, the `begin` are always `0`s + * and `end` of specific axes are inferred from the second input `shape_like`. + * Given the second `shape_like` input of ``shape=(d_0, d_1, ..., d_n-1)``, + * a ``slice_like`` operator with default empty `axes`, it performs the + * following operation: + * `` out = slice(input, begin=(0, 0, ..., 0), end=(d_0, d_1, ..., d_n-1))``. + * When `axes` is not empty, it is used to speficy which axes are being sliced. + * Given a 4-d input data, ``slice_like`` operator with ``axes=(0, 2, -1)`` + * will perform the following operation: + * `` out = slice(input, begin=(0, 0, 0, 0), end=(d_0, None, d_2, d_3))``. + * Note that it is allowed to have first and second input with different dimensions, + * however, you have to make sure the `axes` are specified and not exceeding the + * dimension limits. 
+ * For example, given `input_1` with ``shape=(2,3,4,5)`` and `input_2` with + * ``shape=(1,2,3)``, it is not allowed to use: + * `` out = slice_like(a, b)`` because ndim of `input_1` is 4, and ndim of `input_2` + * is 3. + * The following is allowed in this situation: + * `` out = slice_like(a, b, axes=(0, 2))`` + * Example:: + * x = `[ [ 1., 2., 3., 4.], + * [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * y = `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * slice_like(x, y) = `[ [ 1., 2., 3.] + * [ 5., 6., 7.] ] + * slice_like(x, y, axes=(0, 1)) = `[ [ 1., 2., 3.] + * [ 5., 6., 7.] ] + * slice_like(x, y, axes=(0)) = `[ [ 1., 2., 3., 4.] + * [ 5., 6., 7., 8.] ] + * slice_like(x, y, axes=(-1)) = `[ [ 1., 2., 3.] + * [ 5., 6., 7.] + * [ 9., 10., 11.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L625 + * }}} + * + * @param data Source input + * @param shape_like Shape like input + * @param axes List of axes on which input data will be sliced according to the corresponding size of the second input. By default will slice on all axes. Negative axes are supported. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def slice_like (data : Option[org.apache.mxnet.Symbol] = None, shape_like : Option[org.apache.mxnet.Symbol] = None, axes : Option[org.apache.mxnet.Shape] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Calculate Smooth L1 Loss(lhs, scalar) by summing + * + * .. math:: + * + * f(x) = + * \begin{cases} + * (\sigma x)^2/2,& \text{if }x < 1/\sigma^2\\ + * |x|-0.5/\sigma^2,& \text{otherwise} + * \end{cases} + * + * where :math:`x` is an element of the tensor *lhs* and :math:`\sigma` is the scalar. + * + * Example:: + * + * smooth_l1([1, 2, 3, 4]) = [0.5, 1.5, 2.5, 3.5] + * smooth_l1([1, 2, 3, 4], scalar=1) = [0.5, 1.5, 2.5, 3.5] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_scalar_op_extended.cc:L108 + * }}} + * + * @param data source input + * @param scalar scalar input + * @return org.apache.mxnet.Symbol + */ +@Experimental +def smooth_l1 (data : Option[org.apache.mxnet.Symbol] = None, scalar : Float, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Applies the softmax function. + * + * The resulting array contains elements in the range (0,1) and the elements along the given axis sum up to 1. + * + * .. math:: + * softmax(\mathbf{z/t})_j = \frac{e^{z_j/t}}{\sum_{k=1}^K e^{z_k/t}} + * + * for :math:`j = 1, ..., K` + * + * t is the temperature parameter in softmax function. By default, t equals 1.0 + * + * Example:: + * + * x = `[ [ 1. 1. 1.] + * [ 1. 1. 1.] ] + * + * softmax(x,axis=0) = `[ [ 0.5 0.5 0.5] + * [ 0.5 0.5 0.5] ] + * + * softmax(x,axis=1) = `[ [ 0.33333334, 0.33333334, 0.33333334], + * [ 0.33333334, 0.33333334, 0.33333334] ] + * + * + * + * Defined in src/operator/nn/softmax.cc:L103 + * }}} + * + * @param data The input array. + * @param length The length array. + * @param axis The axis along which to compute softmax. + * @param temperature Temperature parameter in softmax + * @param dtype DType of the output in case this can't be inferred. Defaults to the same as input's dtype if not defined (dtype=None). + * @param use_length Whether to use the length input as a mask over the data input. 
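+ *
+ *        A plain-Scala sketch of the temperature softmax defined above (an
+ *        illustration of the formula, not the MXNet kernel)::
+ *
+ *          def softmax(z: Seq[Double], t: Double = 1.0): Seq[Double] = {
+ *            val e = z.map(v => math.exp(v / t))
+ *            e.map(_ / e.sum)
+ *          }
+ *          softmax(Seq(1.0, 1.0, 1.0))   // each entry ~= 0.3333, matching the example above
+ *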
+ * @return org.apache.mxnet.Symbol + */ +@Experimental +def softmax (data : Option[org.apache.mxnet.Symbol] = None, length : Option[org.apache.mxnet.Symbol] = None, axis : Option[Int] = None, temperature : Option[Double] = None, dtype : Option[String] = None, use_length : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Calculate cross entropy of softmax output and one-hot label. + * + * - This operator computes the cross entropy in two steps: + * - Applies softmax function on the input array. + * - Computes and returns the cross entropy loss between the softmax output and the labels. + * + * - The softmax function and cross entropy loss is given by: + * + * - Softmax Function: + * + * .. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)} + * + * - Cross Entropy Function: + * + * .. math:: \text{CE(label, output)} = - \sum_i \text{label}_i \log(\text{output}_i) + * + * Example:: + * + * x = `[ [1, 2, 3], + * [11, 7, 5] ] + * + * label = [2, 0] + * + * softmax(x) = `[ [0.09003057, 0.24472848, 0.66524094], + * [0.97962922, 0.01794253, 0.00242826] ] + * + * softmax_cross_entropy(data, label) = - log(0.66524084) - log(0.97962922) = 0.4281871 + * + * + * + * Defined in src/operator/loss_binary_op.cc:L59 + * }}} + * + * @param data Input data + * @param label Input label + * @return org.apache.mxnet.Symbol + */ +@Experimental +def softmax_cross_entropy (data : Option[org.apache.mxnet.Symbol] = None, label : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Applies the softmin function. + * + * The resulting array contains elements in the range (0,1) and the elements along the given axis sum + * up to 1. + * + * .. math:: + * softmin(\mathbf{z/t})_j = \frac{e^{-z_j/t}}{\sum_{k=1}^K e^{-z_k/t}} + * + * for :math:`j = 1, ..., K` + * + * t is the temperature parameter in softmax function. By default, t equals 1.0 + * + * Example:: + * + * x = `[ [ 1. 2. 3.] + * [ 3. 2. 1.] ] + * + * softmin(x,axis=0) = `[ [ 0.88079703, 0.5, 0.11920292], + * [ 0.11920292, 0.5, 0.88079703] ] + * + * softmin(x,axis=1) = `[ [ 0.66524094, 0.24472848, 0.09003057], + * [ 0.09003057, 0.24472848, 0.66524094] ] + * + * + * + * Defined in src/operator/nn/softmin.cc:L57 + * }}} + * + * @param data The input array. + * @param axis The axis along which to compute softmax. + * @param temperature Temperature parameter in softmax + * @param dtype DType of the output in case this can't be inferred. Defaults to the same as input's dtype if not defined (dtype=None). + * @param use_length Whether to use the length input as a mask over the data input. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def softmin (data : Option[org.apache.mxnet.Symbol] = None, axis : Option[Int] = None, temperature : Option[Double] = None, dtype : Option[String] = None, use_length : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes softsign of x element-wise. + * + * .. math:: + * y = x / (1 + abs(x)) + * + * The storage type of ``softsign`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L191 + * }}} + * + * @param data The input array. 
+ * @return org.apache.mxnet.Symbol + */ +@Experimental +def softsign (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns a sorted copy of an input array along the given axis. + * + * Examples:: + * + * x = `[ [ 1, 4], + * [ 3, 1] ] + * + * // sorts along the last axis + * sort(x) = `[ [ 1., 4.], + * [ 1., 3.] ] + * + * // flattens and then sorts + * sort(x, axis=None) = [ 1., 1., 3., 4.] + * + * // sorts along the first axis + * sort(x, axis=0) = `[ [ 1., 1.], + * [ 3., 4.] ] + * + * // in a descend order + * sort(x, is_ascend=0) = `[ [ 4., 1.], + * [ 3., 1.] ] + * + * + * + * Defined in src/operator/tensor/ordering_op.cc:L132 + * }}} + * + * @param data The input array + * @param axis Axis along which to choose sort the input tensor. If not given, the flattened array is used. Default is -1. + * @param is_ascend Whether to sort in ascending or descending order. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def sort (data : Option[org.apache.mxnet.Symbol] = None, axis : Option[Int] = None, is_ascend : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Rearranges(permutes) blocks of spatial data into depth. + * Similar to ONNX SpaceToDepth operator: + * https://github.com/onnx/onnx/blob/master/docs/Operators.md#SpaceToDepth + * The output is a new tensor where the values from height and width dimension are + * moved to the depth dimension. The reverse of this operation is ``depth_to_space``. + * .. math:: + * \begin{gather*} + * x \prime = reshape(x, [N, C, H / block\_size, block\_size, W / block\_size, block\_size]) \\ + * x \prime \prime = transpose(x \prime, [0, 3, 5, 1, 2, 4]) \\ + * y = reshape(x \prime \prime, [N, C * (block\_size ^ 2), H / block\_size, W / block\_size]) + * \end{gather*} + * where :math:`x` is an input tensor with default layout as :math:`[N, C, H, W]`: [batch, channels, height, width] + * and :math:`y` is the output tensor of layout :math:`[N, C * (block\_size ^ 2), H / block\_size, W / block\_size]` + * Example:: + * x = `[ [`[ [0, 6, 1, 7, 2, 8], + * [12, 18, 13, 19, 14, 20], + * [3, 9, 4, 10, 5, 11], + * [15, 21, 16, 22, 17, 23] ] ] ] + * space_to_depth(x, 2) = `[ [`[ [0, 1, 2], + * [3, 4, 5] ], + * `[ [6, 7, 8], + * [9, 10, 11] ], + * `[ [12, 13, 14], + * [15, 16, 17] ], + * `[ [18, 19, 20], + * [21, 22, 23] ] ] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L1019 + * }}} + * + * @param data Input ndarray + * @param block_size Blocks of [block_size. block_size] are moved + * @return org.apache.mxnet.Symbol + */ +@Experimental +def space_to_depth (data : Option[org.apache.mxnet.Symbol] = None, block_size : Int, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Splits an array along a particular axis into multiple sub-arrays. + * + * .. note:: ``SliceChannel`` is deprecated. Use ``split`` instead. + * + * **Note** that `num_outputs` should evenly divide the length of the axis + * along which to split the array. + * + * Example:: + * + * x = `[ `[ [ 1.] + * [ 2.] ] + * `[ [ 3.] + * [ 4.] ] + * `[ [ 5.] + * [ 6.] ] ] + * x.shape = (3, 2, 1) + * + * y = split(x, axis=1, num_outputs=2) // a list of 2 arrays with shape (3, 1, 1) + * y = `[ `[ [ 1.] ] + * `[ [ 3.] ] + * `[ [ 5.] ] ] + * + * `[ `[ [ 2.] ] + * `[ [ 4.] ] + * `[ [ 6.] 
] ] + * + * y[0].shape = (3, 1, 1) + * + * z = split(x, axis=0, num_outputs=3) // a list of 3 arrays with shape (1, 2, 1) + * z = `[ `[ [ 1.] + * [ 2.] ] ] + * + * `[ `[ [ 3.] + * [ 4.] ] ] + * + * `[ `[ [ 5.] + * [ 6.] ] ] + * + * z[0].shape = (1, 2, 1) + * + * `squeeze_axis=1` removes the axis with length 1 from the shapes of the output arrays. + * **Note** that setting `squeeze_axis` to ``1`` removes axis with length 1 only + * along the `axis` which it is split. + * Also `squeeze_axis` can be set to true only if ``input.shape[axis] == num_outputs``. + * + * Example:: + * + * z = split(x, axis=0, num_outputs=3, squeeze_axis=1) // a list of 3 arrays with shape (2, 1) + * z = `[ [ 1.] + * [ 2.] ] + * + * `[ [ 3.] + * [ 4.] ] + * + * `[ [ 5.] + * [ 6.] ] + * z[0].shape = (2 ,1 ) + * + * + * + * Defined in src/operator/slice_channel.cc:L107 + * }}} + * + * @param data The input + * @param num_outputs Number of splits. Note that this should evenly divide the length of the `axis`. + * @param axis Axis along which to split. + * @param squeeze_axis If true, Removes the axis with length 1 from the shapes of the output arrays. **Note** that setting `squeeze_axis` to ``true`` removes axis with length 1 only along the `axis` which it is split. Also `squeeze_axis` can be set to ``true`` only if ``input.shape[axis] == num_outputs``. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def split (data : Option[org.apache.mxnet.Symbol] = None, num_outputs : Int, axis : Option[Int] = None, squeeze_axis : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise square-root value of the input. + * + * .. math:: + * \textrm{sqrt}(x) = \sqrt{x} + * + * Example:: + * + * sqrt([4, 9, 16]) = [2, 3, 4] + * + * The storage type of ``sqrt`` output depends upon the input storage type: + * + * - sqrt(default) = default + * - sqrt(row_sparse) = row_sparse + * - sqrt(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L142 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def sqrt (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns element-wise squared value of the input. + * + * .. math:: + * square(x) = x^2 + * + * Example:: + * + * square([2, 3, 4]) = [4, 9, 16] + * + * The storage type of ``square`` output depends upon the input storage type: + * + * - square(default) = default + * - square(row_sparse) = row_sparse + * - square(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L118 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def square (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Remove single-dimensional entries from the shape of an array. + * Same behavior of defining the output tensor shape as numpy.squeeze for the most of cases. + * See the following note for exception. + * Examples:: + * data = `[ `[ [0], [1], [2] ] ] + * squeeze(data) = [0, 1, 2] + * squeeze(data, axis=0) = `[ [0], [1], [2] ] + * squeeze(data, axis=2) = `[ [0, 1, 2] ] + * squeeze(data, axis=(0, 2)) = [0, 1, 2] + * .. Note:: + * The output of this operator will keep at least one dimension not removed. 
For example, + * squeeze(`[ `[ [4] ] ]) = [4], while in numpy.squeeze, the output will become a scalar. + * }}} + * + * @param data data to squeeze + * @param axis Selects a subset of the single-dimensional entries in the shape. If an axis is selected with shape entry greater than one, an error is raised. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def squeeze (data : Option[org.apache.mxnet.Symbol] = None, axis : Option[org.apache.mxnet.Shape] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Join a sequence of arrays along a new axis. + * The axis parameter specifies the index of the new axis in the dimensions of the + * result. For example, if axis=0 it will be the first dimension and if axis=-1 it + * will be the last dimension. + * Examples:: + * x = [1, 2] + * y = [3, 4] + * stack(x, y) = `[ [1, 2], + * [3, 4] ] + * stack(x, y, axis=1) = `[ [1, 3], + * [2, 4] ] + * }}} + * + * @param data List of arrays to stack + * @param axis The axis in the result array along which the input arrays are stacked. + * @param num_args Number of inputs to be stacked. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def stack (data : Array[org.apache.mxnet.Symbol], axis : Option[Int] = None, num_args : Int, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Stops gradient computation. + * + * Stops the accumulated gradient of the inputs from flowing through this operator + * in the backward direction. In other words, this operator prevents the contribution + * of its inputs to be taken into account for computing gradients. + * + * Example:: + * + * v1 = [1, 2] + * v2 = [0, 1] + * a = Variable('a') + * b = Variable('b') + * b_stop_grad = stop_gradient(3 * b) + * loss = MakeLoss(b_stop_grad + a) + * + * executor = loss.simple_bind(ctx=cpu(), a=(1,2), b=(1,2)) + * executor.forward(is_train=True, a=v1, b=v2) + * executor.outputs + * [ 1. 5.] + * + * executor.backward() + * executor.grad_arrays + * [ 0. 0.] + * [ 1. 1.] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L327 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def stop_gradient (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the sum of array elements over given axes. + * + * .. Note:: + * + * `sum` and `sum_axis` are equivalent. + * For ndarray of csr storage type summation along axis 0 and axis 1 is supported. + * Setting keepdims or exclude to True will cause a fallback to dense operator. + * + * Example:: + * + * data = `[ `[ [1, 2], [2, 3], [1, 3] ], + * `[ [1, 4], [4, 3], [5, 2] ], + * `[ [7, 1], [7, 2], [7, 3] ] ] + * + * sum(data, axis=1) + * `[ [ 4. 8.] + * [ 10. 9.] + * [ 21. 6.] ] + * + * sum(data, axis=[1,2]) + * [ 12. 19. 27.] + * + * data = `[ [1, 2, 0], + * [3, 0, 1], + * [4, 1, 0] ] + * + * csr = cast_storage(data, 'csr') + * + * sum(csr, axis=0) + * [ 8. 3. 1.] + * + * sum(csr, axis=1) + * [ 3. 4. 5.] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_sum_value.cc:L67 + * }}} + * + * @param data The input + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. 
+ + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def sum (data : Option[org.apache.mxnet.Symbol] = None, axis : Option[org.apache.mxnet.Shape] = None, keepdims : Option[Boolean] = None, exclude : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the sum of array elements over given axes. + * + * .. Note:: + * + * `sum` and `sum_axis` are equivalent. + * For ndarray of csr storage type summation along axis 0 and axis 1 is supported. + * Setting keepdims or exclude to True will cause a fallback to dense operator. + * + * Example:: + * + * data = `[ `[ [1, 2], [2, 3], [1, 3] ], + * `[ [1, 4], [4, 3], [5, 2] ], + * `[ [7, 1], [7, 2], [7, 3] ] ] + * + * sum(data, axis=1) + * `[ [ 4. 8.] + * [ 10. 9.] + * [ 21. 6.] ] + * + * sum(data, axis=[1,2]) + * [ 12. 19. 27.] + * + * data = `[ [1, 2, 0], + * [3, 0, 1], + * [4, 1, 0] ] + * + * csr = cast_storage(data, 'csr') + * + * sum(csr, axis=0) + * [ 8. 3. 1.] + * + * sum(csr, axis=1) + * [ 3. 4. 5.] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_sum_value.cc:L67 + * }}} + * + * @param data The input + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def sum_axis (data : Option[org.apache.mxnet.Symbol] = None, axis : Option[org.apache.mxnet.Shape] = None, keepdims : Option[Boolean] = None, exclude : Option[Boolean] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Interchanges two axes of an array. + * + * Examples:: + * + * x = `[ [1, 2, 3] ]) + * swapaxes(x, 0, 1) = `[ [ 1], + * [ 2], + * [ 3] ] + * + * x = `[ `[ [ 0, 1], + * [ 2, 3] ], + * `[ [ 4, 5], + * [ 6, 7] ] ] // (2,2,2) array + * + * swapaxes(x, 0, 2) = `[ `[ [ 0, 4], + * [ 2, 6] ], + * `[ [ 1, 5], + * [ 3, 7] ] ] + * + * + * Defined in src/operator/swapaxis.cc:L70 + * }}} + * + * @param data Input array. + * @param dim1 the first axis to be swapped. + * @param dim2 the second axis to be swapped. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def swapaxes (data : Option[org.apache.mxnet.Symbol] = None, dim1 : Option[Int] = None, dim2 : Option[Int] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Takes elements from an input array along the given axis. 
+ * + * This function slices the input array along a particular axis with the provided indices. + * + * Given data tensor of rank r >= 1, and indices tensor of rank q, gather entries of the axis + * dimension of data (by default outer-most one as axis=0) indexed by indices, and concatenates them + * in an output tensor of rank q + (r - 1). + * + * Examples:: + * + * x = [4. 5. 6.] + * + * // Trivial case, take the second element along the first axis. + * + * take(x, [1]) = [ 5. ] + * + * // The other trivial case, axis=-1, take the third element along the first axis + * + * take(x, [3], axis=-1, mode='clip') = [ 6. ] + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] ] + * + * // In this case we will get rows 0 and 1, then 1 and 2. Along axis 0 + * + * take(x, `[ [0,1],[1,2] ]) = `[ `[ [ 1., 2.], + * [ 3., 4.] ], + * + * `[ [ 3., 4.], + * [ 5., 6.] ] ] + * + * // In this case we will get rows 0 and 1, then 1 and 2 (calculated by wrapping around). + * // Along axis 1 + * + * take(x, `[ [0, 3], [-1, -2] ], axis=1, mode='wrap') = `[ `[ [ 1. 2.] + * [ 2. 1.] ] + * + * `[ [ 3. 4.] + * [ 4. 3.] ] + * + * `[ [ 5. 6.] + * [ 6. 5.] ] ] + * + * The storage type of ``take`` output depends upon the input storage type: + * + * - take(default, default) = default + * - take(csr, default, axis=0) = csr + * + * + * + * Defined in src/operator/tensor/indexing_op.cc:L718 + * }}} + * + * @param a The input array. + * @param indices The indices of the values to be extracted. + * @param axis The axis of input array to be taken.For input tensor of rank r, it could be in the range of [-r, r-1] + * @param mode Specify how out-of-bound indices bahave. Default is "clip". "clip" means clip to the range. So, if all indices mentioned are too large, they are replaced by the index that addresses the last element along an axis. "wrap" means to wrap around. "raise" means to raise an error when index out of range. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def take (a : Option[org.apache.mxnet.Symbol] = None, indices : Option[org.apache.mxnet.Symbol] = None, axis : Option[Int] = None, mode : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Computes the element-wise tangent of the input array. + * + * The input should be in radians (:math:`2\pi` rad equals 360 degrees). + * + * .. math:: + * tan([0, \pi/4, \pi/2]) = [0, 1, -inf] + * + * The storage type of ``tan`` output depends upon the input storage type: + * + * - tan(default) = default + * - tan(row_sparse) = row_sparse + * - tan(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L140 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def tan (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Returns the hyperbolic tangent of the input array, computed element-wise. + * + * .. math:: + * tanh(x) = sinh(x) / cosh(x) + * + * The storage type of ``tanh`` output depends upon the input storage type: + * + * - tanh(default) = default + * - tanh(row_sparse) = row_sparse + * - tanh(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L393 + * }}} + * + * @param data The input array. 
+ * @return org.apache.mxnet.Symbol
+ */
+@Experimental
+def tanh (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol
+ /**
+ *
+ * {{{
+ *
+ * Repeats the whole array multiple times.
+ * If ``reps`` has length *d* and the input array has dimension *n*, there are
+ * three cases:
+ * - **n=d**. Repeat *i*-th dimension of the input by ``reps[i]`` times::
+ * x = `[ [1, 2],
+ * [3, 4] ]
+ * tile(x, reps=(2,3)) = `[ [ 1., 2., 1., 2., 1., 2.],
+ * [ 3., 4., 3., 4., 3., 4.],
+ * [ 1., 2., 1., 2., 1., 2.],
+ * [ 3., 4., 3., 4., 3., 4.] ]
+ * - **n>d**. ``reps`` is promoted to length *n* by pre-pending 1's to it. Thus for
+ * an input shape ``(2,3)``, ``reps=(2,)`` is treated as ``(1,2)``::
+ * tile(x, reps=(2,)) = `[ [ 1., 2., 1., 2.],
+ * [ 3., 4., 3., 4.] ]
+ * - **n<d**. The input is promoted to be *d*-dimensional by prepending new axes. So a
+ * shape ``(2,2)`` array is promoted to ``(1,2,2)`` for 3-D replication::
+ * tile(x, reps=(2,2,3)) = `[ `[ [ 1., 2., 1., 2., 1., 2.],
+ * [ 3., 4., 3., 4., 3., 4.],
+ * [ 1., 2., 1., 2., 1., 2.],
+ * [ 3., 4., 3., 4., 3., 4.] ],
+ * `[ [ 1., 2., 1., 2., 1., 2.],
+ * [ 3., 4., 3., 4., 3., 4.],
+ * [ 1., 2., 1., 2., 1., 2.],
+ * [ 3., 4., 3., 4., 3., 4.] ] ]
+ * }}}
+ *
+ * @param data Input data array
+ * @param reps The number of times for repeating the tensor a. Each dim size of reps must be a positive integer. If reps has length d, the result will have dimension of max(d, a.ndim); If a.ndim < d, a is promoted to be d-dimensional by prepending new axes. If a.ndim > d, reps is promoted to a.ndim by pre-pending 1's to it.
+ * @return org.apache.mxnet.Symbol
+ */
+@Experimental
+def tile (data : Option[org.apache.mxnet.Symbol] = None, reps : org.apache.mxnet.Shape, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol
+ /**
+ *
+ * {{{
+ *
+ * Returns the indices of the top *k* elements in an input array along the given
+ * axis (by default).
+ * If ret_typ is set to 'value' returns the value of top *k* elements (instead of indices).
+ * In case of ret_typ = 'both', both value and index would be returned.
+ * The returned elements will be sorted.
+ *
+ * Examples::
+ *
+ * x = `[ [ 0.3, 0.2, 0.4],
+ * [ 0.1, 0.3, 0.2] ]
+ *
+ * // returns an index of the largest element on last axis
+ * topk(x) = `[ [ 2.],
+ * [ 1.] ]
+ *
+ * // returns the value of top-2 largest elements on last axis
+ * topk(x, ret_typ='value', k=2) = `[ [ 0.4, 0.3],
+ * [ 0.3, 0.2] ]
+ *
+ * // returns the value of top-2 smallest elements on last axis
+ * topk(x, ret_typ='value', k=2, is_ascend=1) = `[ [ 0.2 , 0.3],
+ * [ 0.1 , 0.2] ]
+ *
+ * // returns the value of top-2 largest elements on axis 0
+ * topk(x, axis=0, ret_typ='value', k=2) = `[ [ 0.3, 0.3, 0.4],
+ * [ 0.1, 0.2, 0.2] ]
+ *
+ * // flattens and then returns list of both values and indices
+ * topk(x, ret_typ='both', k=2) = `[ `[ [ 0.4, 0.3], [ 0.3, 0.2] ] , `[ [ 2., 0.], [ 1., 2.] ] ]
+ *
+ *
+ *
+ * Defined in src/operator/tensor/ordering_op.cc:L68
+ * }}}
+ *
+ * @param data The input array
+ * @param axis Axis along which to choose the top k indices. If not given, the flattened array is used. Default is -1.
+ * @param k Number of top elements to select, should be always smaller than or equal to the element number in the given axis. A global sort is performed if set k < 1.
+ * @param ret_typ The return type.
+ "value" means to return the top k values, "indices" means to return the indices of the top k values, "mask" means to return a mask array containing 0 and 1. 1 means the top k values. "both" means to return a list of both values and indices of top k elements.
+ * @param is_ascend Whether to choose k largest or k smallest elements. Top K largest elements will be chosen if set to false.
+ * @param dtype DType of the output indices when ret_typ is "indices" or "both". An error will be raised if the selected data type cannot precisely represent the indices.
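+ *
+ *        A rough plain-Scala analogue of the default last-axis behaviour described
+ *        above (an illustration only; tie-breaking and dtype handling differ from
+ *        the real operator)::
+ *
+ *          def topkIndices(row: Seq[Float], k: Int): Seq[Int] =
+ *            row.zipWithIndex.sortBy { case (v, _) => -v }.take(k).map(_._2)
+ *          topkIndices(Seq(0.3f, 0.2f, 0.4f), 1)   // Seq(2), as in the first row of topk(x) above
+ *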
+ * @return org.apache.mxnet.Symbol + */ +@Experimental +def topk (data : Option[org.apache.mxnet.Symbol] = None, axis : Option[Int] = None, k : Option[Int] = None, ret_typ : Option[String] = None, is_ascend : Option[Boolean] = None, dtype : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Permutes the dimensions of an array. + * Examples:: + * x = `[ [ 1, 2], + * [ 3, 4] ] + * transpose(x) = `[ [ 1., 3.], + * [ 2., 4.] ] + * x = `[ `[ [ 1., 2.], + * [ 3., 4.] ], + * `[ [ 5., 6.], + * [ 7., 8.] ] ] + * transpose(x) = `[ `[ [ 1., 5.], + * [ 3., 7.] ], + * `[ [ 2., 6.], + * [ 4., 8.] ] ] + * transpose(x, axes=(1,0,2)) = `[ `[ [ 1., 2.], + * [ 5., 6.] ], + * `[ [ 3., 4.], + * [ 7., 8.] ] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L328 + * }}} + * + * @param data Source input + * @param axes Target axis order. By default the axes will be inverted. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def transpose (data : Option[org.apache.mxnet.Symbol] = None, axes : Option[org.apache.mxnet.Shape] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Return the element-wise truncated value of the input. + * + * The truncated value of the scalar x is the nearest integer i which is closer to + * zero than x is. In short, the fractional part of the signed number x is discarded. + * + * Example:: + * + * trunc([-2.1, -1.9, 1.5, 1.9, 2.1]) = [-2., -1., 1., 1., 2.] + * + * The storage type of ``trunc`` output depends upon the input storage type: + * + * - trunc(default) = default + * - trunc(row_sparse) = row_sparse + * - trunc(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L857 + * }}} + * + * @param data The input array. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def trunc (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Draw random samples from a uniform distribution. + * + * .. note:: The existing alias ``uniform`` is deprecated. + * + * Samples are uniformly distributed over the half-open interval *[low, high)* + * (includes *low*, but excludes *high*). + * + * Example:: + * + * uniform(low=0, high=1, shape=(2,2)) = `[ [ 0.60276335, 0.85794562], + * [ 0.54488319, 0.84725171] ] + * + * + * + * Defined in src/operator/random/sample_op.cc:L96 + * }}} + * + * @param low Lower bound of the distribution. + * @param high Upper bound of the distribution. + * @param shape Shape of the output. + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + * @return org.apache.mxnet.Symbol + */ +@Experimental +def uniform (low : Option[Float] = None, high : Option[Float] = None, shape : Option[org.apache.mxnet.Shape] = None, ctx : Option[String] = None, dtype : Option[String] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Converts an array of flat indices into a batch of index arrays. The operator follows numpy conventions so a single multi index is given by a column of the output matrix. The leading dimension may be left unspecified by using -1 as placeholder. 
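+ *
+ *        A small Scala sketch of unravelling one flat index over a row-major shape
+ *        (an illustration only; the operator additionally accepts -1 as the leading
+ *        dimension, as noted above)::
+ *
+ *          def unravel(flat: Int, shape: Seq[Int]): Seq[Int] = {
+ *            val strides = shape.scanRight(1)(_ * _).tail        // e.g. Seq(6, 1) for shape (7, 6)
+ *            shape.zip(strides).map { case (dim, stride) => flat / stride % dim }
+ *          }
+ *          unravel(22, Seq(7, 6))   // Seq(3, 4), the first column of the example below
+ *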
+ * + * Examples:: + * + * A = [22,41,37] + * unravel(A, shape=(7,6)) = `[ [3,6,6],[4,5,1] ] + * unravel(A, shape=(-1,6)) = `[ [3,6,6],[4,5,1] ] + * + * + * + * Defined in src/operator/tensor/ravel.cc:L68 + * }}} + * + * @param data Array of flat indices + * @param shape Shape of the array into which the multi-indices apply. + * @return org.apache.mxnet.Symbol + */ +@Experimental +def unravel_index (data : Option[org.apache.mxnet.Symbol] = None, shape : Option[org.apache.mxnet.Shape] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Return the elements, either from x or y, depending on the condition. + * + * Given three ndarrays, condition, x, and y, return an ndarray with the elements from x or y, + * depending on the elements from condition are true or false. x and y must have the same shape. + * If condition has the same shape as x, each element in the output array is from x if the + * corresponding element in the condition is true, and from y if false. + * + * If condition does not have the same shape as x, it must be a 1D array whose size is + * the same as x's first dimension size. Each row of the output array is from x's row + * if the corresponding element from condition is true, and from y's row if false. + * + * Note that all non-zero values are interpreted as ``True`` in condition. + * + * Examples:: + * + * x = `[ [1, 2], [3, 4] ] + * y = `[ [5, 6], [7, 8] ] + * cond = `[ [0, 1], [-1, 0] ] + * + * where(cond, x, y) = `[ [5, 2], [3, 8] ] + * + * csr_cond = cast_storage(cond, 'csr') + * + * where(csr_cond, x, y) = `[ [5, 2], [3, 8] ] + * + * + * + * Defined in src/operator/tensor/control_flow_op.cc:L57 + * }}} + * + * @param condition condition array + * @param x + * @param y + * @return org.apache.mxnet.Symbol + */ +@Experimental +def where (condition : Option[org.apache.mxnet.Symbol] = None, x : Option[org.apache.mxnet.Symbol] = None, y : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol + /** + * + * {{{ + * + * Return an array of zeros with the same shape, type and storage type + * as the input array. + * + * The storage type of ``zeros_like`` output depends on the storage type of the input + * + * - zeros_like(row_sparse) = row_sparse + * - zeros_like(csr) = csr + * - zeros_like(default) = default + * + * Examples:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * zeros_like(x) = `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * }}} + * + * @param data The input + * @return org.apache.mxnet.Symbol + */ +@Experimental +def zeros_like (data : Option[org.apache.mxnet.Symbol] = None, name : String = null, attr : Map[String, String] = null): org.apache.mxnet.Symbol +} + diff --git a/scala-package/core/src/main/scala/org/apache/mxnet/SymbolBase.scala b/scala-package/core/src/main/scala/org/apache/mxnet/SymbolBase.scala new file mode 100644 index 000000000..ebb4a9e8c --- /dev/null +++ b/scala-package/core/src/main/scala/org/apache/mxnet/SymbolBase.scala @@ -0,0 +1,9342 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.mxnet + +import org.apache.mxnet.annotation.Experimental + +// scalastyle:off +abstract class SymbolBase { + /** + * + * {{{ + * + * Applies an activation function element-wise to the input. + * + * The following activation functions are supported: + * + * - `relu`: Rectified Linear Unit, :math:`y = max(x, 0)` + * - `sigmoid`: :math:`y = \frac{1}{1 + exp(-x)}` + * - `tanh`: Hyperbolic tangent, :math:`y = \frac{exp(x) - exp(-x)}{exp(x) + exp(-x)}` + * - `softrelu`: Soft ReLU, or SoftPlus, :math:`y = log(1 + exp(x))` + * - `softsign`: :math:`y = \frac{x}{1 + abs(x)}` + * + * + * + * Defined in src/operator/nn/activation.cc:L168 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def Activation(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Batch normalization. + * + * Normalizes a data batch by mean and variance, and applies a scale ``gamma`` as + * well as offset ``beta``. + * + * Assume the input has more than one dimension and we normalize along axis 1. + * We first compute the mean and variance along this axis: + * + * .. math:: + * + * data\_mean[i] = mean(data[:,i,:,...]) \\ + * data\_var[i] = var(data[:,i,:,...]) + * + * Then compute the normalized output, which has the same shape as input, as following: + * + * .. math:: + * + * out[:,i,:,...] = \frac{data[:,i,:,...] - data\_mean[i]}{\sqrt{data\_var[i]+\epsilon}} * gamma[i] + beta[i] + * + * Both *mean* and *var* returns a scalar by treating the input as a vector. + * + * Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` + * have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and + * the inverse of ``data_var``, which are needed for the backward pass. Note that gradient of these + * two outputs are blocked. + * + * Besides the inputs and the outputs, this operator accepts two auxiliary + * states, ``moving_mean`` and ``moving_var``, which are *k*-length + * vectors. They are global statistics for the whole dataset, which are updated + * by:: + * + * moving_mean = moving_mean * momentum + data_mean * (1 - momentum) + * moving_var = moving_var * momentum + data_var * (1 - momentum) + * + * If ``use_global_stats`` is set to be true, then ``moving_mean`` and + * ``moving_var`` are used instead of ``data_mean`` and ``data_var`` to compute + * the output. It is often used during inference. + * + * The parameter ``axis`` specifies which axis of the input shape denotes + * the 'channel' (separately normalized groups). The default is 1. Specifying -1 sets the channel + * axis to be the last item in the input shape. + * + * Both ``gamma`` and ``beta`` are learnable parameters. But if ``fix_gamma`` is true, + * then set ``gamma`` to 1 and its gradient to 0. + * + * .. Note:: + * When ``fix_gamma`` is set to True, no sparse support is provided. If ``fix_gamma is`` set to False, + * the sparse tensors will fallback. 
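+ *
+ *        A plain-Scala sketch of the per-channel normalization formula above (an
+ *        illustration of the arithmetic only; the values, eps, gamma and beta are
+ *        made up)::
+ *
+ *          val channel = Seq(1.0, 2.0, 3.0)                       // one channel of a batch
+ *          val mean = channel.sum / channel.size
+ *          val variance = channel.map(v => (v - mean) * (v - mean)).sum / channel.size
+ *          val (eps, gamma, beta) = (1e-3, 1.0, 0.0)
+ *          val out = channel.map(v => (v - mean) / math.sqrt(variance + eps) * gamma + beta)
+ *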
+ * + * + * + * Defined in src/operator/nn/batch_norm.cc:L571 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def BatchNorm(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Batch normalization. + * + * This operator is DEPRECATED. Perform BatchNorm on the input. + * + * Normalizes a data batch by mean and variance, and applies a scale ``gamma`` as + * well as offset ``beta``. + * + * Assume the input has more than one dimension and we normalize along axis 1. + * We first compute the mean and variance along this axis: + * + * .. math:: + * + * data\_mean[i] = mean(data[:,i,:,...]) \\ + * data\_var[i] = var(data[:,i,:,...]) + * + * Then compute the normalized output, which has the same shape as input, as following: + * + * .. math:: + * + * out[:,i,:,...] = \frac{data[:,i,:,...] - data\_mean[i]}{\sqrt{data\_var[i]+\epsilon}} * gamma[i] + beta[i] + * + * Both *mean* and *var* returns a scalar by treating the input as a vector. + * + * Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` + * have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and + * ``data_var`` as well, which are needed for the backward pass. + * + * Besides the inputs and the outputs, this operator accepts two auxiliary + * states, ``moving_mean`` and ``moving_var``, which are *k*-length + * vectors. They are global statistics for the whole dataset, which are updated + * by:: + * + * moving_mean = moving_mean * momentum + data_mean * (1 - momentum) + * moving_var = moving_var * momentum + data_var * (1 - momentum) + * + * If ``use_global_stats`` is set to be true, then ``moving_mean`` and + * ``moving_var`` are used instead of ``data_mean`` and ``data_var`` to compute + * the output. It is often used during inference. + * + * Both ``gamma`` and ``beta`` are learnable parameters. But if ``fix_gamma`` is true, + * then set ``gamma`` to 1 and its gradient to 0. + * + * There's no sparse support for this operator, and it will exhibit problematic behavior if used with + * sparse tensors. + * + * + * + * Defined in src/operator/batch_norm_v1.cc:L95 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def BatchNorm_v1(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Applies bilinear sampling to input feature map. + * + * Bilinear Sampling is the key of [NIPS2015] \"Spatial Transformer Networks\". The usage of the operator is very similar to remap function in OpenCV, + * except that the operator has the backward pass. + * + * Given :math:`data` and :math:`grid`, then the output is computed by + * + * .. math:: + * x_{src} = grid[batch, 0, y_{dst}, x_{dst}] \\ + * y_{src} = grid[batch, 1, y_{dst}, x_{dst}] \\ + * output[batch, channel, y_{dst}, x_{dst}] = G(data[batch, channel, y_{src}, x_{src}) + * + * :math:`x_{dst}`, :math:`y_{dst}` enumerate all spatial locations in :math:`output`, and :math:`G()` denotes the bilinear interpolation kernel. + * The out-boundary points will be padded with zeros.The shape of the output will be (data.shape[0], data.shape[1], grid.shape[2], grid.shape[3]). + * + * The operator assumes that :math:`data` has 'NCHW' layout and :math:`grid` has been normalized to [-1, 1]. 
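 + *
 + * // Editor's addition - a hypothetical usage sketch of the curried
 + * // (name, attr)(args)(kwargs) signatures declared in this file, using the
 + * // GridGenerator / BilinearSampler pair described below. `sym` stands for
 + * // whichever concrete object implements SymbolBase; Symbol.Variable and the
 + * // exact kwargs encoding are assumptions, not verified API details.
 + *
 + * val data   = Symbol.Variable("data")
 + * val affine = Symbol.Variable("affine")      // flattened 2x3 affine matrix, shape (1, 6)
 + * val grid = sym.GridGenerator(name = "grid")(affine)(
 + *   Map("transform_type" -> "affine", "target_shape" -> "(4, 4)"))
 + * val out = sym.BilinearSampler(name = "sampler")(data, grid)()
 + *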
+ * + * BilinearSampler often cooperates with GridGenerator which generates sampling grids for BilinearSampler. + * GridGenerator supports two kinds of transformation: ``affine`` and ``warp``. + * If users want to design a CustomOp to manipulate :math:`grid`, please firstly refer to the code of GridGenerator. + * + * Example 1:: + * + * ## Zoom out data two times + * data = array(`[ [`[ [1, 4, 3, 6], + * [1, 8, 8, 9], + * [0, 4, 1, 5], + * [1, 0, 1, 3] ] ] ]) + * + * affine_matrix = array(`[ [2, 0, 0], + * [0, 2, 0] ]) + * + * affine_matrix = reshape(affine_matrix, shape=(1, 6)) + * + * grid = GridGenerator(data=affine_matrix, transform_type='affine', target_shape=(4, 4)) + * + * out = BilinearSampler(data, grid) + * + * out + * `[ [`[ [ 0, 0, 0, 0], + * [ 0, 3.5, 6.5, 0], + * [ 0, 1.25, 2.5, 0], + * [ 0, 0, 0, 0] ] ] + * + * + * Example 2:: + * + * ## shift data horizontally by -1 pixel + * + * data = array(`[ [`[ [1, 4, 3, 6], + * [1, 8, 8, 9], + * [0, 4, 1, 5], + * [1, 0, 1, 3] ] ] ]) + * + * warp_maxtrix = array(`[ [`[ [1, 1, 1, 1], + * [1, 1, 1, 1], + * [1, 1, 1, 1], + * [1, 1, 1, 1] ], + * `[ [0, 0, 0, 0], + * [0, 0, 0, 0], + * [0, 0, 0, 0], + * [0, 0, 0, 0] ] ] ]) + * + * grid = GridGenerator(data=warp_matrix, transform_type='warp') + * out = BilinearSampler(data, grid) + * + * out + * `[ [`[ [ 4, 3, 6, 0], + * [ 8, 8, 9, 0], + * [ 4, 1, 5, 0], + * [ 0, 1, 3, 0] ] ] + * + * + * Defined in src/operator/bilinear_sampler.cc:L256 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def BilinearSampler(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Stops gradient computation. + * + * Stops the accumulated gradient of the inputs from flowing through this operator + * in the backward direction. In other words, this operator prevents the contribution + * of its inputs to be taken into account for computing gradients. + * + * Example:: + * + * v1 = [1, 2] + * v2 = [0, 1] + * a = Variable('a') + * b = Variable('b') + * b_stop_grad = stop_gradient(3 * b) + * loss = MakeLoss(b_stop_grad + a) + * + * executor = loss.simple_bind(ctx=cpu(), a=(1,2), b=(1,2)) + * executor.forward(is_train=True, a=v1, b=v2) + * executor.outputs + * [ 1. 5.] + * + * executor.backward() + * executor.grad_arrays + * [ 0. 0.] + * [ 1. 1.] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L327 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def BlockGrad(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Connectionist Temporal Classification Loss. + * + * .. note:: The existing alias ``contrib_CTCLoss`` is deprecated. + * + * The shapes of the inputs and outputs: + * + * - **data**: `(sequence_length, batch_size, alphabet_size)` + * - **label**: `(batch_size, label_sequence_length)` + * - **out**: `(batch_size)` + * + * The `data` tensor consists of sequences of activation vectors (without applying softmax), + * with i-th channel in the last dimension corresponding to i-th label + * for i between 0 and alphabet_size-1 (i.e always 0-indexed). + * Alphabet size should include one additional value reserved for blank label. + * When `blank_label` is ``"first"``, the ``0``-th channel is be reserved for + * activation of blank label, or otherwise if it is "last", ``(alphabet_size-1)``-th channel should be + * reserved for blank label. 
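 + *
 + * // Editor's addition - an illustrative plain-Scala sketch (not part of the
 + * // generated API) of the label padding convention detailed just below, for
 + * // blank_label = "first" (0 is reserved for blank and used as padding):
 + *
 + * val vocab  = Map('a' -> 1, 'b' -> 2, 'c' -> 3)
 + * val batch  = Seq("ba", "cbb", "abac")
 + * val maxLen = batch.map(_.length).max
 + * val labels = batch.map(s => s.map(vocab).padTo(maxLen, 0))
 + * // element-wise: [ [2,1,0,0], [3,2,2,0], [1,2,1,3] ]
 + *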
+ * + * ``label`` is an index matrix of integers. When `blank_label` is ``"first"``, + * the value 0 is then reserved for blank label, and should not be passed in this matrix. Otherwise, + * when `blank_label` is ``"last"``, the value `(alphabet_size-1)` is reserved for blank label. + * + * If a sequence of labels is shorter than *label_sequence_length*, use the special + * padding value at the end of the sequence to conform it to the correct + * length. The padding value is `0` when `blank_label` is ``"first"``, and `-1` otherwise. + * + * For example, suppose the vocabulary is `[a, b, c]`, and in one batch we have three sequences + * 'ba', 'cbb', and 'abac'. When `blank_label` is ``"first"``, we can index the labels as + * `{'a': 1, 'b': 2, 'c': 3}`, and we reserve the 0-th channel for blank label in data tensor. + * The resulting `label` tensor should be padded to be:: + * + * `[ [2, 1, 0, 0], [3, 2, 2, 0], [1, 2, 1, 3] ] + * + * When `blank_label` is ``"last"``, we can index the labels as + * `{'a': 0, 'b': 1, 'c': 2}`, and we reserve the channel index 3 for blank label in data tensor. + * The resulting `label` tensor should be padded to be:: + * + * `[ [1, 0, -1, -1], [2, 1, 1, -1], [0, 1, 0, 2] ] + * + * ``out`` is a list of CTC loss values, one per example in the batch. + * + * See *Connectionist Temporal Classification: Labelling Unsegmented + * Sequence Data with Recurrent Neural Networks*, A. Graves *et al*. for more + * information on the definition and the algorithm. + * + * + * + * Defined in src/operator/nn/ctc_loss.cc:L100 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def CTCLoss(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Casts all elements of the input to a new type. + * + * .. note:: ``Cast`` is deprecated. Use ``cast`` instead. + * + * Example:: + * + * cast([0.9, 1.3], dtype='int32') = [0, 1] + * cast([1e20, 11.1], dtype='float16') = [inf, 11.09375] + * cast([300, 11.1, 10.9, -1, -3], dtype='uint8') = [44, 11, 10, 255, 253] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L665 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def Cast(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Joins input arrays along a given axis. + * + * .. note:: `Concat` is deprecated. Use `concat` instead. + * + * The dimensions of the input arrays should be the same except the axis along + * which they will be concatenated. + * The dimension of the output array along the concatenated axis will be equal + * to the sum of the corresponding dimensions of the input arrays. + * + * The storage type of ``concat`` output depends on storage types of inputs + * + * - concat(csr, csr, ..., csr, dim=0) = csr + * - otherwise, ``concat`` generates output with default storage + * + * Example:: + * + * x = `[ [1,1],[2,2] ] + * y = `[ [3,3],[4,4],[5,5] ] + * z = `[ [6,6], [7,7],[8,8] ] + * + * concat(x,y,z,dim=0) = `[ [ 1., 1.], + * [ 2., 2.], + * [ 3., 3.], + * [ 4., 4.], + * [ 5., 5.], + * [ 6., 6.], + * [ 7., 7.], + * [ 8., 8.] ] + * + * Note that you cannot concat x,y,z along dimension 1 since dimension + * 0 is not the same for all the input arrays. + * + * concat(y,z,dim=1) = `[ [ 3., 3., 6., 6.], + * [ 4., 4., 7., 7.], + * [ 5., 5., 8., 8.] 
] + * + * + * + * Defined in src/operator/nn/concat.cc:L383 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def Concat(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Compute *N*-D convolution on *(N+2)*-D input. + * + * In the 2-D convolution, given input data with shape *(batch_size, + * channel, height, width)*, the output is computed by + * + * .. math:: + * + * out[n,i,:,:] = bias[i] + \sum_{j=0}^{channel} data[n,j,:,:] \star + * weight[i,j,:,:] + * + * where :math:`\star` is the 2-D cross-correlation operator. + * + * For general 2-D convolution, the shapes are + * + * - **data**: *(batch_size, channel, height, width)* + * - **weight**: *(num_filter, channel, kernel[0], kernel[1])* + * - **bias**: *(num_filter,)* + * - **out**: *(batch_size, num_filter, out_height, out_width)*. + * + * Define:: + * + * f(x,k,p,s,d) = floor((x+2*p-d*(k-1)-1)/s)+1 + * + * then we have:: + * + * out_height=f(height, kernel[0], pad[0], stride[0], dilate[0]) + * out_width=f(width, kernel[1], pad[1], stride[1], dilate[1]) + * + * If ``no_bias`` is set to be true, then the ``bias`` term is ignored. + * + * The default data ``layout`` is *NCHW*, namely *(batch_size, channel, height, + * width)*. We can choose other layouts such as *NWC*. + * + * If ``num_group`` is larger than 1, denoted by *g*, then split the input ``data`` + * evenly into *g* parts along the channel axis, and also evenly split ``weight`` + * along the first dimension. Next compute the convolution on the *i*-th part of + * the data with the *i*-th weight part. The output is obtained by concatenating all + * the *g* results. + * + * 1-D convolution does not have *height* dimension but only *width* in space. + * + * - **data**: *(batch_size, channel, width)* + * - **weight**: *(num_filter, channel, kernel[0])* + * - **bias**: *(num_filter,)* + * - **out**: *(batch_size, num_filter, out_width)*. + * + * 3-D convolution adds an additional *depth* dimension besides *height* and + * *width*. The shapes are + * + * - **data**: *(batch_size, channel, depth, height, width)* + * - **weight**: *(num_filter, channel, kernel[0], kernel[1], kernel[2])* + * - **bias**: *(num_filter,)* + * - **out**: *(batch_size, num_filter, out_depth, out_height, out_width)*. + * + * Both ``weight`` and ``bias`` are learnable parameters. + * + * There are other options to tune the performance. + * + * - **cudnn_tune**: enable this option leads to higher startup time but may give + * faster speed. Options are + * + * - **off**: no tuning + * - **limited_workspace**:run test and pick the fastest algorithm that doesn't + * exceed workspace limit. + * - **fastest**: pick the fastest algorithm and ignore workspace limit. + * - **None** (default): the behavior is determined by environment variable + * ``MXNET_CUDNN_AUTOTUNE_DEFAULT``. 0 for off, 1 for limited workspace + * (default), 2 for fastest. + * + * - **workspace**: A large number leads to more (GPU) memory usage but may improve + * the performance. + * + * + * + * Defined in src/operator/nn/convolution.cc:L473 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def Convolution(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * This operator is DEPRECATED. Apply convolution to input then add a bias. 
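 + *
 + * // Editor's addition - an illustrative plain-Scala sketch (not part of the
 + * // generated API) of the output-size rule quoted in the Convolution doc
 + * // above: f(x,k,p,s,d) = floor((x + 2*p - d*(k-1) - 1)/s) + 1
 + *
 + * def convOut(x: Int, k: Int, p: Int, s: Int, d: Int): Int =
 + *   (x + 2 * p - d * (k - 1) - 1) / s + 1   // integer division == floor for non-negative operands
 + *
 + * val outHeight = convOut(x = 224, k = 3, p = 1, s = 2, d = 1)   // 112
 + * val outWidth  = convOut(x = 224, k = 3, p = 1, s = 2, d = 1)   // 112
 + *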
+ * }}} + * + * @return org.apache.mxnet.Symbol + */ +def Convolution_v1(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Applies correlation to inputs. + * + * The correlation layer performs multiplicative patch comparisons between two feature maps. + * + * Given two multi-channel feature maps :math:`f_{1}, f_{2}`, with :math:`w`, :math:`h`, and :math:`c` being their width, height, and number of channels, + * the correlation layer lets the network compare each patch from :math:`f_{1}` with each patch from :math:`f_{2}`. + * + * For now we consider only a single comparison of two patches. The 'correlation' of two patches centered at :math:`x_{1}` in the first map and + * :math:`x_{2}` in the second map is then defined as: + * + * .. math:: + * + * c(x_{1}, x_{2}) = \sum_{o \in [-k,k] \times [-k,k]} + * + * for a square patch of size :math:`K:=2k+1`. + * + * Note that the equation above is identical to one step of a convolution in neural networks, but instead of convolving data with a filter, it convolves data with other + * data. For this reason, it has no training weights. + * + * Computing :math:`c(x_{1}, x_{2})` involves :math:`c * K^{2}` multiplications. Comparing all patch combinations involves :math:`w^{2}*h^{2}` such computations. + * + * Given a maximum displacement :math:`d`, for each location :math:`x_{1}` it computes correlations :math:`c(x_{1}, x_{2})` only in a neighborhood of size :math:`D:=2d+1`, + * by limiting the range of :math:`x_{2}`. We use strides :math:`s_{1}, s_{2}`, to quantize :math:`x_{1}` globally and to quantize :math:`x_{2}` within the neighborhood + * centered around :math:`x_{1}`. + * + * The final output is defined by the following expression: + * + * .. math:: + * out[n, q, i, j] = c(x_{i, j}, x_{q}) + * + * where :math:`i` and :math:`j` enumerate spatial locations in :math:`f_{1}`, and :math:`q` denotes the :math:`q^{th}` neighborhood of :math:`x_{i,j}`. + * + * + * Defined in src/operator/correlation.cc:L198 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def Correlation(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * + * + * .. note:: `Crop` is deprecated. Use `slice` instead. + * + * Crop the 2nd and 3rd dim of input data, with the corresponding size of h_w or + * with width and height of the second input symbol, i.e., with one input, we need h_w to + * specify the crop height and width, otherwise the second input symbol's size will be used + * + * + * Defined in src/operator/crop.cc:L50 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def Crop(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Apply a custom operator implemented in a frontend language (like Python). + * + * Custom operators should override required methods like `forward` and `backward`. + * The custom operator must be registered before it can be used. 
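 + *
 + * // Editor's addition - an illustrative plain-Scala sketch (not part of the
 + * // generated API) of the single-patch correlation score documented for
 + * // `Correlation` a little earlier. The per-offset term is taken to be the
 + * // inner product of the two feature vectors (the FlowNet-style definition
 + * // that the truncated formula above paraphrases).
 + *
 + * def patchScore(f1: (Int, Int) => Array[Float], f2: (Int, Int) => Array[Float],
 + *                x1: (Int, Int), x2: (Int, Int), k: Int): Float = {
 + *   val offsets = for (dy <- -k to k; dx <- -k to k) yield (dy, dx)
 + *   offsets.map { case (dy, dx) =>
 + *     val a = f1(x1._1 + dy, x1._2 + dx)
 + *     val b = f2(x2._1 + dy, x2._2 + dx)
 + *     a.zip(b).map { case (u, v) => u * v }.sum   // inner product over channels
 + *   }.sum
 + * }
 + *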
+ * Please check the tutorial here: https://mxnet.incubator.apache.org/api/faq/new_op + * + * + * + * Defined in src/operator/custom/custom.cc:L546 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def Custom(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes 1D or 2D transposed convolution (aka fractionally strided convolution) of the input tensor. This operation can be seen as the gradient of Convolution operation with respect to its input. Convolution usually reduces the size of the input. Transposed convolution works the other way, going from a smaller input to a larger output while preserving the connectivity pattern. + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def Deconvolution(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Applies dropout operation to input array. + * + * - During training, each element of the input is set to zero with probability p. + * The whole array is rescaled by :math:`1/(1-p)` to keep the expected + * sum of the input unchanged. + * + * - During testing, this operator does not change the input if mode is 'training'. + * If mode is 'always', the same computaion as during training will be applied. + * + * Example:: + * + * random.seed(998) + * input_array = array(`[ [3., 0.5, -0.5, 2., 7.], + * [2., -0.4, 7., 3., 0.2] ]) + * a = symbol.Variable('a') + * dropout = symbol.Dropout(a, p = 0.2) + * executor = dropout.simple_bind(a = input_array.shape) + * + * ## If training + * executor.forward(is_train = True, a = input_array) + * executor.outputs + * `[ [ 3.75 0.625 -0. 2.5 8.75 ] + * [ 2.5 -0.5 8.75 3.75 0. ] ] + * + * ## If testing + * executor.forward(is_train = False, a = input_array) + * executor.outputs + * `[ [ 3. 0.5 -0.5 2. 7. ] + * [ 2. -0.4 7. 3. 0.2 ] ] + * + * + * Defined in src/operator/nn/dropout.cc:L96 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def Dropout(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Adds all input arguments element-wise. + * + * .. math:: + * add\_n(a_1, a_2, ..., a_n) = a_1 + a_2 + ... + a_n + * + * ``add_n`` is potentially more efficient than calling ``add`` by `n` times. + * + * The storage type of ``add_n`` output depends on storage types of inputs + * + * - add_n(row_sparse, row_sparse, ..) = row_sparse + * - add_n(default, csr, default) = default + * - add_n(any input combinations longer than 4 (>4) with at least one default type) = default + * - otherwise, ``add_n`` falls all inputs back to default storage and generates default storage + * + * + * + * Defined in src/operator/tensor/elemwise_sum.cc:L155 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def ElementWiseSum(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Maps integer indices to vector representations (embeddings). + * + * This operator maps words to real-valued vectors in a high-dimensional space, + * called word embeddings. These embeddings can capture semantic and syntactic properties of the words. 
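 + *
 + * // Editor's addition - an illustrative plain-Scala sketch (not part of the
 + * // generated API): the lookup itself is just row indexing into the
 + * // (input_dim x output_dim) weight matrix, matching the Examples below.
 + *
 + * val weight = Array(
 + *   Array( 0f,  1f,  2f,  3f,  4f),
 + *   Array( 5f,  6f,  7f,  8f,  9f),
 + *   Array(10f, 11f, 12f, 13f, 14f),
 + *   Array(15f, 16f, 17f, 18f, 19f))
 + * val x = Array(Array(1, 3), Array(0, 2))
 + * val embedded = x.map(_.map(i => weight(i)))   // shape (2, 2, 5)
 + *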
+ * For example, it has been noted that in the learned embedding spaces, similar words tend + * to be close to each other and dissimilar words far apart. + * + * For an input array of shape (d1, ..., dK), + * the shape of an output array is (d1, ..., dK, output_dim). + * All the input values should be integers in the range [0, input_dim). + * + * If the input_dim is ip0 and output_dim is op0, then shape of the embedding weight matrix must be + * (ip0, op0). + * + * When "sparse_grad" is False, if any index mentioned is too large, it is replaced by the index that + * addresses the last vector in an embedding matrix. + * When "sparse_grad" is True, an error will be raised if invalid indices are found. + * + * Examples:: + * + * input_dim = 4 + * output_dim = 5 + * + * // Each row in weight matrix y represents a word. So, y = (w0,w1,w2,w3) + * y = `[ [ 0., 1., 2., 3., 4.], + * [ 5., 6., 7., 8., 9.], + * [ 10., 11., 12., 13., 14.], + * [ 15., 16., 17., 18., 19.] ] + * + * // Input array x represents n-grams(2-gram). So, x = [(w1,w3), (w0,w2)] + * x = `[ [ 1., 3.], + * [ 0., 2.] ] + * + * // Mapped input x to its vector representation y. + * Embedding(x, y, 4, 5) = `[ `[ [ 5., 6., 7., 8., 9.], + * [ 15., 16., 17., 18., 19.] ], + * + * `[ [ 0., 1., 2., 3., 4.], + * [ 10., 11., 12., 13., 14.] ] ] + * + * + * The storage type of weight can be either row_sparse or default. + * + * .. Note:: + * + * If "sparse_grad" is set to True, the storage type of gradient w.r.t weights will be + * "row_sparse". Only a subset of optimizers support sparse gradients, including SGD, AdaGrad + * and Adam. Note that by default lazy updates is turned on, which may perform differently + * from standard updates. For more details, please check the Optimization API at: + * https://mxnet.incubator.apache.org/api/python/optimization/optimization.html + * + * + * + * Defined in src/operator/tensor/indexing_op.cc:L539 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def Embedding(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Flattens the input array into a 2-D array by collapsing the higher dimensions. + * .. note:: `Flatten` is deprecated. Use `flatten` instead. + * For an input array with shape ``(d1, d2, ..., dk)``, `flatten` operation reshapes + * the input array into an output array of shape ``(d1, d2*...*dk)``. + * Note that the behavior of this function is different from numpy.ndarray.flatten, + * which behaves similar to mxnet.ndarray.reshape((-1,)). + * Example:: + * x = `[ [ + * [1,2,3], + * [4,5,6], + * [7,8,9] + * ], + * [ [1,2,3], + * [4,5,6], + * [7,8,9] + * ] ], + * flatten(x) = `[ [ 1., 2., 3., 4., 5., 6., 7., 8., 9.], + * [ 1., 2., 3., 4., 5., 6., 7., 8., 9.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L250 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def Flatten(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Applies a linear transformation: :math:`Y = XW^T + b`. + * + * If ``flatten`` is set to be true, then the shapes are: + * + * - **data**: `(batch_size, x1, x2, ..., xn)` + * - **weight**: `(num_hidden, x1 * x2 * ... 
* xn)` + * - **bias**: `(num_hidden,)` + * - **out**: `(batch_size, num_hidden)` + * + * If ``flatten`` is set to be false, then the shapes are: + * + * - **data**: `(x1, x2, ..., xn, input_dim)` + * - **weight**: `(num_hidden, input_dim)` + * - **bias**: `(num_hidden,)` + * - **out**: `(x1, x2, ..., xn, num_hidden)` + * + * The learnable parameters include both ``weight`` and ``bias``. + * + * If ``no_bias`` is set to be true, then the ``bias`` term is ignored. + * + * .. Note:: + * + * The sparse support for FullyConnected is limited to forward evaluation with `row_sparse` + * weight and bias, where the length of `weight.indices` and `bias.indices` must be equal + * to `num_hidden`. This could be useful for model inference with `row_sparse` weights + * trained with importance sampling or noise contrastive estimation. + * + * To compute linear transformation with 'csr' sparse data, sparse.dot is recommended instead + * of sparse.FullyConnected. + * + * + * + * Defined in src/operator/nn/fully_connected.cc:L291 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def FullyConnected(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Generates 2D sampling grid for bilinear sampling. + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def GridGenerator(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Group normalization. + * + * The input channels are separated into ``num_groups`` groups, each containing ``num_channels / num_groups`` channels. + * The mean and standard-deviation are calculated separately over the each group. + * + * .. math:: + * + * data = data.reshape((N, num_groups, C // num_groups, ...)) + * out = \frac{data - mean(data, axis)}{\sqrt{var(data, axis) + \epsilon}} * gamma + beta + * + * Both ``gamma`` and ``beta`` are learnable parameters. + * + * + * + * Defined in src/operator/nn/group_norm.cc:L77 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def GroupNorm(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Apply a sparse regularization to the output a sigmoid activation function. + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def IdentityAttachKLSparseReg(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Applies instance normalization to the n-dimensional input array. + * + * This operator takes an n-dimensional input array where (n>2) and normalizes + * the input using the following formula: + * + * .. math:: + * + * out = \frac{x - mean[data]}{ \sqrt{Var[data]} + \epsilon} * gamma + beta + * + * This layer is similar to batch normalization layer (`BatchNorm`) + * with two differences: first, the normalization is + * carried out per example (instance), not over a batch. Second, the + * same normalization is applied both at test and train time. This + * operation is also known as `contrast normalization`. + * + * If the input data is of shape [batch, channel, spacial_dim1, spacial_dim2, ...], + * `gamma` and `beta` parameters must be vectors of shape [channel]. + * + * This implementation is based on this paper [1]_ + * + * .. 
[1] Instance Normalization: The Missing Ingredient for Fast Stylization, + * D. Ulyanov, A. Vedaldi, V. Lempitsky, 2016 (arXiv:1607.08022v2). + * + * Examples:: + * + * // Input of shape (2,1,2) + * x = `[ `[ [ 1.1, 2.2] ], + * `[ [ 3.3, 4.4] ] ] + * + * // gamma parameter of length 1 + * gamma = [1.5] + * + * // beta parameter of length 1 + * beta = [0.5] + * + * // Instance normalization is calculated with the above formula + * InstanceNorm(x,gamma,beta) = `[ `[ [-0.997527 , 1.99752665] ], + * `[ [-0.99752653, 1.99752724] ] ] + * + * + * + * Defined in src/operator/instance_norm.cc:L95 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def InstanceNorm(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Normalize the input array using the L2 norm. + * + * For 1-D NDArray, it computes:: + * + * out = data / sqrt(sum(data ** 2) + eps) + * + * For N-D NDArray, if the input array has shape (N, N, ..., N), + * + * with ``mode`` = ``instance``, it normalizes each instance in the multidimensional + * array by its L2 norm.:: + * + * for i in 0...N + * out[i,:,:,...,:] = data[i,:,:,...,:] / sqrt(sum(data[i,:,:,...,:] ** 2) + eps) + * + * with ``mode`` = ``channel``, it normalizes each channel in the array by its L2 norm.:: + * + * for i in 0...N + * out[:,i,:,...,:] = data[:,i,:,...,:] / sqrt(sum(data[:,i,:,...,:] ** 2) + eps) + * + * with ``mode`` = ``spatial``, it normalizes the cross channel norm for each position + * in the array by its L2 norm.:: + * + * for dim in 2...N + * for i in 0...N + * out[.....,i,...] = take(out, indices=i, axis=dim) / sqrt(sum(take(out, indices=i, axis=dim) ** 2) + eps) + * -dim- + * + * Example:: + * + * x = `[ `[ [1,2], + * [3,4] ], + * `[ [2,2], + * [5,6] ] ] + * + * L2Normalization(x, mode='instance') + * =`[ `[ [ 0.18257418 0.36514837] + * [ 0.54772252 0.73029673] ] + * `[ [ 0.24077171 0.24077171] + * [ 0.60192931 0.72231513] ] ] + * + * L2Normalization(x, mode='channel') + * =`[ `[ [ 0.31622776 0.44721359] + * [ 0.94868326 0.89442718] ] + * `[ [ 0.37139067 0.31622776] + * [ 0.92847669 0.94868326] ] ] + * + * L2Normalization(x, mode='spatial') + * =`[ `[ [ 0.44721359 0.89442718] + * [ 0.60000002 0.80000001] ] + * `[ [ 0.70710677 0.70710677] + * [ 0.6401844 0.76822126] ] ] + * + * + * + * Defined in src/operator/l2_normalization.cc:L196 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def L2Normalization(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Applies local response normalization to the input. + * + * The local response normalization layer performs "lateral inhibition" by normalizing + * over local input regions. + * + * If :math:`a_{x,y}^{i}` is the activity of a neuron computed by applying kernel :math:`i` at position + * :math:`(x, y)` and then applying the ReLU nonlinearity, the response-normalized + * activity :math:`b_{x,y}^{i}` is given by the expression: + * + * .. math:: + * b_{x,y}^{i} = \frac{a_{x,y}^{i}}{\Bigg({k + \frac{\alpha}{n} \sum_{j=max(0, i-\frac{n}{2})}^{min(N-1, i+\frac{n}{2})} (a_{x,y}^{j})^{2}}\Bigg)^{\beta}} + * + * where the sum runs over :math:`n` "adjacent" kernel maps at the same spatial position, and :math:`N` is the total + * number of kernels in the layer. 
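 + *
 + * // Editor's addition - an illustrative plain-Scala sketch (not part of the
 + * // generated API) of the response normalization above, applied to the
 + * // channel vector at a single spatial position:
 + *
 + * val a = Array(1.0f, 2.0f, 3.0f, 4.0f)            // activations across channels
 + * val (alpha, beta, knorm, nsize) = (1e-4f, 0.75f, 2.0f, 3)
 + * val b = a.indices.map { i =>
 + *   val lo = math.max(0, i - nsize / 2)
 + *   val hi = math.min(a.length - 1, i + nsize / 2)
 + *   val sumSq = (lo to hi).map(j => a(j) * a(j)).sum
 + *   a(i) / math.pow(knorm + (alpha / nsize) * sumSq, beta).toFloat
 + * }
 + *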
+ * + * + * + * Defined in src/operator/nn/lrn.cc:L164 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def LRN(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Layer normalization. + * + * Normalizes the channels of the input tensor by mean and variance, and applies a scale ``gamma`` as + * well as offset ``beta``. + * + * Assume the input has more than one dimension and we normalize along axis 1. + * We first compute the mean and variance along this axis and then + * compute the normalized output, which has the same shape as input, as following: + * + * .. math:: + * + * out = \frac{data - mean(data, axis)}{\sqrt{var(data, axis) + \epsilon}} * gamma + beta + * + * Both ``gamma`` and ``beta`` are learnable parameters. + * + * Unlike BatchNorm and InstanceNorm, the *mean* and *var* are computed along the channel dimension. + * + * Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` + * have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and + * ``data_std``. Note that no gradient will be passed through these two outputs. + * + * The parameter ``axis`` specifies which axis of the input shape denotes + * the 'channel' (separately normalized groups). The default is -1, which sets the channel + * axis to be the last item in the input shape. + * + * + * + * Defined in src/operator/nn/layer_norm.cc:L156 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def LayerNorm(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Applies Leaky rectified linear unit activation element-wise to the input. + * + * Leaky ReLUs attempt to fix the "dying ReLU" problem by allowing a small `slope` + * when the input is negative and has a slope of one when input is positive. + * + * The following modified ReLU Activation functions are supported: + * + * - *elu*: Exponential Linear Unit. `y = x > 0 ? x : slope * (exp(x)-1)` + * - *selu*: Scaled Exponential Linear Unit. `y = lambda * (x > 0 ? x : alpha * (exp(x) - 1))` where + * *lambda = 1.0507009873554804934193349852946* and *alpha = 1.6732632423543772848170429916717*. + * - *leaky*: Leaky ReLU. `y = x > 0 ? x : slope * x` + * - *prelu*: Parametric ReLU. This is same as *leaky* except that `slope` is learnt during training. + * - *rrelu*: Randomized ReLU. same as *leaky* but the `slope` is uniformly and randomly chosen from + * *[lower_bound, upper_bound)* for training, while fixed to be + * *(lower_bound+upper_bound)/2* for inference. + * + * + * + * Defined in src/operator/leaky_relu.cc:L161 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def LeakyReLU(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes and optimizes for squared loss during backward propagation. + * Just outputs ``data`` during forward propagation. + * + * If :math:`\hat{y}_i` is the predicted value of the i-th sample, and :math:`y_i` is the corresponding target value, + * then the squared loss estimated over :math:`n` samples is defined as + * + * :math:`\text{SquaredLoss}(\textbf{Y}, \hat{\textbf{Y}} ) = \frac{1}{n} \sum_{i=0}^{n-1} \lVert \textbf{y}_i - \hat{\textbf{y}}_i \rVert_2` + * + * .. 
note:: + * Use the LinearRegressionOutput as the final output layer of a net. + * + * The storage type of ``label`` can be ``default`` or ``csr`` + * + * - LinearRegressionOutput(default, default) = default + * - LinearRegressionOutput(default, csr) = default + * + * By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. + * The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. + * + * + * + * Defined in src/operator/regression_output.cc:L92 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def LinearRegressionOutput(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Applies a logistic function to the input. + * + * The logistic function, also known as the sigmoid function, is computed as + * :math:`\frac{1}{1+exp(-\textbf{x})}`. + * + * Commonly, the sigmoid is used to squash the real-valued output of a linear model + * :math:`wTx+b` into the [0,1] range so that it can be interpreted as a probability. + * It is suitable for binary classification or probability prediction tasks. + * + * .. note:: + * Use the LogisticRegressionOutput as the final output layer of a net. + * + * The storage type of ``label`` can be ``default`` or ``csr`` + * + * - LogisticRegressionOutput(default, default) = default + * - LogisticRegressionOutput(default, csr) = default + * + * The loss function used is the Binary Cross Entropy Loss: + * + * :math:`-{(y\log(p) + (1 - y)\log(1 - p))}` + * + * Where `y` is the ground truth probability of positive outcome for a given example, and `p` the probability predicted by the model. By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. + * The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. + * + * + * + * Defined in src/operator/regression_output.cc:L152 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def LogisticRegressionOutput(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes mean absolute error of the input. + * + * MAE is a risk metric corresponding to the expected value of the absolute error. + * + * If :math:`\hat{y}_i` is the predicted value of the i-th sample, and :math:`y_i` is the corresponding target value, + * then the mean absolute error (MAE) estimated over :math:`n` samples is defined as + * + * :math:`\text{MAE}(\textbf{Y}, \hat{\textbf{Y}} ) = \frac{1}{n} \sum_{i=0}^{n-1} \lVert \textbf{y}_i - \hat{\textbf{y}}_i \rVert_1` + * + * .. note:: + * Use the MAERegressionOutput as the final output layer of a net. + * + * The storage type of ``label`` can be ``default`` or ``csr`` + * + * - MAERegressionOutput(default, default) = default + * - MAERegressionOutput(default, csr) = default + * + * By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. + * The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. 
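 + *
 + * // Editor's addition - an illustrative plain-Scala sketch (not part of the
 + * // generated API) of the mean absolute error defined above, for a batch of
 + * // two samples with two regression outputs each:
 + *
 + * val predicted = Array(Array(0.1f, 0.9f), Array(0.4f, 0.6f))
 + * val target    = Array(Array(0.0f, 1.0f), Array(1.0f, 0.0f))
 + * val mae = predicted.zip(target).map { case (p, t) =>
 + *   p.zip(t).map { case (pi, ti) => math.abs(pi - ti) }.sum   // L1 norm per sample
 + * }.sum / predicted.length                                    // ~= 0.7f
 + *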
+ * + * + * + * Defined in src/operator/regression_output.cc:L120 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def MAERegressionOutput(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Make your own loss function in network construction. + * + * This operator accepts a customized loss function symbol as a terminal loss and + * the symbol should be an operator with no backward dependency. + * The output of this function is the gradient of loss with respect to the input data. + * + * For example, if you are a making a cross entropy loss function. Assume ``out`` is the + * predicted output and ``label`` is the true label, then the cross entropy can be defined as:: + * + * cross_entropy = label * log(out) + (1 - label) * log(1 - out) + * loss = MakeLoss(cross_entropy) + * + * We will need to use ``MakeLoss`` when we are creating our own loss function or we want to + * combine multiple loss functions. Also we may want to stop some variables' gradients + * from backpropagation. See more detail in ``BlockGrad`` or ``stop_gradient``. + * + * In addition, we can give a scale to the loss by setting ``grad_scale``, + * so that the gradient of the loss will be rescaled in the backpropagation. + * + * .. note:: This operator should be used as a Symbol instead of NDArray. + * + * + * + * Defined in src/operator/make_loss.cc:L71 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def MakeLoss(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Pads an input array with a constant or edge values of the array. + * + * .. note:: `Pad` is deprecated. Use `pad` instead. + * + * .. note:: Current implementation only supports 4D and 5D input arrays with padding applied + * only on axes 1, 2 and 3. Expects axes 4 and 5 in `pad_width` to be zero. + * + * This operation pads an input array with either a `constant_value` or edge values + * along each axis of the input array. The amount of padding is specified by `pad_width`. + * + * `pad_width` is a tuple of integer padding widths for each axis of the format + * ``(before_1, after_1, ... , before_N, after_N)``. The `pad_width` should be of length ``2*N`` + * where ``N`` is the number of dimensions of the array. + * + * For dimension ``N`` of the input array, ``before_N`` and ``after_N`` indicates how many values + * to add before and after the elements of the array along dimension ``N``. + * The widths of the higher two dimensions ``before_1``, ``after_1``, ``before_2``, + * ``after_2`` must be 0. + * + * Example:: + * + * x = `[ [`[ [ 1. 2. 3.] + * [ 4. 5. 6.] ] + * + * `[ [ 7. 8. 9.] + * [ 10. 11. 12.] ] ] + * + * + * `[ `[ [ 11. 12. 13.] + * [ 14. 15. 16.] ] + * + * `[ [ 17. 18. 19.] + * [ 20. 21. 22.] ] ] ] + * + * pad(x,mode="edge", pad_width=(0,0,0,0,1,1,1,1)) = + * + * `[ [`[ [ 1. 1. 2. 3. 3.] + * [ 1. 1. 2. 3. 3.] + * [ 4. 4. 5. 6. 6.] + * [ 4. 4. 5. 6. 6.] ] + * + * `[ [ 7. 7. 8. 9. 9.] + * [ 7. 7. 8. 9. 9.] + * [ 10. 10. 11. 12. 12.] + * [ 10. 10. 11. 12. 12.] ] ] + * + * + * `[ `[ [ 11. 11. 12. 13. 13.] + * [ 11. 11. 12. 13. 13.] + * [ 14. 14. 15. 16. 16.] + * [ 14. 14. 15. 16. 16.] ] + * + * `[ [ 17. 17. 18. 19. 19.] + * [ 17. 17. 18. 19. 19.] + * [ 20. 20. 21. 22. 22.] + * [ 20. 20. 21. 22. 22.] 
] ] ] + * + * pad(x, mode="constant", constant_value=0, pad_width=(0,0,0,0,1,1,1,1)) = + * + * `[ [`[ [ 0. 0. 0. 0. 0.] + * [ 0. 1. 2. 3. 0.] + * [ 0. 4. 5. 6. 0.] + * [ 0. 0. 0. 0. 0.] ] + * + * `[ [ 0. 0. 0. 0. 0.] + * [ 0. 7. 8. 9. 0.] + * [ 0. 10. 11. 12. 0.] + * [ 0. 0. 0. 0. 0.] ] ] + * + * + * `[ `[ [ 0. 0. 0. 0. 0.] + * [ 0. 11. 12. 13. 0.] + * [ 0. 14. 15. 16. 0.] + * [ 0. 0. 0. 0. 0.] ] + * + * `[ [ 0. 0. 0. 0. 0.] + * [ 0. 17. 18. 19. 0.] + * [ 0. 20. 21. 22. 0.] + * [ 0. 0. 0. 0. 0.] ] ] ] + * + * + * + * + * Defined in src/operator/pad.cc:L766 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def Pad(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Performs pooling on the input. + * + * The shapes for 1-D pooling are + * + * - **data** and **out**: *(batch_size, channel, width)* (NCW layout) or + * *(batch_size, width, channel)* (NWC layout), + * + * The shapes for 2-D pooling are + * + * - **data** and **out**: *(batch_size, channel, height, width)* (NCHW layout) or + * *(batch_size, height, width, channel)* (NHWC layout), + * + * out_height = f(height, kernel[0], pad[0], stride[0]) + * out_width = f(width, kernel[1], pad[1], stride[1]) + * + * The definition of *f* depends on ``pooling_convention``, which has two options: + * + * - **valid** (default):: + * + * f(x, k, p, s) = floor((x+2*p-k)/s)+1 + * + * - **full**, which is compatible with Caffe:: + * + * f(x, k, p, s) = ceil((x+2*p-k)/s)+1 + * + * When ``global_pool`` is set to be true, then global pooling is performed. It will reset + * ``kernel=(height, width)`` and set the appropiate padding to 0. + * + * Three pooling options are supported by ``pool_type``: + * + * - **avg**: average pooling + * - **max**: max pooling + * - **sum**: sum pooling + * - **lp**: Lp pooling + * + * For 3-D pooling, an additional *depth* dimension is added before + * *height*. Namely the input data and output will have shape *(batch_size, channel, depth, + * height, width)* (NCDHW layout) or *(batch_size, depth, height, width, channel)* (NDHWC layout). + * + * Notes on Lp pooling: + * + * Lp pooling was first introduced by this paper: https://arxiv.org/pdf/1204.3968.pdf. + * L-1 pooling is simply sum pooling, while L-inf pooling is simply max pooling. + * We can see that Lp pooling stands between those two, in practice the most common value for p is 2. + * + * For each window ``X``, the mathematical expression for Lp pooling is: + * + * :math:`f(X) = \sqrt[p]{\sum_{x}^{X} x^p}` + * + * + * + * Defined in src/operator/nn/pooling.cc:L417 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def Pooling(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * This operator is DEPRECATED. + * Perform pooling on the input. 
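 + *
 + * // Editor's addition - an illustrative plain-Scala sketch (not part of the
 + * // generated API) of the two pooling output-size conventions (valid / full)
 + * // documented for Pooling above:
 + *
 + * def poolValid(x: Int, k: Int, p: Int, s: Int): Int =
 + *   (x + 2 * p - k) / s + 1                          // floor for non-negative operands
 + * def poolFull(x: Int, k: Int, p: Int, s: Int): Int =
 + *   math.ceil((x + 2.0 * p - k) / s).toInt + 1
 + *
 + * val valid = poolValid(x = 6, k = 3, p = 0, s = 2)  // 2
 + * val full  = poolFull(x = 6, k = 3, p = 0, s = 2)   // 3
 + *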
+ * + * The shapes for 2-D pooling is + * + * - **data**: *(batch_size, channel, height, width)* + * - **out**: *(batch_size, num_filter, out_height, out_width)*, with:: + * + * out_height = f(height, kernel[0], pad[0], stride[0]) + * out_width = f(width, kernel[1], pad[1], stride[1]) + * + * The definition of *f* depends on ``pooling_convention``, which has two options: + * + * - **valid** (default):: + * + * f(x, k, p, s) = floor((x+2*p-k)/s)+1 + * + * - **full**, which is compatible with Caffe:: + * + * f(x, k, p, s) = ceil((x+2*p-k)/s)+1 + * + * But ``global_pool`` is set to be true, then do a global pooling, namely reset + * ``kernel=(height, width)``. + * + * Three pooling options are supported by ``pool_type``: + * + * - **avg**: average pooling + * - **max**: max pooling + * - **sum**: sum pooling + * + * 1-D pooling is special case of 2-D pooling with *weight=1* and + * *kernel[1]=1*. + * + * For 3-D pooling, an additional *depth* dimension is added before + * *height*. Namely the input data will have shape *(batch_size, channel, depth, + * height, width)*. + * + * + * + * Defined in src/operator/pooling_v1.cc:L104 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def Pooling_v1(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Applies recurrent layers to input data. Currently, vanilla RNN, LSTM and GRU are + * implemented, with both multi-layer and bidirectional support. + * + * When the input data is of type float32 and the environment variables MXNET_CUDA_ALLOW_TENSOR_CORE + * and MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION are set to 1, this operator will try to use + * pseudo-float16 precision (float32 math with float16 I/O) precision in order to use + * Tensor Cores on suitable NVIDIA GPUs. This can sometimes give significant speedups. + * + * **Vanilla RNN** + * + * Applies a single-gate recurrent layer to input X. Two kinds of activation function are supported: + * ReLU and Tanh. + * + * With ReLU activation function: + * + * .. math:: + * h_t = relu(W_{ih} * x_t + b_{ih} + W_{hh} * h_{(t-1)} + b_{hh}) + * + * With Tanh activtion function: + * + * .. math:: + * h_t = \tanh(W_{ih} * x_t + b_{ih} + W_{hh} * h_{(t-1)} + b_{hh}) + * + * Reference paper: Finding structure in time - Elman, 1988. + * https://crl.ucsd.edu/~elman/Papers/fsit.pdf + * + * **LSTM** + * + * Long Short-Term Memory - Hochreiter, 1997. http://www.bioinf.jku.at/publications/older/2604.pdf + * + * .. math:: + * \begin{array}{ll} + * i_t = \mathrm{sigmoid}(W_{ii} x_t + b_{ii} + W_{hi} h_{(t-1)} + b_{hi}) \\ + * f_t = \mathrm{sigmoid}(W_{if} x_t + b_{if} + W_{hf} h_{(t-1)} + b_{hf}) \\ + * g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hc} h_{(t-1)} + b_{hg}) \\ + * o_t = \mathrm{sigmoid}(W_{io} x_t + b_{io} + W_{ho} h_{(t-1)} + b_{ho}) \\ + * c_t = f_t * c_{(t-1)} + i_t * g_t \\ + * h_t = o_t * \tanh(c_t) + * \end{array} + * + * **GRU** + * + * Gated Recurrent Unit - Cho et al. 2014. http://arxiv.org/abs/1406.1078 + * + * The definition of GRU here is slightly different from paper but compatible with CUDNN. + * + * .. 
math:: + * \begin{array}{ll} + * r_t = \mathrm{sigmoid}(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\ + * z_t = \mathrm{sigmoid}(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\ + * n_t = \tanh(W_{in} x_t + b_{in} + r_t * (W_{hn} h_{(t-1)}+ b_{hn})) \\ + * h_t = (1 - z_t) * n_t + z_t * h_{(t-1)} \\ + * \end{array} + * + * + * Defined in src/operator/rnn.cc:L354 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def RNN(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Performs region of interest(ROI) pooling on the input array. + * + * ROI pooling is a variant of a max pooling layer, in which the output size is fixed and + * region of interest is a parameter. Its purpose is to perform max pooling on the inputs + * of non-uniform sizes to obtain fixed-size feature maps. ROI pooling is a neural-net + * layer mostly used in training a `Fast R-CNN` network for object detection. + * + * This operator takes a 4D feature map as an input array and region proposals as `rois`, + * then it pools over sub-regions of input and produces a fixed-sized output array + * regardless of the ROI size. + * + * To crop the feature map accordingly, you can resize the bounding box coordinates + * by changing the parameters `rois` and `spatial_scale`. + * + * The cropped feature maps are pooled by standard max pooling operation to a fixed size output + * indicated by a `pooled_size` parameter. batch_size will change to the number of region + * bounding boxes after `ROIPooling`. + * + * The size of each region of interest doesn't have to be perfectly divisible by + * the number of pooling sections(`pooled_size`). + * + * Example:: + * + * x = `[ [`[ [ 0., 1., 2., 3., 4., 5.], + * [ 6., 7., 8., 9., 10., 11.], + * [ 12., 13., 14., 15., 16., 17.], + * [ 18., 19., 20., 21., 22., 23.], + * [ 24., 25., 26., 27., 28., 29.], + * [ 30., 31., 32., 33., 34., 35.], + * [ 36., 37., 38., 39., 40., 41.], + * [ 42., 43., 44., 45., 46., 47.] ] ] ] + * + * // region of interest i.e. bounding box coordinates. + * y = `[ [0,0,0,4,4] ] + * + * // returns array of shape (2,2) according to the given roi with max pooling. + * ROIPooling(x, y, (2,2), 1.0) = `[ [`[ [ 14., 16.], + * [ 26., 28.] ] ] ] + * + * // region of interest is changed due to the change in `spacial_scale` parameter. + * ROIPooling(x, y, (2,2), 0.7) = `[ [`[ [ 7., 9.], + * [ 19., 21.] ] ] ] + * + * + * + * Defined in src/operator/roi_pooling.cc:L225 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def ROIPooling(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Reshapes the input array. + * .. note:: ``Reshape`` is deprecated, use ``reshape`` + * Given an array and a shape, this function returns a copy of the array in the new shape. + * The shape is a tuple of integers such as (2,3,4). The size of the new shape should be same as the size of the input array. + * Example:: + * reshape([1,2,3,4], shape=(2,2)) = `[ [1,2], [3,4] ] + * Some dimensions of the shape can take special values from the set {0, -1, -2, -3, -4}. The significance of each is explained below: + * - ``0`` copy this dimension from the input to the output shape. 
+ * Example:: + * - input shape = (2,3,4), shape = (4,0,2), output shape = (4,3,2) + * - input shape = (2,3,4), shape = (2,0,0), output shape = (2,3,4) + * - ``-1`` infers the dimension of the output shape by using the remainder of the input dimensions + * keeping the size of the new array same as that of the input array. + * At most one dimension of shape can be -1. + * Example:: + * - input shape = (2,3,4), shape = (6,1,-1), output shape = (6,1,4) + * - input shape = (2,3,4), shape = (3,-1,8), output shape = (3,1,8) + * - input shape = (2,3,4), shape=(-1,), output shape = (24,) + * - ``-2`` copy all/remainder of the input dimensions to the output shape. + * Example:: + * - input shape = (2,3,4), shape = (-2,), output shape = (2,3,4) + * - input shape = (2,3,4), shape = (2,-2), output shape = (2,3,4) + * - input shape = (2,3,4), shape = (-2,1,1), output shape = (2,3,4,1,1) + * - ``-3`` use the product of two consecutive dimensions of the input shape as the output dimension. + * Example:: + * - input shape = (2,3,4), shape = (-3,4), output shape = (6,4) + * - input shape = (2,3,4,5), shape = (-3,-3), output shape = (6,20) + * - input shape = (2,3,4), shape = (0,-3), output shape = (2,12) + * - input shape = (2,3,4), shape = (-3,-2), output shape = (6,4) + * - ``-4`` split one dimension of the input into two dimensions passed subsequent to -4 in shape (can contain -1). + * Example:: + * - input shape = (2,3,4), shape = (-4,1,2,-2), output shape =(1,2,3,4) + * - input shape = (2,3,4), shape = (2,-4,-1,3,-2), output shape = (2,1,3,4) + * If the argument `reverse` is set to 1, then the special values are inferred from right to left. + * Example:: + * - without reverse=1, for input shape = (10,5,4), shape = (-1,0), output shape would be (40,5) + * - with reverse=1, output shape will be (50,4). + * + * + * Defined in src/operator/tensor/matrix_op.cc:L175 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def Reshape(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes support vector machine based transformation of the input. + * + * This tutorial demonstrates using SVM as output layer for classification instead of softmax: + * https://github.com/dmlc/mxnet/tree/master/example/svm_mnist. + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def SVMOutput(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Takes the last element of a sequence. + * + * This function takes an n-dimensional input array of the form + * [max_sequence_length, batch_size, other_feature_dims] and returns a (n-1)-dimensional array + * of the form [batch_size, other_feature_dims]. + * + * Parameter `sequence_length` is used to handle variable-length sequences. `sequence_length` should be + * an input array of positive ints of dimension [batch_size]. To use this parameter, + * set `use_sequence_length` to `True`, otherwise each example in the batch is assumed + * to have the max sequence length. + * + * .. note:: Alternatively, you can also use `take` operator. + * + * Example:: + * + * x = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.], + * [ 7., 8., 9.] ], + * + * `[ [ 10., 11., 12.], + * [ 13., 14., 15.], + * [ 16., 17., 18.] ], + * + * `[ [ 19., 20., 21.], + * [ 22., 23., 24.], + * [ 25., 26., 27.] 
] ] + * + * // returns last sequence when sequence_length parameter is not used + * SequenceLast(x) = `[ [ 19., 20., 21.], + * [ 22., 23., 24.], + * [ 25., 26., 27.] ] + * + * // sequence_length is used + * SequenceLast(x, sequence_length=[1,1,1], use_sequence_length=True) = + * `[ [ 1., 2., 3.], + * [ 4., 5., 6.], + * [ 7., 8., 9.] ] + * + * // sequence_length is used + * SequenceLast(x, sequence_length=[1,2,3], use_sequence_length=True) = + * `[ [ 1., 2., 3.], + * [ 13., 14., 15.], + * [ 25., 26., 27.] ] + * + * + * + * Defined in src/operator/sequence_last.cc:L106 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def SequenceLast(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Sets all elements outside the sequence to a constant value. + * + * This function takes an n-dimensional input array of the form + * [max_sequence_length, batch_size, other_feature_dims] and returns an array of the same shape. + * + * Parameter `sequence_length` is used to handle variable-length sequences. `sequence_length` + * should be an input array of positive ints of dimension [batch_size]. + * To use this parameter, set `use_sequence_length` to `True`, + * otherwise each example in the batch is assumed to have the max sequence length and + * this operator works as the `identity` operator. + * + * Example:: + * + * x = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // Batch 1 + * B1 = `[ [ 1., 2., 3.], + * [ 7., 8., 9.], + * [ 13., 14., 15.] ] + * + * // Batch 2 + * B2 = `[ [ 4., 5., 6.], + * [ 10., 11., 12.], + * [ 16., 17., 18.] ] + * + * // works as identity operator when sequence_length parameter is not used + * SequenceMask(x) = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // sequence_length [1,1] means 1 of each batch will be kept + * // and other rows are masked with default mask value = 0 + * SequenceMask(x, sequence_length=[1,1], use_sequence_length=True) = + * `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ], + * + * `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] ] + * + * // sequence_length [2,3] means 2 of batch B1 and 3 of batch B2 will be kept + * // and other rows are masked with value = 1 + * SequenceMask(x, sequence_length=[2,3], use_sequence_length=True, value=1) = + * `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 1., 1., 1.], + * [ 16., 17., 18.] ] ] + * + * + * + * Defined in src/operator/sequence_mask.cc:L186 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def SequenceMask(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Reverses the elements of each sequence. + * + * This function takes an n-dimensional input array of the form [max_sequence_length, batch_size, other_feature_dims] + * and returns an array of the same shape. + * + * Parameter `sequence_length` is used to handle variable-length sequences. + * `sequence_length` should be an input array of positive ints of dimension [batch_size]. 
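 + *
 + * // Editor's addition - an illustrative plain-Scala sketch (not part of the
 + * // generated API) of per-example masking on a time-major [max_len, batch]
 + * // matrix, as SequenceMask (documented just above) does:
 + *
 + * val x = Array(Array(1f, 4f), Array(7f, 10f), Array(13f, 16f))   // [T=3, B=2]
 + * val seqLen = Array(1, 2)
 + * val maskValue = 0f
 + * val masked = x.zipWithIndex.map { case (row, t) =>
 + *   row.zipWithIndex.map { case (v, b) => if (t < seqLen(b)) v else maskValue }
 + * }
 + * // masked: [ [1, 4], [0, 10], [0, 0] ]
 + *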
+ * To use this parameter, set `use_sequence_length` to `True`, + * otherwise each example in the batch is assumed to have the max sequence length. + * + * Example:: + * + * x = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // Batch 1 + * B1 = `[ [ 1., 2., 3.], + * [ 7., 8., 9.], + * [ 13., 14., 15.] ] + * + * // Batch 2 + * B2 = `[ [ 4., 5., 6.], + * [ 10., 11., 12.], + * [ 16., 17., 18.] ] + * + * // returns reverse sequence when sequence_length parameter is not used + * SequenceReverse(x) = `[ `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ] ] + * + * // sequence_length [2,2] means 2 rows of + * // both batch B1 and B2 will be reversed. + * SequenceReverse(x, sequence_length=[2,2], use_sequence_length=True) = + * `[ `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // sequence_length [2,3] means 2 rows of batch B1 and 3 rows of batch B2 + * // will be reversed. + * SequenceReverse(x, sequence_length=[2,3], use_sequence_length=True) = + * `[ `[ [ 7., 8., 9.], + * [ 16., 17., 18.] ], + * + * `[ [ 1., 2., 3.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14., 15.], + * [ 4., 5., 6.] ] ] + * + * + * + * Defined in src/operator/sequence_reverse.cc:L122 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def SequenceReverse(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Splits an array along a particular axis into multiple sub-arrays. + * + * .. note:: ``SliceChannel`` is deprecated. Use ``split`` instead. + * + * **Note** that `num_outputs` should evenly divide the length of the axis + * along which to split the array. + * + * Example:: + * + * x = `[ `[ [ 1.] + * [ 2.] ] + * `[ [ 3.] + * [ 4.] ] + * `[ [ 5.] + * [ 6.] ] ] + * x.shape = (3, 2, 1) + * + * y = split(x, axis=1, num_outputs=2) // a list of 2 arrays with shape (3, 1, 1) + * y = `[ `[ [ 1.] ] + * `[ [ 3.] ] + * `[ [ 5.] ] ] + * + * `[ `[ [ 2.] ] + * `[ [ 4.] ] + * `[ [ 6.] ] ] + * + * y[0].shape = (3, 1, 1) + * + * z = split(x, axis=0, num_outputs=3) // a list of 3 arrays with shape (1, 2, 1) + * z = `[ `[ [ 1.] + * [ 2.] ] ] + * + * `[ `[ [ 3.] + * [ 4.] ] ] + * + * `[ `[ [ 5.] + * [ 6.] ] ] + * + * z[0].shape = (1, 2, 1) + * + * `squeeze_axis=1` removes the axis with length 1 from the shapes of the output arrays. + * **Note** that setting `squeeze_axis` to ``1`` removes axis with length 1 only + * along the `axis` along which it is split. + * Also `squeeze_axis` can be set to true only if ``input.shape[axis] == num_outputs``. + * + * Example:: + * + * z = split(x, axis=0, num_outputs=3, squeeze_axis=1) // a list of 3 arrays with shape (2, 1) + * z = `[ [ 1.] + * [ 2.] ] + * + * `[ [ 3.] + * [ 4.] ] + * + * `[ [ 5.] + * [ 6.] ] + * z[0].shape = (2, 1) + * + * + * + * Defined in src/operator/slice_channel.cc:L107 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def SliceChannel(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the gradient of cross entropy loss with respect to softmax output. + * + * - This operator computes the gradient in two steps.
+ * The cross entropy loss does not actually need to be computed. + * + * - Applies softmax function on the input array. + * - Computes and returns the gradient of cross entropy loss w.r.t. the softmax output. + * + * - The softmax function, cross entropy loss and gradient is given by: + * + * - Softmax Function: + * + * .. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)} + * + * - Cross Entropy Function: + * + * .. math:: \text{CE(label, output)} = - \sum_i \text{label}_i \log(\text{output}_i) + * + * - The gradient of cross entropy loss w.r.t softmax output: + * + * .. math:: \text{gradient} = \text{output} - \text{label} + * + * - During forward propagation, the softmax function is computed for each instance in the input array. + * + * For general *N*-D input arrays with shape :math:`(d_1, d_2, ..., d_n)`. The size is + * :math:`s=d_1 \cdot d_2 \cdot \cdot \cdot d_n`. We can use the parameters `preserve_shape` + * and `multi_output` to specify the way to compute softmax: + * + * - By default, `preserve_shape` is ``false``. This operator will reshape the input array + * into a 2-D array with shape :math:`(d_1, \frac{s}{d_1})` and then compute the softmax function for + * each row in the reshaped array, and afterwards reshape it back to the original shape + * :math:`(d_1, d_2, ..., d_n)`. + * - If `preserve_shape` is ``true``, the softmax function will be computed along + * the last axis (`axis` = ``-1``). + * - If `multi_output` is ``true``, the softmax function will be computed along + * the second axis (`axis` = ``1``). + * + * - During backward propagation, the gradient of cross-entropy loss w.r.t softmax output array is computed. + * The provided label can be a one-hot label array or a probability label array. + * + * - If the parameter `use_ignore` is ``true``, `ignore_label` can specify input instances + * with a particular label to be ignored during backward propagation. **This has no effect when + * softmax `output` has same shape as `label`**. + * + * Example:: + * + * data = `[ [1,2,3,4],[2,2,2,2],[3,3,3,3],[4,4,4,4] ] + * label = [1,0,2,3] + * ignore_label = 1 + * SoftmaxOutput(data=data, label = label,\ + * multi_output=true, use_ignore=true,\ + * ignore_label=ignore_label) + * ## forward softmax output + * `[ [ 0.0320586 0.08714432 0.23688284 0.64391428] + * [ 0.25 0.25 0.25 0.25 ] + * [ 0.25 0.25 0.25 0.25 ] + * [ 0.25 0.25 0.25 0.25 ] ] + * ## backward gradient output + * `[ [ 0. 0. 0. 0. ] + * [-0.75 0.25 0.25 0.25] + * [ 0.25 0.25 -0.75 0.25] + * [ 0.25 0.25 0.25 -0.75] ] + * ## notice that the first row is all 0 because label[0] is 1, which is equal to ignore_label. + * + * - The parameter `grad_scale` can be used to rescale the gradient, which is often used to + * give each loss function different weights. + * + * - This operator also supports various ways to normalize the gradient by `normalization`, + * The `normalization` is applied if softmax output has different shape than the labels. + * The `normalization` mode can be set to the followings: + * + * - ``'null'``: do nothing. + * - ``'batch'``: divide the gradient by the batch size. + * - ``'valid'``: divide the gradient by the number of instances which are not ignored. 
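 + *
 + *        A hypothetical Scala sketch of attaching this loss head to a small network, assuming
 + *        the generated method is exposed on the `Symbol` companion object and that inputs and
 + *        hyper-parameters are forwarded through the kwargs map (an assumption, not verified
 + *        against a build); `Softmax` maps to the same backend operator as `SoftmaxOutput`::
 + *
 + *          val data  = Symbol.Variable("data")
 + *          val label = Symbol.Variable("label")
 + *          val fc    = Symbol.FullyConnected("fc1")()(Map("data" -> data, "num_hidden" -> 10))
 + *          // attaches the softmax + cross-entropy head described above
 + *          val net   = Symbol.SoftmaxOutput("softmax")()(Map("data" -> fc, "label" -> label))
 + *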
+ * + * + * + * Defined in src/operator/softmax_output.cc:L231 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def Softmax(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Applies softmax activation to input. This is intended for internal layers. + * + * .. note:: + * + * This operator has been deprecated, please use `softmax`. + * + * If `mode` = ``instance``, this operator will compute a softmax for each instance in the batch. + * This is the default mode. + * + * If `mode` = ``channel``, this operator will compute a k-class softmax at each position + * of each instance, where `k` = ``num_channel``. This mode can only be used when the input array + * has at least 3 dimensions. + * This can be used for `fully convolutional network`, `image segmentation`, etc. + * + * Example:: + * + * >>> input_array = mx.nd.array(`[ [3., 0.5, -0.5, 2., 7.], + * >>> [2., -.4, 7., 3., 0.2] ]) + * >>> softmax_act = mx.nd.SoftmaxActivation(input_array) + * >>> print softmax_act.asnumpy() + * `[ [ 1.78322066e-02 1.46375655e-03 5.38485940e-04 6.56010211e-03 9.73605454e-01] + * [ 6.56221947e-03 5.95310994e-04 9.73919690e-01 1.78379621e-02 1.08472735e-03] ] + * + * + * + * Defined in src/operator/nn/softmax_activation.cc:L59 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def SoftmaxActivation(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the gradient of cross entropy loss with respect to softmax output. + * + * - This operator computes the gradient in two steps. + * The cross entropy loss does not actually need to be computed. + * + * - Applies softmax function on the input array. + * - Computes and returns the gradient of cross entropy loss w.r.t. the softmax output. + * + * - The softmax function, cross entropy loss and gradient is given by: + * + * - Softmax Function: + * + * .. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)} + * + * - Cross Entropy Function: + * + * .. math:: \text{CE(label, output)} = - \sum_i \text{label}_i \log(\text{output}_i) + * + * - The gradient of cross entropy loss w.r.t softmax output: + * + * .. math:: \text{gradient} = \text{output} - \text{label} + * + * - During forward propagation, the softmax function is computed for each instance in the input array. + * + * For general *N*-D input arrays with shape :math:`(d_1, d_2, ..., d_n)`. The size is + * :math:`s=d_1 \cdot d_2 \cdot \cdot \cdot d_n`. We can use the parameters `preserve_shape` + * and `multi_output` to specify the way to compute softmax: + * + * - By default, `preserve_shape` is ``false``. This operator will reshape the input array + * into a 2-D array with shape :math:`(d_1, \frac{s}{d_1})` and then compute the softmax function for + * each row in the reshaped array, and afterwards reshape it back to the original shape + * :math:`(d_1, d_2, ..., d_n)`. + * - If `preserve_shape` is ``true``, the softmax function will be computed along + * the last axis (`axis` = ``-1``). + * - If `multi_output` is ``true``, the softmax function will be computed along + * the second axis (`axis` = ``1``). + * + * - During backward propagation, the gradient of cross-entropy loss w.r.t softmax output array is computed. + * The provided label can be a one-hot label array or a probability label array. 
+ * + * - If the parameter `use_ignore` is ``true``, `ignore_label` can specify input instances + * with a particular label to be ignored during backward propagation. **This has no effect when + * softmax `output` has same shape as `label`**. + * + * Example:: + * + * data = `[ [1,2,3,4],[2,2,2,2],[3,3,3,3],[4,4,4,4] ] + * label = [1,0,2,3] + * ignore_label = 1 + * SoftmaxOutput(data=data, label = label,\ + * multi_output=true, use_ignore=true,\ + * ignore_label=ignore_label) + * ## forward softmax output + * `[ [ 0.0320586 0.08714432 0.23688284 0.64391428] + * [ 0.25 0.25 0.25 0.25 ] + * [ 0.25 0.25 0.25 0.25 ] + * [ 0.25 0.25 0.25 0.25 ] ] + * ## backward gradient output + * `[ [ 0. 0. 0. 0. ] + * [-0.75 0.25 0.25 0.25] + * [ 0.25 0.25 -0.75 0.25] + * [ 0.25 0.25 0.25 -0.75] ] + * ## notice that the first row is all 0 because label[0] is 1, which is equal to ignore_label. + * + * - The parameter `grad_scale` can be used to rescale the gradient, which is often used to + * give each loss function different weights. + * + * - This operator also supports various ways to normalize the gradient by `normalization`, + * The `normalization` is applied if softmax output has different shape than the labels. + * The `normalization` mode can be set to the followings: + * + * - ``'null'``: do nothing. + * - ``'batch'``: divide the gradient by the batch size. + * - ``'valid'``: divide the gradient by the number of instances which are not ignored. + * + * + * + * Defined in src/operator/softmax_output.cc:L231 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def SoftmaxOutput(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Applies a spatial transformer to input feature map. + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def SpatialTransformer(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Interchanges two axes of an array. + * + * Examples:: + * + * x = `[ [1, 2, 3] ]) + * swapaxes(x, 0, 1) = `[ [ 1], + * [ 2], + * [ 3] ] + * + * x = `[ `[ [ 0, 1], + * [ 2, 3] ], + * `[ [ 4, 5], + * [ 6, 7] ] ] // (2,2,2) array + * + * swapaxes(x, 0, 2) = `[ `[ [ 0, 4], + * [ 2, 6] ], + * `[ [ 1, 5], + * [ 3, 7] ] ] + * + * + * Defined in src/operator/swapaxis.cc:L70 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def SwapAxis(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Upsamples the given input data. + * + * Two algorithms (``sample_type``) are available for upsampling: + * + * - Nearest Neighbor + * - Bilinear + * + * **Nearest Neighbor Upsampling** + * + * Input data is expected to be NCHW. + * + * Example:: + * + * x = `[ [`[ [1. 1. 1.] + * [1. 1. 1.] + * [1. 1. 1.] ] ] ] + * + * UpSampling(x, scale=2, sample_type='nearest') = `[ [`[ [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] ] ] ] + * + * **Bilinear Upsampling** + * + * Uses `deconvolution` algorithm under the hood. You need provide both input data and the kernel. + * + * Input data is expected to be NCHW. + * + * `num_filter` is expected to be same as the number of channels. + * + * Example:: + * + * x = `[ [`[ [1. 1. 1.] + * [1. 1. 1.] + * [1. 1. 1.] 
] ] ] + * + * w = `[ [`[ [1. 1. 1. 1.] + * [1. 1. 1. 1.] + * [1. 1. 1. 1.] + * [1. 1. 1. 1.] ] ] ] + * + * UpSampling(x, w, scale=2, sample_type='bilinear', num_filter=1) = `[ [`[ [1. 2. 2. 2. 2. 1.] + * [2. 4. 4. 4. 4. 2.] + * [2. 4. 4. 4. 4. 2.] + * [2. 4. 4. 4. 4. 2.] + * [2. 4. 4. 4. 4. 2.] + * [1. 2. 2. 2. 2. 1.] ] ] ] + * + * + * Defined in src/operator/nn/upsampling.cc:L173 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def UpSampling(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise absolute value of the input. + * + * Example:: + * + * abs([-2, 0, 3]) = [2, 0, 3] + * + * The storage type of ``abs`` output depends upon the input storage type: + * + * - abs(default) = default + * - abs(row_sparse) = row_sparse + * - abs(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L721 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def abs(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Update function for Adam optimizer. Adam is seen as a generalization + * of AdaGrad. + * + * Adam update consists of the following steps, where g represents gradient and m, v + * are 1st and 2nd order moment estimates (mean and variance). + * + * .. math:: + * + * g_t = \nabla J(W_{t-1})\\ + * m_t = \beta_1 m_{t-1} + (1 - \beta_1) g_t\\ + * v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2\\ + * W_t = W_{t-1} - \alpha \frac{ m_t }{ \sqrt{ v_t } + \epsilon } + * + * It updates the weights using:: + * + * m = beta1*m + (1-beta1)*grad + * v = beta2*v + (1-beta2)*(grad**2) + * w += - learning_rate * m / (sqrt(v) + epsilon) + * + * However, if grad's storage type is ``row_sparse``, ``lazy_update`` is True and the storage + * type of weight is the same as those of m and v, + * only the row slices whose indices appear in grad.indices are updated (for w, m and v):: + * + * for row in grad.indices: + * m[row] = beta1*m[row] + (1-beta1)*grad[row] + * v[row] = beta2*v[row] + (1-beta2)*(grad[row]**2) + * w[row] += - learning_rate * m[row] / (sqrt(v[row]) + epsilon) + * + * + * + * Defined in src/operator/optimizer_op.cc:L688 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def adam_update(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Adds all input arguments element-wise. + * + * .. math:: + * add\_n(a_1, a_2, ..., a_n) = a_1 + a_2 + ... + a_n + * + * ``add_n`` is potentially more efficient than calling ``add`` by `n` times. + * + * The storage type of ``add_n`` output depends on storage types of inputs + * + * - add_n(row_sparse, row_sparse, ..) 
= row_sparse + * - add_n(default, csr, default) = default + * - add_n(any input combinations longer than 4 (>4) with at least one default type) = default + * - otherwise, ``add_n`` falls all inputs back to default storage and generates default storage + * + * + * + * Defined in src/operator/tensor/elemwise_sum.cc:L155 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def add_n(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Check if all the float numbers in the array are finite (used for AMP) + * + * + * Defined in src/operator/contrib/all_finite.cc:L101 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def all_finite(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Cast function between low precision float/FP32 used by AMP. + * + * It casts only between low precision float/FP32 and does not do anything for other types. + * + * + * Defined in src/operator/tensor/amp_cast.cc:L37 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def amp_cast(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Cast function used by AMP, that casts its inputs to the common widest type. + * + * It casts only between low precision float/FP32 and does not do anything for other types. + * + * + * + * Defined in src/operator/tensor/amp_cast.cc:L71 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def amp_multicast(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise inverse cosine of the input array. + * + * The input should be in range `[-1, 1]`. + * The output is in the closed interval :math:`[0, \pi]` + * + * .. math:: + * arccos([-1, -.707, 0, .707, 1]) = [\pi, 3\pi/4, \pi/2, \pi/4, 0] + * + * The storage type of ``arccos`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L206 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def arccos(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns the element-wise inverse hyperbolic cosine of the input array, \ + * computed element-wise. + * + * The storage type of ``arccosh`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L474 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def arccosh(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise inverse sine of the input array. + * + * The input should be in the range `[-1, 1]`. + * The output is in the closed interval of [:math:`-\pi/2`, :math:`\pi/2`]. + * + * .. 
math:: + * arcsin([-1, -.707, 0, .707, 1]) = [-\pi/2, -\pi/4, 0, \pi/4, \pi/2] + * + * The storage type of ``arcsin`` output depends upon the input storage type: + * + * - arcsin(default) = default + * - arcsin(row_sparse) = row_sparse + * - arcsin(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L187 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def arcsin(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns the element-wise inverse hyperbolic sine of the input array, \ + * computed element-wise. + * + * The storage type of ``arcsinh`` output depends upon the input storage type: + * + * - arcsinh(default) = default + * - arcsinh(row_sparse) = row_sparse + * - arcsinh(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L436 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def arcsinh(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise inverse tangent of the input array. + * + * The output is in the closed interval :math:`[-\pi/2, \pi/2]` + * + * .. math:: + * arctan([-1, 0, 1]) = [-\pi/4, 0, \pi/4] + * + * The storage type of ``arctan`` output depends upon the input storage type: + * + * - arctan(default) = default + * - arctan(row_sparse) = row_sparse + * - arctan(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L227 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def arctan(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns the element-wise inverse hyperbolic tangent of the input array, \ + * computed element-wise. + * + * The storage type of ``arctanh`` output depends upon the input storage type: + * + * - arctanh(default) = default + * - arctanh(row_sparse) = row_sparse + * - arctanh(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L515 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def arctanh(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns indices of the maximum values along an axis. + * + * In the case of multiple occurrences of maximum values, the indices corresponding to the first occurrence + * are returned. + * + * Examples:: + * + * x = `[ [ 0., 1., 2.], + * [ 3., 4., 5.] ] + * + * // argmax along axis 0 + * argmax(x, axis=0) = [ 1., 1., 1.] + * + * // argmax along axis 1 + * argmax(x, axis=1) = [ 2., 2.] + * + * // argmax along axis 1 keeping same dims as an input array + * argmax(x, axis=1, keepdims=True) = `[ [ 2.], + * [ 2.] ] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L52 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def argmax(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns argmax indices of each channel from the input array. + * + * The result will be an NDArray of shape (num_channel,). 
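 + *
 + *        A hypothetical Scala call, assuming the generated method is exposed on the `Symbol`
 + *        companion object and the input is passed through the kwargs map (an assumption,
 + *        not verified against a build)::
 + *
 + *          val x   = Symbol.Variable("x")                               // e.g. shape (2, 3)
 + *          val out = Symbol.argmax_channel("amc")()(Map("data" -> x))   // result shape (2,)
 + *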
+ * + * In case of multiple occurrences of the maximum values, the indices corresponding to the first occurrence + * are returned. + * + * Examples:: + * + * x = `[ [ 0., 1., 2.], + * [ 3., 4., 5.] ] + * + * argmax_channel(x) = [ 2., 2.] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L97 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def argmax_channel(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns indices of the minimum values along an axis. + * + * In the case of multiple occurrences of minimum values, the indices corresponding to the first occurrence + * are returned. + * + * Examples:: + * + * x = `[ [ 0., 1., 2.], + * [ 3., 4., 5.] ] + * + * // argmin along axis 0 + * argmin(x, axis=0) = [ 0., 0., 0.] + * + * // argmin along axis 1 + * argmin(x, axis=1) = [ 0., 0.] + * + * // argmin along axis 1 keeping same dims as an input array + * argmin(x, axis=1, keepdims=True) = `[ [ 0.], + * [ 0.] ] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L77 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def argmin(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns the indices that would sort an input array along the given axis. + * + * This function performs sorting along the given axis and returns an array of indices having same shape + * as an input array that index data in sorted order. + * + * Examples:: + * + * x = `[ [ 0.3, 0.2, 0.4], + * [ 0.1, 0.3, 0.2] ] + * + * // sort along axis -1 + * argsort(x) = `[ [ 1., 0., 2.], + * [ 0., 2., 1.] ] + * + * // sort along axis 0 + * argsort(x, axis=0) = `[ [ 1., 0., 1.] + * [ 0., 1., 0.] ] + * + * // flatten and then sort + * argsort(x, axis=None) = [ 3., 1., 5., 0., 4., 2.] + * + * + * Defined in src/operator/tensor/ordering_op.cc:L183 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def argsort(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Batchwise dot product. + * + * ``batch_dot`` is used to compute dot product of ``x`` and ``y`` when ``x`` and + * ``y`` are data in batch, namely N-D (N >= 3) arrays in shape of `(B0, ..., B_i, :, :)`. + * + * For example, given ``x`` with shape `(B_0, ..., B_i, N, M)` and ``y`` with shape + * `(B_0, ..., B_i, M, K)`, the result array will have shape `(B_0, ..., B_i, N, K)`, + * which is computed by:: + * + * batch_dot(x,y)[b_0, ..., b_i, :, :] = dot(x[b_0, ..., b_i, :, :], y[b_0, ..., b_i, :, :]) + * + * + * + * Defined in src/operator/tensor/dot.cc:L127 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def batch_dot(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Takes elements from a data batch. + * + * .. note:: + * `batch_take` is deprecated. Use `pick` instead. + * + * Given an input array of shape ``(d0, d1)`` and indices of shape ``(i0,)``, the result will be + * an output array of shape ``(i0,)`` with:: + * + * output[i] = input[i, indices[i] ] + * + * Examples:: + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] 
] + * + * // takes elements with specified indices + * batch_take(x, [0,1,0]) = [ 1. 4. 5.] + * + * + * + * Defined in src/operator/tensor/indexing_op.cc:L777 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def batch_take(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise sum of the input arrays with broadcasting. + * + * `broadcast_plus` is an alias to the function `broadcast_add`. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_add(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] + * + * broadcast_plus(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] + * + * Supported sparse operations: + * + * broadcast_add(csr, dense(1D)) = dense + * broadcast_add(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L58 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_add(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Broadcasts the input array over particular axes. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. + * + * `broadcast_axes` is an alias to the function `broadcast_axis`. + * + * Example:: + * + * // given x of shape (1,2,1) + * x = `[ `[ [ 1.], + * [ 2.] ] ] + * + * // broadcast x on on axis 2 + * broadcast_axis(x, axis=2, size=3) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] ] + * // broadcast x on on axes 0 and 2 + * broadcast_axis(x, axis=(0,2), size=(2,3)) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ], + * `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] ] + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L58 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_axes(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Broadcasts the input array over particular axes. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. + * + * `broadcast_axes` is an alias to the function `broadcast_axis`. + * + * Example:: + * + * // given x of shape (1,2,1) + * x = `[ `[ [ 1.], + * [ 2.] ] ] + * + * // broadcast x on on axis 2 + * broadcast_axis(x, axis=2, size=3) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] ] + * // broadcast x on on axes 0 and 2 + * broadcast_axis(x, axis=(0,2), size=(2,3)) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ], + * `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] ] + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L58 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_axis(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise division of the input arrays with broadcasting. + * + * Example:: + * + * x = `[ [ 6., 6., 6.], + * [ 6., 6., 6.] ] + * + * y = `[ [ 2.], + * [ 3.] ] + * + * broadcast_div(x, y) = `[ [ 3., 3., 3.], + * [ 2., 2., 2.] 
] + * + * Supported sparse operations: + * + * broadcast_div(csr, dense(1D)) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L187 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_div(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns the result of element-wise **equal to** (==) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_equal(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L46 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_equal(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns the result of element-wise **greater than** (>) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_greater(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L82 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_greater(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns the result of element-wise **greater than or equal to** (>=) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_greater_equal(x, y) = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L100 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_greater_equal(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns the hypotenuse of a right angled triangle, given its "legs" + * with broadcasting. + * + * It is equivalent to doing :math:`sqrt(x_1^2 + x_2^2)`. + * + * Example:: + * + * x = `[ [ 3., 3., 3.] ] + * + * y = `[ [ 4.], + * [ 4.] ] + * + * broadcast_hypot(x, y) = `[ [ 5., 5., 5.], + * [ 5., 5., 5.] ] + * + * z = `[ [ 0.], + * [ 4.] ] + * + * broadcast_hypot(x, z) = `[ [ 3., 3., 3.], + * [ 5., 5., 5.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L158 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_hypot(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns the result of element-wise **lesser than** (<) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_lesser(x, y) = `[ [ 0., 0., 0.], + * [ 0., 0., 0.] 
] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L118 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_lesser(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns the result of element-wise **lesser than or equal to** (<=) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_lesser_equal(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L136 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_lesser_equal(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Broadcasts lhs to have the same shape as rhs. + * + * Broadcasting is a mechanism that allows NDArrays to perform arithmetic operations + * with arrays of different shapes efficiently without creating multiple copies of arrays. + * Also see, `Broadcasting `_ for more explanation. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. + * + * For example:: + * + * broadcast_like(`[ [1,2,3] ], `[ [5,6,7],[7,8,9] ]) = `[ [ 1., 2., 3.], + * [ 1., 2., 3.] ]) + * + * broadcast_like([9], [1,2,3,4,5], lhs_axes=(0,), rhs_axes=(-1,)) = [9,9,9,9,9] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L135 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_like(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns the result of element-wise **logical and** with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_logical_and(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L154 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_logical_and(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns the result of element-wise **logical or** with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 0.], + * [ 1., 1., 0.] ] + * + * y = `[ [ 1.], + * [ 0.] ] + * + * broadcast_logical_or(x, y) = `[ [ 1., 1., 1.], + * [ 1., 1., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L172 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_logical_or(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns the result of element-wise **logical xor** with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 0.], + * [ 1., 1., 0.] ] + * + * y = `[ [ 1.], + * [ 0.] ] + * + * broadcast_logical_xor(x, y) = `[ [ 0., 0., 1.], + * [ 1., 1., 0.] 
] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L190 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_logical_xor(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise maximum of the input arrays with broadcasting. + * + * This function compares two input arrays and returns a new array having the element-wise maxima. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_maximum(x, y) = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L81 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_maximum(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise minimum of the input arrays with broadcasting. + * + * This function compares two input arrays and returns a new array having the element-wise minima. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_minimum(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L117 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_minimum(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise difference of the input arrays with broadcasting. + * + * `broadcast_minus` is an alias to the function `broadcast_sub`. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_sub(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * broadcast_minus(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * Supported sparse operations: + * + * broadcast_sub/minus(csr, dense(1D)) = dense + * broadcast_sub/minus(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L106 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_minus(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise modulo of the input arrays with broadcasting. + * + * Example:: + * + * x = `[ [ 8., 8., 8.], + * [ 8., 8., 8.] ] + * + * y = `[ [ 2.], + * [ 3.] ] + * + * broadcast_mod(x, y) = `[ [ 0., 0., 0.], + * [ 2., 2., 2.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L222 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_mod(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise product of the input arrays with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_mul(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.]
] + * + * Supported sparse operations: + * + * broadcast_mul(csr, dense(1D)) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L146 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_mul(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns the result of element-wise **not equal to** (!=) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_not_equal(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L64 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_not_equal(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise sum of the input arrays with broadcasting. + * + * `broadcast_plus` is an alias to the function `broadcast_add`. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_add(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] + * + * broadcast_plus(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] + * + * Supported sparse operations: + * + * broadcast_add(csr, dense(1D)) = dense + * broadcast_add(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L58 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_plus(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns result of first array elements raised to powers from second array, element-wise with broadcasting. + * + * Example:: + * + * x = `[ [ 2., 2., 2.], + * [ 2., 2., 2.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_power(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L45 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_power(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise difference of the input arrays with broadcasting. + * + * `broadcast_minus` is an alias to the function `broadcast_sub`. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_sub(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * broadcast_minus(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * Supported sparse operations: + * + * broadcast_sub/minus(csr, dense(1D)) = dense + * broadcast_sub/minus(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L106 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_sub(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Broadcasts the input array to a new shape.
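 + *
 + *        A hypothetical Scala sketch, assuming the generated method is exposed on the `Symbol`
 + *        companion object and that the target shape can be passed through the kwargs map as a
 + *        string (both are assumptions, not verified here); the broadcasting rules follow below::
 + *
 + *          val x   = Symbol.Variable("x")                                        // e.g. shape (1, 3)
 + *          val out = Symbol.broadcast_to("bt")()(Map("data" -> x, "shape" -> "(2, 3)"))
 + *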
+ * + * Broadcasting is a mechanism that allows NDArrays to perform arithmetic operations + * with arrays of different shapes efficiently without creating multiple copies of arrays. + * Also see, `Broadcasting `_ for more explanation. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. + * + * For example:: + * + * broadcast_to(`[ [1,2,3] ], shape=(2,3)) = `[ [ 1., 2., 3.], + * [ 1., 2., 3.] ]) + * + * The dimension which you do not want to change can also be kept as `0` which means copy the original value. + * So with `shape=(2,0)`, we will obtain the same result as in the above example. + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L82 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def broadcast_to(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Casts all elements of the input to a new type. + * + * .. note:: ``Cast`` is deprecated. Use ``cast`` instead. + * + * Example:: + * + * cast([0.9, 1.3], dtype='int32') = [0, 1] + * cast([1e20, 11.1], dtype='float16') = [inf, 11.09375] + * cast([300, 11.1, 10.9, -1, -3], dtype='uint8') = [44, 11, 10, 255, 253] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L665 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def cast(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Casts tensor storage type to the new type. + * + * When an NDArray with default storage type is cast to csr or row_sparse storage, + * the result is compact, which means: + * + * - for csr, zero values will not be retained + * - for row_sparse, row slices of all zeros will not be retained + * + * The storage type of ``cast_storage`` output depends on stype parameter: + * + * - cast_storage(csr, 'default') = default + * - cast_storage(row_sparse, 'default') = default + * - cast_storage(default, 'csr') = csr + * - cast_storage(default, 'row_sparse') = row_sparse + * - cast_storage(csr, 'csr') = csr + * - cast_storage(row_sparse, 'row_sparse') = row_sparse + * + * Example:: + * + * dense = `[ [ 0., 1., 0.], + * [ 2., 0., 3.], + * [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * + * # cast to row_sparse storage type + * rsp = cast_storage(dense, 'row_sparse') + * rsp.indices = [0, 1] + * rsp.values = `[ [ 0., 1., 0.], + * [ 2., 0., 3.] ] + * + * # cast to csr storage type + * csr = cast_storage(dense, 'csr') + * csr.indices = [1, 0, 2] + * csr.values = [ 1., 2., 3.] + * csr.indptr = [0, 1, 3, 3, 3] + * + * + * + * Defined in src/operator/tensor/cast_storage.cc:L71 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def cast_storage(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise cube-root value of the input. + * + * .. 
math:: + * cbrt(x) = \sqrt[3]{x} + * + * Example:: + * + * cbrt([1, 8, -125]) = [1, 2, -5] + * + * The storage type of ``cbrt`` output depends upon the input storage type: + * + * - cbrt(default) = default + * - cbrt(row_sparse) = row_sparse + * - cbrt(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L216 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def cbrt(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise ceiling of the input. + * + * The ceil of the scalar x is the smallest integer i, such that i >= x. + * + * Example:: + * + * ceil([-2.1, -1.9, 1.5, 1.9, 2.1]) = [-2., -1., 2., 2., 3.] + * + * The storage type of ``ceil`` output depends upon the input storage type: + * + * - ceil(default) = default + * - ceil(row_sparse) = row_sparse + * - ceil(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L818 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def ceil(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Picks elements from an input array according to the input indices along the given axis. + * + * Given an input array of shape ``(d0, d1)`` and indices of shape ``(i0,)``, the result will be + * an output array of shape ``(i0,)`` with:: + * + * output[i] = input[i, indices[i] ] + * + * By default, if any index mentioned is too large, it is replaced by the index that addresses + * the last element along an axis (the `clip` mode). + * + * This function supports n-dimensional input and (n-1)-dimensional indices arrays. + * + * Examples:: + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] ] + * + * // picks elements with specified indices along axis 0 + * pick(x, y=[0,1], 0) = [ 1., 4.] + * + * // picks elements with specified indices along axis 1 + * pick(x, y=[0,1,0], 1) = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] ] + * + * // picks elements with specified indices along axis 1 using 'wrap' mode + * // to place indices that would normally be out of bounds + * pick(x, y=[2,-1,-2], 1, mode='wrap') = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] ] + * + * // picks elements with specified indices along axis 1 and dims are maintained + * pick(x,y, 1, keepdims=True) = `[ [ 2.], + * [ 3.], + * [ 6.] ] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L155 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def choose_element_0index(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Clips (limits) the values in an array. + * Given an interval, values outside the interval are clipped to the interval edges. + * Clipping ``x`` between `a_min` and `a_max` would be:: + * .. math:: + * clip(x, a_min, a_max) = \max(\min(x, a_max), a_min) + * Example:: + * x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + * clip(x,1,8) = [ 1., 1., 2., 3., 4., 5., 6., 7., 8., 8.]
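 + *
 + *        A hypothetical Scala equivalent of the example above, assuming the generated method is
 + *        exposed on the `Symbol` companion object and `a_min`/`a_max` travel through the kwargs
 + *        map (an assumption, not verified against a build)::
 + *
 + *          val x       = Symbol.Variable("x")
 + *          val clipped = Symbol.clip("clip0")()(Map("data" -> x, "a_min" -> 1.0, "a_max" -> 8.0))
 + *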
+ * The storage type of ``clip`` output depends on storage types of inputs and the a_min, a_max \ + * parameter values: + * - clip(default) = default + * - clip(row_sparse, a_min <= 0, a_max >= 0) = row_sparse + * - clip(csr, a_min <= 0, a_max >= 0) = csr + * - clip(row_sparse, a_min < 0, a_max < 0) = default + * - clip(row_sparse, a_min > 0, a_max > 0) = default + * - clip(csr, a_min < 0, a_max < 0) = csr + * - clip(csr, a_min > 0, a_max > 0) = csr + * + * + * Defined in src/operator/tensor/matrix_op.cc:L677 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def clip(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Joins input arrays along a given axis. + * + * .. note:: `Concat` is deprecated. Use `concat` instead. + * + * The dimensions of the input arrays should be the same except the axis along + * which they will be concatenated. + * The dimension of the output array along the concatenated axis will be equal + * to the sum of the corresponding dimensions of the input arrays. + * + * The storage type of ``concat`` output depends on storage types of inputs + * + * - concat(csr, csr, ..., csr, dim=0) = csr + * - otherwise, ``concat`` generates output with default storage + * + * Example:: + * + * x = `[ [1,1],[2,2] ] + * y = `[ [3,3],[4,4],[5,5] ] + * z = `[ [6,6], [7,7],[8,8] ] + * + * concat(x,y,z,dim=0) = `[ [ 1., 1.], + * [ 2., 2.], + * [ 3., 3.], + * [ 4., 4.], + * [ 5., 5.], + * [ 6., 6.], + * [ 7., 7.], + * [ 8., 8.] ] + * + * Note that you cannot concat x,y,z along dimension 1 since dimension + * 0 is not the same for all the input arrays. + * + * concat(y,z,dim=1) = `[ [ 3., 3., 6., 6.], + * [ 4., 4., 7., 7.], + * [ 5., 5., 8., 8.] ] + * + * + * + * Defined in src/operator/nn/concat.cc:L383 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def concat(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the element-wise cosine of the input array. + * + * The input should be in radians (:math:`2\pi` rad equals 360 degrees). + * + * .. math:: + * cos([0, \pi/4, \pi/2]) = [1, 0.707, 0] + * + * The storage type of ``cos`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L90 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def cos(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns the hyperbolic cosine of the input array, computed element-wise. + * + * .. math:: + * cosh(x) = 0.5\times(exp(x) + exp(-x)) + * + * The storage type of ``cosh`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L351 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def cosh(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Slices a region of the array. + * .. note:: ``crop`` is deprecated. Use ``slice`` instead. + * This function returns a sliced array between the indices given + * by `begin` and `end` with the corresponding `step`. 
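 + *
 + *        A hypothetical Scala sketch of such a slice (the deprecated `crop` alias shares this
 + *        signature), assuming the generated method is exposed on the `Symbol` companion object
 + *        and that `begin`/`end` can be passed through the kwargs map as strings (assumptions,
 + *        not verified here); the exact slicing rules are spelled out below::
 + *
 + *          val x      = Symbol.Variable("x")                                     // e.g. shape (3, 4)
 + *          val sliced = Symbol.crop("s0")()(Map("data" -> x, "begin" -> "(0, 1)", "end" -> "(2, 4)"))
 + *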
+ * For an input array of ``shape=(d_0, d_1, ..., d_n-1)``, + * slice operation with ``begin=(b_0, b_1...b_m-1)``, + * ``end=(e_0, e_1, ..., e_m-1)``, and ``step=(s_0, s_1, ..., s_m-1)``, + * where m <= n, results in an array with the shape + * ``(|e_0-b_0|/|s_0|, ..., |e_m-1-b_m-1|/|s_m-1|, d_m, ..., d_n-1)``. + * The resulting array's *k*-th dimension contains elements + * from the *k*-th dimension of the input array starting + * from index ``b_k`` (inclusive) with step ``s_k`` + * until reaching ``e_k`` (exclusive). + * If the *k*-th elements are `None` in the sequence of `begin`, `end`, + * and `step`, the following rule will be used to set default values. + * If `s_k` is `None`, set `s_k=1`. If `s_k > 0`, set `b_k=0`, `e_k=d_k`; + * else, set `b_k=d_k-1`, `e_k=-1`. + * The storage type of ``slice`` output depends on storage types of inputs + * - slice(csr) = csr + * - otherwise, ``slice`` generates output with default storage + * .. note:: When input data storage type is csr, it only supports + * step=(), or step=(None,), or step=(1,) to generate a csr output. + * For other step parameter values, it falls back to slicing + * a dense tensor. + * Example:: + * x = `[ [ 1., 2., 3., 4.], + * [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice(x, begin=(0,1), end=(2,4)) = `[ [ 2., 3., 4.], + * [ 6., 7., 8.] ] + * slice(x, begin=(None, 0), end=(None, 3), step=(-1, 2)) = `[ [9., 11.], + * [5., 7.], + * [1., 3.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L482 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def crop(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Connectionist Temporal Classification Loss. + * + * .. note:: The existing alias ``contrib_CTCLoss`` is deprecated. + * + * The shapes of the inputs and outputs: + * + * - **data**: `(sequence_length, batch_size, alphabet_size)` + * - **label**: `(batch_size, label_sequence_length)` + * - **out**: `(batch_size)` + * + * The `data` tensor consists of sequences of activation vectors (without applying softmax), + * with i-th channel in the last dimension corresponding to i-th label + * for i between 0 and alphabet_size-1 (i.e always 0-indexed). + * Alphabet size should include one additional value reserved for blank label. + * When `blank_label` is ``"first"``, the ``0``-th channel is be reserved for + * activation of blank label, or otherwise if it is "last", ``(alphabet_size-1)``-th channel should be + * reserved for blank label. + * + * ``label`` is an index matrix of integers. When `blank_label` is ``"first"``, + * the value 0 is then reserved for blank label, and should not be passed in this matrix. Otherwise, + * when `blank_label` is ``"last"``, the value `(alphabet_size-1)` is reserved for blank label. + * + * If a sequence of labels is shorter than *label_sequence_length*, use the special + * padding value at the end of the sequence to conform it to the correct + * length. The padding value is `0` when `blank_label` is ``"first"``, and `-1` otherwise. + * + * For example, suppose the vocabulary is `[a, b, c]`, and in one batch we have three sequences + * 'ba', 'cbb', and 'abac'. When `blank_label` is ``"first"``, we can index the labels as + * `{'a': 1, 'b': 2, 'c': 3}`, and we reserve the 0-th channel for blank label in data tensor. 
+ * The resulting `label` tensor should be padded to be:: + * + * `[ [2, 1, 0, 0], [3, 2, 2, 0], [1, 2, 1, 3] ] + * + * When `blank_label` is ``"last"``, we can index the labels as + * `{'a': 0, 'b': 1, 'c': 2}`, and we reserve the channel index 3 for blank label in data tensor. + * The resulting `label` tensor should be padded to be:: + * + * `[ [1, 0, -1, -1], [2, 1, 1, -1], [0, 1, 0, 2] ] + * + * ``out`` is a list of CTC loss values, one per example in the batch. + * + * See *Connectionist Temporal Classification: Labelling Unsegmented + * Sequence Data with Recurrent Neural Networks*, A. Graves *et al*. for more + * information on the definition and the algorithm. + * + * + * + * Defined in src/operator/nn/ctc_loss.cc:L100 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def ctc_loss(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Return the cumulative sum of the elements along a given axis. + * + * Defined in src/operator/numpy/np_cumsum.cc:L70 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def cumsum(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Converts each element of the input array from radians to degrees. + * + * .. math:: + * degrees([0, \pi/2, \pi, 3\pi/2, 2\pi]) = [0, 90, 180, 270, 360] + * + * The storage type of ``degrees`` output depends upon the input storage type: + * + * - degrees(default) = default + * - degrees(row_sparse) = row_sparse + * - degrees(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L274 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def degrees(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Rearranges(permutes) data from depth into blocks of spatial data. + * Similar to ONNX DepthToSpace operator: + * https://github.com/onnx/onnx/blob/master/docs/Operators.md#DepthToSpace. + * The output is a new tensor where the values from depth dimension are moved in spatial blocks + * to height and width dimension. The reverse of this operation is ``space_to_depth``. + * .. 
math:: + * \begin{gather*} + * x \prime = reshape(x, [N, block\_size, block\_size, C / (block\_size ^ 2), H * block\_size, W * block\_size]) \\ + * x \prime \prime = transpose(x \prime, [0, 3, 4, 1, 5, 2]) \\ + * y = reshape(x \prime \prime, [N, C / (block\_size ^ 2), H * block\_size, W * block\_size]) + * \end{gather*} + * where :math:`x` is an input tensor with default layout as :math:`[N, C, H, W]`: [batch, channels, height, width] + * and :math:`y` is the output tensor of layout :math:`[N, C / (block\_size ^ 2), H * block\_size, W * block\_size]` + * Example:: + * x = `[ [`[ [0, 1, 2], + * [3, 4, 5] ], + * `[ [6, 7, 8], + * [9, 10, 11] ], + * `[ [12, 13, 14], + * [15, 16, 17] ], + * `[ [18, 19, 20], + * [21, 22, 23] ] ] ] + * depth_to_space(x, 2) = `[ [`[ [0, 6, 1, 7, 2, 8], + * [12, 18, 13, 19, 14, 20], + * [3, 9, 4, 10, 5, 11], + * [15, 21, 16, 22, 17, 23] ] ] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L972 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def depth_to_space(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Extracts a diagonal or constructs a diagonal array. + * + * ``diag``'s behavior depends on the input array dimensions: + * + * - 1-D arrays: constructs a 2-D array with the input as its diagonal, all other elements are zero. + * - N-D arrays: extracts the diagonals of the sub-arrays with axes specified by ``axis1`` and ``axis2``. + * The output shape would be decided by removing the axes numbered ``axis1`` and ``axis2`` from the + * input shape and appending to the result a new axis with the size of the diagonals in question. + * + * For example, when the input shape is `(2, 3, 4, 5)`, ``axis1`` and ``axis2`` are 0 and 2 + * respectively and ``k`` is 0, the resulting shape would be `(3, 5, 2)`. + * + * Examples:: + * + * x = `[ [1, 2, 3], + * [4, 5, 6] ] + * + * diag(x) = [1, 5] + * + * diag(x, k=1) = [2, 6] + * + * diag(x, k=-1) = [4] + * + * x = [1, 2, 3] + * + * diag(x) = `[ [1, 0, 0], + * [0, 2, 0], + * [0, 0, 3] ] + * + * diag(x, k=1) = `[ [0, 1, 0], + * [0, 0, 2], + * [0, 0, 0] ] + * + * diag(x, k=-1) = `[ [0, 0, 0], + * [1, 0, 0], + * [0, 2, 0] ] + * + * x = `[ `[ [1, 2], + * [3, 4] ], + * + * `[ [5, 6], + * [7, 8] ] ] + * + * diag(x) = `[ [1, 7], + * [2, 8] ] + * + * diag(x, k=1) = `[ [3], + * [4] ] + * + * diag(x, axis1=-2, axis2=-1) = `[ [1, 4], + * [5, 8] ] + * + * + * + * Defined in src/operator/tensor/diag_op.cc:L87 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def diag(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Dot product of two arrays. + * + * ``dot``'s behavior depends on the input array dimensions: + * + * - 1-D arrays: inner product of vectors + * - 2-D arrays: matrix multiplication + * - N-D arrays: a sum product over the last axis of the first input and the first + * axis of the second input + * + * For example, given 3-D ``x`` with shape `(n,m,k)` and ``y`` with shape `(k,r,s)`, the + * result array will have shape `(n,m,r,s)`. 
It is computed by:: + * + * dot(x,y)[i,j,a,b] = sum(x[i,j,:]*y[:,a,b]) + * + * Example:: + * + * x = reshape([0,1,2,3,4,5,6,7], shape=(2,2,2)) + * y = reshape([7,6,5,4,3,2,1,0], shape=(2,2,2)) + * dot(x,y)[0,0,1,1] = 0 + * sum(x[0,0,:]*y[:,1,1]) = 0 + * + * The storage type of ``dot`` output depends on storage types of inputs, transpose option and + * forward_stype option for output storage type. Implemented sparse operations include: + * + * - dot(default, default, transpose_a=True/False, transpose_b=True/False) = default + * - dot(csr, default, transpose_a=True) = default + * - dot(csr, default, transpose_a=True) = row_sparse + * - dot(csr, default) = default + * - dot(csr, row_sparse) = default + * - dot(default, csr) = csr (CPU only) + * - dot(default, csr, forward_stype='default') = default + * - dot(default, csr, transpose_b=True, forward_stype='default') = default + * + * If the combination of input storage types and forward_stype does not match any of the + * above patterns, ``dot`` will fallback and generate output with default storage. + * + * .. Note:: + * + * If the storage type of the lhs is "csr", the storage type of gradient w.r.t rhs will be + * "row_sparse". Only a subset of optimizers support sparse gradients, including SGD, AdaGrad + * and Adam. Note that by default lazy updates is turned on, which may perform differently + * from standard updates. For more details, please check the Optimization API at: + * https://mxnet.incubator.apache.org/api/python/optimization/optimization.html + * + * + * + * Defined in src/operator/tensor/dot.cc:L77 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def dot(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Adds arguments element-wise. + * + * The storage type of ``elemwise_add`` output depends on storage types of inputs + * + * - elemwise_add(row_sparse, row_sparse) = row_sparse + * - elemwise_add(csr, csr) = csr + * - elemwise_add(default, csr) = default + * - elemwise_add(csr, default) = default + * - elemwise_add(default, rsp) = default + * - elemwise_add(rsp, default) = default + * - otherwise, ``elemwise_add`` generates output with default storage + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def elemwise_add(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Divides arguments element-wise. + * + * The storage type of ``elemwise_div`` output is always dense + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def elemwise_div(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Multiplies arguments element-wise. 
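+ *
+ * A small hand-worked illustration (values chosen here; both inputs must share the same shape)::
+ *
+ *   x = [1., 2., 3.]
+ *   y = [4., 5., 6.]
+ *   elemwise_mul(x, y) = [4., 10., 18.]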
+ * + * The storage type of ``elemwise_mul`` output depends on storage types of inputs + * + * - elemwise_mul(default, default) = default + * - elemwise_mul(row_sparse, row_sparse) = row_sparse + * - elemwise_mul(default, row_sparse) = row_sparse + * - elemwise_mul(row_sparse, default) = row_sparse + * - elemwise_mul(csr, csr) = csr + * - otherwise, ``elemwise_mul`` generates output with default storage + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def elemwise_mul(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Subtracts arguments element-wise. + * + * The storage type of ``elemwise_sub`` output depends on storage types of inputs + * + * - elemwise_sub(row_sparse, row_sparse) = row_sparse + * - elemwise_sub(csr, csr) = csr + * - elemwise_sub(default, csr) = default + * - elemwise_sub(csr, default) = default + * - elemwise_sub(default, rsp) = default + * - elemwise_sub(rsp, default) = default + * - otherwise, ``elemwise_sub`` generates output with default storage + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def elemwise_sub(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise gauss error function of the input. + * + * Example:: + * + * erf([0, -1., 10.]) = [0., -0.8427, 1.] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L886 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def erf(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise inverse gauss error function of the input. + * + * Example:: + * + * erfinv([0, 0.5., -1.]) = [0., 0.4769, -inf] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L907 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def erfinv(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise exponential value of the input. + * + * .. math:: + * exp(x) = e^x \approx 2.718^x + * + * Example:: + * + * exp([0, 1, 2]) = [1., 2.71828175, 7.38905621] + * + * The storage type of ``exp`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L63 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def exp(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Inserts a new axis of size 1 into the array shape + * For example, given ``x`` with shape ``(2,3,4)``, then ``expand_dims(x, axis=1)`` + * will return a new array with shape ``(2,1,3,4)``. + * + * + * Defined in src/operator/tensor/matrix_op.cc:L395 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def expand_dims(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns ``exp(x) - 1`` computed element-wise on the input. + * + * This function provides greater precision than ``exp(x) - 1`` for small values of ``x``. 
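+ *
+ * A small worked example (values rounded; chosen here for illustration)::
+ *
+ *   expm1([0., 1., 1e-10]) = [0., 1.71828183, 1e-10]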
+ * + * The storage type of ``expm1`` output depends upon the input storage type: + * + * - expm1(default) = default + * - expm1(row_sparse) = row_sparse + * - expm1(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L224 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def expm1(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Fill one element of each line(row for python, column for R/Julia) in lhs according to index indicated by rhs and values indicated by mhs. This function assume rhs uses 0-based index. + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def fill_element_0index(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise rounded value to the nearest \ + * integer towards zero of the input. + * + * Example:: + * + * fix([-2.1, -1.9, 1.9, 2.1]) = [-2., -1., 1., 2.] + * + * The storage type of ``fix`` output depends upon the input storage type: + * + * - fix(default) = default + * - fix(row_sparse) = row_sparse + * - fix(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L875 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def fix(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Flattens the input array into a 2-D array by collapsing the higher dimensions. + * .. note:: `Flatten` is deprecated. Use `flatten` instead. + * For an input array with shape ``(d1, d2, ..., dk)``, `flatten` operation reshapes + * the input array into an output array of shape ``(d1, d2*...*dk)``. + * Note that the behavior of this function is different from numpy.ndarray.flatten, + * which behaves similar to mxnet.ndarray.reshape((-1,)). + * Example:: + * x = `[ [ + * [1,2,3], + * [4,5,6], + * [7,8,9] + * ], + * [ [1,2,3], + * [4,5,6], + * [7,8,9] + * ] ], + * flatten(x) = `[ [ 1., 2., 3., 4., 5., 6., 7., 8., 9.], + * [ 1., 2., 3., 4., 5., 6., 7., 8., 9.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L250 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def flatten(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Reverses the order of elements along given axis while preserving array shape. + * Note: reverse and flip are equivalent. We use reverse in the following examples. + * Examples:: + * x = `[ [ 0., 1., 2., 3., 4.], + * [ 5., 6., 7., 8., 9.] ] + * reverse(x, axis=0) = `[ [ 5., 6., 7., 8., 9.], + * [ 0., 1., 2., 3., 4.] ] + * reverse(x, axis=1) = `[ [ 4., 3., 2., 1., 0.], + * [ 9., 8., 7., 6., 5.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L832 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def flip(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise floor of the input. + * + * The floor of the scalar x is the largest integer i, such that i <= x. + * + * Example:: + * + * floor([-2.1, -1.9, 1.5, 1.9, 2.1]) = [-3., -2., 1., 1., 2.] 
+ * + * The storage type of ``floor`` output depends upon the input storage type: + * + * - floor(default) = default + * - floor(row_sparse) = row_sparse + * - floor(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L837 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def floor(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * The FTML optimizer described in + * *FTML - Follow the Moving Leader in Deep Learning*, + * available at http://proceedings.mlr.press/v70/zheng17a/zheng17a.pdf. + * + * .. math:: + * + * g_t = \nabla J(W_{t-1})\\ + * v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2\\ + * d_t = \frac{ 1 - \beta_1^t }{ \eta_t } (\sqrt{ \frac{ v_t }{ 1 - \beta_2^t } } + \epsilon) + * \sigma_t = d_t - \beta_1 d_{t-1} + * z_t = \beta_1 z_{ t-1 } + (1 - \beta_1^t) g_t - \sigma_t W_{t-1} + * W_t = - \frac{ z_t }{ d_t } + * + * + * + * Defined in src/operator/optimizer_op.cc:L640 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def ftml_update(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Update function for Ftrl optimizer. + * Referenced from *Ad Click Prediction: a View from the Trenches*, available at + * http://dl.acm.org/citation.cfm?id=2488200. + * + * It updates the weights using:: + * + * rescaled_grad = clip(grad * rescale_grad, clip_gradient) + * z += rescaled_grad - (sqrt(n + rescaled_grad**2) - sqrt(n)) * weight / learning_rate + * n += rescaled_grad**2 + * w = (sign(z) * lamda1 - z) / ((beta + sqrt(n)) / learning_rate + wd) * (abs(z) > lamda1) + * + * If w, z and n are all of ``row_sparse`` storage type, + * only the row slices whose indices appear in grad.indices are updated (for w, z and n):: + * + * for row in grad.indices: + * rescaled_grad[row] = clip(grad[row] * rescale_grad, clip_gradient) + * z[row] += rescaled_grad[row] - (sqrt(n[row] + rescaled_grad[row]**2) - sqrt(n[row])) * weight[row] / learning_rate + * n[row] += rescaled_grad[row]**2 + * w[row] = (sign(z[row]) * lamda1 - z[row]) / ((beta + sqrt(n[row])) / learning_rate + wd) * (abs(z[row]) > lamda1) + * + * + * + * Defined in src/operator/optimizer_op.cc:L876 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def ftrl_update(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns the gamma function (extension of the factorial function \ + * to the reals), computed element-wise on the input array. + * + * The storage type of ``gamma`` output is always dense + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def gamma(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise log of the absolute value of the gamma function \ + * of the input. 
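+ *
+ * A small worked example (values rounded; chosen here for illustration, using gamma(5) = 24)::
+ *
+ *   gammaln([1., 2., 5.]) = [0., 0., 3.17805383]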
+ * + * The storage type of ``gammaln`` output is always dense + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def gammaln(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Gather elements or slices from `data` and store to a tensor whose + * shape is defined by `indices`. + * + * Given `data` with shape `(X_0, X_1, ..., X_{N-1})` and indices with shape + * `(M, Y_0, ..., Y_{K-1})`, the output will have shape `(Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1})`, + * where `M <= N`. If `M == N`, output shape will simply be `(Y_0, ..., Y_{K-1})`. + * + * The elements in output is defined as follows:: + * + * output[y_0, ..., y_{K-1}, x_M, ..., x_{N-1}] = data[indices[0, y_0, ..., y_{K-1}], + * ..., + * indices[M-1, y_0, ..., y_{K-1}], + * x_M, ..., x_{N-1}] + * + * Examples:: + * + * data = `[ [0, 1], [2, 3] ] + * indices = `[ [1, 1, 0], [0, 1, 0] ] + * gather_nd(data, indices) = [2, 3, 0] + * + * data = `[ `[ [1, 2], [3, 4] ], `[ [5, 6], [7, 8] ] ] + * indices = `[ [0, 1], [1, 0] ] + * gather_nd(data, indices) = `[ [3, 4], [5, 6] ] + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def gather_nd(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes hard sigmoid of x element-wise. + * + * .. math:: + * y = max(0, min(1, alpha * x + beta)) + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L161 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def hard_sigmoid(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns a copy of the input. + * + * From:src/operator/tensor/elemwise_unary_op_basic.cc:246 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def identity(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the Khatri-Rao product of the input matrices. + * + * Given a collection of :math:`n` input matrices, + * + * .. math:: + * A_1 \in \mathbb{R}^{M_1 \times M}, \ldots, A_n \in \mathbb{R}^{M_n \times N}, + * + * the (column-wise) Khatri-Rao product is defined as the matrix, + * + * .. math:: + * X = A_1 \otimes \cdots \otimes A_n \in \mathbb{R}^{(M_1 \cdots M_n) \times N}, + * + * where the :math:`k` th column is equal to the column-wise outer product + * :math:`{A_1}_k \otimes \cdots \otimes {A_n}_k` where :math:`{A_i}_k` is the kth + * column of the ith matrix. + * + * Example:: + * + * >>> A = mx.nd.array(`[ [1, -1], + * >>> [2, -3] ]) + * >>> B = mx.nd.array(`[ [1, 4], + * >>> [2, 5], + * >>> [3, 6] ]) + * >>> C = mx.nd.khatri_rao(A, B) + * >>> print(C.asnumpy()) + * `[ [ 1. -4.] + * [ 2. -5.] + * [ 3. -6.] + * [ 2. -12.] + * [ 4. -15.] + * [ 6. -18.] ] + * + * + * + * Defined in src/operator/contrib/krprod.cc:L108 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def khatri_rao(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Phase I of lamb update it performs the following operations and returns g:. + * + * Link to paper: https://arxiv.org/pdf/1904.00962.pdf + * + * .. 
math:: + * \begin{gather*} + * grad = grad * rescale_grad + * if (grad < -clip_gradient) + * then + * grad = -clip_gradient + * if (grad > clip_gradient) + * then + * grad = clip_gradient + * + * mean = beta1 * mean + (1 - beta1) * grad; + * variance = beta2 * variance + (1. - beta2) * grad ^ 2; + * + * if (bias_correction) + * then + * mean_hat = mean / (1. - beta1^t); + * var_hat = var / (1 - beta2^t); + * g = mean_hat / (var_hat^(1/2) + epsilon) + wd * weight; + * else + * g = mean / (var_data^(1/2) + epsilon) + wd * weight; + * \end{gather*} + * + * + * + * Defined in src/operator/optimizer_op.cc:L953 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def lamb_update_phase1(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Phase II of lamb update it performs the following operations and updates grad. + * + * Link to paper: https://arxiv.org/pdf/1904.00962.pdf + * + * .. math:: + * \begin{gather*} + * if (lower_bound >= 0) + * then + * r1 = max(r1, lower_bound) + * if (upper_bound >= 0) + * then + * r1 = max(r1, upper_bound) + * + * if (r1 == 0 or r2 == 0) + * then + * lr = lr + * else + * lr = lr * (r1/r2) + * weight = weight - lr * g + * \end{gather*} + * + * + * + * Defined in src/operator/optimizer_op.cc:L992 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def lamb_update_phase2(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Compute the determinant of a matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* is a square matrix. We compute: + * + * *out* = *det(A)* + * + * If *n>2*, *det* is performed separately on the trailing two dimensions + * for all inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * .. note:: There is no gradient backwarded when A is non-invertible (which is + * equivalent to det(A) = 0) because zero is rarely hit upon in float + * point computation and the Jacobi's formula on determinant gradient + * is not computationally efficient when A is non-invertible. + * + * Examples:: + * + * Single matrix determinant + * A = `[ [1., 4.], [2., 3.] ] + * det(A) = [-5.] + * + * Batch matrix determinant + * A = `[ `[ [1., 4.], [2., 3.] ], + * `[ [2., 3.], [1., 4.] ] ] + * det(A) = [-5., 5.] + * + * + * Defined in src/operator/tensor/la_op.cc:L973 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def linalg_det(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Extracts the diagonal entries of a square matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, then *A* represents a single square matrix which diagonal elements get extracted as a 1-dimensional tensor. + * + * If *n>2*, then *A* represents a batch of square matrices on the trailing two dimensions. The extracted diagonals are returned as an *n-1*-dimensional tensor. + * + * .. note:: The operator supports float32 and float64 data types only. 
+ * + * Examples:: + * + * Single matrix diagonal extraction + * A = `[ [1.0, 2.0], + * [3.0, 4.0] ] + * + * extractdiag(A) = [1.0, 4.0] + * + * extractdiag(A, 1) = [2.0] + * + * Batch matrix diagonal extraction + * A = `[ `[ [1.0, 2.0], + * [3.0, 4.0] ], + * `[ [5.0, 6.0], + * [7.0, 8.0] ] ] + * + * extractdiag(A) = `[ [1.0, 4.0], + * [5.0, 8.0] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L495 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def linalg_extractdiag(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Extracts a triangular sub-matrix from a square matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, then *A* represents a single square matrix from which a triangular sub-matrix is extracted as a 1-dimensional tensor. + * + * If *n>2*, then *A* represents a batch of square matrices on the trailing two dimensions. The extracted triangular sub-matrices are returned as an *n-1*-dimensional tensor. + * + * The *offset* and *lower* parameters determine the triangle to be extracted: + * + * - When *offset = 0* either the lower or upper triangle with respect to the main diagonal is extracted depending on the value of parameter *lower*. + * - When *offset = k > 0* the upper triangle with respect to the k-th diagonal above the main diagonal is extracted. + * - When *offset = k < 0* the lower triangle with respect to the k-th diagonal below the main diagonal is extracted. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single triagonal extraction + * A = `[ [1.0, 2.0], + * [3.0, 4.0] ] + * + * extracttrian(A) = [1.0, 3.0, 4.0] + * extracttrian(A, lower=False) = [1.0, 2.0, 4.0] + * extracttrian(A, 1) = [2.0] + * extracttrian(A, -1) = [3.0] + * + * Batch triagonal extraction + * A = `[ `[ [1.0, 2.0], + * [3.0, 4.0] ], + * `[ [5.0, 6.0], + * [7.0, 8.0] ] ] + * + * extracttrian(A) = `[ [1.0, 3.0, 4.0], + * [5.0, 7.0, 8.0] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L605 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def linalg_extracttrian(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * LQ factorization for general matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, we compute the LQ factorization (LAPACK *gelqf*, followed by *orglq*). *A* + * must have shape *(x, y)* with *x <= y*, and must have full rank *=x*. The LQ + * factorization consists of *L* with shape *(x, x)* and *Q* with shape *(x, y)*, so + * that: + * + * *A* = *L* \* *Q* + * + * Here, *L* is lower triangular (upper triangle equal to zero) with nonzero diagonal, + * and *Q* is row-orthonormal, meaning that + * + * *Q* \* *Q*\ :sup:`T` + * + * is equal to the identity matrix of shape *(x, x)*. + * + * If *n>2*, *gelqf* is performed separately on the trailing two dimensions for all + * inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single LQ factorization + * A = `[ [1., 2., 3.], [4., 5., 6.] ] + * Q, L = gelqf(A) + * Q = `[ [-0.26726124, -0.53452248, -0.80178373], + * [0.87287156, 0.21821789, -0.43643578] ] + * L = `[ [-3.74165739, 0.], + * [-8.55235974, 1.96396101] ] + * + * Batch LQ factorization + * A = `[ `[ [1., 2., 3.], [4., 5., 6.] 
], + * `[ [7., 8., 9.], [10., 11., 12.] ] ] + * Q, L = gelqf(A) + * Q = `[ `[ [-0.26726124, -0.53452248, -0.80178373], + * [0.87287156, 0.21821789, -0.43643578] ], + * `[ [-0.50257071, -0.57436653, -0.64616234], + * [0.7620735, 0.05862104, -0.64483142] ] ] + * L = `[ `[ [-3.74165739, 0.], + * [-8.55235974, 1.96396101] ], + * `[ [-13.92838828, 0.], + * [-19.09768702, 0.52758934] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L798 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def linalg_gelqf(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Performs general matrix multiplication and accumulation. + * Input are tensors *A*, *B*, *C*, each of dimension *n >= 2* and having the same shape + * on the leading *n-2* dimensions. + * + * If *n=2*, the BLAS3 function *gemm* is performed: + * + * *out* = *alpha* \* *op*\ (*A*) \* *op*\ (*B*) + *beta* \* *C* + * + * Here, *alpha* and *beta* are scalar parameters, and *op()* is either the identity or + * matrix transposition (depending on *transpose_a*, *transpose_b*). + * + * If *n>2*, *gemm* is performed separately for a batch of matrices. The column indices of the matrices + * are given by the last dimensions of the tensors, the row indices by the axis specified with the *axis* + * parameter. By default, the trailing two dimensions will be used for matrix encoding. + * + * For a non-default axis parameter, the operation performed is equivalent to a series of swapaxes/gemm/swapaxes + * calls. For example let *A*, *B*, *C* be 5 dimensional tensors. Then gemm(*A*, *B*, *C*, axis=1) is equivalent + * to the following without the overhead of the additional swapaxis operations:: + * + * A1 = swapaxes(A, dim1=1, dim2=3) + * B1 = swapaxes(B, dim1=1, dim2=3) + * C = swapaxes(C, dim1=1, dim2=3) + * C = gemm(A1, B1, C) + * C = swapaxis(C, dim1=1, dim2=3) + * + * When the input data is of type float32 and the environment variables MXNET_CUDA_ALLOW_TENSOR_CORE + * and MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION are set to 1, this operator will try to use + * pseudo-float16 precision (float32 math with float16 I/O) precision in order to use + * Tensor Cores on suitable NVIDIA GPUs. This can sometimes give significant speedups. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix multiply-add + * A = `[ [1.0, 1.0], [1.0, 1.0] ] + * B = `[ [1.0, 1.0], [1.0, 1.0], [1.0, 1.0] ] + * C = `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ] + * gemm(A, B, C, transpose_b=True, alpha=2.0, beta=10.0) + * = `[ [14.0, 14.0, 14.0], [14.0, 14.0, 14.0] ] + * + * Batch matrix multiply-add + * A = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * B = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * C = `[ `[ [10.0] ], `[ [0.01] ] ] + * gemm(A, B, C, transpose_b=True, alpha=2.0 , beta=10.0) + * = `[ `[ [104.0] ], `[ [0.14] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L89 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def linalg_gemm(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Performs general matrix multiplication. + * Input are tensors *A*, *B*, each of dimension *n >= 2* and having the same shape + * on the leading *n-2* dimensions. 
+ * + * If *n=2*, the BLAS3 function *gemm* is performed: + * + * *out* = *alpha* \* *op*\ (*A*) \* *op*\ (*B*) + * + * Here *alpha* is a scalar parameter and *op()* is either the identity or the matrix + * transposition (depending on *transpose_a*, *transpose_b*). + * + * If *n>2*, *gemm* is performed separately for a batch of matrices. The column indices of the matrices + * are given by the last dimensions of the tensors, the row indices by the axis specified with the *axis* + * parameter. By default, the trailing two dimensions will be used for matrix encoding. + * + * For a non-default axis parameter, the operation performed is equivalent to a series of swapaxes/gemm/swapaxes + * calls. For example let *A*, *B* be 5 dimensional tensors. Then gemm(*A*, *B*, axis=1) is equivalent to + * the following without the overhead of the additional swapaxis operations:: + * + * A1 = swapaxes(A, dim1=1, dim2=3) + * B1 = swapaxes(B, dim1=1, dim2=3) + * C = gemm2(A1, B1) + * C = swapaxis(C, dim1=1, dim2=3) + * + * When the input data is of type float32 and the environment variables MXNET_CUDA_ALLOW_TENSOR_CORE + * and MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION are set to 1, this operator will try to use + * pseudo-float16 precision (float32 math with float16 I/O) precision in order to use + * Tensor Cores on suitable NVIDIA GPUs. This can sometimes give significant speedups. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix multiply + * A = `[ [1.0, 1.0], [1.0, 1.0] ] + * B = `[ [1.0, 1.0], [1.0, 1.0], [1.0, 1.0] ] + * gemm2(A, B, transpose_b=True, alpha=2.0) + * = `[ [4.0, 4.0, 4.0], [4.0, 4.0, 4.0] ] + * + * Batch matrix multiply + * A = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * B = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * gemm2(A, B, transpose_b=True, alpha=2.0) + * = `[ `[ [4.0] ], `[ [0.04 ] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L163 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def linalg_gemm2(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Compute the inverse of a matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* is a square matrix. We compute: + * + * *out* = *A*\ :sup:`-1` + * + * If *n>2*, *inverse* is performed separately on the trailing two dimensions + * for all inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix inverse + * A = `[ [1., 4.], [2., 3.] ] + * inverse(A) = `[ [-0.6, 0.8], [0.4, -0.2] ] + * + * Batch matrix inverse + * A = `[ `[ [1., 4.], [2., 3.] ], + * `[ [1., 3.], [2., 4.] ] ] + * inverse(A) = `[ `[ [-0.6, 0.8], [0.4, -0.2] ], + * `[ [-2., 1.5], [1., -0.5] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L919 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def linalg_inverse(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Constructs a square matrix with the input as diagonal. + * Input is a tensor *A* of dimension *n >= 1*. + * + * If *n=1*, then *A* represents the diagonal entries of a single square matrix. This matrix will be returned as a 2-dimensional tensor. + * If *n>1*, then *A* represents a batch of diagonals of square matrices. 
The batch of diagonal matrices will be returned as an *n+1*-dimensional tensor. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single diagonal matrix construction + * A = [1.0, 2.0] + * + * makediag(A) = `[ [1.0, 0.0], + * [0.0, 2.0] ] + * + * makediag(A, 1) = `[ [0.0, 1.0, 0.0], + * [0.0, 0.0, 2.0], + * [0.0, 0.0, 0.0] ] + * + * Batch diagonal matrix construction + * A = `[ [1.0, 2.0], + * [3.0, 4.0] ] + * + * makediag(A) = `[ `[ [1.0, 0.0], + * [0.0, 2.0] ], + * `[ [3.0, 0.0], + * [0.0, 4.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L547 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def linalg_makediag(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Constructs a square matrix with the input representing a specific triangular sub-matrix. + * This is basically the inverse of *linalg.extracttrian*. Input is a tensor *A* of dimension *n >= 1*. + * + * If *n=1*, then *A* represents the entries of a triangular matrix which is lower triangular if *offset<0* or *offset=0*, *lower=true*. The resulting matrix is derived by first constructing the square + * matrix with the entries outside the triangle set to zero and then adding *offset*-times an additional + * diagonal with zero entries to the square matrix. + * + * If *n>1*, then *A* represents a batch of triangular sub-matrices. The batch of corresponding square matrices is returned as an *n+1*-dimensional tensor. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix construction + * A = [1.0, 2.0, 3.0] + * + * maketrian(A) = `[ [1.0, 0.0], + * [2.0, 3.0] ] + * + * maketrian(A, lower=false) = `[ [1.0, 2.0], + * [0.0, 3.0] ] + * + * maketrian(A, offset=1) = `[ [0.0, 1.0, 2.0], + * [0.0, 0.0, 3.0], + * [0.0, 0.0, 0.0] ] + * maketrian(A, offset=-1) = `[ [0.0, 0.0, 0.0], + * [1.0, 0.0, 0.0], + * [2.0, 3.0, 0.0] ] + * + * Batch matrix construction + * A = `[ [1.0, 2.0, 3.0], + * [4.0, 5.0, 6.0] ] + * + * maketrian(A) = `[ `[ [1.0, 0.0], + * [2.0, 3.0] ], + * `[ [4.0, 0.0], + * [5.0, 6.0] ] ] + * + * maketrian(A, offset=1) = `[ `[ [0.0, 1.0, 2.0], + * [0.0, 0.0, 3.0], + * [0.0, 0.0, 0.0] ], + * `[ [0.0, 4.0, 5.0], + * [0.0, 0.0, 6.0], + * [0.0, 0.0, 0.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L673 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def linalg_maketrian(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Performs Cholesky factorization of a symmetric positive-definite matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, the Cholesky factor *B* of the symmetric, positive definite matrix *A* is + * computed. *B* is triangular (entries of upper or lower triangle are all zero), has + * positive diagonal entries, and: + * + * *A* = *B* \* *B*\ :sup:`T` if *lower* = *true* + * *A* = *B*\ :sup:`T` \* *B* if *lower* = *false* + * + * If *n>2*, *potrf* is performed separately on the trailing two dimensions for all inputs + * (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. 
+ * + * Examples:: + * + * Single matrix factorization + * A = `[ [4.0, 1.0], [1.0, 4.25] ] + * potrf(A) = `[ [2.0, 0], [0.5, 2.0] ] + * + * Batch matrix factorization + * A = `[ `[ [4.0, 1.0], [1.0, 4.25] ], `[ [16.0, 4.0], [4.0, 17.0] ] ] + * potrf(A) = `[ `[ [2.0, 0], [0.5, 2.0] ], `[ [4.0, 0], [1.0, 4.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L214 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def linalg_potrf(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Performs matrix inversion from a Cholesky factorization. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* is a triangular matrix (entries of upper or lower triangle are all zero) + * with positive diagonal. We compute: + * + * *out* = *A*\ :sup:`-T` \* *A*\ :sup:`-1` if *lower* = *true* + * *out* = *A*\ :sup:`-1` \* *A*\ :sup:`-T` if *lower* = *false* + * + * In other words, if *A* is the Cholesky factor of a symmetric positive definite matrix + * *B* (obtained by *potrf*), then + * + * *out* = *B*\ :sup:`-1` + * + * If *n>2*, *potri* is performed separately on the trailing two dimensions for all inputs + * (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * .. note:: Use this operator only if you are certain you need the inverse of *B*, and + * cannot use the Cholesky factor *A* (*potrf*), together with backsubstitution + * (*trsm*). The latter is numerically much safer, and also cheaper. + * + * Examples:: + * + * Single matrix inverse + * A = `[ [2.0, 0], [0.5, 2.0] ] + * potri(A) = `[ [0.26563, -0.0625], [-0.0625, 0.25] ] + * + * Batch matrix inverse + * A = `[ `[ [2.0, 0], [0.5, 2.0] ], `[ [4.0, 0], [1.0, 4.0] ] ] + * potri(A) = `[ `[ [0.26563, -0.0625], [-0.0625, 0.25] ], + * `[ [0.06641, -0.01562], [-0.01562, 0,0625] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L275 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def linalg_potri(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Compute the sign and log of the determinant of a matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* is a square matrix. We compute: + * + * *sign* = *sign(det(A))* + * *logabsdet* = *log(abs(det(A)))* + * + * If *n>2*, *slogdet* is performed separately on the trailing two dimensions + * for all inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * .. note:: The gradient is not properly defined on sign, so the gradient of + * it is not backwarded. + * .. note:: No gradient is backwarded when A is non-invertible. Please see + * the docs of operator det for detail. + * + * Examples:: + * + * Single matrix signed log determinant + * A = `[ [2., 3.], [1., 4.] ] + * sign, logabsdet = slogdet(A) + * sign = [1.] + * logabsdet = [1.609438] + * + * Batch matrix signed log determinant + * A = `[ `[ [2., 3.], [1., 4.] ], + * `[ [1., 2.], [2., 4.] ], + * `[ [1., 2.], [4., 3.] ] ] + * sign, logabsdet = slogdet(A) + * sign = [1., 0., -1.] 
+ * logabsdet = [1.609438, -inf, 1.609438] + * + * + * Defined in src/operator/tensor/la_op.cc:L1031 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def linalg_slogdet(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the sum of the logarithms of the diagonal elements of a square matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* must be square with positive diagonal entries. We sum the natural + * logarithms of the diagonal elements, the result has shape (1,). + * + * If *n>2*, *sumlogdiag* is performed separately on the trailing two dimensions for all + * inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix reduction + * A = `[ [1.0, 1.0], [1.0, 7.0] ] + * sumlogdiag(A) = [1.9459] + * + * Batch matrix reduction + * A = `[ `[ [1.0, 1.0], [1.0, 7.0] ], `[ [3.0, 0], [0, 17.0] ] ] + * sumlogdiag(A) = [1.9459, 3.9318] + * + * + * Defined in src/operator/tensor/la_op.cc:L445 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def linalg_sumlogdiag(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Multiplication of matrix with its transpose. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, the operator performs the BLAS3 function *syrk*: + * + * *out* = *alpha* \* *A* \* *A*\ :sup:`T` + * + * if *transpose=False*, or + * + * *out* = *alpha* \* *A*\ :sup:`T` \ \* *A* + * + * if *transpose=True*. + * + * If *n>2*, *syrk* is performed separately on the trailing two dimensions for all + * inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix multiply + * A = `[ [1., 2., 3.], [4., 5., 6.] ] + * syrk(A, alpha=1., transpose=False) + * = `[ [14., 32.], + * [32., 77.] ] + * syrk(A, alpha=1., transpose=True) + * = `[ [17., 22., 27.], + * [22., 29., 36.], + * [27., 36., 45.] ] + * + * Batch matrix multiply + * A = `[ `[ [1., 1.] ], `[ [0.1, 0.1] ] ] + * syrk(A, alpha=2., transpose=False) = `[ `[ [4.] ], `[ [0.04] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L730 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def linalg_syrk(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Performs multiplication with a lower triangular matrix. + * Input are tensors *A*, *B*, each of dimension *n >= 2* and having the same shape + * on the leading *n-2* dimensions. + * + * If *n=2*, *A* must be triangular. The operator performs the BLAS3 function + * *trmm*: + * + * *out* = *alpha* \* *op*\ (*A*) \* *B* + * + * if *rightside=False*, or + * + * *out* = *alpha* \* *B* \* *op*\ (*A*) + * + * if *rightside=True*. Here, *alpha* is a scalar parameter, and *op()* is either the + * identity or the matrix transposition (depending on *transpose*). + * + * If *n>2*, *trmm* is performed separately on the trailing two dimensions for all inputs + * (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. 
+ * + * Examples:: + * + * Single triangular matrix multiply + * A = `[ [1.0, 0], [1.0, 1.0] ] + * B = `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ] + * trmm(A, B, alpha=2.0) = `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ] + * + * Batch triangular matrix multiply + * A = `[ `[ [1.0, 0], [1.0, 1.0] ], `[ [1.0, 0], [1.0, 1.0] ] ] + * B = `[ `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ], `[ [0.5, 0.5, 0.5], [0.5, 0.5, 0.5] ] ] + * trmm(A, B, alpha=2.0) = `[ `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ], + * `[ [1.0, 1.0, 1.0], [2.0, 2.0, 2.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L333 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def linalg_trmm(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Solves matrix equation involving a lower triangular matrix. + * Input are tensors *A*, *B*, each of dimension *n >= 2* and having the same shape + * on the leading *n-2* dimensions. + * + * If *n=2*, *A* must be triangular. The operator performs the BLAS3 function + * *trsm*, solving for *out* in: + * + * *op*\ (*A*) \* *out* = *alpha* \* *B* + * + * if *rightside=False*, or + * + * *out* \* *op*\ (*A*) = *alpha* \* *B* + * + * if *rightside=True*. Here, *alpha* is a scalar parameter, and *op()* is either the + * identity or the matrix transposition (depending on *transpose*). + * + * If *n>2*, *trsm* is performed separately on the trailing two dimensions for all inputs + * (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix solve + * A = `[ [1.0, 0], [1.0, 1.0] ] + * B = `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ] + * trsm(A, B, alpha=0.5) = `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ] + * + * Batch matrix solve + * A = `[ `[ [1.0, 0], [1.0, 1.0] ], `[ [1.0, 0], [1.0, 1.0] ] ] + * B = `[ `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ], + * `[ [4.0, 4.0, 4.0], [8.0, 8.0, 8.0] ] ] + * trsm(A, B, alpha=0.5) = `[ `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ], + * `[ [2.0, 2.0, 2.0], [2.0, 2.0, 2.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L396 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def linalg_trsm(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise Natural logarithmic value of the input. + * + * The natural logarithm is logarithm in base *e*, so that ``log(exp(x)) = x`` + * + * The storage type of ``log`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L76 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def log(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise Base-10 logarithmic value of the input. + * + * ``10**log10(x) = x`` + * + * The storage type of ``log10`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L93 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def log10(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise ``log(1 + x)`` value of the input. 
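+ *
+ * A small worked example (values rounded; chosen here for illustration)::
+ *
+ *   log1p([0., 1., 1e-10]) = [0., 0.69314718, 1e-10]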
+ * + * This function is more accurate than ``log(1 + x)`` for small ``x`` so that + * :math:`1+x\approx 1` + * + * The storage type of ``log1p`` output depends upon the input storage type: + * + * - log1p(default) = default + * - log1p(row_sparse) = row_sparse + * - log1p(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L206 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def log1p(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise Base-2 logarithmic value of the input. + * + * ``2**log2(x) = x`` + * + * The storage type of ``log2`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L105 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def log2(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the log softmax of the input. + * This is equivalent to computing softmax followed by log. + * + * Examples:: + * + * >>> x = mx.nd.array([1, 2, .1]) + * >>> mx.nd.log_softmax(x).asnumpy() + * array([-1.41702998, -0.41702995, -2.31702995], dtype=float32) + * + * >>> x = mx.nd.array( `[ [1, 2, .1],[.1, 2, 1] ] ) + * >>> mx.nd.log_softmax(x, axis=0).asnumpy() + * array(`[ [-0.34115392, -0.69314718, -1.24115396], + * [-1.24115396, -0.69314718, -0.34115392] ], dtype=float32) + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def log_softmax(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns the result of logical NOT (!) function + * + * Example: + * logical_not([-2., 0., 1.]) = [0., 1., 0.] + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def logical_not(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Make your own loss function in network construction. + * + * This operator accepts a customized loss function symbol as a terminal loss and + * the symbol should be an operator with no backward dependency. + * The output of this function is the gradient of loss with respect to the input data. + * + * For example, if you are a making a cross entropy loss function. Assume ``out`` is the + * predicted output and ``label`` is the true label, then the cross entropy can be defined as:: + * + * cross_entropy = label * log(out) + (1 - label) * log(1 - out) + * loss = make_loss(cross_entropy) + * + * We will need to use ``make_loss`` when we are creating our own loss function or we want to + * combine multiple loss functions. Also we may want to stop some variables' gradients + * from backpropagation. See more detail in ``BlockGrad`` or ``stop_gradient``. 
+ * + * The storage type of ``make_loss`` output depends upon the input storage type: + * + * - make_loss(default) = default + * - make_loss(row_sparse) = row_sparse + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L360 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def make_loss(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the max of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L32 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def max(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the max of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L32 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def max_axis(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the mean of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L84 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def mean(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the min of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L47 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def min(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the min of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L47 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def min_axis(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * + * Calculate the mean and variance of `data`. + * + * The mean and variance are calculated by aggregating the contents of data across axes. + * If x is 1-D and axes = [0] this is just the mean and variance of a vector. + * + * Example: + * + * x = `[ [1, 2, 3], [4, 5, 6] ] + * mean, var = moments(data=x, axes=[0]) + * mean = [2.5, 3.5, 4.5] + * var = [2.25, 2.25, 2.25] + * mean, var = moments(data=x, axes=[1]) + * mean = [2.0, 5.0] + * var = [0.66666667, 0.66666667] + * mean, var = moments(data=x, axis=[0, 1]) + * mean = [3.5] + * var = [2.9166667] + * + * + * + * Defined in src/operator/nn/moments.cc:L54 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def moments(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Mixed Precision version of Phase I of lamb update + * it performs the following operations and returns g:. + * + * Link to paper: https://arxiv.org/pdf/1904.00962.pdf + * + * .. 
math:: + * \begin{gather*} + * grad32 = grad(float16) * rescale_grad + * if (grad < -clip_gradient) + * then + * grad = -clip_gradient + * if (grad > clip_gradient) + * then + * grad = clip_gradient + * + * mean = beta1 * mean + (1 - beta1) * grad; + * variance = beta2 * variance + (1. - beta2) * grad ^ 2; + * + * if (bias_correction) + * then + * mean_hat = mean / (1. - beta1^t); + * var_hat = var / (1 - beta2^t); + * g = mean_hat / (var_hat^(1/2) + epsilon) + wd * weight32; + * else + * g = mean / (var_data^(1/2) + epsilon) + wd * weight32; + * \end{gather*} + * + * + * + * Defined in src/operator/optimizer_op.cc:L1033 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def mp_lamb_update_phase1(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Mixed Precision version Phase II of lamb update + * it performs the following operations and updates grad. + * + * Link to paper: https://arxiv.org/pdf/1904.00962.pdf + * + * .. math:: + * \begin{gather*} + * if (lower_bound >= 0) + * then + * r1 = max(r1, lower_bound) + * if (upper_bound >= 0) + * then + * r1 = max(r1, upper_bound) + * + * if (r1 == 0 or r2 == 0) + * then + * lr = lr + * else + * lr = lr * (r1/r2) + * weight32 = weight32 - lr * g + * weight(float16) = weight32 + * \end{gather*} + * + * + * + * Defined in src/operator/optimizer_op.cc:L1075 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def mp_lamb_update_phase2(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Update function for multi-precision Nesterov Accelerated Gradient( NAG) optimizer. 
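+ *
+ * A sketch of the intended update (an assumption based on ``nag_mom_update`` below and the
+ * mixed-precision pattern of ``mp_lamb_update_phase2`` above, not taken from the operator docs)::
+ *
+ *   weight32 = weight32 - v_t      (v_t as in nag_mom_update)
+ *   weight(float16) = weight32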
+ * + * + * Defined in src/operator/optimizer_op.cc:L745 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def mp_nag_mom_update(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Updater function for multi-precision sgd optimizer + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def mp_sgd_mom_update(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Updater function for multi-precision sgd optimizer + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def mp_sgd_update(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Check if all the float numbers in all the arrays are finite (used for AMP) + * + * + * Defined in src/operator/contrib/all_finite.cc:L133 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def multi_all_finite(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Compute the LARS coefficients of multiple weights and grads from their sums of square" + * + * + * Defined in src/operator/contrib/multi_lars.cc:L37 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def multi_lars(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Momentum update function for multi-precision Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * + * + * Defined in src/operator/optimizer_op.cc:L472 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def multi_mp_sgd_mom_update(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Update function for multi-precision Stochastic Gradient Descent (SDG) optimizer. + * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * + * + * Defined in src/operator/optimizer_op.cc:L417 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def multi_mp_sgd_update(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Momentum update function for Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. 
math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * + * + * Defined in src/operator/optimizer_op.cc:L374 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def multi_sgd_mom_update(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Update function for Stochastic Gradient Descent (SDG) optimizer. + * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * + * + * Defined in src/operator/optimizer_op.cc:L329 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def multi_sgd_update(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Compute the sums of squares of multiple arrays + * + * + * Defined in src/operator/contrib/multi_sum_sq.cc:L36 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def multi_sum_sq(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Update function for Nesterov Accelerated Gradient( NAG) optimizer. + * It updates the weights using the following formula, + * + * .. math:: + * v_t = \gamma v_{t-1} + \eta * \nabla J(W_{t-1} - \gamma v_{t-1})\\ + * W_t = W_{t-1} - v_t + * + * Where + * :math:`\eta` is the learning rate of the optimizer + * :math:`\gamma` is the decay rate of the momentum estimate + * :math:`\v_t` is the update vector at time step `t` + * :math:`\W_t` is the weight vector at time step `t` + * + * + * + * Defined in src/operator/optimizer_op.cc:L726 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def nag_mom_update(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the product of array elements over given axes treating Not a Numbers (``NaN``) as one. + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_prod_value.cc:L47 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def nanprod(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the sum of array elements over given axes treating Not a Numbers (``NaN``) as zero. + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_sum_value.cc:L102 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def nansum(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Numerical negative of the argument, element-wise. 
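+ *
+ * Example (illustrative)::
+ *
+ *   negative([1., -2.5, 3.]) = [-1., 2.5, -3.]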
+ * + * The storage type of ``negative`` output depends upon the input storage type: + * + * - negative(default) = default + * - negative(row_sparse) = row_sparse + * - negative(csr) = csr + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def negative(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the norm on an NDArray. + * + * This operator computes the norm on an NDArray with the specified axis, depending + * on the value of the ord parameter. By default, it computes the L2 norm on the entire + * array. Currently only ord=2 supports sparse ndarrays. + * + * Examples:: + * + * x = `[ `[ [1, 2], + * [3, 4] ], + * `[ [2, 2], + * [5, 6] ] ] + * + * norm(x, ord=2, axis=1) = `[ [3.1622777 4.472136 ] + * [5.3851647 6.3245554] ] + * + * norm(x, ord=1, axis=1) = `[ [4., 6.], + * [7., 8.] ] + * + * rsp = x.cast_storage('row_sparse') + * + * norm(rsp) = [5.47722578] + * + * csr = x.cast_storage('csr') + * + * norm(csr) = [5.47722578] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_norm_value.cc:L89 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def norm(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Draw random samples from a normal (Gaussian) distribution. + * + * .. note:: The existing alias ``normal`` is deprecated. + * + * Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale* + * (standard deviation). + * + * Example:: + * + * normal(loc=0, scale=1, shape=(2,2)) = `[ [ 1.89171135, -1.16881478], + * [-1.23474145, 1.55807114] ] + * + * + * Defined in src/operator/random/sample_op.cc:L113 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def normal(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns a one-hot array. + * + * The locations represented by `indices` take value `on_value`, while all + * other locations take value `off_value`. + * + * `one_hot` operation with `indices` of shape ``(i0, i1)`` and `depth` of ``d`` would result + * in an output array of shape ``(i0, i1, d)`` with:: + * + * output[i,j,:] = off_value + * output[i,j,indices[i,j] ] = on_value + * + * Examples:: + * + * one_hot([1,0,2,0], 3) = `[ [ 0. 1. 0.] + * [ 1. 0. 0.] + * [ 0. 0. 1.] + * [ 1. 0. 0.] ] + * + * one_hot([1,0,2,0], 3, on_value=8, off_value=1, + * dtype='int32') = `[ [1 8 1] + * [8 1 1] + * [1 1 8] + * [8 1 1] ] + * + * one_hot(`[ [1,0],[1,0],[2,0] ], 3) = `[ `[ [ 0. 1. 0.] + * [ 1. 0. 0.] ] + * + * `[ [ 0. 1. 0.] + * [ 1. 0. 0.] ] + * + * `[ [ 0. 0. 1.] + * [ 1. 0. 0.] ] ] + * + * + * Defined in src/operator/tensor/indexing_op.cc:L824 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def one_hot(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Return an array of ones with the same shape and type + * as the input array. + * + * Examples:: + * + * x = `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * + * ones_like(x) = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] 
] + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def ones_like(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Pads an input array with a constant or edge values of the array. + * + * .. note:: `Pad` is deprecated. Use `pad` instead. + * + * .. note:: Current implementation only supports 4D and 5D input arrays with padding applied + * only on axes 1, 2 and 3. Expects axes 4 and 5 in `pad_width` to be zero. + * + * This operation pads an input array with either a `constant_value` or edge values + * along each axis of the input array. The amount of padding is specified by `pad_width`. + * + * `pad_width` is a tuple of integer padding widths for each axis of the format + * ``(before_1, after_1, ... , before_N, after_N)``. The `pad_width` should be of length ``2*N`` + * where ``N`` is the number of dimensions of the array. + * + * For dimension ``N`` of the input array, ``before_N`` and ``after_N`` indicates how many values + * to add before and after the elements of the array along dimension ``N``. + * The widths of the higher two dimensions ``before_1``, ``after_1``, ``before_2``, + * ``after_2`` must be 0. + * + * Example:: + * + * x = `[ [`[ [ 1. 2. 3.] + * [ 4. 5. 6.] ] + * + * `[ [ 7. 8. 9.] + * [ 10. 11. 12.] ] ] + * + * + * `[ `[ [ 11. 12. 13.] + * [ 14. 15. 16.] ] + * + * `[ [ 17. 18. 19.] + * [ 20. 21. 22.] ] ] ] + * + * pad(x,mode="edge", pad_width=(0,0,0,0,1,1,1,1)) = + * + * `[ [`[ [ 1. 1. 2. 3. 3.] + * [ 1. 1. 2. 3. 3.] + * [ 4. 4. 5. 6. 6.] + * [ 4. 4. 5. 6. 6.] ] + * + * `[ [ 7. 7. 8. 9. 9.] + * [ 7. 7. 8. 9. 9.] + * [ 10. 10. 11. 12. 12.] + * [ 10. 10. 11. 12. 12.] ] ] + * + * + * `[ `[ [ 11. 11. 12. 13. 13.] + * [ 11. 11. 12. 13. 13.] + * [ 14. 14. 15. 16. 16.] + * [ 14. 14. 15. 16. 16.] ] + * + * `[ [ 17. 17. 18. 19. 19.] + * [ 17. 17. 18. 19. 19.] + * [ 20. 20. 21. 22. 22.] + * [ 20. 20. 21. 22. 22.] ] ] ] + * + * pad(x, mode="constant", constant_value=0, pad_width=(0,0,0,0,1,1,1,1)) = + * + * `[ [`[ [ 0. 0. 0. 0. 0.] + * [ 0. 1. 2. 3. 0.] + * [ 0. 4. 5. 6. 0.] + * [ 0. 0. 0. 0. 0.] ] + * + * `[ [ 0. 0. 0. 0. 0.] + * [ 0. 7. 8. 9. 0.] + * [ 0. 10. 11. 12. 0.] + * [ 0. 0. 0. 0. 0.] ] ] + * + * + * `[ `[ [ 0. 0. 0. 0. 0.] + * [ 0. 11. 12. 13. 0.] + * [ 0. 14. 15. 16. 0.] + * [ 0. 0. 0. 0. 0.] ] + * + * `[ [ 0. 0. 0. 0. 0.] + * [ 0. 17. 18. 19. 0.] + * [ 0. 20. 21. 22. 0.] + * [ 0. 0. 0. 0. 0.] ] ] ] + * + * + * + * + * Defined in src/operator/pad.cc:L766 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def pad(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Picks elements from an input array according to the input indices along the given axis. + * + * Given an input array of shape ``(d0, d1)`` and indices of shape ``(i0,)``, the result will be + * an output array of shape ``(i0,)`` with:: + * + * output[i] = input[i, indices[i] ] + * + * By default, if any index mentioned is too large, it is replaced by the index that addresses + * the last element along an axis (the `clip` mode). + * + * This function supports n-dimensional input and (n-1)-dimensional indices arrays. + * + * Examples:: + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] ] + * + * // picks elements with specified indices along axis 0 + * pick(x, y=[0,1], 0) = [ 1., 4.] 
+ * + * // picks elements with specified indices along axis 1 + * pick(x, y=[0,1,0], 1) = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] ] + * + * // picks elements with specified indices along axis 1 using 'wrap' mode + * // to place indicies that would normally be out of bounds + * pick(x, y=[2,-1,-2], 1, mode='wrap') = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] ] + * + * // picks elements with specified indices along axis 1 and dims are maintained + * pick(x,y, 1, keepdims=True) = `[ [ 2.], + * [ 3.], + * [ 6.] ] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L155 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def pick(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Momentum update function for multi-precision Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * + * + * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L200 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def preloaded_multi_mp_sgd_mom_update(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Update function for multi-precision Stochastic Gradient Descent (SDG) optimizer. + * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * + * + * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L140 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def preloaded_multi_mp_sgd_update(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Momentum update function for Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * + * + * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L91 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def preloaded_multi_sgd_mom_update(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Update function for Stochastic Gradient Descent (SDG) optimizer. 
+ * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * + * + * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L42 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def preloaded_multi_sgd_update(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the product of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L31 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def prod(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Converts each element of the input array from degrees to radians. + * + * .. math:: + * radians([0, 90, 180, 270, 360]) = [0, \pi/2, \pi, 3\pi/2, 2\pi] + * + * The storage type of ``radians`` output depends upon the input storage type: + * + * - radians(default) = default + * - radians(row_sparse) = row_sparse + * - radians(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L293 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def radians(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Draw random samples from an exponential distribution. + * + * Samples are distributed according to an exponential distribution parametrized by *lambda* (rate). + * + * Example:: + * + * exponential(lam=4, shape=(2,2)) = `[ [ 0.0097189 , 0.08999364], + * [ 0.04146638, 0.31715935] ] + * + * + * Defined in src/operator/random/sample_op.cc:L137 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def random_exponential(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Draw random samples from a gamma distribution. + * + * Samples are distributed according to a gamma distribution parametrized by *alpha* (shape) and *beta* (scale). + * + * Example:: + * + * gamma(alpha=9, beta=0.5, shape=(2,2)) = `[ [ 7.10486984, 3.37695289], + * [ 3.91697288, 3.65933681] ] + * + * + * Defined in src/operator/random/sample_op.cc:L125 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def random_gamma(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Draw random samples from a generalized negative binomial distribution. + * + * Samples are distributed according to a generalized negative binomial distribution parametrized by + * *mu* (mean) and *alpha* (dispersion). *alpha* is defined as *1/k* where *k* is the failure limit of the + * number of unsuccessful experiments (generalized to real numbers). + * Samples will always be returned as a floating point data type. + * + * Example:: + * + * generalized_negative_binomial(mu=2.0, alpha=0.3, shape=(2,2)) = `[ [ 2., 1.], + * [ 6., 4.] 
] + * + * + * Defined in src/operator/random/sample_op.cc:L179 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def random_generalized_negative_binomial(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Draw random samples from a negative binomial distribution. + * + * Samples are distributed according to a negative binomial distribution parametrized by + * *k* (limit of unsuccessful experiments) and *p* (failure probability in each experiment). + * Samples will always be returned as a floating point data type. + * + * Example:: + * + * negative_binomial(k=3, p=0.4, shape=(2,2)) = `[ [ 4., 7.], + * [ 2., 5.] ] + * + * + * Defined in src/operator/random/sample_op.cc:L164 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def random_negative_binomial(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Draw random samples from a normal (Gaussian) distribution. + * + * .. note:: The existing alias ``normal`` is deprecated. + * + * Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale* + * (standard deviation). + * + * Example:: + * + * normal(loc=0, scale=1, shape=(2,2)) = `[ [ 1.89171135, -1.16881478], + * [-1.23474145, 1.55807114] ] + * + * + * Defined in src/operator/random/sample_op.cc:L113 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def random_normal(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * Dirichlet distributions with parameter *alpha*. + * + * The shape of *alpha* must match the leftmost subshape of *sample*. That is, *sample* + * can have the same shape as *alpha*, in which case the output contains one density per + * distribution, or *sample* can be a tensor of tensors with that shape, in which case + * the output is a tensor of densities such that the densities at index *i* in the output + * are given by the samples at index *i* in *sample* parameterized by the value of *alpha* + * at index *i*. + * + * Examples:: + * + * random_pdf_dirichlet(sample=`[ [1,2],[2,3],[3,4] ], alpha=[2.5, 2.5]) = + * [38.413498, 199.60245, 564.56085] + * + * sample = `[ `[ [1, 2, 3], [10, 20, 30], [100, 200, 300] ], + * `[ [0.1, 0.2, 0.3], [0.01, 0.02, 0.03], [0.001, 0.002, 0.003] ] ] + * + * random_pdf_dirichlet(sample=sample, alpha=[0.1, 0.4, 0.9]) = + * `[ [2.3257459e-02, 5.8420084e-04, 1.4674458e-05], + * [9.2589635e-01, 3.6860607e+01, 1.4674468e+03] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L315 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def random_pdf_dirichlet(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * exponential distributions with parameters *lam* (rate). + * + * The shape of *lam* must match the leftmost subshape of *sample*. 
That is, *sample* + * can have the same shape as *lam*, in which case the output contains one density per + * distribution, or *sample* can be a tensor of tensors with that shape, in which case + * the output is a tensor of densities such that the densities at index *i* in the output + * are given by the samples at index *i* in *sample* parameterized by the value of *lam* + * at index *i*. + * + * Examples:: + * + * random_pdf_exponential(sample=`[ [1, 2, 3] ], lam=[1]) = + * `[ [0.36787945, 0.13533528, 0.04978707] ] + * + * sample = `[ [1,2,3], + * [1,2,3], + * [1,2,3] ] + * + * random_pdf_exponential(sample=sample, lam=[1,0.5,0.25]) = + * `[ [0.36787945, 0.13533528, 0.04978707], + * [0.30326533, 0.18393973, 0.11156508], + * [0.1947002, 0.15163267, 0.11809164] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L304 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def random_pdf_exponential(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * gamma distributions with parameters *alpha* (shape) and *beta* (rate). + * + * *alpha* and *beta* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *alpha* and *beta*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *alpha* and *beta* at index *i*. + * + * Examples:: + * + * random_pdf_gamma(sample=`[ [1,2,3,4,5] ], alpha=[5], beta=[1]) = + * `[ [0.01532831, 0.09022352, 0.16803136, 0.19536681, 0.17546739] ] + * + * sample = `[ [1, 2, 3, 4, 5], + * [2, 3, 4, 5, 6], + * [3, 4, 5, 6, 7] ] + * + * random_pdf_gamma(sample=sample, alpha=[5,6,7], beta=[1,1,1]) = + * `[ [0.01532831, 0.09022352, 0.16803136, 0.19536681, 0.17546739], + * [0.03608941, 0.10081882, 0.15629345, 0.17546739, 0.16062315], + * [0.05040941, 0.10419563, 0.14622283, 0.16062315, 0.14900276] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L301 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def random_pdf_gamma(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * generalized negative binomial distributions with parameters *mu* (mean) + * and *alpha* (dispersion). This can be understood as a reparameterization of + * the negative binomial, where *k* = *1 / alpha* and *p* = *1 / (mu \* alpha + 1)*. + * + * *mu* and *alpha* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *mu* and *alpha*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *mu* and *alpha* at index *i*. 
+ * + * Examples:: + * + * random_pdf_generalized_negative_binomial(sample=`[ [1, 2, 3, 4] ], alpha=[1], mu=[1]) = + * `[ [0.25, 0.125, 0.0625, 0.03125] ] + * + * sample = `[ [1,2,3,4], + * [1,2,3,4] ] + * random_pdf_generalized_negative_binomial(sample=sample, alpha=[1, 0.6666], mu=[1, 1.5]) = + * `[ [0.25, 0.125, 0.0625, 0.03125 ], + * [0.26517063, 0.16573331, 0.09667706, 0.05437994] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L311 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def random_pdf_generalized_negative_binomial(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the value of the PDF of samples of + * negative binomial distributions with parameters *k* (failure limit) and *p* (failure probability). + * + * *k* and *p* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *k* and *p*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *k* and *p* at index *i*. + * + * Examples:: + * + * random_pdf_negative_binomial(sample=`[ [1,2,3,4] ], k=[1], p=a[0.5]) = + * `[ [0.25, 0.125, 0.0625, 0.03125] ] + * + * # Note that k may be real-valued + * sample = `[ [1,2,3,4], + * [1,2,3,4] ] + * random_pdf_negative_binomial(sample=sample, k=[1, 1.5], p=[0.5, 0.5]) = + * `[ [0.25, 0.125, 0.0625, 0.03125 ], + * [0.26516506, 0.16572815, 0.09667476, 0.05437956] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L308 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def random_pdf_negative_binomial(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * normal distributions with parameters *mu* (mean) and *sigma* (standard deviation). + * + * *mu* and *sigma* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *mu* and *sigma*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *mu* and *sigma* at index *i*. 
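+ *
+ * A call sketch through the generated signature below; the ``sample``, ``mu`` and ``sigma`` symbols
+ * and the keyword-style invocation are illustrative assumptions::
+ *
+ *   val pdf = random_pdf_normal(name = "pdf0")()(Map("sample" -> sample, "mu" -> mu, "sigma" -> sigma))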
+ * + * Examples:: + * + * sample = `[ [-2, -1, 0, 1, 2] ] + * random_pdf_normal(sample=sample, mu=[0], sigma=[1]) = + * `[ [0.05399097, 0.24197073, 0.3989423, 0.24197073, 0.05399097] ] + * + * random_pdf_normal(sample=sample*2, mu=[0,0], sigma=[1,2]) = + * `[ [0.05399097, 0.24197073, 0.3989423, 0.24197073, 0.05399097], + * [0.12098537, 0.17603266, 0.19947115, 0.17603266, 0.12098537] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L299 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def random_pdf_normal(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * Poisson distributions with parameters *lam* (rate). + * + * The shape of *lam* must match the leftmost subshape of *sample*. That is, *sample* + * can have the same shape as *lam*, in which case the output contains one density per + * distribution, or *sample* can be a tensor of tensors with that shape, in which case + * the output is a tensor of densities such that the densities at index *i* in the output + * are given by the samples at index *i* in *sample* parameterized by the value of *lam* + * at index *i*. + * + * Examples:: + * + * random_pdf_poisson(sample=`[ [0,1,2,3] ], lam=[1]) = + * `[ [0.36787945, 0.36787945, 0.18393973, 0.06131324] ] + * + * sample = `[ [0,1,2,3], + * [0,1,2,3], + * [0,1,2,3] ] + * + * random_pdf_poisson(sample=sample, lam=[1,2,3]) = + * `[ [0.36787945, 0.36787945, 0.18393973, 0.06131324], + * [0.13533528, 0.27067056, 0.27067056, 0.18044704], + * [0.04978707, 0.14936121, 0.22404182, 0.22404182] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L306 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def random_pdf_poisson(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * uniform distributions on the intervals given by *[low,high)*. + * + * *low* and *high* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *low* and *high*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *low* and *high* at index *i*. + * + * Examples:: + * + * random_pdf_uniform(sample=`[ [1,2,3,4] ], low=[0], high=[10]) = [0.1, 0.1, 0.1, 0.1] + * + * sample = `[ `[ [1, 2, 3], + * [1, 2, 3] ], + * `[ [1, 2, 3], + * [1, 2, 3] ] ] + * low = `[ [0, 0], + * [0, 0] ] + * high = `[ [ 5, 10], + * [15, 20] ] + * random_pdf_uniform(sample=sample, low=low, high=high) = + * `[ `[ [0.2, 0.2, 0.2 ], + * [0.1, 0.1, 0.1 ] ], + * `[ [0.06667, 0.06667, 0.06667], + * [0.05, 0.05, 0.05 ] ] ] + * + * + * + * Defined in src/operator/random/pdf_op.cc:L297 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def random_pdf_uniform(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Draw random samples from a Poisson distribution. 
+ * + * Samples are distributed according to a Poisson distribution parametrized by *lambda* (rate). + * Samples will always be returned as a floating point data type. + * + * Example:: + * + * poisson(lam=4, shape=(2,2)) = `[ [ 5., 2.], + * [ 4., 6.] ] + * + * + * Defined in src/operator/random/sample_op.cc:L150 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def random_poisson(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Draw random samples from a discrete uniform distribution. + * + * Samples are uniformly distributed over the half-open interval *[low, high)* + * (includes *low*, but excludes *high*). + * + * Example:: + * + * randint(low=0, high=5, shape=(2,2)) = `[ [ 0, 2], + * [ 3, 1] ] + * + * + * + * Defined in src/operator/random/sample_op.cc:L194 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def random_randint(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Draw random samples from a uniform distribution. + * + * .. note:: The existing alias ``uniform`` is deprecated. + * + * Samples are uniformly distributed over the half-open interval *[low, high)* + * (includes *low*, but excludes *high*). + * + * Example:: + * + * uniform(low=0, high=1, shape=(2,2)) = `[ [ 0.60276335, 0.85794562], + * [ 0.54488319, 0.84725171] ] + * + * + * + * Defined in src/operator/random/sample_op.cc:L96 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def random_uniform(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Converts a batch of index arrays into an array of flat indices. The operator follows numpy conventions so a single multi index is given by a column of the input matrix. The leading dimension may be left unspecified by using -1 as placeholder. + * + * Examples:: + * + * A = `[ [3,6,6],[4,5,1] ] + * ravel(A, shape=(7,6)) = [22,41,37] + * ravel(A, shape=(-1,6)) = [22,41,37] + * + * + * + * Defined in src/operator/tensor/ravel.cc:L42 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def ravel_multi_index(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise inverse cube-root value of the input. + * + * .. math:: + * rcbrt(x) = 1/\sqrt[3]{x} + * + * Example:: + * + * rcbrt([1,8,-125]) = [1.0, 0.5, -0.2] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L269 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def rcbrt(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns the reciprocal of the argument, element-wise. + * + * Calculates 1/x. 
+ * + * Example:: + * + * reciprocal([-2, 1, 3, 1.6, 0.2]) = [-0.5, 1.0, 0.33333334, 0.625, 5.0] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L42 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def reciprocal(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes rectified linear activation. + * + * .. math:: + * max(features, 0) + * + * The storage type of ``relu`` output depends upon the input storage type: + * + * - relu(default) = default + * - relu(row_sparse) = row_sparse + * - relu(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L85 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def relu(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Repeats elements of an array. + * By default, ``repeat`` flattens the input array into 1-D and then repeats the + * elements:: + * x = `[ [ 1, 2], + * [ 3, 4] ] + * repeat(x, repeats=2) = [ 1., 1., 2., 2., 3., 3., 4., 4.] + * The parameter ``axis`` specifies the axis along which to perform repeat:: + * repeat(x, repeats=2, axis=1) = `[ [ 1., 1., 2., 2.], + * [ 3., 3., 4., 4.] ] + * repeat(x, repeats=2, axis=0) = `[ [ 1., 2.], + * [ 1., 2.], + * [ 3., 4.], + * [ 3., 4.] ] + * repeat(x, repeats=2, axis=-1) = `[ [ 1., 1., 2., 2.], + * [ 3., 3., 4., 4.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L744 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def repeat(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Set to zero multiple arrays + * + * + * Defined in src/operator/contrib/reset_arrays.cc:L36 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def reset_arrays(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Reshapes the input array. + * .. note:: ``Reshape`` is deprecated, use ``reshape`` + * Given an array and a shape, this function returns a copy of the array in the new shape. + * The shape is a tuple of integers such as (2,3,4). The size of the new shape should be same as the size of the input array. + * Example:: + * reshape([1,2,3,4], shape=(2,2)) = `[ [1,2], [3,4] ] + * Some dimensions of the shape can take special values from the set {0, -1, -2, -3, -4}. The significance of each is explained below: + * - ``0`` copy this dimension from the input to the output shape. + * Example:: + * - input shape = (2,3,4), shape = (4,0,2), output shape = (4,3,2) + * - input shape = (2,3,4), shape = (2,0,0), output shape = (2,3,4) + * - ``-1`` infers the dimension of the output shape by using the remainder of the input dimensions + * keeping the size of the new array same as that of the input array. + * At most one dimension of shape can be -1. + * Example:: + * - input shape = (2,3,4), shape = (6,1,-1), output shape = (6,1,4) + * - input shape = (2,3,4), shape = (3,-1,8), output shape = (3,1,8) + * - input shape = (2,3,4), shape=(-1,), output shape = (24,) + * - ``-2`` copy all/remainder of the input dimensions to the output shape. 
+ * Example:: + * - input shape = (2,3,4), shape = (-2,), output shape = (2,3,4) + * - input shape = (2,3,4), shape = (2,-2), output shape = (2,3,4) + * - input shape = (2,3,4), shape = (-2,1,1), output shape = (2,3,4,1,1) + * - ``-3`` use the product of two consecutive dimensions of the input shape as the output dimension. + * Example:: + * - input shape = (2,3,4), shape = (-3,4), output shape = (6,4) + * - input shape = (2,3,4,5), shape = (-3,-3), output shape = (6,20) + * - input shape = (2,3,4), shape = (0,-3), output shape = (2,12) + * - input shape = (2,3,4), shape = (-3,-2), output shape = (6,4) + * - ``-4`` split one dimension of the input into two dimensions passed subsequent to -4 in shape (can contain -1). + * Example:: + * - input shape = (2,3,4), shape = (-4,1,2,-2), output shape =(1,2,3,4) + * - input shape = (2,3,4), shape = (2,-4,-1,3,-2), output shape = (2,1,3,4) + * If the argument `reverse` is set to 1, then the special values are inferred from right to left. + * Example:: + * - without reverse=1, for input shape = (10,5,4), shape = (-1,0), output shape would be (40,5) + * - with reverse=1, output shape will be (50,4). + * + * + * Defined in src/operator/tensor/matrix_op.cc:L175 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def reshape(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Reshape some or all dimensions of `lhs` to have the same shape as some or all dimensions of `rhs`. + * + * Returns a **view** of the `lhs` array with a new shape without altering any data. + * + * Example:: + * + * x = [1, 2, 3, 4, 5, 6] + * y = `[ [0, -4], [3, 2], [2, 2] ] + * reshape_like(x, y) = `[ [1, 2], [3, 4], [5, 6] ] + * + * More precise control over how dimensions are inherited is achieved by specifying \ + * slices over the `lhs` and `rhs` array dimensions. Only the sliced `lhs` dimensions \ + * are reshaped to the `rhs` sliced dimensions, with the non-sliced `lhs` dimensions staying the same. + * + * Examples:: + * + * - lhs shape = (30,7), rhs shape = (15,2,4), lhs_begin=0, lhs_end=1, rhs_begin=0, rhs_end=2, output shape = (15,2,7) + * - lhs shape = (3, 5), rhs shape = (1,15,4), lhs_begin=0, lhs_end=2, rhs_begin=1, rhs_end=2, output shape = (15) + * + * Negative indices are supported, and `None` can be used for either `lhs_end` or `rhs_end` to indicate the end of the range. + * + * Example:: + * + * - lhs shape = (30, 12), rhs shape = (4, 2, 2, 3), lhs_begin=-1, lhs_end=None, rhs_begin=1, rhs_end=None, output shape = (30, 2, 2, 3) + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L513 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def reshape_like(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Reverses the order of elements along given axis while preserving array shape. + * Note: reverse and flip are equivalent. We use reverse in the following examples. + * Examples:: + * x = `[ [ 0., 1., 2., 3., 4.], + * [ 5., 6., 7., 8., 9.] ] + * reverse(x, axis=0) = `[ [ 5., 6., 7., 8., 9.], + * [ 0., 1., 2., 3., 4.] ] + * reverse(x, axis=1) = `[ [ 4., 3., 2., 1., 0.], + * [ 9., 8., 7., 6., 5.] 
] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L832 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def reverse(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise rounded value to the nearest integer of the input. + * + * .. note:: + * - For input ``n.5`` ``rint`` returns ``n`` while ``round`` returns ``n+1``. + * - For input ``-n.5`` both ``rint`` and ``round`` returns ``-n-1``. + * + * Example:: + * + * rint([-1.5, 1.5, -1.9, 1.9, 2.1]) = [-2., 1., -2., 2., 2.] + * + * The storage type of ``rint`` output depends upon the input storage type: + * + * - rint(default) = default + * - rint(row_sparse) = row_sparse + * - rint(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L799 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def rint(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Update function for `RMSProp` optimizer. + * + * `RMSprop` is a variant of stochastic gradient descent where the gradients are + * divided by a cache which grows with the sum of squares of recent gradients? + * + * `RMSProp` is similar to `AdaGrad`, a popular variant of `SGD` which adaptively + * tunes the learning rate of each parameter. `AdaGrad` lowers the learning rate for + * each parameter monotonically over the course of training. + * While this is analytically motivated for convex optimizations, it may not be ideal + * for non-convex problems. `RMSProp` deals with this heuristically by allowing the + * learning rates to rebound as the denominator decays over time. + * + * Define the Root Mean Square (RMS) error criterion of the gradient as + * :math:`RMS[g]_t = \sqrt{E[g^2]_t + \epsilon}`, where :math:`g` represents + * gradient and :math:`E[g^2]_t` is the decaying average over past squared gradient. + * + * The :math:`E[g^2]_t` is given by: + * + * .. math:: + * E[g^2]_t = \gamma * E[g^2]_{t-1} + (1-\gamma) * g_t^2 + * + * The update step is + * + * .. math:: + * \theta_{t+1} = \theta_t - \frac{\eta}{RMS[g]_t} g_t + * + * The RMSProp code follows the version in + * http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf + * Tieleman & Hinton, 2012. + * + * Hinton suggests the momentum term :math:`\gamma` to be 0.9 and the learning rate + * :math:`\eta` to be 0.001. + * + * + * + * Defined in src/operator/optimizer_op.cc:L797 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def rmsprop_update(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Update function for RMSPropAlex optimizer. + * + * `RMSPropAlex` is non-centered version of `RMSProp`. + * + * Define :math:`E[g^2]_t` is the decaying average over past squared gradient and + * :math:`E[g]_t` is the decaying average over past gradient. + * + * .. math:: + * E[g^2]_t = \gamma_1 * E[g^2]_{t-1} + (1 - \gamma_1) * g_t^2\\ + * E[g]_t = \gamma_1 * E[g]_{t-1} + (1 - \gamma_1) * g_t\\ + * \Delta_t = \gamma_2 * \Delta_{t-1} - \frac{\eta}{\sqrt{E[g^2]_t - E[g]_t^2 + \epsilon}} g_t\\ + * + * The update step is + * + * .. 
math:: + * \theta_{t+1} = \theta_t + \Delta_t + * + * The RMSPropAlex code follows the version in + * http://arxiv.org/pdf/1308.0850v5.pdf Eq(38) - Eq(45) by Alex Graves, 2013. + * + * Graves suggests the momentum term :math:`\gamma_1` to be 0.95, :math:`\gamma_2` + * to be 0.9 and the learning rate :math:`\eta` to be 0.0001. + * + * + * Defined in src/operator/optimizer_op.cc:L836 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def rmspropalex_update(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise rounded value to the nearest integer of the input. + * + * Example:: + * + * round([-1.5, 1.5, -1.9, 1.9, 2.1]) = [-2., 2., -2., 2., 2.] + * + * The storage type of ``round`` output depends upon the input storage type: + * + * - round(default) = default + * - round(row_sparse) = row_sparse + * - round(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L778 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def round(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise inverse square-root value of the input. + * + * .. math:: + * rsqrt(x) = 1/\sqrt{x} + * + * Example:: + * + * rsqrt([4,9,16]) = [0.5, 0.33333334, 0.25] + * + * The storage type of ``rsqrt`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L193 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def rsqrt(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * exponential distributions with parameters lambda (rate). + * + * The parameters of the distributions are provided as an input array. + * Let *[s]* be the shape of the input array, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input array, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input value at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input array. + * + * Examples:: + * + * lam = [ 1.0, 8.5 ] + * + * // Draw a single sample for each distribution + * sample_exponential(lam) = [ 0.51837951, 0.09994757] + * + * // Draw a vector containing two samples for each distribution + * sample_exponential(lam, shape=(2)) = `[ [ 0.51837951, 0.19866663], + * [ 0.09994757, 0.50447971] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L283 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def sample_exponential(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * gamma distributions with parameters *alpha* (shape) and *beta* (scale). + * + * The parameters of the distributions are provided as input arrays. 
+ * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Examples:: + * + * alpha = [ 0.0, 2.5 ] + * beta = [ 1.0, 0.7 ] + * + * // Draw a single sample for each distribution + * sample_gamma(alpha, beta) = [ 0. , 2.25797319] + * + * // Draw a vector containing two samples for each distribution + * sample_gamma(alpha, beta, shape=(2)) = `[ [ 0. , 0. ], + * [ 2.25797319, 1.70734084] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L280 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def sample_gamma(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * generalized negative binomial distributions with parameters *mu* (mean) and *alpha* (dispersion). + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Samples will always be returned as a floating point data type. + * + * Examples:: + * + * mu = [ 2.0, 2.5 ] + * alpha = [ 1.0, 0.1 ] + * + * // Draw a single sample for each distribution + * sample_generalized_negative_binomial(mu, alpha) = [ 0., 3.] + * + * // Draw a vector containing two samples for each distribution + * sample_generalized_negative_binomial(mu, alpha, shape=(2)) = `[ [ 0., 3.], + * [ 3., 1.] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L290 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def sample_generalized_negative_binomial(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Concurrent sampling from multiple multinomial distributions. + * + * *data* is an *n* dimensional array whose last dimension has length *k*, where + * *k* is the number of possible outcomes of each multinomial distribution. This + * operator will draw *shape* samples from each distribution. If shape is empty + * one sample will be drawn from each distribution. + * + * If *get_prob* is true, a second array containing log likelihood of the drawn + * samples will also be returned. 
This is usually used for reinforcement learning + * where you can provide reward as head gradient for this array to estimate + * gradient. + * + * Note that the input distribution must be normalized, i.e. *data* must sum to + * 1 along its last axis. + * + * Examples:: + * + * probs = `[ [0, 0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1, 0] ] + * + * // Draw a single sample for each distribution + * sample_multinomial(probs) = [3, 0] + * + * // Draw a vector containing two samples for each distribution + * sample_multinomial(probs, shape=(2)) = `[ [4, 2], + * [0, 0] ] + * + * // requests log likelihood + * sample_multinomial(probs, get_prob=True) = [2, 1], [0.2, 0.3] + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def sample_multinomial(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * negative binomial distributions with parameters *k* (failure limit) and *p* (failure probability). + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Samples will always be returned as a floating point data type. + * + * Examples:: + * + * k = [ 20, 49 ] + * p = [ 0.4 , 0.77 ] + * + * // Draw a single sample for each distribution + * sample_negative_binomial(k, p) = [ 15., 16.] + * + * // Draw a vector containing two samples for each distribution + * sample_negative_binomial(k, p, shape=(2)) = `[ [ 15., 50.], + * [ 16., 12.] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L287 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def sample_negative_binomial(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * normal distributions with parameters *mu* (mean) and *sigma* (standard deviation). + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. 
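+ *
+ * A call sketch through the generated signature below; the ``mu`` and ``sigma`` symbols and the
+ * keyword-style invocation are illustrative assumptions (with no ``shape`` set, one sample is drawn
+ * per distribution)::
+ *
+ *   val samples = sample_normal(name = "samp0")()(Map("mu" -> mu, "sigma" -> sigma))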
+ * + * Examples:: + * + * mu = [ 0.0, 2.5 ] + * sigma = [ 1.0, 3.7 ] + * + * // Draw a single sample for each distribution + * sample_normal(mu, sigma) = [-0.56410581, 0.95934606] + * + * // Draw a vector containing two samples for each distribution + * sample_normal(mu, sigma, shape=(2)) = `[ [-0.56410581, 0.2928229 ], + * [ 0.95934606, 4.48287058] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L278 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def sample_normal(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * Poisson distributions with parameters lambda (rate). + * + * The parameters of the distributions are provided as an input array. + * Let *[s]* be the shape of the input array, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input array, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input value at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input array. + * + * Samples will always be returned as a floating point data type. + * + * Examples:: + * + * lam = [ 1.0, 8.5 ] + * + * // Draw a single sample for each distribution + * sample_poisson(lam) = [ 0., 13.] + * + * // Draw a vector containing two samples for each distribution + * sample_poisson(lam, shape=(2)) = `[ [ 0., 4.], + * [ 13., 8.] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L285 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def sample_poisson(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * uniform distributions on the intervals given by *[low,high)*. + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. 
+ * + * Examples:: + * + * low = [ 0.0, 2.5 ] + * high = [ 1.0, 3.7 ] + * + * // Draw a single sample for each distribution + * sample_uniform(low, high) = [ 0.40451524, 3.18687344] + * + * // Draw a vector containing two samples for each distribution + * sample_uniform(low, high, shape=(2)) = `[ [ 0.40451524, 0.18017688], + * [ 3.18687344, 3.68352246] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L276 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def sample_uniform(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Scatters data into a new tensor according to indices. + * + * Given `data` with shape `(Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1})` and indices with shape + * `(M, Y_0, ..., Y_{K-1})`, the output will have shape `(X_0, X_1, ..., X_{N-1})`, + * where `M <= N`. If `M == N`, data shape should simply be `(Y_0, ..., Y_{K-1})`. + * + * The elements in output is defined as follows:: + * + * output[indices[0, y_0, ..., y_{K-1}], + * ..., + * indices[M-1, y_0, ..., y_{K-1}], + * x_M, ..., x_{N-1}] = data[y_0, ..., y_{K-1}, x_M, ..., x_{N-1}] + * + * all other entries in output are 0. + * + * .. warning:: + * + * If the indices have duplicates, the result will be non-deterministic and + * the gradient of `scatter_nd` will not be correct!! + * + * + * Examples:: + * + * data = [2, 3, 0] + * indices = `[ [1, 1, 0], [0, 1, 0] ] + * shape = (2, 2) + * scatter_nd(data, indices, shape) = `[ [0, 0], [2, 3] ] + * + * data = `[ `[ [1, 2], [3, 4] ], `[ [5, 6], [7, 8] ] ] + * indices = `[ [0, 1], [1, 1] ] + * shape = (2, 2, 2, 2) + * scatter_nd(data, indices, shape) = `[ [`[ [0, 0], + * [0, 0] ], + * + * `[ [1, 2], + * [3, 4] ] ], + * + * `[ `[ [0, 0], + * [0, 0] ], + * + * `[ [5, 6], + * [7, 8] ] ] ] + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def scatter_nd(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Momentum update function for Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * However, if grad's storage type is ``row_sparse``, ``lazy_update`` is True and weight's storage + * type is the same as momentum's storage type, + * only the row slices whose indices appear in grad.indices are updated (for both weight and momentum):: + * + * for row in gradient.indices: + * v[row] = momentum[row] * v[row] - learning_rate * gradient[row] + * weight[row] += v[row] + * + * + * + * Defined in src/operator/optimizer_op.cc:L565 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def sgd_mom_update(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Update function for Stochastic Gradient Descent (SGD) optimizer. 
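 + * 
 + * For reference, a plain-Scala sketch of the dense update arithmetic (the momentum form documented above and the plain form documented below; illustrative only, not part of the generated documentation):: 
 + * 
 + * // sgd_mom_update:  v = momentum * v - lr * grad;  w += v 
 + * // sgd_update:      w = w - lr * (grad + wd * w) 
 + * def sgdStep(w: Double, grad: Double, lr: Double, wd: Double): Double = 
 + *   w - lr * (grad + wd * w) 
 + * def sgdMomStep(w: Double, v: Double, grad: Double, lr: Double, momentum: Double): (Double, Double) = { 
 + *   val vNew = momentum * v - lr * grad 
 + *   (w + vNew, vNew) 
 + * } 
 + * 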
+ * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * However, if gradient is of ``row_sparse`` storage type and ``lazy_update`` is True, + * only the row slices whose indices appear in grad.indices are updated:: + * + * for row in gradient.indices: + * weight[row] = weight[row] - learning_rate * (gradient[row] + wd * weight[row]) + * + * + * + * Defined in src/operator/optimizer_op.cc:L524 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def sgd_update(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns a 1D int64 array containing the shape of data. + * + * Example:: + * + * shape_array(`[ [1,2,3,4], [5,6,7,8] ]) = [2,4] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L574 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def shape_array(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Randomly shuffle the elements. + * + * This shuffles the array along the first axis. + * The order of the elements in each subarray does not change. + * For example, if a 2D array is given, the order of the rows randomly changes, + * but the order of the elements in each row does not change. + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def shuffle(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes sigmoid of x element-wise. + * + * .. math:: + * y = 1 / (1 + exp(-x)) + * + * The storage type of ``sigmoid`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L119 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def sigmoid(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise sign of the input. + * + * Example:: + * + * sign([-2, 0, 3]) = [-1, 0, 1] + * + * The storage type of ``sign`` output depends upon the input storage type: + * + * - sign(default) = default + * - sign(row_sparse) = row_sparse + * - sign(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L759 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def sign(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Update function for SignSGD optimizer. + * + * .. math:: + * + * g_t = \nabla J(W_{t-1})\\ + * W_t = W_{t-1} - \eta_t \text{sign}(g_t) + * + * It updates the weights using:: + * + * weight = weight - learning_rate * sign(gradient) + * + * .. note:: + * - sparse ndarray not supported for this optimizer yet. + * + * + * Defined in src/operator/optimizer_op.cc:L63 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def signsgd_update(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * SIGN momentUM (Signum) optimizer. + * + * .. 
math:: + * + * g_t = \nabla J(W_{t-1})\\ + * m_t = \beta m_{t-1} + (1 - \beta) g_t\\ + * W_t = W_{t-1} - \eta_t \text{sign}(m_t) + * + * It updates the weights using:: + * state = momentum * state + (1-momentum) * gradient + * weight = weight - learning_rate * sign(state) + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * .. note:: + * - sparse ndarray not supported for this optimizer yet. + * + * + * Defined in src/operator/optimizer_op.cc:L92 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def signum_update(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the element-wise sine of the input array. + * + * The input should be in radians (:math:`2\pi` rad equals 360 degrees). + * + * .. math:: + * sin([0, \pi/4, \pi/2]) = [0, 0.707, 1] + * + * The storage type of ``sin`` output depends upon the input storage type: + * + * - sin(default) = default + * - sin(row_sparse) = row_sparse + * - sin(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L47 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def sin(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns the hyperbolic sine of the input array, computed element-wise. + * + * .. math:: + * sinh(x) = 0.5\times(exp(x) - exp(-x)) + * + * The storage type of ``sinh`` output depends upon the input storage type: + * + * - sinh(default) = default + * - sinh(row_sparse) = row_sparse + * - sinh(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L313 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def sinh(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns a 1D int64 array containing the size of data. + * + * Example:: + * + * size_array(`[ [1,2,3,4], [5,6,7,8] ]) = [8] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L625 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def size_array(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Slices a region of the array. + * .. note:: ``crop`` is deprecated. Use ``slice`` instead. + * This function returns a sliced array between the indices given + * by `begin` and `end` with the corresponding `step`. + * For an input array of ``shape=(d_0, d_1, ..., d_n-1)``, + * slice operation with ``begin=(b_0, b_1...b_m-1)``, + * ``end=(e_0, e_1, ..., e_m-1)``, and ``step=(s_0, s_1, ..., s_m-1)``, + * where m <= n, results in an array with the shape + * ``(|e_0-b_0|/|s_0|, ..., |e_m-1-b_m-1|/|s_m-1|, d_m, ..., d_n-1)``. + * The resulting array's *k*-th dimension contains elements + * from the *k*-th dimension of the input array starting + * from index ``b_k`` (inclusive) with step ``s_k`` + * until reaching ``e_k`` (exclusive). + * If the *k*-th elements are `None` in the sequence of `begin`, `end`, + * and `step`, the following rule will be used to set default values. + * If `s_k` is `None`, set `s_k=1`. If `s_k > 0`, set `b_k=0`, `e_k=d_k`; + * else, set `b_k=d_k-1`, `e_k=-1`. 
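 + * 
 + * The default-value rule above, as a plain-Scala sketch (illustrative only):: 
 + * 
 + * // resolve (begin, end, step) defaults for one axis of length d 
 + * def resolveSlice(d: Int, b: Option[Int], e: Option[Int], s: Option[Int]): (Int, Int, Int) = { 
 + *   val step = s.getOrElse(1) 
 + *   if (step > 0) (b.getOrElse(0), e.getOrElse(d), step) 
 + *   else          (b.getOrElse(d - 1), e.getOrElse(-1), step) 
 + * } 
 + * // resolveSlice(3, None, None, Some(-1)) == (2, -1, -1)   // walk the axis backwards 
 + * 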
+ * The storage type of ``slice`` output depends on storage types of inputs + * - slice(csr) = csr + * - otherwise, ``slice`` generates output with default storage + * .. note:: When input data storage type is csr, it only supports + * step=(), or step=(None,), or step=(1,) to generate a csr output. + * For other step parameter values, it falls back to slicing + * a dense tensor. + * Example:: + * x = `[ [ 1., 2., 3., 4.], + * [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice(x, begin=(0,1), end=(2,4)) = `[ [ 2., 3., 4.], + * [ 6., 7., 8.] ] + * slice(x, begin=(None, 0), end=(None, 3), step=(-1, 2)) = `[ [9., 11.], + * [5., 7.], + * [1., 3.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L482 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def slice(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Slices along a given axis. + * Returns an array slice along a given `axis` starting from the `begin` index + * to the `end` index. + * Examples:: + * x = `[ [ 1., 2., 3., 4.], + * [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice_axis(x, axis=0, begin=1, end=3) = `[ [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice_axis(x, axis=1, begin=0, end=2) = `[ [ 1., 2.], + * [ 5., 6.], + * [ 9., 10.] ] + * slice_axis(x, axis=1, begin=-3, end=-1) = `[ [ 2., 3.], + * [ 6., 7.], + * [ 10., 11.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L571 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def slice_axis(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Slices a region of the array like the shape of another array. + * This function is similar to ``slice``, however, the `begin` are always `0`s + * and `end` of specific axes are inferred from the second input `shape_like`. + * Given the second `shape_like` input of ``shape=(d_0, d_1, ..., d_n-1)``, + * a ``slice_like`` operator with default empty `axes`, it performs the + * following operation: + * `` out = slice(input, begin=(0, 0, ..., 0), end=(d_0, d_1, ..., d_n-1))``. + * When `axes` is not empty, it is used to speficy which axes are being sliced. + * Given a 4-d input data, ``slice_like`` operator with ``axes=(0, 2, -1)`` + * will perform the following operation: + * `` out = slice(input, begin=(0, 0, 0, 0), end=(d_0, None, d_2, d_3))``. + * Note that it is allowed to have first and second input with different dimensions, + * however, you have to make sure the `axes` are specified and not exceeding the + * dimension limits. + * For example, given `input_1` with ``shape=(2,3,4,5)`` and `input_2` with + * ``shape=(1,2,3)``, it is not allowed to use: + * `` out = slice_like(a, b)`` because ndim of `input_1` is 4, and ndim of `input_2` + * is 3. + * The following is allowed in this situation: + * `` out = slice_like(a, b, axes=(0, 2))`` + * Example:: + * x = `[ [ 1., 2., 3., 4.], + * [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * y = `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * slice_like(x, y) = `[ [ 1., 2., 3.] + * [ 5., 6., 7.] ] + * slice_like(x, y, axes=(0, 1)) = `[ [ 1., 2., 3.] + * [ 5., 6., 7.] ] + * slice_like(x, y, axes=(0)) = `[ [ 1., 2., 3., 4.] + * [ 5., 6., 7., 8.] ] + * slice_like(x, y, axes=(-1)) = `[ [ 1., 2., 3.] + * [ 5., 6., 7.] + * [ 9., 10., 11.] 
] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L625 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def slice_like(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Calculate Smooth L1 Loss(lhs, scalar) by summing + * + * .. math:: + * + * f(x) = + * \begin{cases} + * (\sigma x)^2/2,& \text{if }x < 1/\sigma^2\\ + * |x|-0.5/\sigma^2,& \text{otherwise} + * \end{cases} + * + * where :math:`x` is an element of the tensor *lhs* and :math:`\sigma` is the scalar. + * + * Example:: + * + * smooth_l1([1, 2, 3, 4]) = [0.5, 1.5, 2.5, 3.5] + * smooth_l1([1, 2, 3, 4], scalar=1) = [0.5, 1.5, 2.5, 3.5] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_scalar_op_extended.cc:L108 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def smooth_l1(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Applies the softmax function. + * + * The resulting array contains elements in the range (0,1) and the elements along the given axis sum up to 1. + * + * .. math:: + * softmax(\mathbf{z/t})_j = \frac{e^{z_j/t}}{\sum_{k=1}^K e^{z_k/t}} + * + * for :math:`j = 1, ..., K` + * + * t is the temperature parameter in softmax function. By default, t equals 1.0 + * + * Example:: + * + * x = `[ [ 1. 1. 1.] + * [ 1. 1. 1.] ] + * + * softmax(x,axis=0) = `[ [ 0.5 0.5 0.5] + * [ 0.5 0.5 0.5] ] + * + * softmax(x,axis=1) = `[ [ 0.33333334, 0.33333334, 0.33333334], + * [ 0.33333334, 0.33333334, 0.33333334] ] + * + * + * + * Defined in src/operator/nn/softmax.cc:L103 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def softmax(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Calculate cross entropy of softmax output and one-hot label. + * + * - This operator computes the cross entropy in two steps: + * - Applies softmax function on the input array. + * - Computes and returns the cross entropy loss between the softmax output and the labels. + * + * - The softmax function and cross entropy loss is given by: + * + * - Softmax Function: + * + * .. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)} + * + * - Cross Entropy Function: + * + * .. math:: \text{CE(label, output)} = - \sum_i \text{label}_i \log(\text{output}_i) + * + * Example:: + * + * x = `[ [1, 2, 3], + * [11, 7, 5] ] + * + * label = [2, 0] + * + * softmax(x) = `[ [0.09003057, 0.24472848, 0.66524094], + * [0.97962922, 0.01794253, 0.00242826] ] + * + * softmax_cross_entropy(data, label) = - log(0.66524084) - log(0.97962922) = 0.4281871 + * + * + * + * Defined in src/operator/loss_binary_op.cc:L59 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def softmax_cross_entropy(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Applies the softmin function. + * + * The resulting array contains elements in the range (0,1) and the elements along the given axis sum + * up to 1. + * + * .. math:: + * softmin(\mathbf{z/t})_j = \frac{e^{-z_j/t}}{\sum_{k=1}^K e^{-z_k/t}} + * + * for :math:`j = 1, ..., K` + * + * t is the temperature parameter in softmax function. 
By default, t equals 1.0 + * + * Example:: + * + * x = `[ [ 1. 2. 3.] + * [ 3. 2. 1.] ] + * + * softmin(x,axis=0) = `[ [ 0.88079703, 0.5, 0.11920292], + * [ 0.11920292, 0.5, 0.88079703] ] + * + * softmin(x,axis=1) = `[ [ 0.66524094, 0.24472848, 0.09003057], + * [ 0.09003057, 0.24472848, 0.66524094] ] + * + * + * + * Defined in src/operator/nn/softmin.cc:L57 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def softmin(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes softsign of x element-wise. + * + * .. math:: + * y = x / (1 + abs(x)) + * + * The storage type of ``softsign`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L191 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def softsign(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns a sorted copy of an input array along the given axis. + * + * Examples:: + * + * x = `[ [ 1, 4], + * [ 3, 1] ] + * + * // sorts along the last axis + * sort(x) = `[ [ 1., 4.], + * [ 1., 3.] ] + * + * // flattens and then sorts + * sort(x, axis=None) = [ 1., 1., 3., 4.] + * + * // sorts along the first axis + * sort(x, axis=0) = `[ [ 1., 1.], + * [ 3., 4.] ] + * + * // in a descend order + * sort(x, is_ascend=0) = `[ [ 4., 1.], + * [ 3., 1.] ] + * + * + * + * Defined in src/operator/tensor/ordering_op.cc:L132 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def sort(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Rearranges(permutes) blocks of spatial data into depth. + * Similar to ONNX SpaceToDepth operator: + * https://github.com/onnx/onnx/blob/master/docs/Operators.md#SpaceToDepth + * The output is a new tensor where the values from height and width dimension are + * moved to the depth dimension. The reverse of this operation is ``depth_to_space``. + * .. math:: + * \begin{gather*} + * x \prime = reshape(x, [N, C, H / block\_size, block\_size, W / block\_size, block\_size]) \\ + * x \prime \prime = transpose(x \prime, [0, 3, 5, 1, 2, 4]) \\ + * y = reshape(x \prime \prime, [N, C * (block\_size ^ 2), H / block\_size, W / block\_size]) + * \end{gather*} + * where :math:`x` is an input tensor with default layout as :math:`[N, C, H, W]`: [batch, channels, height, width] + * and :math:`y` is the output tensor of layout :math:`[N, C * (block\_size ^ 2), H / block\_size, W / block\_size]` + * Example:: + * x = `[ [`[ [0, 6, 1, 7, 2, 8], + * [12, 18, 13, 19, 14, 20], + * [3, 9, 4, 10, 5, 11], + * [15, 21, 16, 22, 17, 23] ] ] ] + * space_to_depth(x, 2) = `[ [`[ [0, 1, 2], + * [3, 4, 5] ], + * `[ [6, 7, 8], + * [9, 10, 11] ], + * `[ [12, 13, 14], + * [15, 16, 17] ], + * `[ [18, 19, 20], + * [21, 22, 23] ] ] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L1019 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def space_to_depth(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Splits an array along a particular axis into multiple sub-arrays. + * + * .. note:: ``SliceChannel`` is deprecated. Use ``split`` instead. 
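 + * 
 + * Output-shape arithmetic for the examples below, as a plain-Scala sketch (illustrative only, not part of the generated documentation):: 
 + * 
 + * def splitShape(shape: Vector[Int], axis: Int, numOutputs: Int, squeezeAxis: Boolean = false): Vector[Int] = { 
 + *   require(shape(axis) % numOutputs == 0, "num_outputs must evenly divide the split axis") 
 + *   val out = shape.updated(axis, shape(axis) / numOutputs) 
 + *   if (squeezeAxis && shape(axis) == numOutputs) out.patch(axis, Nil, 1) else out 
 + * } 
 + * // splitShape(Vector(3, 2, 1), axis = 1, numOutputs = 2)                      == Vector(3, 1, 1) 
 + * // splitShape(Vector(3, 2, 1), axis = 0, numOutputs = 3, squeezeAxis = true)  == Vector(2, 1) 
 + * 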
+ * + * **Note** that `num_outputs` should evenly divide the length of the axis + * along which to split the array. + * + * Example:: + * + * x = `[ `[ [ 1.] + * [ 2.] ] + * `[ [ 3.] + * [ 4.] ] + * `[ [ 5.] + * [ 6.] ] ] + * x.shape = (3, 2, 1) + * + * y = split(x, axis=1, num_outputs=2) // a list of 2 arrays with shape (3, 1, 1) + * y = `[ `[ [ 1.] ] + * `[ [ 3.] ] + * `[ [ 5.] ] ] + * + * `[ `[ [ 2.] ] + * `[ [ 4.] ] + * `[ [ 6.] ] ] + * + * y[0].shape = (3, 1, 1) + * + * z = split(x, axis=0, num_outputs=3) // a list of 3 arrays with shape (1, 2, 1) + * z = `[ `[ [ 1.] + * [ 2.] ] ] + * + * `[ `[ [ 3.] + * [ 4.] ] ] + * + * `[ `[ [ 5.] + * [ 6.] ] ] + * + * z[0].shape = (1, 2, 1) + * + * `squeeze_axis=1` removes the axis with length 1 from the shapes of the output arrays. + * **Note** that setting `squeeze_axis` to ``1`` removes axis with length 1 only + * along the `axis` which it is split. + * Also `squeeze_axis` can be set to true only if ``input.shape[axis] == num_outputs``. + * + * Example:: + * + * z = split(x, axis=0, num_outputs=3, squeeze_axis=1) // a list of 3 arrays with shape (2, 1) + * z = `[ [ 1.] + * [ 2.] ] + * + * `[ [ 3.] + * [ 4.] ] + * + * `[ [ 5.] + * [ 6.] ] + * z[0].shape = (2 ,1 ) + * + * + * + * Defined in src/operator/slice_channel.cc:L107 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def split(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise square-root value of the input. + * + * .. math:: + * \textrm{sqrt}(x) = \sqrt{x} + * + * Example:: + * + * sqrt([4, 9, 16]) = [2, 3, 4] + * + * The storage type of ``sqrt`` output depends upon the input storage type: + * + * - sqrt(default) = default + * - sqrt(row_sparse) = row_sparse + * - sqrt(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L142 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def sqrt(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns element-wise squared value of the input. + * + * .. math:: + * square(x) = x^2 + * + * Example:: + * + * square([2, 3, 4]) = [4, 9, 16] + * + * The storage type of ``square`` output depends upon the input storage type: + * + * - square(default) = default + * - square(row_sparse) = row_sparse + * - square(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L118 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def square(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Remove single-dimensional entries from the shape of an array. + * Same behavior of defining the output tensor shape as numpy.squeeze for the most of cases. + * See the following note for exception. + * Examples:: + * data = `[ `[ [0], [1], [2] ] ] + * squeeze(data) = [0, 1, 2] + * squeeze(data, axis=0) = `[ [0], [1], [2] ] + * squeeze(data, axis=2) = `[ [0, 1, 2] ] + * squeeze(data, axis=(0, 2)) = [0, 1, 2] + * .. Note:: + * The output of this operator will keep at least one dimension not removed. For example, + * squeeze(`[ `[ [4] ] ]) = [4], while in numpy.squeeze, the output will become a scalar. 
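 + * 
 + * The same shape rule as a plain-Scala sketch (illustrative only):: 
 + * 
 + * def squeezeShape(shape: Vector[Int], axes: Option[Set[Int]] = None): Vector[Int] = { 
 + *   val kept = shape.zipWithIndex.collect { 
 + *     case (d, i) if d != 1 || axes.exists(a => !a.contains(i)) => d 
 + *   } 
 + *   if (kept.isEmpty) Vector(1) else kept       // keep at least one dimension, as noted above 
 + * } 
 + * // squeezeShape(Vector(1, 3, 1))               == Vector(3) 
 + * // squeezeShape(Vector(1, 3, 1), Some(Set(0))) == Vector(3, 1) 
 + * 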
+ * }}} + * + * @return org.apache.mxnet.Symbol + */ +def squeeze(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Join a sequence of arrays along a new axis. + * The axis parameter specifies the index of the new axis in the dimensions of the + * result. For example, if axis=0 it will be the first dimension and if axis=-1 it + * will be the last dimension. + * Examples:: + * x = [1, 2] + * y = [3, 4] + * stack(x, y) = `[ [1, 2], + * [3, 4] ] + * stack(x, y, axis=1) = `[ [1, 3], + * [2, 4] ] + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def stack(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Stops gradient computation. + * + * Stops the accumulated gradient of the inputs from flowing through this operator + * in the backward direction. In other words, this operator prevents the contribution + * of its inputs to be taken into account for computing gradients. + * + * Example:: + * + * v1 = [1, 2] + * v2 = [0, 1] + * a = Variable('a') + * b = Variable('b') + * b_stop_grad = stop_gradient(3 * b) + * loss = MakeLoss(b_stop_grad + a) + * + * executor = loss.simple_bind(ctx=cpu(), a=(1,2), b=(1,2)) + * executor.forward(is_train=True, a=v1, b=v2) + * executor.outputs + * [ 1. 5.] + * + * executor.backward() + * executor.grad_arrays + * [ 0. 0.] + * [ 1. 1.] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L327 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def stop_gradient(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the sum of array elements over given axes. + * + * .. Note:: + * + * `sum` and `sum_axis` are equivalent. + * For ndarray of csr storage type summation along axis 0 and axis 1 is supported. + * Setting keepdims or exclude to True will cause a fallback to dense operator. + * + * Example:: + * + * data = `[ `[ [1, 2], [2, 3], [1, 3] ], + * `[ [1, 4], [4, 3], [5, 2] ], + * `[ [7, 1], [7, 2], [7, 3] ] ] + * + * sum(data, axis=1) + * `[ [ 4. 8.] + * [ 10. 9.] + * [ 21. 6.] ] + * + * sum(data, axis=[1,2]) + * [ 12. 19. 27.] + * + * data = `[ [1, 2, 0], + * [3, 0, 1], + * [4, 1, 0] ] + * + * csr = cast_storage(data, 'csr') + * + * sum(csr, axis=0) + * [ 8. 3. 1.] + * + * sum(csr, axis=1) + * [ 3. 4. 5.] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_sum_value.cc:L67 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def sum(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the sum of array elements over given axes. + * + * .. Note:: + * + * `sum` and `sum_axis` are equivalent. + * For ndarray of csr storage type summation along axis 0 and axis 1 is supported. + * Setting keepdims or exclude to True will cause a fallback to dense operator. + * + * Example:: + * + * data = `[ `[ [1, 2], [2, 3], [1, 3] ], + * `[ [1, 4], [4, 3], [5, 2] ], + * `[ [7, 1], [7, 2], [7, 3] ] ] + * + * sum(data, axis=1) + * `[ [ 4. 8.] + * [ 10. 9.] + * [ 21. 6.] ] + * + * sum(data, axis=[1,2]) + * [ 12. 19. 27.] 
+ * + * data = `[ [1, 2, 0], + * [3, 0, 1], + * [4, 1, 0] ] + * + * csr = cast_storage(data, 'csr') + * + * sum(csr, axis=0) + * [ 8. 3. 1.] + * + * sum(csr, axis=1) + * [ 3. 4. 5.] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_sum_value.cc:L67 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def sum_axis(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Interchanges two axes of an array. + * + * Examples:: + * + * x = `[ [1, 2, 3] ]) + * swapaxes(x, 0, 1) = `[ [ 1], + * [ 2], + * [ 3] ] + * + * x = `[ `[ [ 0, 1], + * [ 2, 3] ], + * `[ [ 4, 5], + * [ 6, 7] ] ] // (2,2,2) array + * + * swapaxes(x, 0, 2) = `[ `[ [ 0, 4], + * [ 2, 6] ], + * `[ [ 1, 5], + * [ 3, 7] ] ] + * + * + * Defined in src/operator/swapaxis.cc:L70 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def swapaxes(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Takes elements from an input array along the given axis. + * + * This function slices the input array along a particular axis with the provided indices. + * + * Given data tensor of rank r >= 1, and indices tensor of rank q, gather entries of the axis + * dimension of data (by default outer-most one as axis=0) indexed by indices, and concatenates them + * in an output tensor of rank q + (r - 1). + * + * Examples:: + * + * x = [4. 5. 6.] + * + * // Trivial case, take the second element along the first axis. + * + * take(x, [1]) = [ 5. ] + * + * // The other trivial case, axis=-1, take the third element along the first axis + * + * take(x, [3], axis=-1, mode='clip') = [ 6. ] + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] ] + * + * // In this case we will get rows 0 and 1, then 1 and 2. Along axis 0 + * + * take(x, `[ [0,1],[1,2] ]) = `[ `[ [ 1., 2.], + * [ 3., 4.] ], + * + * `[ [ 3., 4.], + * [ 5., 6.] ] ] + * + * // In this case we will get rows 0 and 1, then 1 and 2 (calculated by wrapping around). + * // Along axis 1 + * + * take(x, `[ [0, 3], [-1, -2] ], axis=1, mode='wrap') = `[ `[ [ 1. 2.] + * [ 2. 1.] ] + * + * `[ [ 3. 4.] + * [ 4. 3.] ] + * + * `[ [ 5. 6.] + * [ 6. 5.] ] ] + * + * The storage type of ``take`` output depends upon the input storage type: + * + * - take(default, default) = default + * - take(csr, default, axis=0) = csr + * + * + * + * Defined in src/operator/tensor/indexing_op.cc:L718 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def take(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Computes the element-wise tangent of the input array. + * + * The input should be in radians (:math:`2\pi` rad equals 360 degrees). + * + * .. 
math:: + * tan([0, \pi/4, \pi/2]) = [0, 1, -inf] + * + * The storage type of ``tan`` output depends upon the input storage type: + * + * - tan(default) = default + * - tan(row_sparse) = row_sparse + * - tan(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L140 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def tan(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Returns the hyperbolic tangent of the input array, computed element-wise. + * + * .. math:: + * tanh(x) = sinh(x) / cosh(x) + * + * The storage type of ``tanh`` output depends upon the input storage type: + * + * - tanh(default) = default + * - tanh(row_sparse) = row_sparse + * - tanh(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L393 + * }}} + * + * @return org.apache.mxnet.Symbol + */ +def tanh(name : String = null, attr : Map[String, String] = null) + (args : org.apache.mxnet.Symbol*)(kwargs : Map[String, Any] = null): + org.apache.mxnet.Symbol + + /** + * + * {{{ + * + * Repeats the whole array multiple times. + * If ``reps`` has length *d*, and input array has dimension of *n*. There are + * three cases: + * - **n=d**. Repeat *i*-th dimension of the input by ``reps[i]`` times:: + * x = `[ [1, 2], + * [3, 4] ] + * tile(x, reps=(2,3)) = `[ [ 1., 2., 1., 2., 1., 2.], + * [ 3., 4., 3., 4., 3., 4.], + * [ 1., 2., 1., 2., 1., 2.], + * [ 3., 4., 3., 4., 3., 4.] ] + * - **n>d**. ``reps`` is promoted to length *n* by pre-pending 1's to it. Thus for + * an input shape ``(2,3)``, ``repos=(2,)`` is treated as ``(1,2)``:: + * tile(x, reps=(2,)) = `[ [ 1., 2., 1., 2.], + * [ 3., 4., 3., 4.] ] + * - **n= 2* and having the same shape + * on the leading *n-2* dimensions. + * + * If *n=2*, *A* must be triangular. The operator performs the BLAS3 function + * *trmm*: + * + * *out* = *alpha* \* *op*\ (*A*) \* *B* + * + * if *rightside=False*, or + * + * *out* = *alpha* \* *B* \* *op*\ (*A*) + * + * if *rightside=True*. Here, *alpha* is a scalar parameter, and *op()* is either the + * identity or the matrix transposition (depending on *transpose*). + * + * If *n>2*, *trmm* is performed separately on the trailing two dimensions for all inputs + * (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single triangular matrix multiply + * A = `[ [1.0, 0], [1.0, 1.0] ] + * B = `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ] + * trmm(A, B, alpha=2.0) = `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ] + * + * Batch triangular matrix multiply + * A = `[ `[ [1.0, 0], [1.0, 1.0] ], `[ [1.0, 0], [1.0, 1.0] ] ] + * B = `[ `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ], `[ [0.5, 0.5, 0.5], [0.5, 0.5, 0.5] ] ] + * trmm(A, B, alpha=2.0) = `[ `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ], + * `[ [1.0, 1.0, 1.0], [2.0, 2.0, 2.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L333 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def linalg_trmm(po: linalg_trmmParam) : Array[NDArray] + + /** + * + * {{{ + * + * Normalize the input array using the L2 norm. 
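 + * 
 + * The 1-D rule described below, as a plain-Scala sketch (illustrative only):: 
 + * 
 + * val data = Array(1f, 2f, 3f) 
 + * val eps  = 1e-10f 
 + * val norm = math.sqrt(data.map(x => x * x).sum + eps).toFloat 
 + * val out  = data.map(_ / norm)         // out = data / sqrt(sum(data ** 2) + eps) 
 + * 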
+ * + * For 1-D NDArray, it computes:: + * + * out = data / sqrt(sum(data ** 2) + eps) + * + * For N-D NDArray, if the input array has shape (N, N, ..., N), + * + * with ``mode`` = ``instance``, it normalizes each instance in the multidimensional + * array by its L2 norm.:: + * + * for i in 0...N + * out[i,:,:,...,:] = data[i,:,:,...,:] / sqrt(sum(data[i,:,:,...,:] ** 2) + eps) + * + * with ``mode`` = ``channel``, it normalizes each channel in the array by its L2 norm.:: + * + * for i in 0...N + * out[:,i,:,...,:] = data[:,i,:,...,:] / sqrt(sum(data[:,i,:,...,:] ** 2) + eps) + * + * with ``mode`` = ``spatial``, it normalizes the cross channel norm for each position + * in the array by its L2 norm.:: + * + * for dim in 2...N + * for i in 0...N + * out[.....,i,...] = take(out, indices=i, axis=dim) / sqrt(sum(take(out, indices=i, axis=dim) ** 2) + eps) + * -dim- + * + * Example:: + * + * x = `[ `[ [1,2], + * [3,4] ], + * `[ [2,2], + * [5,6] ] ] + * + * L2Normalization(x, mode='instance') + * =`[ `[ [ 0.18257418 0.36514837] + * [ 0.54772252 0.73029673] ] + * `[ [ 0.24077171 0.24077171] + * [ 0.60192931 0.72231513] ] ] + * + * L2Normalization(x, mode='channel') + * =`[ `[ [ 0.31622776 0.44721359] + * [ 0.94868326 0.89442718] ] + * `[ [ 0.37139067 0.31622776] + * [ 0.92847669 0.94868326] ] ] + * + * L2Normalization(x, mode='spatial') + * =`[ `[ [ 0.44721359 0.89442718] + * [ 0.60000002 0.80000001] ] + * `[ [ 0.70710677 0.70710677] + * [ 0.6401844 0.76822126] ] ] + * + * + * + * Defined in src/operator/l2_normalization.cc:L196 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def L2Normalization(po: L2NormalizationParam) : Array[NDArray] + + /** + * + * {{{ + * + * Dot product of two arrays. + * + * ``dot``'s behavior depends on the input array dimensions: + * + * - 1-D arrays: inner product of vectors + * - 2-D arrays: matrix multiplication + * - N-D arrays: a sum product over the last axis of the first input and the first + * axis of the second input + * + * For example, given 3-D ``x`` with shape `(n,m,k)` and ``y`` with shape `(k,r,s)`, the + * result array will have shape `(n,m,r,s)`. It is computed by:: + * + * dot(x,y)[i,j,a,b] = sum(x[i,j,:]*y[:,a,b]) + * + * Example:: + * + * x = reshape([0,1,2,3,4,5,6,7], shape=(2,2,2)) + * y = reshape([7,6,5,4,3,2,1,0], shape=(2,2,2)) + * dot(x,y)[0,0,1,1] = 0 + * sum(x[0,0,:]*y[:,1,1]) = 0 + * + * The storage type of ``dot`` output depends on storage types of inputs, transpose option and + * forward_stype option for output storage type. Implemented sparse operations include: + * + * - dot(default, default, transpose_a=True/False, transpose_b=True/False) = default + * - dot(csr, default, transpose_a=True) = default + * - dot(csr, default, transpose_a=True) = row_sparse + * - dot(csr, default) = default + * - dot(csr, row_sparse) = default + * - dot(default, csr) = csr (CPU only) + * - dot(default, csr, forward_stype='default') = default + * - dot(default, csr, transpose_b=True, forward_stype='default') = default + * + * If the combination of input storage types and forward_stype does not match any of the + * above patterns, ``dot`` will fallback and generate output with default storage. + * + * .. Note:: + * + * If the storage type of the lhs is "csr", the storage type of gradient w.r.t rhs will be + * "row_sparse". Only a subset of optimizers support sparse gradients, including SGD, AdaGrad + * and Adam. Note that by default lazy updates is turned on, which may perform differently + * from standard updates. 
For more details, please check the Optimization API at: + * https://mxnet.incubator.apache.org/api/python/optimization/optimization.html + * + * + * + * Defined in src/operator/tensor/dot.cc:L77 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def dot(po: dotParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise log of the absolute value of the gamma function \ + * of the input. + * + * The storage type of ``gammaln`` output is always dense + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def gammaln(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Multiplies arguments element-wise. + * + * The storage type of ``elemwise_mul`` output depends on storage types of inputs + * + * - elemwise_mul(default, default) = default + * - elemwise_mul(row_sparse, row_sparse) = row_sparse + * - elemwise_mul(default, row_sparse) = row_sparse + * - elemwise_mul(row_sparse, default) = row_sparse + * - elemwise_mul(csr, csr) = csr + * - otherwise, ``elemwise_mul`` generates output with default storage + * }}} + * + * @param lhs first input + * @param rhs second input + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def elemwise_mul(lhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Flattens the input array into a 2-D array by collapsing the higher dimensions. + * .. note:: `Flatten` is deprecated. Use `flatten` instead. + * For an input array with shape ``(d1, d2, ..., dk)``, `flatten` operation reshapes + * the input array into an output array of shape ``(d1, d2*...*dk)``. + * Note that the behavior of this function is different from numpy.ndarray.flatten, + * which behaves similar to mxnet.ndarray.reshape((-1,)). + * Example:: + * x = `[ [ + * [1,2,3], + * [4,5,6], + * [7,8,9] + * ], + * [ [1,2,3], + * [4,5,6], + * [7,8,9] + * ] ], + * flatten(x) = `[ [ 1., 2., 3., 4., 5., 6., 7., 8., 9.], + * [ 1., 2., 3., 4., 5., 6., 7., 8., 9.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L250 + * }}} + * + * @param data Input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def flatten(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Applies the softmax function. + * + * The resulting array contains elements in the range (0,1) and the elements along the given axis sum up to 1. + * + * .. math:: + * softmax(\mathbf{z/t})_j = \frac{e^{z_j/t}}{\sum_{k=1}^K e^{z_k/t}} + * + * for :math:`j = 1, ..., K` + * + * t is the temperature parameter in softmax function. By default, t equals 1.0 + * + * Example:: + * + * x = `[ [ 1. 1. 1.] + * [ 1. 1. 1.] ] + * + * softmax(x,axis=0) = `[ [ 0.5 0.5 0.5] + * [ 0.5 0.5 0.5] ] + * + * softmax(x,axis=1) = `[ [ 0.33333334, 0.33333334, 0.33333334], + * [ 0.33333334, 0.33333334, 0.33333334] ] + * + * + * + * Defined in src/operator/nn/softmax.cc:L103 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def softmax(po: softmaxParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise difference of the input arrays with broadcasting. + * + * `broadcast_minus` is an alias to the function `broadcast_sub`. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] 
] + * + * broadcast_sub(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * broadcast_minus(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * Supported sparse operations: + * + * broadcast_sub/minus(csr, dense(1D)) = dense + * broadcast_sub/minus(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L106 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def broadcast_sub(lhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Multiplication of matrix with its transpose. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, the operator performs the BLAS3 function *syrk*: + * + * *out* = *alpha* \* *A* \* *A*\ :sup:`T` + * + * if *transpose=False*, or + * + * *out* = *alpha* \* *A*\ :sup:`T` \ \* *A* + * + * if *transpose=True*. + * + * If *n>2*, *syrk* is performed separately on the trailing two dimensions for all + * inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix multiply + * A = `[ [1., 2., 3.], [4., 5., 6.] ] + * syrk(A, alpha=1., transpose=False) + * = `[ [14., 32.], + * [32., 77.] ] + * syrk(A, alpha=1., transpose=True) + * = `[ [17., 22., 27.], + * [22., 29., 36.], + * [27., 36., 45.] ] + * + * Batch matrix multiply + * A = `[ `[ [1., 1.] ], `[ [0.1, 0.1] ] ] + * syrk(A, alpha=2., transpose=False) = `[ `[ [4.] ], `[ [0.04] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L730 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def linalg_syrk(po: linalg_syrkParam) : Array[NDArray] + + /** + * + * {{{ + * + * Computes and optimizes for squared loss during backward propagation. + * Just outputs ``data`` during forward propagation. + * + * If :math:`\hat{y}_i` is the predicted value of the i-th sample, and :math:`y_i` is the corresponding target value, + * then the squared loss estimated over :math:`n` samples is defined as + * + * :math:`\text{SquaredLoss}(\textbf{Y}, \hat{\textbf{Y}} ) = \frac{1}{n} \sum_{i=0}^{n-1} \lVert \textbf{y}_i - \hat{\textbf{y}}_i \rVert_2` + * + * .. note:: + * Use the LinearRegressionOutput as the final output layer of a net. + * + * The storage type of ``label`` can be ``default`` or ``csr`` + * + * - LinearRegressionOutput(default, default) = default + * - LinearRegressionOutput(default, csr) = default + * + * By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. + * The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. + * + * + * + * Defined in src/operator/regression_output.cc:L92 + * }}} + * + * @param data Input data to the function. + * @param label Input label to the function. + * @param grad_scale Scale the gradient by a float factor + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def LinearRegressionOutput(data : org.apache.mxnet.javaapi.NDArray, label : org.apache.mxnet.javaapi.NDArray, grad_scale : java.lang.Float, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Update function for SignSGD optimizer. + * + * .. 
math:: + * + * g_t = \nabla J(W_{t-1})\\ + * W_t = W_{t-1} - \eta_t \text{sign}(g_t) + * + * It updates the weights using:: + * + * weight = weight - learning_rate * sign(gradient) + * + * .. note:: + * - sparse ndarray not supported for this optimizer yet. + * + * + * Defined in src/operator/optimizer_op.cc:L63 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def signsgd_update(po: signsgd_updateParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise Base-2 logarithmic value of the input. + * + * ``2**log2(x) = x`` + * + * The storage type of ``log2`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L105 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def log2(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Broadcasts the input array over particular axes. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. + * + * `broadcast_axes` is an alias to the function `broadcast_axis`. + * + * Example:: + * + * // given x of shape (1,2,1) + * x = `[ `[ [ 1.], + * [ 2.] ] ] + * + * // broadcast x on axis 2 + * broadcast_axis(x, axis=2, size=3) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] ] + * // broadcast x on axes 0 and 2 + * broadcast_axis(x, axis=(0,2), size=(2,3)) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ], + * `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] ] + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L58 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def broadcast_axis(po: broadcast_axisParam) : Array[NDArray] + + /** + * + * {{{ + * + * Connectionist Temporal Classification Loss. + * + * .. note:: The existing alias ``contrib_CTCLoss`` is deprecated. + * + * The shapes of the inputs and outputs: + * + * - **data**: `(sequence_length, batch_size, alphabet_size)` + * - **label**: `(batch_size, label_sequence_length)` + * - **out**: `(batch_size)` + * + * The `data` tensor consists of sequences of activation vectors (without applying softmax), + * with the i-th channel in the last dimension corresponding to the i-th label + * for i between 0 and alphabet_size-1 (i.e. always 0-indexed). + * The alphabet size should include one additional value reserved for the blank label. + * When `blank_label` is ``"first"``, the ``0``-th channel is reserved for the + * activation of the blank label; otherwise, if it is ``"last"``, the ``(alphabet_size-1)``-th channel should be + * reserved for the blank label. + * + * ``label`` is an index matrix of integers. When `blank_label` is ``"first"``, + * the value 0 is then reserved for the blank label, and should not be passed in this matrix. Otherwise, + * when `blank_label` is ``"last"``, the value `(alphabet_size-1)` is reserved for the blank label. + * + * If a sequence of labels is shorter than *label_sequence_length*, use the special + * padding value at the end of the sequence to conform it to the correct + * length. The padding value is `0` when `blank_label` is ``"first"``, and `-1` otherwise. + * + * For example, suppose the vocabulary is `[a, b, c]`, and in one batch we have three sequences + * 'ba', 'cbb', and 'abac'. When `blank_label` is ``"first"``, we can index the labels as + * `{'a': 1, 'b': 2, 'c': 3}`, and we reserve the 0-th channel for the blank label in the data tensor.
+ * The resulting `label` tensor should be padded to be:: + * + * `[ [2, 1, 0, 0], [3, 2, 2, 0], [1, 2, 1, 3] ] + * + * When `blank_label` is ``"last"``, we can index the labels as + * `{'a': 0, 'b': 1, 'c': 2}`, and we reserve the channel index 3 for blank label in data tensor. + * The resulting `label` tensor should be padded to be:: + * + * `[ [1, 0, -1, -1], [2, 1, 1, -1], [0, 1, 0, 2] ] + * + * ``out`` is a list of CTC loss values, one per example in the batch. + * + * See *Connectionist Temporal Classification: Labelling Unsegmented + * Sequence Data with Recurrent Neural Networks*, A. Graves *et al*. for more + * information on the definition and the algorithm. + * + * + * + * Defined in src/operator/nn/ctc_loss.cc:L100 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def CTCLoss(po: CTCLossParam) : Array[NDArray] + + /** + * + * {{{ + * + * Adds arguments element-wise. + * + * The storage type of ``elemwise_add`` output depends on storage types of inputs + * + * - elemwise_add(row_sparse, row_sparse) = row_sparse + * - elemwise_add(csr, csr) = csr + * - elemwise_add(default, csr) = default + * - elemwise_add(csr, default) = default + * - elemwise_add(default, rsp) = default + * - elemwise_add(rsp, default) = default + * - otherwise, ``elemwise_add`` generates output with default storage + * }}} + * + * @param lhs first input + * @param rhs second input + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def elemwise_add(lhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * generalized negative binomial distributions with parameters *mu* (mean) + * and *alpha* (dispersion). This can be understood as a reparameterization of + * the negative binomial, where *k* = *1 / alpha* and *p* = *1 / (mu \* alpha + 1)*. + * + * *mu* and *alpha* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *mu* and *alpha*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *mu* and *alpha* at index *i*. + * + * Examples:: + * + * random_pdf_generalized_negative_binomial(sample=`[ [1, 2, 3, 4] ], alpha=[1], mu=[1]) = + * `[ [0.25, 0.125, 0.0625, 0.03125] ] + * + * sample = `[ [1,2,3,4], + * [1,2,3,4] ] + * random_pdf_generalized_negative_binomial(sample=sample, alpha=[1, 0.6666], mu=[1, 1.5]) = + * `[ [0.25, 0.125, 0.0625, 0.03125 ], + * [0.26517063, 0.16573331, 0.09667706, 0.05437994] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L311 + * }}} + * + * @param sample Samples from the distributions. + * @param mu Means of the distributions. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @param alpha Alpha (dispersion) parameters of the distributions. 
+ * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def random_pdf_generalized_negative_binomial(sample : org.apache.mxnet.javaapi.NDArray, mu : org.apache.mxnet.javaapi.NDArray, is_log : java.lang.Boolean, alpha : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Make your own loss function in network construction. + * + * This operator accepts a customized loss function symbol as a terminal loss and + * the symbol should be an operator with no backward dependency. + * The output of this function is the gradient of loss with respect to the input data. + * + * For example, if you are making a cross entropy loss function, assume ``out`` is the + * predicted output and ``label`` is the true label; then the cross entropy can be defined as:: + * + * cross_entropy = label * log(out) + (1 - label) * log(1 - out) + * loss = make_loss(cross_entropy) + * + * We will need to use ``make_loss`` when we are creating our own loss function or we want to + * combine multiple loss functions. Also we may want to stop some variables' gradients + * from backpropagation. See more detail in ``BlockGrad`` or ``stop_gradient``. + * + * The storage type of ``make_loss`` output depends upon the input storage type: + * + * - make_loss(default) = default + * - make_loss(row_sparse) = row_sparse + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L360 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def make_loss(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Performs region of interest (ROI) pooling on the input array. + * + * ROI pooling is a variant of a max pooling layer, in which the output size is fixed and + * the region of interest is a parameter. Its purpose is to perform max pooling on inputs + * of non-uniform sizes to obtain fixed-size feature maps. ROI pooling is a neural-net + * layer mostly used in training a `Fast R-CNN` network for object detection. + * + * This operator takes a 4D feature map as an input array and region proposals as `rois`, + * then it pools over sub-regions of the input and produces a fixed-sized output array + * regardless of the ROI size. + * + * To crop the feature map accordingly, you can resize the bounding box coordinates + * by changing the parameters `rois` and `spatial_scale`. + * + * The cropped feature maps are pooled by a standard max pooling operation to a fixed size output + * indicated by the `pooled_size` parameter. batch_size will change to the number of region + * bounding boxes after `ROIPooling`. + * + * The size of each region of interest doesn't have to be perfectly divisible by + * the number of pooling sections (`pooled_size`). + * + * Example:: + * + * x = `[ [`[ [ 0., 1., 2., 3., 4., 5.], + * [ 6., 7., 8., 9., 10., 11.], + * [ 12., 13., 14., 15., 16., 17.], + * [ 18., 19., 20., 21., 22., 23.], + * [ 24., 25., 26., 27., 28., 29.], + * [ 30., 31., 32., 33., 34., 35.], + * [ 36., 37., 38., 39., 40., 41.], + * [ 42., 43., 44., 45., 46., 47.] ] ] ] + * + * // region of interest i.e. bounding box coordinates. + * y = `[ [0,0,0,4,4] ] + * + * // returns array of shape (2,2) according to the given roi with max pooling. + * ROIPooling(x, y, (2,2), 1.0) = `[ [`[ [ 14., 16.], + * [ 26., 28.] ] ] ] + * + * // region of interest is changed due to the change in the `spatial_scale` parameter. + * ROIPooling(x, y, (2,2), 0.7) = `[ [`[ [ 7., 9.], + * [ 19., 21.]
] ] ] + * + * + * + * Defined in src/operator/roi_pooling.cc:L225 + * }}} + * + * @param data The input array to the pooling operator, a 4D Feature maps + * @param rois Bounding box coordinates, a 2D array of `[ [batch_index, x1, y1, x2, y2] ], where (x1, y1) and (x2, y2) are top left and bottom right corners of designated region of interest. `batch_index` indicates the index of corresponding image in the input array + * @param pooled_size ROI pooling output shape (h,w) + * @param spatial_scale Ratio of input feature map height (or w) to raw image height (or w). Equals the reciprocal of total stride in convolutional layers + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def ROIPooling(data : org.apache.mxnet.javaapi.NDArray, rois : org.apache.mxnet.javaapi.NDArray, pooled_size : org.apache.mxnet.javaapi.Shape, spatial_scale : java.lang.Float, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise division of the input arrays with broadcasting. + * + * Example:: + * + * x = `[ [ 6., 6., 6.], + * [ 6., 6., 6.] ] + * + * y = `[ [ 2.], + * [ 3.] ] + * + * broadcast_div(x, y) = `[ [ 3., 3., 3.], + * [ 2., 2., 2.] ] + * + * Supported sparse operations: + * + * broadcast_div(csr, dense(1D)) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L187 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def broadcast_div(lhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Splits an array along a particular axis into multiple sub-arrays. + * + * .. note:: ``SliceChannel`` is deprecated. Use ``split`` instead. + * + * **Note** that `num_outputs` should evenly divide the length of the axis + * along which to split the array. + * + * Example:: + * + * x = `[ `[ [ 1.] + * [ 2.] ] + * `[ [ 3.] + * [ 4.] ] + * `[ [ 5.] + * [ 6.] ] ] + * x.shape = (3, 2, 1) + * + * y = split(x, axis=1, num_outputs=2) // a list of 2 arrays with shape (3, 1, 1) + * y = `[ `[ [ 1.] ] + * `[ [ 3.] ] + * `[ [ 5.] ] ] + * + * `[ `[ [ 2.] ] + * `[ [ 4.] ] + * `[ [ 6.] ] ] + * + * y[0].shape = (3, 1, 1) + * + * z = split(x, axis=0, num_outputs=3) // a list of 3 arrays with shape (1, 2, 1) + * z = `[ `[ [ 1.] + * [ 2.] ] ] + * + * `[ `[ [ 3.] + * [ 4.] ] ] + * + * `[ `[ [ 5.] + * [ 6.] ] ] + * + * z[0].shape = (1, 2, 1) + * + * `squeeze_axis=1` removes the axis with length 1 from the shapes of the output arrays. + * **Note** that setting `squeeze_axis` to ``1`` removes axis with length 1 only + * along the `axis` which it is split. + * Also `squeeze_axis` can be set to true only if ``input.shape[axis] == num_outputs``. + * + * Example:: + * + * z = split(x, axis=0, num_outputs=3, squeeze_axis=1) // a list of 3 arrays with shape (2, 1) + * z = `[ [ 1.] + * [ 2.] ] + * + * `[ [ 3.] + * [ 4.] ] + * + * `[ [ 5.] + * [ 6.] ] + * z[0].shape = (2 ,1 ) + * + * + * + * Defined in src/operator/slice_channel.cc:L107 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def SliceChannel(po: SliceChannelParam) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the sum of array elements over given axes treating Not a Numbers (``NaN``) as zero. 
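 + * 
 + * A plain-Scala sketch of the NaN-as-zero reduction (illustrative only):: 
 + * 
 + * val data   = Array(1f, Float.NaN, 3f) 
 + * val nansum = data.map(x => if (x.isNaN) 0f else x).sum   // 4.0f 
 + * 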
+ * + * + * + * Defined in src/operator/tensor/broadcast_reduce_sum_value.cc:L102 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def nansum(po: nansumParam) : Array[NDArray] + + /** + * + * {{{ + * + * This operator is DEPRECATED. + * Perform pooling on the input. + * + * The shapes for 2-D pooling is + * + * - **data**: *(batch_size, channel, height, width)* + * - **out**: *(batch_size, num_filter, out_height, out_width)*, with:: + * + * out_height = f(height, kernel[0], pad[0], stride[0]) + * out_width = f(width, kernel[1], pad[1], stride[1]) + * + * The definition of *f* depends on ``pooling_convention``, which has two options: + * + * - **valid** (default):: + * + * f(x, k, p, s) = floor((x+2*p-k)/s)+1 + * + * - **full**, which is compatible with Caffe:: + * + * f(x, k, p, s) = ceil((x+2*p-k)/s)+1 + * + * But ``global_pool`` is set to be true, then do a global pooling, namely reset + * ``kernel=(height, width)``. + * + * Three pooling options are supported by ``pool_type``: + * + * - **avg**: average pooling + * - **max**: max pooling + * - **sum**: sum pooling + * + * 1-D pooling is special case of 2-D pooling with *weight=1* and + * *kernel[1]=1*. + * + * For 3-D pooling, an additional *depth* dimension is added before + * *height*. Namely the input data will have shape *(batch_size, channel, depth, + * height, width)*. + * + * + * + * Defined in src/operator/pooling_v1.cc:L104 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def Pooling_v1(po: Pooling_v1Param) : Array[NDArray] + + /** + * + * {{{ + * + * Update function for multi-precision Stochastic Gradient Descent (SDG) optimizer. + * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * + * + * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L140 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def preloaded_multi_mp_sgd_update(po: preloaded_multi_mp_sgd_updateParam) : Array[NDArray] + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * negative binomial distributions with parameters *k* (failure limit) and *p* (failure probability). + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Samples will always be returned as a floating point data type. + * + * Examples:: + * + * k = [ 20, 49 ] + * p = [ 0.4 , 0.77 ] + * + * // Draw a single sample for each distribution + * sample_negative_binomial(k, p) = [ 15., 16.] + * + * // Draw a vector containing two samples for each distribution + * sample_negative_binomial(k, p, shape=(2)) = `[ [ 15., 50.], + * [ 16., 12.] 
] + * + * + * Defined in src/operator/random/multisample_op.cc:L287 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def sample_negative_binomial(po: sample_negative_binomialParam) : Array[NDArray] + + /** + * + * {{{ + * + * Applies local response normalization to the input. + * + * The local response normalization layer performs "lateral inhibition" by normalizing + * over local input regions. + * + * If :math:`a_{x,y}^{i}` is the activity of a neuron computed by applying kernel :math:`i` at position + * :math:`(x, y)` and then applying the ReLU nonlinearity, the response-normalized + * activity :math:`b_{x,y}^{i}` is given by the expression: + * + * .. math:: + * b_{x,y}^{i} = \frac{a_{x,y}^{i}}{\Bigg({k + \frac{\alpha}{n} \sum_{j=max(0, i-\frac{n}{2})}^{min(N-1, i+\frac{n}{2})} (a_{x,y}^{j})^{2}}\Bigg)^{\beta}} + * + * where the sum runs over :math:`n` "adjacent" kernel maps at the same spatial position, and :math:`N` is the total + * number of kernels in the layer. + * + * + * + * Defined in src/operator/nn/lrn.cc:L164 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def LRN(po: LRNParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns the hyperbolic sine of the input array, computed element-wise. + * + * .. math:: + * sinh(x) = 0.5\times(exp(x) - exp(-x)) + * + * The storage type of ``sinh`` output depends upon the input storage type: + * + * - sinh(default) = default + * - sinh(row_sparse) = row_sparse + * - sinh(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L313 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def sinh(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * Poisson distributions with parameters lambda (rate). + * + * The parameters of the distributions are provided as an input array. + * Let *[s]* be the shape of the input array, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input array, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input value at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input array. + * + * Samples will always be returned as a floating point data type. + * + * Examples:: + * + * lam = [ 1.0, 8.5 ] + * + * // Draw a single sample for each distribution + * sample_poisson(lam) = [ 0., 13.] + * + * // Draw a vector containing two samples for each distribution + * sample_poisson(lam, shape=(2)) = `[ [ 0., 4.], + * [ 13., 8.] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L285 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def sample_poisson(po: sample_poissonParam) : Array[NDArray] + + /** + * + * {{{ + * + * Update function for Stochastic Gradient Descent (SDG) optimizer. 
+ * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * + * + * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L42 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def preloaded_multi_sgd_update(po: preloaded_multi_sgd_updateParam) : Array[NDArray] + + /** + * + * {{{ + * + * Applies an activation function element-wise to the input. + * + * The following activation functions are supported: + * + * - `relu`: Rectified Linear Unit, :math:`y = max(x, 0)` + * - `sigmoid`: :math:`y = \frac{1}{1 + exp(-x)}` + * - `tanh`: Hyperbolic tangent, :math:`y = \frac{exp(x) - exp(-x)}{exp(x) + exp(-x)}` + * - `softrelu`: Soft ReLU, or SoftPlus, :math:`y = log(1 + exp(x))` + * - `softsign`: :math:`y = \frac{x}{1 + abs(x)}` + * + * + * + * Defined in src/operator/nn/activation.cc:L168 + * }}} + * + * @param data The input array. + * @param act_type Activation function to be applied. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def Activation(data : org.apache.mxnet.javaapi.NDArray, act_type : String, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns the result of element-wise **logical or** with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 0.], + * [ 1., 1., 0.] ] + * + * y = `[ [ 1.], + * [ 0.] ] + * + * broadcast_logical_or(x, y) = `[ [ 1., 1., 1.], + * [ 1., 1., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L172 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def broadcast_logical_or(lhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * The FTML optimizer described in + * *FTML - Follow the Moving Leader in Deep Learning*, + * available at http://proceedings.mlr.press/v70/zheng17a/zheng17a.pdf. + * + * .. math:: + * + * g_t = \nabla J(W_{t-1})\\ + * v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2\\ + * d_t = \frac{ 1 - \beta_1^t }{ \eta_t } (\sqrt{ \frac{ v_t }{ 1 - \beta_2^t } } + \epsilon) + * \sigma_t = d_t - \beta_1 d_{t-1} + * z_t = \beta_1 z_{ t-1 } + (1 - \beta_1^t) g_t - \sigma_t W_{t-1} + * W_t = - \frac{ z_t }{ d_t } + * + * + * + * Defined in src/operator/optimizer_op.cc:L640 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def ftml_update(po: ftml_updateParam) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the min of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L47 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def min_axis(po: min_axisParam) : Array[NDArray] + + /** + * + * {{{ + * + * Computes softsign of x element-wise. + * + * .. math:: + * y = x / (1 + abs(x)) + * + * The storage type of ``softsign`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L191 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def softsign(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Converts each element of the input array from radians to degrees. + * + * .. 
math:: + * degrees([0, \pi/2, \pi, 3\pi/2, 2\pi]) = [0, 90, 180, 270, 360] + * + * The storage type of ``degrees`` output depends upon the input storage type: + * + * - degrees(default) = default + * - degrees(row_sparse) = row_sparse + * - degrees(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L274 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def degrees(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns ``exp(x) - 1`` computed element-wise on the input. + * + * This function provides greater precision than ``exp(x) - 1`` for small values of ``x``. + * + * The storage type of ``expm1`` output depends upon the input storage type: + * + * - expm1(default) = default + * - expm1(row_sparse) = row_sparse + * - expm1(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L224 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def expm1(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Batch normalization. + * + * This operator is DEPRECATED. Perform BatchNorm on the input. + * + * Normalizes a data batch by mean and variance, and applies a scale ``gamma`` as + * well as offset ``beta``. + * + * Assume the input has more than one dimension and we normalize along axis 1. + * We first compute the mean and variance along this axis: + * + * .. math:: + * + * data\_mean[i] = mean(data[:,i,:,...]) \\ + * data\_var[i] = var(data[:,i,:,...]) + * + * Then compute the normalized output, which has the same shape as input, as following: + * + * .. math:: + * + * out[:,i,:,...] = \frac{data[:,i,:,...] - data\_mean[i]}{\sqrt{data\_var[i]+\epsilon}} * gamma[i] + beta[i] + * + * Both *mean* and *var* returns a scalar by treating the input as a vector. + * + * Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` + * have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and + * ``data_var`` as well, which are needed for the backward pass. + * + * Besides the inputs and the outputs, this operator accepts two auxiliary + * states, ``moving_mean`` and ``moving_var``, which are *k*-length + * vectors. They are global statistics for the whole dataset, which are updated + * by:: + * + * moving_mean = moving_mean * momentum + data_mean * (1 - momentum) + * moving_var = moving_var * momentum + data_var * (1 - momentum) + * + * If ``use_global_stats`` is set to be true, then ``moving_mean`` and + * ``moving_var`` are used instead of ``data_mean`` and ``data_var`` to compute + * the output. It is often used during inference. + * + * Both ``gamma`` and ``beta`` are learnable parameters. But if ``fix_gamma`` is true, + * then set ``gamma`` to 1 and its gradient to 0. + * + * There's no sparse support for this operator, and it will exhibit problematic behavior if used with + * sparse tensors. + * + * + * + * Defined in src/operator/batch_norm_v1.cc:L95 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def BatchNorm_v1(po: BatchNorm_v1Param) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the log softmax of the input. + * This is equivalent to computing softmax followed by log. 
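+ *
+ * Along the chosen axis this amounts to (spelled out here for clarity; it follows
+ * directly from the definition above):
+ *
+ * .. math::
+ * log\_softmax(x)_i = x_i - log\Big(\sum_j exp(x_j)\Big)
+ *
+ * Computing it in this fused form is typically more numerically stable than
+ * evaluating ``log(softmax(x))`` in two separate steps.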
+ * + * Examples:: + * + * >>> x = mx.nd.array([1, 2, .1]) + * >>> mx.nd.log_softmax(x).asnumpy() + * array([-1.41702998, -0.41702995, -2.31702995], dtype=float32) + * + * >>> x = mx.nd.array( `[ [1, 2, .1],[.1, 2, 1] ] ) + * >>> mx.nd.log_softmax(x, axis=0).asnumpy() + * array(`[ [-0.34115392, -0.69314718, -1.24115396], + * [-1.24115396, -0.69314718, -0.34115392] ], dtype=float32) + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def log_softmax(po: log_softmaxParam) : Array[NDArray] + + /** + * + * {{{ + * + * Update function for `RMSProp` optimizer. + * + * `RMSprop` is a variant of stochastic gradient descent where the gradients are + * divided by a cache which grows with the sum of squares of recent gradients? + * + * `RMSProp` is similar to `AdaGrad`, a popular variant of `SGD` which adaptively + * tunes the learning rate of each parameter. `AdaGrad` lowers the learning rate for + * each parameter monotonically over the course of training. + * While this is analytically motivated for convex optimizations, it may not be ideal + * for non-convex problems. `RMSProp` deals with this heuristically by allowing the + * learning rates to rebound as the denominator decays over time. + * + * Define the Root Mean Square (RMS) error criterion of the gradient as + * :math:`RMS[g]_t = \sqrt{E[g^2]_t + \epsilon}`, where :math:`g` represents + * gradient and :math:`E[g^2]_t` is the decaying average over past squared gradient. + * + * The :math:`E[g^2]_t` is given by: + * + * .. math:: + * E[g^2]_t = \gamma * E[g^2]_{t-1} + (1-\gamma) * g_t^2 + * + * The update step is + * + * .. math:: + * \theta_{t+1} = \theta_t - \frac{\eta}{RMS[g]_t} g_t + * + * The RMSProp code follows the version in + * http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf + * Tieleman & Hinton, 2012. + * + * Hinton suggests the momentum term :math:`\gamma` to be 0.9 and the learning rate + * :math:`\eta` to be 0.001. + * + * + * + * Defined in src/operator/optimizer_op.cc:L797 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def rmsprop_update(po: rmsprop_updateParam) : Array[NDArray] + + /** + * + * {{{ + * + * Applies a logistic function to the input. + * + * The logistic function, also known as the sigmoid function, is computed as + * :math:`\frac{1}{1+exp(-\textbf{x})}`. + * + * Commonly, the sigmoid is used to squash the real-valued output of a linear model + * :math:`wTx+b` into the [0,1] range so that it can be interpreted as a probability. + * It is suitable for binary classification or probability prediction tasks. + * + * .. note:: + * Use the LogisticRegressionOutput as the final output layer of a net. + * + * The storage type of ``label`` can be ``default`` or ``csr`` + * + * - LogisticRegressionOutput(default, default) = default + * - LogisticRegressionOutput(default, csr) = default + * + * The loss function used is the Binary Cross Entropy Loss: + * + * :math:`-{(y\log(p) + (1 - y)\log(1 - p))}` + * + * Where `y` is the ground truth probability of positive outcome for a given example, and `p` the probability predicted by the model. By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. + * The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. + * + * + * + * Defined in src/operator/regression_output.cc:L152 + * }}} + * + * @param data Input data to the function. + * @param label Input label to the function. 
+ * @param grad_scale Scale the gradient by a float factor + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def LogisticRegressionOutput(data : org.apache.mxnet.javaapi.NDArray, label : org.apache.mxnet.javaapi.NDArray, grad_scale : java.lang.Float, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Update function for Nesterov Accelerated Gradient( NAG) optimizer. + * It updates the weights using the following formula, + * + * .. math:: + * v_t = \gamma v_{t-1} + \eta * \nabla J(W_{t-1} - \gamma v_{t-1})\\ + * W_t = W_{t-1} - v_t + * + * Where + * :math:`\eta` is the learning rate of the optimizer + * :math:`\gamma` is the decay rate of the momentum estimate + * :math:`\v_t` is the update vector at time step `t` + * :math:`\W_t` is the weight vector at time step `t` + * + * + * + * Defined in src/operator/optimizer_op.cc:L726 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def nag_mom_update(po: nag_mom_updateParam) : Array[NDArray] + + /** + * + * {{{ + * + * Computes sigmoid of x element-wise. + * + * .. math:: + * y = 1 / (1 + exp(-x)) + * + * The storage type of ``sigmoid`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L119 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def sigmoid(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Updater function for multi-precision sgd optimizer + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def mp_sgd_mom_update(po: mp_sgd_mom_updateParam) : Array[NDArray] + + /** + * + * {{{ + * + * Slices along a given axis. + * Returns an array slice along a given `axis` starting from the `begin` index + * to the `end` index. + * Examples:: + * x = `[ [ 1., 2., 3., 4.], + * [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice_axis(x, axis=0, begin=1, end=3) = `[ [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice_axis(x, axis=1, begin=0, end=2) = `[ [ 1., 2.], + * [ 5., 6.], + * [ 9., 10.] ] + * slice_axis(x, axis=1, begin=-3, end=-1) = `[ [ 2., 3.], + * [ 6., 7.], + * [ 10., 11.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L571 + * }}} + * + * @param data Source input + * @param axis Axis along which to be sliced, supports negative indexes. + * @param begin The beginning index along the axis to be sliced, supports negative indexes. + * @param end The ending index along the axis to be sliced, supports negative indexes. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def slice_axis(data : org.apache.mxnet.javaapi.NDArray, axis : java.lang.Integer, begin : java.lang.Integer, end : java.lang.Integer, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Draw random samples from a gamma distribution. + * + * Samples are distributed according to a gamma distribution parametrized by *alpha* (shape) and *beta* (scale). + * + * Example:: + * + * gamma(alpha=9, beta=0.5, shape=(2,2)) = `[ [ 7.10486984, 3.37695289], + * [ 3.91697288, 3.65933681] ] + * + * + * Defined in src/operator/random/sample_op.cc:L125 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def random_gamma(po: random_gammaParam) : Array[NDArray] + + /** + * + * {{{ + * + * Maps integer indices to vector representations (embeddings). 
+ * + * This operator maps words to real-valued vectors in a high-dimensional space, + * called word embeddings. These embeddings can capture semantic and syntactic properties of the words. + * For example, it has been noted that in the learned embedding spaces, similar words tend + * to be close to each other and dissimilar words far apart. + * + * For an input array of shape (d1, ..., dK), + * the shape of an output array is (d1, ..., dK, output_dim). + * All the input values should be integers in the range [0, input_dim). + * + * If the input_dim is ip0 and output_dim is op0, then shape of the embedding weight matrix must be + * (ip0, op0). + * + * When "sparse_grad" is False, if any index mentioned is too large, it is replaced by the index that + * addresses the last vector in an embedding matrix. + * When "sparse_grad" is True, an error will be raised if invalid indices are found. + * + * Examples:: + * + * input_dim = 4 + * output_dim = 5 + * + * // Each row in weight matrix y represents a word. So, y = (w0,w1,w2,w3) + * y = `[ [ 0., 1., 2., 3., 4.], + * [ 5., 6., 7., 8., 9.], + * [ 10., 11., 12., 13., 14.], + * [ 15., 16., 17., 18., 19.] ] + * + * // Input array x represents n-grams(2-gram). So, x = [(w1,w3), (w0,w2)] + * x = `[ [ 1., 3.], + * [ 0., 2.] ] + * + * // Mapped input x to its vector representation y. + * Embedding(x, y, 4, 5) = `[ `[ [ 5., 6., 7., 8., 9.], + * [ 15., 16., 17., 18., 19.] ], + * + * `[ [ 0., 1., 2., 3., 4.], + * [ 10., 11., 12., 13., 14.] ] ] + * + * + * The storage type of weight can be either row_sparse or default. + * + * .. Note:: + * + * If "sparse_grad" is set to True, the storage type of gradient w.r.t weights will be + * "row_sparse". Only a subset of optimizers support sparse gradients, including SGD, AdaGrad + * and Adam. Note that by default lazy updates is turned on, which may perform differently + * from standard updates. For more details, please check the Optimization API at: + * https://mxnet.incubator.apache.org/api/python/optimization/optimization.html + * + * + * + * Defined in src/operator/tensor/indexing_op.cc:L539 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def Embedding(po: EmbeddingParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise inverse cosine of the input array. + * + * The input should be in range `[-1, 1]`. + * The output is in the closed interval :math:`[0, \pi]` + * + * .. math:: + * arccos([-1, -.707, 0, .707, 1]) = [\pi, 3\pi/4, \pi/2, \pi/4, 0] + * + * The storage type of ``arccos`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L206 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def arccos(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise inverse gauss error function of the input. + * + * Example:: + * + * erfinv([0, 0.5., -1.]) = [0., 0.4769, -inf] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L907 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def erfinv(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Draw random samples from a normal (Gaussian) distribution. + * + * .. note:: The existing alias ``normal`` is deprecated. 
+ * + * Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale* + * (standard deviation). + * + * Example:: + * + * normal(loc=0, scale=1, shape=(2,2)) = `[ [ 1.89171135, -1.16881478], + * [-1.23474145, 1.55807114] ] + * + * + * Defined in src/operator/random/sample_op.cc:L113 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def normal(po: normalParam) : Array[NDArray] + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * generalized negative binomial distributions with parameters *mu* (mean) and *alpha* (dispersion). + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Samples will always be returned as a floating point data type. + * + * Examples:: + * + * mu = [ 2.0, 2.5 ] + * alpha = [ 1.0, 0.1 ] + * + * // Draw a single sample for each distribution + * sample_generalized_negative_binomial(mu, alpha) = [ 0., 3.] + * + * // Draw a vector containing two samples for each distribution + * sample_generalized_negative_binomial(mu, alpha, shape=(2)) = `[ [ 0., 3.], + * [ 3., 1.] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L290 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def sample_generalized_negative_binomial(po: sample_generalized_negative_binomialParam) : Array[NDArray] + + /** + * + * {{{ + * + * Performs Cholesky factorization of a symmetric positive-definite matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, the Cholesky factor *B* of the symmetric, positive definite matrix *A* is + * computed. *B* is triangular (entries of upper or lower triangle are all zero), has + * positive diagonal entries, and: + * + * *A* = *B* \* *B*\ :sup:`T` if *lower* = *true* + * *A* = *B*\ :sup:`T` \* *B* if *lower* = *false* + * + * If *n>2*, *potrf* is performed separately on the trailing two dimensions for all inputs + * (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix factorization + * A = `[ [4.0, 1.0], [1.0, 4.25] ] + * potrf(A) = `[ [2.0, 0], [0.5, 2.0] ] + * + * Batch matrix factorization + * A = `[ `[ [4.0, 1.0], [1.0, 4.25] ], `[ [16.0, 4.0], [4.0, 17.0] ] ] + * potrf(A) = `[ `[ [2.0, 0], [0.5, 2.0] ], `[ [4.0, 0], [1.0, 4.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L214 + * }}} + * + * @param A Tensor of input matrices to be decomposed + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def linalg_potrf(A : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Layer normalization. + * + * Normalizes the channels of the input tensor by mean and variance, and applies a scale ``gamma`` as + * well as offset ``beta``. 
+ * + * Assume the input has more than one dimension and we normalize along axis 1. + * We first compute the mean and variance along this axis and then + * compute the normalized output, which has the same shape as input, as following: + * + * .. math:: + * + * out = \frac{data - mean(data, axis)}{\sqrt{var(data, axis) + \epsilon}} * gamma + beta + * + * Both ``gamma`` and ``beta`` are learnable parameters. + * + * Unlike BatchNorm and InstanceNorm, the *mean* and *var* are computed along the channel dimension. + * + * Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` + * have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and + * ``data_std``. Note that no gradient will be passed through these two outputs. + * + * The parameter ``axis`` specifies which axis of the input shape denotes + * the 'channel' (separately normalized groups). The default is -1, which sets the channel + * axis to be the last item in the input shape. + * + * + * + * Defined in src/operator/nn/layer_norm.cc:L156 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def LayerNorm(po: LayerNormParam) : Array[NDArray] + + /** + * + * {{{ + * + * Applies dropout operation to input array. + * + * - During training, each element of the input is set to zero with probability p. + * The whole array is rescaled by :math:`1/(1-p)` to keep the expected + * sum of the input unchanged. + * + * - During testing, this operator does not change the input if mode is 'training'. + * If mode is 'always', the same computaion as during training will be applied. + * + * Example:: + * + * random.seed(998) + * input_array = array(`[ [3., 0.5, -0.5, 2., 7.], + * [2., -0.4, 7., 3., 0.2] ]) + * a = symbol.Variable('a') + * dropout = symbol.Dropout(a, p = 0.2) + * executor = dropout.simple_bind(a = input_array.shape) + * + * ## If training + * executor.forward(is_train = True, a = input_array) + * executor.outputs + * `[ [ 3.75 0.625 -0. 2.5 8.75 ] + * [ 2.5 -0.5 8.75 3.75 0. ] ] + * + * ## If testing + * executor.forward(is_train = False, a = input_array) + * executor.outputs + * `[ [ 3. 0.5 -0.5 2. 7. ] + * [ 2. -0.4 7. 3. 0.2 ] ] + * + * + * Defined in src/operator/nn/dropout.cc:L96 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def Dropout(po: DropoutParam) : Array[NDArray] + + /** + * + * {{{ + * + * Remove single-dimensional entries from the shape of an array. + * Same behavior of defining the output tensor shape as numpy.squeeze for the most of cases. + * See the following note for exception. + * Examples:: + * data = `[ `[ [0], [1], [2] ] ] + * squeeze(data) = [0, 1, 2] + * squeeze(data, axis=0) = `[ [0], [1], [2] ] + * squeeze(data, axis=2) = `[ [0, 1, 2] ] + * squeeze(data, axis=(0, 2)) = [0, 1, 2] + * .. Note:: + * The output of this operator will keep at least one dimension not removed. For example, + * squeeze(`[ `[ [4] ] ]) = [4], while in numpy.squeeze, the output will become a scalar. + * }}} + * + * @param data data to squeeze + * @param axis Selects a subset of the single-dimensional entries in the shape. If an axis is selected with shape entry greater than one, an error is raised. 
+ * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def squeeze(data : org.apache.mxnet.javaapi.NDArray, axis : org.apache.mxnet.javaapi.Shape, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Updater function for multi-precision sgd optimizer + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def mp_sgd_update(po: mp_sgd_updateParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns the element-wise inverse hyperbolic tangent of the input array, \ + * computed element-wise. + * + * The storage type of ``arctanh`` output depends upon the input storage type: + * + * - arctanh(default) = default + * - arctanh(row_sparse) = row_sparse + * - arctanh(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L515 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def arctanh(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Draw random samples from a uniform distribution. + * + * .. note:: The existing alias ``uniform`` is deprecated. + * + * Samples are uniformly distributed over the half-open interval *[low, high)* + * (includes *low*, but excludes *high*). + * + * Example:: + * + * uniform(low=0, high=1, shape=(2,2)) = `[ [ 0.60276335, 0.85794562], + * [ 0.54488319, 0.84725171] ] + * + * + * + * Defined in src/operator/random/sample_op.cc:L96 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def random_uniform(po: random_uniformParam) : Array[NDArray] + + /** + * + * {{{ + * + * Update function for Stochastic Gradient Descent (SGD) optimizer. + * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * However, if gradient is of ``row_sparse`` storage type and ``lazy_update`` is True, + * only the row slices whose indices appear in grad.indices are updated:: + * + * for row in gradient.indices: + * weight[row] = weight[row] - learning_rate * (gradient[row] + wd * weight[row]) + * + * + * + * Defined in src/operator/optimizer_op.cc:L524 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def sgd_update(po: sgd_updateParam) : Array[NDArray] + + /** + * + * {{{ + * + * Scatters data into a new tensor according to indices. + * + * Given `data` with shape `(Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1})` and indices with shape + * `(M, Y_0, ..., Y_{K-1})`, the output will have shape `(X_0, X_1, ..., X_{N-1})`, + * where `M <= N`. If `M == N`, data shape should simply be `(Y_0, ..., Y_{K-1})`. + * + * The elements in output is defined as follows:: + * + * output[indices[0, y_0, ..., y_{K-1}], + * ..., + * indices[M-1, y_0, ..., y_{K-1}], + * x_M, ..., x_{N-1}] = data[y_0, ..., y_{K-1}, x_M, ..., x_{N-1}] + * + * all other entries in output are 0. + * + * .. warning:: + * + * If the indices have duplicates, the result will be non-deterministic and + * the gradient of `scatter_nd` will not be correct!! 
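+ *
+ * In particular, if two entries of `indices` address the same output position, which of
+ * the corresponding `data` values ends up there is undefined, so keep the indices unique
+ * whenever reproducible results (and correct gradients) are required.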
+ * + * + * Examples:: + * + * data = [2, 3, 0] + * indices = `[ [1, 1, 0], [0, 1, 0] ] + * shape = (2, 2) + * scatter_nd(data, indices, shape) = `[ [0, 0], [2, 3] ] + * + * data = `[ `[ [1, 2], [3, 4] ], `[ [5, 6], [7, 8] ] ] + * indices = `[ [0, 1], [1, 1] ] + * shape = (2, 2, 2, 2) + * scatter_nd(data, indices, shape) = `[ [`[ [0, 0], + * [0, 0] ], + * + * `[ [1, 2], + * [3, 4] ] ], + * + * `[ `[ [0, 0], + * [0, 0] ], + * + * `[ [5, 6], + * [7, 8] ] ] ] + * }}} + * + * @param data data + * @param indices indices + * @param shape Shape of output. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def scatter_nd(data : org.apache.mxnet.javaapi.NDArray, indices : org.apache.mxnet.javaapi.NDArray, shape : org.apache.mxnet.javaapi.Shape, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Performs general matrix multiplication and accumulation. + * Input are tensors *A*, *B*, *C*, each of dimension *n >= 2* and having the same shape + * on the leading *n-2* dimensions. + * + * If *n=2*, the BLAS3 function *gemm* is performed: + * + * *out* = *alpha* \* *op*\ (*A*) \* *op*\ (*B*) + *beta* \* *C* + * + * Here, *alpha* and *beta* are scalar parameters, and *op()* is either the identity or + * matrix transposition (depending on *transpose_a*, *transpose_b*). + * + * If *n>2*, *gemm* is performed separately for a batch of matrices. The column indices of the matrices + * are given by the last dimensions of the tensors, the row indices by the axis specified with the *axis* + * parameter. By default, the trailing two dimensions will be used for matrix encoding. + * + * For a non-default axis parameter, the operation performed is equivalent to a series of swapaxes/gemm/swapaxes + * calls. For example let *A*, *B*, *C* be 5 dimensional tensors. Then gemm(*A*, *B*, *C*, axis=1) is equivalent + * to the following without the overhead of the additional swapaxis operations:: + * + * A1 = swapaxes(A, dim1=1, dim2=3) + * B1 = swapaxes(B, dim1=1, dim2=3) + * C = swapaxes(C, dim1=1, dim2=3) + * C = gemm(A1, B1, C) + * C = swapaxis(C, dim1=1, dim2=3) + * + * When the input data is of type float32 and the environment variables MXNET_CUDA_ALLOW_TENSOR_CORE + * and MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION are set to 1, this operator will try to use + * pseudo-float16 precision (float32 math with float16 I/O) precision in order to use + * Tensor Cores on suitable NVIDIA GPUs. This can sometimes give significant speedups. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix multiply-add + * A = `[ [1.0, 1.0], [1.0, 1.0] ] + * B = `[ [1.0, 1.0], [1.0, 1.0], [1.0, 1.0] ] + * C = `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ] + * gemm(A, B, C, transpose_b=True, alpha=2.0, beta=10.0) + * = `[ [14.0, 14.0, 14.0], [14.0, 14.0, 14.0] ] + * + * Batch matrix multiply-add + * A = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * B = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * C = `[ `[ [10.0] ], `[ [0.01] ] ] + * gemm(A, B, C, transpose_b=True, alpha=2.0 , beta=10.0) + * = `[ `[ [104.0] ], `[ [0.14] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L89 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def linalg_gemm(po: linalg_gemmParam) : Array[NDArray] + + /** + * + * {{{ + * + * Solves matrix equation involving a lower triangular matrix. + * Input are tensors *A*, *B*, each of dimension *n >= 2* and having the same shape + * on the leading *n-2* dimensions. + * + * If *n=2*, *A* must be triangular. 
The operator performs the BLAS3 function + * *trsm*, solving for *out* in: + * + * *op*\ (*A*) \* *out* = *alpha* \* *B* + * + * if *rightside=False*, or + * + * *out* \* *op*\ (*A*) = *alpha* \* *B* + * + * if *rightside=True*. Here, *alpha* is a scalar parameter, and *op()* is either the + * identity or the matrix transposition (depending on *transpose*). + * + * If *n>2*, *trsm* is performed separately on the trailing two dimensions for all inputs + * (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix solve + * A = `[ [1.0, 0], [1.0, 1.0] ] + * B = `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ] + * trsm(A, B, alpha=0.5) = `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ] + * + * Batch matrix solve + * A = `[ `[ [1.0, 0], [1.0, 1.0] ], `[ [1.0, 0], [1.0, 1.0] ] ] + * B = `[ `[ [2.0, 2.0, 2.0], [4.0, 4.0, 4.0] ], + * `[ [4.0, 4.0, 4.0], [8.0, 8.0, 8.0] ] ] + * trsm(A, B, alpha=0.5) = `[ `[ [1.0, 1.0, 1.0], [1.0, 1.0, 1.0] ], + * `[ [2.0, 2.0, 2.0], [2.0, 2.0, 2.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L396 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def linalg_trsm(po: linalg_trsmParam) : Array[NDArray] + + /** + * + * {{{ + * + * This operator is DEPRECATED. Apply convolution to input then add a bias. + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def Convolution_v1(po: Convolution_v1Param) : Array[NDArray] + + /** + * + * {{{ + * + * Extracts a diagonal or constructs a diagonal array. + * + * ``diag``'s behavior depends on the input array dimensions: + * + * - 1-D arrays: constructs a 2-D array with the input as its diagonal, all other elements are zero. + * - N-D arrays: extracts the diagonals of the sub-arrays with axes specified by ``axis1`` and ``axis2``. + * The output shape would be decided by removing the axes numbered ``axis1`` and ``axis2`` from the + * input shape and appending to the result a new axis with the size of the diagonals in question. + * + * For example, when the input shape is `(2, 3, 4, 5)`, ``axis1`` and ``axis2`` are 0 and 2 + * respectively and ``k`` is 0, the resulting shape would be `(3, 5, 2)`. + * + * Examples:: + * + * x = `[ [1, 2, 3], + * [4, 5, 6] ] + * + * diag(x) = [1, 5] + * + * diag(x, k=1) = [2, 6] + * + * diag(x, k=-1) = [4] + * + * x = [1, 2, 3] + * + * diag(x) = `[ [1, 0, 0], + * [0, 2, 0], + * [0, 0, 3] ] + * + * diag(x, k=1) = `[ [0, 1, 0], + * [0, 0, 2], + * [0, 0, 0] ] + * + * diag(x, k=-1) = `[ [0, 0, 0], + * [1, 0, 0], + * [0, 2, 0] ] + * + * x = `[ `[ [1, 2], + * [3, 4] ], + * + * `[ [5, 6], + * [7, 8] ] ] + * + * diag(x) = `[ [1, 7], + * [2, 8] ] + * + * diag(x, k=1) = `[ [3], + * [4] ] + * + * diag(x, axis1=-2, axis2=-1) = `[ [1, 4], + * [5, 8] ] + * + * + * + * Defined in src/operator/tensor/diag_op.cc:L87 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def diag(po: diagParam) : Array[NDArray] + + /** + * + * {{{ + * + * Interchanges two axes of an array. 
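+ *
+ * The output shape is the input shape with the two chosen axes exchanged, and elements are
+ * moved accordingly: the value found at index ``(..., i, ..., j, ...)`` of the input (reading
+ * the two swapped positions) appears at index ``(..., j, ..., i, ...)`` of the output.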
+ * + * Examples:: + * + * x = `[ [1, 2, 3] ]) + * swapaxes(x, 0, 1) = `[ [ 1], + * [ 2], + * [ 3] ] + * + * x = `[ `[ [ 0, 1], + * [ 2, 3] ], + * `[ [ 4, 5], + * [ 6, 7] ] ] // (2,2,2) array + * + * swapaxes(x, 0, 2) = `[ `[ [ 0, 4], + * [ 2, 6] ], + * `[ [ 1, 5], + * [ 3, 7] ] ] + * + * + * Defined in src/operator/swapaxis.cc:L70 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def SwapAxis(po: SwapAxisParam) : Array[NDArray] + + /** + * + * {{{ + * + * Extracts a triangular sub-matrix from a square matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, then *A* represents a single square matrix from which a triangular sub-matrix is extracted as a 1-dimensional tensor. + * + * If *n>2*, then *A* represents a batch of square matrices on the trailing two dimensions. The extracted triangular sub-matrices are returned as an *n-1*-dimensional tensor. + * + * The *offset* and *lower* parameters determine the triangle to be extracted: + * + * - When *offset = 0* either the lower or upper triangle with respect to the main diagonal is extracted depending on the value of parameter *lower*. + * - When *offset = k > 0* the upper triangle with respect to the k-th diagonal above the main diagonal is extracted. + * - When *offset = k < 0* the lower triangle with respect to the k-th diagonal below the main diagonal is extracted. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single triagonal extraction + * A = `[ [1.0, 2.0], + * [3.0, 4.0] ] + * + * extracttrian(A) = [1.0, 3.0, 4.0] + * extracttrian(A, lower=False) = [1.0, 2.0, 4.0] + * extracttrian(A, 1) = [2.0] + * extracttrian(A, -1) = [3.0] + * + * Batch triagonal extraction + * A = `[ `[ [1.0, 2.0], + * [3.0, 4.0] ], + * `[ [5.0, 6.0], + * [7.0, 8.0] ] ] + * + * extracttrian(A) = `[ [1.0, 3.0, 4.0], + * [5.0, 7.0, 8.0] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L605 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def linalg_extracttrian(po: linalg_extracttrianParam) : Array[NDArray] + + /** + * + * {{{ + * + * Batchwise dot product. + * + * ``batch_dot`` is used to compute dot product of ``x`` and ``y`` when ``x`` and + * ``y`` are data in batch, namely N-D (N >= 3) arrays in shape of `(B0, ..., B_i, :, :)`. + * + * For example, given ``x`` with shape `(B_0, ..., B_i, N, M)` and ``y`` with shape + * `(B_0, ..., B_i, M, K)`, the result array will have shape `(B_0, ..., B_i, N, K)`, + * which is computed by:: + * + * batch_dot(x,y)[b_0, ..., b_i, :, :] = dot(x[b_0, ..., b_i, :, :], y[b_0, ..., b_i, :, :]) + * + * + * + * Defined in src/operator/tensor/dot.cc:L127 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def batch_dot(po: batch_dotParam) : Array[NDArray] + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * normal distributions with parameters *mu* (mean) and *sigma* (standard deviation). + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. 
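+ *
+ * For instance, if *mu* and *sigma* each have shape *(2,)* and the shape parameter is *(3,)*,
+ * the output has shape *(2, 3)*: three samples drawn from each of the two distributions.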
+ * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Examples:: + * + * mu = [ 0.0, 2.5 ] + * sigma = [ 1.0, 3.7 ] + * + * // Draw a single sample for each distribution + * sample_normal(mu, sigma) = [-0.56410581, 0.95934606] + * + * // Draw a vector containing two samples for each distribution + * sample_normal(mu, sigma, shape=(2)) = `[ [-0.56410581, 0.2928229 ], + * [ 0.95934606, 4.48287058] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L278 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def sample_normal(po: sample_normalParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise inverse square-root value of the input. + * + * .. math:: + * rsqrt(x) = 1/\sqrt{x} + * + * Example:: + * + * rsqrt([4,9,16]) = [0.5, 0.33333334, 0.25] + * + * The storage type of ``rsqrt`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L193 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def rsqrt(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Applies Leaky rectified linear unit activation element-wise to the input. + * + * Leaky ReLUs attempt to fix the "dying ReLU" problem by allowing a small `slope` + * when the input is negative and has a slope of one when input is positive. + * + * The following modified ReLU Activation functions are supported: + * + * - *elu*: Exponential Linear Unit. `y = x > 0 ? x : slope * (exp(x)-1)` + * - *selu*: Scaled Exponential Linear Unit. `y = lambda * (x > 0 ? x : alpha * (exp(x) - 1))` where + * *lambda = 1.0507009873554804934193349852946* and *alpha = 1.6732632423543772848170429916717*. + * - *leaky*: Leaky ReLU. `y = x > 0 ? x : slope * x` + * - *prelu*: Parametric ReLU. This is same as *leaky* except that `slope` is learnt during training. + * - *rrelu*: Randomized ReLU. same as *leaky* but the `slope` is uniformly and randomly chosen from + * *[lower_bound, upper_bound)* for training, while fixed to be + * *(lower_bound+upper_bound)/2* for inference. + * + * + * + * Defined in src/operator/leaky_relu.cc:L161 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def LeakyReLU(po: LeakyReLUParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise Base-10 logarithmic value of the input. + * + * ``10**log10(x) = x`` + * + * The storage type of ``log10`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L93 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def log10(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Momentum update function for multi-precision Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. 
math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * + * + * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L200 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def preloaded_multi_mp_sgd_mom_update(po: preloaded_multi_mp_sgd_mom_updateParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise rounded value to the nearest \ + * integer towards zero of the input. + * + * Example:: + * + * fix([-2.1, -1.9, 1.9, 2.1]) = [-2., -1., 1., 2.] + * + * The storage type of ``fix`` output depends upon the input storage type: + * + * - fix(default) = default + * - fix(row_sparse) = row_sparse + * - fix(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L875 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def fix(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns the result of element-wise **greater than** (>) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_greater(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L82 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def broadcast_greater(lhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Update function for multi-precision Stochastic Gradient Descent (SDG) optimizer. + * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * + * + * Defined in src/operator/optimizer_op.cc:L417 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def multi_mp_sgd_update(po: multi_mp_sgd_updateParam) : Array[NDArray] + + /** + * + * {{{ + * + * Joins input arrays along a given axis. + * + * .. note:: `Concat` is deprecated. Use `concat` instead. + * + * The dimensions of the input arrays should be the same except the axis along + * which they will be concatenated. + * The dimension of the output array along the concatenated axis will be equal + * to the sum of the corresponding dimensions of the input arrays. + * + * The storage type of ``concat`` output depends on storage types of inputs + * + * - concat(csr, csr, ..., csr, dim=0) = csr + * - otherwise, ``concat`` generates output with default storage + * + * Example:: + * + * x = `[ [1,1],[2,2] ] + * y = `[ [3,3],[4,4],[5,5] ] + * z = `[ [6,6], [7,7],[8,8] ] + * + * concat(x,y,z,dim=0) = `[ [ 1., 1.], + * [ 2., 2.], + * [ 3., 3.], + * [ 4., 4.], + * [ 5., 5.], + * [ 6., 6.], + * [ 7., 7.], + * [ 8., 8.] ] + * + * Note that you cannot concat x,y,z along dimension 1 since dimension + * 0 is not the same for all the input arrays. + * + * concat(y,z,dim=1) = `[ [ 3., 3., 6., 6.], + * [ 4., 4., 7., 7.], + * [ 5., 5., 8., 8.] 
] + * + * + * + * Defined in src/operator/nn/concat.cc:L383 + * }}} + * + * @param data List of arrays to concatenate + * @param num_args Number of inputs to be concated. + * @param dim the dimension to be concated. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def concat(data : Array[org.apache.mxnet.javaapi.NDArray], num_args : java.lang.Integer, dim : java.lang.Integer, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Gather elements or slices from `data` and store to a tensor whose + * shape is defined by `indices`. + * + * Given `data` with shape `(X_0, X_1, ..., X_{N-1})` and indices with shape + * `(M, Y_0, ..., Y_{K-1})`, the output will have shape `(Y_0, ..., Y_{K-1}, X_M, ..., X_{N-1})`, + * where `M <= N`. If `M == N`, output shape will simply be `(Y_0, ..., Y_{K-1})`. + * + * The elements in output is defined as follows:: + * + * output[y_0, ..., y_{K-1}, x_M, ..., x_{N-1}] = data[indices[0, y_0, ..., y_{K-1}], + * ..., + * indices[M-1, y_0, ..., y_{K-1}], + * x_M, ..., x_{N-1}] + * + * Examples:: + * + * data = `[ [0, 1], [2, 3] ] + * indices = `[ [1, 1, 0], [0, 1, 0] ] + * gather_nd(data, indices) = [2, 3, 0] + * + * data = `[ `[ [1, 2], [3, 4] ], `[ [5, 6], [7, 8] ] ] + * indices = `[ [0, 1], [1, 0] ] + * gather_nd(data, indices) = `[ [3, 4], [5, 6] ] + * }}} + * + * @param data data + * @param indices indices + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def gather_nd(data : org.apache.mxnet.javaapi.NDArray, indices : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * normal distributions with parameters *mu* (mean) and *sigma* (standard deviation). + * + * *mu* and *sigma* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *mu* and *sigma*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *mu* and *sigma* at index *i*. + * + * Examples:: + * + * sample = `[ [-2, -1, 0, 1, 2] ] + * random_pdf_normal(sample=sample, mu=[0], sigma=[1]) = + * `[ [0.05399097, 0.24197073, 0.3989423, 0.24197073, 0.05399097] ] + * + * random_pdf_normal(sample=sample*2, mu=[0,0], sigma=[1,2]) = + * `[ [0.05399097, 0.24197073, 0.3989423, 0.24197073, 0.05399097], + * [0.12098537, 0.17603266, 0.19947115, 0.17603266, 0.12098537] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L299 + * }}} + * + * @param sample Samples from the distributions. + * @param mu Means of the distributions. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @param sigma Standard deviations of the distributions. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def random_pdf_normal(sample : org.apache.mxnet.javaapi.NDArray, mu : org.apache.mxnet.javaapi.NDArray, is_log : java.lang.Boolean, sigma : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Numerical negative of the argument, element-wise. 
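+ *
+ * Example::
+ *
+ * negative([1., -2.5, 3.]) = [-1., 2.5, -3.]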
+ * + * The storage type of ``negative`` output depends upon the input storage type: + * + * - negative(default) = default + * - negative(row_sparse) = row_sparse + * - negative(csr) = csr + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def negative(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the mean of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L84 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def mean(po: meanParam) : Array[NDArray] + + /** + * + * {{{ + * + * Rearranges(permutes) data from depth into blocks of spatial data. + * Similar to ONNX DepthToSpace operator: + * https://github.com/onnx/onnx/blob/master/docs/Operators.md#DepthToSpace. + * The output is a new tensor where the values from depth dimension are moved in spatial blocks + * to height and width dimension. The reverse of this operation is ``space_to_depth``. + * .. math:: + * \begin{gather*} + * x \prime = reshape(x, [N, block\_size, block\_size, C / (block\_size ^ 2), H * block\_size, W * block\_size]) \\ + * x \prime \prime = transpose(x \prime, [0, 3, 4, 1, 5, 2]) \\ + * y = reshape(x \prime \prime, [N, C / (block\_size ^ 2), H * block\_size, W * block\_size]) + * \end{gather*} + * where :math:`x` is an input tensor with default layout as :math:`[N, C, H, W]`: [batch, channels, height, width] + * and :math:`y` is the output tensor of layout :math:`[N, C / (block\_size ^ 2), H * block\_size, W * block\_size]` + * Example:: + * x = `[ [`[ [0, 1, 2], + * [3, 4, 5] ], + * `[ [6, 7, 8], + * [9, 10, 11] ], + * `[ [12, 13, 14], + * [15, 16, 17] ], + * `[ [18, 19, 20], + * [21, 22, 23] ] ] ] + * depth_to_space(x, 2) = `[ [`[ [0, 6, 1, 7, 2, 8], + * [12, 18, 13, 19, 14, 20], + * [3, 9, 4, 10, 5, 11], + * [15, 21, 16, 22, 17, 23] ] ] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L972 + * }}} + * + * @param data Input ndarray + * @param block_size Blocks of [block_size. block_size] are moved + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def depth_to_space(data : org.apache.mxnet.javaapi.NDArray, block_size : java.lang.Integer, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise inverse sine of the input array. + * + * The input should be in the range `[-1, 1]`. + * The output is in the closed interval of [:math:`-\pi/2`, :math:`\pi/2`]. + * + * .. math:: + * arcsin([-1, -.707, 0, .707, 1]) = [-\pi/2, -\pi/4, 0, \pi/4, \pi/2] + * + * The storage type of ``arcsin`` output depends upon the input storage type: + * + * - arcsin(default) = default + * - arcsin(row_sparse) = row_sparse + * - arcsin(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L187 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def arcsin(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Constructs a square matrix with the input representing a specific triangular sub-matrix. + * This is basically the inverse of *linalg.extracttrian*. Input is a tensor *A* of dimension *n >= 1*. + * + * If *n=1*, then *A* represents the entries of a triangular matrix which is lower triangular if *offset<0* or *offset=0*, *lower=true*. 
The resulting matrix is derived by first constructing the square + * matrix with the entries outside the triangle set to zero and then adding *offset*-times an additional + * diagonal with zero entries to the square matrix. + * + * If *n>1*, then *A* represents a batch of triangular sub-matrices. The batch of corresponding square matrices is returned as an *n+1*-dimensional tensor. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix construction + * A = [1.0, 2.0, 3.0] + * + * maketrian(A) = `[ [1.0, 0.0], + * [2.0, 3.0] ] + * + * maketrian(A, lower=false) = `[ [1.0, 2.0], + * [0.0, 3.0] ] + * + * maketrian(A, offset=1) = `[ [0.0, 1.0, 2.0], + * [0.0, 0.0, 3.0], + * [0.0, 0.0, 0.0] ] + * maketrian(A, offset=-1) = `[ [0.0, 0.0, 0.0], + * [1.0, 0.0, 0.0], + * [2.0, 3.0, 0.0] ] + * + * Batch matrix construction + * A = `[ [1.0, 2.0, 3.0], + * [4.0, 5.0, 6.0] ] + * + * maketrian(A) = `[ `[ [1.0, 0.0], + * [2.0, 3.0] ], + * `[ [4.0, 0.0], + * [5.0, 6.0] ] ] + * + * maketrian(A, offset=1) = `[ `[ [0.0, 1.0, 2.0], + * [0.0, 0.0, 3.0], + * [0.0, 0.0, 0.0] ], + * `[ [0.0, 4.0, 5.0], + * [0.0, 0.0, 6.0], + * [0.0, 0.0, 0.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L673 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def linalg_maketrian(po: linalg_maketrianParam) : Array[NDArray] + + /** + * + * {{{ + * + * Update function for multi-precision Nesterov Accelerated Gradient( NAG) optimizer. + * + * + * Defined in src/operator/optimizer_op.cc:L745 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def mp_nag_mom_update(po: mp_nag_mom_updateParam) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the min of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L47 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def min(po: minParam) : Array[NDArray] + + /** + * + * {{{ + * + * Compute *N*-D convolution on *(N+2)*-D input. + * + * In the 2-D convolution, given input data with shape *(batch_size, + * channel, height, width)*, the output is computed by + * + * .. math:: + * + * out[n,i,:,:] = bias[i] + \sum_{j=0}^{channel} data[n,j,:,:] \star + * weight[i,j,:,:] + * + * where :math:`\star` is the 2-D cross-correlation operator. + * + * For general 2-D convolution, the shapes are + * + * - **data**: *(batch_size, channel, height, width)* + * - **weight**: *(num_filter, channel, kernel[0], kernel[1])* + * - **bias**: *(num_filter,)* + * - **out**: *(batch_size, num_filter, out_height, out_width)*. + * + * Define:: + * + * f(x,k,p,s,d) = floor((x+2*p-d*(k-1)-1)/s)+1 + * + * then we have:: + * + * out_height=f(height, kernel[0], pad[0], stride[0], dilate[0]) + * out_width=f(width, kernel[1], pad[1], stride[1], dilate[1]) + * + * If ``no_bias`` is set to be true, then the ``bias`` term is ignored. + * + * The default data ``layout`` is *NCHW*, namely *(batch_size, channel, height, + * width)*. We can choose other layouts such as *NWC*. + * + * If ``num_group`` is larger than 1, denoted by *g*, then split the input ``data`` + * evenly into *g* parts along the channel axis, and also evenly split ``weight`` + * along the first dimension. Next compute the convolution on the *i*-th part of + * the data with the *i*-th weight part. The output is obtained by concatenating all + * the *g* results. + * + * 1-D convolution does not have *height* dimension but only *width* in space. 
+ * + * - **data**: *(batch_size, channel, width)* + * - **weight**: *(num_filter, channel, kernel[0])* + * - **bias**: *(num_filter,)* + * - **out**: *(batch_size, num_filter, out_width)*. + * + * 3-D convolution adds an additional *depth* dimension besides *height* and + * *width*. The shapes are + * + * - **data**: *(batch_size, channel, depth, height, width)* + * - **weight**: *(num_filter, channel, kernel[0], kernel[1], kernel[2])* + * - **bias**: *(num_filter,)* + * - **out**: *(batch_size, num_filter, out_depth, out_height, out_width)*. + * + * Both ``weight`` and ``bias`` are learnable parameters. + * + * There are other options to tune the performance. + * + * - **cudnn_tune**: enable this option leads to higher startup time but may give + * faster speed. Options are + * + * - **off**: no tuning + * - **limited_workspace**:run test and pick the fastest algorithm that doesn't + * exceed workspace limit. + * - **fastest**: pick the fastest algorithm and ignore workspace limit. + * - **None** (default): the behavior is determined by environment variable + * ``MXNET_CUDNN_AUTOTUNE_DEFAULT``. 0 for off, 1 for limited workspace + * (default), 2 for fastest. + * + * - **workspace**: A large number leads to more (GPU) memory usage but may improve + * the performance. + * + * + * + * Defined in src/operator/nn/convolution.cc:L473 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def Convolution(po: ConvolutionParam) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the Khatri-Rao product of the input matrices. + * + * Given a collection of :math:`n` input matrices, + * + * .. math:: + * A_1 \in \mathbb{R}^{M_1 \times M}, \ldots, A_n \in \mathbb{R}^{M_n \times N}, + * + * the (column-wise) Khatri-Rao product is defined as the matrix, + * + * .. math:: + * X = A_1 \otimes \cdots \otimes A_n \in \mathbb{R}^{(M_1 \cdots M_n) \times N}, + * + * where the :math:`k` th column is equal to the column-wise outer product + * :math:`{A_1}_k \otimes \cdots \otimes {A_n}_k` where :math:`{A_i}_k` is the kth + * column of the ith matrix. + * + * Example:: + * + * >>> A = mx.nd.array(`[ [1, -1], + * >>> [2, -3] ]) + * >>> B = mx.nd.array(`[ [1, 4], + * >>> [2, 5], + * >>> [3, 6] ]) + * >>> C = mx.nd.khatri_rao(A, B) + * >>> print(C.asnumpy()) + * `[ [ 1. -4.] + * [ 2. -5.] + * [ 3. -6.] + * [ 2. -12.] + * [ 4. -15.] + * [ 6. -18.] ] + * + * + * + * Defined in src/operator/contrib/krprod.cc:L108 + * }}} + * + * @param args Positional input matrices + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def khatri_rao(args : Array[org.apache.mxnet.javaapi.NDArray], out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Draw random samples from a Poisson distribution. + * + * Samples are distributed according to a Poisson distribution parametrized by *lambda* (rate). + * Samples will always be returned as a floating point data type. + * + * Example:: + * + * poisson(lam=4, shape=(2,2)) = `[ [ 5., 2.], + * [ 4., 6.] ] + * + * + * Defined in src/operator/random/sample_op.cc:L150 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def random_poisson(po: random_poissonParam) : Array[NDArray] + + /** + * + * {{{ + * + * Update function for Adam optimizer. Adam is seen as a generalization + * of AdaGrad. + * + * Adam update consists of the following steps, where g represents gradient and m, v + * are 1st and 2nd order moment estimates (mean and variance). + * + * .. 
math:: + * + * g_t = \nabla J(W_{t-1})\\ + * m_t = \beta_1 m_{t-1} + (1 - \beta_1) g_t\\ + * v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2\\ + * W_t = W_{t-1} - \alpha \frac{ m_t }{ \sqrt{ v_t } + \epsilon } + * + * It updates the weights using:: + * + * m = beta1*m + (1-beta1)*grad + * v = beta2*v + (1-beta2)*(grad**2) + * w += - learning_rate * m / (sqrt(v) + epsilon) + * + * However, if grad's storage type is ``row_sparse``, ``lazy_update`` is True and the storage + * type of weight is the same as those of m and v, + * only the row slices whose indices appear in grad.indices are updated (for w, m and v):: + * + * for row in grad.indices: + * m[row] = beta1*m[row] + (1-beta1)*grad[row] + * v[row] = beta2*v[row] + (1-beta2)*(grad[row]**2) + * w[row] += - learning_rate * m[row] / (sqrt(v[row]) + epsilon) + * + * + * + * Defined in src/operator/optimizer_op.cc:L688 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def adam_update(po: adam_updateParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns the result of element-wise **greater than or equal to** (>=) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_greater_equal(x, y) = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L100 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def broadcast_greater_equal(lhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the sum of array elements over given axes. + * + * .. Note:: + * + * `sum` and `sum_axis` are equivalent. + * For ndarray of csr storage type summation along axis 0 and axis 1 is supported. + * Setting keepdims or exclude to True will cause a fallback to dense operator. + * + * Example:: + * + * data = `[ `[ [1, 2], [2, 3], [1, 3] ], + * `[ [1, 4], [4, 3], [5, 2] ], + * `[ [7, 1], [7, 2], [7, 3] ] ] + * + * sum(data, axis=1) + * `[ [ 4. 8.] + * [ 10. 9.] + * [ 21. 6.] ] + * + * sum(data, axis=[1,2]) + * [ 12. 19. 27.] + * + * data = `[ [1, 2, 0], + * [3, 0, 1], + * [4, 1, 0] ] + * + * csr = cast_storage(data, 'csr') + * + * sum(csr, axis=0) + * [ 8. 3. 1.] + * + * sum(csr, axis=1) + * [ 3. 4. 5.] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_sum_value.cc:L67 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def sum_axis(po: sum_axisParam) : Array[NDArray] + + /** + * + * {{{ + * + * Calculate cross entropy of softmax output and one-hot label. + * + * - This operator computes the cross entropy in two steps: + * - Applies softmax function on the input array. + * - Computes and returns the cross entropy loss between the softmax output and the labels. + * + * - The softmax function and cross entropy loss is given by: + * + * - Softmax Function: + * + * .. math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)} + * + * - Cross Entropy Function: + * + * .. 
math:: \text{CE(label, output)} = - \sum_i \text{label}_i \log(\text{output}_i) + * + * Example:: + * + * x = `[ [1, 2, 3], + * [11, 7, 5] ] + * + * label = [2, 0] + * + * softmax(x) = `[ [0.09003057, 0.24472848, 0.66524094], + * [0.97962922, 0.01794253, 0.00242826] ] + * + * softmax_cross_entropy(data, label) = - log(0.66524084) - log(0.97962922) = 0.4281871 + * + * + * + * Defined in src/operator/loss_binary_op.cc:L59 + * }}} + * + * @param data Input data + * @param label Input label + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def softmax_cross_entropy(data : org.apache.mxnet.javaapi.NDArray, label : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Casts tensor storage type to the new type. + * + * When an NDArray with default storage type is cast to csr or row_sparse storage, + * the result is compact, which means: + * + * - for csr, zero values will not be retained + * - for row_sparse, row slices of all zeros will not be retained + * + * The storage type of ``cast_storage`` output depends on stype parameter: + * + * - cast_storage(csr, 'default') = default + * - cast_storage(row_sparse, 'default') = default + * - cast_storage(default, 'csr') = csr + * - cast_storage(default, 'row_sparse') = row_sparse + * - cast_storage(csr, 'csr') = csr + * - cast_storage(row_sparse, 'row_sparse') = row_sparse + * + * Example:: + * + * dense = `[ [ 0., 1., 0.], + * [ 2., 0., 3.], + * [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * + * # cast to row_sparse storage type + * rsp = cast_storage(dense, 'row_sparse') + * rsp.indices = [0, 1] + * rsp.values = `[ [ 0., 1., 0.], + * [ 2., 0., 3.] ] + * + * # cast to csr storage type + * csr = cast_storage(dense, 'csr') + * csr.indices = [1, 0, 2] + * csr.values = [ 1., 2., 3.] + * csr.indptr = [0, 1, 3, 3, 3] + * + * + * + * Defined in src/operator/tensor/cast_storage.cc:L71 + * }}} + * + * @param data The input. + * @param stype Output storage type. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def cast_storage(data : org.apache.mxnet.javaapi.NDArray, stype : String, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Converts each element of the input array from degrees to radians. + * + * .. math:: + * radians([0, 90, 180, 270, 360]) = [0, \pi/2, \pi, 3\pi/2, 2\pi] + * + * The storage type of ``radians`` output depends upon the input storage type: + * + * - radians(default) = default + * - radians(row_sparse) = row_sparse + * - radians(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L293 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def radians(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Stops gradient computation. + * + * Stops the accumulated gradient of the inputs from flowing through this operator + * in the backward direction. In other words, this operator prevents the contribution + * of its inputs to be taken into account for computing gradients. + * + * Example:: + * + * v1 = [1, 2] + * v2 = [0, 1] + * a = Variable('a') + * b = Variable('b') + * b_stop_grad = stop_gradient(3 * b) + * loss = MakeLoss(b_stop_grad + a) + * + * executor = loss.simple_bind(ctx=cpu(), a=(1,2), b=(1,2)) + * executor.forward(is_train=True, a=v1, b=v2) + * executor.outputs + * [ 1. 5.] + * + * executor.backward() + * executor.grad_arrays + * [ 0. 0.] + * [ 1. 1.] 
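+ *
+ * A comparable Scala sketch on NDArrays (assuming the typed wrapper is
+ * reachable through the Scala-side ``NDArray.api``; the forward value is
+ * unchanged, only gradient flow is blocked)::
+ *
+ *   import org.apache.mxnet.{NDArray, Shape}
+ *
+ *   val b = NDArray.array(Array(0f, 1f), shape = Shape(1, 2))
+ *   val blocked = NDArray.api.BlockGrad(b * 3f).head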
+ * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L327 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def BlockGrad(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Group normalization. + * + * The input channels are separated into ``num_groups`` groups, each containing ``num_channels / num_groups`` channels. + * The mean and standard-deviation are calculated separately over the each group. + * + * .. math:: + * + * data = data.reshape((N, num_groups, C // num_groups, ...)) + * out = \frac{data - mean(data, axis)}{\sqrt{var(data, axis) + \epsilon}} * gamma + beta + * + * Both ``gamma`` and ``beta`` are learnable parameters. + * + * + * + * Defined in src/operator/nn/group_norm.cc:L77 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def GroupNorm(po: GroupNormParam) : Array[NDArray] + + /** + * + * {{{ + * + * Update function for Ftrl optimizer. + * Referenced from *Ad Click Prediction: a View from the Trenches*, available at + * http://dl.acm.org/citation.cfm?id=2488200. + * + * It updates the weights using:: + * + * rescaled_grad = clip(grad * rescale_grad, clip_gradient) + * z += rescaled_grad - (sqrt(n + rescaled_grad**2) - sqrt(n)) * weight / learning_rate + * n += rescaled_grad**2 + * w = (sign(z) * lamda1 - z) / ((beta + sqrt(n)) / learning_rate + wd) * (abs(z) > lamda1) + * + * If w, z and n are all of ``row_sparse`` storage type, + * only the row slices whose indices appear in grad.indices are updated (for w, z and n):: + * + * for row in grad.indices: + * rescaled_grad[row] = clip(grad[row] * rescale_grad, clip_gradient) + * z[row] += rescaled_grad[row] - (sqrt(n[row] + rescaled_grad[row]**2) - sqrt(n[row])) * weight[row] / learning_rate + * n[row] += rescaled_grad[row]**2 + * w[row] = (sign(z[row]) * lamda1 - z[row]) / ((beta + sqrt(n[row])) / learning_rate + wd) * (abs(z[row]) > lamda1) + * + * + * + * Defined in src/operator/optimizer_op.cc:L876 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def ftrl_update(po: ftrl_updateParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns the result of element-wise **logical xor** with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 0.], + * [ 1., 1., 0.] ] + * + * y = `[ [ 1.], + * [ 0.] ] + * + * broadcast_logical_xor(x, y) = `[ [ 0., 0., 1.], + * [ 1., 1., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L190 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def broadcast_logical_xor(lhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Compute the sign and log of the determinant of a matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* is a square matrix. We compute: + * + * *sign* = *sign(det(A))* + * *logabsdet* = *log(abs(det(A)))* + * + * If *n>2*, *slogdet* is performed separately on the trailing two dimensions + * for all inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * .. note:: The gradient is not properly defined on sign, so the gradient of + * it is not backwarded. + * .. note:: No gradient is backwarded when A is non-invertible. 
Please see + * the docs of operator det for detail. + * + * Examples:: + * + * Single matrix signed log determinant + * A = `[ [2., 3.], [1., 4.] ] + * sign, logabsdet = slogdet(A) + * sign = [1.] + * logabsdet = [1.609438] + * + * Batch matrix signed log determinant + * A = `[ `[ [2., 3.], [1., 4.] ], + * `[ [1., 2.], [2., 4.] ], + * `[ [1., 2.], [4., 3.] ] ] + * sign, logabsdet = slogdet(A) + * sign = [1., 0., -1.] + * logabsdet = [1.609438, -inf, 1.609438] + * + * + * Defined in src/operator/tensor/la_op.cc:L1031 + * }}} + * + * @param A Tensor of square matrix + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def linalg_slogdet(A : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the element-wise cosine of the input array. + * + * The input should be in radians (:math:`2\pi` rad equals 360 degrees). + * + * .. math:: + * cos([0, \pi/4, \pi/2]) = [1, 0.707, 0] + * + * The storage type of ``cos`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L90 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def cos(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Performs matrix inversion from a Cholesky factorization. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* is a triangular matrix (entries of upper or lower triangle are all zero) + * with positive diagonal. We compute: + * + * *out* = *A*\ :sup:`-T` \* *A*\ :sup:`-1` if *lower* = *true* + * *out* = *A*\ :sup:`-1` \* *A*\ :sup:`-T` if *lower* = *false* + * + * In other words, if *A* is the Cholesky factor of a symmetric positive definite matrix + * *B* (obtained by *potrf*), then + * + * *out* = *B*\ :sup:`-1` + * + * If *n>2*, *potri* is performed separately on the trailing two dimensions for all inputs + * (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * .. note:: Use this operator only if you are certain you need the inverse of *B*, and + * cannot use the Cholesky factor *A* (*potrf*), together with backsubstitution + * (*trsm*). The latter is numerically much safer, and also cheaper. + * + * Examples:: + * + * Single matrix inverse + * A = `[ [2.0, 0], [0.5, 2.0] ] + * potri(A) = `[ [0.26563, -0.0625], [-0.0625, 0.25] ] + * + * Batch matrix inverse + * A = `[ `[ [2.0, 0], [0.5, 2.0] ], `[ [4.0, 0], [1.0, 4.0] ] ] + * potri(A) = `[ `[ [0.26563, -0.0625], [-0.0625, 0.25] ], + * `[ [0.06641, -0.01562], [-0.01562, 0,0625] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L275 + * }}} + * + * @param A Tensor of lower triangular matrices + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def linalg_potri(A : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Performs pooling on the input. 
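+ *
+ * For instance, under the default ``valid`` convention defined below, a 2x2
+ * max pooling with stride 2 and no padding over a 4x4 plane yields
+ * floor((4 + 2*0 - 2)/2) + 1 = 2 per spatial axis, i.e. a 2x2 output.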
+ * + * The shapes for 1-D pooling are + * + * - **data** and **out**: *(batch_size, channel, width)* (NCW layout) or + * *(batch_size, width, channel)* (NWC layout), + * + * The shapes for 2-D pooling are + * + * - **data** and **out**: *(batch_size, channel, height, width)* (NCHW layout) or + * *(batch_size, height, width, channel)* (NHWC layout), + * + * out_height = f(height, kernel[0], pad[0], stride[0]) + * out_width = f(width, kernel[1], pad[1], stride[1]) + * + * The definition of *f* depends on ``pooling_convention``, which has two options: + * + * - **valid** (default):: + * + * f(x, k, p, s) = floor((x+2*p-k)/s)+1 + * + * - **full**, which is compatible with Caffe:: + * + * f(x, k, p, s) = ceil((x+2*p-k)/s)+1 + * + * When ``global_pool`` is set to be true, then global pooling is performed. It will reset + * ``kernel=(height, width)`` and set the appropiate padding to 0. + * + * Three pooling options are supported by ``pool_type``: + * + * - **avg**: average pooling + * - **max**: max pooling + * - **sum**: sum pooling + * - **lp**: Lp pooling + * + * For 3-D pooling, an additional *depth* dimension is added before + * *height*. Namely the input data and output will have shape *(batch_size, channel, depth, + * height, width)* (NCDHW layout) or *(batch_size, depth, height, width, channel)* (NDHWC layout). + * + * Notes on Lp pooling: + * + * Lp pooling was first introduced by this paper: https://arxiv.org/pdf/1204.3968.pdf. + * L-1 pooling is simply sum pooling, while L-inf pooling is simply max pooling. + * We can see that Lp pooling stands between those two, in practice the most common value for p is 2. + * + * For each window ``X``, the mathematical expression for Lp pooling is: + * + * :math:`f(X) = \sqrt[p]{\sum_{x}^{X} x^p}` + * + * + * + * Defined in src/operator/nn/pooling.cc:L417 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def Pooling(po: PoolingParam) : Array[NDArray] + + /** + * + * {{{ + * + * Applies the softmin function. + * + * The resulting array contains elements in the range (0,1) and the elements along the given axis sum + * up to 1. + * + * .. math:: + * softmin(\mathbf{z/t})_j = \frac{e^{-z_j/t}}{\sum_{k=1}^K e^{-z_k/t}} + * + * for :math:`j = 1, ..., K` + * + * t is the temperature parameter in softmax function. By default, t equals 1.0 + * + * Example:: + * + * x = `[ [ 1. 2. 3.] + * [ 3. 2. 1.] ] + * + * softmin(x,axis=0) = `[ [ 0.88079703, 0.5, 0.11920292], + * [ 0.11920292, 0.5, 0.88079703] ] + * + * softmin(x,axis=1) = `[ [ 0.66524094, 0.24472848, 0.09003057], + * [ 0.09003057, 0.24472848, 0.66524094] ] + * + * + * + * Defined in src/operator/nn/softmin.cc:L57 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def softmin(po: softminParam) : Array[NDArray] + + /** + * + * {{{ + * + * Adds all input arguments element-wise. + * + * .. math:: + * add\_n(a_1, a_2, ..., a_n) = a_1 + a_2 + ... + a_n + * + * ``add_n`` is potentially more efficient than calling ``add`` by `n` times. + * + * The storage type of ``add_n`` output depends on storage types of inputs + * + * - add_n(row_sparse, row_sparse, ..) 
= row_sparse + * - add_n(default, csr, default) = default + * - add_n(any input combinations longer than 4 (>4) with at least one default type) = default + * - otherwise, ``add_n`` falls all inputs back to default storage and generates default storage + * + * + * + * Defined in src/operator/tensor/elemwise_sum.cc:L155 + * }}} + * + * @param args Positional input arguments + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def ElementWiseSum(args : Array[org.apache.mxnet.javaapi.NDArray], out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Compute the determinant of a matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* is a square matrix. We compute: + * + * *out* = *det(A)* + * + * If *n>2*, *det* is performed separately on the trailing two dimensions + * for all inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * .. note:: There is no gradient backwarded when A is non-invertible (which is + * equivalent to det(A) = 0) because zero is rarely hit upon in float + * point computation and the Jacobi's formula on determinant gradient + * is not computationally efficient when A is non-invertible. + * + * Examples:: + * + * Single matrix determinant + * A = `[ [1., 4.], [2., 3.] ] + * det(A) = [-5.] + * + * Batch matrix determinant + * A = `[ `[ [1., 4.], [2., 3.] ], + * `[ [2., 3.], [1., 4.] ] ] + * det(A) = [-5., 5.] + * + * + * Defined in src/operator/tensor/la_op.cc:L973 + * }}} + * + * @param A Tensor of square matrix + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def linalg_det(A : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns argmax indices of each channel from the input array. + * + * The result will be an NDArray of shape (num_channel,). + * + * In case of multiple occurrences of the maximum values, the indices corresponding to the first occurrence + * are returned. + * + * Examples:: + * + * x = `[ [ 0., 1., 2.], + * [ 3., 4., 5.] ] + * + * argmax_channel(x) = [ 2., 2.] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L97 + * }}} + * + * @param data The input array + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def argmax_channel(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Check if all the float numbers in all the arrays are finite (used for AMP) + * + * + * Defined in src/operator/contrib/all_finite.cc:L133 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def multi_all_finite(po: multi_all_finiteParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise exponential value of the input. + * + * .. math:: + * exp(x) = e^x \approx 2.718^x + * + * Example:: + * + * exp([0, 1, 2]) = [1., 2.71828175, 7.38905621] + * + * The storage type of ``exp`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L63 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def exp(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Generates 2D sampling grid for bilinear sampling. + * }}} + * + * @param data Input data to the function. + * @param transform_type The type of transformation. For `affine`, input data should be an affine matrix of size (batch, 6). 
For `warp`, input data should be an optical flow of size (batch, 2, h, w). + * @param target_shape Specifies the output shape (H, W). This is required if transformation type is `affine`. If transformation type is `warp`, this parameter is ignored. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def GridGenerator(data : org.apache.mxnet.javaapi.NDArray, transform_type : String, target_shape : org.apache.mxnet.javaapi.Shape, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns the result of element-wise **logical and** with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_logical_and(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L154 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def broadcast_logical_and(lhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns a 1D int64 array containing the size of data. + * + * Example:: + * + * size_array(`[ [1,2,3,4], [5,6,7,8] ]) = [8] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L625 + * }}} + * + * @param data Input Array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def size_array(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * gamma distributions with parameters *alpha* (shape) and *beta* (scale). + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Examples:: + * + * alpha = [ 0.0, 2.5 ] + * beta = [ 1.0, 0.7 ] + * + * // Draw a single sample for each distribution + * sample_gamma(alpha, beta) = [ 0. , 2.25797319] + * + * // Draw a vector containing two samples for each distribution + * sample_gamma(alpha, beta, shape=(2)) = `[ [ 0. , 0. ], + * [ 2.25797319, 1.70734084] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L280 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def sample_gamma(po: sample_gammaParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise squared value of the input. + * + * .. math:: + * square(x) = x^2 + * + * Example:: + * + * square([2, 3, 4]) = [4, 9, 16] + * + * The storage type of ``square`` output depends upon the input storage type: + * + * - square(default) = default + * - square(row_sparse) = row_sparse + * - square(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L118 + * }}} + * + * @param data The input array. 
+ * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def square(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise cube-root value of the input. + * + * .. math:: + * cbrt(x) = \sqrt[3]{x} + * + * Example:: + * + * cbrt([1, 8, -125]) = [1, 2, -5] + * + * The storage type of ``cbrt`` output depends upon the input storage type: + * + * - cbrt(default) = default + * - cbrt(row_sparse) = row_sparse + * - cbrt(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L216 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def cbrt(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Pads an input array with a constant or edge values of the array. + * + * .. note:: `Pad` is deprecated. Use `pad` instead. + * + * .. note:: Current implementation only supports 4D and 5D input arrays with padding applied + * only on axes 1, 2 and 3. Expects axes 4 and 5 in `pad_width` to be zero. + * + * This operation pads an input array with either a `constant_value` or edge values + * along each axis of the input array. The amount of padding is specified by `pad_width`. + * + * `pad_width` is a tuple of integer padding widths for each axis of the format + * ``(before_1, after_1, ... , before_N, after_N)``. The `pad_width` should be of length ``2*N`` + * where ``N`` is the number of dimensions of the array. + * + * For dimension ``N`` of the input array, ``before_N`` and ``after_N`` indicates how many values + * to add before and after the elements of the array along dimension ``N``. + * The widths of the higher two dimensions ``before_1``, ``after_1``, ``before_2``, + * ``after_2`` must be 0. + * + * Example:: + * + * x = `[ [`[ [ 1. 2. 3.] + * [ 4. 5. 6.] ] + * + * `[ [ 7. 8. 9.] + * [ 10. 11. 12.] ] ] + * + * + * `[ `[ [ 11. 12. 13.] + * [ 14. 15. 16.] ] + * + * `[ [ 17. 18. 19.] + * [ 20. 21. 22.] ] ] ] + * + * pad(x,mode="edge", pad_width=(0,0,0,0,1,1,1,1)) = + * + * `[ [`[ [ 1. 1. 2. 3. 3.] + * [ 1. 1. 2. 3. 3.] + * [ 4. 4. 5. 6. 6.] + * [ 4. 4. 5. 6. 6.] ] + * + * `[ [ 7. 7. 8. 9. 9.] + * [ 7. 7. 8. 9. 9.] + * [ 10. 10. 11. 12. 12.] + * [ 10. 10. 11. 12. 12.] ] ] + * + * + * `[ `[ [ 11. 11. 12. 13. 13.] + * [ 11. 11. 12. 13. 13.] + * [ 14. 14. 15. 16. 16.] + * [ 14. 14. 15. 16. 16.] ] + * + * `[ [ 17. 17. 18. 19. 19.] + * [ 17. 17. 18. 19. 19.] + * [ 20. 20. 21. 22. 22.] + * [ 20. 20. 21. 22. 22.] ] ] ] + * + * pad(x, mode="constant", constant_value=0, pad_width=(0,0,0,0,1,1,1,1)) = + * + * `[ [`[ [ 0. 0. 0. 0. 0.] + * [ 0. 1. 2. 3. 0.] + * [ 0. 4. 5. 6. 0.] + * [ 0. 0. 0. 0. 0.] ] + * + * `[ [ 0. 0. 0. 0. 0.] + * [ 0. 7. 8. 9. 0.] + * [ 0. 10. 11. 12. 0.] + * [ 0. 0. 0. 0. 0.] ] ] + * + * + * `[ `[ [ 0. 0. 0. 0. 0.] + * [ 0. 11. 12. 13. 0.] + * [ 0. 14. 15. 16. 0.] + * [ 0. 0. 0. 0. 0.] ] + * + * `[ [ 0. 0. 0. 0. 0.] + * [ 0. 17. 18. 19. 0.] + * [ 0. 20. 21. 22. 0.] + * [ 0. 0. 0. 0. 0.] ] ] ] + * + * + * + * + * Defined in src/operator/pad.cc:L766 + * }}} + * + * @param data An n-dimensional input array. + * @param mode Padding type to use. "constant" pads with `constant_value` "edge" pads using the edge values of the input array "reflect" pads by reflecting values with respect to the edges. + * @param pad_width Widths of the padding regions applied to the edges of each axis. It is a tuple of integer padding widths for each axis of the format ``(before_1, after_1, ... 
, before_N, after_N)``. It should be of length ``2*N`` where ``N`` is the number of dimensions of the array.This is equivalent to pad_width in numpy.pad, but flattened. + * @param constant_value The value used for padding when `mode` is "constant". + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def pad(data : org.apache.mxnet.javaapi.NDArray, mode : String, pad_width : org.apache.mxnet.javaapi.Shape, constant_value : java.lang.Double, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise inverse tangent of the input array. + * + * The output is in the closed interval :math:`[-\pi/2, \pi/2]` + * + * .. math:: + * arctan([-1, 0, 1]) = [-\pi/4, 0, \pi/4] + * + * The storage type of ``arctan`` output depends upon the input storage type: + * + * - arctan(default) = default + * - arctan(row_sparse) = row_sparse + * - arctan(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L227 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def arctan(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * + * Calculate the mean and variance of `data`. + * + * The mean and variance are calculated by aggregating the contents of data across axes. + * If x is 1-D and axes = [0] this is just the mean and variance of a vector. + * + * Example: + * + * x = `[ [1, 2, 3], [4, 5, 6] ] + * mean, var = moments(data=x, axes=[0]) + * mean = [2.5, 3.5, 4.5] + * var = [2.25, 2.25, 2.25] + * mean, var = moments(data=x, axes=[1]) + * mean = [2.0, 5.0] + * var = [0.66666667, 0.66666667] + * mean, var = moments(data=x, axis=[0, 1]) + * mean = [3.5] + * var = [2.9166667] + * + * + * + * Defined in src/operator/nn/moments.cc:L54 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def moments(po: momentsParam) : Array[NDArray] + + /** + * + * {{{ + * + * Computes 1D or 2D transposed convolution (aka fractionally strided convolution) of the input tensor. This operation can be seen as the gradient of Convolution operation with respect to its input. Convolution usually reduces the size of the input. Transposed convolution works the other way, going from a smaller input to a larger output while preserving the connectivity pattern. + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def Deconvolution(po: DeconvolutionParam) : Array[NDArray] + + /** + * + * {{{ + * + * Performs general matrix multiplication. + * Input are tensors *A*, *B*, each of dimension *n >= 2* and having the same shape + * on the leading *n-2* dimensions. + * + * If *n=2*, the BLAS3 function *gemm* is performed: + * + * *out* = *alpha* \* *op*\ (*A*) \* *op*\ (*B*) + * + * Here *alpha* is a scalar parameter and *op()* is either the identity or the matrix + * transposition (depending on *transpose_a*, *transpose_b*). + * + * If *n>2*, *gemm* is performed separately for a batch of matrices. The column indices of the matrices + * are given by the last dimensions of the tensors, the row indices by the axis specified with the *axis* + * parameter. By default, the trailing two dimensions will be used for matrix encoding. + * + * For a non-default axis parameter, the operation performed is equivalent to a series of swapaxes/gemm/swapaxes + * calls. For example let *A*, *B* be 5 dimensional tensors. 
Then gemm(*A*, *B*, axis=1) is equivalent to + * the following without the overhead of the additional swapaxis operations:: + * + * A1 = swapaxes(A, dim1=1, dim2=3) + * B1 = swapaxes(B, dim1=1, dim2=3) + * C = gemm2(A1, B1) + * C = swapaxis(C, dim1=1, dim2=3) + * + * When the input data is of type float32 and the environment variables MXNET_CUDA_ALLOW_TENSOR_CORE + * and MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION are set to 1, this operator will try to use + * pseudo-float16 precision (float32 math with float16 I/O) precision in order to use + * Tensor Cores on suitable NVIDIA GPUs. This can sometimes give significant speedups. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix multiply + * A = `[ [1.0, 1.0], [1.0, 1.0] ] + * B = `[ [1.0, 1.0], [1.0, 1.0], [1.0, 1.0] ] + * gemm2(A, B, transpose_b=True, alpha=2.0) + * = `[ [4.0, 4.0, 4.0], [4.0, 4.0, 4.0] ] + * + * Batch matrix multiply + * A = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * B = `[ `[ [1.0, 1.0] ], `[ [0.1, 0.1] ] ] + * gemm2(A, B, transpose_b=True, alpha=2.0) + * = `[ `[ [4.0] ], `[ [0.04 ] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L163 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def linalg_gemm2(po: linalg_gemm2Param) : Array[NDArray] + + /** + * + * {{{ + * + * Returns the element-wise inverse hyperbolic sine of the input array, \ + * computed element-wise. + * + * The storage type of ``arcsinh`` output depends upon the input storage type: + * + * - arcsinh(default) = default + * - arcsinh(row_sparse) = row_sparse + * - arcsinh(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L436 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def arcsinh(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise gauss error function of the input. + * + * Example:: + * + * erf([0, -1., 10.]) = [0., -0.8427, 1.] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L886 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def erf(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the sum of the logarithms of the diagonal elements of a square matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* must be square with positive diagonal entries. We sum the natural + * logarithms of the diagonal elements, the result has shape (1,). + * + * If *n>2*, *sumlogdiag* is performed separately on the trailing two dimensions for all + * inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix reduction + * A = `[ [1.0, 1.0], [1.0, 7.0] ] + * sumlogdiag(A) = [1.9459] + * + * Batch matrix reduction + * A = `[ `[ [1.0, 1.0], [1.0, 7.0] ], `[ [3.0, 0], [0, 17.0] ] ] + * sumlogdiag(A) = [1.9459, 3.9318] + * + * + * Defined in src/operator/tensor/la_op.cc:L445 + * }}} + * + * @param A Tensor of square matrices + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def linalg_sumlogdiag(A : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Stops gradient computation. 
+ * + * Stops the accumulated gradient of the inputs from flowing through this operator + * in the backward direction. In other words, this operator prevents the contribution + * of its inputs to be taken into account for computing gradients. + * + * Example:: + * + * v1 = [1, 2] + * v2 = [0, 1] + * a = Variable('a') + * b = Variable('b') + * b_stop_grad = stop_gradient(3 * b) + * loss = MakeLoss(b_stop_grad + a) + * + * executor = loss.simple_bind(ctx=cpu(), a=(1,2), b=(1,2)) + * executor.forward(is_train=True, a=v1, b=v2) + * executor.outputs + * [ 1. 5.] + * + * executor.backward() + * executor.grad_arrays + * [ 0. 0.] + * [ 1. 1.] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L327 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def stop_gradient(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise ceiling of the input. + * + * The ceil of the scalar x is the smallest integer i, such that i >= x. + * + * Example:: + * + * ceil([-2.1, -1.9, 1.5, 1.9, 2.1]) = [-2., -1., 2., 2., 3.] + * + * The storage type of ``ceil`` output depends upon the input storage type: + * + * - ceil(default) = default + * - ceil(row_sparse) = row_sparse + * - ceil(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L818 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def ceil(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Draw random samples from a uniform distribution. + * + * .. note:: The existing alias ``uniform`` is deprecated. + * + * Samples are uniformly distributed over the half-open interval *[low, high)* + * (includes *low*, but excludes *high*). + * + * Example:: + * + * uniform(low=0, high=1, shape=(2,2)) = `[ [ 0.60276335, 0.85794562], + * [ 0.54488319, 0.84725171] ] + * + * + * + * Defined in src/operator/random/sample_op.cc:L96 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def uniform(po: uniformParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns the hypotenuse of a right angled triangle, given its "legs" + * with broadcasting. + * + * It is equivalent to doing :math:`sqrt(x_1^2 + x_2^2)`. + * + * Example:: + * + * x = `[ [ 3., 3., 3.] ] + * + * y = `[ [ 4.], + * [ 4.] ] + * + * broadcast_hypot(x, y) = `[ [ 5., 5., 5.], + * [ 5., 5., 5.] ] + * + * z = `[ [ 0.], + * [ 4.] ] + * + * broadcast_hypot(x, z) = `[ [ 3., 3., 3.], + * [ 5., 5., 5.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L158 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def broadcast_hypot(lhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise rounded value to the nearest integer of the input. + * + * .. note:: + * - For input ``n.5`` ``rint`` returns ``n`` while ``round`` returns ``n+1``. + * - For input ``-n.5`` both ``rint`` and ``round`` returns ``-n-1``. + * + * Example:: + * + * rint([-1.5, 1.5, -1.9, 1.9, 2.1]) = [-2., 1., -2., 2., 2.] 
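+ *
+ * A minimal Scala sketch of the same call (assuming the typed wrapper is
+ * reachable through the Scala-side ``NDArray.api``)::
+ *
+ *   import org.apache.mxnet.{NDArray, Shape}
+ *
+ *   val x = NDArray.array(Array(-1.5f, 1.5f, 2.1f), shape = Shape(3))
+ *   val r = NDArray.api.rint(x).head   // holds [-2., 1., 2.]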
+ * + * The storage type of ``rint`` output depends upon the input storage type: + * + * - rint(default) = default + * - rint(row_sparse) = row_sparse + * - rint(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L799 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def rint(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Reverses the order of elements along given axis while preserving array shape. + * Note: reverse and flip are equivalent. We use reverse in the following examples. + * Examples:: + * x = `[ [ 0., 1., 2., 3., 4.], + * [ 5., 6., 7., 8., 9.] ] + * reverse(x, axis=0) = `[ [ 5., 6., 7., 8., 9.], + * [ 0., 1., 2., 3., 4.] ] + * reverse(x, axis=1) = `[ [ 4., 3., 2., 1., 0.], + * [ 9., 8., 7., 6., 5.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L832 + * }}} + * + * @param data Input data array + * @param axis The axis which to reverse elements. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def reverse(data : org.apache.mxnet.javaapi.NDArray, axis : org.apache.mxnet.javaapi.Shape, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Computes rectified linear activation. + * + * .. math:: + * max(features, 0) + * + * The storage type of ``relu`` output depends upon the input storage type: + * + * - relu(default) = default + * - relu(row_sparse) = row_sparse + * - relu(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L85 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def relu(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Reshapes the input array. + * .. note:: ``Reshape`` is deprecated, use ``reshape`` + * Given an array and a shape, this function returns a copy of the array in the new shape. + * The shape is a tuple of integers such as (2,3,4). The size of the new shape should be same as the size of the input array. + * Example:: + * reshape([1,2,3,4], shape=(2,2)) = `[ [1,2], [3,4] ] + * Some dimensions of the shape can take special values from the set {0, -1, -2, -3, -4}. The significance of each is explained below: + * - ``0`` copy this dimension from the input to the output shape. + * Example:: + * - input shape = (2,3,4), shape = (4,0,2), output shape = (4,3,2) + * - input shape = (2,3,4), shape = (2,0,0), output shape = (2,3,4) + * - ``-1`` infers the dimension of the output shape by using the remainder of the input dimensions + * keeping the size of the new array same as that of the input array. + * At most one dimension of shape can be -1. + * Example:: + * - input shape = (2,3,4), shape = (6,1,-1), output shape = (6,1,4) + * - input shape = (2,3,4), shape = (3,-1,8), output shape = (3,1,8) + * - input shape = (2,3,4), shape=(-1,), output shape = (24,) + * - ``-2`` copy all/remainder of the input dimensions to the output shape. + * Example:: + * - input shape = (2,3,4), shape = (-2,), output shape = (2,3,4) + * - input shape = (2,3,4), shape = (2,-2), output shape = (2,3,4) + * - input shape = (2,3,4), shape = (-2,1,1), output shape = (2,3,4,1,1) + * - ``-3`` use the product of two consecutive dimensions of the input shape as the output dimension. 
+ * Example:: + * - input shape = (2,3,4), shape = (-3,4), output shape = (6,4) + * - input shape = (2,3,4,5), shape = (-3,-3), output shape = (6,20) + * - input shape = (2,3,4), shape = (0,-3), output shape = (2,12) + * - input shape = (2,3,4), shape = (-3,-2), output shape = (6,4) + * - ``-4`` split one dimension of the input into two dimensions passed subsequent to -4 in shape (can contain -1). + * Example:: + * - input shape = (2,3,4), shape = (-4,1,2,-2), output shape =(1,2,3,4) + * - input shape = (2,3,4), shape = (2,-4,-1,3,-2), output shape = (2,1,3,4) + * If the argument `reverse` is set to 1, then the special values are inferred from right to left. + * Example:: + * - without reverse=1, for input shape = (10,5,4), shape = (-1,0), output shape would be (40,5) + * - with reverse=1, output shape will be (50,4). + * + * + * Defined in src/operator/tensor/matrix_op.cc:L175 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def reshape(po: reshapeParam) : Array[NDArray] + + /** + * + * {{{ + * + * Extracts the diagonal entries of a square matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, then *A* represents a single square matrix which diagonal elements get extracted as a 1-dimensional tensor. + * + * If *n>2*, then *A* represents a batch of square matrices on the trailing two dimensions. The extracted diagonals are returned as an *n-1*-dimensional tensor. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix diagonal extraction + * A = `[ [1.0, 2.0], + * [3.0, 4.0] ] + * + * extractdiag(A) = [1.0, 4.0] + * + * extractdiag(A, 1) = [2.0] + * + * Batch matrix diagonal extraction + * A = `[ `[ [1.0, 2.0], + * [3.0, 4.0] ], + * `[ [5.0, 6.0], + * [7.0, 8.0] ] ] + * + * extractdiag(A) = `[ [1.0, 4.0], + * [5.0, 8.0] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L495 + * }}} + * + * @param A Tensor of square matrices + * @param offset Offset of the diagonal versus the main diagonal. 0 corresponds to the main diagonal, a negative/positive value to diagonals below/above the main diagonal. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def linalg_extractdiag(A : org.apache.mxnet.javaapi.NDArray, offset : java.lang.Integer, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns the element-wise inverse hyperbolic cosine of the input array, \ + * computed element-wise. + * + * The storage type of ``arccosh`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L474 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def arccosh(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Converts a batch of index arrays into an array of flat indices. The operator follows numpy conventions so a single multi index is given by a column of the input matrix. The leading dimension may be left unspecified by using -1 as placeholder. + * + * Examples:: + * + * A = `[ [3,6,6],[4,5,1] ] + * ravel(A, shape=(7,6)) = [22,41,37] + * ravel(A, shape=(-1,6)) = [22,41,37] + * + * + * + * Defined in src/operator/tensor/ravel.cc:L42 + * }}} + * + * @param data Batch of multi-indices + * @param shape Shape of the array into which the multi-indices apply. 
+ * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def ravel_multi_index(data : org.apache.mxnet.javaapi.NDArray, shape : org.apache.mxnet.javaapi.Shape, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise sign of the input. + * + * Example:: + * + * sign([-2, 0, 3]) = [-1, 0, 1] + * + * The storage type of ``sign`` output depends upon the input storage type: + * + * - sign(default) = default + * - sign(row_sparse) = row_sparse + * - sign(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L759 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def sign(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Draw random samples from an exponential distribution. + * + * Samples are distributed according to an exponential distribution parametrized by *lambda* (rate). + * + * Example:: + * + * exponential(lam=4, shape=(2,2)) = `[ [ 0.0097189 , 0.08999364], + * [ 0.04146638, 0.31715935] ] + * + * + * Defined in src/operator/random/sample_op.cc:L137 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def random_exponential(po: random_exponentialParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise sum of the input arrays with broadcasting. + * + * `broadcast_plus` is an alias to the function `broadcast_add`. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_add(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] + * + * broadcast_plus(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] + * + * Supported sparse operations: + * + * broadcast_add(csr, dense(1D)) = dense + * broadcast_add(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L58 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def broadcast_plus(lhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Constructs a square matrix with the input as diagonal. + * Input is a tensor *A* of dimension *n >= 1*. + * + * If *n=1*, then *A* represents the diagonal entries of a single square matrix. This matrix will be returned as a 2-dimensional tensor. + * If *n>1*, then *A* represents a batch of diagonals of square matrices. The batch of diagonal matrices will be returned as an *n+1*-dimensional tensor. + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single diagonal matrix construction + * A = [1.0, 2.0] + * + * makediag(A) = `[ [1.0, 0.0], + * [0.0, 2.0] ] + * + * makediag(A, 1) = `[ [0.0, 1.0, 0.0], + * [0.0, 0.0, 2.0], + * [0.0, 0.0, 0.0] ] + * + * Batch diagonal matrix construction + * A = `[ [1.0, 2.0], + * [3.0, 4.0] ] + * + * makediag(A) = `[ `[ [1.0, 0.0], + * [0.0, 2.0] ], + * `[ [3.0, 0.0], + * [0.0, 4.0] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L547 + * }}} + * + * @param A Tensor of diagonal entries + * @param offset Offset of the diagonal versus the main diagonal. 0 corresponds to the main diagonal, a negative/positive value to diagonals below/above the main diagonal. 
+ * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def linalg_makediag(A : org.apache.mxnet.javaapi.NDArray, offset : java.lang.Integer, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Mixed Precision version of Phase I of lamb update + * it performs the following operations and returns g:. + * + * Link to paper: https://arxiv.org/pdf/1904.00962.pdf + * + * .. math:: + * \begin{gather*} + * grad32 = grad(float16) * rescale_grad + * if (grad < -clip_gradient) + * then + * grad = -clip_gradient + * if (grad > clip_gradient) + * then + * grad = clip_gradient + * + * mean = beta1 * mean + (1 - beta1) * grad; + * variance = beta2 * variance + (1. - beta2) * grad ^ 2; + * + * if (bias_correction) + * then + * mean_hat = mean / (1. - beta1^t); + * var_hat = var / (1 - beta2^t); + * g = mean_hat / (var_hat^(1/2) + epsilon) + wd * weight32; + * else + * g = mean / (var_data^(1/2) + epsilon) + wd * weight32; + * \end{gather*} + * + * + * + * Defined in src/operator/optimizer_op.cc:L1033 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def mp_lamb_update_phase1(po: mp_lamb_update_phase1Param) : Array[NDArray] + + /** + * + * {{{ + * + * Phase II of lamb update it performs the following operations and updates grad. + * + * Link to paper: https://arxiv.org/pdf/1904.00962.pdf + * + * .. math:: + * \begin{gather*} + * if (lower_bound >= 0) + * then + * r1 = max(r1, lower_bound) + * if (upper_bound >= 0) + * then + * r1 = max(r1, upper_bound) + * + * if (r1 == 0 or r2 == 0) + * then + * lr = lr + * else + * lr = lr * (r1/r2) + * weight = weight - lr * g + * \end{gather*} + * + * + * + * Defined in src/operator/optimizer_op.cc:L992 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def lamb_update_phase2(po: lamb_update_phase2Param) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the value of the PDF of samples of + * negative binomial distributions with parameters *k* (failure limit) and *p* (failure probability). + * + * *k* and *p* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *k* and *p*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *k* and *p* at index *i*. + * + * Examples:: + * + * random_pdf_negative_binomial(sample=`[ [1,2,3,4] ], k=[1], p=a[0.5]) = + * `[ [0.25, 0.125, 0.0625, 0.03125] ] + * + * # Note that k may be real-valued + * sample = `[ [1,2,3,4], + * [1,2,3,4] ] + * random_pdf_negative_binomial(sample=sample, k=[1, 1.5], p=[0.5, 0.5]) = + * `[ [0.25, 0.125, 0.0625, 0.03125 ], + * [0.26516506, 0.16572815, 0.09667476, 0.05437956] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L308 + * }}} + * + * @param sample Samples from the distributions. + * @param k Limits of unsuccessful experiments. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @param p Failure probabilities in each experiment. 
+ * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def random_pdf_negative_binomial(sample : org.apache.mxnet.javaapi.NDArray, k : org.apache.mxnet.javaapi.NDArray, is_log : java.lang.Boolean, p : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise inverse cube-root value of the input. + * + * .. math:: + * rcbrt(x) = 1/\sqrt[3]{x} + * + * Example:: + * + * rcbrt([1,8,-125]) = [1.0, 0.5, -0.2] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L269 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def rcbrt(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Subtracts arguments element-wise. + * + * The storage type of ``elemwise_sub`` output depends on storage types of inputs + * + * - elemwise_sub(row_sparse, row_sparse) = row_sparse + * - elemwise_sub(csr, csr) = csr + * - elemwise_sub(default, csr) = default + * - elemwise_sub(csr, default) = default + * - elemwise_sub(default, rsp) = default + * - elemwise_sub(rsp, default) = default + * - otherwise, ``elemwise_sub`` generates output with default storage + * }}} + * + * @param lhs first input + * @param rhs second input + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def elemwise_sub(lhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Applies a spatial transformer to input feature map. + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def SpatialTransformer(po: SpatialTransformerParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise ``log(1 + x)`` value of the input. + * + * This function is more accurate than ``log(1 + x)`` for small ``x`` so that + * :math:`1+x\approx 1` + * + * The storage type of ``log1p`` output depends upon the input storage type: + * + * - log1p(default) = default + * - log1p(row_sparse) = row_sparse + * - log1p(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L206 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def log1p(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Casts all elements of the input to a new type. + * + * .. note:: ``Cast`` is deprecated. Use ``cast`` instead. + * + * Example:: + * + * cast([0.9, 1.3], dtype='int32') = [0, 1] + * cast([1e20, 11.1], dtype='float16') = [inf, 11.09375] + * cast([300, 11.1, 10.9, -1, -3], dtype='uint8') = [44, 11, 10, 255, 253] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L665 + * }}} + * + * @param data The input. + * @param dtype Output data type. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def cast(data : org.apache.mxnet.javaapi.NDArray, dtype : String, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the max of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L32 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def max(po: maxParam) : Array[NDArray] + + /** + * + * {{{ + * + * Calculate Smooth L1 Loss(lhs, scalar) by summing + * + * .. 
math:: + * + * f(x) = + * \begin{cases} + * (\sigma x)^2/2,& \text{if }x < 1/\sigma^2\\ + * |x|-0.5/\sigma^2,& \text{otherwise} + * \end{cases} + * + * where :math:`x` is an element of the tensor *lhs* and :math:`\sigma` is the scalar. + * + * Example:: + * + * smooth_l1([1, 2, 3, 4]) = [0.5, 1.5, 2.5, 3.5] + * smooth_l1([1, 2, 3, 4], scalar=1) = [0.5, 1.5, 2.5, 3.5] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_scalar_op_extended.cc:L108 + * }}} + * + * @param data source input + * @param scalar scalar input + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def smooth_l1(data : org.apache.mxnet.javaapi.NDArray, scalar : java.lang.Float, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise product of the input arrays with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_mul(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * Supported sparse operations: + * + * broadcast_mul(csr, dense(1D)) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L146 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def broadcast_mul(lhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise square-root value of the input. + * + * .. math:: + * \textrm{sqrt}(x) = \sqrt{x} + * + * Example:: + * + * sqrt([4, 9, 16]) = [2, 3, 4] + * + * The storage type of ``sqrt`` output depends upon the input storage type: + * + * - sqrt(default) = default + * - sqrt(row_sparse) = row_sparse + * - sqrt(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L142 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def sqrt(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Compute the LARS coefficients of multiple weights and grads from their sums of square" + * + * + * Defined in src/operator/contrib/multi_lars.cc:L37 + * }}} + * + * @param lrs Learning rates to scale by LARS coefficient + * @param weights_sum_sq sum of square of weights arrays + * @param grads_sum_sq sum of square of gradients arrays + * @param wds weight decays + * @param eta LARS eta + * @param eps LARS eps + * @param rescale_grad Gradient rescaling factor + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def multi_lars(lrs : org.apache.mxnet.javaapi.NDArray, weights_sum_sq : org.apache.mxnet.javaapi.NDArray, grads_sum_sq : org.apache.mxnet.javaapi.NDArray, wds : org.apache.mxnet.javaapi.NDArray, eta : java.lang.Float, eps : java.lang.Float, rescale_grad : java.lang.Float, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns the result of element-wise **lesser than or equal to** (<=) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_lesser_equal(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] 
] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L136 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def broadcast_lesser_equal(lhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Divides arguments element-wise. + * + * The storage type of ``elemwise_div`` output is always dense + * }}} + * + * @param lhs first input + * @param rhs second input + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def elemwise_div(lhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns the result of element-wise **not equal to** (!=) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_not_equal(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L64 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def broadcast_not_equal(lhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Takes elements from an input array along the given axis. + * + * This function slices the input array along a particular axis with the provided indices. + * + * Given data tensor of rank r >= 1, and indices tensor of rank q, gather entries of the axis + * dimension of data (by default outer-most one as axis=0) indexed by indices, and concatenates them + * in an output tensor of rank q + (r - 1). + * + * Examples:: + * + * x = [4. 5. 6.] + * + * // Trivial case, take the second element along the first axis. + * + * take(x, [1]) = [ 5. ] + * + * // The other trivial case, axis=-1, take the third element along the first axis + * + * take(x, [3], axis=-1, mode='clip') = [ 6. ] + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] ] + * + * // In this case we will get rows 0 and 1, then 1 and 2. Along axis 0 + * + * take(x, `[ [0,1],[1,2] ]) = `[ `[ [ 1., 2.], + * [ 3., 4.] ], + * + * `[ [ 3., 4.], + * [ 5., 6.] ] ] + * + * // In this case we will get rows 0 and 1, then 1 and 2 (calculated by wrapping around). + * // Along axis 1 + * + * take(x, `[ [0, 3], [-1, -2] ], axis=1, mode='wrap') = `[ `[ [ 1. 2.] + * [ 2. 1.] ] + * + * `[ [ 3. 4.] + * [ 4. 3.] ] + * + * `[ [ 5. 6.] + * [ 6. 5.] ] ] + * + * The storage type of ``take`` output depends upon the input storage type: + * + * - take(default, default) = default + * - take(csr, default, axis=0) = csr + * + * + * + * Defined in src/operator/tensor/indexing_op.cc:L718 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def take(po: takeParam) : Array[NDArray] + + /** + * + * {{{ + * + * Takes the last element of a sequence. + * + * This function takes an n-dimensional input array of the form + * [max_sequence_length, batch_size, other_feature_dims] and returns a (n-1)-dimensional array + * of the form [batch_size, other_feature_dims]. + * + * Parameter `sequence_length` is used to handle variable-length sequences. `sequence_length` should be + * an input array of positive ints of dimension [batch_size]. 
To use this parameter, + * set `use_sequence_length` to `True`, otherwise each example in the batch is assumed + * to have the max sequence length. + * + * .. note:: Alternatively, you can also use `take` operator. + * + * Example:: + * + * x = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.], + * [ 7., 8., 9.] ], + * + * `[ [ 10., 11., 12.], + * [ 13., 14., 15.], + * [ 16., 17., 18.] ], + * + * `[ [ 19., 20., 21.], + * [ 22., 23., 24.], + * [ 25., 26., 27.] ] ] + * + * // returns last sequence when sequence_length parameter is not used + * SequenceLast(x) = `[ [ 19., 20., 21.], + * [ 22., 23., 24.], + * [ 25., 26., 27.] ] + * + * // sequence_length is used + * SequenceLast(x, sequence_length=[1,1,1], use_sequence_length=True) = + * `[ [ 1., 2., 3.], + * [ 4., 5., 6.], + * [ 7., 8., 9.] ] + * + * // sequence_length is used + * SequenceLast(x, sequence_length=[1,2,3], use_sequence_length=True) = + * `[ [ 1., 2., 3.], + * [ 13., 14., 15.], + * [ 25., 26., 27.] ] + * + * + * + * Defined in src/operator/sequence_last.cc:L106 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def SequenceLast(po: SequenceLastParam) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the norm on an NDArray. + * + * This operator computes the norm on an NDArray with the specified axis, depending + * on the value of the ord parameter. By default, it computes the L2 norm on the entire + * array. Currently only ord=2 supports sparse ndarrays. + * + * Examples:: + * + * x = `[ `[ [1, 2], + * [3, 4] ], + * `[ [2, 2], + * [5, 6] ] ] + * + * norm(x, ord=2, axis=1) = `[ [3.1622777 4.472136 ] + * [5.3851647 6.3245554] ] + * + * norm(x, ord=1, axis=1) = `[ [4., 6.], + * [7., 8.] ] + * + * rsp = x.cast_storage('row_sparse') + * + * norm(rsp) = [5.47722578] + * + * csr = x.cast_storage('csr') + * + * norm(csr) = [5.47722578] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_norm_value.cc:L89 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def norm(po: normParam) : Array[NDArray] + + /** + * + * {{{ + * + * LQ factorization for general matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, we compute the LQ factorization (LAPACK *gelqf*, followed by *orglq*). *A* + * must have shape *(x, y)* with *x <= y*, and must have full rank *=x*. The LQ + * factorization consists of *L* with shape *(x, x)* and *Q* with shape *(x, y)*, so + * that: + * + * *A* = *L* \* *Q* + * + * Here, *L* is lower triangular (upper triangle equal to zero) with nonzero diagonal, + * and *Q* is row-orthonormal, meaning that + * + * *Q* \* *Q*\ :sup:`T` + * + * is equal to the identity matrix of shape *(x, x)*. + * + * If *n>2*, *gelqf* is performed separately on the trailing two dimensions for all + * inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single LQ factorization + * A = `[ [1., 2., 3.], [4., 5., 6.] ] + * Q, L = gelqf(A) + * Q = `[ [-0.26726124, -0.53452248, -0.80178373], + * [0.87287156, 0.21821789, -0.43643578] ] + * L = `[ [-3.74165739, 0.], + * [-8.55235974, 1.96396101] ] + * + * Batch LQ factorization + * A = `[ `[ [1., 2., 3.], [4., 5., 6.] ], + * `[ [7., 8., 9.], [10., 11., 12.] 
] ] + * Q, L = gelqf(A) + * Q = `[ `[ [-0.26726124, -0.53452248, -0.80178373], + * [0.87287156, 0.21821789, -0.43643578] ], + * `[ [-0.50257071, -0.57436653, -0.64616234], + * [0.7620735, 0.05862104, -0.64483142] ] ] + * L = `[ `[ [-3.74165739, 0.], + * [-8.55235974, 1.96396101] ], + * `[ [-13.92838828, 0.], + * [-19.09768702, 0.52758934] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L798 + * }}} + * + * @param A Tensor of input matrices to be factorized + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def linalg_gelqf(A : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns the indices of the top *k* elements in an input array along the given + * axis (by default). + * If ret_type is set to 'value' returns the value of top *k* elements (instead of indices). + * In case of ret_type = 'both', both value and index would be returned. + * The returned elements will be sorted. + * + * Examples:: + * + * x = `[ [ 0.3, 0.2, 0.4], + * [ 0.1, 0.3, 0.2] ] + * + * // returns an index of the largest element on last axis + * topk(x) = `[ [ 2.], + * [ 1.] ] + * + * // returns the value of top-2 largest elements on last axis + * topk(x, ret_typ='value', k=2) = `[ [ 0.4, 0.3], + * [ 0.3, 0.2] ] + * + * // returns the value of top-2 smallest elements on last axis + * topk(x, ret_typ='value', k=2, is_ascend=1) = `[ [ 0.2 , 0.3], + * [ 0.1 , 0.2] ] + * + * // returns the value of top-2 largest elements on axis 0 + * topk(x, axis=0, ret_typ='value', k=2) = `[ [ 0.3, 0.3, 0.4], + * [ 0.1, 0.2, 0.2] ] + * + * // flattens and then returns list of both values and indices + * topk(x, ret_typ='both', k=2) = `[ `[ [ 0.4, 0.3], [ 0.3, 0.2] ] , `[ [ 2., 0.], [ 1., 2.] ] ] + * + * + * + * Defined in src/operator/tensor/ordering_op.cc:L68 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def topk(po: topkParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise floor of the input. + * + * The floor of the scalar x is the largest integer i, such that i <= x. + * + * Example:: + * + * floor([-2.1, -1.9, 1.5, 1.9, 2.1]) = [-3., -2., 1., 1., 2.] + * + * The storage type of ``floor`` output depends upon the input storage type: + * + * - floor(default) = default + * - floor(row_sparse) = row_sparse + * - floor(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L837 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def floor(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Applies correlation to inputs. + * + * The correlation layer performs multiplicative patch comparisons between two feature maps. + * + * Given two multi-channel feature maps :math:`f_{1}, f_{2}`, with :math:`w`, :math:`h`, and :math:`c` being their width, height, and number of channels, + * the correlation layer lets the network compare each patch from :math:`f_{1}` with each patch from :math:`f_{2}`. + * + * For now we consider only a single comparison of two patches. The 'correlation' of two patches centered at :math:`x_{1}` in the first map and + * :math:`x_{2}` in the second map is then defined as: + * + * .. math:: + * + * c(x_{1}, x_{2}) = \sum_{o \in [-k,k] \times [-k,k]} + * + * for a square patch of size :math:`K:=2k+1`. 
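+ *
+ * (The summand is dropped in the generated text above. In the FlowNet-style correlation
+ * the sum runs over the inner product of the two patches,
+ * :math:`\langle f_{1}(x_{1}+o), f_{2}(x_{2}+o)\rangle`; this completion is a hedged
+ * reconstruction, not part of the upstream docstring.)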
+ * + * Note that the equation above is identical to one step of a convolution in neural networks, but instead of convolving data with a filter, it convolves data with other + * data. For this reason, it has no training weights. + * + * Computing :math:`c(x_{1}, x_{2})` involves :math:`c * K^{2}` multiplications. Comparing all patch combinations involves :math:`w^{2}*h^{2}` such computations. + * + * Given a maximum displacement :math:`d`, for each location :math:`x_{1}` it computes correlations :math:`c(x_{1}, x_{2})` only in a neighborhood of size :math:`D:=2d+1`, + * by limiting the range of :math:`x_{2}`. We use strides :math:`s_{1}, s_{2}`, to quantize :math:`x_{1}` globally and to quantize :math:`x_{2}` within the neighborhood + * centered around :math:`x_{1}`. + * + * The final output is defined by the following expression: + * + * .. math:: + * out[n, q, i, j] = c(x_{i, j}, x_{q}) + * + * where :math:`i` and :math:`j` enumerate spatial locations in :math:`f_{1}`, and :math:`q` denotes the :math:`q^{th}` neighborhood of :math:`x_{i,j}`. + * + * + * Defined in src/operator/correlation.cc:L198 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def Correlation(po: CorrelationParam) : Array[NDArray] + + /** + * + * {{{ + * + * Reverses the order of elements along given axis while preserving array shape. + * Note: reverse and flip are equivalent. We use reverse in the following examples. + * Examples:: + * x = `[ [ 0., 1., 2., 3., 4.], + * [ 5., 6., 7., 8., 9.] ] + * reverse(x, axis=0) = `[ [ 5., 6., 7., 8., 9.], + * [ 0., 1., 2., 3., 4.] ] + * reverse(x, axis=1) = `[ [ 4., 3., 2., 1., 0.], + * [ 9., 8., 7., 6., 5.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L832 + * }}} + * + * @param data Input data array + * @param axis The axis which to reverse elements. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def flip(data : org.apache.mxnet.javaapi.NDArray, axis : org.apache.mxnet.javaapi.Shape, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise modulo of the input arrays with broadcasting. + * + * Example:: + * + * x = `[ [ 8., 8., 8.], + * [ 8., 8., 8.] ] + * + * y = `[ [ 2.], + * [ 3.] ] + * + * broadcast_mod(x, y) = `[ [ 0., 0., 0.], + * [ 2., 2., 2.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L222 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def broadcast_mod(lhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Splits an array along a particular axis into multiple sub-arrays. + * + * .. note:: ``SliceChannel`` is deprecated. Use ``split`` instead. + * + * **Note** that `num_outputs` should evenly divide the length of the axis + * along which to split the array. + * + * Example:: + * + * x = `[ `[ [ 1.] + * [ 2.] ] + * `[ [ 3.] + * [ 4.] ] + * `[ [ 5.] + * [ 6.] ] ] + * x.shape = (3, 2, 1) + * + * y = split(x, axis=1, num_outputs=2) // a list of 2 arrays with shape (3, 1, 1) + * y = `[ `[ [ 1.] ] + * `[ [ 3.] ] + * `[ [ 5.] ] ] + * + * `[ `[ [ 2.] ] + * `[ [ 4.] ] + * `[ [ 6.] ] ] + * + * y[0].shape = (3, 1, 1) + * + * z = split(x, axis=0, num_outputs=3) // a list of 3 arrays with shape (1, 2, 1) + * z = `[ `[ [ 1.] + * [ 2.] ] ] + * + * `[ `[ [ 3.] + * [ 4.] ] ] + * + * `[ `[ [ 5.] + * [ 6.] 
] ] + * + * z[0].shape = (1, 2, 1) + * + * `squeeze_axis=1` removes the axis with length 1 from the shapes of the output arrays. + * **Note** that setting `squeeze_axis` to ``1`` removes axis with length 1 only + * along the `axis` which it is split. + * Also `squeeze_axis` can be set to true only if ``input.shape[axis] == num_outputs``. + * + * Example:: + * + * z = split(x, axis=0, num_outputs=3, squeeze_axis=1) // a list of 3 arrays with shape (2, 1) + * z = `[ [ 1.] + * [ 2.] ] + * + * `[ [ 3.] + * [ 4.] ] + * + * `[ [ 5.] + * [ 6.] ] + * z[0].shape = (2 ,1 ) + * + * + * + * Defined in src/operator/slice_channel.cc:L107 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def split(po: splitParam) : Array[NDArray] + + /** + * + * {{{ + * + * Draw random samples from a discrete uniform distribution. + * + * Samples are uniformly distributed over the half-open interval *[low, high)* + * (includes *low*, but excludes *high*). + * + * Example:: + * + * randint(low=0, high=5, shape=(2,2)) = `[ [ 0, 2], + * [ 3, 1] ] + * + * + * + * Defined in src/operator/random/sample_op.cc:L194 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def random_randint(po: random_randintParam) : Array[NDArray] + + /** + * + * {{{ + * + * Picks elements from an input array according to the input indices along the given axis. + * + * Given an input array of shape ``(d0, d1)`` and indices of shape ``(i0,)``, the result will be + * an output array of shape ``(i0,)`` with:: + * + * output[i] = input[i, indices[i] ] + * + * By default, if any index mentioned is too large, it is replaced by the index that addresses + * the last element along an axis (the `clip` mode). + * + * This function supports n-dimensional input and (n-1)-dimensional indices arrays. + * + * Examples:: + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] ] + * + * // picks elements with specified indices along axis 0 + * pick(x, y=[0,1], 0) = [ 1., 4.] + * + * // picks elements with specified indices along axis 1 + * pick(x, y=[0,1,0], 1) = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] ] + * + * // picks elements with specified indices along axis 1 using 'wrap' mode + * // to place indicies that would normally be out of bounds + * pick(x, y=[2,-1,-2], 1, mode='wrap') = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] ] + * + * // picks elements with specified indices along axis 1 and dims are maintained + * pick(x,y, 1, keepdims=True) = `[ [ 2.], + * [ 3.], + * [ 6.] ] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L155 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def pick(po: pickParam) : Array[NDArray] + + /** + * + * {{{ + * + * Slices a region of the array. + * .. note:: ``crop`` is deprecated. Use ``slice`` instead. + * This function returns a sliced array between the indices given + * by `begin` and `end` with the corresponding `step`. + * For an input array of ``shape=(d_0, d_1, ..., d_n-1)``, + * slice operation with ``begin=(b_0, b_1...b_m-1)``, + * ``end=(e_0, e_1, ..., e_m-1)``, and ``step=(s_0, s_1, ..., s_m-1)``, + * where m <= n, results in an array with the shape + * ``(|e_0-b_0|/|s_0|, ..., |e_m-1-b_m-1|/|s_m-1|, d_m, ..., d_n-1)``. + * The resulting array's *k*-th dimension contains elements + * from the *k*-th dimension of the input array starting + * from index ``b_k`` (inclusive) with step ``s_k`` + * until reaching ``e_k`` (exclusive). 
+ * If the *k*-th elements are `None` in the sequence of `begin`, `end`, + * and `step`, the following rule will be used to set default values. + * If `s_k` is `None`, set `s_k=1`. If `s_k > 0`, set `b_k=0`, `e_k=d_k`; + * else, set `b_k=d_k-1`, `e_k=-1`. + * The storage type of ``slice`` output depends on storage types of inputs + * - slice(csr) = csr + * - otherwise, ``slice`` generates output with default storage + * .. note:: When input data storage type is csr, it only supports + * step=(), or step=(None,), or step=(1,) to generate a csr output. + * For other step parameter values, it falls back to slicing + * a dense tensor. + * Example:: + * x = `[ [ 1., 2., 3., 4.], + * [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice(x, begin=(0,1), end=(2,4)) = `[ [ 2., 3., 4.], + * [ 6., 7., 8.] ] + * slice(x, begin=(None, 0), end=(None, 3), step=(-1, 2)) = `[ [9., 11.], + * [5., 7.], + * [1., 3.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L482 + * }}} + * + * @param data Source input + * @param begin starting indices for the slice operation, supports negative indices. + * @param end ending indices for the slice operation, supports negative indices. + * @param step step for the slice operation, supports negative values. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def slice(data : org.apache.mxnet.javaapi.NDArray, begin : org.apache.mxnet.javaapi.Shape, end : org.apache.mxnet.javaapi.Shape, step : org.apache.mxnet.javaapi.Shape, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Rearranges(permutes) blocks of spatial data into depth. + * Similar to ONNX SpaceToDepth operator: + * https://github.com/onnx/onnx/blob/master/docs/Operators.md#SpaceToDepth + * The output is a new tensor where the values from height and width dimension are + * moved to the depth dimension. The reverse of this operation is ``depth_to_space``. + * .. math:: + * \begin{gather*} + * x \prime = reshape(x, [N, C, H / block\_size, block\_size, W / block\_size, block\_size]) \\ + * x \prime \prime = transpose(x \prime, [0, 3, 5, 1, 2, 4]) \\ + * y = reshape(x \prime \prime, [N, C * (block\_size ^ 2), H / block\_size, W / block\_size]) + * \end{gather*} + * where :math:`x` is an input tensor with default layout as :math:`[N, C, H, W]`: [batch, channels, height, width] + * and :math:`y` is the output tensor of layout :math:`[N, C * (block\_size ^ 2), H / block\_size, W / block\_size]` + * Example:: + * x = `[ [`[ [0, 6, 1, 7, 2, 8], + * [12, 18, 13, 19, 14, 20], + * [3, 9, 4, 10, 5, 11], + * [15, 21, 16, 22, 17, 23] ] ] ] + * space_to_depth(x, 2) = `[ [`[ [0, 1, 2], + * [3, 4, 5] ], + * `[ [6, 7, 8], + * [9, 10, 11] ], + * `[ [12, 13, 14], + * [15, 16, 17] ], + * `[ [18, 19, 20], + * [21, 22, 23] ] ] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L1019 + * }}} + * + * @param data Input ndarray + * @param block_size Blocks of [block_size. block_size] are moved + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def space_to_depth(data : org.apache.mxnet.javaapi.NDArray, block_size : java.lang.Integer, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Applies softmax activation to input. This is intended for internal layers. + * + * .. note:: + * + * This operator has been deprecated, please use `softmax`. + * + * If `mode` = ``instance``, this operator will compute a softmax for each instance in the batch. + * This is the default mode. 
+ * + * If `mode` = ``channel``, this operator will compute a k-class softmax at each position + * of each instance, where `k` = ``num_channel``. This mode can only be used when the input array + * has at least 3 dimensions. + * This can be used for `fully convolutional network`, `image segmentation`, etc. + * + * Example:: + * + * >>> input_array = mx.nd.array(`[ [3., 0.5, -0.5, 2., 7.], + * >>> [2., -.4, 7., 3., 0.2] ]) + * >>> softmax_act = mx.nd.SoftmaxActivation(input_array) + * >>> print softmax_act.asnumpy() + * `[ [ 1.78322066e-02 1.46375655e-03 5.38485940e-04 6.56010211e-03 9.73605454e-01] + * [ 6.56221947e-03 5.95310994e-04 9.73919690e-01 1.78379621e-02 1.08472735e-03] ] + * + * + * + * Defined in src/operator/nn/softmax_activation.cc:L59 + * }}} + * + * @param data The input array. + * @param mode Specifies how to compute the softmax. If set to ``instance``, it computes softmax for each instance. If set to ``channel``, It computes cross channel softmax for each position of each instance. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def SoftmaxActivation(data : org.apache.mxnet.javaapi.NDArray, mode : String, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Sets all elements outside the sequence to a constant value. + * + * This function takes an n-dimensional input array of the form + * [max_sequence_length, batch_size, other_feature_dims] and returns an array of the same shape. + * + * Parameter `sequence_length` is used to handle variable-length sequences. `sequence_length` + * should be an input array of positive ints of dimension [batch_size]. + * To use this parameter, set `use_sequence_length` to `True`, + * otherwise each example in the batch is assumed to have the max sequence length and + * this operator works as the `identity` operator. + * + * Example:: + * + * x = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // Batch 1 + * B1 = `[ [ 1., 2., 3.], + * [ 7., 8., 9.], + * [ 13., 14., 15.] ] + * + * // Batch 2 + * B2 = `[ [ 4., 5., 6.], + * [ 10., 11., 12.], + * [ 16., 17., 18.] ] + * + * // works as identity operator when sequence_length parameter is not used + * SequenceMask(x) = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // sequence_length [1,1] means 1 of each batch will be kept + * // and other rows are masked with default mask value = 0 + * SequenceMask(x, sequence_length=[1,1], use_sequence_length=True) = + * `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ], + * + * `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] ] + * + * // sequence_length [2,3] means 2 of batch B1 and 3 of batch B2 will be kept + * // and other rows are masked with value = 1 + * SequenceMask(x, sequence_length=[2,3], use_sequence_length=True, value=1) = + * `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 1., 1., 1.], + * [ 16., 17., 18.] ] ] + * + * + * + * Defined in src/operator/sequence_mask.cc:L186 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def SequenceMask(po: SequenceMaskParam) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * Poisson distributions with parameters *lam* (rate). + * + * The shape of *lam* must match the leftmost subshape of *sample*. 
That is, *sample* + * can have the same shape as *lam*, in which case the output contains one density per + * distribution, or *sample* can be a tensor of tensors with that shape, in which case + * the output is a tensor of densities such that the densities at index *i* in the output + * are given by the samples at index *i* in *sample* parameterized by the value of *lam* + * at index *i*. + * + * Examples:: + * + * random_pdf_poisson(sample=`[ [0,1,2,3] ], lam=[1]) = + * `[ [0.36787945, 0.36787945, 0.18393973, 0.06131324] ] + * + * sample = `[ [0,1,2,3], + * [0,1,2,3], + * [0,1,2,3] ] + * + * random_pdf_poisson(sample=sample, lam=[1,2,3]) = + * `[ [0.36787945, 0.36787945, 0.18393973, 0.06131324], + * [0.13533528, 0.27067056, 0.27067056, 0.18044704], + * [0.04978707, 0.14936121, 0.22404182, 0.22404182] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L306 + * }}} + * + * @param sample Samples from the distributions. + * @param lam Lambda (rate) parameters of the distributions. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def random_pdf_poisson(sample : org.apache.mxnet.javaapi.NDArray, lam : org.apache.mxnet.javaapi.NDArray, is_log : java.lang.Boolean, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Broadcasts lhs to have the same shape as rhs. + * + * Broadcasting is a mechanism that allows NDArrays to perform arithmetic operations + * with arrays of different shapes efficiently without creating multiple copies of arrays. + * Also see, `Broadcasting `_ for more explanation. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. + * + * For example:: + * + * broadcast_like(`[ [1,2,3] ], `[ [5,6,7],[7,8,9] ]) = `[ [ 1., 2., 3.], + * [ 1., 2., 3.] ]) + * + * broadcast_like([9], [1,2,3,4,5], lhs_axes=(0,), rhs_axes=(-1,)) = [9,9,9,9,9] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L135 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def broadcast_like(po: broadcast_likeParam) : Array[NDArray] + + /** + * + * {{{ + * + * Converts an array of flat indices into a batch of index arrays. The operator follows numpy conventions so a single multi index is given by a column of the output matrix. The leading dimension may be left unspecified by using -1 as placeholder. + * + * Examples:: + * + * A = [22,41,37] + * unravel(A, shape=(7,6)) = `[ [3,6,6],[4,5,1] ] + * unravel(A, shape=(-1,6)) = `[ [3,6,6],[4,5,1] ] + * + * + * + * Defined in src/operator/tensor/ravel.cc:L68 + * }}} + * + * @param data Array of flat indices + * @param shape Shape of the array into which the multi-indices apply. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def unravel_index(data : org.apache.mxnet.javaapi.NDArray, shape : org.apache.mxnet.javaapi.Shape, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the gradient of cross entropy loss with respect to softmax output. + * + * - This operator computes the gradient in two steps. + * The cross entropy loss does not actually need to be computed. + * + * - Applies softmax function on the input array. + * - Computes and returns the gradient of cross entropy loss w.r.t. the softmax output. + * + * - The softmax function, cross entropy loss and gradient is given by: + * + * - Softmax Function: + * + * .. 
math:: \text{softmax}(x)_i = \frac{exp(x_i)}{\sum_j exp(x_j)} + * + * - Cross Entropy Function: + * + * .. math:: \text{CE(label, output)} = - \sum_i \text{label}_i \log(\text{output}_i) + * + * - The gradient of cross entropy loss w.r.t softmax output: + * + * .. math:: \text{gradient} = \text{output} - \text{label} + * + * - During forward propagation, the softmax function is computed for each instance in the input array. + * + * For general *N*-D input arrays with shape :math:`(d_1, d_2, ..., d_n)`. The size is + * :math:`s=d_1 \cdot d_2 \cdot \cdot \cdot d_n`. We can use the parameters `preserve_shape` + * and `multi_output` to specify the way to compute softmax: + * + * - By default, `preserve_shape` is ``false``. This operator will reshape the input array + * into a 2-D array with shape :math:`(d_1, \frac{s}{d_1})` and then compute the softmax function for + * each row in the reshaped array, and afterwards reshape it back to the original shape + * :math:`(d_1, d_2, ..., d_n)`. + * - If `preserve_shape` is ``true``, the softmax function will be computed along + * the last axis (`axis` = ``-1``). + * - If `multi_output` is ``true``, the softmax function will be computed along + * the second axis (`axis` = ``1``). + * + * - During backward propagation, the gradient of cross-entropy loss w.r.t softmax output array is computed. + * The provided label can be a one-hot label array or a probability label array. + * + * - If the parameter `use_ignore` is ``true``, `ignore_label` can specify input instances + * with a particular label to be ignored during backward propagation. **This has no effect when + * softmax `output` has same shape as `label`**. + * + * Example:: + * + * data = `[ [1,2,3,4],[2,2,2,2],[3,3,3,3],[4,4,4,4] ] + * label = [1,0,2,3] + * ignore_label = 1 + * SoftmaxOutput(data=data, label = label,\ + * multi_output=true, use_ignore=true,\ + * ignore_label=ignore_label) + * ## forward softmax output + * `[ [ 0.0320586 0.08714432 0.23688284 0.64391428] + * [ 0.25 0.25 0.25 0.25 ] + * [ 0.25 0.25 0.25 0.25 ] + * [ 0.25 0.25 0.25 0.25 ] ] + * ## backward gradient output + * `[ [ 0. 0. 0. 0. ] + * [-0.75 0.25 0.25 0.25] + * [ 0.25 0.25 -0.75 0.25] + * [ 0.25 0.25 0.25 -0.75] ] + * ## notice that the first row is all 0 because label[0] is 1, which is equal to ignore_label. + * + * - The parameter `grad_scale` can be used to rescale the gradient, which is often used to + * give each loss function different weights. + * + * - This operator also supports various ways to normalize the gradient by `normalization`, + * The `normalization` is applied if softmax output has different shape than the labels. + * The `normalization` mode can be set to the followings: + * + * - ``'null'``: do nothing. + * - ``'batch'``: divide the gradient by the batch size. + * - ``'valid'``: divide the gradient by the number of instances which are not ignored. + * + * + * + * Defined in src/operator/softmax_output.cc:L231 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def SoftmaxOutput(po: SoftmaxOutputParam) : Array[NDArray] + + /** + * + * {{{ + * + * Join a sequence of arrays along a new axis. + * The axis parameter specifies the index of the new axis in the dimensions of the + * result. For example, if axis=0 it will be the first dimension and if axis=-1 it + * will be the last dimension. 
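+ *
+ * A hedged usage sketch of the binding declared below; it assumes the generated
+ * implementation is reachable through the `org.apache.mxnet.javaapi.NDArray` companion
+ * object, that the javaapi `NDArray`/`Shape`/`Context` constructors shown exist, and
+ * that `out` may be passed as `null`::
+ *
+ *   // stack the same 1-D inputs as the Examples below along a new leading axis
+ *   val ctx = new Context("cpu", 0)
+ *   val x = new NDArray(Array(1f, 2f), new Shape(Array(2)), ctx)
+ *   val y = new NDArray(Array(3f, 4f), new Shape(Array(2)), ctx)
+ *   // axis = 0, num_args = 2; out = null lets the engine allocate the result
+ *   val stacked: Array[NDArray] = NDArray.stack(Array(x, y), 0, 2, null)
+ *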
+ * Examples:: + * x = [1, 2] + * y = [3, 4] + * stack(x, y) = `[ [1, 2], + * [3, 4] ] + * stack(x, y, axis=1) = `[ [1, 3], + * [2, 4] ] + * }}} + * + * @param data List of arrays to stack + * @param axis The axis in the result array along which the input arrays are stacked. + * @param num_args Number of inputs to be stacked. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def stack(data : Array[org.apache.mxnet.javaapi.NDArray], axis : java.lang.Integer, num_args : java.lang.Integer, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Interchanges two axes of an array. + * + * Examples:: + * + * x = `[ [1, 2, 3] ]) + * swapaxes(x, 0, 1) = `[ [ 1], + * [ 2], + * [ 3] ] + * + * x = `[ `[ [ 0, 1], + * [ 2, 3] ], + * `[ [ 4, 5], + * [ 6, 7] ] ] // (2,2,2) array + * + * swapaxes(x, 0, 2) = `[ `[ [ 0, 4], + * [ 2, 6] ], + * `[ [ 1, 5], + * [ 3, 7] ] ] + * + * + * Defined in src/operator/swapaxis.cc:L70 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def swapaxes(po: swapaxesParam) : Array[NDArray] + + /** + * + * {{{ + * + * Takes elements from a data batch. + * + * .. note:: + * `batch_take` is deprecated. Use `pick` instead. + * + * Given an input array of shape ``(d0, d1)`` and indices of shape ``(i0,)``, the result will be + * an output array of shape ``(i0,)`` with:: + * + * output[i] = input[i, indices[i] ] + * + * Examples:: + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] ] + * + * // takes elements with specified indices + * batch_take(x, [0,1,0]) = [ 1. 4. 5.] + * + * + * + * Defined in src/operator/tensor/indexing_op.cc:L777 + * }}} + * + * @param a The input array + * @param indices The index array + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def batch_take(a : org.apache.mxnet.javaapi.NDArray, indices : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Return the cumulative sum of the elements along a given axis. + * + * Defined in src/operator/numpy/np_cumsum.cc:L70 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def cumsum(po: cumsumParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns the result of logical NOT (!) function + * + * Example: + * logical_not([-2., 0., 1.]) = [0., 1., 0.] + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def logical_not(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the product of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L31 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def prod(po: prodParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns the result of element-wise **lesser than** (<) comparison operation with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_lesser(x, y) = `[ [ 0., 0., 0.], + * [ 0., 0., 0.] 
] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L118 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def broadcast_lesser(lhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Return an array of zeros with the same shape, type and storage type + * as the input array. + * + * The storage type of ``zeros_like`` output depends on the storage type of the input + * + * - zeros_like(row_sparse) = row_sparse + * - zeros_like(csr) = csr + * - zeros_like(default) = default + * + * Examples:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * zeros_like(x) = `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * }}} + * + * @param data The input + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def zeros_like(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns the hyperbolic tangent of the input array, computed element-wise. + * + * .. math:: + * tanh(x) = sinh(x) / cosh(x) + * + * The storage type of ``tanh`` output depends upon the input storage type: + * + * - tanh(default) = default + * - tanh(row_sparse) = row_sparse + * - tanh(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L393 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def tanh(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Check if all the float numbers in the array are finite (used for AMP) + * + * + * Defined in src/operator/contrib/all_finite.cc:L101 + * }}} + * + * @param data Array + * @param init_output Initialize output to 1. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def all_finite(data : org.apache.mxnet.javaapi.NDArray, init_output : java.lang.Boolean, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * uniform distributions on the intervals given by *[low,high)*. + * + * *low* and *high* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *low* and *high*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *low* and *high* at index *i*. + * + * Examples:: + * + * random_pdf_uniform(sample=`[ [1,2,3,4] ], low=[0], high=[10]) = [0.1, 0.1, 0.1, 0.1] + * + * sample = `[ `[ [1, 2, 3], + * [1, 2, 3] ], + * `[ [1, 2, 3], + * [1, 2, 3] ] ] + * low = `[ [0, 0], + * [0, 0] ] + * high = `[ [ 5, 10], + * [15, 20] ] + * random_pdf_uniform(sample=sample, low=low, high=high) = + * `[ `[ [0.2, 0.2, 0.2 ], + * [0.1, 0.1, 0.1 ] ], + * `[ [0.06667, 0.06667, 0.06667], + * [0.05, 0.05, 0.05 ] ] ] + * + * + * + * Defined in src/operator/random/pdf_op.cc:L297 + * }}} + * + * @param sample Samples from the distributions. + * @param low Lower bounds of the distributions. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @param high Upper bounds of the distributions. 
+ * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def random_pdf_uniform(sample : org.apache.mxnet.javaapi.NDArray, low : org.apache.mxnet.javaapi.NDArray, is_log : java.lang.Boolean, high : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Phase I of lamb update it performs the following operations and returns g:. + * + * Link to paper: https://arxiv.org/pdf/1904.00962.pdf + * + * .. math:: + * \begin{gather*} + * grad = grad * rescale_grad + * if (grad < -clip_gradient) + * then + * grad = -clip_gradient + * if (grad > clip_gradient) + * then + * grad = clip_gradient + * + * mean = beta1 * mean + (1 - beta1) * grad; + * variance = beta2 * variance + (1. - beta2) * grad ^ 2; + * + * if (bias_correction) + * then + * mean_hat = mean / (1. - beta1^t); + * var_hat = var / (1 - beta2^t); + * g = mean_hat / (var_hat^(1/2) + epsilon) + wd * weight; + * else + * g = mean / (var_data^(1/2) + epsilon) + wd * weight; + * \end{gather*} + * + * + * + * Defined in src/operator/optimizer_op.cc:L953 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def lamb_update_phase1(po: lamb_update_phase1Param) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise difference of the input arrays with broadcasting. + * + * `broadcast_minus` is an alias to the function `broadcast_sub`. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_sub(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * broadcast_minus(x, y) = `[ [ 1., 1., 1.], + * [ 0., 0., 0.] ] + * + * Supported sparse operations: + * + * broadcast_sub/minus(csr, dense(1D)) = dense + * broadcast_sub/minus(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L106 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def broadcast_minus(lhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Randomly shuffle the elements. + * + * This shuffles the array along the first axis. + * The order of the elements in each subarray does not change. + * For example, if a 2D array is given, the order of the rows randomly changes, + * but the order of the elements in each row does not change. + * }}} + * + * @param data Data to be shuffled. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def shuffle(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns a 1D int64 array containing the shape of data. + * + * Example:: + * + * shape_array(`[ [1,2,3,4], [5,6,7,8] ]) = [2,4] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L574 + * }}} + * + * @param data Input Array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def shape_array(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns a one-hot array. + * + * The locations represented by `indices` take value `on_value`, while all + * other locations take value `off_value`. 
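+ *
+ * A hedged usage sketch of the binding declared below; it assumes the generated
+ * `one_hotParam` takes the required `indices` and `depth` in its constructor and that
+ * the implementation is reachable through the `org.apache.mxnet.javaapi.NDArray`
+ * companion object::
+ *
+ *   // indices [1, 0, 2, 0] encoded with depth 3, matching the Examples below
+ *   val indices = new NDArray(Array(1f, 0f, 2f, 0f), new Shape(Array(4)), new Context("cpu", 0))
+ *   // assumed constructor: one_hotParam(indices, depth)
+ *   val encoded: Array[NDArray] = NDArray.one_hot(new one_hotParam(indices, 3))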
+ * + * `one_hot` operation with `indices` of shape ``(i0, i1)`` and `depth` of ``d`` would result + * in an output array of shape ``(i0, i1, d)`` with:: + * + * output[i,j,:] = off_value + * output[i,j,indices[i,j] ] = on_value + * + * Examples:: + * + * one_hot([1,0,2,0], 3) = `[ [ 0. 1. 0.] + * [ 1. 0. 0.] + * [ 0. 0. 1.] + * [ 1. 0. 0.] ] + * + * one_hot([1,0,2,0], 3, on_value=8, off_value=1, + * dtype='int32') = `[ [1 8 1] + * [8 1 1] + * [1 1 8] + * [8 1 1] ] + * + * one_hot(`[ [1,0],[1,0],[2,0] ], 3) = `[ `[ [ 0. 1. 0.] + * [ 1. 0. 0.] ] + * + * `[ [ 0. 1. 0.] + * [ 1. 0. 0.] ] + * + * `[ [ 0. 0. 1.] + * [ 1. 0. 0.] ] ] + * + * + * Defined in src/operator/tensor/indexing_op.cc:L824 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def one_hot(po: one_hotParam) : Array[NDArray] + + /** + * + * {{{ + * + * Compute the sums of squares of multiple arrays + * + * + * Defined in src/operator/contrib/multi_sum_sq.cc:L36 + * }}} + * + * @param data Arrays + * @param num_arrays number of input arrays. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def multi_sum_sq(data : Array[org.apache.mxnet.javaapi.NDArray], num_arrays : java.lang.Integer, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Compute the inverse of a matrix. + * Input is a tensor *A* of dimension *n >= 2*. + * + * If *n=2*, *A* is a square matrix. We compute: + * + * *out* = *A*\ :sup:`-1` + * + * If *n>2*, *inverse* is performed separately on the trailing two dimensions + * for all inputs (batch mode). + * + * .. note:: The operator supports float32 and float64 data types only. + * + * Examples:: + * + * Single matrix inverse + * A = `[ [1., 4.], [2., 3.] ] + * inverse(A) = `[ [-0.6, 0.8], [0.4, -0.2] ] + * + * Batch matrix inverse + * A = `[ `[ [1., 4.], [2., 3.] ], + * `[ [1., 3.], [2., 4.] ] ] + * inverse(A) = `[ `[ [-0.6, 0.8], [0.4, -0.2] ], + * `[ [-2., 1.5], [1., -0.5] ] ] + * + * + * Defined in src/operator/tensor/la_op.cc:L919 + * }}} + * + * @param A Tensor of square matrix + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def linalg_inverse(A : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Mixed Precision version Phase II of lamb update + * it performs the following operations and updates grad. + * + * Link to paper: https://arxiv.org/pdf/1904.00962.pdf + * + * .. math:: + * \begin{gather*} + * if (lower_bound >= 0) + * then + * r1 = max(r1, lower_bound) + * if (upper_bound >= 0) + * then + * r1 = max(r1, upper_bound) + * + * if (r1 == 0 or r2 == 0) + * then + * lr = lr + * else + * lr = lr * (r1/r2) + * weight32 = weight32 - lr * g + * weight(float16) = weight32 + * \end{gather*} + * + * + * + * Defined in src/operator/optimizer_op.cc:L1075 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def mp_lamb_update_phase2(po: mp_lamb_update_phase2Param) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * exponential distributions with parameters *lam* (rate). + * + * The shape of *lam* must match the leftmost subshape of *sample*. 
That is, *sample* + * can have the same shape as *lam*, in which case the output contains one density per + * distribution, or *sample* can be a tensor of tensors with that shape, in which case + * the output is a tensor of densities such that the densities at index *i* in the output + * are given by the samples at index *i* in *sample* parameterized by the value of *lam* + * at index *i*. + * + * Examples:: + * + * random_pdf_exponential(sample=`[ [1, 2, 3] ], lam=[1]) = + * `[ [0.36787945, 0.13533528, 0.04978707] ] + * + * sample = `[ [1,2,3], + * [1,2,3], + * [1,2,3] ] + * + * random_pdf_exponential(sample=sample, lam=[1,0.5,0.25]) = + * `[ [0.36787945, 0.13533528, 0.04978707], + * [0.30326533, 0.18393973, 0.11156508], + * [0.1947002, 0.15163267, 0.11809164] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L304 + * }}} + * + * @param sample Samples from the distributions. + * @param lam Lambda (rate) parameters of the distributions. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def random_pdf_exponential(sample : org.apache.mxnet.javaapi.NDArray, lam : org.apache.mxnet.javaapi.NDArray, is_log : java.lang.Boolean, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the max of array elements over given axes. + * + * Defined in src/operator/tensor/./broadcast_reduce_op.h:L32 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def max_axis(po: max_axisParam) : Array[NDArray] + + /** + * + * {{{ + * + * Draw random samples from a negative binomial distribution. + * + * Samples are distributed according to a negative binomial distribution parametrized by + * *k* (limit of unsuccessful experiments) and *p* (failure probability in each experiment). + * Samples will always be returned as a floating point data type. + * + * Example:: + * + * negative_binomial(k=3, p=0.4, shape=(2,2)) = `[ [ 4., 7.], + * [ 2., 5.] ] + * + * + * Defined in src/operator/random/sample_op.cc:L164 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def random_negative_binomial(po: random_negative_binomialParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise rounded value to the nearest integer of the input. + * + * Example:: + * + * round([-1.5, 1.5, -1.9, 1.9, 2.1]) = [-2., 2., -2., 2., 2.] + * + * The storage type of ``round`` output depends upon the input storage type: + * + * - round(default) = default + * - round(row_sparse) = row_sparse + * - round(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L778 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def round(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * SIGN momentUM (Signum) optimizer. + * + * .. math:: + * + * g_t = \nabla J(W_{t-1})\\ + * m_t = \beta m_{t-1} + (1 - \beta) g_t\\ + * W_t = W_{t-1} - \eta_t \text{sign}(m_t) + * + * It updates the weights using:: + * state = momentum * state + (1-momentum) * gradient + * weight = weight - learning_rate * sign(state) + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * .. note:: + * - sparse ndarray not supported for this optimizer yet. 
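+ *
+ * A hedged usage sketch of the binding declared below; it assumes the generated
+ * `signum_updateParam` takes the required `weight`, `grad`, `mom` and `lr` arguments in
+ * its constructor (illustrative values) and that the implementation is reachable
+ * through the `org.apache.mxnet.javaapi.NDArray` companion object::
+ *
+ *   val ctx    = new Context("cpu", 0)
+ *   val weight = new NDArray(Array(1f, 2f, 3f), new Shape(Array(3)), ctx)
+ *   val grad   = new NDArray(Array(0.1f, -0.2f, 0.3f), new Shape(Array(3)), ctx)
+ *   val mom    = new NDArray(Array(0f, 0f, 0f), new Shape(Array(3)), ctx)
+ *   // one Signum step with lr = 0.01f; the returned array holds the updated weight
+ *   val updated: Array[NDArray] = NDArray.signum_update(new signum_updateParam(weight, grad, mom, 0.01f))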
+ * + * + * Defined in src/operator/optimizer_op.cc:L92 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def signum_update(po: signum_updateParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise sum of the input arrays with broadcasting. + * + * `broadcast_plus` is an alias to the function `broadcast_add`. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_add(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] + * + * broadcast_plus(x, y) = `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] + * + * Supported sparse operations: + * + * broadcast_add(csr, dense(1D)) = dense + * broadcast_add(dense(1D), csr) = dense + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_basic.cc:L58 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def broadcast_add(lhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns result of first array elements raised to powers from second array, element-wise with broadcasting. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_power(x, y) = `[ [ 2., 2., 2.], + * [ 4., 4., 4.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L45 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def broadcast_power(lhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns the hyperbolic cosine of the input array, computed element-wise. + * + * .. math:: + * cosh(x) = 0.5\times(exp(x) + exp(-x)) + * + * The storage type of ``cosh`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L351 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def cosh(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Return the elements, either from x or y, depending on the condition. + * + * Given three ndarrays, condition, x, and y, return an ndarray with the elements from x or y, + * depending on the elements from condition are true or false. x and y must have the same shape. + * If condition has the same shape as x, each element in the output array is from x if the + * corresponding element in the condition is true, and from y if false. + * + * If condition does not have the same shape as x, it must be a 1D array whose size is + * the same as x's first dimension size. Each row of the output array is from x's row + * if the corresponding element from condition is true, and from y's row if false. + * + * Note that all non-zero values are interpreted as ``True`` in condition. 
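+ *
+ * A hedged usage sketch of the binding declared below; it assumes the generated
+ * implementation is reachable through the `org.apache.mxnet.javaapi.NDArray` companion
+ * object and that `out` may be passed as `null`::
+ *
+ *   // same condition / x / y values as the Examples below; expected result [[5, 2], [3, 8]]
+ *   val ctx  = new Context("cpu", 0)
+ *   val cond = new NDArray(Array(0f, 1f, -1f, 0f), new Shape(Array(2, 2)), ctx)
+ *   val x    = new NDArray(Array(1f, 2f, 3f, 4f), new Shape(Array(2, 2)), ctx)
+ *   val y    = new NDArray(Array(5f, 6f, 7f, 8f), new Shape(Array(2, 2)), ctx)
+ *   val picked: Array[NDArray] = NDArray.where(cond, x, y, null)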
+ * + * Examples:: + * + * x = `[ [1, 2], [3, 4] ] + * y = `[ [5, 6], [7, 8] ] + * cond = `[ [0, 1], [-1, 0] ] + * + * where(cond, x, y) = `[ [5, 2], [3, 8] ] + * + * csr_cond = cast_storage(cond, 'csr') + * + * where(csr_cond, x, y) = `[ [5, 2], [3, 8] ] + * + * + * + * Defined in src/operator/tensor/control_flow_op.cc:L57 + * }}} + * + * @param condition condition array + * @param x + * @param y + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def where(condition : org.apache.mxnet.javaapi.NDArray, x : org.apache.mxnet.javaapi.NDArray, y : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Permutes the dimensions of an array. + * Examples:: + * x = `[ [ 1, 2], + * [ 3, 4] ] + * transpose(x) = `[ [ 1., 3.], + * [ 2., 4.] ] + * x = `[ `[ [ 1., 2.], + * [ 3., 4.] ], + * `[ [ 5., 6.], + * [ 7., 8.] ] ] + * transpose(x) = `[ `[ [ 1., 5.], + * [ 3., 7.] ], + * `[ [ 2., 6.], + * [ 4., 8.] ] ] + * transpose(x, axes=(1,0,2)) = `[ `[ [ 1., 2.], + * [ 5., 6.] ], + * `[ [ 3., 4.], + * [ 7., 8.] ] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L328 + * }}} + * + * @param data Source input + * @param axes Target axis order. By default the axes will be inverted. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def transpose(data : org.apache.mxnet.javaapi.NDArray, axes : org.apache.mxnet.javaapi.Shape, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Momentum update function for multi-precision Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * + * + * Defined in src/operator/optimizer_op.cc:L472 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def multi_mp_sgd_mom_update(po: multi_mp_sgd_mom_updateParam) : Array[NDArray] + + /** + * + * {{{ + * + * Picks elements from an input array according to the input indices along the given axis. + * + * Given an input array of shape ``(d0, d1)`` and indices of shape ``(i0,)``, the result will be + * an output array of shape ``(i0,)`` with:: + * + * output[i] = input[i, indices[i] ] + * + * By default, if any index mentioned is too large, it is replaced by the index that addresses + * the last element along an axis (the `clip` mode). + * + * This function supports n-dimensional input and (n-1)-dimensional indices arrays. + * + * Examples:: + * + * x = `[ [ 1., 2.], + * [ 3., 4.], + * [ 5., 6.] ] + * + * // picks elements with specified indices along axis 0 + * pick(x, y=[0,1], 0) = [ 1., 4.] + * + * // picks elements with specified indices along axis 1 + * pick(x, y=[0,1,0], 1) = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] ] + * + * // picks elements with specified indices along axis 1 using 'wrap' mode + * // to place indicies that would normally be out of bounds + * pick(x, y=[2,-1,-2], 1, mode='wrap') = [ 1., 4., 5.] + * + * y = `[ [ 1.], + * [ 0.], + * [ 2.] ] + * + * // picks elements with specified indices along axis 1 and dims are maintained + * pick(x,y, 1, keepdims=True) = `[ [ 2.], + * [ 3.], + * [ 6.] 
] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L155 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def choose_element_0index(po: choose_element_0indexParam) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * Dirichlet distributions with parameter *alpha*. + * + * The shape of *alpha* must match the leftmost subshape of *sample*. That is, *sample* + * can have the same shape as *alpha*, in which case the output contains one density per + * distribution, or *sample* can be a tensor of tensors with that shape, in which case + * the output is a tensor of densities such that the densities at index *i* in the output + * are given by the samples at index *i* in *sample* parameterized by the value of *alpha* + * at index *i*. + * + * Examples:: + * + * random_pdf_dirichlet(sample=`[ [1,2],[2,3],[3,4] ], alpha=[2.5, 2.5]) = + * [38.413498, 199.60245, 564.56085] + * + * sample = `[ `[ [1, 2, 3], [10, 20, 30], [100, 200, 300] ], + * `[ [0.1, 0.2, 0.3], [0.01, 0.02, 0.03], [0.001, 0.002, 0.003] ] ] + * + * random_pdf_dirichlet(sample=sample, alpha=[0.1, 0.4, 0.9]) = + * `[ [2.3257459e-02, 5.8420084e-04, 1.4674458e-05], + * [9.2589635e-01, 3.6860607e+01, 1.4674468e+03] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L315 + * }}} + * + * @param sample Samples from the distributions. + * @param alpha Concentration parameters of the distributions. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def random_pdf_dirichlet(sample : org.apache.mxnet.javaapi.NDArray, alpha : org.apache.mxnet.javaapi.NDArray, is_log : java.lang.Boolean, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns the reciprocal of the argument, element-wise. + * + * Calculates 1/x. + * + * Example:: + * + * reciprocal([-2, 1, 3, 1.6, 0.2]) = [-0.5, 1.0, 0.33333334, 0.625, 5.0] + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_pow.cc:L42 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def reciprocal(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns a copy of the input. + * + * From:src/operator/tensor/elemwise_unary_op_basic.cc:246 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def identity(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Apply a sparse regularization to the output a sigmoid activation function. + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def IdentityAttachKLSparseReg(po: IdentityAttachKLSparseRegParam) : Array[NDArray] + + /** + * + * {{{ + * + * Cast function used by AMP, that casts its inputs to the common widest type. + * + * It casts only between low precision float/FP32 and does not do anything for other types. + * + * + * + * Defined in src/operator/tensor/amp_cast.cc:L71 + * }}} + * + * @param data Weights + * @param num_outputs Number of input/output pairs to be casted to the widest type. 
+ * @param cast_narrow Whether to cast to the narrowest type + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def amp_multicast(data : Array[org.apache.mxnet.javaapi.NDArray], num_outputs : java.lang.Integer, cast_narrow : java.lang.Boolean, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns the gamma function (extension of the factorial function \ + * to the reals), computed element-wise on the input array. + * + * The storage type of ``gamma`` output is always dense + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def gamma(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Repeats elements of an array. + * By default, ``repeat`` flattens the input array into 1-D and then repeats the + * elements:: + * x = `[ [ 1, 2], + * [ 3, 4] ] + * repeat(x, repeats=2) = [ 1., 1., 2., 2., 3., 3., 4., 4.] + * The parameter ``axis`` specifies the axis along which to perform repeat:: + * repeat(x, repeats=2, axis=1) = `[ [ 1., 1., 2., 2.], + * [ 3., 3., 4., 4.] ] + * repeat(x, repeats=2, axis=0) = `[ [ 1., 2.], + * [ 1., 2.], + * [ 3., 4.], + * [ 3., 4.] ] + * repeat(x, repeats=2, axis=-1) = `[ [ 1., 1., 2., 2.], + * [ 3., 3., 4., 4.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L744 + * }}} + * + * @param data Input data array + * @param repeats The number of repetitions for each element. + * @param axis The axis along which to repeat values. The negative numbers are interpreted counting from the backward. By default, use the flattened input array, and return a flat output array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def repeat(data : org.apache.mxnet.javaapi.NDArray, repeats : java.lang.Integer, axis : java.lang.Integer, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Update function for RMSPropAlex optimizer. + * + * `RMSPropAlex` is non-centered version of `RMSProp`. + * + * Define :math:`E[g^2]_t` is the decaying average over past squared gradient and + * :math:`E[g]_t` is the decaying average over past gradient. + * + * .. math:: + * E[g^2]_t = \gamma_1 * E[g^2]_{t-1} + (1 - \gamma_1) * g_t^2\\ + * E[g]_t = \gamma_1 * E[g]_{t-1} + (1 - \gamma_1) * g_t\\ + * \Delta_t = \gamma_2 * \Delta_{t-1} - \frac{\eta}{\sqrt{E[g^2]_t - E[g]_t^2 + \epsilon}} g_t\\ + * + * The update step is + * + * .. math:: + * \theta_{t+1} = \theta_t + \Delta_t + * + * The RMSPropAlex code follows the version in + * http://arxiv.org/pdf/1308.0850v5.pdf Eq(38) - Eq(45) by Alex Graves, 2013. + * + * Graves suggests the momentum term :math:`\gamma_1` to be 0.95, :math:`\gamma_2` + * to be 0.9 and the learning rate :math:`\eta` to be 0.0001. + * + * + * Defined in src/operator/optimizer_op.cc:L836 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def rmspropalex_update(po: rmspropalex_updateParam) : Array[NDArray] + + /** + * + * {{{ + * + * Batch normalization. + * + * Normalizes a data batch by mean and variance, and applies a scale ``gamma`` as + * well as offset ``beta``. + * + * Assume the input has more than one dimension and we normalize along axis 1. + * We first compute the mean and variance along this axis: + * + * .. math:: + * + * data\_mean[i] = mean(data[:,i,:,...]) \\ + * data\_var[i] = var(data[:,i,:,...]) + * + * Then compute the normalized output, which has the same shape as input, as following: + * + * .. math:: + * + * out[:,i,:,...] 
= \frac{data[:,i,:,...] - data\_mean[i]}{\sqrt{data\_var[i]+\epsilon}} * gamma[i] + beta[i] + * + * Both *mean* and *var* returns a scalar by treating the input as a vector. + * + * Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta`` + * have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and + * the inverse of ``data_var``, which are needed for the backward pass. Note that gradient of these + * two outputs are blocked. + * + * Besides the inputs and the outputs, this operator accepts two auxiliary + * states, ``moving_mean`` and ``moving_var``, which are *k*-length + * vectors. They are global statistics for the whole dataset, which are updated + * by:: + * + * moving_mean = moving_mean * momentum + data_mean * (1 - momentum) + * moving_var = moving_var * momentum + data_var * (1 - momentum) + * + * If ``use_global_stats`` is set to be true, then ``moving_mean`` and + * ``moving_var`` are used instead of ``data_mean`` and ``data_var`` to compute + * the output. It is often used during inference. + * + * The parameter ``axis`` specifies which axis of the input shape denotes + * the 'channel' (separately normalized groups). The default is 1. Specifying -1 sets the channel + * axis to be the last item in the input shape. + * + * Both ``gamma`` and ``beta`` are learnable parameters. But if ``fix_gamma`` is true, + * then set ``gamma`` to 1 and its gradient to 0. + * + * .. Note:: + * When ``fix_gamma`` is set to True, no sparse support is provided. If ``fix_gamma is`` set to False, + * the sparse tensors will fallback. + * + * + * + * Defined in src/operator/nn/batch_norm.cc:L571 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def BatchNorm(po: BatchNormParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns a sorted copy of an input array along the given axis. + * + * Examples:: + * + * x = `[ [ 1, 4], + * [ 3, 1] ] + * + * // sorts along the last axis + * sort(x) = `[ [ 1., 4.], + * [ 1., 3.] ] + * + * // flattens and then sorts + * sort(x, axis=None) = [ 1., 1., 3., 4.] + * + * // sorts along the first axis + * sort(x, axis=0) = `[ [ 1., 1.], + * [ 3., 4.] ] + * + * // in a descend order + * sort(x, is_ascend=0) = `[ [ 4., 1.], + * [ 3., 1.] ] + * + * + * + * Defined in src/operator/tensor/ordering_op.cc:L132 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def sort(po: sortParam) : Array[NDArray] + + /** + * + * {{{ + * + * Momentum update function for Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. 
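+ *
+ * As a small illustration (made-up values; momentum=0.9, learning_rate=0.1,
+ * one weight of 1.0 with gradient 0.5 and initial v=0)::
+ *
+ *   v      = 0.9 * 0 - 0.1 * 0.5 = -0.05
+ *   weight = 1.0 + (-0.05)       = 0.95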
+ * + * However, if grad's storage type is ``row_sparse``, ``lazy_update`` is True and weight's storage + * type is the same as momentum's storage type, + * only the row slices whose indices appear in grad.indices are updated (for both weight and momentum):: + * + * for row in gradient.indices: + * v[row] = momentum[row] * v[row] - learning_rate * gradient[row] + * weight[row] += v[row] + * + * + * + * Defined in src/operator/optimizer_op.cc:L565 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def sgd_mom_update(po: sgd_mom_updateParam) : Array[NDArray] + + /** + * + * {{{ + * + * Momentum update function for Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * + * + * Defined in src/operator/contrib/preloaded_multi_sgd.cc:L91 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def preloaded_multi_sgd_mom_update(po: preloaded_multi_sgd_mom_updateParam) : Array[NDArray] + + /** + * + * {{{ + * + * Clips (limits) the values in an array. + * Given an interval, values outside the interval are clipped to the interval edges. + * Clipping ``x`` between `a_min` and `a_max` would be:: + * .. math:: + * clip(x, a_min, a_max) = \max(\min(x, a_max), a_min)) + * Example:: + * x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] + * clip(x,1,8) = [ 1., 1., 2., 3., 4., 5., 6., 7., 8., 8.] + * The storage type of ``clip`` output depends on storage types of inputs and the a_min, a_max \ + * parameter values: + * - clip(default) = default + * - clip(row_sparse, a_min <= 0, a_max >= 0) = row_sparse + * - clip(csr, a_min <= 0, a_max >= 0) = csr + * - clip(row_sparse, a_min < 0, a_max < 0) = default + * - clip(row_sparse, a_min > 0, a_max > 0) = default + * - clip(csr, a_min < 0, a_max < 0) = csr + * - clip(csr, a_min > 0, a_max > 0) = csr + * + * + * Defined in src/operator/tensor/matrix_op.cc:L677 + * }}} + * + * @param data Input array. + * @param a_min Minimum value + * @param a_max Maximum value + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def clip(data : org.apache.mxnet.javaapi.NDArray, a_min : java.lang.Float, a_max : java.lang.Float, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Upsamples the given input data. + * + * Two algorithms (``sample_type``) are available for upsampling: + * + * - Nearest Neighbor + * - Bilinear + * + * **Nearest Neighbor Upsampling** + * + * Input data is expected to be NCHW. + * + * Example:: + * + * x = `[ [`[ [1. 1. 1.] + * [1. 1. 1.] + * [1. 1. 1.] ] ] ] + * + * UpSampling(x, scale=2, sample_type='nearest') = `[ [`[ [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] + * [1. 1. 1. 1. 1. 1.] ] ] ] + * + * **Bilinear Upsampling** + * + * Uses `deconvolution` algorithm under the hood. You need provide both input data and the kernel. + * + * Input data is expected to be NCHW. + * + * `num_filter` is expected to be same as the number of channels. + * + * Example:: + * + * x = `[ [`[ [1. 1. 1.] + * [1. 1. 1.] + * [1. 1. 1.] ] ] ] + * + * w = `[ [`[ [1. 1. 1. 1.] + * [1. 1. 
1. 1.] + * [1. 1. 1. 1.] + * [1. 1. 1. 1.] ] ] ] + * + * UpSampling(x, w, scale=2, sample_type='bilinear', num_filter=1) = `[ [`[ [1. 2. 2. 2. 2. 1.] + * [2. 4. 4. 4. 4. 2.] + * [2. 4. 4. 4. 4. 2.] + * [2. 4. 4. 4. 4. 2.] + * [2. 4. 4. 4. 4. 2.] + * [1. 2. 2. 2. 2. 1.] ] ] ] + * + * + * Defined in src/operator/nn/upsampling.cc:L173 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def UpSampling(po: UpSamplingParam) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the element-wise tangent of the input array. + * + * The input should be in radians (:math:`2\pi` rad equals 360 degrees). + * + * .. math:: + * tan([0, \pi/4, \pi/2]) = [0, 1, -inf] + * + * The storage type of ``tan`` output depends upon the input storage type: + * + * - tan(default) = default + * - tan(row_sparse) = row_sparse + * - tan(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L140 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def tan(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns indices of the minimum values along an axis. + * + * In the case of multiple occurrences of minimum values, the indices corresponding to the first occurrence + * are returned. + * + * Examples:: + * + * x = `[ [ 0., 1., 2.], + * [ 3., 4., 5.] ] + * + * // argmin along axis 0 + * argmin(x, axis=0) = [ 0., 0., 0.] + * + * // argmin along axis 1 + * argmin(x, axis=1) = [ 0., 0.] + * + * // argmin along axis 1 keeping same dims as an input array + * argmin(x, axis=1, keepdims=True) = `[ [ 0.], + * [ 0.] ] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L77 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def argmin(po: argminParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise absolute value of the input. + * + * Example:: + * + * abs([-2, 0, 3]) = [2, 0, 3] + * + * The storage type of ``abs`` output depends upon the input storage type: + * + * - abs(default) = default + * - abs(row_sparse) = row_sparse + * - abs(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L721 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def abs(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Broadcasts the input array over particular axes. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. + * + * `broadcast_axes` is an alias to the function `broadcast_axis`. + * + * Example:: + * + * // given x of shape (1,2,1) + * x = `[ `[ [ 1.], + * [ 2.] ] ] + * + * // broadcast x on on axis 2 + * broadcast_axis(x, axis=2, size=3) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] ] + * // broadcast x on on axes 0 and 2 + * broadcast_axis(x, axis=(0,2), size=(2,3)) = `[ `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ], + * `[ [ 1., 1., 1.], + * [ 2., 2., 2.] ] ] + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L58 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def broadcast_axes(po: broadcast_axesParam) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the product of array elements over given axes treating Not a Numbers (``NaN``) as one. 
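+ *
+ * Illustrative example (each ``NaN`` is treated as one)::
+ *
+ *   x = `[ [1, 2], [3, NaN] ]
+ *
+ *   nanprod(x)         = 6.
+ *   nanprod(x, axis=1) = [2., 3.]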
+ * + * + * + * Defined in src/operator/tensor/broadcast_reduce_prod_value.cc:L47 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def nanprod(po: nanprodParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns the indices that would sort an input array along the given axis. + * + * This function performs sorting along the given axis and returns an array of indices having same shape + * as an input array that index data in sorted order. + * + * Examples:: + * + * x = `[ [ 0.3, 0.2, 0.4], + * [ 0.1, 0.3, 0.2] ] + * + * // sort along axis -1 + * argsort(x) = `[ [ 1., 0., 2.], + * [ 0., 2., 1.] ] + * + * // sort along axis 0 + * argsort(x, axis=0) = `[ [ 1., 0., 1.] + * [ 0., 1., 0.] ] + * + * // flatten and then sort + * argsort(x, axis=None) = [ 3., 1., 5., 0., 4., 2.] + * + * + * Defined in src/operator/tensor/ordering_op.cc:L183 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def argsort(po: argsortParam) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the value of the PDF of *sample* of + * gamma distributions with parameters *alpha* (shape) and *beta* (rate). + * + * *alpha* and *beta* must have the same shape, which must match the leftmost subshape + * of *sample*. That is, *sample* can have the same shape as *alpha* and *beta*, in which + * case the output contains one density per distribution, or *sample* can be a tensor + * of tensors with that shape, in which case the output is a tensor of densities such that + * the densities at index *i* in the output are given by the samples at index *i* in *sample* + * parameterized by the values of *alpha* and *beta* at index *i*. + * + * Examples:: + * + * random_pdf_gamma(sample=`[ [1,2,3,4,5] ], alpha=[5], beta=[1]) = + * `[ [0.01532831, 0.09022352, 0.16803136, 0.19536681, 0.17546739] ] + * + * sample = `[ [1, 2, 3, 4, 5], + * [2, 3, 4, 5, 6], + * [3, 4, 5, 6, 7] ] + * + * random_pdf_gamma(sample=sample, alpha=[5,6,7], beta=[1,1,1]) = + * `[ [0.01532831, 0.09022352, 0.16803136, 0.19536681, 0.17546739], + * [0.03608941, 0.10081882, 0.15629345, 0.17546739, 0.16062315], + * [0.05040941, 0.10419563, 0.14622283, 0.16062315, 0.14900276] ] + * + * + * Defined in src/operator/random/pdf_op.cc:L301 + * }}} + * + * @param sample Samples from the distributions. + * @param alpha Alpha (shape) parameters of the distributions. + * @param is_log If set, compute the density of the log-probability instead of the probability. + * @param beta Beta (scale) parameters of the distributions. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def random_pdf_gamma(sample : org.apache.mxnet.javaapi.NDArray, alpha : org.apache.mxnet.javaapi.NDArray, is_log : java.lang.Boolean, beta : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * exponential distributions with parameters lambda (rate). + * + * The parameters of the distributions are provided as an input array. + * Let *[s]* be the shape of the input array, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input array, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input value at index *i*. 
If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input array. + * + * Examples:: + * + * lam = [ 1.0, 8.5 ] + * + * // Draw a single sample for each distribution + * sample_exponential(lam) = [ 0.51837951, 0.09994757] + * + * // Draw a vector containing two samples for each distribution + * sample_exponential(lam, shape=(2)) = `[ [ 0.51837951, 0.19866663], + * [ 0.09994757, 0.50447971] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L283 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def sample_exponential(po: sample_exponentialParam) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the element-wise sine of the input array. + * + * The input should be in radians (:math:`2\pi` rad equals 360 degrees). + * + * .. math:: + * sin([0, \pi/4, \pi/2]) = [0, 0.707, 1] + * + * The storage type of ``sin`` output depends upon the input storage type: + * + * - sin(default) = default + * - sin(row_sparse) = row_sparse + * - sin(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_trig.cc:L47 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def sin(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Reverses the elements of each sequence. + * + * This function takes an n-dimensional input array of the form [max_sequence_length, batch_size, other_feature_dims] + * and returns an array of the same shape. + * + * Parameter `sequence_length` is used to handle variable-length sequences. + * `sequence_length` should be an input array of positive ints of dimension [batch_size]. + * To use this parameter, set `use_sequence_length` to `True`, + * otherwise each example in the batch is assumed to have the max sequence length. + * + * Example:: + * + * x = `[ `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // Batch 1 + * B1 = `[ [ 1., 2., 3.], + * [ 7., 8., 9.], + * [ 13., 14., 15.] ] + * + * // Batch 2 + * B2 = `[ [ 4., 5., 6.], + * [ 10., 11., 12.], + * [ 16., 17., 18.] ] + * + * // returns reverse sequence when sequence_length parameter is not used + * SequenceReverse(x) = `[ `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ], + * + * `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ] ] + * + * // sequence_length [2,2] means 2 rows of + * // both batch B1 and B2 will be reversed. + * SequenceReverse(x, sequence_length=[2,2], use_sequence_length=True) = + * `[ `[ [ 7., 8., 9.], + * [ 10., 11., 12.] ], + * + * `[ [ 1., 2., 3.], + * [ 4., 5., 6.] ], + * + * `[ [ 13., 14., 15.], + * [ 16., 17., 18.] ] ] + * + * // sequence_length [2,3] means 2 of batch B2 and 3 of batch B3 + * // will be reversed. + * SequenceReverse(x, sequence_length=[2,3], use_sequence_length=True) = + * `[ `[ [ 7., 8., 9.], + * [ 16., 17., 18.] ], + * + * `[ [ 1., 2., 3.], + * [ 10., 11., 12.] ], + * + * `[ [ 13., 14, 15.], + * [ 4., 5., 6.] ] ] + * + * + * + * Defined in src/operator/sequence_reverse.cc:L122 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def SequenceReverse(po: SequenceReverseParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns the result of element-wise **equal to** (==) comparison operation with broadcasting. 
+ * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_equal(x, y) = `[ [ 0., 0., 0.], + * [ 1., 1., 1.] ] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_logic.cc:L46 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def broadcast_equal(lhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Set to zero multiple arrays + * + * + * Defined in src/operator/contrib/reset_arrays.cc:L36 + * }}} + * + * @param data Arrays + * @param num_arrays number of input arrays. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def reset_arrays(data : Array[org.apache.mxnet.javaapi.NDArray], num_arrays : java.lang.Integer, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Return an array of ones with the same shape and type + * as the input array. + * + * Examples:: + * + * x = `[ [ 0., 0., 0.], + * [ 0., 0., 0.] ] + * + * ones_like(x) = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * }}} + * + * @param data The input + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def ones_like(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Slices a region of the array. + * .. note:: ``crop`` is deprecated. Use ``slice`` instead. + * This function returns a sliced array between the indices given + * by `begin` and `end` with the corresponding `step`. + * For an input array of ``shape=(d_0, d_1, ..., d_n-1)``, + * slice operation with ``begin=(b_0, b_1...b_m-1)``, + * ``end=(e_0, e_1, ..., e_m-1)``, and ``step=(s_0, s_1, ..., s_m-1)``, + * where m <= n, results in an array with the shape + * ``(|e_0-b_0|/|s_0|, ..., |e_m-1-b_m-1|/|s_m-1|, d_m, ..., d_n-1)``. + * The resulting array's *k*-th dimension contains elements + * from the *k*-th dimension of the input array starting + * from index ``b_k`` (inclusive) with step ``s_k`` + * until reaching ``e_k`` (exclusive). + * If the *k*-th elements are `None` in the sequence of `begin`, `end`, + * and `step`, the following rule will be used to set default values. + * If `s_k` is `None`, set `s_k=1`. If `s_k > 0`, set `b_k=0`, `e_k=d_k`; + * else, set `b_k=d_k-1`, `e_k=-1`. + * The storage type of ``slice`` output depends on storage types of inputs + * - slice(csr) = csr + * - otherwise, ``slice`` generates output with default storage + * .. note:: When input data storage type is csr, it only supports + * step=(), or step=(None,), or step=(1,) to generate a csr output. + * For other step parameter values, it falls back to slicing + * a dense tensor. + * Example:: + * x = `[ [ 1., 2., 3., 4.], + * [ 5., 6., 7., 8.], + * [ 9., 10., 11., 12.] ] + * slice(x, begin=(0,1), end=(2,4)) = `[ [ 2., 3., 4.], + * [ 6., 7., 8.] ] + * slice(x, begin=(None, 0), end=(None, 3), step=(-1, 2)) = `[ [9., 11.], + * [5., 7.], + * [1., 3.] ] + * + * + * Defined in src/operator/tensor/matrix_op.cc:L482 + * }}} + * + * @param data Source input + * @param begin starting indices for the slice operation, supports negative indices. + * @param end ending indices for the slice operation, supports negative indices. + * @param step step for the slice operation, supports negative values. 
+ * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def crop(data : org.apache.mxnet.javaapi.NDArray, begin : org.apache.mxnet.javaapi.Shape, end : org.apache.mxnet.javaapi.Shape, step : org.apache.mxnet.javaapi.Shape, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Broadcasts the input array to a new shape. + * + * Broadcasting is a mechanism that allows NDArrays to perform arithmetic operations + * with arrays of different shapes efficiently without creating multiple copies of arrays. + * Also see, `Broadcasting `_ for more explanation. + * + * Broadcasting is allowed on axes with size 1, such as from `(2,1,3,1)` to + * `(2,8,3,9)`. Elements will be duplicated on the broadcasted axes. + * + * For example:: + * + * broadcast_to(`[ [1,2,3] ], shape=(2,3)) = `[ [ 1., 2., 3.], + * [ 1., 2., 3.] ]) + * + * The dimension which you do not want to change can also be kept as `0` which means copy the original value. + * So with `shape=(2,0)`, we will obtain the same result as in the above example. + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_value.cc:L82 + * }}} + * + * @param data The input + * @param shape The shape of the desired array. We can set the dim to zero if it's same as the original. E.g `A = broadcast_to(B, shape=(10, 0, 0))` has the same meaning as `A = broadcast_axis(B, axis=0, size=10)`. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def broadcast_to(data : org.apache.mxnet.javaapi.NDArray, shape : org.apache.mxnet.javaapi.Shape, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Inserts a new axis of size 1 into the array shape + * For example, given ``x`` with shape ``(2,3,4)``, then ``expand_dims(x, axis=1)`` + * will return a new array with shape ``(2,1,3,4)``. + * + * + * Defined in src/operator/tensor/matrix_op.cc:L395 + * }}} + * + * @param data Source input + * @param axis Position where new axis is to be inserted. Suppose that the input `NDArray`'s dimension is `ndim`, the range of the inserted axis is `[-ndim, ndim]` + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def expand_dims(data : org.apache.mxnet.javaapi.NDArray, axis : java.lang.Integer, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Applies instance normalization to the n-dimensional input array. + * + * This operator takes an n-dimensional input array where (n>2) and normalizes + * the input using the following formula: + * + * .. math:: + * + * out = \frac{x - mean[data]}{ \sqrt{Var[data]} + \epsilon} * gamma + beta + * + * This layer is similar to batch normalization layer (`BatchNorm`) + * with two differences: first, the normalization is + * carried out per example (instance), not over a batch. Second, the + * same normalization is applied both at test and train time. This + * operation is also known as `contrast normalization`. + * + * If the input data is of shape [batch, channel, spacial_dim1, spacial_dim2, ...], + * `gamma` and `beta` parameters must be vectors of shape [channel]. + * + * This implementation is based on this paper [1]_ + * + * .. [1] Instance Normalization: The Missing Ingredient for Fast Stylization, + * D. Ulyanov, A. Vedaldi, V. Lempitsky, 2016 (arXiv:1607.08022v2). 
+ * + * Examples:: + * + * // Input of shape (2,1,2) + * x = `[ `[ [ 1.1, 2.2] ], + * `[ [ 3.3, 4.4] ] ] + * + * // gamma parameter of length 1 + * gamma = [1.5] + * + * // beta parameter of length 1 + * beta = [0.5] + * + * // Instance normalization is calculated with the above formula + * InstanceNorm(x,gamma,beta) = `[ `[ [-0.997527 , 1.99752665] ], + * `[ [-0.99752653, 1.99752724] ] ] + * + * + * + * Defined in src/operator/instance_norm.cc:L95 + * }}} + * + * @param data An n-dimensional input array (n > 2) of the form [batch, channel, spatial_dim1, spatial_dim2, ...]. + * @param gamma A vector of length 'channel', which multiplies the normalized input. + * @param beta A vector of length 'channel', which is added to the product of the normalized input and the weight. + * @param eps An `epsilon` parameter to prevent division by 0. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def InstanceNorm(data : org.apache.mxnet.javaapi.NDArray, gamma : org.apache.mxnet.javaapi.NDArray, beta : org.apache.mxnet.javaapi.NDArray, eps : java.lang.Float, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Adds all input arguments element-wise. + * + * .. math:: + * add\_n(a_1, a_2, ..., a_n) = a_1 + a_2 + ... + a_n + * + * ``add_n`` is potentially more efficient than calling ``add`` by `n` times. + * + * The storage type of ``add_n`` output depends on storage types of inputs + * + * - add_n(row_sparse, row_sparse, ..) = row_sparse + * - add_n(default, csr, default) = default + * - add_n(any input combinations longer than 4 (>4) with at least one default type) = default + * - otherwise, ``add_n`` falls all inputs back to default storage and generates default storage + * + * + * + * Defined in src/operator/tensor/elemwise_sum.cc:L155 + * }}} + * + * @param args Positional input arguments + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def add_n(args : Array[org.apache.mxnet.javaapi.NDArray], out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Momentum update function for Stochastic Gradient Descent (SGD) optimizer. + * + * Momentum update has better convergence rates on neural networks. Mathematically it looks + * like below: + * + * .. math:: + * + * v_1 = \alpha * \nabla J(W_0)\\ + * v_t = \gamma v_{t-1} - \alpha * \nabla J(W_{t-1})\\ + * W_t = W_{t-1} + v_t + * + * It updates the weights using:: + * + * v = momentum * v - learning_rate * gradient + * weight += v + * + * Where the parameter ``momentum`` is the decay rate of momentum estimates at each epoch. + * + * + * + * Defined in src/operator/optimizer_op.cc:L374 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def multi_sgd_mom_update(po: multi_sgd_mom_updateParam) : Array[NDArray] + + /** + * + * {{{ + * + * Update function for Stochastic Gradient Descent (SDG) optimizer. + * + * It updates the weights using:: + * + * weight = weight - learning_rate * (gradient + wd * weight) + * + * + * + * Defined in src/operator/optimizer_op.cc:L329 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def multi_sgd_update(po: multi_sgd_updateParam) : Array[NDArray] + + /** + * + * {{{ + * + * Draw random samples from a generalized negative binomial distribution. + * + * Samples are distributed according to a generalized negative binomial distribution parametrized by + * *mu* (mean) and *alpha* (dispersion). 
*alpha* is defined as *1/k* where *k* is the failure limit of the + * number of unsuccessful experiments (generalized to real numbers). + * Samples will always be returned as a floating point data type. + * + * Example:: + * + * generalized_negative_binomial(mu=2.0, alpha=0.3, shape=(2,2)) = `[ [ 2., 1.], + * [ 6., 4.] ] + * + * + * Defined in src/operator/random/sample_op.cc:L179 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def random_generalized_negative_binomial(po: random_generalized_negative_binomialParam) : Array[NDArray] + + /** + * + * {{{ + * + * Fill one element of each line(row for python, column for R/Julia) in lhs according to index indicated by rhs and values indicated by mhs. This function assume rhs uses 0-based index. + * }}} + * + * @param lhs Left operand to the function. + * @param mhs Middle operand to the function. + * @param rhs Right operand to the function. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def fill_element_0index(lhs : org.apache.mxnet.javaapi.NDArray, mhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Concurrent sampling from multiple + * uniform distributions on the intervals given by *[low,high)*. + * + * The parameters of the distributions are provided as input arrays. + * Let *[s]* be the shape of the input arrays, *n* be the dimension of *[s]*, *[t]* + * be the shape specified as the parameter of the operator, and *m* be the dimension + * of *[t]*. Then the output will be a *(n+m)*-dimensional array with shape *[s]x[t]*. + * + * For any valid *n*-dimensional index *i* with respect to the input arrays, *output[i]* + * will be an *m*-dimensional array that holds randomly drawn samples from the distribution + * which is parameterized by the input values at index *i*. If the shape parameter of the + * operator is not set, then one sample will be drawn per distribution and the output array + * has the same shape as the input arrays. + * + * Examples:: + * + * low = [ 0.0, 2.5 ] + * high = [ 1.0, 3.7 ] + * + * // Draw a single sample for each distribution + * sample_uniform(low, high) = [ 0.40451524, 3.18687344] + * + * // Draw a vector containing two samples for each distribution + * sample_uniform(low, high, shape=(2)) = `[ [ 0.40451524, 0.18017688], + * [ 3.18687344, 3.68352246] ] + * + * + * Defined in src/operator/random/multisample_op.cc:L276 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def sample_uniform(po: sample_uniformParam) : Array[NDArray] + + /** + * + * {{{ + * + * Computes support vector machine based transformation of the input. + * + * This tutorial demonstrates using SVM as output layer for classification instead of softmax: + * https://github.com/dmlc/mxnet/tree/master/example/svm_mnist. + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def SVMOutput(po: SVMOutputParam) : Array[NDArray] + + /** + * + * {{{ + * + * Computes the sum of array elements over given axes. + * + * .. Note:: + * + * `sum` and `sum_axis` are equivalent. + * For ndarray of csr storage type summation along axis 0 and axis 1 is supported. + * Setting keepdims or exclude to True will cause a fallback to dense operator. + * + * Example:: + * + * data = `[ `[ [1, 2], [2, 3], [1, 3] ], + * `[ [1, 4], [4, 3], [5, 2] ], + * `[ [7, 1], [7, 2], [7, 3] ] ] + * + * sum(data, axis=1) + * `[ [ 4. 8.] + * [ 10. 9.] + * [ 21. 6.] 
] + * + * sum(data, axis=[1,2]) + * [ 12. 19. 27.] + * + * data = `[ [1, 2, 0], + * [3, 0, 1], + * [4, 1, 0] ] + * + * csr = cast_storage(data, 'csr') + * + * sum(csr, axis=0) + * [ 8. 3. 1.] + * + * sum(csr, axis=1) + * [ 3. 4. 5.] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_sum_value.cc:L67 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def sum(po: sumParam) : Array[NDArray] + + /** + * + * {{{ + * + * Connectionist Temporal Classification Loss. + * + * .. note:: The existing alias ``contrib_CTCLoss`` is deprecated. + * + * The shapes of the inputs and outputs: + * + * - **data**: `(sequence_length, batch_size, alphabet_size)` + * - **label**: `(batch_size, label_sequence_length)` + * - **out**: `(batch_size)` + * + * The `data` tensor consists of sequences of activation vectors (without applying softmax), + * with i-th channel in the last dimension corresponding to i-th label + * for i between 0 and alphabet_size-1 (i.e always 0-indexed). + * Alphabet size should include one additional value reserved for blank label. + * When `blank_label` is ``"first"``, the ``0``-th channel is be reserved for + * activation of blank label, or otherwise if it is "last", ``(alphabet_size-1)``-th channel should be + * reserved for blank label. + * + * ``label`` is an index matrix of integers. When `blank_label` is ``"first"``, + * the value 0 is then reserved for blank label, and should not be passed in this matrix. Otherwise, + * when `blank_label` is ``"last"``, the value `(alphabet_size-1)` is reserved for blank label. + * + * If a sequence of labels is shorter than *label_sequence_length*, use the special + * padding value at the end of the sequence to conform it to the correct + * length. The padding value is `0` when `blank_label` is ``"first"``, and `-1` otherwise. + * + * For example, suppose the vocabulary is `[a, b, c]`, and in one batch we have three sequences + * 'ba', 'cbb', and 'abac'. When `blank_label` is ``"first"``, we can index the labels as + * `{'a': 1, 'b': 2, 'c': 3}`, and we reserve the 0-th channel for blank label in data tensor. + * The resulting `label` tensor should be padded to be:: + * + * `[ [2, 1, 0, 0], [3, 2, 2, 0], [1, 2, 1, 3] ] + * + * When `blank_label` is ``"last"``, we can index the labels as + * `{'a': 0, 'b': 1, 'c': 2}`, and we reserve the channel index 3 for blank label in data tensor. + * The resulting `label` tensor should be padded to be:: + * + * `[ [1, 0, -1, -1], [2, 1, 1, -1], [0, 1, 0, 2] ] + * + * ``out`` is a list of CTC loss values, one per example in the batch. + * + * See *Connectionist Temporal Classification: Labelling Unsegmented + * Sequence Data with Recurrent Neural Networks*, A. Graves *et al*. for more + * information on the definition and the algorithm. + * + * + * + * Defined in src/operator/nn/ctc_loss.cc:L100 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def ctc_loss(po: ctc_lossParam) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise maximum of the input arrays with broadcasting. + * + * This function compares two input arrays and returns a new array having the element-wise maxima. + * + * Example:: + * + * x = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] ] + * + * y = `[ [ 0.], + * [ 1.] ] + * + * broadcast_maximum(x, y) = `[ [ 1., 1., 1.], + * [ 1., 1., 1.] 
] + * + * + * + * Defined in src/operator/tensor/elemwise_binary_broadcast_op_extended.cc:L81 + * }}} + * + * @param lhs First input to the function + * @param rhs Second input to the function + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def broadcast_maximum(lhs : org.apache.mxnet.javaapi.NDArray, rhs : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns element-wise Natural logarithmic value of the input. + * + * The natural logarithm is logarithm in base *e*, so that ``log(exp(x)) = x`` + * + * The storage type of ``log`` output is always dense + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_logexp.cc:L76 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def log(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Applies a linear transformation: :math:`Y = XW^T + b`. + * + * If ``flatten`` is set to be true, then the shapes are: + * + * - **data**: `(batch_size, x1, x2, ..., xn)` + * - **weight**: `(num_hidden, x1 * x2 * ... * xn)` + * - **bias**: `(num_hidden,)` + * - **out**: `(batch_size, num_hidden)` + * + * If ``flatten`` is set to be false, then the shapes are: + * + * - **data**: `(x1, x2, ..., xn, input_dim)` + * - **weight**: `(num_hidden, input_dim)` + * - **bias**: `(num_hidden,)` + * - **out**: `(x1, x2, ..., xn, num_hidden)` + * + * The learnable parameters include both ``weight`` and ``bias``. + * + * If ``no_bias`` is set to be true, then the ``bias`` term is ignored. + * + * .. Note:: + * + * The sparse support for FullyConnected is limited to forward evaluation with `row_sparse` + * weight and bias, where the length of `weight.indices` and `bias.indices` must be equal + * to `num_hidden`. This could be useful for model inference with `row_sparse` weights + * trained with importance sampling or noise contrastive estimation. + * + * To compute linear transformation with 'csr' sparse data, sparse.dot is recommended instead + * of sparse.FullyConnected. + * + * + * + * Defined in src/operator/nn/fully_connected.cc:L291 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def FullyConnected(po: FullyConnectedParam) : Array[NDArray] + + /** + * + * {{{ + * + * Concurrent sampling from multiple multinomial distributions. + * + * *data* is an *n* dimensional array whose last dimension has length *k*, where + * *k* is the number of possible outcomes of each multinomial distribution. This + * operator will draw *shape* samples from each distribution. If shape is empty + * one sample will be drawn from each distribution. + * + * If *get_prob* is true, a second array containing log likelihood of the drawn + * samples will also be returned. This is usually used for reinforcement learning + * where you can provide reward as head gradient for this array to estimate + * gradient. + * + * Note that the input distribution must be normalized, i.e. *data* must sum to + * 1 along its last axis. 
+ * + * Examples:: + * + * probs = `[ [0, 0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1, 0] ] + * + * // Draw a single sample for each distribution + * sample_multinomial(probs) = [3, 0] + * + * // Draw a vector containing two samples for each distribution + * sample_multinomial(probs, shape=(2)) = `[ [4, 2], + * [0, 0] ] + * + * // requests log likelihood + * sample_multinomial(probs, get_prob=True) = [2, 1], [0.2, 0.3] + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def sample_multinomial(po: sample_multinomialParam) : Array[NDArray] + + /** + * + * {{{ + * + * Return the element-wise truncated value of the input. + * + * The truncated value of the scalar x is the nearest integer i which is closer to + * zero than x is. In short, the fractional part of the signed number x is discarded. + * + * Example:: + * + * trunc([-2.1, -1.9, 1.5, 1.9, 2.1]) = [-2., -1., 1., 1., 2.] + * + * The storage type of ``trunc`` output depends upon the input storage type: + * + * - trunc(default) = default + * - trunc(row_sparse) = row_sparse + * - trunc(csr) = csr + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L857 + * }}} + * + * @param data The input array. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def trunc(data : org.apache.mxnet.javaapi.NDArray, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Returns indices of the maximum values along an axis. + * + * In the case of multiple occurrences of maximum values, the indices corresponding to the first occurrence + * are returned. + * + * Examples:: + * + * x = `[ [ 0., 1., 2.], + * [ 3., 4., 5.] ] + * + * // argmax along axis 0 + * argmax(x, axis=0) = [ 1., 1., 1.] + * + * // argmax along axis 1 + * argmax(x, axis=1) = [ 2., 2.] + * + * // argmax along axis 1 keeping same dims as an input array + * argmax(x, axis=1, keepdims=True) = `[ [ 2.], + * [ 2.] ] + * + * + * + * Defined in src/operator/tensor/broadcast_reduce_op_index.cc:L52 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def argmax(po: argmaxParam) : Array[NDArray] + + /** + * + * {{{ + * + * Reshape some or all dimensions of `lhs` to have the same shape as some or all dimensions of `rhs`. + * + * Returns a **view** of the `lhs` array with a new shape without altering any data. + * + * Example:: + * + * x = [1, 2, 3, 4, 5, 6] + * y = `[ [0, -4], [3, 2], [2, 2] ] + * reshape_like(x, y) = `[ [1, 2], [3, 4], [5, 6] ] + * + * More precise control over how dimensions are inherited is achieved by specifying \ + * slices over the `lhs` and `rhs` array dimensions. Only the sliced `lhs` dimensions \ + * are reshaped to the `rhs` sliced dimensions, with the non-sliced `lhs` dimensions staying the same. + * + * Examples:: + * + * - lhs shape = (30,7), rhs shape = (15,2,4), lhs_begin=0, lhs_end=1, rhs_begin=0, rhs_end=2, output shape = (15,2,7) + * - lhs shape = (3, 5), rhs shape = (1,15,4), lhs_begin=0, lhs_end=2, rhs_begin=1, rhs_end=2, output shape = (15) + * + * Negative indices are supported, and `None` can be used for either `lhs_end` or `rhs_end` to indicate the end of the range. 
+ * + * Example:: + * + * - lhs shape = (30, 12), rhs shape = (4, 2, 2, 3), lhs_begin=-1, lhs_end=None, rhs_begin=1, rhs_end=None, output shape = (30, 2, 2, 3) + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L513 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def reshape_like(po: reshape_likeParam) : Array[NDArray] + + /** + * + * {{{ + * + * Computes mean absolute error of the input. + * + * MAE is a risk metric corresponding to the expected value of the absolute error. + * + * If :math:`\hat{y}_i` is the predicted value of the i-th sample, and :math:`y_i` is the corresponding target value, + * then the mean absolute error (MAE) estimated over :math:`n` samples is defined as + * + * :math:`\text{MAE}(\textbf{Y}, \hat{\textbf{Y}} ) = \frac{1}{n} \sum_{i=0}^{n-1} \lVert \textbf{y}_i - \hat{\textbf{y}}_i \rVert_1` + * + * .. note:: + * Use the MAERegressionOutput as the final output layer of a net. + * + * The storage type of ``label`` can be ``default`` or ``csr`` + * + * - MAERegressionOutput(default, default) = default + * - MAERegressionOutput(default, csr) = default + * + * By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. + * The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. + * + * + * + * Defined in src/operator/regression_output.cc:L120 + * }}} + * + * @param data Input data to the function. + * @param label Input label to the function. + * @param grad_scale Scale the gradient by a float factor + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def MAERegressionOutput(data : org.apache.mxnet.javaapi.NDArray, label : org.apache.mxnet.javaapi.NDArray, grad_scale : java.lang.Float, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Computes hard sigmoid of x element-wise. + * + * .. math:: + * y = max(0, min(1, alpha * x + beta)) + * + * + * + * Defined in src/operator/tensor/elemwise_unary_op_basic.cc:L161 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def hard_sigmoid(po: hard_sigmoidParam) : Array[NDArray] + + /** + * + * {{{ + * + * Applies bilinear sampling to input feature map. + * + * Bilinear Sampling is the key of [NIPS2015] \"Spatial Transformer Networks\". The usage of the operator is very similar to remap function in OpenCV, + * except that the operator has the backward pass. + * + * Given :math:`data` and :math:`grid`, then the output is computed by + * + * .. math:: + * x_{src} = grid[batch, 0, y_{dst}, x_{dst}] \\ + * y_{src} = grid[batch, 1, y_{dst}, x_{dst}] \\ + * output[batch, channel, y_{dst}, x_{dst}] = G(data[batch, channel, y_{src}, x_{src}) + * + * :math:`x_{dst}`, :math:`y_{dst}` enumerate all spatial locations in :math:`output`, and :math:`G()` denotes the bilinear interpolation kernel. + * The out-boundary points will be padded with zeros.The shape of the output will be (data.shape[0], data.shape[1], grid.shape[2], grid.shape[3]). + * + * The operator assumes that :math:`data` has 'NCHW' layout and :math:`grid` has been normalized to [-1, 1]. + * + * BilinearSampler often cooperates with GridGenerator which generates sampling grids for BilinearSampler. + * GridGenerator supports two kinds of transformation: ``affine`` and ``warp``. + * If users want to design a CustomOp to manipulate :math:`grid`, please firstly refer to the code of GridGenerator. 
+ * + * Example 1:: + * + * ## Zoom out data two times + * data = array(`[ [`[ [1, 4, 3, 6], + * [1, 8, 8, 9], + * [0, 4, 1, 5], + * [1, 0, 1, 3] ] ] ]) + * + * affine_matrix = array(`[ [2, 0, 0], + * [0, 2, 0] ]) + * + * affine_matrix = reshape(affine_matrix, shape=(1, 6)) + * + * grid = GridGenerator(data=affine_matrix, transform_type='affine', target_shape=(4, 4)) + * + * out = BilinearSampler(data, grid) + * + * out + * `[ [`[ [ 0, 0, 0, 0], + * [ 0, 3.5, 6.5, 0], + * [ 0, 1.25, 2.5, 0], + * [ 0, 0, 0, 0] ] ] + * + * + * Example 2:: + * + * ## shift data horizontally by -1 pixel + * + * data = array(`[ [`[ [1, 4, 3, 6], + * [1, 8, 8, 9], + * [0, 4, 1, 5], + * [1, 0, 1, 3] ] ] ]) + * + * warp_maxtrix = array(`[ [`[ [1, 1, 1, 1], + * [1, 1, 1, 1], + * [1, 1, 1, 1], + * [1, 1, 1, 1] ], + * `[ [0, 0, 0, 0], + * [0, 0, 0, 0], + * [0, 0, 0, 0], + * [0, 0, 0, 0] ] ] ]) + * + * grid = GridGenerator(data=warp_matrix, transform_type='warp') + * out = BilinearSampler(data, grid) + * + * out + * `[ [`[ [ 4, 3, 6, 0], + * [ 8, 8, 9, 0], + * [ 4, 1, 5, 0], + * [ 0, 1, 3, 0] ] ] + * + * + * Defined in src/operator/bilinear_sampler.cc:L256 + * }}} + * + * @param data Input data to the BilinearsamplerOp. + * @param grid Input grid to the BilinearsamplerOp.grid has two channels: x_src, y_src + * @param cudnn_off whether to turn cudnn off + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def BilinearSampler(data : org.apache.mxnet.javaapi.NDArray, grid : org.apache.mxnet.javaapi.NDArray, cudnn_off : java.lang.Boolean, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Make your own loss function in network construction. + * + * This operator accepts a customized loss function symbol as a terminal loss and + * the symbol should be an operator with no backward dependency. + * The output of this function is the gradient of loss with respect to the input data. + * + * For example, if you are a making a cross entropy loss function. Assume ``out`` is the + * predicted output and ``label`` is the true label, then the cross entropy can be defined as:: + * + * cross_entropy = label * log(out) + (1 - label) * log(1 - out) + * loss = MakeLoss(cross_entropy) + * + * We will need to use ``MakeLoss`` when we are creating our own loss function or we want to + * combine multiple loss functions. Also we may want to stop some variables' gradients + * from backpropagation. See more detail in ``BlockGrad`` or ``stop_gradient``. + * + * In addition, we can give a scale to the loss by setting ``grad_scale``, + * so that the gradient of the loss will be rescaled in the backpropagation. + * + * .. note:: This operator should be used as a Symbol instead of NDArray. + * + * + * + * Defined in src/operator/make_loss.cc:L71 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def MakeLoss(po: MakeLossParam) : Array[NDArray] + + /** + * + * {{{ + * + * Cast function between low precision float/FP32 used by AMP. + * + * It casts only between low precision float/FP32 and does not do anything for other types. + * + * + * Defined in src/operator/tensor/amp_cast.cc:L37 + * }}} + * + * @param data The input. + * @param dtype Output data type. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def amp_cast(data : org.apache.mxnet.javaapi.NDArray, dtype : String, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Repeats the whole array multiple times. + * If ``reps`` has length *d*, and input array has dimension of *n*. 
There are + * three cases: + * - **n=d**. Repeat *i*-th dimension of the input by ``reps[i]`` times:: + * x = `[ [1, 2], + * [3, 4] ] + * tile(x, reps=(2,3)) = `[ [ 1., 2., 1., 2., 1., 2.], + * [ 3., 4., 3., 4., 3., 4.], + * [ 1., 2., 1., 2., 1., 2.], + * [ 3., 4., 3., 4., 3., 4.] ] + * - **n>d**. ``reps`` is promoted to length *n* by pre-pending 1's to it. Thus for + * an input shape ``(2,3)``, ``repos=(2,)`` is treated as ``(1,2)``:: + * tile(x, reps=(2,)) = `[ [ 1., 2., 1., 2.], + * [ 3., 4., 3., 4.] ] + * - **n d, reps is promoted to a.ndim by pre-pending 1's to it. + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ +@Experimental + def tile(data : org.apache.mxnet.javaapi.NDArray, reps : org.apache.mxnet.javaapi.Shape, out : NDArray) : Array[NDArray] + + /** + * + * {{{ + * + * Draw random samples from a normal (Gaussian) distribution. + * + * .. note:: The existing alias ``normal`` is deprecated. + * + * Samples are distributed according to a normal distribution parametrized by *loc* (mean) and *scale* + * (standard deviation). + * + * Example:: + * + * normal(loc=0, scale=1, shape=(2,2)) = `[ [ 1.89171135, -1.16881478], + * [-1.23474145, 1.55807114] ] + * + * + * Defined in src/operator/random/sample_op.cc:L113 + * }}} + * + * @return Array[org.apache.mxnet.javaapi.NDArray] + */ + @Experimental + def random_normal(po: random_normalParam) : Array[NDArray] + +} +/** + * This Param Object is specifically used for RNN + * @param data Input data to RNN + * @param parameters Vector of all RNN trainable parameters concatenated + * @param state initial hidden state of the RNN + * @param state_cell initial cell state for LSTM networks (only for LSTM) + * @param sequence_length Vector of valid sequence lengths for each element in batch. (Only used if use_sequence_length kwarg is True) + * @param state_size size of the state for each layer + * @param num_layers number of stacked layers + * @param mode the type of RNN to compute + */ + class RNNParam(data : org.apache.mxnet.javaapi.NDArray,parameters : org.apache.mxnet.javaapi.NDArray,state : org.apache.mxnet.javaapi.NDArray,state_cell : org.apache.mxnet.javaapi.NDArray,sequence_length : org.apache.mxnet.javaapi.NDArray,state_size : java.lang.Integer,num_layers : java.lang.Integer,mode : String) { + def getData() = this.data + def getParameters() = this.parameters + def getState() = this.state + def getState_cell() = this.state_cell + def getSequence_length() = this.sequence_length + def getState_size() = this.state_size + def getNum_layers() = this.num_layers + private var bidirectional: java.lang.Boolean = null +/** + * @param bidirectional whether to use bidirectional recurrent layers + */ +def setBidirectional(bidirectional : java.lang.Boolean): RNNParam = { + this.bidirectional = bidirectional + this + } + def getBidirectional() = this.bidirectional + def getMode() = this.mode + private var p: java.lang.Float = null +/** + * @param p drop rate of the dropout on the outputs of each RNN layer, except the last layer. + */ +def setP(p : java.lang.Float): RNNParam = { + this.p = p + this + } + def getP() = this.p + private var state_outputs: java.lang.Boolean = null +/** + * @param state_outputs Whether to have the states as symbol outputs. 
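+ *
+ * A minimal, hypothetical sketch of how this setter fits into the builder chain
+ * (the NDArray arguments below are placeholders, and the generated `RNN(po)`
+ * companion method is assumed rather than shown in this class):
+ * {{{
+ * val po = new RNNParam(data, parameters, state, stateCell, seqLen,
+ *                       256,       // state_size
+ *                       2,         // num_layers
+ *                       "lstm")    // mode
+ *   .setBidirectional(true)
+ *   .setP(0.2f)
+ *   .setState_outputs(true)   // also emit the final hidden/cell states
+ * // `po` would then be passed to the corresponding generated RNN(...) method
+ * }}}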
+ */ +def setState_outputs(state_outputs : java.lang.Boolean): RNNParam = { + this.state_outputs = state_outputs + this + } + def getState_outputs() = this.state_outputs + private var projection_size: java.lang.Integer = null +/** + * @param projection_size size of project size + */ +def setProjection_size(projection_size : java.lang.Integer): RNNParam = { + this.projection_size = projection_size + this + } + def getProjection_size() = this.projection_size + private var lstm_state_clip_min: java.lang.Double = null +/** + * @param lstm_state_clip_min Minimum clip value of LSTM states. This option must be used together with lstm_state_clip_max. + */ +def setLstm_state_clip_min(lstm_state_clip_min : java.lang.Double): RNNParam = { + this.lstm_state_clip_min = lstm_state_clip_min + this + } + def getLstm_state_clip_min() = this.lstm_state_clip_min + private var lstm_state_clip_max: java.lang.Double = null +/** + * @param lstm_state_clip_max Maximum clip value of LSTM states. This option must be used together with lstm_state_clip_min. + */ +def setLstm_state_clip_max(lstm_state_clip_max : java.lang.Double): RNNParam = { + this.lstm_state_clip_max = lstm_state_clip_max + this + } + def getLstm_state_clip_max() = this.lstm_state_clip_max + private var lstm_state_clip_nan: java.lang.Boolean = null +/** + * @param lstm_state_clip_nan Whether to stop NaN from propagating in state by clipping it to min/max. If clipping range is not specified, this option is ignored. + */ +def setLstm_state_clip_nan(lstm_state_clip_nan : java.lang.Boolean): RNNParam = { + this.lstm_state_clip_nan = lstm_state_clip_nan + this + } + def getLstm_state_clip_nan() = this.lstm_state_clip_nan + private var use_sequence_length: java.lang.Boolean = null +/** + * @param use_sequence_length If set to true, this layer takes in an extra input parameter `sequence_length` to specify variable length sequence + */ +def setUse_sequence_length(use_sequence_length : java.lang.Boolean): RNNParam = { + this.use_sequence_length = use_sequence_length + this + } + def getUse_sequence_length() = this.use_sequence_length + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : RNNParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for linalg_trmm + * @param A Tensor of lower triangular matrices + * @param B Tensor of matrices + */ + class linalg_trmmParam(A : org.apache.mxnet.javaapi.NDArray,B : org.apache.mxnet.javaapi.NDArray) { + def getA() = this.A + def getB() = this.B + private var transpose: java.lang.Boolean = null +/** + * @param transpose Use transposed of the triangular matrix + */ +def setTranspose(transpose : java.lang.Boolean): linalg_trmmParam = { + this.transpose = transpose + this + } + def getTranspose() = this.transpose + private var rightside: java.lang.Boolean = null +/** + * @param rightside Multiply triangular matrix from the right to non-triangular one. + */ +def setRightside(rightside : java.lang.Boolean): linalg_trmmParam = { + this.rightside = rightside + this + } + def getRightside() = this.rightside + private var lower: java.lang.Boolean = null +/** + * @param lower True if the triangular matrix is lower triangular, false if it is upper triangular. + */ +def setLower(lower : java.lang.Boolean): linalg_trmmParam = { + this.lower = lower + this + } + def getLower() = this.lower + private var alpha: java.lang.Double = null +/** + * @param alpha Scalar factor to be applied to the result. 
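+ *
+ * Hedged usage sketch: `A` (a lower-triangular matrix) and `B` are placeholder
+ * NDArrays, and the generated `linalg_trmm(po)` companion method is assumed.
+ * The configuration below corresponds roughly to `out = 2 * A * B`:
+ * {{{
+ * val po = new linalg_trmmParam(A, B)
+ *   .setTranspose(false)
+ *   .setRightside(false)   // multiply the triangular matrix from the left
+ *   .setLower(true)
+ *   .setAlpha(2.0)
+ * }}}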
+ */ +def setAlpha(alpha : java.lang.Double): linalg_trmmParam = { + this.alpha = alpha + this + } + def getAlpha() = this.alpha + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : linalg_trmmParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for L2Normalization + * @param data Input array to normalize. + */ + class L2NormalizationParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var eps: java.lang.Float = null +/** + * @param eps A small constant for numerical stability. + */ +def setEps(eps : java.lang.Float): L2NormalizationParam = { + this.eps = eps + this + } + def getEps() = this.eps + private var mode: String = null +/** + * @param mode Specify the dimension along which to compute L2 norm. + */ +def setMode(mode : String): L2NormalizationParam = { + this.mode = mode + this + } + def getMode() = this.mode + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : L2NormalizationParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for dot + * @param lhs The first input + * @param rhs The second input + */ + class dotParam(lhs : org.apache.mxnet.javaapi.NDArray,rhs : org.apache.mxnet.javaapi.NDArray) { + def getLhs() = this.lhs + def getRhs() = this.rhs + private var transpose_a: java.lang.Boolean = null +/** + * @param transpose_a If true then transpose the first input before dot. + */ +def setTranspose_a(transpose_a : java.lang.Boolean): dotParam = { + this.transpose_a = transpose_a + this + } + def getTranspose_a() = this.transpose_a + private var transpose_b: java.lang.Boolean = null +/** + * @param transpose_b If true then transpose the second input before dot. + */ +def setTranspose_b(transpose_b : java.lang.Boolean): dotParam = { + this.transpose_b = transpose_b + this + } + def getTranspose_b() = this.transpose_b + private var forward_stype: String = null +/** + * @param forward_stype The desired storage type of the forward output given by user, if thecombination of input storage types and this hint does not matchany implemented ones, the dot operator will perform fallback operationand still produce an output of the desired storage type. + */ +def setForward_stype(forward_stype : String): dotParam = { + this.forward_stype = forward_stype + this + } + def getForward_stype() = this.forward_stype + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : dotParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for softmax + * @param data The input array. + * @param length The length array. + */ + class softmaxParam(data : org.apache.mxnet.javaapi.NDArray,length : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + def getLength() = this.length + private var axis: java.lang.Integer = null +/** + * @param axis The axis along which to compute softmax. + */ +def setAxis(axis : java.lang.Integer): softmaxParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var temperature: java.lang.Double = null +/** + * @param temperature Temperature parameter in softmax + */ +def setTemperature(temperature : java.lang.Double): softmaxParam = { + this.temperature = temperature + this + } + def getTemperature() = this.temperature + private var dtype: String = null +/** + * @param dtype DType of the output in case this can't be inferred. 
Defaults to the same as input's dtype if not defined (dtype=None). + */ +def setDtype(dtype : String): softmaxParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + private var use_length: java.lang.Boolean = null +/** + * @param use_length Whether to use the length input as a mask over the data input. + */ +def setUse_length(use_length : java.lang.Boolean): softmaxParam = { + this.use_length = use_length + this + } + def getUse_length() = this.use_length + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : softmaxParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for linalg_syrk + * @param A Tensor of input matrices + */ + class linalg_syrkParam(A : org.apache.mxnet.javaapi.NDArray) { + def getA() = this.A + private var transpose: java.lang.Boolean = null +/** + * @param transpose Use transpose of input matrix. + */ +def setTranspose(transpose : java.lang.Boolean): linalg_syrkParam = { + this.transpose = transpose + this + } + def getTranspose() = this.transpose + private var alpha: java.lang.Double = null +/** + * @param alpha Scalar factor to be applied to the result. + */ +def setAlpha(alpha : java.lang.Double): linalg_syrkParam = { + this.alpha = alpha + this + } + def getAlpha() = this.alpha + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : linalg_syrkParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for signsgd_update + * @param weight Weight + * @param grad Gradient + * @param lr Learning rate + */ + class signsgd_updateParam(weight : org.apache.mxnet.javaapi.NDArray,grad : org.apache.mxnet.javaapi.NDArray,lr : java.lang.Float) { + def getWeight() = this.weight + def getGrad() = this.grad + def getLr() = this.lr + private var wd: java.lang.Float = null +/** + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + */ +def setWd(wd : java.lang.Float): signsgd_updateParam = { + this.wd = wd + this + } + def getWd() = this.wd + private var rescale_grad: java.lang.Float = null +/** + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + */ +def setRescale_grad(rescale_grad : java.lang.Float): signsgd_updateParam = { + this.rescale_grad = rescale_grad + this + } + def getRescale_grad() = this.rescale_grad + private var clip_gradient: java.lang.Float = null +/** + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + */ +def setClip_gradient(clip_gradient : java.lang.Float): signsgd_updateParam = { + this.clip_gradient = clip_gradient + this + } + def getClip_gradient() = this.clip_gradient + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : signsgd_updateParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for broadcast_axis + * @param data The input + */ + class broadcast_axisParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var axis: org.apache.mxnet.javaapi.Shape = null +/** + * @param axis The axes to perform the broadcasting. 
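+ *
+ * Hedged sketch of broadcasting a (1, 3) input up to (4, 3): `data` is a
+ * placeholder NDArray, `axes` and `sizes` are placeholder javaapi.Shape values
+ * (axes = (0), sizes = (4)), and the generated `broadcast_axis(po)` companion
+ * method is assumed:
+ * {{{
+ * val po = new broadcast_axisParam(data)
+ *   .setAxis(axes)    // broadcast along axis 0 ...
+ *   .setSize(sizes)   // ... up to size 4
+ * }}}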
+ */ +def setAxis(axis : org.apache.mxnet.javaapi.Shape): broadcast_axisParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var size: org.apache.mxnet.javaapi.Shape = null +/** + * @param size Target sizes of the broadcasting axes. + */ +def setSize(size : org.apache.mxnet.javaapi.Shape): broadcast_axisParam = { + this.size = size + this + } + def getSize() = this.size + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : broadcast_axisParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for CTCLoss + * @param data Input ndarray + * @param label Ground-truth labels for the loss. + * @param data_lengths Lengths of data for each of the samples. Only required when use_data_lengths is true. + * @param label_lengths Lengths of labels for each of the samples. Only required when use_label_lengths is true. + */ + class CTCLossParam(data : org.apache.mxnet.javaapi.NDArray,label : org.apache.mxnet.javaapi.NDArray,data_lengths : org.apache.mxnet.javaapi.NDArray,label_lengths : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + def getLabel() = this.label + def getData_lengths() = this.data_lengths + def getLabel_lengths() = this.label_lengths + private var use_data_lengths: java.lang.Boolean = null +/** + * @param use_data_lengths Whether the data lenghts are decided by `data_lengths`. If false, the lengths are equal to the max sequence length. + */ +def setUse_data_lengths(use_data_lengths : java.lang.Boolean): CTCLossParam = { + this.use_data_lengths = use_data_lengths + this + } + def getUse_data_lengths() = this.use_data_lengths + private var use_label_lengths: java.lang.Boolean = null +/** + * @param use_label_lengths Whether the label lenghts are decided by `label_lengths`, or derived from `padding_mask`. If false, the lengths are derived from the first occurrence of the value of `padding_mask`. The value of `padding_mask` is ``0`` when first CTC label is reserved for blank, and ``-1`` when last label is reserved for blank. See `blank_label`. + */ +def setUse_label_lengths(use_label_lengths : java.lang.Boolean): CTCLossParam = { + this.use_label_lengths = use_label_lengths + this + } + def getUse_label_lengths() = this.use_label_lengths + private var blank_label: String = null +/** + * @param blank_label Set the label that is reserved for blank label.If "first", 0-th label is reserved, and label values for tokens in the vocabulary are between ``1`` and ``alphabet_size-1``, and the padding mask is ``-1``. If "last", last label value ``alphabet_size-1`` is reserved for blank label instead, and label values for tokens in the vocabulary are between ``0`` and ``alphabet_size-2``, and the padding mask is ``0``. + */ +def setBlank_label(blank_label : String): CTCLossParam = { + this.blank_label = blank_label + this + } + def getBlank_label() = this.blank_label + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : CTCLossParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for SliceChannel + * @param data The input + * @param num_outputs Number of splits. Note that this should evenly divide the length of the `axis`. + */ + class SliceChannelParam(data : org.apache.mxnet.javaapi.NDArray,num_outputs : java.lang.Integer) { + def getData() = this.data + def getNum_outputs() = this.num_outputs + private var axis: java.lang.Integer = null +/** + * @param axis Axis along which to split. 
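+ *
+ * Hedged sketch: splitting a (batch, 6, w) placeholder NDArray `data` into three
+ * (batch, 2, w) outputs; the generated `SliceChannel(po)` companion method is assumed:
+ * {{{
+ * val po = new SliceChannelParam(data, 3)   // num_outputs = 3 must evenly divide shape(axis)
+ *   .setAxis(1)
+ *   .setSqueeze_axis(false)   // split axis keeps length 2 here, so it cannot be squeezed
+ * }}}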
+ */ +def setAxis(axis : java.lang.Integer): SliceChannelParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var squeeze_axis: java.lang.Boolean = null +/** + * @param squeeze_axis If true, Removes the axis with length 1 from the shapes of the output arrays. **Note** that setting `squeeze_axis` to ``true`` removes axis with length 1 only along the `axis` which it is split. Also `squeeze_axis` can be set to ``true`` only if ``input.shape[axis] == num_outputs``. + */ +def setSqueeze_axis(squeeze_axis : java.lang.Boolean): SliceChannelParam = { + this.squeeze_axis = squeeze_axis + this + } + def getSqueeze_axis() = this.squeeze_axis + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : SliceChannelParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for nansum + * @param data The input + */ + class nansumParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var axis: org.apache.mxnet.javaapi.Shape = null +/** + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + */ +def setAxis(axis : org.apache.mxnet.javaapi.Shape): nansumParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var keepdims: java.lang.Boolean = null +/** + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + */ +def setKeepdims(keepdims : java.lang.Boolean): nansumParam = { + this.keepdims = keepdims + this + } + def getKeepdims() = this.keepdims + private var exclude: java.lang.Boolean = null +/** + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + */ +def setExclude(exclude : java.lang.Boolean): nansumParam = { + this.exclude = exclude + this + } + def getExclude() = this.exclude + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : nansumParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for Pooling_v1 + * @param data Input data to the pooling operator. + */ + class Pooling_v1Param(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var kernel: org.apache.mxnet.javaapi.Shape = null +/** + * @param kernel pooling kernel size: (y, x) or (d, y, x) + */ +def setKernel(kernel : org.apache.mxnet.javaapi.Shape): Pooling_v1Param = { + this.kernel = kernel + this + } + def getKernel() = this.kernel + private var pool_type: String = null +/** + * @param pool_type Pooling type to be applied. + */ +def setPool_type(pool_type : String): Pooling_v1Param = { + this.pool_type = pool_type + this + } + def getPool_type() = this.pool_type + private var global_pool: java.lang.Boolean = null +/** + * @param global_pool Ignore kernel size, do global pooling based on current input feature map. 
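+ *
+ * Hedged sketch: global average pooling over an NCHW placeholder NDArray `data`
+ * (kernel and stride are ignored once global pooling is enabled); the generated
+ * `Pooling_v1(po)` companion method is assumed:
+ * {{{
+ * val po = new Pooling_v1Param(data)
+ *   .setPool_type("avg")
+ *   .setGlobal_pool(true)   // pool over the whole spatial extent of each feature map
+ * }}}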
+ */ +def setGlobal_pool(global_pool : java.lang.Boolean): Pooling_v1Param = { + this.global_pool = global_pool + this + } + def getGlobal_pool() = this.global_pool + private var pooling_convention: String = null +/** + * @param pooling_convention Pooling convention to be applied. + */ +def setPooling_convention(pooling_convention : String): Pooling_v1Param = { + this.pooling_convention = pooling_convention + this + } + def getPooling_convention() = this.pooling_convention + private var stride: org.apache.mxnet.javaapi.Shape = null +/** + * @param stride stride: for pooling (y, x) or (d, y, x) + */ +def setStride(stride : org.apache.mxnet.javaapi.Shape): Pooling_v1Param = { + this.stride = stride + this + } + def getStride() = this.stride + private var pad: org.apache.mxnet.javaapi.Shape = null +/** + * @param pad pad for pooling: (y, x) or (d, y, x) + */ +def setPad(pad : org.apache.mxnet.javaapi.Shape): Pooling_v1Param = { + this.pad = pad + this + } + def getPad() = this.pad + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : Pooling_v1Param = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for preloaded_multi_mp_sgd_update + * @param data Weights, gradients, learning rates and weight decays + */ + class preloaded_multi_mp_sgd_updateParam(data : Array[org.apache.mxnet.javaapi.NDArray]) { + def getData() = this.data + private var rescale_grad: java.lang.Float = null +/** + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + */ +def setRescale_grad(rescale_grad : java.lang.Float): preloaded_multi_mp_sgd_updateParam = { + this.rescale_grad = rescale_grad + this + } + def getRescale_grad() = this.rescale_grad + private var clip_gradient: java.lang.Float = null +/** + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + */ +def setClip_gradient(clip_gradient : java.lang.Float): preloaded_multi_mp_sgd_updateParam = { + this.clip_gradient = clip_gradient + this + } + def getClip_gradient() = this.clip_gradient + private var num_weights: java.lang.Integer = null +/** + * @param num_weights Number of updated weights. + */ +def setNum_weights(num_weights : java.lang.Integer): preloaded_multi_mp_sgd_updateParam = { + this.num_weights = num_weights + this + } + def getNum_weights() = this.num_weights + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : preloaded_multi_mp_sgd_updateParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for sample_negative_binomial + * @param k Limits of unsuccessful experiments. + * @param p Failure probabilities in each experiment. + */ + class sample_negative_binomialParam(k : org.apache.mxnet.javaapi.NDArray,p : org.apache.mxnet.javaapi.NDArray) { + def getK() = this.k + private var shape: org.apache.mxnet.javaapi.Shape = null +/** + * @param shape Shape to be sampled from each random distribution. + */ +def setShape(shape : org.apache.mxnet.javaapi.Shape): sample_negative_binomialParam = { + this.shape = shape + this + } + def getShape() = this.shape + private var dtype: String = null +/** + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). 
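+ *
+ * Hedged sketch: drawing two samples from each distribution described by the
+ * placeholder NDArrays `k` and `p` (shape (n,)), giving an (n, 2) result. The
+ * placeholder javaapi.Shape value `sampleShape` = (2) and the generated
+ * `sample_negative_binomial(po)` companion method are assumptions:
+ * {{{
+ * val po = new sample_negative_binomialParam(k, p)
+ *   .setShape(sampleShape)
+ *   .setDtype("float32")
+ * }}}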
+ */ +def setDtype(dtype : String): sample_negative_binomialParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + def getP() = this.p + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : sample_negative_binomialParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for LRN + * @param data Input data to LRN + * @param nsize normalization window width in elements. + */ + class LRNParam(data : org.apache.mxnet.javaapi.NDArray,nsize : java.lang.Integer) { + def getData() = this.data + private var alpha: java.lang.Float = null +/** + * @param alpha The variance scaling parameter :math:`lpha` in the LRN expression. + */ +def setAlpha(alpha : java.lang.Float): LRNParam = { + this.alpha = alpha + this + } + def getAlpha() = this.alpha + private var beta: java.lang.Float = null +/** + * @param beta The power parameter :math:`eta` in the LRN expression. + */ +def setBeta(beta : java.lang.Float): LRNParam = { + this.beta = beta + this + } + def getBeta() = this.beta + private var knorm: java.lang.Float = null +/** + * @param knorm The parameter :math:`k` in the LRN expression. + */ +def setKnorm(knorm : java.lang.Float): LRNParam = { + this.knorm = knorm + this + } + def getKnorm() = this.knorm + def getNsize() = this.nsize + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : LRNParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for sample_poisson + * @param lam Lambda (rate) parameters of the distributions. + */ + class sample_poissonParam(lam : org.apache.mxnet.javaapi.NDArray) { + def getLam() = this.lam + private var shape: org.apache.mxnet.javaapi.Shape = null +/** + * @param shape Shape to be sampled from each random distribution. + */ +def setShape(shape : org.apache.mxnet.javaapi.Shape): sample_poissonParam = { + this.shape = shape + this + } + def getShape() = this.shape + private var dtype: String = null +/** + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + */ +def setDtype(dtype : String): sample_poissonParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : sample_poissonParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for preloaded_multi_sgd_update + * @param data Weights, gradients, learning rates and weight decays + */ + class preloaded_multi_sgd_updateParam(data : Array[org.apache.mxnet.javaapi.NDArray]) { + def getData() = this.data + private var rescale_grad: java.lang.Float = null +/** + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + */ +def setRescale_grad(rescale_grad : java.lang.Float): preloaded_multi_sgd_updateParam = { + this.rescale_grad = rescale_grad + this + } + def getRescale_grad() = this.rescale_grad + private var clip_gradient: java.lang.Float = null +/** + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). 
+ */ +def setClip_gradient(clip_gradient : java.lang.Float): preloaded_multi_sgd_updateParam = { + this.clip_gradient = clip_gradient + this + } + def getClip_gradient() = this.clip_gradient + private var num_weights: java.lang.Integer = null +/** + * @param num_weights Number of updated weights. + */ +def setNum_weights(num_weights : java.lang.Integer): preloaded_multi_sgd_updateParam = { + this.num_weights = num_weights + this + } + def getNum_weights() = this.num_weights + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : preloaded_multi_sgd_updateParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for ftml_update + * @param weight Weight + * @param grad Gradient + * @param d Internal state ``d_t`` + * @param v Internal state ``v_t`` + * @param z Internal state ``z_t`` + * @param lr Learning rate. + * @param t Number of update. + */ + class ftml_updateParam(weight : org.apache.mxnet.javaapi.NDArray,grad : org.apache.mxnet.javaapi.NDArray,d : org.apache.mxnet.javaapi.NDArray,v : org.apache.mxnet.javaapi.NDArray,z : org.apache.mxnet.javaapi.NDArray,lr : java.lang.Float,t : java.lang.Integer) { + def getWeight() = this.weight + def getGrad() = this.grad + def getD() = this.d + def getV() = this.v + def getZ() = this.z + def getLr() = this.lr + private var beta1: java.lang.Float = null +/** + * @param beta1 Generally close to 0.5. + */ +def setBeta1(beta1 : java.lang.Float): ftml_updateParam = { + this.beta1 = beta1 + this + } + def getBeta1() = this.beta1 + private var beta2: java.lang.Float = null +/** + * @param beta2 Generally close to 1. + */ +def setBeta2(beta2 : java.lang.Float): ftml_updateParam = { + this.beta2 = beta2 + this + } + def getBeta2() = this.beta2 + private var epsilon: java.lang.Double = null +/** + * @param epsilon Epsilon to prevent div 0. + */ +def setEpsilon(epsilon : java.lang.Double): ftml_updateParam = { + this.epsilon = epsilon + this + } + def getEpsilon() = this.epsilon + def getT() = this.t + private var wd: java.lang.Float = null +/** + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + */ +def setWd(wd : java.lang.Float): ftml_updateParam = { + this.wd = wd + this + } + def getWd() = this.wd + private var rescale_grad: java.lang.Float = null +/** + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + */ +def setRescale_grad(rescale_grad : java.lang.Float): ftml_updateParam = { + this.rescale_grad = rescale_grad + this + } + def getRescale_grad() = this.rescale_grad + private var clip_grad: java.lang.Float = null +/** + * @param clip_grad Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). 
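+ *
+ * Hedged sketch of one FTML step: `weight`, `grad` and the state NDArrays `d`,
+ * `v`, `z` are placeholders updated across calls, `step` is a placeholder update
+ * counter, and the generated `ftml_update(po)` companion method is assumed:
+ * {{{
+ * val po = new ftml_updateParam(weight, grad, d, v, z,
+ *                               0.001f,   // lr
+ *                               step)     // t: update count
+ *   .setBeta1(0.6f)
+ *   .setBeta2(0.999f)
+ *   .setWd(0.0001f)
+ *   .setClip_grad(5.0f)
+ * }}}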
+ */ +def setClip_grad(clip_grad : java.lang.Float): ftml_updateParam = { + this.clip_grad = clip_grad + this + } + def getClip_grad() = this.clip_grad + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : ftml_updateParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for min_axis + * @param data The input + */ + class min_axisParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var axis: org.apache.mxnet.javaapi.Shape = null +/** + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + */ +def setAxis(axis : org.apache.mxnet.javaapi.Shape): min_axisParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var keepdims: java.lang.Boolean = null +/** + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + */ +def setKeepdims(keepdims : java.lang.Boolean): min_axisParam = { + this.keepdims = keepdims + this + } + def getKeepdims() = this.keepdims + private var exclude: java.lang.Boolean = null +/** + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + */ +def setExclude(exclude : java.lang.Boolean): min_axisParam = { + this.exclude = exclude + this + } + def getExclude() = this.exclude + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : min_axisParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for BatchNorm_v1 + * @param data Input data to batch normalization + * @param gamma gamma array + * @param beta beta array + */ + class BatchNorm_v1Param(data : org.apache.mxnet.javaapi.NDArray,gamma : org.apache.mxnet.javaapi.NDArray,beta : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + def getGamma() = this.gamma + def getBeta() = this.beta + private var eps: java.lang.Float = null +/** + * @param eps Epsilon to prevent div 0 + */ +def setEps(eps : java.lang.Float): BatchNorm_v1Param = { + this.eps = eps + this + } + def getEps() = this.eps + private var momentum: java.lang.Float = null +/** + * @param momentum Momentum for moving average + */ +def setMomentum(momentum : java.lang.Float): BatchNorm_v1Param = { + this.momentum = momentum + this + } + def getMomentum() = this.momentum + private var fix_gamma: java.lang.Boolean = null +/** + * @param fix_gamma Fix gamma while training + */ +def setFix_gamma(fix_gamma : java.lang.Boolean): BatchNorm_v1Param = { + this.fix_gamma = fix_gamma + this + } + def getFix_gamma() = this.fix_gamma + private var use_global_stats: java.lang.Boolean = null +/** + * @param use_global_stats Whether use global moving statistics instead of local batch-norm. This will force change batch-norm into a scale shift operator. 
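+ *
+ * Hedged sketch: running BatchNorm_v1 in "inference" style, where the moving
+ * statistics are used instead of per-batch ones. `data`, `gamma` and `beta` are
+ * placeholder NDArrays and the generated `BatchNorm_v1(po)` companion method is
+ * assumed:
+ * {{{
+ * val po = new BatchNorm_v1Param(data, gamma, beta)
+ *   .setEps(0.001f)
+ *   .setMomentum(0.9f)
+ *   .setFix_gamma(false)
+ *   .setUse_global_stats(true)   // normalize with the moving mean/var
+ * }}}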
+ */ +def setUse_global_stats(use_global_stats : java.lang.Boolean): BatchNorm_v1Param = { + this.use_global_stats = use_global_stats + this + } + def getUse_global_stats() = this.use_global_stats + private var output_mean_var: java.lang.Boolean = null +/** + * @param output_mean_var Output All,normal mean and var + */ +def setOutput_mean_var(output_mean_var : java.lang.Boolean): BatchNorm_v1Param = { + this.output_mean_var = output_mean_var + this + } + def getOutput_mean_var() = this.output_mean_var + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : BatchNorm_v1Param = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for log_softmax + * @param data The input array. + */ + class log_softmaxParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var axis: java.lang.Integer = null +/** + * @param axis The axis along which to compute softmax. + */ +def setAxis(axis : java.lang.Integer): log_softmaxParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var temperature: java.lang.Double = null +/** + * @param temperature Temperature parameter in softmax + */ +def setTemperature(temperature : java.lang.Double): log_softmaxParam = { + this.temperature = temperature + this + } + def getTemperature() = this.temperature + private var dtype: String = null +/** + * @param dtype DType of the output in case this can't be inferred. Defaults to the same as input's dtype if not defined (dtype=None). + */ +def setDtype(dtype : String): log_softmaxParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + private var use_length: java.lang.Boolean = null +/** + * @param use_length Whether to use the length input as a mask over the data input. + */ +def setUse_length(use_length : java.lang.Boolean): log_softmaxParam = { + this.use_length = use_length + this + } + def getUse_length() = this.use_length + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : log_softmaxParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for rmsprop_update + * @param weight Weight + * @param grad Gradient + * @param n n + * @param lr Learning rate + */ + class rmsprop_updateParam(weight : org.apache.mxnet.javaapi.NDArray,grad : org.apache.mxnet.javaapi.NDArray,n : org.apache.mxnet.javaapi.NDArray,lr : java.lang.Float) { + def getWeight() = this.weight + def getGrad() = this.grad + def getN() = this.n + def getLr() = this.lr + private var gamma1: java.lang.Float = null +/** + * @param gamma1 The decay rate of momentum estimates. + */ +def setGamma1(gamma1 : java.lang.Float): rmsprop_updateParam = { + this.gamma1 = gamma1 + this + } + def getGamma1() = this.gamma1 + private var epsilon: java.lang.Float = null +/** + * @param epsilon A small constant for numerical stability. + */ +def setEpsilon(epsilon : java.lang.Float): rmsprop_updateParam = { + this.epsilon = epsilon + this + } + def getEpsilon() = this.epsilon + private var wd: java.lang.Float = null +/** + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + */ +def setWd(wd : java.lang.Float): rmsprop_updateParam = { + this.wd = wd + this + } + def getWd() = this.wd + private var rescale_grad: java.lang.Float = null +/** + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. 
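+ *
+ * Hedged sketch of one RMSProp step: `weight`, `grad` and the running
+ * squared-gradient state `n` are placeholder NDArrays, `batchSize` is a
+ * placeholder count used to illustrate a typical gradient rescale, and the
+ * generated `rmsprop_update(po)` companion method is assumed:
+ * {{{
+ * val po = new rmsprop_updateParam(weight, grad, n, 0.001f)   // lr = 0.001
+ *   .setGamma1(0.9f)              // decay of the squared-gradient estimate
+ *   .setEpsilon(1e-8f)
+ *   .setWd(0.0001f)
+ *   .setRescale_grad(1.0f / batchSize)
+ *   .setClip_gradient(5.0f)
+ * }}}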
+ */ +def setRescale_grad(rescale_grad : java.lang.Float): rmsprop_updateParam = { + this.rescale_grad = rescale_grad + this + } + def getRescale_grad() = this.rescale_grad + private var clip_gradient: java.lang.Float = null +/** + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + */ +def setClip_gradient(clip_gradient : java.lang.Float): rmsprop_updateParam = { + this.clip_gradient = clip_gradient + this + } + def getClip_gradient() = this.clip_gradient + private var clip_weights: java.lang.Float = null +/** + * @param clip_weights Clip weights to the range of [-clip_weights, clip_weights] If clip_weights <= 0, weight clipping is turned off. weights = max(min(weights, clip_weights), -clip_weights). + */ +def setClip_weights(clip_weights : java.lang.Float): rmsprop_updateParam = { + this.clip_weights = clip_weights + this + } + def getClip_weights() = this.clip_weights + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : rmsprop_updateParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for nag_mom_update + * @param weight Weight + * @param grad Gradient + * @param mom Momentum + * @param lr Learning rate + */ + class nag_mom_updateParam(weight : org.apache.mxnet.javaapi.NDArray,grad : org.apache.mxnet.javaapi.NDArray,mom : org.apache.mxnet.javaapi.NDArray,lr : java.lang.Float) { + def getWeight() = this.weight + def getGrad() = this.grad + def getMom() = this.mom + def getLr() = this.lr + private var momentum: java.lang.Float = null +/** + * @param momentum The decay rate of momentum estimates at each epoch. + */ +def setMomentum(momentum : java.lang.Float): nag_mom_updateParam = { + this.momentum = momentum + this + } + def getMomentum() = this.momentum + private var wd: java.lang.Float = null +/** + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + */ +def setWd(wd : java.lang.Float): nag_mom_updateParam = { + this.wd = wd + this + } + def getWd() = this.wd + private var rescale_grad: java.lang.Float = null +/** + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + */ +def setRescale_grad(rescale_grad : java.lang.Float): nag_mom_updateParam = { + this.rescale_grad = rescale_grad + this + } + def getRescale_grad() = this.rescale_grad + private var clip_gradient: java.lang.Float = null +/** + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). 
+ */ +def setClip_gradient(clip_gradient : java.lang.Float): nag_mom_updateParam = { + this.clip_gradient = clip_gradient + this + } + def getClip_gradient() = this.clip_gradient + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : nag_mom_updateParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for mp_sgd_mom_update + * @param weight Weight + * @param grad Gradient + * @param mom Momentum + * @param weight32 Weight32 + * @param lr Learning rate + */ + class mp_sgd_mom_updateParam(weight : org.apache.mxnet.javaapi.NDArray,grad : org.apache.mxnet.javaapi.NDArray,mom : org.apache.mxnet.javaapi.NDArray,weight32 : org.apache.mxnet.javaapi.NDArray,lr : java.lang.Float) { + def getWeight() = this.weight + def getGrad() = this.grad + def getMom() = this.mom + def getWeight32() = this.weight32 + def getLr() = this.lr + private var momentum: java.lang.Float = null +/** + * @param momentum The decay rate of momentum estimates at each epoch. + */ +def setMomentum(momentum : java.lang.Float): mp_sgd_mom_updateParam = { + this.momentum = momentum + this + } + def getMomentum() = this.momentum + private var wd: java.lang.Float = null +/** + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + */ +def setWd(wd : java.lang.Float): mp_sgd_mom_updateParam = { + this.wd = wd + this + } + def getWd() = this.wd + private var rescale_grad: java.lang.Float = null +/** + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + */ +def setRescale_grad(rescale_grad : java.lang.Float): mp_sgd_mom_updateParam = { + this.rescale_grad = rescale_grad + this + } + def getRescale_grad() = this.rescale_grad + private var clip_gradient: java.lang.Float = null +/** + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + */ +def setClip_gradient(clip_gradient : java.lang.Float): mp_sgd_mom_updateParam = { + this.clip_gradient = clip_gradient + this + } + def getClip_gradient() = this.clip_gradient + private var lazy_update: java.lang.Boolean = null +/** + * @param lazy_update If true, lazy updates are applied if gradient's stype is row_sparse and both weight and momentum have the same stype + */ +def setLazy_update(lazy_update : java.lang.Boolean): mp_sgd_mom_updateParam = { + this.lazy_update = lazy_update + this + } + def getLazy_update() = this.lazy_update + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : mp_sgd_mom_updateParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for random_gamma + + */ + class random_gammaParam() { + private var alpha: java.lang.Float = null +/** + * @param alpha Alpha parameter (shape) of the gamma distribution. + */ +def setAlpha(alpha : java.lang.Float): random_gammaParam = { + this.alpha = alpha + this + } + def getAlpha() = this.alpha + private var beta: java.lang.Float = null +/** + * @param beta Beta parameter (scale) of the gamma distribution. + */ +def setBeta(beta : java.lang.Float): random_gammaParam = { + this.beta = beta + this + } + def getBeta() = this.beta + private var shape: org.apache.mxnet.javaapi.Shape = null +/** + * @param shape Shape of the output. 
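+ *
+ * Hedged sketch: drawing a (2, 2) block of Gamma(alpha = 9, beta = 0.5) samples
+ * on CPU. The placeholder javaapi.Shape value `outShape` = (2, 2) and the
+ * generated `random_gamma(po)` companion method are assumptions:
+ * {{{
+ * val po = new random_gammaParam()
+ *   .setAlpha(9.0f)
+ *   .setBeta(0.5f)
+ *   .setShape(outShape)
+ *   .setCtx("cpu(0)")
+ *   .setDtype("float32")
+ * }}}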
+ */ +def setShape(shape : org.apache.mxnet.javaapi.Shape): random_gammaParam = { + this.shape = shape + this + } + def getShape() = this.shape + private var ctx: String = null +/** + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + */ +def setCtx(ctx : String): random_gammaParam = { + this.ctx = ctx + this + } + def getCtx() = this.ctx + private var dtype: String = null +/** + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + */ +def setDtype(dtype : String): random_gammaParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : random_gammaParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for Embedding + * @param data The input array to the embedding operator. + * @param weight The embedding weight matrix. + * @param input_dim Vocabulary size of the input indices. + * @param output_dim Dimension of the embedding vectors. + */ + class EmbeddingParam(data : org.apache.mxnet.javaapi.NDArray,weight : org.apache.mxnet.javaapi.NDArray,input_dim : java.lang.Integer,output_dim : java.lang.Integer) { + def getData() = this.data + def getWeight() = this.weight + def getInput_dim() = this.input_dim + def getOutput_dim() = this.output_dim + private var dtype: String = null +/** + * @param dtype Data type of weight. + */ +def setDtype(dtype : String): EmbeddingParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + private var sparse_grad: java.lang.Boolean = null +/** + * @param sparse_grad Compute row sparse gradient in the backward calculation. If set to True, the grad's storage type is row_sparse. + */ +def setSparse_grad(sparse_grad : java.lang.Boolean): EmbeddingParam = { + this.sparse_grad = sparse_grad + this + } + def getSparse_grad() = this.sparse_grad + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : EmbeddingParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for normal + + */ + class normalParam() { + private var loc: java.lang.Float = null +/** + * @param loc Mean of the distribution. + */ +def setLoc(loc : java.lang.Float): normalParam = { + this.loc = loc + this + } + def getLoc() = this.loc + private var scale: java.lang.Float = null +/** + * @param scale Standard deviation of the distribution. + */ +def setScale(scale : java.lang.Float): normalParam = { + this.scale = scale + this + } + def getScale() = this.scale + private var shape: org.apache.mxnet.javaapi.Shape = null +/** + * @param shape Shape of the output. + */ +def setShape(shape : org.apache.mxnet.javaapi.Shape): normalParam = { + this.shape = shape + this + } + def getShape() = this.shape + private var ctx: String = null +/** + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + */ +def setCtx(ctx : String): normalParam = { + this.ctx = ctx + this + } + def getCtx() = this.ctx + private var dtype: String = null +/** + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). 
+ */ +def setDtype(dtype : String): normalParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : normalParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for sample_generalized_negative_binomial + * @param mu Means of the distributions. + * @param alpha Alpha (dispersion) parameters of the distributions. + */ + class sample_generalized_negative_binomialParam(mu : org.apache.mxnet.javaapi.NDArray,alpha : org.apache.mxnet.javaapi.NDArray) { + def getMu() = this.mu + private var shape: org.apache.mxnet.javaapi.Shape = null +/** + * @param shape Shape to be sampled from each random distribution. + */ +def setShape(shape : org.apache.mxnet.javaapi.Shape): sample_generalized_negative_binomialParam = { + this.shape = shape + this + } + def getShape() = this.shape + private var dtype: String = null +/** + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + */ +def setDtype(dtype : String): sample_generalized_negative_binomialParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + def getAlpha() = this.alpha + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : sample_generalized_negative_binomialParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for LayerNorm + * @param data Input data to layer normalization + * @param gamma gamma array + * @param beta beta array + */ + class LayerNormParam(data : org.apache.mxnet.javaapi.NDArray,gamma : org.apache.mxnet.javaapi.NDArray,beta : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + def getGamma() = this.gamma + def getBeta() = this.beta + private var axis: java.lang.Integer = null +/** + * @param axis The axis to perform layer normalization. Usually, this should be be axis of the channel dimension. Negative values means indexing from right to left. + */ +def setAxis(axis : java.lang.Integer): LayerNormParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var eps: java.lang.Float = null +/** + * @param eps An `epsilon` parameter to prevent division by 0. + */ +def setEps(eps : java.lang.Float): LayerNormParam = { + this.eps = eps + this + } + def getEps() = this.eps + private var output_mean_var: java.lang.Boolean = null +/** + * @param output_mean_var Output the mean and std calculated along the given axis. + */ +def setOutput_mean_var(output_mean_var : java.lang.Boolean): LayerNormParam = { + this.output_mean_var = output_mean_var + this + } + def getOutput_mean_var() = this.output_mean_var + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : LayerNormParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for Dropout + * @param data Input array to which dropout will be applied. + */ + class DropoutParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var p: java.lang.Float = null +/** + * @param p Fraction of the input that gets dropped out during training time. + */ +def setP(p : java.lang.Float): DropoutParam = { + this.p = p + this + } + def getP() = this.p + private var mode: String = null +/** + * @param mode Whether to only turn on dropout during training or to also turn on for inference. 
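+ *
+ * Hedged sketch: keeping dropout active outside of training (e.g. for Monte Carlo
+ * dropout). `data` is a placeholder NDArray, "always" is the documented
+ * alternative to the default "training" mode, and the generated `Dropout(po)`
+ * companion method is assumed:
+ * {{{
+ * val po = new DropoutParam(data)
+ *   .setP(0.5f)          // drop half of the activations
+ *   .setMode("always")   // apply dropout during inference as well
+ * }}}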
+ */ +def setMode(mode : String): DropoutParam = { + this.mode = mode + this + } + def getMode() = this.mode + private var axes: org.apache.mxnet.javaapi.Shape = null +/** + * @param axes Axes for variational dropout kernel. + */ +def setAxes(axes : org.apache.mxnet.javaapi.Shape): DropoutParam = { + this.axes = axes + this + } + def getAxes() = this.axes + private var cudnn_off: java.lang.Boolean = null +/** + * @param cudnn_off Whether to turn off cudnn in dropout operator. This option is ignored if axes is specified. + */ +def setCudnn_off(cudnn_off : java.lang.Boolean): DropoutParam = { + this.cudnn_off = cudnn_off + this + } + def getCudnn_off() = this.cudnn_off + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : DropoutParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for mp_sgd_update + * @param weight Weight + * @param grad gradient + * @param weight32 Weight32 + * @param lr Learning rate + */ + class mp_sgd_updateParam(weight : org.apache.mxnet.javaapi.NDArray,grad : org.apache.mxnet.javaapi.NDArray,weight32 : org.apache.mxnet.javaapi.NDArray,lr : java.lang.Float) { + def getWeight() = this.weight + def getGrad() = this.grad + def getWeight32() = this.weight32 + def getLr() = this.lr + private var wd: java.lang.Float = null +/** + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + */ +def setWd(wd : java.lang.Float): mp_sgd_updateParam = { + this.wd = wd + this + } + def getWd() = this.wd + private var rescale_grad: java.lang.Float = null +/** + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + */ +def setRescale_grad(rescale_grad : java.lang.Float): mp_sgd_updateParam = { + this.rescale_grad = rescale_grad + this + } + def getRescale_grad() = this.rescale_grad + private var clip_gradient: java.lang.Float = null +/** + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + */ +def setClip_gradient(clip_gradient : java.lang.Float): mp_sgd_updateParam = { + this.clip_gradient = clip_gradient + this + } + def getClip_gradient() = this.clip_gradient + private var lazy_update: java.lang.Boolean = null +/** + * @param lazy_update If true, lazy updates are applied if gradient's stype is row_sparse. + */ +def setLazy_update(lazy_update : java.lang.Boolean): mp_sgd_updateParam = { + this.lazy_update = lazy_update + this + } + def getLazy_update() = this.lazy_update + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : mp_sgd_updateParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for random_uniform + + */ + class random_uniformParam() { + private var low: java.lang.Float = null +/** + * @param low Lower bound of the distribution. + */ +def setLow(low : java.lang.Float): random_uniformParam = { + this.low = low + this + } + def getLow() = this.low + private var high: java.lang.Float = null +/** + * @param high Upper bound of the distribution. + */ +def setHigh(high : java.lang.Float): random_uniformParam = { + this.high = high + this + } + def getHigh() = this.high + private var shape: org.apache.mxnet.javaapi.Shape = null +/** + * @param shape Shape of the output. 
+ */ +def setShape(shape : org.apache.mxnet.javaapi.Shape): random_uniformParam = { + this.shape = shape + this + } + def getShape() = this.shape + private var ctx: String = null +/** + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + */ +def setCtx(ctx : String): random_uniformParam = { + this.ctx = ctx + this + } + def getCtx() = this.ctx + private var dtype: String = null +/** + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + */ +def setDtype(dtype : String): random_uniformParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : random_uniformParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for sgd_update + * @param weight Weight + * @param grad Gradient + * @param lr Learning rate + */ + class sgd_updateParam(weight : org.apache.mxnet.javaapi.NDArray,grad : org.apache.mxnet.javaapi.NDArray,lr : java.lang.Float) { + def getWeight() = this.weight + def getGrad() = this.grad + def getLr() = this.lr + private var wd: java.lang.Float = null +/** + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + */ +def setWd(wd : java.lang.Float): sgd_updateParam = { + this.wd = wd + this + } + def getWd() = this.wd + private var rescale_grad: java.lang.Float = null +/** + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + */ +def setRescale_grad(rescale_grad : java.lang.Float): sgd_updateParam = { + this.rescale_grad = rescale_grad + this + } + def getRescale_grad() = this.rescale_grad + private var clip_gradient: java.lang.Float = null +/** + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + */ +def setClip_gradient(clip_gradient : java.lang.Float): sgd_updateParam = { + this.clip_gradient = clip_gradient + this + } + def getClip_gradient() = this.clip_gradient + private var lazy_update: java.lang.Boolean = null +/** + * @param lazy_update If true, lazy updates are applied if gradient's stype is row_sparse. + */ +def setLazy_update(lazy_update : java.lang.Boolean): sgd_updateParam = { + this.lazy_update = lazy_update + this + } + def getLazy_update() = this.lazy_update + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : sgd_updateParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for linalg_gemm + * @param A Tensor of input matrices + * @param B Tensor of input matrices + * @param C Tensor of input matrices + */ + class linalg_gemmParam(A : org.apache.mxnet.javaapi.NDArray,B : org.apache.mxnet.javaapi.NDArray,C : org.apache.mxnet.javaapi.NDArray) { + def getA() = this.A + def getB() = this.B + def getC() = this.C + private var transpose_a: java.lang.Boolean = null +/** + * @param transpose_a Multiply with transposed of first input (A). + */ +def setTranspose_a(transpose_a : java.lang.Boolean): linalg_gemmParam = { + this.transpose_a = transpose_a + this + } + def getTranspose_a() = this.transpose_a + private var transpose_b: java.lang.Boolean = null +/** + * @param transpose_b Multiply with transposed of second input (B). 
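+ *
+ * Hedged sketch configuring roughly `out = 1.0 * A * B^T + 1.0 * C` on the
+ * trailing two axes; `A`, `B`, `C` are placeholder NDArrays and the generated
+ * `linalg_gemm(po)` companion method is assumed:
+ * {{{
+ * val po = new linalg_gemmParam(A, B, C)
+ *   .setTranspose_a(false)
+ *   .setTranspose_b(true)   // use B transposed in the product
+ *   .setAlpha(1.0)
+ *   .setBeta(1.0)
+ * }}}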
+ */ +def setTranspose_b(transpose_b : java.lang.Boolean): linalg_gemmParam = { + this.transpose_b = transpose_b + this + } + def getTranspose_b() = this.transpose_b + private var alpha: java.lang.Double = null +/** + * @param alpha Scalar factor multiplied with A*B. + */ +def setAlpha(alpha : java.lang.Double): linalg_gemmParam = { + this.alpha = alpha + this + } + def getAlpha() = this.alpha + private var beta: java.lang.Double = null +/** + * @param beta Scalar factor multiplied with C. + */ +def setBeta(beta : java.lang.Double): linalg_gemmParam = { + this.beta = beta + this + } + def getBeta() = this.beta + private var axis: java.lang.Integer = null +/** + * @param axis Axis corresponding to the matrix rows. + */ +def setAxis(axis : java.lang.Integer): linalg_gemmParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : linalg_gemmParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for linalg_trsm + * @param A Tensor of lower triangular matrices + * @param B Tensor of matrices + */ + class linalg_trsmParam(A : org.apache.mxnet.javaapi.NDArray,B : org.apache.mxnet.javaapi.NDArray) { + def getA() = this.A + def getB() = this.B + private var transpose: java.lang.Boolean = null +/** + * @param transpose Use transposed of the triangular matrix + */ +def setTranspose(transpose : java.lang.Boolean): linalg_trsmParam = { + this.transpose = transpose + this + } + def getTranspose() = this.transpose + private var rightside: java.lang.Boolean = null +/** + * @param rightside Multiply triangular matrix from the right to non-triangular one. + */ +def setRightside(rightside : java.lang.Boolean): linalg_trsmParam = { + this.rightside = rightside + this + } + def getRightside() = this.rightside + private var lower: java.lang.Boolean = null +/** + * @param lower True if the triangular matrix is lower triangular, false if it is upper triangular. + */ +def setLower(lower : java.lang.Boolean): linalg_trsmParam = { + this.lower = lower + this + } + def getLower() = this.lower + private var alpha: java.lang.Double = null +/** + * @param alpha Scalar factor to be applied to the result. + */ +def setAlpha(alpha : java.lang.Double): linalg_trsmParam = { + this.alpha = alpha + this + } + def getAlpha() = this.alpha + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : linalg_trsmParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for Convolution_v1 + * @param data Input data to the ConvolutionV1Op. + * @param weight Weight matrix. + * @param bias Bias parameter. 
+ * @param kernel convolution kernel size: (h, w) or (d, h, w) + * @param num_filter convolution filter(channel) number + */ + class Convolution_v1Param(data : org.apache.mxnet.javaapi.NDArray,weight : org.apache.mxnet.javaapi.NDArray,bias : org.apache.mxnet.javaapi.NDArray,kernel : org.apache.mxnet.javaapi.Shape,num_filter : java.lang.Integer) { + def getData() = this.data + def getWeight() = this.weight + def getBias() = this.bias + def getKernel() = this.kernel + private var stride: org.apache.mxnet.javaapi.Shape = null +/** + * @param stride convolution stride: (h, w) or (d, h, w) + */ +def setStride(stride : org.apache.mxnet.javaapi.Shape): Convolution_v1Param = { + this.stride = stride + this + } + def getStride() = this.stride + private var dilate: org.apache.mxnet.javaapi.Shape = null +/** + * @param dilate convolution dilate: (h, w) or (d, h, w) + */ +def setDilate(dilate : org.apache.mxnet.javaapi.Shape): Convolution_v1Param = { + this.dilate = dilate + this + } + def getDilate() = this.dilate + private var pad: org.apache.mxnet.javaapi.Shape = null +/** + * @param pad pad for convolution: (h, w) or (d, h, w) + */ +def setPad(pad : org.apache.mxnet.javaapi.Shape): Convolution_v1Param = { + this.pad = pad + this + } + def getPad() = this.pad + def getNum_filter() = this.num_filter + private var num_group: java.lang.Integer = null +/** + * @param num_group Number of group partitions. Equivalent to slicing input into num_group + partitions, apply convolution on each, then concatenate the results + */ +def setNum_group(num_group : java.lang.Integer): Convolution_v1Param = { + this.num_group = num_group + this + } + def getNum_group() = this.num_group + private var workspace: java.lang.Long = null +/** + * @param workspace Maximum temporary workspace allowed for convolution (MB).This parameter determines the effective batch size of the convolution kernel, which may be smaller than the given batch size. Also, the workspace will be automatically enlarged to make sure that we can run the kernel with batch_size=1 + */ +def setWorkspace(workspace : java.lang.Long): Convolution_v1Param = { + this.workspace = workspace + this + } + def getWorkspace() = this.workspace + private var no_bias: java.lang.Boolean = null +/** + * @param no_bias Whether to disable bias parameter. + */ +def setNo_bias(no_bias : java.lang.Boolean): Convolution_v1Param = { + this.no_bias = no_bias + this + } + def getNo_bias() = this.no_bias + private var cudnn_tune: String = null +/** + * @param cudnn_tune Whether to pick convolution algo by running performance test. + Leads to higher startup time but may give faster speed. Options are: + 'off': no tuning + 'limited_workspace': run test and pick the fastest algorithm that doesn't exceed workspace limit. + 'fastest': pick the fastest algorithm and ignore workspace limit. + If set to None (default), behavior is determined by environment + variable MXNET_CUDNN_AUTOTUNE_DEFAULT: 0 for off, + 1 for limited workspace (default), 2 for fastest. + */ +def setCudnn_tune(cudnn_tune : String): Convolution_v1Param = { + this.cudnn_tune = cudnn_tune + this + } + def getCudnn_tune() = this.cudnn_tune + private var cudnn_off: java.lang.Boolean = null +/** + * @param cudnn_off Turn off cudnn for this layer. + */ +def setCudnn_off(cudnn_off : java.lang.Boolean): Convolution_v1Param = { + this.cudnn_off = cudnn_off + this + } + def getCudnn_off() = this.cudnn_off + private var layout: String = null +/** + * @param layout Set layout for input, output and weight. 
Empty for + default layout: NCHW for 2d and NCDHW for 3d. + */ +def setLayout(layout : String): Convolution_v1Param = { + this.layout = layout + this + } + def getLayout() = this.layout + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : Convolution_v1Param = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for diag + * @param data Input ndarray + */ + class diagParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var k: java.lang.Integer = null +/** + * @param k Diagonal in question. The default is 0. Use k>0 for diagonals above the main diagonal, and k<0 for diagonals below the main diagonal. If input has shape (S0 S1) k must be between -S0 and S1 + */ +def setK(k : java.lang.Integer): diagParam = { + this.k = k + this + } + def getK() = this.k + private var axis1: java.lang.Integer = null +/** + * @param axis1 The first axis of the sub-arrays of interest. Ignored when the input is a 1-D array. + */ +def setAxis1(axis1 : java.lang.Integer): diagParam = { + this.axis1 = axis1 + this + } + def getAxis1() = this.axis1 + private var axis2: java.lang.Integer = null +/** + * @param axis2 The second axis of the sub-arrays of interest. Ignored when the input is a 1-D array. + */ +def setAxis2(axis2 : java.lang.Integer): diagParam = { + this.axis2 = axis2 + this + } + def getAxis2() = this.axis2 + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : diagParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for SwapAxis + * @param data Input array. + */ + class SwapAxisParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var dim1: java.lang.Integer = null +/** + * @param dim1 the first axis to be swapped. + */ +def setDim1(dim1 : java.lang.Integer): SwapAxisParam = { + this.dim1 = dim1 + this + } + def getDim1() = this.dim1 + private var dim2: java.lang.Integer = null +/** + * @param dim2 the second axis to be swapped. + */ +def setDim2(dim2 : java.lang.Integer): SwapAxisParam = { + this.dim2 = dim2 + this + } + def getDim2() = this.dim2 + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : SwapAxisParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for linalg_extracttrian + * @param A Tensor of square matrices + */ + class linalg_extracttrianParam(A : org.apache.mxnet.javaapi.NDArray) { + def getA() = this.A + private var offset: java.lang.Integer = null +/** + * @param offset Offset of the diagonal versus the main diagonal. 0 corresponds to the main diagonal, a negative/positive value to diagonals below/above the main diagonal. + */ +def setOffset(offset : java.lang.Integer): linalg_extracttrianParam = { + this.offset = offset + this + } + def getOffset() = this.offset + private var lower: java.lang.Boolean = null +/** + * @param lower Refer to the lower triangular matrix if lower=true, refer to the upper otherwise. 
Only relevant when offset=0 + */ +def setLower(lower : java.lang.Boolean): linalg_extracttrianParam = { + this.lower = lower + this + } + def getLower() = this.lower + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : linalg_extracttrianParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for batch_dot + * @param lhs The first input + * @param rhs The second input + */ + class batch_dotParam(lhs : org.apache.mxnet.javaapi.NDArray,rhs : org.apache.mxnet.javaapi.NDArray) { + def getLhs() = this.lhs + def getRhs() = this.rhs + private var transpose_a: java.lang.Boolean = null +/** + * @param transpose_a If true then transpose the first input before dot. + */ +def setTranspose_a(transpose_a : java.lang.Boolean): batch_dotParam = { + this.transpose_a = transpose_a + this + } + def getTranspose_a() = this.transpose_a + private var transpose_b: java.lang.Boolean = null +/** + * @param transpose_b If true then transpose the second input before dot. + */ +def setTranspose_b(transpose_b : java.lang.Boolean): batch_dotParam = { + this.transpose_b = transpose_b + this + } + def getTranspose_b() = this.transpose_b + private var forward_stype: String = null +/** + * @param forward_stype The desired storage type of the forward output given by user, if thecombination of input storage types and this hint does not matchany implemented ones, the dot operator will perform fallback operationand still produce an output of the desired storage type. + */ +def setForward_stype(forward_stype : String): batch_dotParam = { + this.forward_stype = forward_stype + this + } + def getForward_stype() = this.forward_stype + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : batch_dotParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for sample_normal + * @param mu Means of the distributions. + * @param sigma Standard deviations of the distributions. + */ + class sample_normalParam(mu : org.apache.mxnet.javaapi.NDArray,sigma : org.apache.mxnet.javaapi.NDArray) { + def getMu() = this.mu + private var shape: org.apache.mxnet.javaapi.Shape = null +/** + * @param shape Shape to be sampled from each random distribution. + */ +def setShape(shape : org.apache.mxnet.javaapi.Shape): sample_normalParam = { + this.shape = shape + this + } + def getShape() = this.shape + private var dtype: String = null +/** + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + */ +def setDtype(dtype : String): sample_normalParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + def getSigma() = this.sigma + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : sample_normalParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for LeakyReLU + * @param data Input data to activation function. + * @param gamma Input data to activation function. + */ + class LeakyReLUParam(data : org.apache.mxnet.javaapi.NDArray,gamma : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + def getGamma() = this.gamma + private var act_type: String = null +/** + * @param act_type Activation function to be applied. 
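+ *
+ * Illustrative sketch with placeholder NDArrays; "leaky" is one of the act_type values
+ * referred to by the slope documentation below.
+ * {{{
+ * val param = new LeakyReLUParam(data, gamma)
+ *   .setAct_type("leaky")
+ *   .setSlope(0.25f)
+ * }}}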
+ */ +def setAct_type(act_type : String): LeakyReLUParam = { + this.act_type = act_type + this + } + def getAct_type() = this.act_type + private var slope: java.lang.Float = null +/** + * @param slope Init slope for the activation. (For leaky and elu only) + */ +def setSlope(slope : java.lang.Float): LeakyReLUParam = { + this.slope = slope + this + } + def getSlope() = this.slope + private var lower_bound: java.lang.Float = null +/** + * @param lower_bound Lower bound of random slope. (For rrelu only) + */ +def setLower_bound(lower_bound : java.lang.Float): LeakyReLUParam = { + this.lower_bound = lower_bound + this + } + def getLower_bound() = this.lower_bound + private var upper_bound: java.lang.Float = null +/** + * @param upper_bound Upper bound of random slope. (For rrelu only) + */ +def setUpper_bound(upper_bound : java.lang.Float): LeakyReLUParam = { + this.upper_bound = upper_bound + this + } + def getUpper_bound() = this.upper_bound + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : LeakyReLUParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for preloaded_multi_mp_sgd_mom_update + * @param data Weights, gradients, momentums, learning rates and weight decays + */ + class preloaded_multi_mp_sgd_mom_updateParam(data : Array[org.apache.mxnet.javaapi.NDArray]) { + def getData() = this.data + private var momentum: java.lang.Float = null +/** + * @param momentum The decay rate of momentum estimates at each epoch. + */ +def setMomentum(momentum : java.lang.Float): preloaded_multi_mp_sgd_mom_updateParam = { + this.momentum = momentum + this + } + def getMomentum() = this.momentum + private var rescale_grad: java.lang.Float = null +/** + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + */ +def setRescale_grad(rescale_grad : java.lang.Float): preloaded_multi_mp_sgd_mom_updateParam = { + this.rescale_grad = rescale_grad + this + } + def getRescale_grad() = this.rescale_grad + private var clip_gradient: java.lang.Float = null +/** + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + */ +def setClip_gradient(clip_gradient : java.lang.Float): preloaded_multi_mp_sgd_mom_updateParam = { + this.clip_gradient = clip_gradient + this + } + def getClip_gradient() = this.clip_gradient + private var num_weights: java.lang.Integer = null +/** + * @param num_weights Number of updated weights. + */ +def setNum_weights(num_weights : java.lang.Integer): preloaded_multi_mp_sgd_mom_updateParam = { + this.num_weights = num_weights + this + } + def getNum_weights() = this.num_weights + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : preloaded_multi_mp_sgd_mom_updateParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for multi_mp_sgd_update + * @param data Weights + * @param lrs Learning rates. + * @param wds Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. 
+ */ + class multi_mp_sgd_updateParam(data : Array[org.apache.mxnet.javaapi.NDArray],lrs : Any,wds : Any) { + def getData() = this.data + def getLrs() = this.lrs + def getWds() = this.wds + private var rescale_grad: java.lang.Float = null +/** + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + */ +def setRescale_grad(rescale_grad : java.lang.Float): multi_mp_sgd_updateParam = { + this.rescale_grad = rescale_grad + this + } + def getRescale_grad() = this.rescale_grad + private var clip_gradient: java.lang.Float = null +/** + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + */ +def setClip_gradient(clip_gradient : java.lang.Float): multi_mp_sgd_updateParam = { + this.clip_gradient = clip_gradient + this + } + def getClip_gradient() = this.clip_gradient + private var num_weights: java.lang.Integer = null +/** + * @param num_weights Number of updated weights. + */ +def setNum_weights(num_weights : java.lang.Integer): multi_mp_sgd_updateParam = { + this.num_weights = num_weights + this + } + def getNum_weights() = this.num_weights + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : multi_mp_sgd_updateParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for mean + * @param data The input + */ + class meanParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var axis: org.apache.mxnet.javaapi.Shape = null +/** + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + */ +def setAxis(axis : org.apache.mxnet.javaapi.Shape): meanParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var keepdims: java.lang.Boolean = null +/** + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + */ +def setKeepdims(keepdims : java.lang.Boolean): meanParam = { + this.keepdims = keepdims + this + } + def getKeepdims() = this.keepdims + private var exclude: java.lang.Boolean = null +/** + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + */ +def setExclude(exclude : java.lang.Boolean): meanParam = { + this.exclude = exclude + this + } + def getExclude() = this.exclude + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : meanParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for linalg_maketrian + * @param A Tensor of triangular matrices stored as vectors + */ + class linalg_maketrianParam(A : org.apache.mxnet.javaapi.NDArray) { + def getA() = this.A + private var offset: java.lang.Integer = null +/** + * @param offset Offset of the diagonal versus the main diagonal. 0 corresponds to the main diagonal, a negative/positive value to diagonals below/above the main diagonal. 
+ */ +def setOffset(offset : java.lang.Integer): linalg_maketrianParam = { + this.offset = offset + this + } + def getOffset() = this.offset + private var lower: java.lang.Boolean = null +/** + * @param lower Refer to the lower triangular matrix if lower=true, refer to the upper otherwise. Only relevant when offset=0 + */ +def setLower(lower : java.lang.Boolean): linalg_maketrianParam = { + this.lower = lower + this + } + def getLower() = this.lower + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : linalg_maketrianParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for mp_nag_mom_update + * @param weight Weight + * @param grad Gradient + * @param mom Momentum + * @param weight32 Weight32 + * @param lr Learning rate + */ + class mp_nag_mom_updateParam(weight : org.apache.mxnet.javaapi.NDArray,grad : org.apache.mxnet.javaapi.NDArray,mom : org.apache.mxnet.javaapi.NDArray,weight32 : org.apache.mxnet.javaapi.NDArray,lr : java.lang.Float) { + def getWeight() = this.weight + def getGrad() = this.grad + def getMom() = this.mom + def getWeight32() = this.weight32 + def getLr() = this.lr + private var momentum: java.lang.Float = null +/** + * @param momentum The decay rate of momentum estimates at each epoch. + */ +def setMomentum(momentum : java.lang.Float): mp_nag_mom_updateParam = { + this.momentum = momentum + this + } + def getMomentum() = this.momentum + private var wd: java.lang.Float = null +/** + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + */ +def setWd(wd : java.lang.Float): mp_nag_mom_updateParam = { + this.wd = wd + this + } + def getWd() = this.wd + private var rescale_grad: java.lang.Float = null +/** + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + */ +def setRescale_grad(rescale_grad : java.lang.Float): mp_nag_mom_updateParam = { + this.rescale_grad = rescale_grad + this + } + def getRescale_grad() = this.rescale_grad + private var clip_gradient: java.lang.Float = null +/** + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + */ +def setClip_gradient(clip_gradient : java.lang.Float): mp_nag_mom_updateParam = { + this.clip_gradient = clip_gradient + this + } + def getClip_gradient() = this.clip_gradient + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : mp_nag_mom_updateParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for min + * @param data The input + */ + class minParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var axis: org.apache.mxnet.javaapi.Shape = null +/** + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. 
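+ *
+ * Illustrative sketch of the reduction-axis semantics described above, using a placeholder
+ * input NDArray and the Array-based javaapi Shape constructor for the axis tuple.
+ * {{{
+ * // reduce over axes 0 and 2 and keep the reduced dimensions with size one
+ * val param = new minParam(data)
+ *   .setAxis(new org.apache.mxnet.javaapi.Shape(Array(0, 2)))
+ *   .setKeepdims(true)
+ * }}}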
+ */ +def setAxis(axis : org.apache.mxnet.javaapi.Shape): minParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var keepdims: java.lang.Boolean = null +/** + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + */ +def setKeepdims(keepdims : java.lang.Boolean): minParam = { + this.keepdims = keepdims + this + } + def getKeepdims() = this.keepdims + private var exclude: java.lang.Boolean = null +/** + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + */ +def setExclude(exclude : java.lang.Boolean): minParam = { + this.exclude = exclude + this + } + def getExclude() = this.exclude + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : minParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for Convolution + * @param data Input data to the ConvolutionOp. + * @param weight Weight matrix. + * @param bias Bias parameter. + * @param kernel Convolution kernel size: (w,), (h, w) or (d, h, w) + * @param num_filter Convolution filter(channel) number + */ + class ConvolutionParam(data : org.apache.mxnet.javaapi.NDArray,weight : org.apache.mxnet.javaapi.NDArray,bias : org.apache.mxnet.javaapi.NDArray,kernel : org.apache.mxnet.javaapi.Shape,num_filter : java.lang.Integer) { + def getData() = this.data + def getWeight() = this.weight + def getBias() = this.bias + def getKernel() = this.kernel + private var stride: org.apache.mxnet.javaapi.Shape = null +/** + * @param stride Convolution stride: (w,), (h, w) or (d, h, w). Defaults to 1 for each dimension. + */ +def setStride(stride : org.apache.mxnet.javaapi.Shape): ConvolutionParam = { + this.stride = stride + this + } + def getStride() = this.stride + private var dilate: org.apache.mxnet.javaapi.Shape = null +/** + * @param dilate Convolution dilate: (w,), (h, w) or (d, h, w). Defaults to 1 for each dimension. + */ +def setDilate(dilate : org.apache.mxnet.javaapi.Shape): ConvolutionParam = { + this.dilate = dilate + this + } + def getDilate() = this.dilate + private var pad: org.apache.mxnet.javaapi.Shape = null +/** + * @param pad Zero pad for convolution: (w,), (h, w) or (d, h, w). Defaults to no padding. + */ +def setPad(pad : org.apache.mxnet.javaapi.Shape): ConvolutionParam = { + this.pad = pad + this + } + def getPad() = this.pad + def getNum_filter() = this.num_filter + private var num_group: java.lang.Integer = null +/** + * @param num_group Number of group partitions. + */ +def setNum_group(num_group : java.lang.Integer): ConvolutionParam = { + this.num_group = num_group + this + } + def getNum_group() = this.num_group + private var workspace: java.lang.Long = null +/** + * @param workspace Maximum temporary workspace allowed (MB) in convolution.This parameter has two usages. When CUDNN is not used, it determines the effective batch size of the convolution kernel. When CUDNN is used, it controls the maximum temporary storage used for tuning the best CUDNN kernel when `limited_workspace` strategy is used. + */ +def setWorkspace(workspace : java.lang.Long): ConvolutionParam = { + this.workspace = workspace + this + } + def getWorkspace() = this.workspace + private var no_bias: java.lang.Boolean = null +/** + * @param no_bias Whether to disable bias parameter. 
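+ *
+ * Illustrative sketch of a 3x3, 64-filter 2-D convolution configuration; the NDArray
+ * arguments are placeholders and the shapes are example values only.
+ * {{{
+ * val param = new ConvolutionParam(data, weight, bias,
+ *     new org.apache.mxnet.javaapi.Shape(Array(3, 3)), 64)
+ *   .setStride(new org.apache.mxnet.javaapi.Shape(Array(1, 1)))
+ *   .setPad(new org.apache.mxnet.javaapi.Shape(Array(1, 1)))
+ * }}}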
+ */ +def setNo_bias(no_bias : java.lang.Boolean): ConvolutionParam = { + this.no_bias = no_bias + this + } + def getNo_bias() = this.no_bias + private var cudnn_tune: String = null +/** + * @param cudnn_tune Whether to pick convolution algo by running performance test. + */ +def setCudnn_tune(cudnn_tune : String): ConvolutionParam = { + this.cudnn_tune = cudnn_tune + this + } + def getCudnn_tune() = this.cudnn_tune + private var cudnn_off: java.lang.Boolean = null +/** + * @param cudnn_off Turn off cudnn for this layer. + */ +def setCudnn_off(cudnn_off : java.lang.Boolean): ConvolutionParam = { + this.cudnn_off = cudnn_off + this + } + def getCudnn_off() = this.cudnn_off + private var layout: String = null +/** + * @param layout Set layout for input, output and weight. Empty for + default layout: NCW for 1d, NCHW for 2d and NCDHW for 3d.NHWC and NDHWC are only supported on GPU. + */ +def setLayout(layout : String): ConvolutionParam = { + this.layout = layout + this + } + def getLayout() = this.layout + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : ConvolutionParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for random_poisson + + */ + class random_poissonParam() { + private var lam: java.lang.Float = null +/** + * @param lam Lambda parameter (rate) of the Poisson distribution. + */ +def setLam(lam : java.lang.Float): random_poissonParam = { + this.lam = lam + this + } + def getLam() = this.lam + private var shape: org.apache.mxnet.javaapi.Shape = null +/** + * @param shape Shape of the output. + */ +def setShape(shape : org.apache.mxnet.javaapi.Shape): random_poissonParam = { + this.shape = shape + this + } + def getShape() = this.shape + private var ctx: String = null +/** + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + */ +def setCtx(ctx : String): random_poissonParam = { + this.ctx = ctx + this + } + def getCtx() = this.ctx + private var dtype: String = null +/** + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + */ +def setDtype(dtype : String): random_poissonParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : random_poissonParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for adam_update + * @param weight Weight + * @param grad Gradient + * @param mean Moving mean + * @param vari Moving variance + * @param lr Learning rate + */ + class adam_updateParam(weight : org.apache.mxnet.javaapi.NDArray,grad : org.apache.mxnet.javaapi.NDArray,mean : org.apache.mxnet.javaapi.NDArray,vari : org.apache.mxnet.javaapi.NDArray,lr : java.lang.Float) { + def getWeight() = this.weight + def getGrad() = this.grad + def getMean() = this.mean + def getVari() = this.vari + def getLr() = this.lr + private var beta1: java.lang.Float = null +/** + * @param beta1 The decay rate for the 1st moment estimates. + */ +def setBeta1(beta1 : java.lang.Float): adam_updateParam = { + this.beta1 = beta1 + this + } + def getBeta1() = this.beta1 + private var beta2: java.lang.Float = null +/** + * @param beta2 The decay rate for the 2nd moment estimates. 
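+ *
+ * Illustrative sketch of configuring one Adam step; the weight, gradient and moment
+ * NDArrays are placeholders and the hyper-parameter values are examples only.
+ * {{{
+ * val param = new adam_updateParam(weight, grad, mean, vari, 0.001f)
+ *   .setBeta1(0.9f)
+ *   .setBeta2(0.999f)
+ *   .setEpsilon(1e-8f)
+ *   .setWd(0.0001f)
+ * }}}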
+ */ +def setBeta2(beta2 : java.lang.Float): adam_updateParam = { + this.beta2 = beta2 + this + } + def getBeta2() = this.beta2 + private var epsilon: java.lang.Float = null +/** + * @param epsilon A small constant for numerical stability. + */ +def setEpsilon(epsilon : java.lang.Float): adam_updateParam = { + this.epsilon = epsilon + this + } + def getEpsilon() = this.epsilon + private var wd: java.lang.Float = null +/** + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + */ +def setWd(wd : java.lang.Float): adam_updateParam = { + this.wd = wd + this + } + def getWd() = this.wd + private var rescale_grad: java.lang.Float = null +/** + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + */ +def setRescale_grad(rescale_grad : java.lang.Float): adam_updateParam = { + this.rescale_grad = rescale_grad + this + } + def getRescale_grad() = this.rescale_grad + private var clip_gradient: java.lang.Float = null +/** + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + */ +def setClip_gradient(clip_gradient : java.lang.Float): adam_updateParam = { + this.clip_gradient = clip_gradient + this + } + def getClip_gradient() = this.clip_gradient + private var lazy_update: java.lang.Boolean = null +/** + * @param lazy_update If true, lazy updates are applied if gradient's stype is row_sparse and all of w, m and v have the same stype + */ +def setLazy_update(lazy_update : java.lang.Boolean): adam_updateParam = { + this.lazy_update = lazy_update + this + } + def getLazy_update() = this.lazy_update + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : adam_updateParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for sum_axis + * @param data The input + */ + class sum_axisParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var axis: org.apache.mxnet.javaapi.Shape = null +/** + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + */ +def setAxis(axis : org.apache.mxnet.javaapi.Shape): sum_axisParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var keepdims: java.lang.Boolean = null +/** + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + */ +def setKeepdims(keepdims : java.lang.Boolean): sum_axisParam = { + this.keepdims = keepdims + this + } + def getKeepdims() = this.keepdims + private var exclude: java.lang.Boolean = null +/** + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. 
+ */ +def setExclude(exclude : java.lang.Boolean): sum_axisParam = { + this.exclude = exclude + this + } + def getExclude() = this.exclude + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : sum_axisParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for GroupNorm + * @param data Input data + * @param gamma gamma array + * @param beta beta array + */ + class GroupNormParam(data : org.apache.mxnet.javaapi.NDArray,gamma : org.apache.mxnet.javaapi.NDArray,beta : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + def getGamma() = this.gamma + def getBeta() = this.beta + private var num_groups: java.lang.Integer = null +/** + * @param num_groups Total number of groups. + */ +def setNum_groups(num_groups : java.lang.Integer): GroupNormParam = { + this.num_groups = num_groups + this + } + def getNum_groups() = this.num_groups + private var eps: java.lang.Float = null +/** + * @param eps An `epsilon` parameter to prevent division by 0. + */ +def setEps(eps : java.lang.Float): GroupNormParam = { + this.eps = eps + this + } + def getEps() = this.eps + private var output_mean_var: java.lang.Boolean = null +/** + * @param output_mean_var Output the mean and std calculated along the given axis. + */ +def setOutput_mean_var(output_mean_var : java.lang.Boolean): GroupNormParam = { + this.output_mean_var = output_mean_var + this + } + def getOutput_mean_var() = this.output_mean_var + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : GroupNormParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for ftrl_update + * @param weight Weight + * @param grad Gradient + * @param z z + * @param n Square of grad + * @param lr Learning rate + */ + class ftrl_updateParam(weight : org.apache.mxnet.javaapi.NDArray,grad : org.apache.mxnet.javaapi.NDArray,z : org.apache.mxnet.javaapi.NDArray,n : org.apache.mxnet.javaapi.NDArray,lr : java.lang.Float) { + def getWeight() = this.weight + def getGrad() = this.grad + def getZ() = this.z + def getN() = this.n + def getLr() = this.lr + private var lamda1: java.lang.Float = null +/** + * @param lamda1 The L1 regularization coefficient. + */ +def setLamda1(lamda1 : java.lang.Float): ftrl_updateParam = { + this.lamda1 = lamda1 + this + } + def getLamda1() = this.lamda1 + private var beta: java.lang.Float = null +/** + * @param beta Per-Coordinate Learning Rate beta. + */ +def setBeta(beta : java.lang.Float): ftrl_updateParam = { + this.beta = beta + this + } + def getBeta() = this.beta + private var wd: java.lang.Float = null +/** + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + */ +def setWd(wd : java.lang.Float): ftrl_updateParam = { + this.wd = wd + this + } + def getWd() = this.wd + private var rescale_grad: java.lang.Float = null +/** + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + */ +def setRescale_grad(rescale_grad : java.lang.Float): ftrl_updateParam = { + this.rescale_grad = rescale_grad + this + } + def getRescale_grad() = this.rescale_grad + private var clip_gradient: java.lang.Float = null +/** + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). 
+ */ +def setClip_gradient(clip_gradient : java.lang.Float): ftrl_updateParam = { + this.clip_gradient = clip_gradient + this + } + def getClip_gradient() = this.clip_gradient + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : ftrl_updateParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for Pooling + * @param data Input data to the pooling operator. + */ + class PoolingParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var kernel: org.apache.mxnet.javaapi.Shape = null +/** + * @param kernel Pooling kernel size: (y, x) or (d, y, x) + */ +def setKernel(kernel : org.apache.mxnet.javaapi.Shape): PoolingParam = { + this.kernel = kernel + this + } + def getKernel() = this.kernel + private var pool_type: String = null +/** + * @param pool_type Pooling type to be applied. + */ +def setPool_type(pool_type : String): PoolingParam = { + this.pool_type = pool_type + this + } + def getPool_type() = this.pool_type + private var global_pool: java.lang.Boolean = null +/** + * @param global_pool Ignore kernel size, do global pooling based on current input feature map. + */ +def setGlobal_pool(global_pool : java.lang.Boolean): PoolingParam = { + this.global_pool = global_pool + this + } + def getGlobal_pool() = this.global_pool + private var cudnn_off: java.lang.Boolean = null +/** + * @param cudnn_off Turn off cudnn pooling and use MXNet pooling operator. + */ +def setCudnn_off(cudnn_off : java.lang.Boolean): PoolingParam = { + this.cudnn_off = cudnn_off + this + } + def getCudnn_off() = this.cudnn_off + private var pooling_convention: String = null +/** + * @param pooling_convention Pooling convention to be applied. + */ +def setPooling_convention(pooling_convention : String): PoolingParam = { + this.pooling_convention = pooling_convention + this + } + def getPooling_convention() = this.pooling_convention + private var stride: org.apache.mxnet.javaapi.Shape = null +/** + * @param stride Stride: for pooling (y, x) or (d, y, x). Defaults to 1 for each dimension. + */ +def setStride(stride : org.apache.mxnet.javaapi.Shape): PoolingParam = { + this.stride = stride + this + } + def getStride() = this.stride + private var pad: org.apache.mxnet.javaapi.Shape = null +/** + * @param pad Pad for pooling: (y, x) or (d, y, x). Defaults to no padding. + */ +def setPad(pad : org.apache.mxnet.javaapi.Shape): PoolingParam = { + this.pad = pad + this + } + def getPad() = this.pad + private var p_value: java.lang.Integer = null +/** + * @param p_value Value of p for Lp pooling, can be 1 or 2, required for Lp Pooling. + */ +def setP_value(p_value : java.lang.Integer): PoolingParam = { + this.p_value = p_value + this + } + def getP_value() = this.p_value + private var count_include_pad: java.lang.Boolean = null +/** + * @param count_include_pad Only used for AvgPool, specify whether to count padding elements for averagecalculation. For example, with a 5*5 kernel on a 3*3 corner of a image,the sum of the 9 valid elements will be divided by 25 if this is set to true,or it will be divided by 9 if this is set to false. Defaults to true. + */ +def setCount_include_pad(count_include_pad : java.lang.Boolean): PoolingParam = { + this.count_include_pad = count_include_pad + this + } + def getCount_include_pad() = this.count_include_pad + private var layout: String = null +/** + * @param layout Set layout for input and output. Empty for + default layout: NCW for 1d, NCHW for 2d and NCDHW for 3d. 
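+ *
+ * Illustrative sketch of a 2x2 max pooling configuration with stride 2; the input NDArray
+ * is a placeholder and the Array-based javaapi Shape constructor is shown for illustration.
+ * {{{
+ * val param = new PoolingParam(data)
+ *   .setKernel(new org.apache.mxnet.javaapi.Shape(Array(2, 2)))
+ *   .setStride(new org.apache.mxnet.javaapi.Shape(Array(2, 2)))
+ *   .setPool_type("max")
+ * }}}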
+ */ +def setLayout(layout : String): PoolingParam = { + this.layout = layout + this + } + def getLayout() = this.layout + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : PoolingParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for softmin + * @param data The input array. + */ + class softminParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var axis: java.lang.Integer = null +/** + * @param axis The axis along which to compute softmax. + */ +def setAxis(axis : java.lang.Integer): softminParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var temperature: java.lang.Double = null +/** + * @param temperature Temperature parameter in softmax + */ +def setTemperature(temperature : java.lang.Double): softminParam = { + this.temperature = temperature + this + } + def getTemperature() = this.temperature + private var dtype: String = null +/** + * @param dtype DType of the output in case this can't be inferred. Defaults to the same as input's dtype if not defined (dtype=None). + */ +def setDtype(dtype : String): softminParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + private var use_length: java.lang.Boolean = null +/** + * @param use_length Whether to use the length input as a mask over the data input. + */ +def setUse_length(use_length : java.lang.Boolean): softminParam = { + this.use_length = use_length + this + } + def getUse_length() = this.use_length + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : softminParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for multi_all_finite + * @param data Arrays + */ + class multi_all_finiteParam(data : Array[org.apache.mxnet.javaapi.NDArray]) { + def getData() = this.data + private var num_arrays: java.lang.Integer = null +/** + * @param num_arrays Number of arrays. + */ +def setNum_arrays(num_arrays : java.lang.Integer): multi_all_finiteParam = { + this.num_arrays = num_arrays + this + } + def getNum_arrays() = this.num_arrays + private var init_output: java.lang.Boolean = null +/** + * @param init_output Initialize output to 1. + */ +def setInit_output(init_output : java.lang.Boolean): multi_all_finiteParam = { + this.init_output = init_output + this + } + def getInit_output() = this.init_output + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : multi_all_finiteParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for sample_gamma + * @param alpha Alpha (shape) parameters of the distributions. + * @param beta Beta (scale) parameters of the distributions. + */ + class sample_gammaParam(alpha : org.apache.mxnet.javaapi.NDArray,beta : org.apache.mxnet.javaapi.NDArray) { + def getAlpha() = this.alpha + private var shape: org.apache.mxnet.javaapi.Shape = null +/** + * @param shape Shape to be sampled from each random distribution. + */ +def setShape(shape : org.apache.mxnet.javaapi.Shape): sample_gammaParam = { + this.shape = shape + this + } + def getShape() = this.shape + private var dtype: String = null +/** + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). 
+ */ +def setDtype(dtype : String): sample_gammaParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + def getBeta() = this.beta + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : sample_gammaParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for moments + * @param data Input ndarray + */ + class momentsParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var axes: org.apache.mxnet.javaapi.Shape = null +/** + * @param axes Array of ints. Axes along which to compute mean and variance. + */ +def setAxes(axes : org.apache.mxnet.javaapi.Shape): momentsParam = { + this.axes = axes + this + } + def getAxes() = this.axes + private var keepdims: java.lang.Boolean = null +/** + * @param keepdims produce moments with the same dimensionality as the input. + */ +def setKeepdims(keepdims : java.lang.Boolean): momentsParam = { + this.keepdims = keepdims + this + } + def getKeepdims() = this.keepdims + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : momentsParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for Deconvolution + * @param data Input tensor to the deconvolution operation. + * @param weight Weights representing the kernel. + * @param bias Bias added to the result after the deconvolution operation. + * @param kernel Deconvolution kernel size: (w,), (h, w) or (d, h, w). This is same as the kernel size used for the corresponding convolution + * @param num_filter Number of output filters. + */ + class DeconvolutionParam(data : org.apache.mxnet.javaapi.NDArray,weight : org.apache.mxnet.javaapi.NDArray,bias : org.apache.mxnet.javaapi.NDArray,kernel : org.apache.mxnet.javaapi.Shape,num_filter : java.lang.Integer) { + def getData() = this.data + def getWeight() = this.weight + def getBias() = this.bias + def getKernel() = this.kernel + private var stride: org.apache.mxnet.javaapi.Shape = null +/** + * @param stride The stride used for the corresponding convolution: (w,), (h, w) or (d, h, w). Defaults to 1 for each dimension. + */ +def setStride(stride : org.apache.mxnet.javaapi.Shape): DeconvolutionParam = { + this.stride = stride + this + } + def getStride() = this.stride + private var dilate: org.apache.mxnet.javaapi.Shape = null +/** + * @param dilate Dilation factor for each dimension of the input: (w,), (h, w) or (d, h, w). Defaults to 1 for each dimension. + */ +def setDilate(dilate : org.apache.mxnet.javaapi.Shape): DeconvolutionParam = { + this.dilate = dilate + this + } + def getDilate() = this.dilate + private var pad: org.apache.mxnet.javaapi.Shape = null +/** + * @param pad The amount of implicit zero padding added during convolution for each dimension of the input: (w,), (h, w) or (d, h, w). ``(kernel-1)/2`` is usually a good choice. If `target_shape` is set, `pad` will be ignored and a padding that will generate the target shape will be used. Defaults to no padding. + */ +def setPad(pad : org.apache.mxnet.javaapi.Shape): DeconvolutionParam = { + this.pad = pad + this + } + def getPad() = this.pad + private var adj: org.apache.mxnet.javaapi.Shape = null +/** + * @param adj Adjustment for output shape: (w,), (h, w) or (d, h, w). If `target_shape` is set, `adj` will be ignored and computed accordingly. 
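+ *
+ * Illustrative sketch with placeholder NDArrays; kernel size, stride and filter count are
+ * example values only.
+ * {{{
+ * val param = new DeconvolutionParam(data, weight, bias,
+ *     new org.apache.mxnet.javaapi.Shape(Array(4, 4)), 64)
+ *   .setStride(new org.apache.mxnet.javaapi.Shape(Array(2, 2)))
+ *   .setNum_group(1)
+ * }}}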
+ */ +def setAdj(adj : org.apache.mxnet.javaapi.Shape): DeconvolutionParam = { + this.adj = adj + this + } + def getAdj() = this.adj + private var target_shape: org.apache.mxnet.javaapi.Shape = null +/** + * @param target_shape Shape of the output tensor: (w,), (h, w) or (d, h, w). + */ +def setTarget_shape(target_shape : org.apache.mxnet.javaapi.Shape): DeconvolutionParam = { + this.target_shape = target_shape + this + } + def getTarget_shape() = this.target_shape + def getNum_filter() = this.num_filter + private var num_group: java.lang.Integer = null +/** + * @param num_group Number of groups partition. + */ +def setNum_group(num_group : java.lang.Integer): DeconvolutionParam = { + this.num_group = num_group + this + } + def getNum_group() = this.num_group + private var workspace: java.lang.Long = null +/** + * @param workspace Maximum temporary workspace allowed (MB) in deconvolution.This parameter has two usages. When CUDNN is not used, it determines the effective batch size of the deconvolution kernel. When CUDNN is used, it controls the maximum temporary storage used for tuning the best CUDNN kernel when `limited_workspace` strategy is used. + */ +def setWorkspace(workspace : java.lang.Long): DeconvolutionParam = { + this.workspace = workspace + this + } + def getWorkspace() = this.workspace + private var no_bias: java.lang.Boolean = null +/** + * @param no_bias Whether to disable bias parameter. + */ +def setNo_bias(no_bias : java.lang.Boolean): DeconvolutionParam = { + this.no_bias = no_bias + this + } + def getNo_bias() = this.no_bias + private var cudnn_tune: String = null +/** + * @param cudnn_tune Whether to pick convolution algorithm by running performance test. + */ +def setCudnn_tune(cudnn_tune : String): DeconvolutionParam = { + this.cudnn_tune = cudnn_tune + this + } + def getCudnn_tune() = this.cudnn_tune + private var cudnn_off: java.lang.Boolean = null +/** + * @param cudnn_off Turn off cudnn for this layer. + */ +def setCudnn_off(cudnn_off : java.lang.Boolean): DeconvolutionParam = { + this.cudnn_off = cudnn_off + this + } + def getCudnn_off() = this.cudnn_off + private var layout: String = null +/** + * @param layout Set layout for input, output and weight. Empty for default layout, NCW for 1d, NCHW for 2d and NCDHW for 3d.NHWC and NDHWC are only supported on GPU. + */ +def setLayout(layout : String): DeconvolutionParam = { + this.layout = layout + this + } + def getLayout() = this.layout + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : DeconvolutionParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for linalg_gemm2 + * @param A Tensor of input matrices + * @param B Tensor of input matrices + */ + class linalg_gemm2Param(A : org.apache.mxnet.javaapi.NDArray,B : org.apache.mxnet.javaapi.NDArray) { + def getA() = this.A + def getB() = this.B + private var transpose_a: java.lang.Boolean = null +/** + * @param transpose_a Multiply with transposed of first input (A). + */ +def setTranspose_a(transpose_a : java.lang.Boolean): linalg_gemm2Param = { + this.transpose_a = transpose_a + this + } + def getTranspose_a() = this.transpose_a + private var transpose_b: java.lang.Boolean = null +/** + * @param transpose_b Multiply with transposed of second input (B). 
+ */ +def setTranspose_b(transpose_b : java.lang.Boolean): linalg_gemm2Param = { + this.transpose_b = transpose_b + this + } + def getTranspose_b() = this.transpose_b + private var alpha: java.lang.Double = null +/** + * @param alpha Scalar factor multiplied with A*B. + */ +def setAlpha(alpha : java.lang.Double): linalg_gemm2Param = { + this.alpha = alpha + this + } + def getAlpha() = this.alpha + private var axis: java.lang.Integer = null +/** + * @param axis Axis corresponding to the matrix row indices. + */ +def setAxis(axis : java.lang.Integer): linalg_gemm2Param = { + this.axis = axis + this + } + def getAxis() = this.axis + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : linalg_gemm2Param = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for uniform + + */ + class uniformParam() { + private var low: java.lang.Float = null +/** + * @param low Lower bound of the distribution. + */ +def setLow(low : java.lang.Float): uniformParam = { + this.low = low + this + } + def getLow() = this.low + private var high: java.lang.Float = null +/** + * @param high Upper bound of the distribution. + */ +def setHigh(high : java.lang.Float): uniformParam = { + this.high = high + this + } + def getHigh() = this.high + private var shape: org.apache.mxnet.javaapi.Shape = null +/** + * @param shape Shape of the output. + */ +def setShape(shape : org.apache.mxnet.javaapi.Shape): uniformParam = { + this.shape = shape + this + } + def getShape() = this.shape + private var ctx: String = null +/** + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + */ +def setCtx(ctx : String): uniformParam = { + this.ctx = ctx + this + } + def getCtx() = this.ctx + private var dtype: String = null +/** + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + */ +def setDtype(dtype : String): uniformParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : uniformParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for reshape + * @param data Input data to reshape. + */ + class reshapeParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var shape: org.apache.mxnet.javaapi.Shape = null +/** + * @param shape The target shape + */ +def setShape(shape : org.apache.mxnet.javaapi.Shape): reshapeParam = { + this.shape = shape + this + } + def getShape() = this.shape + private var reverse: java.lang.Boolean = null +/** + * @param reverse If true then the special values are inferred from right to left + */ +def setReverse(reverse : java.lang.Boolean): reshapeParam = { + this.reverse = reverse + this + } + def getReverse() = this.reverse + private var target_shape: org.apache.mxnet.javaapi.Shape = null +/** + * @param target_shape (Deprecated! Use ``shape`` instead.) Target new shape. One and only one dim can be 0, in which case it will be inferred from the rest of dims + */ +def setTarget_shape(target_shape : org.apache.mxnet.javaapi.Shape): reshapeParam = { + this.target_shape = target_shape + this + } + def getTarget_shape() = this.target_shape + private var keep_highest: java.lang.Boolean = null +/** + * @param keep_highest (Deprecated! Use ``shape`` instead.) 
Whether keep the highest dim unchanged.If set to true, then the first dim in target_shape is ignored,and always fixed as input + */ +def setKeep_highest(keep_highest : java.lang.Boolean): reshapeParam = { + this.keep_highest = keep_highest + this + } + def getKeep_highest() = this.keep_highest + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : reshapeParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for random_exponential + + */ + class random_exponentialParam() { + private var lam: java.lang.Float = null +/** + * @param lam Lambda parameter (rate) of the exponential distribution. + */ +def setLam(lam : java.lang.Float): random_exponentialParam = { + this.lam = lam + this + } + def getLam() = this.lam + private var shape: org.apache.mxnet.javaapi.Shape = null +/** + * @param shape Shape of the output. + */ +def setShape(shape : org.apache.mxnet.javaapi.Shape): random_exponentialParam = { + this.shape = shape + this + } + def getShape() = this.shape + private var ctx: String = null +/** + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + */ +def setCtx(ctx : String): random_exponentialParam = { + this.ctx = ctx + this + } + def getCtx() = this.ctx + private var dtype: String = null +/** + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + */ +def setDtype(dtype : String): random_exponentialParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : random_exponentialParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for mp_lamb_update_phase1 + * @param weight Weight + * @param grad Gradient + * @param mean Moving mean + * @param vari Moving variance + * @param weight32 Weight32 + * @param t Index update count. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + */ + class mp_lamb_update_phase1Param(weight : org.apache.mxnet.javaapi.NDArray,grad : org.apache.mxnet.javaapi.NDArray,mean : org.apache.mxnet.javaapi.NDArray,vari : org.apache.mxnet.javaapi.NDArray,weight32 : org.apache.mxnet.javaapi.NDArray,t : java.lang.Integer,wd : java.lang.Float) { + def getWeight() = this.weight + def getGrad() = this.grad + def getMean() = this.mean + def getVari() = this.vari + def getWeight32() = this.weight32 + private var beta1: java.lang.Float = null +/** + * @param beta1 The decay rate for the 1st moment estimates. + */ +def setBeta1(beta1 : java.lang.Float): mp_lamb_update_phase1Param = { + this.beta1 = beta1 + this + } + def getBeta1() = this.beta1 + private var beta2: java.lang.Float = null +/** + * @param beta2 The decay rate for the 2nd moment estimates. + */ +def setBeta2(beta2 : java.lang.Float): mp_lamb_update_phase1Param = { + this.beta2 = beta2 + this + } + def getBeta2() = this.beta2 + private var epsilon: java.lang.Float = null +/** + * @param epsilon A small constant for numerical stability. + */ +def setEpsilon(epsilon : java.lang.Float): mp_lamb_update_phase1Param = { + this.epsilon = epsilon + this + } + def getEpsilon() = this.epsilon + def getT() = this.t + private var bias_correction: java.lang.Boolean = null +/** + * @param bias_correction Whether to use bias correction. 
+ */ +def setBias_correction(bias_correction : java.lang.Boolean): mp_lamb_update_phase1Param = { + this.bias_correction = bias_correction + this + } + def getBias_correction() = this.bias_correction + def getWd() = this.wd + private var rescale_grad: java.lang.Float = null +/** + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + */ +def setRescale_grad(rescale_grad : java.lang.Float): mp_lamb_update_phase1Param = { + this.rescale_grad = rescale_grad + this + } + def getRescale_grad() = this.rescale_grad + private var clip_gradient: java.lang.Float = null +/** + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + */ +def setClip_gradient(clip_gradient : java.lang.Float): mp_lamb_update_phase1Param = { + this.clip_gradient = clip_gradient + this + } + def getClip_gradient() = this.clip_gradient + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : mp_lamb_update_phase1Param = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for lamb_update_phase2 + * @param weight Weight + * @param g Output of lamb_update_phase 1 + * @param r1 r1 + * @param r2 r2 + * @param lr Learning rate + */ + class lamb_update_phase2Param(weight : org.apache.mxnet.javaapi.NDArray,g : org.apache.mxnet.javaapi.NDArray,r1 : org.apache.mxnet.javaapi.NDArray,r2 : org.apache.mxnet.javaapi.NDArray,lr : java.lang.Float) { + def getWeight() = this.weight + def getG() = this.g + def getR1() = this.r1 + def getR2() = this.r2 + def getLr() = this.lr + private var lower_bound: java.lang.Float = null +/** + * @param lower_bound Lower limit of norm of weight. If lower_bound <= 0, Lower limit is not set + */ +def setLower_bound(lower_bound : java.lang.Float): lamb_update_phase2Param = { + this.lower_bound = lower_bound + this + } + def getLower_bound() = this.lower_bound + private var upper_bound: java.lang.Float = null +/** + * @param upper_bound Upper limit of norm of weight. If upper_bound <= 0, Upper limit is not set + */ +def setUpper_bound(upper_bound : java.lang.Float): lamb_update_phase2Param = { + this.upper_bound = upper_bound + this + } + def getUpper_bound() = this.upper_bound + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : lamb_update_phase2Param = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for SpatialTransformer + * @param data Input data to the SpatialTransformerOp. + * @param loc localisation net, the output dim should be 6 when transform_type is affine. You should initialize the weight and bias with identity transform.
+ * @param transform_type transformation type + * @param sampler_type sampling type + */ + class SpatialTransformerParam(data : org.apache.mxnet.javaapi.NDArray,loc : org.apache.mxnet.javaapi.NDArray,transform_type : String,sampler_type : String) { + def getData() = this.data + def getLoc() = this.loc + private var target_shape: org.apache.mxnet.javaapi.Shape = null +/** + * @param target_shape output shape (h, w) of spatial transformer: (y, x) + */ +def setTarget_shape(target_shape : org.apache.mxnet.javaapi.Shape): SpatialTransformerParam = { + this.target_shape = target_shape + this + } + def getTarget_shape() = this.target_shape + def getTransform_type() = this.transform_type + def getSampler_type() = this.sampler_type + private var cudnn_off: java.lang.Boolean = null +/** + * @param cudnn_off whether to turn cudnn off + */ +def setCudnn_off(cudnn_off : java.lang.Boolean): SpatialTransformerParam = { + this.cudnn_off = cudnn_off + this + } + def getCudnn_off() = this.cudnn_off + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : SpatialTransformerParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for max + * @param data The input + */ + class maxParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var axis: org.apache.mxnet.javaapi.Shape = null +/** + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + */ +def setAxis(axis : org.apache.mxnet.javaapi.Shape): maxParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var keepdims: java.lang.Boolean = null +/** + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + */ +def setKeepdims(keepdims : java.lang.Boolean): maxParam = { + this.keepdims = keepdims + this + } + def getKeepdims() = this.keepdims + private var exclude: java.lang.Boolean = null +/** + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + */ +def setExclude(exclude : java.lang.Boolean): maxParam = { + this.exclude = exclude + this + } + def getExclude() = this.exclude + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : maxParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for take + * @param a The input array. + * @param indices The indices of the values to be extracted. + */ + class takeParam(a : org.apache.mxnet.javaapi.NDArray,indices : org.apache.mxnet.javaapi.NDArray) { + def getA() = this.a + def getIndices() = this.indices + private var axis: java.lang.Integer = null +/** + * @param axis The axis of input array to be taken. For input tensor of rank r, it could be in the range of [-r, r-1] + */ +def setAxis(axis : java.lang.Integer): takeParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var mode: String = null +/** + * @param mode Specify how out-of-bound indices behave. Default is "clip". "clip" means clip to the range.
So, if all indices mentioned are too large, they are replaced by the index that addresses the last element along an axis. "wrap" means to wrap around. "raise" means to raise an error when index out of range. + */ +def setMode(mode : String): takeParam = { + this.mode = mode + this + } + def getMode() = this.mode + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : takeParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for SequenceLast + * @param data n-dimensional input array of the form [max_sequence_length, batch_size, other_feature_dims] where n>2 + * @param sequence_length vector of sequence lengths of the form [batch_size] + */ + class SequenceLastParam(data : org.apache.mxnet.javaapi.NDArray,sequence_length : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + def getSequence_length() = this.sequence_length + private var use_sequence_length: java.lang.Boolean = null +/** + * @param use_sequence_length If set to true, this layer takes in an extra input parameter `sequence_length` to specify variable length sequence + */ +def setUse_sequence_length(use_sequence_length : java.lang.Boolean): SequenceLastParam = { + this.use_sequence_length = use_sequence_length + this + } + def getUse_sequence_length() = this.use_sequence_length + private var axis: java.lang.Integer = null +/** + * @param axis The sequence axis. Only values of 0 and 1 are currently supported. + */ +def setAxis(axis : java.lang.Integer): SequenceLastParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : SequenceLastParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for norm + * @param data The input + */ + class normParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var ord: java.lang.Integer = null +/** + * @param ord Order of the norm. Currently ord=1 and ord=2 is supported. + */ +def setOrd(ord : java.lang.Integer): normParam = { + this.ord = ord + this + } + def getOrd() = this.ord + private var axis: org.apache.mxnet.javaapi.Shape = null +/** + * @param axis The axis or axes along which to perform the reduction. + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + If `axis` is int, a reduction is performed on a particular axis. + If `axis` is a 2-tuple, it specifies the axes that hold 2-D matrices, + and the matrix norms of these matrices are computed. + */ +def setAxis(axis : org.apache.mxnet.javaapi.Shape): normParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var out_dtype: String = null +/** + * @param out_dtype The data type of the output. + */ +def setOut_dtype(out_dtype : String): normParam = { + this.out_dtype = out_dtype + this + } + def getOut_dtype() = this.out_dtype + private var keepdims: java.lang.Boolean = null +/** + * @param keepdims If this is set to `True`, the reduced axis is left in the result as dimension with size one. 
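+ *
+ * Illustrative usage sketch of the `normParam` builder; `x` stands for an existing
+ * org.apache.mxnet.javaapi.NDArray created elsewhere:
+ * {{{
+ * // configure an L2 norm reduction that keeps the reduced axis
+ * val l2 = new normParam(x).setOrd(2).setKeepdims(true)
+ * }}}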
+ */ +def setKeepdims(keepdims : java.lang.Boolean): normParam = { + this.keepdims = keepdims + this + } + def getKeepdims() = this.keepdims + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : normParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for topk + * @param data The input array + */ + class topkParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var axis: java.lang.Integer = null +/** + * @param axis Axis along which to choose the top k indices. If not given, the flattened array is used. Default is -1. + */ +def setAxis(axis : java.lang.Integer): topkParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var k: java.lang.Integer = null +/** + * @param k Number of top elements to select, should be always smaller than or equal to the element number in the given axis. A global sort is performed if set k < 1. + */ +def setK(k : java.lang.Integer): topkParam = { + this.k = k + this + } + def getK() = this.k + private var ret_typ: String = null +/** + * @param ret_typ The return type. + "value" means to return the top k values, "indices" means to return the indices of the top k values, "mask" means to return a mask array containing 0 and 1. 1 means the top k values. "both" means to return a list of both values and indices of top k elements. + */ +def setRet_typ(ret_typ : String): topkParam = { + this.ret_typ = ret_typ + this + } + def getRet_typ() = this.ret_typ + private var is_ascend: java.lang.Boolean = null +/** + * @param is_ascend Whether to choose k largest or k smallest elements. Top K largest elements will be chosen if set to false. + */ +def setIs_ascend(is_ascend : java.lang.Boolean): topkParam = { + this.is_ascend = is_ascend + this + } + def getIs_ascend() = this.is_ascend + private var dtype: String = null +/** + * @param dtype DType of the output indices when ret_typ is "indices" or "both". An error will be raised if the selected data type cannot precisely represent the indices. + */ +def setDtype(dtype : String): topkParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : topkParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for Correlation + * @param data1 Input data1 to the correlation. + * @param data2 Input data2 to the correlation. 
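+ *
+ * Illustrative usage sketch; `d1` and `d2` stand for two existing
+ * org.apache.mxnet.javaapi.NDArray feature maps created elsewhere:
+ * {{{
+ * val corr = new CorrelationParam(d1, d2)
+ *   .setKernel_size(3)          // must be an odd number
+ *   .setMax_displacement(10)
+ *   .setStride1(1)
+ *   .setStride2(1)
+ *   .setPad_size(10)
+ *   .setIs_multiply(true)
+ * }}}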
+ */ + class CorrelationParam(data1 : org.apache.mxnet.javaapi.NDArray,data2 : org.apache.mxnet.javaapi.NDArray) { + def getData1() = this.data1 + def getData2() = this.data2 + private var kernel_size: java.lang.Integer = null +/** + * @param kernel_size kernel size for Correlation must be an odd number + */ +def setKernel_size(kernel_size : java.lang.Integer): CorrelationParam = { + this.kernel_size = kernel_size + this + } + def getKernel_size() = this.kernel_size + private var max_displacement: java.lang.Integer = null +/** + * @param max_displacement Max displacement of Correlation + */ +def setMax_displacement(max_displacement : java.lang.Integer): CorrelationParam = { + this.max_displacement = max_displacement + this + } + def getMax_displacement() = this.max_displacement + private var stride1: java.lang.Integer = null +/** + * @param stride1 stride1 quantize data1 globally + */ +def setStride1(stride1 : java.lang.Integer): CorrelationParam = { + this.stride1 = stride1 + this + } + def getStride1() = this.stride1 + private var stride2: java.lang.Integer = null +/** + * @param stride2 stride2 quantize data2 within the neighborhood centered around data1 + */ +def setStride2(stride2 : java.lang.Integer): CorrelationParam = { + this.stride2 = stride2 + this + } + def getStride2() = this.stride2 + private var pad_size: java.lang.Integer = null +/** + * @param pad_size pad for Correlation + */ +def setPad_size(pad_size : java.lang.Integer): CorrelationParam = { + this.pad_size = pad_size + this + } + def getPad_size() = this.pad_size + private var is_multiply: java.lang.Boolean = null +/** + * @param is_multiply operation type is either multiplication or subtraction + */ +def setIs_multiply(is_multiply : java.lang.Boolean): CorrelationParam = { + this.is_multiply = is_multiply + this + } + def getIs_multiply() = this.is_multiply + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : CorrelationParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for split + * @param data The input + * @param num_outputs Number of splits. Note that this should evenly divide the length of the `axis`. + */ + class splitParam(data : org.apache.mxnet.javaapi.NDArray,num_outputs : java.lang.Integer) { + def getData() = this.data + def getNum_outputs() = this.num_outputs + private var axis: java.lang.Integer = null +/** + * @param axis Axis along which to split. + */ +def setAxis(axis : java.lang.Integer): splitParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var squeeze_axis: java.lang.Boolean = null +/** + * @param squeeze_axis If true, removes the axis with length 1 from the shapes of the output arrays. **Note** that setting `squeeze_axis` to ``true`` removes axis with length 1 only along the `axis` which it is split. Also `squeeze_axis` can be set to ``true`` only if ``input.shape[axis] == num_outputs``. + */ +def setSqueeze_axis(squeeze_axis : java.lang.Boolean): splitParam = { + this.squeeze_axis = squeeze_axis + this + } + def getSqueeze_axis() = this.squeeze_axis + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : splitParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for random_randint + * @param low Lower bound of the distribution. + * @param high Upper bound of the distribution.
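+ *
+ * Illustrative usage sketch; `shape` stands for a pre-built
+ * org.apache.mxnet.javaapi.Shape created elsewhere:
+ * {{{
+ * val randint = new random_randintParam(0L, 10L)
+ *   .setShape(shape)
+ *   .setCtx("cpu(0)")
+ *   .setDtype("int32")
+ * }}}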
+ */ + class random_randintParam(low : java.lang.Long,high : java.lang.Long) { + def getLow() = this.low + def getHigh() = this.high + private var shape: org.apache.mxnet.javaapi.Shape = null +/** + * @param shape Shape of the output. + */ +def setShape(shape : org.apache.mxnet.javaapi.Shape): random_randintParam = { + this.shape = shape + this + } + def getShape() = this.shape + private var ctx: String = null +/** + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + */ +def setCtx(ctx : String): random_randintParam = { + this.ctx = ctx + this + } + def getCtx() = this.ctx + private var dtype: String = null +/** + * @param dtype DType of the output in case this can't be inferred. Defaults to int32 if not defined (dtype=None). + */ +def setDtype(dtype : String): random_randintParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : random_randintParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for pick + * @param data The input array + * @param index The index array + */ + class pickParam(data : org.apache.mxnet.javaapi.NDArray,index : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + def getIndex() = this.index + private var axis: java.lang.Integer = null +/** + * @param axis int or None. The axis to picking the elements. Negative values means indexing from right to left. If is `None`, the elements in the index w.r.t the flattened input will be picked. + */ +def setAxis(axis : java.lang.Integer): pickParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var keepdims: java.lang.Boolean = null +/** + * @param keepdims If true, the axis where we pick the elements is left in the result as dimension with size one. + */ +def setKeepdims(keepdims : java.lang.Boolean): pickParam = { + this.keepdims = keepdims + this + } + def getKeepdims() = this.keepdims + private var mode: String = null +/** + * @param mode Specify how out-of-bound indices behave. Default is "clip". "clip" means clip to the range. So, if all indices mentioned are too large, they are replaced by the index that addresses the last element along an axis. "wrap" means to wrap around. 
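+ *
+ * Illustrative usage sketch of the `pickParam` builder; `x` and `idx` stand for existing
+ * org.apache.mxnet.javaapi.NDArray inputs created elsewhere:
+ * {{{
+ * val picked = new pickParam(x, idx).setAxis(1).setKeepdims(false).setMode("clip")
+ * }}}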
+ */ +def setMode(mode : String): pickParam = { + this.mode = mode + this + } + def getMode() = this.mode + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : pickParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for SequenceMask + * @param data n-dimensional input array of the form [max_sequence_length, batch_size, other_feature_dims] where n>2 + * @param sequence_length vector of sequence lengths of the form [batch_size] + */ + class SequenceMaskParam(data : org.apache.mxnet.javaapi.NDArray,sequence_length : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + def getSequence_length() = this.sequence_length + private var use_sequence_length: java.lang.Boolean = null +/** + * @param use_sequence_length If set to true, this layer takes in an extra input parameter `sequence_length` to specify variable length sequence + */ +def setUse_sequence_length(use_sequence_length : java.lang.Boolean): SequenceMaskParam = { + this.use_sequence_length = use_sequence_length + this + } + def getUse_sequence_length() = this.use_sequence_length + private var value: java.lang.Float = null +/** + * @param value The value to be used as a mask. + */ +def setValue(value : java.lang.Float): SequenceMaskParam = { + this.value = value + this + } + def getValue() = this.value + private var axis: java.lang.Integer = null +/** + * @param axis The sequence axis. Only values of 0 and 1 are currently supported. + */ +def setAxis(axis : java.lang.Integer): SequenceMaskParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : SequenceMaskParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for broadcast_like + * @param lhs First input. + * @param rhs Second input. + */ + class broadcast_likeParam(lhs : org.apache.mxnet.javaapi.NDArray,rhs : org.apache.mxnet.javaapi.NDArray) { + def getLhs() = this.lhs + def getRhs() = this.rhs + private var lhs_axes: org.apache.mxnet.javaapi.Shape = null +/** + * @param lhs_axes Axes to perform broadcast on in the first input array + */ +def setLhs_axes(lhs_axes : org.apache.mxnet.javaapi.Shape): broadcast_likeParam = { + this.lhs_axes = lhs_axes + this + } + def getLhs_axes() = this.lhs_axes + private var rhs_axes: org.apache.mxnet.javaapi.Shape = null +/** + * @param rhs_axes Axes to copy from the second input array + */ +def setRhs_axes(rhs_axes : org.apache.mxnet.javaapi.Shape): broadcast_likeParam = { + this.rhs_axes = rhs_axes + this + } + def getRhs_axes() = this.rhs_axes + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : broadcast_likeParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for SoftmaxOutput + * @param data Input array. + * @param label Ground truth label. + */ + class SoftmaxOutputParam(data : org.apache.mxnet.javaapi.NDArray,label : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + def getLabel() = this.label + private var grad_scale: java.lang.Float = null +/** + * @param grad_scale Scales the gradient by a float factor. 
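+ *
+ * Illustrative usage sketch of the `SoftmaxOutputParam` builder; `x` and `y` stand for
+ * existing org.apache.mxnet.javaapi.NDArray data and label created elsewhere:
+ * {{{
+ * val softmaxOut = new SoftmaxOutputParam(x, y)
+ *   .setGrad_scale(1.0f)
+ *   .setUse_ignore(true)
+ *   .setIgnore_label(-1f)
+ * }}}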
+ */ +def setGrad_scale(grad_scale : java.lang.Float): SoftmaxOutputParam = { + this.grad_scale = grad_scale + this + } + def getGrad_scale() = this.grad_scale + private var ignore_label: java.lang.Float = null +/** + * @param ignore_label The instances whose `labels` == `ignore_label` will be ignored during backward, if `use_ignore` is set to ``true``. + */ +def setIgnore_label(ignore_label : java.lang.Float): SoftmaxOutputParam = { + this.ignore_label = ignore_label + this + } + def getIgnore_label() = this.ignore_label + private var multi_output: java.lang.Boolean = null +/** + * @param multi_output If set to ``true``, the softmax function will be computed along axis ``1``. This is applied when the shape of input array differs from the shape of label array. + */ +def setMulti_output(multi_output : java.lang.Boolean): SoftmaxOutputParam = { + this.multi_output = multi_output + this + } + def getMulti_output() = this.multi_output + private var use_ignore: java.lang.Boolean = null +/** + * @param use_ignore If set to ``true``, the `ignore_label` value will not contribute to the backward gradient. + */ +def setUse_ignore(use_ignore : java.lang.Boolean): SoftmaxOutputParam = { + this.use_ignore = use_ignore + this + } + def getUse_ignore() = this.use_ignore + private var preserve_shape: java.lang.Boolean = null +/** + * @param preserve_shape If set to ``true``, the softmax function will be computed along the last axis (``-1``). + */ +def setPreserve_shape(preserve_shape : java.lang.Boolean): SoftmaxOutputParam = { + this.preserve_shape = preserve_shape + this + } + def getPreserve_shape() = this.preserve_shape + private var normalization: String = null +/** + * @param normalization Normalizes the gradient. + */ +def setNormalization(normalization : String): SoftmaxOutputParam = { + this.normalization = normalization + this + } + def getNormalization() = this.normalization + private var out_grad: java.lang.Boolean = null +/** + * @param out_grad Multiplies gradient with output gradient element-wise. + */ +def setOut_grad(out_grad : java.lang.Boolean): SoftmaxOutputParam = { + this.out_grad = out_grad + this + } + def getOut_grad() = this.out_grad + private var smooth_alpha: java.lang.Float = null +/** + * @param smooth_alpha Constant for computing a label smoothed version of cross-entropy for the backwards pass. This constant gets subtracted from the one-hot encoding of the gold label and distributed uniformly to all other labels. + */ +def setSmooth_alpha(smooth_alpha : java.lang.Float): SoftmaxOutputParam = { + this.smooth_alpha = smooth_alpha + this + } + def getSmooth_alpha() = this.smooth_alpha + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : SoftmaxOutputParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for swapaxes + * @param data Input array. + */ + class swapaxesParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var dim1: java.lang.Integer = null +/** + * @param dim1 the first axis to be swapped. + */ +def setDim1(dim1 : java.lang.Integer): swapaxesParam = { + this.dim1 = dim1 + this + } + def getDim1() = this.dim1 + private var dim2: java.lang.Integer = null +/** + * @param dim2 the second axis to be swapped.
+ */ +def setDim2(dim2 : java.lang.Integer): swapaxesParam = { + this.dim2 = dim2 + this + } + def getDim2() = this.dim2 + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : swapaxesParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for cumsum + * @param a Input ndarray + */ + class cumsumParam(a : org.apache.mxnet.javaapi.NDArray) { + def getA() = this.a + private var axis: java.lang.Integer = null +/** + * @param axis Axis along which the cumulative sum is computed. The default (None) is to compute the cumsum over the flattened array. + */ +def setAxis(axis : java.lang.Integer): cumsumParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var dtype: String = null +/** + * @param dtype Type of the returned array and of the accumulator in which the elements are summed. If dtype is not specified, it defaults to the dtype of a, unless a has an integer dtype with a precision less than that of the default platform integer. In that case, the default platform integer is used. + */ +def setDtype(dtype : String): cumsumParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : cumsumParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for prod + * @param data The input + */ + class prodParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var axis: org.apache.mxnet.javaapi.Shape = null +/** + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + */ +def setAxis(axis : org.apache.mxnet.javaapi.Shape): prodParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var keepdims: java.lang.Boolean = null +/** + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + */ +def setKeepdims(keepdims : java.lang.Boolean): prodParam = { + this.keepdims = keepdims + this + } + def getKeepdims() = this.keepdims + private var exclude: java.lang.Boolean = null +/** + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + */ +def setExclude(exclude : java.lang.Boolean): prodParam = { + this.exclude = exclude + this + } + def getExclude() = this.exclude + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : prodParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for lamb_update_phase1 + * @param weight Weight + * @param grad Gradient + * @param mean Moving mean + * @param vari Moving variance + * @param t Index update count. + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. 
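+ *
+ * Illustrative usage sketch; `w`, `g`, `m`, `v` stand for existing
+ * org.apache.mxnet.javaapi.NDArray weight, gradient and moment state created elsewhere:
+ * {{{
+ * val phase1 = new lamb_update_phase1Param(w, g, m, v, 1, 0.01f)
+ *   .setBeta1(0.9f)
+ *   .setBeta2(0.999f)
+ *   .setEpsilon(1e-6f)
+ *   .setBias_correction(true)
+ * }}}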
+ */ + class lamb_update_phase1Param(weight : org.apache.mxnet.javaapi.NDArray,grad : org.apache.mxnet.javaapi.NDArray,mean : org.apache.mxnet.javaapi.NDArray,vari : org.apache.mxnet.javaapi.NDArray,t : java.lang.Integer,wd : java.lang.Float) { + def getWeight() = this.weight + def getGrad() = this.grad + def getMean() = this.mean + def getVari() = this.vari + private var beta1: java.lang.Float = null +/** + * @param beta1 The decay rate for the 1st moment estimates. + */ +def setBeta1(beta1 : java.lang.Float): lamb_update_phase1Param = { + this.beta1 = beta1 + this + } + def getBeta1() = this.beta1 + private var beta2: java.lang.Float = null +/** + * @param beta2 The decay rate for the 2nd moment estimates. + */ +def setBeta2(beta2 : java.lang.Float): lamb_update_phase1Param = { + this.beta2 = beta2 + this + } + def getBeta2() = this.beta2 + private var epsilon: java.lang.Float = null +/** + * @param epsilon A small constant for numerical stability. + */ +def setEpsilon(epsilon : java.lang.Float): lamb_update_phase1Param = { + this.epsilon = epsilon + this + } + def getEpsilon() = this.epsilon + def getT() = this.t + private var bias_correction: java.lang.Boolean = null +/** + * @param bias_correction Whether to use bias correction. + */ +def setBias_correction(bias_correction : java.lang.Boolean): lamb_update_phase1Param = { + this.bias_correction = bias_correction + this + } + def getBias_correction() = this.bias_correction + def getWd() = this.wd + private var rescale_grad: java.lang.Float = null +/** + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + */ +def setRescale_grad(rescale_grad : java.lang.Float): lamb_update_phase1Param = { + this.rescale_grad = rescale_grad + this + } + def getRescale_grad() = this.rescale_grad + private var clip_gradient: java.lang.Float = null +/** + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + */ +def setClip_gradient(clip_gradient : java.lang.Float): lamb_update_phase1Param = { + this.clip_gradient = clip_gradient + this + } + def getClip_gradient() = this.clip_gradient + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : lamb_update_phase1Param = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for one_hot + * @param indices array of locations where to set on_value + * @param depth Depth of the one hot dimension. + */ + class one_hotParam(indices : org.apache.mxnet.javaapi.NDArray,depth : java.lang.Integer) { + def getIndices() = this.indices + def getDepth() = this.depth + private var on_value: java.lang.Double = null +/** + * @param on_value The value assigned to the locations represented by indices. + */ +def setOn_value(on_value : java.lang.Double): one_hotParam = { + this.on_value = on_value + this + } + def getOn_value() = this.on_value + private var off_value: java.lang.Double = null +/** + * @param off_value The value assigned to the locations not represented by indices. 
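+ *
+ * Illustrative usage sketch of the `one_hotParam` builder; `idx` stands for an existing
+ * org.apache.mxnet.javaapi.NDArray of indices created elsewhere:
+ * {{{
+ * val oneHot = new one_hotParam(idx, 10).setOn_value(1.0).setOff_value(0.0)
+ * }}}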
+ */ +def setOff_value(off_value : java.lang.Double): one_hotParam = { + this.off_value = off_value + this + } + def getOff_value() = this.off_value + private var dtype: String = null +/** + * @param dtype DType of the output + */ +def setDtype(dtype : String): one_hotParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : one_hotParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for mp_lamb_update_phase2 + * @param weight Weight + * @param g Output of mp_lamb_update_phase 1 + * @param r1 r1 + * @param r2 r2 + * @param weight32 Weight32 + * @param lr Learning rate + */ + class mp_lamb_update_phase2Param(weight : org.apache.mxnet.javaapi.NDArray,g : org.apache.mxnet.javaapi.NDArray,r1 : org.apache.mxnet.javaapi.NDArray,r2 : org.apache.mxnet.javaapi.NDArray,weight32 : org.apache.mxnet.javaapi.NDArray,lr : java.lang.Float) { + def getWeight() = this.weight + def getG() = this.g + def getR1() = this.r1 + def getR2() = this.r2 + def getWeight32() = this.weight32 + def getLr() = this.lr + private var lower_bound: java.lang.Float = null +/** + * @param lower_bound Lower limit of norm of weight. If lower_bound <= 0, Lower limit is not set + */ +def setLower_bound(lower_bound : java.lang.Float): mp_lamb_update_phase2Param = { + this.lower_bound = lower_bound + this + } + def getLower_bound() = this.lower_bound + private var upper_bound: java.lang.Float = null +/** + * @param upper_bound Upper limit of norm of weight. If upper_bound <= 0, Upper limit is not set + */ +def setUpper_bound(upper_bound : java.lang.Float): mp_lamb_update_phase2Param = { + this.upper_bound = upper_bound + this + } + def getUpper_bound() = this.upper_bound + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : mp_lamb_update_phase2Param = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for max_axis + * @param data The input + */ + class max_axisParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var axis: org.apache.mxnet.javaapi.Shape = null +/** + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + */ +def setAxis(axis : org.apache.mxnet.javaapi.Shape): max_axisParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var keepdims: java.lang.Boolean = null +/** + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + */ +def setKeepdims(keepdims : java.lang.Boolean): max_axisParam = { + this.keepdims = keepdims + this + } + def getKeepdims() = this.keepdims + private var exclude: java.lang.Boolean = null +/** + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. 
+ */ +def setExclude(exclude : java.lang.Boolean): max_axisParam = { + this.exclude = exclude + this + } + def getExclude() = this.exclude + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : max_axisParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for random_negative_binomial + + */ + class random_negative_binomialParam() { + private var k: java.lang.Integer = null +/** + * @param k Limit of unsuccessful experiments. + */ +def setK(k : java.lang.Integer): random_negative_binomialParam = { + this.k = k + this + } + def getK() = this.k + private var p: java.lang.Float = null +/** + * @param p Failure probability in each experiment. + */ +def setP(p : java.lang.Float): random_negative_binomialParam = { + this.p = p + this + } + def getP() = this.p + private var shape: org.apache.mxnet.javaapi.Shape = null +/** + * @param shape Shape of the output. + */ +def setShape(shape : org.apache.mxnet.javaapi.Shape): random_negative_binomialParam = { + this.shape = shape + this + } + def getShape() = this.shape + private var ctx: String = null +/** + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + */ +def setCtx(ctx : String): random_negative_binomialParam = { + this.ctx = ctx + this + } + def getCtx() = this.ctx + private var dtype: String = null +/** + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + */ +def setDtype(dtype : String): random_negative_binomialParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : random_negative_binomialParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for signum_update + * @param weight Weight + * @param grad Gradient + * @param mom Momentum + * @param lr Learning rate + */ + class signum_updateParam(weight : org.apache.mxnet.javaapi.NDArray,grad : org.apache.mxnet.javaapi.NDArray,mom : org.apache.mxnet.javaapi.NDArray,lr : java.lang.Float) { + def getWeight() = this.weight + def getGrad() = this.grad + def getMom() = this.mom + def getLr() = this.lr + private var momentum: java.lang.Float = null +/** + * @param momentum The decay rate of momentum estimates at each epoch. + */ +def setMomentum(momentum : java.lang.Float): signum_updateParam = { + this.momentum = momentum + this + } + def getMomentum() = this.momentum + private var wd: java.lang.Float = null +/** + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + */ +def setWd(wd : java.lang.Float): signum_updateParam = { + this.wd = wd + this + } + def getWd() = this.wd + private var rescale_grad: java.lang.Float = null +/** + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + */ +def setRescale_grad(rescale_grad : java.lang.Float): signum_updateParam = { + this.rescale_grad = rescale_grad + this + } + def getRescale_grad() = this.rescale_grad + private var clip_gradient: java.lang.Float = null +/** + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). 
+ */ +def setClip_gradient(clip_gradient : java.lang.Float): signum_updateParam = { + this.clip_gradient = clip_gradient + this + } + def getClip_gradient() = this.clip_gradient + private var wd_lh: java.lang.Float = null +/** + * @param wd_lh The amount of weight decay that does not go into gradient/momentum calculationsotherwise do weight decay algorithmically only. + */ +def setWd_lh(wd_lh : java.lang.Float): signum_updateParam = { + this.wd_lh = wd_lh + this + } + def getWd_lh() = this.wd_lh + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : signum_updateParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for multi_mp_sgd_mom_update + * @param data Weights + * @param lrs Learning rates. + * @param wds Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + */ + class multi_mp_sgd_mom_updateParam(data : Array[org.apache.mxnet.javaapi.NDArray],lrs : Any,wds : Any) { + def getData() = this.data + def getLrs() = this.lrs + def getWds() = this.wds + private var momentum: java.lang.Float = null +/** + * @param momentum The decay rate of momentum estimates at each epoch. + */ +def setMomentum(momentum : java.lang.Float): multi_mp_sgd_mom_updateParam = { + this.momentum = momentum + this + } + def getMomentum() = this.momentum + private var rescale_grad: java.lang.Float = null +/** + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + */ +def setRescale_grad(rescale_grad : java.lang.Float): multi_mp_sgd_mom_updateParam = { + this.rescale_grad = rescale_grad + this + } + def getRescale_grad() = this.rescale_grad + private var clip_gradient: java.lang.Float = null +/** + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + */ +def setClip_gradient(clip_gradient : java.lang.Float): multi_mp_sgd_mom_updateParam = { + this.clip_gradient = clip_gradient + this + } + def getClip_gradient() = this.clip_gradient + private var num_weights: java.lang.Integer = null +/** + * @param num_weights Number of updated weights. + */ +def setNum_weights(num_weights : java.lang.Integer): multi_mp_sgd_mom_updateParam = { + this.num_weights = num_weights + this + } + def getNum_weights() = this.num_weights + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : multi_mp_sgd_mom_updateParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for choose_element_0index + * @param data The input array + * @param index The index array + */ + class choose_element_0indexParam(data : org.apache.mxnet.javaapi.NDArray,index : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + def getIndex() = this.index + private var axis: java.lang.Integer = null +/** + * @param axis int or None. The axis to picking the elements. Negative values means indexing from right to left. If is `None`, the elements in the index w.r.t the flattened input will be picked. + */ +def setAxis(axis : java.lang.Integer): choose_element_0indexParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var keepdims: java.lang.Boolean = null +/** + * @param keepdims If true, the axis where we pick the elements is left in the result as dimension with size one. 
+ */ +def setKeepdims(keepdims : java.lang.Boolean): choose_element_0indexParam = { + this.keepdims = keepdims + this + } + def getKeepdims() = this.keepdims + private var mode: String = null +/** + * @param mode Specify how out-of-bound indices behave. Default is "clip". "clip" means clip to the range. So, if all indices mentioned are too large, they are replaced by the index that addresses the last element along an axis. "wrap" means to wrap around. + */ +def setMode(mode : String): choose_element_0indexParam = { + this.mode = mode + this + } + def getMode() = this.mode + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : choose_element_0indexParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for IdentityAttachKLSparseReg + * @param data Input data. + */ + class IdentityAttachKLSparseRegParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var sparseness_target: java.lang.Float = null +/** + * @param sparseness_target The sparseness target + */ +def setSparseness_target(sparseness_target : java.lang.Float): IdentityAttachKLSparseRegParam = { + this.sparseness_target = sparseness_target + this + } + def getSparseness_target() = this.sparseness_target + private var penalty: java.lang.Float = null +/** + * @param penalty The tradeoff parameter for the sparseness penalty + */ +def setPenalty(penalty : java.lang.Float): IdentityAttachKLSparseRegParam = { + this.penalty = penalty + this + } + def getPenalty() = this.penalty + private var momentum: java.lang.Float = null +/** + * @param momentum The momentum for running average + */ +def setMomentum(momentum : java.lang.Float): IdentityAttachKLSparseRegParam = { + this.momentum = momentum + this + } + def getMomentum() = this.momentum + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : IdentityAttachKLSparseRegParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for rmspropalex_update + * @param weight Weight + * @param grad Gradient + * @param n n + * @param g g + * @param delta delta + * @param lr Learning rate + */ + class rmspropalex_updateParam(weight : org.apache.mxnet.javaapi.NDArray,grad : org.apache.mxnet.javaapi.NDArray,n : org.apache.mxnet.javaapi.NDArray,g : org.apache.mxnet.javaapi.NDArray,delta : org.apache.mxnet.javaapi.NDArray,lr : java.lang.Float) { + def getWeight() = this.weight + def getGrad() = this.grad + def getN() = this.n + def getG() = this.g + def getDelta() = this.delta + def getLr() = this.lr + private var gamma1: java.lang.Float = null +/** + * @param gamma1 Decay rate. + */ +def setGamma1(gamma1 : java.lang.Float): rmspropalex_updateParam = { + this.gamma1 = gamma1 + this + } + def getGamma1() = this.gamma1 + private var gamma2: java.lang.Float = null +/** + * @param gamma2 Decay rate. + */ +def setGamma2(gamma2 : java.lang.Float): rmspropalex_updateParam = { + this.gamma2 = gamma2 + this + } + def getGamma2() = this.gamma2 + private var epsilon: java.lang.Float = null +/** + * @param epsilon A small constant for numerical stability. + */ +def setEpsilon(epsilon : java.lang.Float): rmspropalex_updateParam = { + this.epsilon = epsilon + this + } + def getEpsilon() = this.epsilon + private var wd: java.lang.Float = null +/** + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. 
The penalty scales with the square of the magnitude of each weight. + */ +def setWd(wd : java.lang.Float): rmspropalex_updateParam = { + this.wd = wd + this + } + def getWd() = this.wd + private var rescale_grad: java.lang.Float = null +/** + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + */ +def setRescale_grad(rescale_grad : java.lang.Float): rmspropalex_updateParam = { + this.rescale_grad = rescale_grad + this + } + def getRescale_grad() = this.rescale_grad + private var clip_gradient: java.lang.Float = null +/** + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + */ +def setClip_gradient(clip_gradient : java.lang.Float): rmspropalex_updateParam = { + this.clip_gradient = clip_gradient + this + } + def getClip_gradient() = this.clip_gradient + private var clip_weights: java.lang.Float = null +/** + * @param clip_weights Clip weights to the range of [-clip_weights, clip_weights] If clip_weights <= 0, weight clipping is turned off. weights = max(min(weights, clip_weights), -clip_weights). + */ +def setClip_weights(clip_weights : java.lang.Float): rmspropalex_updateParam = { + this.clip_weights = clip_weights + this + } + def getClip_weights() = this.clip_weights + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : rmspropalex_updateParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for BatchNorm + * @param data Input data to batch normalization + * @param gamma gamma array + * @param beta beta array + * @param moving_mean running mean of input + * @param moving_var running variance of input + */ + class BatchNormParam(data : org.apache.mxnet.javaapi.NDArray,gamma : org.apache.mxnet.javaapi.NDArray,beta : org.apache.mxnet.javaapi.NDArray,moving_mean : org.apache.mxnet.javaapi.NDArray,moving_var : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + def getGamma() = this.gamma + def getBeta() = this.beta + def getMoving_mean() = this.moving_mean + def getMoving_var() = this.moving_var + private var eps: java.lang.Double = null +/** + * @param eps Epsilon to prevent div 0. Must be no less than CUDNN_BN_MIN_EPSILON defined in cudnn.h when using cudnn (usually 1e-5) + */ +def setEps(eps : java.lang.Double): BatchNormParam = { + this.eps = eps + this + } + def getEps() = this.eps + private var momentum: java.lang.Float = null +/** + * @param momentum Momentum for moving average + */ +def setMomentum(momentum : java.lang.Float): BatchNormParam = { + this.momentum = momentum + this + } + def getMomentum() = this.momentum + private var fix_gamma: java.lang.Boolean = null +/** + * @param fix_gamma Fix gamma while training + */ +def setFix_gamma(fix_gamma : java.lang.Boolean): BatchNormParam = { + this.fix_gamma = fix_gamma + this + } + def getFix_gamma() = this.fix_gamma + private var use_global_stats: java.lang.Boolean = null +/** + * @param use_global_stats Whether use global moving statistics instead of local batch-norm. This will force change batch-norm into a scale shift operator. 
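+ *
+ * Illustrative usage sketch of the `BatchNormParam` builder; `x`, `gamma`, `beta`, `movMean`
+ * and `movVar` stand for existing org.apache.mxnet.javaapi.NDArray inputs and running
+ * statistics created elsewhere:
+ * {{{
+ * val bn = new BatchNormParam(x, gamma, beta, movMean, movVar)
+ *   .setEps(1e-5)
+ *   .setMomentum(0.9f)
+ *   .setFix_gamma(false)
+ *   .setUse_global_stats(true)   // e.g. at inference time
+ * }}}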
+ */ +def setUse_global_stats(use_global_stats : java.lang.Boolean): BatchNormParam = { + this.use_global_stats = use_global_stats + this + } + def getUse_global_stats() = this.use_global_stats + private var output_mean_var: java.lang.Boolean = null +/** + * @param output_mean_var Output the mean and inverse std + */ +def setOutput_mean_var(output_mean_var : java.lang.Boolean): BatchNormParam = { + this.output_mean_var = output_mean_var + this + } + def getOutput_mean_var() = this.output_mean_var + private var axis: java.lang.Integer = null +/** + * @param axis Specify which shape axis the channel is specified + */ +def setAxis(axis : java.lang.Integer): BatchNormParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var cudnn_off: java.lang.Boolean = null +/** + * @param cudnn_off Do not select CUDNN operator, if available + */ +def setCudnn_off(cudnn_off : java.lang.Boolean): BatchNormParam = { + this.cudnn_off = cudnn_off + this + } + def getCudnn_off() = this.cudnn_off + private var min_calib_range: java.lang.Float = null +/** + * @param min_calib_range The minimum scalar value in the form of float32 obtained through calibration. If present, it will be used by the quantized batch norm op to calculate primitive scale. Note: this calib_range is to calib bn output. + */ +def setMin_calib_range(min_calib_range : java.lang.Float): BatchNormParam = { + this.min_calib_range = min_calib_range + this + } + def getMin_calib_range() = this.min_calib_range + private var max_calib_range: java.lang.Float = null +/** + * @param max_calib_range The maximum scalar value in the form of float32 obtained through calibration. If present, it will be used by the quantized batch norm op to calculate primitive scale. Note: this calib_range is to calib bn output. + */ +def setMax_calib_range(max_calib_range : java.lang.Float): BatchNormParam = { + this.max_calib_range = max_calib_range + this + } + def getMax_calib_range() = this.max_calib_range + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : BatchNormParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for sort + * @param data The input array + */ + class sortParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var axis: java.lang.Integer = null +/** + * @param axis Axis along which to sort the input tensor. If not given, the flattened array is used. Default is -1. + */ +def setAxis(axis : java.lang.Integer): sortParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var is_ascend: java.lang.Boolean = null +/** + * @param is_ascend Whether to sort in ascending or descending order.
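+ *
+ * Illustrative usage sketch of the `sortParam` builder; `x` stands for an existing
+ * org.apache.mxnet.javaapi.NDArray created elsewhere:
+ * {{{
+ * // sort along the last axis in descending order
+ * val sorted = new sortParam(x).setAxis(-1).setIs_ascend(false)
+ * }}}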
+ */ +def setIs_ascend(is_ascend : java.lang.Boolean): sortParam = { + this.is_ascend = is_ascend + this + } + def getIs_ascend() = this.is_ascend + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : sortParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for sgd_mom_update + * @param weight Weight + * @param grad Gradient + * @param mom Momentum + * @param lr Learning rate + */ + class sgd_mom_updateParam(weight : org.apache.mxnet.javaapi.NDArray,grad : org.apache.mxnet.javaapi.NDArray,mom : org.apache.mxnet.javaapi.NDArray,lr : java.lang.Float) { + def getWeight() = this.weight + def getGrad() = this.grad + def getMom() = this.mom + def getLr() = this.lr + private var momentum: java.lang.Float = null +/** + * @param momentum The decay rate of momentum estimates at each epoch. + */ +def setMomentum(momentum : java.lang.Float): sgd_mom_updateParam = { + this.momentum = momentum + this + } + def getMomentum() = this.momentum + private var wd: java.lang.Float = null +/** + * @param wd Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + */ +def setWd(wd : java.lang.Float): sgd_mom_updateParam = { + this.wd = wd + this + } + def getWd() = this.wd + private var rescale_grad: java.lang.Float = null +/** + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + */ +def setRescale_grad(rescale_grad : java.lang.Float): sgd_mom_updateParam = { + this.rescale_grad = rescale_grad + this + } + def getRescale_grad() = this.rescale_grad + private var clip_gradient: java.lang.Float = null +/** + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + */ +def setClip_gradient(clip_gradient : java.lang.Float): sgd_mom_updateParam = { + this.clip_gradient = clip_gradient + this + } + def getClip_gradient() = this.clip_gradient + private var lazy_update: java.lang.Boolean = null +/** + * @param lazy_update If true, lazy updates are applied if gradient's stype is row_sparse and both weight and momentum have the same stype + */ +def setLazy_update(lazy_update : java.lang.Boolean): sgd_mom_updateParam = { + this.lazy_update = lazy_update + this + } + def getLazy_update() = this.lazy_update + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : sgd_mom_updateParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for preloaded_multi_sgd_mom_update + * @param data Weights, gradients, momentum, learning rates and weight decays + */ + class preloaded_multi_sgd_mom_updateParam(data : Array[org.apache.mxnet.javaapi.NDArray]) { + def getData() = this.data + private var momentum: java.lang.Float = null +/** + * @param momentum The decay rate of momentum estimates at each epoch. + */ +def setMomentum(momentum : java.lang.Float): preloaded_multi_sgd_mom_updateParam = { + this.momentum = momentum + this + } + def getMomentum() = this.momentum + private var rescale_grad: java.lang.Float = null +/** + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. 
+ */ +def setRescale_grad(rescale_grad : java.lang.Float): preloaded_multi_sgd_mom_updateParam = { + this.rescale_grad = rescale_grad + this + } + def getRescale_grad() = this.rescale_grad + private var clip_gradient: java.lang.Float = null +/** + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + */ +def setClip_gradient(clip_gradient : java.lang.Float): preloaded_multi_sgd_mom_updateParam = { + this.clip_gradient = clip_gradient + this + } + def getClip_gradient() = this.clip_gradient + private var num_weights: java.lang.Integer = null +/** + * @param num_weights Number of updated weights. + */ +def setNum_weights(num_weights : java.lang.Integer): preloaded_multi_sgd_mom_updateParam = { + this.num_weights = num_weights + this + } + def getNum_weights() = this.num_weights + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : preloaded_multi_sgd_mom_updateParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for UpSampling + * @param data Array of tensors to upsample. For bilinear upsampling, there should be 2 inputs - 1 data and 1 weight. + * @param scale Up sampling scale + * @param sample_type upsampling method + * @param num_args Number of inputs to be upsampled. For nearest neighbor upsampling, this can be 1-N; the size of output will be(scale*h_0,scale*w_0) and all other inputs will be upsampled to thesame size. For bilinear upsampling this must be 2; 1 input and 1 weight. + */ + class UpSamplingParam(data : Array[org.apache.mxnet.javaapi.NDArray],scale : java.lang.Integer,sample_type : String,num_args : java.lang.Integer) { + def getData() = this.data + def getScale() = this.scale + private var num_filter: java.lang.Integer = null +/** + * @param num_filter Input filter. Only used by bilinear sample_type.Since bilinear upsampling uses deconvolution, num_filters is set to the number of channels. + */ +def setNum_filter(num_filter : java.lang.Integer): UpSamplingParam = { + this.num_filter = num_filter + this + } + def getNum_filter() = this.num_filter + def getSample_type() = this.sample_type + private var multi_input_mode: String = null +/** + * @param multi_input_mode How to handle multiple input. concat means concatenate upsampled images along the channel dimension. sum means add all images together, only available for nearest neighbor upsampling. + */ +def setMulti_input_mode(multi_input_mode : String): UpSamplingParam = { + this.multi_input_mode = multi_input_mode + this + } + def getMulti_input_mode() = this.multi_input_mode + def getNum_args() = this.num_args + private var workspace: java.lang.Long = null +/** + * @param workspace Tmp workspace for deconvolution (MB) + */ +def setWorkspace(workspace : java.lang.Long): UpSamplingParam = { + this.workspace = workspace + this + } + def getWorkspace() = this.workspace + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : UpSamplingParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for argmin + * @param data The input + */ + class argminParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var axis: java.lang.Integer = null +/** + * @param axis The axis along which to perform the reduction. Negative values means indexing from right to left. 
``Requires axis to be set as int, because global reduction is not supported yet.`` + */ +def setAxis(axis : java.lang.Integer): argminParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var keepdims: java.lang.Boolean = null +/** + * @param keepdims If this is set to `True`, the reduced axis is left in the result as dimension with size one. + */ +def setKeepdims(keepdims : java.lang.Boolean): argminParam = { + this.keepdims = keepdims + this + } + def getKeepdims() = this.keepdims + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : argminParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for broadcast_axes + * @param data The input + */ + class broadcast_axesParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var axis: org.apache.mxnet.javaapi.Shape = null +/** + * @param axis The axes to perform the broadcasting. + */ +def setAxis(axis : org.apache.mxnet.javaapi.Shape): broadcast_axesParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var size: org.apache.mxnet.javaapi.Shape = null +/** + * @param size Target sizes of the broadcasting axes. + */ +def setSize(size : org.apache.mxnet.javaapi.Shape): broadcast_axesParam = { + this.size = size + this + } + def getSize() = this.size + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : broadcast_axesParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for nanprod + * @param data The input + */ + class nanprodParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var axis: org.apache.mxnet.javaapi.Shape = null +/** + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + */ +def setAxis(axis : org.apache.mxnet.javaapi.Shape): nanprodParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var keepdims: java.lang.Boolean = null +/** + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + */ +def setKeepdims(keepdims : java.lang.Boolean): nanprodParam = { + this.keepdims = keepdims + this + } + def getKeepdims() = this.keepdims + private var exclude: java.lang.Boolean = null +/** + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + */ +def setExclude(exclude : java.lang.Boolean): nanprodParam = { + this.exclude = exclude + this + } + def getExclude() = this.exclude + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : nanprodParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for argsort + * @param data The input array + */ + class argsortParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var axis: java.lang.Integer = null +/** + * @param axis Axis along which to sort the input tensor. If not given, the flattened array is used. Default is -1. 
+ */ +def setAxis(axis : java.lang.Integer): argsortParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var is_ascend: java.lang.Boolean = null +/** + * @param is_ascend Whether to sort in ascending or descending order. + */ +def setIs_ascend(is_ascend : java.lang.Boolean): argsortParam = { + this.is_ascend = is_ascend + this + } + def getIs_ascend() = this.is_ascend + private var dtype: String = null +/** + * @param dtype DType of the output indices. It is only valid when ret_typ is "indices" or "both". An error will be raised if the selected data type cannot precisely represent the indices. + */ +def setDtype(dtype : String): argsortParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : argsortParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for sample_exponential + * @param lam Lambda (rate) parameters of the distributions. + */ + class sample_exponentialParam(lam : org.apache.mxnet.javaapi.NDArray) { + def getLam() = this.lam + private var shape: org.apache.mxnet.javaapi.Shape = null +/** + * @param shape Shape to be sampled from each random distribution. + */ +def setShape(shape : org.apache.mxnet.javaapi.Shape): sample_exponentialParam = { + this.shape = shape + this + } + def getShape() = this.shape + private var dtype: String = null +/** + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + */ +def setDtype(dtype : String): sample_exponentialParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : sample_exponentialParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for SequenceReverse + * @param data n-dimensional input array of the form [max_sequence_length, batch_size, other dims] where n>2 + * @param sequence_length vector of sequence lengths of the form [batch_size] + */ + class SequenceReverseParam(data : org.apache.mxnet.javaapi.NDArray,sequence_length : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + def getSequence_length() = this.sequence_length + private var use_sequence_length: java.lang.Boolean = null +/** + * @param use_sequence_length If set to true, this layer takes in an extra input parameter `sequence_length` to specify variable length sequence + */ +def setUse_sequence_length(use_sequence_length : java.lang.Boolean): SequenceReverseParam = { + this.use_sequence_length = use_sequence_length + this + } + def getUse_sequence_length() = this.use_sequence_length + private var axis: java.lang.Integer = null +/** + * @param axis The sequence axis. Only 0 is currently supported. + */ +def setAxis(axis : java.lang.Integer): SequenceReverseParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : SequenceReverseParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for multi_sgd_mom_update + * @param data Weights, gradients and momentum + * @param lrs Learning rates. + * @param wds Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. 
+ */ + class multi_sgd_mom_updateParam(data : Array[org.apache.mxnet.javaapi.NDArray],lrs : Any,wds : Any) { + def getData() = this.data + def getLrs() = this.lrs + def getWds() = this.wds + private var momentum: java.lang.Float = null +/** + * @param momentum The decay rate of momentum estimates at each epoch. + */ +def setMomentum(momentum : java.lang.Float): multi_sgd_mom_updateParam = { + this.momentum = momentum + this + } + def getMomentum() = this.momentum + private var rescale_grad: java.lang.Float = null +/** + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + */ +def setRescale_grad(rescale_grad : java.lang.Float): multi_sgd_mom_updateParam = { + this.rescale_grad = rescale_grad + this + } + def getRescale_grad() = this.rescale_grad + private var clip_gradient: java.lang.Float = null +/** + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + */ +def setClip_gradient(clip_gradient : java.lang.Float): multi_sgd_mom_updateParam = { + this.clip_gradient = clip_gradient + this + } + def getClip_gradient() = this.clip_gradient + private var num_weights: java.lang.Integer = null +/** + * @param num_weights Number of updated weights. + */ +def setNum_weights(num_weights : java.lang.Integer): multi_sgd_mom_updateParam = { + this.num_weights = num_weights + this + } + def getNum_weights() = this.num_weights + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : multi_sgd_mom_updateParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for multi_sgd_update + * @param data Weights + * @param lrs Learning rates. + * @param wds Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. + */ + class multi_sgd_updateParam(data : Array[org.apache.mxnet.javaapi.NDArray],lrs : Any,wds : Any) { + def getData() = this.data + def getLrs() = this.lrs + def getWds() = this.wds + private var rescale_grad: java.lang.Float = null +/** + * @param rescale_grad Rescale gradient to grad = rescale_grad*grad. + */ +def setRescale_grad(rescale_grad : java.lang.Float): multi_sgd_updateParam = { + this.rescale_grad = rescale_grad + this + } + def getRescale_grad() = this.rescale_grad + private var clip_gradient: java.lang.Float = null +/** + * @param clip_gradient Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). + */ +def setClip_gradient(clip_gradient : java.lang.Float): multi_sgd_updateParam = { + this.clip_gradient = clip_gradient + this + } + def getClip_gradient() = this.clip_gradient + private var num_weights: java.lang.Integer = null +/** + * @param num_weights Number of updated weights. 
+ */ +def setNum_weights(num_weights : java.lang.Integer): multi_sgd_updateParam = { + this.num_weights = num_weights + this + } + def getNum_weights() = this.num_weights + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : multi_sgd_updateParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for random_generalized_negative_binomial + + */ + class random_generalized_negative_binomialParam() { + private var mu: java.lang.Float = null +/** + * @param mu Mean of the negative binomial distribution. + */ +def setMu(mu : java.lang.Float): random_generalized_negative_binomialParam = { + this.mu = mu + this + } + def getMu() = this.mu + private var alpha: java.lang.Float = null +/** + * @param alpha Alpha (dispersion) parameter of the negative binomial distribution. + */ +def setAlpha(alpha : java.lang.Float): random_generalized_negative_binomialParam = { + this.alpha = alpha + this + } + def getAlpha() = this.alpha + private var shape: org.apache.mxnet.javaapi.Shape = null +/** + * @param shape Shape of the output. + */ +def setShape(shape : org.apache.mxnet.javaapi.Shape): random_generalized_negative_binomialParam = { + this.shape = shape + this + } + def getShape() = this.shape + private var ctx: String = null +/** + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + */ +def setCtx(ctx : String): random_generalized_negative_binomialParam = { + this.ctx = ctx + this + } + def getCtx() = this.ctx + private var dtype: String = null +/** + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + */ +def setDtype(dtype : String): random_generalized_negative_binomialParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : random_generalized_negative_binomialParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for sample_uniform + * @param low Lower bounds of the distributions. + * @param high Upper bounds of the distributions. + */ + class sample_uniformParam(low : org.apache.mxnet.javaapi.NDArray,high : org.apache.mxnet.javaapi.NDArray) { + def getLow() = this.low + private var shape: org.apache.mxnet.javaapi.Shape = null +/** + * @param shape Shape to be sampled from each random distribution. + */ +def setShape(shape : org.apache.mxnet.javaapi.Shape): sample_uniformParam = { + this.shape = shape + this + } + def getShape() = this.shape + private var dtype: String = null +/** + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + */ +def setDtype(dtype : String): sample_uniformParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + def getHigh() = this.high + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : sample_uniformParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for SVMOutput + * @param data Input data for SVM transformation. + * @param label Class label for the input data. 
+ */ + class SVMOutputParam(data : org.apache.mxnet.javaapi.NDArray,label : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + def getLabel() = this.label + private var margin: java.lang.Float = null +/** + * @param margin The loss function penalizes outputs that lie outside this margin. Default margin is 1. + */ +def setMargin(margin : java.lang.Float): SVMOutputParam = { + this.margin = margin + this + } + def getMargin() = this.margin + private var regularization_coefficient: java.lang.Float = null +/** + * @param regularization_coefficient Regularization parameter for the SVM. This balances the tradeoff between coefficient size and error. + */ +def setRegularization_coefficient(regularization_coefficient : java.lang.Float): SVMOutputParam = { + this.regularization_coefficient = regularization_coefficient + this + } + def getRegularization_coefficient() = this.regularization_coefficient + private var use_linear: java.lang.Boolean = null +/** + * @param use_linear Whether to use L1-SVM objective. L2-SVM objective is used by default. + */ +def setUse_linear(use_linear : java.lang.Boolean): SVMOutputParam = { + this.use_linear = use_linear + this + } + def getUse_linear() = this.use_linear + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : SVMOutputParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for sum + * @param data The input + */ + class sumParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var axis: org.apache.mxnet.javaapi.Shape = null +/** + * @param axis The axis or axes along which to perform the reduction. + + The default, `axis=()`, will compute over all elements into a + scalar array with shape `(1,)`. + + If `axis` is int, a reduction is performed on a particular axis. + + If `axis` is a tuple of ints, a reduction is performed on all the axes + specified in the tuple. + + If `exclude` is true, reduction will be performed on the axes that are + NOT in axis instead. + + Negative values means indexing from right to left. + */ +def setAxis(axis : org.apache.mxnet.javaapi.Shape): sumParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var keepdims: java.lang.Boolean = null +/** + * @param keepdims If this is set to `True`, the reduced axes are left in the result as dimension with size one. + */ +def setKeepdims(keepdims : java.lang.Boolean): sumParam = { + this.keepdims = keepdims + this + } + def getKeepdims() = this.keepdims + private var exclude: java.lang.Boolean = null +/** + * @param exclude Whether to perform reduction on axis that are NOT in axis instead. + */ +def setExclude(exclude : java.lang.Boolean): sumParam = { + this.exclude = exclude + this + } + def getExclude() = this.exclude + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : sumParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for ctc_loss + * @param data Input ndarray + * @param label Ground-truth labels for the loss. + * @param data_lengths Lengths of data for each of the samples. Only required when use_data_lengths is true. + * @param label_lengths Lengths of labels for each of the samples. Only required when use_label_lengths is true. 
+ */ + class ctc_lossParam(data : org.apache.mxnet.javaapi.NDArray,label : org.apache.mxnet.javaapi.NDArray,data_lengths : org.apache.mxnet.javaapi.NDArray,label_lengths : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + def getLabel() = this.label + def getData_lengths() = this.data_lengths + def getLabel_lengths() = this.label_lengths + private var use_data_lengths: java.lang.Boolean = null +/** + * @param use_data_lengths Whether the data lengths are decided by `data_lengths`. If false, the lengths are equal to the max sequence length. + */ +def setUse_data_lengths(use_data_lengths : java.lang.Boolean): ctc_lossParam = { + this.use_data_lengths = use_data_lengths + this + } + def getUse_data_lengths() = this.use_data_lengths + private var use_label_lengths: java.lang.Boolean = null +/** + * @param use_label_lengths Whether the label lengths are decided by `label_lengths`, or derived from `padding_mask`. If false, the lengths are derived from the first occurrence of the value of `padding_mask`. The value of `padding_mask` is ``0`` when the first CTC label is reserved for blank, and ``-1`` when the last label is reserved for blank. See `blank_label`. + */ +def setUse_label_lengths(use_label_lengths : java.lang.Boolean): ctc_lossParam = { + this.use_label_lengths = use_label_lengths + this + } + def getUse_label_lengths() = this.use_label_lengths + private var blank_label: String = null +/** + * @param blank_label Set the label that is reserved for blank label. If "first", the 0-th label is reserved, and label values for tokens in the vocabulary are between ``1`` and ``alphabet_size-1``, and the padding mask is ``-1``. If "last", the last label value ``alphabet_size-1`` is reserved for blank label instead, and label values for tokens in the vocabulary are between ``0`` and ``alphabet_size-2``, and the padding mask is ``0``. + */ +def setBlank_label(blank_label : String): ctc_lossParam = { + this.blank_label = blank_label + this + } + def getBlank_label() = this.blank_label + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : ctc_lossParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for FullyConnected + * @param data Input data. + * @param weight Weight matrix. + * @param bias Bias parameter. + * @param num_hidden Number of hidden nodes of the output. + */ + class FullyConnectedParam(data : org.apache.mxnet.javaapi.NDArray,weight : org.apache.mxnet.javaapi.NDArray,bias : org.apache.mxnet.javaapi.NDArray,num_hidden : java.lang.Integer) { + def getData() = this.data + def getWeight() = this.weight + def getBias() = this.bias + def getNum_hidden() = this.num_hidden + private var no_bias: java.lang.Boolean = null +/** + * @param no_bias Whether to disable bias parameter. + */ +def setNo_bias(no_bias : java.lang.Boolean): FullyConnectedParam = { + this.no_bias = no_bias + this + } + def getNo_bias() = this.no_bias + private var flatten: java.lang.Boolean = null +/** + * @param flatten Whether to collapse all but the first axis of the input data tensor. + */ +def setFlatten(flatten : java.lang.Boolean): FullyConnectedParam = { + this.flatten = flatten + this + } + def getFlatten() = this.flatten + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : FullyConnectedParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for sample_multinomial + * @param data Distribution probabilities. 
Must sum to one on the last axis. + */ + class sample_multinomialParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var shape: org.apache.mxnet.javaapi.Shape = null +/** + * @param shape Shape to be sampled from each random distribution. + */ +def setShape(shape : org.apache.mxnet.javaapi.Shape): sample_multinomialParam = { + this.shape = shape + this + } + def getShape() = this.shape + private var get_prob: java.lang.Boolean = null +/** + * @param get_prob Whether to also return the log probability of sampled result. This is usually used for differentiating through stochastic variables, e.g. in reinforcement learning. + */ +def setGet_prob(get_prob : java.lang.Boolean): sample_multinomialParam = { + this.get_prob = get_prob + this + } + def getGet_prob() = this.get_prob + private var dtype: String = null +/** + * @param dtype DType of the output in case this can't be inferred. + */ +def setDtype(dtype : String): sample_multinomialParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : sample_multinomialParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for argmax + * @param data The input + */ + class argmaxParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var axis: java.lang.Integer = null +/** + * @param axis The axis along which to perform the reduction. Negative values means indexing from right to left. ``Requires axis to be set as int, because global reduction is not supported yet.`` + */ +def setAxis(axis : java.lang.Integer): argmaxParam = { + this.axis = axis + this + } + def getAxis() = this.axis + private var keepdims: java.lang.Boolean = null +/** + * @param keepdims If this is set to `True`, the reduced axis is left in the result as dimension with size one. + */ +def setKeepdims(keepdims : java.lang.Boolean): argmaxParam = { + this.keepdims = keepdims + this + } + def getKeepdims() = this.keepdims + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : argmaxParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for reshape_like + * @param lhs First input. + * @param rhs Second input. + */ + class reshape_likeParam(lhs : org.apache.mxnet.javaapi.NDArray,rhs : org.apache.mxnet.javaapi.NDArray) { + def getLhs() = this.lhs + def getRhs() = this.rhs + private var lhs_begin: java.lang.Integer = null +/** + * @param lhs_begin Defaults to 0. The beginning index along which the lhs dimensions are to be reshaped. Supports negative indices. + */ +def setLhs_begin(lhs_begin : java.lang.Integer): reshape_likeParam = { + this.lhs_begin = lhs_begin + this + } + def getLhs_begin() = this.lhs_begin + private var lhs_end: java.lang.Integer = null +/** + * @param lhs_end Defaults to None. The ending index along which the lhs dimensions are to be used for reshaping. Supports negative indices. + */ +def setLhs_end(lhs_end : java.lang.Integer): reshape_likeParam = { + this.lhs_end = lhs_end + this + } + def getLhs_end() = this.lhs_end + private var rhs_begin: java.lang.Integer = null +/** + * @param rhs_begin Defaults to 0. The beginning index along which the rhs dimensions are to be used for reshaping. Supports negative indices. 
+ */ +def setRhs_begin(rhs_begin : java.lang.Integer): reshape_likeParam = { + this.rhs_begin = rhs_begin + this + } + def getRhs_begin() = this.rhs_begin + private var rhs_end: java.lang.Integer = null +/** + * @param rhs_end Defaults to None. The ending index along which the rhs dimensions are to be used for reshaping. Supports negative indices. + */ +def setRhs_end(rhs_end : java.lang.Integer): reshape_likeParam = { + this.rhs_end = rhs_end + this + } + def getRhs_end() = this.rhs_end + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : reshape_likeParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for hard_sigmoid + * @param data The input array. + */ + class hard_sigmoidParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var alpha: java.lang.Float = null +/** + * @param alpha Slope of hard sigmoid + */ +def setAlpha(alpha : java.lang.Float): hard_sigmoidParam = { + this.alpha = alpha + this + } + def getAlpha() = this.alpha + private var beta: java.lang.Float = null +/** + * @param beta Bias of hard sigmoid. + */ +def setBeta(beta : java.lang.Float): hard_sigmoidParam = { + this.beta = beta + this + } + def getBeta() = this.beta + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : hard_sigmoidParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for MakeLoss + * @param data Input array. + */ + class MakeLossParam(data : org.apache.mxnet.javaapi.NDArray) { + def getData() = this.data + private var grad_scale: java.lang.Float = null +/** + * @param grad_scale Gradient scale as a supplement to unary and binary operators + */ +def setGrad_scale(grad_scale : java.lang.Float): MakeLossParam = { + this.grad_scale = grad_scale + this + } + def getGrad_scale() = this.grad_scale + private var valid_thresh: java.lang.Float = null +/** + * @param valid_thresh clip each element in the array to 0 when it is less than ``valid_thresh``. This is used when ``normalization`` is set to ``'valid'``. + */ +def setValid_thresh(valid_thresh : java.lang.Float): MakeLossParam = { + this.valid_thresh = valid_thresh + this + } + def getValid_thresh() = this.valid_thresh + private var normalization: String = null +/** + * @param normalization If this is set to null, the output gradient will not be normalized. If this is set to batch, the output gradient will be divided by the batch size. If this is set to valid, the output gradient will be divided by the number of valid input elements. + */ +def setNormalization(normalization : String): MakeLossParam = { + this.normalization = normalization + this + } + def getNormalization() = this.normalization + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : MakeLossParam = { + this.out = out + this + } + def getOut() = this.out + + } +/** + * This Param Object is specifically used for random_normal + + */ + class random_normalParam() { + private var loc: java.lang.Float = null +/** + * @param loc Mean of the distribution. + */ +def setLoc(loc : java.lang.Float): random_normalParam = { + this.loc = loc + this + } + def getLoc() = this.loc + private var scale: java.lang.Float = null +/** + * @param scale Standard deviation of the distribution. 
+ */ +def setScale(scale : java.lang.Float): random_normalParam = { + this.scale = scale + this + } + def getScale() = this.scale + private var shape: org.apache.mxnet.javaapi.Shape = null +/** + * @param shape Shape of the output. + */ +def setShape(shape : org.apache.mxnet.javaapi.Shape): random_normalParam = { + this.shape = shape + this + } + def getShape() = this.shape + private var ctx: String = null +/** + * @param ctx Context of output, in format [cpu|gpu|cpu_pinned](n). Only used for imperative calls. + */ +def setCtx(ctx : String): random_normalParam = { + this.ctx = ctx + this + } + def getCtx() = this.ctx + private var dtype: String = null +/** + * @param dtype DType of the output in case this can't be inferred. Defaults to float32 if not defined (dtype=None). + */ +def setDtype(dtype : String): random_normalParam = { + this.dtype = dtype + this + } + def getDtype() = this.dtype + private var out : org.apache.mxnet.NDArray = null +def setOut(out : NDArray) : random_normalParam = { + this.out = out + this + } + def getOut() = this.out + + }
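+/**
+ * Usage sketch (illustrative only, not generated): every Param class in this file follows
+ * the same builder pattern -- required operator inputs are constructor arguments, optional
+ * arguments are chained setters, and the populated object is passed to the matching
+ * generated NDArray method. The snippet below assumes a generated entry point of the form
+ * `NDArray.argsort(argsortParam)` and the javaapi `NDArray`, `Shape` and `Context`
+ * constructors shown; exact signatures may differ in the generated bindings.
+ *
+ * {{{
+ * import org.apache.mxnet.javaapi._
+ *
+ * // Required input via the constructor, optional arguments via chained setters.
+ * val data  = new NDArray(Array(3f, 1f, 2f), new Shape(Array(3)), Context.cpu())
+ * val param = new argsortParam(data)
+ *   .setAxis(-1)          // sort along the last axis
+ *   .setIs_ascend(true)   // ascending order
+ *   .setDtype("float32")  // dtype of the returned indices
+ * // val indices = NDArray.argsort(param)   // hypothetical generated call
+ * }}}
+ */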