Struct burn::prelude::Tensor

pub struct Tensor<B, const D: usize, K = Float>
where B: Backend, K: TensorKind<B>,
{ /* private fields */ }

A tensor with a given backend, shape and data type.

Implementations§

§

impl<const D: usize, B> Tensor<B, D>
where B: AutodiffBackend,

pub fn backward(&self) -> <B as AutodiffBackend>::Gradients

Backward pass of the tensor.

pub fn grad( &self, grads: &<B as AutodiffBackend>::Gradients, ) -> Option<Tensor<<B as AutodiffBackend>::InnerBackend, D>>

Get the gradients of a tensor if they exist.

Returns a new reference to the same tensor. Therefore the same grad tensor can be accessed multiple times. If you only need to get the gradients one time, consider using grad_remove for better performance.

pub fn grad_remove( &self, grads: &mut <B as AutodiffBackend>::Gradients, ) -> Option<Tensor<<B as AutodiffBackend>::InnerBackend, D>>

Remove the grad tensor from the grads struct returning the result.

pub fn grad_replace( &self, grads: &mut <B as AutodiffBackend>::Gradients, grad: Tensor<<B as AutodiffBackend>::InnerBackend, D>, )

Replace the grad tensor from the grads struct with the provided gradient.
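
For illustration, here is a minimal sketch of a backward pass followed by gradient retrieval (not part of the original documentation; it only uses methods described on this page and assumes an autodiff-enabled backend):

use burn_tensor::backend::AutodiffBackend;
use burn_tensor::Tensor;

fn example<B: AutodiffBackend>() {
    let device = Default::default();
    // Track gradients for `x`.
    let x = Tensor::<B, 2>::ones([2, 2], &device).require_grad();
    let y = x.clone().exp().sum();
    // The backward pass computes gradients for all tracked tensors.
    let grads = y.backward();
    // Gradient of `y` with respect to `x`, on the inner (non-autodiff) backend.
    if let Some(grad_x) = x.grad(&grads) {
        println!("{:?}", grad_x.dims());
    }
}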

§

impl<const D: usize, B, K> Tensor<B, D, K>
where B: AutodiffBackend, K: BasicAutodiffOps<B>,

pub fn inner( self, ) -> Tensor<<B as AutodiffBackend>::InnerBackend, D, <K as BasicAutodiffOps<B>>::InnerKind>

Returns the inner tensor without the autodiff information.

pub fn from_inner( inner: Tensor<<B as AutodiffBackend>::InnerBackend, D, <K as BasicAutodiffOps<B>>::InnerKind>, ) -> Tensor<B, D, K>

Convert a tensor to the autodiff backend.

§Arguments
  • inner - The tensor to convert.
§Returns

The tensor converted to the autodiff backend.

§

impl<B, const D: usize, K> Tensor<B, D, K>
where B: Backend, K: TensorKind<B>,

pub fn new(primitive: <K as TensorKind<B>>::Primitive<D>) -> Tensor<B, D, K>

Constructs a new Tensor.

§

impl<B, const D: usize, K> Tensor<B, D, K>
where B: Backend, K: BasicOps<B>,

pub fn into_primitive(self) -> <K as TensorKind<B>>::Primitive<D>

Converts the tensor into a primitive tensor.

pub fn from_primitive( tensor: <K as TensorKind<B>>::Primitive<D>, ) -> Tensor<B, D, K>

Converts from a primitive tensor into a tensor.

pub fn empty<S>(shape: S, device: &<B as Backend>::Device) -> Tensor<B, D, K>
where S: Into<Shape<D>>,

Create an empty tensor of the given shape.

pub fn dims(&self) -> [usize; D]

Returns the dimensions of the current tensor.

Equivalent to tensor.shape().dims.

pub fn shape(&self) -> Shape<D>

Returns the shape of the current tensor.

pub fn reshape<const D2: usize, S>(self, shape: S) -> Tensor<B, D2, K>
where S: ReshapeArgs<D2>,

Reshape the tensor to have the given shape.

A -1 in the shape is used to infer the remaining dimensions, e.g.: [2, -1] will reshape the tensor with [2, 3, 4] dimensions to [2, 12].

A 0 in the shape instructs to keep the current dimension from the original tensor, e.g.: [2, 0, 4] will reshape the tensor with [2, 3, 4] dimensions to [2, 3, 4]. This is useful when reshaping tensors with unknown dimensions and combining with -1 to infer the remaining dimensions, e.g. [0, -1] will reshape the tensor with [1, 3, 4] dimensions to [1, 12].

§Arguments
  • shape: The new shape of the tensor.
§Panics
  • If the shape contains more than one -1.
  • If the shape contains values other than -1 that are not positive.
  • If the shape does not match the number of elements of the original shape.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
   let device = Default::default();
   let tensor = Tensor::<B, 3>::ones([2, 3, 4], &device);
   // Given a 3D tensor with dimensions (2, 3, 4), reshape it to (2, 12)
   let reshaped_tensor: Tensor::<B, 2> = tensor.reshape([2, -1]);
   // The resulting tensor will have dimensions (2, 12).
   println!("{:?}", reshaped_tensor.shape());
}

pub fn transpose(self) -> Tensor<B, D, K>

Transpose the tensor.

§Arguments
  • tensor - The tensor to transpose.
§Returns

The transposed tensor.

pub fn swap_dims(self, dim1: usize, dim2: usize) -> Tensor<B, D, K>

Swaps two dimensions of a tensor.

§Arguments
  • tensor - The tensor to swap the dimensions of.
  • dim1 - The first dimension to swap.
  • dim2 - The second dimension to swap.
§Returns

The tensor with the dimensions swapped.

pub fn permute(self, axes: [isize; D]) -> Tensor<B, D, K>

Permute the dimensions of the tensor.

§Arguments
  • axes - The new order of the dimensions. The length of the axes must be equal to the number of dimensions of the tensor. The values must be unique and in the range of the number of dimensions. The values can be negative, in which case they are used as an offset from the end.
§Returns

The tensor with the dimensions permuted.

pub fn movedim<S1, S2>(self, src: S1, dst: S2) -> Tensor<B, D, K>
where S1: MovedimArgs, S2: MovedimArgs,

Moves the dimension(s) of input at the position(s) in source to the position(s) in destination.

Other dimensions of input that are not explicitly moved remain in their original order and appear at the positions not specified in destination.

§Arguments
  • src - The dimension(s) to move. The values must be unique and in the range of the number of dimensions. The values can be negative, in which case they are used as an offset from the end.

  • dst - Destination positions for each of the original dims. These must also be unique.

§Panics
  • If the source and destination dimensions are not of the same length.
  • If the source and destination vectors contain duplicate values.
  • If the source and destination vectors contain values that are out of bounds.
§Returns

The tensor with the dimensions moved.
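
An illustrative sketch (not from the original documentation; it assumes plain usize positions are accepted by MovedimArgs):

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 3>::ones([2, 3, 4], &device);
    // Move dimension 0 to position 2; the other dimensions keep their relative order.
    let moved = tensor.movedim(0, 2);
    println!("{:?}", moved.dims()); // [3, 4, 2]
}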

pub fn flip<const N: usize>(self, axes: [isize; N]) -> Tensor<B, D, K>

Reverse the order of elements in the tensor along the given dimensions.

§Arguments
  • axes - The dimensions to reverse. The values must be unique and in the range of the number of dimensions. The values can be negative, in which case they are used as an offset from the end.
§Returns

The tensor with the axes flipped.

pub fn flatten<const D2: usize>( self, start_dim: usize, end_dim: usize, ) -> Tensor<B, D2, K>

Flatten the tensor along a given range of dimensions.

This function collapses the specified range of dimensions into a single dimension, effectively flattening the tensor in that range.

§Arguments
  • start_dim: The starting dimension of the range to be flattened.
  • end_dim: The ending dimension of the range to be flattened (inclusive).
§Type Parameters
  • D2: The resulting number of dimensions in the flattened tensor.
§Returns

A new Tensor<B, D2, K> instance with the specified range of dimensions flattened.

§Example

use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 3>::ones(Shape::new([2, 3, 4]), &device);

    // Given a 3D tensor with dimensions (2, 3, 4), flatten the dimensions between indices 1 and 2:
    let flattened_tensor: Tensor::<B, 2> = tensor.flatten(1, 2);

    // The resulting tensor will have dimensions (2, 12).
   println!("{:?}", flattened_tensor.shape());
}

pub fn squeeze<const D2: usize>(self, dim: usize) -> Tensor<B, D2, K>

Squeeze the tensor along the given dimension, removing the specified dimension of size one, and effectively reducing the rank of the tensor by one.

§Arguments
  • dim: The dimension to be squeezed.
§Type Parameters
  • D2: The resulting number of dimensions in the squeezed tensor.
§Returns

A new Tensor<B, D2, K> instance with the specified dimension removed.

§Example

use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 3>::ones(Shape::new([2, 1, 4]), &device);

    // Given a 3D tensor with dimensions (2, 1, 4), squeeze the dimension 1
    let squeezed_tensor: Tensor::<B, 2> = tensor.squeeze(1);

    // Resulting tensor will have dimensions (2, 4)
    println!("{:?}", squeezed_tensor.shape());
}

pub fn squeeze_dims<const D2: usize>(self, dims: &[isize]) -> Tensor<B, D2, K>

Removes specified dimensions of size 1 from a tensor’s shape. This function takes a tensor and an array of dimensions (dims) to be squeezed. If dims is provided, only the dimensions specified in this array will be removed. Each dimension in dims should correspond to a size of 1 in the tensor; otherwise, the dimension will not be squeezed. If dims is empty, all single-dimensional entries in the tensor will be removed. If entries in dims are negative, then dimensions will be counted from the back.

§Arguments
  • dims: The dimension(s) to be squeezed.
§Type Parameters
  • D2: The resulting number of dimensions in the squeezed tensor.
§Returns

A new Tensor<B, D2, K> instance with the specified dimensions removed.

§Example

use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 4>::ones(Shape::new([2, 1, 4, 1]), &device);

    // Given a 4D tensor with dimensions (2, 1, 4, 1), squeeze the 1 and 3 dimensions
    let squeezed_tensor: Tensor::<B, 2> = tensor.squeeze_dims(&[1, 3]);

    // Resulting tensor will have dimensions (2, 4)
    println!("{:?}", squeezed_tensor.shape());
}

pub fn unsqueeze<const D2: usize>(self) -> Tensor<B, D2, K>

Unsqueeze the current tensor, inserting leading dimensions of size one to fit the given output rank.

The output rank D2 must be greater than or equal to the current rank.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 2>::ones(Shape::new([3, 3]), &device);
    let tensor = tensor.unsqueeze::<4>();
    println!("{:?}", tensor.shape());
    // Shape { dims: [1, 1, 3, 3] }
}

pub fn unsqueeze_dim<const D2: usize>(self, dim: usize) -> Tensor<B, D2, K>

Creates a new tensor with a dimension of size one inserted at the specified position.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 2>::ones(Shape::new([3, 3]), &device);
    let tensor: Tensor<B, 3> = tensor.unsqueeze_dim(1);
    println!("{:?}", tensor.shape());
    // Shape { dims: [3, 1, 3] }
}

pub fn unsqueeze_dims<const D2: usize>(self, axes: &[isize]) -> Tensor<B, D2, K>

Creates a new tensor with dimensions of size one inserted at the specified indices. The indices can be negative, in which case they are counted from the last to the first dimension. The axes can contain duplicates, in which case the number of dimensions inserted at that index equals the number of duplicates.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 3>::ones(Shape::new([3, 4, 5]), &device);
    let tensor: Tensor<B, 6> = tensor.unsqueeze_dims(&[0, -1, -1]);
    println!("{:?}", tensor.shape());
    // Shape { dims: [1, 3, 4, 5, 1, 1] }
}

pub fn slice<const D2: usize, R>(self, ranges: R) -> Tensor<B, D, K>
where R: RangesArg<D2>,

Returns a tensor containing the elements selected from the given ranges.

§Arguments
  • ranges - A type implementing the RangesArg trait, which can be:
    • An array of core::ops::Range<usize>
    • An array of Option<(i64, i64)>
    • An array of (i64, i64) tuples
§Behavior
  • Supports partial and full slicing in any number of dimensions.
  • Missing ranges are treated as full slices if D > D2.
  • Handles negative indices by wrapping around from the end of the dimension.
  • Clamps ranges to the tensor’s dimensions if they exceed the bounds.
  • For Option<(i64, i64)> ranges, None selects the full range of that dimension.
§Panics
  • If the number of ranges provided exceeds the tensor’s dimensions.
  • If a range is descending (e.g., 2..1) or empty (e.g., 1..1).
§Examples
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
    let device = B::Device::default();

    // 1D slicing
    let tensor = Tensor::<B, 1, burn_tensor::Int>::arange(0..5, &device);
    let slice = tensor.slice([1..4]);
    assert_eq!(slice.into_data().to_vec::<i32>().unwrap(), vec![1i32, 2, 3]);

    // 2D slicing
    let tensor = Tensor::<B, 2>::ones(Shape::new([3, 4]), &device);
    let slice = tensor.slice([1..3, 0..2]);
    assert_eq!(slice.dims(), [2, 2]);

    // Using negative indices
    let tensor = Tensor::<B, 1, burn_tensor::Int>::arange(0..5, &device);
    let slice = tensor.slice([(1, -1)]); // Equivalent to 1..4
    assert_eq!(slice.into_data().to_vec::<i32>().unwrap(), vec![1i32, 2, 3]);

    // Using Option<(i64, i64)>
    let tensor = Tensor::<B, 1, burn_tensor::Int>::arange(0..12, &device).reshape([3, 4]);
    let slice = tensor.slice([Some((1, -1)), None]); // Select rows 1 and 2, all columns
    assert_eq!(slice.dims(), [2, 4]);
}
§Note

This function uses the RangesArg trait for flexible range specification. The trait handles the conversion of various range formats and applies clamping and negative index handling internally.

pub fn slice_assign<const D2: usize>( self, ranges: [Range<usize>; D2], values: Tensor<B, D, K>, ) -> Tensor<B, D, K>

Returns a copy of the current tensor with the selected elements changed to the new ones at the selected indices.

§Panics
  • If a range exceeds the number of elements on a dimension.
  • If the given values don’t match the given ranges.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = B::Device::default();
    let tensor = Tensor::<B, 3>::ones([2, 3, 3], &device);
    let values = Tensor::<B, 3>::zeros([1, 1, 1], &device);
    let tensor_sliced = tensor.slice_assign([0..1, 0..1, 0..1], values);
    println!("{:?}", tensor_sliced.dims()); // [2, 3, 3]
}

pub fn device(&self) -> <B as Backend>::Device

Returns the device of the current tensor.

pub fn to_device(self, device: &<B as Backend>::Device) -> Tensor<B, D, K>

Returns a new tensor on the given device.

pub fn into_data(self) -> TensorData

Converts the data of the current tensor.

pub fn to_data(&self) -> TensorData

Returns the data of the current tensor.

pub async fn into_data_async(self) -> TensorData

Returns the data of the current tensor.

pub async fn to_data_async(&self) -> TensorData

Returns the data of the current tensor.

pub fn from_data<T>(data: T, device: &<B as Backend>::Device) -> Tensor<B, D, K>
where T: Into<TensorData>,

Create a tensor from the given data on the given device.

pub fn repeat_dim(self, dim: usize, times: usize) -> Tensor<B, D, K>

Repeat the tensor along the given dimension.

pub fn repeat(self, sizes: &[usize]) -> Tensor<B, D, K>

Repeat the tensor along the given dimensions.

§Arguments
  • sizes: Borrowed slice of the number of times to repeat each dimension.

pub fn equal(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>

Applies element-wise equal comparison and returns a boolean tensor.

§Panics

If the two tensors don’t have the same shape.

pub fn not_equal(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>

Applies element-wise non-equality comparison and returns a boolean tensor.

§Panics

If the two tensors don’t have the same shape.

pub fn cat(tensors: Vec<Tensor<B, D, K>>, dim: usize) -> Tensor<B, D, K>

Concatenates all tensors into a new one along the given dimension.

§Panics

If all tensors don’t have the same shape.

pub fn stack<const D2: usize>( tensors: Vec<Tensor<B, D, K>>, dim: usize, ) -> Tensor<B, D2, K>

Concatenates all tensors into a new one along a new dimension.

§Panics

If the tensors don’t all have the same shape, or if the given dimension is not within the range 0..D2.
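
For illustration, a minimal sketch contrasting cat and stack (not from the original documentation):

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let a = Tensor::<B, 2>::ones([2, 3], &device);
    let b = Tensor::<B, 2>::zeros([2, 3], &device);
    // Concatenate along an existing dimension: shape [4, 3].
    let cat = Tensor::cat(vec![a.clone(), b.clone()], 0);
    // Stack along a new leading dimension: shape [2, 2, 3].
    let stacked: Tensor<B, 3> = Tensor::stack(vec![a, b], 0);
    println!("{:?} {:?}", cat.dims(), stacked.dims());
}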

pub fn iter_dim(self, dim: usize) -> DimIter<B, D, K>

Iterate over slices of tensors along a given dimension.

§Panics

If the given dimension is greater than or equal to the tensor rank.

§Returns

A tensor iterator.

pub fn narrow(self, dim: usize, start: usize, length: usize) -> Tensor<B, D, K>

Returns a new tensor with the given dimension narrowed to the given range.

§Panics
  • If the dimension is greater than the number of dimensions of the tensor.
  • If the given range exceeds the number of elements on the given dimension.
§Returns

A new tensor with the given dimension narrowed to the given range.

pub fn chunk(self, chunks: usize, dim: usize) -> Vec<Tensor<B, D, K>>

Attempts to split the tensor along the given dimension into chunks. May return fewer chunks than requested if the tensor size is not divisible by the number of chunks.

When the given dimension is evenly divisible by the number of chunks, the chunks will be of equal size. Otherwise all chunks will be of equal size except for the last one.

§Panics

If the dimension is greater than the number of dimensions of the tensor.

§Returns

A vector of tensors.
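
An illustrative sketch (not from the original documentation):

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 1>::ones([7], &device);
    // 7 elements split into 3 chunks along dim 0: sizes [3, 3, 1].
    let chunks = tensor.chunk(3, 0);
    for chunk in chunks {
        println!("{:?}", chunk.dims());
    }
}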

pub fn any(self) -> Tensor<B, 1, Bool>

Tests if any element in the tensor evaluates to True.

§Arguments
  • tensor - The tensor to test. All input tensor types (Float, Int, Bool) are supported.
§Returns

A boolean tensor Tensor<B, 1, Bool> containing a single element, True if any element in the input tensor evaluates to True, False otherwise.

pub fn any_dim(self, dim: usize) -> Tensor<B, D, Bool>

Tests if any element in the tensor evaluates to True along a given dimension dim.

§Arguments
  • tensor - The tensor to test. All input tensor types (Float, Int, Bool) are supported.
  • dim - The axis along which to test.
§Returns

A boolean tensor Tensor<B, D, Bool> with the same size as input tensor, except in the dim axis where the size is 1. The elem in the dim axis is True if any element along this dim in the input evaluates to True, False otherwise.

pub fn all(self) -> Tensor<B, 1, Bool>

Tests if all elements in the tensor evaluate to True.

§Arguments
  • tensor - The tensor to test. All input tensor types (Float, Int, Bool) are supported.
§Returns

A boolean tensor Tensor<B, 1, Bool> with a single element, True if all elements in the input tensor evaluate to True, False otherwise.

pub fn all_dim(self, dim: usize) -> Tensor<B, D, Bool>

Tests if all elements in the tensor evaluate to True along a given dimension dim.

§Arguments
  • tensor - The tensor to test. All input tensor types (Float, Int, Bool) are supported.
  • dim - The axis along which to test.
§Returns

A boolean tensor Tensor<B, D, Bool> with the same size as the input tensor, except in the dim axis where the size is 1. The elem in the dim axis is True if all elements along this dim in the input evaluate to True, False otherwise.

pub fn into_scalar(self) -> <K as BasicOps<B>>::Elem

Convert the tensor into a scalar.

§Panics

If the tensor doesn’t have one element. If the backend fails to read the tensor data synchronously.

pub async fn into_scalar_async(self) -> <K as BasicOps<B>>::Elem

Convert the tensor into a scalar.

§Panics

If the tensor doesn’t have one element.

pub fn expand<const D2: usize, S>(self, shape: S) -> Tensor<B, D2, K>
where S: BroadcastArgs<D, D2>,

Broadcast the tensor to the given shape.

§Arguments
  • shape - The shape to broadcast the tensor to. Can contain -1 for dimensions that should be inferred. The number of elements in the shape must be greater than or equal to the number of dimensions of the tensor.
§Panics

If the tensor cannot be broadcasted to the given shape.

§Returns

A new tensor with the given shape.
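
An illustrative sketch (not from the original documentation; it assumes an integer array such as [4, 3] is accepted by BroadcastArgs):

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 2>::ones([1, 3], &device);
    // Broadcast the singleton dimension to size 4.
    let expanded: Tensor<B, 2> = tensor.expand([4, 3]);
    println!("{:?}", expanded.dims()); // [4, 3]
}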

§

impl<B, const D: usize> Tensor<B, D, Bool>
where B: Backend,

pub fn from_bool( data: TensorData, device: &<B as Backend>::Device, ) -> Tensor<B, D, Bool>

Create a boolean tensor from data on the given device.

pub fn int(self) -> Tensor<B, D, Int>

Convert the bool tensor into an int tensor.

pub fn float(self) -> Tensor<B, D>

Convert the bool tensor into a float tensor.

pub fn bool_not(self) -> Tensor<B, D, Bool>

Inverts the boolean values.

pub fn nonzero(self) -> Vec<Tensor<B, 1, Int>>

Compute the indices of the elements that are non-zero.

§Returns

A vector of tensors, one for each dimension of the given tensor, containing the indices of the non-zero elements in that dimension.

pub async fn nonzero_async(self) -> Vec<Tensor<B, 1, Int>>

Compute the indices of the elements that are non-zero.

§Returns

A vector of tensors, one for each dimension of the given tensor, containing the indices of the non-zero elements in that dimension.

pub fn argwhere(self) -> Tensor<B, 2, Int>

Compute the indices of the elements that are true, grouped by element.

§Returns

A tensor containing the indices of all non-zero elements of the given tensor. Each row in the result contains the indices of a non-zero element.

pub async fn argwhere_async(self) -> Tensor<B, 2, Int>

Compute the indices of the elements that are true, grouped by element.

§Returns

A tensor containing the indices of all non-zero elements of the given tensor. Each row in the result contains the indices of a non-zero element.

pub fn triu_mask<S>( shape: S, offset: i64, device: &<B as Backend>::Device, ) -> Tensor<B, D, Bool>
where S: Into<Shape<D>>,

Creates a mask for the upper triangle of a matrix, which can be used to fill the specified area with a value.

This function generates a boolean tensor representing the mask of the upper triangle of a matrix.

§Arguments
  • shape: The shape of the matrix.
  • offset: The offset from the diagonal, where 0 means the diagonal, and positive values shift towards the upper triangle.
  • device: The device on which the tensor will be allocated.
§Returns

Returns a boolean tensor where true indicates the elements of the matrix that are part of the upper triangle taking into account the specified offset.

pub fn tril_mask<S>( shape: S, offset: i64, device: &<B as Backend>::Device, ) -> Tensor<B, D, Bool>
where S: Into<Shape<D>>,

Creates a mask for the lower triangle of a matrix, which can be used to fill the specified area with a value.

This function generates a boolean tensor representing the mask of the lower triangle of a matrix.

§Arguments
  • shape: The shape of the matrix.
  • offset: The offset from the diagonal, where 0 means the diagonal, and negative values shift towards the lower triangle.
  • device: The device on which the tensor will be allocated.
§Returns

Returns a boolean tensor where true indicates the elements of the matrix that are part of the lower triangle taking into account the specified offset.

pub fn diag_mask<S>( shape: S, offset: i64, device: &<B as Backend>::Device, ) -> Tensor<B, D, Bool>
where S: Into<Shape<D>>,

Creates a mask for the diagonal of a matrix, which can be used to fill the specified area with a value.

This function generates a boolean tensor representing the mask of the diagonal of a matrix.

§Arguments
  • shape: The shape of the matrix.
  • device: The device on which the tensor will be allocated.
§Returns

Returns a boolean tensor where true indicates the elements of the matrix that are part of the diagonal.

§

impl<const D: usize, B> Tensor<B, D>
where B: Backend,

pub fn inplace<F>(&mut self, func: F)
where F: FnOnce(Tensor<B, D>) -> Tensor<B, D>,

Executes an operation on the tensor and modifies its value.

§Notes

This won’t necessarily reuse the same tensor data/buffer, but it should if there is no other reference pointing to the same tensor.

Wrapping operations with inplace is not an optimization; it’s mainly there if you want to mutate a tensor by using owned operations. A plausible usage would be to update the weights of a mutable model reference.
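
For illustration, a minimal sketch of mutating a tensor through owned operations (not from the original documentation):

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let mut tensor = Tensor::<B, 2>::ones([2, 2], &device);
    // Mutate the tensor in place using owned operations.
    tensor.inplace(|t| t.add_scalar(1.0).mul_scalar(2.0));
    println!("{}", tensor); // every element is (1 + 1) * 2 = 4
}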

pub fn exp(self) -> Tensor<B, D>

Applies element wise exponential operation.

y = e^x

pub fn log(self) -> Tensor<B, D>

Applies element wise natural log operation ln.

y = log(x)

pub fn log1p(self) -> Tensor<B, D>

Applies the natural logarithm of one plus the input tensor, element-wise.

y = log(x+1)

pub fn erf(self) -> Tensor<B, D>

Applies the error function element wise.

y = erf(x)

pub fn recip(self) -> Tensor<B, D>

Applies element wise reciprocal operation.

pub fn sqrt(self) -> Tensor<B, D>

Applies element wise square root operation.

pub fn cos(self) -> Tensor<B, D>

Applies element wise cosine operation.

pub fn sin(self) -> Tensor<B, D>

Applies element wise sine operation.

pub fn tanh(self) -> Tensor<B, D>

Applies element wise hyperbolic tangent operation.

pub fn from_floats<A>( floats: A, device: &<B as Backend>::Device, ) -> Tensor<B, D>
where A: Into<TensorData>,

Create a tensor from floats (f32) on a given device.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = B::Device::default();
    let _ = Tensor::<B, 1>::from_floats([1.0, 2.0], &device);
    let _ = Tensor::<B, 2>::from_floats([[1.0, 2.0], [3.0, 4.0]], &device);
}

pub fn int(self) -> Tensor<B, D, Int>

Returns a new tensor with the same shape and device as the current tensor and the data cast to Integer.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let float_tensor = Tensor::<B, 1>::from_floats([1.0, 2.0], &device);
    let int_tensor = float_tensor.int();
}

pub fn zeros_like(&self) -> Tensor<B, D>

Returns a new tensor with the same shape and device as the current tensor filled with zeros.

pub fn ones_like(&self) -> Tensor<B, D>

Returns a new tensor with the same shape and device as the current tensor filled with ones.

pub fn random_like(&self, distribution: Distribution) -> Tensor<B, D>

Returns a new tensor with the same shape and device as the current tensor, filled with random values sampled from the given distribution.

pub fn one_hot( index: usize, num_classes: usize, device: &<B as Backend>::Device, ) -> Tensor<B, D>

Create a one hot tensor.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let one_hot = Tensor::<B, 1>::one_hot(2, 10, &device);
    println!("{}", one_hot.to_data());
    // [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
}

pub fn matmul(self, other: Tensor<B, D>) -> Tensor<B, D>

Applies the matrix multiplication operation.

C = AB

§Panics

If the two tensors don’t have compatible shapes.
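
An illustrative sketch (not from the original documentation):

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let a = Tensor::<B, 2>::ones([2, 3], &device);
    let b = Tensor::<B, 2>::ones([3, 4], &device);
    // (2, 3) x (3, 4) -> (2, 4)
    let c = a.matmul(b);
    println!("{:?}", c.dims()); // [2, 4]
}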

pub fn var(self, dim: usize) -> Tensor<B, D>

Calculate the variance along the given dimension.

pub fn var_bias(self, dim: usize) -> Tensor<B, D>

Calculate the variance along the given dimension without applying Bessel’s correction.

pub fn var_mean(self, dim: usize) -> (Tensor<B, D>, Tensor<B, D>)

Calculate the variance along the given dimension and also returns the mean.

pub fn var_mean_bias(self, dim: usize) -> (Tensor<B, D>, Tensor<B, D>)

Calculate the variance along the given dimension without applying Bessel’s correction, and also return the mean.

pub fn into_full_precision( self, ) -> Tensor<<<B as Backend>::FullPrecisionBridge as BackendBridge<B>>::Target, D>

Returns a tensor with full precision based on the selected backend.

pub fn from_full_precision( tensor: Tensor<<<B as Backend>::FullPrecisionBridge as BackendBridge<B>>::Target, D>, ) -> Tensor<B, D>

Returns a tensor on the selected backend from a full precision tensor.

pub fn detach(self) -> Tensor<B, D>

Detach the current tensor from the autodiff graph.

This function does nothing when autodiff is not enabled. This can be used in batchers or elsewhere to ensure that previous operations are not considered in the autodiff graph.

pub fn require_grad(self) -> Tensor<B, D>

Mark the tensor to keep gradients during the backward pass.

This function does nothing when autodiff is not enabled.

pub fn is_require_grad(&self) -> bool

Returns true if the tensor requires gradients during the backward pass.

pub fn set_require_grad(self, require_grad: bool) -> Tensor<B, D>

Mark the tensor as tracked or untracked depending on the require grad argument. When tracked, the gradients will be available after the backward pass.

This function does nothing when autodiff is not enabled.

pub fn cov(self, dim: usize, correction_factor: usize) -> Tensor<B, D>

Calculate the covariance matrix between different entries along a given dimension.

§Arguments
  • dim - The dimension along which the covariance of the entries is computed.
  • correction_factor - Is usually 1 for samples and 0 for population.

pub fn quantize( self, scheme: &QuantizationScheme, qparams: QuantizationParameters<B>, ) -> Tensor<B, D>

Convert the tensor to a lower precision data type based on the quantization scheme.

§Arguments
  • scheme - The quantization scheme.
  • qparams - The pre-computed quantization parameters.
§Returns

The quantized tensor.

pub fn dequantize(self) -> Tensor<B, D>

Convert the tensor back to a higher precision data type.

If the tensor is not quantized, its value is simply returned.

§Returns

The dequantized tensor.

§

impl<B> Tensor<B, 1, Int>
where B: Backend,

pub fn arange( range: Range<i64>, device: &<B as Backend>::Device, ) -> Tensor<B, 1, Int>

Returns a new integer tensor on the specified device.

§Arguments
  • range - The range of values to generate.
  • device - The device to create the tensor on.

pub fn arange_step( range: Range<i64>, step: usize, device: &<B as Backend>::Device, ) -> Tensor<B, 1, Int>

Returns a new integer tensor on the specified device.

§Arguments
  • range - The range of values to generate.
  • step - The step between each value.
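
For illustration, a minimal sketch of arange and arange_step (not from the original documentation):

use burn_tensor::backend::Backend;
use burn_tensor::{Int, Tensor};

fn example<B: Backend>() {
    let device = Default::default();
    let range = Tensor::<B, 1, Int>::arange(0..5, &device);
    // [0, 1, 2, 3, 4]
    let stepped = Tensor::<B, 1, Int>::arange_step(0..10, 2, &device);
    // [0, 2, 4, 6, 8]
    println!("{} {}", range, stepped);
}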
§

impl<const D: usize, B> Tensor<B, D, Int>
where B: Backend,

pub fn from_ints<A>( ints: A, device: &<B as Backend>::Device, ) -> Tensor<B, D, Int>
where A: Into<TensorData>,

Create a tensor from integers (i32), placing it on a given device.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Int};

fn example<B: Backend>() {
    let device = B::Device::default();
    let _x: Tensor<B, 1, Int> = Tensor::from_ints([1, 2], &device);
    let _y: Tensor<B, 2, Int> = Tensor::from_ints([[1, 2], [3, 4]], &device);
}

pub fn float(self) -> Tensor<B, D>

Returns a new tensor with the same shape and device as the current tensor and the data cast to Float.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Int, Tensor};

fn example<B: Backend>() {
    let device = Default::default();
    let int_tensor = Tensor::<B, 1, Int>::arange(0..5, &device);
    let float_tensor = int_tensor.float();
}

pub fn cartesian_grid<S, const D2: usize>( shape: S, device: &<B as Backend>::Device, ) -> Tensor<B, D2, Int>
where S: Into<Shape<D>>,

Generates a cartesian grid for the given tensor shape on the specified device. The generated tensor is of dimension D2 = D + 1, where each element at dimension D contains the cartesian grid coordinates for that element.

§Arguments
  • shape - The shape specifying the dimensions of the tensor.
  • device - The device to create the tensor on.
§Panics

Panics if D2 is not equal to D+1.

§Examples
   use burn_tensor::Int;
   use burn_tensor::{backend::Backend, Shape, Tensor};
   fn example<B: Backend>() {
       let device = Default::default();
       let result: Tensor<B, 3, _> = Tensor::<B, 2, Int>::cartesian_grid([2, 3], &device);
       println!("{}", result);
   }
§

impl<B, const D: usize, K> Tensor<B, D, K>
where B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

pub fn add(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>

Applies element wise addition operation.

y = x2 + x1

pub fn add_scalar<E>(self, other: E) -> Tensor<B, D, K>

Applies element wise addition operation with a scalar.

y = x + s

pub fn sub(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>

Applies element wise subtraction operation.

y = x2 - x1

pub fn sub_scalar<E>(self, other: E) -> Tensor<B, D, K>

Applies element wise subtraction operation with a scalar.

y = x - s

pub fn div(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>

Applies element wise division operation.

y = x2 / x1

pub fn div_scalar<E>(self, other: E) -> Tensor<B, D, K>

Applies element wise division operation with a scalar.

y = x / s

pub fn remainder_scalar<E>(self, other: E) -> Tensor<B, D, K>

Applies element wise remainder operation with a scalar.

y = x % s

pub fn mul(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>

Applies element wise multiplication operation.

y = x2 * x1

pub fn mul_scalar<E>(self, other: E) -> Tensor<B, D, K>

Applies element wise multiplication operation with a scalar.

y = x * s

pub fn neg(self) -> Tensor<B, D, K>

Switch sign of each element in the tensor.

y = -x

pub fn sign(self) -> Tensor<B, D, K>

Returns the signs of the elements of the input tensor.

pub fn zeros<S>(shape: S, device: &<B as Backend>::Device) -> Tensor<B, D, K>
where S: Into<Shape<D>>,

Create a tensor of the given shape where each element is zero.

pub fn ones<S>(shape: S, device: &<B as Backend>::Device) -> Tensor<B, D, K>
where S: Into<Shape<D>>,

Create a tensor of the given shape where each element is one.

pub fn full<S, E>( shape: S, fill_value: E, device: &<B as Backend>::Device, ) -> Tensor<B, D, K>
where S: Into<Shape<D>>, E: ElementConversion,

Create a tensor of the given shape where each element is equal to the provided value.

pub fn mean(self) -> Tensor<B, 1, K>

Aggregate all elements in the tensor with the mean operation.

pub fn sum(self) -> Tensor<B, 1, K>

Aggregate all elements in the tensor with the sum operation.

pub fn mean_dim(self, dim: usize) -> Tensor<B, D, K>

Aggregate all elements along the given dimension or axis in the tensor with the mean operation.

pub fn sum_dim(self, dim: usize) -> Tensor<B, D, K>

Aggregate all elements along the given dimension or axis in the tensor with the sum operation.
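
For illustration, a minimal sketch of the dimension-wise aggregations (not from the original documentation; note that the reduced dimension is kept with size 1, since the rank D is unchanged):

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 2>::ones([2, 3], &device);
    // Reduce along dim 1; the reduced axis is kept with size 1.
    let summed = tensor.clone().sum_dim(1); // shape [2, 1], values 3.0
    let mean = tensor.mean_dim(1); // shape [2, 1], values 1.0
    println!("{:?} {:?}", summed.dims(), mean.dims());
}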

pub fn prod(self) -> Tensor<B, 1, K>

Aggregate all elements in the tensor with the product operation.

pub fn prod_dim(self, dim: usize) -> Tensor<B, D, K>

Aggregate all elements along the given dimension or axis in the tensor with the product operation.

pub fn equal_elem<E>(self, other: E) -> Tensor<B, D, Bool>
where E: Element,

Applies element wise equal comparison and returns a boolean tensor.

pub fn not_equal_elem<E>(self, other: E) -> Tensor<B, D, Bool>
where E: Element,

Applies element wise non-equality comparison and returns a boolean tensor.

pub fn greater(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>

Applies element wise greater comparison and returns a boolean tensor.

§Panics

If the two tensors don’t have the same shape.

pub fn greater_equal(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>

Applies element wise greater-equal comparison and returns a boolean tensor.

§Panics

If the two tensors don’t have the same shape.

pub fn lower(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>

Applies element wise lower comparison and returns a boolean tensor.

§Panics

If the two tensors don’t have the same shape.

pub fn lower_equal(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>

Applies element wise lower-equal comparison and returns a boolean tensor.

§Panics

If the two tensors don’t have the same shape.

pub fn greater_elem<E>(self, other: E) -> Tensor<B, D, Bool>

Applies element wise greater comparison and returns a boolean tensor.

pub fn greater_equal_elem<E>(self, other: E) -> Tensor<B, D, Bool>

Applies element wise greater-equal comparison and returns a boolean tensor.

pub fn lower_elem<E>(self, other: E) -> Tensor<B, D, Bool>

Applies element wise lower comparison and returns a boolean tensor.

pub fn lower_equal_elem<E>(self, other: E) -> Tensor<B, D, Bool>

Applies element wise lower-equal comparison and returns a boolean tensor.

pub fn mask_where( self, mask: Tensor<B, D, Bool>, value: Tensor<B, D, K>, ) -> Tensor<B, D, K>

Update the given tensor with the value tensor where the mask is true.

This is similar to mask_fill; however, the value is a tensor instead of a scalar.

pub fn mask_fill<E>(self, mask: Tensor<B, D, Bool>, value: E) -> Tensor<B, D, K>

Update the given tensor with the value where the mask is true.

This is similar to mask_where; however, the value is a scalar instead of a tensor.
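
For illustration, a minimal sketch combining a comparison mask with mask_fill (not from the original documentation):

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 1>::from_floats([1.0, -2.0, 3.0], &device);
    // The mask selects the strictly negative entries.
    let mask = tensor.clone().lower_elem(0.0);
    // Replace the masked entries with a scalar value.
    let filled = tensor.mask_fill(mask, 0.0);
    println!("{}", filled); // [1.0, 0.0, 3.0]
}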

pub fn gather(self, dim: usize, indices: Tensor<B, D, Int>) -> Tensor<B, D, K>

Gather tensor elements corresponding to the given indices from the specified dim.

Example using a 3D tensor:

output[i, j, k] = input[indices[i, j, k], j, k]; // dim = 0
output[i, j, k] = input[i, indices[i, j, k], k]; // dim = 1
output[i, j, k] = input[i, j, indices[i, j, k]]; // dim = 2

§Notes

The index tensor should have the same shape as the original tensor except for the dim specified.
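
An illustrative 2D sketch (not from the original documentation):

use burn_tensor::backend::Backend;
use burn_tensor::{Int, Tensor};

fn example<B: Backend>() {
    let device = Default::default();
    let input = Tensor::<B, 2>::from_floats([[1.0, 2.0], [3.0, 4.0]], &device);
    let indices = Tensor::<B, 2, Int>::from_ints([[1, 0], [0, 1]], &device);
    // output[i, j] = input[i, indices[i, j]] for dim = 1
    let gathered = input.gather(1, indices);
    println!("{}", gathered); // [[2.0, 1.0], [3.0, 4.0]]
}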

pub fn scatter( self, dim: usize, indices: Tensor<B, D, Int>, values: Tensor<B, D, K>, ) -> Tensor<B, D, K>

Assign the gathered elements corresponding to the given indices along the specified dimension from the value tensor to the original tensor using sum reduction.

Example using a 3D tensor:

input[indices[i, j, k], j, k] += values[i, j, k]; // dim = 0
input[i, indices[i, j, k], k] += values[i, j, k]; // dim = 1
input[i, j, indices[i, j, k]] += values[i, j, k]; // dim = 2

§Notes

The index tensor should have the same shape as the original tensor except for the specified dimension. The value and index tensors should have the same shape.

Other references to the input tensor will not be modified by this operation.

pub fn select(self, dim: usize, indices: Tensor<B, 1, Int>) -> Tensor<B, D, K>

Select the tensor elements along the given dimension corresponding to the given indices.

Example using a 3D tensor:

output[i, j, k] = input[indices[i], j, k]; // dim = 0
output[i, j, k] = input[i, indices[j], k]; // dim = 1
output[i, j, k] = input[i, j, indices[k]]; // dim = 2

pub fn select_assign( self, dim: usize, indices: Tensor<B, 1, Int>, values: Tensor<B, D, K>, ) -> Tensor<B, D, K>

Assign the selected elements along the given dimension corresponding to the given indices from the value tensor to the original tensor using sum reduction.

Example using a 3D tensor:

input[indices[i], j, k] += values[i, j, k]; // dim = 0
input[i, indices[j], k] += values[i, j, k]; // dim = 1
input[i, j, indices[k]] += values[i, j, k]; // dim = 2
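
For illustration, a minimal sketch of select (not from the original documentation):

use burn_tensor::backend::Backend;
use burn_tensor::{Int, Tensor};

fn example<B: Backend>() {
    let device = Default::default();
    let input = Tensor::<B, 2>::from_floats([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], &device);
    let indices = Tensor::<B, 1, Int>::from_ints([2, 0], &device);
    // Pick rows 2 and 0: [[5.0, 6.0], [1.0, 2.0]].
    let rows = input.select(0, indices);
    println!("{:?}", rows.dims()); // [2, 2]
}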

pub fn argmax(self, dim: usize) -> Tensor<B, D, Int>

Applies the argmax function along the given dimension and returns an integer tensor.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
    let device = B::Device::default();
    let tensor = Tensor::<B, 3>::ones(Shape::new([2, 3, 3]), &device);
    let tensor = tensor.argmax(1);
    println!("{:?}", tensor.shape());
    // Shape { dims: [2, 1, 3] }
}

pub fn max(self) -> Tensor<B, 1, K>

Find the maximum value.

pub fn max_dim(self, dim: usize) -> Tensor<B, D, K>

Find the maximum value along the given dimension.

pub fn max_dim_with_indices( self, dim: usize, ) -> (Tensor<B, D, K>, Tensor<B, D, Int>)

Find the maximum value along the given dimension.

Also returns the indices.

pub fn max_pair(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>

Finds the element-wise maximum values with another tensor.

§Arguments
  • other - Other tensor to find maximum elements with
§Returns

A tensor with the same shape as the input tensors containing the maximum value found in the input tensors.

pub fn argmin(self, dim: usize) -> Tensor<B, D, Int>

Applies the argmin function along the given dimension and returns an integer tensor.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 3>::ones(Shape::new([2, 3, 3]), &device);
    let tensor = tensor.argmin(1);
    println!("{:?}", tensor.shape());
    // Shape { dims: [2, 1, 3] }
}

pub fn min(self) -> Tensor<B, 1, K>

Find the minimum value.

pub fn min_dim(self, dim: usize) -> Tensor<B, D, K>

Find the minimum value along the given dimension.

pub fn min_dim_with_indices( self, dim: usize, ) -> (Tensor<B, D, K>, Tensor<B, D, Int>)

Find the minimum value along the given dimension.

Also returns the indices.

pub fn min_pair(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>

Finds the element-wise minimum values with another tensor.

§Arguments
  • other - Other tensor to find minimum elements with
§Returns

A tensor with the same shape as the input tensors containing the minimum value found between each element of the two source tensors.

pub fn clamp<E>(self, min: E, max: E) -> Tensor<B, D, K>

Clamp the tensor between the given min and max values.

§Arguments
  • min - The minimum value.
  • max - The maximum value.
§Returns

A new tensor with the values clamped between the given min and max values.
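
An illustrative sketch (not from the original documentation):

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 1>::from_floats([-2.0, 0.5, 3.0], &device);
    // Values below 0.0 become 0.0; values above 1.0 become 1.0.
    let clamped = tensor.clamp(0.0, 1.0);
    println!("{}", clamped); // [0.0, 0.5, 1.0]
}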

pub fn clamp_min<E>(self, min: E) -> Tensor<B, D, K>

Clamps the tensor values so that they are no less than the given minimum.

§Arguments
  • tensor - The tensor to clamp.
  • min - The minimum value.
§Returns

A new tensor with the values clamped under the given min value.

pub fn clamp_max<E>(self, max: E) -> Tensor<B, D, K>

Clamps the tensor values so that they are no greater than the given maximum.

§Arguments
  • tensor - The tensor to clamp.
  • max - The maximum value.
§Returns

A new tensor with the values clamped over the given max value.

pub fn abs(self) -> Tensor<B, D, K>

Apply element wise absolute value operation

pub fn triu(self, diagonal: i64) -> Tensor<B, D, K>

Returns the upper triangular part of the input matrix (2-D tensor) or batch of matrices; the other elements of the result are set to 0.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Int, Tensor};

fn example<B: Backend>() {
   let device = Default::default();
   let tensor = Tensor::<B, 2, Int>::from_ints(
       [
         [1, 2, 3],
         [4, 5, 6],
         [7, 8, 9]
       ],
       &device
   );
   let tensor = tensor.triu(1);
   println!("{}", tensor);
   // Tensor { data: [
   //   [0, 2, 3],
   //   [0, 0, 6],
   //   [0, 0, 0]
   // ], ... }
}

pub fn tril(self, diagonal: i64) -> Tensor<B, D, K>

Returns the lower triangular part of the input matrix (2-D tensor) or batch of matrices; the other elements of the result are set to 0.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Int, Tensor};

fn example<B: Backend>() {
   let device = Default::default();
   let tensor = Tensor::<B, 2, Int>::from_ints(
       [
         [1, 2, 3],
         [4, 5, 6],
         [7, 8, 9]
       ],
       &device
   );

   let tensor = tensor.tril(-1);
   println!("{}", tensor);
   // Tensor { data: [
   //   [0, 0, 0],
   //   [4, 0, 0],
   //   [7, 8, 0]
   // ], ... }
}

pub fn powf(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>

Applies element wise power operation with a float tensor.

pub fn powf_scalar<E>(self, other: E) -> Tensor<B, D, K>

Applies element wise power operation with a float scalar.

pub fn powi(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>

Applies element wise power operation with an integer tensor.

pub fn powi_scalar<E>(self, other: E) -> Tensor<B, D, K>

Applies element wise power operation with an integer scalar.

pub fn is_close( self, other: Tensor<B, D, K>, rtol: Option<f64>, atol: Option<f64>, ) -> Tensor<B, D, Bool>

Checks element wise if the tensor is close to another tensor.

The tolerance is defined by the following equation:

abs(a - b) <= (atol + rtol * abs(b))

where `a` is the first tensor, `b` is the second tensor, `rtol` is the relative tolerance,
and `atol` is the absolute tolerance.
§Arguments
  • other - The tensor to compare with.
  • rtol - Optional relative tolerance. Default is 1e-5.
  • atol - Optional absolute tolerance. Default is 1e-8.
§Returns

A boolean tensor with the same shape as the input tensors.

pub fn all_close( self, other: Tensor<B, D, K>, rtol: Option<f64>, atol: Option<f64>, ) -> bool

Checks if all elements are close to another tensor.

The tolerance is defined by the following equation:


abs(a - b) <= (atol + rtol * abs(b))

where `a` is the first tensor, `b` is the second tensor, `rtol` is the relative tolerance,
and `atol` is the absolute tolerance.

§Arguments
  • other - The tensor to compare with.
  • rtol - Optional relative tolerance. Default is 1e-5.
  • atol - Optional absolute tolerance. Default is 1e-8.
§Returns

A boolean scalar.


pub fn bool(self) -> Tensor<B, D, Bool>

Converts the tensor to a boolean tensor by checking if the elements are non-zero.

§Returns

A boolean tensor with the same shape as the input tensor.

pub fn random<S>( shape: S, distribution: Distribution, device: &<B as Backend>::Device, ) -> Tensor<B, D, K>
where S: Into<Shape<D>>,

Create a random tensor of the given shape on the given device where each element is sampled from the given distribution.

pub fn sort(self, dim: usize) -> Tensor<B, D, K>

Sort the elements by value in ascending order along a given dimension.

This sort is unstable (i.e., may reorder equal elements).

pub fn sort_descending(self, dim: usize) -> Tensor<B, D, K>

Sort the elements by value in descending order along a given dimension.

This sort is unstable (i.e., may reorder equal elements).

pub fn sort_with_indices( self, dim: usize, ) -> (Tensor<B, D, K>, Tensor<B, D, Int>)

Sort the elements by value in ascending order along a given dimension. Also returns the indices.

This sort is unstable (i.e., may reorder equal elements).

pub fn sort_descending_with_indices( self, dim: usize, ) -> (Tensor<B, D, K>, Tensor<B, D, Int>)

Sort the elements by value in descending order along a given dimension. Also returns the indices.

This sort is unstable (i.e., may reorder equal elements).

pub fn argsort(self, dim: usize) -> Tensor<B, D, Int>

Returns the indices that sort the elements by value in ascending order along a given dimension.

This sort is unstable (i.e., may reorder equal elements).

pub fn argsort_descending(self, dim: usize) -> Tensor<B, D, Int>

Returns the indices that sort the elements by value in descending order along a given dimension.

This sort is unstable (i.e., may reorder equal elements).

pub fn topk(self, k: usize, dim: usize) -> Tensor<B, D, K>

Returns the k largest elements of the given input tensor along a given dimension.

pub fn topk_with_indices( self, k: usize, dim: usize, ) -> (Tensor<B, D, K>, Tensor<B, D, Int>)

Returns the k largest elements of the given input tensor along a given dimension. Also returns the indices.
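
An illustrative sketch using topk_with_indices (not from the original documentation; it assumes the returned values are ordered from largest to smallest):

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 1>::from_floats([1.0, 5.0, 3.0, 2.0], &device);
    // The two largest values along dim 0, together with their original indices.
    let (values, indices) = tensor.topk_with_indices(2, 0);
    println!("{} {}", values, indices); // expected: [5.0, 3.0] and [1, 2]
}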

pub fn pad( self, padding: (usize, usize, usize, usize), value: <K as BasicOps<B>>::Elem, ) -> Tensor<B, D, K>

Pad the tensor of rank two or higher with the given value on the last two dimensions.

§Arguments
  • padding - A tuple of four integers representing the padding on the left, right, top, and bottom.
  • value - The value to pad the tensor with.
§Returns

A new tensor with the given padding.
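
An illustrative sketch (not from the original documentation; it assumes the fill value can be produced from a literal via ElementConversion::elem):

use burn_tensor::backend::Backend;
use burn_tensor::{ElementConversion, Tensor};

fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 2>::ones([2, 3], &device);
    // Pad (left, right, top, bottom) on the last two dimensions with zeros.
    let padded = tensor.pad((1, 1, 2, 0), 0.0_f32.elem());
    println!("{:?}", padded.dims()); // rows: 2 + 2 + 0 = 4, cols: 3 + 1 + 1 = 5 -> [4, 5]
}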

pub fn is_nan(&self) -> Tensor<B, D, Bool>

Returns a new tensor with boolean elements indicating whether each element of the input is NaN.

§Returns

A boolean tensor where true indicates NaN and false indicates a non-NaN value.

pub fn contains_nan(&self) -> Tensor<B, 1, Bool>

Checks if the tensor contains any NaN values.

§Returns

A boolean tensor with a single element indicating whether the tensor contains any NaN values.

§

impl<B, K> Tensor<B, 2, K>
where B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

pub fn eye(size: usize, device: &<B as Backend>::Device) -> Tensor<B, 2, K>

Creates a new 2D tensor with ones on the diagonal and zeros elsewhere.

§Arguments
  • size - The size of the square matrix.
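
An illustrative sketch (not from the original documentation):

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    // 3x3 identity matrix.
    let identity = Tensor::<B, 2>::eye(3, &device);
    println!("{}", identity);
}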

Trait Implementations§

§

impl<E, const D: usize, B, K> Add<E> for Tensor<B, D, K>
where E: ElementConversion, B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

§

type Output = Tensor<B, D, K>

The resulting type after applying the + operator.
§

fn add(self, other: E) -> Tensor<B, D, K>

Performs the + operation. Read more
§

impl<B, const D: usize, K> Add for Tensor<B, D, K>
where B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

§

type Output = Tensor<B, D, K>

The resulting type after applying the + operator.
§

fn add(self, rhs: Tensor<B, D, K>) -> Tensor<B, D, K>

Performs the + operation. Read more
§

impl<const D: usize, B, K> AutodiffModule<B> for Tensor<B, D, K>

§

type InnerModule = Tensor<<B as AutodiffBackend>::InnerBackend, D, <K as BasicAutodiffOps<B>>::InnerKind>

Inner module without auto-differentiation.
§

fn valid(&self) -> <Tensor<B, D, K> as AutodiffModule<B>>::InnerModule

Get the same module, but on the inner backend without auto-differentiation.
§

impl<B, const D: usize, T> BitXor<T> for Tensor<B, D>
where B: Backend,

§

type Output = Tensor<B, D>

The resulting type after applying the ^ operator.
§

fn bitxor(self, _: T) -> <Tensor<B, D> as BitXor<T>>::Output

Performs the ^ operation. Read more
§

impl<B, const D: usize, K> Clone for Tensor<B, D, K>
where B: Clone + Backend, K: Clone + TensorKind<B>, <K as TensorKind<B>>::Primitive<D>: Clone,

§

fn clone(&self) -> Tensor<B, D, K>

Returns a copy of the value. Read more
1.0.0 · source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
§

impl<B, const D: usize, K> Debug for Tensor<B, D, K>
where B: Debug + Backend, K: Debug + TensorKind<B>, <K as TensorKind<B>>::Primitive<D>: Debug,

§

fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error>

Formats the value using the given formatter. Read more
§

impl<'de, B, const D: usize, K> Deserialize<'de> for Tensor<B, D, K>
where B: Backend, K: BasicOps<B>, <K as BasicOps<B>>::Elem: Debug + Copy + Deserialize<'de>,

§

fn deserialize<De>( deserializer: De, ) -> Result<Tensor<B, D, K>, <De as Deserializer<'de>>::Error>
where De: Deserializer<'de>,

Deserialize this value from the given Serde deserializer. Read more
§

impl<B, const D: usize, K> Display for Tensor<B, D, K>
where B: Backend, <B as Backend>::IntElem: Display, K: BasicOps<B>, <K as BasicOps<B>>::Elem: Debug,

Pretty print tensors

§

fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error>

Formats the value using the given formatter. Read more
§

impl<E, const D: usize, B, K> Div<E> for Tensor<B, D, K>
where E: ElementConversion, B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

§

type Output = Tensor<B, D, K>

The resulting type after applying the / operator.
§

fn div(self, other: E) -> Tensor<B, D, K>

Performs the / operation. Read more
§

impl<B, const D: usize, K> Div for Tensor<B, D, K>
where B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

§

type Output = Tensor<B, D, K>

The resulting type after applying the / operator.
§

fn div(self, rhs: Tensor<B, D, K>) -> Tensor<B, D, K>

Performs the / operation. Read more
§

impl<B, const D: usize, K, T> From<T> for Tensor<B, D, K>
where B: Backend, K: BasicOps<B>, T: Into<TensorData>,

§

fn from(value: T) -> Tensor<B, D, K>

Converts to this type from the input type.
§

impl<const D: usize, B, K> Module<B> for Tensor<B, D, K>
where B: Backend, K: BasicOps<B>,

§

type Record = ConstantRecord

Type to save and load the module.
§

fn visit<V>(&self, _visitor: &mut V)
where V: ModuleVisitor<B>,

Visit each tensor parameter in the module with a visitor.
§

fn map<M>(self, _mapper: &mut M) -> Tensor<B, D, K>
where M: ModuleMapper<B>,

Map each tensor parameter in the module with a mapper.
§

fn into_record(self) -> <Tensor<B, D, K> as Module<B>>::Record

Convert the module into a record containing the state.
§

fn load_record( self, _record: <Tensor<B, D, K> as Module<B>>::Record, ) -> Tensor<B, D, K>

Load the module state from a record.
§

fn to_device(self, device: &<B as Backend>::Device) -> Tensor<B, D, K>

Move the module and all of its sub-modules to the given device. Read more
§

fn fork(self, device: &<B as Backend>::Device) -> Tensor<B, D, K>

Fork the module and all of its sub-modules to the given device. Read more
§

fn collect_devices( &self, devices: Vec<<B as Backend>::Device>, ) -> Vec<<B as Backend>::Device>

Return all the devices found in the underlying module tree, added to the given vector without duplicates.
§

fn devices(&self) -> Vec<<B as Backend>::Device>

Return all the devices found in the underlying module tree without duplicates.
§

fn no_grad(self) -> Self

Each tensor in the module tree will not require grad. Read more
§

fn num_params(&self) -> usize

Get the number of parameters the module has, including all of its sub-modules.
§

fn save_file<FR, PB>( self, file_path: PB, recorder: &FR, ) -> Result<(), RecorderError>
where FR: FileRecorder<B>, PB: Into<PathBuf>,

Save the module to a file using the provided file recorder. Read more
§

fn load_file<FR, PB>( self, file_path: PB, recorder: &FR, device: &<B as Backend>::Device, ) -> Result<Self, RecorderError>
where FR: FileRecorder<B>, PB: Into<PathBuf>,

Load the module from a file using the provided file recorder. Read more
§

fn quantize_weights<C>(self, quantizer: &mut Quantizer<C>) -> Self
where C: Calibration,

Quantize the weights of the module.
§

impl<const D: usize, B, K> ModuleDisplay for Tensor<B, D, K>
where B: Backend, K: BasicOps<B>,

§

fn format(&self, passed_settings: DisplaySettings) -> String

Formats the module with provided display settings. Read more
§

fn custom_settings(&self) -> Option<DisplaySettings>

Custom display settings for the module. Read more
§

fn custom_content(&self, _content: Content) -> Option<Content>

Custom attributes for the module. Read more
§

impl<const D: usize, B, K> ModuleDisplayDefault for Tensor<B, D, K>
where B: Backend, K: BasicOps<B>,

§

fn content(&self, content: Content) -> Option<Content>

Attributes of the module used for display purposes. Read more
§

fn num_params(&self) -> usize

Gets the number of the parameters of the module.
§

impl<E, const D: usize, B, K> Mul<E> for Tensor<B, D, K>
where E: ElementConversion, B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

§

type Output = Tensor<B, D, K>

The resulting type after applying the * operator.
§

fn mul(self, other: E) -> Tensor<B, D, K>

Performs the * operation. Read more
§

impl<B, const D: usize, K> Mul for Tensor<B, D, K>
where B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

§

type Output = Tensor<B, D, K>

The resulting type after applying the * operator.
§

fn mul(self, rhs: Tensor<B, D, K>) -> Tensor<B, D, K>

Performs the * operation. Read more
§

impl<B, const D: usize, K> Neg for Tensor<B, D, K>
where B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

§

type Output = Tensor<B, D, K>

The resulting type after applying the - operator.
§

fn neg(self) -> Tensor<B, D, K>

Performs the unary - operation. Read more
§

impl<B, const D: usize> Parameter for Tensor<B, D>
where B: Backend,

§

type Device = <B as Backend>::Device

The device type to be used.
§

fn device(&self) -> <Tensor<B, D> as Parameter>::Device

Fetch the device.
§

fn is_require_grad(&self) -> bool

Fetch the gradient requirement.
§

fn set_require_grad(self, require_grad: bool) -> Tensor<B, D>

Set the gradient requirement.
§

impl<B, const D: usize> Parameter for Tensor<B, D, Bool>
where B: Backend,

§

type Device = <B as Backend>::Device

The device type to be used.
§

fn device(&self) -> <Tensor<B, D, Bool> as Parameter>::Device

Fetch the device.
§

fn is_require_grad(&self) -> bool

Fetch the gradient requirement.
§

fn set_require_grad(self, _require_grad: bool) -> Tensor<B, D, Bool>

Set the gradient requirement.
§

impl<B, const D: usize> Parameter for Tensor<B, D, Int>
where B: Backend,

§

type Device = <B as Backend>::Device

The device type to be used.
§

fn device(&self) -> <Tensor<B, D, Int> as Parameter>::Device

Fetch the device.
§

fn is_require_grad(&self) -> bool

Fetch the gradient requirement.
§

fn set_require_grad(self, _require_grad: bool) -> Tensor<B, D, Int>

Set the gradient requirement.
§

impl<B, const D: usize> Record<B> for Tensor<B, D>
where B: Backend,

§

type Item<S: PrecisionSettings> = FloatTensorSerde<S>

Type of the item that can be serialized and deserialized.
§

fn into_item<S>(self) -> <Tensor<B, D> as Record<B>>::Item<S>

Convert the current record into the corresponding item that follows the given settings.
§

fn from_item<S>( item: <Tensor<B, D> as Record<B>>::Item<S>, device: &<B as Backend>::Device, ) -> Tensor<B, D>

Convert the given item into a record.
§

impl<B, const D: usize> Record<B> for Tensor<B, D, Bool>
where B: Backend,

§

type Item<S: PrecisionSettings> = BoolTensorSerde

Type of the item that can be serialized and deserialized.
§

fn into_item<S>(self) -> <Tensor<B, D, Bool> as Record<B>>::Item<S>

Convert the current record into the corresponding item that follows the given settings.
§

fn from_item<S>( item: <Tensor<B, D, Bool> as Record<B>>::Item<S>, device: &<B as Backend>::Device, ) -> Tensor<B, D, Bool>

Convert the given item into a record.
§

impl<B, const D: usize> Record<B> for Tensor<B, D, Int>
where B: Backend,

§

type Item<S: PrecisionSettings> = IntTensorSerde<S>

Type of the item that can be serialized and deserialized.
§

fn into_item<S>(self) -> <Tensor<B, D, Int> as Record<B>>::Item<S>

Convert the current record into the corresponding item that follows the given settings.
§

fn from_item<S>( item: <Tensor<B, D, Int> as Record<B>>::Item<S>, device: &<B as Backend>::Device, ) -> Tensor<B, D, Int>

Convert the given item into a record.
§

impl<E, const D: usize, B, K> Rem<E> for Tensor<B, D, K>
where E: ElementConversion, B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

§

type Output = Tensor<B, D, K>

The resulting type after applying the % operator.
§

fn rem(self, other: E) -> Tensor<B, D, K>

Performs the % operation. Read more
§

impl<B, const D: usize, K> Serialize for Tensor<B, D, K>
where B: Backend, K: BasicOps<B>, <K as BasicOps<B>>::Elem: Debug + Copy + Serialize,

§

fn serialize<S>( &self, serializer: S, ) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
where S: Serializer,

Serialize this value into the given Serde serializer. Read more
§

impl<E, const D: usize, B, K> Sub<E> for Tensor<B, D, K>
where E: ElementConversion, B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

§

type Output = Tensor<B, D, K>

The resulting type after applying the - operator.
§

fn sub(self, other: E) -> Tensor<B, D, K>

Performs the - operation. Read more
§

impl<B, const D: usize, K> Sub for Tensor<B, D, K>
where B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

§

type Output = Tensor<B, D, K>

The resulting type after applying the - operator.
§

fn sub(self, rhs: Tensor<B, D, K>) -> Tensor<B, D, K>

Performs the - operation. Read more

Auto Trait Implementations§

§

impl<B, const D: usize, K> Freeze for Tensor<B, D, K>
where <K as TensorKind<B>>::Primitive<D>: Freeze,

§

impl<B, const D: usize, K> RefUnwindSafe for Tensor<B, D, K>
where <K as TensorKind<B>>::Primitive<D>: RefUnwindSafe,

§

impl<B, const D: usize, K> Send for Tensor<B, D, K>

§

impl<B, const D: usize, K> Sync for Tensor<B, D, K>
where <K as TensorKind<B>>::Primitive<D>: Sync,

§

impl<B, const D: usize, K> Unpin for Tensor<B, D, K>
where <K as TensorKind<B>>::Primitive<D>: Unpin,

§

impl<B, const D: usize, K> UnwindSafe for Tensor<B, D, K>
where <K as TensorKind<B>>::Primitive<D>: UnwindSafe,

Blanket Implementations§

source§

impl<T> Any for T
where T: 'static + ?Sized,

source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
source§

impl<T> Borrow<T> for T
where T: ?Sized,

source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
§

impl<T> Downcast<T> for T

§

fn downcast(&self) -> &T

source§

impl<T> From<T> for T

source§

fn from(t: T) -> T

Returns the argument unchanged.

§

impl<T> Instrument for T

§

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided [Span], returning an Instrumented wrapper. Read more
§

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper. Read more
source§

impl<T, U> Into<U> for T
where U: From<T>,

source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

source§

impl<T> IntoEither for T

source§

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
source§

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
§

impl<T> Pointable for T

§

const ALIGN: usize = _

The alignment of pointer.
§

type Init = T

The type for initializers.
§

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a with the given initializer. Read more
§

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more
§

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more
§

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more
source§

impl<T> Same for T

§

type Output = T

Should always be Self
source§

impl<T> ToOwned for T
where T: Clone,

§

type Owned = T

The resulting type after obtaining ownership.
source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
source§

impl<T> ToString for T
where T: Display + ?Sized,

source§

default fn to_string(&self) -> String

Converts the given value to a String. Read more
source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

§

type Error = Infallible

The type returned in the event of a conversion error.
source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
§

impl<T> Upcast<T> for T

§

fn upcast(&self) -> Option<&T>

§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

§

fn vzip(self) -> V

§

impl<T> WithSubscriber for T

§

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
where S: Into<Dispatch>,

Attaches the provided Subscriber to this type, returning a [WithDispatch] wrapper. Read more
§

fn with_current_subscriber(self) -> WithDispatch<Self>

Attaches the current default Subscriber to this type, returning a [WithDispatch] wrapper. Read more
source§

impl<T> DeserializeOwned for T
where T: for<'de> Deserialize<'de>,

§

impl<T> ErasedDestructor for T
where T: 'static,

source§

impl<T, Rhs, Output> NumOps<Rhs, Output> for T
where T: Sub<Rhs, Output = Output> + Mul<Rhs, Output = Output> + Div<Rhs, Output = Output> + Add<Rhs, Output = Output> + Rem<Rhs, Output = Output>,

§

impl<T> WasmNotSend for T
where T: Send,

§

impl<T> WasmNotSendSync for T
where T: WasmNotSend + WasmNotSync,

§

impl<T> WasmNotSync for T
where T: Sync,