burn::tensor

Struct Tensor

pub struct Tensor<B, const D: usize, K = Float>
where B: Backend, K: TensorKind<B>,
{ /* private fields */ }

A tensor with a given backend, shape and data type.

§Indexing

Indexing a tensor can be done using slice for all tensor types or select for numeric types.

§Example

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
use burn_tensor::Int;

fn example<B: Backend>() {
    let device = Default::default();

    let tensor = Tensor::<B, 2>::from_data(
        [
            [3.0, 4.9, 2.0],
            [2.0, 1.9, 3.0],
            [6.0, 1.5, 7.0],
            [3.0, 4.9, 9.0],
        ],
        &device,
    );

    // Slice the tensor to get the second and third rows:
    // [[2.0, 1.9, 3.0], [6.0, 1.5, 7.0]]
    // The resulting tensor will have dimensions [2, 3].
    let slice = tensor.clone().slice([1..3]);
    println!("{slice}");

    // Slice the tensor to get the first two rows and the first 2 columns:
    // [[3.0, 4.9], [2.0, 1.9]]
    // The resulting tensor will have dimensions [2, 2].
    let slice = tensor.clone().slice([0..2, 0..2]);
    println!("{slice}");

    // Index the tensor along the dimension 1 to get the elements 0 and 2:
    // [[3.0, 2.0], [2.0, 3.0], [6.0, 7.0], [3.0, 9.0]]
    // The resulting tensor will have dimensions [4, 2]
    let indices = Tensor::<B, 1, Int>::from_data([0, 2], &device);
    let indexed = tensor.select(1, indices);
    println!("{indexed}");
}

§Implementations


impl<const D: usize, B> Tensor<B, D>
where B: AutodiffBackend,

pub fn backward(&self) -> <B as AutodiffBackend>::Gradients

Backward pass of the tensor.

pub fn grad( &self, grads: &<B as AutodiffBackend>::Gradients, ) -> Option<Tensor<<B as AutodiffBackend>::InnerBackend, D>>

Get the gradients of a tensor, if they exist.

Returns a new reference to the same tensor, so the same grad tensor can be accessed multiple times. If you only need to read the gradients once, consider using grad_remove for better performance.
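
For context, a minimal sketch of the typical flow (illustrative only; any backend implementing AutodiffBackend works):

use burn_tensor::backend::AutodiffBackend;
use burn_tensor::Tensor;

fn example<B: AutodiffBackend>() {
    let device = Default::default();
    // Track gradients for `x`.
    let x = Tensor::<B, 2>::ones([2, 2], &device).require_grad();
    let y = x.clone().mul_scalar(3.0).sum();

    // Run the backward pass, then read the gradient of `x` from the container.
    let grads = y.backward();
    let x_grad = x.grad(&grads);
    println!("{x_grad:?}");
}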

pub fn grad_remove( &self, grads: &mut <B as AutodiffBackend>::Gradients, ) -> Option<Tensor<<B as AutodiffBackend>::InnerBackend, D>>

Remove the grad tensor from the grads struct, returning it.

pub fn grad_replace( &self, grads: &mut <B as AutodiffBackend>::Gradients, grad: Tensor<<B as AutodiffBackend>::InnerBackend, D>, )

Replace the grad tensor in the grads struct with the provided gradient.


impl<const D: usize, B, K> Tensor<B, D, K>
where B: AutodiffBackend, K: BasicAutodiffOps<B>,

pub fn inner( self, ) -> Tensor<<B as AutodiffBackend>::InnerBackend, D, <K as BasicAutodiffOps<B>>::InnerKind>

Returns the inner tensor without the autodiff information.

pub fn from_inner( inner: Tensor<<B as AutodiffBackend>::InnerBackend, D, <K as BasicAutodiffOps<B>>::InnerKind>, ) -> Tensor<B, D, K>

Convert a tensor to the autodiff backend.

§Arguments
  • inner - The tensor to convert.
§Returns

The tensor converted to the autodiff backend.
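
A brief sketch of round-tripping between the autodiff backend and its inner backend (assuming B: AutodiffBackend):

use burn_tensor::backend::AutodiffBackend;
use burn_tensor::Tensor;

fn example<B: AutodiffBackend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 2>::ones([2, 2], &device);
    // Strip the autodiff information...
    let inner = tensor.inner();
    // ...and wrap the plain tensor back into the autodiff backend.
    let tensor = Tensor::<B, 2>::from_inner(inner);
}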


impl<B, const D: usize, K> Tensor<B, D, K>
where B: Backend, K: TensorKind<B>,

pub fn new(primitive: <K as TensorKind<B>>::Primitive) -> Tensor<B, D, K>

Constructs a new Tensor.


impl<B, const D: usize, K> Tensor<B, D, K>
where B: Backend, K: BasicOps<B>,

pub fn into_primitive(self) -> <K as TensorKind<B>>::Primitive

Converts the tensor into a primitive tensor.

pub fn from_primitive( tensor: <K as TensorKind<B>>::Primitive, ) -> Tensor<B, D, K>

Converts from a primitive tensor into a tensor.

pub fn dtype(&self) -> DType

Returns the tensor primitive data type.

§Note

Some element types are encoded in different primitive types depending on the backend (e.g., bool could be encoded as u8 or u32).
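
For illustration, a minimal sketch (the concrete DType variant depends on the backend; F32 is only a common default):

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 2>::ones([2, 2], &device);
    // Prints the primitive data type, e.g. F32 on most float backends.
    println!("{:?}", tensor.dtype());
}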

pub fn empty<S>(shape: S, device: &<B as Backend>::Device) -> Tensor<B, D, K>
where S: Into<Shape>,

Create an empty tensor of the given shape.

§Arguments
  • shape: The shape of the tensor.
  • device: The device where the tensor will be created.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
   let device = Default::default();
   // Create an empty tensor with dimensions [2, 3, 4].
   let tensor = Tensor::<B, 3>::empty([2, 3, 4], &device);
}

pub fn dims(&self) -> [usize; D]

Returns the dimensions of the current tensor.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
  let device = Default::default();
  let tensor = Tensor::<B, 3>::ones([2, 3, 4], &device);
  let dims = tensor.dims(); // [2, 3, 4]
  println!("{dims:?}");
}

pub fn shape(&self) -> Shape

Returns the shape of the current tensor.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
   let device = Default::default();
   let tensor = Tensor::<B, 3>::ones([2, 3, 4], &device);
   // Shape { dims: [2, 3, 4] }
   let shape = tensor.shape();
}

pub fn reshape<const D2: usize, S>(self, shape: S) -> Tensor<B, D2, K>
where S: ReshapeArgs<D2>,

Reshape the tensor to have the given shape.

A -1 in the shape is used to infer the remaining dimensions, e.g.: [2, -1] will reshape the tensor with [2, 3, 4] dimensions to [2, 12].

A 0 in the shape instructs to keep the current dimension from the original tensor, e.g.: [2, 0, 4] will reshape the tensor with [2, 3, 4] dimensions to [2, 3, 4]. This is useful when reshaping tensors with unknown dimensions and combining with -1 to infer the remaining dimensions, e.g. [0, -1] will reshape the tensor with [1, 3, 4] dimensions to [1, 12].

§Arguments
  • shape: The new shape of the tensor.
§Panics
  • If the shape contains more than one -1.
  • If the shape contains values that are not positive (other than -1).
  • If the shape does not match the number of elements of the original shape.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
   let device = Default::default();
   // Create a tensor with dimensions [2, 3, 4]
   let tensor = Tensor::<B, 3>::ones([2, 3, 4], &device);
   // Reshape it to [2, 12], where 12 is inferred from the number of elements.
   let reshaped = tensor.reshape([2, -1]);
   println!("{reshaped}");
}

pub fn transpose(self) -> Tensor<B, D, K>

Transpose the tensor.

§Arguments
  • tensor - The tensor to transpose.
§Returns

The transposed tensor.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    // Create a 2D tensor of shape [2, 3]
    let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);

    // Transpose the tensor:
    // [[1.0, 5.0], [-2.0, 9.0], [3.0, 6.0]]
    // The resulting tensor will have dimensions [3, 2].
    let transposed = tensor.transpose();
    println!("{transposed}");
}

pub fn swap_dims(self, dim1: usize, dim2: usize) -> Tensor<B, D, K>

Swaps two dimensions of a tensor.

§Arguments
  • tensor - The tensor to swap the dimensions of.
  • dim1 - The first dimension to swap.
  • dim2 - The second dimension to swap.
§Returns

The tensor with the dimensions swapped.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    // Create a 2D tensor of shape [2, 3]
    let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);

    // Swap the dimensions 0 and 1 (equivalent to `tensor.transpose()`):
    // [[1.0, 5.0], [-2.0, 9.0], [3.0, 6.0]]
    // The resulting tensor will have dimensions [3, 2].
    let swapped = tensor.swap_dims(0, 1);
    println!("{swapped}");
}

pub fn permute(self, axes: [isize; D]) -> Tensor<B, D, K>

Permute the dimensions of the tensor.

§Arguments
  • axes - The new order of the dimensions. The length of the axes must be equal to the number of dimensions of the tensor. The values must be unique and in the range of the number of dimensions. The values can be negative, in which case they are used as an offset from the end.
§Returns

The tensor with the dimensions permuted.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    // Create a 2D tensor of shape [3, 2]
    let tensor = Tensor::<B, 2>::from_data([[1.0, 5.0], [-2.0, 9.0], [3.0, 6.0]], &device);

    // Permute the dimensions 1 and 0:
    // [[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]]
    // The resulting tensor will have dimensions [2, 3].
    let permuted = tensor.permute([1, 0]);
    println!("{permuted}");
}

pub fn movedim<S1, S2>(self, src: S1, dst: S2) -> Tensor<B, D, K>
where S1: MovedimArgs, S2: MovedimArgs,

Moves the dimension(s) of the input at the position(s) in src to the position(s) in dst.

Other dimensions of the input that are not explicitly moved remain in their original order and appear at the positions not specified in dst.

§Arguments
  • src - The dimension(s) to move. The values must be unique and in the range of the number of dimensions. The values can be negative, in which case they are used as an offset from the end.

  • dst - Destination positions for each of the original dims. These must also be unique.

§Panics
  • If the source and destination dimensions are not of the same length.
  • If the source and destination vectors contain duplicate values.
  • If the source and destination vectors contain values that are out of bounds.
§Returns

The tensor with the dimensions moved.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    // Create a 3D tensor of shape [3, 2, 1]
    let tensor = Tensor::<B, 3>::from_data([[[1.0], [5.0]], [[-2.0], [9.0]], [[3.0], [6.0]]], &device);

    // Move the dimensions 0 and 1:
    // [[[1.0], [-2.0], [3.0]], [[5.0], [9.0], [6.0]]]
    // The resulting tensor will have dimensions [2, 3, 1].
    let moved = tensor.movedim(1, 0);
    println!("{moved}");
}

pub fn flip<const N: usize>(self, axes: [isize; N]) -> Tensor<B, D, K>

Reverse the order of elements in the tensor along the given dimensions.

§Arguments
  • axes - The dimensions to reverse. The values must be unique and in the range of the number of dimensions. The values can be negative, in which case they are used as an offset from the end.
§Returns

The tensor with the axes flipped.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    // Create a 2D tensor with dimensions [4, 3]
    let tensor = Tensor::<B, 2>::from_data(
        [
            [3.0, 4.9, 2.0],
            [2.0, 1.9, 3.0],
            [4.0, 5.9, 8.0],
            [1.4, 5.8, 6.0],
        ],
        &device,
    );

    // Flip the elements in dimensions 0 and 1:
    // [[6.0, 5.8, 1.4],
    //  [8.0, 5.9, 4.0],
    //  [3.0, 1.9, 2.0],
    //  [2.0, 4.9, 3.0]]
    // The resulting tensor will have dimensions [4, 3].
    let flipped = tensor.flip([0, 1]);
    println!("{flipped}");
}

pub fn flatten<const D2: usize>( self, start_dim: usize, end_dim: usize, ) -> Tensor<B, D2, K>

Flatten the tensor along a given range of dimensions.

This function collapses the specified range of dimensions into a single dimension, effectively flattening the tensor in that range.

§Arguments
  • start_dim: The starting dimension of the range to be flattened.
  • end_dim: The ending dimension of the range to be flattened (inclusive).
§Type Parameters
  • D2: The resulting number of dimensions in the flattened tensor.
§Returns

A new Tensor<B, D2, K> instance with the specified range of dimensions flattened.

§Example

use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
    let device = Default::default();
    // Create a 3D tensor with dimensions [2, 3, 4]
    let tensor = Tensor::<B, 3>::ones(Shape::new([2, 3, 4]), &device);

    // Flatten the tensor from dimensions 1 to 2 (inclusive).
    // The resulting tensor will have dimensions [2, 12]
    let flattened: Tensor<B, 2> = tensor.flatten(1, 2);
    println!("{flattened}");
}

pub fn squeeze<const D2: usize>(self, dim: usize) -> Tensor<B, D2, K>

Squeeze the tensor along the given dimension, removing the specified dimension of size one, and effectively reducing the rank of the tensor by one.

§Arguments
  • dim: The dimension to be squeezed.
§Type Parameters
  • D2: The resulting number of dimensions in the squeezed tensor.
§Returns

A new Tensor<B, D2, K> instance with the specified dimension removed.

§Example

use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
    let device = Default::default();
    // Create a 3D tensor with dimensions [3, 1, 3]
    let tensor = Tensor::<B, 3>::from_data(
        [[[3.0, 4.9, 2.0]], [[2.0, 1.9, 3.0]], [[4.0, 5.9, 8.0]]],
        &device,
    );

    // Squeeze the dimension 1.
    // The resulting tensor will have dimensions [3, 3].
    let squeezed = tensor.squeeze::<2>(1);
    println!("{squeezed}");
}

pub fn squeeze_dims<const D2: usize>(self, dims: &[isize]) -> Tensor<B, D2, K>

Removes specified dimensions of size 1 from a tensor’s shape. This function takes a tensor and an array of dimensions (dims) to be squeezed. If dims is provided, only the dimensions specified in this array will be removed. Each dimension in dims should correspond to a size of 1 in the tensor; otherwise, the dimension will not be squeezed. If dims is empty, all single-dimensional entries in the tensor will be removed. If entries in dims are negative, then dimensions will be counted from the back.

§Arguments
  • dims: The dimension(s) to be squeezed.
§Type Parameters
  • D2: The resulting number of dimensions in the squeezed tensor.
§Returns

A new Tensor<B, D2, K> instance with the specified dimensions removed.

§Example

use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
    let device = Default::default();
    // Create a 4D tensor with dimensions [2, 1, 4, 1]
    let tensor = Tensor::<B, 4>::ones(Shape::new([2, 1, 4, 1]), &device);

    // Squeeze the dimensions 1 and 3.
    // The resulting tensor will have dimensions [2, 4].
    let squeezed: Tensor<B, 2> = tensor.squeeze_dims(&[1, 3]);
    println!("{squeezed}");
}

pub fn unsqueeze<const D2: usize>(self) -> Tensor<B, D2, K>

Unsqueeze the current tensor, inserting leading dimensions of size one until the output has D2 dimensions.

D2 must be greater than or equal to the current number of dimensions.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
    let device = Default::default();
    // Create a 2D tensor with dimensions [3, 3]
    let tensor = Tensor::<B, 2>::ones(Shape::new([3, 3]), &device);
    // Unsqueeze the tensor up to 4 dimensions.
    // The resulting tensor will have dimensions [1, 1, 3, 3].
    let unsqueezed = tensor.unsqueeze::<4>();
    println!("{unsqueezed}");
}

pub fn unsqueeze_dim<const D2: usize>(self, dim: usize) -> Tensor<B, D2, K>

Creates a new tensor with a dimension of size one inserted at the specified position.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
    let device = Default::default();
    // Create a 2D tensor with dimensions [3, 3]
    let tensor = Tensor::<B, 2>::ones(Shape::new([3, 3]), &device);
    // Unsqueeze the dimension 1.
    // The resulting tensor will have dimensions [3, 1, 3].
    let unsqueezed: Tensor<B, 3> = tensor.unsqueeze_dim(1);
    println!("{unsqueezed}");
}

pub fn unsqueeze_dims<const D2: usize>(self, axes: &[isize]) -> Tensor<B, D2, K>

Creates a new tensor with dimensions of size one inserted at the specified indices. The indices can be negative, in which case they are counted from the last dimension. The axes can contain duplicates, in which case the number of dimensions inserted at that index equals the number of duplicates.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
    let device = Default::default();
    // Create a 3D tensor with dimensions [3, 4, 5]
    let tensor = Tensor::<B, 3>::ones(Shape::new([3, 4, 5]), &device);
    // Unsqueeze the leading dimension (0) once and the trailing dimension (-1) twice.
    // The resulting tensor will have dimensions [1, 3, 4, 5, 1, 1].
    let unsqueezed: Tensor<B, 6> = tensor.unsqueeze_dims(&[0, -1, -1]);
    println!("{unsqueezed}");
}

pub fn slice<const D2: usize, R>(self, ranges: R) -> Tensor<B, D, K>
where R: RangesArg<D2>,

Returns a tensor containing the elements selected from the given ranges.

§Arguments
  • ranges - A type implementing the RangesArg trait, which can be:
    • An array of core::ops::Range<usize>
    • An array of Option<(i64, i64)>
    • An array of (i64, i64) tuples
§Behavior
  • Supports partial and full slicing in any number of dimensions.
  • Missing ranges are treated as full slices if D > D2.
  • Handles negative indices by wrapping around from the end of the dimension.
  • Clamps ranges to the tensor’s dimensions if they exceed the bounds.
  • For Option<(i64, i64)> ranges, None selects the full range of that dimension.
§Panics
  • If the number of ranges provided exceeds the tensor’s dimensions.
  • If a range is descending (e.g., 2..1) or empty (e.g., 1..1).
§Examples
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
    let device = B::Device::default();

    // 1D slicing
    let tensor = Tensor::<B, 1, burn_tensor::Int>::arange(0..5, &device);
    let slice = tensor.slice([1..4]);
    assert_eq!(slice.into_data().to_vec::<i32>().unwrap(), vec![1i32, 2, 3]);

    // 2D slicing
    let tensor = Tensor::<B, 2>::ones(Shape::new([3, 4]), &device);
    let slice = tensor.slice([1..3, 0..2]);
    assert_eq!(slice.dims(), [2, 2]);

    // Using negative indices
    let tensor = Tensor::<B, 1, burn_tensor::Int>::arange(0..5, &device);
    let slice = tensor.slice([(1, -1)]); // Equivalent to 1..4
    assert_eq!(slice.into_data().to_vec::<i32>().unwrap(), vec![1i32, 2, 3]);

    // Using Option<(i64, i64)>
    let tensor = Tensor::<B, 1, burn_tensor::Int>::arange(0..12, &device).reshape([3, 4]);
    let slice = tensor.slice([Some((1, -1)), None]); // Select row 1 (i.e. 1..2), all columns
    assert_eq!(slice.dims(), [1, 4]);
}
§Note

This function uses the RangesArg trait for flexible range specification. The trait handles the conversion of various range formats and applies clamping and negative index handling internally.

pub fn slice_assign<const D2: usize>( self, ranges: [Range<usize>; D2], values: Tensor<B, D, K>, ) -> Tensor<B, D, K>

Returns a copy of the current tensor with the elements in the selected ranges replaced by the given values.

§Panics
  • If a range exceeds the number of elements on a dimension.
  • If the shape of the values tensor doesn’t match the given ranges.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = B::Device::default();
    let tensor = Tensor::<B, 3>::ones([2, 3, 3], &device);
    let values = Tensor::<B, 3>::zeros([1, 1, 1], &device);
    let tensor_sliced = tensor.slice_assign([0..1, 0..1, 0..1], values);
    println!("{:?}", tensor_sliced.dims()); // [2, 3, 3]
}

pub fn device(&self) -> <B as Backend>::Device

Returns the device of the current tensor.

pub fn to_device(self, device: &<B as Backend>::Device) -> Tensor<B, D, K>

Returns a new tensor with its data on the given device.
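
A short sketch of moving a tensor between devices (the source device here is just the default; pass any valid B::Device):

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>(device: &B::Device) {
    let tensor = Tensor::<B, 1>::from_floats([1.0, 2.0], &Default::default());
    // Returns a new tensor whose data lives on `device`.
    let tensor = tensor.to_device(device);
    println!("{:?}", tensor.device());
}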

pub fn into_data(self) -> TensorData

Converts the tensor into its underlying data, consuming the tensor.

§Note

For better performance, prefer using a Transaction when reading multiple tensors at once. This may improve laziness, especially if executed on a different thread in native environments.

pub fn to_data(&self) -> TensorData

Returns the underlying data of the current tensor without consuming it.

§Note

For better performance, prefer using a Transaction when reading multiple tensors at once. This may improve laziness, especially if executed on a different thread in native environments.
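
To read values on the host, convert the returned TensorData; a minimal sketch (assuming the backend’s float element type is f32):

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 1>::from_floats([1.0, 2.0, 3.0], &device);
    // Consume the tensor and copy its values into a Vec<f32>.
    let values = tensor.into_data().to_vec::<f32>().unwrap();
    assert_eq!(values, vec![1.0, 2.0, 3.0]);
}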

pub async fn into_data_async(self) -> TensorData

Returns the data of the current tensor asynchronously, consuming it.

pub async fn to_data_async(&self) -> TensorData

Returns the data of the current tensor asynchronously, without consuming it.

pub fn from_data<T>(data: T, device: &<B as Backend>::Device) -> Tensor<B, D, K>
where T: Into<TensorData>,

Create a tensor from the given data on the given device.

pub fn repeat_dim(self, dim: usize, times: usize) -> Tensor<B, D, K>

Repeat the tensor along the given dimension.

§Arguments
  • dim: The dimension to repeat.
  • times: The number of times to repeat the tensor along the given dimension in the new tensor.
§Returns

A new tensor with the given dimension repeated the specified number of times.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    // Create a 2D tensor with dimensions [3, 2]
    let tensor = Tensor::<B, 2>::from_data([[3.0, 4.9], [2.0, 1.9], [4.0, 5.9]], &device);

    // Repeat the tensor along the dimension 0 twice.
    // [[3.0, 4.9], [2.0, 1.9], [4.0, 5.9], [3.0, 4.9], [2.0, 1.9], [4.0, 5.9]]
    // The resulting tensor will have dimensions [6, 2].
    let repeated = tensor.repeat_dim(0, 2);
    println!("{repeated}");
}

pub fn repeat(self, sizes: &[usize]) -> Tensor<B, D, K>

Repeat the tensor along the given dimensions.

§Arguments
  • sizes: Borrowed slice of the number of times to repeat each dimension.
§Returns

A new tensor with each dimension repeated according to the corresponding entry in sizes.

§Example

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    // Create a 2D tensor with dimensions [3, 2]
    let tensor = Tensor::<B, 2>::from_data([[3.0, 4.9], [2.0, 1.9], [4.0, 5.9]], &device);

    // Repeat the tensor along the dimension 0 twice and the dimension 1 once.
    // [[3.0, 4.9], [2.0, 1.9], [4.0, 5.9], [3.0, 4.9], [2.0, 1.9], [4.0, 5.9]]
    // The resulting tensor will have dimensions [6, 2].
    let repeated = tensor.repeat(&[2, 1]);
}

pub fn equal(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>

Applies element-wise equal comparison and returns a boolean tensor.

§Panics

If the two tensors don’t have the same shape.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let t1 = Tensor::<B, 2>::from_data([[2.0, 4.9], [2.0, 1.9], [4.0, 5.9]], &device);
    let t2 = Tensor::<B, 2>::from_data([[3.0, 4.9], [2.0, 1.9], [4.0, 5.9]], &device);
    // Compare the elements of the two 2D tensors with dimensions [3, 2].
    // [[false, true], [true, true], [true, true]]
    let equal = t1.equal(t2);
    println!("{equal}");
}

pub fn not_equal(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>

Applies element-wise non-equality comparison and returns a boolean tensor.

§Panics

If the two tensors don’t have the same shape.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let t1 = Tensor::<B, 2>::from_data([[2.0, 4.9], [2.0, 1.9], [4.0, 5.9]], &device);
    let t2 = Tensor::<B, 2>::from_data([[3.0, 4.9], [2.0, 1.9], [4.0, 5.9]], &device);
    // Compare the elements of the two 2D tensors for inequality.
    // [[true, false], [false, false], [false, false]]
    let not_equal = t1.not_equal(t2);
    println!("{not_equal}");
}

pub fn cat(tensors: Vec<Tensor<B, D, K>>, dim: usize) -> Tensor<B, D, K>

Concatenates all tensors into a new one along the given dimension.

§Panics

If the tensors don’t all have the same shape (except along the concatenation dimension).

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let t1 = Tensor::<B, 2>::from_data([[3.0, 4.9, 2.0], [2.0, 1.9, 3.0]], &device);
    let t2 = Tensor::<B, 2>::from_data([[4.0, 5.9, 8.0], [1.4, 5.8, 6.0]], &device);

    // Concatenate the two tensors with shape [2, 3] along the dimension 1.
    // [[3.0, 4.9, 2.0, 4.0, 5.9, 8.0], [2.0, 1.9, 3.0, 1.4, 5.8, 6.0]]
    // The resulting tensor will have shape [2, 6].
    let concat = Tensor::cat(vec![t1, t2], 1);
    println!("{concat}");
}

pub fn stack<const D2: usize>( tensors: Vec<Tensor<B, D, K>>, dim: usize, ) -> Tensor<B, D2, K>

Concatenates all tensors into a new one along a new dimension.

§Panics

If the tensors don’t all have the same shape, or if the given dimension is not within the range 0..D2.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let t1 = Tensor::<B, 2>::from_data([[3.0, 4.9, 2.0], [2.0, 1.9, 3.0]], &device);
    let t2 = Tensor::<B, 2>::from_data([[4.0, 5.9, 8.0], [1.4, 5.8, 6.0]], &device);
    let t3 = Tensor::<B, 2>::from_data([[4.0, 5.9, 8.0], [1.4, 5.8, 6.0]], &device);

    // Concatenate the three tensors with shape [2, 3] along a new dimension, 0.
    // [[[3.0, 4.9, 2.0], [2.0, 1.9, 3.0]],
    //  [[4.0, 5.9, 8.0], [1.4, 5.8, 6.0]],
    //  [[4.0, 5.9, 8.0], [1.4, 5.8, 6.0]]]
    // The resulting tensor will have shape [3, 2, 3].
    let stacked = Tensor::stack::<3>(vec![t1, t2, t3], 0);
    println!("{stacked}");
}

pub fn iter_dim(self, dim: usize) -> DimIter<B, D, K>

Iterate over slices of the tensor along a given dimension.

§Panics

If the given dimension is greater than or equal to the tensor rank.

§Returns

A tensor iterator.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;
fn example<B: Backend>() {
  let device = Default::default();
  let tensor = Tensor::<B, 2>::from_data([[3.0, 4.9, 2.0], [2.0, 1.9, 3.0]], &device);
  // Given a 2D tensor with dimensions (2, 3), iterate over slices of tensors along the dimension 0.
  let iter = tensor.iter_dim(0);
  for (i, tensor) in iter.enumerate() {
    println!("Tensor {}: {}", i, tensor);
    // Tensor 0: Tensor { data: [[3.0, 4.9, 2.0]], ... }
    // Tensor 1: Tensor { data: [[2.0, 1.9, 3.0]], ... }
  }
}

pub fn narrow(self, dim: usize, start: usize, length: usize) -> Tensor<B, D, K>

Returns a new tensor with the given dimension narrowed to the given range.

§Panics
  • If the dimension is greater than the number of dimensions of the tensor.
  • If the given range exceeds the number of elements on the given dimension.
§Returns

A new tensor with the given dimension narrowed to the given range.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    // Create a 2D tensor with dimensions [4, 3]
    let tensor = Tensor::<B, 2>::from_data(
        [
            [3.0, 4.9, 2.0],
            [2.0, 1.9, 3.0],
            [6.0, 1.5, 7.0],
            [3.0, 4.9, 9.0],
        ],
        &device,
    );
    // Narrow the tensor along the dimension 0, keeping 3 elements starting from index 1.
    // [[2.0, 1.9, 3.0], [6.0, 1.5, 7.0], [3.0, 4.9, 9.0]]
    // The resulting tensor will have dimensions [3, 3].
    let narrowed = tensor.narrow(0, 1, 3);
    println!("{narrowed}");
}

pub fn chunk(self, chunks: usize, dim: usize) -> Vec<Tensor<B, D, K>>

Attempts to split the tensor into a specified number of chunks along a given dimension. May return fewer chunks than requested if the tensor size along the dimension is not divisible by the number of chunks.

When the given dimension is evenly divisible by the number of chunks, the chunks will be of equal size. Otherwise all chunks will be of equal size except for the last one.

§Panics

If the dimension is greater than the number of dimensions of the tensor.

§Returns

A vector of tensors.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    // Create a 2D tensor with dimensions [4, 3]
    let tensor = Tensor::<B, 2>::from_data(
        [
            [3.0, 4.9, 2.0],
            [2.0, 1.9, 3.0],
            [6.0, 1.5, 7.0],
            [3.0, 4.9, 9.0],
        ],
        &device,
    );
    // Split the tensor along the dimension 1 into 2 chunks.
    // The first chunk will have shape [4, 2]:
    // [[3.0, 4.9], [2.0, 1.9], [6.0, 1.5], [3.0, 4.9]]
    // The second chunk will have shape [4, 1]:
    // [[2.0], [3.0], [7.0], [9.0]]
    let chunks = tensor.chunk(2, 1);
    println!("{chunks:?}");
}

pub fn split(self, split_size: usize, dim: usize) -> Vec<Tensor<B, D, K>>

Splits the tensor into chunks of a specified size along a given dimension. Each chunk is a view of the original tensor.

If the tensor size along the given dimension is not divisible by split_size, then the last chunk will be smaller.

§Panics

If the specified dimension to split along is greater than the number of dimensions of the tensor.

§Returns

A vector of tensors.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    // Create a 1D tensor with 5 elements
    let tensor = Tensor::<B, 1>::from_data([0.0, 1.0, 2.0, 3.0, 4.0], &device);
    // Split the tensor into chunks of size 2 along dimension 0
    let chunks = tensor.split(2, 0);
    // The result is a vector of tensors:
    // [Tensor([0.0, 1.0]), Tensor([2.0, 3.0]), Tensor([4.0])]
    println!("{:?}", chunks);
}

pub fn split_with_sizes( self, split_sizes: Vec<usize>, dim: usize, ) -> Vec<Tensor<B, D, K>>

Splits the tensor into chunks with the specified sizes along a given dimension. Each chunk is a view of the original tensor.

The sizes of the chunks are specified in the split_sizes vector. The sum of the sizes in split_sizes must equal the size of the tensor along the specified dimension.

§Panics

If the specified dimension to split along is greater than the number of dimensions of the tensor, or if the sum of split_sizes does not equal the size of the tensor along dim.

§Returns

A vector of tensors.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    // Create a 1D tensor with 5 elements
    let tensor = Tensor::<B, 1>::from_data([0.0, 1.0, 2.0, 3.0, 4.0], &device);
    // Split the tensor into chunks with sizes [2, 3] along dimension 0
    let chunks = tensor.split_with_sizes(vec![2, 3], 0);
    // The result is a vector of tensors:
    // [Tensor([0.0, 1.0]), Tensor([2.0, 3.0, 4.0])]
    println!("{:?}", chunks);
}

pub fn any(self) -> Tensor<B, 1, Bool>

Tests if any element in the tensor evaluates to True.

§Arguments
  • tensor - The tensor to test. All input tensor types (Float, Int, Bool) are supported.
§Returns

A boolean tensor Tensor<B, 1, Bool> containing a single element, True if any element in the input tensor evaluates to True, False otherwise.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Bool};

fn example<B: Backend>() {
  let device = Default::default();
  let tensor = Tensor::<B, 2, Bool>::from_data([[true, false, true], [false, true, false]], &device);
  let tensor_two = Tensor::<B, 2, Bool>::from_data([[false, false, false], [false, false, false]], &device);

  // Given a 2D tensor with dimensions (2, 3), test if any element in the tensor evaluates to True.
  let any_tensor = tensor.any();
  println!("{}", any_tensor);
  // Tensor { data: [true], ... }

  // Given a 2D tensor with dimensions (2, 3), test if any element in the tensor evaluates to True.
  let any_tensor_two = tensor_two.any();
  println!("{}", any_tensor_two);
  // Tensor { data: [false], ... }
}

pub fn any_dim(self, dim: usize) -> Tensor<B, D, Bool>

Tests if any element in the tensor evaluates to True along a given dimension dim.

§Arguments
  • tensor - The tensor to test. All input tensor types (Float, Int, Bool) are supported.
  • dim - The axis along which to test.
§Returns

A boolean tensor Tensor<B, D, Bool> with the same size as input tensor, except in the dim axis where the size is 1. The elem in the dim axis is True if any element along this dim in the input evaluates to True, False otherwise.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Bool};

fn example<B: Backend>() {
    let device = Default::default();
    let tensor =
        Tensor::<B, 2, Bool>::from_data([[true, false, false], [false, true, false]], &device);
    // Check if any element in the tensor evaluates to True along the dimension 1.
    // [[true], [true]],
    let any_dim = tensor.clone().any_dim(1);
    println!("{any_dim}");
}

pub fn all(self) -> Tensor<B, 1, Bool>

Tests if all elements in the tensor evaluate to True.

§Arguments
  • tensor - The tensor to test. All input tensor types (Float, Int, Bool) are supported.
§Returns

A boolean tensor Tensor<B, 1, Bool> with a single element, True if all elements in the input tensor evaluate to True, False otherwise.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Bool};

fn example<B: Backend>() {
    let device = Default::default();
    let tensor =
        Tensor::<B, 2, Bool>::from_data([[true, false, true], [true, true, true]], &device);
    // Check if all elements in the tensor evaluate to True (which is not the case).
    // [false]
    let all = tensor.all();
    println!("{all}");
}

pub fn all_dim(self, dim: usize) -> Tensor<B, D, Bool>

Tests if all elements in the tensor evaluate to True along a given dimension dim.

§Arguments
  • tensor - The tensor to test. All input tensor types (Float, Int, Bool) are supported.
  • dim - The axis along which to test.
§Returns

A boolean tensor Tensor<B, D, Bool> with the same size as input tensor, except in the dim axis where the size is 1. The elem in the dim axis is True if all elements along this dim in the input evaluate to True, False otherwise.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Bool};

fn example<B: Backend>() {
    let device = Default::default();
    let tensor =
        Tensor::<B, 2, Bool>::from_data([[true, true, false], [true, true, true]], &device);
    // Check if all elements in the tensor evaluate to True along the dimension 1.
    // [[true, true, false]]
    let all_dim = tensor.clone().all_dim(0);
    println!("{all_dim}");
}

pub fn into_scalar(self) -> <K as BasicOps<B>>::Elem

Convert the tensor into a scalar.

§Panics

If the tensor doesn’t contain exactly one element, or if the backend fails to read the tensor data synchronously.

§Returns

The scalar value of the tensor.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 2>::from_data([[3.0]], &device);
    // Convert the tensor with a single element into a scalar.
    let scalar = tensor.into_scalar();
    println!("{scalar}");
}

pub async fn into_scalar_async(self) -> <K as BasicOps<B>>::Elem

Convert the tensor into a scalar.

§Panics

If the tensor doesn’t contain exactly one element.

pub fn expand<const D2: usize, S>(self, shape: S) -> Tensor<B, D2, K>
where S: BroadcastArgs<D, D2>,

Broadcast the tensor to the given shape.

§Arguments
  • shape - The shape to broadcast the tensor to. Can contain -1 for dimensions that should be inferred. The number of elements in the shape must be greater than or equal to the number of dimensions of the tensor.
§Panics

If the tensor cannot be broadcasted to the given shape.

§Returns

A new tensor with the given shape.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    // Create a 2D tensor with dimensions [3, 1]
    let tensor = Tensor::<B, 2>::from_data([[1.], [2.], [3.]], &device);
    // Expand the tensor to a new shape [3, 4]
    // [[1.0, 1.0, 1.0, 1.0], [2.0, 2.0, 2.0, 2.0], [3.0, 3.0, 3.0, 3.0]]
    let expanded = tensor.expand([3, 4]);
    println!("{}", expanded);
}

impl<B, const D: usize> Tensor<B, D, Bool>
where B: Backend,

pub fn from_bool( data: TensorData, device: &<B as Backend>::Device, ) -> Tensor<B, D, Bool>

Create a boolean tensor from data on the given device.

pub fn int(self) -> Tensor<B, D, Int>

Convert the bool tensor into an int tensor.

pub fn float(self) -> Tensor<B, D>

Convert the bool tensor into a float tensor.

pub fn bool_not(self) -> Tensor<B, D, Bool>

Inverts the boolean values element wise.
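
A combined sketch of the boolean conversions above:

use burn_tensor::backend::Backend;
use burn_tensor::{Bool, Tensor};

fn example<B: Backend>() {
    let device = Default::default();
    let mask = Tensor::<B, 2, Bool>::from_data([[true, false], [false, true]], &device);
    // Invert the mask: [[false, true], [true, false]]
    let inverted = mask.clone().bool_not();
    // Cast to int: [[1, 0], [0, 1]], and to float: [[1.0, 0.0], [0.0, 1.0]]
    let as_int = mask.clone().int();
    let as_float = mask.float();
    println!("{inverted}\n{as_int}\n{as_float}");
}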

pub fn nonzero(self) -> Vec<Tensor<B, 1, Int>>

Compute the indices of the elements that are non-zero.

§Returns

A vector of tensors, one for each dimension of the given tensor, containing the indices of the non-zero elements in that dimension.

pub async fn nonzero_async(self) -> Vec<Tensor<B, 1, Int>>

Compute the indices of the elements that are non-zero.

§Returns

A vector of tensors, one for each dimension of the given tensor, containing the indices of the non-zero elements in that dimension.

pub fn argwhere(self) -> Tensor<B, 2, Int>

Compute the indices of the elements that are true, grouped by element.

§Returns

A tensor containing the indices of all non-zero elements of the given tensor. Each row in the result contains the indices of a non-zero element.
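
For instance, a minimal sketch of argwhere on a small mask:

use burn_tensor::backend::Backend;
use burn_tensor::{Bool, Tensor};

fn example<B: Backend>() {
    let device = Default::default();
    let mask = Tensor::<B, 2, Bool>::from_data([[false, true], [true, false]], &device);
    // One row of indices per true element: [[0, 1], [1, 0]]
    let coords = mask.argwhere();
    println!("{coords}");
}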

pub async fn argwhere_async(self) -> Tensor<B, 2, Int>

Compute the indices of the elements that are true, grouped by element.

§Returns

A tensor containing the indices of all non-zero elements of the given tensor. Each row in the result contains the indices of a non-zero element.

pub fn triu_mask<S>( shape: S, offset: i64, device: &<B as Backend>::Device, ) -> Tensor<B, D, Bool>
where S: Into<Shape>,

Creates a mask for the upper triangle of a matrix, which can be used to fill the specified area with a value.

This function generates a boolean tensor representing the mask of the upper triangle of a matrix.

§Arguments
  • shape: The shape of the matrix.
  • offset: The offset from the diagonal, where 0 means the diagonal, and positive values shift towards the upper triangle.
  • device: The device on which the tensor will be allocated.
§Returns

Returns a boolean tensor where false indicates the elements of the matrix that are part of the upper triangle taking into account the specified offset. All other elements are true.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Bool};

fn example<B: Backend>() {
  let mask = Tensor::<B, 2, Bool>::triu_mask([3, 3], 0, &Default::default());
  println!("{mask}");
  // [[false, false, false],
  //  [true, false, false],
  //  [true, true, false]]
}

pub fn tril_mask<S>( shape: S, offset: i64, device: &<B as Backend>::Device, ) -> Tensor<B, D, Bool>
where S: Into<Shape>,

Creates a mask for the lower triangle of a matrix, which can be used to fill the specified area with a value.

This function generates a boolean tensor representing the mask of the lower triangle of a matrix.

§Arguments
  • shape: The shape of the matrix.
  • offset: The offset from the diagonal, where 0 means the diagonal, and negative values shift towards the lower triangle.
  • device: The device on which the tensor will be allocated.
§Returns

Returns a boolean tensor where false indicates the elements of the matrix that are part of the lower triangle taking into account the specified offset. All other elements are true.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Bool};

fn example<B: Backend>() {
  let mask = Tensor::<B, 2, Bool>::tril_mask([3, 3], 0, &Default::default());
  println!("{mask}");
  // [[false, true, true],
  //  [false, false, true],
  //  [false, false, false]]
}

pub fn diag_mask<S>( shape: S, offset: i64, device: &<B as Backend>::Device, ) -> Tensor<B, D, Bool>
where S: Into<Shape>,

Creates a mask for the diagonal of a matrix, which can be used to fill the specified area with a value.

This function generates a boolean tensor representing the mask of the diagonal of a matrix.

§Arguments
  • shape: The shape of the matrix.
  • offset: The offset from the diagonal, where 0 means the diagonal.
  • device: The device on which the tensor will be allocated.
§Returns

Returns a boolean tensor where false indicates the elements of the matrix that are part of the diagonal. All other elements are true.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Bool};

fn example<B: Backend>() {
  let mask = Tensor::<B, 2, Bool>::diag_mask([3, 3], 0, &Default::default());
  println!("{mask}");
  // [[false, true, true],
  //  [true, false, true],
  //  [true, true, false]]
}

impl<const D: usize, B> Tensor<B, D>
where B: Backend,

pub fn inplace<F>(&mut self, func: F)
where F: FnOnce(Tensor<B, D>) -> Tensor<B, D>,

Executes an operation on the tensor and modifies its value.

§Notes

This won’t necessarily reuse the same tensor data/buffer, but it should if there is no other reference pointing to the same tensor.

Wrapping operations with inplace is not an optimization, it’s mainly there if you want to mutate a tensor by using owned operations. A plausible usage would be to update the weights of a mutable model reference.
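
A minimal sketch of updating a tensor behind a mutable reference:

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let mut tensor = Tensor::<B, 1>::from_floats([1.0, 2.0], &device);
    // Apply an owned operation and store the result back in place.
    tensor.inplace(|t| t.mul_scalar(2.0));
    println!("{tensor}"); // [2.0, 4.0]
}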

pub fn exp(self) -> Tensor<B, D>

Applies element wise exponential operation.

y = e^x

pub fn log(self) -> Tensor<B, D>

Applies element wise natural log operation ln.

y = log(x)

pub fn log1p(self) -> Tensor<B, D>

Applies the natural logarithm of one plus the input tensor, element-wise.

y = log(x+1)

pub fn erf(self) -> Tensor<B, D>

Applies the error function element wise.

y = erf(x)

pub fn recip(self) -> Tensor<B, D>

Applies element wise reciprocal operation.

pub fn sqrt(self) -> Tensor<B, D>

Applies element wise square root operation.

pub fn cos(self) -> Tensor<B, D>

Applies element wise cosine operation.

pub fn sin(self) -> Tensor<B, D>

Applies element wise sine operation.

pub fn tanh(self) -> Tensor<B, D>

Applies element wise hyperbolic tangent operation.

pub fn round(self) -> Tensor<B, D>

Applies element wise round operation.

This function implements the round half to even strategy, with halfway cases rounded to the nearest even integer value.
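
To make the rounding strategy concrete, a small sketch:

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 1>::from_floats([0.5, 1.5, 2.5, 3.5], &device);
    // Halfway cases go to the nearest even integer: [0.0, 2.0, 2.0, 4.0]
    let rounded = tensor.round();
    println!("{rounded}");
}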

pub fn floor(self) -> Tensor<B, D>

Applies element wise floor operation.

pub fn ceil(self) -> Tensor<B, D>

Applies element wise ceil operation.

pub fn from_floats<A>( floats: A, device: &<B as Backend>::Device, ) -> Tensor<B, D>
where A: Into<TensorData>,

Create a tensor from floats (f32) on a given device.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = B::Device::default();
    let _ = Tensor::<B, 1>::from_floats([1.0, 2.0], &device);
    let _ = Tensor::<B, 2>::from_floats([[1.0, 2.0], [3.0, 4.0]], &device);
}

pub fn int(self) -> Tensor<B, D, Int>

Returns a new tensor with the same shape and device as the current tensor and the data cast to Integer.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let float_tensor = Tensor::<B, 1>::from_floats([1.0, 2.0], &device);
    let int_tensor = float_tensor.int();
}

pub fn random_like(&self, distribution: Distribution) -> Tensor<B, D>

Returns a new tensor with the same shape and device as the current tensor, filled with random values sampled from the given distribution.

pub fn one_hot( index: usize, num_classes: usize, device: &<B as Backend>::Device, ) -> Tensor<B, D>

Create a one hot tensor.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let one_hot = Tensor::<B, 1>::one_hot(2, 10, &device);
    println!("{}", one_hot.to_data());
    // [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
}

pub fn matmul(self, other: Tensor<B, D>) -> Tensor<B, D>

Applies the matrix multiplication operation.

C = AB

§Panics

If the two tensors don’t have a compatible shape.
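
For illustration, a small 2×2 sketch (expected values follow from the usual matrix product):

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let a = Tensor::<B, 2>::from_floats([[1.0, 2.0], [3.0, 4.0]], &device);
    let b = Tensor::<B, 2>::from_floats([[5.0, 6.0], [7.0, 8.0]], &device);
    // [[19.0, 22.0], [43.0, 50.0]]
    let c = a.matmul(b);
    println!("{c}");
}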

pub fn var(self, dim: usize) -> Tensor<B, D>

Calculate the variance along the given dimension.

pub fn var_bias(self, dim: usize) -> Tensor<B, D>

Calculate the variance along the given dimension without applying Bessel’s correction.

pub fn var_mean(self, dim: usize) -> (Tensor<B, D>, Tensor<B, D>)

Calculate the variance along the given dimension and also return the mean.

pub fn var_mean_bias(self, dim: usize) -> (Tensor<B, D>, Tensor<B, D>)

Calculate the variance along the given dimension without applying Bessel’s correction, and also return the mean.
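
A brief sketch of the variance helpers (expected values follow the standard sample-variance formula):

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 2>::from_floats([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], &device);
    // Bessel-corrected variance along dim 1: [[1.0], [1.0]]
    let var = tensor.clone().var(1);
    // Variance and mean in one call: [[1.0], [1.0]] and [[2.0], [5.0]]
    let (var, mean) = tensor.var_mean(1);
    println!("{var}\n{mean}");
}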

pub fn cast<F>(self, dtype: F) -> Tensor<B, D>
where F: Into<FloatDType>,

Converts a tensor to the specified floating point data type.

§Warning

Most backends don’t have automatic type promotion at this time, so make sure that all tensors have the same floating point precision data type for operations with multiple input tensors (e.g., binary ops).

pub fn detach(self) -> Tensor<B, D>

Detach the current tensor from the autodiff graph.

This function does nothing when autodiff is not enabled. This can be used in batchers or elsewhere to ensure that previous operations are not considered in the autodiff graph.

pub fn require_grad(self) -> Tensor<B, D>

Mark the tensor to keep gradients during the backward pass.

This function does nothing when autodiff is not enabled.

pub fn is_require_grad(&self) -> bool

Returns true if the tensor requires gradients during the backward pass.

pub fn set_require_grad(self, require_grad: bool) -> Tensor<B, D>

Mark the tensor as tracked or untracked depending on the require_grad argument. When tracked, the gradients will be available after the backward pass.

This function does nothing when autodiff is not enabled.

pub fn cov(self, dim: usize, correction_factor: usize) -> Tensor<B, D>

Calculate the covariance matrix between different entries along a given dimension.

§Arguments
  • dim - The dimension along which the covariance is computed.
  • correction_factor - Is usually 1 for samples and 0 for population.
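
A minimal sketch, assuming the usual convention that entries along dim are the observations (the expected values below follow from the standard covariance formula, not from the original docs):

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 2>::from_floats([[1.0, 2.0], [3.0, 4.0]], &device);
    // With correction factor 1 (samples): [[0.5, 0.5], [0.5, 0.5]]
    let cov = tensor.cov(1, 1);
    println!("{cov}");
}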

pub fn quantize( self, scheme: &QuantizationScheme, qparams: QParams<Tensor<B, 1>, Tensor<B, 1, Int>>, ) -> Tensor<B, D>

Convert the tensor to a lower precision data type based on the quantization scheme.

§Arguments
  • scheme - The quantization scheme.
  • qparams - The pre-computed quantization parameters.
§Returns

The quantized tensor.

pub fn quantize_dynamic(self, scheme: &QuantizationScheme) -> Tensor<B, D>

Dynamically convert the tensor to a lower precision data type based on the quantization scheme.

§Arguments
  • scheme - The quantization scheme.
§Returns

The quantized tensor.

pub fn dequantize(self) -> Tensor<B, D>

Convert the tensor back to a higher precision data type.

If the tensor is not quantized, its value is simply returned.

§Returns

The dequantized tensor.


impl<B> Tensor<B, 1, Int>
where B: Backend,

pub fn arange( range: Range<i64>, device: &<B as Backend>::Device, ) -> Tensor<B, 1, Int>

Returns a new integer tensor on the specified device.

§Arguments
  • range - The range of values to generate.
  • device - The device to create the tensor on.

pub fn arange_step( range: Range<i64>, step: usize, device: &<B as Backend>::Device, ) -> Tensor<B, 1, Int>

Returns a new integer tensor on the specified device, with values spaced by the given step.

§Arguments
  • range - The range of values to generate.
  • step - The step between each value.
  • device - The device to create the tensor on.
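
A combined sketch of arange and arange_step:

use burn_tensor::backend::Backend;
use burn_tensor::{Int, Tensor};

fn example<B: Backend>() {
    let device = Default::default();
    // [0, 1, 2, 3, 4]
    let full = Tensor::<B, 1, Int>::arange(0..5, &device);
    // Every second value: [0, 2, 4]
    let stepped = Tensor::<B, 1, Int>::arange_step(0..5, 2, &device);
    println!("{full}\n{stepped}");
}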

pub fn one_hot(self, num_classes: usize) -> Tensor<B, 2, Int>

Create a one hot tensor from an index tensor.

§Arguments
  • num_classes - The number of classes to use in encoding.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Int};

fn example<B: Backend>() {
    let device = B::Device::default();
    let indices: Tensor<B, 1, Int> = Tensor::from_ints([0, 1, 2, 3], &device);
    let one_hot = indices.one_hot(4);
    println!("{}", one_hot.to_data());
    // [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
}

impl<const D: usize, B> Tensor<B, D, Int>
where B: Backend,

pub fn from_ints<A>( ints: A, device: &<B as Backend>::Device, ) -> Tensor<B, D, Int>
where A: Into<TensorData>,

Create a tensor from integers (i32), placing it on a given device.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Int};

fn example<B: Backend>() {
    let device = B::Device::default();
    let _x: Tensor<B, 1, Int> = Tensor::from_ints([1, 2], &device);
    let _y: Tensor<B, 2, Int> = Tensor::from_ints([[1, 2], [3, 4]], &device);
}

pub fn float(self) -> Tensor<B, D>

Returns a new tensor with the same shape and device as the current tensor and the data cast to Float.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Int, Tensor};

fn example<B: Backend>() {
    let device = Default::default();
    let int_tensor = Tensor::<B, 1, Int>::arange(0..5, &device);
    let float_tensor = int_tensor.float();
}

pub fn cartesian_grid<S, const D2: usize>( shape: S, device: &<B as Backend>::Device, ) -> Tensor<B, D2, Int>
where S: Into<Shape>,

Generates a cartesian grid for the given tensor shape on the specified device. The generated tensor is of dimension D2 = D + 1, where each element at dimension D contains the cartesian grid coordinates for that element.

§Arguments
  • shape - The shape specifying the dimensions of the tensor.
  • device - The device to create the tensor on.
§Panics

Panics if D2 is not equal to D+1.

§Examples
use burn_tensor::Int;
use burn_tensor::{backend::Backend, Shape, Tensor};

fn example<B: Backend>() {
    let device = Default::default();
    let result: Tensor<B, 3, _> = Tensor::<B, 2, Int>::cartesian_grid([2, 3], &device);
    println!("{}", result);
}

impl<B, const D: usize, K> Tensor<B, D, K>
where B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

pub fn add(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>

Applies element wise addition operation.

y = x2 + x1

§Arguments
  • other - The tensor to add.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor2 = Tensor::<B, 2>::from_data([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
   let tensor = tensor1 + tensor2;
   println!("{tensor}");
   // [[3.0, 1.0, 7.0], [6.0, 11.0, 9.0]]
}

pub fn add_scalar<E>(self, other: E) -> Tensor<B, D, K>

Applies element wise addition operation with a scalar.

y = x + s

§Arguments
  • other - The scalar to add, element wise.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
  let device = B::Device::default();
  let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
  let scalar = 2.0;
  let tensor = tensor + scalar;
  println!("{tensor}");
  // [[3.0, 0.0, 5.0], [7.0, 11.0, 8.0]]
}

pub fn sub(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>

Applies element wise subtraction operation.

y = x2 - x1

§Arguments
  • other - The tensor to subtract.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
  let device = B::Device::default();
  let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
  let tensor2 = Tensor::<B, 2>::from_data([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
  let tensor = tensor1 - tensor2;
  println!("{tensor}");
  // [[-1.0, -5.0, -1.0], [4.0, 7.0, 3.0]]
}

pub fn sub_scalar<E>(self, other: E) -> Tensor<B, D, K>

Applies element wise subtraction operation with a scalar.

y = x - s

§Arguments
  • other - The scalar to subtract, element wise.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let scalar = 2.0;
   let tensor = tensor - scalar;
   println!("{tensor}");
   // [[-1.0, -4.0, 1.0], [3.0, 7.0, 4.0]]
}

pub fn div(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>

Applies element wise division operation.

y = x2 / x1

§Arguments
  • other - The tensor to divide by.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor2 = Tensor::<B, 2>::from_data([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
   let tensor = tensor1 / tensor2;
   println!("{tensor}");
   // [[0.5, -0.6666667, 0.75], [5.0, 4.5, 2.0]]
}

pub fn div_scalar<E>(self, other: E) -> Tensor<B, D, K>

Applies element wise division operation with a scalar.

y = x / s

§Arguments
  • other - The scalar to divide, element wise.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let scalar = 2.0;
   let tensor = tensor / scalar;
   println!("{tensor}");
   // [[0.5, -1.0, 1.5], [2.5, 4.5, 3.0]]
}

pub fn remainder(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>

Applies the remainder operation element wise with another tensor.

y = x2 % x1
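
For illustration, a sketch with non-negative values (so the sign convention of the remainder doesn’t matter):

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
    let device = B::Device::default();
    let tensor1 = Tensor::<B, 2>::from_data([[1.0, 2.0, 3.0], [5.0, 9.0, 6.0]], &device);
    let tensor2 = Tensor::<B, 2>::from_data([[2.0, 3.0, 4.0], [2.0, 2.0, 4.0]], &device);
    // [[1.0, 2.0, 3.0], [1.0, 1.0, 2.0]]
    let tensor = tensor1.remainder(tensor2);
    println!("{tensor}");
}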

pub fn remainder_scalar<E>(self, other: E) -> Tensor<B, D, K>

Applies the remainder operation element wise with a scalar.

y = x % s

§Arguments
  • other - The scalar divisor for the remainder, element wise.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let scalar = 2.0;
   let tensor = tensor1 % scalar;
   println!("{tensor}");
   // [[1.0, 0.0, 1.0], [1.0, 1.0, 0.0]]
}

pub fn mul(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>

Applies element wise multiplication operation.

y = x2 * x1

§Arguments
  • other - The tensor to multiply.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor2 = Tensor::<B, 2>::from_data([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
   let tensor = tensor1 * tensor2;
   println!("{tensor}");
   // [[2.0, -6.0, 12.0], [5.0, 18.0, 18.0]]
}

pub fn mul_scalar<E>(self, other: E) -> Tensor<B, D, K>

Applies element wise multiplication operation with a scalar.

y = x * s

§Arguments
  • other - The scalar to multiply, element wise.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let scalar = 2.0;
   let tensor = tensor * scalar;
   println!("{tensor}");
   // [[2.0, -4.0, 6.0], [10.0, 18.0, 12.0]]
}

pub fn neg(self) -> Tensor<B, D, K>

Switch sign of each element in the tensor.

y = -x

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor = -tensor;
   println!("{tensor}");
   // [[-1.0, 2.0, -3.0], [-5.0, -9.0, -6.0]]
}

pub fn sign(self) -> Tensor<B, D, K>

Returns the signs of the elements of the input tensor.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor = tensor.sign();
   println!("{tensor}");
   // [[1.0, -1.0, 1.0], [1.0, 1.0, 1.0]]
}

pub fn zeros<S>(shape: S, device: &<B as Backend>::Device) -> Tensor<B, D, K>
where S: Into<Shape>,

Create a tensor of the given shape where each element is zero.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::zeros(Shape::new([2, 3]), &device);
   println!("{tensor}");
   // [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
}

pub fn zeros_like(&self) -> Tensor<B, D, K>

Returns a new tensor with the same shape and device as the current tensor filled with zeros.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
  let device = B::Device::default();
  let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
  let tensor = tensor.zeros_like();
  println!("{tensor}");
  // [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
}

pub fn ones<S>(shape: S, device: &<B as Backend>::Device) -> Tensor<B, D, K>
where S: Into<Shape>,

Create a tensor of the given shape where each element is one.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
  let device = B::Device::default();
  let tensor = Tensor::<B, 2>::ones(Shape::new([2, 3]), &device);
  println!("{tensor}");
  // [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]
}

pub fn ones_like(&self) -> Tensor<B, D, K>

Returns a new tensor with the same shape and device as the current tensor filled with ones.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor = tensor.ones_like();
   println!("{tensor}");
   // [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]]
}

pub fn full<S, E>( shape: S, fill_value: E, device: &<B as Backend>::Device, ) -> Tensor<B, D, K>

Create a tensor of the given shape where each element is equal to the provided value.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
  let device = B::Device::default();
  let tensor = Tensor::<B, 2>::full(Shape::new([2, 3]), 5.0, &device);
  println!("{tensor}");
  // [[5.0, 5.0, 5.0], [5.0, 5.0, 5.0]]
}

pub fn full_like<E>(&self, fill_value: E) -> Tensor<B, D, K>

Returns a new tensor with the same shape and device as the current tensor filled with the provided value.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor = tensor.full_like(5.0);
   println!("{tensor}");
   // [[5.0, 5.0, 5.0], [5.0, 5.0, 5.0]]
}

pub fn mean(self) -> Tensor<B, 1, K>

Aggregate all elements in the tensor with the mean operation.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor = tensor.mean();
   println!("{tensor}");
   // [3.6666667]
}

pub fn sum(self) -> Tensor<B, 1, K>

Aggregate all elements in the tensor with the sum operation.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
  let device = B::Device::default();
  let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
  let tensor = tensor.sum();
  println!("{tensor}");
  // [22.0]
}

pub fn mean_dim(self, dim: usize) -> Tensor<B, D, K>

Aggregate all elements along the given dimension or axis in the tensor with the mean operation.

§Arguments
  • dim - The dimension or axis along which to aggregate the elements.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
  let device = B::Device::default();
  let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
  let mean_dim0 = tensor.clone().mean_dim(0);
  println!("{mean_dim0}");
  // [[3.0, 3.5, 4.5]]
  let mean_dim1 = tensor.mean_dim(1);
  println!("{mean_dim1}");
  // [[0.6666667], [6.6666665]]
}

pub fn sum_dim(self, dim: usize) -> Tensor<B, D, K>

Aggregate all elements along the given dimension or axis in the tensor with the sum operation.

§Arguments
  • dim - The dimension or axis along which to aggregate the elements.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let sum_dim0 = tensor.clone().sum_dim(0);
   println!("{sum_dim0}");
   // [[6.0, 7.0, 9.0]]
   let sum_dim1 = tensor.sum_dim(1);
   println!("{sum_dim1}");
   // [[2.0], [20.0]]
}

pub fn prod(self) -> Tensor<B, 1, K>

Aggregate all elements in the tensor with the product operation.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor = tensor.prod();
   println!("{tensor}");
   // [-1620.0]
}

pub fn prod_dim(self, dim: usize) -> Tensor<B, D, K>

Aggregate all elements along the given dimension or axis in the tensor with the product operation.

§Arguments
  • dim - The dimension or axis along which to aggregate the elements.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let prod_dim0 = tensor.clone().prod_dim(0);
   println!("{prod_dim0}");
   // [[5.0, -18.0, 18.0]]
   let prod_dim1 = tensor.prod_dim(1);
   println!("{prod_dim1}");
   // [[-6.0], [270.0]]
}

pub fn equal_elem<E>(self, other: E) -> Tensor<B, D, Bool>
where E: Element,

Applies element wise equal comparison and returns a boolean tensor.

§Arguments
  • other - The element to compare.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor = tensor.equal_elem(3.0);
   println!("{tensor}");
   // [[false, false, true], [false, false, false]]
}

pub fn not_equal_elem<E>(self, other: E) -> Tensor<B, D, Bool>
where E: Element,

Applies element wise non-equality comparison and returns a boolean tensor.

§Arguments
  • other - The element to compare.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor = tensor.not_equal_elem(3.0);
   println!("{tensor}");
   // [[true, true, false], [true, true, true]]
}

pub fn greater(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>

Applies element wise greater comparison and returns a boolean tensor.

§Panics

If the two tensors don’t have the same shape.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
  let device = B::Device::default();
  let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
  let tensor2 = Tensor::<B, 2>::from_data([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
  let tensor = tensor1.greater(tensor2);
  println!("{tensor}");
  // [[false, false, false], [true, true, true]]
}

pub fn greater_equal(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>

Applies element wise greater-equal comparison and returns a boolean tensor.

§Panics

If the two tensors don’t have the same shape.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor2 = Tensor::<B, 2>::from_data([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
   let tensor = tensor1.greater_equal(tensor2);
   println!("{tensor}");
   // [[false, false, false], [true, true, true]]
}

pub fn lower(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>

Applies element wise lower comparison and returns a boolean tensor.

§Panics

If the two tensors don’t have the same shape.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor2 = Tensor::<B, 2>::from_data([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
   let tensor = tensor1.lower(tensor2);
   println!("{tensor}");
   // [[true, true, true], [false, false, false]]
}

pub fn lower_equal(self, other: Tensor<B, D, K>) -> Tensor<B, D, Bool>

Applies element wise lower-equal comparison and returns a boolean tensor.

§Panics

If the two tensors don’t have the same shape.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor2 = Tensor::<B, 2>::from_data([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
   let tensor = tensor1.lower_equal(tensor2);
   println!("{tensor}");
   // [[true, true, true], [false, false, false]]
}

pub fn greater_elem<E>(self, other: E) -> Tensor<B, D, Bool>

Applies element wise greater comparison and returns a boolean tensor.

§Arguments
  • other - The element to compare.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor = tensor.greater_elem(3.0);
   println!("{tensor}");
   // [[false, false, false], [true, true, true]]
}

pub fn greater_equal_elem<E>(self, other: E) -> Tensor<B, D, Bool>

Applies element wise greater-equal comparison and returns a boolean tensor.

§Arguments
  • other - The element to compare.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor = tensor.greater_equal_elem(3.0);
   println!("{tensor}");
   // [[false, false, true], [true, true, true]]
}

pub fn lower_elem<E>(self, other: E) -> Tensor<B, D, Bool>

Applies element wise lower comparison and returns a boolean tensor.

§Arguments
  • other - The element to compare.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
    let device = B::Device::default();
    let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
    let tensor = tensor.lower_elem(3.0);
    println!("{tensor}");
    // [[true, true, false], [false, false, false]]
}

pub fn lower_equal_elem<E>(self, other: E) -> Tensor<B, D, Bool>

Applies element wise lower-equal comparison and returns a boolean tensor.

§Arguments
  • other - The element to compare.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor = tensor.lower_equal_elem(3.0);
   println!("{tensor}");
   // [[true, true, true], [false, false, false]]
}

pub fn mask_where( self, mask: Tensor<B, D, Bool>, value: Tensor<B, D, K>, ) -> Tensor<B, D, K>

Update the given tensor with the value tensor where the mask is true.

This is similar to mask_fill, however the value is a tensor instead of a scalar.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape, Bool};

fn example<B: Backend>() {
  let device = B::Device::default();
  let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
  let mask = Tensor::<B, 2, Bool>::from_data([[true, false, true], [false, true, false]], &device);
  let value = Tensor::<B, 2>::from_data([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
  let tensor = tensor.mask_where(mask, value);
  println!("{tensor}");
  // [[2.0, -2.0, 4.0], [5.0, 2.0, 6.0]]
}

pub fn mask_fill<E>(self, mask: Tensor<B, D, Bool>, value: E) -> Tensor<B, D, K>

Update the given tensor with the value where the mask is true.

This is similar to mask_where, however the value is a scalar instead of a tensor.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape, Bool};

fn example<B: Backend>() {
  let device = B::Device::default();
  let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
  let mask = Tensor::<B, 2, Bool>::from_data([[true, false, true], [false, true, false]], &device);
  let tensor = tensor.mask_fill(mask, 3.0);
  println!("{tensor}");
  // [[3.0, -2.0, 3.0], [5.0, 3.0, 6.0]]
}

pub fn gather(self, dim: usize, indices: Tensor<B, D, Int>) -> Tensor<B, D, K>

Gather tensor elements corresponding to the given indices from the specified dim.

Example using a 3D tensor:

output[i, j, k] = input[indices[i, j, k], j, k]; // dim = 0
output[i, j, k] = input[i, indices[i, j, k], k]; // dim = 1
output[i, j, k] = input[i, j, indices[i, j, k]]; // dim = 2

§Notes

The index tensor should have the same shape as the original tensor except for the dim specified.

§Warning

Not all backends have runtime bound checks for the indices, so make sure they are valid. Otherwise, out-of-bounds indices could lead to unexpected results instead of panicking.
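
§Example

A minimal sketch, added here for illustration (not part of the upstream docs): gathering along dim 1 of a 2D tensor, with the expected output following the indexing rule above.

use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Int};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   // Indices have the same shape as the input; each entry selects a column.
   let indices = Tensor::<B, 2, Int>::from_data([[0, 0, 1], [2, 1, 0]], &device);
   let tensor = tensor.gather(1, indices);
   println!("{tensor}");
   // output[i, j] = input[i, indices[i, j]]
   // [[1.0, 1.0, -2.0], [6.0, 9.0, 5.0]]
}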

pub fn scatter( self, dim: usize, indices: Tensor<B, D, Int>, values: Tensor<B, D, K>, ) -> Tensor<B, D, K>

Assign the gathered elements corresponding to the given indices along the specified dimension from the value tensor to the original tensor using sum reduction.

Example using a 3D tensor:

input[indices[i, j, k], j, k] += values[i, j, k]; // dim = 0
input[i, indices[i, j, k], k] += values[i, j, k]; // dim = 1
input[i, j, indices[i, j, k]] += values[i, j, k]; // dim = 2

§Notes

The index tensor should have the same shape as the original tensor except for the specified dimension. The value and index tensors should have the same shape.

Other references to the input tensor will not be modified by this operation.

§Warning

Not all backends have runtime bound checks for the indices, so make sure they are valid. Otherwise, out-of-bounds indices could lead to unexpected results instead of panicking.
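
§Example

A minimal sketch, added here for illustration (not part of the upstream docs): scattering along dim 1 with sum reduction; the printed result follows the accumulation rule above.

use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Int};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], &device);
   let values = Tensor::<B, 2>::from_data([[10.0, 20.0], [30.0, 40.0]], &device);
   // Row 0 targets column 0 twice, so both values accumulate there.
   let indices = Tensor::<B, 2, Int>::from_data([[0, 0], [1, 2]], &device);
   let tensor = tensor.scatter(1, indices, values);
   println!("{tensor}");
   // [[31.0, 2.0, 3.0], [4.0, 35.0, 46.0]]
}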

pub fn select(self, dim: usize, indices: Tensor<B, 1, Int>) -> Tensor<B, D, K>

Select the tensor elements along the given dimension corresponding to the given indices.

Example using a 3D tensor:

output[i, j, k] = input[indices[i], j, k]; // dim = 0
output[i, j, k] = input[i, indices[j], k]; // dim = 1
output[i, j, k] = input[i, j, indices[k]]; // dim = 2

§Warning

Not all backends have runtime bound checks for the indices, so make sure they are valid. Otherwise, out-of-bounds indices could lead to unexpected results instead of panicking.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape, Int};

fn example<B: Backend>() {
  let device = B::Device::default();
  let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
  let indices = Tensor::<B, 1, Int>::from_data([0], &device);
  let tensor = tensor.select(0, indices);
  println!("{tensor}");
  // [[1.0, -2.0, 3.0]]
}

pub fn select_assign( self, dim: usize, indices: Tensor<B, 1, Int>, values: Tensor<B, D, K>, ) -> Tensor<B, D, K>

Assign the selected elements along the given dimension corresponding to the given indices from the value tensor to the original tensor using sum reduction.

Example using a 3D tensor:

input[indices[i], j, k] += values[i, j, k]; // dim = 0
input[i, indices[j], k] += values[i, j, k]; // dim = 1
input[i, j, indices[k]] += values[i, j, k]; // dim = 2

§Warning

Not all backends have runtime bound checks for the indices, so make sure they are valid. Otherwise, out-of-bounds indices could lead to unexpected results instead of panicking.
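
§Example

A minimal sketch, added here for illustration (not part of the upstream docs): adding a single row of values to row 1 of the tensor, following the accumulation rule above.

use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Int};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], &device);
   let values = Tensor::<B, 2>::from_data([[10.0, 20.0, 30.0]], &device);
   // The single index targets row 1: input[1, j] += values[0, j].
   let indices = Tensor::<B, 1, Int>::from_data([1], &device);
   let tensor = tensor.select_assign(0, indices, values);
   println!("{tensor}");
   // [[1.0, 2.0, 3.0], [14.0, 25.0, 36.0]]
}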

pub fn argmax(self, dim: usize) -> Tensor<B, D, Int>

Applies the argmax function along the given dimension and returns an integer tensor.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
    let device = B::Device::default();
    let tensor = Tensor::<B, 3>::ones(Shape::new([2, 3, 3]), &device);
    let tensor = tensor.argmax(1);
    println!("{:?}", tensor.shape());
    // Shape { dims: [2, 1, 3] }
}

pub fn max(self) -> Tensor<B, 1, K>

Find the maximum value.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
  let device = B::Device::default();
  let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
  let tensor = tensor.max();
  println!("{tensor}");
  // [9.0]
}

pub fn max_dim(self, dim: usize) -> Tensor<B, D, K>

Find the maximum value along the given dimension.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
  let device = B::Device::default();
  let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
  let tensor = tensor.max_dim(0);
  println!("{tensor}");
  // [[5.0, 9.0, 6.0]]
}

pub fn max_dim_with_indices( self, dim: usize, ) -> (Tensor<B, D, K>, Tensor<B, D, Int>)

Find the maximum value along the given dimension.

Also returns the indices.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let (tensor, index) = tensor.max_dim_with_indices(0);
   println!("{tensor}");
   // [[5.0, 9.0, 6.0]]
   println!("{index}");
   // [[1, 1, 1]]
}

pub fn max_pair(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>

Finds the maximum pair wise values with another Tensor.

§Arguments
  • other - Other tensor to find maximum elements with
§Returns

A tensor with the same shape as the input tensors containing the maximum value found in the input tensors.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor2 = Tensor::<B, 2>::from_data([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
   let tensor = tensor1.max_pair(tensor2);
   println!("{tensor}");
   // [[2.0, 3.0, 4.0], [5.0, 9.0, 6.0]]
}

pub fn argmin(self, dim: usize) -> Tensor<B, D, Int>

Applies the argmin function along the given dimension and returns an integer tensor.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
    let device = Default::default();
    let tensor = Tensor::<B, 3>::ones(Shape::new([2, 3, 3]), &device);
    let tensor = tensor.argmin(1);
    println!("{:?}", tensor.shape());
    // Shape { dims: [2, 1, 3] }
}

pub fn min(self) -> Tensor<B, 1, K>

Find the minimum value.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor = tensor.min();
   println!("{tensor}");
   // [-2.0]
}

pub fn min_dim(self, dim: usize) -> Tensor<B, D, K>

Find the minimum value along the given dimension.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor = tensor.min_dim(0);
   println!("{tensor}");
   // [[1.0, -2.0, 3.0]]
}

pub fn min_dim_with_indices( self, dim: usize, ) -> (Tensor<B, D, K>, Tensor<B, D, Int>)

Find the minimum value along the given dimension.

Also returns the indices.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[7.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let (tensor, index) = tensor.min_dim_with_indices(0);
   println!("{tensor}");
   // [[5.0, -2.0, 3.0]]
   println!("{index}");
   // [[1, 0, 0]]
}

pub fn min_pair(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>

Finds the minimum pair wise values with another Tensor.

§Arguments
  • other - Other tensor to find minimum elements with
§Returns

A tensor with the same shape as the input tensors containing the minimum value found between each element of the two source tensors.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor2 = Tensor::<B, 2>::from_data([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
   let tensor = tensor1.min_pair(tensor2);
   println!("{tensor}");
   // [[1.0, -2.0, 3.0], [1.0, 2.0, 3.0]]
}

pub fn clamp<E>(self, min: E, max: E) -> Tensor<B, D, K>

Clamp the tensor between the given min and max values.

§Arguments
  • min - The minimum value.
  • max - The maximum value.
§Returns

A new tensor with the values clamped between the given min and max values.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Int, Tensor};

fn example<B: Backend>() {
  let device = Default::default();
  let tensor = Tensor::<B, 2, Int>::from_ints(
      [
          [1, 2, 3],
          [4, 5, 6],
          [7, 8, 9]
      ],
      &device,
  );
  let tensor = tensor.clamp(2, 6);
  println!("{tensor}");
  // [[2, 2, 3], [4, 5, 6], [6, 6, 6]]
}

pub fn clamp_min<E>(self, min: E) -> Tensor<B, D, K>

Clamps each element of the tensor to be at least the given minimum value.

§Arguments
  • min - The minimum value.
§Returns

A new tensor with all values clamped to be no less than the given min value.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Int, Tensor};

fn example<B: Backend>() {
   let device = Default::default();
   let tensor = Tensor::<B, 2, Int>::from_ints(
       [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
       &device,
   );
   let tensor = tensor.clamp_min(4);
   println!("{tensor}");
   // [[4, 4, 4], [4, 5, 6], [7, 8, 9]]
}

pub fn clamp_max<E>(self, max: E) -> Tensor<B, D, K>

Clamps each element of the tensor to be at most the given maximum value.

§Arguments
  • max - The maximum value.
§Returns

A new tensor with all values clamped to be no greater than the given max value.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Int, Tensor};

fn example<B: Backend>() {
   let device = Default::default();
   let tensor = Tensor::<B, 2, Int>::from_ints(
       [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
       &device,
   );
   let tensor = tensor.clamp_max(5);
   println!("{tensor}");
   // [[1, 2, 3], [4, 5, 5], [5, 5, 5]]
}

pub fn abs(self) -> Tensor<B, D, K>

Applies element wise absolute value operation.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Int, Tensor};

fn example<B: Backend>() {
  let device = Default::default();
  let tensor = Tensor::<B, 2, Int>::from_ints([[1, -2, 3], [4, -5, 6], [7, -8, 9]], &device);
  let tensor = tensor.abs();
  println!("{tensor}");
  // [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
}

pub fn triu(self, diagonal: i64) -> Tensor<B, D, K>

Returns the upper triangular part of a matrix (2-D tensor) or batch of matrices; the other elements of the result are set to 0.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Int, Tensor};

fn example<B: Backend>() {
   let device = Default::default();
   let tensor = Tensor::<B, 2, Int>::from_ints(
       [
         [1, 2, 3],
         [4, 5, 6],
         [7, 8, 9]
       ],
       &device
   );
   let tensor = tensor.triu(1);
   println!("{tensor}");
   // [
   //   [0, 2, 3],
   //   [0, 0, 6],
   //   [0, 0, 0]
   // ]
}

pub fn tril(self, diagonal: i64) -> Tensor<B, D, K>

Returns the lower triangular part of a matrix (2-D tensor) or batch of matrices; the other elements of the result are set to 0.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Int, Tensor};

fn example<B: Backend>() {
   let device = Default::default();
   let tensor = Tensor::<B, 2, Int>::from_ints(
       [
         [1, 2, 3],
         [4, 5, 6],
         [7, 8, 9]
       ],
       &device
   );

   let tensor = tensor.tril(-1);
   println!("{tensor}");
   // [
   //   [0, 0, 0],
   //   [4, 0, 0],
   //   [7, 8, 0]
   // ]
}

pub fn powf(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>

Applies element wise power operation with a float Tensor.

§Arguments
  • other - The tensor to apply the power operation with.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor2 = Tensor::<B, 2>::from_data([[2.0, 3.0, 4.0], [1.0, 2.0, 3.0]], &device);
   let tensor = tensor1.powf(tensor2);
   println!("{tensor}");
   // [[1.0, -8.0, 81.0], [5.0, 81.0, 216.0]]
}

pub fn powf_scalar<E>(self, other: E) -> Tensor<B, D, K>

Applies element wise power operation with a float scalar.

§Arguments
  • other - The scalar to apply the power operation with.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor = tensor.powf_scalar(2.0);
   println!("{tensor}");
   // [[1.0, 4.0, 9.0], [25.0, 81.0, 36.0]]
}

pub fn powi(self, other: Tensor<B, D, K>) -> Tensor<B, D, K>

Applies element wise power operation with an integer Tensor.

§Arguments
  • other - The tensor to apply the power operation with.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape, Int};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor1 = Tensor::<B, 2, Int>::from_ints([[1, -2, 3], [5, 9, 6]], &device);
   let tensor2 = Tensor::<B, 2, Int>::from_ints([[2, 3, 4], [1, 2, 3]], &device);
   let tensor = tensor1.powi(tensor2);
   println!("{tensor}");
   // [[1, -8, 81], [5, 81, 216]]
}

pub fn powi_scalar<E>(self, other: E) -> Tensor<B, D, K>

Applies element wise power operation with an integer scalar.

§Arguments
  • other - The scalar to apply the power operation with.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape, Int};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2, Int>::from_ints([[1, -2, 3], [5, 9, 6]], &device);
   let tensor = tensor.powi_scalar(2);
   println!("{tensor}");
   // [[1, 4, 9], [25, 81, 36]]
}

pub fn is_close( self, other: Tensor<B, D, K>, rtol: Option<f64>, atol: Option<f64>, ) -> Tensor<B, D, Bool>

Checks element wise if the tensor is close to another tensor.

The tolerance is defined by the following equation:

abs(a - b) <= (atol + rtol * abs(b))

where `a` is the first tensor, `b` is the second tensor, `rtol` is the relative tolerance,
and `atol` is the absolute tolerance.
§Arguments
  • other - The tensor to compare with.
  • rtol - Optional relative tolerance. Default is 1e-5.
  • atol - Optional absolute tolerance. Default is 1e-8.
§Returns

A boolean tensor with the same shape as the input tensors.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor2 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor = tensor1.is_close(tensor2, None, None);
   println!("{tensor}");
   // [[true, true, true], [true, true, true]]
}

pub fn all_close( self, other: Tensor<B, D, K>, rtol: Option<f64>, atol: Option<f64>, ) -> bool

Checks if all elements are close to another tensor.

The tolerance is defined by the following equation:


abs(a - b) <= (atol + rtol * abs(b))

where `a` is the first tensor, `b` is the second tensor, `rtol` is the relative tolerance,
and `atol` is the absolute tolerance.
§Arguments
  • other - The tensor to compare with.
  • rtol - Optional relative tolerance. Default is 1e-5.
  • atol - Optional absolute tolerance. Default is 1e-8.
§Returns

A boolean scalar.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor1 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor2 = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
   let result = tensor1.all_close(tensor2, None, None);
   println!("{}", result);
   // true
}

pub fn bool(self) -> Tensor<B, D, Bool>

Converts the tensor to a boolean tensor by checking if the elements are non-zero.

§Returns

A boolean tensor with the same shape as the input tensor.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
  let device = B::Device::default();
  let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [0.0, 9.0, 6.0]], &device);
  let tensor = tensor.bool();
  println!("{tensor}");
  // [
  //   [true, true, true],
  //   [false, true, true]
  // ]
}

pub fn random<S>( shape: S, distribution: Distribution, device: &<B as Backend>::Device, ) -> Tensor<B, D, K>
where S: Into<Shape>,

Create a random tensor of the given shape on the given device where each element is sampled from the given distribution.

§Arguments
  • shape - The shape of the tensor.
  • distribution - The distribution to sample from.
  • device - The device to create the tensor on.
§Returns

A new tensor with the given shape and elements sampled from the given distribution.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape, Distribution};

fn example<B: Backend>() {
  let device = B::Device::default();
  let distribution = Distribution::Uniform(0.0, 1.0); // Any random value between 0.0 and 1.0
  let tensor = Tensor::<B, 2>::random(Shape::new([2, 3]), distribution, &device);
  println!("{tensor}");
  // [
  //   [0.08347523, 0.70498955, 0.60332155],
  //   [0.08173251, 0.18028641, 0.97942924]
  // ]
}

pub fn sort(self, dim: usize) -> Tensor<B, D, K>

Sort the elements by value in ascending order along a given dimension.

This sort is unstable (i.e., may reorder equal elements).

§Arguments
  • dim - The dimension to sort along.
§Returns

A new tensor with the elements sorted in ascending order along the given dimension.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
  let device = B::Device::default();
  let tensor = Tensor::<B, 2>::from_data([[12.0, -2.0, 3.0], [5.0, 3.0, 6.0]], &device);
  let sorted_dim0 = tensor.clone().sort(0);
  println!("{sorted_dim0}");
  // [[5.0, -2.0, 3.0], [12.0, 3.0, 6.0]]
  let sorted_dim1 = tensor.sort(1);
  println!("{sorted_dim1}");
  // [[-2.0, 3.0, 12.0], [3.0, 5.0, 6.0]]
}

pub fn sort_descending(self, dim: usize) -> Tensor<B, D, K>

Sort the elements by value in descending order along a given dimension.

This sort is unstable (i.e., may reorder equal elements).

§Arguments
  • dim - The dimension to sort along.
§Returns

A new tensor with the elements sorted in descending order along the given dimension.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[12.0, -2.0, 3.0], [5.0, 3.0, 6.0]], &device);
   let sorted_dim0 = tensor.clone().sort_descending(0);
   println!("{sorted_dim0}");
   // [[12.0, 3.0, 6.0], [5.0, -2.0, 3.0]]
   let sorted_dim1 = tensor.sort_descending(1);
   println!("{sorted_dim1}");
   // [[12.0, 3.0, -2.0], [6.0, 5.0, 3.0]]
}

pub fn sort_with_indices( self, dim: usize, ) -> (Tensor<B, D, K>, Tensor<B, D, Int>)

Sort the elements by value in ascending order along a given dimension. Also returns the indices.

This sort is unstable (i.e., may reorder equal elements).

§Arguments
  • dim - The dimension to sort along.
§Returns

A tuple containing the sorted tensor and the indices tensor.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
  let device = B::Device::default();
  let tensor = Tensor::<B, 2>::from_data([[12.0, -2.0, 3.0], [5.0, 3.0, 6.0]], &device);
  let (tensor, indices) = tensor.sort_with_indices(0);
  println!("{tensor}");
  // [[5.0, -2.0, 3.0], [12.0, 3.0, 6.0]]
  println!("{}", indices);
  // [[1, 0, 0], [0, 1, 1]]
}

pub fn sort_descending_with_indices( self, dim: usize, ) -> (Tensor<B, D, K>, Tensor<B, D, Int>)

Sort the elements by value in descending order along a given dimension. Also returns the indices.

This sort is unstable (i.e., may reorder equal elements).

§Arguments
  • dim - The dimension to sort along.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[12.0, -2.0, 3.0], [5.0, 3.0, 6.0]], &device);
   let (tensor, indices) = tensor.sort_descending_with_indices(0);
   println!("{tensor}");
   // [[12.0, 3.0, 6.0], [5.0, -2.0, 3.0]]
   println!("{}", indices);
   // [[0, 1, 1], [1, 0, 0]]
}

pub fn argsort(self, dim: usize) -> Tensor<B, D, Int>

Returns the indices that sort the elements by value in ascending order along a given dimension.

This sort is unstable (i.e., may reorder equal elements).

§Arguments
  • dim - The dimension to sort along.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[12.0, -2.0, 3.0], [5.0, 3.0, 6.0]], &device);
   let tensor = tensor.argsort(0);
   println!("{tensor}");
   // [[1, 0, 0], [0, 1, 1]]
}

pub fn argsort_descending(self, dim: usize) -> Tensor<B, D, Int>

Returns the indices that sort the elements by value in descending order along a given dimension.

This sort is unstable (i.e., may reorder equal elements).

§Arguments
  • dim - The dimension to sort along.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[12.0, -2.0, 3.0], [5.0, 3.0, 6.0]], &device);
   let indices_dim0 = tensor.clone().argsort_descending(0);
   println!("{indices_dim0}");
   // [[0, 1, 1], [1, 0, 0]]
   let indices_dim1 = tensor.argsort_descending(1);
   println!("{indices_dim1}");
   // [[0, 2, 1], [2, 0, 1]]
}

pub fn topk(self, k: usize, dim: usize) -> Tensor<B, D, K>

Returns the k largest elements of the given input tensor along a given dimension.

§Arguments
  • k - The number of elements to return.
  • dim - The dimension to sort along.
§Returns

A new tensor with the k largest elements along the given dimension.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
  let device = B::Device::default();
  let tensor = Tensor::<B, 2>::from_data([[12.0, -2.0, 3.0], [5.0, 3.0, 6.0]], &device);
  let top2_dim0 = tensor.clone().topk(2, 0);
  println!("{top2_dim0}");
  // [[12.0, 3.0, 6.0], [5.0, -2.0, 3.0]]
  let top1_dim1 = tensor.topk(1, 1);
  println!("{top1_dim1}");
  // [[12.0], [6.0]]
}

pub fn topk_with_indices( self, k: usize, dim: usize, ) -> (Tensor<B, D, K>, Tensor<B, D, Int>)

Returns the k largest elements of the given input tensor along a given dimension. Also returns the indices.

§Arguments
  • k - The number of elements to return.
  • dim - The dimension to sort along.
§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[12.0, -2.0, 3.0], [5.0, 3.0, 6.0]], &device);
   let (values, indices) = tensor.clone().topk_with_indices(2, 0);
   println!("{values}");
   // [[12.0, 3.0, 6.0], [5.0, -2.0, 3.0]]
   println!("{indices}");
   // [[0, 1, 1], [1, 0, 0]]
   let (values, indices) = tensor.topk_with_indices(1, 1);
   println!("{values}");
   // [[12.0], [6.0]]
   println!("{indices}");
   // [[0], [2]]
}

pub fn pad<E>( self, padding: (usize, usize, usize, usize), value: E, ) -> Tensor<B, D, K>

Pad the tensor of rank two or higher with the given value on the last two dimensions.

§Arguments
  • padding - A tuple of four integers representing the padding on the left, right, top, and bottom.
  • value - The value to pad the tensor with.
§Returns

A new tensor with the given padding.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Shape};

fn example<B: Backend<FloatElem: From<f32>>>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[12.0, -2.0, 3.0], [5.0, 3.0, 6.0]], &device);
   let tensor = tensor.pad((1, 1, 1, 1), 0.0);
   println!("{tensor}");
   // [
   //   [0.0, 0.0, 0.0, 0.0, 0.0],
   //   [0.0, 12.0, -2.0, 3.0, 0.0],
   //   [0.0, 5.0, 3.0, 6.0, 0.0],
   //   [0.0, 0.0, 0.0, 0.0, 0.0]
   // ]
}

pub fn is_nan(&self) -> Tensor<B, D, Bool>

Returns a new tensor with boolean elements indicating whether each element of the input is NaN.

§Returns

A boolean tensor where true indicates NaN and false indicates a non-NaN value.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Bool, Shape};

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::from_data([[1.0, f64::NAN, 3.0], [5.0, 9.0, 6.0]], &device);
   let tensor = tensor.is_nan();
   println!("{tensor}");
   // [[false, true, false], [false, false, false]]
}

pub fn contains_nan(&self) -> Tensor<B, 1, Bool>

Checks if the tensor contains any NaN values.

§Returns

A boolean tensor with a single element indicating whether the tensor contains any NaN values.

§Example
use burn_tensor::backend::Backend;
use burn_tensor::{Tensor, Bool, Shape};

fn example<B: Backend>() {
  let device = B::Device::default();
  let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [f64::NAN, 9.0, 6.0]], &device);
  let tensor = tensor.contains_nan();
  println!("{tensor}");
  // [true]
  let tensor = Tensor::<B, 2>::from_data([[1.0, -2.0, 3.0], [5.0, 9.0, 6.0]], &device);
  let tensor = tensor.contains_nan();
  println!("{tensor}");
  // [false]
}
§

impl<B, K> Tensor<B, 2, K>
where B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

pub fn eye(size: usize, device: &<B as Backend>::Device) -> Tensor<B, 2, K>

Creates a new 2D tensor with ones on the diagonal and zeros elsewhere.

§Arguments
  • size - The size of the square matrix.
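
§Example

A minimal sketch, added here for illustration (not part of the upstream docs): a 3×3 identity matrix.

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor = Tensor::<B, 2>::eye(3, &device);
   println!("{tensor}");
   // [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
}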

Trait Implementations§

§

impl<E, const D: usize, B, K> Add<E> for Tensor<B, D, K>
where E: ElementConversion, B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

§

type Output = Tensor<B, D, K>

The resulting type after applying the + operator.
§

fn add(self, other: E) -> Tensor<B, D, K>

Performs the + operation. Read more
§

impl<B, const D: usize, K> Add for Tensor<B, D, K>
where B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

§

type Output = Tensor<B, D, K>

The resulting type after applying the + operator.
§

fn add(self, rhs: Tensor<B, D, K>) -> Tensor<B, D, K>

Performs the + operation. Read more
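
A short usage sketch, added here for illustration (not part of the upstream docs), showing both Add overloads above; the printed values follow plain element wise addition.

use burn_tensor::backend::Backend;
use burn_tensor::Tensor;

fn example<B: Backend>() {
   let device = B::Device::default();
   let tensor1 = Tensor::<B, 2>::from_data([[1.0, 2.0], [3.0, 4.0]], &device);
   let tensor2 = Tensor::<B, 2>::from_data([[10.0, 20.0], [30.0, 40.0]], &device);
   // Tensor + tensor uses the `Add` impl.
   let sum = tensor1.clone() + tensor2;
   println!("{sum}");
   // [[11.0, 22.0], [33.0, 44.0]]
   // Tensor + scalar uses the `Add<E>` impl.
   let shifted = tensor1 + 1.0;
   println!("{shifted}");
   // [[2.0, 3.0], [4.0, 5.0]]
}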
§

impl<const D: usize, B, K> AutodiffModule<B> for Tensor<B, D, K>

§

type InnerModule = Tensor<<B as AutodiffBackend>::InnerBackend, D, <K as BasicAutodiffOps<B>>::InnerKind>

Inner module without auto-differentiation.
§

fn valid(&self) -> <Tensor<B, D, K> as AutodiffModule<B>>::InnerModule

Get the same module, but on the inner backend without auto-differentiation.
§

impl<B, const D: usize, T> BitXor<T> for Tensor<B, D>
where B: Backend,

§

type Output = Tensor<B, D>

The resulting type after applying the ^ operator.
§

fn bitxor(self, _: T) -> <Tensor<B, D> as BitXor<T>>::Output

Performs the ^ operation. Read more
§

impl<B, const D: usize, K> Clone for Tensor<B, D, K>
where B: Clone + Backend, K: Clone + TensorKind<B>, <K as TensorKind<B>>::Primitive: Clone,

§

fn clone(&self) -> Tensor<B, D, K>

Returns a copy of the value. Read more
1.0.0 · Source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
§

impl<B, const D: usize, K> Debug for Tensor<B, D, K>
where B: Debug + Backend, K: Debug + TensorKind<B>, <K as TensorKind<B>>::Primitive: Debug,

§

fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error>

Formats the value using the given formatter. Read more
§

impl<'de, B, const D: usize, K> Deserialize<'de> for Tensor<B, D, K>
where B: Backend, K: BasicOps<B>, <K as BasicOps<B>>::Elem: Debug + Copy + Deserialize<'de>,

§

fn deserialize<De>( deserializer: De, ) -> Result<Tensor<B, D, K>, <De as Deserializer<'de>>::Error>
where De: Deserializer<'de>,

Deserialize this value from the given Serde deserializer. Read more
§

impl<B, const D: usize, K> Display for Tensor<B, D, K>
where B: Backend, <B as Backend>::IntElem: Display, K: BasicOps<B>, <K as BasicOps<B>>::Elem: Debug,

Pretty print tensors

§

fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error>

Formats the value using the given formatter. Read more
§

impl<E, const D: usize, B, K> Div<E> for Tensor<B, D, K>
where E: ElementConversion, B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

§

type Output = Tensor<B, D, K>

The resulting type after applying the / operator.
§

fn div(self, other: E) -> Tensor<B, D, K>

Performs the / operation. Read more
§

impl<B, const D: usize, K> Div for Tensor<B, D, K>
where B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

§

type Output = Tensor<B, D, K>

The resulting type after applying the / operator.
§

fn div(self, rhs: Tensor<B, D, K>) -> Tensor<B, D, K>

Performs the / operation. Read more
§

impl<B, const D: usize, K, T> From<T> for Tensor<B, D, K>
where B: Backend, K: BasicOps<B>, T: Into<TensorData>,

§

fn from(value: T) -> Tensor<B, D, K>

Converts to this type from the input type.
§

impl<const D: usize, B, K> Module<B> for Tensor<B, D, K>
where B: Backend, K: BasicOps<B>,

§

type Record = ConstantRecord

Type to save and load the module.
§

fn visit<V>(&self, _visitor: &mut V)
where V: ModuleVisitor<B>,

Visit each tensor parameter in the module with a visitor.
§

fn map<M>(self, _mapper: &mut M) -> Tensor<B, D, K>
where M: ModuleMapper<B>,

Map each tensor parameter in the module with a mapper.
§

fn into_record(self) -> <Tensor<B, D, K> as Module<B>>::Record

Convert the module into a record containing the state.
§

fn load_record( self, _record: <Tensor<B, D, K> as Module<B>>::Record, ) -> Tensor<B, D, K>

Load the module state from a record.
§

fn to_device(self, device: &<B as Backend>::Device) -> Tensor<B, D, K>

Move the module and all of its sub-modules to the given device. Read more
§

fn fork(self, device: &<B as Backend>::Device) -> Tensor<B, D, K>

Fork the module and all of its sub-modules to the given device. Read more
§

fn collect_devices( &self, devices: Vec<<B as Backend>::Device>, ) -> Vec<<B as Backend>::Device>

Return all the devices found in the underneath module tree added to the given vector without duplicates.
§

fn devices(&self) -> Vec<<B as Backend>::Device>

Return all the devices found in the underneath module tree without duplicates.
§

fn no_grad(self) -> Self

Each tensor in the module tree will not require grad. Read more
§

fn num_params(&self) -> usize

Get the number of parameters the module has, including all of its sub-modules.
§

fn save_file<FR, PB>( self, file_path: PB, recorder: &FR, ) -> Result<(), RecorderError>
where FR: FileRecorder<B>, PB: Into<PathBuf>,

Save the module to a file using the provided file recorder. Read more
§

fn load_file<FR, PB>( self, file_path: PB, recorder: &FR, device: &<B as Backend>::Device, ) -> Result<Self, RecorderError>
where FR: FileRecorder<B>, PB: Into<PathBuf>,

Load the module from a file using the provided file recorder. Read more
§

fn quantize_weights<C>(self, quantizer: &mut Quantizer<C>) -> Self
where C: Calibration,

Quantize the weights of the module.
§

impl<const D: usize, B, K> ModuleDisplay for Tensor<B, D, K>
where B: Backend, K: BasicOps<B>,

§

fn format(&self, passed_settings: DisplaySettings) -> String

Formats the module with provided display settings. Read more
§

fn custom_settings(&self) -> Option<DisplaySettings>

Custom display settings for the module. Read more
§

fn custom_content(&self, _content: Content) -> Option<Content>

Custom attributes for the module. Read more
§

impl<const D: usize, B, K> ModuleDisplayDefault for Tensor<B, D, K>
where B: Backend, K: BasicOps<B>,

§

fn content(&self, content: Content) -> Option<Content>

Attributes of the module used for display purposes. Read more
§

fn num_params(&self) -> usize

Gets the number of the parameters of the module.
§

impl<E, const D: usize, B, K> Mul<E> for Tensor<B, D, K>
where E: ElementConversion, B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

§

type Output = Tensor<B, D, K>

The resulting type after applying the * operator.
§

fn mul(self, other: E) -> Tensor<B, D, K>

Performs the * operation. Read more
§

impl<B, const D: usize, K> Mul for Tensor<B, D, K>
where B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

§

type Output = Tensor<B, D, K>

The resulting type after applying the * operator.
§

fn mul(self, rhs: Tensor<B, D, K>) -> Tensor<B, D, K>

Performs the * operation. Read more
§

impl<B, const D: usize, K> Neg for Tensor<B, D, K>
where B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

§

type Output = Tensor<B, D, K>

The resulting type after applying the - operator.
§

fn neg(self) -> Tensor<B, D, K>

Performs the unary - operation. Read more
§

impl<B, const D: usize> Parameter for Tensor<B, D>
where B: Backend,

§

type Device = <B as Backend>::Device

The device type to be used.
§

fn device(&self) -> <Tensor<B, D> as Parameter>::Device

Fetch the device.
§

fn is_require_grad(&self) -> bool

Fetch the gradient requirement.
§

fn set_require_grad(self, require_grad: bool) -> Tensor<B, D>

Set the gradient requirement.
§

impl<B, const D: usize> Parameter for Tensor<B, D, Bool>
where B: Backend,

§

type Device = <B as Backend>::Device

The device type to be used.
§

fn device(&self) -> <Tensor<B, D, Bool> as Parameter>::Device

Fetch the device.
§

fn is_require_grad(&self) -> bool

Fetch the gradient requirement.
§

fn set_require_grad(self, _require_grad: bool) -> Tensor<B, D, Bool>

Set the gradient requirement.
§

impl<B, const D: usize> Parameter for Tensor<B, D, Int>
where B: Backend,

§

type Device = <B as Backend>::Device

The device type to be used.
§

fn device(&self) -> <Tensor<B, D, Int> as Parameter>::Device

Fetch the device.
§

fn is_require_grad(&self) -> bool

Fetch the gradient requirement.
§

fn set_require_grad(self, _require_grad: bool) -> Tensor<B, D, Int>

Set the gradient requirement.
§

impl<B, const D: usize> Record<B> for Tensor<B, D>
where B: Backend,

§

type Item<S: PrecisionSettings> = FloatTensorSerde<S>

Type of the item that can be serialized and deserialized.
§

fn into_item<S>(self) -> <Tensor<B, D> as Record<B>>::Item<S>

Convert the current record into the corresponding item that follows the given settings.
§

fn from_item<S>( item: <Tensor<B, D> as Record<B>>::Item<S>, device: &<B as Backend>::Device, ) -> Tensor<B, D>

Convert the given item into a record.
§

impl<B, const D: usize> Record<B> for Tensor<B, D, Bool>
where B: Backend,

§

type Item<S: PrecisionSettings> = BoolTensorSerde

Type of the item that can be serialized and deserialized.
§

fn into_item<S>(self) -> <Tensor<B, D, Bool> as Record<B>>::Item<S>

Convert the current record into the corresponding item that follows the given settings.
§

fn from_item<S>( item: <Tensor<B, D, Bool> as Record<B>>::Item<S>, device: &<B as Backend>::Device, ) -> Tensor<B, D, Bool>

Convert the given item into a record.
§

impl<B, const D: usize> Record<B> for Tensor<B, D, Int>
where B: Backend,

§

type Item<S: PrecisionSettings> = IntTensorSerde<S>

Type of the item that can be serialized and deserialized.
§

fn into_item<S>(self) -> <Tensor<B, D, Int> as Record<B>>::Item<S>

Convert the current record into the corresponding item that follows the given settings.
§

fn from_item<S>( item: <Tensor<B, D, Int> as Record<B>>::Item<S>, device: &<B as Backend>::Device, ) -> Tensor<B, D, Int>

Convert the given item into a record.
§

impl<E, const D: usize, B, K> Rem<E> for Tensor<B, D, K>
where E: ElementConversion, B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

§

type Output = Tensor<B, D, K>

The resulting type after applying the % operator.
§

fn rem(self, other: E) -> Tensor<B, D, K>

Performs the % operation. Read more
§

impl<const D: usize, B, K> Rem for Tensor<B, D, K>
where B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

§

type Output = Tensor<B, D, K>

The resulting type after applying the % operator.
§

fn rem(self, rhs: Tensor<B, D, K>) -> <Tensor<B, D, K> as Rem>::Output

Performs the % operation. Read more
§

impl<B, const D: usize, K> Serialize for Tensor<B, D, K>
where B: Backend, K: BasicOps<B>, <K as BasicOps<B>>::Elem: Debug + Copy + Serialize,

§

fn serialize<S>( &self, serializer: S, ) -> Result<<S as Serializer>::Ok, <S as Serializer>::Error>
where S: Serializer,

Serialize this value into the given Serde serializer. Read more
§

impl<E, const D: usize, B, K> Sub<E> for Tensor<B, D, K>
where E: ElementConversion, B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

§

type Output = Tensor<B, D, K>

The resulting type after applying the - operator.
§

fn sub(self, other: E) -> Tensor<B, D, K>

Performs the - operation. Read more
§

impl<B, const D: usize, K> Sub for Tensor<B, D, K>
where B: Backend, K: Numeric<B>, <K as BasicOps<B>>::Elem: Element,

§

type Output = Tensor<B, D, K>

The resulting type after applying the - operator.
§

fn sub(self, rhs: Tensor<B, D, K>) -> Tensor<B, D, K>

Performs the - operation. Read more

Auto Trait Implementations§

§

impl<B, const D: usize, K> Freeze for Tensor<B, D, K>
where <K as TensorKind<B>>::Primitive: Freeze,

§

impl<B, const D: usize, K> RefUnwindSafe for Tensor<B, D, K>

§

impl<B, const D: usize, K> Send for Tensor<B, D, K>

§

impl<B, const D: usize, K> Sync for Tensor<B, D, K>

§

impl<B, const D: usize, K> Unpin for Tensor<B, D, K>
where <K as TensorKind<B>>::Primitive: Unpin,

§

impl<B, const D: usize, K> UnwindSafe for Tensor<B, D, K>
where <K as TensorKind<B>>::Primitive: UnwindSafe,

Blanket Implementations§

§

impl<T> Adaptor<()> for T

§

fn adapt(&self)

Adapt the type to be passed to a metric.
Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> CloneToUninit for T
where T: Clone,

Source§

unsafe fn clone_to_uninit(&self, dst: *mut u8)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dst. Read more
§

impl<T> Downcast<T> for T

§

fn downcast(&self) -> &T

Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

§

impl<T> FromRef<T> for T
where T: Clone,

§

fn from_ref(input: &T) -> T

Converts to this type from a reference to the input type.
§

impl<T> Instrument for T

§

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided [Span], returning an Instrumented wrapper. Read more
§

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper. Read more
Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> IntoEither for T

Source§

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
Source§

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
§

impl<T> Pointable for T

§

const ALIGN: usize

The alignment of pointer.
§

type Init = T

The type for initializers.
§

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a pointable value with the given initializer. Read more
§

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more
§

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more
§

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more
Source§

impl<T> Same for T

Source§

type Output = T

Should always be Self
§

impl<T> ToCompactString for T
where T: Display,

§

fn try_to_compact_string(&self) -> Result<CompactString, ToCompactStringError>

Fallible version of [ToCompactString::to_compact_string()] Read more
§

fn to_compact_string(&self) -> CompactString

Converts the given value to a [CompactString]. Read more
§

impl<T> ToLine for T
where T: Display,

§

fn to_line(&self) -> Line<'_>

Converts the value to a [Line].
Source§

impl<T> ToOwned for T
where T: Clone,

Source§

type Owned = T

The resulting type after obtaining ownership.
Source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
Source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
§

impl<T> ToSpan for T
where T: Display,

§

fn to_span(&self) -> Span<'_>

Converts the value to a [Span].
Source§

impl<T> ToString for T
where T: Display + ?Sized,

Source§

fn to_string(&self) -> String

Converts the given value to a String. Read more
§

impl<T> ToText for T
where T: Display,

§

fn to_text(&self) -> Text<'_>

Converts the value to a [Text].
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
§

impl<T> Upcast<T> for T

§

fn upcast(&self) -> Option<&T>

§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

§

fn vzip(self) -> V

§

impl<T> WithSubscriber for T

§

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
where S: Into<Dispatch>,

Attaches the provided Subscriber to this type, returning a [WithDispatch] wrapper. Read more
§

fn with_current_subscriber(self) -> WithDispatch<Self>

Attaches the current default Subscriber to this type, returning a [WithDispatch] wrapper. Read more
Source§

impl<T> DeserializeOwned for T
where T: for<'de> Deserialize<'de>,

§

impl<T> ErasedDestructor for T
where T: 'static,

§

impl<T> MaybeSendSync for T

Source§

impl<T, Rhs, Output> NumOps<Rhs, Output> for T
where T: Sub<Rhs, Output = Output> + Mul<Rhs, Output = Output> + Div<Rhs, Output = Output> + Add<Rhs, Output = Output> + Rem<Rhs, Output = Output>,

§

impl<T> WasmNotSend for T
where T: Send,

§

impl<T> WasmNotSendSync for T
where T: WasmNotSend + WasmNotSync,

§

impl<T> WasmNotSync for T
where T: Sync,