Struct burn::backend::libtorch::LibTorch

pub struct LibTorch<E = f32, Q = i8> { /* private fields */ }

Tensor backend that uses LibTorch with the [tch] crate for executing tensor operations.

This backend supports a wide range of hardware, from CPUs to GPUs, but requires LibTorch to be installed correctly. The CPU version can be downloaded automatically, and the CUDA version can be as well by setting the TORCH_CUDA_VERSION environment variable. For more complex configurations, check out the manual installation instructions for burn-tch.

Refer to the [tch] crate for more information.
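
Example (a minimal sketch, assuming a recent Burn release with the tch feature enabled; the CUDA and MPS device variants are only usable when the matching hardware and LibTorch build are present):

use burn::backend::libtorch::{LibTorch, LibTorchDevice};
use burn::tensor::Tensor;

// Defaults to f32 float elements and i8 quantized elements.
type B = LibTorch;

fn main() {
    // Switch to LibTorchDevice::Cuda(0) or LibTorchDevice::Mps when available.
    let device = LibTorchDevice::Cpu;

    let x = Tensor::<B, 2>::from_floats([[1.0, 2.0], [3.0, 4.0]], &device);
    let y = x.clone().matmul(x.transpose());

    println!("{y}");
}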

Trait Implementations§

§

impl<E, Q> ActivationOps<LibTorch<E, Q>> for LibTorch<E, Q>
where E: TchElement, Q: QuantElement,

§

fn relu<const D: usize>(tensor: TchTensor<E, D>) -> TchTensor<E, D>

Applies the ReLU activation function. Read more
§

fn gelu<const D: usize>(tensor: TchTensor<E, D>) -> TchTensor<E, D>

Applies the Gelu activation function. Read more
§

fn gelu_backward<const D: usize>( tensor: TchTensor<E, D>, grad: TchTensor<E, D>, ) -> TchTensor<E, D>

Applies the Gelu activation function backward. Read more
§

fn sigmoid<const D: usize>(tensor: TchTensor<E, D>) -> TchTensor<E, D>

Applies the Sigmoid activation function. Read more
§

fn log_sigmoid<const D: usize>(tensor: TchTensor<E, D>) -> TchTensor<E, D>

Applies the LogSigmoid activation function. Read more
§

fn leaky_relu<const D: usize>( tensor: <B as Backend>::FloatTensorPrimitive<D>, negative_slope: <B as Backend>::FloatElem, ) -> <B as Backend>::FloatTensorPrimitive<D>

Applies the LeakyReLU activation function. Read more
§

fn relu_backward<const D: usize>( output: <B as Backend>::FloatTensorPrimitive<D>, grad: <B as Backend>::FloatTensorPrimitive<D>, ) -> <B as Backend>::FloatTensorPrimitive<D>

Applies the ReLU activation function backward. Read more
§

fn prelu<const D: usize>( tensor: <B as Backend>::FloatTensorPrimitive<D>, alpha: <B as Backend>::FloatTensorPrimitive<D>, ) -> <B as Backend>::FloatTensorPrimitive<D>

Applies the PReLu activation function. Read more
§

fn sigmoid_backward<const D: usize>( output: <B as Backend>::FloatTensorPrimitive<D>, grad: <B as Backend>::FloatTensorPrimitive<D>, ) -> <B as Backend>::FloatTensorPrimitive<D>

Applies the Sigmoid activation function backward. Read more
§

fn hard_sigmoid<const D: usize>( tensor: <B as Backend>::FloatTensorPrimitive<D>, alpha: <B as Backend>::FloatElem, beta: <B as Backend>::FloatElem, ) -> <B as Backend>::FloatTensorPrimitive<D>

Applies the hard Sigmoid activation function. Read more
§

fn log_sigmoid_backward<const D: usize>( x: <B as Backend>::FloatTensorPrimitive<D>, grad: <B as Backend>::FloatTensorPrimitive<D>, ) -> <B as Backend>::FloatTensorPrimitive<D>

Applies the LogSigmoid activation function backward. Read more
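
These backend-level ops are normally reached through the functions in burn::tensor::activation. A short sketch, assuming the functional wrappers relu, gelu and sigmoid are available in this release:

use burn::backend::libtorch::LibTorch;
use burn::tensor::{activation, Tensor};

type B = LibTorch<f32>;

fn main() {
    let device = Default::default();
    let x = Tensor::<B, 2>::from_floats([[-1.0, 0.5], [2.0, -0.25]], &device);

    // Each call dispatches to the corresponding ActivationOps method on LibTorch.
    let _relu = activation::relu(x.clone());
    let _gelu = activation::gelu(x.clone());
    let _sigmoid = activation::sigmoid(x);
}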
§

impl<E, Q> Backend for LibTorch<E, Q>
where E: TchElement, Q: QuantElement,

§

type Device = LibTorchDevice

Device type.
§

type FullPrecisionBridge = PrecisionBridge<f32>

A bridge that can cast tensors to full precision.
§

type FloatTensorPrimitive<const D: usize> = TchTensor<E, D>

Tensor primitive to be used for all float operations.
§

type FloatElem = E

Float element type.
§

type IntTensorPrimitive<const D: usize> = TchTensor<i64, D>

Tensor primitive to be used for all int operations.
§

type IntElem = i64

Int element type.
§

type BoolTensorPrimitive<const D: usize> = TchTensor<bool, D>

Tensor primitive to be used for all bool operations.
§

type QuantizedTensorPrimitive<const D: usize> = TchQTensor<Q, D>

Tensor primitive to be used for all quantized operations.
§

fn seed(seed: u64)

Seed the backend.
§

fn ad_enabled() -> bool

If autodiff is enabled.
§

fn name() -> String

Name of the backend.
§

fn sync(device: &<LibTorch<E, Q> as Backend>::Device, sync_type: SyncType)

Sync the backend, ensuring that all computations are finished.
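
A small sketch of these backend-level entry points (the printed name is assumed to be "tch" for this backend; autodiff stays disabled unless LibTorch is wrapped in the Autodiff decorator):

use burn::backend::libtorch::LibTorch;
use burn::tensor::backend::Backend;

type B = LibTorch<f32>;

fn main() {
    // Make RNG-dependent operations (random init, dropout, ...) reproducible.
    B::seed(42);

    println!("backend: {}", B::name());
    println!("autodiff enabled: {}", B::ad_enabled());
}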
§

impl<TElem, OElem, QElem> BackendBridge<LibTorch<OElem, QElem>> for PrecisionBridge<TElem>
where TElem: TchElement, OElem: TchElement, QElem: QuantElement,

§

type Target = LibTorch<TElem>

The target backend
§

fn into_target<const D: usize>( tensor: <LibTorch<OElem> as Backend>::FloatTensorPrimitive<D>, device: Option<<<PrecisionBridge<TElem> as BackendBridge<LibTorch<OElem, QElem>>>::Target as Backend>::Device>, ) -> <<PrecisionBridge<TElem> as BackendBridge<LibTorch<OElem, QElem>>>::Target as Backend>::FloatTensorPrimitive<D>

Transfer the tensor to the target backend.
§

fn from_target<const D: usize>( tensor: <<PrecisionBridge<TElem> as BackendBridge<LibTorch<OElem, QElem>>>::Target as Backend>::FloatTensorPrimitive<D>, device: Option<<LibTorch<OElem> as Backend>::Device>, ) -> <LibTorch<OElem> as Backend>::FloatTensorPrimitive<D>

Transfer the tensor from the target backend.
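
In user code the bridge is usually exercised through the full-precision helpers on Tensor; a hedged sketch, assuming Tensor::into_full_precision and Tensor::from_full_precision are exposed in this release and route through PrecisionBridge<f32>:

use burn::backend::libtorch::LibTorch;
use burn::tensor::{f16, Tensor};

// A half-precision LibTorch backend; its FullPrecisionBridge targets LibTorch<f32>.
type B = LibTorch<f16>;

fn main() {
    let device = Default::default();
    let x = Tensor::<B, 2>::ones([2, 2], &device);

    // into_target / from_target above back these two conversions.
    let full = x.into_full_precision();
    let _back = Tensor::<B, 2>::from_full_precision(full);
}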
§

impl<E, Q> BoolTensorOps<LibTorch<E, Q>> for LibTorch<E, Q>
where E: TchElement, Q: QuantElement,

§

fn bool_from_data<const D: usize>( data: TensorData, device: &LibTorchDevice, ) -> TchTensor<bool, D>

Creates a tensor from the data structure. Read more
§

fn bool_shape<const D: usize>(tensor: &TchTensor<bool, D>) -> Shape<D>

Returns the shape of the tensor. Read more
§

fn bool_repeat_dim<const D: usize>( tensor: TchTensor<bool, D>, dim: usize, times: usize, ) -> TchTensor<bool, D>

Repeats one dimension of the tensor a given number of times along that dimension. Read more
§

async fn bool_into_data<const D: usize>( tensor: TchTensor<bool, D>, ) -> TensorData

Converts the tensor to a data structure. Read more
§

fn bool_to_device<const D: usize>( tensor: TchTensor<bool, D>, device: &LibTorchDevice, ) -> TchTensor<bool, D>

Moves the tensor to the device.
§

fn bool_reshape<const D1: usize, const D2: usize>( tensor: TchTensor<bool, D1>, shape: Shape<D2>, ) -> TchTensor<bool, D2>

Reshapes the tensor. Read more
§

fn bool_device<const D: usize>(tensor: &TchTensor<bool, D>) -> LibTorchDevice

Gets the device of the tensor. Read more
§

fn bool_empty<const D: usize>( shape: Shape<D>, device: &<LibTorch<E> as Backend>::Device, ) -> TchTensor<bool, D>

Creates a new bool tensor. Read more
§

fn bool_slice<const D1: usize, const D2: usize>( tensor: TchTensor<bool, D1>, ranges: [Range<usize>; D2], ) -> TchTensor<bool, D1>

Gets the values from the tensor for the given ranges. Read more
§

fn bool_slice_assign<const D1: usize, const D2: usize>( tensor: TchTensor<bool, D1>, ranges: [Range<usize>; D2], value: TchTensor<bool, D1>, ) -> TchTensor<bool, D1>

Sets the values in the tensor for the given ranges. Read more
§

fn bool_cat<const D: usize>( tensors: Vec<TchTensor<bool, D>>, dim: usize, ) -> TchTensor<bool, D>

Concatenates the tensors along the given dimension. Read more
§

fn bool_equal<const D: usize>( lhs: TchTensor<bool, D>, rhs: TchTensor<bool, D>, ) -> TchTensor<bool, D>

Equates the two tensors. Read more
§

fn bool_not<const D: usize>(tensor: TchTensor<bool, D>) -> TchTensor<bool, D>

Inverts boolean values. Read more
§

fn bool_into_int<const D: usize>( tensor: TchTensor<bool, D>, ) -> TchTensor<i64, D>

Converts bool tensor to int tensor. Read more
§

fn bool_into_float<const D: usize>( tensor: TchTensor<bool, D>, ) -> TchTensor<E, D>

Converts bool tensor to float tensor. Read more
§

fn bool_swap_dims<const D: usize>( tensor: <LibTorch<E> as Backend>::BoolTensorPrimitive<D>, dim1: usize, dim2: usize, ) -> <LibTorch<E> as Backend>::BoolTensorPrimitive<D>

Swaps two dimensions of a bool tensor. Read more
§

fn bool_narrow<const D: usize>( tensor: TchTensor<bool, D>, dim: usize, start: usize, length: usize, ) -> TchTensor<bool, D>

Returns a new tensor with the given dimension narrowed to the given range. Read more
§

fn bool_chunk<const D: usize>( tensor: TchTensor<bool, D>, chunks: usize, dim: usize, ) -> Vec<TchTensor<bool, D>>

Split the tensor along the given dimension into chunks. Read more
§

fn bool_permute<const D: usize>( tensor: <LibTorch<E, Q> as Backend>::BoolTensorPrimitive<D>, axes: [usize; D], ) -> <LibTorch<E, Q> as Backend>::BoolTensorPrimitive<D>

Permutes the dimensions of a tensor. Read more
§

fn bool_flip<const D: usize>( tensor: TchTensor<bool, D>, axes: &[usize], ) -> TchTensor<bool, D>

Reverse the order of elements in a tensor along the given axes. Read more
§

async fn bool_argwhere<const D: usize>( tensor: <LibTorch<E> as Backend>::BoolTensorPrimitive<D>, ) -> TchTensor<i64, 2>

Compute the indices of the elements that are non-zero, grouped by element. Read more
§

async fn bool_nonzero<const D: usize>( tensor: <LibTorch<E> as Backend>::BoolTensorPrimitive<D>, ) -> Vec<TchTensor<i64, 1>>

Compute the indices of the elements that are non-zero. Read more
§

fn bool_expand<const D1: usize, const D2: usize>( tensor: <LibTorch<E, Q> as Backend>::BoolTensorPrimitive<D1>, shape: Shape<D2>, ) -> <LibTorch<E, Q> as Backend>::BoolTensorPrimitive<D2>

Broadcasts the bool tensor to the given shape.
§

fn bool_not_equal<const D: usize>( lhs: <B as Backend>::BoolTensorPrimitive<D>, rhs: <B as Backend>::BoolTensorPrimitive<D>, ) -> <B as Backend>::BoolTensorPrimitive<D>

Element-wise non-equality comparison. Read more
§

fn bool_transpose<const D: usize>( tensor: <B as Backend>::BoolTensorPrimitive<D>, ) -> <B as Backend>::BoolTensorPrimitive<D>

Transposes a bool tensor. Read more
§

fn bool_any<const D: usize>( tensor: <B as Backend>::BoolTensorPrimitive<D>, ) -> <B as Backend>::BoolTensorPrimitive<1>

Tests if any element in the boolean tensor evaluates to True. Read more
§

fn bool_any_dim<const D: usize>( tensor: <B as Backend>::BoolTensorPrimitive<D>, dim: usize, ) -> <B as Backend>::BoolTensorPrimitive<D>

Tests if any element in the boolean tensor evaluates to True along a given dimension dim. Read more
§

fn bool_all<const D: usize>( tensor: <B as Backend>::BoolTensorPrimitive<D>, ) -> <B as Backend>::BoolTensorPrimitive<1>

Tests if all elements in the boolean tensor evaluate to True. Read more
§

fn bool_all_dim<const D: usize>( tensor: <B as Backend>::BoolTensorPrimitive<D>, dim: usize, ) -> <B as Backend>::BoolTensorPrimitive<D>

Tests if all elements in the boolean tensor evaluate to True along a given dimension dim. Read more
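
Bool tensors typically come out of comparisons and feed back into masking or conversion ops (bool_into_int / bool_into_float above). A small sketch, assuming the high-level lower_elem and mask_fill helpers behave as their names suggest:

use burn::backend::libtorch::LibTorch;
use burn::tensor::Tensor;

type B = LibTorch<f32>;

fn main() {
    let device = Default::default();
    let x = Tensor::<B, 2>::from_floats([[-1.0, 2.0], [0.5, -3.0]], &device);

    // The comparison yields a Tensor<B, 2, Bool>, backed by TchTensor<bool, 2>.
    let negatives = x.clone().lower_elem(0.0);

    // Masking consumes the bool tensor again (a tiny hand-rolled ReLU).
    let _clamped = x.mask_fill(negatives, 0.0);
}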
§

impl<E, Q> Clone for LibTorch<E, Q>
where E: Clone, Q: Clone,

§

fn clone(&self) -> LibTorch<E, Q>

Returns a copy of the value. Read more
1.0.0 · source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
§

impl<E, Q> Debug for LibTorch<E, Q>
where E: Debug, Q: Debug,

§

fn fmt(&self, f: &mut Formatter<'_>) -> Result<(), Error>

Formats the value using the given formatter. Read more
§

impl<E, Q> Default for LibTorch<E, Q>
where E: Default, Q: Default,

§

fn default() -> LibTorch<E, Q>

Returns the “default value” for a type. Read more
§

impl<E, Q> FloatTensorOps<LibTorch<E, Q>> for LibTorch<E, Q>
where E: TchElement, Q: QuantElement,

§

fn float_from_data<const D: usize>( data: TensorData, device: &LibTorchDevice, ) -> TchTensor<E, D>

Creates a new tensor from the data structure. Read more
§

fn float_random<const D: usize>( shape: Shape<D>, distribution: Distribution, device: &LibTorchDevice, ) -> TchTensor<E, D>

Creates a new tensor with random values. Read more
§

fn float_repeat_dim<const D: usize>( tensor: TchTensor<E, D>, dim: usize, times: usize, ) -> TchTensor<E, D>

Repeat the tensor along the given dimension. Read more
§

fn float_zeros<const D: usize>( shape: Shape<D>, device: &LibTorchDevice, ) -> TchTensor<E, D>

Creates a new tensor with zeros. Read more
§

fn float_ones<const D: usize>( shape: Shape<D>, device: &LibTorchDevice, ) -> TchTensor<E, D>

Creates a new tensor with ones. Read more
§

fn float_shape<const D: usize>( tensor: &<LibTorch<E> as Backend>::FloatTensorPrimitive<D>, ) -> Shape<D>

Gets the shape of the tensor. Read more
§

async fn float_into_data<const D: usize>( tensor: <LibTorch<E> as Backend>::FloatTensorPrimitive<D>, ) -> TensorData

Converts the tensor to a data structure. Read more
§

fn float_device<const D: usize>(tensor: &TchTensor<E, D>) -> LibTorchDevice

Gets the device of the tensor. Read more
§

fn float_to_device<const D: usize>( tensor: TchTensor<E, D>, device: &LibTorchDevice, ) -> TchTensor<E, D>

Moves the tensor to the given device. Read more
§

fn float_empty<const D: usize>( shape: Shape<D>, device: &<LibTorch<E> as Backend>::Device, ) -> <LibTorch<E> as Backend>::FloatTensorPrimitive<D>

Creates an empty tensor with the given shape. Read more
§

fn float_add<const D: usize>( lhs: TchTensor<E, D>, rhs: TchTensor<E, D>, ) -> TchTensor<E, D>

Adds two tensors together. Read more
§

fn float_add_scalar<const D: usize>( lhs: TchTensor<E, D>, rhs: E, ) -> TchTensor<E, D>

Adds a scalar to a tensor. Read more
§

fn float_sub<const D: usize>( lhs: TchTensor<E, D>, rhs: TchTensor<E, D>, ) -> TchTensor<E, D>

Subtracts two tensors. Read more
§

fn float_sub_scalar<const D: usize>( lhs: TchTensor<E, D>, rhs: E, ) -> TchTensor<E, D>

Subtracts a scalar from a tensor. Read more
§

fn float_mul<const D: usize>( lhs: TchTensor<E, D>, rhs: TchTensor<E, D>, ) -> TchTensor<E, D>

Multiplies two tensors together element-wise.
§

fn float_mul_scalar<const D: usize>( lhs: TchTensor<E, D>, rhs: E, ) -> TchTensor<E, D>

Multiplies a tensor by a scalar. Read more
§

fn float_div<const D: usize>( lhs: TchTensor<E, D>, rhs: TchTensor<E, D>, ) -> TchTensor<E, D>

Divides two tensors element-wise. Read more
§

fn float_div_scalar<const D: usize>( lhs: TchTensor<E, D>, rhs: E, ) -> TchTensor<E, D>

Divides a tensor by a scalar. Read more
§

fn float_remainder_scalar<const D: usize>( lhs: TchTensor<E, D>, rhs: E, ) -> TchTensor<E, D>

Computes the modulus of a tensor given a scalar. Read more
§

fn float_matmul<const D: usize>( lhs: TchTensor<E, D>, rhs: TchTensor<E, D>, ) -> TchTensor<E, D>

Multiplies two tensors together using matrix multiplication. Read more
§

fn float_neg<const D: usize>(tensor: TchTensor<E, D>) -> TchTensor<E, D>

Negates a tensor element-wise.
§

fn float_recip<const D: usize>(tensor: TchTensor<E, D>) -> TchTensor<E, D>

Calculates the reciprocals element-wise.
§

fn float_swap_dims<const D: usize>( tensor: TchTensor<E, D>, dim1: usize, dim2: usize, ) -> TchTensor<E, D>

Swaps two dimensions of a tensor. Read more
§

fn float_reshape<const D1: usize, const D2: usize>( tensor: TchTensor<E, D1>, shape: Shape<D2>, ) -> TchTensor<E, D2>

Reshapes a tensor. Read more
§

fn float_gather<const D: usize>( dim: usize, tensor: TchTensor<E, D>, indices: TchTensor<i64, D>, ) -> TchTensor<E, D>

Gather elements from a tensor. Read more
§

fn float_scatter<const D: usize>( dim: usize, tensor: TchTensor<E, D>, indices: TchTensor<i64, D>, value: TchTensor<E, D>, ) -> TchTensor<E, D>

Scatter elements into a tensor. Read more
§

fn float_select<const D: usize>( tensor: TchTensor<E, D>, dim: usize, indices: TchTensor<i64, 1>, ) -> TchTensor<E, D>

Select tensor elements along the given dimension corresponding to the given indices. Read more
§

fn float_select_assign<const D: usize>( tensor: TchTensor<E, D>, dim: usize, indices: TchTensor<i64, 1>, value: TchTensor<E, D>, ) -> TchTensor<E, D>

Assign the selected elements along the given dimension corresponding to the given indices to the given value. Read more
§

fn float_slice<const D1: usize, const D2: usize>( tensor: TchTensor<E, D1>, ranges: [Range<usize>; D2], ) -> TchTensor<E, D1>

Select tensor elements corresponding to the given ranges. Read more
§

fn float_slice_assign<const D1: usize, const D2: usize>( tensor: TchTensor<E, D1>, ranges: [Range<usize>; D2], value: TchTensor<E, D1>, ) -> <LibTorch<E> as Backend>::FloatTensorPrimitive<D1>

Assign the selected elements corresponding to the given ranges to the given value. Read more
§

fn float_mask_where<const D: usize>( tensor: TchTensor<E, D>, mask: TchTensor<bool, D>, value: TchTensor<E, D>, ) -> TchTensor<E, D>

Update the given tensor with the value tensor where the mask is true. Read more
§

fn float_mask_fill<const D: usize>( tensor: TchTensor<E, D>, mask: TchTensor<bool, D>, value: E, ) -> TchTensor<E, D>

Update the given tensor with the value where the mask is true. Read more
§

fn float_equal<const D: usize>( lhs: TchTensor<E, D>, rhs: TchTensor<E, D>, ) -> TchTensor<bool, D>

Equal comparison of two tensors. Read more
§

fn float_equal_elem<const D: usize>( lhs: TchTensor<E, D>, rhs: E, ) -> TchTensor<bool, D>

Equal comparison of a tensor and a scalar. Read more
§

fn float_greater<const D: usize>( lhs: TchTensor<E, D>, rhs: TchTensor<E, D>, ) -> TchTensor<bool, D>

Greater than comparison of two tensors. Read more
§

fn float_greater_elem<const D: usize>( lhs: TchTensor<E, D>, rhs: E, ) -> TchTensor<bool, D>

Greater than comparison of a tensor and a scalar. Read more
§

fn float_greater_equal<const D: usize>( lhs: TchTensor<E, D>, rhs: TchTensor<E, D>, ) -> TchTensor<bool, D>

Greater than or equal comparison of two tensors. Read more
§

fn float_greater_equal_elem<const D: usize>( lhs: TchTensor<E, D>, rhs: E, ) -> TchTensor<bool, D>

Greater than or equal comparison of a tensor and a scalar. Read more
§

fn float_lower<const D: usize>( lhs: TchTensor<E, D>, rhs: TchTensor<E, D>, ) -> TchTensor<bool, D>

Less than comparison of two tensors. Read more
§

fn float_lower_elem<const D: usize>( lhs: TchTensor<E, D>, rhs: E, ) -> TchTensor<bool, D>

Less than comparison of a tensor and a scalar. Read more
§

fn float_lower_equal<const D: usize>( lhs: TchTensor<E, D>, rhs: TchTensor<E, D>, ) -> TchTensor<bool, D>

Less than or equal comparison of two tensors. Read more
§

fn float_lower_equal_elem<const D: usize>( lhs: TchTensor<E, D>, rhs: E, ) -> TchTensor<bool, D>

Less than or equal comparison of a tensor and a scalar. Read more
§

fn float_mean<const D: usize>(tensor: TchTensor<E, D>) -> TchTensor<E, 1>

Mean of all elements in a tensor. Read more
§

fn float_sum<const D: usize>(tensor: TchTensor<E, D>) -> TchTensor<E, 1>

Sum of all elements in a tensor. Read more
§

fn float_sum_dim<const D: usize>( tensor: TchTensor<E, D>, dim: usize, ) -> TchTensor<E, D>

Sum of all elements in a tensor along a dimension. Read more
§

fn float_mean_dim<const D: usize>( tensor: TchTensor<E, D>, dim: usize, ) -> TchTensor<E, D>

Mean of all elements in a tensor along a dimension. Read more
§

fn float_prod<const D: usize>(tensor: TchTensor<E, D>) -> TchTensor<E, 1>

Product of all elements in a tensor. Read more
§

fn float_prod_dim<const D: usize>( tensor: TchTensor<E, D>, dim: usize, ) -> TchTensor<E, D>

Product of all elements in a tensor along a dimension. Read more
§

fn float_argmax<const D: usize>( tensor: TchTensor<E, D>, dim: usize, ) -> TchTensor<i64, D>

Gets the indices of the maximum elements of a tensor along an axis. Read more
§

fn float_argmin<const D: usize>( tensor: TchTensor<E, D>, dim: usize, ) -> TchTensor<i64, D>

Gets the indices of the minimum elements of a tensor along an axis. Read more
§

fn float_max_dim<const D: usize>( tensor: TchTensor<E, D>, dim: usize, ) -> TchTensor<E, D>

Gets the maximum elements of a tensor along an axis. Read more
§

fn float_max_dim_with_indices<const D: usize>( tensor: TchTensor<E, D>, dim: usize, ) -> (TchTensor<E, D>, TchTensor<i64, D>)

Gets the maximum elements of a tensor along an axis and their indices. Read more
§

fn float_min_dim<const D: usize>( tensor: TchTensor<E, D>, dim: usize, ) -> TchTensor<E, D>

Gets the minimum elements of a tensor along an axis. Read more
§

fn float_min_dim_with_indices<const D: usize>( tensor: TchTensor<E, D>, dim: usize, ) -> (TchTensor<E, D>, TchTensor<i64, D>)

Gets the minimum elements of a tensor along an axis and their indices. Read more
§

fn float_exp<const D: usize>(tensor: TchTensor<E, D>) -> TchTensor<E, D>

Returns a new tensor with exponential values. Read more
§

fn float_log<const D: usize>(tensor: TchTensor<E, D>) -> TchTensor<E, D>

Returns a new tensor with natural logarithm values. Read more
§

fn float_log1p<const D: usize>(tensor: TchTensor<E, D>) -> TchTensor<E, D>

Returns a new tensor with logarithm values of (1 + Xi). Read more
§

fn float_powf_scalar<const D: usize>( tensor: TchTensor<E, D>, value: f32, ) -> TchTensor<E, D>

Returns a new tensor with values raised to the power of float value. Read more
§

fn float_sqrt<const D: usize>(tensor: TchTensor<E, D>) -> TchTensor<E, D>

Returns a new tensor with square root values. Read more
§

fn float_abs<const D: usize>(tensor: TchTensor<E, D>) -> TchTensor<E, D>

Returns a new tensor with absolute values. Read more
§

fn float_cos<const D: usize>(tensor: TchTensor<E, D>) -> TchTensor<E, D>

Returns a new tensor with cosine values. Read more
§

fn float_sin<const D: usize>(tensor: TchTensor<E, D>) -> TchTensor<E, D>

Returns a new tensor with sine values. Read more
§

fn float_tanh<const D: usize>(tensor: TchTensor<E, D>) -> TchTensor<E, D>

Returns a new tensor with hyperbolic tangent values. Read more
§

fn float_erf<const D: usize>(tensor: TchTensor<E, D>) -> TchTensor<E, D>

Returns a new tensor with the error function values. Read more
§

fn float_cat<const D: usize>( tensors: Vec<TchTensor<E, D>>, dim: usize, ) -> TchTensor<E, D>

Concatenates tensors along a dimension. Read more
§

fn float_clamp_min<const D: usize>( tensor: TchTensor<E, D>, min: E, ) -> <LibTorch<E> as Backend>::FloatTensorPrimitive<D>

Clamps a tensor under a minimum value. Read more
§

fn float_clamp_max<const D: usize>( tensor: <LibTorch<E> as Backend>::FloatTensorPrimitive<D>, max: <LibTorch<E> as Backend>::FloatElem, ) -> <LibTorch<E> as Backend>::FloatTensorPrimitive<D>

Clamps a tensor over a maximum value. Read more
§

fn float_clamp<const D: usize>( tensor: <LibTorch<E> as Backend>::FloatTensorPrimitive<D>, min: <LibTorch<E> as Backend>::FloatElem, max: <LibTorch<E> as Backend>::FloatElem, ) -> <LibTorch<E> as Backend>::FloatTensorPrimitive<D>

Clamps a tensor between a minimum and maximum value. Read more
§

fn float_into_int<const D: usize>(tensor: TchTensor<E, D>) -> TchTensor<i64, D>

Converts float tensor to int tensor. Read more
§

fn float_narrow<const D: usize>( tensor: TchTensor<E, D>, dim: usize, start: usize, length: usize, ) -> TchTensor<E, D>

Returns a new tensor with the given dimension narrowed to the given range. Read more
§

fn float_chunk<const D: usize>( tensor: TchTensor<E, D>, chunks: usize, dim: usize, ) -> Vec<TchTensor<E, D>>

Split the tensor along the given dimension into chunks. Read more
§

fn float_powf<const D: usize>( lhs: <LibTorch<E, Q> as Backend>::FloatTensorPrimitive<D>, rhs: <LibTorch<E, Q> as Backend>::FloatTensorPrimitive<D>, ) -> <LibTorch<E, Q> as Backend>::FloatTensorPrimitive<D>

Element-wise power with a FloatTensor. Read more
§

fn float_permute<const D: usize>( tensor: <LibTorch<E, Q> as Backend>::FloatTensorPrimitive<D>, axes: [usize; D], ) -> <LibTorch<E, Q> as Backend>::FloatTensorPrimitive<D>

Permutes the dimensions of a tensor. Read more
§

fn float_flip<const D: usize>( tensor: <LibTorch<E, Q> as Backend>::FloatTensorPrimitive<D>, axes: &[usize], ) -> <LibTorch<E, Q> as Backend>::FloatTensorPrimitive<D>

Reverse the order of elements in a tensor along the given axes. Read more
§

fn float_sign<const D: usize>( tensor: <LibTorch<E> as Backend>::FloatTensorPrimitive<D>, ) -> <LibTorch<E> as Backend>::FloatTensorPrimitive<D>

Returns the signs of the float tensor. Read more
§

fn float_expand<const D1: usize, const D2: usize>( tensor: <LibTorch<E, Q> as Backend>::FloatTensorPrimitive<D1>, shape: Shape<D2>, ) -> <LibTorch<E, Q> as Backend>::FloatTensorPrimitive<D2>

Broadcasts the float tensor to the given shape.
§

fn float_sort<const D: usize>( tensor: <LibTorch<E> as Backend>::FloatTensorPrimitive<D>, dim: usize, descending: bool, ) -> <LibTorch<E> as Backend>::FloatTensorPrimitive<D>

Sort the elements of the input tensor by value along a given dimension. Read more
§

fn float_argsort<const D: usize>( tensor: <LibTorch<E> as Backend>::FloatTensorPrimitive<D>, dim: usize, descending: bool, ) -> <LibTorch<E> as Backend>::IntTensorPrimitive<D>

Returns the indices that sort the elements of the input tensor by value along a given dimension. Read more
§

fn float_full<const D: usize>( shape: Shape<D>, fill_value: <B as Backend>::FloatElem, device: &<B as Backend>::Device, ) -> <B as Backend>::FloatTensorPrimitive<D>

Creates a tensor filled with given value. Read more
§

fn float_transpose<const D: usize>( tensor: <B as Backend>::FloatTensorPrimitive<D>, ) -> <B as Backend>::FloatTensorPrimitive<D>

Transposes a tensor. Read more
§

fn float_not_equal<const D: usize>( lhs: <B as Backend>::FloatTensorPrimitive<D>, rhs: <B as Backend>::FloatTensorPrimitive<D>, ) -> <B as Backend>::BoolTensorPrimitive<D>

Element-wise non-equality comparison. Read more
§

fn float_not_equal_elem<const D: usize>( lhs: <B as Backend>::FloatTensorPrimitive<D>, rhs: <B as Backend>::FloatElem, ) -> <B as Backend>::BoolTensorPrimitive<D>

Element-wise non-equality comparison with a scalar. Read more
§

fn float_detach<const D: usize>( tensor: <B as Backend>::FloatTensorPrimitive<D>, ) -> <B as Backend>::FloatTensorPrimitive<D>

Detaches a tensor from the computation graph.
§

fn float_set_require_grad<const D: usize>( tensor: <B as Backend>::FloatTensorPrimitive<D>, _require_grad: bool, ) -> <B as Backend>::FloatTensorPrimitive<D>

Sets the require_grad flag of a tensor.
§

fn float_is_require_grad<const D: usize>( _tensor: &<B as Backend>::FloatTensorPrimitive<D>, ) -> bool

Returns the require_grad flag of a tensor.
§

fn float_into_full_precision<const D: usize>( tensor: <B as Backend>::FloatTensorPrimitive<D>, ) -> <<<B as Backend>::FullPrecisionBridge as BackendBridge<B>>::Target as Backend>::FloatTensorPrimitive<D>

Converts a tensor to full precision. Read more
§

fn float_from_full_precision<const D: usize>( tensor: <<<B as Backend>::FullPrecisionBridge as BackendBridge<B>>::Target as Backend>::FloatTensorPrimitive<D>, ) -> <B as Backend>::FloatTensorPrimitive<D>

Converts a tensor from full precision. Read more
§

fn float_powi<const D: usize>( lhs: <B as Backend>::FloatTensorPrimitive<D>, rhs: <B as Backend>::IntTensorPrimitive<D>, ) -> <B as Backend>::FloatTensorPrimitive<D>

Element-wise power with an IntTensor. Read more
§

fn float_powi_scalar<const D: usize>( lhs: <B as Backend>::FloatTensorPrimitive<D>, rhs: <B as Backend>::IntElem, ) -> <B as Backend>::FloatTensorPrimitive<D>

Raises a tensor to the power of an int scalar. Read more
§

fn float_max<const D: usize>( tensor: <B as Backend>::FloatTensorPrimitive<D>, ) -> <B as Backend>::FloatTensorPrimitive<1>

Gets the maximum element of a tensor. Read more
§

fn float_min<const D: usize>( tensor: <B as Backend>::FloatTensorPrimitive<D>, ) -> <B as Backend>::FloatTensorPrimitive<1>

Gets the minimum element of a tensor. Read more
§

fn float_any<const D: usize>( tensor: <B as Backend>::FloatTensorPrimitive<D>, ) -> <B as Backend>::BoolTensorPrimitive<1>

Tests if any element in the float tensor evaluates to True. Read more
§

fn float_any_dim<const D: usize>( tensor: <B as Backend>::FloatTensorPrimitive<D>, dim: usize, ) -> <B as Backend>::BoolTensorPrimitive<D>

Tests if any element in the float tensor evaluates to True along a given dimension dim. Read more
§

fn float_all<const D: usize>( tensor: <B as Backend>::FloatTensorPrimitive<D>, ) -> <B as Backend>::BoolTensorPrimitive<1>

Tests if all elements in the float tensor evaluate to True. Read more
§

fn float_all_dim<const D: usize>( tensor: <B as Backend>::FloatTensorPrimitive<D>, dim: usize, ) -> <B as Backend>::BoolTensorPrimitive<D>

Tests if all elements in the float tensor evaluate to True along a given dimension dim. Read more
§

fn float_sort_with_indices<const D: usize>( tensor: <B as Backend>::FloatTensorPrimitive<D>, dim: usize, descending: bool, ) -> (<B as Backend>::FloatTensorPrimitive<D>, <B as Backend>::IntTensorPrimitive<D>)

Sort the elements of the input tensor by value along a given dimension. Read more
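
Most of these float ops are reached through arithmetic operators and methods on Tensor<B, D>; a brief sketch, assuming mul_scalar, matmul and mean are the high-level counterparts of the ops listed above:

use burn::backend::libtorch::LibTorch;
use burn::tensor::Tensor;

type B = LibTorch<f32>;

fn main() {
    let device = Default::default();
    let a = Tensor::<B, 2>::from_floats([[1.0, 2.0], [3.0, 4.0]], &device);
    let b = Tensor::<B, 2>::ones([2, 2], &device);

    let sum = a.clone() + b.clone();       // float_add
    let scaled = sum.mul_scalar(0.5);      // float_mul_scalar
    let product = a.matmul(b);             // float_matmul
    let _mean = (product + scaled).mean(); // float_mean, shape [1]
}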
§

impl<E, Q> IntTensorOps<LibTorch<E, Q>> for LibTorch<E, Q>
where E: TchElement, Q: QuantElement,

§

fn int_from_data<const D: usize>( data: TensorData, device: &LibTorchDevice, ) -> TchTensor<i64, D>

Creates a tensor from the data structure. Read more
§

fn int_shape<const D: usize>(tensor: &TchTensor<i64, D>) -> Shape<D>

Returns the shape of the tensor. Read more
§

fn int_repeat_dim<const D: usize>( tensor: TchTensor<i64, D>, dim: usize, times: usize, ) -> TchTensor<i64, D>

Repeats the tensor along the given dimension the given number of times. Read more
§

async fn int_into_data<const D: usize>(tensor: TchTensor<i64, D>) -> TensorData

Converts the tensor to a data structure. Read more
§

fn int_to_device<const D: usize>( tensor: TchTensor<i64, D>, device: &LibTorchDevice, ) -> TchTensor<i64, D>

Moves the tensor to the given device.
§

fn int_reshape<const D1: usize, const D2: usize>( tensor: TchTensor<i64, D1>, shape: Shape<D2>, ) -> TchTensor<i64, D2>

Reshapes the tensor. Read more
§

fn int_device<const D: usize>(tensor: &TchTensor<i64, D>) -> LibTorchDevice

Gets the device of the tensor. Read more
§

fn int_empty<const D: usize>( shape: Shape<D>, device: &<LibTorch<E> as Backend>::Device, ) -> TchTensor<i64, D>

Creates a new int tensor. Read more
§

fn int_slice<const D1: usize, const D2: usize>( tensor: TchTensor<i64, D1>, ranges: [Range<usize>; D2], ) -> TchTensor<i64, D1>

Gets the element at the given indices. Read more
§

fn int_slice_assign<const D1: usize, const D2: usize>( tensor: TchTensor<i64, D1>, ranges: [Range<usize>; D2], value: TchTensor<i64, D1>, ) -> TchTensor<i64, D1>

Sets the element at the given indices. Read more
§

fn int_cat<const D: usize>( tensors: Vec<TchTensor<i64, D>>, dim: usize, ) -> TchTensor<i64, D>

Concatenates the given tensors along the given dimension. Read more
§

fn int_equal<const D: usize>( lhs: TchTensor<i64, D>, rhs: TchTensor<i64, D>, ) -> TchTensor<bool, D>

Element-wise equality comparison. Read more
§

fn int_equal_elem<const D: usize>( lhs: TchTensor<i64, D>, rhs: i64, ) -> TchTensor<bool, D>

Element-wise equality comparison with a scalar. Read more
§

fn int_greater<const D: usize>( lhs: TchTensor<i64, D>, rhs: TchTensor<i64, D>, ) -> TchTensor<bool, D>

Element-wise greater than comparison. Read more
§

fn int_greater_elem<const D: usize>( lhs: TchTensor<i64, D>, rhs: i64, ) -> TchTensor<bool, D>

Element-wise greater than comparison with a scalar. Read more
§

fn int_greater_equal<const D: usize>( lhs: TchTensor<i64, D>, rhs: TchTensor<i64, D>, ) -> TchTensor<bool, D>

Element-wise greater than or equal comparison. Read more
§

fn int_greater_equal_elem<const D: usize>( lhs: TchTensor<i64, D>, rhs: i64, ) -> TchTensor<bool, D>

Element-wise greater than or equal comparison with a scalar. Read more
§

fn int_lower<const D: usize>( lhs: TchTensor<i64, D>, rhs: TchTensor<i64, D>, ) -> TchTensor<bool, D>

Element-wise less than comparison. Read more
§

fn int_lower_elem<const D: usize>( lhs: TchTensor<i64, D>, rhs: i64, ) -> TchTensor<bool, D>

Element-wise less than comparison with a scalar. Read more
§

fn int_lower_equal<const D: usize>( lhs: TchTensor<i64, D>, rhs: TchTensor<i64, D>, ) -> TchTensor<bool, D>

Element-wise less than or equal comparison. Read more
§

fn int_lower_equal_elem<const D: usize>( lhs: TchTensor<i64, D>, rhs: i64, ) -> TchTensor<bool, D>

Element-wise less than or equal comparison with a scalar. Read more
§

fn int_add<const D: usize>( lhs: TchTensor<i64, D>, rhs: TchTensor<i64, D>, ) -> TchTensor<i64, D>

Element-wise addition. Read more
§

fn int_add_scalar<const D: usize>( lhs: TchTensor<i64, D>, rhs: i64, ) -> TchTensor<i64, D>

Element-wise addition with a scalar. Read more
§

fn int_sub<const D: usize>( lhs: TchTensor<i64, D>, rhs: TchTensor<i64, D>, ) -> TchTensor<i64, D>

Element-wise subtraction. Read more
§

fn int_sub_scalar<const D: usize>( lhs: TchTensor<i64, D>, rhs: i64, ) -> TchTensor<i64, D>

Element-wise subtraction with a scalar. Read more
§

fn int_mul<const D: usize>( lhs: TchTensor<i64, D>, rhs: TchTensor<i64, D>, ) -> TchTensor<i64, D>

Element-wise multiplication. Read more
§

fn int_mul_scalar<const D: usize>( lhs: TchTensor<i64, D>, rhs: i64, ) -> TchTensor<i64, D>

Element-wise multiplication with a scalar. Read more
§

fn int_div<const D: usize>( lhs: TchTensor<i64, D>, rhs: TchTensor<i64, D>, ) -> TchTensor<i64, D>

Element-wise division. Read more
§

fn int_div_scalar<const D: usize>( lhs: TchTensor<i64, D>, rhs: i64, ) -> TchTensor<i64, D>

Element-wise division with a scalar. Read more
§

fn int_remainder_scalar<const D: usize>( lhs: TchTensor<i64, D>, rhs: i64, ) -> TchTensor<i64, D>

Element-wise modulus with a scalar. Read more
§

fn int_neg<const D: usize>(tensor: TchTensor<i64, D>) -> TchTensor<i64, D>

Element-wise negation. Read more
§

fn int_zeros<const D: usize>( shape: Shape<D>, device: &<LibTorch<E> as Backend>::Device, ) -> TchTensor<i64, D>

Creates a tensor of zeros. Read more
§

fn int_ones<const D: usize>( shape: Shape<D>, device: &<LibTorch<E> as Backend>::Device, ) -> TchTensor<i64, D>

Creates a tensor of ones. Read more
§

fn int_full<const D: usize>( shape: Shape<D>, fill_value: i64, device: &<LibTorch<E> as Backend>::Device, ) -> TchTensor<i64, D>

Creates a tensor filled with given value. Read more
§

fn int_sum<const D: usize>(tensor: TchTensor<i64, D>) -> TchTensor<i64, 1>

Sums all elements in the tensor. Read more
§

fn int_sum_dim<const D: usize>( tensor: TchTensor<i64, D>, dim: usize, ) -> TchTensor<i64, D>

Sums all elements in the tensor along a dimension. Read more
§

fn int_prod<const D: usize>(tensor: TchTensor<i64, D>) -> TchTensor<i64, 1>

Computes the product of all elements in the tensor. Read more
§

fn int_prod_dim<const D: usize>( tensor: TchTensor<i64, D>, dim: usize, ) -> TchTensor<i64, D>

Computes the product of all elements in the tensor along a dimension. Read more
§

fn int_mean<const D: usize>(tensor: TchTensor<i64, D>) -> TchTensor<i64, 1>

Computes the mean of all elements in the tensor. Read more
§

fn int_mean_dim<const D: usize>( tensor: TchTensor<i64, D>, dim: usize, ) -> TchTensor<i64, D>

Computes the mean of all elements in the tensor along a dimension. Read more
§

fn int_gather<const D: usize>( dim: usize, tensor: TchTensor<i64, D>, indices: TchTensor<i64, D>, ) -> TchTensor<i64, D>

Gather elements from the tensor at the given indices. Read more
§

fn int_scatter<const D: usize>( dim: usize, tensor: TchTensor<i64, D>, indices: TchTensor<i64, D>, value: TchTensor<i64, D>, ) -> TchTensor<i64, D>

Scatter a given value to the tensor at the given indices. Read more
§

fn int_select<const D: usize>( tensor: TchTensor<i64, D>, dim: usize, indices: TchTensor<i64, 1>, ) -> TchTensor<i64, D>

Select tensor elements along the given dimension corresponding to the given indices. Read more
§

fn int_select_assign<const D: usize>( tensor: TchTensor<i64, D>, dim: usize, indices: TchTensor<i64, 1>, value: TchTensor<i64, D>, ) -> TchTensor<i64, D>

Assign the selected elements along the given dimension corresponding to the given indices to the given value. Read more
§

fn int_mask_where<const D: usize>( tensor: TchTensor<i64, D>, mask: TchTensor<bool, D>, source: TchTensor<i64, D>, ) -> TchTensor<i64, D>

Fills the tensor with values from the source tensor if the mask is true at the given indices. Read more
§

fn int_mask_fill<const D: usize>( tensor: TchTensor<i64, D>, mask: TchTensor<bool, D>, value: i64, ) -> TchTensor<i64, D>

Fills the tensor with the given value if the mask is true at the given indices. Read more
§

fn int_argmax<const D: usize>( tensor: TchTensor<i64, D>, dim: usize, ) -> TchTensor<i64, D>

Gets the indices of the maximum elements along a dimension. Read more
§

fn int_argmin<const D: usize>( tensor: TchTensor<i64, D>, dim: usize, ) -> TchTensor<i64, D>

Gets the indices of the minimum elements along a dimension. Read more
§

fn int_max_dim<const D: usize>( tensor: TchTensor<i64, D>, dim: usize, ) -> TchTensor<i64, D>

Gets the maximum element in the tensor along a dimension. Read more
§

fn int_max_dim_with_indices<const D: usize>( tensor: TchTensor<i64, D>, dim: usize, ) -> (TchTensor<i64, D>, TchTensor<i64, D>)

Gets the maximum elements and corresponding indices along a dimension. Read more
§

fn int_min_dim<const D: usize>( tensor: TchTensor<i64, D>, dim: usize, ) -> TchTensor<i64, D>

Gets the minimum elements in the tensor along a dimension. Read more
§

fn int_min_dim_with_indices<const D: usize>( tensor: TchTensor<i64, D>, dim: usize, ) -> (TchTensor<i64, D>, TchTensor<i64, D>)

Gets the minimum elements and corresponding indices along a dimension. Read more
§

fn int_clamp_min<const D: usize>( tensor: TchTensor<i64, D>, min: i64, ) -> TchTensor<i64, D>

Clamps a tensor under a minimum value. Read more
§

fn int_clamp_max<const D: usize>( tensor: TchTensor<i64, D>, max: i64, ) -> TchTensor<i64, D>

Clamps a tensor over a maximum value. Read more
§

fn int_clamp<const D: usize>( tensor: TchTensor<i64, D>, min: i64, max: i64, ) -> TchTensor<i64, D>

Clamps a tensor between a minimum and maximum value. Read more
§

fn int_abs<const D: usize>(tensor: TchTensor<i64, D>) -> TchTensor<i64, D>

Returns a new tensor with absolute values. Read more
§

fn int_into_float<const D: usize>(tensor: TchTensor<i64, D>) -> TchTensor<E, D>

Converts int tensor to float tensor. Read more
§

fn int_swap_dims<const D: usize>( tensor: <LibTorch<E> as Backend>::IntTensorPrimitive<D>, dim1: usize, dim2: usize, ) -> <LibTorch<E> as Backend>::IntTensorPrimitive<D>

Swaps two dimensions of an int tensor. Read more
§

fn int_narrow<const D: usize>( tensor: TchTensor<i64, D>, dim: usize, start: usize, length: usize, ) -> TchTensor<i64, D>

Returns a new tensor with the given dimension narrowed to the given range. Read more
§

fn int_chunk<const D: usize>( tensor: TchTensor<i64, D>, chunks: usize, dim: usize, ) -> Vec<TchTensor<i64, D>>

Split the tensor along the given dimension into chunks. Read more
§

fn int_random<const D: usize>( shape: Shape<D>, distribution: Distribution, device: &LibTorchDevice, ) -> TchTensor<i64, D>

Creates a new int tensor with random values. Read more
§

fn int_arange(range: Range<i64>, device: &LibTorchDevice) -> TchTensor<i64, 1>

Creates a new tensor with values from the given range. Read more
§

fn int_permute<const D: usize>( tensor: <LibTorch<E, Q> as Backend>::IntTensorPrimitive<D>, axes: [usize; D], ) -> <LibTorch<E, Q> as Backend>::IntTensorPrimitive<D>

Permutes the dimensions of a tensor. Read more
§

fn int_flip<const D: usize>( tensor: <LibTorch<E, Q> as Backend>::IntTensorPrimitive<D>, axes: &[usize], ) -> <LibTorch<E, Q> as Backend>::IntTensorPrimitive<D>

Reverse the order of elements in a tensor along the given axes. Read more
§

fn int_sign<const D: usize>( tensor: <LibTorch<E> as Backend>::IntTensorPrimitive<D>, ) -> <LibTorch<E> as Backend>::IntTensorPrimitive<D>

Returns the signs of the int tensor. Read more
§

fn int_expand<const D1: usize, const D2: usize>( tensor: <LibTorch<E, Q> as Backend>::IntTensorPrimitive<D1>, shape: Shape<D2>, ) -> <LibTorch<E, Q> as Backend>::IntTensorPrimitive<D2>

Broadcasts the int tensor to the given shape.
§

fn int_sort<const D: usize>( tensor: <LibTorch<E> as Backend>::IntTensorPrimitive<D>, dim: usize, descending: bool, ) -> <LibTorch<E> as Backend>::IntTensorPrimitive<D>

Sort the elements of the input tensor by value along a given dimension. Read more
§

fn int_argsort<const D: usize>( tensor: <LibTorch<E> as Backend>::IntTensorPrimitive<D>, dim: usize, descending: bool, ) -> <LibTorch<E> as Backend>::IntTensorPrimitive<D>

Returns the indices that sort the elements of the input tensor by value along a given dimension. Read more
§

fn int_not_equal<const D: usize>( lhs: <B as Backend>::IntTensorPrimitive<D>, rhs: <B as Backend>::IntTensorPrimitive<D>, ) -> <B as Backend>::BoolTensorPrimitive<D>

Element-wise non-equality comparison. Read more
§

fn int_not_equal_elem<const D: usize>( lhs: <B as Backend>::IntTensorPrimitive<D>, rhs: <B as Backend>::IntElem, ) -> <B as Backend>::BoolTensorPrimitive<D>

Element-wise non-equality comparison with a scalar. Read more
§

fn int_powi<const D: usize>( lhs: <B as Backend>::IntTensorPrimitive<D>, rhs: <B as Backend>::IntTensorPrimitive<D>, ) -> <B as Backend>::IntTensorPrimitive<D>

Element-wise power with an IntTensor. Read more
§

fn int_powf<const D: usize>( lhs: <B as Backend>::IntTensorPrimitive<D>, rhs: <B as Backend>::FloatTensorPrimitive<D>, ) -> <B as Backend>::IntTensorPrimitive<D>

Element-wise power with a FloatTensor. Read more
§

fn int_powi_scalar<const D: usize>( lhs: <B as Backend>::IntTensorPrimitive<D>, rhs: <B as Backend>::IntElem, ) -> <B as Backend>::IntTensorPrimitive<D>

Element-wise power with a scalar. Read more
§

fn int_powf_scalar<const D: usize>( lhs: <B as Backend>::IntTensorPrimitive<D>, rhs: f32, ) -> <B as Backend>::IntTensorPrimitive<D>

Element-wise power with a float scalar. Read more
§

fn int_max<const D: usize>( tensor: <B as Backend>::IntTensorPrimitive<D>, ) -> <B as Backend>::IntTensorPrimitive<1>

Gets the maximum element in the tensor. Read more
§

fn int_min<const D: usize>( tensor: <B as Backend>::IntTensorPrimitive<D>, ) -> <B as Backend>::IntTensorPrimitive<1>

Gets the minimum element in the tensor. Read more
§

fn int_transpose<const D: usize>( tensor: <B as Backend>::IntTensorPrimitive<D>, ) -> <B as Backend>::IntTensorPrimitive<D>

Transposes an int tensor. Read more
§

fn int_cartesian_grid<S, const D: usize, const D2: usize>( shape: S, device: &<B as Backend>::Device, ) -> <B as Backend>::IntTensorPrimitive<D2>
where S: Into<Shape<D>>,

Generates a cartesian grid for the given tensor shape on the specified device. The generated tensor is of dimension D2 = D + 1, where each element at dimension D contains the cartesian grid coordinates for that element. Read more
§

fn int_arange_step( range: Range<i64>, step: usize, device: &<B as Backend>::Device, ) -> <B as Backend>::IntTensorPrimitive<1>

Creates a new tensor with values from the given range with the given step size. Read more
§

fn int_any<const D: usize>( tensor: <B as Backend>::IntTensorPrimitive<D>, ) -> <B as Backend>::BoolTensorPrimitive<1>

Tests if any element in the int tensor evaluates to True. Read more
§

fn int_any_dim<const D: usize>( tensor: <B as Backend>::IntTensorPrimitive<D>, dim: usize, ) -> <B as Backend>::BoolTensorPrimitive<D>

Tests if any element in the int tensor evaluates to True along a given dimension dim. Read more
§

fn int_all<const D: usize>( tensor: <B as Backend>::IntTensorPrimitive<D>, ) -> <B as Backend>::BoolTensorPrimitive<1>

Tests if all elements in the int tensor evaluate to True. Read more
§

fn int_all_dim<const D: usize>( tensor: <B as Backend>::IntTensorPrimitive<D>, dim: usize, ) -> <B as Backend>::BoolTensorPrimitive<D>

Tests if all elements in the int tensor evaluate to True along a given dimension dim. Read more
§

fn int_sort_with_indices<const D: usize>( tensor: <B as Backend>::IntTensorPrimitive<D>, dim: usize, descending: bool, ) -> (<B as Backend>::IntTensorPrimitive<D>, <B as Backend>::IntTensorPrimitive<D>)

Sort the elements of the input tensor by value along a given dimension. Read more
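
Int tensors use the Int marker on the high-level Tensor type; a sketch, assuming arange, reshape and the dim-wise reductions are exposed as shown:

use burn::backend::libtorch::LibTorch;
use burn::tensor::{Int, Tensor};

type B = LibTorch<f32>;

fn main() {
    let device = Default::default();

    // 0..6 laid out as a 2x3 matrix; int tensors are always i64 on this backend.
    let x = Tensor::<B, 1, Int>::arange(0..6, &device).reshape([2, 3]);

    let _row_sums = x.clone().sum_dim(1); // int_sum_dim, shape [2, 1]
    let _row_max = x.max_dim(1);          // int_max_dim, shape [2, 1]
}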
§

impl<E, Q> ModuleOps<LibTorch<E, Q>> for LibTorch<E, Q>
where E: TchElement, Q: QuantElement,

§

fn embedding( weights: TchTensor<E, 2>, indices: TchTensor<i64, 2>, ) -> TchTensor<E, 3>

Embedding operation. Read more
§

fn embedding_backward( weights: TchTensor<E, 2>, output: TchTensor<E, 3>, indices: TchTensor<i64, 2>, ) -> TchTensor<E, 2>

Embedding backward operation. Read more
§

fn conv1d( x: TchTensor<E, 3>, weight: TchTensor<E, 3>, bias: Option<TchTensor<E, 1>>, options: ConvOptions<1>, ) -> TchTensor<E, 3>

One dimensional convolution. Read more
§

fn conv2d( x: TchTensor<E, 4>, weight: TchTensor<E, 4>, bias: Option<TchTensor<E, 1>>, options: ConvOptions<2>, ) -> TchTensor<E, 4>

Two dimensional convolution. Read more
§

fn conv3d( x: TchTensor<E, 5>, weight: TchTensor<E, 5>, bias: Option<TchTensor<E, 1>>, options: ConvOptions<3>, ) -> TchTensor<E, 5>

Three dimensional convolution. Read more
§

fn conv_transpose1d( x: TchTensor<E, 3>, weight: TchTensor<E, 3>, bias: Option<TchTensor<E, 1>>, options: ConvTransposeOptions<1>, ) -> TchTensor<E, 3>

One dimensional transposed convolution. Read more
§

fn conv_transpose2d( x: TchTensor<E, 4>, weight: TchTensor<E, 4>, bias: Option<TchTensor<E, 1>>, options: ConvTransposeOptions<2>, ) -> TchTensor<E, 4>

Two dimensional transposed convolution. Read more
§

fn conv_transpose3d( x: TchTensor<E, 5>, weight: TchTensor<E, 5>, bias: Option<TchTensor<E, 1>>, options: ConvTransposeOptions<3>, ) -> TchTensor<E, 5>

Three dimensional transposed convolution. Read more
§

fn avg_pool1d( x: TchTensor<E, 3>, kernel_size: usize, stride: usize, padding: usize, count_include_pad: bool, ) -> TchTensor<E, 3>

One dimensional avg pooling. Read more
§

fn avg_pool2d( x: TchTensor<E, 4>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], count_include_pad: bool, ) -> TchTensor<E, 4>

Two dimensional avg pooling. Read more
§

fn avg_pool2d_backward( x: TchTensor<E, 4>, grad: TchTensor<E, 4>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], count_include_pad: bool, ) -> TchTensor<E, 4>

Backward pass for the avg pooling 2d operation.
§

fn max_pool1d( x: TchTensor<E, 3>, kernel_size: usize, stride: usize, padding: usize, dilation: usize, ) -> TchTensor<E, 3>

One dimensional max pooling. Read more
§

fn max_pool1d_with_indices( x: TchTensor<E, 3>, kernel_size: usize, stride: usize, padding: usize, dilation: usize, ) -> MaxPool1dWithIndices<LibTorch<E, Q>>

One dimensional max pooling with indices. Read more
§

fn max_pool2d( x: TchTensor<E, 4>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], ) -> TchTensor<E, 4>

Two dimensional max pooling. Read more
§

fn max_pool2d_with_indices( x: TchTensor<E, 4>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], ) -> MaxPool2dWithIndices<LibTorch<E, Q>>

Two dimensional max pooling with indices. Read more
§

fn max_pool2d_with_indices_backward( x: TchTensor<E, 4>, kernel_size: [usize; 2], stride: [usize; 2], padding: [usize; 2], dilation: [usize; 2], output_grad: TchTensor<E, 4>, indices: TchTensor<i64, 4>, ) -> MaxPool2dBackward<LibTorch<E, Q>>

Backward pass for the max pooling 2d operation.
§

fn adaptive_avg_pool2d( x: TchTensor<E, 4>, output_size: [usize; 2], ) -> TchTensor<E, 4>

Two dimensional adaptive avg pooling. Read more
§

fn adaptive_avg_pool2d_backward( x: TchTensor<E, 4>, grad: TchTensor<E, 4>, ) -> TchTensor<E, 4>

Backward pass for the adaptive avg pooling 2d operation.
§

fn adaptive_avg_pool1d( x: TchTensor<E, 3>, output_size: usize, ) -> TchTensor<E, 3>

One dimensional adaptive avg pooling. Read more
§

fn interpolate( x: TchTensor<E, 4>, output_size: [usize; 2], options: InterpolateOptions, ) -> TchTensor<E, 4>

Down/up samples the input. Read more
§

fn interpolate_backward( x: TchTensor<E, 4>, grad: TchTensor<E, 4>, output_size: [usize; 2], options: InterpolateOptions, ) -> TchTensor<E, 4>

Backward pass for the interpolate operation.
§

fn conv1d_backward( x: <B as Backend>::FloatTensorPrimitive<3>, weight: <B as Backend>::FloatTensorPrimitive<3>, bias: Option<<B as Backend>::FloatTensorPrimitive<1>>, output_grad: <B as Backend>::FloatTensorPrimitive<3>, options: ConvOptions<1>, ) -> Conv1dBackward<B>

Backward pass for the conv1d operation.
§

fn conv2d_backward( x: <B as Backend>::FloatTensorPrimitive<4>, weight: <B as Backend>::FloatTensorPrimitive<4>, bias: Option<<B as Backend>::FloatTensorPrimitive<1>>, output_grad: <B as Backend>::FloatTensorPrimitive<4>, options: ConvOptions<2>, ) -> Conv2dBackward<B>

Backward pass for the conv2d operation.
§

fn conv3d_backward( x: <B as Backend>::FloatTensorPrimitive<5>, weight: <B as Backend>::FloatTensorPrimitive<5>, bias: Option<<B as Backend>::FloatTensorPrimitive<1>>, output_grad: <B as Backend>::FloatTensorPrimitive<5>, options: ConvOptions<3>, ) -> Conv3dBackward<B>

Backward pass for the conv3d operation.
§

fn conv_transpose1d_backward( x: <B as Backend>::FloatTensorPrimitive<3>, weight: <B as Backend>::FloatTensorPrimitive<3>, bias: Option<<B as Backend>::FloatTensorPrimitive<1>>, output_grad: <B as Backend>::FloatTensorPrimitive<3>, options: ConvTransposeOptions<1>, ) -> Conv1dBackward<B>

Backward pass for the conv transpose 1d operation.
§

fn conv_transpose2d_backward( x: <B as Backend>::FloatTensorPrimitive<4>, weight: <B as Backend>::FloatTensorPrimitive<4>, bias: Option<<B as Backend>::FloatTensorPrimitive<1>>, output_grad: <B as Backend>::FloatTensorPrimitive<4>, options: ConvTransposeOptions<2>, ) -> Conv2dBackward<B>

Backward pass for the conv transpose 2d operation.
§

fn conv_transpose3d_backward( x: <B as Backend>::FloatTensorPrimitive<5>, weight: <B as Backend>::FloatTensorPrimitive<5>, bias: Option<<B as Backend>::FloatTensorPrimitive<1>>, output_grad: <B as Backend>::FloatTensorPrimitive<5>, options: ConvTransposeOptions<3>, ) -> Conv3dBackward<B>

Backward pass for the conv transpose 3d operation.
§

fn unfold4d( x: <B as Backend>::FloatTensorPrimitive<4>, kernel_size: [usize; 2], options: UnfoldOptions, ) -> <B as Backend>::FloatTensorPrimitive<3>

Four-dimensional unfolding. Read more
§

fn avg_pool1d_backward( x: <B as Backend>::FloatTensorPrimitive<3>, grad: <B as Backend>::FloatTensorPrimitive<3>, kernel_size: usize, stride: usize, padding: usize, count_include_pad: bool, ) -> <B as Backend>::FloatTensorPrimitive<3>

Backward pass for the avg pooling 1d operation.
§

fn adaptive_avg_pool1d_backward( x: <B as Backend>::FloatTensorPrimitive<3>, grad: <B as Backend>::FloatTensorPrimitive<3>, ) -> <B as Backend>::FloatTensorPrimitive<3>

Backward pass for the adaptive avg pooling 1d operation.
§

fn max_pool1d_with_indices_backward( x: <B as Backend>::FloatTensorPrimitive<3>, kernel_size: usize, stride: usize, padding: usize, dilation: usize, output_grad: <B as Backend>::FloatTensorPrimitive<3>, indices: <B as Backend>::IntTensorPrimitive<3>, ) -> MaxPool1dBackward<B>

Backward pass for the max pooling 1d operation.
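
These ops back the functional neural-network helpers; a sketch under the assumption that the wrapper burn::tensor::module::max_pool2d mirrors the parameters listed above (kernel size, stride, padding, dilation):

use burn::backend::libtorch::LibTorch;
use burn::tensor::{module::max_pool2d, Tensor};

type B = LibTorch<f32>;

fn main() {
    let device = Default::default();

    // NCHW input: batch = 1, channels = 1, 4x4 spatial.
    let x = Tensor::<B, 4>::ones([1, 1, 4, 4], &device);

    // 2x2 kernel, stride 2, no padding, dilation 1 -> output of shape [1, 1, 2, 2].
    let _pooled = max_pool2d(x, [2, 2], [2, 2], [0, 0], [1, 1]);
}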
§

impl<E, Q> QTensorOps<LibTorch<E, Q>> for LibTorch<E, Q>
where E: TchElement, Q: QuantElement,

§

fn q_from_data<const D: usize>( data: TensorData, device: &LibTorchDevice, ) -> <LibTorch<E, Q> as Backend>::QuantizedTensorPrimitive<D>

Creates a new tensor from the data structure. Read more
§

fn quantize<const D: usize>( tensor: <LibTorch<E, Q> as Backend>::FloatTensorPrimitive<D>, scheme: &QuantizationScheme, qparams: QuantizationParametersPrimitive<LibTorch<E, Q>>, ) -> <LibTorch<E, Q> as Backend>::QuantizedTensorPrimitive<D>

Convert the tensor to a lower precision data type based on the quantization scheme and parameters.
§

fn dequantize<const D: usize>( tensor: <LibTorch<E, Q> as Backend>::QuantizedTensorPrimitive<D>, ) -> <LibTorch<E, Q> as Backend>::FloatTensorPrimitive<D>

Convert the tensor back to a higher precision data type.
§

fn q_shape<const D: usize>( tensor: &<LibTorch<E, Q> as Backend>::QuantizedTensorPrimitive<D>, ) -> Shape<D>

Gets the shape of the tensor. Read more
§

fn q_device<const D: usize>( tensor: &<LibTorch<E, Q> as Backend>::QuantizedTensorPrimitive<D>, ) -> LibTorchDevice

Gets the device of the tensor. Read more
§

fn q_reshape<const D1: usize, const D2: usize>( tensor: <LibTorch<E, Q> as Backend>::QuantizedTensorPrimitive<D1>, shape: Shape<D2>, ) -> <LibTorch<E, Q> as Backend>::QuantizedTensorPrimitive<D2>

Reshapes a tensor. Read more
§

async fn q_into_data<const D: usize>( tensor: <LibTorch<E, Q> as Backend>::QuantizedTensorPrimitive<D>, ) -> TensorData

Converts the tensor to a data structure. Read more
§

fn q_set_require_grad<const D: usize>( tensor: <B as Backend>::QuantizedTensorPrimitive<D>, _require_grad: bool, ) -> <B as Backend>::QuantizedTensorPrimitive<D>

Sets the require_grad flag of a tensor.
§

fn q_is_require_grad<const D: usize>( _tensor: &<B as Backend>::QuantizedTensorPrimitive<D>, ) -> bool

Returns the require_grad flag of a tensor.
§

impl<E, Q> Copy for LibTorch<E, Q>
where E: Copy, Q: Copy,

Auto Trait Implementations§

§

impl<E, Q> Freeze for LibTorch<E, Q>
where E: Freeze, Q: Freeze,

§

impl<E, Q> RefUnwindSafe for LibTorch<E, Q>

§

impl<E, Q> Send for LibTorch<E, Q>
where E: Send, Q: Send,

§

impl<E, Q> Sync for LibTorch<E, Q>
where E: Sync, Q: Sync,

§

impl<E, Q> Unpin for LibTorch<E, Q>
where E: Unpin, Q: Unpin,

§

impl<E, Q> UnwindSafe for LibTorch<E, Q>
where E: UnwindSafe, Q: UnwindSafe,

Blanket Implementations§

source§

impl<T> Any for T
where T: 'static + ?Sized,

source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
source§

impl<T> Borrow<T> for T
where T: ?Sized,

source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
source§

impl<T> CloneToUninit for T
where T: Copy,

source§

unsafe fn clone_to_uninit(&self, dst: *mut T)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dst. Read more
source§

impl<T> CloneToUninit for T
where T: Clone,

source§

default unsafe fn clone_to_uninit(&self, dst: *mut T)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dst. Read more
§

impl<T> Downcast<T> for T

§

fn downcast(&self) -> &T

source§

impl<T> From<T> for T

source§

fn from(t: T) -> T

Returns the argument unchanged.

§

impl<T> Instrument for T

§

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided [Span], returning an Instrumented wrapper. Read more
§

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper. Read more
source§

impl<T, U> Into<U> for T
where U: From<T>,

source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

source§

impl<T> IntoEither for T

source§

fn into_either(self, into_left: bool) -> Either<Self, Self>

Converts self into a Left variant of Either<Self, Self> if into_left is true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
source§

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
where F: FnOnce(&Self) -> bool,

Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true. Converts self into a Right variant of Either<Self, Self> otherwise. Read more
§

impl<T> Pointable for T

§

const ALIGN: usize = _

The alignment of the pointer.
§

type Init = T

The type for initializers.
§

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a value with the given initializer. Read more
§

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more
§

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more
§

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more
source§

impl<R, P> ReadPrimitive<R> for P
where R: Read + ReadEndian<P>, P: Default,

source§

fn read_from_little_endian(read: &mut R) -> Result<Self, Error>

Read this value from the supplied reader. Same as ReadEndian::read_from_little_endian().
source§

fn read_from_big_endian(read: &mut R) -> Result<Self, Error>

Read this value from the supplied reader. Same as ReadEndian::read_from_big_endian().
source§

fn read_from_native_endian(read: &mut R) -> Result<Self, Error>

Read this value from the supplied reader. Same as ReadEndian::read_from_native_endian().
source§

impl<T> Same for T

§

type Output = T

Should always be Self
source§

impl<T> ToOwned for T
where T: Clone,

§

type Owned = T

The resulting type after obtaining ownership.
source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

§

type Error = Infallible

The type returned in the event of a conversion error.
source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
§

impl<T> Upcast<T> for T

§

fn upcast(&self) -> Option<&T>

§

impl<V, T> VZip<V> for T
where V: MultiLane<T>,

§

fn vzip(self) -> V

§

impl<T> WithSubscriber for T

§

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
where S: Into<Dispatch>,

Attaches the provided Subscriber to this type, returning a [WithDispatch] wrapper. Read more
§

fn with_current_subscriber(self) -> WithDispatch<Self>

Attaches the current default Subscriber to this type, returning a [WithDispatch] wrapper. Read more
§

impl<T> ErasedDestructor for T
where T: 'static,

§

impl<T> WasmNotSend for T
where T: Send,

§

impl<T> WasmNotSendSync for T
where T: WasmNotSend + WasmNotSync,

§

impl<T> WasmNotSync for T
where T: Sync,