Trait burn::tensor::ops::QTensorOps

pub trait QTensorOps<B>
where B: Backend,
{ // Required methods fn q_from_data<const D: usize>( data: TensorData, device: &<B as Backend>::Device, ) -> <B as Backend>::QuantizedTensorPrimitive<D>; fn quantize<const D: usize>( tensor: <B as Backend>::FloatTensorPrimitive<D>, scheme: &QuantizationScheme, qparams: QuantizationParametersPrimitive<B>, ) -> <B as Backend>::QuantizedTensorPrimitive<D>; fn dequantize<const D: usize>( tensor: <B as Backend>::QuantizedTensorPrimitive<D>, ) -> <B as Backend>::FloatTensorPrimitive<D>; fn q_shape<const D: usize>( tensor: &<B as Backend>::QuantizedTensorPrimitive<D>, ) -> Shape<D>; fn q_device<const D: usize>( tensor: &<B as Backend>::QuantizedTensorPrimitive<D>, ) -> <B as Backend>::Device; fn q_reshape<const D1: usize, const D2: usize>( tensor: <B as Backend>::QuantizedTensorPrimitive<D1>, shape: Shape<D2>, ) -> <B as Backend>::QuantizedTensorPrimitive<D2>; fn q_into_data<const D: usize>( tensor: <B as Backend>::QuantizedTensorPrimitive<D>, ) -> impl Future<Output = TensorData> + Send; // Provided methods fn q_set_require_grad<const D: usize>( tensor: <B as Backend>::QuantizedTensorPrimitive<D>, _require_grad: bool, ) -> <B as Backend>::QuantizedTensorPrimitive<D> { ... } fn q_is_require_grad<const D: usize>( _tensor: &<B as Backend>::QuantizedTensorPrimitive<D>, ) -> bool { ... } }
Expand description

Quantized Tensor API for basic operations, see tensor for documentation on each function.

Required Methods§

fn q_from_data<const D: usize>( data: TensorData, device: &<B as Backend>::Device, ) -> <B as Backend>::QuantizedTensorPrimitive<D>

Creates a new tensor from the data structure.

§Arguments
  • data - The data structure.
  • device - The device to create the tensor on.
§Returns

The tensor with the given data.

fn quantize<const D: usize>( tensor: <B as Backend>::FloatTensorPrimitive<D>, scheme: &QuantizationScheme, qparams: QuantizationParametersPrimitive<B>, ) -> <B as Backend>::QuantizedTensorPrimitive<D>

Converts the tensor to a lower precision data type based on the quantization scheme and parameters.

fn dequantize<const D: usize>( tensor: <B as Backend>::QuantizedTensorPrimitive<D>, ) -> <B as Backend>::FloatTensorPrimitive<D>

Converts the tensor back to a higher precision data type.

fn q_shape<const D: usize>( tensor: &<B as Backend>::QuantizedTensorPrimitive<D>, ) -> Shape<D>

Gets the shape of the tensor.

§Arguments
  • tensor - The tensor.
§Returns

The shape of the tensor.

fn q_device<const D: usize>( tensor: &<B as Backend>::QuantizedTensorPrimitive<D>, ) -> <B as Backend>::Device

Gets the device of the tensor.

§Arguments
  • tensor - The tensor.
§Returns

The device of the tensor.

fn q_reshape<const D1: usize, const D2: usize>( tensor: <B as Backend>::QuantizedTensorPrimitive<D1>, shape: Shape<D2>, ) -> <B as Backend>::QuantizedTensorPrimitive<D2>

Reshapes a tensor.

§Arguments
  • tensor - The tensor to reshape.
  • shape - The new shape of the tensor.
§Returns

The tensor with the new shape.

fn q_into_data<const D: usize>( tensor: <B as Backend>::QuantizedTensorPrimitive<D>, ) -> impl Future<Output = TensorData> + Send

Converts the tensor to a data structure.

§Arguments
  • tensor - The tensor.
§Returns

The data structure with the tensor’s data.

Provided Methods§

fn q_set_require_grad<const D: usize>( tensor: <B as Backend>::QuantizedTensorPrimitive<D>, _require_grad: bool, ) -> <B as Backend>::QuantizedTensorPrimitive<D>

Sets the require_grad flag of a tensor.

fn q_is_require_grad<const D: usize>( _tensor: &<B as Backend>::QuantizedTensorPrimitive<D>, ) -> bool

Returns the require_grad flag of a tensor.

Object Safety§

This trait is not object safe.

Implementations on Foreign Types§

§

impl<B> QTensorOps<Fusion<B>> for Fusion<B>
where B: FusionBackend,

§

fn q_from_data<const D: usize>( _data: TensorData, _device: &<Fusion<B> as Backend>::Device, ) -> <Fusion<B> as Backend>::QuantizedTensorPrimitive<D>

§

fn quantize<const D: usize>( _tensor: <Fusion<B> as Backend>::FloatTensorPrimitive<D>, _scheme: &QuantizationScheme, _qparams: QuantizationParametersPrimitive<Fusion<B>>, ) -> <Fusion<B> as Backend>::QuantizedTensorPrimitive<D>

§

fn dequantize<const D: usize>( _tensor: <Fusion<B> as Backend>::QuantizedTensorPrimitive<D>, ) -> <Fusion<B> as Backend>::FloatTensorPrimitive<D>

§

fn q_shape<const D: usize>( tensor: &<Fusion<B> as Backend>::QuantizedTensorPrimitive<D>, ) -> Shape<D>

§

fn q_device<const D: usize>( tensor: &<Fusion<B> as Backend>::QuantizedTensorPrimitive<D>, ) -> <Fusion<B> as Backend>::Device

§

fn q_reshape<const D1: usize, const D2: usize>( _tensor: <Fusion<B> as Backend>::QuantizedTensorPrimitive<D1>, _shape: Shape<D2>, ) -> <Fusion<B> as Backend>::QuantizedTensorPrimitive<D2>

§

async fn q_into_data<const D: usize>( _tensor: <Fusion<B> as Backend>::QuantizedTensorPrimitive<D>, ) -> TensorData

Implementors§

§

impl<B, C> QTensorOps<Autodiff<B, C>> for Autodiff<B, C>

§

impl<E, Q> QTensorOps<LibTorch<E, Q>> for LibTorch<E, Q>
where E: TchElement, Q: QuantElement,

§

impl<E, Q> QTensorOps<NdArray<E, Q>> for NdArray<E, Q>
where E: FloatNdArrayElement, Q: QuantElement,

§

impl<F, I> QTensorOps<Candle<F, I>> for Candle<F, I>
where F: FloatCandleElement, I: IntCandleElement,

§

impl<R, F, I> QTensorOps<JitBackend<R, F, I>> for JitBackend<R, F, I>
where R: JitRuntime, F: FloatElement, I: IntElement,