Trait burn::tensor::ops::ActivationOps
pub trait ActivationOps<B>
where
    B: Backend,
{
    // Provided methods
    fn leaky_relu<const D: usize>(
        tensor: <B as Backend>::FloatTensorPrimitive<D>,
        negative_slope: <B as Backend>::FloatElem,
    ) -> <B as Backend>::FloatTensorPrimitive<D> { ... }
    fn relu<const D: usize>(
        tensor: <B as Backend>::FloatTensorPrimitive<D>,
    ) -> <B as Backend>::FloatTensorPrimitive<D> { ... }
    fn relu_backward<const D: usize>(
        output: <B as Backend>::FloatTensorPrimitive<D>,
        grad: <B as Backend>::FloatTensorPrimitive<D>,
    ) -> <B as Backend>::FloatTensorPrimitive<D> { ... }
    fn gelu<const D: usize>(
        tensor: <B as Backend>::FloatTensorPrimitive<D>,
    ) -> <B as Backend>::FloatTensorPrimitive<D> { ... }
    fn prelu<const D: usize>(
        tensor: <B as Backend>::FloatTensorPrimitive<D>,
        alpha: <B as Backend>::FloatTensorPrimitive<D>,
    ) -> <B as Backend>::FloatTensorPrimitive<D> { ... }
    fn gelu_backward<const D: usize>(
        x: <B as Backend>::FloatTensorPrimitive<D>,
        grad: <B as Backend>::FloatTensorPrimitive<D>,
    ) -> <B as Backend>::FloatTensorPrimitive<D> { ... }
    fn sigmoid<const D: usize>(
        tensor: <B as Backend>::FloatTensorPrimitive<D>,
    ) -> <B as Backend>::FloatTensorPrimitive<D> { ... }
    fn sigmoid_backward<const D: usize>(
        output: <B as Backend>::FloatTensorPrimitive<D>,
        grad: <B as Backend>::FloatTensorPrimitive<D>,
    ) -> <B as Backend>::FloatTensorPrimitive<D> { ... }
    fn hard_sigmoid<const D: usize>(
        tensor: <B as Backend>::FloatTensorPrimitive<D>,
        alpha: <B as Backend>::FloatElem,
        beta: <B as Backend>::FloatElem,
    ) -> <B as Backend>::FloatTensorPrimitive<D> { ... }
    fn log_sigmoid<const D: usize>(
        tensor: <B as Backend>::FloatTensorPrimitive<D>,
    ) -> <B as Backend>::FloatTensorPrimitive<D> { ... }
    fn log_sigmoid_backward<const D: usize>(
        x: <B as Backend>::FloatTensorPrimitive<D>,
        grad: <B as Backend>::FloatTensorPrimitive<D>,
    ) -> <B as Backend>::FloatTensorPrimitive<D> { ... }
}
Activation function operations.
This trait lets backend implementations override activation functions for better performance.
Provided Methods
fn leaky_relu<const D: usize>(
    tensor: <B as Backend>::FloatTensorPrimitive<D>,
    negative_slope: <B as Backend>::FloatElem,
) -> <B as Backend>::FloatTensorPrimitive<D>
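Applies the leaky ReLU activation, element-wise f(x) = x for x >= 0 and negative_slope * x otherwise. A minimal scalar sketch of that formula (an illustrative f32 helper, not part of the trait; a real backend operates on its FloatTensorPrimitive instead):

fn leaky_relu_scalar(x: f32, negative_slope: f32) -> f32 {
    // Positive inputs pass through; negative inputs are scaled by the slope.
    if x >= 0.0 { x } else { negative_slope * x }
}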
fn relu<const D: usize>(
    tensor: <B as Backend>::FloatTensorPrimitive<D>,
) -> <B as Backend>::FloatTensorPrimitive<D>
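Applies the ReLU activation, f(x) = max(0, x). The same formula as an illustrative scalar sketch:

fn relu_scalar(x: f32) -> f32 {
    // Negative inputs are clamped to zero.
    x.max(0.0)
}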
fn relu_backward<const D: usize>(
    output: <B as Backend>::FloatTensorPrimitive<D>,
    grad: <B as Backend>::FloatTensorPrimitive<D>,
) -> <B as Backend>::FloatTensorPrimitive<D>
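Computes the gradient of ReLU given the saved forward output and the incoming gradient. Since relu(x) > 0 exactly when x > 0, the forward output alone decides where the gradient flows; a scalar sketch:

fn relu_backward_scalar(output: f32, grad: f32) -> f32 {
    // The gradient passes only where the forward output was positive.
    if output > 0.0 { grad } else { 0.0 }
}

Taking the forward output rather than the input means the backward pass can reuse the tensor already produced by the forward pass.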
fn gelu<const D: usize>(
    tensor: <B as Backend>::FloatTensorPrimitive<D>,
) -> <B as Backend>::FloatTensorPrimitive<D>
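Applies the GELU activation, x * Φ(x) with Φ the standard normal CDF. Standard Rust has no erf, so this sketch uses the common tanh approximation; the backend's actual formulation may differ:

fn gelu_scalar(x: f32) -> f32 {
    // tanh approximation: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    let c = (2.0 / std::f32::consts::PI).sqrt();
    0.5 * x * (1.0 + (c * (x + 0.044715 * x * x * x)).tanh())
}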
fn prelu<const D: usize>(
    tensor: <B as Backend>::FloatTensorPrimitive<D>,
    alpha: <B as Backend>::FloatTensorPrimitive<D>,
) -> <B as Backend>::FloatTensorPrimitive<D>
Applies the PReLU activation function.

Arguments

tensor - The input tensor.
alpha - The weight tensor.
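PReLU follows the same piecewise form as leaky ReLU, except the negative slope alpha is a learned tensor (typically broadcast per channel) rather than a fixed scalar. Element-wise, with alpha standing for the slope value broadcasting selects for this element (illustrative scalar helper only):

fn prelu_scalar(x: f32, alpha: f32) -> f32 {
    // Like leaky ReLU, but the slope is a learned weight.
    if x >= 0.0 { x } else { alpha * x }
}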
fn gelu_backward<const D: usize>(
    x: <B as Backend>::FloatTensorPrimitive<D>,
    grad: <B as Backend>::FloatTensorPrimitive<D>,
) -> <B as Backend>::FloatTensorPrimitive<D>
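Computes the gradient of GELU with respect to the input x, scaled by the incoming gradient. The sketch below differentiates the tanh approximation shown above; this is an assumption for illustration, not necessarily the formulation a given backend implements:

fn gelu_backward_scalar(x: f32, grad: f32) -> f32 {
    let c = (2.0 / std::f32::consts::PI).sqrt();
    let u = c * (x + 0.044715 * x * x * x);
    let t = u.tanh();
    // Chain rule through u = c * (x + 0.044715 * x^3); sech^2(u) = 1 - tanh^2(u).
    let du = c * (1.0 + 3.0 * 0.044715 * x * x);
    let dgelu = 0.5 * (1.0 + t) + 0.5 * x * (1.0 - t * t) * du;
    grad * dgelu
}

Unlike relu_backward, this takes the original input x, because the derivative cannot be recovered from the forward output alone.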
fn sigmoid<const D: usize>(
    tensor: <B as Backend>::FloatTensorPrimitive<D>,
) -> <B as Backend>::FloatTensorPrimitive<D>
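Applies the logistic sigmoid, sigma(x) = 1 / (1 + e^(-x)); an illustrative scalar sketch:

fn sigmoid_scalar(x: f32) -> f32 {
    // 1 / (1 + e^(-x)); IEEE semantics keep this well-behaved even when
    // exp overflows to infinity (the quotient goes to zero).
    1.0 / (1.0 + (-x).exp())
}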
fn sigmoid_backward<const D: usize>(
    output: <B as Backend>::FloatTensorPrimitive<D>,
    grad: <B as Backend>::FloatTensorPrimitive<D>,
) -> <B as Backend>::FloatTensorPrimitive<D>
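Computes the gradient of sigmoid from the saved forward output: sigma'(x) = sigma(x) * (1 - sigma(x)), so the exponential never needs recomputing. Scalar sketch:

fn sigmoid_backward_scalar(output: f32, grad: f32) -> f32 {
    // sigma'(x) expressed via the saved forward output.
    grad * output * (1.0 - output)
}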
fn hard_sigmoid<const D: usize>(
    tensor: <B as Backend>::FloatTensorPrimitive<D>,
    alpha: <B as Backend>::FloatElem,
    beta: <B as Backend>::FloatElem,
) -> <B as Backend>::FloatTensorPrimitive<D>
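A piecewise-linear approximation of sigmoid: f(x) = clamp(alpha * x + beta, 0, 1). Scalar sketch (alpha = 0.2 and beta = 0.5 are common defaults in other frameworks, but here the values come from the caller):

fn hard_sigmoid_scalar(x: f32, alpha: f32, beta: f32) -> f32 {
    // Linear ramp clamped to [0, 1].
    (alpha * x + beta).clamp(0.0, 1.0)
}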
fn log_sigmoid<const D: usize>(
    tensor: <B as Backend>::FloatTensorPrimitive<D>,
) -> <B as Backend>::FloatTensorPrimitive<D>
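Computes log(sigma(x)). Evaluating log(sigmoid(x)) directly underflows for large negative x, so the usual trick is the equivalent stable form min(0, x) - ln(1 + e^(-|x|)); a scalar sketch:

fn log_sigmoid_scalar(x: f32) -> f32 {
    // Numerically stable log(sigmoid(x)) = min(0, x) - ln(1 + e^(-|x|)).
    x.min(0.0) - (-x.abs()).exp().ln_1p()
}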
fn log_sigmoid_backward<const D: usize>(
    x: <B as Backend>::FloatTensorPrimitive<D>,
    grad: <B as Backend>::FloatTensorPrimitive<D>,
) -> <B as Backend>::FloatTensorPrimitive<D>
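Computes the gradient of log_sigmoid: d/dx log(sigma(x)) = sigma(-x) = 1 / (1 + e^x). Scalar sketch:

fn log_sigmoid_backward_scalar(x: f32, grad: f32) -> f32 {
    // d/dx log(sigmoid(x)) = sigmoid(-x); for large x, exp overflows to
    // infinity and the quotient correctly goes to zero.
    grad / (1.0 + x.exp())
}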
Object Safety
This trait is not object safe.