Trait burn::tensor::ops::ActivationOps
pub trait ActivationOps<B>
where
    B: Backend,
{
    // Provided methods
    fn leaky_relu(
        tensor: <B as Backend>::FloatTensorPrimitive,
        negative_slope: <B as Backend>::FloatElem,
    ) -> <B as Backend>::FloatTensorPrimitive { ... }
    fn relu(
        tensor: <B as Backend>::FloatTensorPrimitive,
    ) -> <B as Backend>::FloatTensorPrimitive { ... }
    fn relu_backward(
        output: <B as Backend>::FloatTensorPrimitive,
        grad: <B as Backend>::FloatTensorPrimitive,
    ) -> <B as Backend>::FloatTensorPrimitive { ... }
    fn gelu(
        tensor: <B as Backend>::FloatTensorPrimitive,
    ) -> <B as Backend>::FloatTensorPrimitive { ... }
    fn prelu(
        tensor: <B as Backend>::FloatTensorPrimitive,
        alpha: <B as Backend>::FloatTensorPrimitive,
    ) -> <B as Backend>::FloatTensorPrimitive { ... }
    fn gelu_backward(
        x: <B as Backend>::FloatTensorPrimitive,
        grad: <B as Backend>::FloatTensorPrimitive,
    ) -> <B as Backend>::FloatTensorPrimitive { ... }
    fn sigmoid(
        tensor: <B as Backend>::FloatTensorPrimitive,
    ) -> <B as Backend>::FloatTensorPrimitive { ... }
    fn sigmoid_backward(
        output: <B as Backend>::FloatTensorPrimitive,
        grad: <B as Backend>::FloatTensorPrimitive,
    ) -> <B as Backend>::FloatTensorPrimitive { ... }
    fn hard_sigmoid(
        tensor: <B as Backend>::FloatTensorPrimitive,
        alpha: <B as Backend>::FloatElem,
        beta: <B as Backend>::FloatElem,
    ) -> <B as Backend>::FloatTensorPrimitive { ... }
    fn log_sigmoid(
        tensor: <B as Backend>::FloatTensorPrimitive,
    ) -> <B as Backend>::FloatTensorPrimitive { ... }
    fn log_sigmoid_backward(
        x: <B as Backend>::FloatTensorPrimitive,
        grad: <B as Backend>::FloatTensorPrimitive,
    ) -> <B as Backend>::FloatTensorPrimitive { ... }
}
Activation function operations.
This trait lets backend implementations override activation functions for better performance.
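Because every method is provided, a backend only implements the ones where it has a faster path. The following self-contained sketch mirrors that pattern on plain Vec<f32> values; Activations and Cpu are illustrative stand-ins, not burn types:

trait Activations {
    // Provided method: a generic element-wise fallback.
    fn relu(x: Vec<f32>) -> Vec<f32> {
        x.into_iter().map(|v| v.max(0.0)).collect()
    }
}

struct Cpu;

impl Activations for Cpu {
    // A backend overrides the default, e.g. to run a fused in-place kernel.
    fn relu(mut x: Vec<f32>) -> Vec<f32> {
        for v in x.iter_mut() {
            *v = v.max(0.0);
        }
        x
    }
}

Call sites go through the trait (e.g. <Cpu as Activations>::relu(v)), so swapping in a backend with an override changes the kernel without touching caller code.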
Provided Methods
fn leaky_relu(
    tensor: <B as Backend>::FloatTensorPrimitive,
    negative_slope: <B as Backend>::FloatElem,
) -> <B as Backend>::FloatTensorPrimitive
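The docs give no body here; for reference, leaky ReLU keeps positive inputs and scales negative ones by negative_slope. A minimal scalar sketch of that standard definition (illustrative, not burn's tensor code):

fn leaky_relu_scalar(x: f32, negative_slope: f32) -> f32 {
    // Identity for non-negative inputs, scaled-down slope for negative ones.
    if x >= 0.0 { x } else { negative_slope * x }
}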
fn relu(
    tensor: <B as Backend>::FloatTensorPrimitive,
) -> <B as Backend>::FloatTensorPrimitive
fn relu_backward(
    output: <B as Backend>::FloatTensorPrimitive,
    grad: <B as Backend>::FloatTensorPrimitive,
) -> <B as Backend>::FloatTensorPrimitive
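Note that relu_backward receives the forward output rather than the input; since relu(x) > 0 exactly when x > 0, the output alone determines the gradient mask. A minimal scalar sketch of that standard rule:

fn relu_backward_scalar(output: f32, grad: f32) -> f32 {
    // The gradient passes through only where the forward output was positive.
    if output > 0.0 { grad } else { 0.0 }
}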
fn gelu(
    tensor: <B as Backend>::FloatTensorPrimitive,
) -> <B as Backend>::FloatTensorPrimitive
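GELU(x) = x · Φ(x), with Φ the standard normal CDF. A scalar sketch of the common tanh approximation follows; whether burn's default uses the exact erf form or this approximation is not stated here, so treat this as illustrative:

fn gelu_tanh_approx(x: f32) -> f32 {
    // 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    let c = (2.0 / std::f32::consts::PI).sqrt();
    0.5 * x * (1.0 + (c * (x + 0.044715 * x * x * x)).tanh())
}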
fn prelu(
    tensor: <B as Backend>::FloatTensorPrimitive,
    alpha: <B as Backend>::FloatTensorPrimitive,
) -> <B as Backend>::FloatTensorPrimitive
Applies the PReLU activation function.

Arguments

tensor - The input tensor
alpha - The weight tensor
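PReLU behaves like leaky ReLU, except the negative slope alpha is a learned tensor rather than a scalar. A sketch with one alpha per element (real implementations typically broadcast alpha per channel; this pairing is a simplification for illustration):

fn prelu_elementwise(x: &[f32], alpha: &[f32]) -> Vec<f32> {
    // Each element uses its own learned slope on the negative side.
    x.iter()
        .zip(alpha)
        .map(|(&v, &a)| if v >= 0.0 { v } else { a * v })
        .collect()
}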
fn gelu_backward(
    x: <B as Backend>::FloatTensorPrimitive,
    grad: <B as Backend>::FloatTensorPrimitive,
) -> <B as Backend>::FloatTensorPrimitive
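Differentiating the tanh approximation shown earlier gives one possible backward rule; the chain-rule multiply by grad is what the signature implies, the closed form below is an assumption tied to that approximation:

fn gelu_backward_scalar(x: f32, grad: f32) -> f32 {
    let a = 0.044715;
    let c = (2.0 / std::f32::consts::PI).sqrt();
    let u = c * (x + a * x * x * x);
    let t = u.tanh();
    // d/dx [0.5 * x * (1 + tanh u)] = 0.5 (1 + t) + 0.5 x (1 - t^2) u'
    let du = c * (1.0 + 3.0 * a * x * x);
    grad * (0.5 * (1.0 + t) + 0.5 * x * (1.0 - t * t) * du)
}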
fn sigmoid(
    tensor: <B as Backend>::FloatTensorPrimitive,
) -> <B as Backend>::FloatTensorPrimitive
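σ(x) = 1 / (1 + e^(-x)). A scalar sketch using the standard numerically stable split (illustrative only):

fn sigmoid_scalar(x: f32) -> f32 {
    // Split on sign so exp() never overflows for large |x|.
    if x >= 0.0 {
        1.0 / (1.0 + (-x).exp())
    } else {
        let e = x.exp();
        e / (1.0 + e)
    }
}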
fn sigmoid_backward(
    output: <B as Backend>::FloatTensorPrimitive,
    grad: <B as Backend>::FloatTensorPrimitive,
) -> <B as Backend>::FloatTensorPrimitive
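As with relu_backward, the first argument is the forward output, which fits the identity σ'(x) = σ(x)(1 − σ(x)). A scalar sketch:

fn sigmoid_backward_scalar(output: f32, grad: f32) -> f32 {
    // output is sigmoid(x), so no re-evaluation of the forward pass is needed.
    grad * output * (1.0 - output)
}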
fn hard_sigmoid(
    tensor: <B as Backend>::FloatTensorPrimitive,
    alpha: <B as Backend>::FloatElem,
    beta: <B as Backend>::FloatElem,
) -> <B as Backend>::FloatTensorPrimitive
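The usual hard sigmoid is a clamped affine function; a definition consistent with the alpha and beta parameters above is clamp(alpha·x + beta, 0, 1). Assuming that reading:

fn hard_sigmoid_scalar(x: f32, alpha: f32, beta: f32) -> f32 {
    // Piecewise-linear stand-in for sigmoid: affine ramp clamped to [0, 1].
    (alpha * x + beta).clamp(0.0, 1.0)
}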
fn log_sigmoid(
    tensor: <B as Backend>::FloatTensorPrimitive,
) -> <B as Backend>::FloatTensorPrimitive
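log σ(x) = −ln(1 + e^(−x)). Computing it that way directly overflows for large negative x, so the standard stable rewrite is used in the sketch below:

fn log_sigmoid_scalar(x: f32) -> f32 {
    // min(x, 0) - ln(1 + exp(-|x|)) equals ln(sigmoid(x)) without overflow.
    x.min(0.0) - (1.0 + (-x.abs()).exp()).ln()
}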
fn log_sigmoid_backward(
    x: <B as Backend>::FloatTensorPrimitive,
    grad: <B as Backend>::FloatTensorPrimitive,
) -> <B as Backend>::FloatTensorPrimitive
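Here the backward takes the input x, which matches the identity d/dx log σ(x) = 1 − σ(x) = σ(−x). A scalar sketch, reusing the stable sigmoid_scalar from above:

fn log_sigmoid_backward_scalar(x: f32, grad: f32) -> f32 {
    // The local derivative of log-sigmoid at x is sigmoid(-x).
    grad * sigmoid_scalar(-x)
}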
Object Safety
This trait is not object safe.