tensorflow: add tensorflow.keras.activations members (#11444)

Co-authored-by: Jelle Zijlstra <jelle.zijlstra@gmail.com>
Hoël Bagard
2024-03-02 00:17:28 +09:00
committed by GitHub
parent 88238202d7
commit ea2002d171
2 changed files with 28 additions and 2 deletions

tensorflow/_aliases.pyi

@@ -34,6 +34,7 @@
 IntDataSequence: TypeAlias = Sequence[int] | Sequence[IntDataSequence]
 StrDataSequence: TypeAlias = Sequence[str] | Sequence[StrDataSequence]
 ScalarTensorCompatible: TypeAlias = tf.Tensor | str | float | np.ndarray[Any, Any] | np.number[Any]
 UIntTensorCompatible: TypeAlias = tf.Tensor | int | UIntArray
+FloatTensorCompatible: TypeAlias = tf.Tensor | int | IntArray | float | FloatArray | np.number[Any]
 StringTensorCompatible: TypeAlias = tf.Tensor | str | npt.NDArray[np.str_] | Sequence[StringTensorCompatible]
 TensorCompatible: TypeAlias = ScalarTensorCompatible | Sequence[TensorCompatible]
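For illustration only (not part of the diff), a minimal sketch of the kinds of values the new FloatTensorCompatible alias is meant to accept. The scale function below is hypothetical, and since the alias exists only in the stubs it is referenced in comments rather than imported:

# Hypothetical example: values that should satisfy FloatTensorCompatible
# (tf.Tensor | int | IntArray | float | FloatArray | np.number[Any]).
import numpy as np
import tensorflow as tf

def scale(x: tf.Tensor, factor) -> tf.Tensor:
    # In stub code, "factor" would be annotated as FloatTensorCompatible.
    return x * factor

t = tf.constant([1.0, 2.0])
scale(t, 2)                     # int
scale(t, 0.5)                   # float
scale(t, np.float64(0.5))       # np.number[Any]
scale(t, np.array([2.0, 3.0]))  # float ndarray (FloatArray)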

tensorflow/keras/activations.pyi

@@ -1,12 +1,37 @@
from _typeshed import Incomplete
from collections.abc import Callable
from typing import Any
from typing_extensions import TypeAlias

import numpy as np

from tensorflow import Tensor
from tensorflow._aliases import FloatArray, FloatDataSequence, FloatTensorCompatible, Integer

# The implementation uses isinstance so it must be dict and not any Mapping.
_Activation: TypeAlias = str | None | Callable[[Tensor], Tensor] | dict[str, Any]

# Ints are not allowed.
_ActivationInput: TypeAlias = Tensor | FloatDataSequence | FloatArray | np.number[Any] | float

def deserialize(
    name: str, custom_objects: dict[str, Callable[..., Any]] | None = None, use_legacy_format: bool = False
) -> Callable[..., Any]: ...
def elu(x: _ActivationInput, alpha: FloatTensorCompatible | FloatDataSequence = 1.0) -> Tensor: ...
def exponential(x: _ActivationInput) -> Tensor: ...
def gelu(x: _ActivationInput, approximate: bool = False) -> Tensor: ...
def get(identifier: _Activation) -> Callable[[Tensor], Tensor]: ...
def __getattr__(name: str) -> Incomplete: ...
def hard_sigmoid(x: _ActivationInput) -> Tensor: ...
def linear(x: _ActivationInput) -> Tensor: ...
def mish(x: _ActivationInput) -> Tensor: ...
def relu(
    x: _ActivationInput,
    alpha: FloatTensorCompatible = 0.0,
    max_value: FloatTensorCompatible | FloatDataSequence | None = None,
    threshold: FloatTensorCompatible | FloatDataSequence = 0.0,
) -> Tensor: ...
def selu(x: _ActivationInput) -> Tensor: ...
def serialize(activation: Callable[..., Any], use_legacy_format: bool = False) -> str | dict[str, Any]: ...
def sigmoid(x: _ActivationInput) -> Tensor: ...
def softmax(x: Tensor, axis: Integer = -1) -> Tensor: ...
def softplus(x: _ActivationInput) -> Tensor: ...
def softsign(x: _ActivationInput) -> Tensor: ...
def swish(x: _ActivationInput) -> Tensor: ...
def tanh(x: _ActivationInput) -> Tensor: ...
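As a usage sketch (not part of the commit; assumes a TensorFlow 2.x runtime), the signatures above allow calls like:

# Hypothetical usage exercising the newly stubbed signatures.
import tensorflow as tf
from tensorflow.keras import activations

x = tf.constant([-1.0, 0.0, 1.0])

activations.relu(x, alpha=0.1, max_value=6.0)  # leaky, clipped ReLU
activations.gelu(x, approximate=True)
activations.softmax(tf.constant([[1.0, 2.0, 3.0]]), axis=-1)

fn = activations.get("swish")       # str -> Callable[[Tensor], Tensor]
fn(x)
cfg = activations.serialize(fn)     # str | dict[str, Any], depending on Keras version
fn2 = activations.deserialize("swish")
fn2(x)

Note that, per the comment in the stub, get() is typed to accept a plain dict rather than an arbitrary Mapping because the runtime dispatches with isinstance.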