Tensorflow keras layer (#9707)

Co-authored-by: Mehdi Drissi <mdrissi@snapchat.com>
This commit is contained in:
Mehdi Drissi
2023-03-09 06:36:53 -08:00
committed by GitHub
parent 2fe634f01c
commit a47dd76af8
10 changed files with 398 additions and 2 deletions

@@ -15,8 +15,52 @@ tensorflow.DType.__getattr__
tensorflow.Graph.__getattr__
tensorflow.Operation.__getattr__
tensorflow.Variable.__getattr__
tensorflow.keras.layers.Layer.__getattr__
# Internal undocumented API
tensorflow.RaggedTensor.__init__
# Has an undocumented extra argument that tf.Variable, which acts like a subclass
# (by dynamically patching tf.Tensor methods), does not preserve.
tensorflow.Tensor.__getitem__
# stub internal utility
tensorflow._aliases
# Tensorflow imports are cursed.
# import tensorflow.initializers
# import tensorflow as tf
# tf.initializers
# Usually these two ways refer to the same module, but for tensorflow the first way
# often does not work while the second way does. The documentation describes
# tf.initializers as a module, and it has that type when accessed the second way,
# but the real module file has a completely different name (even package) and is handled dynamically.
# tf.initializers at runtime is <module 'keras.api._v2.keras.initializers' from '...'>
tensorflow.initializers
# Layer constructors always have **kwargs, but only allow a few specific values. PEP 692
# would allow us to specify this with **kwargs and remove the need for these exceptions.
tensorflow.keras.layers.*.__init__
# __call__ in tensorflow classes often allows keyword usage, but subclasses
# are not expected to handle the keyword case. As an example,
# class MyLayer(tf.keras.layers.Layer):
# def call(self, x):
# ...
# is common even though Layer.call is defined as def call(self, inputs). Treating inputs as
# a keyword argument would lead to many false positives with typical subclass usage.
# An additional awkwardness for Layers is that call may optionally take training/mask as keyword
# arguments, and some layers do while others do not. At runtime, call is not intended to be used
# directly by users, but instead through __call__, which extracts the training/mask arguments.
# Trying to describe this better in stubs would similarly add many false positive Liskov violations.
tensorflow.keras.layers.*.call
tensorflow.keras.regularizers.Regularizer.__call__
tensorflow.keras.constraints.Constraint.__call__
# The Layer class does a good deal of __new__ magic and actually returns one of two different internal
# types depending on tensorflow execution mode. This feels like an implementation detail.
tensorflow.keras.layers.Layer.__new__
# build/compute_output_shape are marked positional-only in stubs
# because the argument name is inconsistent across layers and looks like
# an implementation detail, as the documentation never mentions the
# disagreements.
tensorflow.keras.layers.*.build
tensorflow.keras.layers.*.compute_output_shape
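
The call/build entries above exist to accommodate the very common subclassing pattern the comments describe. A rough sketch of such a subclass (the layer itself is hypothetical, assuming tensorflow/keras is installed):

import tensorflow as tf

class ScaleLayer(tf.keras.layers.Layer):
    # Renames `inputs` to `x` and adds a `training` keyword in call, and takes
    # input_shape in build: typical patterns that would look like Liskov
    # violations if the stubs enforced Layer.call/build keyword names.
    def __init__(self, factor: float = 2.0, **kwargs) -> None:
        super().__init__(**kwargs)
        self.factor = factor

    def build(self, input_shape) -> None:
        super().build(input_shape)

    def call(self, x, training: bool = False):
        return x * self.factor

layer = ScaleLayer(3.0)
# Users invoke __call__, which dispatches to call and handles training/mask.
print(layer(tf.constant([1.0, 2.0])))  # [3. 6.]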

@@ -4,10 +4,12 @@ from builtins import bool as _bool
from collections.abc import Callable, Iterable, Iterator, Sequence
from contextlib import contextmanager
from enum import Enum
from typing import Any, NoReturn, overload
from typing_extensions import Self, TypeAlias
from types import TracebackType
from typing import Any, NoReturn, TypeVar, overload
from typing_extensions import ParamSpec, Self, TypeAlias
import numpy
from tensorflow import initializers as initializers, keras as keras, math as math
# Explicit import of DType is covered by the wildcard, but
# is necessary to avoid a crash in pytype.
@@ -253,4 +255,31 @@ class IndexedSlices(metaclass=ABCMeta):
def __neg__(self) -> IndexedSlices: ...
def consumers(self) -> list[Operation]: ...
class name_scope:
def __init__(self, name: str) -> None: ...
def __enter__(self) -> str: ...
def __exit__(self, typ: type[BaseException] | None, value: BaseException | None, traceback: TracebackType | None) -> None: ...
_P = ParamSpec("_P")
_R = TypeVar("_R")
class Module:
def __init__(self, name: str | None = None) -> None: ...
@property
def name(self) -> str: ...
@property
def name_scope(self) -> name_scope: ...
# Documentation only specifies these as returning Sequence. The actual
# implementation returns a tuple.
@property
def variables(self) -> Sequence[Variable]: ...
@property
def trainable_variables(self) -> Sequence[Variable]: ...
@property
def non_trainable_variables(self) -> Sequence[Variable]: ...
@property
def submodules(self) -> Sequence[Module]: ...
@classmethod
def with_name_scope(cls, method: Callable[_P, _R]) -> Callable[_P, _R]: ...
def __getattr__(name: str) -> Incomplete: ...
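
A minimal usage sketch of the Module/name_scope stubs above; the module class and its variable are made up for illustration:

import tensorflow as tf

class Linear(tf.Module):
    def __init__(self, in_features: int, out_features: int, name=None):
        super().__init__(name=name)
        self.w = tf.Variable(tf.random.normal([in_features, out_features]), name="w")

    # with_name_scope wraps a method so variables it touches are grouped under
    # the module's name scope; the ParamSpec-based stub keeps the signature intact.
    @tf.Module.with_name_scope
    def __call__(self, x: tf.Tensor) -> tf.Tensor:
        return tf.matmul(x, self.w)

mod = Linear(3, 2, name="linear")
print(mod(tf.ones([1, 3])))
print(mod.trainable_variables)  # a tuple at runtime, typed as Sequence[Variable]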

@@ -0,0 +1,14 @@
# Commonly used type aliases.
# Everything in this module is private to the stubs. There is no runtime
# equivalent.
from collections.abc import Mapping, Sequence
from typing import Any, TypeVar
from typing_extensions import TypeAlias
import numpy
_T1 = TypeVar("_T1")
ContainerGeneric: TypeAlias = Mapping[str, ContainerGeneric[_T1]] | Sequence[ContainerGeneric[_T1]] | _T1
AnyArray: TypeAlias = numpy.ndarray[Any, Any]
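
The recursive ContainerGeneric alias is intended to cover arbitrarily nested mappings/sequences of its parameter. A rough sketch of a consumer (the helper is hypothetical; the TYPE_CHECKING guard is needed because this module has no runtime equivalent):

from __future__ import annotations

from collections.abc import Mapping
from typing import TYPE_CHECKING

import tensorflow as tf

if TYPE_CHECKING:
    from tensorflow._aliases import ContainerGeneric  # stub-only module

def count_tensors(nest: ContainerGeneric[tf.Tensor]) -> int:
    # Accepts a bare Tensor, or nested Mappings/Sequences of Tensors.
    if isinstance(nest, Mapping):
        return sum(count_tensors(v) for v in nest.values())
    if isinstance(nest, (list, tuple)):
        return sum(count_tensors(v) for v in nest)
    return 1

print(count_tensors({"a": tf.constant(1.0), "b": [tf.constant(2.0), tf.constant(3.0)]}))  # 3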

@@ -0,0 +1 @@
from tensorflow.keras.initializers import *

@@ -0,0 +1,11 @@
from _typeshed import Incomplete
from tensorflow.keras import (
activations as activations,
constraints as constraints,
initializers as initializers,
layers as layers,
regularizers as regularizers,
)
def __getattr__(name: str) -> Incomplete: ...

@@ -0,0 +1,12 @@
from _typeshed import Incomplete
from collections.abc import Callable
from typing import Any
from typing_extensions import TypeAlias
from tensorflow import Tensor
# The implementation uses isinstance checks, so it must be dict and not an arbitrary Mapping.
_Activation: TypeAlias = str | None | Callable[[Tensor], Tensor] | dict[str, Any]
def get(identifier: _Activation) -> Callable[[Tensor], Tensor]: ...
def __getattr__(name: str) -> Incomplete: ...
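
A sketch of how the _Activation forms are resolved by get; behavior assumed to match recent tf.keras:

import tensorflow as tf

relu = tf.keras.activations.get("relu")        # str identifier
tanh = tf.keras.activations.get(tf.math.tanh)  # a callable is returned as-is
linear = tf.keras.activations.get(None)        # None maps to the linear activation

print(relu(tf.constant([-1.0, 2.0])))  # [0. 2.]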

@@ -0,0 +1,17 @@
from _typeshed import Incomplete
from collections.abc import Callable
from typing import Any, overload
from tensorflow import Tensor
class Constraint:
def get_config(self) -> dict[str, Any]: ...
def __call__(self, __w: Tensor) -> Tensor: ...
@overload
def get(identifier: None) -> None: ...
@overload
def get(identifier: str | dict[str, Any] | Constraint) -> Constraint: ...
@overload
def get(identifier: Callable[[Tensor], Tensor]) -> Callable[[Tensor], Tensor]: ...
def __getattr__(name: str) -> Incomplete: ...

@@ -0,0 +1,50 @@
from _typeshed import Incomplete
from collections.abc import Callable
from typing import Any, overload
from typing_extensions import Self, TypeAlias
from tensorflow import Tensor, _DTypeLike, _ShapeLike, _TensorCompatible
class Initializer:
def __call__(self, shape: _ShapeLike, dtype: _DTypeLike | None = None) -> Tensor: ...
def get_config(self) -> dict[str, Any]: ...
@classmethod
def from_config(cls, config: dict[str, Any]) -> Self: ...
class Constant(Initializer):
def __init__(self, value: _TensorCompatible = 0) -> None: ...
class GlorotNormal(Initializer):
def __init__(self, seed: int | None = None) -> None: ...
class GlorotUniform(Initializer):
def __init__(self, seed: int | None = None) -> None: ...
class TruncatedNormal(Initializer):
def __init__(self, mean: _TensorCompatible = 0.0, stddev: _TensorCompatible = 0.05, seed: int | None = None) -> None: ...
class RandomNormal(Initializer):
def __init__(self, mean: _TensorCompatible = 0.0, stddev: _TensorCompatible = 0.05, seed: int | None = None) -> None: ...
class RandomUniform(Initializer):
def __init__(self, minval: _TensorCompatible = -0.05, maxval: _TensorCompatible = 0.05, seed: int | None = None) -> None: ...
class Zeros(Initializer): ...
constant = Constant
glorot_normal = GlorotNormal
glorot_uniform = GlorotUniform
truncated_normal = TruncatedNormal
zeros = Zeros
_Initializer: TypeAlias = ( # noqa: Y047
str | Initializer | type[Initializer] | Callable[[_ShapeLike], Tensor] | dict[str, Any] | None
)
@overload
def get(identifier: None) -> None: ...
@overload
def get(identifier: str | Initializer | dict[str, Any] | type[Initializer]) -> Initializer: ...
@overload
def get(identifier: Callable[[_ShapeLike], Tensor]) -> Callable[[_ShapeLike], Tensor]: ...
def __getattr__(name: str) -> Incomplete: ...
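
A sketch of how the Initializer classes and get overloads are typically exercised (shape/seed values are arbitrary):

import tensorflow as tf

init = tf.keras.initializers.GlorotUniform(seed=0)
weights = init(shape=(3, 2), dtype=tf.float32)  # Initializer.__call__ returns a Tensor

# get() normalizes a name, an Initializer instance/class, or a config dict to an Initializer.
by_name = tf.keras.initializers.get("glorot_uniform")
by_config = tf.keras.initializers.get({"class_name": "Constant", "config": {"value": 0.5}})
print(weights.shape, type(by_name).__name__, type(by_config).__name__)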

@@ -0,0 +1,197 @@
from _typeshed import Incomplete
from collections.abc import Callable, Iterable, Sequence
from typing import Any, Generic, TypeVar, overload
from typing_extensions import Self, TypeAlias
import tensorflow as tf
from tensorflow import Tensor, Variable, VariableAggregation, VariableSynchronization, _TensorCompatible
from tensorflow._aliases import AnyArray
from tensorflow.keras.activations import _Activation
from tensorflow.keras.constraints import Constraint
from tensorflow.keras.initializers import _Initializer
from tensorflow.keras.regularizers import _Regularizer
_InputT = TypeVar("_InputT", contravariant=True)
_OutputT = TypeVar("_OutputT", covariant=True)
class InputSpec:
dtype: str | None
shape: tuple[int | None, ...]
ndim: int | None
max_ndim: int | None
min_ndim: int | None
axes: dict[int, int | None] | None
def __init__(
self,
dtype: tf._DTypeLike | None = None,
shape: Iterable[int | None] | None = None,
ndim: int | None = None,
max_ndim: int | None = None,
min_ndim: int | None = None,
axes: dict[int, int | None] | None = None,
allow_last_axis_squeeze: bool = False,
name: str | None = None,
) -> None: ...
def get_config(self) -> dict[str, Any]: ...
@classmethod
def from_config(cls, config: dict[str, Any]) -> type[Self]: ...
# Most layers have an input and output type of just Tensor; once default type variables are supported,
# they may be worth trying here.
class Layer(Generic[_InputT, _OutputT], tf.Module):
# The most general type is ContainerGeneric[InputSpec], as it really
# depends on _InputT. For most Layers it is just InputSpec,
# though. Maybe describable with HKT?
input_spec: InputSpec | Any
@property
def trainable(self) -> bool: ...
@trainable.setter
def trainable(self, value: bool) -> None: ...
def __init__(
self, trainable: bool = True, name: str | None = None, dtype: tf._DTypeLike | None = None, dynamic: bool = False
) -> None: ...
# *args/**kwargs are allowed, but have obscure footguns and the tensorflow documentation discourages their usage.
# The first argument will automatically be cast to the layer's compute dtype, but any other tensor arguments will not be.
# Also, various tensorflow tools/apis can misbehave if they encounter a layer with *args/**kwargs.
def __call__(self, inputs: _InputT, *, training: bool = False, mask: _TensorCompatible | None = None) -> _OutputT: ...
def call(self, __inputs: _InputT) -> _OutputT: ...
# input_shape's real type depends on _InputT, but we can't express that without HKT.
# For example _InputT tf.Tensor -> tf.TensorShape, _InputT dict[str, tf.Tensor] -> dict[str, tf.TensorShape].
def build(self, __input_shape: Any) -> None: ...
@overload
def compute_output_shape(self: Layer[tf.Tensor, tf.Tensor], __input_shape: tf.TensorShape) -> tf.TensorShape: ...
@overload
def compute_output_shape(self, __input_shape: Any) -> Any: ...
def add_weight(
self,
name: str | None = None,
shape: Iterable[int | None] | None = None,
dtype: tf._DTypeLike | None = None,
initializer: _Initializer | None = None,
regularizer: _Regularizer = None,
trainable: bool | None = None,
constraint: _Constraint = None,
use_resource: bool | None = None,
synchronization: VariableSynchronization = ...,
aggregation: VariableAggregation = ...,
) -> tf.Variable: ...
def add_loss(self, losses: tf.Tensor | Sequence[tf.Tensor] | Callable[[], tf.Tensor]) -> None: ...
def count_params(self) -> int: ...
@property
def trainable_variables(self) -> list[Variable]: ...
@property
def non_trainable_variables(self) -> list[Variable]: ...
@property
def trainable_weights(self) -> list[Variable]: ...
@property
def non_trainable_weights(self) -> list[Variable]: ...
@property
def losses(self) -> list[Tensor]: ...
def get_weights(self) -> list[AnyArray]: ...
def set_weights(self, weights: Sequence[AnyArray]) -> None: ...
def get_config(self) -> dict[str, Any]: ...
@classmethod
def from_config(cls, config: dict[str, Any]) -> Self: ...
def __getattr__(self, name: str) -> Incomplete: ...
# Every layer has trainable, dtype, name, and dynamic. At runtime these
# are mainly handled with **kwargs, passed up and then validated.
# In the actual implementation there are 12 allowed keyword arguments, but only
# 4 are documented and the other 8 are mainly internal. The other 8 can be found at
# https://github.com/keras-team/keras/blob/e6784e4302c7b8cd116b74a784f4b78d60e83c26/keras/engine/base_layer.py#L329
# PEP 692 support would be very helpful here and would allow removing the stubtest allowlist entries for
# all layer constructors.
# TODO: Replace last Any after adding tf.keras.mixed_precision.Policy.
_LayerDtype: TypeAlias = tf._DTypeLike | dict[str, Any] | Any
_Constraint: TypeAlias = str | dict[str, Any] | Constraint | None
# Layers' compute_output_shape commonly has instance as the first argument name instead of self.
# This is an artifact of the actual implementation commonly using a decorator to define it.
# Layer.build sometimes has the same weirdness. Both are marked positional-only for this reason.
class Dense(Layer[tf.Tensor, tf.Tensor]):
def __init__(
self,
units: int,
activation: _Activation = None,
use_bias: bool = True,
kernel_initializer: _Initializer = "glorot_uniform",
bias_initializer: _Initializer = "zeros",
kernel_regularizer: _Regularizer = None,
bias_regularizer: _Regularizer = None,
activity_regularizer: _Regularizer = None,
kernel_constraint: _Constraint = None,
bias_constraint: _Constraint = None,
trainable: bool = True,
dtype: _LayerDtype = None,
dynamic: bool = False,
name: str | None = None,
) -> None: ...
class BatchNormalization(Layer[tf.Tensor, tf.Tensor]):
def __init__(
self,
axis: int = -1,
momentum: float = 0.99,
epsilon: float = 0.001,
center: bool = True,
scale: bool = True,
beta_initializer: _Initializer = "zeros",
gamma_initializer: _Initializer = "ones",
moving_mean_initializer: _Initializer = "zeros",
moving_variance_initializer: _Initializer = "ones",
beta_regularizer: _Regularizer = None,
gamma_regularizer: _Regularizer = None,
beta_constraint: _Constraint = None,
gamma_constraint: _Constraint = None,
trainable: bool = True,
dtype: _LayerDtype = None,
dynamic: bool = False,
name: str | None = None,
) -> None: ...
class ReLU(Layer[tf.Tensor, tf.Tensor]):
def __init__(
self,
max_value: float | None = None,
negative_slope: float | None = 0.0,
threshold: float | None = 0.0,
trainable: bool = True,
dtype: _LayerDtype = None,
dynamic: bool = False,
name: str | None = None,
) -> None: ...
class Dropout(Layer[tf.Tensor, tf.Tensor]):
def __init__(
self,
rate: float,
noise_shape: _TensorCompatible | Sequence[int | None] | None = None,
seed: int | None = None,
trainable: bool = True,
dtype: _LayerDtype = None,
dynamic: bool = False,
name: str | None = None,
) -> None: ...
class Embedding(Layer[tf.Tensor, tf.Tensor]):
def __init__(
self,
input_dim: int,
output_dim: int,
embeddings_initializer: _Initializer = "uniform",
embeddings_regularizer: _Regularizer = None,
embeddings_constraint: _Constraint = None,
mask_zero: bool = False,
input_length: int | None = None,
trainable: bool = True,
dtype: _LayerDtype = None,
dynamic: bool = False,
name: str | None = None,
) -> None: ...
def __getattr__(name: str) -> Incomplete: ...
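
To illustrate the Layer[_InputT, _OutputT] parametrization the comments above discuss, a sketch of a layer whose input is not a plain Tensor (the layer itself is hypothetical):

import tensorflow as tf

class SumFeatures(tf.keras.layers.Layer):
    # Takes a dict of tensors and returns one tensor; under these stubs it would
    # be described as Layer[dict[str, tf.Tensor], tf.Tensor], so __call__ is
    # checked against the dict input and Tensor output.
    def call(self, inputs: dict[str, tf.Tensor]) -> tf.Tensor:
        return tf.add_n(list(inputs.values()))

layer = SumFeatures(name="sum_features")
print(layer({"a": tf.constant([1.0, 2.0]), "b": tf.constant([3.0, 4.0])}))  # [4. 6.]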

@@ -0,0 +1,21 @@
from collections.abc import Callable
from typing import Any, overload
from typing_extensions import Self, TypeAlias
from tensorflow import Tensor
class Regularizer:
def get_config(self) -> dict[str, Any]: ...
@classmethod
def from_config(cls, config: dict[str, Any]) -> Self: ...
def __call__(self, __x: Tensor) -> Tensor: ...
_Regularizer: TypeAlias = str | dict[str, Any] | Regularizer | None # noqa: Y047
@overload
def get(identifier: None) -> None: ...
@overload
def get(identifier: str | dict[str, Any] | Regularizer) -> Regularizer: ...
@overload
def get(identifier: Callable[[Tensor], Tensor]) -> Callable[[Tensor], Tensor]: ...
def __getattr__(name: str) -> Any: ...