mirror of
https://github.com/davidhalter/typeshed.git
synced 2025-12-16 08:47:39 +08:00
tensorflow: Add missing members to the tensorflow.keras.layers module. (#11333)
Co-authored-by: Jelle Zijlstra <jelle.zijlstra@gmail.com>
Co-authored-by: Rebecca Chen <rechen@google.com>
@@ -48,6 +48,9 @@ tensorflow._aliases
 # but the real module file is completely different name (even package) and dynamically handled.
 # tf.initializers at runtime is <module 'keras.api._v2.keras.initializers' from '...'>
 tensorflow.initializers
+# Other cursed import magic similar to the one above.
+tensorflow.keras.layers.preprocessing
+tensorflow.keras.layers.preprocessing.index_lookup
 # Another cursed import magic similar to the one above.
 tensorflow.distribute.experimental.coordinator
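Example (outside the diff): a minimal sketch of the runtime indirection these allowlist entries suppress, assuming TF 2.x with bundled Keras; the printed module path is illustrative and version-dependent.

# Sketch: why stubtest needs these entries. The public name is a dynamic
# re-export of a Keras module that lives in a different package, so the
# module stubtest observes at runtime never matches the stub's location.
import tensorflow as tf

print(tf.initializers)
# e.g. <module 'keras.api._v2.keras.initializers' from '...'>  (path elided)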
@@ -21,11 +21,7 @@ from tensorflow import (
 from tensorflow._aliases import AnyArray, DTypeLike, ShapeLike, Slice, TensorCompatible
 from tensorflow.autodiff import GradientTape as GradientTape
 from tensorflow.core.protobuf import struct_pb2
-
-# Explicit import of DType is covered by the wildcard, but
-# is necessary to avoid a crash in pytype.
 from tensorflow.dtypes import *
-from tensorflow.dtypes import DType as DType
 from tensorflow.experimental.dtensor import Layout
 from tensorflow.keras import losses as losses
 from tensorflow.linalg import eye as eye
@@ -5,9 +5,11 @@ from collections.abc import Iterable, Mapping, Sequence
 from typing import Any, Protocol, TypeVar
 from typing_extensions import TypeAlias

+import numpy  # pytype needs the unaliased import to resolve DTypeLike
 import numpy as np
 import numpy.typing as npt
 import tensorflow as tf
+from tensorflow.dtypes import DType
 from tensorflow.keras.layers import InputSpec

 _T = TypeVar("_T")
@@ -29,10 +31,12 @@ KerasSerializable: TypeAlias = KerasSerializable1 | KerasSerializable2

 TensorValue: TypeAlias = tf.Tensor  # Alias for a 0D Tensor
+Integer: TypeAlias = TensorValue | int | IntArray | np.number[Any]  # Here IntArray are assumed to be 0D.
+Float: TypeAlias = Integer | float | FloatArray
 Slice: TypeAlias = int | slice | None
 FloatDataSequence: TypeAlias = Sequence[float] | Sequence[FloatDataSequence]
 IntDataSequence: TypeAlias = Sequence[int] | Sequence[IntDataSequence]
 StrDataSequence: TypeAlias = Sequence[str] | Sequence[StrDataSequence]
 DataSequence: TypeAlias = FloatDataSequence | StrDataSequence | IntDataSequence
 ScalarTensorCompatible: TypeAlias = tf.Tensor | str | float | np.ndarray[Any, Any] | np.number[Any]
 UIntTensorCompatible: TypeAlias = tf.Tensor | int | UIntArray
 FloatTensorCompatible: TypeAlias = tf.Tensor | int | IntArray | float | FloatArray | np.number[Any]
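Example (outside the diff): a quick sketch of what the new scalar aliases admit. `Integer` and `Float` are internal to the stubs, so the annotations below are purely illustrative and would not be imported in user code.

# Integer admits a 0D Tensor, a plain int, a 0D int array, or a NumPy scalar;
# Float additionally admits float and 0D float arrays.
import numpy as np
import tensorflow as tf

a: Integer = 32                 # int
b: Integer = np.int64(32)       # np.number[Any]
c: Integer = tf.constant(32)    # 0D Tensor (TensorValue)
d: Float = 0.5                  # float, plus everything Integer admits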
@@ -51,7 +55,7 @@ SparseTensorCompatible: TypeAlias = TensorCompatible | tf.SparseTensor
 TensorOrArray: TypeAlias = tf.Tensor | AnyArray

 ShapeLike: TypeAlias = tf.TensorShape | Iterable[ScalarTensorCompatible | None] | int | tf.Tensor
-DTypeLike: TypeAlias = tf.DType | str | np.dtype[Any] | int
+DTypeLike: TypeAlias = DType | str | numpy.dtype[Any] | int

 ContainerTensors: TypeAlias = ContainerGeneric[tf.Tensor]
 ContainerTensorsLike: TypeAlias = ContainerGeneric[TensorLike]
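The `DTypeLike` rewrite only swaps the spelling of the union members (the direct `DType` import and the unaliased `numpy` import let pytype resolve the alias); the set of accepted values is unchanged. Example (outside the diff):

import numpy as np
import tensorflow as tf

# All of these satisfy DTypeLike as declared above.
tf.zeros([2], dtype=tf.float32)       # DType
tf.zeros([2], dtype="float32")        # str
tf.zeros([2], dtype=np.dtype("f4"))   # numpy.dtype[Any]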
@@ -1,15 +1,17 @@
 from _typeshed import Incomplete
-from collections.abc import Callable, Iterable, Sequence
-from typing import Any, Generic, TypeVar, overload
+from collections.abc import Callable, Iterable, Mapping, Sequence
+from typing import Any, Generic, Literal, TypeVar, overload
 from typing_extensions import Self, TypeAlias

 import tensorflow as tf
 from tensorflow import Tensor, Variable, VariableAggregation, VariableSynchronization
-from tensorflow._aliases import AnyArray, DTypeLike, TensorCompatible
+from tensorflow._aliases import AnyArray, DTypeLike, TensorCompatible, TensorLike
 from tensorflow.keras.activations import _Activation
 from tensorflow.keras.constraints import Constraint
 from tensorflow.keras.initializers import _Initializer
-from tensorflow.keras.regularizers import _Regularizer
+from tensorflow.keras.layers.preprocessing import IntegerLookup as IntegerLookup, StringLookup as StringLookup
+from tensorflow.keras.regularizers import Regularizer, _Regularizer
+from tensorflow.python.feature_column.feature_column_v2 import DenseColumn, SequenceDenseColumn

 _InputT = TypeVar("_InputT", contravariant=True)
 _OutputT = TypeVar("_OutputT", covariant=True)
@@ -127,7 +129,7 @@ class Dense(Layer[tf.Tensor, tf.Tensor]):
         kernel_constraint: _Constraint = None,
         bias_constraint: _Constraint = None,
         trainable: bool = True,
-        dtype: _LayerDtype = None,
+        dtype: _LayerDtype | None = None,
         dynamic: bool = False,
         name: str | None = None,
     ) -> None: ...
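The same `dtype: _LayerDtype | None` fix repeats in the next three hunks for BatchNormalization, ReLU, and Dropout, so the annotation now spells out that the `None` default is admissible. A usage sketch (outside the diff):

import tensorflow as tf

# Both calls type-check against the updated stub.
dense_default = tf.keras.layers.Dense(units=16, dtype=None)
dense_f32 = tf.keras.layers.Dense(units=16, dtype="float32")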
@@ -149,7 +151,7 @@ class BatchNormalization(Layer[tf.Tensor, tf.Tensor]):
         beta_constraint: _Constraint = None,
         gamma_constraint: _Constraint = None,
         trainable: bool = True,
-        dtype: _LayerDtype = None,
+        dtype: _LayerDtype | None = None,
         dynamic: bool = False,
         name: str | None = None,
     ) -> None: ...
@@ -161,7 +163,7 @@ class ReLU(Layer[tf.Tensor, tf.Tensor]):
         negative_slope: float | None = 0.0,
         threshold: float | None = 0.0,
         trainable: bool = True,
-        dtype: _LayerDtype = None,
+        dtype: _LayerDtype | None = None,
         dynamic: bool = False,
         name: str | None = None,
     ) -> None: ...
@@ -173,7 +175,7 @@ class Dropout(Layer[tf.Tensor, tf.Tensor]):
         noise_shape: TensorCompatible | Sequence[int | None] | None = None,
         seed: int | None = None,
         trainable: bool = True,
-        dtype: _LayerDtype = None,
+        dtype: _LayerDtype | None = None,
         dynamic: bool = False,
         name: str | None = None,
     ) -> None: ...
@@ -189,6 +191,133 @@ class Embedding(Layer[tf.Tensor, tf.Tensor]):
         mask_zero: bool = False,
         input_length: int | None = None,
         trainable: bool = True,
         dtype: _LayerDtype | None = None,
         dynamic: bool = False,
         name: str | None = None,
     ) -> None: ...
+
+class Conv2D(Layer[tf.Tensor, tf.Tensor]):
+    def __init__(
+        self,
+        filters: int,
+        kernel_size: int | Iterable[int],
+        strides: int | Iterable[int] = (1, 1),
+        padding: Literal["valid", "same"] = "valid",
+        data_format: None | Literal["channels_last", "channels_first"] = None,
+        dilation_rate: int | Iterable[int] = (1, 1),
+        groups: int = 1,
+        activation: _Activation = None,
+        use_bias: bool = True,
+        kernel_initializer: _Initializer = "glorot_uniform",
+        bias_initializer: _Initializer = "zeros",
+        kernel_regularizer: _Regularizer = None,
+        bias_regularizer: _Regularizer = None,
+        activity_regularizer: _Regularizer = None,
+        kernel_constraint: _Constraint = None,
+        bias_constraint: _Constraint = None,
+        trainable: bool = True,
+        dtype: _LayerDtype | None = None,
+        dynamic: bool = False,
+        name: str | None = None,
+    ) -> None: ...
+
+class Identity(Layer[tf.Tensor, tf.Tensor]):
+    def __init__(
+        self, trainable: bool = True, dtype: _LayerDtype = None, dynamic: bool = False, name: str | None = None
+    ) -> None: ...
+
+class LayerNormalization(Layer[tf.Tensor, tf.Tensor]):
+    def __init__(
+        self,
+        axis: int = -1,
+        epsilon: float = 0.001,
+        center: bool = True,
+        scale: bool = True,
+        beta_initializer: _Initializer = "zeros",
+        gamma_initializer: _Initializer = "ones",
+        beta_regularizer: _Regularizer = None,
+        gamma_regularizer: _Regularizer = None,
+        beta_constraint: _Constraint = None,
+        gamma_constraint: _Constraint = None,
+        trainable: bool = True,
+        dtype: _LayerDtype | None = None,
+        dynamic: bool = False,
+        name: str | None = None,
+    ) -> None: ...
+
+class DenseFeatures(Layer[Mapping[str, TensorLike], tf.Tensor]):
+    def __init__(
+        self,
+        feature_columns: Sequence[DenseColumn | SequenceDenseColumn],
+        trainable: bool = True,
+        dtype: _LayerDtype = None,
+        dynamic: bool = False,
+        name: str | None = None,
+    ) -> None: ...
+
+class MultiHeadAttention(Layer[Any, tf.Tensor]):
+    def __init__(
+        self,
+        num_heads: int,
+        key_dim: int | None,
+        value_dim: int | None = None,
+        dropout: float = 0.0,
+        use_bias: bool = True,
+        output_shape: tuple[int, ...] | None = None,
+        attention_axes: tuple[int, ...] | None = None,
+        kernel_initializer: _Initializer = "glorot_uniform",
+        bias_initializer: _Initializer = "zeros",
+        kernel_regularizer: Regularizer | None = None,
+        bias_regularizer: _Regularizer | None = None,
+        activity_regularizer: _Regularizer | None = None,
+        kernel_constraint: _Constraint | None = None,
+        bias_constraint: _Constraint | None = None,
+        trainable: bool = True,
+        dtype: _LayerDtype | None = None,
+        dynamic: bool = False,
+        name: str | None = None,
+    ) -> None: ...
+    # @override
+    @overload  # type: ignore
+    def __call__(
+        self,
+        query: tf.Tensor,
+        value: tf.Tensor,
+        key: tf.Tensor | None,
+        attention_mask: tf.Tensor | None,
+        return_attention_scores: Literal[False],
+        training: bool,
+        use_causal_mask: bool,
+    ) -> tf.Tensor: ...
+    @overload
+    def __call__(
+        self,
+        query: tf.Tensor,
+        value: tf.Tensor,
+        key: tf.Tensor | None,
+        attention_mask: tf.Tensor | None,
+        return_attention_scores: Literal[True],
+        training: bool,
+        use_causal_mask: bool,
+    ) -> tuple[tf.Tensor, tf.Tensor]: ...
+    @overload
+    def __call__(
+        self,
+        query: tf.Tensor,
+        value: tf.Tensor,
+        key: tf.Tensor | None = None,
+        attention_mask: tf.Tensor | None = None,
+        return_attention_scores: bool = False,
+        training: bool = False,
+        use_causal_mask: bool = False,
+    ) -> tuple[tf.Tensor, tf.Tensor] | tf.Tensor: ...
+
+class GaussianDropout(Layer[tf.Tensor, tf.Tensor]):
+    def __init__(
+        self,
+        rate: float,
+        seed: int | None = None,
+        trainable: bool = True,
+        dtype: _LayerDtype = None,
+        dynamic: bool = False,
+        name: str | None = None,
+    ) -> None: ...
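A usage sketch of the MultiHeadAttention overloads above (outside the diff; shapes illustrative): the Literal value of return_attention_scores selects the return type, so only the fully explicit call narrows to the (output, scores) pair.

import tensorflow as tf

mha = tf.keras.layers.MultiHeadAttention(num_heads=2, key_dim=8)
query = tf.random.normal([1, 4, 16])
value = tf.random.normal([1, 4, 16])

out = mha(query, value)  # catch-all overload: tf.Tensor | tuple[tf.Tensor, tf.Tensor]
# Passing Literal[True] positionally selects the tuple-returning overload.
out2, scores = mha(query, value, None, None, True, False, False)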
@@ -0,0 +1,27 @@
+import abc
+from typing import overload
+
+import tensorflow as tf
+from tensorflow._aliases import AnyArray, DataSequence, Float, Integer, TensorCompatible, TensorLike
+from tensorflow.keras.layers import Layer
+
+class PreprocessingLayer(Layer[TensorLike, TensorLike], metaclass=abc.ABCMeta):
+    @property
+    def is_adapted(self) -> bool: ...
+    @overload  # type: ignore
+    def __call__(self, inputs: tf.Tensor, *, training: bool = False, mask: TensorCompatible | None = None) -> tf.Tensor: ...
+    @overload
+    def __call__(
+        self, inputs: tf.SparseTensor, *, training: bool = False, mask: TensorCompatible | None = None
+    ) -> tf.SparseTensor: ...
+    @overload
+    def __call__(
+        self, inputs: tf.RaggedTensor, *, training: bool = False, mask: TensorCompatible | None = None
+    ) -> tf.RaggedTensor: ...
+    def adapt(
+        self,
+        data: tf.data.Dataset[TensorLike] | AnyArray | DataSequence,
+        batch_size: Integer | None = None,
+        steps: Float | None = None,
+    ) -> None: ...
+    def compile(self, run_eagerly: bool | None = None, steps_per_execution: Integer | None = None) -> None: ...
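A sketch of the adapt-then-call flow this stub describes, using StringLookup from the next hunk as a concrete subclass (outside the diff):

import tensorflow as tf

lookup = tf.keras.layers.StringLookup()     # a concrete PreprocessingLayer
lookup.adapt(["cat", "dog", "cat"])         # DataSequence: Sequence[str]
assert lookup.is_adapted
ids = lookup(tf.constant(["dog", "cat"]))   # Tensor overload -> tf.Tensor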
@@ -0,0 +1,36 @@
+from typing import Literal
+
+from tensorflow._aliases import TensorCompatible
+from tensorflow.keras.layers.preprocessing.index_lookup import _IndexLookup
+
+class StringLookup(_IndexLookup):
+    def __init__(
+        self,
+        max_tokens: int | None = None,
+        num_oov_indices: int = 1,
+        mask_token: str | None = None,
+        oov_token: str = "[UNK]",
+        vocabulary: str | None | TensorCompatible = None,
+        idf_weights: TensorCompatible | None = None,
+        encoding: str = "utf-8",
+        invert: bool = False,
+        output_mode: Literal["int", "count", "multi_hot", "one_hot", "tf_idf"] = "int",
+        sparse: bool = False,
+        pad_to_max_tokens: bool = False,
+    ) -> None: ...
+
+class IntegerLookup(_IndexLookup):
+    def __init__(
+        self,
+        max_tokens: int | None = None,
+        num_oov_indices: int = 1,
+        mask_token: int | None = None,
+        oov_token: int = -1,
+        vocabulary: str | None | TensorCompatible = None,
+        vocabulary_dtype: Literal["int64", "int32"] = "int64",
+        idf_weights: TensorCompatible | None = None,
+        invert: bool = False,
+        output_mode: Literal["int", "count", "multi_hot", "one_hot", "tf_idf"] = "int",
+        sparse: bool = False,
+        pad_to_max_tokens: bool = False,
+    ) -> None: ...
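A usage sketch of the two lookup layers (outside the diff; outputs illustrative):

import tensorflow as tf

# String vocabulary -> integer ids; with num_oov_indices=1, index 0 is "[UNK]".
s = tf.keras.layers.StringLookup(vocabulary=["a", "b", "c"])
print(s(tf.constant(["a", "z"])))   # e.g. [1, 0]

# invert=True maps ids back to the original integer vocabulary.
i = tf.keras.layers.IntegerLookup(vocabulary=[10, 20, 30], invert=True)
print(i(tf.constant([1, 2])))       # e.g. [10, 20]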
@@ -0,0 +1,9 @@
+from _typeshed import Incomplete
+
+import tensorflow as tf
+from tensorflow.keras.layers.experimental.preprocessing import PreprocessingLayer
+
+class _IndexLookup(PreprocessingLayer):
+    def compute_output_signature(self, input_spec: Incomplete) -> tf.TensorSpec: ...
+    def get_vocabulary(self, include_special_tokens: bool = True) -> list[Incomplete]: ...
+    def vocabulary_size(self) -> int: ...