mirror of https://github.com/davidhalter/typeshed.git

tensorflow: Add (and rename) aliases (#11324)

This commit renames typeshed's private TensorFlow aliases (for example _TensorCompatible, _ScalarTensorCompatible, _DTypeLike, _ShapeLike, _Slice) to public names in tensorflow._aliases, and adds new aliases (AnyArray, FloatArray, IntArray, SparseTensorCompatible, ContainerTensorCompatible, ContainerTensorShape, ContainerInputSpec). The stubs below are updated to import the public names.
@@ -6,9 +6,8 @@ from contextlib import contextmanager
 from enum import Enum
 from types import TracebackType
 from typing import Any, Generic, NoReturn, TypeVar, overload
-from typing_extensions import ParamSpec, Self, TypeAlias
+from typing_extensions import ParamSpec, Self

-import numpy
 from google.protobuf.message import Message
 from tensorflow import (
     data as data,
@@ -19,7 +18,18 @@ from tensorflow import (
     keras as keras,
     math as math,
 )
-from tensorflow._aliases import ContainerGradients, ContainerTensors, ContainerTensorsLike, Gradients, TensorLike
+from tensorflow._aliases import (
+    AnyArray,
+    ContainerGradients,
+    ContainerTensors,
+    ContainerTensorsLike,
+    DTypeLike,
+    Gradients,
+    ShapeLike,
+    Slice,
+    TensorCompatible,
+    TensorLike,
+)
 from tensorflow.core.protobuf import struct_pb2

 # Explicit import of DType is covered by the wildcard, but
@@ -73,15 +83,6 @@ from tensorflow.sparse import SparseTensor as SparseTensor
 # we will skip making Tensor generic. Also good type hints for shapes will
 # run quickly into many places where type system is not strong enough today.
 # So shape typing is probably not worth doing anytime soon.
-_Slice: TypeAlias = int | slice | None
-
-_FloatDataSequence: TypeAlias = Sequence[float] | Sequence[_FloatDataSequence]
-_StrDataSequence: TypeAlias = Sequence[str] | Sequence[_StrDataSequence]
-_ScalarTensorCompatible: TypeAlias = Tensor | str | float | numpy.ndarray[Any, Any] | numpy.number[Any]
-_TensorCompatible: TypeAlias = _ScalarTensorCompatible | Sequence[_TensorCompatible]
-_ShapeLike: TypeAlias = TensorShape | Iterable[_ScalarTensorCompatible | None] | int | Tensor
-_DTypeLike: TypeAlias = DType | str | numpy.dtype[Any]
-
 class Tensor:
     def __init__(self, op: Operation, value_index: int, dtype: DType) -> None: ...
     def consumers(self) -> list[Incomplete]: ...
@@ -96,35 +97,35 @@ class Tensor:
     def name(self) -> str: ...
     @property
     def op(self) -> Operation: ...
-    def numpy(self) -> numpy.ndarray[Any, Any]: ...
+    def numpy(self) -> AnyArray: ...
     def __int__(self) -> int: ...
     def __abs__(self, name: str | None = None) -> Tensor: ...
-    def __add__(self, other: _TensorCompatible) -> Tensor: ...
-    def __radd__(self, other: _TensorCompatible) -> Tensor: ...
-    def __sub__(self, other: _TensorCompatible) -> Tensor: ...
-    def __rsub__(self, other: _TensorCompatible) -> Tensor: ...
-    def __mul__(self, other: _TensorCompatible) -> Tensor: ...
-    def __rmul__(self, other: _TensorCompatible) -> Tensor: ...
-    def __pow__(self, other: _TensorCompatible) -> Tensor: ...
-    def __matmul__(self, other: _TensorCompatible) -> Tensor: ...
-    def __rmatmul__(self, other: _TensorCompatible) -> Tensor: ...
-    def __floordiv__(self, other: _TensorCompatible) -> Tensor: ...
-    def __rfloordiv__(self, other: _TensorCompatible) -> Tensor: ...
-    def __truediv__(self, other: _TensorCompatible) -> Tensor: ...
-    def __rtruediv__(self, other: _TensorCompatible) -> Tensor: ...
+    def __add__(self, other: TensorCompatible) -> Tensor: ...
+    def __radd__(self, other: TensorCompatible) -> Tensor: ...
+    def __sub__(self, other: TensorCompatible) -> Tensor: ...
+    def __rsub__(self, other: TensorCompatible) -> Tensor: ...
+    def __mul__(self, other: TensorCompatible) -> Tensor: ...
+    def __rmul__(self, other: TensorCompatible) -> Tensor: ...
+    def __pow__(self, other: TensorCompatible) -> Tensor: ...
+    def __matmul__(self, other: TensorCompatible) -> Tensor: ...
+    def __rmatmul__(self, other: TensorCompatible) -> Tensor: ...
+    def __floordiv__(self, other: TensorCompatible) -> Tensor: ...
+    def __rfloordiv__(self, other: TensorCompatible) -> Tensor: ...
+    def __truediv__(self, other: TensorCompatible) -> Tensor: ...
+    def __rtruediv__(self, other: TensorCompatible) -> Tensor: ...
     def __neg__(self, name: str | None = None) -> Tensor: ...
-    def __and__(self, other: _TensorCompatible) -> Tensor: ...
-    def __rand__(self, other: _TensorCompatible) -> Tensor: ...
-    def __or__(self, other: _TensorCompatible) -> Tensor: ...
-    def __ror__(self, other: _TensorCompatible) -> Tensor: ...
-    def __eq__(self, other: _TensorCompatible) -> Tensor: ...  # type: ignore[override]
-    def __ne__(self, other: _TensorCompatible) -> Tensor: ...  # type: ignore[override]
-    def __ge__(self, other: _TensorCompatible, name: str | None = None) -> Tensor: ...
-    def __gt__(self, other: _TensorCompatible, name: str | None = None) -> Tensor: ...
-    def __le__(self, other: _TensorCompatible, name: str | None = None) -> Tensor: ...
-    def __lt__(self, other: _TensorCompatible, name: str | None = None) -> Tensor: ...
+    def __and__(self, other: TensorCompatible) -> Tensor: ...
+    def __rand__(self, other: TensorCompatible) -> Tensor: ...
+    def __or__(self, other: TensorCompatible) -> Tensor: ...
+    def __ror__(self, other: TensorCompatible) -> Tensor: ...
+    def __eq__(self, other: TensorCompatible) -> Tensor: ...  # type: ignore[override]
+    def __ne__(self, other: TensorCompatible) -> Tensor: ...  # type: ignore[override]
+    def __ge__(self, other: TensorCompatible, name: str | None = None) -> Tensor: ...
+    def __gt__(self, other: TensorCompatible, name: str | None = None) -> Tensor: ...
+    def __le__(self, other: TensorCompatible, name: str | None = None) -> Tensor: ...
+    def __lt__(self, other: TensorCompatible, name: str | None = None) -> Tensor: ...
     def __bool__(self) -> NoReturn: ...
-    def __getitem__(self, slice_spec: _Slice | tuple[_Slice, ...]) -> Tensor: ...
+    def __getitem__(self, slice_spec: Slice | tuple[Slice, ...]) -> Tensor: ...
     def __len__(self) -> int: ...
     # This only works for rank 0 tensors.
     def __index__(self) -> int: ...
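A minimal sketch (not part of the commit) of what the Tensor signatures above accept now that the TensorCompatible and Slice aliases are public; each line should type-check against these stubs:

import tensorflow as tf

x = tf.constant([1.0, 2.0])
m = tf.constant([[1.0, 2.0], [3.0, 4.0]])
y = x + [0.5, 0.5]  # a plain Sequence[float] is TensorCompatible; __add__ returns tf.Tensor
p = m @ m           # __matmul__ takes TensorCompatible as well
first = x[0]        # __getitem__ indexes with Slice: int | slice | None
mask = x > 0.0      # comparisons yield tf.Tensor; __bool__ -> NoReturn flags truthiness tests
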
@@ -160,25 +161,25 @@ class Variable(Tensor, metaclass=_VariableMetaclass):
         # Real type is VariableDef protobuf type. Can be added after adding script
         # to generate tensorflow protobuf stubs with mypy-protobuf.
         variable_def: Incomplete | None = None,
-        dtype: _DTypeLike | None = None,
+        dtype: DTypeLike | None = None,
         import_scope: str | None = None,
         constraint: Callable[[Tensor], Tensor] | None = None,
         synchronization: VariableSynchronization = ...,
         aggregation: VariableAggregation = ...,
-        shape: _ShapeLike | None = None,
+        shape: ShapeLike | None = None,
         experimental_enable_variable_lifting: _bool = True,
     ) -> None: ...
     def __getattr__(self, name: str) -> Incomplete: ...

 class RaggedTensor(metaclass=ABCMeta):
     def bounding_shape(
-        self, axis: _TensorCompatible | None = None, name: str | None = None, out_type: _DTypeLike | None = None
+        self, axis: TensorCompatible | None = None, name: str | None = None, out_type: DTypeLike | None = None
     ) -> Tensor: ...
     @classmethod
-    def from_sparse(cls, st_input: SparseTensor, name: str | None = None, row_splits_dtype: _DTypeLike = ...) -> RaggedTensor: ...
+    def from_sparse(cls, st_input: SparseTensor, name: str | None = None, row_splits_dtype: DTypeLike = ...) -> RaggedTensor: ...
     def to_sparse(self, name: str | None = None) -> SparseTensor: ...
     def to_tensor(
-        self, default_value: float | str | None = None, name: str | None = None, shape: _ShapeLike | None = None
+        self, default_value: float | str | None = None, name: str | None = None, shape: ShapeLike | None = None
     ) -> Tensor: ...
     def __add__(self, other: RaggedTensor | float, name: str | None = None) -> RaggedTensor: ...
     def __radd__(self, other: RaggedTensor | float, name: str | None = None) -> RaggedTensor: ...
@@ -187,7 +188,7 @@ class RaggedTensor(metaclass=ABCMeta):
     def __rmul__(self, other: RaggedTensor | float, name: str | None = None) -> RaggedTensor: ...
     def __floordiv__(self, other: RaggedTensor | float, name: str | None = None) -> RaggedTensor: ...
     def __truediv__(self, other: RaggedTensor | float, name: str | None = None) -> RaggedTensor: ...
-    def __getitem__(self, slice_spec: _Slice | tuple[_Slice, ...]) -> RaggedTensor: ...
+    def __getitem__(self, slice_spec: Slice | tuple[Slice, ...]) -> RaggedTensor: ...
     def __getattr__(self, name: str) -> Incomplete: ...

 class Operation:
@@ -216,7 +217,7 @@ class Operation:
     def __getattr__(self, name: str) -> Incomplete: ...

 class TensorShape(metaclass=ABCMeta):
-    def __init__(self, dims: _ShapeLike) -> None: ...
+    def __init__(self, dims: ShapeLike) -> None: ...
     @property
     def rank(self) -> int: ...
     def as_list(self) -> list[int | None]: ...
@@ -354,14 +355,14 @@ class TypeSpec(ABC, Generic[_SpecProto]):
     def experimental_from_proto(cls, proto: _SpecProto) -> Self: ...
     @classmethod
     def experimental_type_proto(cls) -> type[_SpecProto]: ...
-    def is_compatible_with(self, spec_or_value: Self | _TensorCompatible | SparseTensor | RaggedTensor) -> _bool: ...
+    def is_compatible_with(self, spec_or_value: Self | TensorCompatible | SparseTensor | RaggedTensor) -> _bool: ...
     # Incomplete as tf.types is not yet covered.
     def is_subtype_of(self, other: Incomplete) -> _bool: ...
     def most_specific_common_supertype(self, others: Sequence[Incomplete]) -> Self | None: ...
     def most_specific_compatible_type(self, other: Self) -> Self: ...

 class TensorSpec(TypeSpec[struct_pb2.TensorSpecProto]):
-    def __init__(self, shape: _ShapeLike, dtype: _DTypeLike = ..., name: str | None = None) -> None: ...
+    def __init__(self, shape: ShapeLike, dtype: DTypeLike = ..., name: str | None = None) -> None: ...
     @property
     def value_type(self) -> Tensor: ...
     @property
@@ -374,10 +375,10 @@ class TensorSpec(TypeSpec[struct_pb2.TensorSpecProto]):
     def from_spec(cls, spec: TypeSpec[Any], name: str | None = None) -> Self: ...
     @classmethod
     def from_tensor(cls, tensor: Tensor, name: str | None = None) -> Self: ...
-    def is_compatible_with(self, spec_or_tensor: Self | _TensorCompatible) -> _bool: ...  # type: ignore[override]
+    def is_compatible_with(self, spec_or_tensor: Self | TensorCompatible) -> _bool: ...  # type: ignore[override]

 class SparseTensorSpec(TypeSpec[struct_pb2.TypeSpecProto]):
-    def __init__(self, shape: _ShapeLike | None = None, dtype: _DTypeLike = ...) -> None: ...
+    def __init__(self, shape: ShapeLike | None = None, dtype: DTypeLike = ...) -> None: ...
     @property
     def value_type(self) -> SparseTensor: ...
     @property
@@ -390,10 +391,10 @@ class SparseTensorSpec(TypeSpec[struct_pb2.TypeSpecProto]):
 class RaggedTensorSpec(TypeSpec[struct_pb2.TypeSpecProto]):
     def __init__(
         self,
-        shape: _ShapeLike | None = None,
-        dtype: _DTypeLike = ...,
+        shape: ShapeLike | None = None,
+        dtype: DTypeLike = ...,
         ragged_rank: int | None = None,
-        row_splits_dtype: _DTypeLike = ...,
+        row_splits_dtype: DTypeLike = ...,
         flat_values_spec: TypeSpec[Any] | None = None,
     ) -> None: ...
     @property
@@ -407,35 +408,35 @@ class RaggedTensorSpec(TypeSpec[struct_pb2.TypeSpecProto]):

 def __getattr__(name: str) -> Incomplete: ...
 def convert_to_tensor(
-    value: _TensorCompatible | IndexedSlices,
-    dtype: _DTypeLike | None = None,
-    dtype_hint: _DTypeLike | None = None,
+    value: TensorCompatible | IndexedSlices,
+    dtype: DTypeLike | None = None,
+    dtype_hint: DTypeLike | None = None,
     name: str | None = None,
 ) -> Tensor: ...
 @overload
-def expand_dims(input: _TensorCompatible, axis: int, name: str | None = None) -> Tensor: ...
+def expand_dims(input: TensorCompatible, axis: int, name: str | None = None) -> Tensor: ...
 @overload
 def expand_dims(input: RaggedTensor, axis: int, name: str | None = None) -> RaggedTensor: ...
 @overload
-def concat(values: _TensorCompatible, axis: int, name: str | None = "concat") -> Tensor: ...
+def concat(values: TensorCompatible, axis: int, name: str | None = "concat") -> Tensor: ...
 @overload
 def concat(values: Sequence[RaggedTensor], axis: int, name: str | None = "concat") -> RaggedTensor: ...
 @overload
 def squeeze(
-    input: _TensorCompatible, axis: int | tuple[int, ...] | list[int] | None = None, name: str | None = None
+    input: TensorCompatible, axis: int | tuple[int, ...] | list[int] | None = None, name: str | None = None
 ) -> Tensor: ...
 @overload
 def squeeze(input: RaggedTensor, axis: int | tuple[int, ...] | list[int], name: str | None = None) -> RaggedTensor: ...
 def tensor_scatter_nd_update(
-    tensor: _TensorCompatible, indices: _TensorCompatible, updates: _TensorCompatible, name: str | None = None
+    tensor: TensorCompatible, indices: TensorCompatible, updates: TensorCompatible, name: str | None = None
 ) -> Tensor: ...
 def constant(
-    value: _TensorCompatible, dtype: _DTypeLike | None = None, shape: _ShapeLike | None = None, name: str | None = "Const"
+    value: TensorCompatible, dtype: DTypeLike | None = None, shape: ShapeLike | None = None, name: str | None = "Const"
 ) -> Tensor: ...
 @overload
-def cast(x: _TensorCompatible, dtype: _DTypeLike, name: str | None = None) -> Tensor: ...
+def cast(x: TensorCompatible, dtype: DTypeLike, name: str | None = None) -> Tensor: ...
 @overload
-def cast(x: SparseTensor, dtype: _DTypeLike, name: str | None = None) -> SparseTensor: ...
+def cast(x: SparseTensor, dtype: DTypeLike, name: str | None = None) -> SparseTensor: ...
 @overload
-def cast(x: RaggedTensor, dtype: _DTypeLike, name: str | None = None) -> RaggedTensor: ...
-def reshape(tensor: _TensorCompatible, shape: _ShapeLike | Tensor, name: str | None = None) -> Tensor: ...
+def cast(x: RaggedTensor, dtype: DTypeLike, name: str | None = None) -> RaggedTensor: ...
+def reshape(tensor: TensorCompatible, shape: ShapeLike | Tensor, name: str | None = None) -> Tensor: ...
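A short sketch (not from the commit) of how the paired cast overloads above preserve the kind of tensor passed in:

import tensorflow as tf

t = tf.cast([1, 2, 3], tf.float32)                               # TensorCompatible in -> tf.Tensor out
r = tf.cast(tf.ragged.constant([[1, 2], [3]]), tf.float32)       # RaggedTensor in -> RaggedTensor out
s = tf.cast(tf.sparse.from_dense([[0, 1], [2, 0]]), tf.float32)  # SparseTensor in -> SparseTensor out
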
@@ -1,30 +1,55 @@
 # Commonly used type aliases.
-# Everything in this module is private for stubs. There is no runtime
-# equivalent.
+# Everything in this module is private for stubs. There is no runtime equivalent.

-from collections.abc import Mapping, Sequence
+from collections.abc import Iterable, Mapping, Sequence
 from typing import Any, Protocol, TypeVar
 from typing_extensions import TypeAlias

-import numpy
+import numpy as np
+import numpy.typing as npt
 import tensorflow as tf
+from tensorflow.keras.layers import InputSpec

-_T1 = TypeVar("_T1")
-ContainerGeneric: TypeAlias = Mapping[str, ContainerGeneric[_T1]] | Sequence[ContainerGeneric[_T1]] | _T1
+_T = TypeVar("_T")
+ContainerGeneric: TypeAlias = Mapping[str, ContainerGeneric[_T]] | Sequence[ContainerGeneric[_T]] | _T

 TensorLike: TypeAlias = tf.Tensor | tf.RaggedTensor | tf.SparseTensor
+SparseTensorLike: TypeAlias = tf.Tensor | tf.SparseTensor
+RaggedTensorLike: TypeAlias = tf.Tensor | tf.RaggedTensor
+# _RaggedTensorLikeT = TypeVar("_RaggedTensorLikeT", tf.Tensor, tf.RaggedTensor)
 Gradients: TypeAlias = tf.Tensor | tf.IndexedSlices

-ContainerTensorsLike: TypeAlias = ContainerGeneric[TensorLike]
-ContainerTensors: TypeAlias = ContainerGeneric[tf.Tensor]
-ContainerGradients: TypeAlias = ContainerGeneric[Gradients]
-
-AnyArray: TypeAlias = numpy.ndarray[Any, Any]
-
-class _KerasSerializable1(Protocol):
+class KerasSerializable1(Protocol):
     def get_config(self) -> dict[str, Any]: ...

-class _KerasSerializable2(Protocol):
+class KerasSerializable2(Protocol):
     __name__: str

-KerasSerializable: TypeAlias = _KerasSerializable1 | _KerasSerializable2
+KerasSerializable: TypeAlias = KerasSerializable1 | KerasSerializable2
+
+Slice: TypeAlias = int | slice | None
+FloatDataSequence: TypeAlias = Sequence[float] | Sequence[FloatDataSequence]
+StrDataSequence: TypeAlias = Sequence[str] | Sequence[StrDataSequence]
+ScalarTensorCompatible: TypeAlias = tf.Tensor | str | float | np.ndarray[Any, Any] | np.number[Any]
+
+TensorCompatible: TypeAlias = ScalarTensorCompatible | Sequence[TensorCompatible]
+# _TensorCompatibleT = TypeVar("_TensorCompatibleT", bound=TensorCompatible)
+# Sparse tensors are very annoying. Some operations work on them, but many do not.
+# You will need to manually verify if an operation supports them. SparseTensorCompatible is intended to be a
+# broader type than TensorCompatible and not all operations will support broader version. If unsure,
+# use TensorCompatible instead.
+SparseTensorCompatible: TypeAlias = TensorCompatible | tf.SparseTensor
+
+ShapeLike: TypeAlias = tf.TensorShape | Iterable[ScalarTensorCompatible | None] | int | tf.Tensor
+DTypeLike: TypeAlias = tf.DType | str | np.dtype[Any] | int
+
+ContainerTensors: TypeAlias = ContainerGeneric[tf.Tensor]
+ContainerTensorsLike: TypeAlias = ContainerGeneric[TensorLike]
+ContainerTensorCompatible: TypeAlias = ContainerGeneric[TensorCompatible]
+ContainerGradients: TypeAlias = ContainerGeneric[Gradients]
+ContainerTensorShape: TypeAlias = ContainerGeneric[tf.TensorShape]
+ContainerInputSpec: TypeAlias = ContainerGeneric[InputSpec]
+
+AnyArray: TypeAlias = npt.NDArray[Any]
+FloatArray: TypeAlias = npt.NDArray[np.float_ | np.float16 | np.float32 | np.float64]
+IntArray: TypeAlias = npt.NDArray[np.int_ | np.uint8 | np.int32 | np.int64]
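To make the new module concrete, a hedged sketch of how a stub might use the public aliases. The function `scale` is hypothetical, and since tensorflow._aliases has no runtime equivalent, these imports only resolve inside the stubs (that is, in a .pyi file):

import tensorflow as tf
from tensorflow._aliases import ContainerGeneric, DTypeLike, ShapeLike, TensorCompatible

def scale(x: TensorCompatible, dtype: DTypeLike | None = None, shape: ShapeLike | None = None) -> tf.Tensor: ...

# ContainerGeneric[tf.Tensor] matches a bare Tensor or arbitrarily nested
# str-keyed mappings / sequences of Tensors, e.g. {"logits": [t1, t2]}.
outputs: ContainerGeneric[tf.Tensor]
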
@@ -6,8 +6,8 @@ from typing_extensions import Self

 import numpy as np
 import tensorflow as tf
-from tensorflow import TypeSpec, _ScalarTensorCompatible, _TensorCompatible
-from tensorflow._aliases import ContainerGeneric
+from tensorflow import TypeSpec
+from tensorflow._aliases import ContainerGeneric, ScalarTensorCompatible, TensorCompatible
 from tensorflow.data import experimental as experimental
 from tensorflow.data.experimental import AUTOTUNE as AUTOTUNE
 from tensorflow.dtypes import DType
@@ -32,7 +32,7 @@ class Dataset(ABC, Generic[_T1]):
     def as_numpy_iterator(self) -> Iterator[np.ndarray[Any, Any]]: ...
     def batch(
         self,
-        batch_size: _ScalarTensorCompatible,
+        batch_size: ScalarTensorCompatible,
         drop_remainder: bool = False,
         num_parallel_calls: int | None = None,
         deterministic: bool | None = None,
@@ -40,11 +40,11 @@ class Dataset(ABC, Generic[_T1]):
     ) -> Dataset[_T1]: ...
     def bucket_by_sequence_length(
         self,
-        element_length_func: Callable[[_T1], _ScalarTensorCompatible],
+        element_length_func: Callable[[_T1], ScalarTensorCompatible],
         bucket_boundaries: Sequence[int],
         bucket_batch_sizes: Sequence[int],
-        padded_shapes: ContainerGeneric[tf.TensorShape | _TensorCompatible] | None = None,
-        padding_values: ContainerGeneric[_ScalarTensorCompatible] | None = None,
+        padded_shapes: ContainerGeneric[tf.TensorShape | TensorCompatible] | None = None,
+        padding_values: ContainerGeneric[ScalarTensorCompatible] | None = None,
         pad_to_bucket_boundary: bool = False,
         no_padding: bool = False,
         drop_remainder: bool = False,
@@ -59,12 +59,12 @@ class Dataset(ABC, Generic[_T1]):
     def concatenate(self, dataset: Dataset[_T1], name: str | None = None) -> Dataset[_T1]: ...
     @staticmethod
     def counter(
-        start: _ScalarTensorCompatible = 0, step: _ScalarTensorCompatible = 1, dtype: DType = ..., name: str | None = None
+        start: ScalarTensorCompatible = 0, step: ScalarTensorCompatible = 1, dtype: DType = ..., name: str | None = None
     ) -> Dataset[tf.Tensor]: ...
     @property
     @abstractmethod
     def element_spec(self) -> ContainerGeneric[TypeSpec[Any]]: ...
-    def enumerate(self, start: _ScalarTensorCompatible = 0, name: str | None = None) -> Dataset[tuple[int, _T1]]: ...
+    def enumerate(self, start: ScalarTensorCompatible = 0, name: str | None = None) -> Dataset[tuple[int, _T1]]: ...
     def filter(self, predicate: Callable[[_T1], bool | tf.Tensor], name: str | None = None) -> Dataset[_T1]: ...
     def flat_map(self, map_func: Callable[[_T1], Dataset[_T2]], name: str | None = None) -> Dataset[_T2]: ...
     # PEP 646 can be used here for a more precise type when better supported.
@@ -80,13 +80,13 @@ class Dataset(ABC, Generic[_T1]):
     @staticmethod
     def from_tensors(tensors: Any, name: str | None = None) -> Dataset[Any]: ...
     @staticmethod
-    def from_tensor_slices(tensors: _TensorCompatible, name: str | None = None) -> Dataset[Any]: ...
+    def from_tensor_slices(tensors: TensorCompatible, name: str | None = None) -> Dataset[Any]: ...
     def get_single_element(self, name: str | None = None) -> _T1: ...
     def group_by_window(
         self,
         key_func: Callable[[_T1], tf.Tensor],
         reduce_func: Callable[[tf.Tensor, Dataset[_T1]], Dataset[_T2]],
-        window_size: _ScalarTensorCompatible | None = None,
+        window_size: ScalarTensorCompatible | None = None,
         window_size_func: Callable[[tf.Tensor], tf.Tensor] | None = None,
         name: str | None = None,
     ) -> Dataset[_T2]: ...
@@ -103,7 +103,7 @@ class Dataset(ABC, Generic[_T1]):
     def __iter__(self) -> Iterator[_T1]: ...
     @staticmethod
     def list_files(
-        file_pattern: str | Sequence[str] | _TensorCompatible,
+        file_pattern: str | Sequence[str] | TensorCompatible,
         shuffle: bool | None = None,
         seed: int | None = None,
         name: str | None = None,
@@ -126,16 +126,16 @@ class Dataset(ABC, Generic[_T1]):
     def options(self) -> Options: ...
     def padded_batch(
         self,
-        batch_size: _ScalarTensorCompatible,
-        padded_shapes: ContainerGeneric[tf.TensorShape | _TensorCompatible] | None = None,
-        padding_values: ContainerGeneric[_ScalarTensorCompatible] | None = None,
+        batch_size: ScalarTensorCompatible,
+        padded_shapes: ContainerGeneric[tf.TensorShape | TensorCompatible] | None = None,
+        padding_values: ContainerGeneric[ScalarTensorCompatible] | None = None,
         drop_remainder: bool = False,
         name: str | None = None,
     ) -> Dataset[_T1]: ...
-    def prefetch(self, buffer_size: _ScalarTensorCompatible, name: str | None = None) -> Dataset[_T1]: ...
+    def prefetch(self, buffer_size: ScalarTensorCompatible, name: str | None = None) -> Dataset[_T1]: ...
     def ragged_batch(
         self,
-        batch_size: _ScalarTensorCompatible,
+        batch_size: ScalarTensorCompatible,
         drop_remainder: bool = False,
         row_splits_dtype: DType = ...,
         name: str | None = None,
@@ -146,33 +146,33 @@ class Dataset(ABC, Generic[_T1]):
     ) -> Dataset[tf.Tensor]: ...
     @staticmethod
     @overload
-    def range(__stop: _ScalarTensorCompatible, output_type: DType = ..., name: str | None = None) -> Dataset[tf.Tensor]: ...
+    def range(__stop: ScalarTensorCompatible, output_type: DType = ..., name: str | None = None) -> Dataset[tf.Tensor]: ...
     @staticmethod
     @overload
     def range(
-        __start: _ScalarTensorCompatible,
-        __stop: _ScalarTensorCompatible,
-        __step: _ScalarTensorCompatible = 1,
+        __start: ScalarTensorCompatible,
+        __stop: ScalarTensorCompatible,
+        __step: ScalarTensorCompatible = 1,
         output_type: DType = ...,
         name: str | None = None,
     ) -> Dataset[tf.Tensor]: ...
     def rebatch(
-        self, batch_size: _ScalarTensorCompatible, drop_remainder: bool = False, name: str | None = None
+        self, batch_size: ScalarTensorCompatible, drop_remainder: bool = False, name: str | None = None
     ) -> Dataset[_T1]: ...
     def reduce(self, initial_state: _T2, reduce_func: Callable[[_T2, _T1], _T2], name: str | None = None) -> _T2: ...
     def rejection_resample(
         self,
-        class_func: Callable[[_T1], _ScalarTensorCompatible],
-        target_dist: _TensorCompatible,
-        initial_dist: _TensorCompatible | None = None,
+        class_func: Callable[[_T1], ScalarTensorCompatible],
+        target_dist: TensorCompatible,
+        initial_dist: TensorCompatible | None = None,
         seed: int | None = None,
         name: str | None = None,
     ) -> Dataset[_T1]: ...
-    def repeat(self, count: _ScalarTensorCompatible | None = None, name: str | None = None) -> Dataset[_T1]: ...
+    def repeat(self, count: ScalarTensorCompatible | None = None, name: str | None = None) -> Dataset[_T1]: ...
     @staticmethod
     def sample_from_datasets(
         datasets: Sequence[Dataset[_T1]],
-        weights: _TensorCompatible | None = None,
+        weights: TensorCompatible | None = None,
         seed: int | None = None,
         stop_on_empty_dataset: bool = False,
         rerandomize_each_iteration: bool | None = None,
@@ -189,36 +189,36 @@ class Dataset(ABC, Generic[_T1]):
         self, initial_state: _T2, scan_func: Callable[[_T2, _T1], tuple[_T2, _T3]], name: str | None = None
     ) -> Dataset[_T3]: ...
     def shard(
-        self, num_shards: _ScalarTensorCompatible, index: _ScalarTensorCompatible, name: str | None = None
+        self, num_shards: ScalarTensorCompatible, index: ScalarTensorCompatible, name: str | None = None
     ) -> Dataset[_T1]: ...
     def shuffle(
         self,
-        buffer_size: _ScalarTensorCompatible,
+        buffer_size: ScalarTensorCompatible,
         seed: int | None = None,
         reshuffle_each_iteration: bool | None = None,
         name: str | None = None,
     ) -> Dataset[_T1]: ...
-    def skip(self, count: _ScalarTensorCompatible, name: str | None = None) -> Dataset[_T1]: ...
+    def skip(self, count: ScalarTensorCompatible, name: str | None = None) -> Dataset[_T1]: ...
     def snapshot(
         self,
         path: str,
         compression: _CompressionTypes = "AUTO",
         reader_func: Callable[[Dataset[Dataset[_T1]]], Dataset[_T1]] | None = None,
-        shard_func: Callable[[_T1], _ScalarTensorCompatible] | None = None,
+        shard_func: Callable[[_T1], ScalarTensorCompatible] | None = None,
         name: str | None = None,
     ) -> Dataset[_T1]: ...
     def sparse_batch(
-        self, batch_size: _ScalarTensorCompatible, row_shape: tf.TensorShape | _TensorCompatible, name: str | None = None
+        self, batch_size: ScalarTensorCompatible, row_shape: tf.TensorShape | TensorCompatible, name: str | None = None
     ) -> Dataset[tf.SparseTensor]: ...
-    def take(self, count: _ScalarTensorCompatible, name: str | None = None) -> Dataset[_T1]: ...
-    def take_while(self, predicate: Callable[[_T1], _ScalarTensorCompatible], name: str | None = None) -> Dataset[_T1]: ...
+    def take(self, count: ScalarTensorCompatible, name: str | None = None) -> Dataset[_T1]: ...
+    def take_while(self, predicate: Callable[[_T1], ScalarTensorCompatible], name: str | None = None) -> Dataset[_T1]: ...
     def unbatch(self, name: str | None = None) -> Dataset[_T1]: ...
     def unique(self, name: str | None = None) -> Dataset[_T1]: ...
     def window(
         self,
-        size: _ScalarTensorCompatible,
-        shift: _ScalarTensorCompatible | None = None,
-        stride: _ScalarTensorCompatible = 1,
+        size: ScalarTensorCompatible,
+        shift: ScalarTensorCompatible | None = None,
+        stride: ScalarTensorCompatible = 1,
         drop_remainder: bool = False,
         name: str | None = None,
     ) -> Dataset[Dataset[_T1]]: ...
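A brief sketch (not part of the diff) of why ScalarTensorCompatible matters here: most Dataset size arguments accept either a plain Python scalar or a 0-d Tensor:

import tensorflow as tf

ds = tf.data.Dataset.range(10)
a = ds.batch(2)                               # int is ScalarTensorCompatible
b = ds.batch(tf.constant(2, dtype=tf.int64))  # so is a scalar tf.Tensor
c = ds.shard(num_shards=2, index=0).take(3)
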
@@ -245,7 +245,7 @@ class Options:
 class TFRecordDataset(Dataset[tf.Tensor]):
     def __init__(
         self,
-        filenames: _TensorCompatible | Dataset[str],
+        filenames: TensorCompatible | Dataset[str],
         compression_type: _CompressionTypes = None,
         buffer_size: int | None = None,
         num_parallel_reads: int | None = None,

@@ -2,7 +2,8 @@ from _typeshed import Incomplete
 from collections.abc import Callable, Sequence
 from typing import Final, TypeVar

-from tensorflow import Tensor, _TensorCompatible
+from tensorflow import Tensor
+from tensorflow._aliases import TensorCompatible
 from tensorflow.data import Dataset

 AUTOTUNE: Final = -1
@@ -25,7 +26,7 @@ def enable_debug_mode() -> None: ...
 def cardinality(dataset: Dataset[object]) -> Tensor: ...
 def sample_from_datasets(
     datasets: Sequence[Dataset[_T1]],
-    weights: _TensorCompatible | None = None,
+    weights: TensorCompatible | None = None,
     seed: int | None = None,
     stop_on_empty_dataset: bool = False,
 ) -> Dataset[_T1]: ...

@@ -4,7 +4,7 @@ from builtins import bool as _bool
 from typing import Any

 import numpy as np
-from tensorflow import _DTypeLike
+from tensorflow._aliases import DTypeLike

 class _DTypeMeta(ABCMeta): ...

@@ -51,5 +51,5 @@ quint8: DType
 quint16: DType
 string: DType

-def as_dtype(type_value: _DTypeLike) -> DType: ...
+def as_dtype(type_value: DTypeLike) -> DType: ...
 def __getattr__(name: str) -> Incomplete: ...

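A quick sketch of what the DTypeLike alias buys as_dtype. Note one assumption: the `int` member of DTypeLike presumably covers raw DataType enum values, which is only visible from the alias definition itself:

import numpy as np
import tensorflow as tf

a = tf.dtypes.as_dtype("float32")          # str -> DType
b = tf.dtypes.as_dtype(np.dtype("int64"))  # np.dtype -> DType
assert a == tf.float32 and b == tf.int64
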
@@ -1,12 +1,12 @@
 from collections.abc import Callable, Iterable, Sequence

 import tensorflow as tf
-from tensorflow import _ShapeLike
+from tensorflow._aliases import ShapeLike
 from tensorflow.python.feature_column import feature_column_v2 as fc, sequence_feature_column as seq_fc

 def numeric_column(
     key: str,
-    shape: _ShapeLike = (1,),
+    shape: ShapeLike = (1,),
     default_value: float | None = None,
     dtype: tf.DType = ...,
     normalizer_fn: Callable[[tf.Tensor], tf.Tensor] | None = None,
@@ -16,7 +16,7 @@ def embedding_column(
     categorical_column: fc.CategoricalColumn,
     dimension: int,
     combiner: fc._Combiners = "mean",
-    initializer: Callable[[_ShapeLike], tf.Tensor] | None = None,
+    initializer: Callable[[ShapeLike], tf.Tensor] | None = None,
     ckpt_to_load_from: str | None = None,
     tensor_name_in_ckpt: str | None = None,
     max_norm: float | None = None,
@@ -27,7 +27,7 @@ def shared_embeddings(
     categorical_columns: Iterable[fc.CategoricalColumn],
     dimension: int,
     combiner: fc._Combiners = "mean",
-    initializer: Callable[[_ShapeLike], tf.Tensor] | None = None,
+    initializer: Callable[[ShapeLike], tf.Tensor] | None = None,
     shared_embedding_collection_name: str | None = None,
     ckpt_to_load_from: str | None = None,
     tensor_name_in_ckpt: str | None = None,
@@ -64,7 +64,7 @@ def crossed_column(
 ) -> fc.CrossedColumn: ...
 def sequence_numeric_column(
     key: str,
-    shape: _ShapeLike = (1,),
+    shape: ShapeLike = (1,),
     default_value: float = 0.0,
     dtype: tf.DType = ...,
     normalizer_fn: Callable[[tf.Tensor], tf.Tensor] | None = None,

@@ -4,8 +4,7 @@ from types import TracebackType
 from typing import Literal, NamedTuple
 from typing_extensions import Self, TypeAlias

-from tensorflow import _DTypeLike, _ShapeLike, _TensorCompatible
-from tensorflow._aliases import TensorLike
+from tensorflow._aliases import DTypeLike, ShapeLike, TensorCompatible, TensorLike
 from tensorflow.io import gfile as gfile

 _FeatureSpecs: TypeAlias = Mapping[str, FixedLenFeature | FixedLenSequenceFeature | VarLenFeature | RaggedFeature | SparseFeature]
@@ -54,23 +53,23 @@ class TFRecordWriter:
 # in this [issue](https://github.com/google/pytype/issues/1410#issue-1669793588). After
 # next release the defaults can be added back.
 class FixedLenFeature(NamedTuple):
-    shape: _ShapeLike
-    dtype: _DTypeLike
-    default_value: _TensorCompatible | None = ...
+    shape: ShapeLike
+    dtype: DTypeLike
+    default_value: TensorCompatible | None = ...

 class FixedLenSequenceFeature(NamedTuple):
-    shape: _ShapeLike
-    dtype: _DTypeLike
+    shape: ShapeLike
+    dtype: DTypeLike
     allow_missing: bool = ...
-    default_value: _TensorCompatible | None = ...
+    default_value: TensorCompatible | None = ...

 class VarLenFeature(NamedTuple):
-    dtype: _DTypeLike
+    dtype: DTypeLike

 class SparseFeature(NamedTuple):
     index_key: str | list[str]
     value_key: str
-    dtype: _DTypeLike
+    dtype: DTypeLike
     size: int | list[int]
     already_sorted: bool = ...

@@ -94,13 +93,13 @@ class RaggedFeature(NamedTuple):

     class UniformRowLength(NamedTuple):  # type: ignore[misc]
         length: int
-    dtype: _DTypeLike
+    dtype: DTypeLike
     value_key: str | None = ...
     partitions: tuple[RowSplits | RowLengths | RowStarts | RowLimits | ValueRowIds | UniformRowLength, ...] = ...  # type: ignore[name-defined]
-    row_splits_dtype: _DTypeLike = ...
+    row_splits_dtype: DTypeLike = ...
     validate: bool = ...

 def parse_example(
-    serialized: _TensorCompatible, features: _FeatureSpecs, example_names: Iterable[str] | None = None, name: str | None = None
+    serialized: TensorCompatible, features: _FeatureSpecs, example_names: Iterable[str] | None = None, name: str | None = None
 ) -> dict[str, TensorLike]: ...
 def __getattr__(name: str) -> Incomplete: ...

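To illustrate the renamed field annotations (a sketch, not from the commit): the NamedTuple feature specs still take ordinary Python values, now typed via ShapeLike, DTypeLike, and TensorCompatible:

import tensorflow as tf

# shape: ShapeLike, dtype: DTypeLike, default_value: TensorCompatible | None
label_spec = tf.io.FixedLenFeature(shape=[1], dtype=tf.int64, default_value=[0])
text_spec = tf.io.VarLenFeature(dtype=tf.string)
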
@@ -3,16 +3,17 @@ from collections.abc import Callable
 from typing import Any, overload
 from typing_extensions import Self, TypeAlias

-from tensorflow import Tensor, _DTypeLike, _ShapeLike, _TensorCompatible
+from tensorflow import Tensor
+from tensorflow._aliases import DTypeLike, ShapeLike, TensorCompatible

 class Initializer:
-    def __call__(self, shape: _ShapeLike, dtype: _DTypeLike | None = None) -> Tensor: ...
+    def __call__(self, shape: ShapeLike, dtype: DTypeLike | None = None) -> Tensor: ...
     def get_config(self) -> dict[str, Any]: ...
     @classmethod
     def from_config(cls, config: dict[str, Any]) -> Self: ...

 class Constant(Initializer):
-    def __init__(self, value: _TensorCompatible = 0) -> None: ...
+    def __init__(self, value: TensorCompatible = 0) -> None: ...

 class GlorotNormal(Initializer):
     def __init__(self, seed: int | None = None) -> None: ...
@@ -21,13 +22,13 @@ class GlorotUniform(Initializer):
     def __init__(self, seed: int | None = None) -> None: ...

 class TruncatedNormal(Initializer):
-    def __init__(self, mean: _TensorCompatible = 0.0, stddev: _TensorCompatible = 0.05, seed: int | None = None) -> None: ...
+    def __init__(self, mean: TensorCompatible = 0.0, stddev: TensorCompatible = 0.05, seed: int | None = None) -> None: ...

 class RandomNormal(Initializer):
-    def __init__(self, mean: _TensorCompatible = 0.0, stddev: _TensorCompatible = 0.05, seed: int | None = None) -> None: ...
+    def __init__(self, mean: TensorCompatible = 0.0, stddev: TensorCompatible = 0.05, seed: int | None = None) -> None: ...

 class RandomUniform(Initializer):
-    def __init__(self, minval: _TensorCompatible = -0.05, maxval: _TensorCompatible = 0.05, seed: int | None = None) -> None: ...
+    def __init__(self, minval: TensorCompatible = -0.05, maxval: TensorCompatible = 0.05, seed: int | None = None) -> None: ...

 class Zeros(Initializer): ...

@@ -38,7 +39,7 @@ truncated_normal = TruncatedNormal
 zeros = Zeros

 _Initializer: TypeAlias = (  # noqa: Y047
-    str | Initializer | type[Initializer] | Callable[[_ShapeLike], Tensor] | dict[str, Any] | None
+    str | Initializer | type[Initializer] | Callable[[ShapeLike], Tensor] | dict[str, Any] | None
 )

 @overload
@@ -46,5 +47,5 @@ def get(identifier: None) -> None: ...
 @overload
 def get(identifier: str | Initializer | dict[str, Any] | type[Initializer]) -> Initializer: ...
 @overload
-def get(identifier: Callable[[_ShapeLike], Tensor]) -> Callable[[_ShapeLike], Tensor]: ...
+def get(identifier: Callable[[ShapeLike], Tensor]) -> Callable[[ShapeLike], Tensor]: ...
 def __getattr__(name: str) -> Incomplete: ...

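A small sketch (not part of the commit) of the get() overloads and the Initializer.__call__ signature above:

import tensorflow as tf

init = tf.keras.initializers.get("glorot_uniform")                        # str -> Initializer
zeros = tf.keras.initializers.get({"class_name": "Zeros", "config": {}})  # dict -> Initializer
w = init(shape=(2, 3), dtype=tf.float32)                                  # ShapeLike, DTypeLike -> tf.Tensor
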
@@ -4,8 +4,8 @@ from typing import Any, Generic, TypeVar, overload
 from typing_extensions import Self, TypeAlias

 import tensorflow as tf
-from tensorflow import Tensor, Variable, VariableAggregation, VariableSynchronization, _TensorCompatible
-from tensorflow._aliases import AnyArray
+from tensorflow import Tensor, Variable, VariableAggregation, VariableSynchronization
+from tensorflow._aliases import AnyArray, DTypeLike, TensorCompatible
 from tensorflow.keras.activations import _Activation
 from tensorflow.keras.constraints import Constraint
 from tensorflow.keras.initializers import _Initializer
@@ -23,7 +23,7 @@ class InputSpec:
     axes: dict[int, int | None] | None
     def __init__(
         self,
-        dtype: tf._DTypeLike | None = None,
+        dtype: DTypeLike | None = None,
         shape: Iterable[int | None] | None = None,
         ndim: int | None = None,
         max_ndim: int | None = None,
@@ -39,7 +39,7 @@ class InputSpec:
 # Most layers have input and output type of just Tensor and when we support default type variables,
 # maybe worth trying.
 class Layer(tf.Module, Generic[_InputT, _OutputT]):
-    # The most general type is _ContainerGeneric[InputSpec] as it really
+    # The most general type is ContainerGeneric[InputSpec] as it really
     # depends on _InputT. For most Layers it is just InputSpec
     # though. Maybe describable with HKT?
     input_spec: InputSpec | Any
@@ -49,13 +49,13 @@ class Layer(tf.Module, Generic[_InputT, _OutputT]):
     @trainable.setter
     def trainable(self, value: bool) -> None: ...
     def __init__(
-        self, trainable: bool = True, name: str | None = None, dtype: tf._DTypeLike | None = None, dynamic: bool = False
+        self, trainable: bool = True, name: str | None = None, dtype: DTypeLike | None = None, dynamic: bool = False
     ) -> None: ...

     # *args/**kwargs are allowed, but have obscure footguns and tensorflow documentation discourages their usage.
     # First argument will automatically be cast to layer's compute dtype, but any other tensor arguments will not be.
     # Also various tensorflow tools/apis can misbehave if they encounter a layer with *args/**kwargs.
-    def __call__(self, inputs: _InputT, *, training: bool = False, mask: _TensorCompatible | None = None) -> _OutputT: ...
+    def __call__(self, inputs: _InputT, *, training: bool = False, mask: TensorCompatible | None = None) -> _OutputT: ...
     def call(self, __inputs: _InputT) -> _OutputT: ...

     # input_shape's real type depends on _InputT, but we can't express that without HKT.
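Since the comment above is easy to miss, a minimal sketch (hypothetical layer, not from the commit) of the calling convention the __call__ signature encodes: inputs positional, training/mask keyword-only, and only the first argument auto-cast to the layer's compute dtype:

import tensorflow as tf

class Doubler(tf.keras.layers.Layer):
    def call(self, inputs: tf.Tensor) -> tf.Tensor:
        return inputs * 2.0

out = Doubler()(tf.constant([1.0, 2.0]), training=False)
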
@@ -69,7 +69,7 @@ class Layer(tf.Module, Generic[_InputT, _OutputT]):
         self,
         name: str | None = None,
         shape: Iterable[int | None] | None = None,
-        dtype: tf._DTypeLike | None = None,
+        dtype: DTypeLike | None = None,
         initializer: _Initializer | None = None,
         regularizer: _Regularizer = None,
         trainable: bool | None = None,
@@ -106,7 +106,7 @@ class Layer(tf.Module, Generic[_InputT, _OutputT]):
 # all layer constructors.

 # TODO: Replace last Any after adding tf.keras.mixed_precision.Policy.
-_LayerDtype: TypeAlias = tf._DTypeLike | dict[str, Any] | Any
+_LayerDtype: TypeAlias = DTypeLike | dict[str, Any] | Any

 _Constraint: TypeAlias = str | dict[str, Any] | Constraint | None

@@ -170,7 +170,7 @@ class Dropout(Layer[tf.Tensor, tf.Tensor]):
     def __init__(
         self,
         rate: float,
-        noise_shape: _TensorCompatible | Sequence[int | None] | None = None,
+        noise_shape: TensorCompatible | Sequence[int | None] | None = None,
         seed: int | None = None,
         trainable: bool = True,
         dtype: _LayerDtype = None,

@@ -4,8 +4,8 @@ from collections.abc import Callable
 from typing import Any, Final, Literal, TypeVar, overload
 from typing_extensions import Self, TypeAlias, TypeGuard

-from tensorflow import Tensor, _TensorCompatible
-from tensorflow._aliases import KerasSerializable
+from tensorflow import Tensor
+from tensorflow._aliases import KerasSerializable, TensorCompatible
 from tensorflow.keras.metrics import (
     binary_crossentropy as binary_crossentropy,
     categorical_crossentropy as categorical_crossentropy,
@@ -21,7 +21,7 @@ class Loss(ABC):
     def from_config(cls, config: dict[str, Any]) -> Self: ...
     def get_config(self) -> dict[str, Any]: ...
     def __call__(
-        self, y_true: _TensorCompatible, y_pred: _TensorCompatible, sample_weight: _TensorCompatible | None = None
+        self, y_true: TensorCompatible, y_pred: TensorCompatible, sample_weight: TensorCompatible | None = None
     ) -> Tensor: ...

 class BinaryCrossentropy(Loss):
@@ -130,9 +130,9 @@ class Reduction:

 _ReductionValues: TypeAlias = Literal["auto", "none", "sum", "sum_over_batch_size"]

-def categorical_hinge(y_true: _TensorCompatible, y_pred: _TensorCompatible) -> Tensor: ...
-def huber(y_true: _TensorCompatible, y_pred: _TensorCompatible, delta: float = 1.0) -> Tensor: ...
-def log_cosh(y_true: _TensorCompatible, y_pred: _TensorCompatible) -> Tensor: ...
+def categorical_hinge(y_true: TensorCompatible, y_pred: TensorCompatible) -> Tensor: ...
+def huber(y_true: TensorCompatible, y_pred: TensorCompatible, delta: float = 1.0) -> Tensor: ...
+def log_cosh(y_true: TensorCompatible, y_pred: TensorCompatible) -> Tensor: ...
 def deserialize(
     name: str | dict[str, Any], custom_objects: dict[str, Any] | None = None, use_legacy_format: bool = False
 ) -> Loss: ...

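A short sketch (not from the commit) showing that TensorCompatible lets plain Python sequences type-check for y_true and y_pred:

import tensorflow as tf

bce = tf.keras.losses.BinaryCrossentropy()
value = bce([0.0, 1.0], [0.1, 0.9])   # Loss.__call__ returns a scalar tf.Tensor
hinge = tf.keras.losses.categorical_hinge([0.0, 1.0], [0.1, 0.9])
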
@@ -1,8 +1,9 @@
-from tensorflow import Tensor, _TensorCompatible
+from tensorflow import Tensor
+from tensorflow._aliases import TensorCompatible

 def binary_crossentropy(
-    y_true: _TensorCompatible, y_pred: _TensorCompatible, from_logits: bool = False, label_smoothing: float = 0.0, axis: int = -1
+    y_true: TensorCompatible, y_pred: TensorCompatible, from_logits: bool = False, label_smoothing: float = 0.0, axis: int = -1
 ) -> Tensor: ...
 def categorical_crossentropy(
-    y_true: _TensorCompatible, y_pred: _TensorCompatible, from_logits: bool = False, label_smoothing: float = 0.0, axis: int = -1
+    y_true: TensorCompatible, y_pred: TensorCompatible, from_logits: bool = False, label_smoothing: float = 0.0, axis: int = -1
 ) -> Tensor: ...

@@ -3,11 +3,12 @@ from collections.abc import Iterable
 from typing import TypeVar, overload
 from typing_extensions import TypeAlias

-from tensorflow import IndexedSlices, RaggedTensor, Tensor, _DTypeLike, _ShapeLike, _TensorCompatible
+from tensorflow import IndexedSlices, RaggedTensor, Tensor
+from tensorflow._aliases import DTypeLike, ShapeLike, TensorCompatible
 from tensorflow.sparse import SparseTensor

-_TensorCompatibleT = TypeVar("_TensorCompatibleT", bound=_TensorCompatible)
-_SparseTensorCompatible: TypeAlias = _TensorCompatible | SparseTensor
+_TensorCompatibleT = TypeVar("_TensorCompatibleT", bound=TensorCompatible)
+_SparseTensorCompatible: TypeAlias = TensorCompatible | SparseTensor

 # Most operations support RaggedTensor. Documentation for them is here,
 # https://www.tensorflow.org/api_docs/python/tf/ragged.
@@ -17,101 +18,101 @@ _SparseTensorCompatible: TypeAlias = _TensorCompatible | SparseTensor
 # SparseTensors. Binary operations with ragged tensors usually only work
 # if both operands are ragged.
 @overload
-def abs(x: _TensorCompatible, name: str | None = None) -> Tensor: ...
+def abs(x: TensorCompatible, name: str | None = None) -> Tensor: ...
 @overload
 def abs(x: SparseTensor, name: str | None = None) -> SparseTensor: ...
 @overload
 def abs(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
 @overload
-def sin(x: _TensorCompatible, name: str | None = None) -> Tensor: ...
+def sin(x: TensorCompatible, name: str | None = None) -> Tensor: ...
 @overload
 def sin(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
 @overload
-def cos(x: _TensorCompatible, name: str | None = None) -> Tensor: ...
+def cos(x: TensorCompatible, name: str | None = None) -> Tensor: ...
 @overload
 def cos(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
 @overload
-def exp(x: _TensorCompatible, name: str | None = None) -> Tensor: ...
+def exp(x: TensorCompatible, name: str | None = None) -> Tensor: ...
 @overload
 def exp(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
 @overload
-def sinh(x: _TensorCompatible, name: str | None = None) -> Tensor: ...
+def sinh(x: TensorCompatible, name: str | None = None) -> Tensor: ...
 @overload
 def sinh(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
 @overload
-def cosh(x: _TensorCompatible, name: str | None = None) -> Tensor: ...
+def cosh(x: TensorCompatible, name: str | None = None) -> Tensor: ...
 @overload
 def cosh(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
 @overload
-def tanh(x: _TensorCompatible, name: str | None = None) -> Tensor: ...
+def tanh(x: TensorCompatible, name: str | None = None) -> Tensor: ...
 @overload
 def tanh(x: SparseTensor, name: str | None = None) -> SparseTensor: ...
 @overload
 def tanh(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
 @overload
-def expm1(x: _TensorCompatible, name: str | None = None) -> Tensor: ...
+def expm1(x: TensorCompatible, name: str | None = None) -> Tensor: ...
 @overload
 def expm1(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
 @overload
-def log(x: _TensorCompatible, name: str | None = None) -> Tensor: ...
+def log(x: TensorCompatible, name: str | None = None) -> Tensor: ...
 @overload
 def log(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
 @overload
-def log1p(x: _TensorCompatible, name: str | None = None) -> Tensor: ...
+def log1p(x: TensorCompatible, name: str | None = None) -> Tensor: ...
 @overload
 def log1p(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
 @overload
-def negative(x: _TensorCompatible, name: str | None = None) -> Tensor: ...
+def negative(x: TensorCompatible, name: str | None = None) -> Tensor: ...
 @overload
 def negative(x: SparseTensor, name: str | None = None) -> SparseTensor: ...
 @overload
 def negative(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
 @overload
-def sigmoid(x: _TensorCompatible, name: str | None = None) -> Tensor: ...
+def sigmoid(x: TensorCompatible, name: str | None = None) -> Tensor: ...
 @overload
 def sigmoid(x: SparseTensor, name: str | None = None) -> SparseTensor: ...
 @overload
-def add(x: _TensorCompatible, y: _TensorCompatible, name: str | None = None) -> Tensor: ...
+def add(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ...
 @overload
 def add(x: RaggedTensor, y: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
 @overload
-def add_n(inputs: Iterable[_TensorCompatible | IndexedSlices], name: str | None = None) -> Tensor: ...
+def add_n(inputs: Iterable[TensorCompatible | IndexedSlices], name: str | None = None) -> Tensor: ...
 @overload
 def add_n(inputs: Iterable[RaggedTensor], name: str | None = None) -> RaggedTensor: ...
 @overload
-def subtract(x: _TensorCompatible, y: _TensorCompatible, name: str | None = None) -> Tensor: ...
+def subtract(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ...
 @overload
-def subtract(x: _TensorCompatible | RaggedTensor, y: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
+def subtract(x: TensorCompatible | RaggedTensor, y: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
 @overload
 def subtract(
-    x: _TensorCompatible | RaggedTensor, y: _TensorCompatible | RaggedTensor, name: str | None = None
+    x: TensorCompatible | RaggedTensor, y: TensorCompatible | RaggedTensor, name: str | None = None
 ) -> Tensor | RaggedTensor: ...
 @overload
-def multiply(x: _TensorCompatible, y: _TensorCompatible, name: str | None = None) -> Tensor: ...
+def multiply(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ...
 @overload
 def multiply(x: RaggedTensor, y: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
 @overload
-def multiply_no_nan(x: _TensorCompatible, y: _TensorCompatible, name: str | None = None) -> Tensor: ...
+def multiply_no_nan(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ...
 @overload
 def multiply_no_nan(x: RaggedTensor, y: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
 @overload
-def divide(x: _TensorCompatible, y: _TensorCompatible, name: str | None = None) -> Tensor: ...
+def divide(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ...
 @overload
 def divide(x: RaggedTensor, y: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
 @overload
-def divide_no_nan(x: _TensorCompatible, y: _TensorCompatible, name: str | None = None) -> Tensor: ...
+def divide_no_nan(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ...
 @overload
 def divide_no_nan(x: RaggedTensor, y: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
 @overload
-def floormod(x: _TensorCompatible, y: _TensorCompatible, name: str | None = None) -> Tensor: ...
+def floormod(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ...
 @overload
 def floormod(x: RaggedTensor, y: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
 @overload
-def ceil(x: _TensorCompatible, name: str | None = None) -> Tensor: ...
+def ceil(x: TensorCompatible, name: str | None = None) -> Tensor: ...
 @overload
 def ceil(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
 @overload
-def floor(x: _TensorCompatible, name: str | None = None) -> Tensor: ...
+def floor(x: TensorCompatible, name: str | None = None) -> Tensor: ...
 @overload
 def floor(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
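A sketch (not part of the diff) of how these paired overloads resolve, per the comment above that binary ops on ragged tensors usually need both operands ragged:

import tensorflow as tf

r = tf.ragged.constant([[1.0, 2.0], [3.0]])
a = tf.math.add(r, r)           # ragged + ragged -> RaggedTensor overload
b = tf.math.sqrt(r)             # unary ops accept a lone RaggedTensor
c = tf.math.add([1.0], [2.0])   # TensorCompatible operands -> tf.Tensor
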
@@ -119,170 +120,170 @@ def floor(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
|
||||
# behave covariantly.
|
||||
def accumulate_n(
|
||||
inputs: list[_TensorCompatibleT] | tuple[_TensorCompatibleT, ...],
|
||||
shape: _ShapeLike | None = None,
|
||||
tensor_dtype: _DTypeLike | None = None,
|
||||
shape: ShapeLike | None = None,
|
||||
tensor_dtype: DTypeLike | None = None,
|
||||
name: str | None = None,
|
||||
) -> Tensor: ...
|
||||
@overload
|
||||
def pow(x: _TensorCompatible, y: _TensorCompatible, name: str | None = None) -> Tensor: ...
|
||||
def pow(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ...
|
||||
@overload
|
||||
def pow(x: RaggedTensor, y: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
|
||||
@overload
|
||||
def reciprocal(x: _TensorCompatible, name: str | None = None) -> Tensor: ...
|
||||
def reciprocal(x: TensorCompatible, name: str | None = None) -> Tensor: ...
|
||||
@overload
|
||||
def reciprocal(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
|
||||
@overload
|
||||
def is_nan(x: _TensorCompatible, name: str | None = None) -> Tensor: ...
|
||||
def is_nan(x: TensorCompatible, name: str | None = None) -> Tensor: ...
|
||||
@overload
|
||||
def is_nan(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
|
||||
@overload
|
||||
def minimum(x: _TensorCompatible, y: _TensorCompatible, name: str | None = None) -> Tensor: ...
|
||||
def minimum(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ...
|
||||
@overload
|
||||
def minimum(x: RaggedTensor, y: _TensorCompatible | RaggedTensor, name: str | None = None) -> RaggedTensor: ...
|
||||
def minimum(x: RaggedTensor, y: TensorCompatible | RaggedTensor, name: str | None = None) -> RaggedTensor: ...
|
||||
@overload
|
||||
def minimum(x: _TensorCompatible | RaggedTensor, y: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
|
||||
def minimum(x: TensorCompatible | RaggedTensor, y: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
|
||||
@overload
|
||||
def maximum(x: _TensorCompatible, y: _TensorCompatible, name: str | None = None) -> Tensor: ...
|
||||
def maximum(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ...
|
||||
@overload
|
||||
def maximum(x: RaggedTensor, y: _TensorCompatible | RaggedTensor, name: str | None = None) -> RaggedTensor: ...
|
||||
def maximum(x: RaggedTensor, y: TensorCompatible | RaggedTensor, name: str | None = None) -> RaggedTensor: ...
|
||||
@overload
|
||||
def maximum(x: _TensorCompatible | RaggedTensor, y: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
|
||||
def maximum(x: TensorCompatible | RaggedTensor, y: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
|
||||
@overload
|
||||
def logical_not(x: _TensorCompatible, name: str | None = None) -> Tensor: ...
|
||||
def logical_not(x: TensorCompatible, name: str | None = None) -> Tensor: ...
|
||||
@overload
|
||||
def logical_not(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
|
||||
@overload
|
||||
def logical_and(x: _TensorCompatible, y: _TensorCompatible, name: str | None = None) -> Tensor: ...
|
||||
def logical_and(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ...
|
||||
@overload
|
||||
def logical_and(x: RaggedTensor, y: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
|
||||
@overload
|
||||
def logical_or(x: _TensorCompatible, y: _TensorCompatible, name: str | None = None) -> Tensor: ...
|
||||
def logical_or(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ...
|
||||
@overload
|
||||
def logical_or(x: RaggedTensor, y: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
|
||||
@overload
|
||||
def logical_xor(x: _TensorCompatible, y: _TensorCompatible, name: str | None = "LogicalXor") -> Tensor: ...
|
||||
def logical_xor(x: TensorCompatible, y: TensorCompatible, name: str | None = "LogicalXor") -> Tensor: ...
|
||||
@overload
|
||||
def logical_xor(x: RaggedTensor, y: RaggedTensor, name: str | None = "LogicalXor") -> RaggedTensor: ...
@overload
def equal(x: _TensorCompatible, y: _TensorCompatible, name: str | None = None) -> Tensor: ...
def equal(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ...
@overload
def equal(x: RaggedTensor, y: RaggedTensor | float, name: str | None = None) -> RaggedTensor: ...
@overload
def not_equal(x: _TensorCompatible, y: _TensorCompatible, name: str | None = None) -> Tensor: ...
def not_equal(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ...
@overload
def not_equal(x: RaggedTensor, y: RaggedTensor | float, name: str | None = None) -> RaggedTensor: ...
@overload
def greater(x: _TensorCompatible, y: _TensorCompatible, name: str | None = None) -> Tensor: ...
def greater(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ...
@overload
def greater(x: RaggedTensor, y: RaggedTensor | float, name: str | None = None) -> RaggedTensor: ...
@overload
def greater_equal(x: _TensorCompatible, y: _TensorCompatible, name: str | None = None) -> Tensor: ...
def greater_equal(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ...
@overload
def greater_equal(x: RaggedTensor, y: RaggedTensor | float, name: str | None = None) -> RaggedTensor: ...
@overload
def less(x: _TensorCompatible, y: _TensorCompatible, name: str | None = None) -> Tensor: ...
def less(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ...
@overload
def less(x: RaggedTensor, y: RaggedTensor | float, name: str | None = None) -> RaggedTensor: ...
@overload
def less_equal(x: _TensorCompatible, y: _TensorCompatible, name: str | None = None) -> Tensor: ...
def less_equal(x: TensorCompatible, y: TensorCompatible, name: str | None = None) -> Tensor: ...
@overload
def less_equal(x: RaggedTensor, y: RaggedTensor | float, name: str | None = None) -> RaggedTensor: ...
def segment_sum(data: _TensorCompatible, segment_ids: _TensorCompatible, name: str | None = None) -> Tensor: ...
def segment_sum(data: TensorCompatible, segment_ids: TensorCompatible, name: str | None = None) -> Tensor: ...
@overload
def sign(x: _TensorCompatible, name: str | None = None) -> Tensor: ...
def sign(x: TensorCompatible, name: str | None = None) -> Tensor: ...
@overload
def sign(x: SparseTensor, name: str | None = None) -> SparseTensor: ...
@overload
def sign(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
@overload
def sqrt(x: _TensorCompatible, name: str | None = None) -> Tensor: ...
def sqrt(x: TensorCompatible, name: str | None = None) -> Tensor: ...
@overload
def sqrt(x: SparseTensor, name: str | None = None) -> SparseTensor: ...
@overload
def sqrt(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
@overload
def rsqrt(x: _TensorCompatible, name: str | None = None) -> Tensor: ...
def rsqrt(x: TensorCompatible, name: str | None = None) -> Tensor: ...
@overload
def rsqrt(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
@overload
def square(x: _TensorCompatible, name: str | None = None) -> Tensor: ...
def square(x: TensorCompatible, name: str | None = None) -> Tensor: ...
@overload
def square(x: SparseTensor, name: str | None = None) -> SparseTensor: ...
@overload
def square(x: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
@overload
def softplus(features: _TensorCompatible, name: str | None = None) -> Tensor: ...
def softplus(features: TensorCompatible, name: str | None = None) -> Tensor: ...
@overload
def softplus(features: RaggedTensor, name: str | None = None) -> RaggedTensor: ...
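# Illustrative sketch (not part of the stub): the paired overloads above let a
# type checker track ragged inputs through these elementwise ops.
#   import tensorflow as tf
#   rt = tf.ragged.constant([[1.0, 4.0], [9.0]])
#   reveal_type(tf.math.sqrt(tf.constant([1.0, 4.0])))  # Tensor
#   reveal_type(tf.math.sqrt(rt))  # RaggedTensor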

# Depending on the method, axis is either a rank 0 tensor or a rank 0/1 tensor.
def reduce_mean(
input_tensor: _TensorCompatible | RaggedTensor,
axis: _TensorCompatible | None = None,
input_tensor: TensorCompatible | RaggedTensor,
axis: TensorCompatible | None = None,
keepdims: bool = False,
name: str | None = None,
) -> Tensor: ...
def reduce_sum(
input_tensor: _TensorCompatible | RaggedTensor,
axis: _TensorCompatible | None = None,
input_tensor: TensorCompatible | RaggedTensor,
axis: TensorCompatible | None = None,
keepdims: bool = False,
name: str | None = None,
) -> Tensor: ...
def reduce_max(
input_tensor: _TensorCompatible | RaggedTensor,
axis: _TensorCompatible | None = None,
input_tensor: TensorCompatible | RaggedTensor,
axis: TensorCompatible | None = None,
keepdims: bool = False,
name: str | None = None,
) -> Tensor: ...
def reduce_min(
input_tensor: _TensorCompatible | RaggedTensor,
axis: _TensorCompatible | None = None,
input_tensor: TensorCompatible | RaggedTensor,
axis: TensorCompatible | None = None,
keepdims: bool = False,
name: str | None = None,
) -> Tensor: ...
def reduce_prod(
input_tensor: _TensorCompatible | RaggedTensor,
axis: _TensorCompatible | None = None,
input_tensor: TensorCompatible | RaggedTensor,
axis: TensorCompatible | None = None,
keepdims: bool = False,
name: str | None = None,
) -> Tensor: ...
def reduce_std(
input_tensor: _TensorCompatible | RaggedTensor,
axis: _TensorCompatible | None = None,
input_tensor: TensorCompatible | RaggedTensor,
axis: TensorCompatible | None = None,
keepdims: bool = False,
name: str | None = None,
) -> Tensor: ...
def reduce_variance(
input_tensor: _TensorCompatible | RaggedTensor,
axis: _TensorCompatible | None = None,
input_tensor: TensorCompatible | RaggedTensor,
axis: TensorCompatible | None = None,
keepdims: bool = False,
name: str | None = None,
) -> Tensor: ...
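# Illustrative sketch (assumed values): axis may be a scalar or a 1-D list of
# axes for these reductions.
#   x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
#   tf.math.reduce_sum(x, axis=0)                 # shape [2]
#   tf.math.reduce_sum(x, axis=[0, 1])            # scalar
#   tf.math.reduce_sum(x, axis=1, keepdims=True)  # shape [2, 1]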
def argmax(
input: _TensorCompatible, axis: _TensorCompatible | None = None, output_type: _DTypeLike = ..., name: str | None = None
input: TensorCompatible, axis: TensorCompatible | None = None, output_type: DTypeLike = ..., name: str | None = None
) -> Tensor: ...
def argmin(
input: _TensorCompatible, axis: _TensorCompatible | None = None, output_type: _DTypeLike = ..., name: str | None = None
input: TensorCompatible, axis: TensorCompatible | None = None, output_type: DTypeLike = ..., name: str | None = None
) -> Tensor: ...
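# Illustrative sketch: output_type selects the index dtype (int64 by default).
#   x = tf.constant([[1.0, 9.0], [3.0, 2.0]])
#   tf.math.argmax(x, axis=1, output_type=tf.int32)  # Tensor of int32 indices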

# Only for bool tensors.
def reduce_any(
input_tensor: _TensorCompatible | RaggedTensor,
axis: _TensorCompatible | None = None,
input_tensor: TensorCompatible | RaggedTensor,
axis: TensorCompatible | None = None,
keepdims: bool = False,
name: str | None = None,
) -> Tensor: ...
def reduce_all(
input_tensor: _TensorCompatible | RaggedTensor,
axis: _TensorCompatible | None = None,
input_tensor: TensorCompatible | RaggedTensor,
axis: TensorCompatible | None = None,
keepdims: bool = False,
name: str | None = None,
) -> Tensor: ...
def count_nonzero(
input: _SparseTensorCompatible,
axis: _TensorCompatible | None = None,
axis: TensorCompatible | None = None,
keepdims: bool | None = None,
dtype: _DTypeLike = ...,
dtype: DTypeLike = ...,
name: str | None = None,
) -> Tensor: ...
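# Sketch: per the _SparseTensorCompatible alias, this stub also lets the type
# checker accept a SparseTensor input here.
#   sp = tf.sparse.SparseTensor(indices=[[0, 0]], values=[3], dense_shape=[2, 2])
#   tf.math.count_nonzero(sp)  # Tensor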
def __getattr__(name: str) -> Incomplete: ...

@@ -9,7 +9,7 @@ from typing import Literal
from typing_extensions import Self, TypeAlias

import tensorflow as tf
from tensorflow import _ShapeLike
from tensorflow._aliases import ShapeLike

_Combiners: TypeAlias = Literal["mean", "sqrtn", "sum"]
_ExampleSpec: TypeAlias = dict[str, tf.io.FixedLenFeature | tf.io.VarLenFeature]
@@ -35,7 +35,7 @@ class SequenceDenseColumn(FeatureColumn, metaclass=ABCMeta): ...
# _cls instead of cls is because collections.namedtuple uses _cls for __new__.
class NumericColumn(DenseColumn):
key: str
shape: _ShapeLike
shape: ShapeLike
default_value: float
dtype: tf.DType
normalizer_fn: Callable[[tf.Tensor], tf.Tensor] | None
@@ -43,7 +43,7 @@ class NumericColumn(DenseColumn):
def __new__(
_cls,
key: str,
shape: _ShapeLike,
shape: ShapeLike,
default_value: float,
dtype: tf.DType,
normalizer_fn: Callable[[tf.Tensor], tf.Tensor] | None,
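# Illustrative sketch: ShapeLike permits an int, a sequence of ints, or a
# TensorShape for shape.
#   col = tf.feature_column.numeric_column("price", shape=(1,), dtype=tf.float32)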
@@ -78,7 +78,7 @@ class EmbeddingColumn(DenseColumn, SequenceDenseColumn):
categorical_column: CategoricalColumn
dimension: int
combiner: _Combiners
initializer: Callable[[_ShapeLike], tf.Tensor] | None
initializer: Callable[[ShapeLike], tf.Tensor] | None
ckpt_to_load_from: str | None
tensor_name_in_ckpt: str | None
max_norm: float | None
@@ -91,7 +91,7 @@ class EmbeddingColumn(DenseColumn, SequenceDenseColumn):
categorical_column: CategoricalColumn,
dimension: int,
combiner: _Combiners,
initializer: Callable[[_ShapeLike], tf.Tensor] | None,
initializer: Callable[[ShapeLike], tf.Tensor] | None,
ckpt_to_load_from: str | None,
tensor_name_in_ckpt: str | None,
max_norm: float | None,
@@ -109,7 +109,7 @@ class SharedEmbeddingColumnCreator:
def __init__(
self,
dimension: int,
initializer: Callable[[_ShapeLike], tf.Tensor] | None,
initializer: Callable[[ShapeLike], tf.Tensor] | None,
ckpt_to_load_from: str | None,
tensor_name_in_ckpt: str | None,
num_buckets: int,
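# Illustrative sketch (hypothetical helper): any Callable[[ShapeLike], tf.Tensor]
# satisfies the initializer parameter typed above.
#   def zeros_init(shape: ShapeLike) -> tf.Tensor:
#       return tf.zeros(shape)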

@@ -2,14 +2,14 @@ from collections.abc import Callable
from typing_extensions import Self

import tensorflow as tf
from tensorflow import _ShapeLike
from tensorflow._aliases import ShapeLike
from tensorflow.python.feature_column.feature_column_v2 import FeatureColumn, SequenceDenseColumn, _ExampleSpec

# Strangely, at runtime most of the Sequence feature columns are defined in feature_column_v2 except
# for this one.
class SequenceNumericColumn(SequenceDenseColumn):
key: str
shape: _ShapeLike
shape: ShapeLike
default_value: float
dtype: tf.DType
normalizer_fn: Callable[[tf.Tensor], tf.Tensor] | None
@@ -17,7 +17,7 @@ class SequenceNumericColumn(SequenceDenseColumn):
def __new__(
_cls,
key: str,
shape: _ShapeLike,
shape: ShapeLike,
default_value: float,
dtype: tf.DType,
normalizer_fn: Callable[[tf.Tensor], tf.Tensor] | None,
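# Illustrative sketch: at runtime tf.feature_column.sequence_numeric_column
# constructs one of these.
#   seq_col = tf.feature_column.sequence_numeric_column("prices", shape=(1,))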

@@ -2,10 +2,11 @@ from _typeshed import Incomplete
from abc import ABCMeta
from typing_extensions import TypeAlias

from tensorflow import Tensor, TensorShape, _TensorCompatible
from tensorflow import Tensor, TensorShape
from tensorflow._aliases import TensorCompatible
from tensorflow.dtypes import DType

_SparseTensorCompatible: TypeAlias = _TensorCompatible | SparseTensor
_SparseTensorCompatible: TypeAlias = TensorCompatible | SparseTensor

class SparseTensor(metaclass=ABCMeta):
@property
@@ -19,7 +20,7 @@ class SparseTensor(metaclass=ABCMeta):
@property
def dtype(self) -> DType: ...
name: str
def __init__(self, indices: _TensorCompatible, values: _TensorCompatible, dense_shape: _TensorCompatible) -> None: ...
def __init__(self, indices: TensorCompatible, values: TensorCompatible, dense_shape: TensorCompatible) -> None: ...
def get_shape(self) -> TensorShape: ...
# Many arithmetic operations are not directly supported. Some have alternatives like tf.sparse.add instead of +.
def __div__(self, y: _SparseTensorCompatible) -> SparseTensor: ...
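# Usage sketch echoing the comment above: + is not defined for SparseTensor,
# so use tf.sparse.add instead.
#   a = tf.sparse.SparseTensor(indices=[[0, 0]], values=[1.0], dense_shape=[2, 2])
#   b = tf.sparse.SparseTensor(indices=[[1, 1]], values=[2.0], dense_shape=[2, 2])
#   c = tf.sparse.add(a, b)  # SparseTensor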