Update mypy-protobuf (#10914)

Co-authored-by: Avasam <samuel.06@hotmail.com>
Co-authored-by: Alex Waygood <Alex.Waygood@Gmail.com>
This commit is contained in:
Jelle Zijlstra
2023-10-22 16:31:02 -07:00
committed by GitHub
parent b9640005eb
commit f9f30cc0f2
47 changed files with 3183 additions and 958 deletions

View File

@@ -2,6 +2,7 @@ version = "2.12.*"
upstream_repository = "https://github.com/tensorflow/tensorflow"
# requires a version of numpy with a `py.typed` file
requires = ["numpy>=1.20", "types-protobuf"]
extra_description = "Partially generated using [mypy-protobuf==3.5.0](https://github.com/nipunn1313/mypy-protobuf/tree/v3.5.0) on tensorflow==2.12.1"
partial_stub = true
[tool.stubtest]

View File

@@ -0,0 +1,94 @@
"""
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
Copyright 2022 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================
"""
import builtins
import collections.abc
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.message
import sys
import tensorflow.tsl.protobuf.autotuning_pb2
if sys.version_info >= (3, 8):
import typing as typing_extensions
else:
import typing_extensions
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
@typing_extensions.final
class AutotuneResults(google.protobuf.message.Message):
    """A collection of algorithms for particular dot/convs. Usually this is "the
    best" algorithm for the particular dot/conv, although that's not strictly
    required.
    Users don't interact with this proto directly. It's used internally to
    facilitate ahead-of-time autotuning -- The string used by
    xla::{Serialize,Load}AutotuneResults is, internally, a serialization of this
    proto.
    LINT.IfChange
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    @typing_extensions.final
    class Entry(google.protobuf.message.Message):
        # One autotuning result: which (device, hlo) pair it applies to and
        # the chosen AutotuneResult.
        DESCRIPTOR: google.protobuf.descriptor.Descriptor

        # *_FIELD_NUMBER constants presumably mirror the proto tag numbers
        # of the corresponding fields -- generated by mypy-protobuf.
        DEVICE_FIELD_NUMBER: builtins.int
        HLO_FIELD_NUMBER: builtins.int
        RESULT_FIELD_NUMBER: builtins.int
        device: builtins.str
        hlo: builtins.str
        @property
        def result(self) -> tensorflow.tsl.protobuf.autotuning_pb2.AutotuneResult:
            """nb: These results are always tied to a particular version of
            cublas/cudnn, but this is *especially* true for cublasLt results. For
            cublasLt gemms, the result is an index into the list of candidate
            algorithms returned by cublasLt. Different version of cublasLt ->
            different list of algos -> different interpretation of results!
            """
        def __init__(
            self,
            *,
            device: builtins.str | None = ...,
            hlo: builtins.str | None = ...,
            result: tensorflow.tsl.protobuf.autotuning_pb2.AutotuneResult | None = ...,
        ) -> None: ...
        # HasField only accepts singular message fields ("result" here).
        def HasField(self, field_name: typing_extensions.Literal["result", b"result"]) -> builtins.bool: ...
        def ClearField(self, field_name: typing_extensions.Literal["device", b"device", "hlo", b"hlo", "result", b"result"]) -> None: ...

    VERSION_FIELD_NUMBER: builtins.int
    DOTS_FIELD_NUMBER: builtins.int
    CONVS_FIELD_NUMBER: builtins.int
    version: builtins.int
    @property
    def dots(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___AutotuneResults.Entry]: ...
    @property
    def convs(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___AutotuneResults.Entry]: ...
    def __init__(
        self,
        *,
        version: builtins.int | None = ...,
        dots: collections.abc.Iterable[global___AutotuneResults.Entry] | None = ...,
        convs: collections.abc.Iterable[global___AutotuneResults.Entry] | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["convs", b"convs", "dots", b"dots", "version", b"version"]) -> None: ...

# Module-level alias emitted by mypy-protobuf so nested scopes can refer to
# the message type unambiguously.
global___AutotuneResults = AutotuneResults

View File

@@ -94,6 +94,27 @@ class _CustomCallApiVersionEnumTypeWrapper(google.protobuf.internal.enum_type_wr
const char* opaque, size_t opaque_len,
XlaCustomCallStatus* status);
"""
API_VERSION_TYPED_FFI: _CustomCallApiVersion.ValueType # 4
"""Api version implementing XLA runtime custom call calling convention. These
custom calls can be registered as an XLA runtime custom call (1) or as XLA
runtime FFI binding (2).
This type of custom call uses custom ABI to pass type information along
with custom call arguments. Also it passes buffer arguments together with
data type, sizes and strides.
Example: (XLA runtime custom call)
absl::Status DoCustomCall(StridedMemrefView arg, float attr);
CustomCall::Bind("custom_call")
.Arg<StridedMemrefView>()
.Attr<float>("attr")
.To(DoCustomCall);
(1) xla/runtime/custom_call.h
(2) xla/runtime/ffi/ffi.h
"""
class CustomCallApiVersion(_CustomCallApiVersion, metaclass=_CustomCallApiVersionEnumTypeWrapper):
"""The version of the API used by the custom call function. The signatures for
@@ -142,6 +163,27 @@ GPU:
const char* opaque, size_t opaque_len,
XlaCustomCallStatus* status);
"""
API_VERSION_TYPED_FFI: CustomCallApiVersion.ValueType # 4
"""Api version implementing XLA runtime custom call calling convention. These
custom calls can be registered as an XLA runtime custom call (1) or as XLA
runtime FFI binding (2).
This type of custom call uses custom ABI to pass type information along
with custom call arguments. Also it passes buffer arguments together with
data type, sizes and strides.
Example: (XLA runtime custom call)
absl::Status DoCustomCall(StridedMemrefView arg, float attr);
CustomCall::Bind("custom_call")
.Arg<StridedMemrefView>()
.Attr<float>("attr")
.To(DoCustomCall);
(1) xla/runtime/custom_call.h
(2) xla/runtime/ffi/ffi.h
"""
global___CustomCallApiVersion = CustomCallApiVersion
class _Kind:
@@ -174,7 +216,7 @@ global___Kind = Kind
@typing_extensions.final
class HloInstructionProto(google.protobuf.message.Message):
"""Serialization of HloInstruction.
Next ID: 80
Next ID: 81
"""
DESCRIPTOR: google.protobuf.descriptor.Descriptor
@@ -254,7 +296,7 @@ class HloInstructionProto(google.protobuf.message.Message):
CHOLESKY_OPTIONS_FIELD_NUMBER: builtins.int
PARAMETER_REPLICATION_FIELD_NUMBER: builtins.int
CUSTOM_CALL_HAS_SIDE_EFFECT_FIELD_NUMBER: builtins.int
CUSTOM_CALL_OUTPUT_OPERAND_ALIASING_FIELD_NUMBER: builtins.int
OUTPUT_OPERAND_ALIASING_FIELD_NUMBER: builtins.int
CUSTOM_CALL_SCHEDULE_FIELD_NUMBER: builtins.int
DELTA_FIELD_NUMBER: builtins.int
INDICES_ARE_SORTED_FIELD_NUMBER: builtins.int
@@ -263,6 +305,7 @@ class HloInstructionProto(google.protobuf.message.Message):
RNG_ALGORITHM_FIELD_NUMBER: builtins.int
COMPARISON_TYPE_FIELD_NUMBER: builtins.int
IS_CROSS_PROGRAM_PREFETCH_FIELD_NUMBER: builtins.int
CROSS_PROGRAM_PREFETCH_INDEX_FIELD_NUMBER: builtins.int
PADDING_TYPE_FIELD_NUMBER: builtins.int
CUSTOM_CALL_API_VERSION_FIELD_NUMBER: builtins.int
ASYNC_GROUP_ID_FIELD_NUMBER: builtins.int
@@ -424,9 +467,9 @@ class HloInstructionProto(google.protobuf.message.Message):
kCustomCall.
"""
@property
def custom_call_output_operand_aliasing(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[tensorflow.compiler.xla.xla_data_pb2.CustomCallOutputOperandAliasing]:
"""A list of CustomCallOutputOperandAliasing pairs that specifies aliasing
buffers between output and operands for kCustomCall.
def output_operand_aliasing(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[tensorflow.compiler.xla.xla_data_pb2.OutputOperandAliasing]:
"""A list of OutputOperandAliasing pairs that specifies aliasing buffers
between output and operands for kCustomCall and kFusion.
"""
custom_call_schedule: global___CustomCallSchedule.ValueType
"""Specifies the desired schedule for the custom-call. The field is only
@@ -450,7 +493,10 @@ class HloInstructionProto(google.protobuf.message.Message):
comparison_type: builtins.str
"""The comparison type used for kCompare."""
is_cross_program_prefetch: builtins.bool
"""Specifies if this is a cross-program-prefetch, used by kCopyStart."""
"""Specifies if this is a cross-program-prefetch, used by kCopyStart.
Deprecated and replaced by optional_cross_program_prefetch_index.
"""
cross_program_prefetch_index: builtins.int
padding_type: tensorflow.compiler.xla.xla_data_pb2.PaddingType.ValueType
"""If a convolution is dynamic, a dynamic padding type will be specified."""
custom_call_api_version: global___CustomCallApiVersion.ValueType
@@ -526,7 +572,7 @@ class HloInstructionProto(google.protobuf.message.Message):
cholesky_options: tensorflow.compiler.xla.xla_data_pb2.CholeskyOptions | None = ...,
parameter_replication: tensorflow.compiler.xla.xla_data_pb2.ParameterReplication | None = ...,
custom_call_has_side_effect: builtins.bool | None = ...,
custom_call_output_operand_aliasing: collections.abc.Iterable[tensorflow.compiler.xla.xla_data_pb2.CustomCallOutputOperandAliasing] | None = ...,
output_operand_aliasing: collections.abc.Iterable[tensorflow.compiler.xla.xla_data_pb2.OutputOperandAliasing] | None = ...,
custom_call_schedule: global___CustomCallSchedule.ValueType | None = ...,
delta: builtins.int | None = ...,
indices_are_sorted: builtins.bool | None = ...,
@@ -535,13 +581,15 @@ class HloInstructionProto(google.protobuf.message.Message):
rng_algorithm: tensorflow.compiler.xla.xla_data_pb2.RandomAlgorithm.ValueType | None = ...,
comparison_type: builtins.str | None = ...,
is_cross_program_prefetch: builtins.bool | None = ...,
cross_program_prefetch_index: builtins.int | None = ...,
padding_type: tensorflow.compiler.xla.xla_data_pb2.PaddingType.ValueType | None = ...,
custom_call_api_version: global___CustomCallApiVersion.ValueType | None = ...,
async_group_id: builtins.int | None = ...,
async_execution_thread: builtins.str | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["cholesky_options", b"cholesky_options", "convolution_dimension_numbers", b"convolution_dimension_numbers", "domain_entry_sharding", b"domain_entry_sharding", "domain_exit_sharding", b"domain_exit_sharding", "dot_dimension_numbers", b"dot_dimension_numbers", "frontend_attributes", b"frontend_attributes", "gather_dimension_numbers", b"gather_dimension_numbers", "literal", b"literal", "metadata", b"metadata", "outfeed_shape", b"outfeed_shape", "padding_config", b"padding_config", "parameter_replication", b"parameter_replication", "precision_config", b"precision_config", "scatter_dimension_numbers", b"scatter_dimension_numbers", "shape", b"shape", "sharding", b"sharding", "triangular_solve_options", b"triangular_solve_options", "window", b"window"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["all_reduce_id", b"all_reduce_id", "async_execution_thread", b"async_execution_thread", "async_group_id", b"async_group_id", "backend_config", b"backend_config", "batch_group_count", b"batch_group_count", "called_computation_ids", b"called_computation_ids", "channel_id", b"channel_id", "cholesky_options", b"cholesky_options", "comparison_direction", b"comparison_direction", "comparison_type", b"comparison_type", "constrain_layout", b"constrain_layout", "control_predecessor_ids", b"control_predecessor_ids", "convolution_dimension_numbers", b"convolution_dimension_numbers", "custom_call_api_version", b"custom_call_api_version", "custom_call_has_side_effect", b"custom_call_has_side_effect", "custom_call_output_operand_aliasing", b"custom_call_output_operand_aliasing", "custom_call_schedule", b"custom_call_schedule", "custom_call_target", b"custom_call_target", "delta", b"delta", "dimensions", b"dimensions", "distribution", b"distribution", "domain_entry_sharding", b"domain_entry_sharding", "domain_exit_sharding", b"domain_exit_sharding", "dot_dimension_numbers", b"dot_dimension_numbers", "dynamic_slice_sizes", b"dynamic_slice_sizes", "epsilon", b"epsilon", "exponent_bits", b"exponent_bits", "feature_group_count", b"feature_group_count", "feature_index", b"feature_index", "fft_length", b"fft_length", "fft_type", b"fft_type", "frontend_attributes", b"frontend_attributes", "fusion_kind", b"fusion_kind", "gather_dimension_numbers", b"gather_dimension_numbers", "gather_slice_sizes", b"gather_slice_sizes", "id", b"id", "indices_are_sorted", b"indices_are_sorted", "infeed_config", b"infeed_config", "is_cross_program_prefetch", b"is_cross_program_prefetch", "is_host_transfer", b"is_host_transfer", "is_stable", b"is_stable", "literal", b"literal", "mantissa_bits", b"mantissa_bits", "metadata", b"metadata", "name", b"name", "opcode", b"opcode", "operand_ids", b"operand_ids", "operand_shapes_with_layout", 
b"operand_shapes_with_layout", "outfeed_config", b"outfeed_config", "outfeed_shape", b"outfeed_shape", "padding_config", b"padding_config", "padding_type", b"padding_type", "parameter_number", b"parameter_number", "parameter_replication", b"parameter_replication", "precision_config", b"precision_config", "replica_groups", b"replica_groups", "rng_algorithm", b"rng_algorithm", "scatter_dimension_numbers", b"scatter_dimension_numbers", "shape", b"shape", "sharding", b"sharding", "slice_dimensions", b"slice_dimensions", "source_target_pairs", b"source_target_pairs", "triangular_solve_options", b"triangular_solve_options", "tuple_index", b"tuple_index", "unique_indices", b"unique_indices", "use_global_device_ids", b"use_global_device_ids", "window", b"window"]) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["cholesky_options", b"cholesky_options", "convolution_dimension_numbers", b"convolution_dimension_numbers", "cross_program_prefetch_index", b"cross_program_prefetch_index", "domain_entry_sharding", b"domain_entry_sharding", "domain_exit_sharding", b"domain_exit_sharding", "dot_dimension_numbers", b"dot_dimension_numbers", "frontend_attributes", b"frontend_attributes", "gather_dimension_numbers", b"gather_dimension_numbers", "literal", b"literal", "metadata", b"metadata", "optional_cross_program_prefetch_index", b"optional_cross_program_prefetch_index", "outfeed_shape", b"outfeed_shape", "padding_config", b"padding_config", "parameter_replication", b"parameter_replication", "precision_config", b"precision_config", "scatter_dimension_numbers", b"scatter_dimension_numbers", "shape", b"shape", "sharding", b"sharding", "triangular_solve_options", b"triangular_solve_options", "window", b"window"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["all_reduce_id", b"all_reduce_id", "async_execution_thread", b"async_execution_thread", "async_group_id", b"async_group_id", "backend_config", b"backend_config", "batch_group_count", b"batch_group_count", "called_computation_ids", b"called_computation_ids", "channel_id", b"channel_id", "cholesky_options", b"cholesky_options", "comparison_direction", b"comparison_direction", "comparison_type", b"comparison_type", "constrain_layout", b"constrain_layout", "control_predecessor_ids", b"control_predecessor_ids", "convolution_dimension_numbers", b"convolution_dimension_numbers", "cross_program_prefetch_index", b"cross_program_prefetch_index", "custom_call_api_version", b"custom_call_api_version", "custom_call_has_side_effect", b"custom_call_has_side_effect", "custom_call_schedule", b"custom_call_schedule", "custom_call_target", b"custom_call_target", "delta", b"delta", "dimensions", b"dimensions", "distribution", b"distribution", "domain_entry_sharding", b"domain_entry_sharding", "domain_exit_sharding", b"domain_exit_sharding", "dot_dimension_numbers", b"dot_dimension_numbers", "dynamic_slice_sizes", b"dynamic_slice_sizes", "epsilon", b"epsilon", "exponent_bits", b"exponent_bits", "feature_group_count", b"feature_group_count", "feature_index", b"feature_index", "fft_length", b"fft_length", "fft_type", b"fft_type", "frontend_attributes", b"frontend_attributes", "fusion_kind", b"fusion_kind", "gather_dimension_numbers", b"gather_dimension_numbers", "gather_slice_sizes", b"gather_slice_sizes", "id", b"id", "indices_are_sorted", b"indices_are_sorted", "infeed_config", b"infeed_config", "is_cross_program_prefetch", b"is_cross_program_prefetch", "is_host_transfer", b"is_host_transfer", "is_stable", b"is_stable", "literal", b"literal", "mantissa_bits", b"mantissa_bits", "metadata", b"metadata", "name", b"name", "opcode", b"opcode", "operand_ids", b"operand_ids", "operand_shapes_with_layout", 
b"operand_shapes_with_layout", "optional_cross_program_prefetch_index", b"optional_cross_program_prefetch_index", "outfeed_config", b"outfeed_config", "outfeed_shape", b"outfeed_shape", "output_operand_aliasing", b"output_operand_aliasing", "padding_config", b"padding_config", "padding_type", b"padding_type", "parameter_number", b"parameter_number", "parameter_replication", b"parameter_replication", "precision_config", b"precision_config", "replica_groups", b"replica_groups", "rng_algorithm", b"rng_algorithm", "scatter_dimension_numbers", b"scatter_dimension_numbers", "shape", b"shape", "sharding", b"sharding", "slice_dimensions", b"slice_dimensions", "source_target_pairs", b"source_target_pairs", "triangular_solve_options", b"triangular_solve_options", "tuple_index", b"tuple_index", "unique_indices", b"unique_indices", "use_global_device_ids", b"use_global_device_ids", "window", b"window"]) -> None: ...
def WhichOneof(self, oneof_group: typing_extensions.Literal["optional_cross_program_prefetch_index", b"optional_cross_program_prefetch_index"]) -> typing_extensions.Literal["cross_program_prefetch_index"] | None: ...
global___HloInstructionProto = HloInstructionProto
@@ -712,7 +760,7 @@ class DynamicParameterBindingProto(google.protobuf.message.Message):
@typing_extensions.final
class Binding(google.protobuf.message.Message):
"""A list of bindings which indicates that the `target_dim_num` in
"""A list of bindings which indicates that the `target_param_dim_num` in
the subshape `target_param_index` of parameter `target_param_num`
is a dynamic dimension and its real dynamic size is represented
by `dynamic_param_index` in parameter `dynamic_param_num`.
@@ -734,7 +782,7 @@ class DynamicParameterBindingProto(google.protobuf.message.Message):
dynamic_param_index = {}
target_param_num = 0
target_param_index = {}
target_param_dim = 0
target_param_dim_num = 0
"""
DESCRIPTOR: google.protobuf.descriptor.Descriptor
@@ -780,16 +828,19 @@ class CrossProgramPrefetch(google.protobuf.message.Message):
PARAMETER_FIELD_NUMBER: builtins.int
INDEX_FIELD_NUMBER: builtins.int
OFFSET_FIELD_NUMBER: builtins.int
parameter: builtins.int
@property
def index(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: ...
offset: builtins.int
def __init__(
self,
*,
parameter: builtins.int | None = ...,
index: collections.abc.Iterable[builtins.int] | None = ...,
offset: builtins.int | None = ...,
) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["index", b"index", "parameter", b"parameter"]) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["index", b"index", "offset", b"offset", "parameter", b"parameter"]) -> None: ...
global___CrossProgramPrefetch = CrossProgramPrefetch
@@ -803,7 +854,7 @@ class HloModuleProto(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _ProfileTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[HloModuleProto._ProfileType.ValueType], builtins.type): # noqa: F821
class _ProfileTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[HloModuleProto._ProfileType.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
INVALID: HloModuleProto._ProfileType.ValueType # 0
FLAG: HloModuleProto._ProfileType.ValueType # 1
@@ -940,28 +991,22 @@ class LogicalBufferProto(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
COMPUTATION_NAME_FIELD_NUMBER: builtins.int
INSTRUCTION_NAME_FIELD_NUMBER: builtins.int
INSTRUCTION_ID_FIELD_NUMBER: builtins.int
SHAPE_INDEX_FIELD_NUMBER: builtins.int
computation_name: builtins.str
"""NOTE: module_name isn't necessary, since all LogicalBuffers are
associated with a single HloModule.
TODO(b/239098765): Remove instruction_name and computation_name.
"""
instruction_name: builtins.str
"""TODO(b/239098765): Remove instruction_name and computation_name."""
instruction_id: builtins.int
@property
def shape_index(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: ...
def __init__(
self,
*,
computation_name: builtins.str | None = ...,
instruction_name: builtins.str | None = ...,
instruction_id: builtins.int | None = ...,
shape_index: collections.abc.Iterable[builtins.int] | None = ...,
) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["computation_name", b"computation_name", "instruction_id", b"instruction_id", "instruction_name", b"instruction_name", "shape_index", b"shape_index"]) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["instruction_id", b"instruction_id", "instruction_name", b"instruction_name", "shape_index", b"shape_index"]) -> None: ...
ID_FIELD_NUMBER: builtins.int
SIZE_FIELD_NUMBER: builtins.int
@@ -1076,7 +1121,7 @@ class HeapSimulatorTrace(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _KindEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[HeapSimulatorTrace.Event._Kind.ValueType], builtins.type): # noqa: F821
class _KindEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[HeapSimulatorTrace.Event._Kind.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
ALLOC: HeapSimulatorTrace.Event._Kind.ValueType # 0
"""A memory region was allocated for the buffer."""
@@ -1488,14 +1533,10 @@ class XlaRuntimeExecutableProto(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
HLO_MODULE_PROTO_FIELD_NUMBER: builtins.int
ENTRY_FUNC_ATTRS_FIELD_NUMBER: builtins.int
OBJ_FILE_FIELD_NUMBER: builtins.int
MLIR_MODULE_FIELD_NUMBER: builtins.int
@property
def hlo_module_proto(self) -> global___HloModuleProto: ...
@property
def entry_func_attrs(self) -> global___EntryFunctionAttributes:
"""XLA-specific attributes of the executable's entry function."""
obj_file: builtins.bytes
"""TODO(b/232263665)): Serialized executable has to know what APIs it has to
be linked with, including the version. For example Gpu executable must be
@@ -1509,11 +1550,10 @@ class XlaRuntimeExecutableProto(google.protobuf.message.Message):
self,
*,
hlo_module_proto: global___HloModuleProto | None = ...,
entry_func_attrs: global___EntryFunctionAttributes | None = ...,
obj_file: builtins.bytes | None = ...,
mlir_module: builtins.str | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["entry_func_attrs", b"entry_func_attrs", "hlo_module_proto", b"hlo_module_proto"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["entry_func_attrs", b"entry_func_attrs", "hlo_module_proto", b"hlo_module_proto", "mlir_module", b"mlir_module", "obj_file", b"obj_file"]) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["hlo_module_proto", b"hlo_module_proto"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["hlo_module_proto", b"hlo_module_proto", "mlir_module", b"mlir_module", "obj_file", b"obj_file"]) -> None: ...
global___XlaRuntimeExecutableProto = XlaRuntimeExecutableProto

View File

@@ -0,0 +1,73 @@
"""
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
"""
import builtins
import google.protobuf.descriptor
import google.protobuf.duration_pb2
import google.protobuf.internal.enum_type_wrapper
import google.protobuf.message
import google.protobuf.timestamp_pb2
import sys
import typing
if sys.version_info >= (3, 10):
import typing as typing_extensions
else:
import typing_extensions
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
@typing_extensions.final
class CompilationLogEntry(google.protobuf.message.Message):
    """Defines XLA compilation metrics."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    class _CompilationStage:
        # Open enum value type, per the mypy-protobuf enum-wrapper pattern.
        ValueType = typing.NewType("ValueType", builtins.int)
        V: typing_extensions.TypeAlias = ValueType

    class _CompilationStageEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[CompilationLogEntry._CompilationStage.ValueType], builtins.type):
        DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
        UNSPECIFIED: CompilationLogEntry._CompilationStage.ValueType  # 0
        END_TO_END: CompilationLogEntry._CompilationStage.ValueType  # 1
        HLO_PASSES: CompilationLogEntry._CompilationStage.ValueType  # 2
        CODE_GENERATION: CompilationLogEntry._CompilationStage.ValueType  # 3
        BACKEND_PASSES: CompilationLogEntry._CompilationStage.ValueType  # 4

    class CompilationStage(_CompilationStage, metaclass=_CompilationStageEnumTypeWrapper):
        """Defines compilation stages for which metrics are collected."""

    # Enum values re-exported on the public enum class, mirroring the wrapper.
    UNSPECIFIED: CompilationLogEntry.CompilationStage.ValueType  # 0
    END_TO_END: CompilationLogEntry.CompilationStage.ValueType  # 1
    HLO_PASSES: CompilationLogEntry.CompilationStage.ValueType  # 2
    CODE_GENERATION: CompilationLogEntry.CompilationStage.ValueType  # 3
    BACKEND_PASSES: CompilationLogEntry.CompilationStage.ValueType  # 4

    # *_FIELD_NUMBER constants presumably mirror the proto tag numbers.
    TIMESTAMP_FIELD_NUMBER: builtins.int
    STAGE_FIELD_NUMBER: builtins.int
    DURATION_FIELD_NUMBER: builtins.int
    TASK_INDEX_FIELD_NUMBER: builtins.int
    @property
    def timestamp(self) -> google.protobuf.timestamp_pb2.Timestamp:
        """Time when the event captured by this log entry occurred."""
    stage: global___CompilationLogEntry.CompilationStage.ValueType
    """Compilation stage recorded by this log entry."""
    @property
    def duration(self) -> google.protobuf.duration_pb2.Duration:
        """Duration of the given compilation stage."""
    task_index: builtins.int
    """Task index from which this log entry was recorded."""
    def __init__(
        self,
        *,
        timestamp: google.protobuf.timestamp_pb2.Timestamp | None = ...,
        stage: global___CompilationLogEntry.CompilationStage.ValueType | None = ...,
        duration: google.protobuf.duration_pb2.Duration | None = ...,
        task_index: builtins.int | None = ...,
    ) -> None: ...
    # HasField only accepts the singular message fields (timestamp, duration).
    def HasField(self, field_name: typing_extensions.Literal["duration", b"duration", "timestamp", b"timestamp"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["duration", b"duration", "stage", b"stage", "task_index", b"task_index", "timestamp", b"timestamp"]) -> None: ...

# Module-level alias emitted by mypy-protobuf so nested scopes can refer to
# the message type unambiguously.
global___CompilationLogEntry = CompilationLogEntry

View File

@@ -65,6 +65,22 @@ class _PrimitiveTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._
and 7 bits for the mantissa.
"""
F64: _PrimitiveType.ValueType # 12
F8E5M2: _PrimitiveType.ValueType # 19
"""FP8 dtypes, as described in this paper: https://arxiv.org/abs/2209.05433
F8E5M2 has 5 exponent bits and 2 mantissa bits, and is similar to the
existing IEEE types.
F8E4M3FN has 4 exponent bits and 3 mantissa bits. The "FN" means only
Finite and NaN values are supported. Unlike IEEE types, infinities are not
supported. NaN is represented when the exponent and mantissa bits are all
1s. All other values are finite.
Support for these dtypes is under development. They do not yet work
properly in most cases.
TODO(b/259609697): Fully support FP8.
"""
F8E4M3FN: _PrimitiveType.ValueType # 20
C64: _PrimitiveType.ValueType # 15
"""Complex values of fixed width.
Paired F32 (real, imag), as in std::complex<float>.
@@ -128,6 +144,22 @@ floating-point format, but uses 1 bit for the sign, 8 bits for the exponent
and 7 bits for the mantissa.
"""
F64: PrimitiveType.ValueType # 12
F8E5M2: PrimitiveType.ValueType # 19
"""FP8 dtypes, as described in this paper: https://arxiv.org/abs/2209.05433
F8E5M2 has 5 exponent bits and 2 mantissa bits, and is similar to the
existing IEEE types.
F8E4M3FN has 4 exponent bits and 3 mantissa bits. The "FN" means only
Finite and NaN values are supported. Unlike IEEE types, infinities are not
supported. NaN is represented when the exponent and mantissa bits are all
1s. All other values are finite.
Support for these dtypes is under development. They do not yet work
properly in most cases.
TODO(b/259609697): Fully support FP8.
"""
F8E4M3FN: PrimitiveType.ValueType # 20
C64: PrimitiveType.ValueType # 15
"""Complex values of fixed width.
Paired F32 (real, imag), as in std::complex<float>.
@@ -434,11 +466,15 @@ class LayoutProto(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
DIM_LEVEL_TYPES_FIELD_NUMBER: builtins.int
DIM_UNIQUE_FIELD_NUMBER: builtins.int
DIM_ORDERED_FIELD_NUMBER: builtins.int
MINOR_TO_MAJOR_FIELD_NUMBER: builtins.int
TILES_FIELD_NUMBER: builtins.int
ELEMENT_SIZE_IN_BITS_FIELD_NUMBER: builtins.int
MEMORY_SPACE_FIELD_NUMBER: builtins.int
INDEX_PRIMITIVE_TYPE_FIELD_NUMBER: builtins.int
POINTER_PRIMITIVE_TYPE_FIELD_NUMBER: builtins.int
PHYSICAL_SHAPE_FIELD_NUMBER: builtins.int
DYNAMIC_SHAPE_METADATA_PREFIX_BYTES_FIELD_NUMBER: builtins.int
@property
def dim_level_types(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[global___DimLevelType.ValueType]:
"""The dimension level type list for this array, specifying the way in which
@@ -446,6 +482,16 @@ class LayoutProto(google.protobuf.message.Message):
array is assumed to be dense.
"""
@property
def dim_unique(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.bool]:
"""Whether each dimension is unique or ordered. Each of the following lists
must be empty, or have one entry for each entry of dim_level_types. If
either list is empty, all dimensions are assumed to be unique and ordered,
respectively. Entries in this list may not be false for some DimLevelType
values (such as DIM_DENSE in particular).
"""
@property
def dim_ordered(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.bool]: ...
@property
def minor_to_major(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]:
"""Sequence of dimension numbers, from minor (fastest varying index) to major
(slowest varying index). This field is required.
@@ -458,17 +504,19 @@ class LayoutProto(google.protobuf.message.Message):
TODO(b/119839262): implement tiling in each backend or add Unimplemented
error.
"""
element_size_in_bits: builtins.int
"""Bit size of each element. If the size is bigger than what the element
type requires, the value is stored in the least significant
bits and the additional most significant bits are filled with 0's.
TODO(b/119839262): implement in each backend or add Unimplemented error.
"""
memory_space: builtins.int
"""Memory space where this array resides. The integer field is interpreted in
a backend-specific manner.
"""
index_primitive_type: global___PrimitiveType.ValueType
"""The integer types to be used for indices and pointers. These fields must
not be used unless the layout represents a sparse array. The PrimitiveType
must correspond to an unsigned integer (U8, U16, U32, or U64).
If not provided, the compiler will use the largest unsigned integer
that is naturally supported by the target device (U32 or U64 in currently
supported devices).
"""
pointer_primitive_type: global___PrimitiveType.ValueType
@property
def physical_shape(self) -> global___ShapeProto:
"""The physical, on-device shape used to represent the shape this layout
@@ -476,18 +524,27 @@ class LayoutProto(google.protobuf.message.Message):
The layout(s) contained within the physical shape should not also contain
a physical shape.
"""
dynamic_shape_metadata_prefix_bytes: builtins.int
"""The dynamic shape metadata size in bytes in front of the shape data. The
field may be non-zero for a static shape whose associated buffer is for a
dynamic shape, e.g. a result of SliceToDynamic.
"""
def __init__(
self,
*,
dim_level_types: collections.abc.Iterable[global___DimLevelType.ValueType] | None = ...,
dim_unique: collections.abc.Iterable[builtins.bool] | None = ...,
dim_ordered: collections.abc.Iterable[builtins.bool] | None = ...,
minor_to_major: collections.abc.Iterable[builtins.int] | None = ...,
tiles: collections.abc.Iterable[global___TileProto] | None = ...,
element_size_in_bits: builtins.int | None = ...,
memory_space: builtins.int | None = ...,
index_primitive_type: global___PrimitiveType.ValueType | None = ...,
pointer_primitive_type: global___PrimitiveType.ValueType | None = ...,
physical_shape: global___ShapeProto | None = ...,
dynamic_shape_metadata_prefix_bytes: builtins.int | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["physical_shape", b"physical_shape"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["dim_level_types", b"dim_level_types", "element_size_in_bits", b"element_size_in_bits", "memory_space", b"memory_space", "minor_to_major", b"minor_to_major", "physical_shape", b"physical_shape", "tiles", b"tiles"]) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["dim_level_types", b"dim_level_types", "dim_ordered", b"dim_ordered", "dim_unique", b"dim_unique", "dynamic_shape_metadata_prefix_bytes", b"dynamic_shape_metadata_prefix_bytes", "index_primitive_type", b"index_primitive_type", "memory_space", b"memory_space", "minor_to_major", b"minor_to_major", "physical_shape", b"physical_shape", "pointer_primitive_type", b"pointer_primitive_type", "tiles", b"tiles"]) -> None: ...
global___LayoutProto = LayoutProto
@@ -848,7 +905,7 @@ class ChannelHandle(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _ChannelTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[ChannelHandle._ChannelType.ValueType], builtins.type): # noqa: F821
class _ChannelTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[ChannelHandle._ChannelType.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
CHANNEL_TYPE_INVALID: ChannelHandle._ChannelType.ValueType # 0
"""Invalid primitive type to serve as default."""
@@ -965,6 +1022,8 @@ class LiteralProto(google.protobuf.message.Message):
BF16S_FIELD_NUMBER: builtins.int
U16S_FIELD_NUMBER: builtins.int
S16S_FIELD_NUMBER: builtins.int
F8E5M2S_FIELD_NUMBER: builtins.int
F8E4M3FNS_FIELD_NUMBER: builtins.int
SPARSE_INDICES_FIELD_NUMBER: builtins.int
@property
def shape(self) -> global___ShapeProto: ...
@@ -997,9 +1056,11 @@ class LiteralProto(google.protobuf.message.Message):
bf16s: builtins.bytes
u16s: builtins.bytes
s16s: builtins.bytes
f8e5m2s: builtins.bytes
f8e4m3fns: builtins.bytes
@property
def sparse_indices(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]:
"""Next = 19"""
"""Next = 21"""
def __init__(
self,
*,
@@ -1020,10 +1081,12 @@ class LiteralProto(google.protobuf.message.Message):
bf16s: builtins.bytes | None = ...,
u16s: builtins.bytes | None = ...,
s16s: builtins.bytes | None = ...,
f8e5m2s: builtins.bytes | None = ...,
f8e4m3fns: builtins.bytes | None = ...,
sparse_indices: collections.abc.Iterable[builtins.int] | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["shape", b"shape"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["bf16s", b"bf16s", "c128s", b"c128s", "c64s", b"c64s", "f16s", b"f16s", "f32s", b"f32s", "f64s", b"f64s", "preds", b"preds", "s16s", b"s16s", "s32s", b"s32s", "s64s", b"s64s", "s8s", b"s8s", "shape", b"shape", "sparse_indices", b"sparse_indices", "tuple_literals", b"tuple_literals", "u16s", b"u16s", "u32s", b"u32s", "u64s", b"u64s", "u8s", b"u8s"]) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["bf16s", b"bf16s", "c128s", b"c128s", "c64s", b"c64s", "f16s", b"f16s", "f32s", b"f32s", "f64s", b"f64s", "f8e4m3fns", b"f8e4m3fns", "f8e5m2s", b"f8e5m2s", "preds", b"preds", "s16s", b"s16s", "s32s", b"s32s", "s64s", b"s64s", "s8s", b"s8s", "shape", b"shape", "sparse_indices", b"sparse_indices", "tuple_literals", b"tuple_literals", "u16s", b"u16s", "u32s", b"u32s", "u64s", b"u64s", "u8s", b"u8s"]) -> None: ...
global___LiteralProto = LiteralProto
@@ -1307,7 +1370,7 @@ class TriangularSolveOptions(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _TransposeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[TriangularSolveOptions._Transpose.ValueType], builtins.type): # noqa: F821
class _TransposeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[TriangularSolveOptions._Transpose.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
TRANSPOSE_INVALID: TriangularSolveOptions._Transpose.ValueType # 0
NO_TRANSPOSE: TriangularSolveOptions._Transpose.ValueType # 1
@@ -1415,7 +1478,7 @@ class OpSharding(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _TypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[OpSharding._Type.ValueType], builtins.type): # noqa: F821
class _TypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[OpSharding._Type.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
REPLICATED: OpSharding._Type.ValueType # 0
"""This sharding is replicated across all devices (implies maximal,
@@ -1573,7 +1636,7 @@ class PrecisionConfig(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _PrecisionEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[PrecisionConfig._Precision.ValueType], builtins.type): # noqa: F821
class _PrecisionEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[PrecisionConfig._Precision.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
DEFAULT: PrecisionConfig._Precision.ValueType # 0
HIGH: PrecisionConfig._Precision.ValueType # 1
@@ -1671,9 +1734,9 @@ class WhileLoopBackendConfig(google.protobuf.message.Message):
global___WhileLoopBackendConfig = WhileLoopBackendConfig
@typing_extensions.final
class CustomCallOutputOperandAliasing(google.protobuf.message.Message):
"""Specifies a pair of output/operand buffers for kCustomCall that alias each
other.
class OutputOperandAliasing(google.protobuf.message.Message):
"""Specifies a pair of output/operand buffers that alias each other for
kCustomCall and kFusion
"""
DESCRIPTOR: google.protobuf.descriptor.Descriptor
@@ -1695,4 +1758,4 @@ class CustomCallOutputOperandAliasing(google.protobuf.message.Message):
) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["operand_index", b"operand_index", "operand_shape_index", b"operand_shape_index", "output_shape_index", b"output_shape_index"]) -> None: ...
global___CustomCallOutputOperandAliasing = CustomCallOutputOperandAliasing
global___OutputOperandAliasing = OutputOperandAliasing

View File

@@ -47,7 +47,7 @@ class ApiDef(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _VisibilityEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[ApiDef._Visibility.ValueType], builtins.type): # noqa: F821
class _VisibilityEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[ApiDef._Visibility.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
DEFAULT_VISIBILITY: ApiDef._Visibility.ValueType # 0
"""Normally this is "VISIBLE" unless you are inheriting a

View File

@@ -132,7 +132,7 @@ class CardinalityOptions(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _ComputeLevelEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[CardinalityOptions._ComputeLevel.ValueType], builtins.type): # noqa: F821
class _ComputeLevelEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[CardinalityOptions._ComputeLevel.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
CARDINALITY_COMPUTE_UNSPECIFIED: CardinalityOptions._ComputeLevel.ValueType # 0
CARDINALITY_COMPUTE_LOW: CardinalityOptions._ComputeLevel.ValueType # 1
@@ -294,7 +294,7 @@ class Options(google.protobuf.message.Message):
"""Message stored with Dataset objects to control how datasets are processed and
optimized.
next: 8
next: 9
"""
DESCRIPTOR: google.protobuf.descriptor.Descriptor
@@ -306,6 +306,7 @@ class Options(google.protobuf.message.Message):
SLACK_FIELD_NUMBER: builtins.int
THREADING_OPTIONS_FIELD_NUMBER: builtins.int
EXTERNAL_STATE_POLICY_FIELD_NUMBER: builtins.int
SYMBOLIC_CHECKPOINT_FIELD_NUMBER: builtins.int
deterministic: builtins.bool
@property
def autotune_options(self) -> global___AutotuneOptions:
@@ -321,6 +322,7 @@ class Options(google.protobuf.message.Message):
def threading_options(self) -> global___ThreadingOptions:
"""The threading options associated with the dataset."""
external_state_policy: global___ExternalStatePolicy.ValueType
symbolic_checkpoint: builtins.bool
def __init__(
self,
*,
@@ -331,14 +333,17 @@ class Options(google.protobuf.message.Message):
slack: builtins.bool | None = ...,
threading_options: global___ThreadingOptions | None = ...,
external_state_policy: global___ExternalStatePolicy.ValueType | None = ...,
symbolic_checkpoint: builtins.bool | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["autotune_options", b"autotune_options", "deterministic", b"deterministic", "distribute_options", b"distribute_options", "external_state_policy", b"external_state_policy", "optimization_options", b"optimization_options", "optional_deterministic", b"optional_deterministic", "optional_external_state_policy", b"optional_external_state_policy", "optional_slack", b"optional_slack", "slack", b"slack", "threading_options", b"threading_options"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["autotune_options", b"autotune_options", "deterministic", b"deterministic", "distribute_options", b"distribute_options", "external_state_policy", b"external_state_policy", "optimization_options", b"optimization_options", "optional_deterministic", b"optional_deterministic", "optional_external_state_policy", b"optional_external_state_policy", "optional_slack", b"optional_slack", "slack", b"slack", "threading_options", b"threading_options"]) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["autotune_options", b"autotune_options", "deterministic", b"deterministic", "distribute_options", b"distribute_options", "external_state_policy", b"external_state_policy", "optimization_options", b"optimization_options", "optional_deterministic", b"optional_deterministic", "optional_external_state_policy", b"optional_external_state_policy", "optional_slack", b"optional_slack", "optional_symbolic_checkpoint", b"optional_symbolic_checkpoint", "slack", b"slack", "symbolic_checkpoint", b"symbolic_checkpoint", "threading_options", b"threading_options"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["autotune_options", b"autotune_options", "deterministic", b"deterministic", "distribute_options", b"distribute_options", "external_state_policy", b"external_state_policy", "optimization_options", b"optimization_options", "optional_deterministic", b"optional_deterministic", "optional_external_state_policy", b"optional_external_state_policy", "optional_slack", b"optional_slack", "optional_symbolic_checkpoint", b"optional_symbolic_checkpoint", "slack", b"slack", "symbolic_checkpoint", b"symbolic_checkpoint", "threading_options", b"threading_options"]) -> None: ...
@typing.overload
def WhichOneof(self, oneof_group: typing_extensions.Literal["optional_deterministic", b"optional_deterministic"]) -> typing_extensions.Literal["deterministic"] | None: ...
@typing.overload
def WhichOneof(self, oneof_group: typing_extensions.Literal["optional_external_state_policy", b"optional_external_state_policy"]) -> typing_extensions.Literal["external_state_policy"] | None: ...
@typing.overload
def WhichOneof(self, oneof_group: typing_extensions.Literal["optional_slack", b"optional_slack"]) -> typing_extensions.Literal["slack"] | None: ...
@typing.overload
def WhichOneof(self, oneof_group: typing_extensions.Literal["optional_symbolic_checkpoint", b"optional_symbolic_checkpoint"]) -> typing_extensions.Literal["symbolic_checkpoint"] | None: ...
global___Options = Options

View File

@@ -0,0 +1,105 @@
"""
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
"""
import builtins
import collections.abc
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.message
import sys
import tensorflow.core.framework.tensor_pb2
import tensorflow.core.framework.tensor_shape_pb2
import tensorflow.core.framework.types_pb2
if sys.version_info >= (3, 8):
import typing as typing_extensions
else:
import typing_extensions
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
@typing_extensions.final
class CompressedComponentMetadata(google.protobuf.message.Message):
    """This file contains protocol buffers for working with tf.data Datasets.
    Metadata describing a compressed component of a dataset element.
    """
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    # Generated proto field tag numbers, usable with the descriptor/reflection APIs.
    DTYPE_FIELD_NUMBER: builtins.int
    TENSOR_SHAPE_FIELD_NUMBER: builtins.int
    UNCOMPRESSED_BYTES_FIELD_NUMBER: builtins.int
    dtype: tensorflow.core.framework.types_pb2.DataType.ValueType
    """The dtype of the component tensor."""
    @property
    def tensor_shape(self) -> tensorflow.core.framework.tensor_shape_pb2.TensorShapeProto:
        """The shape of the component tensor."""
    @property
    def uncompressed_bytes(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]:
        """The amount of uncompressed tensor data.
        - For string tensors, there is an element for each string indicating the
          size of the string.
        - For all other tensors, there is a single element indicating the size of
          the tensor.
        """
    # All fields are optional keyword-only arguments at construction time,
    # matching protobuf Python generated-code conventions.
    def __init__(
        self,
        *,
        dtype: tensorflow.core.framework.types_pb2.DataType.ValueType | None = ...,
        tensor_shape: tensorflow.core.framework.tensor_shape_pb2.TensorShapeProto | None = ...,
        uncompressed_bytes: collections.abc.Iterable[builtins.int] | None = ...,
    ) -> None: ...
    # HasField is only valid for the singular message field tensor_shape;
    # the Literal overload restricts callers to exactly that name.
    def HasField(self, field_name: typing_extensions.Literal["tensor_shape", b"tensor_shape"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["dtype", b"dtype", "tensor_shape", b"tensor_shape", "uncompressed_bytes", b"uncompressed_bytes"]) -> None: ...
global___CompressedComponentMetadata = CompressedComponentMetadata
@typing_extensions.final
class CompressedElement(google.protobuf.message.Message):
    """A compressed dataset element: the compressed bytes for all components,
    per-component metadata, and a format version number.
    """
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    # Generated proto field tag numbers.
    DATA_FIELD_NUMBER: builtins.int
    COMPONENT_METADATA_FIELD_NUMBER: builtins.int
    VERSION_FIELD_NUMBER: builtins.int
    data: builtins.bytes
    """Compressed tensor bytes for all components of the element."""
    @property
    def component_metadata(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___CompressedComponentMetadata]:
        """Metadata for the components of the element."""
    version: builtins.int
    """Version of the CompressedElement. CompressedElements may be stored on disk
    and read back by later versions of code, so we store a version number to
    help readers understand which version they are reading. When you add a new
    field to this proto, you need to increment kCompressedElementVersion in
    tensorflow/core/data/compression_utils.cc.
    """
    def __init__(
        self,
        *,
        data: builtins.bytes | None = ...,
        component_metadata: collections.abc.Iterable[global___CompressedComponentMetadata] | None = ...,
        version: builtins.int | None = ...,
    ) -> None: ...
    # No HasField stub: this message has no singular submessage/oneof fields.
    def ClearField(self, field_name: typing_extensions.Literal["component_metadata", b"component_metadata", "data", b"data", "version", b"version"]) -> None: ...
global___CompressedElement = CompressedElement
@typing_extensions.final
class UncompressedElement(google.protobuf.message.Message):
    """An uncompressed dataset element."""
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    # Generated proto field tag number.
    COMPONENTS_FIELD_NUMBER: builtins.int
    # The element's component tensors, stored as plain TensorProtos.
    @property
    def components(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[tensorflow.core.framework.tensor_pb2.TensorProto]: ...
    def __init__(
        self,
        *,
        components: collections.abc.Iterable[tensorflow.core.framework.tensor_pb2.TensorProto] | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["components", b"components"]) -> None: ...
global___UncompressedElement = UncompressedElement

View File

@@ -195,6 +195,17 @@ class _FullTypeIdEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._Enu
Examples:
TFT_ENCODING[TFT_INT32, TFT_STRING] is an integer encoded as string.
"""
TFT_SHAPE_TENSOR: _FullTypeId.ValueType # 1005
"""The type of "shape tensors" where the runtime value is the shape of
some tensor(s), i.e. the output of tf.shape.
Shape tensors have special, host-only placement, in contrast to
TFT_TENSOR[TFT_INT32] which is the type of a normal numeric tensor
with no special placement.
Examples:
TFT_SHAPE_TENSOR[TFT_INT32] is the most common
TFT_SHAPE_TENSOR[TFT_INT64] is also allowed
"""
TFT_BOOL: _FullTypeId.ValueType # 200
"""Type attributes. These always appear in the parametrization of a type,
never alone. For example, there is no such thing as a "bool" TensorFlow
@@ -460,6 +471,17 @@ Parametrization:
Examples:
TFT_ENCODING[TFT_INT32, TFT_STRING] is an integer encoded as string.
"""
TFT_SHAPE_TENSOR: FullTypeId.ValueType # 1005
"""The type of "shape tensors" where the runtime value is the shape of
some tensor(s), i.e. the output of tf.shape.
Shape tensors have special, host-only placement, in contrast to
TFT_TENSOR[TFT_INT32] which is the type of a normal numeric tensor
with no special placement.
Examples:
TFT_SHAPE_TENSOR[TFT_INT32] is the most common
TFT_SHAPE_TENSOR[TFT_INT64] is also allowed
"""
TFT_BOOL: FullTypeId.ValueType # 200
"""Type attributes. These always appear in the parametrization of a type,
never alone. For example, there is no such thing as a "bool" TensorFlow

View File

@@ -193,7 +193,7 @@ class GraphTransferInfo(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _DestinationEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[GraphTransferInfo._Destination.ValueType], builtins.type): # noqa: F821
class _DestinationEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[GraphTransferInfo._Destination.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
NOP: GraphTransferInfo._Destination.ValueType # 0
HEXAGON: GraphTransferInfo._Destination.ValueType # 1

View File

@@ -0,0 +1,83 @@
"""
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
"""
import builtins
import collections.abc
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.message
import sys
import tensorflow.core.framework.graph_pb2
import tensorflow.core.framework.types_pb2
if sys.version_info >= (3, 8):
import typing as typing_extensions
else:
import typing_extensions
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
@typing_extensions.final
class OptimizedFunctionGraph(google.protobuf.message.Message):
    """Optimized function graph after instantiation-related graph optimization
    passes (up till before graph partitioning). The first half of the proto is
    representing a GraphDef and the rest of the fields are extra information from
    graph optimizations.
    """
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    # Auto-generated map-entry message backing the node_name_to_control_ret
    # map<string, string> field; not normally constructed directly.
    @typing_extensions.final
    class NodeNameToControlRetEntry(google.protobuf.message.Message):
        DESCRIPTOR: google.protobuf.descriptor.Descriptor
        KEY_FIELD_NUMBER: builtins.int
        VALUE_FIELD_NUMBER: builtins.int
        key: builtins.str
        value: builtins.str
        def __init__(
            self,
            *,
            key: builtins.str | None = ...,
            value: builtins.str | None = ...,
        ) -> None: ...
        def ClearField(self, field_name: typing_extensions.Literal["key", b"key", "value", b"value"]) -> None: ...
    # Generated proto field tag numbers.
    NAME_FIELD_NUMBER: builtins.int
    FUNCTION_GRAPH_FIELD_NUMBER: builtins.int
    NODE_NAME_TO_CONTROL_RET_FIELD_NUMBER: builtins.int
    RET_TYPES_FIELD_NUMBER: builtins.int
    NUM_RETURN_NODES_FIELD_NUMBER: builtins.int
    name: builtins.str
    """Function name. It can be a human-readable SignatureDef's method name, or a
    FunctionDef name.
    """
    @property
    def function_graph(self) -> tensorflow.core.framework.graph_pb2.GraphDef:
        """Optimized function graph."""
    @property
    def node_name_to_control_ret(self) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.str]:
        """Maps from node name to control ret. This is an output from running TF/XLA
        bridge.
        """
    @property
    def ret_types(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[tensorflow.core.framework.types_pb2.DataType.ValueType]:
        """Return node types of the function. This is an output of graph
        preprocessing.
        """
    num_return_nodes: builtins.int
    """Number of return nodes. This is an output of graph preprocessing."""
    def __init__(
        self,
        *,
        name: builtins.str | None = ...,
        function_graph: tensorflow.core.framework.graph_pb2.GraphDef | None = ...,
        node_name_to_control_ret: collections.abc.Mapping[builtins.str, builtins.str] | None = ...,
        ret_types: collections.abc.Iterable[tensorflow.core.framework.types_pb2.DataType.ValueType] | None = ...,
        num_return_nodes: builtins.int | None = ...,
    ) -> None: ...
    # HasField applies only to the singular message field function_graph.
    def HasField(self, field_name: typing_extensions.Literal["function_graph", b"function_graph"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["function_graph", b"function_graph", "name", b"name", "node_name_to_control_ret", b"node_name_to_control_ret", "num_return_nodes", b"num_return_nodes", "ret_types", b"ret_types"]) -> None: ...
global___OptimizedFunctionGraph = OptimizedFunctionGraph

View File

@@ -42,6 +42,7 @@ class TensorProto(google.protobuf.message.Message):
VARIANT_VAL_FIELD_NUMBER: builtins.int
UINT32_VAL_FIELD_NUMBER: builtins.int
UINT64_VAL_FIELD_NUMBER: builtins.int
FLOAT8_VAL_FIELD_NUMBER: builtins.int
dtype: tensorflow.core.framework.types_pb2.DataType.ValueType
@property
def tensor_shape(self) -> tensorflow.core.framework.tensor_shape_pb2.TensorShapeProto:
@@ -114,6 +115,10 @@ class TensorProto(google.protobuf.message.Message):
@property
def uint64_val(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]:
"""DT_UINT64"""
float8_val: builtins.bytes
"""DT_FLOAT8_*, use variable-sized set of bytes
(i.e. the equivalent of repeated uint8, if such a thing existed).
"""
def __init__(
self,
*,
@@ -134,9 +139,10 @@ class TensorProto(google.protobuf.message.Message):
variant_val: collections.abc.Iterable[global___VariantTensorDataProto] | None = ...,
uint32_val: collections.abc.Iterable[builtins.int] | None = ...,
uint64_val: collections.abc.Iterable[builtins.int] | None = ...,
float8_val: builtins.bytes | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["tensor_shape", b"tensor_shape"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["bool_val", b"bool_val", "dcomplex_val", b"dcomplex_val", "double_val", b"double_val", "dtype", b"dtype", "float_val", b"float_val", "half_val", b"half_val", "int64_val", b"int64_val", "int_val", b"int_val", "resource_handle_val", b"resource_handle_val", "scomplex_val", b"scomplex_val", "string_val", b"string_val", "tensor_content", b"tensor_content", "tensor_shape", b"tensor_shape", "uint32_val", b"uint32_val", "uint64_val", b"uint64_val", "variant_val", b"variant_val", "version_number", b"version_number"]) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["bool_val", b"bool_val", "dcomplex_val", b"dcomplex_val", "double_val", b"double_val", "dtype", b"dtype", "float8_val", b"float8_val", "float_val", b"float_val", "half_val", b"half_val", "int64_val", b"int64_val", "int_val", b"int_val", "resource_handle_val", b"resource_handle_val", "scomplex_val", b"scomplex_val", "string_val", b"string_val", "tensor_content", b"tensor_content", "tensor_shape", b"tensor_shape", "uint32_val", b"uint32_val", "uint64_val", b"uint64_val", "variant_val", b"variant_val", "version_number", b"version_number"]) -> None: ...
global___TensorProto = TensorProto

View File

@@ -47,7 +47,7 @@ class _DataTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumT
DT_QINT32: _DataType.ValueType # 13
"""Quantized int32"""
DT_BFLOAT16: _DataType.ValueType # 14
"""Float32 truncated to 16 bits. Only for cast ops."""
"""Float32 truncated to 16 bits."""
DT_QINT16: _DataType.ValueType # 15
"""Quantized int16"""
DT_QUINT16: _DataType.ValueType # 16
@@ -61,8 +61,14 @@ class _DataTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumT
"""Arbitrary C++ data types"""
DT_UINT32: _DataType.ValueType # 22
DT_UINT64: _DataType.ValueType # 23
DT_FLOAT8_E5M2: _DataType.ValueType # 24
"""5 exponent bits, 2 mantissa bits."""
DT_FLOAT8_E4M3FN: _DataType.ValueType # 25
"""4 exponent bits, 3 mantissa bits, finite-only, with"""
DT_FLOAT_REF: _DataType.ValueType # 101
"""Do not use! These are only for parameters. Every enum above
"""2 NaNs (0bS1111111).
Do not use! These are only for parameters. Every enum above
should have a corresponding value below (verified by types_test).
"""
DT_DOUBLE_REF: _DataType.ValueType # 102
@@ -87,6 +93,8 @@ class _DataTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumT
DT_VARIANT_REF: _DataType.ValueType # 121
DT_UINT32_REF: _DataType.ValueType # 122
DT_UINT64_REF: _DataType.ValueType # 123
DT_FLOAT8_E5M2_REF: _DataType.ValueType # 124
DT_FLOAT8_E4M3FN_REF: _DataType.ValueType # 125
class DataType(_DataType, metaclass=_DataTypeEnumTypeWrapper):
"""(== suppress_warning documentation-presence ==)
@@ -118,7 +126,7 @@ DT_QUINT8: DataType.ValueType # 12
DT_QINT32: DataType.ValueType # 13
"""Quantized int32"""
DT_BFLOAT16: DataType.ValueType # 14
"""Float32 truncated to 16 bits. Only for cast ops."""
"""Float32 truncated to 16 bits."""
DT_QINT16: DataType.ValueType # 15
"""Quantized int16"""
DT_QUINT16: DataType.ValueType # 16
@@ -132,8 +140,14 @@ DT_VARIANT: DataType.ValueType # 21
"""Arbitrary C++ data types"""
DT_UINT32: DataType.ValueType # 22
DT_UINT64: DataType.ValueType # 23
DT_FLOAT8_E5M2: DataType.ValueType # 24
"""5 exponent bits, 2 mantissa bits."""
DT_FLOAT8_E4M3FN: DataType.ValueType # 25
"""4 exponent bits, 3 mantissa bits, finite-only, with"""
DT_FLOAT_REF: DataType.ValueType # 101
"""Do not use! These are only for parameters. Every enum above
"""2 NaNs (0bS1111111).
Do not use! These are only for parameters. Every enum above
should have a corresponding value below (verified by types_test).
"""
DT_DOUBLE_REF: DataType.ValueType # 102
@@ -158,6 +172,8 @@ DT_RESOURCE_REF: DataType.ValueType # 120
DT_VARIANT_REF: DataType.ValueType # 121
DT_UINT32_REF: DataType.ValueType # 122
DT_UINT64_REF: DataType.ValueType # 123
DT_FLOAT8_E5M2_REF: DataType.ValueType # 124
DT_FLOAT8_E4M3FN_REF: DataType.ValueType # 125
global___DataType = DataType
@typing_extensions.final

View File

@@ -2,161 +2,13 @@
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
"""
import builtins
import collections.abc
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.message
import sys
if sys.version_info >= (3, 8):
import typing as typing_extensions
else:
import typing_extensions
from tensorflow.tsl.protobuf.bfc_memory_map_pb2 import (
BinSummary as BinSummary,
MemAllocatorStats as MemAllocatorStats,
MemChunk as MemChunk,
MemoryDump as MemoryDump,
SnapShot as SnapShot,
)
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
@typing_extensions.final
class MemAllocatorStats(google.protobuf.message.Message):
    """Some of the data from AllocatorStats"""
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    # Generated proto field tag numbers.
    NUM_ALLOCS_FIELD_NUMBER: builtins.int
    BYTES_IN_USE_FIELD_NUMBER: builtins.int
    PEAK_BYTES_IN_USE_FIELD_NUMBER: builtins.int
    LARGEST_ALLOC_SIZE_FIELD_NUMBER: builtins.int
    FRAGMENTATION_METRIC_FIELD_NUMBER: builtins.int
    num_allocs: builtins.int  # total number of allocations performed
    bytes_in_use: builtins.int  # bytes currently allocated
    peak_bytes_in_use: builtins.int  # high-water mark of bytes_in_use
    largest_alloc_size: builtins.int  # size of the largest single allocation
    fragmentation_metric: builtins.float  # allocator-reported fragmentation score
    def __init__(
        self,
        *,
        num_allocs: builtins.int | None = ...,
        bytes_in_use: builtins.int | None = ...,
        peak_bytes_in_use: builtins.int | None = ...,
        largest_alloc_size: builtins.int | None = ...,
        fragmentation_metric: builtins.float | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["bytes_in_use", b"bytes_in_use", "fragmentation_metric", b"fragmentation_metric", "largest_alloc_size", b"largest_alloc_size", "num_allocs", b"num_allocs", "peak_bytes_in_use", b"peak_bytes_in_use"]) -> None: ...
global___MemAllocatorStats = MemAllocatorStats
@typing_extensions.final
class MemChunk(google.protobuf.message.Message):
    """One memory chunk recorded in a MemoryDump (see MemoryDump.chunk)."""
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    # Generated proto field tag numbers.
    ADDRESS_FIELD_NUMBER: builtins.int
    SIZE_FIELD_NUMBER: builtins.int
    REQUESTED_SIZE_FIELD_NUMBER: builtins.int
    BIN_FIELD_NUMBER: builtins.int
    OP_NAME_FIELD_NUMBER: builtins.int
    FREED_AT_COUNT_FIELD_NUMBER: builtins.int
    ACTION_COUNT_FIELD_NUMBER: builtins.int
    IN_USE_FIELD_NUMBER: builtins.int
    STEP_ID_FIELD_NUMBER: builtins.int
    address: builtins.int  # chunk start address
    size: builtins.int  # actual chunk size in bytes
    requested_size: builtins.int  # size originally requested by the caller
    bin: builtins.int  # allocator bin index this chunk belongs to
    op_name: builtins.str  # op that requested the allocation
    freed_at_count: builtins.int
    action_count: builtins.int
    in_use: builtins.bool  # whether the chunk is currently allocated
    step_id: builtins.int
    def __init__(
        self,
        *,
        address: builtins.int | None = ...,
        size: builtins.int | None = ...,
        requested_size: builtins.int | None = ...,
        bin: builtins.int | None = ...,
        op_name: builtins.str | None = ...,
        freed_at_count: builtins.int | None = ...,
        action_count: builtins.int | None = ...,
        in_use: builtins.bool | None = ...,
        step_id: builtins.int | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["action_count", b"action_count", "address", b"address", "bin", b"bin", "freed_at_count", b"freed_at_count", "in_use", b"in_use", "op_name", b"op_name", "requested_size", b"requested_size", "size", b"size", "step_id", b"step_id"]) -> None: ...
global___MemChunk = MemChunk
@typing_extensions.final
class BinSummary(google.protobuf.message.Message):
    """Per-bin usage summary recorded in a MemoryDump (see MemoryDump.bin_summary)."""
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    # Generated proto field tag numbers.
    BIN_FIELD_NUMBER: builtins.int
    TOTAL_BYTES_IN_USE_FIELD_NUMBER: builtins.int
    TOTAL_BYTES_IN_BIN_FIELD_NUMBER: builtins.int
    TOTAL_CHUNKS_IN_USE_FIELD_NUMBER: builtins.int
    TOTAL_CHUNKS_IN_BIN_FIELD_NUMBER: builtins.int
    bin: builtins.int  # bin index this summary describes
    total_bytes_in_use: builtins.int
    total_bytes_in_bin: builtins.int
    total_chunks_in_use: builtins.int
    total_chunks_in_bin: builtins.int
    def __init__(
        self,
        *,
        bin: builtins.int | None = ...,
        total_bytes_in_use: builtins.int | None = ...,
        total_bytes_in_bin: builtins.int | None = ...,
        total_chunks_in_use: builtins.int | None = ...,
        total_chunks_in_bin: builtins.int | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["bin", b"bin", "total_bytes_in_bin", b"total_bytes_in_bin", "total_bytes_in_use", b"total_bytes_in_use", "total_chunks_in_bin", b"total_chunks_in_bin", "total_chunks_in_use", b"total_chunks_in_use"]) -> None: ...
global___BinSummary = BinSummary
@typing_extensions.final
class SnapShot(google.protobuf.message.Message):
    """A (action_count, size) sample recorded in a MemoryDump (see MemoryDump.snap_shot)."""
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    # Generated proto field tag numbers.
    ACTION_COUNT_FIELD_NUMBER: builtins.int
    SIZE_FIELD_NUMBER: builtins.int
    action_count: builtins.int
    size: builtins.int
    def __init__(
        self,
        *,
        action_count: builtins.int | None = ...,
        size: builtins.int | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["action_count", b"action_count", "size", b"size"]) -> None: ...
global___SnapShot = SnapShot
@typing_extensions.final
class MemoryDump(google.protobuf.message.Message):
    """Full dump of one allocator's state: its name, per-bin summaries,
    individual chunks, snapshots, and aggregate stats.

    NOTE(review): generated by mypy-protobuf; regenerate rather than edit.
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    # Proto field-number constants.
    ALLOCATOR_NAME_FIELD_NUMBER: builtins.int
    BIN_SUMMARY_FIELD_NUMBER: builtins.int
    CHUNK_FIELD_NUMBER: builtins.int
    SNAP_SHOT_FIELD_NUMBER: builtins.int
    STATS_FIELD_NUMBER: builtins.int
    allocator_name: builtins.str
    # Repeated message fields are exposed as read-only container properties.
    @property
    def bin_summary(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinSummary]: ...
    @property
    def chunk(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___MemChunk]: ...
    @property
    def snap_shot(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___SnapShot]: ...
    # Singular submessage; its presence is queryable via HasField below.
    @property
    def stats(self) -> global___MemAllocatorStats: ...
    # Repeated fields accept any iterable of messages; all arguments are
    # optional and keyword-only.
    def __init__(
        self,
        *,
        allocator_name: builtins.str | None = ...,
        bin_summary: collections.abc.Iterable[global___BinSummary] | None = ...,
        chunk: collections.abc.Iterable[global___MemChunk] | None = ...,
        snap_shot: collections.abc.Iterable[global___SnapShot] | None = ...,
        stats: global___MemAllocatorStats | None = ...,
    ) -> None: ...
    # Only the singular submessage field supports presence checks; repeated
    # and scalar fields are intentionally excluded from HasField.
    def HasField(self, field_name: typing_extensions.Literal["stats", b"stats"]) -> builtins.bool: ...
    # Field names are accepted as str or bytes, per protobuf's ClearField API.
    def ClearField(self, field_name: typing_extensions.Literal["allocator_name", b"allocator_name", "bin_summary", b"bin_summary", "chunk", b"chunk", "snap_shot", b"snap_shot", "stats", b"stats"]) -> None: ...

# Fully-qualified alias used by other generated stubs.
global___MemoryDump = MemoryDump

View File

@@ -13,9 +13,10 @@ import tensorflow.core.framework.cost_graph_pb2
import tensorflow.core.framework.graph_pb2
import tensorflow.core.framework.step_stats_pb2
import tensorflow.core.protobuf.cluster_pb2
import tensorflow.core.protobuf.coordination_config_pb2
import tensorflow.core.protobuf.debug_pb2
import tensorflow.core.protobuf.rewriter_config_pb2
import tensorflow.tsl.protobuf.coordination_config_pb2
import tensorflow.tsl.protobuf.rpc_options_pb2
import typing
if sys.version_info >= (3, 10):
@@ -95,6 +96,8 @@ class GPUOptions(google.protobuf.message.Message):
INTERNAL_FRAGMENTATION_FRACTION_FIELD_NUMBER: builtins.int
USE_CUDA_MALLOC_ASYNC_FIELD_NUMBER: builtins.int
DISALLOW_RETRY_ON_ALLOCATION_FAILURE_FIELD_NUMBER: builtins.int
GPU_HOST_MEM_LIMIT_IN_MB_FIELD_NUMBER: builtins.int
GPU_HOST_MEM_DISALLOW_GROWTH_FIELD_NUMBER: builtins.int
@property
def virtual_devices(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___GPUOptions.Experimental.VirtualDevices]:
"""The multi virtual device settings. If empty (not set), it will create
@@ -205,6 +208,19 @@ class GPUOptions(google.protobuf.message.Message):
hopes that another thread will free up memory in the meantime. Setting
this to true disables the sleep; instead we'll OOM immediately.
"""
gpu_host_mem_limit_in_mb: builtins.float
"""Memory limit for "GPU host allocator", aka pinned memory allocator. This
can also be set via the envvar TF_GPU_HOST_MEM_LIMIT_IN_MB.
"""
gpu_host_mem_disallow_growth: builtins.bool
"""If true, then the host allocator allocates its max memory all upfront and
never grows. This can be useful for latency-sensitive systems, because
growing the GPU host memory pool can be expensive.
You probably only want to use this in combination with
gpu_host_mem_limit_in_mb, because the default GPU host memory limit is
quite high.
"""
def __init__(
self,
*,
@@ -219,8 +235,10 @@ class GPUOptions(google.protobuf.message.Message):
internal_fragmentation_fraction: builtins.float | None = ...,
use_cuda_malloc_async: builtins.bool | None = ...,
disallow_retry_on_allocation_failure: builtins.bool | None = ...,
gpu_host_mem_limit_in_mb: builtins.float | None = ...,
gpu_host_mem_disallow_growth: builtins.bool | None = ...,
) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["collective_ring_order", b"collective_ring_order", "disallow_retry_on_allocation_failure", b"disallow_retry_on_allocation_failure", "internal_fragmentation_fraction", b"internal_fragmentation_fraction", "kernel_tracker_max_bytes", b"kernel_tracker_max_bytes", "kernel_tracker_max_interval", b"kernel_tracker_max_interval", "kernel_tracker_max_pending", b"kernel_tracker_max_pending", "num_dev_to_dev_copy_streams", b"num_dev_to_dev_copy_streams", "timestamped_allocator", b"timestamped_allocator", "use_cuda_malloc_async", b"use_cuda_malloc_async", "use_unified_memory", b"use_unified_memory", "virtual_devices", b"virtual_devices"]) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["collective_ring_order", b"collective_ring_order", "disallow_retry_on_allocation_failure", b"disallow_retry_on_allocation_failure", "gpu_host_mem_disallow_growth", b"gpu_host_mem_disallow_growth", "gpu_host_mem_limit_in_mb", b"gpu_host_mem_limit_in_mb", "internal_fragmentation_fraction", b"internal_fragmentation_fraction", "kernel_tracker_max_bytes", b"kernel_tracker_max_bytes", "kernel_tracker_max_interval", b"kernel_tracker_max_interval", "kernel_tracker_max_pending", b"kernel_tracker_max_pending", "num_dev_to_dev_copy_streams", b"num_dev_to_dev_copy_streams", "timestamped_allocator", b"timestamped_allocator", "use_cuda_malloc_async", b"use_cuda_malloc_async", "use_unified_memory", b"use_unified_memory", "virtual_devices", b"virtual_devices"]) -> None: ...
PER_PROCESS_GPU_MEMORY_FRACTION_FIELD_NUMBER: builtins.int
ALLOW_GROWTH_FIELD_NUMBER: builtins.int
@@ -232,9 +250,9 @@ class GPUOptions(google.protobuf.message.Message):
FORCE_GPU_COMPATIBLE_FIELD_NUMBER: builtins.int
EXPERIMENTAL_FIELD_NUMBER: builtins.int
per_process_gpu_memory_fraction: builtins.float
"""Fraction of the available GPU memory to allocate for each process.
"""Fraction of the total GPU memory to allocate for each process.
1 means to allocate all of the GPU memory, 0.5 means the process
allocates up to ~50% of the available GPU memory.
allocates up to ~50% of the total GPU memory.
GPU memory is pre-allocated unless the allow_growth option is enabled.
@@ -345,7 +363,7 @@ class OptimizerOptions(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _LevelEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[OptimizerOptions._Level.ValueType], builtins.type): # noqa: F821
class _LevelEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[OptimizerOptions._Level.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
L1: OptimizerOptions._Level.ValueType # 0
"""L1 is the default level.
@@ -372,7 +390,7 @@ class OptimizerOptions(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _GlobalJitLevelEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[OptimizerOptions._GlobalJitLevel.ValueType], builtins.type): # noqa: F821
class _GlobalJitLevelEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[OptimizerOptions._GlobalJitLevel.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
DEFAULT: OptimizerOptions._GlobalJitLevel.ValueType # 0
"""Default setting ("off" now, but later expected to be "on")"""
@@ -562,61 +580,6 @@ class ThreadPoolOptionProto(google.protobuf.message.Message):
global___ThreadPoolOptionProto = ThreadPoolOptionProto
@typing_extensions.final
class RPCOptions(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
USE_RPC_FOR_INPROCESS_MASTER_FIELD_NUMBER: builtins.int
COMPRESSION_ALGORITHM_FIELD_NUMBER: builtins.int
COMPRESSION_LEVEL_FIELD_NUMBER: builtins.int
CACHE_RPC_RESPONSE_FIELD_NUMBER: builtins.int
DISABLE_SESSION_CONNECTION_SHARING_FIELD_NUMBER: builtins.int
NUM_CHANNELS_PER_TARGET_FIELD_NUMBER: builtins.int
use_rpc_for_inprocess_master: builtins.bool
"""If true, always use RPC to contact the session target.
If false (the default option), TensorFlow may use an optimized
transport for client-master communication that avoids the RPC
stack. This option is primarily for used testing the RPC stack.
"""
compression_algorithm: builtins.str
"""The compression algorithm to be used. One of "deflate", "gzip"."""
compression_level: builtins.int
"""If compression_algorithm is set, the compression level to be used.
From 0 (no compression), up to 3.
"""
cache_rpc_response: builtins.bool
"""Setting cache_rpc_response to true will enable sender side caching of
response for RecvTensorAsync and RecvBufAsync to allow receiver to retry
requests . This is only necessary when the network fabric is experiencing a
significant error rate. Without it we'll fail a step on an network error,
while with it we'll be able to complete long steps (like complex
initializations) in the face of some network errors during RecvTensor.
"""
disable_session_connection_sharing: builtins.bool
"""Disables TCP connection sharing when opening a new RPC channel."""
num_channels_per_target: builtins.int
"""Setting num_channels_per_target > 0 allows uses of multiple channels to
communicate to the same target. This can be used to improve the aggregate
throughput on high speed links (e.g 100G) where single connection is not
sufficient to maximize link utilization. Note that a single RPC only goes
on a single channel, this only helps in situations where there are multiple
transfers to the same target overlapping in time.
"""
def __init__(
self,
*,
use_rpc_for_inprocess_master: builtins.bool | None = ...,
compression_algorithm: builtins.str | None = ...,
compression_level: builtins.int | None = ...,
cache_rpc_response: builtins.bool | None = ...,
disable_session_connection_sharing: builtins.bool | None = ...,
num_channels_per_target: builtins.int | None = ...,
) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["cache_rpc_response", b"cache_rpc_response", "compression_algorithm", b"compression_algorithm", "compression_level", b"compression_level", "disable_session_connection_sharing", b"disable_session_connection_sharing", "num_channels_per_target", b"num_channels_per_target", "use_rpc_for_inprocess_master", b"use_rpc_for_inprocess_master"]) -> None: ...
global___RPCOptions = RPCOptions
@typing_extensions.final
class SessionMetadata(google.protobuf.message.Message):
"""Metadata about the session.
@@ -683,7 +646,7 @@ class ConfigProto(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _MlirBridgeRolloutEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[ConfigProto.Experimental._MlirBridgeRollout.ValueType], builtins.type): # noqa: F821
class _MlirBridgeRolloutEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[ConfigProto.Experimental._MlirBridgeRollout.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
MLIR_BRIDGE_ROLLOUT_UNSPECIFIED: ConfigProto.Experimental._MlirBridgeRollout.ValueType # 0
"""If this field is left unspecified, the MLIR bridge may be selectively
@@ -693,20 +656,6 @@ class ConfigProto(google.protobuf.message.Message):
"""Enabling the MLIR bridge enables it for all graphs in this session."""
MLIR_BRIDGE_ROLLOUT_DISABLED: ConfigProto.Experimental._MlirBridgeRollout.ValueType # 2
"""Disabling the MLIR bridge disables it for all graphs in this session."""
MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED: ConfigProto.Experimental._MlirBridgeRollout.ValueType # 3
"""Enable the MLIR bridge on a per graph basis based on an analysis of
the features used in the graph. If the features used by the graph are
supported by the MLIR bridge, the MLIR bridge will be used to run the
graph.
"""
MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED: ConfigProto.Experimental._MlirBridgeRollout.ValueType # 4
"""Enable the MLIR bridge in a fallback mode on a per graph basis based
on an analysis of the features used in the graph.
Running the MLIR bridge in the fallback mode means that it is
executed and it commits all the changes to the TF graph in case
of success. And it does not in case of failures and let the old bridge
to process the TF graph.
"""
class MlirBridgeRollout(_MlirBridgeRollout, metaclass=_MlirBridgeRolloutEnumTypeWrapper):
"""An enum that describes the state of the MLIR bridge rollout."""
@@ -719,20 +668,6 @@ class ConfigProto(google.protobuf.message.Message):
"""Enabling the MLIR bridge enables it for all graphs in this session."""
MLIR_BRIDGE_ROLLOUT_DISABLED: ConfigProto.Experimental.MlirBridgeRollout.ValueType # 2
"""Disabling the MLIR bridge disables it for all graphs in this session."""
MLIR_BRIDGE_ROLLOUT_SAFE_MODE_ENABLED: ConfigProto.Experimental.MlirBridgeRollout.ValueType # 3
"""Enable the MLIR bridge on a per graph basis based on an analysis of
the features used in the graph. If the features used by the graph are
supported by the MLIR bridge, the MLIR bridge will be used to run the
graph.
"""
MLIR_BRIDGE_ROLLOUT_SAFE_MODE_FALLBACK_ENABLED: ConfigProto.Experimental.MlirBridgeRollout.ValueType # 4
"""Enable the MLIR bridge in a fallback mode on a per graph basis based
on an analysis of the features used in the graph.
Running the MLIR bridge in the fallback mode means that it is
executed and it commits all the changes to the TF graph in case
of success. And it does not in case of failures and let the old bridge
to process the TF graph.
"""
COLLECTIVE_GROUP_LEADER_FIELD_NUMBER: builtins.int
EXECUTOR_TYPE_FIELD_NUMBER: builtins.int
@@ -754,6 +689,7 @@ class ConfigProto(google.protobuf.message.Message):
DISABLE_FUNCTIONAL_OPS_LOWERING_FIELD_NUMBER: builtins.int
XLA_PREFER_SINGLE_GRAPH_CLUSTER_FIELD_NUMBER: builtins.int
COORDINATION_CONFIG_FIELD_NUMBER: builtins.int
DISABLE_OPTIMIZE_FOR_STATIC_GRAPH_FIELD_NUMBER: builtins.int
collective_group_leader: builtins.str
"""Task name for group resolution."""
executor_type: builtins.str
@@ -817,7 +753,8 @@ class ConfigProto(google.protobuf.message.Message):
If set, this can be used by the runtime and the Ops for debugging,
monitoring, etc.
NOTE: This is currently used and propagated only by the direct session.
NOTE: This is currently used and propagated only by the direct session
and EagerContext.
"""
optimize_for_static_graph: builtins.bool
"""If true, the session may treat the graph as being static for optimization
@@ -828,13 +765,10 @@ class ConfigProto(google.protobuf.message.Message):
Session::Extend() may not be supported.
"""
enable_mlir_bridge: builtins.bool
"""This field will eventually be deprecated and replaced by
mlir_bridge_rollout (b/166038521).
"""Whether to enable the MLIR-based TF->XLA bridge. This is only used if set
to true. Default value or false is ignored. Use mlir_bridge_rollout for
finer control.
Whether to enable the MLIR-based TF->XLA bridge.
This is a replacement to the existing bridge, and not ready for
production usage yet.
If this option is set to true when a session is created, MLIR is used to
perform the set of graph transformations to put the graph in a form that
can be executed with delegation of some computations to an accelerator.
@@ -844,11 +778,7 @@ class ConfigProto(google.protobuf.message.Message):
to lower the encapsulated graph to a particular device.
"""
mlir_bridge_rollout: global___ConfigProto.Experimental.MlirBridgeRollout.ValueType
"""This field is underdevelopment, for now use enable_mlir_bridge
(b/166038521).
Whether to enable the MLIR-based TF->XLA bridge.
"""
"""Whether to enable the MLIR-based TF->XLA bridge."""
enable_mlir_graph_optimization: builtins.bool
"""Whether to enable the MLIR-based Graph optimizations.
@@ -882,8 +812,19 @@ class ConfigProto(google.protobuf.message.Message):
cluster that encompases most of the graph.
"""
@property
def coordination_config(self) -> tensorflow.core.protobuf.coordination_config_pb2.CoordinationServiceConfig:
def coordination_config(self) -> tensorflow.tsl.protobuf.coordination_config_pb2.CoordinationServiceConfig:
"""Distributed coordination service configurations."""
disable_optimize_for_static_graph: builtins.bool
"""If true, the session will treat the graph as being non-static for
optimization purposes.
If this option is set to true when a session is created, the full
GraphDef will be retained to enable calls to Session::Extend().
Calling Extend() without setting this flag will result in errors.
This option is meant to replace `optimize_for_static_graph` and it
aims to negate its value.
"""
def __init__(
self,
*,
@@ -906,10 +847,11 @@ class ConfigProto(google.protobuf.message.Message):
use_tfrt: builtins.bool | None = ...,
disable_functional_ops_lowering: builtins.bool | None = ...,
xla_prefer_single_graph_cluster: builtins.bool | None = ...,
coordination_config: tensorflow.core.protobuf.coordination_config_pb2.CoordinationServiceConfig | None = ...,
coordination_config: tensorflow.tsl.protobuf.coordination_config_pb2.CoordinationServiceConfig | None = ...,
disable_optimize_for_static_graph: builtins.bool | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["coordination_config", b"coordination_config", "session_metadata", b"session_metadata"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["collective_deterministic_sequential_execution", b"collective_deterministic_sequential_execution", "collective_group_leader", b"collective_group_leader", "collective_nccl", b"collective_nccl", "coordination_config", b"coordination_config", "disable_functional_ops_lowering", b"disable_functional_ops_lowering", "disable_output_partition_graphs", b"disable_output_partition_graphs", "disable_thread_spinning", b"disable_thread_spinning", "enable_mlir_bridge", b"enable_mlir_bridge", "enable_mlir_graph_optimization", b"enable_mlir_graph_optimization", "executor_type", b"executor_type", "mlir_bridge_rollout", b"mlir_bridge_rollout", "optimize_for_static_graph", b"optimize_for_static_graph", "recv_buf_max_chunk", b"recv_buf_max_chunk", "session_metadata", b"session_metadata", "share_cluster_devices_in_session", b"share_cluster_devices_in_session", "share_session_state_in_clusterspec_propagation", b"share_session_state_in_clusterspec_propagation", "use_numa_affinity", b"use_numa_affinity", "use_tfrt", b"use_tfrt", "xla_fusion_autotuner_thresh", b"xla_fusion_autotuner_thresh", "xla_prefer_single_graph_cluster", b"xla_prefer_single_graph_cluster"]) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["collective_deterministic_sequential_execution", b"collective_deterministic_sequential_execution", "collective_group_leader", b"collective_group_leader", "collective_nccl", b"collective_nccl", "coordination_config", b"coordination_config", "disable_functional_ops_lowering", b"disable_functional_ops_lowering", "disable_optimize_for_static_graph", b"disable_optimize_for_static_graph", "disable_output_partition_graphs", b"disable_output_partition_graphs", "disable_thread_spinning", b"disable_thread_spinning", "enable_mlir_bridge", b"enable_mlir_bridge", "enable_mlir_graph_optimization", b"enable_mlir_graph_optimization", "executor_type", b"executor_type", "mlir_bridge_rollout", b"mlir_bridge_rollout", "optimize_for_static_graph", b"optimize_for_static_graph", "recv_buf_max_chunk", b"recv_buf_max_chunk", "session_metadata", b"session_metadata", "share_cluster_devices_in_session", b"share_cluster_devices_in_session", "share_session_state_in_clusterspec_propagation", b"share_session_state_in_clusterspec_propagation", "use_numa_affinity", b"use_numa_affinity", "use_tfrt", b"use_tfrt", "xla_fusion_autotuner_thresh", b"xla_fusion_autotuner_thresh", "xla_prefer_single_graph_cluster", b"xla_prefer_single_graph_cluster"]) -> None: ...
DEVICE_COUNT_FIELD_NUMBER: builtins.int
INTRA_OP_PARALLELISM_THREADS_FIELD_NUMBER: builtins.int
@@ -1029,7 +971,7 @@ class ConfigProto(google.protobuf.message.Message):
deadline for all blocking operations.
"""
@property
def rpc_options(self) -> global___RPCOptions:
def rpc_options(self) -> tensorflow.tsl.protobuf.rpc_options_pb2.RPCOptions:
"""Options that apply when this session uses the distributed runtime."""
@property
def cluster_def(self) -> tensorflow.core.protobuf.cluster_pb2.ClusterDef:
@@ -1062,7 +1004,7 @@ class ConfigProto(google.protobuf.message.Message):
log_device_placement: builtins.bool | None = ...,
graph_options: global___GraphOptions | None = ...,
operation_timeout_in_ms: builtins.int | None = ...,
rpc_options: global___RPCOptions | None = ...,
rpc_options: tensorflow.tsl.protobuf.rpc_options_pb2.RPCOptions | None = ...,
cluster_def: tensorflow.core.protobuf.cluster_pb2.ClusterDef | None = ...,
isolate_session_state: builtins.bool | None = ...,
share_cluster_devices_in_session: builtins.bool | None = ...,
@@ -1083,7 +1025,7 @@ class RunOptions(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _TraceLevelEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[RunOptions._TraceLevel.ValueType], builtins.type): # noqa: F821
class _TraceLevelEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[RunOptions._TraceLevel.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
NO_TRACE: RunOptions._TraceLevel.ValueType # 0
SOFTWARE_TRACE: RunOptions._TraceLevel.ValueType # 1

View File

@@ -29,7 +29,7 @@ class ErrorSourceProto(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _ErrorSourceEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[ErrorSourceProto._ErrorSource.ValueType], builtins.type): # noqa: F821
class _ErrorSourceEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[ErrorSourceProto._ErrorSource.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
UNKNOWN: ErrorSourceProto._ErrorSource.ValueType # 0
TPU_COMPILE_OP: ErrorSourceProto._ErrorSource.ValueType # 1

View File

@@ -56,7 +56,7 @@ class ProcessingModeDef(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _ShardingPolicyEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[ProcessingModeDef._ShardingPolicy.ValueType], builtins.type): # noqa: F821
class _ShardingPolicyEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[ProcessingModeDef._ShardingPolicy.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
OFF: ProcessingModeDef._ShardingPolicy.ValueType # 0
"""No sharding will be performed. Each worker produces the entire dataset
@@ -160,7 +160,7 @@ class DataServiceMetadata(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _CompressionEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[DataServiceMetadata._Compression.ValueType], builtins.type): # noqa: F821
class _CompressionEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[DataServiceMetadata._Compression.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
COMPRESSION_UNSPECIFIED: DataServiceMetadata._Compression.ValueType # 0
COMPRESSION_OFF: DataServiceMetadata._Compression.ValueType # 1

View File

@@ -25,14 +25,14 @@ class FingerprintDef(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
GRAPH_DEF_CHECKSUM_FIELD_NUMBER: builtins.int
SAVED_MODEL_CHECKSUM_FIELD_NUMBER: builtins.int
GRAPH_DEF_PROGRAM_HASH_FIELD_NUMBER: builtins.int
SIGNATURE_DEF_HASH_FIELD_NUMBER: builtins.int
SAVED_OBJECT_GRAPH_HASH_FIELD_NUMBER: builtins.int
CHECKPOINT_HASH_FIELD_NUMBER: builtins.int
VERSION_FIELD_NUMBER: builtins.int
graph_def_checksum: builtins.int
"""Hash of the graph_def, referred to as a "checksum"."""
saved_model_checksum: builtins.int
"""Hash of the saved_model.pb, referred to as a "checksum"."""
graph_def_program_hash: builtins.int
"""Hash of regularized graph_def."""
signature_def_hash: builtins.int
@@ -47,7 +47,7 @@ class FingerprintDef(google.protobuf.message.Message):
def __init__(
self,
*,
graph_def_checksum: builtins.int | None = ...,
saved_model_checksum: builtins.int | None = ...,
graph_def_program_hash: builtins.int | None = ...,
signature_def_hash: builtins.int | None = ...,
saved_object_graph_hash: builtins.int | None = ...,
@@ -55,6 +55,6 @@ class FingerprintDef(google.protobuf.message.Message):
version: tensorflow.core.framework.versions_pb2.VersionDef | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["version", b"version"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["checkpoint_hash", b"checkpoint_hash", "graph_def_checksum", b"graph_def_checksum", "graph_def_program_hash", b"graph_def_program_hash", "saved_object_graph_hash", b"saved_object_graph_hash", "signature_def_hash", b"signature_def_hash", "version", b"version"]) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["checkpoint_hash", b"checkpoint_hash", "graph_def_program_hash", b"graph_def_program_hash", "saved_model_checksum", b"saved_model_checksum", "saved_object_graph_hash", b"saved_object_graph_hash", "signature_def_hash", b"signature_def_hash", "version", b"version"]) -> None: ...
global___FingerprintDef = FingerprintDef

View File

@@ -67,7 +67,7 @@ class RewriterConfig(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _ToggleEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[RewriterConfig._Toggle.ValueType], builtins.type): # noqa: F821
class _ToggleEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[RewriterConfig._Toggle.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
DEFAULT: RewriterConfig._Toggle.ValueType # 0
ON: RewriterConfig._Toggle.ValueType # 1
@@ -115,7 +115,7 @@ class RewriterConfig(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _CpuLayoutEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[RewriterConfig._CpuLayout.ValueType], builtins.type): # noqa: F821
class _CpuLayoutEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[RewriterConfig._CpuLayout.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
NO_CONVERSION_ON_CPU: RewriterConfig._CpuLayout.ValueType # 0
NCHW_TO_NHWC: RewriterConfig._CpuLayout.ValueType # 1
@@ -132,7 +132,7 @@ class RewriterConfig(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _NumIterationsTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[RewriterConfig._NumIterationsType.ValueType], builtins.type): # noqa: F821
class _NumIterationsTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[RewriterConfig._NumIterationsType.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
DEFAULT_NUM_ITERS: RewriterConfig._NumIterationsType.ValueType # 0
ONE: RewriterConfig._NumIterationsType.ValueType # 1
@@ -151,7 +151,7 @@ class RewriterConfig(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _MemOptTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[RewriterConfig._MemOptType.ValueType], builtins.type): # noqa: F821
class _MemOptTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[RewriterConfig._MemOptType.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
DEFAULT_MEM_OPT: RewriterConfig._MemOptType.ValueType # 0
"""The default setting (SCHEDULING and SWAPPING HEURISTICS only)"""

View File

@@ -0,0 +1,10 @@
"""
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
"""
import google.protobuf.descriptor
from tensorflow.tsl.protobuf.rpc_options_pb2 import (
RPCOptions as RPCOptions,
)
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor

View File

@@ -472,7 +472,7 @@ class FunctionSpec(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _JitCompileEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[FunctionSpec._JitCompile.ValueType], builtins.type): # noqa: F821
class _JitCompileEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[FunctionSpec._JitCompile.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
DEFAULT: FunctionSpec._JitCompile.ValueType # 0
ON: FunctionSpec._JitCompile.ValueType # 1

View File

@@ -26,7 +26,7 @@ class SaverDef(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _CheckpointFormatVersionEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[SaverDef._CheckpointFormatVersion.ValueType], builtins.type): # noqa: F821
class _CheckpointFormatVersionEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[SaverDef._CheckpointFormatVersion.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
LEGACY: SaverDef._CheckpointFormatVersion.ValueType # 0
"""Internal legacy format."""

View File

@@ -20,7 +20,7 @@ DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
@typing_extensions.final
class DispatcherConfig(google.protobuf.message.Message):
"""Configuration for a tf.data service DispatchServer.
Next id: 10
Next id: 11
"""
DESCRIPTOR: google.protobuf.descriptor.Descriptor
@@ -34,6 +34,7 @@ class DispatcherConfig(google.protobuf.message.Message):
JOB_GC_CHECK_INTERVAL_MS_FIELD_NUMBER: builtins.int
JOB_GC_TIMEOUT_MS_FIELD_NUMBER: builtins.int
CLIENT_TIMEOUT_MS_FIELD_NUMBER: builtins.int
WORKER_TIMEOUT_MS_FIELD_NUMBER: builtins.int
port: builtins.int
"""The port for the dispatcher to bind to. A value of 0 indicates that the
dispatcher may bind to any available port.
@@ -75,6 +76,10 @@ class DispatcherConfig(google.protobuf.message.Message):
heartbeated to the dispatcher. A value of 0 indicates that the timeout
should be left to the runtime.
"""
worker_timeout_ms: builtins.int
"""How long to wait for a worker to heartbeat before considering it missing.
A value of 0 indicates that the timeout should be left to the runtime.
"""
def __init__(
self,
*,
@@ -87,8 +92,9 @@ class DispatcherConfig(google.protobuf.message.Message):
job_gc_check_interval_ms: builtins.int | None = ...,
job_gc_timeout_ms: builtins.int | None = ...,
client_timeout_ms: builtins.int | None = ...,
worker_timeout_ms: builtins.int | None = ...,
) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["client_timeout_ms", b"client_timeout_ms", "deployment_mode", b"deployment_mode", "fault_tolerant_mode", b"fault_tolerant_mode", "job_gc_check_interval_ms", b"job_gc_check_interval_ms", "job_gc_timeout_ms", b"job_gc_timeout_ms", "port", b"port", "protocol", b"protocol", "work_dir", b"work_dir", "worker_addresses", b"worker_addresses"]) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["client_timeout_ms", b"client_timeout_ms", "deployment_mode", b"deployment_mode", "fault_tolerant_mode", b"fault_tolerant_mode", "job_gc_check_interval_ms", b"job_gc_check_interval_ms", "job_gc_timeout_ms", b"job_gc_timeout_ms", "port", b"port", "protocol", b"protocol", "work_dir", b"work_dir", "worker_addresses", b"worker_addresses", "worker_timeout_ms", b"worker_timeout_ms"]) -> None: ...
global___DispatcherConfig = DispatcherConfig

View File

@@ -124,3 +124,28 @@ class SnapshotTensorMetadata(google.protobuf.message.Message):
def ClearField(self, field_name: typing_extensions.Literal["tensor_metadata", b"tensor_metadata"]) -> None: ...
global___SnapshotTensorMetadata = SnapshotTensorMetadata
@typing_extensions.final
class DistributedSnapshotMetadata(google.protobuf.message.Message):
    """Metadata for a `tf.data.Dataset` distributed snapshot."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    ELEMENT_SPEC_FIELD_NUMBER: builtins.int
    COMPRESSION_FIELD_NUMBER: builtins.int
    element_spec: builtins.bytes
    """The element spec of the snapshotted dataset."""
    compression: builtins.str
    """Whether and how to compress the snapshot. Supported values are defined in
    `tsl::io::compression`. In particular, an empty string specifies not to
    compress.
    """
    # No HasField overload is generated for this message in the stub; only
    # ClearField over the two scalar fields is exposed.
    def __init__(
        self,
        *,
        element_spec: builtins.bytes | None = ...,
        compression: builtins.str | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["compression", b"compression", "element_spec", b"element_spec"]) -> None: ...

global___DistributedSnapshotMetadata = DistributedSnapshotMetadata

View File

@@ -331,7 +331,7 @@ class TypeSpecProto(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _TypeSpecClassEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[TypeSpecProto._TypeSpecClass.ValueType], builtins.type): # noqa: F821
class _TypeSpecClassEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[TypeSpecProto._TypeSpecClass.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
UNKNOWN: TypeSpecProto._TypeSpecClass.ValueType # 0
SPARSE_TENSOR_SPEC: TypeSpecProto._TypeSpecClass.ValueType # 1

View File

@@ -41,7 +41,7 @@ class BundleHeaderProto(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _EndiannessEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[BundleHeaderProto._Endianness.ValueType], builtins.type): # noqa: F821
class _EndiannessEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[BundleHeaderProto._Endianness.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
LITTLE: BundleHeaderProto._Endianness.ValueType # 0
BIG: BundleHeaderProto._Endianness.ValueType # 1

View File

@@ -33,7 +33,7 @@ class CompilationResultProto(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _ErrorCodeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[CompilationResultProto._ErrorCode.ValueType], builtins.type): # noqa: F821
class _ErrorCodeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[CompilationResultProto._ErrorCode.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
UNKNOWN: CompilationResultProto._ErrorCode.ValueType # 0
OUT_OF_MEMORY: CompilationResultProto._ErrorCode.ValueType # 1

View File

@@ -735,7 +735,7 @@ class GradientAccumulationStatus(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _StatusEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[GradientAccumulationStatus._Status.ValueType], builtins.type): # noqa: F821
class _StatusEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[GradientAccumulationStatus._Status.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
UNSPECIFIED: GradientAccumulationStatus._Status.ValueType # 0
ENABLED: GradientAccumulationStatus._Status.ValueType # 1
@@ -802,7 +802,7 @@ class LowDimensionalPackingStatus(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _StatusEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[LowDimensionalPackingStatus._Status.ValueType], builtins.type): # noqa: F821
class _StatusEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[LowDimensionalPackingStatus._Status.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
UNSPECIFIED: LowDimensionalPackingStatus._Status.ValueType # 0
ENABLED: LowDimensionalPackingStatus._Status.ValueType # 1
@@ -843,20 +843,24 @@ class HotIdReplicationConfiguration(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _StatusEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[HotIdReplicationConfiguration._Status.ValueType], builtins.type): # noqa: F821
class _StatusEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[HotIdReplicationConfiguration._Status.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
UNSPECIFIED: HotIdReplicationConfiguration._Status.ValueType # 0
ENABLED: HotIdReplicationConfiguration._Status.ValueType # 1
DISABLED: HotIdReplicationConfiguration._Status.ValueType # 2
MIGRATION_ONLY: HotIdReplicationConfiguration._Status.ValueType # 3
class Status(_Status, metaclass=_StatusEnumTypeWrapper):
"""Whether to enable or disable hot ID optimization.
If UNSPECIFIED (default), hot ID optimization is DISABLED.
If set to UNSPECIFIED (default), hot ID optimization is DISABLED.
If set to ENABLED, hot ID replication is turned ON.
If set to MIGRATION_ONLY, hot ID migration is turned ON.
"""
UNSPECIFIED: HotIdReplicationConfiguration.Status.ValueType # 0
ENABLED: HotIdReplicationConfiguration.Status.ValueType # 1
DISABLED: HotIdReplicationConfiguration.Status.ValueType # 2
MIGRATION_ONLY: HotIdReplicationConfiguration.Status.ValueType # 3
STATUS_FIELD_NUMBER: builtins.int
status: global___HotIdReplicationConfiguration.Status.ValueType

View File

@@ -28,7 +28,7 @@ class TPUHardwareFeature(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _EmbeddingFeatureEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[TPUHardwareFeature._EmbeddingFeature.ValueType], builtins.type): # noqa: F821
class _EmbeddingFeatureEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[TPUHardwareFeature._EmbeddingFeature.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
UNSUPPORTED: TPUHardwareFeature._EmbeddingFeature.ValueType # 0
"""No embedding lookup accelerator available on the tpu."""

View File

@@ -27,7 +27,7 @@ class TPUEmbeddingConfiguration(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _ModeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[TPUEmbeddingConfiguration._Mode.ValueType], builtins.type): # noqa: F821
class _ModeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[TPUEmbeddingConfiguration._Mode.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
UNSPECIFIED: TPUEmbeddingConfiguration._Mode.ValueType # 0
INFERENCE: TPUEmbeddingConfiguration._Mode.ValueType # 1
@@ -48,7 +48,7 @@ class TPUEmbeddingConfiguration(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _ShardingStrategyEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[TPUEmbeddingConfiguration._ShardingStrategy.ValueType], builtins.type): # noqa: F821
class _ShardingStrategyEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[TPUEmbeddingConfiguration._ShardingStrategy.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
DIV_DEFAULT: TPUEmbeddingConfiguration._ShardingStrategy.ValueType # 0
MOD: TPUEmbeddingConfiguration._ShardingStrategy.ValueType # 1

View File

@@ -26,7 +26,7 @@ class VerifierConfig(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _ToggleEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[VerifierConfig._Toggle.ValueType], builtins.type): # noqa: F821
class _ToggleEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[VerifierConfig._Toggle.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
DEFAULT: VerifierConfig._Toggle.ValueType # 0
ON: VerifierConfig._Toggle.ValueType # 1

View File

@@ -86,6 +86,7 @@ class Event(google.protobuf.message.Message):
SESSION_LOG_FIELD_NUMBER: builtins.int
TAGGED_RUN_METADATA_FIELD_NUMBER: builtins.int
META_GRAPH_DEF_FIELD_NUMBER: builtins.int
SOURCE_METADATA_FIELD_NUMBER: builtins.int
wall_time: builtins.float
"""Timestamp of the event."""
step: builtins.int
@@ -115,6 +116,11 @@ class Event(google.protobuf.message.Message):
"""The metadata returned by running a session.run() call."""
meta_graph_def: builtins.bytes
"""An encoded version of a MetaGraphDef."""
@property
def source_metadata(self) -> global___SourceMetadata:
"""Information of the source that writes the events, this is only logged in
the very first event along with the `file_version` field.
"""
def __init__(
self,
*,
@@ -127,13 +133,34 @@ class Event(google.protobuf.message.Message):
session_log: global___SessionLog | None = ...,
tagged_run_metadata: global___TaggedRunMetadata | None = ...,
meta_graph_def: builtins.bytes | None = ...,
source_metadata: global___SourceMetadata | None = ...,
) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["file_version", b"file_version", "graph_def", b"graph_def", "log_message", b"log_message", "meta_graph_def", b"meta_graph_def", "session_log", b"session_log", "summary", b"summary", "tagged_run_metadata", b"tagged_run_metadata", "what", b"what"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["file_version", b"file_version", "graph_def", b"graph_def", "log_message", b"log_message", "meta_graph_def", b"meta_graph_def", "session_log", b"session_log", "step", b"step", "summary", b"summary", "tagged_run_metadata", b"tagged_run_metadata", "wall_time", b"wall_time", "what", b"what"]) -> None: ...
def HasField(self, field_name: typing_extensions.Literal["file_version", b"file_version", "graph_def", b"graph_def", "log_message", b"log_message", "meta_graph_def", b"meta_graph_def", "session_log", b"session_log", "source_metadata", b"source_metadata", "summary", b"summary", "tagged_run_metadata", b"tagged_run_metadata", "what", b"what"]) -> builtins.bool: ...
def ClearField(self, field_name: typing_extensions.Literal["file_version", b"file_version", "graph_def", b"graph_def", "log_message", b"log_message", "meta_graph_def", b"meta_graph_def", "session_log", b"session_log", "source_metadata", b"source_metadata", "step", b"step", "summary", b"summary", "tagged_run_metadata", b"tagged_run_metadata", "wall_time", b"wall_time", "what", b"what"]) -> None: ...
def WhichOneof(self, oneof_group: typing_extensions.Literal["what", b"what"]) -> typing_extensions.Literal["file_version", "graph_def", "summary", "log_message", "session_log", "tagged_run_metadata", "meta_graph_def"] | None: ...
global___Event = Event
@typing_extensions.final
class SourceMetadata(google.protobuf.message.Message):
    """Holds the information of the source that writes the events.

    Logged only in the very first event of an event file, alongside the
    `file_version` field (see `Event.source_metadata`).
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    WRITER_FIELD_NUMBER: builtins.int
    writer: builtins.str
    """Low level name of the summary writer, such as
    `tensorflow.core.util.events_writer`.
    """
    def __init__(
        self,
        *,
        writer: builtins.str | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["writer", b"writer"]) -> None: ...

global___SourceMetadata = SourceMetadata
@typing_extensions.final
class LogMessage(google.protobuf.message.Message):
"""Protocol buffer used for logging messages to the events file.
@@ -148,7 +175,7 @@ class LogMessage(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _LevelEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[LogMessage._Level.ValueType], builtins.type): # noqa: F821
class _LevelEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[LogMessage._Level.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
UNKNOWN: LogMessage._Level.ValueType # 0
DEBUGGING: LogMessage._Level.ValueType # 10
@@ -199,7 +226,7 @@ class SessionLog(google.protobuf.message.Message):
ValueType = typing.NewType("ValueType", builtins.int)
V: typing_extensions.TypeAlias = ValueType
class _SessionStatusEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[SessionLog._SessionStatus.ValueType], builtins.type): # noqa: F821
class _SessionStatusEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[SessionLog._SessionStatus.ValueType], builtins.type):
DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
STATUS_UNSPECIFIED: SessionLog._SessionStatus.ValueType # 0
START: SessionLog._SessionStatus.ValueType # 1

View File

@@ -2,574 +2,22 @@
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
Protocol messages for describing the results of benchmarks and unit tests."""
import builtins
import collections.abc
import google.protobuf.any_pb2
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.internal.enum_type_wrapper
import google.protobuf.message
import google.protobuf.wrappers_pb2
import sys
import typing
if sys.version_info >= (3, 10):
import typing as typing_extensions
else:
import typing_extensions
from tensorflow.tsl.protobuf.test_log_pb2 import (
AvailableDeviceInfo as AvailableDeviceInfo,
BenchmarkEntries as BenchmarkEntries,
BenchmarkEntry as BenchmarkEntry,
BuildConfiguration as BuildConfiguration,
CPUInfo as CPUInfo,
CommitId as CommitId,
EntryValue as EntryValue,
GPUInfo as GPUInfo,
MachineConfiguration as MachineConfiguration,
MemoryInfo as MemoryInfo,
MetricEntry as MetricEntry,
PlatformInfo as PlatformInfo,
RunConfiguration as RunConfiguration,
TestResults as TestResults,
)
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
@typing_extensions.final
class EntryValue(google.protobuf.message.Message):
    """A single benchmark/test result value: exactly one of `double_value`
    or `string_value` is set (protobuf oneof `kind`).
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    DOUBLE_VALUE_FIELD_NUMBER: builtins.int
    STRING_VALUE_FIELD_NUMBER: builtins.int
    double_value: builtins.float
    string_value: builtins.str
    def __init__(
        self,
        *,
        double_value: builtins.float | None = ...,
        string_value: builtins.str | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["double_value", b"double_value", "kind", b"kind", "string_value", b"string_value"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["double_value", b"double_value", "kind", b"kind", "string_value", b"string_value"]) -> None: ...
    def WhichOneof(self, oneof_group: typing_extensions.Literal["kind", b"kind"]) -> typing_extensions.Literal["double_value", "string_value"] | None: ...

global___EntryValue = EntryValue
@typing_extensions.final
class MetricEntry(google.protobuf.message.Message):
    """A named metric with its measured value and optional acceptable
    [min_value, max_value] bounds.
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    NAME_FIELD_NUMBER: builtins.int
    VALUE_FIELD_NUMBER: builtins.int
    MIN_VALUE_FIELD_NUMBER: builtins.int
    MAX_VALUE_FIELD_NUMBER: builtins.int
    name: builtins.str
    """Metric name"""
    value: builtins.float
    """Metric value"""
    @property
    def min_value(self) -> google.protobuf.wrappers_pb2.DoubleValue:
        """The minimum acceptable value for the metric if specified"""
    @property
    def max_value(self) -> google.protobuf.wrappers_pb2.DoubleValue:
        """The maximum acceptable value for the metric if specified"""
    def __init__(
        self,
        *,
        name: builtins.str | None = ...,
        value: builtins.float | None = ...,
        min_value: google.protobuf.wrappers_pb2.DoubleValue | None = ...,
        max_value: google.protobuf.wrappers_pb2.DoubleValue | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["max_value", b"max_value", "min_value", b"min_value"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["max_value", b"max_value", "min_value", b"min_value", "name", b"name", "value", b"value"]) -> None: ...

global___MetricEntry = MetricEntry
@typing_extensions.final
class BenchmarkEntry(google.protobuf.message.Message):
    """Each unit test or benchmark in a test or benchmark run provides
    some set of information. Here we provide some reasonable keys
    one would expect to see, with optional key/value pairs for things
    we haven't considered.

    This BenchmarkEntry should be emitted by each unit test or benchmark
    reporter.
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    @typing_extensions.final
    class ExtrasEntry(google.protobuf.message.Message):
        """Map entry type for the `extras` map (string key -> EntryValue)."""

        DESCRIPTOR: google.protobuf.descriptor.Descriptor

        KEY_FIELD_NUMBER: builtins.int
        VALUE_FIELD_NUMBER: builtins.int
        key: builtins.str
        @property
        def value(self) -> global___EntryValue: ...
        def __init__(
            self,
            *,
            key: builtins.str | None = ...,
            value: global___EntryValue | None = ...,
        ) -> None: ...
        def HasField(self, field_name: typing_extensions.Literal["value", b"value"]) -> builtins.bool: ...
        def ClearField(self, field_name: typing_extensions.Literal["key", b"key", "value", b"value"]) -> None: ...

    NAME_FIELD_NUMBER: builtins.int
    ITERS_FIELD_NUMBER: builtins.int
    CPU_TIME_FIELD_NUMBER: builtins.int
    WALL_TIME_FIELD_NUMBER: builtins.int
    THROUGHPUT_FIELD_NUMBER: builtins.int
    EXTRAS_FIELD_NUMBER: builtins.int
    METRICS_FIELD_NUMBER: builtins.int
    name: builtins.str
    """The name of the specific benchmark or test
    (e.g. BM_AdjustContrast_gpu_B_W_H)
    """
    iters: builtins.int
    """If a benchmark, how many iterations it was run for"""
    cpu_time: builtins.float
    """Total cpu time used for all iterations (in seconds)"""
    wall_time: builtins.float
    """Total wall time used for all iterations (in seconds)"""
    throughput: builtins.float
    """Throughput (in MB/s)"""
    @property
    def extras(self) -> google.protobuf.internal.containers.MessageMap[builtins.str, global___EntryValue]:
        """Generic map from result key to value."""
    @property
    def metrics(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___MetricEntry]:
        """Metric name, value and expected range. This can include accuracy metrics
        typically used to determine whether the accuracy test has passed
        """
    def __init__(
        self,
        *,
        name: builtins.str | None = ...,
        iters: builtins.int | None = ...,
        cpu_time: builtins.float | None = ...,
        wall_time: builtins.float | None = ...,
        throughput: builtins.float | None = ...,
        extras: collections.abc.Mapping[builtins.str, global___EntryValue] | None = ...,
        metrics: collections.abc.Iterable[global___MetricEntry] | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["cpu_time", b"cpu_time", "extras", b"extras", "iters", b"iters", "metrics", b"metrics", "name", b"name", "throughput", b"throughput", "wall_time", b"wall_time"]) -> None: ...

global___BenchmarkEntry = BenchmarkEntry
@typing_extensions.final
class BenchmarkEntries(google.protobuf.message.Message):
    """A flat collection of BenchmarkEntry messages."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    ENTRY_FIELD_NUMBER: builtins.int
    @property
    def entry(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BenchmarkEntry]: ...
    def __init__(
        self,
        *,
        entry: collections.abc.Iterable[global___BenchmarkEntry] | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["entry", b"entry"]) -> None: ...

global___BenchmarkEntries = BenchmarkEntries
@typing_extensions.final
class BuildConfiguration(google.protobuf.message.Message):
    """Compiler/build settings used to produce the binary under test."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    MODE_FIELD_NUMBER: builtins.int
    CC_FLAGS_FIELD_NUMBER: builtins.int
    OPTS_FIELD_NUMBER: builtins.int
    mode: builtins.str
    """opt, dbg, etc"""
    @property
    def cc_flags(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:
        """CC compiler flags, if known"""
    @property
    def opts(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:
        """Bazel compilation options, if known"""
    def __init__(
        self,
        *,
        mode: builtins.str | None = ...,
        cc_flags: collections.abc.Iterable[builtins.str] | None = ...,
        opts: collections.abc.Iterable[builtins.str] | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["cc_flags", b"cc_flags", "mode", b"mode", "opts", b"opts"]) -> None: ...

global___BuildConfiguration = BuildConfiguration
@typing_extensions.final
class CommitId(google.protobuf.message.Message):
    """Identifies the source revision under test: oneof `kind` holds either a
    submitted `changelist` number or a commit `hash`, with optional
    `snapshot`/`pending_changelist` refinements.
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    CHANGELIST_FIELD_NUMBER: builtins.int
    HASH_FIELD_NUMBER: builtins.int
    SNAPSHOT_FIELD_NUMBER: builtins.int
    PENDING_CHANGELIST_FIELD_NUMBER: builtins.int
    changelist: builtins.int
    """Submitted changelist."""
    hash: builtins.str
    snapshot: builtins.str
    """Hash of intermediate change between hash/changelist and what was tested.
    Not used if the build is from a commit without modifications.
    """
    pending_changelist: builtins.int
    """Changelist tested if the change list is not already submitted."""
    def __init__(
        self,
        *,
        changelist: builtins.int | None = ...,
        hash: builtins.str | None = ...,
        snapshot: builtins.str | None = ...,
        pending_changelist: builtins.int | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["changelist", b"changelist", "hash", b"hash", "kind", b"kind"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["changelist", b"changelist", "hash", b"hash", "kind", b"kind", "pending_changelist", b"pending_changelist", "snapshot", b"snapshot"]) -> None: ...
    def WhichOneof(self, oneof_group: typing_extensions.Literal["kind", b"kind"]) -> typing_extensions.Literal["changelist", "hash"] | None: ...

global___CommitId = CommitId
@typing_extensions.final
class CPUInfo(google.protobuf.message.Message):
    """CPU characteristics of the benchmarking host."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    @typing_extensions.final
    class CacheSizeEntry(google.protobuf.message.Message):
        """Map entry type for the `cache_size` map (cache name -> size in bytes)."""

        DESCRIPTOR: google.protobuf.descriptor.Descriptor

        KEY_FIELD_NUMBER: builtins.int
        VALUE_FIELD_NUMBER: builtins.int
        key: builtins.str
        value: builtins.int
        def __init__(
            self,
            *,
            key: builtins.str | None = ...,
            value: builtins.int | None = ...,
        ) -> None: ...
        def ClearField(self, field_name: typing_extensions.Literal["key", b"key", "value", b"value"]) -> None: ...

    NUM_CORES_FIELD_NUMBER: builtins.int
    NUM_CORES_ALLOWED_FIELD_NUMBER: builtins.int
    MHZ_PER_CPU_FIELD_NUMBER: builtins.int
    CPU_INFO_FIELD_NUMBER: builtins.int
    CPU_GOVERNOR_FIELD_NUMBER: builtins.int
    CACHE_SIZE_FIELD_NUMBER: builtins.int
    num_cores: builtins.int
    num_cores_allowed: builtins.int
    mhz_per_cpu: builtins.float
    """How fast are these cpus?"""
    cpu_info: builtins.str
    """Additional cpu information. For example,
    Intel Ivybridge with HyperThreading (24 cores) dL1:32KB dL2:256KB dL3:30MB
    """
    cpu_governor: builtins.str
    """What kind of cpu scaling is enabled on the host.
    Examples include "performance", "ondemand", "conservative", "mixed".
    """
    @property
    def cache_size(self) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.int]:
        """Cache sizes (in bytes), e.g. "L2": 262144 (for 256KB)"""
    def __init__(
        self,
        *,
        num_cores: builtins.int | None = ...,
        num_cores_allowed: builtins.int | None = ...,
        mhz_per_cpu: builtins.float | None = ...,
        cpu_info: builtins.str | None = ...,
        cpu_governor: builtins.str | None = ...,
        cache_size: collections.abc.Mapping[builtins.str, builtins.int] | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["cache_size", b"cache_size", "cpu_governor", b"cpu_governor", "cpu_info", b"cpu_info", "mhz_per_cpu", b"mhz_per_cpu", "num_cores", b"num_cores", "num_cores_allowed", b"num_cores_allowed"]) -> None: ...

global___CPUInfo = CPUInfo
@typing_extensions.final
class MemoryInfo(google.protobuf.message.Message):
    """Memory capacity of the benchmarking host, in bytes."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    TOTAL_FIELD_NUMBER: builtins.int
    AVAILABLE_FIELD_NUMBER: builtins.int
    total: builtins.int
    """Total virtual memory in bytes"""
    available: builtins.int
    """Immediately available memory in bytes"""
    def __init__(
        self,
        *,
        total: builtins.int | None = ...,
        available: builtins.int | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["available", b"available", "total", b"total"]) -> None: ...

global___MemoryInfo = MemoryInfo
@typing_extensions.final
class GPUInfo(google.protobuf.message.Message):
    """Identification of a single GPU attached to the benchmarking host."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    MODEL_FIELD_NUMBER: builtins.int
    UUID_FIELD_NUMBER: builtins.int
    BUS_ID_FIELD_NUMBER: builtins.int
    model: builtins.str
    """e.g. "Tesla K40c" """
    uuid: builtins.str
    """Final entry in output of "nvidia-smi -L" """
    bus_id: builtins.str
    """e.g. "0000:04:00.0" """
    def __init__(
        self,
        *,
        model: builtins.str | None = ...,
        uuid: builtins.str | None = ...,
        bus_id: builtins.str | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["bus_id", b"bus_id", "model", b"model", "uuid", b"uuid"]) -> None: ...

global___GPUInfo = GPUInfo
@typing_extensions.final
class PlatformInfo(google.protobuf.message.Message):
    """Operating-system / platform identification strings for the host."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    BITS_FIELD_NUMBER: builtins.int
    LINKAGE_FIELD_NUMBER: builtins.int
    MACHINE_FIELD_NUMBER: builtins.int
    RELEASE_FIELD_NUMBER: builtins.int
    SYSTEM_FIELD_NUMBER: builtins.int
    VERSION_FIELD_NUMBER: builtins.int
    bits: builtins.str
    """e.g. '64bit'"""
    linkage: builtins.str
    """e.g. 'ELF'"""
    machine: builtins.str
    """e.g. 'i386'"""
    release: builtins.str
    """e.g. '3.13.0-76-generic'"""
    system: builtins.str
    """e.g. 'Linux'"""
    version: builtins.str
    """e.g. '#120-Ubuntu SMP Mon Jan 18 15:59:10 UTC 2016'"""
    def __init__(
        self,
        *,
        bits: builtins.str | None = ...,
        linkage: builtins.str | None = ...,
        machine: builtins.str | None = ...,
        release: builtins.str | None = ...,
        system: builtins.str | None = ...,
        version: builtins.str | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["bits", b"bits", "linkage", b"linkage", "machine", b"machine", "release", b"release", "system", b"system", "version", b"version"]) -> None: ...

global___PlatformInfo = PlatformInfo
@typing_extensions.final
class AvailableDeviceInfo(google.protobuf.message.Message):
    """Matches DeviceAttributes.

    Describes one device accessible to the test: its name, type,
    memory capacity, and physical description.
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    NAME_FIELD_NUMBER: builtins.int
    TYPE_FIELD_NUMBER: builtins.int
    MEMORY_LIMIT_FIELD_NUMBER: builtins.int
    PHYSICAL_DESCRIPTION_FIELD_NUMBER: builtins.int
    name: builtins.str
    """Device name."""
    type: builtins.str
    """Device type, e.g. 'CPU' or 'GPU'."""
    memory_limit: builtins.int
    """Memory capacity in bytes."""
    physical_description: builtins.str
    """The physical description of this device."""
    def __init__(
        self,
        *,
        name: builtins.str | None = ...,
        type: builtins.str | None = ...,
        memory_limit: builtins.int | None = ...,
        physical_description: builtins.str | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["memory_limit", b"memory_limit", "name", b"name", "physical_description", b"physical_description", "type", b"type"]) -> None: ...

global___AvailableDeviceInfo = AvailableDeviceInfo
@typing_extensions.final
class MachineConfiguration(google.protobuf.message.Message):
    """Hardware and platform configuration of the machine that ran the
    benchmark (hostname, platform, CPU, attached devices, memory).
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    HOSTNAME_FIELD_NUMBER: builtins.int
    SERIAL_IDENTIFIER_FIELD_NUMBER: builtins.int
    PLATFORM_INFO_FIELD_NUMBER: builtins.int
    CPU_INFO_FIELD_NUMBER: builtins.int
    DEVICE_INFO_FIELD_NUMBER: builtins.int
    AVAILABLE_DEVICE_INFO_FIELD_NUMBER: builtins.int
    MEMORY_INFO_FIELD_NUMBER: builtins.int
    hostname: builtins.str
    """Host name of machine that ran the benchmark."""
    serial_identifier: builtins.str
    """Unique serial number of the machine."""
    @property
    def platform_info(self) -> global___PlatformInfo:
        """Additional platform information."""
    @property
    def cpu_info(self) -> global___CPUInfo:
        """CPU Information."""
    @property
    def device_info(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[google.protobuf.any_pb2.Any]:
        """Other devices that are attached and relevant (e.g. GPUInfo)."""
    @property
    def available_device_info(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___AvailableDeviceInfo]:
        """Devices accessible to the test (e.g. as given by list_local_devices)."""
    @property
    def memory_info(self) -> global___MemoryInfo: ...
    def __init__(
        self,
        *,
        hostname: builtins.str | None = ...,
        serial_identifier: builtins.str | None = ...,
        platform_info: global___PlatformInfo | None = ...,
        cpu_info: global___CPUInfo | None = ...,
        device_info: collections.abc.Iterable[google.protobuf.any_pb2.Any] | None = ...,
        available_device_info: collections.abc.Iterable[global___AvailableDeviceInfo] | None = ...,
        memory_info: global___MemoryInfo | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["cpu_info", b"cpu_info", "memory_info", b"memory_info", "platform_info", b"platform_info"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["available_device_info", b"available_device_info", "cpu_info", b"cpu_info", "device_info", b"device_info", "hostname", b"hostname", "memory_info", b"memory_info", "platform_info", b"platform_info", "serial_identifier", b"serial_identifier"]) -> None: ...

global___MachineConfiguration = MachineConfiguration
@typing_extensions.final
class RunConfiguration(google.protobuf.message.Message):
    """Run-specific items such as arguments to the test / benchmark."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    @typing_extensions.final
    class EnvVarsEntry(google.protobuf.message.Message):
        """Map entry type for the `env_vars` map (variable name -> value)."""

        DESCRIPTOR: google.protobuf.descriptor.Descriptor

        KEY_FIELD_NUMBER: builtins.int
        VALUE_FIELD_NUMBER: builtins.int
        key: builtins.str
        value: builtins.str
        def __init__(
            self,
            *,
            key: builtins.str | None = ...,
            value: builtins.str | None = ...,
        ) -> None: ...
        def ClearField(self, field_name: typing_extensions.Literal["key", b"key", "value", b"value"]) -> None: ...

    ARGUMENT_FIELD_NUMBER: builtins.int
    ENV_VARS_FIELD_NUMBER: builtins.int
    @property
    def argument(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]: ...
    @property
    def env_vars(self) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.str]:
        """Environment variables used to run the test/benchmark."""
    def __init__(
        self,
        *,
        argument: collections.abc.Iterable[builtins.str] | None = ...,
        env_vars: collections.abc.Mapping[builtins.str, builtins.str] | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["argument", b"argument", "env_vars", b"env_vars"]) -> None: ...

global___RunConfiguration = RunConfiguration
@typing_extensions.final
class TestResults(google.protobuf.message.Message):
    """The output of one benchmark / test run. Each run contains a list of
    tests or benchmarks, stored as BenchmarkEntry messages.
    This message should be emitted by the reporter (which runs the
    test / BM in a subprocess and then reads the emitted BenchmarkEntry messages,
    usually from a serialized json file), finally collecting them along
    with additional information about the test run.
    """
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    class _BenchmarkType:
        # mypy-protobuf enum pattern: ValueType is a NewType over int.
        ValueType = typing.NewType("ValueType", builtins.int)
        V: typing_extensions.TypeAlias = ValueType
    class _BenchmarkTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[TestResults._BenchmarkType.ValueType], builtins.type): # noqa: F821
        DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
        UNKNOWN: TestResults._BenchmarkType.ValueType # 0
        """Fallback for protos written before Type was introduced."""
        CPP_MICROBENCHMARK: TestResults._BenchmarkType.ValueType # 1
        PYTHON_BENCHMARK: TestResults._BenchmarkType.ValueType # 2
        ANDROID_BENCHMARK: TestResults._BenchmarkType.ValueType # 3
        EDGE_BENCHMARK: TestResults._BenchmarkType.ValueType # 4
        IOS_BENCHMARK: TestResults._BenchmarkType.ValueType # 5
    class BenchmarkType(_BenchmarkType, metaclass=_BenchmarkTypeEnumTypeWrapper):
        """The type of benchmark."""
    UNKNOWN: TestResults.BenchmarkType.ValueType # 0
    """Fallback for protos written before Type was introduced."""
    CPP_MICROBENCHMARK: TestResults.BenchmarkType.ValueType # 1
    PYTHON_BENCHMARK: TestResults.BenchmarkType.ValueType # 2
    ANDROID_BENCHMARK: TestResults.BenchmarkType.ValueType # 3
    EDGE_BENCHMARK: TestResults.BenchmarkType.ValueType # 4
    IOS_BENCHMARK: TestResults.BenchmarkType.ValueType # 5
    TARGET_FIELD_NUMBER: builtins.int
    ENTRIES_FIELD_NUMBER: builtins.int
    BUILD_CONFIGURATION_FIELD_NUMBER: builtins.int
    COMMIT_ID_FIELD_NUMBER: builtins.int
    START_TIME_FIELD_NUMBER: builtins.int
    RUN_TIME_FIELD_NUMBER: builtins.int
    MACHINE_CONFIGURATION_FIELD_NUMBER: builtins.int
    RUN_CONFIGURATION_FIELD_NUMBER: builtins.int
    NAME_FIELD_NUMBER: builtins.int
    BENCHMARK_TYPE_FIELD_NUMBER: builtins.int
    RUN_MODE_FIELD_NUMBER: builtins.int
    TF_VERSION_FIELD_NUMBER: builtins.int
    target: builtins.str
    """The target of the run, e.g.:
    //tensorflow/core:kernels_adjust_contrast_op_benchmark_test
    """
    @property
    def entries(self) -> global___BenchmarkEntries:
        """The list of tests or benchmarks in this run."""
    @property
    def build_configuration(self) -> global___BuildConfiguration:
        """The configuration of the build (compiled opt? with cuda? any copts?)"""
    @property
    def commit_id(self) -> global___CommitId:
        """The commit id (git hash or changelist)"""
    start_time: builtins.int
    """The time the run started (in seconds of UTC time since Unix epoch)"""
    run_time: builtins.float
    """The amount of time the total run took (wall time in seconds)"""
    @property
    def machine_configuration(self) -> global___MachineConfiguration:
        """Machine-specific parameters (Platform and CPU info)"""
    @property
    def run_configuration(self) -> global___RunConfiguration:
        """Run-specific parameters (arguments, etc)"""
    name: builtins.str
    """Benchmark target identifier."""
    benchmark_type: global___TestResults.BenchmarkType.ValueType
    run_mode: builtins.str
    """Used for differentiating between continuous and debug builds.
    Must be one of:
    * cbuild: results from continuous build.
    * presubmit: results from oneshot requests.
    * culprit: results from culprit finder rerun.
    """
    tf_version: builtins.str
    """TensorFlow version this benchmark runs against.
    This can be either set to full version or just the major version.
    """
    def __init__(
        self,
        *,
        target: builtins.str | None = ...,
        entries: global___BenchmarkEntries | None = ...,
        build_configuration: global___BuildConfiguration | None = ...,
        commit_id: global___CommitId | None = ...,
        start_time: builtins.int | None = ...,
        run_time: builtins.float | None = ...,
        machine_configuration: global___MachineConfiguration | None = ...,
        run_configuration: global___RunConfiguration | None = ...,
        name: builtins.str | None = ...,
        benchmark_type: global___TestResults.BenchmarkType.ValueType | None = ...,
        run_mode: builtins.str | None = ...,
        tf_version: builtins.str | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["build_configuration", b"build_configuration", "commit_id", b"commit_id", "entries", b"entries", "machine_configuration", b"machine_configuration", "run_configuration", b"run_configuration"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["benchmark_type", b"benchmark_type", "build_configuration", b"build_configuration", "commit_id", b"commit_id", "entries", b"entries", "machine_configuration", b"machine_configuration", "name", b"name", "run_configuration", b"run_configuration", "run_mode", b"run_mode", "run_time", b"run_time", "start_time", b"start_time", "target", b"target", "tf_version", b"tf_version"]) -> None: ...
global___TestResults = TestResults

View File

@@ -0,0 +1,250 @@
"""
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
This file defines protos that store the results of autotuning various
operations.
They are in proto format because we want to log them structured. They offer
tremendous statistical, testing, and debugging value.
"""
import builtins
import collections.abc
import google.protobuf.any_pb2
import google.protobuf.descriptor
import google.protobuf.duration_pb2
import google.protobuf.internal.containers
import google.protobuf.internal.enum_type_wrapper
import google.protobuf.message
import sys
import tensorflow.tsl.protobuf.dnn_pb2
import typing
if sys.version_info >= (3, 10):
import typing as typing_extensions
else:
import typing_extensions
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
@typing_extensions.final
class CudnnVersion(google.protobuf.message.Message):
    """Version of the cuDNN library, as a major.minor.patch triple."""
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    MAJOR_FIELD_NUMBER: builtins.int
    MINOR_FIELD_NUMBER: builtins.int
    PATCH_FIELD_NUMBER: builtins.int
    major: builtins.int
    minor: builtins.int
    patch: builtins.int
    def __init__(
        self,
        *,
        major: builtins.int | None = ...,
        minor: builtins.int | None = ...,
        patch: builtins.int | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["major", b"major", "minor", b"minor", "patch", b"patch"]) -> None: ...
global___CudnnVersion = CudnnVersion
@typing_extensions.final
class ComputeCapability(google.protobuf.message.Message):
    """GPU compute-capability version, as a major.minor pair."""
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    MAJOR_FIELD_NUMBER: builtins.int
    MINOR_FIELD_NUMBER: builtins.int
    major: builtins.int
    minor: builtins.int
    def __init__(
        self,
        *,
        major: builtins.int | None = ...,
        minor: builtins.int | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["major", b"major", "minor", b"minor"]) -> None: ...
global___ComputeCapability = ComputeCapability
@typing_extensions.final
class AutotuneResult(google.protobuf.message.Message):
    """One autotuning candidate: its key (a `conv`/`gemm`/`cuda_conv_plan`/
    `algorithm` oneof), the measured `run_time` and `scratch_bytes`, and an
    optional `failure` record describing why the candidate was rejected.
    """
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    class _FailureKind:
        # mypy-protobuf enum pattern: ValueType is a NewType over int.
        ValueType = typing.NewType("ValueType", builtins.int)
        V: typing_extensions.TypeAlias = ValueType
    class _FailureKindEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[AutotuneResult._FailureKind.ValueType], builtins.type):
        DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
        UNKNOWN: AutotuneResult._FailureKind.ValueType # 0
        REDZONE_MODIFIED: AutotuneResult._FailureKind.ValueType # 1
        """Algorithm wrote memory outside its output buffers."""
        WRONG_RESULT: AutotuneResult._FailureKind.ValueType # 2
        """Algorithm gave a different result from a reference algorithm."""
        DISQUALIFIED: AutotuneResult._FailureKind.ValueType # 3
        """Algorithm was rejected for failing to run or for known bugs."""
    class FailureKind(_FailureKind, metaclass=_FailureKindEnumTypeWrapper): ...
    UNKNOWN: AutotuneResult.FailureKind.ValueType # 0
    REDZONE_MODIFIED: AutotuneResult.FailureKind.ValueType # 1
    """Algorithm wrote memory outside its output buffers."""
    WRONG_RESULT: AutotuneResult.FailureKind.ValueType # 2
    """Algorithm gave a different result from a reference algorithm."""
    DISQUALIFIED: AutotuneResult.FailureKind.ValueType # 3
    """Algorithm was rejected for failing to run or for known bugs."""
    @typing_extensions.final
    class FailureResult(google.protobuf.message.Message):
        """Why a candidate failed: the failure `kind`, a human-readable `msg`,
        and (in a `key` oneof) the reference algorithm it was compared against.
        """
        DESCRIPTOR: google.protobuf.descriptor.Descriptor
        KIND_FIELD_NUMBER: builtins.int
        MSG_FIELD_NUMBER: builtins.int
        REFERENCE_CONV_FIELD_NUMBER: builtins.int
        REFERENCE_GEMM_FIELD_NUMBER: builtins.int
        REFERENCE_CUDA_CONV_PLAN_FIELD_NUMBER: builtins.int
        REFERENCE_ALGORITHM_FIELD_NUMBER: builtins.int
        BUFFER_ADDRESS_FIELD_NUMBER: builtins.int
        kind: global___AutotuneResult.FailureKind.ValueType
        msg: builtins.str
        @property
        def reference_conv(self) -> global___AutotuneResult.ConvKey: ...
        @property
        def reference_gemm(self) -> global___AutotuneResult.GemmKey: ...
        @property
        def reference_cuda_conv_plan(self) -> global___AutotuneResult.CudaConvPlanKey: ...
        @property
        def reference_algorithm(self) -> tensorflow.tsl.protobuf.dnn_pb2.AlgorithmProto: ...
        buffer_address: builtins.int
        def __init__(
            self,
            *,
            kind: global___AutotuneResult.FailureKind.ValueType | None = ...,
            msg: builtins.str | None = ...,
            reference_conv: global___AutotuneResult.ConvKey | None = ...,
            reference_gemm: global___AutotuneResult.GemmKey | None = ...,
            reference_cuda_conv_plan: global___AutotuneResult.CudaConvPlanKey | None = ...,
            reference_algorithm: tensorflow.tsl.protobuf.dnn_pb2.AlgorithmProto | None = ...,
            buffer_address: builtins.int | None = ...,
        ) -> None: ...
        def HasField(self, field_name: typing_extensions.Literal["key", b"key", "reference_algorithm", b"reference_algorithm", "reference_conv", b"reference_conv", "reference_cuda_conv_plan", b"reference_cuda_conv_plan", "reference_gemm", b"reference_gemm"]) -> builtins.bool: ...
        def ClearField(self, field_name: typing_extensions.Literal["buffer_address", b"buffer_address", "key", b"key", "kind", b"kind", "msg", b"msg", "reference_algorithm", b"reference_algorithm", "reference_conv", b"reference_conv", "reference_cuda_conv_plan", b"reference_cuda_conv_plan", "reference_gemm", b"reference_gemm"]) -> None: ...
        def WhichOneof(self, oneof_group: typing_extensions.Literal["key", b"key"]) -> typing_extensions.Literal["reference_conv", "reference_gemm", "reference_cuda_conv_plan", "reference_algorithm"] | None: ...
    @typing_extensions.final
    class ConvKey(google.protobuf.message.Message):
        """Legacy and unused in new data; superseded by AlgorithmProto."""
        DESCRIPTOR: google.protobuf.descriptor.Descriptor
        ALGORITHM_FIELD_NUMBER: builtins.int
        TENSOR_OPS_ENABLED_FIELD_NUMBER: builtins.int
        algorithm: builtins.int
        tensor_ops_enabled: builtins.bool
        def __init__(
            self,
            *,
            algorithm: builtins.int | None = ...,
            tensor_ops_enabled: builtins.bool | None = ...,
        ) -> None: ...
        def ClearField(self, field_name: typing_extensions.Literal["algorithm", b"algorithm", "tensor_ops_enabled", b"tensor_ops_enabled"]) -> None: ...
    @typing_extensions.final
    class GemmKey(google.protobuf.message.Message):
        """Identifies a gemm candidate by its numeric algorithm id."""
        DESCRIPTOR: google.protobuf.descriptor.Descriptor
        ALGORITHM_FIELD_NUMBER: builtins.int
        algorithm: builtins.int
        def __init__(
            self,
            *,
            algorithm: builtins.int | None = ...,
        ) -> None: ...
        def ClearField(self, field_name: typing_extensions.Literal["algorithm", b"algorithm"]) -> None: ...
    @typing_extensions.final
    class CudaConvPlanKey(google.protobuf.message.Message):
        """Legacy and unused in new data; superseded by AlgorithmProto."""
        DESCRIPTOR: google.protobuf.descriptor.Descriptor
        EXEC_PLAN_ID_FIELD_NUMBER: builtins.int
        exec_plan_id: builtins.str
        def __init__(
            self,
            *,
            exec_plan_id: builtins.str | None = ...,
        ) -> None: ...
        def ClearField(self, field_name: typing_extensions.Literal["exec_plan_id", b"exec_plan_id"]) -> None: ...
    SCRATCH_BYTES_FIELD_NUMBER: builtins.int
    RUN_TIME_FIELD_NUMBER: builtins.int
    FAILURE_FIELD_NUMBER: builtins.int
    CONV_FIELD_NUMBER: builtins.int
    GEMM_FIELD_NUMBER: builtins.int
    CUDA_CONV_PLAN_FIELD_NUMBER: builtins.int
    ALGORITHM_FIELD_NUMBER: builtins.int
    scratch_bytes: builtins.int
    @property
    def run_time(self) -> google.protobuf.duration_pb2.Duration: ...
    @property
    def failure(self) -> global___AutotuneResult.FailureResult: ...
    @property
    def conv(self) -> global___AutotuneResult.ConvKey: ...
    @property
    def gemm(self) -> global___AutotuneResult.GemmKey: ...
    @property
    def cuda_conv_plan(self) -> global___AutotuneResult.CudaConvPlanKey: ...
    @property
    def algorithm(self) -> tensorflow.tsl.protobuf.dnn_pb2.AlgorithmProto: ...
    def __init__(
        self,
        *,
        scratch_bytes: builtins.int | None = ...,
        run_time: google.protobuf.duration_pb2.Duration | None = ...,
        failure: global___AutotuneResult.FailureResult | None = ...,
        conv: global___AutotuneResult.ConvKey | None = ...,
        gemm: global___AutotuneResult.GemmKey | None = ...,
        cuda_conv_plan: global___AutotuneResult.CudaConvPlanKey | None = ...,
        algorithm: tensorflow.tsl.protobuf.dnn_pb2.AlgorithmProto | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["algorithm", b"algorithm", "conv", b"conv", "cuda_conv_plan", b"cuda_conv_plan", "failure", b"failure", "gemm", b"gemm", "key", b"key", "run_time", b"run_time"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["algorithm", b"algorithm", "conv", b"conv", "cuda_conv_plan", b"cuda_conv_plan", "failure", b"failure", "gemm", b"gemm", "key", b"key", "run_time", b"run_time", "scratch_bytes", b"scratch_bytes"]) -> None: ...
    def WhichOneof(self, oneof_group: typing_extensions.Literal["key", b"key"]) -> typing_extensions.Literal["conv", "gemm", "cuda_conv_plan", "algorithm"] | None: ...
global___AutotuneResult = AutotuneResult
@typing_extensions.final
class AutotuningLog(google.protobuf.message.Message):
    """All AutotuneResults for one instruction (`instr`, packed as Any),
    together with device/library version info (cuDNN, compute capability,
    PCI bus id, BLAS version).
    """
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    INSTR_FIELD_NUMBER: builtins.int
    RESULTS_FIELD_NUMBER: builtins.int
    CUDNN_VERSION_FIELD_NUMBER: builtins.int
    COMPUTE_CAPABILITY_FIELD_NUMBER: builtins.int
    DEVICE_PCI_BUS_ID_FIELD_NUMBER: builtins.int
    BLAS_VERSION_FIELD_NUMBER: builtins.int
    @property
    def instr(self) -> google.protobuf.any_pb2.Any: ...
    @property
    def results(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___AutotuneResult]:
        """Records all auto-tuning results per algorithm."""
    @property
    def cudnn_version(self) -> global___CudnnVersion: ...
    @property
    def compute_capability(self) -> global___ComputeCapability: ...
    device_pci_bus_id: builtins.str
    """stream_executor::DeviceDescription::pci_bus_id."""
    blas_version: builtins.str
    def __init__(
        self,
        *,
        instr: google.protobuf.any_pb2.Any | None = ...,
        results: collections.abc.Iterable[global___AutotuneResult] | None = ...,
        cudnn_version: global___CudnnVersion | None = ...,
        compute_capability: global___ComputeCapability | None = ...,
        device_pci_bus_id: builtins.str | None = ...,
        blas_version: builtins.str | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["compute_capability", b"compute_capability", "cudnn_version", b"cudnn_version", "instr", b"instr"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["blas_version", b"blas_version", "compute_capability", b"compute_capability", "cudnn_version", b"cudnn_version", "device_pci_bus_id", b"device_pci_bus_id", "instr", b"instr", "results", b"results"]) -> None: ...
global___AutotuningLog = AutotuningLog

View File

@@ -0,0 +1,162 @@
"""
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
"""
import builtins
import collections.abc
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.message
import sys
if sys.version_info >= (3, 8):
import typing as typing_extensions
else:
import typing_extensions
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
@typing_extensions.final
class MemAllocatorStats(google.protobuf.message.Message):
    """Some of the data from AllocatorStats: allocation count, current/peak
    bytes in use, largest single allocation, and a fragmentation metric.
    """
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    NUM_ALLOCS_FIELD_NUMBER: builtins.int
    BYTES_IN_USE_FIELD_NUMBER: builtins.int
    PEAK_BYTES_IN_USE_FIELD_NUMBER: builtins.int
    LARGEST_ALLOC_SIZE_FIELD_NUMBER: builtins.int
    FRAGMENTATION_METRIC_FIELD_NUMBER: builtins.int
    num_allocs: builtins.int
    bytes_in_use: builtins.int
    peak_bytes_in_use: builtins.int
    largest_alloc_size: builtins.int
    fragmentation_metric: builtins.float
    def __init__(
        self,
        *,
        num_allocs: builtins.int | None = ...,
        bytes_in_use: builtins.int | None = ...,
        peak_bytes_in_use: builtins.int | None = ...,
        largest_alloc_size: builtins.int | None = ...,
        fragmentation_metric: builtins.float | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["bytes_in_use", b"bytes_in_use", "fragmentation_metric", b"fragmentation_metric", "largest_alloc_size", b"largest_alloc_size", "num_allocs", b"num_allocs", "peak_bytes_in_use", b"peak_bytes_in_use"]) -> None: ...
global___MemAllocatorStats = MemAllocatorStats
@typing_extensions.final
class MemChunk(google.protobuf.message.Message):
    """One allocator chunk: address, actual/requested sizes, owning bin,
    in-use state, and bookkeeping counters (op name, step id, action counts).
    """
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    ADDRESS_FIELD_NUMBER: builtins.int
    SIZE_FIELD_NUMBER: builtins.int
    REQUESTED_SIZE_FIELD_NUMBER: builtins.int
    BIN_FIELD_NUMBER: builtins.int
    OP_NAME_FIELD_NUMBER: builtins.int
    FREED_AT_COUNT_FIELD_NUMBER: builtins.int
    ACTION_COUNT_FIELD_NUMBER: builtins.int
    IN_USE_FIELD_NUMBER: builtins.int
    STEP_ID_FIELD_NUMBER: builtins.int
    address: builtins.int
    size: builtins.int
    requested_size: builtins.int
    bin: builtins.int
    op_name: builtins.str
    freed_at_count: builtins.int
    action_count: builtins.int
    in_use: builtins.bool
    step_id: builtins.int
    def __init__(
        self,
        *,
        address: builtins.int | None = ...,
        size: builtins.int | None = ...,
        requested_size: builtins.int | None = ...,
        bin: builtins.int | None = ...,
        op_name: builtins.str | None = ...,
        freed_at_count: builtins.int | None = ...,
        action_count: builtins.int | None = ...,
        in_use: builtins.bool | None = ...,
        step_id: builtins.int | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["action_count", b"action_count", "address", b"address", "bin", b"bin", "freed_at_count", b"freed_at_count", "in_use", b"in_use", "op_name", b"op_name", "requested_size", b"requested_size", "size", b"size", "step_id", b"step_id"]) -> None: ...
global___MemChunk = MemChunk
@typing_extensions.final
class BinSummary(google.protobuf.message.Message):
    """Aggregate byte and chunk counts (in use vs. total) for one allocator bin."""
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    BIN_FIELD_NUMBER: builtins.int
    TOTAL_BYTES_IN_USE_FIELD_NUMBER: builtins.int
    TOTAL_BYTES_IN_BIN_FIELD_NUMBER: builtins.int
    TOTAL_CHUNKS_IN_USE_FIELD_NUMBER: builtins.int
    TOTAL_CHUNKS_IN_BIN_FIELD_NUMBER: builtins.int
    bin: builtins.int
    total_bytes_in_use: builtins.int
    total_bytes_in_bin: builtins.int
    total_chunks_in_use: builtins.int
    total_chunks_in_bin: builtins.int
    def __init__(
        self,
        *,
        bin: builtins.int | None = ...,
        total_bytes_in_use: builtins.int | None = ...,
        total_bytes_in_bin: builtins.int | None = ...,
        total_chunks_in_use: builtins.int | None = ...,
        total_chunks_in_bin: builtins.int | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["bin", b"bin", "total_bytes_in_bin", b"total_bytes_in_bin", "total_bytes_in_use", b"total_bytes_in_use", "total_chunks_in_bin", b"total_chunks_in_bin", "total_chunks_in_use", b"total_chunks_in_use"]) -> None: ...
global___BinSummary = BinSummary
@typing_extensions.final
class SnapShot(google.protobuf.message.Message):
    """A single (action_count, size) sample point."""
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    ACTION_COUNT_FIELD_NUMBER: builtins.int
    SIZE_FIELD_NUMBER: builtins.int
    action_count: builtins.int
    size: builtins.int
    def __init__(
        self,
        *,
        action_count: builtins.int | None = ...,
        size: builtins.int | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["action_count", b"action_count", "size", b"size"]) -> None: ...
global___SnapShot = SnapShot
@typing_extensions.final
class MemoryDump(google.protobuf.message.Message):
    """Full dump for one allocator: per-bin summaries, individual chunks,
    snapshots, and overall MemAllocatorStats.
    """
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    ALLOCATOR_NAME_FIELD_NUMBER: builtins.int
    BIN_SUMMARY_FIELD_NUMBER: builtins.int
    CHUNK_FIELD_NUMBER: builtins.int
    SNAP_SHOT_FIELD_NUMBER: builtins.int
    STATS_FIELD_NUMBER: builtins.int
    allocator_name: builtins.str
    @property
    def bin_summary(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BinSummary]: ...
    @property
    def chunk(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___MemChunk]: ...
    @property
    def snap_shot(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___SnapShot]: ...
    @property
    def stats(self) -> global___MemAllocatorStats: ...
    def __init__(
        self,
        *,
        allocator_name: builtins.str | None = ...,
        bin_summary: collections.abc.Iterable[global___BinSummary] | None = ...,
        chunk: collections.abc.Iterable[global___MemChunk] | None = ...,
        snap_shot: collections.abc.Iterable[global___SnapShot] | None = ...,
        stats: global___MemAllocatorStats | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["stats", b"stats"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["allocator_name", b"allocator_name", "bin_summary", b"bin_summary", "chunk", b"chunk", "snap_shot", b"snap_shot", "stats", b"stats"]) -> None: ...
global___MemoryDump = MemoryDump

View File

@@ -0,0 +1,112 @@
"""
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
"""
import builtins
import collections.abc
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.message
import sys
if sys.version_info >= (3, 8):
import typing as typing_extensions
else:
import typing_extensions
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
@typing_extensions.final
class CoordinatedJob(google.protobuf.message.Message):
    """Represents a job type and the number of tasks under this job.
    For example, ("worker", 20) implies that there will be 20 worker tasks.
    """
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    NAME_FIELD_NUMBER: builtins.int
    NUM_TASKS_FIELD_NUMBER: builtins.int
    name: builtins.str
    num_tasks: builtins.int
    def __init__(
        self,
        *,
        name: builtins.str | None = ...,
        num_tasks: builtins.int | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["name", b"name", "num_tasks", b"num_tasks"]) -> None: ...
global___CoordinatedJob = CoordinatedJob
@typing_extensions.final
class CoordinationServiceConfig(google.protobuf.message.Message):
    """Coordination service configuration parameters.
    The system picks appropriate values for fields that are not set.
    """
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    SERVICE_TYPE_FIELD_NUMBER: builtins.int
    SERVICE_LEADER_FIELD_NUMBER: builtins.int
    ENABLE_HEALTH_CHECK_FIELD_NUMBER: builtins.int
    CLUSTER_REGISTER_TIMEOUT_IN_MS_FIELD_NUMBER: builtins.int
    HEARTBEAT_TIMEOUT_IN_MS_FIELD_NUMBER: builtins.int
    COORDINATED_JOB_LIST_FIELD_NUMBER: builtins.int
    SHUTDOWN_BARRIER_TIMEOUT_IN_MS_FIELD_NUMBER: builtins.int
    AGENT_DESTRUCTION_WITHOUT_SHUTDOWN_FIELD_NUMBER: builtins.int
    RECOVERABLE_JOBS_FIELD_NUMBER: builtins.int
    service_type: builtins.str
    """Type of coordination service implementation to enable.
    For example, setting the service type as "standalone" starts a service
    instance on the leader task to provide the coordination services such as
    heartbeats and consistent key-value store.
    """
    service_leader: builtins.str
    """Address where the coordination service instance is hosted."""
    enable_health_check: builtins.bool
    """Whether to enable the health check mechanism."""
    cluster_register_timeout_in_ms: builtins.int
    """Maximum wait time for all members in the cluster to be registered."""
    heartbeat_timeout_in_ms: builtins.int
    """Heartbeat timeout, if a task does not record heartbeat in this time
    window, it will be considered disconnected.
    Note: This is also used as a grace period to accept any heartbeats after
    the agent has disconnected, to account for the lag time between the service
    recording the state change and the agent stopping heartbeats.
    """
    @property
    def coordinated_job_list(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___CoordinatedJob]: ...
    shutdown_barrier_timeout_in_ms: builtins.int
    """Denotes how long to wait for all coordination agents to reach the barriers
    (after the first shutdown request) before disconnecting together. If
    set to 0, no barrier is imposed upon shutdown and each worker can
    disconnect individually.
    """
    agent_destruction_without_shutdown: builtins.bool
    """If set, agents do not make an explicit Shutdown() call. Service will only
    find out about the disconnected agent via stale heartbeats. Used for
    testing.
    """
    @property
    def recoverable_jobs(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:
        """The list of jobs which are recoverable. If a task in this list fails,
        it will not propagate error to other tasks.
        If empty, no jobs will be recoverable and every task failure will cause
        error propagation to other tasks.
        """
    def __init__(
        self,
        *,
        service_type: builtins.str | None = ...,
        service_leader: builtins.str | None = ...,
        enable_health_check: builtins.bool | None = ...,
        cluster_register_timeout_in_ms: builtins.int | None = ...,
        heartbeat_timeout_in_ms: builtins.int | None = ...,
        coordinated_job_list: collections.abc.Iterable[global___CoordinatedJob] | None = ...,
        shutdown_barrier_timeout_in_ms: builtins.int | None = ...,
        agent_destruction_without_shutdown: builtins.bool | None = ...,
        recoverable_jobs: collections.abc.Iterable[builtins.str] | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["agent_destruction_without_shutdown", b"agent_destruction_without_shutdown", "cluster_register_timeout_in_ms", b"cluster_register_timeout_in_ms", "coordinated_job_list", b"coordinated_job_list", "enable_health_check", b"enable_health_check", "heartbeat_timeout_in_ms", b"heartbeat_timeout_in_ms", "recoverable_jobs", b"recoverable_jobs", "service_leader", b"service_leader", "service_type", b"service_type", "shutdown_barrier_timeout_in_ms", b"shutdown_barrier_timeout_in_ms"]) -> None: ...
global___CoordinationServiceConfig = CoordinationServiceConfig

View File

@@ -0,0 +1,699 @@
"""
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
"""
import builtins
import collections.abc
import google.protobuf.any_pb2
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.internal.enum_type_wrapper
import google.protobuf.message
import sys
import typing
if sys.version_info >= (3, 10):
import typing as typing_extensions
else:
import typing_extensions
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
class _CoordinatedTaskState:
    """Internal value-type holder for the enum (mypy-protobuf pattern)."""
    ValueType = typing.NewType("ValueType", builtins.int)
    V: typing_extensions.TypeAlias = ValueType
class _CoordinatedTaskStateEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_CoordinatedTaskState.ValueType], builtins.type):
    """EnumTypeWrapper metaclass exposing the enum's values and descriptor."""
    DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
    TASKSTATE_UNSPECIFIED: _CoordinatedTaskState.ValueType # 0
    """TASKSTATE_UNSPECIFIED is an invalid state such that indicates a bug."""
    TASKSTATE_UNINITIALIZED: _CoordinatedTaskState.ValueType # 1
    """TASKSTATE_UNINITIALIZED is an agent-only state. While the agent is
    disconnected, the service has no way of knowing if the task is
    initialized/uninitialized.
    """
    TASKSTATE_DISCONNECTED: _CoordinatedTaskState.ValueType # 2
    TASKSTATE_CONNECTED: _CoordinatedTaskState.ValueType # 3
    TASKSTATE_ERROR: _CoordinatedTaskState.ValueType # 4
class CoordinatedTaskState(_CoordinatedTaskState, metaclass=_CoordinatedTaskStateEnumTypeWrapper):
    """Represents the state of a remote worker"""
TASKSTATE_UNSPECIFIED: CoordinatedTaskState.ValueType # 0
"""TASKSTATE_UNSPECIFIED is an invalid state such that indicates a bug."""
TASKSTATE_UNINITIALIZED: CoordinatedTaskState.ValueType # 1
"""TASKSTATE_UNINITIALIZED is an agent-only state. While the agent is
disconnected, the service has no way of knowing if the task is
initialized/uninitialized.
"""
TASKSTATE_DISCONNECTED: CoordinatedTaskState.ValueType # 2
TASKSTATE_CONNECTED: CoordinatedTaskState.ValueType # 3
TASKSTATE_ERROR: CoordinatedTaskState.ValueType # 4
global___CoordinatedTaskState = CoordinatedTaskState
@typing_extensions.final
class CoordinatedTask(google.protobuf.message.Message):
    """Represents a remote worker task, specified by job name and task id."""
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    JOB_NAME_FIELD_NUMBER: builtins.int
    TASK_ID_FIELD_NUMBER: builtins.int
    # (job_name, task_id) identifies a task; see RegisterTaskRequest for the
    # additional `incarnation` that distinguishes process lifetimes.
    job_name: builtins.str
    task_id: builtins.int
    def __init__(
        self,
        *,
        job_name: builtins.str | None = ...,
        task_id: builtins.int | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["job_name", b"job_name", "task_id", b"task_id"]) -> None: ...
global___CoordinatedTask = CoordinatedTask
@typing_extensions.final
class CoordinationServiceError(google.protobuf.message.Message):
    """Status payload for all coordination service errors.
    Note: an empty proto may be set if the error is triggered by the task's own
    agent calls (i.e. not propagated by the service from another remote task).
    """
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    IS_REPORTED_ERROR_FIELD_NUMBER: builtins.int
    SOURCE_TASK_FIELD_NUMBER: builtins.int
    is_reported_error: builtins.bool
    """If true, error is reported via the agent API by the user (and not an
    internal service error).
    """
    @property
    def source_task(self) -> global___CoordinatedTask:
        """Denotes which task hit the error. If unset, the error originated from the
        same task that is processing this error.
        """
    def __init__(
        self,
        *,
        is_reported_error: builtins.bool | None = ...,
        source_task: global___CoordinatedTask | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["source_task", b"source_task"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["is_reported_error", b"is_reported_error", "source_task", b"source_task"]) -> None: ...
global___CoordinationServiceError = CoordinationServiceError
@typing_extensions.final
class CoordinatedTaskStateInfo(google.protobuf.message.Message):
    """State report for one coordinated task, with error code/message/payload
    fields (presumably populated only when state is TASKSTATE_ERROR — confirm
    against the service implementation).
    """
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    TASK_FIELD_NUMBER: builtins.int
    STATE_FIELD_NUMBER: builtins.int
    ERROR_CODE_FIELD_NUMBER: builtins.int
    ERROR_MESSAGE_FIELD_NUMBER: builtins.int
    ERROR_PAYLOAD_FIELD_NUMBER: builtins.int
    @property
    def task(self) -> global___CoordinatedTask: ...
    state: global___CoordinatedTaskState.ValueType
    error_code: builtins.int
    error_message: builtins.str
    @property
    def error_payload(self) -> global___CoordinationServiceError: ...
    def __init__(
        self,
        *,
        task: global___CoordinatedTask | None = ...,
        state: global___CoordinatedTaskState.ValueType | None = ...,
        error_code: builtins.int | None = ...,
        error_message: builtins.str | None = ...,
        error_payload: global___CoordinationServiceError | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["error_payload", b"error_payload", "task", b"task"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["error_code", b"error_code", "error_message", b"error_message", "error_payload", b"error_payload", "state", b"state", "task", b"task"]) -> None: ...
global___CoordinatedTaskStateInfo = CoordinatedTaskStateInfo
@typing_extensions.final
class DeviceInfo(google.protobuf.message.Message):
    """Placeholder message to be extended by other runtimes' device representations."""
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    DEVICE_FIELD_NUMBER: builtins.int
    @property
    def device(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[google.protobuf.any_pb2.Any]:
        """Runtime-specific device protos, packed as google.protobuf.Any."""
    def __init__(
        self,
        *,
        device: collections.abc.Iterable[google.protobuf.any_pb2.Any] | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["device", b"device"]) -> None: ...
global___DeviceInfo = DeviceInfo
@typing_extensions.final
class RegisterTaskRequest(google.protobuf.message.Message):
    """Request and response messages for registering a task to the cluster leader.
    A task is uniquely represented by its `job_name`, `task_id` and
    `incarnation`. Leader responds with its `incarnation` to identify a leader
    process.
    """
    DESCRIPTOR: google.protobuf.descriptor.Descriptor
    INCARNATION_FIELD_NUMBER: builtins.int
    SOURCE_TASK_FIELD_NUMBER: builtins.int
    # NOTE(review): presumably `incarnation` changes across restarts of the
    # same task — confirm with the coordination service implementation.
    incarnation: builtins.int
    @property
    def source_task(self) -> global___CoordinatedTask: ...
    def __init__(
        self,
        *,
        incarnation: builtins.int | None = ...,
        source_task: global___CoordinatedTask | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["source_task", b"source_task"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["incarnation", b"incarnation", "source_task", b"source_task"]) -> None: ...
global___RegisterTaskRequest = RegisterTaskRequest
@typing_extensions.final
class RegisterTaskResponse(google.protobuf.message.Message):
DESCRIPTOR: google.protobuf.descriptor.Descriptor
LEADER_INCARNATION_FIELD_NUMBER: builtins.int
leader_incarnation: builtins.int
def __init__(
self,
*,
leader_incarnation: builtins.int | None = ...,
) -> None: ...
def ClearField(self, field_name: typing_extensions.Literal["leader_incarnation", b"leader_incarnation"]) -> None: ...
global___RegisterTaskResponse = RegisterTaskResponse
@typing_extensions.final
class HeartbeatRequest(google.protobuf.message.Message):
    """Request and response messages for sending heartbeats."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    INCARNATION_FIELD_NUMBER: builtins.int
    SOURCE_TASK_FIELD_NUMBER: builtins.int
    # `incarnation` identifies this run of the sending task (see RegisterTaskRequest).
    incarnation: builtins.int
    @property
    def source_task(self) -> global___CoordinatedTask: ...
    def __init__(
        self,
        *,
        incarnation: builtins.int | None = ...,
        source_task: global___CoordinatedTask | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["source_task", b"source_task"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["incarnation", b"incarnation", "source_task", b"source_task"]) -> None: ...

global___HeartbeatRequest = HeartbeatRequest

@typing_extensions.final
class HeartbeatResponse(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    LEADER_INCARNATION_FIELD_NUMBER: builtins.int
    leader_incarnation: builtins.int
    """If there are failures in cluster, use additional metadata in response to
    broadcast error code and message to other tasks.
    """
    def __init__(
        self,
        *,
        leader_incarnation: builtins.int | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["leader_incarnation", b"leader_incarnation"]) -> None: ...

global___HeartbeatResponse = HeartbeatResponse
@typing_extensions.final
class WaitForAllTasksRequest(google.protobuf.message.Message):
    """Request and response messages for waiting for all tasks."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    SOURCE_TASK_FIELD_NUMBER: builtins.int
    DEVICE_INFO_FIELD_NUMBER: builtins.int
    @property
    def source_task(self) -> global___CoordinatedTask: ...
    @property
    def device_info(self) -> global___DeviceInfo:
        """All local device attributes on the request sender;"""
    def __init__(
        self,
        *,
        source_task: global___CoordinatedTask | None = ...,
        device_info: global___DeviceInfo | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["device_info", b"device_info", "source_task", b"source_task"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["device_info", b"device_info", "source_task", b"source_task"]) -> None: ...

global___WaitForAllTasksRequest = WaitForAllTasksRequest

@typing_extensions.final
class WaitForAllTasksResponse(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    LEADER_INCARNATION_FIELD_NUMBER: builtins.int
    DEVICE_INFO_FIELD_NUMBER: builtins.int
    leader_incarnation: builtins.int
    @property
    def device_info(self) -> global___DeviceInfo:
        """All devices in the cluster."""
    def __init__(
        self,
        *,
        leader_incarnation: builtins.int | None = ...,
        device_info: global___DeviceInfo | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["device_info", b"device_info"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["device_info", b"device_info", "leader_incarnation", b"leader_incarnation"]) -> None: ...

global___WaitForAllTasksResponse = WaitForAllTasksResponse
@typing_extensions.final
class ShutdownTaskRequest(google.protobuf.message.Message):
    """Request and response messages for disconnecting a task from the service."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    SOURCE_TASK_FIELD_NUMBER: builtins.int
    @property
    def source_task(self) -> global___CoordinatedTask: ...
    def __init__(
        self,
        *,
        source_task: global___CoordinatedTask | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["source_task", b"source_task"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["source_task", b"source_task"]) -> None: ...

global___ShutdownTaskRequest = ShutdownTaskRequest

@typing_extensions.final
class ShutdownTaskResponse(google.protobuf.message.Message):
    # Empty message; the response carries no fields.
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    def __init__(
        self,
    ) -> None: ...

global___ShutdownTaskResponse = ShutdownTaskResponse
@typing_extensions.final
class ResetTaskRequest(google.protobuf.message.Message):
    """Request and response messages for resetting a task state in the service."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    SOURCE_TASK_FIELD_NUMBER: builtins.int
    @property
    def source_task(self) -> global___CoordinatedTask: ...
    def __init__(
        self,
        *,
        source_task: global___CoordinatedTask | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["source_task", b"source_task"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["source_task", b"source_task"]) -> None: ...

global___ResetTaskRequest = ResetTaskRequest

@typing_extensions.final
class ResetTaskResponse(google.protobuf.message.Message):
    # Empty message; the response carries no fields.
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    def __init__(
        self,
    ) -> None: ...

global___ResetTaskResponse = ResetTaskResponse
@typing_extensions.final
class ReportErrorToTaskRequest(google.protobuf.message.Message):
    """Request and response messages for reporting errors to task."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    ERROR_CODE_FIELD_NUMBER: builtins.int
    ERROR_MESSAGE_FIELD_NUMBER: builtins.int
    ERROR_PAYLOAD_FIELD_NUMBER: builtins.int
    error_code: builtins.int
    error_message: builtins.str
    @property
    def error_payload(self) -> global___CoordinationServiceError: ...
    def __init__(
        self,
        *,
        error_code: builtins.int | None = ...,
        error_message: builtins.str | None = ...,
        error_payload: global___CoordinationServiceError | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["error_payload", b"error_payload"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["error_code", b"error_code", "error_message", b"error_message", "error_payload", b"error_payload"]) -> None: ...

global___ReportErrorToTaskRequest = ReportErrorToTaskRequest

@typing_extensions.final
class ReportErrorToTaskResponse(google.protobuf.message.Message):
    # Empty message; the response carries no fields.
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    def __init__(
        self,
    ) -> None: ...

global___ReportErrorToTaskResponse = ReportErrorToTaskResponse
@typing_extensions.final
class ReportErrorToServiceRequest(google.protobuf.message.Message):
    """Request and response messages for reporting errors to service instance."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    ERROR_CODE_FIELD_NUMBER: builtins.int
    ERROR_MESSAGE_FIELD_NUMBER: builtins.int
    ERROR_ORIGIN_FIELD_NUMBER: builtins.int
    error_code: builtins.int
    error_message: builtins.str
    @property
    # Task on which the reported error originated.
    def error_origin(self) -> global___CoordinatedTask: ...
    def __init__(
        self,
        *,
        error_code: builtins.int | None = ...,
        error_message: builtins.str | None = ...,
        error_origin: global___CoordinatedTask | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["error_origin", b"error_origin"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["error_code", b"error_code", "error_message", b"error_message", "error_origin", b"error_origin"]) -> None: ...

global___ReportErrorToServiceRequest = ReportErrorToServiceRequest

@typing_extensions.final
class ReportErrorToServiceResponse(google.protobuf.message.Message):
    # Empty message; the response carries no fields.
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    def __init__(
        self,
    ) -> None: ...

global___ReportErrorToServiceResponse = ReportErrorToServiceResponse
@typing_extensions.final
class GetTaskStateRequest(google.protobuf.message.Message):
    """Request and response messages for getting state of a remote task."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    SOURCE_TASK_FIELD_NUMBER: builtins.int
    @property
    # Repeated: state may be queried for several tasks in one request.
    def source_task(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___CoordinatedTask]: ...
    def __init__(
        self,
        *,
        source_task: collections.abc.Iterable[global___CoordinatedTask] | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["source_task", b"source_task"]) -> None: ...

global___GetTaskStateRequest = GetTaskStateRequest

@typing_extensions.final
class GetTaskStateResponse(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    TASK_STATE_FIELD_NUMBER: builtins.int
    @property
    def task_state(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___CoordinatedTaskStateInfo]: ...
    def __init__(
        self,
        *,
        task_state: collections.abc.Iterable[global___CoordinatedTaskStateInfo] | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["task_state", b"task_state"]) -> None: ...

global___GetTaskStateResponse = GetTaskStateResponse
@typing_extensions.final
class KeyValueEntry(google.protobuf.message.Message):
    """Message for configuration key value.
    Key is structured like Unix file system, with multiple levels of directory
    names separated by the slash ('/') characters.
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    KEY_FIELD_NUMBER: builtins.int
    VALUE_FIELD_NUMBER: builtins.int
    key: builtins.str
    # Values are opaque bytes; interpretation is left to the caller.
    value: builtins.bytes
    def __init__(
        self,
        *,
        key: builtins.str | None = ...,
        value: builtins.bytes | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["key", b"key", "value", b"value"]) -> None: ...

global___KeyValueEntry = KeyValueEntry
@typing_extensions.final
class InsertKeyValueRequest(google.protobuf.message.Message):
    """Request and response messages for inserting configuration key-value data."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    KV_FIELD_NUMBER: builtins.int
    @property
    def kv(self) -> global___KeyValueEntry: ...
    def __init__(
        self,
        *,
        kv: global___KeyValueEntry | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["kv", b"kv"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["kv", b"kv"]) -> None: ...

global___InsertKeyValueRequest = InsertKeyValueRequest

@typing_extensions.final
class InsertKeyValueResponse(google.protobuf.message.Message):
    # Empty message; the response carries no fields.
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    def __init__(
        self,
    ) -> None: ...

global___InsertKeyValueResponse = InsertKeyValueResponse
@typing_extensions.final
class GetKeyValueRequest(google.protobuf.message.Message):
    """Request and response messages for getting configuration key-value data."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    KEY_FIELD_NUMBER: builtins.int
    key: builtins.str
    def __init__(
        self,
        *,
        key: builtins.str | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["key", b"key"]) -> None: ...

global___GetKeyValueRequest = GetKeyValueRequest

@typing_extensions.final
class GetKeyValueResponse(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    KV_FIELD_NUMBER: builtins.int
    @property
    def kv(self) -> global___KeyValueEntry: ...
    def __init__(
        self,
        *,
        kv: global___KeyValueEntry | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["kv", b"kv"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["kv", b"kv"]) -> None: ...

global___GetKeyValueResponse = GetKeyValueResponse
@typing_extensions.final
class TryGetKeyValueRequest(google.protobuf.message.Message):
    # NOTE(review): same shape as GetKeyValueRequest; presumably the non-blocking
    # "try" variant of the lookup — confirm against the coordination service impl.
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    KEY_FIELD_NUMBER: builtins.int
    key: builtins.str
    def __init__(
        self,
        *,
        key: builtins.str | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["key", b"key"]) -> None: ...

global___TryGetKeyValueRequest = TryGetKeyValueRequest

@typing_extensions.final
class TryGetKeyValueResponse(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    KV_FIELD_NUMBER: builtins.int
    @property
    def kv(self) -> global___KeyValueEntry: ...
    def __init__(
        self,
        *,
        kv: global___KeyValueEntry | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["kv", b"kv"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["kv", b"kv"]) -> None: ...

global___TryGetKeyValueResponse = TryGetKeyValueResponse
@typing_extensions.final
class GetKeyValueDirRequest(google.protobuf.message.Message):
    # Directory-level lookup: keys are slash-separated paths (see KeyValueEntry).
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    DIRECTORY_KEY_FIELD_NUMBER: builtins.int
    directory_key: builtins.str
    def __init__(
        self,
        *,
        directory_key: builtins.str | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["directory_key", b"directory_key"]) -> None: ...

global___GetKeyValueDirRequest = GetKeyValueDirRequest

@typing_extensions.final
class GetKeyValueDirResponse(google.protobuf.message.Message):
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    DIRECTORY_KEY_FIELD_NUMBER: builtins.int
    KV_FIELD_NUMBER: builtins.int
    # Echo of the requested directory key, plus every entry found under it.
    directory_key: builtins.str
    @property
    def kv(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___KeyValueEntry]: ...
    def __init__(
        self,
        *,
        directory_key: builtins.str | None = ...,
        kv: collections.abc.Iterable[global___KeyValueEntry] | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["directory_key", b"directory_key", "kv", b"kv"]) -> None: ...

global___GetKeyValueDirResponse = GetKeyValueDirResponse
@typing_extensions.final
class DeleteKeyValueRequest(google.protobuf.message.Message):
    """Request and response messages for deleting configuration key-value data.
    When is_directory is true, delete key-values recursively under `key`.
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    KEY_FIELD_NUMBER: builtins.int
    IS_DIRECTORY_FIELD_NUMBER: builtins.int
    key: builtins.str
    is_directory: builtins.bool
    def __init__(
        self,
        *,
        key: builtins.str | None = ...,
        is_directory: builtins.bool | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["is_directory", b"is_directory", "key", b"key"]) -> None: ...

global___DeleteKeyValueRequest = DeleteKeyValueRequest

@typing_extensions.final
class DeleteKeyValueResponse(google.protobuf.message.Message):
    # Empty message; the response carries no fields.
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    def __init__(
        self,
    ) -> None: ...

global___DeleteKeyValueResponse = DeleteKeyValueResponse
@typing_extensions.final
class BarrierRequest(google.protobuf.message.Message):
    """Request and response messages for generic sync barriers."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    BARRIER_ID_FIELD_NUMBER: builtins.int
    BARRIER_TIMEOUT_IN_MS_FIELD_NUMBER: builtins.int
    TASKS_FIELD_NUMBER: builtins.int
    SOURCE_TASK_FIELD_NUMBER: builtins.int
    barrier_id: builtins.str
    barrier_timeout_in_ms: builtins.int
    @property
    def tasks(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___CoordinatedTask]:
        """Denotes list of tasks that will wait for the barrier. If unspecified, it
        implies that the entire cluster is participating in the barrier.
        """
    @property
    def source_task(self) -> global___CoordinatedTask:
        """Task that is making the request."""
    def __init__(
        self,
        *,
        barrier_id: builtins.str | None = ...,
        barrier_timeout_in_ms: builtins.int | None = ...,
        tasks: collections.abc.Iterable[global___CoordinatedTask] | None = ...,
        source_task: global___CoordinatedTask | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["source_task", b"source_task"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["barrier_id", b"barrier_id", "barrier_timeout_in_ms", b"barrier_timeout_in_ms", "source_task", b"source_task", "tasks", b"tasks"]) -> None: ...

global___BarrierRequest = BarrierRequest

@typing_extensions.final
class BarrierResponse(google.protobuf.message.Message):
    # Empty message; the response carries no fields.
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    def __init__(
        self,
    ) -> None: ...

global___BarrierResponse = BarrierResponse
@typing_extensions.final
class CancelBarrierRequest(google.protobuf.message.Message):
    """Request and response messages for cancelling generic sync barriers."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    BARRIER_ID_FIELD_NUMBER: builtins.int
    SOURCE_TASK_FIELD_NUMBER: builtins.int
    barrier_id: builtins.str
    @property
    def source_task(self) -> global___CoordinatedTask:
        """Task that is making the request."""
    def __init__(
        self,
        *,
        barrier_id: builtins.str | None = ...,
        source_task: global___CoordinatedTask | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["source_task", b"source_task"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["barrier_id", b"barrier_id", "source_task", b"source_task"]) -> None: ...

global___CancelBarrierRequest = CancelBarrierRequest

@typing_extensions.final
class CancelBarrierResponse(google.protobuf.message.Message):
    # Empty message; the response carries no fields.
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    def __init__(
        self,
    ) -> None: ...

global___CancelBarrierResponse = CancelBarrierResponse

# ==== commit-view extraction artifact: boundary between two generated stub
# files (original markers: "View File" / "@@ -0,0 +1,85 @@") — the module
# docstring and imports below belong to the next .pyi file. ====
"""
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
"""
import builtins
import collections.abc
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.message
import sys
if sys.version_info >= (3, 8):
import typing as typing_extensions
else:
import typing_extensions
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
@typing_extensions.final
class GrpcPayloadContainer(google.protobuf.message.Message):
    """Used to serialize and transmit tensorflow::Status payloads through
    grpc::Status `error_details` since grpc::Status lacks payload API.
    TODO(b/204231601): Use GRPC API once supported.
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    @typing_extensions.final
    class PayloadsEntry(google.protobuf.message.Message):
        # Synthetic map-entry message backing the `payloads` map<string, bytes> field.
        DESCRIPTOR: google.protobuf.descriptor.Descriptor

        KEY_FIELD_NUMBER: builtins.int
        VALUE_FIELD_NUMBER: builtins.int
        key: builtins.str
        value: builtins.bytes
        def __init__(
            self,
            *,
            key: builtins.str | None = ...,
            value: builtins.bytes | None = ...,
        ) -> None: ...
        def ClearField(self, field_name: typing_extensions.Literal["key", b"key", "value", b"value"]) -> None: ...

    PAYLOADS_FIELD_NUMBER: builtins.int
    @property
    def payloads(self) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.bytes]: ...
    def __init__(
        self,
        *,
        payloads: collections.abc.Mapping[builtins.str, builtins.bytes] | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["payloads", b"payloads"]) -> None: ...

global___GrpcPayloadContainer = GrpcPayloadContainer
@typing_extensions.final
class GrpcPayloadsLost(google.protobuf.message.Message):
    """If included as a payload, this message flags the Status to have lost payloads
    during the GRPC transmission.
    URI: "type.googleapis.com/tensorflow.distributed_runtime.GrpcPayloadsLost"
    """

    # Marker message: presence alone is the signal, so it carries no fields.
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    def __init__(
        self,
    ) -> None: ...

global___GrpcPayloadsLost = GrpcPayloadsLost

@typing_extensions.final
class WorkerPossiblyRestarted(google.protobuf.message.Message):
    """If included as a payload, this message flags the Status to be a possible
    outcome of a worker restart.
    URI:
    "type.googleapis.com/tensorflow.distributed_runtime.WorkerPossiblyRestarted"
    """

    # Marker message: presence alone is the signal, so it carries no fields.
    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    def __init__(
        self,
    ) -> None: ...

global___WorkerPossiblyRestarted = WorkerPossiblyRestarted

# ==== commit-view extraction artifact: boundary between two generated stub
# files (original markers: "View File" / "@@ -0,0 +1,440 @@") — the module
# docstring and imports below belong to the next .pyi file. ====
"""
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
LINT: LEGACY_NAMES"""
import builtins
import collections.abc
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.internal.enum_type_wrapper
import google.protobuf.message
import google.protobuf.wrappers_pb2
import sys
import typing
if sys.version_info >= (3, 10):
import typing as typing_extensions
else:
import typing_extensions
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
# mypy-protobuf enum idiom: `_DataType` declares the NewType for values, the
# `_EnumTypeWrapper` subclass is used as the metaclass of the public enum class,
# and the member names are mirrored at module level after the class.
class _DataType:
    ValueType = typing.NewType("ValueType", builtins.int)
    V: typing_extensions.TypeAlias = ValueType

class _DataTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_DataType.ValueType], builtins.type):
    DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
    kFloat: _DataType.ValueType  # 0
    kDouble: _DataType.ValueType  # 1
    kHalf: _DataType.ValueType  # 2
    kInt8: _DataType.ValueType  # 3
    kInt32: _DataType.ValueType  # 4
    kComplexFloat: _DataType.ValueType  # 5
    kComplexDouble: _DataType.ValueType  # 6
    kBF16: _DataType.ValueType  # 7
    kF8E5M2: _DataType.ValueType  # 8
    kF8E4M3FN: _DataType.ValueType  # 9

class DataType(_DataType, metaclass=_DataTypeEnumTypeWrapper):
    """Specifies the data type used by an operation."""

kFloat: DataType.ValueType  # 0
kDouble: DataType.ValueType  # 1
kHalf: DataType.ValueType  # 2
kInt8: DataType.ValueType  # 3
kInt32: DataType.ValueType  # 4
kComplexFloat: DataType.ValueType  # 5
kComplexDouble: DataType.ValueType  # 6
kBF16: DataType.ValueType  # 7
kF8E5M2: DataType.ValueType  # 8
kF8E4M3FN: DataType.ValueType  # 9
global___DataType = DataType
class _DataLayout:
    ValueType = typing.NewType("ValueType", builtins.int)
    V: typing_extensions.TypeAlias = ValueType

class _DataLayoutEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_DataLayout.ValueType], builtins.type):
    DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
    kYXDepthBatch: _DataLayout.ValueType  # 0
    """Naming convention:
    Y <-> row or height
    X <-> column or width
    Batch <-> batch, or N
    Depth <-> feature, or channel
    TODO(timshen): turn them into cuDNN names, e.g. kNCHW.
    Note: In cudnn, kBatchDepthYX4 and kBatchDepthYX32 are the same layout
    (namely, NCHW_VECT_C). It differentiates between these two by using a
    different data type (int8x4 vs int8x32). In StreamExecutor we use
    different layouts for these, because we don't usually pass an explicit data
    type to StreamExecutor functions.
    """
    kYXBatchDepth: _DataLayout.ValueType  # 1
    kBatchYXDepth: _DataLayout.ValueType  # 2
    """cuDNN's NHWC layout"""
    kBatchDepthYX: _DataLayout.ValueType  # 3
    """cuDNN's NCHW layout"""
    kBatchDepthYX4: _DataLayout.ValueType  # 4
    """cuDNN's NCHW_VECT_C with 4-elem vectors (e.g. int8x4)"""
    kBatchDepthYX32: _DataLayout.ValueType  # 5
    """cuDNN's NCHW_VECT_C with 32-elem vects (e.g. int8x32)"""

class DataLayout(_DataLayout, metaclass=_DataLayoutEnumTypeWrapper):
    """Describes how a convolution input or output layer's data is formatted."""

kYXDepthBatch: DataLayout.ValueType  # 0
"""Naming convention:
Y <-> row or height
X <-> column or width
Batch <-> batch, or N
Depth <-> feature, or channel
TODO(timshen): turn them into cuDNN names, e.g. kNCHW.
Note: In cudnn, kBatchDepthYX4 and kBatchDepthYX32 are the same layout
(namely, NCHW_VECT_C). It differentiates between these two by using a
different data type (int8x4 vs int8x32). In StreamExecutor we use
different layouts for these, because we don't usually pass an explicit data
type to StreamExecutor functions.
"""
kYXBatchDepth: DataLayout.ValueType  # 1
kBatchYXDepth: DataLayout.ValueType  # 2
"""cuDNN's NHWC layout"""
kBatchDepthYX: DataLayout.ValueType  # 3
"""cuDNN's NCHW layout"""
kBatchDepthYX4: DataLayout.ValueType  # 4
"""cuDNN's NCHW_VECT_C with 4-elem vectors (e.g. int8x4)"""
kBatchDepthYX32: DataLayout.ValueType  # 5
"""cuDNN's NCHW_VECT_C with 32-elem vects (e.g. int8x32)"""
global___DataLayout = DataLayout
class _FilterLayout:
    ValueType = typing.NewType("ValueType", builtins.int)
    V: typing_extensions.TypeAlias = ValueType

class _FilterLayoutEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_FilterLayout.ValueType], builtins.type):
    DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
    kOutputInputYX: _FilterLayout.ValueType  # 0
    """Naming convention:
    Y <-> row or height
    X <-> column or width
    Output <-> output feature, or N
    Input <-> input feature, or N
    TODO(timshen): turn them into cuDNN names, e.g. kNCHW.
    cuDNN's NCHW layout
    """
    kOutputYXInput: _FilterLayout.ValueType  # 1
    """cuDNN's NHWC layout"""
    kOutputInputYX4: _FilterLayout.ValueType  # 2
    """cuDNN's NCHW_VECT_C layout with 4-elem vectors"""
    # Note: wire values are intentionally non-sequential here (kOutputInputYX32 is 5).
    kOutputInputYX32: _FilterLayout.ValueType  # 5
    """cuDNN's NCHW_VECT_C layout with 32-elem vectors"""
    kInputYXOutput: _FilterLayout.ValueType  # 3
    kYXInputOutput: _FilterLayout.ValueType  # 4

class FilterLayout(_FilterLayout, metaclass=_FilterLayoutEnumTypeWrapper):
    """Describes how a convolution filter is laid out in the memory."""

kOutputInputYX: FilterLayout.ValueType  # 0
"""Naming convention:
Y <-> row or height
X <-> column or width
Output <-> output feature, or N
Input <-> input feature, or N
TODO(timshen): turn them into cuDNN names, e.g. kNCHW.
cuDNN's NCHW layout
"""
kOutputYXInput: FilterLayout.ValueType  # 1
"""cuDNN's NHWC layout"""
kOutputInputYX4: FilterLayout.ValueType  # 2
"""cuDNN's NCHW_VECT_C layout with 4-elem vectors"""
kOutputInputYX32: FilterLayout.ValueType  # 5
"""cuDNN's NCHW_VECT_C layout with 32-elem vectors"""
kInputYXOutput: FilterLayout.ValueType  # 3
kYXInputOutput: FilterLayout.ValueType  # 4
global___FilterLayout = FilterLayout
class _ActivationMode:
    ValueType = typing.NewType("ValueType", builtins.int)
    V: typing_extensions.TypeAlias = ValueType

class _ActivationModeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_ActivationMode.ValueType], builtins.type):
    DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
    kNone: _ActivationMode.ValueType  # 0
    kSigmoid: _ActivationMode.ValueType  # 1
    kRelu: _ActivationMode.ValueType  # 2
    """Rectified linear activation: f(x) = x < 0 ? 0 : x"""
    kRelu6: _ActivationMode.ValueType  # 3
    """Rectified linear activation; where upper maximum is 6.0."""
    kReluX: _ActivationMode.ValueType  # 4
    """Rectified linear activation; where upper maximum specified by
    BatchDescriptor::value_max().
    """
    kTanh: _ActivationMode.ValueType  # 5
    kBandPass: _ActivationMode.ValueType  # 6
    """Like ReluX; but passes all values in the range [-X,X]."""
    kElu: _ActivationMode.ValueType  # 7
    """Exponential linear activation: f(x) = x < 0 ? e^x - 1 : x"""
    kLeakyRelu: _ActivationMode.ValueType  # 8
    """Leaky Rectified linear activation: f(x) = x < 0 ? alpha * x : x"""
    kGeluExact: _ActivationMode.ValueType  # 9
    """Gaussian Error linear unit activation:
    x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2))), where P(X) ~ N(0, 1).
    """

class ActivationMode(_ActivationMode, metaclass=_ActivationModeEnumTypeWrapper):
    """Describes a kind of non-linearity (threshold-like mathematical function)."""

kNone: ActivationMode.ValueType  # 0
kSigmoid: ActivationMode.ValueType  # 1
kRelu: ActivationMode.ValueType  # 2
"""Rectified linear activation: f(x) = x < 0 ? 0 : x"""
kRelu6: ActivationMode.ValueType  # 3
"""Rectified linear activation; where upper maximum is 6.0."""
kReluX: ActivationMode.ValueType  # 4
"""Rectified linear activation; where upper maximum specified by
BatchDescriptor::value_max().
"""
kTanh: ActivationMode.ValueType  # 5
kBandPass: ActivationMode.ValueType  # 6
"""Like ReluX; but passes all values in the range [-X,X]."""
kElu: ActivationMode.ValueType  # 7
"""Exponential linear activation: f(x) = x < 0 ? e^x - 1 : x"""
kLeakyRelu: ActivationMode.ValueType  # 8
"""Leaky Rectified linear activation: f(x) = x < 0 ? alpha * x : x"""
kGeluExact: ActivationMode.ValueType  # 9
"""Gaussian Error linear unit activation:
x * P(X <= x) = 0.5 * x * (1 + erf(x / sqrt(2))), where P(X) ~ N(0, 1).
"""
global___ActivationMode = ActivationMode
class _ConvolutionMode:
    ValueType = typing.NewType("ValueType", builtins.int)
    V: typing_extensions.TypeAlias = ValueType

class _ConvolutionModeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_ConvolutionMode.ValueType], builtins.type):
    DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
    CROSS_CORRELATION: _ConvolutionMode.ValueType  # 0
    CONVOLUTION: _ConvolutionMode.ValueType  # 1

class ConvolutionMode(_ConvolutionMode, metaclass=_ConvolutionModeEnumTypeWrapper):
    """Describe the math definition for the conv op. The popular behavior is
    actually called cross-correlation in math, despite the operation is often
    referred as convolution. See cuDNN cudnnConvolutionMode_t.
    """

CROSS_CORRELATION: ConvolutionMode.ValueType  # 0
CONVOLUTION: ConvolutionMode.ValueType  # 1
global___ConvolutionMode = ConvolutionMode
class _ConvolutionKind:
    ValueType = typing.NewType("ValueType", builtins.int)
    V: typing_extensions.TypeAlias = ValueType

class _ConvolutionKindEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[_ConvolutionKind.ValueType], builtins.type):
    DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
    INVALID: _ConvolutionKind.ValueType  # 0
    FORWARD: _ConvolutionKind.ValueType  # 1
    BACKWARD_FILTER: _ConvolutionKind.ValueType  # 2
    BACKWARD_DATA: _ConvolutionKind.ValueType  # 3
    FORWARD_BIAS_ACTIVATION: _ConvolutionKind.ValueType  # 4

class ConvolutionKind(_ConvolutionKind, metaclass=_ConvolutionKindEnumTypeWrapper): ...

INVALID: ConvolutionKind.ValueType  # 0
FORWARD: ConvolutionKind.ValueType  # 1
BACKWARD_FILTER: ConvolutionKind.ValueType  # 2
BACKWARD_DATA: ConvolutionKind.ValueType  # 3
FORWARD_BIAS_ACTIVATION: ConvolutionKind.ValueType  # 4
global___ConvolutionKind = ConvolutionKind
@typing_extensions.final
class TensorDescriptorProto(google.protobuf.message.Message):
    """Generic tensor representation."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    DIMENSIONS_FIELD_NUMBER: builtins.int
    DATA_TYPE_FIELD_NUMBER: builtins.int
    DATA_LAYOUT_FIELD_NUMBER: builtins.int
    FILTER_LAYOUT_FIELD_NUMBER: builtins.int
    @property
    def dimensions(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: ...
    data_type: global___DataType.ValueType
    # data_layout / filter_layout are alternatives in the `layout_oneof` oneof
    # (see the HasField/WhichOneof signatures below).
    data_layout: global___DataLayout.ValueType
    filter_layout: global___FilterLayout.ValueType
    def __init__(
        self,
        *,
        dimensions: collections.abc.Iterable[builtins.int] | None = ...,
        data_type: global___DataType.ValueType | None = ...,
        data_layout: global___DataLayout.ValueType | None = ...,
        filter_layout: global___FilterLayout.ValueType | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["data_layout", b"data_layout", "filter_layout", b"filter_layout", "layout_oneof", b"layout_oneof"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["data_layout", b"data_layout", "data_type", b"data_type", "dimensions", b"dimensions", "filter_layout", b"filter_layout", "layout_oneof", b"layout_oneof"]) -> None: ...
    def WhichOneof(self, oneof_group: typing_extensions.Literal["layout_oneof", b"layout_oneof"]) -> typing_extensions.Literal["data_layout", "filter_layout"] | None: ...

global___TensorDescriptorProto = TensorDescriptorProto
@typing_extensions.final
class AlgorithmProto(google.protobuf.message.Message):
    """Generic algorithm representation."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    class _MathType:
        ValueType = typing.NewType("ValueType", builtins.int)
        V: typing_extensions.TypeAlias = ValueType

    class _MathTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[AlgorithmProto._MathType.ValueType], builtins.type):
        DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
        DEFAULT_MATH: AlgorithmProto._MathType.ValueType  # 0
        TENSOR_OP_MATH: AlgorithmProto._MathType.ValueType  # 1
        """The GPU may operate 4x4 matrix FMA.
        See cuDNN's documentation for CUDNN_TENSOR_OP_MATH.
        """

    class MathType(_MathType, metaclass=_MathTypeEnumTypeWrapper): ...
    DEFAULT_MATH: AlgorithmProto.MathType.ValueType  # 0
    TENSOR_OP_MATH: AlgorithmProto.MathType.ValueType  # 1
    """The GPU may operate 4x4 matrix FMA.
    See cuDNN's documentation for CUDNN_TENSOR_OP_MATH.
    """

    @typing_extensions.final
    class TuningKnobsEntry(google.protobuf.message.Message):
        """Synthesized key/value entry message for the ``tuning_knobs`` proto map field."""

        DESCRIPTOR: google.protobuf.descriptor.Descriptor

        KEY_FIELD_NUMBER: builtins.int
        VALUE_FIELD_NUMBER: builtins.int
        key: builtins.int
        value: builtins.int
        def __init__(
            self,
            *,
            key: builtins.int | None = ...,
            value: builtins.int | None = ...,
        ) -> None: ...
        def ClearField(self, field_name: typing_extensions.Literal["key", b"key", "value", b"value"]) -> None: ...

    # Proto wire-tag numbers for each field.
    ALGO_ID_FIELD_NUMBER: builtins.int
    MATH_TYPE_FIELD_NUMBER: builtins.int
    TUNING_KNOBS_FIELD_NUMBER: builtins.int
    IS_CUDNN_FRONTEND_FIELD_NUMBER: builtins.int
    WORKSPACE_SIZE_FIELD_NUMBER: builtins.int
    algo_id: builtins.int
    math_type: global___AlgorithmProto.MathType.ValueType
    @property
    def tuning_knobs(self) -> google.protobuf.internal.containers.ScalarMap[builtins.int, builtins.int]: ...
    is_cudnn_frontend: builtins.bool
    """Legacy algorithm enums and cuDNN Frontend engine numbers need to coexist in
    the same proto medium-term, until we can be confident of no longer needing
    the legacy cuDNN convolution API. Once the migration is complete, we can
    stop producing legacy algorithm enums and remove this field.
    """
    @property
    def workspace_size(self) -> google.protobuf.wrappers_pb2.UInt64Value:
        """For ROCm only, it's impossible to re-query the required workspace size
        after running the algorithm search, so we must store the workspace size
        along with the choice of algorithm. For consistency and convenience,
        cuDNN uses this field in the same way, even though it would be possible to
        re-query the workspace size from cuDNN at each use.
        Since this message is persisted in files, we need to be able to distinguish
        0 workspace size from unknown workspace size in an old message, so this is
        a message field.
        """
    def __init__(
        self,
        *,
        algo_id: builtins.int | None = ...,
        math_type: global___AlgorithmProto.MathType.ValueType | None = ...,
        tuning_knobs: collections.abc.Mapping[builtins.int, builtins.int] | None = ...,
        is_cudnn_frontend: builtins.bool | None = ...,
        workspace_size: google.protobuf.wrappers_pb2.UInt64Value | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["workspace_size", b"workspace_size"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["algo_id", b"algo_id", "is_cudnn_frontend", b"is_cudnn_frontend", "math_type", b"math_type", "tuning_knobs", b"tuning_knobs", "workspace_size", b"workspace_size"]) -> None: ...

global___AlgorithmProto = AlgorithmProto
@typing_extensions.final
class AlgorithmConfigProto(google.protobuf.message.Message):
    """Proto definition of AlgorithmConfig in "dnn.h".
    TODO(ruochengw): After cl/380702564 is submitted, add support for algorithm
    configs with cuDNN Frontend APIs.
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    ALGORITHM_FIELD_NUMBER: builtins.int
    ALGORITHM_NO_SCRATCH_FIELD_NUMBER: builtins.int
    SCRATCH_SIZE_FIELD_NUMBER: builtins.int
    @property
    def algorithm(self) -> global___AlgorithmProto: ...
    @property
    def algorithm_no_scratch(self) -> global___AlgorithmProto: ...
    scratch_size: builtins.int
    def __init__(
        self,
        *,
        algorithm: global___AlgorithmProto | None = ...,
        algorithm_no_scratch: global___AlgorithmProto | None = ...,
        scratch_size: builtins.int | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["algorithm", b"algorithm", "algorithm_no_scratch", b"algorithm_no_scratch", "optional_algorithm", b"optional_algorithm", "optional_algorithm_no_scratch", b"optional_algorithm_no_scratch", "optional_scratch_size", b"optional_scratch_size", "scratch_size", b"scratch_size"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["algorithm", b"algorithm", "algorithm_no_scratch", b"algorithm_no_scratch", "optional_algorithm", b"optional_algorithm", "optional_algorithm_no_scratch", b"optional_algorithm_no_scratch", "optional_scratch_size", b"optional_scratch_size", "scratch_size", b"scratch_size"]) -> None: ...
    # Each optional proto3 field is wrapped in its own synthetic one-member oneof,
    # hence one WhichOneof overload per optional field.
    @typing.overload
    def WhichOneof(self, oneof_group: typing_extensions.Literal["optional_algorithm", b"optional_algorithm"]) -> typing_extensions.Literal["algorithm"] | None: ...
    @typing.overload
    def WhichOneof(self, oneof_group: typing_extensions.Literal["optional_algorithm_no_scratch", b"optional_algorithm_no_scratch"]) -> typing_extensions.Literal["algorithm_no_scratch"] | None: ...
    @typing.overload
    def WhichOneof(self, oneof_group: typing_extensions.Literal["optional_scratch_size", b"optional_scratch_size"]) -> typing_extensions.Literal["scratch_size"] | None: ...

global___AlgorithmConfigProto = AlgorithmConfigProto
@typing_extensions.final
class ConvolutionDescriptorProto(google.protobuf.message.Message):
    """Convolution-specific parameters."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    PADDINGS_FIELD_NUMBER: builtins.int
    STRIDES_FIELD_NUMBER: builtins.int
    DILATIONS_FIELD_NUMBER: builtins.int
    COMPUTE_MODE_FIELD_NUMBER: builtins.int
    GROUP_COUNT_FIELD_NUMBER: builtins.int
    CONVOLUTION_MODE_FIELD_NUMBER: builtins.int
    NAME_FIELD_NUMBER: builtins.int
    @property
    def paddings(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: ...
    @property
    def strides(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: ...
    @property
    def dilations(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.int]: ...
    compute_mode: global___DataType.ValueType
    """The "accumulator" type. For example, use F32 as an accumulator for F16
    convolutions.
    See cuDNN's cudnnConvolutionMode_t.
    """
    group_count: builtins.int
    """See cuDNN's group count."""
    convolution_mode: global___ConvolutionMode.ValueType
    name: builtins.str
    """Tensorflow node name, same as in NodeDef, for debugging purposes."""
    def __init__(
        self,
        *,
        paddings: collections.abc.Iterable[builtins.int] | None = ...,
        strides: collections.abc.Iterable[builtins.int] | None = ...,
        dilations: collections.abc.Iterable[builtins.int] | None = ...,
        compute_mode: global___DataType.ValueType | None = ...,
        group_count: builtins.int | None = ...,
        convolution_mode: global___ConvolutionMode.ValueType | None = ...,
        name: builtins.str | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["compute_mode", b"compute_mode", "convolution_mode", b"convolution_mode", "dilations", b"dilations", "group_count", b"group_count", "name", b"name", "paddings", b"paddings", "strides", b"strides"]) -> None: ...

global___ConvolutionDescriptorProto = ConvolutionDescriptorProto

View File

@@ -0,0 +1,72 @@
"""
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
"""
import builtins
import google.protobuf.descriptor
import google.protobuf.message
import sys
if sys.version_info >= (3, 8):
import typing as typing_extensions
else:
import typing_extensions
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
@typing_extensions.final
class RPCOptions(google.protobuf.message.Message):
    """RPC options for distributed runtime."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    USE_RPC_FOR_INPROCESS_MASTER_FIELD_NUMBER: builtins.int
    COMPRESSION_ALGORITHM_FIELD_NUMBER: builtins.int
    COMPRESSION_LEVEL_FIELD_NUMBER: builtins.int
    CACHE_RPC_RESPONSE_FIELD_NUMBER: builtins.int
    DISABLE_SESSION_CONNECTION_SHARING_FIELD_NUMBER: builtins.int
    NUM_CHANNELS_PER_TARGET_FIELD_NUMBER: builtins.int
    use_rpc_for_inprocess_master: builtins.bool
    """If true, always use RPC to contact the session target.
    If false (the default option), TensorFlow may use an optimized
    transport for client-master communication that avoids the RPC
    stack. This option is primarily for used testing the RPC stack.
    """
    compression_algorithm: builtins.str
    """The compression algorithm to be used. One of "deflate", "gzip"."""
    compression_level: builtins.int
    """If compression_algorithm is set, the compression level to be used.
    From 0 (no compression), up to 3.
    """
    cache_rpc_response: builtins.bool
    """Setting cache_rpc_response to true will enable sender side caching of
    response for RecvTensorAsync and RecvBufAsync to allow receiver to retry
    requests . This is only necessary when the network fabric is experiencing a
    significant error rate.  Without it we'll fail a step on an network error,
    while with it we'll be able to complete long steps (like complex
    initializations) in the face of some network errors during RecvTensor.
    """
    disable_session_connection_sharing: builtins.bool
    """Disables TCP connection sharing when opening a new RPC channel."""
    num_channels_per_target: builtins.int
    """Setting num_channels_per_target > 0 allows uses of multiple channels to
    communicate to the same target. This can be used to improve the aggregate
    throughput on high speed links (e.g 100G) where single connection is not
    sufficient to maximize link utilization. Note that a single RPC only goes
    on a single channel, this only helps in situations where there are multiple
    transfers to the same target overlapping in time.
    """
    def __init__(
        self,
        *,
        use_rpc_for_inprocess_master: builtins.bool | None = ...,
        compression_algorithm: builtins.str | None = ...,
        compression_level: builtins.int | None = ...,
        cache_rpc_response: builtins.bool | None = ...,
        disable_session_connection_sharing: builtins.bool | None = ...,
        num_channels_per_target: builtins.int | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["cache_rpc_response", b"cache_rpc_response", "compression_algorithm", b"compression_algorithm", "compression_level", b"compression_level", "disable_session_connection_sharing", b"disable_session_connection_sharing", "num_channels_per_target", b"num_channels_per_target", "use_rpc_for_inprocess_master", b"use_rpc_for_inprocess_master"]) -> None: ...

global___RPCOptions = RPCOptions

View File

@@ -0,0 +1,575 @@
"""
@generated by mypy-protobuf. Do not edit manually!
isort:skip_file
Protocol messages for describing the results of benchmarks and unit tests."""
import builtins
import collections.abc
import google.protobuf.any_pb2
import google.protobuf.descriptor
import google.protobuf.internal.containers
import google.protobuf.internal.enum_type_wrapper
import google.protobuf.message
import google.protobuf.wrappers_pb2
import sys
import typing
if sys.version_info >= (3, 10):
import typing as typing_extensions
else:
import typing_extensions
DESCRIPTOR: google.protobuf.descriptor.FileDescriptor
@typing_extensions.final
class EntryValue(google.protobuf.message.Message):
    """A single benchmark-result value: exactly one of a double or a string,
    selected by the ``kind`` oneof.
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    DOUBLE_VALUE_FIELD_NUMBER: builtins.int
    STRING_VALUE_FIELD_NUMBER: builtins.int
    double_value: builtins.float
    string_value: builtins.str
    def __init__(
        self,
        *,
        double_value: builtins.float | None = ...,
        string_value: builtins.str | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["double_value", b"double_value", "kind", b"kind", "string_value", b"string_value"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["double_value", b"double_value", "kind", b"kind", "string_value", b"string_value"]) -> None: ...
    def WhichOneof(self, oneof_group: typing_extensions.Literal["kind", b"kind"]) -> typing_extensions.Literal["double_value", "string_value"] | None: ...

global___EntryValue = EntryValue
@typing_extensions.final
class MetricEntry(google.protobuf.message.Message):
    """A named metric value with optional acceptable min/max bounds."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    NAME_FIELD_NUMBER: builtins.int
    VALUE_FIELD_NUMBER: builtins.int
    MIN_VALUE_FIELD_NUMBER: builtins.int
    MAX_VALUE_FIELD_NUMBER: builtins.int
    name: builtins.str
    """Metric name"""
    value: builtins.float
    """Metric value"""
    @property
    def min_value(self) -> google.protobuf.wrappers_pb2.DoubleValue:
        """The minimum acceptable value for the metric if specified"""
    @property
    def max_value(self) -> google.protobuf.wrappers_pb2.DoubleValue:
        """The maximum acceptable value for the metric if specified"""
    def __init__(
        self,
        *,
        name: builtins.str | None = ...,
        value: builtins.float | None = ...,
        min_value: google.protobuf.wrappers_pb2.DoubleValue | None = ...,
        max_value: google.protobuf.wrappers_pb2.DoubleValue | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["max_value", b"max_value", "min_value", b"min_value"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["max_value", b"max_value", "min_value", b"min_value", "name", b"name", "value", b"value"]) -> None: ...

global___MetricEntry = MetricEntry
@typing_extensions.final
class BenchmarkEntry(google.protobuf.message.Message):
    """Each unit test or benchmark in a test or benchmark run provides
    some set of information.  Here we provide some reasonable keys
    one would expect to see, with optional key/value pairs for things
    we haven't considered.

    This BenchmarkEntry should be emitted by each unit test or benchmark
    reporter.
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    @typing_extensions.final
    class ExtrasEntry(google.protobuf.message.Message):
        """Synthesized key/value entry message for the ``extras`` proto map field."""

        DESCRIPTOR: google.protobuf.descriptor.Descriptor

        KEY_FIELD_NUMBER: builtins.int
        VALUE_FIELD_NUMBER: builtins.int
        key: builtins.str
        @property
        def value(self) -> global___EntryValue: ...
        def __init__(
            self,
            *,
            key: builtins.str | None = ...,
            value: global___EntryValue | None = ...,
        ) -> None: ...
        def HasField(self, field_name: typing_extensions.Literal["value", b"value"]) -> builtins.bool: ...
        def ClearField(self, field_name: typing_extensions.Literal["key", b"key", "value", b"value"]) -> None: ...

    NAME_FIELD_NUMBER: builtins.int
    ITERS_FIELD_NUMBER: builtins.int
    CPU_TIME_FIELD_NUMBER: builtins.int
    WALL_TIME_FIELD_NUMBER: builtins.int
    THROUGHPUT_FIELD_NUMBER: builtins.int
    EXTRAS_FIELD_NUMBER: builtins.int
    METRICS_FIELD_NUMBER: builtins.int
    name: builtins.str
    """The name of the specific benchmark or test
    (e.g. BM_AdjustContrast_gpu_B_W_H)
    """
    iters: builtins.int
    """If a benchmark, how many iterations it was run for"""
    cpu_time: builtins.float
    """Total cpu time used for all iterations (in seconds)"""
    wall_time: builtins.float
    """Total wall time used for all iterations (in seconds)"""
    throughput: builtins.float
    """Throughput (in MB/s)"""
    @property
    def extras(self) -> google.protobuf.internal.containers.MessageMap[builtins.str, global___EntryValue]:
        """Generic map from result key to value."""
    @property
    def metrics(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___MetricEntry]:
        """Metric name, value and expected range. This can include accuracy metrics
        typically used to determine whether the accuracy test has passed
        """
    def __init__(
        self,
        *,
        name: builtins.str | None = ...,
        iters: builtins.int | None = ...,
        cpu_time: builtins.float | None = ...,
        wall_time: builtins.float | None = ...,
        throughput: builtins.float | None = ...,
        extras: collections.abc.Mapping[builtins.str, global___EntryValue] | None = ...,
        metrics: collections.abc.Iterable[global___MetricEntry] | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["cpu_time", b"cpu_time", "extras", b"extras", "iters", b"iters", "metrics", b"metrics", "name", b"name", "throughput", b"throughput", "wall_time", b"wall_time"]) -> None: ...

global___BenchmarkEntry = BenchmarkEntry
@typing_extensions.final
class BenchmarkEntries(google.protobuf.message.Message):
    """A collection of ``BenchmarkEntry`` messages (repeated ``entry`` field)."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    ENTRY_FIELD_NUMBER: builtins.int
    @property
    def entry(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___BenchmarkEntry]: ...
    def __init__(
        self,
        *,
        entry: collections.abc.Iterable[global___BenchmarkEntry] | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["entry", b"entry"]) -> None: ...

global___BenchmarkEntries = BenchmarkEntries
@typing_extensions.final
class BuildConfiguration(google.protobuf.message.Message):
    """Build metadata for the binary under test: mode, CC flags and Bazel opts."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    MODE_FIELD_NUMBER: builtins.int
    CC_FLAGS_FIELD_NUMBER: builtins.int
    OPTS_FIELD_NUMBER: builtins.int
    mode: builtins.str
    """opt, dbg, etc"""
    @property
    def cc_flags(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:
        """CC compiler flags, if known"""
    @property
    def opts(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]:
        """Bazel compilation options, if known"""
    def __init__(
        self,
        *,
        mode: builtins.str | None = ...,
        cc_flags: collections.abc.Iterable[builtins.str] | None = ...,
        opts: collections.abc.Iterable[builtins.str] | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["cc_flags", b"cc_flags", "mode", b"mode", "opts", b"opts"]) -> None: ...

global___BuildConfiguration = BuildConfiguration
@typing_extensions.final
class CommitId(google.protobuf.message.Message):
    """Source-control revision of the code under test; the ``kind`` oneof holds
    either a submitted changelist number or a commit hash.
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    CHANGELIST_FIELD_NUMBER: builtins.int
    HASH_FIELD_NUMBER: builtins.int
    SNAPSHOT_FIELD_NUMBER: builtins.int
    PENDING_CHANGELIST_FIELD_NUMBER: builtins.int
    changelist: builtins.int
    """Submitted changelist."""
    hash: builtins.str
    snapshot: builtins.str
    """Hash of intermediate change between hash/changelist and what was tested.
    Not used if the build is from a commit without modifications.
    """
    pending_changelist: builtins.int
    """Changelist tested if the change list is not already submitted."""
    def __init__(
        self,
        *,
        changelist: builtins.int | None = ...,
        hash: builtins.str | None = ...,
        snapshot: builtins.str | None = ...,
        pending_changelist: builtins.int | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["changelist", b"changelist", "hash", b"hash", "kind", b"kind"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["changelist", b"changelist", "hash", b"hash", "kind", b"kind", "pending_changelist", b"pending_changelist", "snapshot", b"snapshot"]) -> None: ...
    def WhichOneof(self, oneof_group: typing_extensions.Literal["kind", b"kind"]) -> typing_extensions.Literal["changelist", "hash"] | None: ...

global___CommitId = CommitId
@typing_extensions.final
class CPUInfo(google.protobuf.message.Message):
    """Host CPU characteristics recorded alongside a benchmark run."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    @typing_extensions.final
    class CacheSizeEntry(google.protobuf.message.Message):
        """Synthesized key/value entry message for the ``cache_size`` proto map field."""

        DESCRIPTOR: google.protobuf.descriptor.Descriptor

        KEY_FIELD_NUMBER: builtins.int
        VALUE_FIELD_NUMBER: builtins.int
        key: builtins.str
        value: builtins.int
        def __init__(
            self,
            *,
            key: builtins.str | None = ...,
            value: builtins.int | None = ...,
        ) -> None: ...
        def ClearField(self, field_name: typing_extensions.Literal["key", b"key", "value", b"value"]) -> None: ...

    NUM_CORES_FIELD_NUMBER: builtins.int
    NUM_CORES_ALLOWED_FIELD_NUMBER: builtins.int
    MHZ_PER_CPU_FIELD_NUMBER: builtins.int
    CPU_INFO_FIELD_NUMBER: builtins.int
    CPU_GOVERNOR_FIELD_NUMBER: builtins.int
    CACHE_SIZE_FIELD_NUMBER: builtins.int
    num_cores: builtins.int
    num_cores_allowed: builtins.int
    mhz_per_cpu: builtins.float
    """How fast are these cpus?"""
    cpu_info: builtins.str
    """Additional cpu information. For example,
    Intel Ivybridge with HyperThreading (24 cores) dL1:32KB dL2:256KB dL3:30MB
    """
    cpu_governor: builtins.str
    """What kind of cpu scaling is enabled on the host.
    Examples include "performance", "ondemand", "conservative", "mixed".
    """
    @property
    def cache_size(self) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.int]:
        """Cache sizes (in bytes), e.g. "L2": 262144 (for 256KB)"""
    def __init__(
        self,
        *,
        num_cores: builtins.int | None = ...,
        num_cores_allowed: builtins.int | None = ...,
        mhz_per_cpu: builtins.float | None = ...,
        cpu_info: builtins.str | None = ...,
        cpu_governor: builtins.str | None = ...,
        cache_size: collections.abc.Mapping[builtins.str, builtins.int] | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["cache_size", b"cache_size", "cpu_governor", b"cpu_governor", "cpu_info", b"cpu_info", "mhz_per_cpu", b"mhz_per_cpu", "num_cores", b"num_cores", "num_cores_allowed", b"num_cores_allowed"]) -> None: ...

global___CPUInfo = CPUInfo
@typing_extensions.final
class MemoryInfo(google.protobuf.message.Message):
    """Host memory capacity and availability, in bytes."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    TOTAL_FIELD_NUMBER: builtins.int
    AVAILABLE_FIELD_NUMBER: builtins.int
    total: builtins.int
    """Total virtual memory in bytes"""
    available: builtins.int
    """Immediately available memory in bytes"""
    def __init__(
        self,
        *,
        total: builtins.int | None = ...,
        available: builtins.int | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["available", b"available", "total", b"total"]) -> None: ...

global___MemoryInfo = MemoryInfo
@typing_extensions.final
class GPUInfo(google.protobuf.message.Message):
    """Identifying information for one GPU device."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    MODEL_FIELD_NUMBER: builtins.int
    UUID_FIELD_NUMBER: builtins.int
    BUS_ID_FIELD_NUMBER: builtins.int
    model: builtins.str
    """e.g. "Tesla K40c" """
    uuid: builtins.str
    """Final entry in output of "nvidia-smi -L" """
    bus_id: builtins.str
    """e.g. "0000:04:00.0" """
    def __init__(
        self,
        *,
        model: builtins.str | None = ...,
        uuid: builtins.str | None = ...,
        bus_id: builtins.str | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["bus_id", b"bus_id", "model", b"model", "uuid", b"uuid"]) -> None: ...

global___GPUInfo = GPUInfo
@typing_extensions.final
class PlatformInfo(google.protobuf.message.Message):
    """OS/platform identification strings for the benchmark host."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    BITS_FIELD_NUMBER: builtins.int
    LINKAGE_FIELD_NUMBER: builtins.int
    MACHINE_FIELD_NUMBER: builtins.int
    RELEASE_FIELD_NUMBER: builtins.int
    SYSTEM_FIELD_NUMBER: builtins.int
    VERSION_FIELD_NUMBER: builtins.int
    bits: builtins.str
    """e.g. '64bit'"""
    linkage: builtins.str
    """e.g. 'ELF'"""
    machine: builtins.str
    """e.g. 'i386'"""
    release: builtins.str
    """e.g. '3.13.0-76-generic'"""
    system: builtins.str
    """e.g. 'Linux'"""
    version: builtins.str
    """e.g. '#120-Ubuntu SMP Mon Jan 18 15:59:10 UTC 2016'"""
    def __init__(
        self,
        *,
        bits: builtins.str | None = ...,
        linkage: builtins.str | None = ...,
        machine: builtins.str | None = ...,
        release: builtins.str | None = ...,
        system: builtins.str | None = ...,
        version: builtins.str | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["bits", b"bits", "linkage", b"linkage", "machine", b"machine", "release", b"release", "system", b"system", "version", b"version"]) -> None: ...

global___PlatformInfo = PlatformInfo
@typing_extensions.final
class AvailableDeviceInfo(google.protobuf.message.Message):
    """Matches DeviceAttributes"""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    NAME_FIELD_NUMBER: builtins.int
    TYPE_FIELD_NUMBER: builtins.int
    MEMORY_LIMIT_FIELD_NUMBER: builtins.int
    PHYSICAL_DESCRIPTION_FIELD_NUMBER: builtins.int
    name: builtins.str
    """Device name."""
    type: builtins.str
    """Device type, e.g. 'CPU' or 'GPU'."""
    memory_limit: builtins.int
    """Memory capacity in bytes."""
    physical_description: builtins.str
    """The physical description of this device."""
    def __init__(
        self,
        *,
        name: builtins.str | None = ...,
        type: builtins.str | None = ...,
        memory_limit: builtins.int | None = ...,
        physical_description: builtins.str | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["memory_limit", b"memory_limit", "name", b"name", "physical_description", b"physical_description", "type", b"type"]) -> None: ...

global___AvailableDeviceInfo = AvailableDeviceInfo
@typing_extensions.final
class MachineConfiguration(google.protobuf.message.Message):
    """Hardware and platform description of the machine that ran the benchmark."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    HOSTNAME_FIELD_NUMBER: builtins.int
    SERIAL_IDENTIFIER_FIELD_NUMBER: builtins.int
    PLATFORM_INFO_FIELD_NUMBER: builtins.int
    CPU_INFO_FIELD_NUMBER: builtins.int
    DEVICE_INFO_FIELD_NUMBER: builtins.int
    AVAILABLE_DEVICE_INFO_FIELD_NUMBER: builtins.int
    MEMORY_INFO_FIELD_NUMBER: builtins.int
    hostname: builtins.str
    """Host name of machine that ran the benchmark."""
    serial_identifier: builtins.str
    """Unique serial number of the machine."""
    @property
    def platform_info(self) -> global___PlatformInfo:
        """Additional platform information."""
    @property
    def cpu_info(self) -> global___CPUInfo:
        """CPU Information."""
    @property
    def device_info(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[google.protobuf.any_pb2.Any]:
        """Other devices that are attached and relevant (e.g. GPUInfo)."""
    @property
    def available_device_info(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___AvailableDeviceInfo]:
        """Devices accessible to the test (e.g. as given by list_local_devices)."""
    @property
    def memory_info(self) -> global___MemoryInfo: ...
    def __init__(
        self,
        *,
        hostname: builtins.str | None = ...,
        serial_identifier: builtins.str | None = ...,
        platform_info: global___PlatformInfo | None = ...,
        cpu_info: global___CPUInfo | None = ...,
        device_info: collections.abc.Iterable[google.protobuf.any_pb2.Any] | None = ...,
        available_device_info: collections.abc.Iterable[global___AvailableDeviceInfo] | None = ...,
        memory_info: global___MemoryInfo | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["cpu_info", b"cpu_info", "memory_info", b"memory_info", "platform_info", b"platform_info"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["available_device_info", b"available_device_info", "cpu_info", b"cpu_info", "device_info", b"device_info", "hostname", b"hostname", "memory_info", b"memory_info", "platform_info", b"platform_info", "serial_identifier", b"serial_identifier"]) -> None: ...

global___MachineConfiguration = MachineConfiguration
@typing_extensions.final
class RunConfiguration(google.protobuf.message.Message):
    """Run-specific items such as arguments to the test / benchmark."""

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    @typing_extensions.final
    class EnvVarsEntry(google.protobuf.message.Message):
        """Synthesized key/value entry message for the ``env_vars`` proto map field."""

        DESCRIPTOR: google.protobuf.descriptor.Descriptor

        KEY_FIELD_NUMBER: builtins.int
        VALUE_FIELD_NUMBER: builtins.int
        key: builtins.str
        value: builtins.str
        def __init__(
            self,
            *,
            key: builtins.str | None = ...,
            value: builtins.str | None = ...,
        ) -> None: ...
        def ClearField(self, field_name: typing_extensions.Literal["key", b"key", "value", b"value"]) -> None: ...

    ARGUMENT_FIELD_NUMBER: builtins.int
    ENV_VARS_FIELD_NUMBER: builtins.int
    @property
    def argument(self) -> google.protobuf.internal.containers.RepeatedScalarFieldContainer[builtins.str]: ...
    @property
    def env_vars(self) -> google.protobuf.internal.containers.ScalarMap[builtins.str, builtins.str]:
        """Environment variables used to run the test/benchmark."""
    def __init__(
        self,
        *,
        argument: collections.abc.Iterable[builtins.str] | None = ...,
        env_vars: collections.abc.Mapping[builtins.str, builtins.str] | None = ...,
    ) -> None: ...
    def ClearField(self, field_name: typing_extensions.Literal["argument", b"argument", "env_vars", b"env_vars"]) -> None: ...

global___RunConfiguration = RunConfiguration
@typing_extensions.final
class TestResults(google.protobuf.message.Message):
    """The output of one benchmark / test run.  Each run contains a list of
    tests or benchmarks, stored as BenchmarkEntry messages.

    This message should be emitted by the reporter (which runs the
    test / BM in a subprocess and then reads the emitted BenchmarkEntry messages;
    usually from a serialized json file, finally collecting them along
    with additional information about the test run.
    """

    DESCRIPTOR: google.protobuf.descriptor.Descriptor

    class _BenchmarkType:
        ValueType = typing.NewType("ValueType", builtins.int)
        V: typing_extensions.TypeAlias = ValueType

    class _BenchmarkTypeEnumTypeWrapper(google.protobuf.internal.enum_type_wrapper._EnumTypeWrapper[TestResults._BenchmarkType.ValueType], builtins.type):
        DESCRIPTOR: google.protobuf.descriptor.EnumDescriptor
        UNKNOWN: TestResults._BenchmarkType.ValueType  # 0
        """Fallback for protos written before Type was introduced."""
        CPP_MICROBENCHMARK: TestResults._BenchmarkType.ValueType  # 1
        PYTHON_BENCHMARK: TestResults._BenchmarkType.ValueType  # 2
        ANDROID_BENCHMARK: TestResults._BenchmarkType.ValueType  # 3
        EDGE_BENCHMARK: TestResults._BenchmarkType.ValueType  # 4
        IOS_BENCHMARK: TestResults._BenchmarkType.ValueType  # 5

    class BenchmarkType(_BenchmarkType, metaclass=_BenchmarkTypeEnumTypeWrapper):
        """The type of benchmark."""

    UNKNOWN: TestResults.BenchmarkType.ValueType  # 0
    """Fallback for protos written before Type was introduced."""
    CPP_MICROBENCHMARK: TestResults.BenchmarkType.ValueType  # 1
    PYTHON_BENCHMARK: TestResults.BenchmarkType.ValueType  # 2
    ANDROID_BENCHMARK: TestResults.BenchmarkType.ValueType  # 3
    EDGE_BENCHMARK: TestResults.BenchmarkType.ValueType  # 4
    IOS_BENCHMARK: TestResults.BenchmarkType.ValueType  # 5

    TARGET_FIELD_NUMBER: builtins.int
    ENTRIES_FIELD_NUMBER: builtins.int
    BUILD_CONFIGURATION_FIELD_NUMBER: builtins.int
    COMMIT_ID_FIELD_NUMBER: builtins.int
    START_TIME_FIELD_NUMBER: builtins.int
    RUN_TIME_FIELD_NUMBER: builtins.int
    MACHINE_CONFIGURATION_FIELD_NUMBER: builtins.int
    RUN_CONFIGURATION_FIELD_NUMBER: builtins.int
    NAME_FIELD_NUMBER: builtins.int
    BENCHMARK_TYPE_FIELD_NUMBER: builtins.int
    RUN_MODE_FIELD_NUMBER: builtins.int
    TF_VERSION_FIELD_NUMBER: builtins.int
    target: builtins.str
    """The target of the run, e.g.:
    //tensorflow/core:kernels_adjust_contrast_op_benchmark_test
    """
    @property
    def entries(self) -> global___BenchmarkEntries:
        """The list of tests or benchmarks in this run."""
    @property
    def build_configuration(self) -> global___BuildConfiguration:
        """The configuration of the build (compiled opt? with cuda? any copts?)"""
    @property
    def commit_id(self) -> global___CommitId:
        """The commit id (git hash or changelist)"""
    start_time: builtins.int
    """The time the run started (in seconds of UTC time since Unix epoch)"""
    run_time: builtins.float
    """The amount of time the total run took (wall time in seconds)"""
    @property
    def machine_configuration(self) -> global___MachineConfiguration:
        """Machine-specific parameters (Platform and CPU info)"""
    @property
    def run_configuration(self) -> global___RunConfiguration:
        """Run-specific parameters (arguments, etc)"""
    name: builtins.str
    """Benchmark target identifier."""
    benchmark_type: global___TestResults.BenchmarkType.ValueType
    run_mode: builtins.str
    """Used for differentiating between continuous and debug builds.
    Must be one of:
    * cbuild: results from continuous build.
    * presubmit: results from oneshot requests.
    * culprit: results from culprit finder rerun.
    """
    tf_version: builtins.str
    """TensorFlow version this benchmark runs against.
    This can be either set to full version or just the major version.
    """
    def __init__(
        self,
        *,
        target: builtins.str | None = ...,
        entries: global___BenchmarkEntries | None = ...,
        build_configuration: global___BuildConfiguration | None = ...,
        commit_id: global___CommitId | None = ...,
        start_time: builtins.int | None = ...,
        run_time: builtins.float | None = ...,
        machine_configuration: global___MachineConfiguration | None = ...,
        run_configuration: global___RunConfiguration | None = ...,
        name: builtins.str | None = ...,
        benchmark_type: global___TestResults.BenchmarkType.ValueType | None = ...,
        run_mode: builtins.str | None = ...,
        tf_version: builtins.str | None = ...,
    ) -> None: ...
    def HasField(self, field_name: typing_extensions.Literal["build_configuration", b"build_configuration", "commit_id", b"commit_id", "entries", b"entries", "machine_configuration", b"machine_configuration", "run_configuration", b"run_configuration"]) -> builtins.bool: ...
    def ClearField(self, field_name: typing_extensions.Literal["benchmark_type", b"benchmark_type", "build_configuration", b"build_configuration", "commit_id", b"commit_id", "entries", b"entries", "machine_configuration", b"machine_configuration", "name", b"name", "run_configuration", b"run_configuration", "run_mode", b"run_mode", "run_time", b"run_time", "start_time", b"start_time", "target", b"target", "tf_version", b"tf_version"]) -> None: ...

global___TestResults = TestResults