Mirror of https://github.com/davidhalter/typeshed.git (synced 2025-12-08 04:54:47 +08:00)
add the remaining encodings submodules (#13123)
@@ -58,19 +58,13 @@ weakref.WeakValueDictionary.update
# ==========
# TODO: Modules that exist at runtime, but are missing from stubs
# ==========
encodings.aliases
encodings.ascii
encodings.base64_codec
encodings.bz2_codec
encodings.charmap
encodings.hex_codec
encodings.idna
encodings.latin_1
encodings.punycode
encodings.quopri_codec
encodings.raw_unicode_escape
encodings.rot_13
encodings.undefined
encodings.unicode_escape
encodings.utf_16
encodings.utf_16_be
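The hunk header shows this block shrinking from 19 lines to 13, consistent with the six modules that gain stubs below (aliases, charmap, idna, punycode, rot_13, undefined) being removed from the allowlist. As the TODO comment says, the listed submodules all exist at runtime; a throwaway check (not part of the diff) that confirms this:

# Hedged sketch, not from the diff: verify that the allowlisted submodules are
# importable at runtime even though they lacked stubs before this commit.
import importlib

for name in ["encodings.aliases", "encodings.idna", "encodings.rot_13"]:
    mod = importlib.import_module(name)
    print(mod.__name__, "->", mod.__file__)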
stdlib/encodings/aliases.pyi (new file, 1 line)
@@ -0,0 +1 @@
aliases: dict[str, str]
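A small usage sketch for the mapping declared above (not part of the diff; the specific values reflect CPython's standard alias table):

# encodings.aliases.aliases maps normalized alias spellings to the canonical
# codec module name used by the encodings package's search function.
from encodings.aliases import aliases

print(aliases["utf8"])  # 'utf_8' on CPython
print(aliases["u8"])    # also 'utf_8'
print(len(aliases))     # several hundred entries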
stdlib/encodings/charmap.pyi (new file, 33 lines)
@@ -0,0 +1,33 @@
import codecs
from _codecs import _CharMap
from _typeshed import ReadableBuffer

class Codec(codecs.Codec):
    # At runtime, this is codecs.charmap_encode
    @staticmethod
    def encode(str: str, errors: str | None = None, mapping: _CharMap | None = None, /) -> tuple[bytes, int]: ...
    # At runtime, this is codecs.charmap_decode
    @staticmethod
    def decode(data: ReadableBuffer, errors: str | None = None, mapping: _CharMap | None = None, /) -> tuple[str, int]: ...

class IncrementalEncoder(codecs.IncrementalEncoder):
    mapping: _CharMap | None
    def __init__(self, errors: str = "strict", mapping: _CharMap | None = None) -> None: ...
    def encode(self, input: str, final: bool = False) -> bytes: ...

class IncrementalDecoder(codecs.IncrementalDecoder):
    mapping: _CharMap | None
    def __init__(self, errors: str = "strict", mapping: _CharMap | None = None) -> None: ...
    def decode(self, input: ReadableBuffer, final: bool = False) -> str: ...

class StreamWriter(Codec, codecs.StreamWriter):
    mapping: _CharMap | None
    def __init__(self, stream: codecs._WritableStream, errors: str = "strict", mapping: _CharMap | None = None) -> None: ...
    def encode(self, input: str, errors: str = "strict") -> tuple[bytes, int]: ...  # type: ignore[override]

class StreamReader(Codec, codecs.StreamReader):
    mapping: _CharMap | None
    def __init__(self, stream: codecs._ReadableStream, errors: str = "strict", mapping: _CharMap | None = None) -> None: ...
    def decode(self, input: ReadableBuffer, errors: str = "strict") -> tuple[str, int]: ...  # type: ignore[override]

def getregentry() -> codecs.CodecInfo: ...
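The stub's comments say Codec.encode and Codec.decode are codecs.charmap_encode and codecs.charmap_decode at runtime; a brief sketch of how the mapping argument is used (the toy tables here are illustrative only):

import codecs

# Encoding map: Unicode ordinal -> byte value (one accepted form of _CharMap).
encode_map = {ord("a"): 0x41, ord("b"): 0x42}
print(codecs.charmap_encode("ab", "strict", encode_map))  # (b'AB', 2)

# Decoding map: a str acting as a table indexed by byte value.
decode_table = "ab"
print(codecs.charmap_decode(b"\x00\x01", "strict", decode_table))  # ('ab', 2)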
stdlib/encodings/idna.pyi (new file, 26 lines)
@@ -0,0 +1,26 @@
import codecs
import re
from _typeshed import ReadableBuffer

dots: re.Pattern[str]
ace_prefix: bytes
sace_prefix: str

def nameprep(label: str) -> str: ...
def ToASCII(label: str) -> bytes: ...
def ToUnicode(label: bytes | str) -> str: ...

class Codec(codecs.Codec):
    def encode(self, input: str, errors: str = "strict") -> tuple[bytes, int]: ...
    def decode(self, input: ReadableBuffer | str, errors: str = "strict") -> tuple[str, int]: ...

class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
    def _buffer_encode(self, input: str, errors: str, final: bool) -> tuple[bytes, int]: ...

class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    def _buffer_decode(self, input: ReadableBuffer | str, errors: str, final: bool) -> tuple[str, int]: ...

class StreamWriter(Codec, codecs.StreamWriter): ...
class StreamReader(Codec, codecs.StreamReader): ...

def getregentry() -> codecs.CodecInfo: ...
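A short illustration of the module-level helpers declared above, using a common IDN example (not part of the diff; outputs are what CPython produces):

from encodings import idna

print(idna.nameprep("Bücher"))           # 'bücher' (case-mapped by nameprep)
print(idna.ToASCII("bücher"))            # b'xn--bcher-kva'
print(idna.ToUnicode(b"xn--bcher-kva"))  # 'bücher'

# The registered codec applies ToASCII/ToUnicode label by label:
print("bücher.example".encode("idna"))   # b'xn--bcher-kva.example'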
stdlib/encodings/punycode.pyi (new file, 33 lines)
@@ -0,0 +1,33 @@
import codecs
from typing import Literal

def segregate(str: str) -> tuple[bytes, list[int]]: ...
def selective_len(str: str, max: int) -> int: ...
def selective_find(str: str, char: str, index: int, pos: int) -> tuple[int, int]: ...
def insertion_unsort(str: str, extended: list[int]) -> list[int]: ...
def T(j: int, bias: int) -> int: ...

digits: Literal[b"abcdefghijklmnopqrstuvwxyz0123456789"]

def generate_generalized_integer(N: int, bias: int) -> bytes: ...
def adapt(delta: int, first: bool, numchars: int) -> int: ...
def generate_integers(baselen: int, deltas: list[int]) -> bytes: ...
def punycode_encode(text: str) -> bytes: ...
def decode_generalized_number(extended: bytes, extpos: int, bias: int, errors: str) -> tuple[int, int | None]: ...
def insertion_sort(base: str, extended: bytes, errors: str) -> str: ...
def punycode_decode(text: memoryview | bytes | bytearray | str, errors: str) -> str: ...

class Codec(codecs.Codec):
    def encode(self, input: str, errors: str = "strict") -> tuple[bytes, int]: ...
    def decode(self, input: memoryview | bytes | bytearray | str, errors: str = "strict") -> tuple[str, int]: ...

class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input: str, final: bool = False) -> bytes: ...

class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input: memoryview | bytes | bytearray | str, final: bool = False) -> str: ...  # type: ignore[override]

class StreamWriter(Codec, codecs.StreamWriter): ...
class StreamReader(Codec, codecs.StreamReader): ...

def getregentry() -> codecs.CodecInfo: ...
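The top-level punycode_encode/punycode_decode helpers can be exercised directly; a minimal sketch (not part of the diff; outputs follow CPython's RFC 3492 implementation):

from encodings.punycode import punycode_decode, punycode_encode

print(punycode_encode("bücher"))                # b'bcher-kva'
print(punycode_decode(b"bcher-kva", "strict"))  # 'bücher'

# The registered "punycode" codec wraps the same functions:
print("bücher".encode("punycode"))              # b'bcher-kva'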
stdlib/encodings/rot_13.pyi (new file, 23 lines)
@@ -0,0 +1,23 @@
import codecs
from _typeshed import SupportsRead, SupportsWrite

# This codec is string to string.

class Codec(codecs.Codec):
    def encode(self, input: str, errors: str = "strict") -> tuple[str, int]: ...  # type: ignore[override]
    def decode(self, input: str, errors: str = "strict") -> tuple[str, int]: ...  # type: ignore[override]

class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input: str, final: bool = False) -> str: ...  # type: ignore[override]

class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input: str, final: bool = False) -> str: ...  # type: ignore[override]

class StreamWriter(Codec, codecs.StreamWriter): ...
class StreamReader(Codec, codecs.StreamReader): ...

def getregentry() -> codecs.CodecInfo: ...

rot13_map: dict[int, int]

def rot13(infile: SupportsRead[str], outfile: SupportsWrite[str]) -> None: ...
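Because this codec is string to string (as the comment in the stub notes), it is used through codecs.encode()/codecs.decode() rather than str.encode(); a short sketch of the two entry points (not part of the diff):

import codecs
import io

from encodings.rot_13 import rot13

print(codecs.encode("Hello", "rot_13"))  # 'Uryyb'

# The module-level rot13() helper works on text file-like objects.
out = io.StringIO()
rot13(io.StringIO("Hello"), out)
print(out.getvalue())                    # 'Uryyb'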
stdlib/encodings/undefined.pyi (new file, 20 lines)
@@ -0,0 +1,20 @@
import codecs
from _typeshed import ReadableBuffer

# These return types are just to match the base types. In reality, these always
# raise an error.

class Codec(codecs.Codec):
    def encode(self, input: str, errors: str = "strict") -> tuple[bytes, int]: ...
    def decode(self, input: ReadableBuffer, errors: str = "strict") -> tuple[str, int]: ...

class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input: str, final: bool = False) -> bytes: ...

class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input: ReadableBuffer, final: bool = False) -> str: ...

class StreamWriter(Codec, codecs.StreamWriter): ...
class StreamReader(Codec, codecs.StreamReader): ...

def getregentry() -> codecs.CodecInfo: ...
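As the stub's comment says, these methods only ever raise; a quick demonstration through the registered codec (not part of the diff; the exact message is CPython's, not promised by the stub):

import codecs

try:
    codecs.encode("anything", "undefined")
except UnicodeError as exc:
    print("encode raised:", exc)  # CPython raises UnicodeError("undefined encoding")

try:
    codecs.decode(b"anything", "undefined")
except UnicodeError as exc:
    print("decode raised:", exc)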