[Pygments] Complete stubs for various modules (#15610)

This commit is contained in:
Brian Schubert
2026-04-06 21:47:22 -04:00
committed by GitHub
parent 9d757d079b
commit d3235e3d7b
13 changed files with 169 additions and 112 deletions
@@ -13,9 +13,6 @@ pygments.__main__
pygments.lexer.Lexer.name
pygments.lexer.Lexer.url
pygments.lexer.Lexer.version_added
# Class attributes that are set to None in the base class, but are
# always overridden with a non-None value in subclasses.
pygments.formatter.Formatter.name
# Individual lexers and styles submodules are not stubbed at this time.
+8 -7
View File
@@ -1,6 +1,6 @@
from _typeshed import SupportsWrite
from collections.abc import Iterator
from typing import TypeVar, overload
from collections.abc import Iterable, Iterator
from typing import Final, TypeVar, overload
from pygments.formatter import Formatter
from pygments.lexer import Lexer
@@ -8,15 +8,16 @@ from pygments.token import _TokenType
_T = TypeVar("_T", str, bytes)
__version__: str
__version__: Final[str]
__docformat__: Final = "restructuredtext"
__all__ = ["lex", "format", "highlight"]
def lex(code: str, lexer: Lexer) -> Iterator[tuple[_TokenType, str]]: ...
@overload
def format(tokens, formatter: Formatter[_T], outfile: SupportsWrite[_T]) -> None: ...
def format(tokens: Iterable[tuple[_TokenType, str]], formatter: Formatter[_T], outfile: SupportsWrite[_T]) -> None: ...
@overload
def format(tokens, formatter: Formatter[_T], outfile: None = None) -> _T: ...
def format(tokens: Iterable[tuple[_TokenType, str]], formatter: Formatter[_T], outfile: None = None) -> _T: ...
@overload
def highlight(code, lexer, formatter: Formatter[_T], outfile: SupportsWrite[_T]) -> None: ...
def highlight(code: str, lexer: Lexer, formatter: Formatter[_T], outfile: SupportsWrite[_T]) -> None: ...
@overload
def highlight(code, lexer, formatter: Formatter[_T], outfile: None = None) -> _T: ...
def highlight(code: str, lexer: Lexer, formatter: Formatter[_T], outfile: None = None) -> _T: ...
+4 -3
View File
@@ -1,8 +1,9 @@
import argparse
from collections.abc import Sequence
def main_inner(parser, argns): ...
def main_inner(parser: argparse.ArgumentParser, argns: argparse.Namespace) -> int: ...
class HelpFormatter(argparse.HelpFormatter):
def __init__(self, prog, indent_increment: int = 2, max_help_position: int = 16, width=None) -> None: ...
def __init__(self, prog: str, indent_increment: int = 2, max_help_position: int = 16, width: int | None = None) -> None: ...
def main(args=...): ...
def main(args: Sequence[str] | None = ...) -> int: ...
+8 -8
View File
@@ -1,10 +1,10 @@
from _typeshed import Incomplete
from typing import Final
esc: str
codes: Incomplete
dark_colors: Incomplete
light_colors: Incomplete
esc: Final = "\x1b["
codes: Final[dict[str, str]]
dark_colors: Final[list[str]]
light_colors: Final[list[str]]
def reset_color(): ...
def colorize(color_key, text): ...
def ansiformat(attr, text): ...
def reset_color() -> str: ...
def colorize(color_key: str, text: str) -> str: ...
def ansiformat(attr: str, text: str) -> str: ...
+1 -1
View File
@@ -1,3 +1,3 @@
__all__ = ["get_filetype_from_buffer"]
def get_filetype_from_buffer(buf, max_lines: int = 5): ...
def get_filetype_from_buffer(buf: str, max_lines: int = 5) -> str | None: ...
+5 -4
View File
@@ -1,16 +1,17 @@
import sys
from _typeshed import Incomplete
from collections.abc import Generator
from typing import Final
from pygments.filter import Filter
from pygments.formatter import Formatter
from pygments.lexer import Lexer
from pygments.style import Style
LEXER_ENTRY_POINT: str
FORMATTER_ENTRY_POINT: str
STYLE_ENTRY_POINT: str
FILTER_ENTRY_POINT: str
LEXER_ENTRY_POINT: Final = "pygments.lexers"
FORMATTER_ENTRY_POINT: Final = "pygments.formatters"
STYLE_ENTRY_POINT: Final = "pygments.styles"
FILTER_ENTRY_POINT: Final = "pygments.filters"
if sys.version_info >= (3, 10):
from importlib.metadata import EntryPoints
+9 -7
View File
@@ -1,10 +1,12 @@
from _typeshed import Incomplete
from collections.abc import Iterable
import re
from collections.abc import Iterable, Sequence
from operator import itemgetter
from typing import Final
CS_ESCAPE: Incomplete
FIRST_ELEMENT: Incomplete
CS_ESCAPE: Final[re.Pattern[str]]
FIRST_ELEMENT: Final[itemgetter[int]]
def commonprefix(m: Iterable[str]) -> str: ...
def make_charset(letters): ...
def regex_opt_inner(strings, open_paren): ...
def regex_opt(strings, prefix: str = "", suffix: str = ""): ...
def make_charset(letters: Iterable[str]) -> str: ...
def regex_opt_inner(strings: Sequence[str], open_paren: str) -> str: ...
def regex_opt(strings: Iterable[str], prefix: str = "", suffix: str = "") -> str: ...
+11 -11
View File
@@ -1,19 +1,19 @@
from _typeshed import Incomplete
from re import Match, Pattern, RegexFlag
class EndOfText(RuntimeError): ...
class Scanner:
data: Incomplete
data_length: Incomplete
data: str
data_length: int
start_pos: int
pos: int
flags: Incomplete
last: Incomplete
match: Incomplete
def __init__(self, text, flags: int = 0) -> None: ...
flags: int | RegexFlag
last: str | None
match: str | None
def __init__(self, text: str, flags: int | RegexFlag = 0) -> None: ...
@property
def eos(self): ...
def check(self, pattern): ...
def test(self, pattern): ...
def scan(self, pattern): ...
def eos(self) -> bool: ...
def check(self, pattern: str | Pattern[str]) -> Match[str] | None: ...
def test(self, pattern: str | Pattern[str]) -> bool: ...
def scan(self, pattern: str | Pattern[str]) -> bool: ...
def get_char(self) -> None: ...
+7 -5
View File
@@ -1,9 +1,11 @@
from typing import Any, Final
from docutils.parsers.rst import Directive
MODULEDOC: str
LEXERDOC: str
FMTERDOC: str
FILTERDOC: str
MODULEDOC: Final[str]
LEXERDOC: Final[str]
FMTERDOC: Final[str]
FILTERDOC: Final[str]
class PygmentsDoc(Directive):
filenames: set[str]
@@ -12,4 +14,4 @@ class PygmentsDoc(Directive):
def document_formatters(self) -> str: ...
def document_filters(self) -> str: ...
def setup(app) -> None: ...
def setup(app: Any) -> None: ... # Actual type of 'app' is sphinx.application.Sphinx
+3 -2
View File
@@ -1,5 +1,6 @@
from _typeshed import Self
from collections.abc import Iterator, Mapping, Sequence, Set as AbstractSet
from typing import ClassVar, TypedDict, type_check_only
from typing import Any, ClassVar, TypedDict, type_check_only
from pygments.token import _TokenType
@@ -20,7 +21,7 @@ class _StyleDict(TypedDict):
bgansicolor: str | None
class StyleMeta(type):
def __new__(cls, name, bases, dct): ...
def __new__(cls: type[Self], name: str, bases: tuple[type[Any], ...], dct: dict[str, Any]) -> Self: ...
def style_for_token(cls, token: _TokenType) -> _StyleDict: ...
def styles_token(cls, ttype: _TokenType) -> bool: ...
def list_styles(cls) -> list[tuple[_TokenType, _StyleDict]]: ...
+4 -4
View File
@@ -1,5 +1,5 @@
from collections.abc import Mapping
from typing import Any
from typing import Any, Final
from typing_extensions import Self
class _TokenType(tuple[str, ...]):
@@ -27,8 +27,8 @@ Operator: _TokenType
Comment: _TokenType
Generic: _TokenType
def is_token_subtype(ttype, other): ...
def string_to_tokentype(s): ...
def is_token_subtype(ttype: _TokenType, other: _TokenType) -> bool: ...
def string_to_tokentype(s: str | _TokenType) -> _TokenType: ...
# dict, but shouldn't be mutated
STANDARD_TYPES: Mapping[_TokenType, str]
STANDARD_TYPES: Final[Mapping[_TokenType, str]]
+69 -36
View File
@@ -1,38 +1,71 @@
from _typeshed import Incomplete
from typing import Final, Literal, TypeAlias
Cc: str
Cf: str
Cn: str
Co: str
Cs: str
Ll: str
Lm: str
Lo: str
Lt: str
Lu: str
Mc: str
Me: str
Mn: str
Nd: str
Nl: str
No: str
Pc: str
Pd: str
Pe: str
Pf: str
Pi: str
Po: str
Ps: str
Sc: str
Sk: str
Sm: str
So: str
Zl: str
Zp: str
Zs: str
xid_continue: str
xid_start: str
cats: Incomplete
_Cats: TypeAlias = Literal[
"Cc",
"Cf",
"Cn",
"Co",
"Cs",
"Ll",
"Lm",
"Lo",
"Lt",
"Lu",
"Mc",
"Me",
"Mn",
"Nd",
"Nl",
"No",
"Pc",
"Pd",
"Pe",
"Pf",
"Pi",
"Po",
"Ps",
"Sc",
"Sk",
"Sm",
"So",
"Zl",
"Zp",
"Zs",
]
def combine(*args): ...
def allexcept(*args): ...
Cc: Final[str]
Cf: Final[str]
Cn: Final[str]
Co: Final[str]
Cs: Final[str]
Ll: Final[str]
Lm: Final[str]
Lo: Final[str]
Lt: Final[str]
Lu: Final[str]
Mc: Final[str]
Me: Final[str]
Mn: Final[str]
Nd: Final[str]
Nl: Final[str]
No: Final[str]
Pc: Final[str]
Pd: Final[str]
Pe: Final[str]
Pf: Final[str]
Pi: Final[str]
Po: Final[str]
Ps: Final[str]
Sc: Final[str]
Sk: Final[str]
Sm: Final[str]
So: Final[str]
Zl: Final[str]
Zp: Final[str]
Zs: Final[str]
xid_continue: Final[str]
xid_start: Final[str]
cats: Final[list[_Cats]]
def combine(*args: _Cats) -> str: ...
def allexcept(*args: _Cats) -> str: ...
+40 -21
View File
@@ -1,34 +1,53 @@
from _typeshed import Incomplete
from collections.abc import Callable, Container, Hashable, Iterable
from io import TextIOWrapper
from re import Pattern
from typing import Any, Final, Protocol, TypeVar, type_check_only
split_path_re: Incomplete
doctype_lookup_re: Incomplete
tag_re: Incomplete
xml_decl_re: Incomplete
_T = TypeVar("_T")
_H = TypeVar("_H", bound=Hashable)
split_path_re: Final[Pattern[str]]
doctype_lookup_re: Final[Pattern[str]]
tag_re: Final[Pattern[str]]
xml_decl_re: Final[Pattern[str]]
class ClassNotFound(ValueError): ...
class OptionError(Exception): ...
def get_choice_opt(options, optname, allowed, default=None, normcase: bool = False): ...
def get_bool_opt(options, optname, default=None): ...
def get_int_opt(options, optname, default=None): ...
def get_list_opt(options, optname, default=None): ...
def docstring_headline(obj): ...
def make_analysator(f): ...
def shebang_matches(text, regex): ...
def doctype_matches(text, regex): ...
def html_doctype_matches(text): ...
def looks_like_xml(text): ...
def surrogatepair(c): ...
def format_lines(var_name, seq, raw: bool = False, indent_level: int = 0): ...
def duplicates_removed(it, already_seen=()): ...
@type_check_only
class _SupportsGetStrWithDefault(Protocol):
def get(self, item: str, default: Any, /) -> Any: ...
# 'options' contains the **kwargs of an arbitrary function.
def get_choice_opt(
options: _SupportsGetStrWithDefault, optname: str, allowed: Container[_T], default: _T | None = None, normcase: bool = False
) -> _T: ...
def get_bool_opt(options: _SupportsGetStrWithDefault, optname: str, default: bool | None = None) -> bool: ...
def get_int_opt(options: _SupportsGetStrWithDefault, optname: str, default: int | None = None) -> int: ...
# Return type and type of 'default' depend on the signature of the function whose **kwargs
# are being processed.
def get_list_opt(
options: _SupportsGetStrWithDefault, optname: str, default: list[Any] | tuple[Any, ...] | None = None
) -> list[Any]: ...
def docstring_headline(obj: object) -> str: ...
def make_analysator(f: Callable[[str], float]) -> Callable[[str], float]: ...
def shebang_matches(text: str, regex: str) -> bool: ...
def doctype_matches(text: str, regex: str) -> bool: ...
def html_doctype_matches(text: str) -> bool: ...
def looks_like_xml(text: str) -> bool: ...
def surrogatepair(c: int) -> tuple[int, int]: ...
def format_lines(var_name: str, seq: Iterable[str], raw: bool = False, indent_level: int = 0) -> str: ...
def duplicates_removed(it: Iterable[_H], already_seen: Container[_H] = ()) -> list[_H]: ...
class Future:
def get(self) -> None: ...
def guess_decode(text): ...
def guess_decode_from_terminal(text, term): ...
def terminal_encoding(term): ...
def guess_decode(text: bytes) -> tuple[str, str]: ...
# If 'term' has an 'encoding' attribute, it should be a str. Otherwise any object is accepted.
def guess_decode_from_terminal(text: bytes, term: Any) -> tuple[str, str]: ...
def terminal_encoding(term: Any) -> str: ...
class UnclosingTextIOWrapper(TextIOWrapper):
def close(self) -> None: ...