From d4ba892baa42cdee8189742c3f27bf63d6094370 Mon Sep 17 00:00:00 2001
From: tristanlatr
Date: Tue, 10 Dec 2024 17:32:02 -0500
Subject: [PATCH 1/5] Add simple black config
---
.black.toml | 12 ++++++++++++
tox.ini | 13 +++++++++++++
2 files changed, 25 insertions(+)
create mode 100644 .black.toml
diff --git a/.black.toml b/.black.toml
new file mode 100644
index 000000000..f946dd780
--- /dev/null
+++ b/.black.toml
@@ -0,0 +1,12 @@
+[tool.black]
+line-length = 120
+skip-string-normalization = 1
+required-version = 24
+target-version = ['py39']
+
+# 'extend-exclude' excludes files or directories in addition to the defaults
+extend-exclude = '''
+(
+ .+/sre_.+.py | .+/testpackages/.+
+)
+'''
\ No newline at end of file
diff --git a/tox.ini b/tox.ini
index ec3c4accf..1add50a25 100644
--- a/tox.ini
+++ b/tox.ini
@@ -73,6 +73,19 @@ commands =
sh -c "find pydoctor/ -name \*.py ! -path '*/testpackages/*' ! -path '*/sre_parse36.py' ! -path '*/sre_constants36.py' | xargs pyflakes"
sh -c "find docs/ -name \*.py ! -path '*demo/*' | xargs pyflakes"
+[testenv:black]
+description = Run black over the pydoctor code
+deps =
+ black==24.8.0
+commands =
+ black --check --diff --color --config=.black.toml ./pydoctor
+
+[testenv:black-reformat]
+description = Run black over the pydoctor code
+deps =
+ black==24.8.0
+commands =
+ black --color --config=.black.toml ./pydoctor
[testenv:cpython-apidocs]
description = Build CPython 3.11 API documentation
From 38068054e22e32c47afb018624ab1572232af14d Mon Sep 17 00:00:00 2001
From: tristanlatr
Date: Tue, 10 Dec 2024 17:34:11 -0500
Subject: [PATCH 2/5] Rename tox envs to "black" and "reformat"
---
tox.ini | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/tox.ini b/tox.ini
index 1add50a25..77e218448 100644
--- a/tox.ini
+++ b/tox.ini
@@ -74,14 +74,14 @@ commands =
sh -c "find docs/ -name \*.py ! -path '*demo/*' | xargs pyflakes"
[testenv:black]
-description = Run black over the pydoctor code
+description = Check the format of the code with black
deps =
black==24.8.0
commands =
black --check --diff --color --config=.black.toml ./pydoctor
-[testenv:black-reformat]
-description = Run black over the pydoctor code
+[testenv:reformat]
+description = Reformat the code with black
deps =
black==24.8.0
commands =
From a10c695097ea21683a1c9d0e1f3da27c284a87fb Mon Sep 17 00:00:00 2001
From: tristanlatr
Date: Thu, 12 Dec 2024 10:08:35 -0500
Subject: [PATCH 3/5] Use the default value for line-length
---
.black.toml | 2 +-
pydoctor/__init__.py | 1 +
pydoctor/_configparser.py | 178 ++-
pydoctor/astbuilder.py | 591 ++++---
pydoctor/astutils.py | 419 ++---
pydoctor/driver.py | 69 +-
pydoctor/epydoc/__init__.py | 1 -
pydoctor/epydoc/doctest.py | 76 +-
pydoctor/epydoc/docutils.py | 66 +-
pydoctor/epydoc/markup/__init__.py | 108 +-
pydoctor/epydoc/markup/_napoleon.py | 25 +-
pydoctor/epydoc/markup/_pyval_repr.py | 483 +++---
pydoctor/epydoc/markup/_types.py | 98 +-
pydoctor/epydoc/markup/epytext.py | 600 ++++---
pydoctor/epydoc/markup/google.py | 1 +
pydoctor/epydoc/markup/numpy.py | 1 +
pydoctor/epydoc/markup/plaintext.py | 35 +-
pydoctor/epydoc/markup/restructuredtext.py | 199 +--
pydoctor/epydoc2stan.py | 392 +++--
pydoctor/extensions/__init__.py | 107 +-
pydoctor/extensions/attrs.py | 78 +-
pydoctor/extensions/deprecate.py | 73 +-
pydoctor/extensions/zopeinterface.py | 136 +-
pydoctor/factory.py | 24 +-
pydoctor/linker.py | 99 +-
pydoctor/model.py | 585 ++++---
pydoctor/mro.py | 4 +-
pydoctor/napoleon/docstring.py | 225 +--
pydoctor/napoleon/iterators.py | 21 +-
pydoctor/node2stan.py | 70 +-
pydoctor/options.py | 554 ++++---
pydoctor/qnmatch.py | 21 +-
pydoctor/sphinx.py | 93 +-
pydoctor/sphinx_ext/build_apidocs.py | 10 +-
pydoctor/stanutils.py | 16 +-
pydoctor/templatewriter/__init__.py | 160 +-
pydoctor/templatewriter/pages/__init__.py | 273 ++--
.../templatewriter/pages/attributechild.py | 11 +-
.../templatewriter/pages/functionchild.py | 10 +-
pydoctor/templatewriter/pages/sidebar.py | 302 ++--
pydoctor/templatewriter/pages/table.py | 42 +-
pydoctor/templatewriter/search.py | 105 +-
pydoctor/templatewriter/summary.py | 132 +-
pydoctor/templatewriter/util.py | 110 +-
pydoctor/templatewriter/writer.py | 46 +-
pydoctor/test/__init__.py | 3 +-
pydoctor/test/epydoc/__init__.py | 7 +-
pydoctor/test/epydoc/test_epytext.py | 66 +-
pydoctor/test/epydoc/test_epytext2html.py | 38 +-
pydoctor/test/epydoc/test_epytext2node.py | 9 +-
pydoctor/test/epydoc/test_google_numpy.py | 43 +-
.../test/epydoc/test_parsed_docstrings.py | 22 +-
pydoctor/test/epydoc/test_pyval_repr.py | 1191 +++++++++++---
pydoctor/test/epydoc/test_restructuredtext.py | 171 +-
pydoctor/test/test_astbuilder.py | 1183 +++++++++-----
pydoctor/test/test_astutils.py | 41 +-
pydoctor/test/test_attrs.py | 70 +-
pydoctor/test/test_colorize.py | 4 +
pydoctor/test/test_commandline.py | 128 +-
pydoctor/test/test_configparser.py | 467 +++---
.../test/test_cyclic_imports_base_classes.py | 3 +-
pydoctor/test/test_epydoc2stan.py | 962 +++++++----
pydoctor/test/test_model.py | 224 +--
pydoctor/test/test_mro.py | 237 ++-
pydoctor/test/test_napoleon_docstring.py | 1414 ++++++++++-------
pydoctor/test/test_napoleon_iterators.py | 4 +
pydoctor/test/test_node2stan.py | 111 +-
pydoctor/test/test_options.py | 75 +-
pydoctor/test/test_packages.py | 16 +-
pydoctor/test/test_pydantic_fields.py | 14 +-
pydoctor/test/test_qnmatch.py | 89 +-
pydoctor/test/test_sphinx.py | 186 +--
pydoctor/test/test_templatewriter.py | 390 +++--
.../test/test_twisted_python_deprecate.py | 135 +-
pydoctor/test/test_type_fields.py | 304 ++--
pydoctor/test/test_utils.py | 11 +-
pydoctor/test/test_visitor.py | 66 +-
pydoctor/test/test_zopeinterface.py | 96 +-
pydoctor/themes/__init__.py | 2 +
pydoctor/utils.py | 30 +-
pydoctor/visitor.py | 312 ++--
81 files changed, 8990 insertions(+), 5786 deletions(-)
diff --git a/.black.toml b/.black.toml
index f946dd780..757d0d0bc 100644
--- a/.black.toml
+++ b/.black.toml
@@ -1,5 +1,5 @@
[tool.black]
-line-length = 120
+line-length = 88
skip-string-normalization = 1
required-version = 24
target-version = ['py39']
diff --git a/pydoctor/__init__.py b/pydoctor/__init__.py
index c8f38d605..c5cdb4b3c 100644
--- a/pydoctor/__init__.py
+++ b/pydoctor/__init__.py
@@ -3,6 +3,7 @@
Warning: PyDoctor's API isn't stable YET, custom builds are prone to break!
"""
+
import importlib.metadata as importlib_metadata
__version__ = importlib_metadata.version('pydoctor')
diff --git a/pydoctor/_configparser.py b/pydoctor/_configparser.py
index 90cd73d15..75524f7c6 100644
--- a/pydoctor/_configparser.py
+++ b/pydoctor/_configparser.py
@@ -20,6 +20,7 @@
>>> parser = ArgumentParser(..., default_config_files=['./pyproject.toml', 'setup.cfg', 'my_super_tool.ini'], config_file_parser_class=MixedParser)
"""
+
from __future__ import annotations
import argparse
@@ -38,11 +39,13 @@
if sys.version_info >= (3, 11):
from tomllib import load as _toml_load
import io
- # The tomllib module from the standard library
- # expect a binary IO and will fail if receives otherwise.
+
+ # The tomllib module from the standard library
+ # expect a binary IO and will fail if receives otherwise.
# So we hack a compat function that will work with TextIO and assume the utf-8 encoding.
def toml_load(stream: TextIO) -> Any:
return _toml_load(io.BytesIO(stream.read().encode()))
+
else:
from toml import load as toml_load
@@ -50,34 +53,37 @@ def toml_load(stream: TextIO) -> Any:
# - https://stackoverflow.com/questions/11859442/how-to-match-string-in-quotes-using-regex
# - and https://stackoverflow.com/a/41005190
-_QUOTED_STR_REGEX = re.compile(r'(^\"(?:\\.|[^\"\\])*\"$)|'
- r'(^\'(?:\\.|[^\'\\])*\'$)')
+_QUOTED_STR_REGEX = re.compile(r'(^\"(?:\\.|[^\"\\])*\"$)|' r'(^\'(?:\\.|[^\'\\])*\'$)')
+
+_TRIPLE_QUOTED_STR_REGEX = re.compile(
+ r'(^\"\"\"(\s+)?(([^\"]|\"([^\"]|\"[^\"]))*(\"\"?)?)?(\s+)?(?:\\.|[^\"\\])\"\"\"$)|'
+ # Unescaped quotes at the end of a string generates
+ # "SyntaxError: EOL while scanning string literal",
+ # so we don't account for those kind of strings as quoted.
+ r'(^\'\'\'(\s+)?(([^\']|\'([^\']|\'[^\']))*(\'\'?)?)?(\s+)?(?:\\.|[^\'\\])\'\'\'$)',
+ flags=re.DOTALL,
+)
-_TRIPLE_QUOTED_STR_REGEX = re.compile(r'(^\"\"\"(\s+)?(([^\"]|\"([^\"]|\"[^\"]))*(\"\"?)?)?(\s+)?(?:\\.|[^\"\\])\"\"\"$)|'
- # Unescaped quotes at the end of a string generates
- # "SyntaxError: EOL while scanning string literal",
- # so we don't account for those kind of strings as quoted.
- r'(^\'\'\'(\s+)?(([^\']|\'([^\']|\'[^\']))*(\'\'?)?)?(\s+)?(?:\\.|[^\'\\])\'\'\'$)', flags=re.DOTALL)
@functools.lru_cache(maxsize=256, typed=True)
-def is_quoted(text:str, triple:bool=True) -> bool:
+def is_quoted(text: str, triple: bool = True) -> bool:
"""
- Detect whether a string is a quoted representation.
+ Detect whether a string is a quoted representation.
@param triple: Also match tripple quoted strings.
"""
- return bool(_QUOTED_STR_REGEX.match(text)) or \
- (triple and bool(_TRIPLE_QUOTED_STR_REGEX.match(text)))
+ return bool(_QUOTED_STR_REGEX.match(text)) or (triple and bool(_TRIPLE_QUOTED_STR_REGEX.match(text)))
+
-def unquote_str(text:str, triple:bool=True) -> str:
+def unquote_str(text: str, triple: bool = True) -> str:
"""
- Unquote a maybe quoted string representation.
+ Unquote a maybe quoted string representation.
If the string is not detected as being a quoted representation, it returns the same string as passed.
It supports all kinds of python quotes: C{\"\"\"}, C{'''}, C{"} and C{'}.
@param triple: Also unquote tripple quoted strings.
@raises ValueError: If the string is detected as beeing quoted but literal_eval() fails to evaluate it as string.
- This would be a bug in the regex.
+ This would be a bug in the regex.
"""
if is_quoted(text, triple=triple):
try:
@@ -88,7 +94,8 @@ def unquote_str(text:str, triple:bool=True) -> str:
return s
return text
-def parse_toml_section_name(section_name:str) -> Tuple[str, ...]:
+
+def parse_toml_section_name(section_name: str) -> Tuple[str, ...]:
"""
Parse a TOML section name to a sequence of strings.
@@ -105,7 +112,8 @@ def parse_toml_section_name(section_name:str) -> Tuple[str, ...]:
section.append(unquote_str(a.strip(), triple=False))
return tuple(section)
-def get_toml_section(data:Dict[str, Any], section:Union[Tuple[str, ...], str]) -> Optional[Dict[str, Any]]:
+
+def get_toml_section(data: Dict[str, Any], section: Union[Tuple[str, ...], str]) -> Optional[Dict[str, Any]]:
"""
Given some TOML data (as loaded with C{toml.load()}), returns the requested section of the data.
Returns C{None} if the section is not found.
@@ -122,6 +130,7 @@ def get_toml_section(data:Dict[str, Any], section:Union[Tuple[str, ...], str]) -
return None
return itemdata
+
class TomlConfigParser(ConfigFileParser):
"""
U{TOML } parser with support for sections.
@@ -132,7 +141,7 @@ class TomlConfigParser(ConfigFileParser):
# this is a comment
# this is TOML section table:
- [tool.my-software]
+ [tool.my-software]
# how to specify a key-value pair (strings must be quoted):
format-string = "restructuredtext"
# how to set an arg which has action="store_true":
@@ -144,9 +153,9 @@ class TomlConfigParser(ConfigFileParser):
"https://twistedmatrix.com/documents/current/api/objects.inv"]
# how to specify a multiline text:
multi-line-text = '''
- Lorem ipsum dolor sit amet, consectetur adipiscing elit.
- Vivamus tortor odio, dignissim non ornare non, laoreet quis nunc.
- Maecenas quis dapibus leo, a pellentesque leo.
+ Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+ Vivamus tortor odio, dignissim non ornare non, laoreet quis nunc.
+ Maecenas quis dapibus leo, a pellentesque leo.
'''
# how to specify a empty text:
empty-text = ''
@@ -166,11 +175,11 @@ class TomlConfigParser(ConfigFileParser):
def __init__(self, sections: List[str]) -> None:
super().__init__()
self.sections = sections
-
+
def __call__(self) -> ConfigFileParser:
return self
- def parse(self, stream:TextIO) -> Dict[str, Any]:
+ def parse(self, stream: TextIO) -> Dict[str, Any]:
"""Parses the keys and values from a TOML config file."""
# parse with configparser to allow multi-line values
try:
@@ -184,7 +193,7 @@ def parse(self, stream:TextIO) -> Dict[str, Any]:
for section in self.sections:
data = get_toml_section(config, section)
if data:
- # Seems a little weird, but anything that is not a list is converted to string,
+ # Seems a little weird, but anything that is not a list is converted to string,
# It will be converted back to boolean, int or whatever after.
# Because config values are still passed to argparser for computation.
for key, value in data.items():
@@ -195,26 +204,29 @@ def parse(self, stream:TextIO) -> Dict[str, Any]:
else:
result[key] = str(value)
break
-
+
return result
def get_syntax_description(self) -> str:
- return ("Config file syntax is Tom's Obvious, Minimal Language. "
- "See https://github.com/toml-lang/toml/blob/v0.5.0/README.md for details.")
+ return (
+ "Config file syntax is Tom's Obvious, Minimal Language. "
+ "See https://github.com/toml-lang/toml/blob/v0.5.0/README.md for details."
+ )
+
class IniConfigParser(ConfigFileParser):
"""
INI parser with support for sections.
-
- This parser somewhat ressembles L{configargparse.ConfigparserConfigFileParser}.
- It uses L{configparser} and evaluate values written with python list syntax.
- With the following changes:
+ This parser somewhat ressembles L{configargparse.ConfigparserConfigFileParser}.
+ It uses L{configparser} and evaluate values written with python list syntax.
+
+ With the following changes:
- Must be created with argument to bind the parser to a list of sections.
- Does not convert multiline strings to single line.
- - Optional support for converting multiline strings to list (if ``split_ml_text_to_list=True``).
- - Optional support for quoting strings in config file
- (useful when text must not be converted to list or when text
+ - Optional support for converting multiline strings to list (if ``split_ml_text_to_list=True``).
+ - Optional support for quoting strings in config file
+ (useful when text must not be converted to list or when text
should contain trailing whitespaces).
- Comments may only appear on their own in an otherwise empty line (like in configparser).
@@ -226,7 +238,7 @@ class IniConfigParser(ConfigFileParser):
; also a comment
[my_super_tool]
# how to specify a key-value pair:
- format-string: restructuredtext
+ format-string: restructuredtext
# white space are ignored, so name = value same as name=value
# this is why you can quote strings (double quotes works just as well)
quoted-string = '\thello\tmom... '
@@ -238,39 +250,39 @@ class IniConfigParser(ConfigFileParser):
repeatable-option = ["https://docs.python.org/3/objects.inv",
"https://twistedmatrix.com/documents/current/api/objects.inv"]
# how to specify a multiline text:
- multi-line-text =
- Lorem ipsum dolor sit amet, consectetur adipiscing elit.
- Vivamus tortor odio, dignissim non ornare non, laoreet quis nunc.
- Maecenas quis dapibus leo, a pellentesque leo.
+ multi-line-text =
+ Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+ Vivamus tortor odio, dignissim non ornare non, laoreet quis nunc.
+ Maecenas quis dapibus leo, a pellentesque leo.
# how to specify a empty text:
- empty-text =
+ empty-text =
# this also works:
empty-text = ''
# how to specify a empty list:
empty-list = []
- If you use L{IniConfigParser(sections, split_ml_text_to_list=True)},
+ If you use L{IniConfigParser(sections, split_ml_text_to_list=True)},
the same rules are applicable with the following changes::
[my-software]
- # to specify a list arg (eg. arg which has action="append"),
+ # to specify a list arg (eg. arg which has action="append"),
# just enter one value per line (the list literal format can still be used):
repeatable-option =
https://docs.python.org/3/objects.inv
https://twistedmatrix.com/documents/current/api/objects.inv
# to specify a multiline text, you have to quote it:
multi-line-text = '''
- Lorem ipsum dolor sit amet, consectetur adipiscing elit.
- Vivamus tortor odio, dignissim non ornare non, laoreet quis nunc.
- Maecenas quis dapibus leo, a pellentesque leo.
+ Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+ Vivamus tortor odio, dignissim non ornare non, laoreet quis nunc.
+ Maecenas quis dapibus leo, a pellentesque leo.
'''
# how to specify a empty text:
empty-text = ''
# how to specify a empty list:
empty-list = []
- # the following empty value would be simply ignored because we can't
+ # the following empty value would be simply ignored because we can't
# differenciate between simple value and list value without any data:
- totally-ignored-field =
+ totally-ignored-field =
Usage:
@@ -282,7 +294,7 @@ class IniConfigParser(ConfigFileParser):
"""
- def __init__(self, sections:List[str], split_ml_text_to_list:bool) -> None:
+ def __init__(self, sections: List[str], split_ml_text_to_list: bool) -> None:
super().__init__()
self.sections = sections
self.split_ml_text_to_list = split_ml_text_to_list
@@ -290,7 +302,7 @@ def __init__(self, sections:List[str], split_ml_text_to_list:bool) -> None:
def __call__(self) -> ConfigFileParser:
return self
- def parse(self, stream:TextIO) -> Dict[str, Any]:
+ def parse(self, stream: TextIO) -> Dict[str, Any]:
"""Parses the keys and values from an INI config file."""
# parse with configparser to allow multi-line values
config = configparser.ConfigParser()
@@ -304,7 +316,7 @@ def parse(self, stream:TextIO) -> Dict[str, Any]:
for section in config.sections() + [configparser.DEFAULTSECT]:
if section not in self.sections:
continue
- for k,value in config[section].items():
+ for k, value in config[section].items():
# value is already strip by configparser
if not value and self.split_ml_text_to_list:
# ignores empty values when split_ml_text_to_list is True
@@ -320,7 +332,11 @@ def parse(self, stream:TextIO) -> Dict[str, Any]:
except Exception as e:
# error evaluating object
_tripple = 'tripple ' if '\n' in value else ''
- raise ConfigFileParserException("Error evaluating list: " + str(e) + f". Put {_tripple}quotes around your text if it's meant to be a string.") from e
+ raise ConfigFileParserException(
+ "Error evaluating list: "
+ + str(e)
+ + f". Put {_tripple}quotes around your text if it's meant to be a string."
+ ) from e
else:
if is_quoted(value):
# evaluate quoted string
@@ -337,22 +353,27 @@ def parse(self, stream:TextIO) -> Dict[str, Any]:
return result
def get_syntax_description(self) -> str:
- msg = ("Uses configparser module to parse an INI file which allows multi-line values. "
- "See https://docs.python.org/3/library/configparser.html for details. "
- "This parser includes support for quoting strings literal as well as python list syntax evaluation. ")
+ msg = (
+ "Uses configparser module to parse an INI file which allows multi-line values. "
+ "See https://docs.python.org/3/library/configparser.html for details. "
+ "This parser includes support for quoting strings literal as well as python list syntax evaluation. "
+ )
if self.split_ml_text_to_list:
- msg += ("Alternatively lists can be constructed with a plain multiline string, "
- "each non-empty line will be converted to a list item.")
+ msg += (
+ "Alternatively lists can be constructed with a plain multiline string, "
+ "each non-empty line will be converted to a list item."
+ )
return msg
+
class CompositeConfigParser(ConfigFileParser):
"""
A config parser that understands multiple formats.
- This parser will successively try to parse the file with each compisite parser, until it succeeds,
+ This parser will successively try to parse the file with each compisite parser, until it succeeds,
else it fails showing all encountered error messages.
- The following code will make configargparse understand both TOML and INI formats.
+ The following code will make configargparse understand both TOML and INI formats.
Making it easy to integrate in both C{pyproject.toml} and C{setup.cfg}.
>>> import configargparse
@@ -361,7 +382,7 @@ class CompositeConfigParser(ConfigFileParser):
>>> parser = configargparse.ArgParser(
... default_config_files=['setup.cfg', 'my_super_tool.ini'],
... config_file_parser_class=configargparse.CompositeConfigParser(
- ... [configargparse.TomlConfigParser(my_tool_sections),
+ ... [configargparse.TomlConfigParser(my_tool_sections),
... configargparse.IniConfigParser(my_tool_sections, split_ml_text_to_list=True)]
... ),
... )
@@ -375,36 +396,36 @@ def __init__(self, config_parser_types: List[Callable[[], ConfigFileParser]]) ->
def __call__(self) -> ConfigFileParser:
return self
- def parse(self, stream:TextIO) -> Dict[str, Any]:
+ def parse(self, stream: TextIO) -> Dict[str, Any]:
errors = []
for p in self.parsers:
try:
- return p.parse(stream) # type: ignore[no-any-return]
+ return p.parse(stream) # type: ignore[no-any-return]
except Exception as e:
stream.seek(0)
errors.append(e)
- raise ConfigFileParserException(
- f"Error parsing config: {', '.join(repr(str(e)) for e in errors)}")
-
+ raise ConfigFileParserException(f"Error parsing config: {', '.join(repr(str(e)) for e in errors)}")
+
def get_syntax_description(self) -> str:
msg = "Uses multiple config parser settings (in order): \n"
- for i, parser in enumerate(self.parsers):
+ for i, parser in enumerate(self.parsers):
msg += f"[{i+1}] {parser.__class__.__name__}: {parser.get_syntax_description()} \n"
return msg
+
class ValidatorParser(ConfigFileParser):
"""
- A parser that warns when unknown options are used.
+ A parser that warns when unknown options are used.
It must be created with a reference to the ArgumentParser object, so like::
parser = ArgumentParser(
prog='mysoft',
config_file_parser_class=ConfigParser,)
-
+
# Add the validator to the config file parser, this is arguably a hack.
parser._config_file_parser = ValidatorParser(parser._config_file_parser, parser)
-
- @note: Using this parser implies acting
+
+ @note: Using this parser implies acting
like L{ArgumentParser}'s option C{ignore_unknown_config_file_keys=True}.
So no need to explicitely mention it.
"""
@@ -413,18 +434,21 @@ def __init__(self, config_parser: ConfigFileParser, argument_parser: ArgumentPar
super().__init__()
self.config_parser = config_parser
self.argument_parser = argument_parser
-
+
def get_syntax_description(self) -> str:
- return self.config_parser.get_syntax_description() #type:ignore[no-any-return]
+ return self.config_parser.get_syntax_description() # type:ignore[no-any-return]
- def parse(self, stream:TextIO) -> Dict[str, Any]:
+ def parse(self, stream: TextIO) -> Dict[str, Any]:
data: Dict[str, Any] = self.config_parser.parse(stream)
# Prepare for checking config file.
- # This code maps all supported config keys to their
+ # This code maps all supported config keys to their
# argparse action counterpart, it will allow more checks to be done down the road.
- known_config_keys: Dict[str, argparse.Action] = {config_key: action for action in self.argument_parser._actions
- for config_key in self.argument_parser.get_possible_config_keys(action)}
+ known_config_keys: Dict[str, argparse.Action] = {
+ config_key: action
+ for action in self.argument_parser._actions
+ for config_key in self.argument_parser.get_possible_config_keys(action)
+ }
# Trigger warning
new_data = {}
@@ -436,5 +460,5 @@ def parse(self, stream:TextIO) -> Dict[str, Any]:
# Remove option
else:
new_data[key] = value
-
+
return new_data
diff --git a/pydoctor/astbuilder.py b/pydoctor/astbuilder.py
index f80acdcc0..85240ce00 100644
--- a/pydoctor/astbuilder.py
+++ b/pydoctor/astbuilder.py
@@ -1,4 +1,5 @@
"""Convert ASTs into L{pydoctor.model.Documentable} instances."""
+
from __future__ import annotations
import ast
@@ -9,15 +10,47 @@
from inspect import Parameter, Signature
from pathlib import Path
from typing import (
- Any, Callable, Collection, Dict, Iterable, Iterator, List, Mapping, Optional, Sequence, Tuple,
- Type, TypeVar, Union, Set, cast
+ Any,
+ Callable,
+ Collection,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ Mapping,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+ Set,
+ cast,
)
from pydoctor import epydoc2stan, model, node2stan, extensions, linker
from pydoctor.epydoc.markup._pyval_repr import colorize_inline_pyval
-from pydoctor.astutils import (is_none_literal, is_typing_annotation, is_using_annotations, is_using_typing_final, node2dottedname, node2fullname,
- is__name__equals__main__, unstring_annotation, upgrade_annotation, iterassign, extract_docstring_linenum, infer_type, get_parents,
- get_docstring_node, get_assign_docstring_node, unparse, NodeVisitor, Parentage, Str)
+from pydoctor.astutils import (
+ is_none_literal,
+ is_typing_annotation,
+ is_using_annotations,
+ is_using_typing_final,
+ node2dottedname,
+ node2fullname,
+ is__name__equals__main__,
+ unstring_annotation,
+ upgrade_annotation,
+ iterassign,
+ extract_docstring_linenum,
+ infer_type,
+ get_parents,
+ get_docstring_node,
+ get_assign_docstring_node,
+ unparse,
+ NodeVisitor,
+ Parentage,
+ Str,
+)
def parseFile(path: Path) -> ast.Module:
@@ -26,29 +59,29 @@ def parseFile(path: Path) -> ast.Module:
src = f.read() + b'\n'
return _parse(src, filename=str(path))
+
_parse = partial(ast.parse, type_comments=True)
+
def _maybeAttribute(cls: model.Class, name: str) -> bool:
"""Check whether a name is a potential attribute of the given class.
This is used to prevent an assignment that wraps a method from
creating an attribute that would overwrite or shadow that method.
@return: L{True} if the name does not exist or is an existing (possibly
- inherited) attribute, L{False} if this name defines something else than an L{Attribute}.
+ inherited) attribute, L{False} if this name defines something else than an L{Attribute}.
"""
obj = cls.find(name)
return obj is None or isinstance(obj, model.Attribute)
+
class IgnoreAssignment(Exception):
"""
A control flow exception meaning that the assignment should not be further proccessed.
"""
-def _handleAliasing(
- ctx: model.CanContainImportsDocumentable,
- target: str,
- expr: Optional[ast.expr]
- ) -> bool:
+
+def _handleAliasing(ctx: model.CanContainImportsDocumentable, target: str, expr: Optional[ast.expr]) -> bool:
"""If the given expression is a name assigned to a target that is not yet
in use, create an alias.
@return: L{True} iff an alias was created.
@@ -62,8 +95,15 @@ def _handleAliasing(
return True
-_CONTROL_FLOW_BLOCKS:Tuple[Type[ast.stmt],...] = (ast.If, ast.While, ast.For, ast.Try,
- ast.AsyncFor, ast.With, ast.AsyncWith)
+_CONTROL_FLOW_BLOCKS: Tuple[Type[ast.stmt], ...] = (
+ ast.If,
+ ast.While,
+ ast.For,
+ ast.Try,
+ ast.AsyncFor,
+ ast.With,
+ ast.AsyncWith,
+)
"""
AST types that introduces a new control flow block, potentially conditionnal.
"""
@@ -72,17 +112,16 @@ def _handleAliasing(
if sys.version_info >= (3, 11):
_CONTROL_FLOW_BLOCKS += (ast.TryStar,)
-def is_constant(obj: model.Attribute,
- annotation:Optional[ast.expr],
- value:Optional[ast.expr]) -> bool:
+
+def is_constant(obj: model.Attribute, annotation: Optional[ast.expr], value: Optional[ast.expr]) -> bool:
"""
- Detect if the given assignment is a constant.
+ Detect if the given assignment is a constant.
- For an assignment to be detected as constant, it should:
+ For an assignment to be detected as constant, it should:
- have all-caps variable name or using L{typing.Final} annotation
- not be overriden
- not be defined in a conditionnal block or any other kind of control flow blocks
-
+
@note: Must be called after setting obj.annotation to detect variables using Final.
"""
if is_using_typing_final(annotation, obj):
@@ -92,27 +131,29 @@ def is_constant(obj: model.Attribute,
return obj.name.isupper()
return False
+
class TypeAliasVisitorExt(extensions.ModuleVisitorExt):
"""
This visitor implements the handling of type aliases and type variables.
"""
+
def _isTypeVariable(self, ob: model.Attribute) -> bool:
if ob.value is not None:
- if isinstance(ob.value, ast.Call) and \
- node2fullname(ob.value.func, ob) in ('typing.TypeVar',
- 'typing_extensions.TypeVar',
- 'typing.TypeVarTuple',
- 'typing_extensions.TypeVarTuple'):
+ if isinstance(ob.value, ast.Call) and node2fullname(ob.value.func, ob) in (
+ 'typing.TypeVar',
+ 'typing_extensions.TypeVar',
+ 'typing.TypeVarTuple',
+ 'typing_extensions.TypeVarTuple',
+ ):
return True
return False
-
+
def _isTypeAlias(self, ob: model.Attribute) -> bool:
"""
Return C{True} if the Attribute is a type alias.
"""
if ob.value is not None:
- if is_using_annotations(ob.annotation, ('typing.TypeAlias',
- 'typing_extensions.TypeAlias'), ob):
+ if is_using_annotations(ob.annotation, ('typing.TypeAlias', 'typing_extensions.TypeAlias'), ob):
return True
if is_typing_annotation(ob.value, ob.parent):
return True
@@ -120,8 +161,8 @@ def _isTypeAlias(self, ob: model.Attribute) -> bool:
def visit_Assign(self, node: Union[ast.Assign, ast.AnnAssign]) -> None:
current = self.visitor.builder.current
- for dottedname in iterassign(node):
- if dottedname and len(dottedname)==1:
+ for dottedname in iterassign(node):
+ if dottedname and len(dottedname) == 1:
attr = current.contents.get(dottedname[0])
if attr is None:
return
@@ -130,27 +171,36 @@ def visit_Assign(self, node: Union[ast.Assign, ast.AnnAssign]) -> None:
if self._isTypeAlias(attr) is True:
attr.kind = model.DocumentableKind.TYPE_ALIAS
# unstring type aliases
- attr.value = upgrade_annotation(unstring_annotation(
- # this cast() is safe because _isTypeAlias() return True only if value is not None
- cast(ast.expr, attr.value), attr, section='type alias'), attr, section='type alias')
+ attr.value = upgrade_annotation(
+ unstring_annotation(
+ # this cast() is safe because _isTypeAlias() return True only if value is not None
+ cast(ast.expr, attr.value),
+ attr,
+ section='type alias',
+ ),
+ attr,
+ section='type alias',
+ )
elif self._isTypeVariable(attr) is True:
# TODO: unstring bound argument of type variables
attr.kind = model.DocumentableKind.TYPE_VARIABLE
-
+
visit_AnnAssign = visit_Assign
+
def is_attribute_overridden(obj: model.Attribute, new_value: Optional[ast.expr]) -> bool:
"""
Detect if the optional C{new_value} expression override the one already stored in the L{Attribute.value} attribute.
"""
return obj.value is not None and new_value is not None
+
def extract_final_subscript(annotation: ast.Subscript) -> ast.expr:
"""
Extract the "str" part from annotations like "Final[str]".
@raises ValueError: If the "Final" annotation is not valid.
- """
+ """
ann_slice = annotation.slice
if isinstance(ann_slice, (ast.Slice, ast.Tuple)):
raise ValueError("Annotation is invalid, it should not contain slices.")
@@ -158,6 +208,7 @@ def extract_final_subscript(annotation: ast.Subscript) -> ast.expr:
assert isinstance(ann_slice, ast.expr)
return ann_slice
+
class ModuleVistor(NodeVisitor):
def __init__(self, builder: 'ASTBuilder', module: model.Module):
@@ -166,13 +217,13 @@ def __init__(self, builder: 'ASTBuilder', module: model.Module):
self.system = builder.system
self.module = module
self._override_guard_state: Tuple[Optional[model.Documentable], Set[str]] = (None, set())
-
+
@contextlib.contextmanager
def override_guard(self) -> Iterator[None]:
"""
- Returns a context manager that will make the builder ignore any new
+ Returns a context manager that will make the builder ignore any new
assigments to existing names within the same context. Currently used to visit C{If.orelse} and C{Try.handlers}.
-
+
@note: The list of existing names is generated at the moment of
calling the function, such that new names defined inside these blocks follows the usual override rules.
"""
@@ -186,10 +237,10 @@ def override_guard(self) -> Iterator[None]:
self._override_guard_state = (ctx, set(ctx.localNames()))
yield
self._override_guard_state = ignore_override_init
-
- def _ignore_name(self, ob: model.Documentable, name:str) -> bool:
+
+ def _ignore_name(self, ob: model.Documentable, name: str) -> bool:
"""
- Should this C{name} be ignored because it matches
+ Should this C{name} be ignored because it matches
the override guard in the context of C{ob}?
"""
ctx, names = self._override_guard_state
@@ -201,17 +252,17 @@ def _infer_attr_annotations(self, scope: model.Documentable) -> None:
for attrib in scope.contents.values():
if not isinstance(attrib, model.Attribute):
continue
- # If this attribute has not explicit annotation,
+ # If this attribute has not explicit annotation,
# infer its type from it's ast expression.
if attrib.annotation is None and attrib.value is not None:
# do not override explicit annotation
attrib.annotation = infer_type(attrib.value)
-
+
def _tweak_constants_annotations(self, scope: model.Documentable) -> None:
# tweak constants annotations when we leave the scope so we can still
# check whether the annotation uses Final while we're visiting other nodes.
for attrib in scope.contents.values():
- if not isinstance(attrib, model.Attribute) or attrib.kind is not model.DocumentableKind.CONSTANT :
+ if not isinstance(attrib, model.Attribute) or attrib.kind is not model.DocumentableKind.CONSTANT:
continue
self._tweak_constant_annotation(attrib)
@@ -222,24 +273,24 @@ def visit_If(self, node: ast.If) -> None:
# whatever is declared in them cannot be imported
# and thus is not part of the API
raise self.SkipChildren()
-
+
def depart_If(self, node: ast.If) -> None:
# At this point the body of the If node has already been visited
# Visit the 'orelse' block of the If node, with override guard
with self.override_guard():
for n in node.orelse:
self.walkabout(n)
-
+
def depart_Try(self, node: ast.Try) -> None:
# At this point the body of the Try node has already been visited
# Visit the 'orelse' and 'finalbody' blocks of the Try node.
-
+
for n in node.orelse:
self.walkabout(n)
for n in node.finalbody:
self.walkabout(n)
-
- # Visit the handlers with override guard
+
+ # Visit the handlers with override guard
with self.override_guard():
for h in node.handlers:
for n in h.body:
@@ -277,29 +328,30 @@ def visit_ClassDef(self, node: ast.ClassDef) -> None:
# This handles generics in MRO, by extracting the first
# subscript value::
# class Visitor(MyGeneric[T]):...
- # 'MyGeneric' will be added to rawbases instead
+ # 'MyGeneric' will be added to rawbases instead
# of 'MyGeneric[T]' which cannot resolve to anything.
name_node = base_node
if isinstance(base_node, ast.Subscript):
name_node = base_node.value
-
- str_base = '.'.join(node2dottedname(name_node) or \
- # Fallback on unparse() if the expression is unknown by node2dottedname().
- [unparse(base_node).strip()])
-
+
+ str_base = '.'.join(
+ node2dottedname(name_node) # Fallback on unparse() if the expression is unknown by node2dottedname().
+ or [unparse(base_node).strip()]
+ )
+
# Store the base as string and as ast.expr in rawbases list.
rawbases += [(str_base, base_node)]
-
+
# Try to resolve the base, put None if could not resolve it,
# if we can't resolve it now, it most likely mean that there are
- # import cycles (maybe in TYPE_CHECKING blocks).
+ # import cycles (maybe in TYPE_CHECKING blocks).
# None bases will be re-resolved in post-processing.
expandbase = parent.expandName(str_base)
baseobj = self.system.objForFullName(expandbase)
-
+
if not isinstance(baseobj, model.Class):
baseobj = None
-
+
initialbases.append(expandbase)
initialbaseobjects.append(baseobj)
@@ -319,9 +371,9 @@ def visit_ClassDef(self, node: ast.ClassDef) -> None:
epydoc2stan.extract_fields(cls)
if node.decorator_list:
-
+
cls.raw_decorators = node.decorator_list
-
+
for decnode in node.decorator_list:
args: Optional[Sequence[ast.expr]]
if isinstance(decnode, ast.Call):
@@ -338,18 +390,15 @@ def visit_ClassDef(self, node: ast.ClassDef) -> None:
else:
cls.decorators.append((base, args))
-
- # We're not resolving the subclasses at this point yet because all
+ # We're not resolving the subclasses at this point yet because all
# modules might not have been processed, and since subclasses are only used in the presentation,
# it's better to resolve them in the post-processing instead.
-
def depart_ClassDef(self, node: ast.ClassDef) -> None:
self._tweak_constants_annotations(self.builder.current)
self._infer_attr_annotations(self.builder.current)
self.builder.popClass()
-
def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
ctx = self.builder.current
if not isinstance(ctx, model.CanContainImportsDocumentable):
@@ -369,10 +418,7 @@ def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
parent = parent.parent
if parent is None:
assert ctx.parentMod is not None
- ctx.parentMod.report(
- "relative import level (%d) too high" % node.level,
- lineno_offset=node.lineno
- )
+ ctx.parentMod.report("relative import level (%d) too high" % node.level, lineno_offset=node.lineno)
return
if modname is None:
modname = parent.fullName()
@@ -405,8 +451,7 @@ def _importAll(self, modname: str) -> None:
# names that are not private.
names = mod.all
if names is None:
- names = [ name for name in mod.localNames()
- if not name.startswith('_') ]
+ names = [name for name in mod.localNames() if not name.startswith('_')]
# Fetch names to export.
exports = self._getCurrentModuleExports()
@@ -438,9 +483,9 @@ def _getCurrentModuleExports(self) -> Collection[str]:
exports = []
return exports
- def _handleReExport(self, curr_mod_exports:Collection[str],
- origin_name:str, as_name:str,
- origin_module:model.Module) -> bool:
+ def _handleReExport(
+ self, curr_mod_exports: Collection[str], origin_name: str, as_name: str, origin_module: model.Module
+ ) -> bool:
"""
Move re-exported objects into current module.
@@ -451,17 +496,13 @@ def _handleReExport(self, curr_mod_exports:Collection[str],
modname = origin_module.fullName()
if as_name in curr_mod_exports:
# In case of duplicates names, we can't rely on resolveName,
- # So we use content.get first to resolve non-alias names.
+ # So we use content.get first to resolve non-alias names.
ob = origin_module.contents.get(origin_name) or origin_module.resolveName(origin_name)
if ob is None:
- current.report("cannot resolve re-exported name :"
- f'{modname}.{origin_name}', thresh=1)
+ current.report("cannot resolve re-exported name :" f'{modname}.{origin_name}', thresh=1)
else:
if origin_module.all is None or origin_name not in origin_module.all:
- self.system.msg(
- "astbuilder",
- "moving %r into %r" % (ob.fullName(), current.fullName())
- )
+ self.system.msg("astbuilder", "moving %r into %r" % (ob.fullName(), current.fullName()))
# Must be a Module since the exports is set to an empty list if it's not.
assert isinstance(current, model.Module)
ob.reparent(current, as_name)
@@ -484,11 +525,11 @@ def _importNames(self, modname: str, names: Iterable[ast.alias]) -> None:
orgname, asname = al.name, al.asname
if asname is None:
asname = orgname
-
+
# Ignore in override guard
if self._ignore_name(current, asname):
continue
-
+
# If we're importing from a package, make sure imported modules
# are processed (getProcessedModule() ignores non-modules).
if isinstance(mod, model.Package):
@@ -516,7 +557,7 @@ def visit_Import(self, node: ast.Import) -> None:
# processing import statement in odd context
return
_localNameToFullName = current._localNameToFullName_map
-
+
for al in node.names:
targetname, asname = al.name, al.asname
if asname is None:
@@ -537,7 +578,7 @@ def _handleOldSchoolMethodDecoration(self, target: str, expr: Optional[ast.expr]
args = expr.args
if len(args) != 1:
return False
- arg, = args
+ (arg,) = args
if not isinstance(arg, ast.Name):
return False
if target == arg.id and func_name in ['staticmethod', 'classmethod']:
@@ -555,11 +596,14 @@ def _handleOldSchoolMethodDecoration(self, target: str, expr: Optional[ast.expr]
return False
@classmethod
- def _handleConstant(cls, obj:model.Attribute,
- annotation:Optional[ast.expr],
- value:Optional[ast.expr],
- lineno:int,
- defaultKind:model.DocumentableKind) -> None:
+ def _handleConstant(
+ cls,
+ obj: model.Attribute,
+ annotation: Optional[ast.expr],
+ value: Optional[ast.expr],
+ lineno: int,
+ defaultKind: model.DocumentableKind,
+ ) -> None:
if is_constant(obj, annotation=annotation, value=value):
obj.kind = model.DocumentableKind.CONSTANT
# do not call tweak annotation just yet...
@@ -568,7 +612,7 @@ def _handleConstant(cls, obj:model.Attribute,
# declared as constants
if not is_using_typing_final(obj.annotation, obj):
obj.kind = defaultKind
-
+
@staticmethod
def _tweak_constant_annotation(obj: model.Attribute) -> None:
# Display variables annotated with Final with the real type instead.
@@ -578,7 +622,7 @@ def _tweak_constant_annotation(obj: model.Attribute) -> None:
try:
annotation = extract_final_subscript(annotation)
except ValueError as e:
- obj.report(str(e), section='ast', lineno_offset=annotation.lineno-obj.linenumber)
+ obj.report(str(e), section='ast', lineno_offset=annotation.lineno - obj.linenumber)
obj.annotation = infer_type(obj.value) if obj.value else None
else:
# Will not display as "Final[str]" but rather only "str"
@@ -589,35 +633,38 @@ def _tweak_constant_annotation(obj: model.Attribute) -> None:
obj.annotation = infer_type(obj.value) if obj.value else None
@staticmethod
- def _setAttributeAnnotation(obj: model.Attribute,
- annotation: Optional[ast.expr],) -> None:
+ def _setAttributeAnnotation(
+ obj: model.Attribute,
+ annotation: Optional[ast.expr],
+ ) -> None:
if annotation is not None:
# TODO: What to do when an attribute has several explicit annotations?
# (mypy reports a warning in these kind of cases)
obj.annotation = annotation
@staticmethod
- def _storeAttrValue(obj:model.Attribute, new_value:Optional[ast.expr],
- augassign:Optional[ast.operator]=None) -> None:
+ def _storeAttrValue(
+ obj: model.Attribute, new_value: Optional[ast.expr], augassign: Optional[ast.operator] = None
+ ) -> None:
if new_value:
- if augassign:
+ if augassign:
if obj.value:
- # We're storing the value of augmented assignemnt value as binop for the sake
+ # We're storing the value of augmented assignment value as binop for the sake
# of correctness, but we're not doing anything special with it at the
# moment, nonethless this could be useful for future developments.
# We don't bother reporting warnings, pydoctor is not a checker.
obj.value = ast.BinOp(left=obj.value, op=augassign, right=new_value)
else:
obj.value = new_value
-
-
- def _handleModuleVar(self,
- target: str,
- annotation: Optional[ast.expr],
- expr: Optional[ast.expr],
- lineno: int,
- augassign:Optional[ast.operator],
- ) -> None:
+
+ def _handleModuleVar(
+ self,
+ target: str,
+ annotation: Optional[ast.expr],
+ expr: Optional[ast.expr],
+ lineno: int,
+ augassign: Optional[ast.operator],
+ ) -> None:
if target in MODULE_VARIABLES_META_PARSERS:
# This is metadata, not a variable that needs to be documented,
# and therefore doesn't need an Attribute instance.
@@ -627,40 +674,39 @@ def _handleModuleVar(self,
if obj is None:
if augassign:
return
- obj = self.builder.addAttribute(name=target,
- kind=model.DocumentableKind.VARIABLE,
- parent=parent,
- lineno=lineno)
-
- # If it's not an attribute it means that the name is already denifed as function/class
- # probably meaning that this attribute is a bound callable.
+ obj = self.builder.addAttribute(
+ name=target, kind=model.DocumentableKind.VARIABLE, parent=parent, lineno=lineno
+ )
+
+ # If it's not an attribute it means that the name is already defined as function/class
+ # probably meaning that this attribute is a bound callable.
#
# def func(value, stock) -> int:...
# var = 2
# func = partial(func, value=var)
#
# We don't know how to handle this,
- # so we ignore it to document the original object. This means that we might document arguments
+ # so we ignore it to document the original object. This means that we might document arguments
# that are in reality not existing because they have values in a partial() call for instance.
if not isinstance(obj, model.Attribute):
raise IgnoreAssignment()
-
+
self._setAttributeAnnotation(obj, annotation)
-
+
obj.setLineNumber(lineno)
-
- self._handleConstant(obj, annotation, expr, lineno,
- model.DocumentableKind.VARIABLE)
+
+ self._handleConstant(obj, annotation, expr, lineno, model.DocumentableKind.VARIABLE)
self._storeAttrValue(obj, expr, augassign)
- def _handleAssignmentInModule(self,
- target: str,
- annotation: Optional[ast.expr],
- expr: Optional[ast.expr],
- lineno: int,
- augassign:Optional[ast.operator],
- ) -> None:
+ def _handleAssignmentInModule(
+ self,
+ target: str,
+ annotation: Optional[ast.expr],
+ expr: Optional[ast.expr],
+ lineno: int,
+ augassign: Optional[ast.operator],
+ ) -> None:
module = self.builder.current
assert isinstance(module, model.Module)
if not _handleAliasing(module, target, expr):
@@ -668,14 +714,15 @@ def _handleAssignmentInModule(self,
else:
raise IgnoreAssignment()
- def _handleClassVar(self,
- name: str,
- annotation: Optional[ast.expr],
- expr: Optional[ast.expr],
- lineno: int,
- augassign:Optional[ast.operator],
- ) -> None:
-
+ def _handleClassVar(
+ self,
+ name: str,
+ annotation: Optional[ast.expr],
+ expr: Optional[ast.expr],
+ lineno: int,
+ augassign: Optional[ast.operator],
+ ) -> None:
+
cls = self.builder.current
assert isinstance(cls, model.Class)
if not _maybeAttribute(cls, name):
@@ -693,21 +740,16 @@ def _handleClassVar(self,
obj.kind = model.DocumentableKind.CLASS_VARIABLE
self._setAttributeAnnotation(obj, annotation)
-
+
obj.setLineNumber(lineno)
- self._handleConstant(obj, annotation, expr, lineno,
- model.DocumentableKind.CLASS_VARIABLE)
+ self._handleConstant(obj, annotation, expr, lineno, model.DocumentableKind.CLASS_VARIABLE)
self._storeAttrValue(obj, expr, augassign)
-
- def _handleInstanceVar(self,
- name: str,
- annotation: Optional[ast.expr],
- expr: Optional[ast.expr],
- lineno: int
- ) -> None:
- if not (cls:=self._getClassFromMethodContext()):
+ def _handleInstanceVar(
+ self, name: str, annotation: Optional[ast.expr], expr: Optional[ast.expr], lineno: int
+ ) -> None:
+ if not (cls := self._getClassFromMethodContext()):
raise IgnoreAssignment()
if not _maybeAttribute(cls, name):
raise IgnoreAssignment()
@@ -726,13 +768,14 @@ def _handleInstanceVar(self,
obj.kind = model.DocumentableKind.INSTANCE_VARIABLE
self._storeAttrValue(obj, expr)
- def _handleAssignmentInClass(self,
- target: str,
- annotation: Optional[ast.expr],
- expr: Optional[ast.expr],
- lineno: int,
- augassign:Optional[ast.operator],
- ) -> None:
+ def _handleAssignmentInClass(
+ self,
+ target: str,
+ annotation: Optional[ast.expr],
+ expr: Optional[ast.expr],
+ lineno: int,
+ augassign: Optional[ast.operator],
+ ) -> None:
cls = self.builder.current
assert isinstance(cls, model.Class)
if not _handleAliasing(cls, target, expr):
@@ -740,11 +783,7 @@ def _handleAssignmentInClass(self,
else:
raise IgnoreAssignment()
- def _handleDocstringUpdate(self,
- targetNode: ast.expr,
- expr: Optional[ast.expr],
- lineno: int
- ) -> None:
+ def _handleDocstringUpdate(self, targetNode: ast.expr, expr: Optional[ast.expr], lineno: int) -> None:
def warn(msg: str) -> None:
module = self.builder.currentMod
assert module is not None
@@ -764,8 +803,9 @@ def warn(msg: str) -> None:
else:
obj = self.system.objForFullName(full_name)
if obj is None:
- warn("Unable to figure out target for __doc__ assignment: "
- "computed full name not found: " + full_name)
+ warn(
+ "Unable to figure out target for __doc__ assignment: " "computed full name not found: " + full_name
+ )
# Determine docstring value.
try:
@@ -775,8 +815,7 @@ def warn(msg: str) -> None:
raise ValueError()
docstring: object = ast.literal_eval(expr)
except ValueError:
- warn("Unable to figure out value for __doc__ assignment, "
- "maybe too complex")
+ warn("Unable to figure out value for __doc__ assignment, " "maybe too complex")
return
if not isinstance(docstring, str):
warn("Ignoring value assigned to __doc__: not a string")
@@ -788,13 +827,14 @@ def warn(msg: str) -> None:
# we have the final docstrings for all objects.
obj.parsed_docstring = None
- def _handleAssignment(self,
- targetNode: ast.expr,
- annotation: Optional[ast.expr],
- expr: Optional[ast.expr],
- lineno: int,
- augassign:Optional[ast.operator]=None,
- ) -> None:
+ def _handleAssignment(
+ self,
+ targetNode: ast.expr,
+ annotation: Optional[ast.expr],
+ expr: Optional[ast.expr],
+ lineno: int,
+ augassign: Optional[ast.operator] = None,
+ ) -> None:
"""
@raises IgnoreAssignment: If the assignemnt should not be further processed.
"""
@@ -826,12 +866,14 @@ def visit_Assign(self, node: ast.Assign) -> None:
if type_comment is None:
annotation = None
else:
- annotation = upgrade_annotation(unstring_annotation(
- ast.Constant(type_comment, lineno=lineno), self.builder.current), self.builder.current)
+ annotation = upgrade_annotation(
+ unstring_annotation(ast.Constant(type_comment, lineno=lineno), self.builder.current),
+ self.builder.current,
+ )
for target in node.targets:
try:
- if isTupleAssignment:=isinstance(target, ast.Tuple):
+ if isTupleAssignment := isinstance(target, ast.Tuple):
# TODO: Only one level of nested tuple is taken into account...
# ideally we would extract al the names declared in the lhs, not
# only the first level ones.
@@ -847,12 +889,13 @@ def visit_Assign(self, node: ast.Assign) -> None:
if not isTupleAssignment:
self._handleInlineDocstrings(node, target)
else:
- for elem in cast(ast.Tuple, target).elts: # mypy is not as smart as pyright yet.
+ for elem in cast(ast.Tuple, target).elts: # mypy is not as smart as pyright yet.
self._handleInlineDocstrings(node, elem)
def visit_AnnAssign(self, node: ast.AnnAssign) -> None:
- annotation = upgrade_annotation(unstring_annotation(
- node.annotation, self.builder.current), self.builder.current)
+ annotation = upgrade_annotation(
+ unstring_annotation(node.annotation, self.builder.current), self.builder.current
+ )
try:
self._handleAssignment(node.target, annotation, node.value, node.lineno)
except IgnoreAssignment:
@@ -868,12 +911,12 @@ def _getClassFromMethodContext(self) -> Optional[model.Class]:
if not isinstance(cls, model.Class):
return None
return cls
-
- def _contextualizeTarget(self, target:ast.expr) -> Tuple[model.Documentable, str]:
+
+ def _contextualizeTarget(self, target: ast.expr) -> Tuple[model.Documentable, str]:
"""
- Find out the documentatble wich is the parent of the assignment's target as well as it's name.
+ Find out the documentable which is the parent of the assignment's target as well as its name.
- @returns: Tuple C{parent, name}.
+ @returns: Tuple C{parent, name}.
@raises ValueError: if the target does not bind a new variable.
"""
dottedname = node2dottedname(target)
@@ -884,7 +927,7 @@ def _contextualizeTarget(self, target:ast.expr) -> Tuple[model.Documentable, str
# an instance variable.
# TODO: This currently only works if the first argument of methods
# is named 'self'.
- if (maybe_cls:=self._getClassFromMethodContext()) is None:
+ if (maybe_cls := self._getClassFromMethodContext()) is None:
raise ValueError('using self in unsupported context')
dottedname = dottedname[1:]
parent = maybe_cls
@@ -894,28 +937,26 @@ def _contextualizeTarget(self, target:ast.expr) -> Tuple[model.Documentable, str
parent = self.builder.current
return parent, dottedname[0]
- def _handleInlineDocstrings(self, assign:Union[ast.Assign, ast.AnnAssign], target:ast.expr) -> None:
+ def _handleInlineDocstrings(self, assign: Union[ast.Assign, ast.AnnAssign], target: ast.expr) -> None:
# Process the inline docstrings
try:
parent, name = self._contextualizeTarget(target)
except ValueError:
return
-
+
docstring_node = get_assign_docstring_node(assign)
if docstring_node:
# fetch the target of the inline docstring
attr = parent.contents.get(name)
if attr:
attr.setDocstring(docstring_node)
-
- def visit_AugAssign(self, node:ast.AugAssign) -> None:
+
+ def visit_AugAssign(self, node: ast.AugAssign) -> None:
try:
- self._handleAssignment(node.target, None, node.value,
- node.lineno, augassign=node.op)
+ self._handleAssignment(node.target, None, node.value, node.lineno, augassign=node.op)
except IgnoreAssignment:
pass
-
def visit_Expr(self, node: ast.Expr) -> None:
# Visit's ast.Expr.value with the visitor, used by extensions to visit top-level calls.
self.generic_visit(node)
@@ -926,10 +967,7 @@ def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
self._handleFunctionDef(node, is_async=False)
- def _handleFunctionDef(self,
- node: Union[ast.AsyncFunctionDef, ast.FunctionDef],
- is_async: bool
- ) -> None:
+ def _handleFunctionDef(self, node: Union[ast.AsyncFunctionDef, ast.FunctionDef], is_async: bool) -> None:
# Ignore inner functions.
parent = self.builder.current
if isinstance(parent, model.Function):
@@ -983,7 +1021,7 @@ def _handleFunctionDef(self,
attr.report(f'{attr.fullName()} is both property and classmethod')
if is_staticmethod:
attr.report(f'{attr.fullName()} is both property and staticmethod')
- raise self.SkipNode() # visitor extensions will still be called.
+ raise self.SkipNode() # visitor extensions will still be called.
# Check if it's a new func or exists with an overload
existing_func = parent.contents.get(func_name)
@@ -993,7 +1031,10 @@ def _handleFunctionDef(self,
# which we do not allow. This also ensures that func will have
# properties set for the primary function and not overloads.
if existing_func.signature and is_overload_func:
- existing_func.report(f'{existing_func.fullName()} overload appeared after primary function', lineno_offset=lineno-existing_func.linenumber)
+ existing_func.report(
+ f'{existing_func.fullName()} overload appeared after primary function',
+ lineno_offset=lineno - existing_func.linenumber,
+ )
raise self.IgnoreNode()
# Do not recreate function object, just re-push it
self.builder.push(existing_func, lineno)
@@ -1006,7 +1047,9 @@ def _handleFunctionDef(self,
# Docstring not allowed on overload
if is_overload_func:
docline = extract_docstring_linenum(doc_node)
- func.report(f'{func.fullName()} overload has docstring, unsupported', lineno_offset=docline-func.linenumber)
+ func.report(
+ f'{func.fullName()} overload has docstring, unsupported', lineno_offset=docline - func.linenumber
+ )
else:
func.setDocstring(doc_node)
func.decorators = node.decorator_list
@@ -1031,10 +1074,15 @@ def get_default(index: int) -> Optional[ast.expr]:
return None if index < 0 else defaults[index]
parameters: List[Parameter] = []
+
def add_arg(name: str, kind: Any, default: Optional[ast.expr]) -> None:
default_val = Parameter.empty if default is None else _ValueFormatter(default, ctx=func)
- # this cast() is safe since we're checking if annotations.get(name) is None first
- annotation = Parameter.empty if annotations.get(name) is None else _AnnotationValueFormatter(cast(ast.expr, annotations[name]), ctx=func)
+ # this cast() is safe since we're checking if annotations.get(name) is None first
+ annotation = (
+ Parameter.empty
+ if annotations.get(name) is None
+ else _AnnotationValueFormatter(cast(ast.expr, annotations[name]), ctx=func)
+ )
parameters.append(Parameter(name, kind, default=default_val, annotation=annotation))
for index, arg in enumerate(posonlyargs):
@@ -1056,7 +1104,11 @@ def add_arg(name: str, kind: Any, default: Optional[ast.expr]) -> None:
add_arg(kwarg.arg, Parameter.VAR_KEYWORD, None)
return_type = annotations.get('return')
- return_annotation = Parameter.empty if return_type is None or is_none_literal(return_type) else _AnnotationValueFormatter(return_type, ctx=func)
+ return_annotation = (
+ Parameter.empty
+ if return_type is None or is_none_literal(return_type)
+ else _AnnotationValueFormatter(return_type, ctx=func)
+ )
try:
signature = Signature(parameters, return_annotation=return_annotation)
except ValueError as ex:
@@ -1067,7 +1119,9 @@ def add_arg(name: str, kind: Any, default: Optional[ast.expr]) -> None:
# Only set main function signature if it is a non-overload
if is_overload_func:
- func.overloads.append(model.FunctionOverload(primary=func, signature=signature, decorators=node.decorator_list))
+ func.overloads.append(
+ model.FunctionOverload(primary=func, signature=signature, decorators=node.decorator_list)
+ )
else:
func.signature = signature
@@ -1077,16 +1131,13 @@ def depart_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
def depart_FunctionDef(self, node: ast.FunctionDef) -> None:
self.builder.popFunction()
- def _handlePropertyDef(self,
- node: Union[ast.AsyncFunctionDef, ast.FunctionDef],
- doc_node: Optional[Str],
- lineno: int
- ) -> model.Attribute:
+ def _handlePropertyDef(
+ self, node: Union[ast.AsyncFunctionDef, ast.FunctionDef], doc_node: Optional[Str], lineno: int
+ ) -> model.Attribute:
- attr = self.builder.addAttribute(name=node.name,
- kind=model.DocumentableKind.PROPERTY,
- parent=self.builder.current,
- lineno=lineno)
+ attr = self.builder.addAttribute(
+ name=node.name, kind=model.DocumentableKind.PROPERTY, parent=self.builder.current, lineno=lineno
+ )
attr.setLineNumber(lineno)
if doc_node is not None:
@@ -1114,14 +1165,15 @@ def _handlePropertyDef(self,
return attr
def _annotations_from_function(
- self, func: Union[ast.AsyncFunctionDef, ast.FunctionDef]
- ) -> Mapping[str, Optional[ast.expr]]:
+ self, func: Union[ast.AsyncFunctionDef, ast.FunctionDef]
+ ) -> Mapping[str, Optional[ast.expr]]:
"""Get annotations from a function definition.
@param func: The function definition's AST.
@return: Mapping from argument name to annotation.
The name C{return} is used for the return type.
Unannotated arguments are omitted.
"""
+
def _get_all_args() -> Iterator[ast.arg]:
base_args = func.args
yield from base_args.posonlyargs
@@ -1135,21 +1187,27 @@ def _get_all_args() -> Iterator[ast.arg]:
if kwargs:
kwargs.arg = epydoc2stan.KeywordArgument(kwargs.arg)
yield kwargs
+
def _get_all_ast_annotations() -> Iterator[Tuple[str, Optional[ast.expr]]]:
for arg in _get_all_args():
yield arg.arg, arg.annotation
returns = func.returns
if returns:
yield 'return', returns
+
return {
# Include parameter names even if they're not annotated, so that
# we can use the key set to know which parameters exist and warn
# when non-existing parameters are documented.
- name: None if value is None else upgrade_annotation(unstring_annotation(
- value, self.builder.current), self.builder.current)
+ name: (
+ None
+ if value is None
+ else upgrade_annotation(unstring_annotation(value, self.builder.current), self.builder.current)
+ )
for name, value in _get_all_ast_annotations()
- }
-
+ }
+
+
class _ValueFormatter:
"""
Class to encapsulate a python value and translate it to HTML when calling L{repr()} on the L{_ValueFormatter}.
@@ -1169,50 +1227,53 @@ def __init__(self, value: ast.expr, ctx: model.Documentable):
def __repr__(self) -> str:
"""
- Present the python value as HTML.
+ Present the python value as HTML.
Without the englobing tags.
"""
- # Using node2stan.node2html instead of flatten(to_stan()).
- # This avoids calling flatten() twice,
+ # Using node2stan.node2html instead of flatten(to_stan()).
+ # This avoids calling flatten() twice,
# but potential XML parser errors caused by XMLString needs to be handled later.
return ''.join(node2stan.node2html(self._colorized.to_node(), self._linker))
+
class _AnnotationValueFormatter(_ValueFormatter):
"""
Special L{_ValueFormatter} for function annotations.
"""
+
def __init__(self, value: ast.expr, ctx: model.Function):
super().__init__(value, ctx)
self._linker = linker._AnnotationLinker(ctx)
-
+
def __repr__(self) -> str:
"""
Present the annotation wrapped inside tags.
"""
return '%s' % super().__repr__()
+
DocumentableT = TypeVar('DocumentableT', bound=model.Documentable)
+
class ASTBuilder:
"""
Keeps tracks of the state of the AST build, creates documentable and adds objects to the system.
"""
+
ModuleVistor = ModuleVistor
def __init__(self, system: model.System):
self.system = system
-
- self.current = cast(model.Documentable, None) # current visited object.
- self.currentMod: Optional[model.Module] = None # current module, set when visiting ast.Module.
-
+
+ self.current = cast(model.Documentable, None) # current visited object.
+ self.currentMod: Optional[model.Module] = None # current module, set when visiting ast.Module.
+
self._stack: List[model.Documentable] = []
self.ast_cache: Dict[Path, Optional[ast.Module]] = {}
- def _push(self,
- cls: Type[DocumentableT],
- name: str,
- lineno: int,
- parent:Optional[model.Documentable]=None) -> DocumentableT:
+ def _push(
+ self, cls: Type[DocumentableT], name: str, lineno: int, parent: Optional[model.Documentable] = None
+ ) -> DocumentableT:
"""
Create and enter a new object of the given type and add it to the system.
@@ -1220,7 +1281,7 @@ def _push(self,
Used for attributes declared in methods, typically ``__init__``.
"""
obj = cls(self.system, name, parent or self.current)
- self.push(obj, lineno)
+ self.push(obj, lineno)
# make sure push() is called before addObject() since addObject() can trigger a warning for duplicates
# and this relies on the correct parentMod attribute, which is set in push().
self.system.addObject(obj)
@@ -1282,12 +1343,9 @@ def popFunction(self) -> None:
"""
self._pop(self.system.Function)
- def addAttribute(self,
- name: str,
- kind: Optional[model.DocumentableKind],
- parent: model.Documentable,
- lineno: int
- ) -> model.Attribute:
+ def addAttribute(
+ self, name: str, kind: Optional[model.DocumentableKind], parent: model.Documentable, lineno: int
+ ) -> model.Attribute:
"""
Add a new attribute to the system.
"""
@@ -1296,7 +1354,6 @@ def addAttribute(self,
attr.kind = kind
return attr
-
def processModuleAST(self, mod_ast: ast.Module, mod: model.Module) -> None:
for name, node in findModuleLevelAssign(mod_ast):
@@ -1324,8 +1381,8 @@ def parseFile(self, path: Path, ctx: model.Module) -> Optional[ast.Module]:
self.ast_cache[path] = mod
return mod
-
- def parseString(self, py_string:str, ctx: model.Module) -> Optional[ast.Module]:
+
+ def parseString(self, py_string: str, ctx: model.Module) -> Optional[ast.Module]:
mod = None
try:
mod = _parse(py_string)
@@ -1333,27 +1390,26 @@ def parseString(self, py_string:str, ctx: model.Module) -> Optional[ast.Module]:
ctx.report("cannot parse string")
return mod
+
model.System.defaultBuilder = ASTBuilder
+
def findModuleLevelAssign(mod_ast: ast.Module) -> Iterator[Tuple[str, ast.Assign]]:
"""
- Find module level Assign.
+ Find module level Assign.
Yields tuples containing the assigment name and the Assign node.
"""
for node in mod_ast.body:
- if isinstance(node, ast.Assign) and \
- len(node.targets) == 1 and \
- isinstance(node.targets[0], ast.Name):
- yield (node.targets[0].id, node)
+ if isinstance(node, ast.Assign) and len(node.targets) == 1 and isinstance(node.targets[0], ast.Name):
+ yield (node.targets[0].id, node)
+
def parseAll(node: ast.Assign, mod: model.Module) -> None:
- """Find and attempt to parse into a list of names the
+ """Find and attempt to parse into a list of names the
C{__all__} variable of a module's AST and set L{Module.all} accordingly."""
if not isinstance(node.value, (ast.List, ast.Tuple)):
- mod.report(
- 'Cannot parse value assigned to "__all__"',
- section='all', lineno_offset=node.lineno)
+ mod.report('Cannot parse value assigned to "__all__"', section='all', lineno_offset=node.lineno)
return
names = []
@@ -1361,29 +1417,27 @@ def parseAll(node: ast.Assign, mod: model.Module) -> None:
try:
name: object = ast.literal_eval(item)
except ValueError:
- mod.report(
- f'Cannot parse element {idx} of "__all__"',
- section='all', lineno_offset=node.lineno)
+ mod.report(f'Cannot parse element {idx} of "__all__"', section='all', lineno_offset=node.lineno)
else:
if isinstance(name, str):
names.append(name)
else:
mod.report(
- f'Element {idx} of "__all__" has '
- f'type "{type(name).__name__}", expected "str"',
- section='all', lineno_offset=node.lineno)
+ f'Element {idx} of "__all__" has ' f'type "{type(name).__name__}", expected "str"',
+ section='all',
+ lineno_offset=node.lineno,
+ )
if mod.all is not None:
- mod.report(
- 'Assignment to "__all__" overrides previous assignment',
- section='all', lineno_offset=node.lineno)
+ mod.report('Assignment to "__all__" overrides previous assignment', section='all', lineno_offset=node.lineno)
mod.all = names
+
def parseDocformat(node: ast.Assign, mod: model.Module) -> None:
"""
- Find C{__docformat__} variable of this
+ Find C{__docformat__} variable of this
module's AST and set L{Module.docformat} accordingly.
-
+
This is all valid::
__docformat__ = "reStructuredText en"
@@ -1396,37 +1450,46 @@ def parseDocformat(node: ast.Assign, mod: model.Module) -> None:
except ValueError:
mod.report(
'Cannot parse value assigned to "__docformat__": not a string',
- section='docformat', lineno_offset=node.lineno)
+ section='docformat',
+ lineno_offset=node.lineno,
+ )
return
-
+
if not isinstance(value, str):
mod.report(
'Cannot parse value assigned to "__docformat__": not a string',
- section='docformat', lineno_offset=node.lineno)
+ section='docformat',
+ lineno_offset=node.lineno,
+ )
return
-
+
if not value.strip():
mod.report(
'Cannot parse value assigned to "__docformat__": empty value',
- section='docformat', lineno_offset=node.lineno)
+ section='docformat',
+ lineno_offset=node.lineno,
+ )
return
-
+
# Language is ignored and parser name is lowercased.
value = value.split(" ", 1)[0].lower()
if mod._docformat is not None:
mod.report(
'Assignment to "__docformat__" overrides previous assignment',
- section='docformat', lineno_offset=node.lineno)
+ section='docformat',
+ lineno_offset=node.lineno,
+ )
mod.docformat = value
+
MODULE_VARIABLES_META_PARSERS: Mapping[str, Callable[[ast.Assign, model.Module], None]] = {
'__all__': parseAll,
- '__docformat__': parseDocformat
+ '__docformat__': parseDocformat,
}
-def setup_pydoctor_extension(r:extensions.ExtRegistrar) -> None:
+def setup_pydoctor_extension(r: extensions.ExtRegistrar) -> None:
r.register_astbuilder_visitor(TypeAliasVisitorExt)
r.register_post_processor(model.defaultPostProcess, priority=200)
diff --git a/pydoctor/astutils.py b/pydoctor/astutils.py
index 2163c841b..9a1c89ed5 100644
--- a/pydoctor/astutils.py
+++ b/pydoctor/astutils.py
@@ -1,12 +1,26 @@
"""
Various bits of reusable code related to L{ast.AST} node processing.
"""
+
from __future__ import annotations
import inspect
import sys
from numbers import Number
-from typing import Any, Callable, Collection, Iterator, Optional, List, Iterable, Sequence, TYPE_CHECKING, Tuple, Union, cast
+from typing import (
+ Any,
+ Callable,
+ Collection,
+ Iterator,
+ Optional,
+ List,
+ Iterable,
+ Sequence,
+ TYPE_CHECKING,
+ Tuple,
+ Union,
+ cast,
+)
from inspect import BoundArguments, Signature
import ast
@@ -19,6 +33,7 @@
# AST visitors
+
def iter_values(node: ast.AST) -> Iterator[ast.AST]:
for _, value in ast.iter_fields(node):
if isinstance(value, list):
@@ -28,18 +43,20 @@ def iter_values(node: ast.AST) -> Iterator[ast.AST]:
elif isinstance(value, ast.AST):
yield value
+
class NodeVisitor(visitor.PartialVisitor[ast.AST]):
"""
- Generic AST node visitor. This class does not work like L{ast.NodeVisitor},
+ Generic AST node visitor. This class does not work like L{ast.NodeVisitor},
it only visits statements directly within a C{B{body}}. Also, visitor methods can't return anything.
:See: L{visitor} for more informations.
"""
+
def generic_visit(self, node: ast.AST) -> None:
"""
- Helper method to visit a node by calling C{visit()} on each child of the node.
- This is useful because this vistitor only visits statements inside C{.body} attribute.
-
+ Helper method to visit a node by calling C{visit()} on each child of the node.
+ This is useful because this vistitor only visits statements inside C{.body} attribute.
+
So if one wants to visit L{ast.Expr} children with their visitor, they should include::
def visit_Expr(self, node:ast.Expr):
@@ -47,7 +64,7 @@ def visit_Expr(self, node:ast.Expr):
"""
for v in iter_values(node):
self.visit(v)
-
+
@classmethod
def get_children(cls, node: ast.AST) -> Iterable[ast.AST]:
"""
@@ -58,13 +75,16 @@ def get_children(cls, node: ast.AST) -> Iterable[ast.AST]:
for child in body:
yield child
-class NodeVisitorExt(visitor.VisitorExt[ast.AST]):
- ...
+
+class NodeVisitorExt(visitor.VisitorExt[ast.AST]): ...
+
_AssingT = Union[ast.Assign, ast.AnnAssign]
-def iterassign(node:_AssingT) -> Iterator[Optional[List[str]]]:
+
+
+def iterassign(node: _AssingT) -> Iterator[Optional[List[str]]]:
"""
- Utility function to iterate assignments targets.
+ Utility function to iterate assignments targets.
Useful for all the following AST assignments:
@@ -82,15 +102,16 @@ def iterassign(node:_AssingT) -> Iterator[Optional[List[str]]]:
>>> from ast import parse
>>> node = parse('self.var = target = thing[0] = node.astext()').body[0]
>>> list(iterassign(node))
-
+
"""
for target in node.targets if isinstance(node, ast.Assign) else [node.target]:
- dottedname = node2dottedname(target)
+ dottedname = node2dottedname(target)
yield dottedname
+
def node2dottedname(node: Optional[ast.AST]) -> Optional[List[str]]:
"""
- Resove expression composed by L{ast.Attribute} and L{ast.Name} nodes to a list of names.
+ Resove expression composed by L{ast.Attribute} and L{ast.Name} nodes to a list of names.
"""
parts = []
while isinstance(node, ast.Attribute):
@@ -103,10 +124,10 @@ def node2dottedname(node: Optional[ast.AST]) -> Optional[List[str]]:
parts.reverse()
return parts
-def node2fullname(expr: Optional[ast.AST],
- ctx: model.Documentable | None = None,
- *,
- expandName:Callable[[str], str] | None = None) -> Optional[str]:
+
+def node2fullname(
+ expr: Optional[ast.AST], ctx: model.Documentable | None = None, *, expandName: Callable[[str], str] | None = None
+) -> Optional[str]:
if expandName is None:
if ctx is None:
raise TypeError('this function takes exactly two arguments')
@@ -119,6 +140,7 @@ def node2fullname(expr: Optional[ast.AST],
return None
return expandName('.'.join(dottedname))
+
def bind_args(sig: Signature, call: ast.Call) -> BoundArguments:
"""
Binds the arguments of a function call to that function's signature.
@@ -130,49 +152,56 @@ def bind_args(sig: Signature, call: ast.Call) -> BoundArguments:
# When keywords are passed using '**kwargs', the 'arg' field will
# be None. We don't currently support keywords passed that way.
if kw.arg is not None
- }
+ }
return sig.bind(*call.args, **kwargs)
-def get_str_value(expr:ast.expr) -> Optional[str]:
+def get_str_value(expr: ast.expr) -> Optional[str]:
if isinstance(expr, ast.Constant) and isinstance(expr.value, str):
return expr.value
return None
-def get_num_value(expr:ast.expr) -> Optional[Number]:
+
+
+def get_num_value(expr: ast.expr) -> Optional[Number]:
if isinstance(expr, ast.Constant) and isinstance(expr.value, Number):
return expr.value
return None
+
+
def _is_str_constant(expr: ast.expr, s: str) -> bool:
return isinstance(expr, ast.Constant) and expr.value == s
+
def get_int_value(expr: ast.expr) -> Optional[int]:
num = get_num_value(expr)
if isinstance(num, int):
- return num # type:ignore[unreachable]
+ return num # type:ignore[unreachable]
return None
+
def is__name__equals__main__(cmp: ast.Compare) -> bool:
"""
Returns whether or not the given L{ast.Compare} is equal to C{__name__ == '__main__'}.
"""
- return isinstance(cmp.left, ast.Name) \
- and cmp.left.id == '__name__' \
- and len(cmp.ops) == 1 \
- and isinstance(cmp.ops[0], ast.Eq) \
- and len(cmp.comparators) == 1 \
- and _is_str_constant(cmp.comparators[0], '__main__')
+ return (
+ isinstance(cmp.left, ast.Name)
+ and cmp.left.id == '__name__'
+ and len(cmp.ops) == 1
+ and isinstance(cmp.ops[0], ast.Eq)
+ and len(cmp.comparators) == 1
+ and _is_str_constant(cmp.comparators[0], '__main__')
+ )
+
-def is_using_typing_final(expr: Optional[ast.AST],
- ctx:'model.Documentable') -> bool:
+def is_using_typing_final(expr: Optional[ast.AST], ctx: 'model.Documentable') -> bool:
return is_using_annotations(expr, ("typing.Final", "typing_extensions.Final"), ctx)
-def is_using_typing_classvar(expr: Optional[ast.AST],
- ctx:'model.Documentable') -> bool:
+
+def is_using_typing_classvar(expr: Optional[ast.AST], ctx: 'model.Documentable') -> bool:
return is_using_annotations(expr, ('typing.ClassVar', "typing_extensions.ClassVar"), ctx)
-def is_using_annotations(expr: Optional[ast.AST],
- annotations:Sequence[str],
- ctx:'model.Documentable') -> bool:
+
+def is_using_annotations(expr: Optional[ast.AST], annotations: Sequence[str], ctx: 'model.Documentable') -> bool:
"""
Detect if this expr is firstly composed by one of the specified annotation(s)' full name.
"""
@@ -188,10 +217,11 @@ def is_using_annotations(expr: Optional[ast.AST],
return True
return False
+
def get_node_block(node: ast.AST) -> tuple[ast.AST, str]:
"""
- Tell in wich block the given node lives in.
-
+ Tell in wich block the given node lives in.
+
A block is defined by a tuple: (parent node, fieldname)
"""
try:
@@ -205,7 +235,8 @@ def get_node_block(node: ast.AST) -> tuple[ast.AST, str]:
raise ValueError(f"node {node} not found in {parent}")
return parent, fieldname
-def get_assign_docstring_node(assign:ast.Assign | ast.AnnAssign) -> Str | None:
+
+def get_assign_docstring_node(assign: ast.Assign | ast.AnnAssign) -> Str | None:
"""
Get the docstring for a L{ast.Assign} or L{ast.AnnAssign} node.
@@ -215,25 +246,26 @@ def get_assign_docstring_node(assign:ast.Assign | ast.AnnAssign) -> Str | None:
# if this call raises an ValueError it means that we're doing something nasty with the ast...
parent_node, fieldname = get_node_block(assign)
statements = getattr(parent_node, fieldname, None)
-
+
if isinstance(statements, Sequence):
- # it must be a sequence if it's not None since an assignment
+ # it must be a sequence if it's not None since an assignment
# can only be a part of a compound statement.
assign_index = statements.index(assign)
try:
- right_sibling = statements[assign_index+1]
+ right_sibling = statements[assign_index + 1]
except IndexError:
return None
- if isinstance(right_sibling, ast.Expr) and \
- get_str_value(right_sibling.value) is not None:
+ if isinstance(right_sibling, ast.Expr) and get_str_value(right_sibling.value) is not None:
return cast(Str, right_sibling.value)
return None
+
def is_none_literal(node: ast.expr) -> bool:
"""Does this AST node represent the literal constant None?"""
return isinstance(node, ast.Constant) and node.value is None
-
-def unstring_annotation(node: ast.expr, ctx:'model.Documentable', section:str='annotation') -> ast.expr:
+
+
+def unstring_annotation(node: ast.expr, ctx: 'model.Documentable', section: str = 'annotation') -> ast.expr:
"""Replace all strings in the given expression by parsed versions.
@return: The unstringed node. If parsing fails, an error is logged
and the original node is returned.
@@ -249,6 +281,7 @@ def unstring_annotation(node: ast.expr, ctx:'model.Documentable', section:str='a
assert isinstance(expr, ast.expr), expr
return expr
+
class _AnnotationStringParser(ast.NodeTransformer):
"""Implementation of L{unstring_annotation()}.
@@ -262,7 +295,7 @@ def _parse_string(self, value: str) -> ast.expr:
statements = ast.parse(value).body
if len(statements) != 1:
raise SyntaxError("expected expression, found multiple statements")
- stmt, = statements
+ (stmt,) = statements
if isinstance(stmt, ast.Expr):
# Expression wrapped in an Expr statement.
expr = self.visit(stmt.value)
@@ -286,7 +319,7 @@ def visit_Subscript(self, node: ast.Subscript) -> ast.Subscript:
def visit_fast(self, node: ast.expr) -> ast.expr:
return node
-
+
visit_Attribute = visit_Name = visit_fast
def visit_Constant(self, node: ast.Constant) -> ast.expr:
@@ -298,29 +331,33 @@ def visit_Constant(self, node: ast.Constant) -> ast.expr:
assert isinstance(const, ast.Constant), const
return const
-def upgrade_annotation(node: ast.expr, ctx: model.Documentable, section:str='annotation') -> ast.expr:
+
+def upgrade_annotation(node: ast.expr, ctx: model.Documentable, section: str = 'annotation') -> ast.expr:
"""
- Transform the annotation to use python 3.10+ syntax.
+ Transform the annotation to use python 3.10+ syntax.
"""
return _UpgradeDeprecatedAnnotations(ctx).visit(node)
+
class _UpgradeDeprecatedAnnotations(ast.NodeTransformer):
if TYPE_CHECKING:
- def visit(self, node:ast.AST) -> ast.expr:...
+
+ def visit(self, node: ast.AST) -> ast.expr: ...
def __init__(self, ctx: model.Documentable) -> None:
- def _node2fullname(node:ast.expr) -> str | None:
+ def _node2fullname(node: ast.expr) -> str | None:
return node2fullname(node, expandName=ctx.expandAnnotationName)
+
self.node2fullname = _node2fullname
- def _union_args_to_bitor(self, args: list[ast.expr], ctxnode:ast.AST) -> ast.BinOp:
+ def _union_args_to_bitor(self, args: list[ast.expr], ctxnode: ast.AST) -> ast.BinOp:
assert len(args) > 1
*others, right = args
if len(others) == 1:
rnode = ast.BinOp(left=others[0], right=right, op=ast.BitOr())
else:
rnode = ast.BinOp(left=self._union_args_to_bitor(others, ctxnode), right=right, op=ast.BitOr())
-
+
return ast.fix_missing_locations(ast.copy_location(rnode, ctxnode))
def visit_Name(self, node: ast.Name | ast.Attribute) -> Any:
@@ -328,7 +365,7 @@ def visit_Name(self, node: ast.Name | ast.Attribute) -> Any:
if fullName in DEPRECATED_TYPING_ALIAS_BUILTINS:
return ast.Name(id=DEPRECATED_TYPING_ALIAS_BUILTINS[fullName], ctx=ast.Load())
# TODO: Support all deprecated aliases including the ones in the collections.abc module.
- # In order to support that we need to generate the parsed docstring directly and include
+ # In order to support that we need to generate the parsed docstring directly and include
# custom refmap or transform the ast such that missing imports are added.
return node
@@ -338,9 +375,9 @@ def visit_Subscript(self, node: ast.Subscript) -> ast.expr:
node.value = self.visit(node.value)
node.slice = self.visit(node.slice)
fullName = self.node2fullname(node.value)
-
+
if fullName == 'typing.Union':
- # typing.Union can be used with a single type or a
+ # typing.Union can be used with a single type or a
# tuple of types, includea single element tuple, which is the same
# as the directly using the type: Union[x] == Union[(x,)] == x
slice_ = node.slice
@@ -352,7 +389,7 @@ def visit_Subscript(self, node: ast.Subscript) -> ast.expr:
return args[0]
elif isinstance(slice_, (ast.Attribute, ast.Name, ast.Subscript, ast.BinOp)):
return slice_
-
+
elif fullName == 'typing.Optional':
# typing.Optional requires a single type, so we don't process when slice is a tuple.
slice_ = node.slice
@@ -360,15 +397,16 @@ def visit_Subscript(self, node: ast.Subscript) -> ast.expr:
return self._union_args_to_bitor([slice_, ast.Constant(value=None)], node)
return node
-
+
+
DEPRECATED_TYPING_ALIAS_BUILTINS = {
- "typing.Text": 'str',
- "typing.Dict": 'dict',
- "typing.Tuple": 'tuple',
- "typing.Type": 'type',
- "typing.List": 'list',
- "typing.Set": 'set',
- "typing.FrozenSet": 'frozenset',
+ "typing.Text": 'str',
+ "typing.Dict": 'dict',
+ "typing.Tuple": 'tuple',
+ "typing.Type": 'type',
+ "typing.List": 'list',
+ "typing.Set": 'set',
+ "typing.FrozenSet": 'frozenset',
}
# These do not belong in the deprecated builtins aliases, so we make sure it doesn't happen.
@@ -376,100 +414,103 @@ def visit_Subscript(self, node: ast.Subscript) -> ast.expr:
assert 'typing.Optional' not in DEPRECATED_TYPING_ALIAS_BUILTINS
TYPING_ALIAS = (
- "typing.Hashable",
- "typing.Awaitable",
- "typing.Coroutine",
- "typing.AsyncIterable",
- "typing.AsyncIterator",
- "typing.Iterable",
- "typing.Iterator",
- "typing.Reversible",
- "typing.Sized",
- "typing.Container",
- "typing.Collection",
- "typing.Callable",
- "typing.AbstractSet",
- "typing.MutableSet",
- "typing.Mapping",
- "typing.MutableMapping",
- "typing.Sequence",
- "typing.MutableSequence",
- "typing.ByteString",
- "typing.Deque",
- "typing.MappingView",
- "typing.KeysView",
- "typing.ItemsView",
- "typing.ValuesView",
- "typing.ContextManager",
- "typing.AsyncContextManager",
- "typing.DefaultDict",
- "typing.OrderedDict",
- "typing.Counter",
- "typing.ChainMap",
- "typing.Generator",
- "typing.AsyncGenerator",
- "typing.Pattern",
- "typing.Match",
- # Special forms
- "typing.Union",
- "typing.Literal",
- "typing.Optional",
- *DEPRECATED_TYPING_ALIAS_BUILTINS,
- )
+ "typing.Hashable",
+ "typing.Awaitable",
+ "typing.Coroutine",
+ "typing.AsyncIterable",
+ "typing.AsyncIterator",
+ "typing.Iterable",
+ "typing.Iterator",
+ "typing.Reversible",
+ "typing.Sized",
+ "typing.Container",
+ "typing.Collection",
+ "typing.Callable",
+ "typing.AbstractSet",
+ "typing.MutableSet",
+ "typing.Mapping",
+ "typing.MutableMapping",
+ "typing.Sequence",
+ "typing.MutableSequence",
+ "typing.ByteString",
+ "typing.Deque",
+ "typing.MappingView",
+ "typing.KeysView",
+ "typing.ItemsView",
+ "typing.ValuesView",
+ "typing.ContextManager",
+ "typing.AsyncContextManager",
+ "typing.DefaultDict",
+ "typing.OrderedDict",
+ "typing.Counter",
+ "typing.ChainMap",
+ "typing.Generator",
+ "typing.AsyncGenerator",
+ "typing.Pattern",
+ "typing.Match",
+ # Special forms
+ "typing.Union",
+ "typing.Literal",
+ "typing.Optional",
+ *DEPRECATED_TYPING_ALIAS_BUILTINS,
+)
SUBSCRIPTABLE_CLASSES_PEP585 = (
- "tuple",
- "list",
- "dict",
- "set",
- "frozenset",
- "type",
- "builtins.tuple",
- "builtins.list",
- "builtins.dict",
- "builtins.set",
- "builtins.frozenset",
- "builtins.type",
- "collections.deque",
- "collections.defaultdict",
- "collections.OrderedDict",
- "collections.Counter",
- "collections.ChainMap",
- "collections.abc.Awaitable",
- "collections.abc.Coroutine",
- "collections.abc.AsyncIterable",
- "collections.abc.AsyncIterator",
- "collections.abc.AsyncGenerator",
- "collections.abc.Iterable",
- "collections.abc.Iterator",
- "collections.abc.Generator",
- "collections.abc.Reversible",
- "collections.abc.Container",
- "collections.abc.Collection",
- "collections.abc.Callable",
- "collections.abc.Set",
- "collections.abc.MutableSet",
- "collections.abc.Mapping",
- "collections.abc.MutableMapping",
- "collections.abc.Sequence",
- "collections.abc.MutableSequence",
- "collections.abc.ByteString",
- "collections.abc.MappingView",
- "collections.abc.KeysView",
- "collections.abc.ItemsView",
- "collections.abc.ValuesView",
- "contextlib.AbstractContextManager",
- "contextlib.AbstractAsyncContextManager",
- "re.Pattern",
- "re.Match",
- )
+ "tuple",
+ "list",
+ "dict",
+ "set",
+ "frozenset",
+ "type",
+ "builtins.tuple",
+ "builtins.list",
+ "builtins.dict",
+ "builtins.set",
+ "builtins.frozenset",
+ "builtins.type",
+ "collections.deque",
+ "collections.defaultdict",
+ "collections.OrderedDict",
+ "collections.Counter",
+ "collections.ChainMap",
+ "collections.abc.Awaitable",
+ "collections.abc.Coroutine",
+ "collections.abc.AsyncIterable",
+ "collections.abc.AsyncIterator",
+ "collections.abc.AsyncGenerator",
+ "collections.abc.Iterable",
+ "collections.abc.Iterator",
+ "collections.abc.Generator",
+ "collections.abc.Reversible",
+ "collections.abc.Container",
+ "collections.abc.Collection",
+ "collections.abc.Callable",
+ "collections.abc.Set",
+ "collections.abc.MutableSet",
+ "collections.abc.Mapping",
+ "collections.abc.MutableMapping",
+ "collections.abc.Sequence",
+ "collections.abc.MutableSequence",
+ "collections.abc.ByteString",
+ "collections.abc.MappingView",
+ "collections.abc.KeysView",
+ "collections.abc.ItemsView",
+ "collections.abc.ValuesView",
+ "contextlib.AbstractContextManager",
+ "contextlib.AbstractAsyncContextManager",
+ "re.Pattern",
+ "re.Match",
+)
+
def is_typing_annotation(node: ast.AST, ctx: 'model.Documentable') -> bool:
"""
Whether this annotation node refers to a typing alias.
"""
- return is_using_annotations(node, TYPING_ALIAS, ctx) or \
- is_using_annotations(node, SUBSCRIPTABLE_CLASSES_PEP585, ctx)
+ return is_using_annotations(node, TYPING_ALIAS, ctx) or is_using_annotations(
+ node, SUBSCRIPTABLE_CLASSES_PEP585, ctx
+ )
+
def get_docstring_node(node: ast.AST) -> Str | None:
"""
@@ -484,15 +525,17 @@ def get_docstring_node(node: ast.AST) -> Str | None:
return node.value
return None
+
class _StrMeta(type):
def __instancecheck__(self, instance: object) -> bool:
if isinstance(instance, ast.expr):
return get_str_value(instance) is not None
return False
+
class Str(ast.expr, metaclass=_StrMeta):
"""
- Wraps ast.Constant/ast.Str for `isinstance` checks and annotations.
+ Wraps ast.Constant/ast.Str for `isinstance` checks and annotations.
Ensures that the value is actually a string.
Do not try to instanciate this class.
"""
@@ -502,6 +545,7 @@ class Str(ast.expr, metaclass=_StrMeta):
def __init__(self, *args: Any, **kwargs: Any) -> None:
raise TypeError(f'{Str.__qualname__} cannot be instanciated')
+
def extract_docstring_linenum(node: Str) -> int:
r"""
In older CPython versions, the AST only tells us the end line
@@ -522,14 +566,15 @@ def extract_docstring_linenum(node: Str) -> int:
lineno += 1
elif not ch.isspace():
break
-
+
return lineno
+
def extract_docstring(node: Str) -> Tuple[int, str]:
"""
Extract docstring information from an ast node that represents the docstring.
- @returns:
+ @returns:
- The line number of the first non-blank line of the docsring. See L{extract_docstring_linenum}.
- The docstring to be parsed, cleaned by L{inspect.cleandoc}.
"""
@@ -554,6 +599,7 @@ def infer_type(expr: ast.expr) -> Optional[ast.expr]:
else:
return ast.fix_missing_locations(ast.copy_location(ann, expr))
+
def _annotation_for_value(value: object) -> Optional[ast.expr]:
if value is None:
return None
@@ -569,11 +615,10 @@ def _annotation_for_value(value: object) -> Optional[ast.expr]:
if ann_elem is not None:
if name == 'tuple':
ann_elem = ast.Tuple(elts=[ann_elem, ast.Constant(value=...)], ctx=ast.Load())
- return ast.Subscript(value=ast.Name(id=name, ctx=ast.Load()),
- slice=ann_elem,
- ctx=ast.Load())
+ return ast.Subscript(value=ast.Name(id=name, ctx=ast.Load()), slice=ann_elem, ctx=ast.Load())
return ast.Name(id=name, ctx=ast.Load())
+
def _annotation_for_elements(sequence: Iterable[object]) -> Optional[ast.expr]:
names = set()
for elem in sequence:
@@ -590,11 +635,12 @@ def _annotation_for_elements(sequence: Iterable[object]) -> Optional[ast.expr]:
# Empty sequence or no uniform type.
return None
-
+
class Parentage(ast.NodeVisitor):
"""
Add C{parent} attribute to ast nodes instances.
"""
+
def __init__(self) -> None:
self.current: ast.AST | None = None
@@ -606,21 +652,25 @@ def generic_visit(self, node: ast.AST) -> None:
self.generic_visit(child)
self.current = current
-def get_parents(node:ast.AST) -> Iterator[ast.AST]:
+
+def get_parents(node: ast.AST) -> Iterator[ast.AST]:
"""
Once nodes have the C{.parent} attribute with {Parentage}, use this function
to get a iterator on all parents of the given node up to the root module.
"""
- def _yield_parents(n:Optional[ast.AST]) -> Iterator[ast.AST]:
+
+ def _yield_parents(n: Optional[ast.AST]) -> Iterator[ast.AST]:
if n:
yield n
p = cast(ast.AST, getattr(n, 'parent', None))
yield from _yield_parents(p)
+
yield from _yield_parents(getattr(node, 'parent', None))
-#Part of the astor library for Python AST manipulation.
-#License: 3-clause BSD
-#Copyright (c) 2015 Patrick Maupin
+
+# Part of the astor library for Python AST manipulation.
+# License: 3-clause BSD
+# Copyright (c) 2015 Patrick Maupin
_op_data = """
GeneratorExp 1
@@ -691,49 +741,60 @@ def _yield_parents(n:Optional[ast.AST]) -> Iterator[ast.AST]:
Constant 1
"""
-_op_data = [x.split() for x in _op_data.splitlines()] # type:ignore
-_op_data = [[x[0], ' '.join(x[1:-1]), int(x[-1])] for x in _op_data if x] # type:ignore
+_op_data = [x.split() for x in _op_data.splitlines()] # type:ignore
+_op_data = [[x[0], ' '.join(x[1:-1]), int(x[-1])] for x in _op_data if x] # type:ignore
for _index in range(1, len(_op_data)):
- _op_data[_index][2] *= 2 # type:ignore
- _op_data[_index][2] += _op_data[_index - 1][2] # type:ignore
+ _op_data[_index][2] *= 2 # type:ignore
+ _op_data[_index][2] += _op_data[_index - 1][2] # type:ignore
_deprecated: Collection[str] = ()
if sys.version_info >= (3, 12):
_deprecated = ('Num', 'Str', 'Bytes', 'Ellipsis', 'NameConstant')
-_precedence_data = dict((getattr(ast, x, None), z) for x, y, z in _op_data if x not in _deprecated) # type:ignore
-_symbol_data = dict((getattr(ast, x, None), y) for x, y, z in _op_data if x not in _deprecated) # type:ignore
+_precedence_data = dict((getattr(ast, x, None), z) for x, y, z in _op_data if x not in _deprecated) # type:ignore
+_symbol_data = dict((getattr(ast, x, None), y) for x, y, z in _op_data if x not in _deprecated) # type:ignore
+
class op_util:
"""
This class provides data and functions for mapping
AST nodes to symbols and precedences.
"""
+
@classmethod
- def get_op_symbol(cls, obj:ast.operator|ast.boolop|ast.cmpop|ast.unaryop,
- fmt:str='%s',
- symbol_data:dict[type[ast.AST]|None, str]=_symbol_data,
- type:Callable[[object], type[Any]]=type) -> str:
- """Given an AST node object, returns a string containing the symbol.
- """
+ def get_op_symbol(
+ cls,
+ obj: ast.operator | ast.boolop | ast.cmpop | ast.unaryop,
+ fmt: str = '%s',
+ symbol_data: dict[type[ast.AST] | None, str] = _symbol_data,
+ type: Callable[[object], type[Any]] = type,
+ ) -> str:
+ """Given an AST node object, returns a string containing the symbol."""
return fmt % symbol_data[type(obj)]
+
@classmethod
- def get_op_precedence(cls, obj:ast.AST,
- precedence_data:dict[type[ast.AST]|None, int]=_precedence_data,
- type:Callable[[object], type[Any]]=type) -> int:
+ def get_op_precedence(
+ cls,
+ obj: ast.AST,
+ precedence_data: dict[type[ast.AST] | None, int] = _precedence_data,
+ type: Callable[[object], type[Any]] = type,
+ ) -> int:
"""Given an AST node object, returns the precedence.
- @raises KeyError: If the node is not explicitely supported by this function.
+ @raises KeyError: If the node is not explicitely supported by this function.
This is a very legacy piece of code, all calls to L{get_op_precedence} should be
guarded in a C{try:... except KeyError:...} statement.
"""
return precedence_data[type(obj)]
if not TYPE_CHECKING:
+
class Precedence(object):
vars().update((cast(str, x), z) for x, _, z in _op_data)
highest = max(cast(int, z) for _, _, z in _op_data) + 2
+
else:
Precedence: Any
+
del _op_data, _index, _precedence_data, _symbol_data, _deprecated
# This was part of the astor library for Python AST manipulation.
diff --git a/pydoctor/driver.py b/pydoctor/driver.py
index 221d7de52..972a94ebf 100644
--- a/pydoctor/driver.py
+++ b/pydoctor/driver.py
@@ -1,7 +1,8 @@
"""The entry point."""
+
from __future__ import annotations
-from typing import Sequence
+from typing import Sequence
import datetime
import os
import sys
@@ -17,26 +18,28 @@
# On older versions, a compatibility package must be installed from PyPI.
import importlib.resources as importlib_resources
+
def get_system(options: model.Options) -> model.System:
"""
Get a system with the defined options. Load packages and modules.
"""
- cache = prepareCache(clearCache=options.clear_intersphinx_cache,
- enableCache=options.enable_intersphinx_cache,
- cachePath=options.intersphinx_cache_path,
- maxAge=options.intersphinx_cache_max_age)
+ cache = prepareCache(
+ clearCache=options.clear_intersphinx_cache,
+ enableCache=options.enable_intersphinx_cache,
+ cachePath=options.intersphinx_cache_path,
+ maxAge=options.intersphinx_cache_max_age,
+ )
# step 1: make/find the system
system = options.systemclass(options)
system.fetchIntersphinxInventories(cache)
- cache.close() # Fixes ResourceWarning: unclosed
+ cache.close() # Fixes ResourceWarning: unclosed
# TODO: load buildtime with default factory and converter in model.Options
# Support source date epoch:
# https://reproducible-builds.org/specs/source-date-epoch/
try:
- system.buildtime = datetime.datetime.utcfromtimestamp(
- int(os.environ['SOURCE_DATE_EPOCH']))
+ system.buildtime = datetime.datetime.utcfromtimestamp(int(os.environ['SOURCE_DATE_EPOCH']))
except ValueError as e:
error(str(e))
except KeyError:
@@ -44,11 +47,10 @@ def get_system(options: model.Options) -> model.System:
# Load custom buildtime
if options.buildtime:
try:
- system.buildtime = datetime.datetime.strptime(
- options.buildtime, BUILDTIME_FORMAT)
+ system.buildtime = datetime.datetime.strptime(options.buildtime, BUILDTIME_FORMAT)
except ValueError as e:
error(str(e))
-
+
# step 1.5: create the builder
builderT = system.systemBuilder
@@ -79,37 +81,38 @@ def get_system(options: model.Options) -> model.System:
return system
+
def make(system: model.System) -> None:
"""
- Produce the html/intersphinx output, as configured in the system's options.
+ Produce the html/intersphinx output, as configured in the system's options.
"""
options = system.options
# step 4: make html, if desired
if options.makehtml:
options.makeintersphinx = True
-
- system.msg('html', 'writing html to %s using %s.%s'%(
- options.htmloutput, options.htmlwriter.__module__,
- options.htmlwriter.__name__))
+
+ system.msg(
+ 'html',
+ 'writing html to %s using %s.%s'
+ % (options.htmloutput, options.htmlwriter.__module__, options.htmlwriter.__name__),
+ )
writer: IWriter
-
+
# Always init the writer with the 'base' set of templates at least.
- template_lookup = TemplateLookup(
- importlib_resources.files('pydoctor.themes') / 'base')
-
+ template_lookup = TemplateLookup(importlib_resources.files('pydoctor.themes') / 'base')
+
# Handle theme selection, 'classic' by default.
if system.options.theme != 'base':
- template_lookup.add_templatedir(
- importlib_resources.files('pydoctor.themes') / system.options.theme)
+ template_lookup.add_templatedir(importlib_resources.files('pydoctor.themes') / system.options.theme)
# Handle custom HTML templates
if system.options.templatedir:
try:
for t in system.options.templatedir:
template_lookup.add_templatedir(Path(t))
- except TemplateError as e:
+ except TemplateError as e:
error(str(e))
build_directory = Path(options.htmloutput)
@@ -128,7 +131,7 @@ def make(system: model.System) -> None:
writer.writeIndividualFiles(subjects)
if not options.htmlsubjects:
writer.writeLinks(system)
-
+
if options.makeintersphinx:
if not options.makehtml:
subjects = system.rootobjects
@@ -137,13 +140,14 @@ def make(system: model.System) -> None:
logger=system.msg,
project_name=system.projectname,
project_version=system.options.projectversion,
- )
+ )
if not os.path.exists(options.htmloutput):
os.makedirs(options.htmloutput)
sphinx_inventory.generate(
subjects=subjects,
basepath=options.htmloutput,
- )
+ )
+
def main(args: Sequence[str] = sys.argv[1:]) -> int:
"""
@@ -163,7 +167,7 @@ def main(args: Sequence[str] = sys.argv[1:]) -> int:
# Build model
system = get_system(options)
-
+
# Produce output (HMTL, json, ect)
make(system)
@@ -174,10 +178,10 @@ def main(args: Sequence[str] = sys.argv[1:]) -> int:
def p(msg: str) -> None:
system.msg('docstring-summary', msg, thresh=-1, topthresh=1)
- p("these %s objects' docstrings contain syntax errors:"
- %(len(docstring_syntax_errors),))
+
+ p("these %s objects' docstrings contain syntax errors:" % (len(docstring_syntax_errors),))
for fn in sorted(docstring_syntax_errors):
- p(' '+fn)
+ p(' ' + fn)
# If there is any other kind of parse errors, exit with code 2 as well.
# This applies to errors generated from colorizing AST.
@@ -187,11 +191,12 @@ def p(msg: str) -> None:
if system.violations and options.warnings_as_errors:
# Update exit code if the run has produced warnings.
exitcode = 3
-
+
except:
if options.pdb:
import pdb
+
pdb.post_mortem(sys.exc_info()[2])
raise
-
+
return exitcode
diff --git a/pydoctor/epydoc/__init__.py b/pydoctor/epydoc/__init__.py
index 7976685aa..692f61333 100644
--- a/pydoctor/epydoc/__init__.py
+++ b/pydoctor/epydoc/__init__.py
@@ -62,4 +62,3 @@
# - Add a faq?
# - @type a,b,c: ...
# - new command line option: --command-line-order
-
diff --git a/pydoctor/epydoc/doctest.py b/pydoctor/epydoc/doctest.py
index 442ef131e..196aa50cc 100644
--- a/pydoctor/epydoc/doctest.py
+++ b/pydoctor/epydoc/doctest.py
@@ -21,11 +21,39 @@
#: A list of the names of all Python keywords.
_KEYWORDS = [
- 'and', 'as', 'assert', 'async', 'await', 'break', 'class', 'continue',
- 'def', 'del', 'elif', 'else', 'except', 'finally', 'for', 'from', 'global',
- 'if', 'import', 'in', 'is', 'lambda', 'nonlocal', 'not', 'or', 'pass',
- 'raise', 'return', 'try', 'while', 'with', 'yield'
- ]
+ 'and',
+ 'as',
+ 'assert',
+ 'async',
+ 'await',
+ 'break',
+ 'class',
+ 'continue',
+ 'def',
+ 'del',
+ 'elif',
+ 'else',
+ 'except',
+ 'finally',
+ 'for',
+ 'from',
+ 'global',
+ 'if',
+ 'import',
+ 'in',
+ 'is',
+ 'lambda',
+ 'nonlocal',
+ 'not',
+ 'or',
+ 'pass',
+ 'raise',
+ 'return',
+ 'try',
+ 'while',
+ 'with',
+ 'yield',
+]
# The following are technically keywords since Python 3,
# but we don't want to colorize them as such: 'None', 'True', 'False'.
@@ -40,8 +68,8 @@
#: A regexp group that matches Python strings.
_STRING_GRP = '|'.join(
- [r'("""("""|.*?((?!").)"""))', r'("("|.*?((?!").)"))',
- r"('''('''|.*?[^\\']'''))", r"('('|.*?[^\\']'))"])
+ [r'("""("""|.*?((?!").)"""))', r'("("|.*?((?!").)"))', r"('''('''|.*?[^\\']'''))", r"('('|.*?[^\\']'))"]
+)
#: A regexp group that matches Python comments.
_COMMENT_GRP = '(#.*?$)'
@@ -59,16 +87,13 @@
DEFINE_FUNC_RE = re.compile(r'(?P\w+)(?P\s+)(?P\w+)')
#: A regexp that matches Python prompts
-PROMPT_RE = re.compile(f'({_PROMPT1_GRP}|{_PROMPT2_GRP})',
- re.MULTILINE | re.DOTALL)
+PROMPT_RE = re.compile(f'({_PROMPT1_GRP}|{_PROMPT2_GRP})', re.MULTILINE | re.DOTALL)
#: A regexp that matches Python "..." prompts.
-PROMPT2_RE = re.compile(f'({_PROMPT2_GRP})',
- re.MULTILINE | re.DOTALL)
+PROMPT2_RE = re.compile(f'({_PROMPT2_GRP})', re.MULTILINE | re.DOTALL)
#: A regexp that matches doctest exception blocks.
-EXCEPT_RE = re.compile(r'^[ \t]*Traceback \(most recent call last\):.*',
- re.DOTALL | re.MULTILINE)
+EXCEPT_RE = re.compile(r'^[ \t]*Traceback \(most recent call last\):.*', re.DOTALL | re.MULTILINE)
#: A regexp that matches doctest directives.
DOCTEST_DIRECTIVE_RE = re.compile(r'#[ \t]*doctest:.*')
@@ -77,17 +102,19 @@
#: that should be colored.
DOCTEST_RE = re.compile(
'('
- rf'(?P{_STRING_GRP})|(?P{_COMMENT_GRP})|'
- rf'(?P{_DEFINE_GRP})|'
- rf'(?P{_KEYWORD_GRP})|(?P{_BUILTIN_GRP})|'
- rf'(?P{_PROMPT1_GRP})|(?P{_PROMPT2_GRP})|(?P\Z)'
+ rf'(?P{_STRING_GRP})|(?P{_COMMENT_GRP})|'
+ rf'(?P{_DEFINE_GRP})|'
+ rf'(?P{_KEYWORD_GRP})|(?P{_BUILTIN_GRP})|'
+ rf'(?P{_PROMPT1_GRP})|(?P{_PROMPT2_GRP})|(?P\Z)'
')',
- re.MULTILINE | re.DOTALL)
+ re.MULTILINE | re.DOTALL,
+)
#: This regular expression is used to find doctest examples in a
#: string. This is copied from the standard Python doctest.py
#: module (after the refactoring in Python 2.4+).
-DOCTEST_EXAMPLE_RE = re.compile(r'''
+DOCTEST_EXAMPLE_RE = re.compile(
+ r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P
'''
assert epytext2html(doc) == squash(expected)
-
+
def test_epytext_url() -> None:
doc = '''
@@ -254,6 +259,7 @@ def test_epytext_url() -> None:
assert epytext2html(doc) == squash(expected)
+
def test_epytext_symbol() -> None:
doc = '''
Symbols can be used in equations:
@@ -270,16 +276,17 @@ def test_epytext_symbol() -> None:
'''
assert epytext2html(doc) == squash(expected)
+
def test_nested_markup() -> None:
"""
- The Epytext nested inline markup are correctly transformed to HTML.
+ The Epytext nested inline markup are correctly transformed to HTML.
"""
doc = '''
I{B{Inline markup} may be nested; and
it may span} multiple lines.
'''
expected = ''' Inline markup may be nested; and it may spanmultiple lines.'''
-
+
assert epytext2html(doc) == squash(expected)
doc = '''
@@ -288,11 +295,12 @@ def test_nested_markup() -> None:
expected = '''
It becomes a little bit complicated withcustomlinks
'''
-
+
assert epytext2html(doc) == squash(expected)
+
# From docutils 0.18 the toc entries uses different ids.
-@pytest.mark.skipif(docutils_version_info < (0,18), reason="HTML ids in toc tree changed in docutils 0.18.0.")
+@pytest.mark.skipif(docutils_version_info < (0, 18), reason="HTML ids in toc tree changed in docutils 0.18.0.")
def test_get_toc() -> None:
docstring = """
@@ -327,12 +335,12 @@ def test_get_toc() -> None:
errors: List[ParseError] = []
parsed = parse_docstring(docstring, errors)
assert not errors, [str(e.descr()) for e in errors]
-
+
toc = parsed.get_toc(4)
assert toc is not None
html = flatten(toc.to_stan(NotFoundLinker()))
-
- expected_html="""
+
+ expected_html = """