added podman, json and yaml
This commit is contained in:
467
venv/lib/python3.11/site-packages/mkdocs/utils/__init__.py
Normal file
467
venv/lib/python3.11/site-packages/mkdocs/utils/__init__.py
Normal file
@ -0,0 +1,467 @@
|
||||
"""
|
||||
Standalone file utils.
|
||||
|
||||
Nothing in this module should have an knowledge of config or the layout
|
||||
and structure of the site and pages in the site.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import functools
|
||||
import logging
|
||||
import os
|
||||
import posixpath
|
||||
import re
|
||||
import shutil
|
||||
import sys
|
||||
import warnings
|
||||
from collections import defaultdict
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import PurePath
|
||||
from typing import (
|
||||
IO,
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Collection,
|
||||
Dict,
|
||||
Iterable,
|
||||
List,
|
||||
MutableSequence,
|
||||
Optional,
|
||||
Tuple,
|
||||
Type,
|
||||
TypeVar,
|
||||
)
|
||||
from urllib.parse import urlsplit
|
||||
|
||||
if sys.version_info >= (3, 10):
|
||||
from importlib.metadata import EntryPoint, entry_points
|
||||
else:
|
||||
from importlib_metadata import EntryPoint, entry_points
|
||||
|
||||
import yaml
|
||||
from mergedeep import merge
|
||||
from yaml_env_tag import construct_env_tag
|
||||
|
||||
from mkdocs import exceptions
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from mkdocs.structure.pages import Page
|
||||
|
||||
T = TypeVar('T')
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# File suffixes recognized as Markdown source (used via `str.endswith`).
markdown_extensions = (
    '.markdown',
    '.mdown',
    '.mkdn',
    '.mkd',
    '.md',
)
|
||||
|
||||
|
||||
def get_yaml_loader(loader=yaml.Loader):
    """Return a subclass of *loader* extended with MkDocs-specific YAML tags."""

    class Loader(loader):
        """Per-call subclass so the global PyYAML loader stays unaltered."""

    # Support `!ENV` to pull values from environment variables.
    # See https://github.com/waylan/pyyaml-env-tag
    Loader.add_constructor('!ENV', construct_env_tag)
    return Loader
|
||||
|
||||
|
||||
def yaml_load(source: IO, loader: Optional[Type[yaml.Loader]] = None) -> Optional[Dict[str, Any]]:
    """Return dict of source YAML file using loader, recursively deep merging inherited parent."""
    yaml_loader = loader or get_yaml_loader()
    result = yaml.load(source, Loader=yaml_loader)
    if result is None or 'INHERIT' not in result:
        return result

    # An 'INHERIT' key names a parent config, resolved relative to this file.
    relpath = result.pop('INHERIT')
    abspath = os.path.normpath(os.path.join(os.path.dirname(source.name), relpath))
    if not os.path.exists(abspath):
        raise exceptions.ConfigurationError(
            f"Inherited config file '{relpath}' does not exist at '{abspath}'."
        )
    log.debug(f"Loading inherited configuration file: {abspath}")
    with open(abspath, 'rb') as fd:
        parent = yaml_load(fd, yaml_loader)
    # The child's keys win over the parent's on conflict.
    return merge(parent, result)
|
||||
|
||||
|
||||
def modified_time(file_path):
    """Deprecated: return the mtime of *file_path*, or 0.0 if it does not exist."""
    warnings.warn(
        "modified_time is never used in MkDocs and will be removed soon.", DeprecationWarning
    )
    if not os.path.exists(file_path):
        return 0.0
    return os.path.getmtime(file_path)
|
||||
|
||||
|
||||
def get_build_timestamp() -> int:
    """Return the build time as whole seconds since the epoch.

    Honours the SOURCE_DATE_EPOCH environment variable for reproducible
    builds. See https://reproducible-builds.org/specs/source-date-epoch/
    """
    epoch = os.environ.get('SOURCE_DATE_EPOCH')
    if epoch is not None:
        return int(epoch)
    return int(datetime.now(timezone.utc).timestamp())
|
||||
|
||||
|
||||
def get_build_datetime() -> datetime:
    """Return the build time as an aware (UTC) datetime.

    Honours the SOURCE_DATE_EPOCH environment variable for reproducible
    builds. See https://reproducible-builds.org/specs/source-date-epoch/
    """
    epoch = os.environ.get('SOURCE_DATE_EPOCH')
    if epoch is not None:
        return datetime.fromtimestamp(int(epoch), timezone.utc)
    return datetime.now(timezone.utc)
|
||||
|
||||
|
||||
def get_build_date() -> str:
    """Return the build date formatted as 'YYYY-MM-DD'.

    Honours the SOURCE_DATE_EPOCH environment variable for reproducible
    builds. See https://reproducible-builds.org/specs/source-date-epoch/
    """
    return get_build_datetime().strftime('%Y-%m-%d')
|
||||
|
||||
|
||||
def reduce_list(data_set: Iterable[str]) -> List[str]:
    """Return *data_set* with duplicates dropped, keeping first-seen order."""
    seen: Dict[str, None] = {}
    for item in data_set:
        seen.setdefault(item)
    return list(seen)
|
||||
|
||||
|
||||
if sys.version_info >= (3, 10):
|
||||
from bisect import insort
|
||||
else:
|
||||
|
||||
def insort(a: MutableSequence[T], x: T, *, key=lambda v: v) -> None:
|
||||
kx = key(x)
|
||||
i = len(a)
|
||||
while i > 0 and kx < key(a[i - 1]):
|
||||
i -= 1
|
||||
a.insert(i, x)
|
||||
|
||||
|
||||
def copy_file(source_path: str, output_path: str) -> None:
    """
    Copy source_path to output_path, making sure any parent directories exist.

    The output_path may be a directory, in which case the source file name
    is appended to it.
    """
    output_dir = os.path.dirname(output_path)
    # dirname is '' for a bare file name; os.makedirs('') would raise.
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)
    if os.path.isdir(output_path):
        output_path = os.path.join(output_path, os.path.basename(source_path))
    shutil.copyfile(source_path, output_path)
|
||||
|
||||
|
||||
def write_file(content: bytes, output_path: str) -> None:
    """
    Write content to output_path, making sure any parent directories exist.
    """
    output_dir = os.path.dirname(output_path)
    # dirname is '' for a bare file name; os.makedirs('') would raise.
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)
    with open(output_path, 'wb') as f:
        f.write(content)
|
||||
|
||||
|
||||
def clean_directory(directory: str) -> None:
    """Delete everything inside *directory*, but not the directory itself.

    A missing directory is a no-op. Hidden entries (names starting with '.')
    are preserved: MkDocs never copies hidden files, so it should not delete
    them either.
    """
    if not os.path.exists(directory):
        return

    for name in os.listdir(directory):
        if name.startswith('.'):
            continue

        full_path = os.path.join(directory, name)
        if os.path.isdir(full_path):
            shutil.rmtree(full_path, True)
        else:
            os.unlink(full_path)
|
||||
|
||||
|
||||
def get_html_path(path):
    """Deprecated: map a source file path to its output HTML path."""
    warnings.warn(
        "get_html_path is never used in MkDocs and will be removed soon.", DeprecationWarning
    )
    base, _ext = os.path.splitext(path)
    if os.path.basename(base) == 'index':
        return base + '.html'
    return "/".join((base, 'index.html'))
|
||||
|
||||
|
||||
def get_url_path(path, use_directory_urls=True):
    """Deprecated: map a source file path to its site URL."""
    warnings.warn(
        "get_url_path is never used in MkDocs and will be removed soon.", DeprecationWarning
    )
    url = '/' + get_html_path(path).replace(os.sep, '/')
    return url[: -len('index.html')] if use_directory_urls else url
|
||||
|
||||
|
||||
def is_markdown_file(path: str) -> bool:
    """Tell whether *path* has one of the recognized Markdown extensions.

    https://superuser.com/questions/249436/file-extension-for-markdown-files
    """
    return path.endswith(markdown_extensions)
|
||||
|
||||
|
||||
def is_html_file(path):
    """Deprecated: True if *path* ends in '.html' or '.htm', case-insensitively."""
    warnings.warn(
        "is_html_file is never used in MkDocs and will be removed soon.", DeprecationWarning
    )
    lowered = path.lower()
    return lowered.endswith('.html') or lowered.endswith('.htm')
|
||||
|
||||
|
||||
def is_template_file(path):
    """Deprecated: True if *path* ends in '.html', '.htm' or '.xml', case-insensitively."""
    warnings.warn(
        "is_template_file is never used in MkDocs and will be removed soon.", DeprecationWarning
    )
    lowered = path.lower()
    return lowered.endswith(('.html', '.htm', '.xml'))
|
||||
|
||||
|
||||
# Three digits followed by '.htm' or '.html', e.g. '404.html'.
_ERROR_TEMPLATE_RE = re.compile(r'^\d{3}\.html?$')


def is_error_template(path: str) -> bool:
    """Tell whether *path* names an HTTP error template such as '404.html'."""
    return _ERROR_TEMPLATE_RE.match(path) is not None
|
||||
|
||||
|
||||
@functools.lru_cache(maxsize=None)
|
||||
def _norm_parts(path: str) -> List[str]:
|
||||
if not path.startswith('/'):
|
||||
path = '/' + path
|
||||
path = posixpath.normpath(path)[1:]
|
||||
return path.split('/') if path else []
|
||||
|
||||
|
||||
def get_relative_url(url: str, other: str) -> str:
    """
    Return given url relative to other.

    Both are treated as slash-separated paths, like the 'path' part of a URL.
    The last component of `other` is dropped if it contains a dot (it is
    considered a file). Real URLs (with schemes etc.) aren't supported; the
    leading slash is ignored. Paths are normalized ('..' acts as parent
    directory) but clamped at the root ('foo/../../bar' is just 'bar').
    """
    # Treat a dotted final component of `other` as a file name and drop it.
    head, _, tail = other.rpartition('/')
    if '.' in tail:
        other = head

    base_parts = _norm_parts(other)
    target_parts = _norm_parts(url)

    # Length of the shared leading path.
    shared = 0
    for base_seg, target_seg in zip(base_parts, target_parts):
        if base_seg != target_seg:
            break
        shared += 1

    hops = ['..'] * (len(base_parts) - shared)
    rel = '/'.join(hops + target_parts[shared:]) or '.'
    # Preserve a trailing slash on the target.
    return rel + '/' if url.endswith('/') else rel
|
||||
|
||||
|
||||
def normalize_url(path: str, page: Optional[Page] = None, base: str = '') -> str:
    """Return a URL relative to the given page or, failing that, joined to base."""
    path, is_absolute = _get_norm_url(path)
    if is_absolute:
        return path
    if page is None:
        return posixpath.join(base, path)
    return get_relative_url(path, page.url)
|
||||
|
||||
|
||||
@functools.lru_cache(maxsize=None)
|
||||
def _get_norm_url(path: str) -> Tuple[str, bool]:
|
||||
if not path:
|
||||
path = '.'
|
||||
elif '\\' in path:
|
||||
log.warning(
|
||||
f"Path '{path}' uses OS-specific separator '\\'. "
|
||||
f"That will be unsupported in a future release. Please change it to '/'."
|
||||
)
|
||||
path = path.replace('\\', '/')
|
||||
# Allow links to be fully qualified URLs
|
||||
parsed = urlsplit(path)
|
||||
if parsed.scheme or parsed.netloc or path.startswith(('/', '#')):
|
||||
return path, True
|
||||
return path, False
|
||||
|
||||
|
||||
def create_media_urls(
    path_list: List[str], page: Optional[Page] = None, base: str = ''
) -> List[str]:
    """Normalize every path in *path_list* relative to *page* or *base*."""
    return [normalize_url(media_path, page, base) for media_path in path_list]
|
||||
|
||||
|
||||
def path_to_url(path):
    """Soft-deprecated, do not use. Converts backslashes to forward slashes."""
    return '/'.join(path.split('\\'))
|
||||
|
||||
|
||||
def get_theme_dir(name: str) -> str:
    """Return the directory of the installed theme called *name*.

    Raises KeyError if no such theme is installed.
    """
    entry_point = get_themes()[name]
    return os.path.dirname(os.path.abspath(entry_point.load().__file__))
|
||||
|
||||
|
||||
def get_themes() -> Dict[str, EntryPoint]:
    """Return a dict of all installed themes as {name: EntryPoint}.

    A third-party package may not shadow a builtin mkdocs theme name (that is
    a configuration error); two third-party packages providing the same name
    produce a warning, with the later entry point winning.
    """
    themes: Dict[str, EntryPoint] = {}
    eps: Dict[EntryPoint, None] = dict.fromkeys(entry_points(group='mkdocs.themes'))
    # Names shipped by mkdocs itself — protected from being overridden.
    builtins = {ep.name for ep in eps if ep.dist is not None and ep.dist.name == 'mkdocs'}

    for ep in eps:
        dist = ep.dist
        assert dist is not None

        if ep.name in builtins and dist.name != 'mkdocs':
            raise exceptions.ConfigurationError(
                f"The theme '{ep.name}' is a builtin theme but the package '{dist.name}' "
                "attempts to provide a theme with the same name."
            )

        if ep.name in themes:
            other_dist = themes[ep.name].dist
            assert other_dist is not None
            log.warning(
                f"A theme named '{ep.name}' is provided by the Python packages '{dist.name}' "
                f"and '{other_dist.name}'. The one in '{dist.name}' will be used."
            )

        themes[ep.name] = ep

    return themes
|
||||
|
||||
|
||||
def get_theme_names() -> Collection[str]:
    """Return the names of all installed themes."""
    return get_themes().keys()
|
||||
|
||||
|
||||
def dirname_to_title(dirname: str) -> str:
    """Derive a human-readable page title from a directory name."""
    title = dirname.replace('-', ' ').replace('_', ' ')
    # Capitalize only fully-lowercase names; mixed case is kept as written.
    return title.capitalize() if title.lower() == title else title
|
||||
|
||||
|
||||
def get_markdown_title(markdown_src: str) -> Optional[str]:
    """
    Get the title of a Markdown document. The title in this case is considered
    to be a H1 that occurs before any other content in the document.
    The procedure is to iterate through the lines, stopping at the first
    non-whitespace content. If it is an H1 ('# ...'), return its text,
    otherwise return None.
    """
    lines = markdown_src.replace('\r\n', '\n').replace('\r', '\n').split('\n')
    for line in lines:
        line = line.strip()
        if not line:
            continue
        if not line.startswith('# '):
            return None
        # Drop only the leading '# ' marker. The previous `lstrip('# ')`
        # also stripped '#' and space characters belonging to the title
        # itself (e.g. '# #1 Release' became '1 Release').
        return line[2:].strip()
    return None
|
||||
|
||||
|
||||
def find_or_create_node(branch, key):
    """
    Return the value of the first dict in *branch* that contains *key*.

    If no such dict exists, append ``{key: []}`` to *branch* and return the
    new (empty) list.
    """
    for node in branch:
        if isinstance(node, dict) and key in node:
            return node[key]

    children = []
    branch.append({key: children})
    return children
|
||||
|
||||
|
||||
def nest_paths(paths):
    """
    Convert a flat list of paths into the nested structure that matches the
    pages config, grouping paths by their parent directory components.
    """
    nested = []

    for path in paths:
        branch = nested
        # Descend (creating as needed) one level per parent directory.
        for part in PurePath(path).parent.parts:
            branch = find_or_create_node(branch, dirname_to_title(part))
        branch.append(path)

    return nested
|
||||
|
||||
|
||||
class CountHandler(logging.NullHandler):
    """A no-output logging handler that tallies the records it sees per level."""

    def __init__(self, **kwargs) -> None:
        # Keyed by numeric level so the results can be sorted by severity.
        self.counts: Dict[int, int] = defaultdict(int)
        super().__init__(**kwargs)

    def handle(self, record):
        passed = self.filter(record)
        if passed:
            self.counts[record.levelno] += 1
        return passed

    def get_counts(self) -> List[Tuple[str, int]]:
        """Return (level name, count) pairs, most severe level first."""
        ordered = sorted(self.counts.items(), reverse=True)
        return [(logging.getLevelName(levelno), n) for levelno, n in ordered]
|
||||
|
||||
|
||||
# For backward compatibility as some plugins import it.
|
||||
# It is no longer necessary as all messages on the
|
||||
# `mkdocs` logger get counted automatically.
|
||||
warning_filter = logging.Filter()
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
29
venv/lib/python3.11/site-packages/mkdocs/utils/babel_stub.py
Normal file
29
venv/lib/python3.11/site-packages/mkdocs/utils/babel_stub.py
Normal file
@ -0,0 +1,29 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from string import ascii_letters
|
||||
from typing import NamedTuple
|
||||
|
||||
|
||||
class UnknownLocaleError(Exception):
    """Raised when a locale identifier does not name a known locale."""
|
||||
|
||||
|
||||
class Locale(NamedTuple):
    """Minimal stand-in for babel's Locale: a (language, territory) pair."""

    language: str
    territory: str = ''

    def __str__(self):
        return f'{self.language}_{self.territory}' if self.territory else self.language

    @classmethod
    def parse(cls, identifier, sep):
        """Parse 'xx' or 'xx<sep>YY' into a Locale, validating the language code."""
        if not isinstance(identifier, str):
            raise TypeError(f"Unexpected value for identifier: '{identifier}'")
        locale = cls(*identifier.split(sep, 1))
        if not all(ch in ascii_letters for ch in locale.language):
            raise ValueError(f"expected only letters, got '{locale.language}'")
        if len(locale.language) != 2:
            raise UnknownLocaleError(f"unknown locale '{locale.language}'")
        return locale
|
||||
14
venv/lib/python3.11/site-packages/mkdocs/utils/filters.py
Normal file
14
venv/lib/python3.11/site-packages/mkdocs/utils/filters.py
Normal file
@ -0,0 +1,14 @@
|
||||
from __future__ import annotations
|
||||
|
||||
try:
|
||||
from jinja2 import pass_context as contextfilter # type: ignore
|
||||
except ImportError:
|
||||
from jinja2 import contextfilter # type: ignore
|
||||
|
||||
from mkdocs.utils import normalize_url
|
||||
|
||||
|
||||
@contextfilter
def url_filter(context, value: str) -> str:
    """Jinja2 template filter that resolves *value* to a normalized URL."""
    current_page = context['page']
    base = context['base_url']
    return normalize_url(value, page=current_page, base=base)
|
||||
102
venv/lib/python3.11/site-packages/mkdocs/utils/meta.py
Normal file
102
venv/lib/python3.11/site-packages/mkdocs/utils/meta.py
Normal file
@ -0,0 +1,102 @@
|
||||
"""
|
||||
Copyright (c) 2015, Waylan Limberg
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification,
|
||||
are permitted provided that the following conditions are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright notice, this
|
||||
list of conditions and the following disclaimer in the documentation and/or other
|
||||
materials provided with the distribution.
|
||||
|
||||
3. Neither the name of the copyright holder nor the names of its contributors may
|
||||
be used to endorse or promote products derived from this software without
|
||||
specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
|
||||
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
|
||||
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
|
||||
OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
MultiMarkdown Meta-Data
|
||||
|
||||
Extracts, parses and transforms MultiMarkdown style data from documents.
|
||||
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
from typing import Any, Dict, Tuple
|
||||
|
||||
import yaml
|
||||
|
||||
try:
|
||||
from yaml import CSafeLoader as SafeLoader
|
||||
except ImportError: # pragma: no cover
|
||||
from yaml import SafeLoader # type: ignore
|
||||
|
||||
#####################################################################
|
||||
# Data Parser #
|
||||
#####################################################################
|
||||
|
||||
YAML_RE = re.compile(r'^-{3}[ \t]*\n(.*?\n)(?:\.{3}|-{3})[ \t]*\n', re.UNICODE | re.DOTALL)
|
||||
META_RE = re.compile(r'^[ ]{0,3}(?P<key>[A-Za-z0-9_-]+):\s*(?P<value>.*)')
|
||||
META_MORE_RE = re.compile(r'^([ ]{4}|\t)(\s*)(?P<value>.*)')
|
||||
|
||||
|
||||
def get_data(doc: str) -> Tuple[str, Dict[str, Any]]:
    """
    Extract meta-data from a text document.

    Returns a tuple of document and a data dict.
    """
    data: Dict[str, Any] = {}

    # YAML front matter takes priority.
    yaml_match = YAML_RE.match(doc)
    if yaml_match:
        try:
            loaded = yaml.load(yaml_match.group(1), SafeLoader)
            if isinstance(loaded, dict):
                data = loaded
                doc = doc[yaml_match.end() :].lstrip('\n')
        except Exception:
            # Malformed YAML: leave the document untouched with empty data.
            pass
        return doc, data

    # No YAML delimiters — try MultiMarkdown-style 'key: value' headers.
    lines = doc.replace('\r\n', '\n').replace('\r', '\n').split('\n')

    key = None
    while lines:
        line = lines.pop(0)

        if line.strip() == '':
            break  # A blank line ends the meta-data section.

        kv_match = META_RE.match(line)
        if kv_match:
            key = kv_match.group('key').lower().strip()
            value = kv_match.group('value').strip()
            # Repeated keys accumulate into one space-joined string.
            data[key] = f'{data[key]} {value}' if key in data else value
            continue

        more_match = META_MORE_RE.match(line)
        if more_match and key:
            # Indented continuation line for the previous key.
            data[key] += ' {}'.format(more_match.group('value').strip())
        else:
            lines.insert(0, line)  # Not meta-data: put the line back.
            break

    return '\n'.join(lines).lstrip('\n'), data
|
||||
Reference in New Issue
Block a user