Init: mediaserver

2023-02-08 12:13:28 +01:00
parent 848bc9739c
commit f7c23d4ba9
31914 changed files with 6175775 additions and 0 deletions

View File

@@ -0,0 +1,99 @@
"""Test runner for all Ansible tests."""
from __future__ import annotations
import os
import sys
# This import should occur as early as possible.
# It must occur before subprocess has been imported anywhere in the current process.
from .init import (
CURRENT_RLIMIT_NOFILE,
)
from .util import (
ApplicationError,
display,
MAXFD,
)
from .delegation import (
delegate,
)
from .executor import (
ApplicationWarning,
Delegate,
ListTargets,
)
from .timeout import (
configure_timeout,
)
from .data import (
data_context,
)
from .util_common import (
CommonConfig,
)
from .cli import (
parse_args,
)
from .provisioning import (
PrimeContainers,
)
def main():
"""Main program function."""
try:
os.chdir(data_context().content.root)
args = parse_args()
config = args.config(args) # type: CommonConfig
display.verbosity = config.verbosity
display.truncate = config.truncate
display.redact = config.redact
display.color = config.color
display.info_stderr = config.info_stderr
configure_timeout(config)
display.info('RLIMIT_NOFILE: %s' % (CURRENT_RLIMIT_NOFILE,), verbosity=2)
display.info('MAXFD: %d' % MAXFD, verbosity=2)
delegate_args = None
target_names = None
try:
args.func(config)
except PrimeContainers:
pass
except ListTargets as ex:
# save target_names for use once we exit the exception handler
target_names = ex.target_names
except Delegate as ex:
# save delegation args for use once we exit the exception handler
delegate_args = (ex.host_state, ex.exclude, ex.require)
if delegate_args:
# noinspection PyTypeChecker
delegate(config, *delegate_args)
if target_names:
for target_name in target_names:
print(target_name) # info goes to stderr, this should be on stdout
display.review_warnings()
config.success = True
except ApplicationWarning as ex:
display.warning(u'%s' % ex)
sys.exit(0)
except ApplicationError as ex:
display.error(u'%s' % ex)
sys.exit(1)
except KeyboardInterrupt:
sys.exit(2)
except BrokenPipeError:
sys.exit(3)
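# Illustrative summary (not part of the original file) of the exit-code contract
# implemented by the handlers above: a successful run falls through and exits 0,
# ApplicationWarning also exits 0 after printing the warning, ApplicationError
# exits 1, KeyboardInterrupt exits 2, and BrokenPipeError exits 3.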

View File

@@ -0,0 +1,301 @@
"""Miscellaneous utility functions and classes specific to ansible cli tools."""
from __future__ import annotations
import json
import os
import typing as t
from .constants import (
SOFT_RLIMIT_NOFILE,
)
from .io import (
write_text_file,
)
from .util import (
common_environment,
ApplicationError,
ANSIBLE_LIB_ROOT,
ANSIBLE_TEST_DATA_ROOT,
ANSIBLE_BIN_PATH,
ANSIBLE_SOURCE_ROOT,
ANSIBLE_TEST_TOOLS_ROOT,
get_ansible_version,
)
from .util_common import (
create_temp_dir,
run_command,
ResultType,
intercept_python,
get_injector_path,
)
from .config import (
IntegrationConfig,
PosixIntegrationConfig,
EnvironmentConfig,
CommonConfig,
)
from .data import (
data_context,
)
from .python_requirements import (
install_requirements,
)
from .host_configs import (
PythonConfig,
)
def parse_inventory(args, inventory_path): # type: (EnvironmentConfig, str) -> t.Dict[str, t.Any]
"""Return a dict parsed from the given inventory file."""
cmd = ['ansible-inventory', '-i', inventory_path, '--list']
env = ansible_environment(args)
inventory = json.loads(intercept_python(args, args.controller_python, cmd, env, capture=True, always=True)[0])
return inventory
def get_hosts(inventory, group_name): # type: (t.Dict[str, t.Any], str) -> t.Dict[str, t.Dict[str, t.Any]]
"""Return a dict of hosts from the specified group in the given inventory."""
hostvars = inventory.get('_meta', {}).get('hostvars', {})
group = inventory.get(group_name, {})
host_names = group.get('hosts', [])
hosts = dict((name, hostvars.get(name, {})) for name in host_names)
return hosts
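# Illustrative example (not part of the original file): given the JSON shape
# produced by 'ansible-inventory --list', get_hosts() pairs each host in a group
# with its hostvars:
#
#   inventory = {
#       '_meta': {'hostvars': {'web1': {'ansible_host': '10.0.0.5'}}},
#       'web': {'hosts': ['web1']},
#   }
#   get_hosts(inventory, 'web')  # -> {'web1': {'ansible_host': '10.0.0.5'}}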
def ansible_environment(args, color=True, ansible_config=None): # type: (CommonConfig, bool, t.Optional[str]) -> t.Dict[str, str]
"""Return a dictionary of environment variables to use when running Ansible commands."""
env = common_environment()
path = env['PATH']
if not path.startswith(ANSIBLE_BIN_PATH + os.path.pathsep):
path = ANSIBLE_BIN_PATH + os.path.pathsep + path
if not ansible_config:
# use the default empty configuration unless one has been provided
ansible_config = args.get_ansible_config()
if not args.explain and not os.path.exists(ansible_config):
raise ApplicationError('Configuration not found: %s' % ansible_config)
ansible = dict(
ANSIBLE_PYTHON_MODULE_RLIMIT_NOFILE=str(SOFT_RLIMIT_NOFILE),
ANSIBLE_FORCE_COLOR='true' if args.color and color else 'false',
ANSIBLE_FORCE_HANDLERS='true', # allow cleanup handlers to run when tests fail
ANSIBLE_HOST_PATTERN_MISMATCH='error', # prevent tests from unintentionally passing when hosts are not found
ANSIBLE_INVENTORY='/dev/null', # force tests to provide inventory
ANSIBLE_DEPRECATION_WARNINGS='false',
ANSIBLE_HOST_KEY_CHECKING='false',
ANSIBLE_RETRY_FILES_ENABLED='false',
ANSIBLE_CONFIG=ansible_config,
ANSIBLE_LIBRARY='/dev/null',
ANSIBLE_DEVEL_WARNING='false', # Don't show warnings that CI is running devel
ANSIBLE_JINJA2_NATIVE_WARNING='false', # Don't show warnings in CI when an old Jinja version prevents use of native types
PYTHONPATH=get_ansible_python_path(args),
PAGER='/bin/cat',
PATH=path,
# give TQM worker processes time to report code coverage results
# without this the last task in a play may write no coverage file, an empty file, or an incomplete file
# enabled even when not using code coverage to surface warnings when worker processes do not exit cleanly
ANSIBLE_WORKER_SHUTDOWN_POLL_COUNT='100',
ANSIBLE_WORKER_SHUTDOWN_POLL_DELAY='0.1',
)
if isinstance(args, IntegrationConfig) and args.coverage:
# standard path injection is not effective for ansible-connection, instead the location must be configured
# ansible-connection only requires the injector for code coverage
# the correct python interpreter is already selected using the sys.executable used to invoke ansible
ansible.update(dict(
ANSIBLE_CONNECTION_PATH=os.path.join(get_injector_path(), 'ansible-connection'),
))
if isinstance(args, PosixIntegrationConfig):
ansible.update(dict(
ANSIBLE_PYTHON_INTERPRETER='/set/ansible_python_interpreter/in/inventory', # force tests to set ansible_python_interpreter in inventory
))
env.update(ansible)
if args.debug:
env.update(dict(
ANSIBLE_DEBUG='true',
ANSIBLE_LOG_PATH=os.path.join(ResultType.LOGS.name, 'debug.log'),
))
if data_context().content.collection:
env.update(dict(
ANSIBLE_COLLECTIONS_PATH=data_context().content.collection.root,
))
if data_context().content.is_ansible:
env.update(configure_plugin_paths(args))
return env
def configure_plugin_paths(args): # type: (CommonConfig) -> t.Dict[str, str]
"""Return environment variables with paths to plugins relevant for the current command."""
if not isinstance(args, IntegrationConfig):
return {}
support_path = os.path.join(ANSIBLE_SOURCE_ROOT, 'test', 'support', args.command)
# provide private copies of collections for integration tests
collection_root = os.path.join(support_path, 'collections')
env = dict(
ANSIBLE_COLLECTIONS_PATH=collection_root,
)
# provide private copies of plugins for integration tests
plugin_root = os.path.join(support_path, 'plugins')
plugin_list = [
'action',
'become',
'cache',
'callback',
'cliconf',
'connection',
'filter',
'httpapi',
'inventory',
'lookup',
'netconf',
# 'shell' is not configurable
'strategy',
'terminal',
'test',
'vars',
]
# most plugins follow a standard naming convention
plugin_map = dict(('%s_plugins' % name, name) for name in plugin_list)
# these plugins do not follow the standard naming convention
plugin_map.update(
doc_fragment='doc_fragments',
library='modules',
module_utils='module_utils',
)
env.update(dict(('ANSIBLE_%s' % key.upper(), os.path.join(plugin_root, value)) for key, value in plugin_map.items()))
# only configure directories which exist
env = dict((key, value) for key, value in env.items() if os.path.isdir(value))
return env
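# Illustrative example (not part of the original file): for an integration command
# the mapping above produces variables such as:
#   ANSIBLE_ACTION_PLUGINS=<support_path>/plugins/action
#   ANSIBLE_LIBRARY=<support_path>/plugins/modules
# but only for the plugin directories which actually exist under the support path.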
def get_ansible_python_path(args): # type: (CommonConfig) -> str
"""
Return a directory usable for PYTHONPATH, containing only the ansible package.
If a temporary directory is required, it will be cached for the lifetime of the process and cleaned up at exit.
"""
try:
return get_ansible_python_path.python_path
except AttributeError:
pass
if ANSIBLE_SOURCE_ROOT:
# when running from source there is no need for a temporary directory to isolate the ansible package
python_path = os.path.dirname(ANSIBLE_LIB_ROOT)
else:
# when not running from source the installed directory is unsafe to add to PYTHONPATH
# doing so would expose many unwanted packages on sys.path
# instead a temporary directory is created which contains only ansible using a symlink
python_path = create_temp_dir(prefix='ansible-test-')
os.symlink(ANSIBLE_LIB_ROOT, os.path.join(python_path, 'ansible'))
if not args.explain:
generate_egg_info(python_path)
get_ansible_python_path.python_path = python_path
return python_path
def generate_egg_info(path): # type: (str) -> None
"""Generate an egg-info in the specified base directory."""
# minimal PKG-INFO stub following the format defined in PEP 241
# required for older setuptools versions to avoid a traceback when importing pkg_resources from packages like cryptography
# newer setuptools versions are happy with an empty directory
# including a stub here means we don't need to locate the existing file or have setup.py generate it when running from source
pkg_info = '''
Metadata-Version: 1.0
Name: ansible
Version: %s
Platform: UNKNOWN
Summary: Radically simple IT automation
Author-email: info@ansible.com
License: GPLv3+
''' % get_ansible_version()
pkg_info_path = os.path.join(path, 'ansible_core.egg-info', 'PKG-INFO')
if os.path.exists(pkg_info_path):
return
write_text_file(pkg_info_path, pkg_info.lstrip(), create_directories=True)
class CollectionDetail:
"""Collection detail."""
def __init__(self): # type: () -> None
self.version = None # type: t.Optional[str]
class CollectionDetailError(ApplicationError):
"""An error occurred retrieving collection detail."""
def __init__(self, reason): # type: (str) -> None
super().__init__('Error collecting collection detail: %s' % reason)
self.reason = reason
def get_collection_detail(args, python): # type: (EnvironmentConfig, PythonConfig) -> CollectionDetail
"""Return collection detail."""
collection = data_context().content.collection
directory = os.path.join(collection.root, collection.directory)
stdout = run_command(args, [python.path, os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'collection_detail.py'), directory], capture=True, always=True)[0]
result = json.loads(stdout)
error = result.get('error')
if error:
raise CollectionDetailError(error)
version = result.get('version')
detail = CollectionDetail()
detail.version = str(version) if version is not None else None
return detail
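# Illustrative sketch (inferred from the parsing above, not from the tool itself)
# of the JSON contract expected from collection_detail.py:
#   {"version": "1.2.3"}             -> CollectionDetail with version '1.2.3'
#   {"error": "unable to parse ..."} -> raises CollectionDetailError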
def run_playbook(
args, # type: EnvironmentConfig
inventory_path, # type: str
playbook, # type: str
run_playbook_vars=None, # type: t.Optional[t.Dict[str, t.Any]]
capture=False, # type: bool
): # type: (...) -> None
"""Run the specified playbook using the given inventory file and playbook variables."""
playbook_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'playbooks', playbook)
cmd = ['ansible-playbook', '-i', inventory_path, playbook_path]
if run_playbook_vars:
cmd.extend(['-e', json.dumps(run_playbook_vars)])
if args.verbosity:
cmd.append('-%s' % ('v' * args.verbosity))
install_requirements(args, args.controller_python, ansible=True) # run_playbook()
env = ansible_environment(args)
intercept_python(args, args.controller_python, cmd, env, capture=capture)
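# Illustrative call (the playbook name and variables are hypothetical):
#   run_playbook(args, inventory_path, 'prepare_hosts.yml', run_playbook_vars=dict(output_dir='/tmp/out'))
# which runs, after installing the ansible requirements:
#   ansible-playbook -i <inventory_path> <ANSIBLE_TEST_DATA_ROOT>/playbooks/prepare_hosts.yml -e '{"output_dir": "/tmp/out"}'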

View File

@@ -0,0 +1,52 @@
"""Become abstraction for interacting with test hosts."""
from __future__ import annotations
import abc
import shlex
import typing as t
class Become(metaclass=abc.ABCMeta):
"""Base class for become implementations."""
@property
@abc.abstractmethod
def method(self): # type: () -> str
"""The name of the Ansible become plugin that is equivalent to this."""
@abc.abstractmethod
def prepare_command(self, command): # type: (t.List[str]) -> t.List[str]
"""Return the given command, if any, with privilege escalation."""
class Su(Become):
"""Become using 'su'."""
@property
def method(self): # type: () -> str
"""The name of the Ansible become plugin that is equivalent to this."""
return 'su'
def prepare_command(self, command): # type: (t.List[str]) -> t.List[str]
"""Return the given command, if any, with privilege escalation."""
become = ['su', '-l', 'root']
if command:
become.extend(['-c', ' '.join(shlex.quote(c) for c in command)])
return become
class Sudo(Become):
"""Become using 'sudo'."""
@property
def method(self): # type: () -> str
"""The name of the Ansible become plugin that is equivalent to this."""
return 'sudo'
def prepare_command(self, command): # type: (t.List[str]) -> t.List[str]
"""Return the given command, if any, with privilege escalation."""
become = ['sudo', '-in']
if command:
become.extend(['sh', '-c', ' '.join(shlex.quote(c) for c in command)])
return become
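# Illustrative examples (not part of the original file) of the commands produced:
#   Su().prepare_command(['whoami'])     -> ['su', '-l', 'root', '-c', 'whoami']
#   Sudo().prepare_command(['ls', '-l']) -> ['sudo', '-in', 'sh', '-c', 'ls -l']
#   Sudo().prepare_command([])           -> ['sudo', '-in']  (root login shell, no password prompt)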

View File

@@ -0,0 +1,95 @@
"""Bootstrapping for test hosts."""
from __future__ import annotations
import dataclasses
import os
import typing as t
from .io import (
read_text_file,
)
from .util import (
ANSIBLE_TEST_TARGET_ROOT,
)
from .util_common import (
ShellScriptTemplate,
set_shebang,
)
from .core_ci import (
SshKey,
)
@dataclasses.dataclass
class Bootstrap:
"""Base class for bootstrapping systems."""
controller: bool
python_versions: t.List[str]
ssh_key: SshKey
@property
def bootstrap_type(self): # type: () -> str
"""The bootstrap type to pass to the bootstrapping script."""
return self.__class__.__name__.replace('Bootstrap', '').lower()
def get_variables(self): # type: () -> t.Dict[str, str]
"""The variables to template in the boostrapping script."""
return dict(
bootstrap_type=self.bootstrap_type,
controller='yes' if self.controller else '',
python_versions=self.python_versions,
ssh_key_type=self.ssh_key.KEY_TYPE,
ssh_private_key=self.ssh_key.key_contents,
ssh_public_key=self.ssh_key.pub_contents,
)
def get_script(self): # type: () -> str
"""Return a shell script to bootstrap the specified host."""
path = os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'bootstrap.sh')
content = read_text_file(path)
content = set_shebang(content, '/bin/sh')
template = ShellScriptTemplate(content)
variables = self.get_variables()
script = template.substitute(**variables)
return script
@dataclasses.dataclass
class BootstrapDocker(Bootstrap):
"""Bootstrap docker instances."""
def get_variables(self): # type: () -> t.Dict[str, str]
"""The variables to template in the boostrapping script."""
variables = super().get_variables()
variables.update(
platform='',
platform_version='',
)
return variables
@dataclasses.dataclass
class BootstrapRemote(Bootstrap):
"""Bootstrap remote instances."""
platform: str
platform_version: str
def get_variables(self): # type: () -> t.Dict[str, str]
"""The variables to template in the boostrapping script."""
variables = super().get_variables()
variables.update(
platform=self.platform,
platform_version=self.platform_version,
)
return variables
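# Illustrative example (hypothetical values) of the template variables produced:
#   BootstrapRemote(controller=True, python_versions=['3.10'], ssh_key=key,
#                   platform='freebsd', platform_version='13.1').get_variables()
# returns bootstrap_type='remote', controller='yes', python_versions=['3.10'],
# platform='freebsd' and platform_version='13.1', plus the ssh key material
# taken from 'key'.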

View File

@@ -0,0 +1,30 @@
"""Cache for commonly shared data that is intended to be immutable."""
from __future__ import annotations
import typing as t
from .config import (
CommonConfig,
)
TValue = t.TypeVar('TValue')
class CommonCache:
"""Common cache."""
def __init__(self, args): # type: (CommonConfig) -> None
self.args = args
def get(self, key, factory): # type: (str, t.Callable[[], TValue]) -> TValue
"""Return the value from the cache identified by the given key, using the specified factory method if it is not found."""
if key not in self.args.cache:
self.args.cache[key] = factory()
return self.args.cache[key]
def get_with_args(self, key, factory): # type: (str, t.Callable[[CommonConfig], TValue]) -> TValue
"""Return the value from the cache identified by the given key, using the specified factory method (which accepts args) if it is not found."""
if key not in self.args.cache:
self.args.cache[key] = factory(self.args)
return self.args.cache[key]
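# Illustrative usage (not part of the original file): expensive lookups can be
# shared across a process by keying them on the config's cache dict:
#   cache = CommonCache(args)
#   targets = cache.get('integration_targets', lambda: list(walk_integration_targets()))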

View File

@@ -0,0 +1,214 @@
"""Support code for CI environments."""
from __future__ import annotations
import abc
import base64
import json
import os
import tempfile
import typing as t
from ..encoding import (
to_bytes,
to_text,
)
from ..io import (
read_text_file,
write_text_file,
)
from ..config import (
CommonConfig,
TestConfig,
)
from ..util import (
ApplicationError,
display,
get_subclasses,
import_plugins,
raw_command,
cache,
)
class ChangeDetectionNotSupported(ApplicationError):
"""Exception for cases where change detection is not supported."""
class CIProvider(metaclass=abc.ABCMeta):
"""Base class for CI provider plugins."""
priority = 500
@staticmethod
@abc.abstractmethod
def is_supported(): # type: () -> bool
"""Return True if this provider is supported in the current running environment."""
@property
@abc.abstractmethod
def code(self): # type: () -> str
"""Return a unique code representing this provider."""
@property
@abc.abstractmethod
def name(self): # type: () -> str
"""Return descriptive name for this provider."""
@abc.abstractmethod
def generate_resource_prefix(self): # type: () -> str
"""Return a resource prefix specific to this CI provider."""
@abc.abstractmethod
def get_base_branch(self): # type: () -> str
"""Return the base branch or an empty string."""
@abc.abstractmethod
def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]]
"""Initialize change detection."""
@abc.abstractmethod
def supports_core_ci_auth(self): # type: () -> bool
"""Return True if Ansible Core CI is supported."""
@abc.abstractmethod
def prepare_core_ci_auth(self): # type: () -> t.Dict[str, t.Any]
"""Return authentication details for Ansible Core CI."""
@abc.abstractmethod
def get_git_details(self, args): # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
"""Return details about git in the current environment."""
@cache
def get_ci_provider(): # type: () -> CIProvider
"""Return a CI provider instance for the current environment."""
provider = None
import_plugins('ci')
candidates = sorted(get_subclasses(CIProvider), key=lambda c: (c.priority, c.__name__))
for candidate in candidates:
if candidate.is_supported():
provider = candidate()
break
if provider.code:
display.info('Detected CI provider: %s' % provider.name)
return provider
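# Note (not part of the original file): candidates are sorted by ascending priority,
# so a concrete CI provider using the default priority of 500 is tried before the
# local fallback provider, which uses priority 1000 and always reports itself as supported.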
class AuthHelper(metaclass=abc.ABCMeta):
"""Public key based authentication helper for Ansible Core CI."""
def sign_request(self, request): # type: (t.Dict[str, t.Any]) -> None
"""Sign the given auth request and make the public key available."""
payload_bytes = to_bytes(json.dumps(request, sort_keys=True))
signature_raw_bytes = self.sign_bytes(payload_bytes)
signature = to_text(base64.b64encode(signature_raw_bytes))
request.update(signature=signature)
def initialize_private_key(self): # type: () -> str
"""
Initialize and publish a new key pair (if needed) and return the private key.
The private key is cached across ansible-test invocations so it is only generated and published once per CI job.
"""
path = os.path.expanduser('~/.ansible-core-ci-private.key')
if os.path.exists(to_bytes(path)):
private_key_pem = read_text_file(path)
else:
private_key_pem = self.generate_private_key()
write_text_file(path, private_key_pem)
return private_key_pem
@abc.abstractmethod
def sign_bytes(self, payload_bytes): # type: (bytes) -> bytes
"""Sign the given payload and return the signature, initializing a new key pair if required."""
@abc.abstractmethod
def publish_public_key(self, public_key_pem): # type: (str) -> None
"""Publish the given public key."""
@abc.abstractmethod
def generate_private_key(self): # type: () -> str
"""Generate a new key pair, publishing the public key and returning the private key."""
class CryptographyAuthHelper(AuthHelper, metaclass=abc.ABCMeta):
"""Cryptography based public key based authentication helper for Ansible Core CI."""
def sign_bytes(self, payload_bytes): # type: (bytes) -> bytes
"""Sign the given payload and return the signature, initializing a new key pair if required."""
# import cryptography here to avoid overhead and failures in environments which do not use/provide it
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.serialization import load_pem_private_key
private_key_pem = self.initialize_private_key()
private_key = load_pem_private_key(to_bytes(private_key_pem), None, default_backend())
signature_raw_bytes = private_key.sign(payload_bytes, ec.ECDSA(hashes.SHA256()))
return signature_raw_bytes
def generate_private_key(self): # type: () -> str
"""Generate a new key pair, publishing the public key and returning the private key."""
# import cryptography here to avoid overhead and failures in environments which do not use/provide it
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec
private_key = ec.generate_private_key(ec.SECP384R1(), default_backend())
public_key = private_key.public_key()
# noinspection PyUnresolvedReferences
private_key_pem = to_text(private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
))
# noinspection PyTypeChecker
public_key_pem = to_text(public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
))
self.publish_public_key(public_key_pem)
return private_key_pem
class OpenSSLAuthHelper(AuthHelper, metaclass=abc.ABCMeta):
"""OpenSSL based public key based authentication helper for Ansible Core CI."""
def sign_bytes(self, payload_bytes): # type: (bytes) -> bytes
"""Sign the given payload and return the signature, initializing a new key pair if required."""
private_key_pem = self.initialize_private_key()
with tempfile.NamedTemporaryFile() as private_key_file:
private_key_file.write(to_bytes(private_key_pem))
private_key_file.flush()
with tempfile.NamedTemporaryFile() as payload_file:
payload_file.write(payload_bytes)
payload_file.flush()
with tempfile.NamedTemporaryFile() as signature_file:
raw_command(['openssl', 'dgst', '-sha256', '-sign', private_key_file.name, '-out', signature_file.name, payload_file.name], capture=True)
signature_raw_bytes = signature_file.read()
return signature_raw_bytes
def generate_private_key(self): # type: () -> str
"""Generate a new key pair, publishing the public key and returning the private key."""
private_key_pem = raw_command(['openssl', 'ecparam', '-genkey', '-name', 'secp384r1', '-noout'], capture=True)[0]
public_key_pem = raw_command(['openssl', 'ec', '-pubout'], data=private_key_pem, capture=True)[0]
self.publish_public_key(public_key_pem)
return private_key_pem
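# Illustrative sketch of the signing flow shared by both helpers above:
#   request = dict(org_name='myorg', build_id=123)  # hypothetical payload
#   helper.sign_request(request)
#   # request now also carries 'signature': the base64-encoded ECDSA (SHA-256)
#   # signature of the sorted-keys JSON encoding of the original payload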

View File

@@ -0,0 +1,262 @@
"""Support code for working with Azure Pipelines."""
from __future__ import annotations
import os
import tempfile
import uuid
import typing as t
import urllib.parse
from ..encoding import (
to_bytes,
)
from ..config import (
CommonConfig,
TestConfig,
)
from ..git import (
Git,
)
from ..http import (
HttpClient,
)
from ..util import (
display,
MissingEnvironmentVariable,
)
from . import (
ChangeDetectionNotSupported,
CIProvider,
CryptographyAuthHelper,
)
CODE = 'azp'
class AzurePipelines(CIProvider):
"""CI provider implementation for Azure Pipelines."""
def __init__(self):
self.auth = AzurePipelinesAuthHelper()
@staticmethod
def is_supported(): # type: () -> bool
"""Return True if this provider is supported in the current running environment."""
return os.environ.get('SYSTEM_COLLECTIONURI', '').startswith('https://dev.azure.com/')
@property
def code(self): # type: () -> str
"""Return a unique code representing this provider."""
return CODE
@property
def name(self): # type: () -> str
"""Return descriptive name for this provider."""
return 'Azure Pipelines'
def generate_resource_prefix(self): # type: () -> str
"""Return a resource prefix specific to this CI provider."""
try:
prefix = 'azp-%s-%s-%s' % (
os.environ['BUILD_BUILDID'],
os.environ['SYSTEM_JOBATTEMPT'],
os.environ['SYSTEM_JOBIDENTIFIER'],
)
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0])
return prefix
def get_base_branch(self): # type: () -> str
"""Return the base branch or an empty string."""
base_branch = os.environ.get('SYSTEM_PULLREQUEST_TARGETBRANCH') or os.environ.get('BUILD_SOURCEBRANCHNAME')
if base_branch:
base_branch = 'origin/%s' % base_branch
return base_branch or ''
def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]]
"""Initialize change detection."""
result = AzurePipelinesChanges(args)
if result.is_pr:
job_type = 'pull request'
else:
job_type = 'merge commit'
display.info('Processing %s for branch %s commit %s' % (job_type, result.branch, result.commit))
if not args.metadata.changes:
args.metadata.populate_changes(result.diff)
if result.paths is None:
# There are several likely causes of this:
# - First run on a new branch.
# - Too many pull requests have been merged since the last merge run that passed.
display.warning('No successful commit found. All tests will be executed.')
return result.paths
def supports_core_ci_auth(self): # type: () -> bool
"""Return True if Ansible Core CI is supported."""
return True
def prepare_core_ci_auth(self): # type: () -> t.Dict[str, t.Any]
"""Return authentication details for Ansible Core CI."""
try:
request = dict(
org_name=os.environ['SYSTEM_COLLECTIONURI'].strip('/').split('/')[-1],
project_name=os.environ['SYSTEM_TEAMPROJECT'],
build_id=int(os.environ['BUILD_BUILDID']),
task_id=str(uuid.UUID(os.environ['SYSTEM_TASKINSTANCEID'])),
)
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0])
self.auth.sign_request(request)
auth = dict(
azp=request,
)
return auth
def get_git_details(self, args): # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
"""Return details about git in the current environment."""
changes = AzurePipelinesChanges(args)
details = dict(
base_commit=changes.base_commit,
commit=changes.commit,
)
return details
class AzurePipelinesAuthHelper(CryptographyAuthHelper):
"""
Authentication helper for Azure Pipelines.
Based on cryptography since it is provided by the default Azure Pipelines environment.
"""
def publish_public_key(self, public_key_pem): # type: (str) -> None
"""Publish the given public key."""
try:
agent_temp_directory = os.environ['AGENT_TEMPDIRECTORY']
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0])
# the temporary file cannot be deleted because we do not know when the agent has processed it
# placing the file in the agent's temp directory allows it to be picked up when the job is running in a container
with tempfile.NamedTemporaryFile(prefix='public-key-', suffix='.pem', delete=False, dir=agent_temp_directory) as public_key_file:
public_key_file.write(to_bytes(public_key_pem))
public_key_file.flush()
# make the agent aware of the public key by declaring it as an attachment
vso_add_attachment('ansible-core-ci', 'public-key.pem', public_key_file.name)
class AzurePipelinesChanges:
"""Change information for an Azure Pipelines build."""
def __init__(self, args): # type: (CommonConfig) -> None
self.args = args
self.git = Git()
try:
self.org_uri = os.environ['SYSTEM_COLLECTIONURI'] # ex: https://dev.azure.com/{org}/
self.project = os.environ['SYSTEM_TEAMPROJECT']
self.repo_type = os.environ['BUILD_REPOSITORY_PROVIDER'] # ex: GitHub
self.source_branch = os.environ['BUILD_SOURCEBRANCH']
self.source_branch_name = os.environ['BUILD_SOURCEBRANCHNAME']
self.pr_branch_name = os.environ.get('SYSTEM_PULLREQUEST_TARGETBRANCH')
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0])
if self.source_branch.startswith('refs/tags/'):
raise ChangeDetectionNotSupported('Change detection is not supported for tags.')
self.org = self.org_uri.strip('/').split('/')[-1]
self.is_pr = self.pr_branch_name is not None
if self.is_pr:
# HEAD is a merge commit of the PR branch into the target branch
# HEAD^1 is HEAD of the target branch (first parent of merge commit)
# HEAD^2 is HEAD of the PR branch (second parent of merge commit)
# see: https://git-scm.com/docs/gitrevisions
self.branch = self.pr_branch_name
self.base_commit = 'HEAD^1'
self.commit = 'HEAD^2'
else:
commits = self.get_successful_merge_run_commits()
self.branch = self.source_branch_name
self.base_commit = self.get_last_successful_commit(commits)
self.commit = 'HEAD'
self.commit = self.git.run_git(['rev-parse', self.commit]).strip()
if self.base_commit:
self.base_commit = self.git.run_git(['rev-parse', self.base_commit]).strip()
# <commit>...<commit>
# This form is to view the changes on the branch containing and up to the second <commit>, starting at a common ancestor of both <commit>.
# see: https://git-scm.com/docs/git-diff
dot_range = '%s...%s' % (self.base_commit, self.commit)
self.paths = sorted(self.git.get_diff_names([dot_range]))
self.diff = self.git.get_diff([dot_range])
else:
self.paths = None # act as though change detection not enabled, do not filter targets
self.diff = []
def get_successful_merge_run_commits(self): # type: () -> t.Set[str]
"""Return a set of recent successsful merge commits from Azure Pipelines."""
parameters = dict(
maxBuildsPerDefinition=100, # max 5000
queryOrder='queueTimeDescending', # assumes under normal circumstances that later queued jobs are for later commits
resultFilter='succeeded',
reasonFilter='batchedCI', # may miss some non-PR reasons, the alternative is to filter the list after receiving it
repositoryType=self.repo_type,
repositoryId='%s/%s' % (self.org, self.project),
)
url = '%s%s/_apis/build/builds?api-version=6.0&%s' % (self.org_uri, self.project, urllib.parse.urlencode(parameters))
http = HttpClient(self.args, always=True)
response = http.get(url)
# noinspection PyBroadException
try:
result = response.json()
except Exception: # pylint: disable=broad-except
# most likely due to a private project, which returns an HTTP 203 response with HTML
display.warning('Unable to find project. Cannot determine changes. All tests will be executed.')
return set()
commits = set(build['sourceVersion'] for build in result['value'])
return commits
def get_last_successful_commit(self, commits): # type: (t.Set[str]) -> t.Optional[str]
"""Return the last successful commit from git history that is found in the given commit list, or None."""
commit_history = self.git.get_rev_list(max_count=100)
ordered_successful_commits = [commit for commit in commit_history if commit in commits]
last_successful_commit = ordered_successful_commits[0] if ordered_successful_commits else None
return last_successful_commit
def vso_add_attachment(file_type, file_name, path): # type: (str, str, str) -> None
"""Upload and attach a file to the current timeline record."""
vso('task.addattachment', dict(type=file_type, name=file_name), path)
def vso(name, data, message): # type: (str, t.Dict[str, str], str) -> None
"""
Write a logging command for the Azure Pipelines agent to process.
See: https://docs.microsoft.com/en-us/azure/devops/pipelines/scripts/logging-commands?view=azure-devops&tabs=bash
"""
display.info('##vso[%s %s]%s' % (name, ';'.join('='.join((key, value)) for key, value in data.items()), message))
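# Illustrative output (not part of the original file):
#   vso_add_attachment('ansible-core-ci', 'public-key.pem', '/tmp/key.pem')
# writes the logging command:
#   ##vso[task.addattachment type=ansible-core-ci;name=public-key.pem]/tmp/key.pem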

View File

@@ -0,0 +1,212 @@
"""Support code for working without a supported CI provider."""
from __future__ import annotations
import os
import platform
import random
import re
import typing as t
from ..config import (
CommonConfig,
TestConfig,
)
from ..io import (
read_text_file,
)
from ..git import (
Git,
)
from ..util import (
ApplicationError,
display,
is_binary_file,
SubprocessError,
)
from . import (
CIProvider,
)
CODE = '' # not really a CI provider, so use an empty string for the code
class Local(CIProvider):
"""CI provider implementation when not using CI."""
priority = 1000
@staticmethod
def is_supported(): # type: () -> bool
"""Return True if this provider is supported in the current running environment."""
return True
@property
def code(self): # type: () -> str
"""Return a unique code representing this provider."""
return CODE
@property
def name(self): # type: () -> str
"""Return descriptive name for this provider."""
return 'Local'
def generate_resource_prefix(self): # type: () -> str
"""Return a resource prefix specific to this CI provider."""
prefix = 'ansible-test-%d-%s' % (
random.randint(10000000, 99999999),
platform.node().split('.')[0],
)
return prefix
def get_base_branch(self): # type: () -> str
"""Return the base branch or an empty string."""
return ''
def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]]
"""Initialize change detection."""
result = LocalChanges(args)
display.info('Detected branch %s forked from %s at commit %s' % (
result.current_branch, result.fork_branch, result.fork_point))
if result.untracked and not args.untracked:
display.warning('Ignored %s untracked file(s). Use --untracked to include them.' %
len(result.untracked))
if result.committed and not args.committed:
display.warning('Ignored %s committed change(s). Omit --ignore-committed to include them.' %
len(result.committed))
if result.staged and not args.staged:
display.warning('Ignored %s staged change(s). Omit --ignore-staged to include them.' %
len(result.staged))
if result.unstaged and not args.unstaged:
display.warning('Ignored %s unstaged change(s). Omit --ignore-unstaged to include them.' %
len(result.unstaged))
names = set()
if args.tracked:
names |= set(result.tracked)
if args.untracked:
names |= set(result.untracked)
if args.committed:
names |= set(result.committed)
if args.staged:
names |= set(result.staged)
if args.unstaged:
names |= set(result.unstaged)
if not args.metadata.changes:
args.metadata.populate_changes(result.diff)
for path in result.untracked:
if is_binary_file(path):
args.metadata.changes[path] = ((0, 0),)
continue
line_count = len(read_text_file(path).splitlines())
args.metadata.changes[path] = ((1, line_count),)
return sorted(names)
def supports_core_ci_auth(self): # type: () -> bool
"""Return True if Ansible Core CI is supported."""
path = self._get_aci_key_path()
return os.path.exists(path)
def prepare_core_ci_auth(self): # type: () -> t.Dict[str, t.Any]
"""Return authentication details for Ansible Core CI."""
path = self._get_aci_key_path()
auth_key = read_text_file(path).strip()
request = dict(
key=auth_key,
nonce=None,
)
auth = dict(
remote=request,
)
return auth
def get_git_details(self, args): # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
"""Return details about git in the current environment."""
return None # not yet implemented for local
@staticmethod
def _get_aci_key_path(): # type: () -> str
path = os.path.expanduser('~/.ansible-core-ci.key')
return path
class InvalidBranch(ApplicationError):
"""Exception for invalid branch specification."""
def __init__(self, branch, reason): # type: (str, str) -> None
message = 'Invalid branch: %s\n%s' % (branch, reason)
super().__init__(message)
self.branch = branch
class LocalChanges:
"""Change information for local work."""
def __init__(self, args): # type: (TestConfig) -> None
self.args = args
self.git = Git()
self.current_branch = self.git.get_branch()
if self.is_official_branch(self.current_branch):
raise InvalidBranch(branch=self.current_branch,
reason='Current branch is not a feature branch.')
self.fork_branch = None
self.fork_point = None
self.local_branches = sorted(self.git.get_branches())
self.official_branches = sorted([b for b in self.local_branches if self.is_official_branch(b)])
for self.fork_branch in self.official_branches:
try:
self.fork_point = self.git.get_branch_fork_point(self.fork_branch)
break
except SubprocessError:
pass
if self.fork_point is None:
raise ApplicationError('Unable to auto-detect fork branch and fork point.')
# tracked files (including unchanged)
self.tracked = sorted(self.git.get_file_names(['--cached']))
# untracked files (except ignored)
self.untracked = sorted(self.git.get_file_names(['--others', '--exclude-standard']))
# tracked changes (including deletions) committed since the branch was forked
self.committed = sorted(self.git.get_diff_names([self.fork_point, 'HEAD']))
# tracked changes (including deletions) which are staged
self.staged = sorted(self.git.get_diff_names(['--cached']))
# tracked changes (including deletions) which are not staged
self.unstaged = sorted(self.git.get_diff_names([]))
# diff of all tracked files from fork point to working copy
self.diff = self.git.get_diff([self.fork_point])
def is_official_branch(self, name): # type: (str) -> bool
"""Return True if the given branch name an official branch for development or releases."""
if self.args.base_branch:
return name == self.args.base_branch
if name == 'devel':
return True
if re.match(r'^stable-[0-9]+\.[0-9]+$', name):
return True
return False
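# Illustrative examples (with args.base_branch unset):
#   is_official_branch('devel')       -> True
#   is_official_branch('stable-2.14') -> True   (matches stable-<major>.<minor>)
#   is_official_branch('my-feature')  -> False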

View File

@@ -0,0 +1,900 @@
"""Classify changes in Ansible code."""
from __future__ import annotations
import collections
import os
import re
import time
import typing as t
from ..target import (
walk_module_targets,
walk_integration_targets,
walk_units_targets,
walk_compile_targets,
walk_sanity_targets,
load_integration_prefixes,
analyze_integration_target_dependencies,
)
from ..util import (
display,
is_subdir,
)
from .python import (
get_python_module_utils_imports,
get_python_module_utils_name,
)
from .csharp import (
get_csharp_module_utils_imports,
get_csharp_module_utils_name,
)
from .powershell import (
get_powershell_module_utils_imports,
get_powershell_module_utils_name,
)
from ..config import (
TestConfig,
IntegrationConfig,
)
from ..metadata import (
ChangeDescription,
)
from ..data import (
data_context,
)
FOCUSED_TARGET = '__focused__'
def categorize_changes(args, paths, verbose_command=None): # type: (TestConfig, t.List[str], t.Optional[str]) -> ChangeDescription
"""Categorize the given list of changed paths and return a description of the changes."""
mapper = PathMapper(args)
commands = {
'sanity': set(),
'units': set(),
'integration': set(),
'windows-integration': set(),
'network-integration': set(),
}
focused_commands = collections.defaultdict(set)
deleted_paths = set()
original_paths = set()
additional_paths = set()
no_integration_paths = set()
for path in paths:
if not os.path.exists(path):
deleted_paths.add(path)
continue
original_paths.add(path)
dependent_paths = mapper.get_dependent_paths(path)
if not dependent_paths:
continue
display.info('Expanded "%s" to %d dependent file(s):' % (path, len(dependent_paths)), verbosity=2)
for dependent_path in dependent_paths:
display.info(dependent_path, verbosity=2)
additional_paths.add(dependent_path)
additional_paths -= set(paths) # don't count changed paths as additional paths
if additional_paths:
display.info('Expanded %d changed file(s) into %d additional dependent file(s).' % (len(paths), len(additional_paths)))
paths = sorted(set(paths) | additional_paths)
display.info('Mapping %d changed file(s) to tests.' % len(paths))
none_count = 0
for path in paths:
tests = mapper.classify(path)
if tests is None:
focused_target = False
display.info('%s -> all' % path, verbosity=1)
tests = all_tests(args) # not categorized, run all tests
display.warning('Path not categorized: %s' % path)
else:
focused_target = tests.pop(FOCUSED_TARGET, False) and path in original_paths
tests = dict((key, value) for key, value in tests.items() if value)
if focused_target and not any('integration' in command for command in tests):
no_integration_paths.add(path) # path triggers no integration tests
if verbose_command:
result = '%s: %s' % (verbose_command, tests.get(verbose_command) or 'none')
# identify targeted integration tests (those which only target a single integration command)
if 'integration' in verbose_command and tests.get(verbose_command):
if not any('integration' in command for command in tests if command != verbose_command):
if focused_target:
result += ' (focused)'
result += ' (targeted)'
else:
result = '%s' % tests
if not tests.get(verbose_command):
# minimize excessive output from potentially thousands of files which do not trigger tests
none_count += 1
verbosity = 2
else:
verbosity = 1
if args.verbosity >= verbosity:
display.info('%s -> %s' % (path, result), verbosity=1)
for command, target in tests.items():
commands[command].add(target)
if focused_target:
focused_commands[command].add(target)
if none_count > 0 and args.verbosity < 2:
display.notice('Omitted %d file(s) that triggered no tests.' % none_count)
for command, targets in commands.items():
targets.discard('none')
if any(target == 'all' for target in targets):
commands[command] = {'all'}
commands = dict((c, sorted(targets)) for c, targets in commands.items() if targets)
focused_commands = dict((c, sorted(targets)) for c, targets in focused_commands.items())
for command, targets in commands.items():
if targets == ['all']:
commands[command] = [] # changes require testing all targets, do not filter targets
changes = ChangeDescription()
changes.command = verbose_command
changes.changed_paths = sorted(original_paths)
changes.deleted_paths = sorted(deleted_paths)
changes.regular_command_targets = commands
changes.focused_command_targets = focused_commands
changes.no_integration_paths = sorted(no_integration_paths)
return changes
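# Illustrative outcome (hypothetical, not part of the original file): a change to a
# single module typically yields a focused target per command, e.g.
#   commands = {'units': ['test/units/modules/test_ping.py'], 'integration': ['ping']}
# while any path which cannot be classified falls back to all_tests() and a warning.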
class PathMapper:
"""Map file paths to test commands and targets."""
def __init__(self, args): # type: (TestConfig) -> None
self.args = args
self.integration_all_target = get_integration_all_target(self.args)
self.integration_targets = list(walk_integration_targets())
self.module_targets = list(walk_module_targets())
self.compile_targets = list(walk_compile_targets())
self.units_targets = list(walk_units_targets())
self.sanity_targets = list(walk_sanity_targets())
self.powershell_targets = [target for target in self.sanity_targets if os.path.splitext(target.path)[1] in ('.ps1', '.psm1')]
self.csharp_targets = [target for target in self.sanity_targets if os.path.splitext(target.path)[1] == '.cs']
self.units_modules = set(target.module for target in self.units_targets if target.module)
self.units_paths = set(a for target in self.units_targets for a in target.aliases)
self.sanity_paths = set(target.path for target in self.sanity_targets)
self.module_names_by_path = dict((target.path, target.module) for target in self.module_targets)
self.integration_targets_by_name = dict((target.name, target) for target in self.integration_targets)
self.integration_targets_by_alias = dict((a, target) for target in self.integration_targets for a in target.aliases)
self.posix_integration_by_module = dict((m, target.name) for target in self.integration_targets
if 'posix/' in target.aliases for m in target.modules)
self.windows_integration_by_module = dict((m, target.name) for target in self.integration_targets
if 'windows/' in target.aliases for m in target.modules)
self.network_integration_by_module = dict((m, target.name) for target in self.integration_targets
if 'network/' in target.aliases for m in target.modules)
self.prefixes = load_integration_prefixes()
self.integration_dependencies = analyze_integration_target_dependencies(self.integration_targets)
self.python_module_utils_imports = {} # populated on first use to reduce overhead when not needed
self.powershell_module_utils_imports = {} # populated on first use to reduce overhead when not needed
self.csharp_module_utils_imports = {} # populated on first use to reduce overhead when not needed
self.paths_to_dependent_targets = {}
for target in self.integration_targets:
for path in target.needs_file:
if path not in self.paths_to_dependent_targets:
self.paths_to_dependent_targets[path] = set()
self.paths_to_dependent_targets[path].add(target)
def get_dependent_paths(self, path): # type: (str) -> t.List[str]
"""Return a list of paths which depend on the given path, recursively expanding dependent paths as well."""
unprocessed_paths = set(self.get_dependent_paths_non_recursive(path))
paths = set()
while unprocessed_paths:
queued_paths = list(unprocessed_paths)
paths |= unprocessed_paths
unprocessed_paths = set()
for queued_path in queued_paths:
new_paths = self.get_dependent_paths_non_recursive(queued_path)
for new_path in new_paths:
if new_path not in paths:
unprocessed_paths.add(new_path)
return sorted(paths)
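# Illustrative expansion (hypothetical paths, not part of the original file):
# if utils.py is imported by module_a.py, and module_a.py is listed in an
# integration target's needs_file, get_dependent_paths('utils.py') returns
# both module_a.py and the target directory, because every newly discovered
# path is itself re-expanded until no new paths appear.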
def get_dependent_paths_non_recursive(self, path): # type: (str) -> t.List[str]
"""Return a list of paths which depend on the given path, including dependent integration test target paths."""
paths = self.get_dependent_paths_internal(path)
paths += [target.path + '/' for target in self.paths_to_dependent_targets.get(path, set())]
paths = sorted(set(paths))
return paths
def get_dependent_paths_internal(self, path): # type: (str) -> t.List[str]
"""Return a list of paths which depend on the given path."""
ext = os.path.splitext(os.path.split(path)[1])[1]
if is_subdir(path, data_context().content.module_utils_path):
if ext == '.py':
return self.get_python_module_utils_usage(path)
if ext == '.psm1':
return self.get_powershell_module_utils_usage(path)
if ext == '.cs':
return self.get_csharp_module_utils_usage(path)
if is_subdir(path, data_context().content.integration_targets_path):
return self.get_integration_target_usage(path)
return []
def get_python_module_utils_usage(self, path): # type: (str) -> t.List[str]
"""Return a list of paths which depend on the given path which is a Python module_utils file."""
if not self.python_module_utils_imports:
display.info('Analyzing python module_utils imports...')
before = time.time()
self.python_module_utils_imports = get_python_module_utils_imports(self.compile_targets)
after = time.time()
display.info('Processed %d python module_utils in %d second(s).' % (len(self.python_module_utils_imports), after - before))
name = get_python_module_utils_name(path)
return sorted(self.python_module_utils_imports[name])
def get_powershell_module_utils_usage(self, path): # type: (str) -> t.List[str]
"""Return a list of paths which depend on the given path which is a PowerShell module_utils file."""
if not self.powershell_module_utils_imports:
display.info('Analyzing powershell module_utils imports...')
before = time.time()
self.powershell_module_utils_imports = get_powershell_module_utils_imports(self.powershell_targets)
after = time.time()
display.info('Processed %d powershell module_utils in %d second(s).' % (len(self.powershell_module_utils_imports), after - before))
name = get_powershell_module_utils_name(path)
return sorted(self.powershell_module_utils_imports[name])
def get_csharp_module_utils_usage(self, path): # type: (str) -> t.List[str]
"""Return a list of paths which depend on the given path which is a C# module_utils file."""
if not self.csharp_module_utils_imports:
display.info('Analyzing C# module_utils imports...')
before = time.time()
self.csharp_module_utils_imports = get_csharp_module_utils_imports(self.powershell_targets, self.csharp_targets)
after = time.time()
display.info('Processed %d C# module_utils in %d second(s).' % (len(self.csharp_module_utils_imports), after - before))
name = get_csharp_module_utils_name(path)
return sorted(self.csharp_module_utils_imports[name])
def get_integration_target_usage(self, path): # type: (str) -> t.List[str]
"""Return a list of paths which depend on the given path which is an integration target file."""
target_name = path.split('/')[3]
dependents = [os.path.join(data_context().content.integration_targets_path, target) + os.path.sep
for target in sorted(self.integration_dependencies.get(target_name, set()))]
return dependents
def classify(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
"""Classify the given path and return an optional dictionary of the results."""
result = self._classify(path)
# run all tests when no result given
if result is None:
return None
# run sanity on path unless result specified otherwise
if path in self.sanity_paths and 'sanity' not in result:
result['sanity'] = path
return result
def _classify(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
"""Return the classification for the given path."""
if data_context().content.is_ansible:
return self._classify_ansible(path)
if data_context().content.collection:
return self._classify_collection(path)
return None
def _classify_common(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
"""Return the classification for the given path using rules common to all layouts."""
dirname = os.path.dirname(path)
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
minimal = {}
if os.path.sep not in path:
if filename in (
'azure-pipelines.yml',
):
return all_tests(self.args) # test infrastructure, run all tests
if is_subdir(path, '.azure-pipelines'):
return all_tests(self.args) # test infrastructure, run all tests
if is_subdir(path, '.github'):
return minimal
if is_subdir(path, data_context().content.integration_targets_path):
if not os.path.exists(path):
return minimal
target = self.integration_targets_by_name.get(path.split('/')[3])
if not target:
display.warning('Unexpected non-target found: %s' % path)
return minimal
if 'hidden/' in target.aliases:
return minimal # already expanded using get_dependent_paths
return {
'integration': target.name if 'posix/' in target.aliases else None,
'windows-integration': target.name if 'windows/' in target.aliases else None,
'network-integration': target.name if 'network/' in target.aliases else None,
FOCUSED_TARGET: True,
}
if is_subdir(path, data_context().content.integration_path):
if dirname == data_context().content.integration_path:
for command in (
'integration',
'windows-integration',
'network-integration',
):
if name == command and ext == '.cfg':
return {
command: self.integration_all_target,
}
if name == command + '.requirements' and ext == '.txt':
return {
command: self.integration_all_target,
}
return {
'integration': self.integration_all_target,
'windows-integration': self.integration_all_target,
'network-integration': self.integration_all_target,
}
if is_subdir(path, data_context().content.sanity_path):
return {
'sanity': 'all', # test infrastructure, run all sanity checks
}
if is_subdir(path, data_context().content.unit_path):
if path in self.units_paths:
return {
'units': path,
}
# changes to files which are not unit tests should trigger tests from the nearest parent directory
test_path = os.path.dirname(path)
while test_path:
if test_path + '/' in self.units_paths:
return {
'units': test_path + '/',
}
test_path = os.path.dirname(test_path)
if is_subdir(path, data_context().content.module_path):
module_name = self.module_names_by_path.get(path)
if module_name:
return {
'units': module_name if module_name in self.units_modules else None,
'integration': self.posix_integration_by_module.get(module_name) if ext == '.py' else None,
'windows-integration': self.windows_integration_by_module.get(module_name) if ext in ['.cs', '.ps1'] else None,
'network-integration': self.network_integration_by_module.get(module_name),
FOCUSED_TARGET: True,
}
return minimal
if is_subdir(path, data_context().content.module_utils_path):
if ext == '.cs':
return minimal # already expanded using get_dependent_paths
if ext == '.psm1':
return minimal # already expanded using get_dependent_paths
if ext == '.py':
return minimal # already expanded using get_dependent_paths
if is_subdir(path, data_context().content.plugin_paths['action']):
if ext == '.py':
if name.startswith('net_'):
network_target = 'network/.*_%s' % name[4:]
if any(re.search(r'^%s$' % network_target, alias) for alias in self.integration_targets_by_alias):
return {
'network-integration': network_target,
'units': 'all',
}
return {
'network-integration': self.integration_all_target,
'units': 'all',
}
if self.prefixes.get(name) == 'network':
network_platform = name
elif name.endswith('_config') and self.prefixes.get(name[:-7]) == 'network':
network_platform = name[:-7]
elif name.endswith('_template') and self.prefixes.get(name[:-9]) == 'network':
network_platform = name[:-9]
else:
network_platform = None
if network_platform:
network_target = 'network/%s/' % network_platform
if network_target in self.integration_targets_by_alias:
return {
'network-integration': network_target,
'units': 'all',
}
display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
return {
'units': 'all',
}
if is_subdir(path, data_context().content.plugin_paths['connection']):
units_dir = os.path.join(data_context().content.unit_path, 'plugins', 'connection')
if name == '__init__':
return {
'integration': self.integration_all_target,
'windows-integration': self.integration_all_target,
'network-integration': self.integration_all_target,
'units': os.path.join(units_dir, ''),
}
units_path = os.path.join(units_dir, 'test_%s.py' % name)
if units_path not in self.units_paths:
units_path = None
integration_name = 'connection_%s' % name
if integration_name not in self.integration_targets_by_name:
integration_name = None
windows_integration_name = 'connection_windows_%s' % name
if windows_integration_name not in self.integration_targets_by_name:
windows_integration_name = None
# entire integration test commands depend on these connection plugins
if name in ['winrm', 'psrp']:
return {
'windows-integration': self.integration_all_target,
'units': units_path,
}
if name == 'local':
return {
'integration': self.integration_all_target,
'network-integration': self.integration_all_target,
'units': units_path,
}
if name == 'network_cli':
return {
'network-integration': self.integration_all_target,
'units': units_path,
}
if name == 'paramiko_ssh':
return {
'integration': integration_name,
'network-integration': self.integration_all_target,
'units': units_path,
}
# other connection plugins have isolated integration and unit tests
return {
'integration': integration_name,
'windows-integration': windows_integration_name,
'units': units_path,
}
if is_subdir(path, data_context().content.plugin_paths['doc_fragments']):
return {
'sanity': 'all',
}
if is_subdir(path, data_context().content.plugin_paths['inventory']):
if name == '__init__':
return all_tests(self.args) # broad impact, run all tests
# These inventory plugins are enabled by default (see INVENTORY_ENABLED).
# Without dedicated integration tests for these we must rely on the incidental coverage from other tests.
test_all = [
'host_list',
'script',
'yaml',
'ini',
'auto',
]
if name in test_all:
posix_integration_fallback = get_integration_all_target(self.args)
else:
posix_integration_fallback = None
target = self.integration_targets_by_name.get('inventory_%s' % name)
units_dir = os.path.join(data_context().content.unit_path, 'plugins', 'inventory')
units_path = os.path.join(units_dir, 'test_%s.py' % name)
if units_path not in self.units_paths:
units_path = None
return {
'integration': target.name if target and 'posix/' in target.aliases else posix_integration_fallback,
'windows-integration': target.name if target and 'windows/' in target.aliases else None,
'network-integration': target.name if target and 'network/' in target.aliases else None,
'units': units_path,
FOCUSED_TARGET: target is not None,
}
if is_subdir(path, data_context().content.plugin_paths['filter']):
return self._simple_plugin_tests('filter', name)
if is_subdir(path, data_context().content.plugin_paths['lookup']):
return self._simple_plugin_tests('lookup', name)
if (is_subdir(path, data_context().content.plugin_paths['terminal']) or
is_subdir(path, data_context().content.plugin_paths['cliconf']) or
is_subdir(path, data_context().content.plugin_paths['netconf'])):
if ext == '.py':
if name in self.prefixes and self.prefixes[name] == 'network':
network_target = 'network/%s/' % name
if network_target in self.integration_targets_by_alias:
return {
'network-integration': network_target,
'units': 'all',
}
display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
return {
'units': 'all',
}
return {
'network-integration': self.integration_all_target,
'units': 'all',
}
if is_subdir(path, data_context().content.plugin_paths['test']):
return self._simple_plugin_tests('test', name)
return None
def _classify_collection(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
"""Return the classification for the given path using rules specific to collections."""
result = self._classify_common(path)
if result is not None:
return result
filename = os.path.basename(path)
dummy, ext = os.path.splitext(filename)
minimal = {}
if path.startswith('changelogs/'):
return minimal
if path.startswith('docs/'):
return minimal
if '/' not in path:
if path in (
'.gitignore',
'COPYING',
'LICENSE',
'Makefile',
):
return minimal
if ext in (
'.in',
'.md',
'.rst',
'.toml',
'.txt',
):
return minimal
return None
def _classify_ansible(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
"""Return the classification for the given path using rules specific to Ansible."""
if path.startswith('test/units/compat/'):
return {
'units': 'test/units/',
}
result = self._classify_common(path)
if result is not None:
return result
dirname = os.path.dirname(path)
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
minimal = {}
if path.startswith('bin/'):
return all_tests(self.args) # broad impact, run all tests
if path.startswith('changelogs/'):
return minimal
if path.startswith('docs/'):
return minimal
if path.startswith('examples/'):
if path == 'examples/scripts/ConfigureRemotingForAnsible.ps1':
return {
'windows-integration': 'connection_winrm',
}
return minimal
if path.startswith('hacking/'):
return minimal
if path.startswith('lib/ansible/executor/powershell/'):
units_path = 'test/units/executor/powershell/'
if units_path not in self.units_paths:
units_path = None
return {
'windows-integration': self.integration_all_target,
'units': units_path,
}
if path.startswith('lib/ansible/'):
return all_tests(self.args) # broad impact, run all tests
if path.startswith('licenses/'):
return minimal
if path.startswith('packaging/'):
return minimal
if path.startswith('test/ansible_test/'):
return minimal # these tests are not invoked from ansible-test
if path.startswith('test/lib/ansible_test/config/'):
if name.startswith('cloud-config-'):
# noinspection PyTypeChecker
cloud_target = 'cloud/%s/' % name.split('-')[2].split('.')[0]
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
if path.startswith('test/lib/ansible_test/_data/completion/'):
if path == 'test/lib/ansible_test/_data/completion/docker.txt':
return all_tests(self.args, force=True) # force all tests due to risk of breaking changes in new test environment
if path.startswith('test/lib/ansible_test/_internal/commands/integration/cloud/'):
cloud_target = 'cloud/%s/' % name
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/lib/ansible_test/_internal/commands/sanity/'):
return {
'sanity': 'all', # test infrastructure, run all sanity checks
'integration': 'ansible-test', # run ansible-test self tests
}
if path.startswith('test/lib/ansible_test/_internal/commands/units/'):
return {
'units': 'all', # test infrastructure, run all unit tests
'integration': 'ansible-test', # run ansible-test self tests
}
if path.startswith('test/lib/ansible_test/_data/requirements/'):
if name in (
'integration',
'network-integration',
'windows-integration',
):
return {
name: self.integration_all_target,
}
if name in (
'sanity',
'units',
):
return {
name: 'all',
}
if path.startswith('test/lib/ansible_test/_util/controller/sanity/') or path.startswith('test/lib/ansible_test/_util/target/sanity/'):
return {
'sanity': 'all', # test infrastructure, run all sanity checks
'integration': 'ansible-test', # run ansible-test self tests
}
if path.startswith('test/lib/ansible_test/_util/target/pytest/'):
return {
'units': 'all', # test infrastructure, run all unit tests
'integration': 'ansible-test', # run ansible-test self tests
}
if path.startswith('test/lib/'):
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/support/'):
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/utils/shippable/'):
if dirname == 'test/utils/shippable':
test_map = {
'cloud.sh': 'integration:cloud/',
'linux.sh': 'integration:all',
'network.sh': 'network-integration:all',
'remote.sh': 'integration:all',
'sanity.sh': 'sanity:all',
'units.sh': 'units:all',
'windows.sh': 'windows-integration:all',
}
test_match = test_map.get(filename)
if test_match:
test_command, test_target = test_match.split(':')
return {
test_command: test_target,
}
cloud_target = 'cloud/%s/' % name
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/utils/'):
return minimal
if '/' not in path:
if path in (
'.gitattributes',
'.gitignore',
'.mailmap',
'COPYING',
'Makefile',
):
return minimal
if path in (
'setup.py',
):
return all_tests(self.args) # broad impact, run all tests
if ext in (
'.in',
'.md',
'.rst',
'.toml',
'.txt',
):
return minimal
return None # unknown, will result in fall-back to run all tests
def _simple_plugin_tests(self, plugin_type, plugin_name): # type: (str, str) -> t.Dict[str, t.Optional[str]]
"""
Return tests for the given plugin type and plugin name.
This function is useful for plugin types which do not require special processing.
"""
if plugin_name == '__init__':
return all_tests(self.args, True)
integration_target = self.integration_targets_by_name.get('%s_%s' % (plugin_type, plugin_name))
if integration_target:
integration_name = integration_target.name
else:
integration_name = None
units_path = os.path.join(data_context().content.unit_path, 'plugins', plugin_type, 'test_%s.py' % plugin_name)
if units_path not in self.units_paths:
units_path = None
return dict(
integration=integration_name,
units=units_path,
)
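# Illustrative sketch (not part of the original file): for a hypothetical filter
# plugin named "foo", this would look up the integration target "filter_foo" and
# the unit test file "test/units/plugins/filter/test_foo.py", returning None for
# whichever of the two does not exist.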
def all_tests(args, force=False): # type: (TestConfig, bool) -> t.Dict[str, str]
"""Return the targets for each test command when all tests should be run."""
if force:
integration_all_target = 'all'
else:
integration_all_target = get_integration_all_target(args)
return {
'sanity': 'all',
'units': 'all',
'integration': integration_all_target,
'windows-integration': integration_all_target,
'network-integration': integration_all_target,
}
def get_integration_all_target(args): # type: (TestConfig) -> str
"""Return the target to use when all tests should be run."""
if isinstance(args, IntegrationConfig):
return args.changed_all_target
return 'all'

View File

@@ -0,0 +1,26 @@
"""Common classification code used by multiple languages."""
from __future__ import annotations
import os
from ..data import (
data_context,
)
def resolve_csharp_ps_util(import_name, path): # type: (str, str) -> str
"""Return the fully qualified name of the given import if possible, otherwise return the original import name."""
if data_context().content.is_ansible or not import_name.startswith('.'):
# We don't support relative paths for builtin utils, there's no point.
return import_name
packages = import_name.split('.')
module_packages = path.split(os.path.sep)
for package in packages:
if not module_packages or package:
break
del module_packages[-1]
return 'ansible_collections.%s%s' % (data_context().content.prefix,
'.'.join(module_packages + [p for p in packages if p]))
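# Illustrative example (not part of the original file): within a collection, a
# module at the assumed path "plugins/modules/win_example.ps1" importing
# "..module_utils.Helper" drops one path component per leading dot, yielding
# "ansible_collections.<ns>.<col>.plugins.module_utils.Helper" (the collection
# prefix already ends with a dot).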

View File

@@ -0,0 +1,98 @@
"""Analyze C# import statements."""
from __future__ import annotations
import os
import re
import typing as t
from ..io import (
open_text_file,
)
from ..util import (
display,
)
from .common import (
resolve_csharp_ps_util,
)
from ..data import (
data_context,
)
from ..target import (
TestTarget,
)
def get_csharp_module_utils_imports(powershell_targets, csharp_targets): # type: (t.List[TestTarget], t.List[TestTarget]) -> t.Dict[str, t.Set[str]]
"""Return a dictionary of module_utils names mapped to sets of powershell file paths."""
module_utils = enumerate_module_utils()
imports_by_target_path = {}
for target in powershell_targets:
imports_by_target_path[target.path] = extract_csharp_module_utils_imports(target.path, module_utils, False)
for target in csharp_targets:
imports_by_target_path[target.path] = extract_csharp_module_utils_imports(target.path, module_utils, True)
imports = {module_util: set() for module_util in module_utils} # type: t.Dict[str, t.Set[str]]
for target_path, modules in imports_by_target_path.items():
for module_util in modules:
imports[module_util].add(target_path)
for module_util in sorted(imports):
if not imports[module_util]:
display.warning('No imports found which use the "%s" module_util.' % module_util)
return imports
def get_csharp_module_utils_name(path): # type: (str) -> str
"""Return a namespace and name from the given module_utils path."""
base_path = data_context().content.module_utils_csharp_path
if data_context().content.collection:
prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils.'
else:
prefix = ''
name = prefix + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
return name
def enumerate_module_utils(): # type: () -> t.Set[str]
"""Return a set of available module_utils imports."""
return set(get_csharp_module_utils_name(p)
for p in data_context().content.walk_files(data_context().content.module_utils_csharp_path)
if os.path.splitext(p)[1] == '.cs')
def extract_csharp_module_utils_imports(path, module_utils, is_pure_csharp): # type: (str, t.Set[str], bool) -> t.Set[str]
"""Return a set of module_utils imports found in the specified source file."""
imports = set()
if is_pure_csharp:
pattern = re.compile(r'(?i)^using\s((?:Ansible|AnsibleCollections)\..+);$')
else:
pattern = re.compile(r'(?i)^#\s*ansiblerequires\s+-csharputil\s+((?:Ansible|ansible.collections|\.)\..+)')
with open_text_file(path) as module_file:
for line_number, line in enumerate(module_file, 1):
match = re.search(pattern, line)
if not match:
continue
import_name = resolve_csharp_ps_util(match.group(1), path)
if import_name in module_utils:
imports.add(import_name)
elif data_context().content.is_ansible or \
import_name.startswith('ansible_collections.%s' % data_context().content.prefix):
display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name))
return imports
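# Illustrative examples of lines these patterns match (assumed file content, not
# part of the original source):
#   pure C# module_utils:  using Ansible.Become;
#   PowerShell module:     #AnsibleRequires -CSharpUtil Ansible.Basic
# Matched names are resolved via resolve_csharp_ps_util before being checked
# against the known module_utils set.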

View File

@@ -0,0 +1,99 @@
"""Analyze powershell import statements."""
from __future__ import annotations
import os
import re
import typing as t
from ..io import (
read_text_file,
)
from ..util import (
display,
)
from .common import (
resolve_csharp_ps_util,
)
from ..data import (
data_context,
)
from ..target import (
TestTarget,
)
def get_powershell_module_utils_imports(powershell_targets): # type: (t.List[TestTarget]) -> t.Dict[str, t.Set[str]]
"""Return a dictionary of module_utils names mapped to sets of powershell file paths."""
module_utils = enumerate_module_utils()
imports_by_target_path = {}
for target in powershell_targets:
imports_by_target_path[target.path] = extract_powershell_module_utils_imports(target.path, module_utils)
imports = {module_util: set() for module_util in module_utils} # type: t.Dict[str, t.Set[str]]
for target_path, modules in imports_by_target_path.items():
for module_util in modules:
imports[module_util].add(target_path)
for module_util in sorted(imports):
if not imports[module_util]:
display.warning('No imports found which use the "%s" module_util.' % module_util)
return imports
def get_powershell_module_utils_name(path): # type: (str) -> str
"""Return a namespace and name from the given module_utils path."""
base_path = data_context().content.module_utils_powershell_path
if data_context().content.collection:
prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils.'
else:
prefix = ''
name = prefix + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
return name
def enumerate_module_utils(): # type: () -> t.Set[str]
"""Return a set of available module_utils imports."""
return set(get_powershell_module_utils_name(p)
for p in data_context().content.walk_files(data_context().content.module_utils_powershell_path)
if os.path.splitext(p)[1] == '.psm1')
def extract_powershell_module_utils_imports(path, module_utils): # type: (str, t.Set[str]) -> t.Set[str]
"""Return a set of module_utils imports found in the specified source file."""
imports = set()
code = read_text_file(path)
if data_context().content.is_ansible and '# POWERSHELL_COMMON' in code:
imports.add('Ansible.ModuleUtils.Legacy')
lines = code.splitlines()
line_number = 0
for line in lines:
line_number += 1
match = re.search(r'(?i)^#\s*(?:requires\s+-module(?:s?)|ansiblerequires\s+-powershell)\s*((?:Ansible|ansible_collections|\.)\..+)', line)
if not match:
continue
import_name = resolve_csharp_ps_util(match.group(1), path)
if import_name in module_utils:
imports.add(import_name)
elif data_context().content.is_ansible or \
import_name.startswith('ansible_collections.%s' % data_context().content.prefix):
display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name))
return imports
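# Illustrative examples of lines the regex above matches (assumed file content,
# not part of the original source):
#   #Requires -Module Ansible.ModuleUtils.Legacy
#   #AnsibleRequires -PowerShell ansible_collections.ns.col.plugins.module_utils.Util
# where "ns.col" and "Util" are placeholder names.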

View File

@@ -0,0 +1,346 @@
"""Analyze python import statements."""
from __future__ import annotations
import ast
import os
import re
import typing as t
from ..io import (
read_binary_file,
)
from ..util import (
display,
ApplicationError,
is_subdir,
)
from ..data import (
data_context,
)
from ..target import (
TestTarget,
)
VIRTUAL_PACKAGES = {
'ansible.module_utils.six',
}
def get_python_module_utils_imports(compile_targets): # type: (t.List[TestTarget]) -> t.Dict[str, t.Set[str]]
"""Return a dictionary of module_utils names mapped to sets of python file paths."""
module_utils = enumerate_module_utils()
virtual_utils = set(m for m in module_utils if any(m.startswith('%s.' % v) for v in VIRTUAL_PACKAGES))
module_utils -= virtual_utils
imports_by_target_path = {}
for target in compile_targets:
imports_by_target_path[target.path] = extract_python_module_utils_imports(target.path, module_utils)
def recurse_import(import_name, depth=0, seen=None): # type: (str, int, t.Optional[t.Set[str]]) -> t.Set[str]
"""Recursively expand module_utils imports from module_utils files."""
display.info('module_utils import: %s%s' % (' ' * depth, import_name), verbosity=4)
if seen is None:
seen = {import_name}
results = {import_name}
# virtual packages depend on the modules they contain instead of the reverse
if import_name in VIRTUAL_PACKAGES:
for sub_import in sorted(virtual_utils):
if sub_import.startswith('%s.' % import_name):
if sub_import in seen:
continue
seen.add(sub_import)
matches = sorted(recurse_import(sub_import, depth + 1, seen))
for result in matches:
results.add(result)
import_path = get_import_path(import_name)
if import_path not in imports_by_target_path:
import_path = get_import_path(import_name, package=True)
if import_path not in imports_by_target_path:
raise ApplicationError('Cannot determine path for module_utils import: %s' % import_name)
# process imports in reverse so the deepest imports come first
for name in sorted(imports_by_target_path[import_path], reverse=True):
if name in virtual_utils:
continue
if name in seen:
continue
seen.add(name)
matches = sorted(recurse_import(name, depth + 1, seen))
for result in matches:
results.add(result)
return results
for module_util in module_utils:
# recurse over module_utils imports while excluding self
module_util_imports = recurse_import(module_util)
module_util_imports.remove(module_util)
# add recursive imports to all path entries which import this module_util
for target_path, modules in imports_by_target_path.items():
if module_util in modules:
for module_util_import in sorted(module_util_imports):
if module_util_import not in modules:
display.info('%s inherits import %s via %s' % (target_path, module_util_import, module_util), verbosity=6)
modules.add(module_util_import)
imports = {module_util: set() for module_util in module_utils | virtual_utils} # type: t.Dict[str, t.Set[str]]
for target_path, modules in imports_by_target_path.items():
for module_util in modules:
imports[module_util].add(target_path)
# for purposes of mapping module_utils to paths, treat imports of virtual utils the same as the parent package
for virtual_util in virtual_utils:
parent_package = '.'.join(virtual_util.split('.')[:-1])
imports[virtual_util] = imports[parent_package]
display.info('%s reports imports from parent package %s' % (virtual_util, parent_package), verbosity=6)
for module_util in sorted(imports):
if not imports[module_util]:
package_path = get_import_path(module_util, package=True)
if os.path.exists(package_path) and not os.path.getsize(package_path):
continue # ignore empty __init__.py files
display.warning('No imports found which use the "%s" module_util.' % module_util)
return imports
def get_python_module_utils_name(path): # type: (str) -> str
"""Return a namespace and name from the given module_utils path."""
base_path = data_context().content.module_utils_path
if data_context().content.collection:
prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils'
else:
prefix = 'ansible.module_utils'
if path.endswith('/__init__.py'):
path = os.path.dirname(path)
if path == base_path:
name = prefix
else:
name = prefix + '.' + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
return name
def enumerate_module_utils(): # type: () -> t.Set[str]
"""Return a set of available module_utils imports."""
module_utils = []
for path in data_context().content.walk_files(data_context().content.module_utils_path):
ext = os.path.splitext(path)[1]
if ext != '.py':
continue
module_utils.append(get_python_module_utils_name(path))
return set(module_utils)
def extract_python_module_utils_imports(path, module_utils): # type: (str, t.Set[str]) -> t.Set[str]
"""Return a list of module_utils imports found in the specified source file."""
# Python code must be read as bytes to avoid a SyntaxError when the source uses comments to declare the file encoding.
# See: https://www.python.org/dev/peps/pep-0263
# Specifically: If a Unicode string with a coding declaration is passed to compile(), a SyntaxError will be raised.
code = read_binary_file(path)
try:
tree = ast.parse(code)
except SyntaxError as ex:
# Treat this error as a warning so tests can be executed as best as possible.
# The compile test will detect and report this syntax error.
display.warning('%s:%s Syntax error extracting module_utils imports: %s' % (path, ex.lineno, ex.msg))
return set()
finder = ModuleUtilFinder(path, module_utils)
finder.visit(tree)
return finder.imports
def get_import_path(name, package=False): # type: (str, bool) -> str
"""Return a path from an import name."""
if package:
filename = os.path.join(name.replace('.', '/'), '__init__.py')
else:
filename = '%s.py' % name.replace('.', '/')
if name.startswith('ansible.module_utils.') or name == 'ansible.module_utils':
path = os.path.join('lib', filename)
elif data_context().content.collection and (
name.startswith('ansible_collections.%s.plugins.module_utils.' % data_context().content.collection.full_name) or
name == 'ansible_collections.%s.plugins.module_utils' % data_context().content.collection.full_name):
path = '/'.join(filename.split('/')[3:])
else:
raise Exception('Unexpected import name: %s' % name)
return path
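# Illustrative examples (not part of the original file):
#   get_import_path('ansible.module_utils.basic') -> 'lib/ansible/module_utils/basic.py'
#   get_import_path('ansible.module_utils.facts', package=True) -> 'lib/ansible/module_utils/facts/__init__.py'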
def path_to_module(path): # type: (str) -> str
"""Convert the given path to a module name."""
module = os.path.splitext(path)[0].replace(os.path.sep, '.')
if module.endswith('.__init__'):
module = module[:-9]
return module
def relative_to_absolute(name, level, module, path, lineno): # type: (str, int, str, str, int) -> str
"""Convert a relative import to an absolute import."""
if level <= 0:
absolute_name = name
elif not module:
display.warning('Cannot resolve relative import "%s%s" in unknown module at %s:%d' % ('.' * level, name, path, lineno))
absolute_name = 'relative.nomodule'
else:
parts = module.split('.')
if level >= len(parts):
display.warning('Cannot resolve relative import "%s%s" above module "%s" at %s:%d' % ('.' * level, name, module, path, lineno))
absolute_name = 'relative.abovelevel'
else:
absolute_name = '.'.join(parts[:-level] + [name])
return absolute_name
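# Worked example (not part of the original file): within the assumed module
# 'ansible.modules.system.ping', the import 'from ..common import x' arrives here
# as name='common', level=2, producing 'ansible.modules.common'. A level greater
# than or equal to the number of module parts cannot be resolved and yields the
# sentinel 'relative.abovelevel' instead.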
class ModuleUtilFinder(ast.NodeVisitor):
"""AST visitor to find valid module_utils imports."""
def __init__(self, path, module_utils): # type: (str, t.Set[str]) -> None
self.path = path
self.module_utils = module_utils
self.imports = set()
# implicitly import parent package
if path.endswith('/__init__.py'):
path = os.path.split(path)[0]
if path.startswith('lib/ansible/module_utils/'):
package = os.path.split(path)[0].replace('/', '.')[4:]
if package != 'ansible.module_utils' and package not in VIRTUAL_PACKAGES:
self.add_import(package, 0)
self.module = None
if data_context().content.is_ansible:
# Various parts of the Ansible source tree execute within different modules.
# To support import analysis, each file which uses relative imports must reside under a path defined here.
# The mapping is a tuple consisting of a path pattern to match and a replacement path.
# During analysis, any relative imports not covered here will result in warnings, which can be fixed by adding the appropriate entry.
path_map = (
('^hacking/build_library/build_ansible/', 'build_ansible/'),
('^lib/ansible/', 'ansible/'),
('^test/lib/ansible_test/_util/controller/sanity/validate-modules/', 'validate_modules/'),
('^test/lib/ansible_test/_util/target/legacy_collection_loader/', 'legacy_collection_loader/'),
('^test/units/', 'test/units/'),
('^test/lib/ansible_test/_internal/', 'ansible_test/_internal/'),
('^test/integration/targets/.*/ansible_collections/(?P<ns>[^/]*)/(?P<col>[^/]*)/', r'ansible_collections/\g<ns>/\g<col>/'),
('^test/integration/targets/.*/library/', 'ansible/modules/'),
)
for pattern, replacement in path_map:
if re.search(pattern, self.path):
revised_path = re.sub(pattern, replacement, self.path)
self.module = path_to_module(revised_path)
break
else:
# This assumes that all files within the collection are executed by Ansible as part of the collection.
# While that will usually be true, there are exceptions which will result in this resolution being incorrect.
self.module = path_to_module(os.path.join(data_context().content.collection.directory, self.path))
# noinspection PyPep8Naming
# pylint: disable=locally-disabled, invalid-name
def visit_Import(self, node): # type: (ast.Import) -> None
"""Visit an import node."""
self.generic_visit(node)
# import ansible.module_utils.MODULE[.MODULE]
# import ansible_collections.{ns}.{col}.plugins.module_utils.module_utils.MODULE[.MODULE]
self.add_imports([alias.name for alias in node.names], node.lineno)
# noinspection PyPep8Naming
# pylint: disable=locally-disabled, invalid-name
def visit_ImportFrom(self, node): # type: (ast.ImportFrom) -> None
"""Visit an import from node."""
self.generic_visit(node)
if not node.module:
return
module = relative_to_absolute(node.module, node.level, self.module, self.path, node.lineno)
if not module.startswith('ansible'):
return
# from ansible.module_utils import MODULE[, MODULE]
# from ansible.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
# from ansible_collections.{ns}.{col}.plugins.module_utils import MODULE[, MODULE]
# from ansible_collections.{ns}.{col}.plugins.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
self.add_imports(['%s.%s' % (module, alias.name) for alias in node.names], node.lineno)
def add_import(self, name, line_number): # type: (str, int) -> None
"""Record the specified import."""
import_name = name
while self.is_module_util_name(name):
if name in self.module_utils:
if name not in self.imports:
display.info('%s:%d imports module_utils: %s' % (self.path, line_number, name), verbosity=5)
self.imports.add(name)
return # duplicate imports are ignored
name = '.'.join(name.split('.')[:-1])
if is_subdir(self.path, data_context().content.test_path):
return # invalid imports in tests are ignored
# Treat this error as a warning so tests can be executed as best as possible.
# This error should be detected by unit or integration tests.
display.warning('%s:%d Invalid module_utils import: %s' % (self.path, line_number, import_name))
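# Illustrative walk (not part of the original file): given the assumed import
# 'ansible.module_utils.facts.network.base', the loop above strips one trailing
# component at a time ('...facts.network', '...facts', ...) until a name found
# in self.module_utils is recorded, so importing a submodule also counts as a
# use of its containing module_util package; names that never match trigger the
# warning above.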
def add_imports(self, names, line_no): # type: (t.List[str], int) -> None
"""Add the given import names if they are module_utils imports."""
for name in names:
if self.is_module_util_name(name):
self.add_import(name, line_no)
@staticmethod
def is_module_util_name(name): # type: (str) -> bool
"""Return True if the given name is a module_util name for the content under test. External module_utils are ignored."""
if data_context().content.is_ansible and name.startswith('ansible.module_utils.'):
return True
if data_context().content.collection and name.startswith('ansible_collections.%s.plugins.module_utils.' % data_context().content.collection.full_name):
return True
return False

View File

@@ -0,0 +1,55 @@
"""Command line parsing."""
from __future__ import annotations
import argparse
import os
import sys
from .argparsing import (
CompositeActionCompletionFinder,
)
from .commands import (
do_commands,
)
from .compat import (
HostSettings,
convert_legacy_args,
)
def parse_args(): # type: () -> argparse.Namespace
"""Parse command line arguments."""
completer = CompositeActionCompletionFinder()
if completer.enabled:
epilog = 'Tab completion available using the "argcomplete" python package.'
else:
epilog = 'Install the "argcomplete" python package to enable tab completion.'
parser = argparse.ArgumentParser(epilog=epilog)
do_commands(parser, completer)
completer(
parser,
always_complete_options=False,
)
argv = sys.argv[1:]
args = parser.parse_args(argv)
if args.explain and not args.verbosity:
args.verbosity = 1
if args.no_environment:
pass
elif args.host_path:
args.host_settings = HostSettings.deserialize(os.path.join(args.host_path, 'settings.dat'))
else:
args.host_settings = convert_legacy_args(argv, args, args.target_mode)
args.host_settings.apply_defaults()
return args

View File

@@ -0,0 +1,90 @@
"""Actions for handling composite arguments with argparse."""
from __future__ import annotations
from .argparsing import (
CompositeAction,
NamespaceParser,
)
from .parsers import (
DelegatedControllerParser,
NetworkSshTargetParser,
NetworkTargetParser,
OriginControllerParser,
PosixSshTargetParser,
PosixTargetParser,
SanityPythonTargetParser,
UnitsPythonTargetParser,
WindowsSshTargetParser,
WindowsTargetParser,
)
class OriginControllerAction(CompositeAction):
"""Composite action parser for the controller when the only option is `origin`."""
def create_parser(self): # type: () -> NamespaceParser
"""Return a namespace parser to parse the argument associated with this action."""
return OriginControllerParser()
class DelegatedControllerAction(CompositeAction):
"""Composite action parser for the controller when delegation is supported."""
def create_parser(self): # type: () -> NamespaceParser
"""Return a namespace parser to parse the argument associated with this action."""
return DelegatedControllerParser()
class PosixTargetAction(CompositeAction):
"""Composite action parser for a POSIX target."""
def create_parser(self): # type: () -> NamespaceParser
"""Return a namespace parser to parse the argument associated with this action."""
return PosixTargetParser()
class WindowsTargetAction(CompositeAction):
"""Composite action parser for a Windows target."""
def create_parser(self): # type: () -> NamespaceParser
"""Return a namespace parser to parse the argument associated with this action."""
return WindowsTargetParser()
class NetworkTargetAction(CompositeAction):
"""Composite action parser for a network target."""
def create_parser(self): # type: () -> NamespaceParser
"""Return a namespace parser to parse the argument associated with this action."""
return NetworkTargetParser()
class SanityPythonTargetAction(CompositeAction):
"""Composite action parser for a sanity target."""
def create_parser(self): # type: () -> NamespaceParser
"""Return a namespace parser to parse the argument associated with this action."""
return SanityPythonTargetParser()
class UnitsPythonTargetAction(CompositeAction):
"""Composite action parser for a units target."""
def create_parser(self): # type: () -> NamespaceParser
"""Return a namespace parser to parse the argument associated with this action."""
return UnitsPythonTargetParser()
class PosixSshTargetAction(CompositeAction):
"""Composite action parser for a POSIX SSH target."""
def create_parser(self): # type: () -> NamespaceParser
"""Return a namespace parser to parse the argument associated with this action."""
return PosixSshTargetParser()
class WindowsSshTargetAction(CompositeAction):
"""Composite action parser for a Windows SSH target."""
def create_parser(self): # type: () -> NamespaceParser
"""Return a namespace parser to parse the argument associated with this action."""
return WindowsSshTargetParser()
class NetworkSshTargetAction(CompositeAction):
"""Composite action parser for a network SSH target."""
def create_parser(self): # type: () -> NamespaceParser
"""Return a namespace parser to parse the argument associated with this action."""
return NetworkSshTargetParser()

View File

@@ -0,0 +1,263 @@
"""Completion finder which brings together custom options and completion logic."""
from __future__ import annotations
import abc
import argparse
import os
import re
import typing as t
from .argcompletion import (
OptionCompletionFinder,
get_comp_type,
register_safe_action,
warn,
)
from .parsers import (
Completion,
CompletionError,
CompletionSuccess,
CompletionUnavailable,
DocumentationState,
NamespaceParser,
Parser,
ParserError,
ParserMode,
ParserState,
)
class RegisteredCompletionFinder(OptionCompletionFinder):
"""
Custom option completion finder for argcomplete which allows completion results to be registered.
These registered completions, if provided, are used to filter the final completion results.
This works around a known bug: https://github.com/kislyuk/argcomplete/issues/221
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.registered_completions = None # type: t.Optional[t.List[str]]
def completer(
self,
prefix, # type: str
action, # type: argparse.Action
parsed_args, # type: argparse.Namespace
**kwargs,
): # type: (...) -> t.List[str]
"""
Return a list of completions for the specified prefix and action.
Use this as the completer function for argcomplete.
"""
kwargs.clear()
del kwargs
completions = self.get_completions(prefix, action, parsed_args)
if action.nargs and not isinstance(action.nargs, int):
# prevent argcomplete from including unrelated arguments in the completion results
self.registered_completions = completions
return completions
@abc.abstractmethod
def get_completions(
self,
prefix, # type: str
action, # type: argparse.Action
parsed_args, # type: argparse.Namespace
): # type: (...) -> t.List[str]
"""
Return a list of completions for the specified prefix and action.
Called by the complete function.
"""
def quote_completions(self, completions, cword_prequote, last_wordbreak_pos):
"""Modify completion results before returning them."""
if self.registered_completions is not None:
# If one of the completion handlers registered their results, only allow those exact results to be returned.
# This prevents argcomplete from adding results from other completers when they are known to be invalid.
allowed_completions = set(self.registered_completions)
completions = [completion for completion in completions if completion in allowed_completions]
return super().quote_completions(completions, cword_prequote, last_wordbreak_pos)
class CompositeAction(argparse.Action, metaclass=abc.ABCMeta):
"""Base class for actions that parse composite arguments."""
documentation_state = {} # type: t.Dict[t.Type[CompositeAction], DocumentationState]
# noinspection PyUnusedLocal
def __init__(
self,
*args,
dest, # type: str
**kwargs,
):
del dest
self.definition = self.create_parser()
self.documentation_state[type(self)] = documentation_state = DocumentationState()
self.definition.document(documentation_state)
super().__init__(*args, dest=self.definition.dest, **kwargs)
register_safe_action(type(self))
@abc.abstractmethod
def create_parser(self): # type: () -> NamespaceParser
"""Return a namespace parser to parse the argument associated with this action."""
def __call__(
self,
parser,
namespace,
values,
option_string=None,
):
state = ParserState(mode=ParserMode.PARSE, namespaces=[namespace], remainder=values)
try:
self.definition.parse(state)
except ParserError as ex:
error = str(ex)
except CompletionError as ex:
error = ex.message
else:
return
if get_comp_type():
# FUTURE: It may be possible to enhance error handling by surfacing this error message during downstream completion.
return # ignore parse errors during completion to avoid breaking downstream completion
raise argparse.ArgumentError(self, error)
class CompositeActionCompletionFinder(RegisteredCompletionFinder):
"""Completion finder with support for composite argument parsing."""
def get_completions(
self,
prefix, # type: str
action, # type: CompositeAction
parsed_args, # type: argparse.Namespace
): # type: (...) -> t.List[str]
"""Return a list of completions appropriate for the given prefix and action, taking into account the arguments that have already been parsed."""
state = ParserState(
mode=ParserMode.LIST if self.list_mode else ParserMode.COMPLETE,
remainder=prefix,
namespaces=[parsed_args],
)
answer = complete(action.definition, state)
completions = []
if isinstance(answer, CompletionSuccess):
self.disable_completion_mangling = answer.preserve
completions = answer.completions
if isinstance(answer, CompletionError):
warn(answer.message)
return completions
def detect_file_listing(value, mode): # type: (str, ParserMode) -> bool
"""
Return True if Bash will show a file listing and redraw the prompt, otherwise return False.
If there are no list results, a file listing will be shown if the value after the last `=` or `:` character:
- is empty
- matches a full path
- matches a partial path
Otherwise Bash will play the bell sound and display nothing.
see: https://github.com/kislyuk/argcomplete/issues/328
see: https://github.com/kislyuk/argcomplete/pull/284
"""
listing = False
if mode == ParserMode.LIST:
right = re.split('[=:]', value)[-1]
listing = not right or os.path.exists(right)
if not listing:
directory = os.path.dirname(right)
# noinspection PyBroadException
try:
filenames = os.listdir(directory or '.')
except Exception: # pylint: disable=broad-except
pass
else:
listing = any(filename.startswith(right) for filename in filenames)
return listing
def detect_false_file_completion(value, mode): # type: (str, ParserMode) -> bool
"""
Return True if Bash will provide an incorrect file completion, otherwise return False.
If there are no completion results, a filename will be automatically completed if the value after the last `=` or `:` character:
- matches exactly one partial path
Otherwise Bash will play the bell sound and display nothing.
see: https://github.com/kislyuk/argcomplete/issues/328
see: https://github.com/kislyuk/argcomplete/pull/284
"""
completion = False
if mode == ParserMode.COMPLETE:
completion = True
right = re.split('[=:]', value)[-1]
directory, prefix = os.path.split(right)
# noinspection PyBroadException
try:
filenames = os.listdir(directory or '.')
except Exception: # pylint: disable=broad-except
pass
else:
matches = [filename for filename in filenames if filename.startswith(prefix)]
completion = len(matches) == 1
return completion
def complete(
completer, # type: Parser
state, # type: ParserState
): # type: (...) -> Completion
"""Perform argument completion using the given completer and return the completion result."""
value = state.remainder
try:
completer.parse(state)
raise ParserError('completion expected')
except CompletionUnavailable as ex:
if detect_file_listing(value, state.mode):
# Displaying a warning before the file listing informs the user it is invalid. Bash will redraw the prompt after the list.
# If the file listing is not shown, a warning could be helpful, but would introduce noise on the terminal since the prompt is not redrawn.
answer = CompletionError(ex.message)
elif detect_false_file_completion(value, state.mode):
# When the current prefix provides no matches, but matches a single file on disk, Bash will perform an incorrect completion.
# Returning multiple invalid matches instead of no matches will prevent Bash from using its own completion logic in this case.
answer = CompletionSuccess(
list_mode=True, # abuse list mode to enable preservation of the literal results
consumed='',
continuation='',
matches=['completion', 'invalid']
)
else:
answer = ex
except Completion as ex:
answer = ex
return answer

View File

@@ -0,0 +1,18 @@
"""Actions for argparse."""
from __future__ import annotations
import argparse
import enum
import typing as t
class EnumAction(argparse.Action):
"""Parse an enum using the lowercases enum names."""
def __init__(self, **kwargs): # type: (t.Dict[str, t.Any]) -> None
self.enum_type = kwargs.pop('type', None) # type: t.Type[enum.Enum]
kwargs.setdefault('choices', tuple(e.name.lower() for e in self.enum_type))
super().__init__(**kwargs)
def __call__(self, parser, namespace, values, option_string=None):
value = self.enum_type[values.upper()]
setattr(namespace, self.dest, value)
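# Illustrative usage (not part of the original file), with an assumed enum:
#   class Color(enum.Enum):
#       RED = 1
#       BLUE = 2
#   parser.add_argument('--color', action=EnumAction, type=Color)
# Passing "--color red" stores Color.RED in the namespace; the choices shown to
# the user are the lowercase member names.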

View File

@@ -0,0 +1,124 @@
"""Wrapper around argcomplete providing bug fixes and additional features."""
from __future__ import annotations
import argparse
import enum
import os
import typing as t
class Substitute:
"""Substitute for missing class which accepts all arguments."""
def __init__(self, *args, **kwargs):
pass
try:
import argcomplete
from argcomplete import (
CompletionFinder,
default_validator,
)
warn = argcomplete.warn # pylint: disable=invalid-name
except ImportError:
argcomplete = None
CompletionFinder = Substitute
default_validator = Substitute # pylint: disable=invalid-name
warn = Substitute # pylint: disable=invalid-name
class CompType(enum.Enum):
"""
Bash COMP_TYPE argument completion types.
For documentation, see: https://www.gnu.org/software/bash/manual/html_node/Bash-Variables.html#index-COMP_005fTYPE
"""
COMPLETION = '\t'
"""
Standard completion, typically triggered by a single tab.
"""
MENU_COMPLETION = '%'
"""
Menu completion, which cycles through each completion instead of showing a list.
For help using this feature, see: https://stackoverflow.com/questions/12044574/getting-complete-and-menu-complete-to-work-together
"""
LIST = '?'
"""
Standard list, typically triggered by a double tab.
"""
LIST_AMBIGUOUS = '!'
"""
Listing with `show-all-if-ambiguous` set.
For documentation, see https://www.gnu.org/software/bash/manual/html_node/Readline-Init-File-Syntax.html#index-show_002dall_002dif_002dambiguous
For additional details, see: https://unix.stackexchange.com/questions/614123/explanation-of-bash-completion-comp-type
"""
LIST_UNMODIFIED = '@'
"""
Listing with `show-all-if-unmodified` set.
For documentation, see https://www.gnu.org/software/bash/manual/html_node/Readline-Init-File-Syntax.html#index-show_002dall_002dif_002dunmodified
For additional details, see: https://unix.stackexchange.com/questions/614123/explanation-of-bash-completion-comp-type
"""
@property
def list_mode(self): # type: () -> bool
"""True if completion is running in list mode, otherwise False."""
return self in (CompType.LIST, CompType.LIST_AMBIGUOUS, CompType.LIST_UNMODIFIED)
def register_safe_action(action_type): # type: (t.Type[argparse.Action]) -> None
"""Register the given action as a safe action for argcomplete to use during completion if it is not already registered."""
if argcomplete and action_type not in argcomplete.safe_actions:
argcomplete.safe_actions += (action_type,)
def get_comp_type(): # type: () -> t.Optional[CompType]
"""Parse the COMP_TYPE environment variable (if present) and return the associated CompType enum value."""
value = os.environ.get('COMP_TYPE')
comp_type = CompType(chr(int(value))) if value else None
return comp_type
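# Illustrative values (not part of the original file): Bash exports COMP_TYPE as
# the decimal ordinal of the character, e.g. '9' (TAB) -> CompType.COMPLETION
# and '63' ('?') -> CompType.LIST.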
class OptionCompletionFinder(CompletionFinder):
"""
Custom completion finder for argcomplete.
It provides support for running completion in list mode, which argcomplete natively handles the same as standard completion.
"""
enabled = bool(argcomplete)
def __init__(self, *args, validator=None, **kwargs):
if validator:
raise ValueError()
self.comp_type = get_comp_type()
self.list_mode = self.comp_type.list_mode if self.comp_type else False
self.disable_completion_mangling = False
finder = self
def custom_validator(completion, prefix):
"""Completion validator used to optionally bypass validation."""
if finder.disable_completion_mangling:
return True
return default_validator(completion, prefix)
super().__init__(
*args,
validator=custom_validator,
**kwargs,
)
def __call__(self, *args, **kwargs):
if self.enabled:
super().__call__(*args, **kwargs)
def quote_completions(self, completions, cword_prequote, last_wordbreak_pos):
"""Intercept default quoting behavior to optionally block mangling of completion entries."""
if self.disable_completion_mangling:
# Word breaks have already been handled when generating completions, don't mangle them further.
# This is needed in many cases when returning completion lists which lack the existing completion prefix.
last_wordbreak_pos = None
return super().quote_completions(completions, cword_prequote, last_wordbreak_pos)

View File

@@ -0,0 +1,581 @@
"""General purpose composite argument parsing and completion."""
from __future__ import annotations
import abc
import contextlib
import dataclasses
import enum
import os
import re
import typing as t
# NOTE: When choosing delimiters, take into account Bash and argcomplete behavior.
#
# Recommended characters for assignment and/or continuation: `/` `:` `=`
#
# The recommended assignment_character list is due to how argcomplete handles continuation characters.
# see: https://github.com/kislyuk/argcomplete/blob/5a20d6165fbb4d4d58559378919b05964870cc16/argcomplete/__init__.py#L557-L558
PAIR_DELIMITER = ','
ASSIGNMENT_DELIMITER = '='
PATH_DELIMITER = '/'
@dataclasses.dataclass(frozen=True)
class Completion(Exception):
"""Base class for argument completion results."""
@dataclasses.dataclass(frozen=True)
class CompletionUnavailable(Completion):
"""Argument completion unavailable."""
message: str = 'No completions available.'
@dataclasses.dataclass(frozen=True)
class CompletionError(Completion):
"""Argument completion error."""
message: t.Optional[str] = None
@dataclasses.dataclass(frozen=True)
class CompletionSuccess(Completion):
"""Successful argument completion result."""
list_mode: bool
consumed: str
continuation: str
matches: t.List[str] = dataclasses.field(default_factory=list)
@property
def preserve(self): # type: () -> bool
"""
True if argcomplete should not mangle completion values, otherwise False.
Only used when more than one completion exists to avoid overwriting the word undergoing completion.
"""
return len(self.matches) > 1 and self.list_mode
@property
def completions(self): # type: () -> t.List[str]
"""List of completion values to return to argcomplete."""
completions = self.matches
continuation = '' if self.list_mode else self.continuation
if not self.preserve:
# include the existing prefix to avoid rewriting the word undergoing completion
completions = [f'{self.consumed}{completion}{continuation}' for completion in completions]
return completions
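# Illustrative example (not part of the original file): with assumed values
# consumed='docker:', matches=['default'] and continuation=',' in COMPLETE mode,
# completions yields ['docker:default,']; in list mode with multiple matches the
# raw matches are preserved unmodified.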
class ParserMode(enum.Enum):
"""Mode the parser is operating in."""
PARSE = enum.auto()
COMPLETE = enum.auto()
LIST = enum.auto()
class ParserError(Exception):
"""Base class for all parsing exceptions."""
@dataclasses.dataclass
class ParserBoundary:
"""Boundary details for parsing composite input."""
delimiters: str
required: bool
match: t.Optional[str] = None
ready: bool = True
@dataclasses.dataclass
class ParserState:
"""State of the composite argument parser."""
mode: ParserMode
remainder: str = ''
consumed: str = ''
boundaries: t.List[ParserBoundary] = dataclasses.field(default_factory=list)
namespaces: t.List[t.Any] = dataclasses.field(default_factory=list)
parts: t.List[str] = dataclasses.field(default_factory=list)
@property
def incomplete(self): # type: () -> bool
"""True if parsing is incomplete (unparsed input remains), otherwise False."""
return self.remainder is not None
def match(self, value, choices): # type: (str, t.List[str]) -> bool
"""Return True if the given value matches the provided choices, taking into account parsing boundaries, otherwise return False."""
if self.current_boundary:
delimiters, delimiter = self.current_boundary.delimiters, self.current_boundary.match
else:
delimiters, delimiter = '', None
for choice in choices:
if choice.rstrip(delimiters) == choice:
# choice is not delimited
if value == choice:
return True # value matched
else:
# choice is delimited
if f'{value}{delimiter}' == choice:
return True # value and delimiter matched
return False
def read(self): # type: () -> str
"""Read and return the next input segment, taking into account parsing boundaries."""
delimiters = "".join(boundary.delimiters for boundary in self.boundaries)
if delimiters:
pattern = '([' + re.escape(delimiters) + '])'
regex = re.compile(pattern)
parts = regex.split(self.remainder, 1)
else:
parts = [self.remainder]
if len(parts) > 1:
value, delimiter, remainder = parts
else:
value, delimiter, remainder = parts[0], None, None
for boundary in reversed(self.boundaries):
if delimiter and delimiter in boundary.delimiters:
boundary.match = delimiter
self.consumed += value + delimiter
break
boundary.match = None
boundary.ready = False
if boundary.required:
break
self.remainder = remainder
return value
@property
def root_namespace(self): # type: () -> t.Any
"""THe root namespace."""
return self.namespaces[0]
@property
def current_namespace(self): # type: () -> t.Any
"""The current namespace."""
return self.namespaces[-1]
@property
def current_boundary(self): # type: () -> t.Optional[ParserBoundary]
"""The current parser boundary, if any, otherwise None."""
return self.boundaries[-1] if self.boundaries else None
def set_namespace(self, namespace): # type: (t.Any) -> None
"""Set the current namespace."""
self.namespaces.append(namespace)
@contextlib.contextmanager
def delimit(self, delimiters, required=True): # type: (str, bool) -> t.ContextManager[ParserBoundary]
"""Context manager for delimiting parsing of input."""
boundary = ParserBoundary(delimiters=delimiters, required=required)
self.boundaries.append(boundary)
try:
yield boundary
finally:
self.boundaries.pop()
if boundary.required and not boundary.match:
raise ParserError('required delimiter not found, hit up-level delimiter or end of input instead')
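# Illustrative usage (not part of the original file), parsing the assumed input
# 'docker:default':
#   with state.delimit(':') as boundary:
#       name = state.read()  # 'docker', boundary.match == ':'
#   tail = state.read()      # 'default', the remainder is now exhausted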
@dataclasses.dataclass
class DocumentationState:
"""State of the composite argument parser's generated documentation."""
sections: t.Dict[str, str] = dataclasses.field(default_factory=dict)
class Parser(metaclass=abc.ABCMeta):
"""Base class for all composite argument parsers."""
@abc.abstractmethod
def parse(self, state): # type: (ParserState) -> t.Any
"""Parse the input from the given state and return the result."""
def document(self, state): # type: (DocumentationState) -> t.Optional[str]
"""Generate and return documentation for this parser."""
raise Exception(f'Undocumented parser: {type(self)}')
class MatchConditions(enum.Flag):
"""Acceptable condition(s) for matching user input to available choices."""
CHOICE = enum.auto()
"""Match any choice."""
ANY = enum.auto()
"""Match any non-empty string."""
NOTHING = enum.auto()
"""Match an empty string which is not followed by a boundary match."""
class DynamicChoicesParser(Parser, metaclass=abc.ABCMeta):
"""Base class for composite argument parsers which use a list of choices that can be generated during completion."""
def __init__(self, conditions=MatchConditions.CHOICE): # type: (MatchConditions) -> None
self.conditions = conditions
@abc.abstractmethod
def get_choices(self, value): # type: (str) -> t.List[str]
"""Return a list of valid choices based on the given input value."""
def no_completion_match(self, value): # type: (str) -> CompletionUnavailable # pylint: disable=unused-argument
"""Return an instance of CompletionUnavailable when no match was found for the given value."""
return CompletionUnavailable()
def no_choices_available(self, value): # type: (str) -> ParserError # pylint: disable=unused-argument
"""Return an instance of ParserError when parsing fails and no choices are available."""
return ParserError('No choices available.')
def parse(self, state): # type: (ParserState) -> t.Any
"""Parse the input from the given state and return the result."""
value = state.read()
choices = self.get_choices(value)
if state.mode == ParserMode.PARSE or state.incomplete:
if self.conditions & MatchConditions.CHOICE and state.match(value, choices):
return value
if self.conditions & MatchConditions.ANY and value:
return value
if self.conditions & MatchConditions.NOTHING and not value and state.current_boundary and not state.current_boundary.match:
return value
if state.mode == ParserMode.PARSE:
if choices:
raise ParserError(f'"{value}" not in: {", ".join(choices)}')
raise self.no_choices_available(value)
raise CompletionUnavailable()
matches = [choice for choice in choices if choice.startswith(value)]
if not matches:
raise self.no_completion_match(value)
continuation = state.current_boundary.delimiters if state.current_boundary and state.current_boundary.required else ''
raise CompletionSuccess(
list_mode=state.mode == ParserMode.LIST,
consumed=state.consumed,
continuation=continuation,
matches=matches,
)
class ChoicesParser(DynamicChoicesParser):
"""Composite argument parser which relies on a static list of choices."""
def __init__(self, choices, conditions=MatchConditions.CHOICE): # type: (t.List[str], MatchConditions) -> None
self.choices = choices
super().__init__(conditions=conditions)
def get_choices(self, value): # type: (str) -> t.List[str]
"""Return a list of valid choices based on the given input value."""
return self.choices
def document(self, state): # type: (DocumentationState) -> t.Optional[str]
"""Generate and return documentation for this parser."""
return '|'.join(self.choices)
class IntegerParser(DynamicChoicesParser):
"""Composite argument parser for integers."""
PATTERN = re.compile('^[1-9][0-9]*$')
def __init__(self, maximum=None): # type: (t.Optional[int]) -> None
self.maximum = maximum
super().__init__()
def get_choices(self, value): # type: (str) -> t.List[str]
"""Return a list of valid choices based on the given input value."""
if not value:
numbers = list(range(1, 10))
elif self.PATTERN.search(value):
int_prefix = int(value)
base = int_prefix * 10
numbers = [int_prefix] + [base + i for i in range(0, 10)]
else:
numbers = []
# NOTE: the minimum is currently fixed at 1
if self.maximum is not None:
numbers = [n for n in numbers if n <= self.maximum]
return [str(n) for n in numbers]
def parse(self, state): # type: (ParserState) -> t.Any
"""Parse the input from the given state and return the result."""
value = super().parse(state)
return int(value)
def document(self, state): # type: (DocumentationState) -> t.Optional[str]
"""Generate and return documentation for this parser."""
return '{integer}'
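# Illustrative completion behavior (not part of the original file): for the
# assumed prefix '2', get_choices offers ['2', '20', ..., '29']; with maximum=25
# the offered list is trimmed to ['2', '20', '21', '22', '23', '24', '25'].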
class BooleanParser(ChoicesParser):
"""Composite argument parser for boolean (yes/no) values."""
def __init__(self):
super().__init__(['yes', 'no'])
def parse(self, state): # type: (ParserState) -> bool
"""Parse the input from the given state and return the result."""
value = super().parse(state)
return value == 'yes'
class AnyParser(ChoicesParser):
"""Composite argument parser which accepts any input value."""
def __init__(self, nothing=False, no_match_message=None): # type: (bool, t.Optional[str]) -> None
self.no_match_message = no_match_message
conditions = MatchConditions.ANY
if nothing:
conditions |= MatchConditions.NOTHING
super().__init__([], conditions=conditions)
def no_completion_match(self, value): # type: (str) -> CompletionUnavailable
"""Return an instance of CompletionUnavailable when no match was found for the given value."""
if self.no_match_message:
return CompletionUnavailable(message=self.no_match_message)
return super().no_completion_match(value)
def no_choices_available(self, value): # type: (str) -> ParserError
"""Return an instance of ParserError when parsing fails and no choices are available."""
if self.no_match_message:
return ParserError(self.no_match_message)
return super().no_choices_available(value)
class RelativePathNameParser(DynamicChoicesParser):
"""Composite argument parser for relative path names."""
RELATIVE_NAMES = ['.', '..']
def __init__(self, choices): # type: (t.List[str]) -> None
self.choices = choices
super().__init__()
def get_choices(self, value): # type: (str) -> t.List[str]
"""Return a list of valid choices based on the given input value."""
choices = list(self.choices)
if value in self.RELATIVE_NAMES:
# complete relative names, but avoid suggesting them unless the current name is relative
# unfortunately this will be sorted in reverse of what bash presents ("../ ./" instead of "./ ../")
choices.extend(f'{item}{PATH_DELIMITER}' for item in self.RELATIVE_NAMES)
return choices
class FileParser(Parser):
"""Composite argument parser for absolute or relative file paths."""
def parse(self, state): # type: (ParserState) -> str
"""Parse the input from the given state and return the result."""
if state.mode == ParserMode.PARSE:
path = AnyParser().parse(state)
if not os.path.isfile(path):
raise ParserError(f'Not a file: {path}')
else:
path = ''
with state.delimit(PATH_DELIMITER, required=False) as boundary:
while boundary.ready:
directory = path or '.'
try:
with os.scandir(directory) as scan: # type: t.Iterator[os.DirEntry]
choices = [f'{item.name}{PATH_DELIMITER}' if item.is_dir() else item.name for item in scan]
except OSError:
choices = []
if not path:
choices.append(PATH_DELIMITER) # allow absolute paths
choices.append('../') # suggest relative paths
part = RelativePathNameParser(choices).parse(state)
path += f'{part}{boundary.match or ""}'
return path
class AbsolutePathParser(Parser):
"""Composite argument parser for absolute file paths. Paths are only verified for proper syntax, not for existence."""
def parse(self, state): # type: (ParserState) -> t.Any
"""Parse the input from the given state and return the result."""
path = ''
with state.delimit(PATH_DELIMITER, required=False) as boundary:
while boundary.ready:
if path:
path += AnyParser(nothing=True).parse(state)
else:
path += ChoicesParser([PATH_DELIMITER]).parse(state)
path += (boundary.match or '')
return path
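# Illustrative trace (not part of the original file): for the assumed input
# '/tmp/foo', the first pass matches the leading '/' (an empty value plus the
# '/' boundary match), then 'tmp' with another '/' match, then 'foo' with no
# further delimiter, returning '/tmp/foo'.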
class NamespaceParser(Parser, metaclass=abc.ABCMeta):
"""Base class for composite argument parsers that store their results in a namespace."""
def parse(self, state): # type: (ParserState) -> t.Any
"""Parse the input from the given state and return the result."""
namespace = state.current_namespace
current = getattr(namespace, self.dest)
if current and self.limit_one:
if state.mode == ParserMode.PARSE:
raise ParserError('Option cannot be specified more than once.')
raise CompletionError('Option cannot be specified more than once.')
value = self.get_value(state)
if self.use_list:
if not current:
current = []
setattr(namespace, self.dest, current)
current.append(value)
else:
setattr(namespace, self.dest, value)
return value
def get_value(self, state): # type: (ParserState) -> t.Any
"""Parse the input from the given state and return the result, without storing the result in the namespace."""
return super().parse(state)
@property
def use_list(self): # type: () -> bool
"""True if the destination is a list, otherwise False."""
return False
@property
def limit_one(self): # type: () -> bool
"""True if only one target is allowed, otherwise False."""
return not self.use_list
@property
@abc.abstractmethod
def dest(self): # type: () -> str
"""The name of the attribute where the value should be stored."""
class NamespaceWrappedParser(NamespaceParser):
"""Composite argument parser that wraps a non-namespace parser and stores the result in a namespace."""
def __init__(self, dest, parser): # type: (str, Parser) -> None
self._dest = dest
self.parser = parser
def get_value(self, state): # type: (ParserState) -> t.Any
"""Parse the input from the given state and return the result, without storing the result in the namespace."""
return self.parser.parse(state)
@property
def dest(self): # type: () -> str
"""The name of the attribute where the value should be stored."""
return self._dest
class KeyValueParser(Parser, metaclass=abc.ABCMeta):
"""Base class for key/value composite argument parsers."""
@abc.abstractmethod
def get_parsers(self, state): # type: (ParserState) -> t.Dict[str, Parser]
"""Return a dictionary of key names and value parsers."""
def parse(self, state): # type: (ParserState) -> t.Any
"""Parse the input from the given state and return the result."""
namespace = state.current_namespace
parsers = self.get_parsers(state)
keys = list(parsers)
with state.delimit(PAIR_DELIMITER, required=False) as pair:
while pair.ready:
with state.delimit(ASSIGNMENT_DELIMITER):
key = ChoicesParser(keys).parse(state)
value = parsers[key].parse(state)
setattr(namespace, key, value)
keys.remove(key)
return namespace
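# Illustrative input (not part of the original file): with assumed parsers for
# the keys 'python' and 'venv', the input 'python=3.10,venv=yes' is consumed
# pair by pair; each key may appear only once, since it is removed from the
# candidate list after use.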
class PairParser(Parser, metaclass=abc.ABCMeta):
"""Base class for composite argument parsers consisting of a left and right argument parser, with input separated by a delimiter."""
def parse(self, state): # type: (ParserState) -> t.Any
"""Parse the input from the given state and return the result."""
namespace = self.create_namespace()
state.set_namespace(namespace)
with state.delimit(self.delimiter, self.required) as boundary:
choice = self.get_left_parser(state).parse(state)
if boundary.match:
self.get_right_parser(choice).parse(state)
return namespace
@property
def required(self): # type: () -> bool
"""True if the delimiter (and thus right parser) is required, otherwise False."""
return False
@property
def delimiter(self): # type: () -> str
"""The delimiter to use between the left and right parser."""
return PAIR_DELIMITER
@abc.abstractmethod
def create_namespace(self): # type: () -> t.Any
"""Create and return a namespace."""
@abc.abstractmethod
def get_left_parser(self, state): # type: (ParserState) -> Parser
"""Return the parser for the left side."""
@abc.abstractmethod
def get_right_parser(self, choice): # type: (t.Any) -> Parser
"""Return the parser for the right side."""


class TypeParser(Parser, metaclass=abc.ABCMeta):
"""Base class for composite argument parsers which parse a type name, a colon and then parse results based on the type given by the type name."""
def get_parsers(self, state): # type: (ParserState) -> t.Dict[str, Parser] # pylint: disable=unused-argument
"""Return a dictionary of type names and type parsers."""
return self.get_stateless_parsers()
@abc.abstractmethod
def get_stateless_parsers(self): # type: () -> t.Dict[str, Parser]
"""Return a dictionary of type names and type parsers."""
def parse(self, state): # type: (ParserState) -> t.Any
"""Parse the input from the given state and return the result."""
parsers = self.get_parsers(state)
with state.delimit(':'):
key = ChoicesParser(list(parsers)).parse(state)
value = parsers[key].parse(state)
return value
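
TypeParser's contract, reduced to a plain function (a sketch assuming values carry a 'type:' prefix; parse_typed_value is not part of ansible-test):

def parse_typed_value(text, parsers):
    """Dispatch on the 'type:' prefix, as TypeParser does with its
    dictionary of type names and type parsers."""
    key, sep, rest = text.partition(':')

    if not sep or key not in parsers:
        raise ValueError('expected one of: %s' % ', '.join(sorted(parsers)))

    return parsers[key](rest)

print(parse_typed_value('ssh:user@host', {'ssh': str, 'local': str}))  # user@host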

View File

@@ -0,0 +1,240 @@
"""Command line parsing for all commands."""
from __future__ import annotations

import argparse
import functools
import sys

from ...util import (
    display,
)

from ..completers import (
    complete_target,
)

from ..environments import (
    CompositeActionCompletionFinder,
)

from .coverage import (
    do_coverage,
)

from .env import (
    do_env,
)

from .integration import (
    do_integration,
)

from .sanity import (
    do_sanity,
)

from .shell import (
    do_shell,
)

from .units import (
    do_units,
)


def do_commands(
parent, # type: argparse.ArgumentParser
completer, # type: CompositeActionCompletionFinder
): # type: (...) -> None
"""Command line parsing for all commands."""
common = argparse.ArgumentParser(add_help=False)
common.add_argument(
'-e',
'--explain',
action='store_true',
help='explain commands that would be executed',
)
common.add_argument(
'-v',
'--verbose',
dest='verbosity',
action='count',
default=0,
help='display more output',
)
common.add_argument(
'--color',
metavar='COLOR',
nargs='?',
help='generate color output: yes, no, auto',
const='yes',
default='auto',
type=color,
)
common.add_argument(
'--debug',
action='store_true',
help='run ansible commands in debug mode',
)
common.add_argument(
'--truncate',
dest='truncate',
metavar='COLUMNS',
type=int,
default=display.columns,
help='truncate some long output (0=disabled) (default: auto)',
)
common.add_argument(
'--redact',
dest='redact',
action='store_true',
default=True,
help=argparse.SUPPRESS, # kept for backwards compatibility, but no point in advertising since it's the default
)
common.add_argument(
'--no-redact',
dest='redact',
action='store_false',
        default=True,
help='show sensitive values in output',
)
test = argparse.ArgumentParser(add_help=False, parents=[common])
testing = test.add_argument_group(title='common testing arguments')
testing.add_argument(
'include',
metavar='TARGET',
nargs='*',
help='test the specified target',
).completer = functools.partial(complete_target, completer)
testing.add_argument(
'--include',
metavar='TARGET',
action='append',
help='include the specified target',
).completer = functools.partial(complete_target, completer)
testing.add_argument(
'--exclude',
metavar='TARGET',
action='append',
help='exclude the specified target',
).completer = functools.partial(complete_target, completer)
testing.add_argument(
'--require',
metavar='TARGET',
action='append',
help='require the specified target',
).completer = functools.partial(complete_target, completer)
testing.add_argument(
'--coverage',
action='store_true',
help='analyze code coverage when running tests',
)
testing.add_argument(
'--coverage-check',
action='store_true',
help='only verify code coverage can be enabled',
)
testing.add_argument(
'--metadata',
help=argparse.SUPPRESS,
)
testing.add_argument(
'--base-branch',
metavar='BRANCH',
help='base branch used for change detection',
)
testing.add_argument(
'--changed',
action='store_true',
help='limit targets based on changes',
)
changes = test.add_argument_group(title='change detection arguments')
changes.add_argument(
'--tracked',
action='store_true',
help=argparse.SUPPRESS,
)
changes.add_argument(
'--untracked',
action='store_true',
help='include untracked files',
)
changes.add_argument(
'--ignore-committed',
dest='committed',
action='store_false',
help='exclude committed files',
)
changes.add_argument(
'--ignore-staged',
dest='staged',
action='store_false',
help='exclude staged files',
)
changes.add_argument(
'--ignore-unstaged',
dest='unstaged',
action='store_false',
help='exclude unstaged files',
)
changes.add_argument(
'--changed-from',
metavar='PATH',
help=argparse.SUPPRESS,
)
changes.add_argument(
'--changed-path',
metavar='PATH',
action='append',
help=argparse.SUPPRESS,
)
subparsers = parent.add_subparsers(metavar='COMMAND', required=True)
do_coverage(subparsers, common, completer)
do_env(subparsers, common, completer)
do_shell(subparsers, common, completer)
do_integration(subparsers, test, completer)
do_sanity(subparsers, test, completer)
do_units(subparsers, test, completer)


def color(value):  # type: (str) -> bool
"""Strict converter for color option."""
if value == 'yes':
return True
if value == 'no':
return False
if value == 'auto':
return sys.stdout.isatty()
raise argparse.ArgumentTypeError(f"invalid choice: '{value}' (choose from 'yes', 'no', 'auto')")
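
The --redact/--no-redact pair works because both actions share dest='redact'. argparse takes the namespace default from the first action registered for a dest, so both declarations should state the same default to keep the intent unambiguous. A minimal reproduction of the pattern (standalone, not the full ansible-test parser):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--redact', dest='redact', action='store_true', default=True,
                    help=argparse.SUPPRESS)  # redaction is on by default
parser.add_argument('--no-redact', dest='redact', action='store_false', default=True,
                    help='show sensitive values in output')

print(parser.parse_args([]).redact)               # True
print(parser.parse_args(['--no-redact']).redact)  # False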

View File

@@ -0,0 +1,85 @@
"""Command line parsing for all `coverage` commands."""
from __future__ import annotations

import argparse

from ....commands.coverage import (
    COVERAGE_GROUPS,
)

from ...environments import (
    CompositeActionCompletionFinder,
)

from .analyze import (
    do_analyze,
)

from .combine import (
    do_combine,
)

from .erase import (
    do_erase,
)

from .html import (
    do_html,
)

from .report import (
    do_report,
)

from .xml import (
    do_xml,
)


def do_coverage(
subparsers,
parent, # type: argparse.ArgumentParser
completer, # type: CompositeActionCompletionFinder
): # type: (...) -> None
"""Command line parsing for all `coverage` commands."""
coverage_common = argparse.ArgumentParser(add_help=False, parents=[parent])
parser = subparsers.add_parser(
'coverage',
help='code coverage management and reporting',
)
coverage_subparsers = parser.add_subparsers(metavar='COMMAND', required=True)
do_analyze(coverage_subparsers, coverage_common, completer)
do_erase(coverage_subparsers, coverage_common, completer)
    do_combine(coverage_subparsers, coverage_common, add_coverage_common, completer)
    do_report(coverage_subparsers, coverage_common, add_coverage_common, completer)
    do_html(coverage_subparsers, coverage_common, add_coverage_common, completer)
    do_xml(coverage_subparsers, coverage_common, add_coverage_common, completer)


def add_coverage_common(
parser, # type: argparse.ArgumentParser
):  # type: (...) -> None
"""Add common coverage arguments."""
parser.add_argument(
'--group-by',
metavar='GROUP',
action='append',
choices=COVERAGE_GROUPS,
help='group output by: %s' % ', '.join(COVERAGE_GROUPS),
)
parser.add_argument(
'--all',
action='store_true',
help='include all python/powershell source files',
)
parser.add_argument(
'--stub',
action='store_true',
help='generate empty report of all python/powershell source files',
)
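
coverage_common above follows the standard argparse composition idiom: shared options live on a help-less parent parser, and each subcommand pulls them in via parents=[...]. A condensed sketch of the idiom (names are invented):

import argparse

common = argparse.ArgumentParser(add_help=False)  # only leaf parsers provide -h/--help
common.add_argument('-v', '--verbose', action='count', default=0)

root = argparse.ArgumentParser(prog='tool')
subparsers = root.add_subparsers(metavar='COMMAND', required=True)

report = subparsers.add_parser('report', parents=[common], help='produce a report')
report.set_defaults(func=lambda args: print('verbose:', args.verbose))

args = root.parse_args(['report', '-vv'])
args.func(args)  # verbose: 2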

View File

@@ -0,0 +1,28 @@
"""Command line parsing for all `coverage analyze` commands."""
from __future__ import annotations

import argparse

from .targets import (
    do_targets,
)

from ....environments import (
    CompositeActionCompletionFinder,
)


def do_analyze(
subparsers,
parent, # type: argparse.ArgumentParser
completer, # type: CompositeActionCompletionFinder
): # type: (...) -> None
"""Command line parsing for all `coverage analyze` commands."""
parser = subparsers.add_parser(
'analyze',
help='analyze collected coverage data',
) # type: argparse.ArgumentParser
analyze_subparsers = parser.add_subparsers(metavar='COMMAND', required=True)
do_targets(analyze_subparsers, parent, completer)

View File

@@ -0,0 +1,48 @@
"""Command line parsing for all `coverage analyze targets` commands."""
from __future__ import annotations

import argparse

from .....environments import (
    CompositeActionCompletionFinder,
)

from .combine import (
    do_combine,
)

from .expand import (
    do_expand,
)

from .filter import (
    do_filter,
)

from .generate import (
    do_generate,
)

from .missing import (
    do_missing,
)


def do_targets(
subparsers,
parent, # type: argparse.ArgumentParser
completer, # type: CompositeActionCompletionFinder
): # type: (...) -> None
"""Command line parsing for all `coverage analyze targets` commands."""
targets = subparsers.add_parser(
'targets',
help='analyze integration test target coverage',
)
targets_subparsers = targets.add_subparsers(metavar='COMMAND', required=True)
do_generate(targets_subparsers, parent, completer)
do_expand(targets_subparsers, parent, completer)
do_filter(targets_subparsers, parent, completer)
do_combine(targets_subparsers, parent, completer)
do_missing(targets_subparsers, parent, completer)
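
Each do_* module adds one more add_subparsers level, which is how a chain like `ansible-test coverage analyze targets combine` gets routed. Stripped of the surrounding plumbing, the nesting looks roughly like this (a sketch, not the actual wiring):

import argparse

root = argparse.ArgumentParser(prog='ansible-test')
commands = root.add_subparsers(metavar='COMMAND', required=True)

coverage = commands.add_parser('coverage', help='code coverage management and reporting')
coverage_commands = coverage.add_subparsers(metavar='COMMAND', required=True)

analyze = coverage_commands.add_parser('analyze', help='analyze collected coverage data')
analyze_commands = analyze.add_subparsers(metavar='COMMAND', required=True)

targets = analyze_commands.add_parser('targets', help='analyze integration test target coverage')
# each leaf then calls set_defaults(func=..., config=...) as combine does below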

View File

@@ -0,0 +1,49 @@
"""Command line parsing for the `coverage analyze targets combine` command."""
from __future__ import annotations

import argparse

from ......commands.coverage.analyze.targets.combine import (
    command_coverage_analyze_targets_combine,
    CoverageAnalyzeTargetsCombineConfig,
)

from .....environments import (
    CompositeActionCompletionFinder,
    ControllerMode,
    TargetMode,
    add_environments,
)


def do_combine(
subparsers,
parent, # type: argparse.ArgumentParser
completer, # type: CompositeActionCompletionFinder
):  # type: (...) -> None
"""Command line parsing for the `coverage analyze targets combine` command."""
parser = subparsers.add_parser(
'combine',
parents=[parent],
help='combine multiple aggregated coverage files',
) # type: argparse.ArgumentParser
parser.set_defaults(
func=command_coverage_analyze_targets_combine,
config=CoverageAnalyzeTargetsCombineConfig,
)
targets_combine = parser.add_argument_group('coverage arguments')
targets_combine.add_argument(
'input_file',
nargs='+',
help='input file to read aggregated coverage from',
)
targets_combine.add_argument(
'output_file',
help='output file to write aggregated coverage to',
)
add_environments(parser, completer, ControllerMode.ORIGIN, TargetMode.NO_TARGETS) # coverage analyze targets combine
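
Note that input_file uses nargs='+' immediately followed by a plain positional: argparse matches consecutive positionals together and reserves the final argument for output_file. A quick standalone check of that behavior:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('input_file', nargs='+')
parser.add_argument('output_file')

args = parser.parse_args(['a.json', 'b.json', 'combined.json'])
print(args.input_file, args.output_file)  # ['a.json', 'b.json'] combined.json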
