Init: mediaserver

2023-02-08 12:13:28 +01:00
parent 848bc9739c
commit f7c23d4ba9
31914 changed files with 6175775 additions and 0 deletions

View File

@@ -0,0 +1,13 @@
base image=quay.io/ansible/base-test-container:1.1.0 python=3.9,2.6,2.7,3.5,3.6,3.7,3.8,3.10 seccomp=unconfined
default image=quay.io/ansible/default-test-container:4.1.0 python=3.9,2.6,2.7,3.5,3.6,3.7,3.8,3.10 seccomp=unconfined context=collection
default image=quay.io/ansible/ansible-core-test-container:4.1.0 python=3.9,2.6,2.7,3.5,3.6,3.7,3.8,3.10 seccomp=unconfined context=ansible-core
alpine3 image=quay.io/ansible/alpine3-test-container:3.1.0 python=3.9
centos6 image=quay.io/ansible/centos6-test-container:3.1.0 python=2.6 seccomp=unconfined
centos7 image=quay.io/ansible/centos7-test-container:3.1.0 python=2.7 seccomp=unconfined
centos8 image=quay.io/ansible/centos8-test-container:3.1.0 python=3.6 seccomp=unconfined
fedora33 image=quay.io/ansible/fedora33-test-container:3.1.0 python=3.9
fedora34 image=quay.io/ansible/fedora34-test-container:3.1.0 python=3.9 seccomp=unconfined
opensuse15py2 image=quay.io/ansible/opensuse15py2-test-container:3.1.0 python=2.7
opensuse15 image=quay.io/ansible/opensuse15-test-container:3.1.0 python=3.6
ubuntu1804 image=quay.io/ansible/ubuntu1804-test-container:3.1.0 python=3.6 seccomp=unconfined
ubuntu2004 image=quay.io/ansible/ubuntu2004-test-container:3.1.0 python=3.8 seccomp=unconfined
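
Each entry above is a platform name followed by space-separated key=value settings. A minimal parsing sketch under that assumption (parse_completion_entry is hypothetical, not part of this commit):

import typing as t

def parse_completion_entry(line):  # type: (str) -> t.Tuple[str, t.Dict[str, str]]
    """Split a 'name key=value ...' completion entry into its name and settings."""
    name, _sep, rest = line.partition(' ')
    settings = dict(field.split('=', 1) for field in rest.split())
    return name, settings

name, settings = parse_completion_entry('fedora34 image=quay.io/ansible/fedora34-test-container:3.1.0 python=3.9')
# name == 'fedora34'; settings == {'image': 'quay.io/ansible/fedora34-test-container:3.1.0', 'python': '3.9'}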

View File

@@ -0,0 +1,2 @@
ios/csr1000v collection=cisco.ios connection=ansible.netcommon.network_cli provider=aws
vyos/1.1.8 collection=vyos.vyos connection=ansible.netcommon.network_cli provider=aws

View File

@@ -0,0 +1,10 @@
freebsd/12.2 python=3.7,2.7,3.8 python_dir=/usr/local/bin provider=aws
freebsd/13.0 python=3.7,2.7,3.8,3.9 python_dir=/usr/local/bin provider=aws
freebsd python_dir=/usr/local/bin provider=aws
macos/11.1 python=3.9 python_dir=/usr/local/bin provider=parallels
macos python_dir=/usr/local/bin provider=parallels
rhel/7.9 python=2.7 provider=aws
rhel/8.4 python=3.6,3.8 provider=aws
rhel provider=aws
aix/7.2 python=2.7,3.7 python_dir=/opt/freeware/bin provider=ibmps
aix python_dir=/opt/freeware/bin provider=ibmps

View File

@@ -0,0 +1,5 @@
windows/2012 provider=aws
windows/2012-R2 provider=aws
windows/2016 provider=aws
windows/2019 provider=aws
windows/2022 provider=aws

View File

@@ -0,0 +1,21 @@
- name: Setup POSIX code coverage configuration
hosts: all
gather_facts: no
tasks:
- name: Create coverage temporary directory
file:
path: "{{ common_temp_dir }}"
mode: "{{ mode_directory }}"
state: directory
- name: Create coverage configuration file
copy:
dest: "{{ coverage_config_path }}"
content: "{{ coverage_config }}"
mode: "{{ mode_file }}"
- name: Create coverage output directory
file:
path: "{{ coverage_output_path }}"
mode: "{{ mode_directory_write }}"
state: directory
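# Hedged usage sketch: ansible-test drives this playbook through the run_playbook()
# helper that appears later in this commit (the playbook file name here is assumed);
# every variable value below is hypothetical and only illustrates the expected inputs.
#   run_playbook(args, inventory_path, 'posix_coverage_setup.yml', dict(
#       common_temp_dir='/tmp/ansible-test-coverage',
#       coverage_config_path='/tmp/ansible-test-coverage/coveragerc',
#       coverage_config=coverage_config_text,  # rendered coverage configuration
#       coverage_output_path='/tmp/ansible-test-coverage/output',
#       mode_directory='0755',
#       mode_file='0644',
#       mode_directory_write='0777',
#   ))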

View File

@@ -0,0 +1,8 @@
- name: Teardown POSIX code coverage configuration
hosts: all
gather_facts: no
tasks:
- name: Remove coverage temporary directory
file:
path: "{{ common_temp_dir }}"
state: absent

View File

@@ -0,0 +1,9 @@
- name: Prepare POSIX hosts file
hosts: all
gather_facts: no
tasks:
- name: Add container hostname(s) to hosts file
blockinfile:
path: /etc/hosts
block: "{{ '\n'.join(hosts_entries) }}"
unsafe_writes: yes
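# For illustration: blockinfile wraps the rendered entries in managed-block markers,
# so /etc/hosts gains a section like the following (address and hostname hypothetical):
#   # BEGIN ANSIBLE MANAGED BLOCK
#   192.0.2.10 test-container
#   # END ANSIBLE MANAGED BLOCK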

View File

@@ -0,0 +1,10 @@
- name: Restore POSIX hosts file
hosts: all
gather_facts: no
tasks:
- name: Remove container hostname(s) from hosts file
blockinfile:
path: /etc/hosts
block: "{{ '\n'.join(hosts_entries) }}"
unsafe_writes: yes
state: absent

View File

@@ -0,0 +1,23 @@
- name: Prepare PyPI proxy configuration
hosts: all
gather_facts: no
tasks:
- name: Make sure the ~/.pip directory exists
file:
path: ~/.pip
state: directory
- name: Configure a custom index for pip based installs
copy:
content: |
[global]
index-url = {{ pypi_endpoint }}
trusted-host = {{ pypi_hostname }}
dest: ~/.pip/pip.conf
force: "{{ force }}"
- name: Configure a custom index for easy_install based installs
copy:
content: |
[easy_install]
          index_url = {{ pypi_endpoint }}
dest: ~/.pydistutils.cfg
force: "{{ force }}"
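# For illustration, with a hypothetical pypi_endpoint of http://localhost:44441/root/pypi/+simple/
# and a pypi_hostname of localhost, the rendered ~/.pip/pip.conf would read:
#   [global]
#   index-url = http://localhost:44441/root/pypi/+simple/
#   trusted-host = localhost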

View File

@@ -0,0 +1,12 @@
- name: Restore PyPI proxy configuration
hosts: all
gather_facts: no
tasks:
- name: Remove custom index for pip based installs
file:
path: ~/.pip/pip.conf
state: absent
- name: Remove custom index for easy_install based installs
file:
path: ~/.pydistutils.cfg
state: absent

View File

@@ -0,0 +1,18 @@
- name: Setup Windows code coverage configuration
hosts: all
gather_facts: no
tasks:
- name: Create coverage temporary directory
ansible.windows.win_file:
path: '{{ remote_temp_path }}'
state: directory
- name: Allow everyone to write to the temporary coverage directory
ansible.windows.win_acl:
path: '{{ remote_temp_path }}'
user: Everyone
rights: Modify
inherit: ContainerInherit, ObjectInherit
propagation: 'None'
type: allow
state: present

View File

@@ -0,0 +1,70 @@
- name: Teardown Windows code coverage configuration
hosts: all
gather_facts: no
tasks:
- name: Zip up all coverage files
ansible.windows.win_shell: |
$coverage_dir = '{{ remote_temp_path }}'
$zip_file = Join-Path -Path $coverage_dir -ChildPath 'coverage.zip'
if (Test-Path -LiteralPath $zip_file) {
Remove-Item -LiteralPath $zip_file -Force
}
$coverage_files = Get-ChildItem -LiteralPath $coverage_dir -Include '*=coverage*' -File
$legacy = $false
try {
            # Requires .NET 4.5+ which isn't present on older Windows versions. Remove once 2008/R2 is EOL.
# We also can't use the Shell.Application as it will fail on GUI-less servers (Server Core).
Add-Type -AssemblyName System.IO.Compression -ErrorAction Stop > $null
} catch {
$legacy = $true
}
if ($legacy) {
New-Item -Path $zip_file -ItemType File > $null
$shell = New-Object -ComObject Shell.Application
$zip = $shell.Namespace($zip_file)
foreach ($file in $coverage_files) {
$zip.CopyHere($file.FullName)
}
} else {
$fs = New-Object -TypeName System.IO.FileStream -ArgumentList $zip_file, 'CreateNew'
try {
$archive = New-Object -TypeName System.IO.Compression.ZipArchive -ArgumentList @(
$fs,
[System.IO.Compression.ZipArchiveMode]::Create
)
try {
foreach ($file in $coverage_files) {
$archive_entry = $archive.CreateEntry($file.Name, 'Optimal')
$entry_fs = $archive_entry.Open()
try {
$file_fs = [System.IO.File]::OpenRead($file.FullName)
try {
$file_fs.CopyTo($entry_fs)
} finally {
$file_fs.Dispose()
}
} finally {
$entry_fs.Dispose()
}
}
} finally {
$archive.Dispose()
}
} finally {
$fs.Dispose()
}
}
- name: Fetch coverage zip
fetch:
src: '{{ remote_temp_path }}\coverage.zip'
dest: '{{ local_temp_path }}/{{ inventory_hostname }}.zip'
flat: yes
- name: Remove temporary coverage directory
ansible.windows.win_file:
path: '{{ remote_temp_path }}'
state: absent

View File

@@ -0,0 +1,34 @@
<#
.SYNOPSIS
Add one or more hosts entries to the Windows hosts file.
.PARAMETER Hosts
A list of hosts entries, delimited by '|'.
#>
[CmdletBinding()]
param(
[Parameter(Mandatory=$true, Position=0)][String]$Hosts
)
$ProgressPreference = "SilentlyContinue"
$ErrorActionPreference = "Stop"
Write-Verbose -Message "Adding host file entries"
$hosts_entries = $Hosts.Split('|')
$hosts_file = "$env:SystemRoot\System32\drivers\etc\hosts"
$hosts_file_lines = [System.IO.File]::ReadAllLines($hosts_file)
$changed = $false
foreach ($entry in $hosts_entries) {
if ($entry -notin $hosts_file_lines) {
$hosts_file_lines += $entry
$changed = $true
}
}
if ($changed) {
Write-Verbose -Message "Host file is missing entries, adding missing entries"
[System.IO.File]::WriteAllLines($hosts_file, $hosts_file_lines)
}

View File

@@ -0,0 +1,7 @@
- name: Prepare Windows hosts file
hosts: all
gather_facts: no
tasks:
- name: Add container hostname(s) to hosts file
script:
cmd: "\"{{ playbook_dir }}/windows_hosts_prepare.ps1\" -Hosts \"{{ '|'.join(hosts_entries) }}\""

View File

@@ -0,0 +1,37 @@
<#
.SYNOPSIS
Remove one or more hosts entries from the Windows hosts file.
.PARAMETER Hosts
A list of hosts entries, delimited by '|'.
#>
[CmdletBinding()]
param(
[Parameter(Mandatory=$true, Position=0)][String]$Hosts
)
$ProgressPreference = "SilentlyContinue"
$ErrorActionPreference = "Stop"
Write-Verbose -Message "Removing host file entries"
$hosts_entries = $Hosts.Split('|')
$hosts_file = "$env:SystemRoot\System32\drivers\etc\hosts"
$hosts_file_lines = [System.IO.File]::ReadAllLines($hosts_file)
$changed = $false
$new_lines = [System.Collections.ArrayList]@()
foreach ($host_line in $hosts_file_lines) {
if ($host_line -in $hosts_entries) {
$changed = $true
} else {
$new_lines += $host_line
}
}
if ($changed) {
Write-Verbose -Message "Host file has extra entries, removing extra entries"
[System.IO.File]::WriteAllLines($hosts_file, $new_lines)
}

View File

@@ -0,0 +1,7 @@
- name: Restore Windows hosts file
hosts: all
gather_facts: no
tasks:
- name: Remove container hostname(s) from hosts file
script:
cmd: "\"{{ playbook_dir }}/windows_hosts_restore.ps1\" -Hosts \"{{ '|'.join(hosts_entries) }}\""

View File

@@ -0,0 +1,9 @@
[pytest]
xfail_strict = true
mock_use_standalone_module = true
# It was decided to stick with "legacy" (aka "xunit1") for now.
# Currently used pytest versions all support xunit2 format too.
# Except the one used under Python 2.6 - it doesn't process this option
# at all. Ref:
# https://github.com/ansible/ansible/pull/66445#discussion_r372530176
junit_family = xunit1

View File

@@ -0,0 +1,13 @@
# Note: this requirements.txt file is used to specify what dependencies are
# needed to make the package run rather than for deployment of a tested set of
# packages. Thus, this should be the loosest set possible (only required
# packages, not optional ones, and with the widest range of versions that could
# be suitable)
jinja2
PyYAML
cryptography
packaging
# NOTE: resolvelib 0.x version bumps should be considered major/breaking
# NOTE: and we should update the upper cap with care, at least until 1.0
# NOTE: Ref: https://github.com/sarugaku/resolvelib/issues/69
resolvelib >= 0.5.3, < 0.6.0 # dependency resolver used by ansible-galaxy

View File

@@ -0,0 +1,30 @@
# do not add a cryptography or pyopenssl constraint to this file, they require special handling, see get_cryptography_requirements in python_requirements.py
# do not add a coverage constraint to this file, it is handled internally by ansible-test
packaging < 21.0 ; python_version < '3.6' # packaging 21.0 requires Python 3.6 or newer
six < 1.14.0 ; python_version < '2.7' # six 1.14.0 drops support for python 2.6
jinja2 < 2.11 ; python_version < '2.7' # jinja2 2.11 and later require python 2.7 or later
urllib3 < 1.24 ; python_version < '2.7' # urllib3 1.24 and later require python 2.7 or later
pywinrm >= 0.3.0 # message encryption support
wheel < 0.30.0 ; python_version < '2.7' # wheel 0.30.0 and later require python 2.7 or later
idna < 2.6, >= 2.5 # linode requires idna < 2.9, >= 2.5, requests requires idna < 2.6, but cryptography will cause the latest version to be installed instead
paramiko < 2.4.0 ; python_version < '2.7' # paramiko 2.4.0 drops support for python 2.6
pytest < 3.3.0, >= 3.1.0 ; python_version < '2.7' # pytest 3.3.0 drops support for python 2.6
pytest < 5.0.0, >= 4.5.0 ; python_version == '2.7' # pytest 5.0.0 and later will no longer support python 2.7
pytest >= 4.5.0 ; python_version > '2.7' # pytest 4.5.0 added support for --strict-markers
pytest-forked < 1.0.2 ; python_version < '2.7' # pytest-forked 1.0.2 and later require python 2.7 or later
pytest-forked >= 1.0.2 ; python_version >= '2.7' # pytest-forked before 1.0.2 does not work with pytest 4.2.0+ (which requires python 2.7+)
ntlm-auth >= 1.3.0 # message encryption support using cryptography
requests < 2.20.0 ; python_version < '2.7' # requests 2.20.0 drops support for python 2.6
requests-ntlm >= 1.1.0 # message encryption support
requests-credssp >= 0.1.0 # message encryption support
virtualenv < 16.0.0 ; python_version < '2.7' # virtualenv 16.0.0 and later require python 2.7 or later
pyparsing < 3.0.0 ; python_version < '3.5' # pyparsing 3 and later require python 3.5 or later
pyyaml < 5.1 ; python_version < '2.7' # pyyaml 5.1 and later require python 2.7 or later
pycparser < 2.19 ; python_version < '2.7' # pycparser 2.19 and later require python 2.7 or later
mock >= 2.0.0 # needed for features backported from Python 3.6 unittest.mock (assert_called, assert_called_once...)
pytest-mock >= 1.4.0 # needed for mock_use_standalone_module pytest option
xmltodict < 0.12.0 ; python_version < '2.7' # xmltodict 0.12.0 and later require python 2.7 or later
setuptools < 37 ; python_version == '2.6' # setuptools 37 and later require python 2.7 or later
setuptools < 45 ; python_version == '2.7' # setuptools 45 and later require python 3.5 or later
pyspnego >= 0.1.6 ; python_version >= '3.10' # bug in older releases breaks on Python 3.10
MarkupSafe < 2.0.0 ; python_version < '3.6' # MarkupSafe >= 2.0.0 requires Python >= 3.6

View File

@@ -0,0 +1,7 @@
jinja2 == 3.0.1 # ansible-core requirement
pyyaml == 5.4.1 # ansible-core requirement
packaging == 21.0 # ansible-doc requirement
# dependencies
MarkupSafe == 2.0.1
pyparsing == 2.4.7

View File

@@ -0,0 +1,9 @@
antsibull-changelog == 0.9.0
# dependencies
pyyaml == 5.4.1
docutils == 0.17.1
packaging == 21.0
pyparsing == 2.4.7
rstcheck == 3.3.1
semantic-version == 2.8.5

View File

@@ -0,0 +1 @@
pycodestyle == 2.6.0

View File

@@ -0,0 +1,44 @@
param (
[Switch]
$IsContainer
)
#Requires -Version 6
Set-StrictMode -Version 2.0
$ErrorActionPreference = "Stop"
$ProgressPreference = 'SilentlyContinue'
Function Install-PSModule {
[CmdletBinding()]
param(
[Parameter(Mandatory=$true)]
[String]
$Name,
[Parameter(Mandatory=$true)]
[Version]
$RequiredVersion
)
# In case PSGallery is down we check if the module is already installed.
$installedModule = Get-Module -Name $Name -ListAvailable | Where-Object Version -eq $RequiredVersion
if (-not $installedModule) {
Install-Module -Name $Name -RequiredVersion $RequiredVersion -Scope CurrentUser
}
}
Set-PSRepository -Name PSGallery -InstallationPolicy Trusted
Install-PSModule -Name PSScriptAnalyzer -RequiredVersion 1.18.0
if ($IsContainer) {
    # PSScriptAnalyzer contains lots of json files for the UseCompatibleCommands check. We don't use this rule, so by
# removing the contents we can save 200MB in the docker image (or more in the future).
# https://github.com/PowerShell/PSScriptAnalyzer/blob/master/RuleDocumentation/UseCompatibleCommands.md
$pssaPath = (Get-Module -ListAvailable -Name PSScriptAnalyzer).ModuleBase
$compatPath = Join-Path -Path $pssaPath -ChildPath compatibility_profiles -AdditionalChildPath '*'
Remove-Item -Path $compatPath -Recurse -Force
}
# Install the PSCustomUseLiteralPath rule
Install-PSModule -Name PSSA-PSCustomUseLiteralPath -RequiredVersion 0.1.1

View File

@@ -0,0 +1,10 @@
pylint == 2.9.3
pyyaml == 5.4.1 # needed for collection_detail.py
# dependencies
astroid == 2.6.6
isort == 5.9.3
lazy-object-proxy == 1.6.0
mccabe == 0.6.1
toml == 0.10.2
wrapt == 1.12.1

View File

@@ -0,0 +1,2 @@
pyyaml == 5.4.1
voluptuous == 0.12.1

View File

@@ -0,0 +1,6 @@
jinja2 == 3.0.1 # ansible-core requirement
pyyaml == 5.4.1 # needed for collection_detail.py
voluptuous == 0.12.1
# dependencies
MarkupSafe == 2.0.1

View File

@@ -0,0 +1,5 @@
yamllint == 1.26.0
# dependencies
pathspec == 0.9.0
pyyaml == 5.4.1

View File

@@ -0,0 +1,5 @@
mock
pytest
pytest-mock
pytest-xdist
pyyaml # required by the collection loader (only needed for collections)

View File

@@ -0,0 +1,5 @@
ntlm-auth
requests-ntlm
requests-credssp
pypsrp
pywinrm[credssp]

View File

@@ -0,0 +1,99 @@
"""Test runner for all Ansible tests."""
from __future__ import annotations
import os
import sys
# This import should occur as early as possible.
# It must occur before subprocess has been imported anywhere in the current process.
from .init import (
CURRENT_RLIMIT_NOFILE,
)
from .util import (
ApplicationError,
display,
MAXFD,
)
from .delegation import (
delegate,
)
from .executor import (
ApplicationWarning,
Delegate,
ListTargets,
)
from .timeout import (
configure_timeout,
)
from .data import (
data_context,
)
from .util_common import (
CommonConfig,
)
from .cli import (
parse_args,
)
from .provisioning import (
PrimeContainers,
)
def main():
"""Main program function."""
try:
os.chdir(data_context().content.root)
args = parse_args()
config = args.config(args) # type: CommonConfig
display.verbosity = config.verbosity
display.truncate = config.truncate
display.redact = config.redact
display.color = config.color
display.info_stderr = config.info_stderr
configure_timeout(config)
display.info('RLIMIT_NOFILE: %s' % (CURRENT_RLIMIT_NOFILE,), verbosity=2)
display.info('MAXFD: %d' % MAXFD, verbosity=2)
delegate_args = None
target_names = None
try:
args.func(config)
except PrimeContainers:
pass
except ListTargets as ex:
# save target_names for use once we exit the exception handler
target_names = ex.target_names
except Delegate as ex:
# save delegation args for use once we exit the exception handler
delegate_args = (ex.host_state, ex.exclude, ex.require)
if delegate_args:
# noinspection PyTypeChecker
delegate(config, *delegate_args)
if target_names:
for target_name in target_names:
print(target_name) # info goes to stderr, this should be on stdout
display.review_warnings()
config.success = True
except ApplicationWarning as ex:
display.warning(u'%s' % ex)
sys.exit(0)
except ApplicationError as ex:
display.error(u'%s' % ex)
sys.exit(1)
except KeyboardInterrupt:
sys.exit(2)
except BrokenPipeError:
sys.exit(3)

View File

@@ -0,0 +1,301 @@
"""Miscellaneous utility functions and classes specific to ansible cli tools."""
from __future__ import annotations
import json
import os
import typing as t
from .constants import (
SOFT_RLIMIT_NOFILE,
)
from .io import (
write_text_file,
)
from .util import (
common_environment,
ApplicationError,
ANSIBLE_LIB_ROOT,
ANSIBLE_TEST_DATA_ROOT,
ANSIBLE_BIN_PATH,
ANSIBLE_SOURCE_ROOT,
ANSIBLE_TEST_TOOLS_ROOT,
get_ansible_version,
)
from .util_common import (
create_temp_dir,
run_command,
ResultType,
intercept_python,
get_injector_path,
)
from .config import (
IntegrationConfig,
PosixIntegrationConfig,
EnvironmentConfig,
CommonConfig,
)
from .data import (
data_context,
)
from .python_requirements import (
install_requirements,
)
from .host_configs import (
PythonConfig,
)
def parse_inventory(args, inventory_path): # type: (EnvironmentConfig, str) -> t.Dict[str, t.Any]
"""Return a dict parsed from the given inventory file."""
cmd = ['ansible-inventory', '-i', inventory_path, '--list']
env = ansible_environment(args)
inventory = json.loads(intercept_python(args, args.controller_python, cmd, env, capture=True, always=True)[0])
return inventory
def get_hosts(inventory, group_name): # type: (t.Dict[str, t.Any], str) -> t.Dict[str, t.Dict[str, t.Any]]
"""Return a dict of hosts from the specified group in the given inventory."""
hostvars = inventory.get('_meta', {}).get('hostvars', {})
group = inventory.get(group_name, {})
host_names = group.get('hosts', [])
hosts = dict((name, hostvars.get(name, {})) for name in host_names)
return hosts
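# Example with a minimal, hypothetical inventory structure:
#   inventory = {'_meta': {'hostvars': {'web1': {'ansible_host': '10.0.0.5'}}},
#                'webservers': {'hosts': ['web1', 'web2']}}
#   get_hosts(inventory, 'webservers')
#   # -> {'web1': {'ansible_host': '10.0.0.5'}, 'web2': {}}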
def ansible_environment(args, color=True, ansible_config=None): # type: (CommonConfig, bool, t.Optional[str]) -> t.Dict[str, str]
"""Return a dictionary of environment variables to use when running Ansible commands."""
env = common_environment()
path = env['PATH']
if not path.startswith(ANSIBLE_BIN_PATH + os.path.pathsep):
path = ANSIBLE_BIN_PATH + os.path.pathsep + path
if not ansible_config:
# use the default empty configuration unless one has been provided
ansible_config = args.get_ansible_config()
if not args.explain and not os.path.exists(ansible_config):
raise ApplicationError('Configuration not found: %s' % ansible_config)
ansible = dict(
ANSIBLE_PYTHON_MODULE_RLIMIT_NOFILE=str(SOFT_RLIMIT_NOFILE),
        ANSIBLE_FORCE_COLOR='true' if args.color and color else 'false',
ANSIBLE_FORCE_HANDLERS='true', # allow cleanup handlers to run when tests fail
ANSIBLE_HOST_PATTERN_MISMATCH='error', # prevent tests from unintentionally passing when hosts are not found
ANSIBLE_INVENTORY='/dev/null', # force tests to provide inventory
ANSIBLE_DEPRECATION_WARNINGS='false',
ANSIBLE_HOST_KEY_CHECKING='false',
ANSIBLE_RETRY_FILES_ENABLED='false',
ANSIBLE_CONFIG=ansible_config,
ANSIBLE_LIBRARY='/dev/null',
ANSIBLE_DEVEL_WARNING='false', # Don't show warnings that CI is running devel
ANSIBLE_JINJA2_NATIVE_WARNING='false', # Don't show warnings in CI for old Jinja for native
PYTHONPATH=get_ansible_python_path(args),
PAGER='/bin/cat',
PATH=path,
# give TQM worker processes time to report code coverage results
# without this the last task in a play may write no coverage file, an empty file, or an incomplete file
# enabled even when not using code coverage to surface warnings when worker processes do not exit cleanly
ANSIBLE_WORKER_SHUTDOWN_POLL_COUNT='100',
ANSIBLE_WORKER_SHUTDOWN_POLL_DELAY='0.1',
)
if isinstance(args, IntegrationConfig) and args.coverage:
# standard path injection is not effective for ansible-connection, instead the location must be configured
# ansible-connection only requires the injector for code coverage
# the correct python interpreter is already selected using the sys.executable used to invoke ansible
ansible.update(dict(
ANSIBLE_CONNECTION_PATH=os.path.join(get_injector_path(), 'ansible-connection'),
))
if isinstance(args, PosixIntegrationConfig):
ansible.update(dict(
ANSIBLE_PYTHON_INTERPRETER='/set/ansible_python_interpreter/in/inventory', # force tests to set ansible_python_interpreter in inventory
))
env.update(ansible)
if args.debug:
env.update(dict(
ANSIBLE_DEBUG='true',
ANSIBLE_LOG_PATH=os.path.join(ResultType.LOGS.name, 'debug.log'),
))
if data_context().content.collection:
env.update(dict(
ANSIBLE_COLLECTIONS_PATH=data_context().content.collection.root,
))
if data_context().content.is_ansible:
env.update(configure_plugin_paths(args))
return env
def configure_plugin_paths(args): # type: (CommonConfig) -> t.Dict[str, str]
"""Return environment variables with paths to plugins relevant for the current command."""
if not isinstance(args, IntegrationConfig):
return {}
support_path = os.path.join(ANSIBLE_SOURCE_ROOT, 'test', 'support', args.command)
# provide private copies of collections for integration tests
collection_root = os.path.join(support_path, 'collections')
env = dict(
ANSIBLE_COLLECTIONS_PATH=collection_root,
)
# provide private copies of plugins for integration tests
plugin_root = os.path.join(support_path, 'plugins')
plugin_list = [
'action',
'become',
'cache',
'callback',
'cliconf',
'connection',
'filter',
'httpapi',
'inventory',
'lookup',
'netconf',
# 'shell' is not configurable
'strategy',
'terminal',
'test',
'vars',
]
# most plugins follow a standard naming convention
plugin_map = dict(('%s_plugins' % name, name) for name in plugin_list)
# these plugins do not follow the standard naming convention
plugin_map.update(
doc_fragment='doc_fragments',
library='modules',
module_utils='module_utils',
)
env.update(dict(('ANSIBLE_%s' % key.upper(), os.path.join(plugin_root, value)) for key, value in plugin_map.items()))
# only configure directories which exist
env = dict((key, value) for key, value in env.items() if os.path.isdir(value))
return env
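# For example, under the standard naming convention the 'action' entry becomes
# ANSIBLE_ACTION_PLUGINS=<support_path>/plugins/action, while a special case such as
# 'library' maps to ANSIBLE_LIBRARY=<support_path>/plugins/modules (kept only when the directory exists).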
def get_ansible_python_path(args): # type: (CommonConfig) -> str
"""
Return a directory usable for PYTHONPATH, containing only the ansible package.
If a temporary directory is required, it will be cached for the lifetime of the process and cleaned up at exit.
"""
try:
return get_ansible_python_path.python_path
except AttributeError:
pass
if ANSIBLE_SOURCE_ROOT:
# when running from source there is no need for a temporary directory to isolate the ansible package
python_path = os.path.dirname(ANSIBLE_LIB_ROOT)
else:
# when not running from source the installed directory is unsafe to add to PYTHONPATH
# doing so would expose many unwanted packages on sys.path
# instead a temporary directory is created which contains only ansible using a symlink
python_path = create_temp_dir(prefix='ansible-test-')
os.symlink(ANSIBLE_LIB_ROOT, os.path.join(python_path, 'ansible'))
if not args.explain:
generate_egg_info(python_path)
get_ansible_python_path.python_path = python_path
return python_path
def generate_egg_info(path): # type: (str) -> None
"""Generate an egg-info in the specified base directory."""
# minimal PKG-INFO stub following the format defined in PEP 241
# required for older setuptools versions to avoid a traceback when importing pkg_resources from packages like cryptography
# newer setuptools versions are happy with an empty directory
# including a stub here means we don't need to locate the existing file or have setup.py generate it when running from source
pkg_info = '''
Metadata-Version: 1.0
Name: ansible
Version: %s
Platform: UNKNOWN
Summary: Radically simple IT automation
Author-email: info@ansible.com
License: GPLv3+
''' % get_ansible_version()
pkg_info_path = os.path.join(path, 'ansible_core.egg-info', 'PKG-INFO')
if os.path.exists(pkg_info_path):
return
write_text_file(pkg_info_path, pkg_info.lstrip(), create_directories=True)
class CollectionDetail:
"""Collection detail."""
def __init__(self): # type: () -> None
self.version = None # type: t.Optional[str]
class CollectionDetailError(ApplicationError):
"""An error occurred retrieving collection detail."""
def __init__(self, reason): # type: (str) -> None
super().__init__('Error collecting collection detail: %s' % reason)
self.reason = reason
def get_collection_detail(args, python): # type: (EnvironmentConfig, PythonConfig) -> CollectionDetail
"""Return collection detail."""
collection = data_context().content.collection
directory = os.path.join(collection.root, collection.directory)
stdout = run_command(args, [python.path, os.path.join(ANSIBLE_TEST_TOOLS_ROOT, 'collection_detail.py'), directory], capture=True, always=True)[0]
result = json.loads(stdout)
error = result.get('error')
if error:
raise CollectionDetailError(error)
version = result.get('version')
detail = CollectionDetail()
detail.version = str(version) if version is not None else None
return detail
def run_playbook(
args, # type: EnvironmentConfig
inventory_path, # type: str
playbook, # type: str
run_playbook_vars=None, # type: t.Optional[t.Dict[str, t.Any]]
capture=False, # type: bool
): # type: (...) -> None
"""Run the specified playbook using the given inventory file and playbook variables."""
playbook_path = os.path.join(ANSIBLE_TEST_DATA_ROOT, 'playbooks', playbook)
cmd = ['ansible-playbook', '-i', inventory_path, playbook_path]
if run_playbook_vars:
cmd.extend(['-e', json.dumps(run_playbook_vars)])
if args.verbosity:
cmd.append('-%s' % ('v' * args.verbosity))
install_requirements(args, args.controller_python, ansible=True) # run_playbook()
env = ansible_environment(args)
intercept_python(args, args.controller_python, cmd, env, capture=capture)

View File

@@ -0,0 +1,52 @@
"""Become abstraction for interacting with test hosts."""
from __future__ import annotations
import abc
import shlex
import typing as t
class Become(metaclass=abc.ABCMeta):
"""Base class for become implementations."""
@property
@abc.abstractmethod
def method(self): # type: () -> str
"""The name of the Ansible become plugin that is equivalent to this."""
@abc.abstractmethod
def prepare_command(self, command): # type: (t.List[str]) -> t.List[str]
"""Return the given command, if any, with privilege escalation."""
class Su(Become):
"""Become using 'su'."""
@property
def method(self): # type: () -> str
"""The name of the Ansible become plugin that is equivalent to this."""
return 'su'
def prepare_command(self, command): # type: (t.List[str]) -> t.List[str]
"""Return the given command, if any, with privilege escalation."""
become = ['su', '-l', 'root']
if command:
become.extend(['-c', ' '.join(shlex.quote(c) for c in command)])
return become
class Sudo(Become):
"""Become using 'sudo'."""
@property
def method(self): # type: () -> str
"""The name of the Ansible become plugin that is equivalent to this."""
return 'sudo'
def prepare_command(self, command): # type: (t.List[str]) -> t.List[str]
"""Return the given command, if any, with privilege escalation."""
become = ['sudo', '-in']
if command:
become.extend(['sh', '-c', ' '.join(shlex.quote(c) for c in command)])
return become
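# Illustration of how these wrappers compose a command (example command hypothetical):
#   Sudo().prepare_command(['echo', 'hello world'])
#   # -> ['sudo', '-in', 'sh', '-c', "echo 'hello world'"]
#   Su().prepare_command(['echo', 'hello world'])
#   # -> ['su', '-l', 'root', '-c', "echo 'hello world'"]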

View File

@@ -0,0 +1,95 @@
"""Bootstrapping for test hosts."""
from __future__ import annotations
import dataclasses
import os
import typing as t
from .io import (
read_text_file,
)
from .util import (
ANSIBLE_TEST_TARGET_ROOT,
)
from .util_common import (
ShellScriptTemplate,
set_shebang,
)
from .core_ci import (
SshKey,
)
@dataclasses.dataclass
class Bootstrap:
"""Base class for bootstrapping systems."""
controller: bool
python_versions: t.List[str]
ssh_key: SshKey
@property
def bootstrap_type(self): # type: () -> str
"""The bootstrap type to pass to the bootstrapping script."""
return self.__class__.__name__.replace('Bootstrap', '').lower()
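    # For illustration: BootstrapDocker -> 'docker', BootstrapRemote -> 'remote'.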
def get_variables(self): # type: () -> t.Dict[str, str]
"""The variables to template in the boostrapping script."""
return dict(
bootstrap_type=self.bootstrap_type,
controller='yes' if self.controller else '',
python_versions=self.python_versions,
ssh_key_type=self.ssh_key.KEY_TYPE,
ssh_private_key=self.ssh_key.key_contents,
ssh_public_key=self.ssh_key.pub_contents,
)
def get_script(self): # type: () -> str
"""Return a shell script to bootstrap the specified host."""
path = os.path.join(ANSIBLE_TEST_TARGET_ROOT, 'setup', 'bootstrap.sh')
content = read_text_file(path)
content = set_shebang(content, '/bin/sh')
template = ShellScriptTemplate(content)
variables = self.get_variables()
script = template.substitute(**variables)
return script
@dataclasses.dataclass
class BootstrapDocker(Bootstrap):
"""Bootstrap docker instances."""
def get_variables(self): # type: () -> t.Dict[str, str]
"""The variables to template in the boostrapping script."""
variables = super().get_variables()
variables.update(
platform='',
platform_version='',
)
return variables
@dataclasses.dataclass
class BootstrapRemote(Bootstrap):
"""Bootstrap remote instances."""
platform: str
platform_version: str
def get_variables(self): # type: () -> t.Dict[str, str]
"""The variables to template in the boostrapping script."""
variables = super().get_variables()
variables.update(
platform=self.platform,
platform_version=self.platform_version,
)
return variables

View File

@@ -0,0 +1,30 @@
"""Cache for commonly shared data that is intended to be immutable."""
from __future__ import annotations
import typing as t
from .config import (
CommonConfig,
)
TValue = t.TypeVar('TValue')
class CommonCache:
"""Common cache."""
def __init__(self, args): # type: (CommonConfig) -> None
self.args = args
def get(self, key, factory): # type: (str, t.Callable[[], TValue]) -> TValue
"""Return the value from the cache identified by the given key, using the specified factory method if it is not found."""
if key not in self.args.cache:
self.args.cache[key] = factory()
return self.args.cache[key]
def get_with_args(self, key, factory): # type: (str, t.Callable[[CommonConfig], TValue]) -> TValue
"""Return the value from the cache identified by the given key, using the specified factory method (which accepts args) if it is not found."""
if key not in self.args.cache:
self.args.cache[key] = factory(self.args)
return self.args.cache[key]
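# Minimal usage sketch (the cache key and factory are hypothetical):
#   cache = CommonCache(args)
#   value = cache.get('docker_info', lambda: fetch_docker_info(args))
# The factory runs only on the first lookup; later calls return the stored value.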

View File

@@ -0,0 +1,214 @@
"""Support code for CI environments."""
from __future__ import annotations
import abc
import base64
import json
import os
import tempfile
import typing as t
from ..encoding import (
to_bytes,
to_text,
)
from ..io import (
read_text_file,
write_text_file,
)
from ..config import (
CommonConfig,
TestConfig,
)
from ..util import (
ApplicationError,
display,
get_subclasses,
import_plugins,
raw_command,
cache,
)
class ChangeDetectionNotSupported(ApplicationError):
"""Exception for cases where change detection is not supported."""
class CIProvider(metaclass=abc.ABCMeta):
"""Base class for CI provider plugins."""
priority = 500
@staticmethod
@abc.abstractmethod
def is_supported(): # type: () -> bool
"""Return True if this provider is supported in the current running environment."""
@property
@abc.abstractmethod
def code(self): # type: () -> str
"""Return a unique code representing this provider."""
@property
@abc.abstractmethod
def name(self): # type: () -> str
"""Return descriptive name for this provider."""
@abc.abstractmethod
def generate_resource_prefix(self): # type: () -> str
"""Return a resource prefix specific to this CI provider."""
@abc.abstractmethod
def get_base_branch(self): # type: () -> str
"""Return the base branch or an empty string."""
@abc.abstractmethod
def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]]
"""Initialize change detection."""
@abc.abstractmethod
def supports_core_ci_auth(self): # type: () -> bool
"""Return True if Ansible Core CI is supported."""
@abc.abstractmethod
def prepare_core_ci_auth(self): # type: () -> t.Dict[str, t.Any]
"""Return authentication details for Ansible Core CI."""
@abc.abstractmethod
def get_git_details(self, args): # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
"""Return details about git in the current environment."""
@cache
def get_ci_provider(): # type: () -> CIProvider
"""Return a CI provider instance for the current environment."""
provider = None
import_plugins('ci')
candidates = sorted(get_subclasses(CIProvider), key=lambda c: (c.priority, c.__name__))
for candidate in candidates:
if candidate.is_supported():
provider = candidate()
break
if provider.code:
display.info('Detected CI provider: %s' % provider.name)
return provider
class AuthHelper(metaclass=abc.ABCMeta):
"""Public key based authentication helper for Ansible Core CI."""
def sign_request(self, request): # type: (t.Dict[str, t.Any]) -> None
"""Sign the given auth request and make the public key available."""
payload_bytes = to_bytes(json.dumps(request, sort_keys=True))
signature_raw_bytes = self.sign_bytes(payload_bytes)
signature = to_text(base64.b64encode(signature_raw_bytes))
request.update(signature=signature)
def initialize_private_key(self): # type: () -> str
"""
Initialize and publish a new key pair (if needed) and return the private key.
The private key is cached across ansible-test invocations so it is only generated and published once per CI job.
"""
path = os.path.expanduser('~/.ansible-core-ci-private.key')
if os.path.exists(to_bytes(path)):
private_key_pem = read_text_file(path)
else:
private_key_pem = self.generate_private_key()
write_text_file(path, private_key_pem)
return private_key_pem
@abc.abstractmethod
def sign_bytes(self, payload_bytes): # type: (bytes) -> bytes
"""Sign the given payload and return the signature, initializing a new key pair if required."""
@abc.abstractmethod
def publish_public_key(self, public_key_pem): # type: (str) -> None
"""Publish the given public key."""
@abc.abstractmethod
def generate_private_key(self): # type: () -> str
"""Generate a new key pair, publishing the public key and returning the private key."""
class CryptographyAuthHelper(AuthHelper, metaclass=abc.ABCMeta):
"""Cryptography based public key based authentication helper for Ansible Core CI."""
def sign_bytes(self, payload_bytes): # type: (bytes) -> bytes
"""Sign the given payload and return the signature, initializing a new key pair if required."""
# import cryptography here to avoid overhead and failures in environments which do not use/provide it
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.serialization import load_pem_private_key
private_key_pem = self.initialize_private_key()
private_key = load_pem_private_key(to_bytes(private_key_pem), None, default_backend())
signature_raw_bytes = private_key.sign(payload_bytes, ec.ECDSA(hashes.SHA256()))
return signature_raw_bytes
def generate_private_key(self): # type: () -> str
"""Generate a new key pair, publishing the public key and returning the private key."""
# import cryptography here to avoid overhead and failures in environments which do not use/provide it
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import ec
private_key = ec.generate_private_key(ec.SECP384R1(), default_backend())
public_key = private_key.public_key()
# noinspection PyUnresolvedReferences
private_key_pem = to_text(private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
))
# noinspection PyTypeChecker
public_key_pem = to_text(public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo,
))
self.publish_public_key(public_key_pem)
return private_key_pem
class OpenSSLAuthHelper(AuthHelper, metaclass=abc.ABCMeta):
"""OpenSSL based public key based authentication helper for Ansible Core CI."""
def sign_bytes(self, payload_bytes): # type: (bytes) -> bytes
"""Sign the given payload and return the signature, initializing a new key pair if required."""
private_key_pem = self.initialize_private_key()
with tempfile.NamedTemporaryFile() as private_key_file:
private_key_file.write(to_bytes(private_key_pem))
private_key_file.flush()
with tempfile.NamedTemporaryFile() as payload_file:
payload_file.write(payload_bytes)
payload_file.flush()
with tempfile.NamedTemporaryFile() as signature_file:
raw_command(['openssl', 'dgst', '-sha256', '-sign', private_key_file.name, '-out', signature_file.name, payload_file.name], capture=True)
signature_raw_bytes = signature_file.read()
return signature_raw_bytes
def generate_private_key(self): # type: () -> str
"""Generate a new key pair, publishing the public key and returning the private key."""
private_key_pem = raw_command(['openssl', 'ecparam', '-genkey', '-name', 'secp384r1', '-noout'], capture=True)[0]
public_key_pem = raw_command(['openssl', 'ec', '-pubout'], data=private_key_pem, capture=True)[0]
self.publish_public_key(public_key_pem)
return private_key_pem

View File

@@ -0,0 +1,262 @@
"""Support code for working with Azure Pipelines."""
from __future__ import annotations
import os
import tempfile
import uuid
import typing as t
import urllib.parse
from ..encoding import (
to_bytes,
)
from ..config import (
CommonConfig,
TestConfig,
)
from ..git import (
Git,
)
from ..http import (
HttpClient,
)
from ..util import (
display,
MissingEnvironmentVariable,
)
from . import (
ChangeDetectionNotSupported,
CIProvider,
CryptographyAuthHelper,
)
CODE = 'azp'
class AzurePipelines(CIProvider):
"""CI provider implementation for Azure Pipelines."""
def __init__(self):
self.auth = AzurePipelinesAuthHelper()
@staticmethod
def is_supported(): # type: () -> bool
"""Return True if this provider is supported in the current running environment."""
return os.environ.get('SYSTEM_COLLECTIONURI', '').startswith('https://dev.azure.com/')
@property
def code(self): # type: () -> str
"""Return a unique code representing this provider."""
return CODE
@property
def name(self): # type: () -> str
"""Return descriptive name for this provider."""
return 'Azure Pipelines'
def generate_resource_prefix(self): # type: () -> str
"""Return a resource prefix specific to this CI provider."""
try:
prefix = 'azp-%s-%s-%s' % (
os.environ['BUILD_BUILDID'],
os.environ['SYSTEM_JOBATTEMPT'],
os.environ['SYSTEM_JOBIDENTIFIER'],
)
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0])
return prefix
def get_base_branch(self): # type: () -> str
"""Return the base branch or an empty string."""
base_branch = os.environ.get('SYSTEM_PULLREQUEST_TARGETBRANCH') or os.environ.get('BUILD_SOURCEBRANCHNAME')
if base_branch:
base_branch = 'origin/%s' % base_branch
return base_branch or ''
def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]]
"""Initialize change detection."""
result = AzurePipelinesChanges(args)
if result.is_pr:
job_type = 'pull request'
else:
job_type = 'merge commit'
display.info('Processing %s for branch %s commit %s' % (job_type, result.branch, result.commit))
if not args.metadata.changes:
args.metadata.populate_changes(result.diff)
if result.paths is None:
# There are several likely causes of this:
# - First run on a new branch.
            # - Too many pull request builds have occurred since the last passing merge run.
display.warning('No successful commit found. All tests will be executed.')
return result.paths
def supports_core_ci_auth(self): # type: () -> bool
"""Return True if Ansible Core CI is supported."""
return True
def prepare_core_ci_auth(self): # type: () -> t.Dict[str, t.Any]
"""Return authentication details for Ansible Core CI."""
try:
request = dict(
org_name=os.environ['SYSTEM_COLLECTIONURI'].strip('/').split('/')[-1],
project_name=os.environ['SYSTEM_TEAMPROJECT'],
build_id=int(os.environ['BUILD_BUILDID']),
task_id=str(uuid.UUID(os.environ['SYSTEM_TASKINSTANCEID'])),
)
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0])
self.auth.sign_request(request)
auth = dict(
azp=request,
)
return auth
def get_git_details(self, args): # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
"""Return details about git in the current environment."""
changes = AzurePipelinesChanges(args)
details = dict(
base_commit=changes.base_commit,
commit=changes.commit,
)
return details
class AzurePipelinesAuthHelper(CryptographyAuthHelper):
"""
Authentication helper for Azure Pipelines.
Based on cryptography since it is provided by the default Azure Pipelines environment.
"""
def publish_public_key(self, public_key_pem): # type: (str) -> None
"""Publish the given public key."""
try:
agent_temp_directory = os.environ['AGENT_TEMPDIRECTORY']
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0])
# the temporary file cannot be deleted because we do not know when the agent has processed it
# placing the file in the agent's temp directory allows it to be picked up when the job is running in a container
with tempfile.NamedTemporaryFile(prefix='public-key-', suffix='.pem', delete=False, dir=agent_temp_directory) as public_key_file:
public_key_file.write(to_bytes(public_key_pem))
public_key_file.flush()
# make the agent aware of the public key by declaring it as an attachment
vso_add_attachment('ansible-core-ci', 'public-key.pem', public_key_file.name)
class AzurePipelinesChanges:
"""Change information for an Azure Pipelines build."""
def __init__(self, args): # type: (CommonConfig) -> None
self.args = args
self.git = Git()
try:
self.org_uri = os.environ['SYSTEM_COLLECTIONURI'] # ex: https://dev.azure.com/{org}/
self.project = os.environ['SYSTEM_TEAMPROJECT']
self.repo_type = os.environ['BUILD_REPOSITORY_PROVIDER'] # ex: GitHub
self.source_branch = os.environ['BUILD_SOURCEBRANCH']
self.source_branch_name = os.environ['BUILD_SOURCEBRANCHNAME']
self.pr_branch_name = os.environ.get('SYSTEM_PULLREQUEST_TARGETBRANCH')
except KeyError as ex:
raise MissingEnvironmentVariable(name=ex.args[0])
if self.source_branch.startswith('refs/tags/'):
raise ChangeDetectionNotSupported('Change detection is not supported for tags.')
self.org = self.org_uri.strip('/').split('/')[-1]
self.is_pr = self.pr_branch_name is not None
if self.is_pr:
# HEAD is a merge commit of the PR branch into the target branch
# HEAD^1 is HEAD of the target branch (first parent of merge commit)
# HEAD^2 is HEAD of the PR branch (second parent of merge commit)
# see: https://git-scm.com/docs/gitrevisions
self.branch = self.pr_branch_name
self.base_commit = 'HEAD^1'
self.commit = 'HEAD^2'
else:
commits = self.get_successful_merge_run_commits()
self.branch = self.source_branch_name
self.base_commit = self.get_last_successful_commit(commits)
self.commit = 'HEAD'
self.commit = self.git.run_git(['rev-parse', self.commit]).strip()
if self.base_commit:
self.base_commit = self.git.run_git(['rev-parse', self.base_commit]).strip()
# <commit>...<commit>
# This form is to view the changes on the branch containing and up to the second <commit>, starting at a common ancestor of both <commit>.
# see: https://git-scm.com/docs/git-diff
dot_range = '%s...%s' % (self.base_commit, self.commit)
self.paths = sorted(self.git.get_diff_names([dot_range]))
self.diff = self.git.get_diff([dot_range])
else:
self.paths = None # act as though change detection not enabled, do not filter targets
self.diff = []
def get_successful_merge_run_commits(self): # type: () -> t.Set[str]
"""Return a set of recent successsful merge commits from Azure Pipelines."""
parameters = dict(
maxBuildsPerDefinition=100, # max 5000
queryOrder='queueTimeDescending', # assumes under normal circumstances that later queued jobs are for later commits
resultFilter='succeeded',
reasonFilter='batchedCI', # may miss some non-PR reasons, the alternative is to filter the list after receiving it
repositoryType=self.repo_type,
repositoryId='%s/%s' % (self.org, self.project),
)
url = '%s%s/_apis/build/builds?api-version=6.0&%s' % (self.org_uri, self.project, urllib.parse.urlencode(parameters))
http = HttpClient(self.args, always=True)
response = http.get(url)
# noinspection PyBroadException
try:
result = response.json()
except Exception: # pylint: disable=broad-except
# most likely due to a private project, which returns an HTTP 203 response with HTML
display.warning('Unable to find project. Cannot determine changes. All tests will be executed.')
return set()
commits = set(build['sourceVersion'] for build in result['value'])
return commits
def get_last_successful_commit(self, commits): # type: (t.Set[str]) -> t.Optional[str]
"""Return the last successful commit from git history that is found in the given commit list, or None."""
commit_history = self.git.get_rev_list(max_count=100)
ordered_successful_commits = [commit for commit in commit_history if commit in commits]
last_successful_commit = ordered_successful_commits[0] if ordered_successful_commits else None
return last_successful_commit
def vso_add_attachment(file_type, file_name, path): # type: (str, str, str) -> None
"""Upload and attach a file to the current timeline record."""
vso('task.addattachment', dict(type=file_type, name=file_name), path)
def vso(name, data, message): # type: (str, t.Dict[str, str], str) -> None
"""
Write a logging command for the Azure Pipelines agent to process.
See: https://docs.microsoft.com/en-us/azure/devops/pipelines/scripts/logging-commands?view=azure-devops&tabs=bash
"""
display.info('##vso[%s %s]%s' % (name, ';'.join('='.join((key, value)) for key, value in data.items()), message))
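# For illustration (file path hypothetical):
#   vso_add_attachment('ansible-core-ci', 'public-key.pem', '/tmp/public-key.pem')
# emits: ##vso[task.addattachment type=ansible-core-ci;name=public-key.pem]/tmp/public-key.pem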

View File

@@ -0,0 +1,212 @@
"""Support code for working without a supported CI provider."""
from __future__ import annotations
import os
import platform
import random
import re
import typing as t
from ..config import (
CommonConfig,
TestConfig,
)
from ..io import (
read_text_file,
)
from ..git import (
Git,
)
from ..util import (
ApplicationError,
display,
is_binary_file,
SubprocessError,
)
from . import (
CIProvider,
)
CODE = '' # not really a CI provider, so use an empty string for the code
class Local(CIProvider):
"""CI provider implementation when not using CI."""
priority = 1000
@staticmethod
def is_supported(): # type: () -> bool
"""Return True if this provider is supported in the current running environment."""
return True
@property
def code(self): # type: () -> str
"""Return a unique code representing this provider."""
return CODE
@property
def name(self): # type: () -> str
"""Return descriptive name for this provider."""
return 'Local'
def generate_resource_prefix(self): # type: () -> str
"""Return a resource prefix specific to this CI provider."""
prefix = 'ansible-test-%d-%s' % (
random.randint(10000000, 99999999),
platform.node().split('.')[0],
)
return prefix
def get_base_branch(self): # type: () -> str
"""Return the base branch or an empty string."""
return ''
def detect_changes(self, args): # type: (TestConfig) -> t.Optional[t.List[str]]
"""Initialize change detection."""
result = LocalChanges(args)
display.info('Detected branch %s forked from %s at commit %s' % (
result.current_branch, result.fork_branch, result.fork_point))
if result.untracked and not args.untracked:
display.warning('Ignored %s untracked file(s). Use --untracked to include them.' %
len(result.untracked))
if result.committed and not args.committed:
display.warning('Ignored %s committed change(s). Omit --ignore-committed to include them.' %
len(result.committed))
if result.staged and not args.staged:
display.warning('Ignored %s staged change(s). Omit --ignore-staged to include them.' %
len(result.staged))
if result.unstaged and not args.unstaged:
display.warning('Ignored %s unstaged change(s). Omit --ignore-unstaged to include them.' %
len(result.unstaged))
names = set()
if args.tracked:
names |= set(result.tracked)
if args.untracked:
names |= set(result.untracked)
if args.committed:
names |= set(result.committed)
if args.staged:
names |= set(result.staged)
if args.unstaged:
names |= set(result.unstaged)
if not args.metadata.changes:
args.metadata.populate_changes(result.diff)
for path in result.untracked:
if is_binary_file(path):
args.metadata.changes[path] = ((0, 0),)
continue
line_count = len(read_text_file(path).splitlines())
args.metadata.changes[path] = ((1, line_count),)
return sorted(names)
def supports_core_ci_auth(self): # type: () -> bool
"""Return True if Ansible Core CI is supported."""
path = self._get_aci_key_path()
return os.path.exists(path)
def prepare_core_ci_auth(self): # type: () -> t.Dict[str, t.Any]
"""Return authentication details for Ansible Core CI."""
path = self._get_aci_key_path()
auth_key = read_text_file(path).strip()
request = dict(
key=auth_key,
nonce=None,
)
auth = dict(
remote=request,
)
return auth
def get_git_details(self, args): # type: (CommonConfig) -> t.Optional[t.Dict[str, t.Any]]
"""Return details about git in the current environment."""
return None # not yet implemented for local
@staticmethod
def _get_aci_key_path(): # type: () -> str
path = os.path.expanduser('~/.ansible-core-ci.key')
return path
class InvalidBranch(ApplicationError):
"""Exception for invalid branch specification."""
def __init__(self, branch, reason): # type: (str, str) -> None
message = 'Invalid branch: %s\n%s' % (branch, reason)
super().__init__(message)
self.branch = branch
class LocalChanges:
"""Change information for local work."""
def __init__(self, args): # type: (TestConfig) -> None
self.args = args
self.git = Git()
self.current_branch = self.git.get_branch()
if self.is_official_branch(self.current_branch):
raise InvalidBranch(branch=self.current_branch,
reason='Current branch is not a feature branch.')
self.fork_branch = None
self.fork_point = None
self.local_branches = sorted(self.git.get_branches())
self.official_branches = sorted([b for b in self.local_branches if self.is_official_branch(b)])
for self.fork_branch in self.official_branches:
try:
self.fork_point = self.git.get_branch_fork_point(self.fork_branch)
break
except SubprocessError:
pass
if self.fork_point is None:
raise ApplicationError('Unable to auto-detect fork branch and fork point.')
# tracked files (including unchanged)
self.tracked = sorted(self.git.get_file_names(['--cached']))
# untracked files (except ignored)
self.untracked = sorted(self.git.get_file_names(['--others', '--exclude-standard']))
# tracked changes (including deletions) committed since the branch was forked
self.committed = sorted(self.git.get_diff_names([self.fork_point, 'HEAD']))
# tracked changes (including deletions) which are staged
self.staged = sorted(self.git.get_diff_names(['--cached']))
# tracked changes (including deletions) which are not staged
self.unstaged = sorted(self.git.get_diff_names([]))
# diff of all tracked files from fork point to working copy
self.diff = self.git.get_diff([self.fork_point])
def is_official_branch(self, name): # type: (str) -> bool
"""Return True if the given branch name an official branch for development or releases."""
if self.args.base_branch:
return name == self.args.base_branch
if name == 'devel':
return True
if re.match(r'^stable-[0-9]+\.[0-9]+$', name):
return True
return False

View File

@@ -0,0 +1,900 @@
"""Classify changes in Ansible code."""
from __future__ import annotations
import collections
import os
import re
import time
import typing as t
from ..target import (
walk_module_targets,
walk_integration_targets,
walk_units_targets,
walk_compile_targets,
walk_sanity_targets,
load_integration_prefixes,
analyze_integration_target_dependencies,
)
from ..util import (
display,
is_subdir,
)
from .python import (
get_python_module_utils_imports,
get_python_module_utils_name,
)
from .csharp import (
get_csharp_module_utils_imports,
get_csharp_module_utils_name,
)
from .powershell import (
get_powershell_module_utils_imports,
get_powershell_module_utils_name,
)
from ..config import (
TestConfig,
IntegrationConfig,
)
from ..metadata import (
ChangeDescription,
)
from ..data import (
data_context,
)
FOCUSED_TARGET = '__focused__'
def categorize_changes(args, paths, verbose_command=None): # type: (TestConfig, t.List[str], t.Optional[str]) -> ChangeDescription
"""Categorize the given list of changed paths and return a description of the changes."""
mapper = PathMapper(args)
commands = {
'sanity': set(),
'units': set(),
'integration': set(),
'windows-integration': set(),
'network-integration': set(),
}
focused_commands = collections.defaultdict(set)
deleted_paths = set()
original_paths = set()
additional_paths = set()
no_integration_paths = set()
for path in paths:
if not os.path.exists(path):
deleted_paths.add(path)
continue
original_paths.add(path)
dependent_paths = mapper.get_dependent_paths(path)
if not dependent_paths:
continue
display.info('Expanded "%s" to %d dependent file(s):' % (path, len(dependent_paths)), verbosity=2)
for dependent_path in dependent_paths:
display.info(dependent_path, verbosity=2)
additional_paths.add(dependent_path)
additional_paths -= set(paths) # don't count changed paths as additional paths
if additional_paths:
display.info('Expanded %d changed file(s) into %d additional dependent file(s).' % (len(paths), len(additional_paths)))
paths = sorted(set(paths) | additional_paths)
display.info('Mapping %d changed file(s) to tests.' % len(paths))
none_count = 0
for path in paths:
tests = mapper.classify(path)
if tests is None:
focused_target = False
display.info('%s -> all' % path, verbosity=1)
tests = all_tests(args) # not categorized, run all tests
display.warning('Path not categorized: %s' % path)
else:
focused_target = tests.pop(FOCUSED_TARGET, False) and path in original_paths
tests = dict((key, value) for key, value in tests.items() if value)
if focused_target and not any('integration' in command for command in tests):
no_integration_paths.add(path) # path triggers no integration tests
if verbose_command:
result = '%s: %s' % (verbose_command, tests.get(verbose_command) or 'none')
# identify targeted integration tests (those which only target a single integration command)
if 'integration' in verbose_command and tests.get(verbose_command):
if not any('integration' in command for command in tests if command != verbose_command):
if focused_target:
result += ' (focused)'
result += ' (targeted)'
else:
result = '%s' % tests
if not tests.get(verbose_command):
# minimize excessive output from potentially thousands of files which do not trigger tests
none_count += 1
verbosity = 2
else:
verbosity = 1
if args.verbosity >= verbosity:
display.info('%s -> %s' % (path, result), verbosity=1)
for command, target in tests.items():
commands[command].add(target)
if focused_target:
focused_commands[command].add(target)
if none_count > 0 and args.verbosity < 2:
display.notice('Omitted %d file(s) that triggered no tests.' % none_count)
for command, targets in commands.items():
targets.discard('none')
if any(target == 'all' for target in targets):
commands[command] = {'all'}
commands = dict((c, sorted(targets)) for c, targets in commands.items() if targets)
focused_commands = dict((c, sorted(targets)) for c, targets in focused_commands.items())
for command, targets in commands.items():
if targets == ['all']:
commands[command] = [] # changes require testing all targets, do not filter targets
changes = ChangeDescription()
changes.command = verbose_command
changes.changed_paths = sorted(original_paths)
changes.deleted_paths = sorted(deleted_paths)
changes.regular_command_targets = commands
changes.focused_command_targets = focused_commands
changes.no_integration_paths = sorted(no_integration_paths)
return changes
class PathMapper:
"""Map file paths to test commands and targets."""
def __init__(self, args): # type: (TestConfig) -> None
self.args = args
self.integration_all_target = get_integration_all_target(self.args)
self.integration_targets = list(walk_integration_targets())
self.module_targets = list(walk_module_targets())
self.compile_targets = list(walk_compile_targets())
self.units_targets = list(walk_units_targets())
self.sanity_targets = list(walk_sanity_targets())
self.powershell_targets = [target for target in self.sanity_targets if os.path.splitext(target.path)[1] in ('.ps1', '.psm1')]
self.csharp_targets = [target for target in self.sanity_targets if os.path.splitext(target.path)[1] == '.cs']
self.units_modules = set(target.module for target in self.units_targets if target.module)
self.units_paths = set(a for target in self.units_targets for a in target.aliases)
self.sanity_paths = set(target.path for target in self.sanity_targets)
self.module_names_by_path = dict((target.path, target.module) for target in self.module_targets)
self.integration_targets_by_name = dict((target.name, target) for target in self.integration_targets)
self.integration_targets_by_alias = dict((a, target) for target in self.integration_targets for a in target.aliases)
self.posix_integration_by_module = dict((m, target.name) for target in self.integration_targets
if 'posix/' in target.aliases for m in target.modules)
self.windows_integration_by_module = dict((m, target.name) for target in self.integration_targets
if 'windows/' in target.aliases for m in target.modules)
self.network_integration_by_module = dict((m, target.name) for target in self.integration_targets
if 'network/' in target.aliases for m in target.modules)
self.prefixes = load_integration_prefixes()
self.integration_dependencies = analyze_integration_target_dependencies(self.integration_targets)
self.python_module_utils_imports = {} # populated on first use to reduce overhead when not needed
self.powershell_module_utils_imports = {} # populated on first use to reduce overhead when not needed
self.csharp_module_utils_imports = {} # populated on first use to reduce overhead when not needed
self.paths_to_dependent_targets = {}
for target in self.integration_targets:
for path in target.needs_file:
self.paths_to_dependent_targets.setdefault(path, set()).add(target)
def get_dependent_paths(self, path): # type: (str) -> t.List[str]
"""Return a list of paths which depend on the given path, recursively expanding dependent paths as well."""
unprocessed_paths = set(self.get_dependent_paths_non_recursive(path))
paths = set()
while unprocessed_paths:
queued_paths = list(unprocessed_paths)
paths |= unprocessed_paths
unprocessed_paths = set()
for queued_path in queued_paths:
new_paths = self.get_dependent_paths_non_recursive(queued_path)
for new_path in new_paths:
if new_path not in paths:
unprocessed_paths.add(new_path)
return sorted(paths)
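# Minimal sketch of the fixed-point expansion above (paths are hypothetical):
#
#   mapper.get_dependent_paths('lib/ansible/module_utils/basic.py')
#   # pass 1 -> files importing basic.py, e.g. 'lib/ansible/modules/ping.py'
#   # pass 2 -> integration target dirs declaring those files via needs/file,
#   #           e.g. 'test/integration/targets/ping/'
#   # pass 3 -> no new paths; the sorted union of all passes is returned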
def get_dependent_paths_non_recursive(self, path): # type: (str) -> t.List[str]
"""Return a list of paths which depend on the given path, including dependent integration test target paths."""
paths = self.get_dependent_paths_internal(path)
paths += [target.path + '/' for target in self.paths_to_dependent_targets.get(path, set())]
paths = sorted(set(paths))
return paths
def get_dependent_paths_internal(self, path): # type: (str) -> t.List[str]
"""Return a list of paths which depend on the given path."""
ext = os.path.splitext(os.path.split(path)[1])[1]
if is_subdir(path, data_context().content.module_utils_path):
if ext == '.py':
return self.get_python_module_utils_usage(path)
if ext == '.psm1':
return self.get_powershell_module_utils_usage(path)
if ext == '.cs':
return self.get_csharp_module_utils_usage(path)
if is_subdir(path, data_context().content.integration_targets_path):
return self.get_integration_target_usage(path)
return []
def get_python_module_utils_usage(self, path): # type: (str) -> t.List[str]
"""Return a list of paths which depend on the given path which is a Python module_utils file."""
if not self.python_module_utils_imports:
display.info('Analyzing python module_utils imports...')
before = time.time()
self.python_module_utils_imports = get_python_module_utils_imports(self.compile_targets)
after = time.time()
display.info('Processed %d python module_utils in %d second(s).' % (len(self.python_module_utils_imports), after - before))
name = get_python_module_utils_name(path)
return sorted(self.python_module_utils_imports[name])
def get_powershell_module_utils_usage(self, path): # type: (str) -> t.List[str]
"""Return a list of paths which depend on the given path which is a PowerShell module_utils file."""
if not self.powershell_module_utils_imports:
display.info('Analyzing powershell module_utils imports...')
before = time.time()
self.powershell_module_utils_imports = get_powershell_module_utils_imports(self.powershell_targets)
after = time.time()
display.info('Processed %d powershell module_utils in %d second(s).' % (len(self.powershell_module_utils_imports), after - before))
name = get_powershell_module_utils_name(path)
return sorted(self.powershell_module_utils_imports[name])
def get_csharp_module_utils_usage(self, path): # type: (str) -> t.List[str]
"""Return a list of paths which depend on the given path which is a C# module_utils file."""
if not self.csharp_module_utils_imports:
display.info('Analyzing C# module_utils imports...')
before = time.time()
self.csharp_module_utils_imports = get_csharp_module_utils_imports(self.powershell_targets, self.csharp_targets)
after = time.time()
display.info('Processed %d C# module_utils in %d second(s).' % (len(self.csharp_module_utils_imports), after - before))
name = get_csharp_module_utils_name(path)
return sorted(self.csharp_module_utils_imports[name])
def get_integration_target_usage(self, path): # type: (str) -> t.List[str]
"""Return a list of paths which depend on the given path which is an integration target file."""
target_name = path.split('/')[3]
dependents = [os.path.join(data_context().content.integration_targets_path, target) + os.path.sep
for target in sorted(self.integration_dependencies.get(target_name, set()))]
return dependents
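# Example (assuming the ansible-core layout, where targets live under
# test/integration/targets/): a change to
# 'test/integration/targets/setup_epel/tasks/main.yml' yields target_name
# 'setup_epel' (index 3 of the '/'-split path), and the result contains one
# directory path per target recorded in integration_dependencies as
# depending on 'setup_epel'.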
def classify(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
"""Classify the given path and return an optional dictionary of the results."""
result = self._classify(path)
# run all tests when no result given
if result is None:
return None
# run sanity on path unless result specified otherwise
if path in self.sanity_paths and 'sanity' not in result:
result['sanity'] = path
return result
def _classify(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
"""Return the classification for the given path."""
if data_context().content.is_ansible:
return self._classify_ansible(path)
if data_context().content.collection:
return self._classify_collection(path)
return None
def _classify_common(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
"""Return the classification for the given path using rules common to all layouts."""
dirname = os.path.dirname(path)
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
minimal = {}
if os.path.sep not in path:
if filename in (
'azure-pipelines.yml',
):
return all_tests(self.args) # test infrastructure, run all tests
if is_subdir(path, '.azure-pipelines'):
return all_tests(self.args) # test infrastructure, run all tests
if is_subdir(path, '.github'):
return minimal
if is_subdir(path, data_context().content.integration_targets_path):
if not os.path.exists(path):
return minimal
target = self.integration_targets_by_name.get(path.split('/')[3])
if not target:
display.warning('Unexpected non-target found: %s' % path)
return minimal
if 'hidden/' in target.aliases:
return minimal # already expanded using get_dependent_paths
return {
'integration': target.name if 'posix/' in target.aliases else None,
'windows-integration': target.name if 'windows/' in target.aliases else None,
'network-integration': target.name if 'network/' in target.aliases else None,
FOCUSED_TARGET: True,
}
if is_subdir(path, data_context().content.integration_path):
if dirname == data_context().content.integration_path:
for command in (
'integration',
'windows-integration',
'network-integration',
):
if name == command and ext == '.cfg':
return {
command: self.integration_all_target,
}
if name == command + '.requirements' and ext == '.txt':
return {
command: self.integration_all_target,
}
return {
'integration': self.integration_all_target,
'windows-integration': self.integration_all_target,
'network-integration': self.integration_all_target,
}
if is_subdir(path, data_context().content.sanity_path):
return {
'sanity': 'all', # test infrastructure, run all sanity checks
}
if is_subdir(path, data_context().content.unit_path):
if path in self.units_paths:
return {
'units': path,
}
# changes to files which are not unit tests should trigger tests from the nearest parent directory
test_path = os.path.dirname(path)
while test_path:
if test_path + '/' in self.units_paths:
return {
'units': test_path + '/',
}
test_path = os.path.dirname(test_path)
if is_subdir(path, data_context().content.module_path):
module_name = self.module_names_by_path.get(path)
if module_name:
return {
'units': module_name if module_name in self.units_modules else None,
'integration': self.posix_integration_by_module.get(module_name) if ext == '.py' else None,
'windows-integration': self.windows_integration_by_module.get(module_name) if ext in ['.cs', '.ps1'] else None,
'network-integration': self.network_integration_by_module.get(module_name),
FOCUSED_TARGET: True,
}
return minimal
if is_subdir(path, data_context().content.module_utils_path):
if ext == '.cs':
return minimal # already expanded using get_dependent_paths
if ext == '.psm1':
return minimal # already expanded using get_dependent_paths
if ext == '.py':
return minimal # already expanded using get_dependent_paths
if is_subdir(path, data_context().content.plugin_paths['action']):
if ext == '.py':
if name.startswith('net_'):
network_target = 'network/.*_%s' % name[4:]
if any(re.search(r'^%s$' % network_target, alias) for alias in self.integration_targets_by_alias):
return {
'network-integration': network_target,
'units': 'all',
}
return {
'network-integration': self.integration_all_target,
'units': 'all',
}
if self.prefixes.get(name) == 'network':
network_platform = name
elif name.endswith('_config') and self.prefixes.get(name[:-7]) == 'network':
network_platform = name[:-7]
elif name.endswith('_template') and self.prefixes.get(name[:-9]) == 'network':
network_platform = name[:-9]
else:
network_platform = None
if network_platform:
network_target = 'network/%s/' % network_platform
if network_target in self.integration_targets_by_alias:
return {
'network-integration': network_target,
'units': 'all',
}
display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
return {
'units': 'all',
}
if is_subdir(path, data_context().content.plugin_paths['connection']):
units_dir = os.path.join(data_context().content.unit_path, 'plugins', 'connection')
if name == '__init__':
return {
'integration': self.integration_all_target,
'windows-integration': self.integration_all_target,
'network-integration': self.integration_all_target,
'units': os.path.join(units_dir, ''),
}
units_path = os.path.join(units_dir, 'test_%s.py' % name)
if units_path not in self.units_paths:
units_path = None
integration_name = 'connection_%s' % name
if integration_name not in self.integration_targets_by_name:
integration_name = None
windows_integration_name = 'connection_windows_%s' % name
if windows_integration_name not in self.integration_targets_by_name:
windows_integration_name = None
# entire integration test commands depend on these connection plugins
if name in ['winrm', 'psrp']:
return {
'windows-integration': self.integration_all_target,
'units': units_path,
}
if name == 'local':
return {
'integration': self.integration_all_target,
'network-integration': self.integration_all_target,
'units': units_path,
}
if name == 'network_cli':
return {
'network-integration': self.integration_all_target,
'units': units_path,
}
if name == 'paramiko_ssh':
return {
'integration': integration_name,
'network-integration': self.integration_all_target,
'units': units_path,
}
# other connection plugins have isolated integration and unit tests
return {
'integration': integration_name,
'windows-integration': windows_integration_name,
'units': units_path,
}
if is_subdir(path, data_context().content.plugin_paths['doc_fragments']):
return {
'sanity': 'all',
}
if is_subdir(path, data_context().content.plugin_paths['inventory']):
if name == '__init__':
return all_tests(self.args) # broad impact, run all tests
# These inventory plugins are enabled by default (see INVENTORY_ENABLED).
# Without dedicated integration tests for these we must rely on the incidental coverage from other tests.
test_all = [
'host_list',
'script',
'yaml',
'ini',
'auto',
]
if name in test_all:
posix_integration_fallback = get_integration_all_target(self.args)
else:
posix_integration_fallback = None
target = self.integration_targets_by_name.get('inventory_%s' % name)
units_dir = os.path.join(data_context().content.unit_path, 'plugins', 'inventory')
units_path = os.path.join(units_dir, 'test_%s.py' % name)
if units_path not in self.units_paths:
units_path = None
return {
'integration': target.name if target and 'posix/' in target.aliases else posix_integration_fallback,
'windows-integration': target.name if target and 'windows/' in target.aliases else None,
'network-integration': target.name if target and 'network/' in target.aliases else None,
'units': units_path,
FOCUSED_TARGET: target is not None,
}
if is_subdir(path, data_context().content.plugin_paths['filter']):
return self._simple_plugin_tests('filter', name)
if is_subdir(path, data_context().content.plugin_paths['lookup']):
return self._simple_plugin_tests('lookup', name)
if (is_subdir(path, data_context().content.plugin_paths['terminal']) or
is_subdir(path, data_context().content.plugin_paths['cliconf']) or
is_subdir(path, data_context().content.plugin_paths['netconf'])):
if ext == '.py':
if name in self.prefixes and self.prefixes[name] == 'network':
network_target = 'network/%s/' % name
if network_target in self.integration_targets_by_alias:
return {
'network-integration': network_target,
'units': 'all',
}
display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
return {
'units': 'all',
}
return {
'network-integration': self.integration_all_target,
'units': 'all',
}
if is_subdir(path, data_context().content.plugin_paths['test']):
return self._simple_plugin_tests('test', name)
return None
def _classify_collection(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
"""Return the classification for the given path using rules specific to collections."""
result = self._classify_common(path)
if result is not None:
return result
filename = os.path.basename(path)
dummy, ext = os.path.splitext(filename)
minimal = {}
if path.startswith('changelogs/'):
return minimal
if path.startswith('docs/'):
return minimal
if '/' not in path:
if path in (
'.gitignore',
'COPYING',
'LICENSE',
'Makefile',
):
return minimal
if ext in (
'.in',
'.md',
'.rst',
'.toml',
'.txt',
):
return minimal
return None
def _classify_ansible(self, path): # type: (str) -> t.Optional[t.Dict[str, str]]
"""Return the classification for the given path using rules specific to Ansible."""
if path.startswith('test/units/compat/'):
return {
'units': 'test/units/',
}
result = self._classify_common(path)
if result is not None:
return result
dirname = os.path.dirname(path)
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
minimal = {}
if path.startswith('bin/'):
return all_tests(self.args) # broad impact, run all tests
if path.startswith('changelogs/'):
return minimal
if path.startswith('docs/'):
return minimal
if path.startswith('examples/'):
if path == 'examples/scripts/ConfigureRemotingForAnsible.ps1':
return {
'windows-integration': 'connection_winrm',
}
return minimal
if path.startswith('hacking/'):
return minimal
if path.startswith('lib/ansible/executor/powershell/'):
units_path = 'test/units/executor/powershell/'
if units_path not in self.units_paths:
units_path = None
return {
'windows-integration': self.integration_all_target,
'units': units_path,
}
if path.startswith('lib/ansible/'):
return all_tests(self.args) # broad impact, run all tests
if path.startswith('licenses/'):
return minimal
if path.startswith('packaging/'):
return minimal
if path.startswith('test/ansible_test/'):
return minimal # these tests are not invoked from ansible-test
if path.startswith('test/lib/ansible_test/config/'):
if name.startswith('cloud-config-'):
# noinspection PyTypeChecker
cloud_target = 'cloud/%s/' % name.split('-')[2].split('.')[0]
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
if path.startswith('test/lib/ansible_test/_data/completion/'):
if path == 'test/lib/ansible_test/_data/completion/docker.txt':
return all_tests(self.args, force=True) # force all tests due to risk of breaking changes in new test environment
if path.startswith('test/lib/ansible_test/_internal/commands/integration/cloud/'):
cloud_target = 'cloud/%s/' % name
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/lib/ansible_test/_internal/commands/sanity/'):
return {
'sanity': 'all', # test infrastructure, run all sanity checks
'integration': 'ansible-test', # run ansible-test self tests
}
if path.startswith('test/lib/ansible_test/_internal/commands/units/'):
return {
'units': 'all', # test infrastructure, run all unit tests
'integration': 'ansible-test', # run ansible-test self tests
}
if path.startswith('test/lib/ansible_test/_data/requirements/'):
if name in (
'integration',
'network-integration',
'windows-integration',
):
return {
name: self.integration_all_target,
}
if name in (
'sanity',
'units',
):
return {
name: 'all',
}
if path.startswith('test/lib/ansible_test/_util/controller/sanity/') or path.startswith('test/lib/ansible_test/_util/target/sanity/'):
return {
'sanity': 'all', # test infrastructure, run all sanity checks
'integration': 'ansible-test', # run ansible-test self tests
}
if path.startswith('test/lib/ansible_test/_util/target/pytest/'):
return {
'units': 'all', # test infrastructure, run all unit tests
'integration': 'ansible-test', # run ansible-test self tests
}
if path.startswith('test/lib/'):
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/support/'):
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/utils/shippable/'):
if dirname == 'test/utils/shippable':
test_map = {
'cloud.sh': 'integration:cloud/',
'linux.sh': 'integration:all',
'network.sh': 'network-integration:all',
'remote.sh': 'integration:all',
'sanity.sh': 'sanity:all',
'units.sh': 'units:all',
'windows.sh': 'windows-integration:all',
}
test_match = test_map.get(filename)
if test_match:
test_command, test_target = test_match.split(':')
return {
test_command: test_target,
}
cloud_target = 'cloud/%s/' % name
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/utils/'):
return minimal
if '/' not in path:
if path in (
'.gitattributes',
'.gitignore',
'.mailmap',
'COPYING',
'Makefile',
):
return minimal
if path in (
'setup.py',
):
return all_tests(self.args) # broad impact, run all tests
if ext in (
'.in',
'.md',
'.rst',
'.toml',
'.txt',
):
return minimal
return None # unknown, will result in fall-back to run all tests
def _simple_plugin_tests(self, plugin_type, plugin_name): # type: (str, str) -> t.Dict[str, t.Optional[str]]
"""
Return tests for the given plugin type and plugin name.
This function is useful for plugin types which do not require special processing.
"""
if plugin_name == '__init__':
return all_tests(self.args, True)
integration_target = self.integration_targets_by_name.get('%s_%s' % (plugin_type, plugin_name))
if integration_target:
integration_name = integration_target.name
else:
integration_name = None
units_path = os.path.join(data_context().content.unit_path, 'plugins', plugin_type, 'test_%s.py' % plugin_name)
if units_path not in self.units_paths:
units_path = None
return dict(
integration=integration_name,
units=units_path,
)
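# Example (hypothetical plugin, ansible-core layout): for plugin_type='filter'
# and plugin_name='to_nice_json', this returns integration='filter_to_nice_json'
# only if such a target exists, and
# units='test/units/plugins/filter/test_to_nice_json.py' only if that file is a
# known unit test path; otherwise the corresponding value is None.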
def all_tests(args, force=False): # type: (TestConfig, bool) -> t.Dict[str, str]
"""Return the targets for each test command when all tests should be run."""
if force:
integration_all_target = 'all'
else:
integration_all_target = get_integration_all_target(args)
return {
'sanity': 'all',
'units': 'all',
'integration': integration_all_target,
'windows-integration': integration_all_target,
'network-integration': integration_all_target,
}
def get_integration_all_target(args): # type: (TestConfig) -> str
"""Return the target to use when all tests should be run."""
if isinstance(args, IntegrationConfig):
return args.changed_all_target
return 'all'


@@ -0,0 +1,26 @@
"""Common classification code used by multiple languages."""
from __future__ import annotations
import os
from ..data import (
data_context,
)
def resolve_csharp_ps_util(import_name, path): # type: (str, str) -> str
"""Return the fully qualified name of the given import if possible, otherwise return the original import name."""
if data_context().content.is_ansible or not import_name.startswith('.'):
# We don't support relative paths for builtin utils, there's no point.
return import_name
packages = import_name.split('.')
module_packages = path.split(os.path.sep)
for package in packages:
if not module_packages or package:
break
del module_packages[-1]
return 'ansible_collections.%s%s' % (data_context().content.prefix,
'.'.join(module_packages + [p for p in packages if p]))
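# Worked example (assuming a collection whose prefix is 'ns.col.'): the relative
# import '.CamelConversion' in 'plugins/module_utils/Foo.psm1' splits into
# ['', 'CamelConversion']; the single leading dot drops 'Foo.psm1' from the path
# parts, producing 'ansible_collections.ns.col.plugins.module_utils.CamelConversion'.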


@@ -0,0 +1,98 @@
"""Analyze C# import statements."""
from __future__ import annotations
import os
import re
import typing as t
from ..io import (
open_text_file,
)
from ..util import (
display,
)
from .common import (
resolve_csharp_ps_util,
)
from ..data import (
data_context,
)
from ..target import (
TestTarget,
)
def get_csharp_module_utils_imports(powershell_targets, csharp_targets): # type: (t.List[TestTarget], t.List[TestTarget]) -> t.Dict[str, t.Set[str]]
"""Return a dictionary of module_utils names mapped to sets of powershell file paths."""
module_utils = enumerate_module_utils()
imports_by_target_path = {}
for target in powershell_targets:
imports_by_target_path[target.path] = extract_csharp_module_utils_imports(target.path, module_utils, False)
for target in csharp_targets:
imports_by_target_path[target.path] = extract_csharp_module_utils_imports(target.path, module_utils, True)
imports = {module_util: set() for module_util in module_utils} # type: t.Dict[str, t.Set[str]]
for target_path, modules in imports_by_target_path.items():
for module_util in modules:
imports[module_util].add(target_path)
for module_util in sorted(imports):
if not imports[module_util]:
display.warning('No imports found which use the "%s" module_util.' % module_util)
return imports
def get_csharp_module_utils_name(path): # type: (str) -> str
"""Return a namespace and name from the given module_utils path."""
base_path = data_context().content.module_utils_csharp_path
if data_context().content.collection:
prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils.'
else:
prefix = ''
name = prefix + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
return name
def enumerate_module_utils(): # type: () -> t.Set[str]
"""Return a set of available module_utils imports."""
return set(get_csharp_module_utils_name(p)
for p in data_context().content.walk_files(data_context().content.module_utils_csharp_path)
if os.path.splitext(p)[1] == '.cs')
def extract_csharp_module_utils_imports(path, module_utils, is_pure_csharp): # type: (str, t.Set[str], bool) -> t.Set[str]
"""Return a set of module_utils imports found in the specified source file."""
imports = set()
if is_pure_csharp:
pattern = re.compile(r'(?i)^using\s((?:Ansible|AnsibleCollections)\..+);$')
else:
pattern = re.compile(r'(?i)^#\s*ansiblerequires\s+-csharputil\s+((?:Ansible|ansible_collections|\.)\..+)')
with open_text_file(path) as module_file:
for line_number, line in enumerate(module_file, 1):
match = re.search(pattern, line)
if not match:
continue
import_name = resolve_csharp_ps_util(match.group(1), path)
if import_name in module_utils:
imports.add(import_name)
elif data_context().content.is_ansible or \
import_name.startswith('ansible_collections.%s' % data_context().content.prefix):
display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name))
return imports
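# Example lines matched by the two patterns above (hypothetical content):
#
#   pure C# (.cs):        using Ansible.Process;
#   module (.ps1/.psm1):  #AnsibleRequires -CSharpUtil Ansible.Basic
#
# In both cases group(1) yields the util name, e.g. 'Ansible.Basic', which is
# resolved and recorded only if it is a known module_util.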


@@ -0,0 +1,99 @@
"""Analyze powershell import statements."""
from __future__ import annotations
import os
import re
import typing as t
from ..io import (
read_text_file,
)
from ..util import (
display,
)
from .common import (
resolve_csharp_ps_util,
)
from ..data import (
data_context,
)
from ..target import (
TestTarget,
)
def get_powershell_module_utils_imports(powershell_targets): # type: (t.List[TestTarget]) -> t.Dict[str, t.Set[str]]
"""Return a dictionary of module_utils names mapped to sets of powershell file paths."""
module_utils = enumerate_module_utils()
imports_by_target_path = {}
for target in powershell_targets:
imports_by_target_path[target.path] = extract_powershell_module_utils_imports(target.path, module_utils)
imports = {module_util: set() for module_util in module_utils} # type: t.Dict[str, t.Set[str]]
for target_path, modules in imports_by_target_path.items():
for module_util in modules:
imports[module_util].add(target_path)
for module_util in sorted(imports):
if not imports[module_util]:
display.warning('No imports found which use the "%s" module_util.' % module_util)
return imports
def get_powershell_module_utils_name(path): # type: (str) -> str
"""Return a namespace and name from the given module_utils path."""
base_path = data_context().content.module_utils_powershell_path
if data_context().content.collection:
prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils.'
else:
prefix = ''
name = prefix + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
return name
def enumerate_module_utils(): # type: () -> t.Set[str]
"""Return a set of available module_utils imports."""
return set(get_powershell_module_utils_name(p)
for p in data_context().content.walk_files(data_context().content.module_utils_powershell_path)
if os.path.splitext(p)[1] == '.psm1')
def extract_powershell_module_utils_imports(path, module_utils): # type: (str, t.Set[str]) -> t.Set[str]
"""Return a set of module_utils imports found in the specified source file."""
imports = set()
code = read_text_file(path)
if data_context().content.is_ansible and '# POWERSHELL_COMMON' in code:
imports.add('Ansible.ModuleUtils.Legacy')
for line_number, line in enumerate(code.splitlines(), 1):
match = re.search(r'(?i)^#\s*(?:requires\s+-module(?:s?)|ansiblerequires\s+-powershell)\s*((?:Ansible|ansible_collections|\.)\..+)', line)
if not match:
continue
import_name = resolve_csharp_ps_util(match.group(1), path)
if import_name in module_utils:
imports.add(import_name)
elif data_context().content.is_ansible or \
import_name.startswith('ansible_collections.%s' % data_context().content.prefix):
display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name))
return imports
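# Example lines matched by the pattern above (hypothetical content):
#
#   #Requires -Module Ansible.ModuleUtils.Legacy
#   #AnsibleRequires -PowerShell ansible_collections.ns.col.plugins.module_utils.Foo
#
# group(1) captures the util name; relative names beginning with '.' are
# resolved through resolve_csharp_ps_util before the membership check.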


@@ -0,0 +1,346 @@
"""Analyze python import statements."""
from __future__ import annotations
import ast
import os
import re
import typing as t
from ..io import (
read_binary_file,
)
from ..util import (
display,
ApplicationError,
is_subdir,
)
from ..data import (
data_context,
)
from ..target import (
TestTarget,
)
VIRTUAL_PACKAGES = {
'ansible.module_utils.six',
}
def get_python_module_utils_imports(compile_targets): # type: (t.List[TestTarget]) -> t.Dict[str, t.Set[str]]
"""Return a dictionary of module_utils names mapped to sets of python file paths."""
module_utils = enumerate_module_utils()
virtual_utils = set(m for m in module_utils if any(m.startswith('%s.' % v) for v in VIRTUAL_PACKAGES))
module_utils -= virtual_utils
imports_by_target_path = {}
for target in compile_targets:
imports_by_target_path[target.path] = extract_python_module_utils_imports(target.path, module_utils)
def recurse_import(import_name, depth=0, seen=None): # type: (str, int, t.Optional[t.Set[str]]) -> t.Set[str]
"""Recursively expand module_utils imports from module_utils files."""
display.info('module_utils import: %s%s' % (' ' * depth, import_name), verbosity=4)
if seen is None:
seen = {import_name}
results = {import_name}
# virtual packages depend on the modules they contain instead of the reverse
if import_name in VIRTUAL_PACKAGES:
for sub_import in sorted(virtual_utils):
if sub_import.startswith('%s.' % import_name):
if sub_import in seen:
continue
seen.add(sub_import)
matches = sorted(recurse_import(sub_import, depth + 1, seen))
for result in matches:
results.add(result)
import_path = get_import_path(import_name)
if import_path not in imports_by_target_path:
import_path = get_import_path(import_name, package=True)
if import_path not in imports_by_target_path:
raise ApplicationError('Cannot determine path for module_utils import: %s' % import_name)
# process imports in reverse so the deepest imports come first
for name in sorted(imports_by_target_path[import_path], reverse=True):
if name in virtual_utils:
continue
if name in seen:
continue
seen.add(name)
matches = sorted(recurse_import(name, depth + 1, seen))
for result in matches:
results.add(result)
return results
for module_util in module_utils:
# recurse over module_utils imports while excluding self
module_util_imports = recurse_import(module_util)
module_util_imports.remove(module_util)
# add recursive imports to all path entries which import this module_util
for target_path, modules in imports_by_target_path.items():
if module_util in modules:
for module_util_import in sorted(module_util_imports):
if module_util_import not in modules:
display.info('%s inherits import %s via %s' % (target_path, module_util_import, module_util), verbosity=6)
modules.add(module_util_import)
imports = {module_util: set() for module_util in module_utils | virtual_utils} # type: t.Dict[str, t.Set[str]]
for target_path, modules in imports_by_target_path.items():
for module_util in modules:
imports[module_util].add(target_path)
# for purposes of mapping module_utils to paths, treat imports of virtual utils the same as the parent package
for virtual_util in virtual_utils:
parent_package = '.'.join(virtual_util.split('.')[:-1])
imports[virtual_util] = imports[parent_package]
display.info('%s reports imports from parent package %s' % (virtual_util, parent_package), verbosity=6)
for module_util in sorted(imports):
if not imports[module_util]:
package_path = get_import_path(module_util, package=True)
if os.path.exists(package_path) and not os.path.getsize(package_path):
continue # ignore empty __init__.py files
display.warning('No imports found which use the "%s" module_util.' % module_util)
return imports
def get_python_module_utils_name(path): # type: (str) -> str
"""Return a namespace and name from the given module_utils path."""
base_path = data_context().content.module_utils_path
if data_context().content.collection:
prefix = 'ansible_collections.' + data_context().content.collection.prefix + 'plugins.module_utils'
else:
prefix = 'ansible.module_utils'
if path.endswith('/__init__.py'):
path = os.path.dirname(path)
if path == base_path:
name = prefix
else:
name = prefix + '.' + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.path.sep, '.')
return name
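# Examples (ansible-core layout, where base_path is 'lib/ansible/module_utils'):
#
#   'lib/ansible/module_utils/common/text/converters.py' -> 'ansible.module_utils.common.text.converters'
#   'lib/ansible/module_utils/facts/__init__.py'         -> 'ansible.module_utils.facts'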
def enumerate_module_utils():  # type: () -> t.Set[str]
"""Return a set of available module_utils imports."""
module_utils = []
for path in data_context().content.walk_files(data_context().content.module_utils_path):
ext = os.path.splitext(path)[1]
if ext != '.py':
continue
module_utils.append(get_python_module_utils_name(path))
return set(module_utils)
def extract_python_module_utils_imports(path, module_utils): # type: (str, t.Set[str]) -> t.Set[str]
"""Return a list of module_utils imports found in the specified source file."""
# Python code must be read as bytes to avoid a SyntaxError when the source uses comments to declare the file encoding.
# See: https://www.python.org/dev/peps/pep-0263
# Specifically: If a Unicode string with a coding declaration is passed to compile(), a SyntaxError will be raised.
code = read_binary_file(path)
try:
tree = ast.parse(code)
except SyntaxError as ex:
# Treat this error as a warning so tests can be executed as best as possible.
# The compile test will detect and report this syntax error.
display.warning('%s:%s Syntax error extracting module_utils imports: %s' % (path, ex.lineno, ex.msg))
return set()
finder = ModuleUtilFinder(path, module_utils)
finder.visit(tree)
return finder.imports
def get_import_path(name, package=False): # type: (str, bool) -> str
"""Return a path from an import name."""
if package:
filename = os.path.join(name.replace('.', '/'), '__init__.py')
else:
filename = '%s.py' % name.replace('.', '/')
if name.startswith('ansible.module_utils.') or name == 'ansible.module_utils':
path = os.path.join('lib', filename)
elif data_context().content.collection and (
name.startswith('ansible_collections.%s.plugins.module_utils.' % data_context().content.collection.full_name) or
name == 'ansible_collections.%s.plugins.module_utils' % data_context().content.collection.full_name):
path = '/'.join(filename.split('/')[3:])
else:
raise Exception('Unexpected import name: %s' % name)
return path
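# Examples (ansible-core layout):
#
#   get_import_path('ansible.module_utils.basic')               -> 'lib/ansible/module_utils/basic.py'
#   get_import_path('ansible.module_utils.facts', package=True) -> 'lib/ansible/module_utils/facts/__init__.py'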
def path_to_module(path): # type: (str) -> str
"""Convert the given path to a module name."""
module = os.path.splitext(path)[0].replace(os.path.sep, '.')
if module.endswith('.__init__'):
module = module[:-9]
return module
def relative_to_absolute(name, level, module, path, lineno): # type: (str, int, str, str, int) -> str
"""Convert a relative import to an absolute import."""
if level <= 0:
absolute_name = name
elif not module:
display.warning('Cannot resolve relative import "%s%s" in unknown module at %s:%d' % ('.' * level, name, path, lineno))
absolute_name = 'relative.nomodule'
else:
parts = module.split('.')
if level >= len(parts):
display.warning('Cannot resolve relative import "%s%s" above module "%s" at %s:%d' % ('.' * level, name, module, path, lineno))
absolute_name = 'relative.abovelevel'
else:
absolute_name = '.'.join(parts[:-level] + [name])
return absolute_name
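# Worked example: relative_to_absolute('basic', 1, 'ansible.modules.ping', path, 10)
# drops the last of ['ansible', 'modules', 'ping'] and appends 'basic', giving
# 'ansible.modules.basic'; a level deeper than the module's package depth falls
# back to the sentinel 'relative.abovelevel' with a warning.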
class ModuleUtilFinder(ast.NodeVisitor):
"""AST visitor to find valid module_utils imports."""
def __init__(self, path, module_utils): # type: (str, t.Set[str]) -> None
self.path = path
self.module_utils = module_utils
self.imports = set()
# implicitly import parent package
if path.endswith('/__init__.py'):
path = os.path.split(path)[0]
if path.startswith('lib/ansible/module_utils/'):
package = os.path.split(path)[0].replace('/', '.')[4:]
if package != 'ansible.module_utils' and package not in VIRTUAL_PACKAGES:
self.add_import(package, 0)
self.module = None
if data_context().content.is_ansible:
# Various parts of the Ansible source tree execute within different modules.
# To support import analysis, each file which uses relative imports must reside under a path defined here.
# The mapping is a tuple consisting of a path pattern to match and a replacement path.
# During analysis, any relative imports not covered here will result in warnings, which can be fixed by adding the appropriate entry.
path_map = (
('^hacking/build_library/build_ansible/', 'build_ansible/'),
('^lib/ansible/', 'ansible/'),
('^test/lib/ansible_test/_util/controller/sanity/validate-modules/', 'validate_modules/'),
('^test/lib/ansible_test/_util/target/legacy_collection_loader/', 'legacy_collection_loader/'),
('^test/units/', 'test/units/'),
('^test/lib/ansible_test/_internal/', 'ansible_test/_internal/'),
('^test/integration/targets/.*/ansible_collections/(?P<ns>[^/]*)/(?P<col>[^/]*)/', r'ansible_collections/\g<ns>/\g<col>/'),
('^test/integration/targets/.*/library/', 'ansible/modules/'),
)
for pattern, replacement in path_map:
if re.search(pattern, self.path):
revised_path = re.sub(pattern, replacement, self.path)
self.module = path_to_module(revised_path)
break
else:
# This assumes that all files within the collection are executed by Ansible as part of the collection.
# While that will usually be true, there are exceptions which will result in this resolution being incorrect.
self.module = path_to_module(os.path.join(data_context().content.collection.directory, self.path))
# noinspection PyPep8Naming
# pylint: disable=locally-disabled, invalid-name
def visit_Import(self, node): # type: (ast.Import) -> None
"""Visit an import node."""
self.generic_visit(node)
# import ansible.module_utils.MODULE[.MODULE]
# import ansible_collections.{ns}.{col}.plugins.module_utils.module_utils.MODULE[.MODULE]
self.add_imports([alias.name for alias in node.names], node.lineno)
# noinspection PyPep8Naming
# pylint: disable=locally-disabled, invalid-name
def visit_ImportFrom(self, node): # type: (ast.ImportFrom) -> None
"""Visit an import from node."""
self.generic_visit(node)
if not node.module:
return
module = relative_to_absolute(node.module, node.level, self.module, self.path, node.lineno)
if not module.startswith('ansible'):
return
# from ansible.module_utils import MODULE[, MODULE]
# from ansible.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
# from ansible_collections.{ns}.{col}.plugins.module_utils import MODULE[, MODULE]
# from ansible_collections.{ns}.{col}.plugins.module_utils.MODULE[.MODULE] import MODULE[, MODULE]
self.add_imports(['%s.%s' % (module, alias.name) for alias in node.names], node.lineno)
def add_import(self, name, line_number): # type: (str, int) -> None
"""Record the specified import."""
import_name = name
while self.is_module_util_name(name):
if name in self.module_utils:
if name not in self.imports:
display.info('%s:%d imports module_utils: %s' % (self.path, line_number, name), verbosity=5)
self.imports.add(name)
return # duplicate imports are ignored
name = '.'.join(name.split('.')[:-1])
if is_subdir(self.path, data_context().content.test_path):
return # invalid imports in tests are ignored
# Treat this error as a warning so tests can be executed as best as possible.
# This error should be detected by unit or integration tests.
display.warning('%s:%d Invalid module_utils import: %s' % (self.path, line_number, import_name))
def add_imports(self, names, line_no): # type: (t.List[str], int) -> None
"""Add the given import names if they are module_utils imports."""
for name in names:
if self.is_module_util_name(name):
self.add_import(name, line_no)
@staticmethod
def is_module_util_name(name): # type: (str) -> bool
"""Return True if the given name is a module_util name for the content under test. External module_utils are ignored."""
if data_context().content.is_ansible and name.startswith('ansible.module_utils.'):
return True
if data_context().content.collection and name.startswith('ansible_collections.%s.plugins.module_utils.' % data_context().content.collection.full_name):
return True
return False
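# Minimal usage sketch (hypothetical snippet; assumes an initialized data
# context for ansible-core content):
#
#   tree = ast.parse(b'from ansible.module_utils.basic import AnsibleModule\n')
#   finder = ModuleUtilFinder('lib/ansible/modules/ping.py', {'ansible.module_utils.basic'})
#   finder.visit(tree)
#   assert finder.imports == {'ansible.module_utils.basic'}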


@@ -0,0 +1,55 @@
"""Command line parsing."""
from __future__ import annotations
import argparse
import os
import sys
from .argparsing import (
CompositeActionCompletionFinder,
)
from .commands import (
do_commands,
)
from .compat import (
HostSettings,
convert_legacy_args,
)
def parse_args(): # type: () -> argparse.Namespace
"""Parse command line arguments."""
completer = CompositeActionCompletionFinder()
if completer.enabled:
epilog = 'Tab completion available using the "argcomplete" python package.'
else:
epilog = 'Install the "argcomplete" python package to enable tab completion.'
parser = argparse.ArgumentParser(epilog=epilog)
do_commands(parser, completer)
completer(
parser,
always_complete_options=False,
)
argv = sys.argv[1:]
args = parser.parse_args(argv)
if args.explain and not args.verbosity:
args.verbosity = 1
if args.no_environment:
pass
elif args.host_path:
args.host_settings = HostSettings.deserialize(os.path.join(args.host_path, 'settings.dat'))
else:
args.host_settings = convert_legacy_args(argv, args, args.target_mode)
args.host_settings.apply_defaults()
return args
