Init: mediaserver

2023-02-08 12:13:28 +01:00
parent 848bc9739c
commit f7c23d4ba9
31914 changed files with 6175775 additions and 0 deletions


@@ -0,0 +1,41 @@
# Copyright (c) 2022, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import base64
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.utils.vars import merge_hash
from ansible_collections.community.docker.plugins.module_utils._scramble import unscramble
class ActionModule(ActionBase):
# Set to True when transferring files to the remote
TRANSFERS_FILES = False
def run(self, tmp=None, task_vars=None):
self._supports_check_mode = True
self._supports_async = True
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
self._task.args['_max_file_size_for_diff'] = C.MAX_FILE_SIZE_FOR_DIFF
result = merge_hash(result, self._execute_module(task_vars=task_vars, wrap_async=self._task.async_val))
if u'diff' in result and result[u'diff'].get(u'scrambled_diff'):
# Scrambling is not done for security, but to avoid no_log screwing up the diff
diff = result[u'diff']
key = base64.b64decode(diff.pop(u'scrambled_diff'))
for k in (u'before', u'after'):
if k in diff:
diff[k] = unscramble(diff[k], key)
return result
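
The unscramble helper imported above lives in module_utils._scramble, which is not part of this excerpt. As a minimal sketch, a compatible scramble/unscramble pair could be built on XOR with a cycled key (hypothetical; the real module's encoding may differ):

import base64
import itertools

def scramble(value, key):
    # XOR each plaintext byte with the cycled key bytes, then base64-encode.
    data = bytes(b ^ k for b, k in zip(value.encode('utf-8'), itertools.cycle(key)))
    return base64.b64encode(data).decode('ascii')

def unscramble(value, key):
    # Reverse of scramble(): base64-decode, then XOR with the cycled key again.
    data = base64.b64decode(value)
    return bytes(b ^ k for b, k in zip(data, itertools.cycle(key))).decode('utf-8')

Since XOR is its own inverse, unscramble(scramble(s, key), key) == s for any key, for example os.urandom(16).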


@@ -0,0 +1,452 @@
# Based on the chroot connection plugin by Maykel Moya
#
# (c) 2014, Lorin Hochstein
# (c) 2015, Leendert Brouwer (https://github.com/objectified)
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author:
- Lorin Hochstein (!UNKNOWN)
- Leendert Brouwer (!UNKNOWN)
name: docker
short_description: Run tasks in docker containers
description:
- Run commands or put/fetch files to an existing docker container.
- Uses the Docker CLI to execute commands in the container. If you prefer
to directly connect to the Docker daemon, use the
R(community.docker.docker_api,ansible_collections.community.docker.docker_api_connection)
connection plugin.
options:
remote_addr:
description:
- The name of the container you want to access.
default: inventory_hostname
vars:
- name: inventory_hostname
- name: ansible_host
- name: ansible_docker_host
remote_user:
description:
- The user to execute as inside the container.
- If Docker is too old to allow this (< 1.7), the one set by Docker itself will be used.
vars:
- name: ansible_user
- name: ansible_docker_user
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
cli:
- name: user
keyword:
- name: remote_user
docker_extra_args:
description:
- Extra arguments to pass to the docker command line.
default: ''
vars:
- name: ansible_docker_extra_args
ini:
- section: docker_connection
key: extra_cli_args
container_timeout:
default: 10
description:
- Controls how long we can wait before reading output from the container once execution has started.
env:
- name: ANSIBLE_TIMEOUT
- name: ANSIBLE_DOCKER_TIMEOUT
version_added: 2.2.0
ini:
- key: timeout
section: defaults
- key: timeout
section: docker_connection
version_added: 2.2.0
vars:
- name: ansible_docker_timeout
version_added: 2.2.0
cli:
- name: timeout
type: integer
'''
import fcntl
import os
import os.path
import subprocess
import re
from ansible.compat import selectors
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.module_utils.six.moves import shlex_quote
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase, BUFSIZE
from ansible.utils.display import Display
from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
display = Display()
class Connection(ConnectionBase):
''' Local docker based connections '''
transport = 'community.docker.docker'
has_pipelining = True
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
# Note: docker supports running as non-root in some configurations.
# (For instance, setting the UNIX socket file to be readable and
# writable by a specific UNIX group and then putting users into that
# group). Therefore we don't check that the user is root when using
# this connection. But if the user is getting a permission denied
# error it probably means that docker on their system is only
# configured to be connected to by root and they are not running as
# root.
self._docker_args = []
self._container_user_cache = {}
self._version = None
# Windows uses Powershell modules
if getattr(self._shell, "_IS_WINDOWS", False):
self.module_implementation_preferences = ('.ps1', '.exe', '')
if 'docker_command' in kwargs:
self.docker_cmd = kwargs['docker_command']
else:
try:
self.docker_cmd = get_bin_path('docker')
except ValueError:
raise AnsibleError("docker command not found in PATH")
@staticmethod
def _sanitize_version(version):
version = re.sub(u'[^0-9a-zA-Z.]', u'', version)
version = re.sub(u'^v', u'', version)
return version
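# For example (sketch): the newer 'docker version --format' call returns
# something like "'20.10.21'\n"; stripping everything but alphanumerics and
# dots, then a leading 'v', yields '20.10.21'.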
def _old_docker_version(self):
cmd_args = self._docker_args
old_version_subcommand = ['version']
old_docker_cmd = [self.docker_cmd] + cmd_args + old_version_subcommand
p = subprocess.Popen(old_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
cmd_output, err = p.communicate()
return old_docker_cmd, to_native(cmd_output), err, p.returncode
def _new_docker_version(self):
# no result yet, must be newer Docker version
cmd_args = self._docker_args
new_version_subcommand = ['version', '--format', "'{{.Server.Version}}'"]
new_docker_cmd = [self.docker_cmd] + cmd_args + new_version_subcommand
p = subprocess.Popen(new_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
cmd_output, err = p.communicate()
return new_docker_cmd, to_native(cmd_output), err, p.returncode
def _get_docker_version(self):
cmd, cmd_output, err, returncode = self._old_docker_version()
if returncode == 0:
for line in to_text(cmd_output, errors='surrogate_or_strict').split(u'\n'):
if line.startswith(u'Server version:'): # old docker versions
return self._sanitize_version(line.split()[2])
cmd, cmd_output, err, returncode = self._new_docker_version()
if returncode:
raise AnsibleError('Docker version check (%s) failed: %s' % (to_native(cmd), to_native(err)))
return self._sanitize_version(to_text(cmd_output, errors='surrogate_or_strict'))
def _get_docker_remote_user(self):
""" Get the default user configured in the docker container """
container = self.get_option('remote_addr')
if container in self._container_user_cache:
return self._container_user_cache[container]
p = subprocess.Popen([self.docker_cmd, 'inspect', '--format', '{{.Config.User}}', container],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
out = to_text(out, errors='surrogate_or_strict')
if p.returncode != 0:
display.warning(u'unable to retrieve default user from docker container: %s %s' % (out, to_text(err)))
self._container_user_cache[container] = None
return None
# The default exec user is root, unless it was changed in the Dockerfile with USER
user = out.strip() or u'root'
self._container_user_cache[container] = user
return user
def _build_exec_cmd(self, cmd):
""" Build the local docker exec command to run cmd on remote_host
If remote_user is available and is supported by the docker
version we are using, it will be provided to docker exec.
"""
local_cmd = [self.docker_cmd]
if self._docker_args:
local_cmd += self._docker_args
local_cmd += [b'exec']
if self.remote_user is not None:
local_cmd += [b'-u', self.remote_user]
# -i is needed to keep stdin open which allows pipelining to work
local_cmd += [b'-i', self.get_option('remote_addr')] + cmd
return local_cmd
def _set_docker_args(self):
# TODO: this is mostly for backwards compatibility, play_context is used as fallback for older versions
# docker arguments
del self._docker_args[:]
extra_args = self.get_option('docker_extra_args') or getattr(self._play_context, 'docker_extra_args', '')
if extra_args:
self._docker_args += extra_args.split(' ')
def _set_conn_data(self):
''' Initialize the connection; this cannot be done entirely in __init__ since not all data is available at that point '''
self._set_docker_args()
self.remote_user = self.get_option('remote_user')
if self.remote_user is None and self._play_context.remote_user is not None:
self.remote_user = self._play_context.remote_user
# Timeout: for backwards compatibility, fall back to the play context timeout when the option is left at its default but the play context differs
self.timeout = self.get_option('container_timeout')
if self.timeout == 10 and self.timeout != self._play_context.timeout:
self.timeout = self._play_context.timeout
@property
def docker_version(self):
if not self._version:
self._set_docker_args()
self._version = self._get_docker_version()
if self._version == u'dev':
display.warning(u'Docker version number is "dev". Will assume latest version.')
if self._version != u'dev' and LooseVersion(self._version) < LooseVersion(u'1.3'):
raise AnsibleError('docker connection type requires docker 1.3 or higher')
return self._version
def _get_actual_user(self):
if self.remote_user is not None:
# An explicit user is provided
if self.docker_version == u'dev' or LooseVersion(self.docker_version) >= LooseVersion(u'1.7'):
# Support for specifying the exec user was added in docker 1.7
return self.remote_user
else:
self.remote_user = None
actual_user = self._get_docker_remote_user()
if actual_user != self.get_option('remote_user'):
display.warning(u'docker {0} does not support remote_user, using container default: {1}'
.format(self.docker_version, actual_user or u'?'))
return actual_user
elif self._display.verbosity > 2:
# Since we're not setting the actual_user, look it up so we have it for logging later
# Only do this if display verbosity is high enough that we'll need the value
# This saves overhead from calling into docker when we don't need to.
return self._get_docker_remote_user()
else:
return None
def _connect(self, port=None):
""" Connect to the container. Nothing to do """
super(Connection, self)._connect()
if not self._connected:
self._set_conn_data()
actual_user = self._get_actual_user()
display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
actual_user or u'?'), host=self.get_option('remote_addr')
)
self._connected = True
def exec_command(self, cmd, in_data=None, sudoable=False):
""" Run a command on the docker host """
self._set_conn_data()
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd])
display.vvv(u"EXEC {0}".format(to_text(local_cmd)), host=self.get_option('remote_addr'))
display.debug("opening command with Popen()")
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
p = subprocess.Popen(
local_cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
display.debug("done running command with Popen()")
if self.become and self.become.expect_prompt() and sudoable:
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
selector = selectors.DefaultSelector()
selector.register(p.stdout, selectors.EVENT_READ)
selector.register(p.stderr, selectors.EVENT_READ)
become_output = b''
try:
while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
events = selector.select(self.timeout)
if not events:
stdout, stderr = p.communicate()
raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))
for key, event in events:
if key.fileobj == p.stdout:
chunk = p.stdout.read()
elif key.fileobj == p.stderr:
chunk = p.stderr.read()
if not chunk:
stdout, stderr = p.communicate()
raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
become_output += chunk
finally:
selector.close()
if not self.become.check_success(become_output):
become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
display.debug("getting output with communicate()")
stdout, stderr = p.communicate(in_data)
display.debug("done communicating")
display.debug("done with docker.exec_command()")
return (p.returncode, stdout, stderr)
def _prefix_login_path(self, remote_path):
''' Make sure that we put files into a standard path
If a path is relative, then we need to choose where to put it.
ssh chooses $HOME but we aren't guaranteed that a home dir will
exist in any given chroot. So for now we're choosing "/" instead.
This also happens to be the former default.
Can revisit using $HOME instead if it's a problem
'''
if getattr(self._shell, "_IS_WINDOWS", False):
import ntpath
return ntpath.normpath(remote_path)
else:
if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path)
return os.path.normpath(remote_path)
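# For example (sketch): a relative path like 'foo/bar' becomes '/foo/bar'
# on POSIX targets, while an absolute path is only normalized.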
def put_file(self, in_path, out_path):
""" Transfer a file from local to docker container """
self._set_conn_data()
super(Connection, self).put_file(in_path, out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
out_path = self._prefix_login_path(out_path)
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound(
"file or module does not exist: %s" % to_native(in_path))
out_path = shlex_quote(out_path)
# Older docker doesn't have native support for copying files into
# running containers, so we use docker exec to implement this
# Although docker version 1.8 and later provide support, the
# owner and group of the files are always set to root
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
if not os.fstat(in_file.fileno()).st_size:
count = ' count=0'
else:
count = ''
args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)])
args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
try:
p = subprocess.Popen(args, stdin=in_file, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError:
raise AnsibleError("docker connection requires dd command in the container to put files")
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" %
(to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))
def fetch_file(self, in_path, out_path):
""" Fetch a file from container to local. """
self._set_conn_data()
super(Connection, self).fetch_file(in_path, out_path)
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
in_path = self._prefix_login_path(in_path)
# out_path is the final file path, but docker takes a directory, not a
# file path
out_dir = os.path.dirname(out_path)
args = [self.docker_cmd, "cp", "%s:%s" % (self.get_option('remote_addr'), in_path), out_dir]
args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
p = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
if getattr(self._shell, "_IS_WINDOWS", False):
import ntpath
actual_out_path = ntpath.join(out_dir, ntpath.basename(in_path))
else:
actual_out_path = os.path.join(out_dir, os.path.basename(in_path))
if p.returncode != 0:
# Older docker versions don't have native support for fetching files with the `cp` command
# If `cp` fails, try to use `dd` instead
args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)])
args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file:
try:
p = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=out_file, stderr=subprocess.PIPE)
except OSError:
raise AnsibleError("docker connection requires dd command in the container to put files")
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
# Rename if needed
if actual_out_path != out_path:
os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict'))
def close(self):
""" Terminate the connection. Nothing to do for Docker"""
super(Connection, self).close()
self._connected = False
def reset(self):
# Clear container user cache
self._container_user_cache = {}
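
For illustration, this standalone sketch mirrors how _build_exec_cmd above assembles the final CLI invocation (hypothetical user and container names, without the plugin machinery):

def build_exec_cmd(docker_cmd, docker_args, remote_user, remote_addr, cmd):
    # Mirrors Connection._build_exec_cmd from the plugin above.
    local_cmd = [docker_cmd] + docker_args + [b'exec']
    if remote_user is not None:
        local_cmd += [b'-u', remote_user]
    # -i keeps stdin open so pipelining works
    return local_cmd + [b'-i', remote_addr] + cmd

build_exec_cmd('docker', [], b'app', b'web-1', [b'/bin/sh', b'-c', b'echo hi'])
# -> ['docker', b'exec', b'-u', b'app', b'-i', b'web-1', b'/bin/sh', b'-c', b'echo hi']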


@@ -0,0 +1,338 @@
# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author:
- Felix Fontein (@felixfontein)
name: docker_api
short_description: Run tasks in docker containers
version_added: 1.1.0
description:
- Run commands or put/fetch files to an existing docker container.
- Uses the L(requests library,https://pypi.org/project/requests/) to interact
directly with the Docker daemon instead of using the Docker CLI. Use the
R(community.docker.docker,ansible_collections.community.docker.docker_connection)
connection plugin if you want to use the Docker CLI.
extends_documentation_fragment:
- community.docker.docker.api_documentation
- community.docker.docker.var_names
options:
remote_user:
type: str
description:
- The user to execute as inside the container.
vars:
- name: ansible_user
- name: ansible_docker_user
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
cli:
- name: user
keyword:
- name: remote_user
remote_addr:
type: str
description:
- The name of the container you want to access.
default: inventory_hostname
vars:
- name: inventory_hostname
- name: ansible_host
- name: ansible_docker_host
container_timeout:
default: 10
description:
- Controls how long we can wait before reading output from the container once execution has started.
env:
- name: ANSIBLE_TIMEOUT
- name: ANSIBLE_DOCKER_TIMEOUT
version_added: 2.2.0
ini:
- key: timeout
section: defaults
- key: timeout
section: docker_connection
version_added: 2.2.0
vars:
- name: ansible_docker_timeout
version_added: 2.2.0
cli:
- name: timeout
type: integer
'''
import os
import os.path
from ansible.errors import AnsibleFileNotFound, AnsibleConnectionFailure
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase
from ansible.utils.display import Display
from ansible_collections.community.docker.plugins.module_utils.common_api import (
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.copy import (
DockerFileCopyError,
DockerFileNotFound,
fetch_file,
put_file,
)
from ansible_collections.community.docker.plugins.plugin_utils.socket_handler import (
DockerSocketHandler,
)
from ansible_collections.community.docker.plugins.plugin_utils.common_api import (
AnsibleDockerClient,
)
from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, DockerException, NotFound
MIN_DOCKER_API = None
display = Display()
class Connection(ConnectionBase):
''' Local docker based connections '''
transport = 'community.docker.docker_api'
has_pipelining = True
def _call_client(self, callable, not_found_can_be_resource=False):
try:
return callable()
except NotFound as e:
if not_found_can_be_resource:
raise AnsibleConnectionFailure('Could not find container "{1}" or resource in it ({0})'.format(e, self.get_option('remote_addr')))
else:
raise AnsibleConnectionFailure('Could not find container "{1}" ({0})'.format(e, self.get_option('remote_addr')))
except APIError as e:
if e.response is not None and e.response.status_code == 409:
raise AnsibleConnectionFailure('The container "{1}" has been paused ({0})'.format(e, self.get_option('remote_addr')))
self.client.fail(
'An unexpected Docker error occurred for container "{1}": {0}'.format(e, self.get_option('remote_addr'))
)
except DockerException as e:
self.client.fail(
'An unexpected Docker error occurred for container "{1}": {0}'.format(e, self.get_option('remote_addr'))
)
except RequestException as e:
self.client.fail(
'An unexpected requests error occurred for container "{1}" when trying to talk to the Docker daemon: {0}'
.format(e, self.get_option('remote_addr'))
)
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self.client = None
self.ids = dict()
# Windows uses Powershell modules
if getattr(self._shell, "_IS_WINDOWS", False):
self.module_implementation_preferences = ('.ps1', '.exe', '')
self.actual_user = None
def _connect(self, port=None):
""" Connect to the container. Nothing to do """
super(Connection, self)._connect()
if not self._connected:
self.actual_user = self.get_option('remote_user')
display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
self.actual_user or u'?'), host=self.get_option('remote_addr')
)
if self.client is None:
self.client = AnsibleDockerClient(self, min_docker_api_version=MIN_DOCKER_API)
self._connected = True
if self.actual_user is None and display.verbosity > 2:
# Since we're not setting the actual_user, look it up so we have it for logging later
# Only do this if display verbosity is high enough that we'll need the value
# This saves overhead from calling into docker when we don't need to
display.vvv(u"Trying to determine actual user")
result = self._call_client(lambda: self.client.get_json('/containers/{0}/json', self.get_option('remote_addr')))
if result.get('Config'):
self.actual_user = result['Config'].get('User')
if self.actual_user is not None:
display.vvv(u"Actual user is '{0}'".format(self.actual_user))
def exec_command(self, cmd, in_data=None, sudoable=False):
""" Run a command on the docker host """
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
command = [self._play_context.executable, '-c', to_text(cmd)]
do_become = self.become and self.become.expect_prompt() and sudoable
display.vvv(
u"EXEC {0}{1}{2}".format(
to_text(command),
', with stdin ({0} bytes)'.format(len(in_data)) if in_data is not None else '',
', with become prompt' if do_become else '',
),
host=self.get_option('remote_addr')
)
need_stdin = True if (in_data is not None) or do_become else False
data = {
'Container': self.get_option('remote_addr'),
'User': self.get_option('remote_user') or '',
'Privileged': False,
'Tty': False,
'AttachStdin': need_stdin,
'AttachStdout': True,
'AttachStderr': True,
'Cmd': command,
}
if 'detachKeys' in self.client._general_configs:
data['detachKeys'] = self.client._general_configs['detachKeys']
exec_data = self._call_client(lambda: self.client.post_json_to_json('/containers/{0}/exec', self.get_option('remote_addr'), data=data))
exec_id = exec_data['Id']
data = {
'Tty': False,
'Detach': False
}
if need_stdin:
exec_socket = self._call_client(lambda: self.client.post_json_to_stream_socket('/exec/{0}/start', exec_id, data=data))
try:
with DockerSocketHandler(display, exec_socket, container=self.get_option('remote_addr')) as exec_socket_handler:
if do_become:
become_output = [b'']
def append_become_output(stream_id, data):
become_output[0] += data
exec_socket_handler.set_block_done_callback(append_become_output)
while not self.become.check_success(become_output[0]) and not self.become.check_password_prompt(become_output[0]):
if not exec_socket_handler.select(self.get_option('container_timeout')):
stdout, stderr = exec_socket_handler.consume()
raise AnsibleConnectionFailure('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output[0]))
if exec_socket_handler.is_eof():
raise AnsibleConnectionFailure('privilege output closed while waiting for password prompt:\n' + to_native(become_output[0]))
if not self.become.check_success(become_output[0]):
become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
exec_socket_handler.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
if in_data is not None:
exec_socket_handler.write(in_data)
stdout, stderr = exec_socket_handler.consume()
finally:
exec_socket.close()
else:
stdout, stderr = self._call_client(lambda: self.client.post_json_to_stream(
'/exec/{0}/start', exec_id, stream=False, demux=True, tty=False, data=data))
result = self._call_client(lambda: self.client.get_json('/exec/{0}/json', exec_id))
return result.get('ExitCode') or 0, stdout or b'', stderr or b''
def _prefix_login_path(self, remote_path):
''' Make sure that we put files into a standard path
If a path is relative, then we need to choose where to put it.
ssh chooses $HOME but we aren't guaranteed that a home dir will
exist in any given chroot. So for now we're choosing "/" instead.
This also happens to be the former default.
Can revisit using $HOME instead if it's a problem
'''
if getattr(self._shell, "_IS_WINDOWS", False):
import ntpath
return ntpath.normpath(remote_path)
else:
if not remote_path.startswith(os.path.sep):
remote_path = os.path.join(os.path.sep, remote_path)
return os.path.normpath(remote_path)
def put_file(self, in_path, out_path):
""" Transfer a file from local to docker container """
super(Connection, self).put_file(in_path, out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
out_path = self._prefix_login_path(out_path)
if self.actual_user not in self.ids:
dummy, ids, dummy = self.exec_command(b'id -u && id -g')
try:
user_id, group_id = ids.splitlines()
self.ids[self.actual_user] = int(user_id), int(group_id)
display.vvvv(
'PUT: Determined uid={0} and gid={1} for user "{2}"'.format(user_id, group_id, self.actual_user),
host=self.get_option('remote_addr')
)
except Exception as e:
raise AnsibleConnectionFailure(
'Error while determining user and group ID of current user in container "{1}": {0}\nGot value: {2!r}'
.format(e, self.get_option('remote_addr'), ids)
)
user_id, group_id = self.ids[self.actual_user]
try:
self._call_client(
lambda: put_file(
self.client,
container=self.get_option('remote_addr'),
in_path=in_path,
out_path=out_path,
user_id=user_id,
group_id=group_id,
user_name=self.actual_user,
follow_links=True,
),
not_found_can_be_resource=True,
)
except DockerFileNotFound as exc:
raise AnsibleFileNotFound(to_native(exc))
except DockerFileCopyError as exc:
raise AnsibleConnectionFailure(to_native(exc))
def fetch_file(self, in_path, out_path):
""" Fetch a file from container to local. """
super(Connection, self).fetch_file(in_path, out_path)
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
in_path = self._prefix_login_path(in_path)
try:
self._call_client(
lambda: fetch_file(
self.client,
container=self.get_option('remote_addr'),
in_path=in_path,
out_path=out_path,
follow_links=True,
log=lambda msg: display.vvvv(msg, host=self.get_option('remote_addr')),
),
not_found_can_be_resource=True,
)
except DockerFileNotFound as exc:
raise AnsibleFileNotFound(to_native(exc))
except DockerFileCopyError as exc:
raise AnsibleConnectionFailure(to_native(exc))
def close(self):
""" Terminate the connection. Nothing to do for Docker"""
super(Connection, self).close()
self._connected = False
def reset(self):
self.ids.clear()
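
The exec_command flow above boils down to three Docker Engine API calls. A minimal sketch with plain requests against an unsecured local TCP daemon (hypothetical endpoint and container name; the plugin itself goes through its vendored API client and demuxes the attached stream):

import requests

BASE = 'http://localhost:2375'   # assumes an unsecured local TCP daemon
CONTAINER = 'web-1'              # hypothetical container name

# 1. Create the exec instance.
exec_id = requests.post(
    '%s/containers/%s/exec' % (BASE, CONTAINER),
    json={'AttachStdin': False, 'AttachStdout': True, 'AttachStderr': True,
          'Tty': False, 'Cmd': ['/bin/sh', '-c', 'echo hi']},
).json()['Id']

# 2. Start it; the response body is the multiplexed stdout/stderr stream.
output = requests.post(
    '%s/exec/%s/start' % (BASE, exec_id),
    json={'Tty': False, 'Detach': False},
).content

# 3. Fetch the exit code once the exec has finished.
exit_code = requests.get('%s/exec/%s/json' % (BASE, exec_id)).json()['ExitCode']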


@@ -0,0 +1,240 @@
# Copyright (c) 2021 Jeff Goldschrafe <jeff@holyhandgrenade.org>
# Based on Ansible local connection plugin by:
# Copyright (c) 2012 Michael DeHaan <michael.dehaan@gmail.com>
# Copyright (c) 2015, 2017 Toshio Kuratomi <tkuratomi@ansible.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: nsenter
short_description: execute on the host running the controller container
version_added: 1.9.0
description:
- This connection plugin allows Ansible, running in a privileged container, to execute tasks on the container
host instead of in the container itself.
- This is useful for running Ansible in a pull model, while still keeping the Ansible control node
containerized.
- It relies on having privileged access to run C(nsenter) in the host's PID namespace, allowing it to enter the
namespaces of the provided PID (default PID 1, or init/systemd).
author: Jeff Goldschrafe (@jgoldschrafe)
options:
nsenter_pid:
description:
- PID to attach with using nsenter.
- The default should be fine unless you are attaching as a non-root user.
type: int
default: 1
vars:
- name: ansible_nsenter_pid
env:
- name: ANSIBLE_NSENTER_PID
ini:
- section: nsenter_connection
key: nsenter_pid
notes:
- The remote user is ignored; this plugin always runs as root.
- >-
This plugin requires the Ansible controller container to be launched in the following way:
(1) The container image contains the C(nsenter) program;
(2) The container is launched in privileged mode;
(3) The container is launched in the host's PID namespace (C(--pid host)).
'''
import os
import pty
import shutil
import subprocess
import fcntl
import ansible.constants as C
from ansible.errors import AnsibleError, AnsibleFileNotFound
from ansible.module_utils.compat import selectors
from ansible.module_utils.six import binary_type, text_type
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.plugins.connection import ConnectionBase
from ansible.utils.display import Display
from ansible.utils.path import unfrackpath
display = Display()
class Connection(ConnectionBase):
'''Connections to a container host using nsenter
'''
transport = 'community.docker.nsenter'
has_pipelining = False
def __init__(self, *args, **kwargs):
super(Connection, self).__init__(*args, **kwargs)
self.cwd = None
def _connect(self):
self._nsenter_pid = self.get_option("nsenter_pid")
# Because nsenter requires very high privileges, our remote user
# is always assumed to be root.
self._play_context.remote_user = "root"
if not self._connected:
display.vvv(
u"ESTABLISH NSENTER CONNECTION FOR USER: {0}".format(
self._play_context.remote_user
),
host=self._play_context.remote_addr,
)
self._connected = True
return self
def exec_command(self, cmd, in_data=None, sudoable=True):
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
display.debug("in nsenter.exec_command()")
executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None
if not os.path.exists(to_bytes(executable, errors='surrogate_or_strict')):
raise AnsibleError("failed to find the executable specified %s."
" Please verify if the executable exists and re-try." % executable)
# Rewrite the provided command to prefix it with nsenter
nsenter_cmd_parts = [
"nsenter",
"--ipc",
"--mount",
"--net",
"--pid",
"--uts",
"--preserve-credentials",
"--target={0}".format(self._nsenter_pid),
"--",
]
if isinstance(cmd, (text_type, binary_type)):
cmd_parts = nsenter_cmd_parts + [cmd]
cmd = to_bytes(" ".join(cmd_parts))
else:
cmd_parts = nsenter_cmd_parts + cmd
cmd = [to_bytes(arg) for arg in cmd_parts]
display.vvv(u"EXEC {0}".format(to_text(cmd)), host=self._play_context.remote_addr)
display.debug("opening command with Popen()")
master = None
stdin = subprocess.PIPE
# This plugin does not support pipelining. This diverges from the behavior of
# the core "local" connection plugin that this one derives from.
if sudoable and self.become and self.become.expect_prompt():
# Create a pty if sudoable, for privilege escalation methods that need it.
# Falls back to using a standard pipe if this fails, which may
# cause the command to fail in certain situations where we are escalating
# privileges or the command otherwise needs a pty.
try:
master, stdin = pty.openpty()
except (IOError, OSError) as e:
display.debug("Unable to open pty: %s" % to_native(e))
p = subprocess.Popen(
cmd,
shell=isinstance(cmd, (text_type, binary_type)),
executable=executable if isinstance(cmd, (text_type, binary_type)) else None,
cwd=self.cwd,
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
# if we created a master, we can close the other half of the pty now, otherwise master is stdin
if master is not None:
os.close(stdin)
display.debug("done running command with Popen()")
if self.become and self.become.expect_prompt() and sudoable:
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
selector = selectors.DefaultSelector()
selector.register(p.stdout, selectors.EVENT_READ)
selector.register(p.stderr, selectors.EVENT_READ)
become_output = b''
try:
while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
events = selector.select(self._play_context.timeout)
if not events:
stdout, stderr = p.communicate()
raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))
for key, event in events:
if key.fileobj == p.stdout:
chunk = p.stdout.read()
elif key.fileobj == p.stderr:
chunk = p.stderr.read()
if not chunk:
stdout, stderr = p.communicate()
raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
become_output += chunk
finally:
selector.close()
if not self.become.check_success(become_output):
become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
if master is None:
p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
else:
os.write(master, to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
display.debug("getting output with communicate()")
stdout, stderr = p.communicate(in_data)
display.debug("done communicating")
# finally, close the other half of the pty, if it was created
if master:
os.close(master)
display.debug("done with nsenter.exec_command()")
return (p.returncode, stdout, stderr)
def put_file(self, in_path, out_path):
super(Connection, self).put_file(in_path, out_path)
in_path = unfrackpath(in_path, basedir=self.cwd)
out_path = unfrackpath(out_path, basedir=self.cwd)
display.vvv(u"PUT {0} to {1}".format(in_path, out_path), host=self._play_context.remote_addr)
try:
with open(to_bytes(in_path, errors="surrogate_or_strict"), "rb") as in_file:
in_data = in_file.read()
rc, out, err = self.exec_command(cmd=["tee", out_path], in_data=in_data)
if rc != 0:
raise AnsibleError("failed to transfer file to {0}: {1}".format(out_path, err))
except IOError as e:
raise AnsibleError("failed to transfer file to {0}: {1}".format(out_path, to_native(e)))
def fetch_file(self, in_path, out_path):
super(Connection, self).fetch_file(in_path, out_path)
in_path = unfrackpath(in_path, basedir=self.cwd)
out_path = unfrackpath(out_path, basedir=self.cwd)
try:
rc, out, err = self.exec_command(cmd=["cat", in_path])
display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._play_context.remote_addr)
if rc != 0:
raise AnsibleError("failed to transfer file to {0}: {1}".format(in_path, err))
with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb') as out_file:
out_file.write(out)
except IOError as e:
raise AnsibleError("failed to transfer file to {0}: {1}".format(to_native(out_path), to_native(e)))
def close(self):
''' terminate the connection; nothing to do here '''
self._connected = False
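
To make the command rewrite concrete, a one-off sketch of what gets run for the default PID 1 (hypothetical payload command):

nsenter_prefix = [
    "nsenter", "--ipc", "--mount", "--net", "--pid", "--uts",
    "--preserve-credentials", "--target=1", "--",
]
# A string command is shell-executed, so it is appended as a single argument:
print(" ".join(nsenter_prefix + ["/bin/sh -c 'uname -r'"]))
# nsenter --ipc --mount --net --pid --uts --preserve-credentials --target=1 -- /bin/sh -c 'uname -r'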


@@ -0,0 +1,96 @@
# -*- coding: utf-8 -*-
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class ModuleDocFragment(object):
# Standard documentation fragment
DOCUMENTATION = r'''
options: {}
attributes:
check_mode:
description: Can run in C(check_mode) and return changed status prediction without modifying target.
diff_mode:
description: Will return details on what has changed (or possibly needs changing in C(check_mode)), when in diff mode.
'''
# Should be used together with the standard fragment
INFO_MODULE = r'''
options: {}
attributes:
check_mode:
support: full
details:
- This action does not modify state.
diff_mode:
support: N/A
details:
- This action does not modify state.
'''
ACTIONGROUP_DOCKER = r'''
options: {}
attributes:
action_group:
description: Use C(group/docker) or C(group/community.docker.docker) in C(module_defaults) to set defaults for this module.
support: full
membership:
- community.docker.docker
- docker
'''
CONN = r'''
options: {}
attributes:
become:
description: Is usable alongside C(become) keywords.
connection:
description: Uses the target's configured connection information to execute code on it.
delegation:
description: Can be used in conjunction with C(delegate_to) and related keywords.
'''
FACTS = r'''
options: {}
attributes:
facts:
description: Action returns an C(ansible_facts) dictionary that will update existing host facts.
'''
# Should be used together with the standard fragment and the FACTS fragment
FACTS_MODULE = r'''
options: {}
attributes:
check_mode:
support: full
details:
- This action does not modify state.
diff_mode:
support: N/A
details:
- This action does not modify state.
facts:
support: full
'''
FILES = r'''
options: {}
attributes:
safe_file_operations:
description: Uses Ansible's strict file operation functions to ensure proper permissions and avoid data corruption.
'''
FLOW = r'''
options: {}
attributes:
action:
description: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
async:
description: Supports being used with the C(async) keyword.
'''
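
These fragments are consumed via extends_documentation_fragment in a module's own DOCUMENTATION. As a sketch, a hypothetical info module pulling in the standard and INFO_MODULE fragments might declare (assuming this file is registered as community.docker.attributes):

DOCUMENTATION = '''
module: docker_something_info   # hypothetical module name
extends_documentation_fragment:
  - community.docker.attributes
  - community.docker.attributes.info_module
'''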


@@ -0,0 +1,297 @@
# -*- coding: utf-8 -*-
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class ModuleDocFragment(object):
# Docker doc fragment
DOCUMENTATION = r'''
options:
docker_host:
description:
- The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
the module will automatically replace C(tcp) in the connection URL with C(https).
- If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used
instead. If the environment variable is not set, the default value will be used.
type: str
default: unix://var/run/docker.sock
aliases: [ docker_url ]
tls_hostname:
description:
- When verifying the authenticity of the Docker Host server, provide the expected name of the server.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will
be used instead. If the environment variable is not set, the default value will be used.
- Note that this option had a default value C(localhost) in older versions. It was removed in community.docker 3.0.0.
type: str
api_version:
description:
- The version of the Docker API running on the Docker Host.
- Defaults to the latest version of the API supported by Docker SDK for Python and the docker daemon.
- If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be
used instead. If the environment variable is not set, the default value will be used.
type: str
default: auto
aliases: [ docker_api_version ]
timeout:
description:
- The maximum amount of time in seconds to wait on a response from the API.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used
instead. If the environment variable is not set, the default value will be used.
type: int
default: 60
ca_cert:
description:
- Use a CA certificate when performing server verification by providing the path to a CA certificate file.
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
type: path
aliases: [ tls_ca_cert, cacert_path ]
client_cert:
description:
- Path to the client's TLS certificate file.
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
type: path
aliases: [ tls_client_cert, cert_path ]
client_key:
description:
- Path to the client's TLS key file.
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
type: path
aliases: [ tls_client_key, key_path ]
ssl_version:
description:
- Provide a valid SSL version number. Default value determined by ssl.py module.
- If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be
used instead.
type: str
tls:
description:
- Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
server. Note that if I(validate_certs) is set to C(true) as well, it will take precedence.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used
instead. If the environment variable is not set, the default value will be used.
type: bool
default: false
use_ssh_client:
description:
- For SSH transports, use the C(ssh) CLI tool instead of paramiko.
- Requires Docker SDK for Python 4.4.0 or newer.
type: bool
default: false
version_added: 1.5.0
validate_certs:
description:
- Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be
used instead. If the environment variable is not set, the default value will be used.
type: bool
default: false
aliases: [ tls_verify ]
debug:
description:
- Debug mode.
type: bool
default: false
notes:
- Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION),
C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
with the product that sets up the environment. It will set these variables for you. See
U(https://docs.docker.com/machine/reference/env/) for more details.
- When connecting to Docker daemon with TLS, you might need to install additional Python packages.
For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(ansible.builtin.pip).
- Note that the Docker SDK for Python only allows specifying the path to the Docker configuration for very few functions.
In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified,
and use C($DOCKER_CONFIG/config.json) otherwise.
'''
# For plugins: allow to define common options with Ansible variables
VAR_NAMES = r'''
options:
docker_host:
vars:
- name: ansible_docker_docker_host
tls_hostname:
vars:
- name: ansible_docker_tls_hostname
api_version:
vars:
- name: ansible_docker_api_version
timeout:
vars:
- name: ansible_docker_timeout
ca_cert:
vars:
- name: ansible_docker_ca_cert
client_cert:
vars:
- name: ansible_docker_client_cert
client_key:
vars:
- name: ansible_docker_client_key
ssl_version:
vars:
- name: ansible_docker_ssl_version
tls:
vars:
- name: ansible_docker_tls
validate_certs:
vars:
- name: ansible_docker_validate_certs
'''
# Additional, more specific stuff for minimal Docker SDK for Python version < 2.0
DOCKER_PY_1_DOCUMENTATION = r'''
options: {}
notes:
- This module uses the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
communicate with the Docker daemon.
requirements:
- "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
Python module has been superseded by L(docker,https://pypi.org/project/docker/)
(see L(here,https://github.com/docker/docker-py/issues/1310) for details). Note that both
modules should *not* be installed at the same time. Also note that when both modules are
installed and one of them is uninstalled, the other might no longer function and a reinstall
of it is required."
'''
# Additional, more specific stuff for minimal Docker SDK for Python version >= 2.0.
# Note that Docker SDK for Python >= 2.0 requires Python 2.7 or newer.
DOCKER_PY_2_DOCUMENTATION = r'''
options: {}
notes:
- This module uses the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
communicate with the Docker daemon.
requirements:
- "Python >= 2.7"
- "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
Python module has been superseded by L(docker,https://pypi.org/project/docker/)
(see L(here,https://github.com/docker/docker-py/issues/1310) for details).
This module does *not* work with docker-py."
'''
# Docker doc fragment when using the vendored API access code
API_DOCUMENTATION = r'''
options:
docker_host:
description:
- The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
the module will automatically replace C(tcp) in the connection URL with C(https).
- If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used
instead. If the environment variable is not set, the default value will be used.
type: str
default: unix://var/run/docker.sock
aliases: [ docker_url ]
tls_hostname:
description:
- When verifying the authenticity of the Docker Host server, provide the expected name of the server.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will
be used instead. If the environment variable is not set, the default value will be used.
- Note that this option had a default value C(localhost) in older versions. It was removed in community.docker 3.0.0.
type: str
api_version:
description:
- The version of the Docker API running on the Docker Host.
- Defaults to the latest version of the API supported by this collection and the docker daemon.
- If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be
used instead. If the environment variable is not set, the default value will be used.
type: str
default: auto
aliases: [ docker_api_version ]
timeout:
description:
- The maximum amount of time in seconds to wait on a response from the API.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used
instead. If the environment variable is not set, the default value will be used.
type: int
default: 60
ca_cert:
description:
- Use a CA certificate when performing server verification by providing the path to a CA certificate file.
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
type: path
aliases: [ tls_ca_cert, cacert_path ]
client_cert:
description:
- Path to the client's TLS certificate file.
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
type: path
aliases: [ tls_client_cert, cert_path ]
client_key:
description:
- Path to the client's TLS key file.
- If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
type: path
aliases: [ tls_client_key, key_path ]
ssl_version:
description:
- Provide a valid SSL version number. Default value determined by ssl.py module.
- If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be
used instead.
type: str
tls:
description:
- Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
server. Note that if I(validate_certs) is set to C(true) as well, it will take precedence.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used
instead. If the environment variable is not set, the default value will be used.
type: bool
default: false
use_ssh_client:
description:
- For SSH transports, use the C(ssh) CLI tool instead of paramiko.
type: bool
default: false
version_added: 1.5.0
validate_certs:
description:
- Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be
used instead. If the environment variable is not set, the default value will be used.
type: bool
default: false
aliases: [ tls_verify ]
debug:
description:
- Debug mode.
type: bool
default: false
notes:
- Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION),
C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
with the product that sets up the environment. It will set these variables for you. See
U(https://docs.docker.com/machine/reference/env/) for more details.
# - When connecting to Docker daemon with TLS, you might need to install additional Python packages.
# For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(ansible.builtin.pip).
# - Note that the Docker SDK for Python only allows to specify the path to the Docker configuration for very few functions.
# In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified,
# and use C($DOCKER_CONFIG/config.json) otherwise.
- This module does B(not) use the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
communicate with the Docker daemon. It uses code derived from the Docker SDK for Python that is included in this
collection.
requirements:
- requests
- pywin32 (when using named pipes on Windows)
- paramiko (when using SSH with I(use_ssh_client=false))
- pyOpenSSL (when using TLS)
- backports.ssl_match_hostname (when using TLS on Python 2)
'''
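
The parameter/environment/default fallback documented for docker_host works out to the following resolution order (standalone sketch, not the collection's actual option loader):

import os

def resolve_docker_host(task_value=None):
    # Task parameter wins, then the DOCKER_HOST environment variable,
    # then the documented default.
    return task_value or os.environ.get('DOCKER_HOST') or 'unix://var/run/docker.sock'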


@@ -0,0 +1,351 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Felix Fontein <felix@fontein.de>
# For the parts taken from the docker inventory script:
# Copyright (c) 2016, Paul Durivage <paul.durivage@gmail.com>
# Copyright (c) 2016, Chris Houseknecht <house@redhat.com>
# Copyright (c) 2016, James Tanner <jtanner@redhat.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: docker_containers
short_description: Ansible dynamic inventory plugin for Docker containers
version_added: 1.1.0
author:
- Felix Fontein (@felixfontein)
extends_documentation_fragment:
- ansible.builtin.constructed
- community.docker.docker.api_documentation
description:
- Reads inventories from the Docker API.
- Uses a YAML configuration file that ends with C(docker.[yml|yaml]).
options:
plugin:
description:
- The name of this plugin. It should always be set to C(community.docker.docker_containers)
for this plugin to recognize it as its own.
type: str
required: true
choices: [ community.docker.docker_containers ]
connection_type:
description:
- Which connection type to use to connect to the containers.
- One way to connect to containers is to use SSH (C(ssh)). For this, the options I(default_ip) and
I(private_ssh_port) are used. This requires that an SSH daemon is running inside the containers.
- Alternatively, C(docker-cli) selects the
R(docker connection plugin,ansible_collections.community.docker.docker_connection),
and C(docker-api) (default) selects the
R(docker_api connection plugin,ansible_collections.community.docker.docker_api_connection).
- When C(docker-api) is used, all Docker daemon configuration values are passed from the inventory plugin
to the connection plugin. This can be controlled with I(configure_docker_daemon).
type: str
default: docker-api
choices:
- ssh
- docker-cli
- docker-api
configure_docker_daemon:
description:
- Whether to pass all Docker daemon configuration from the inventory plugin to the connection plugin.
- Only used when I(connection_type=docker-api).
type: bool
default: true
version_added: 1.8.0
verbose_output:
description:
- Toggle to (not) include all available inspection metadata.
- Note that all top-level keys will be transformed to the format C(docker_xxx).
For example, C(HostConfig) is converted to C(docker_hostconfig).
- If this is C(false), these values can only be used during I(constructed), I(groups), and I(keyed_groups).
- The C(docker) inventory script always added these variables, so for compatibility set this to C(true).
type: bool
default: false
default_ip:
description:
- The IP address to assign to ansible_host when the container's SSH port is mapped to interface
'0.0.0.0'.
- Only used if I(connection_type) is C(ssh).
type: str
default: 127.0.0.1
private_ssh_port:
description:
- The port containers use for SSH.
- Only used if I(connection_type) is C(ssh).
type: int
default: 22
add_legacy_groups:
description:
- "Add the same groups as the C(docker) inventory script does. These are the following:"
- "C(<container id>): contains the container of this ID."
- "C(<container name>): contains the container that has this name."
- "C(<container short id>): contains the containers that have this short ID (first 13 letters of ID)."
- "C(image_<image name>): contains the containers that have the image C(<image name>)."
- "C(stack_<stack name>): contains the containers that belong to the stack C(<stack name>)."
- "C(service_<service name>): contains the containers that belong to the service C(<service name>)"
- "C(<docker_host>): contains the containers which belong to the Docker daemon I(docker_host).
Useful if you run this plugin against multiple Docker daemons."
- "C(running): contains all containers that are running."
- "C(stopped): contains all containers that are not running."
- If this is not set to C(true), you should use keyed groups to add the containers to groups.
See the examples for how to do that.
type: bool
default: false
'''
EXAMPLES = '''
# Minimal example using local Docker daemon
plugin: community.docker.docker_containers
docker_host: unix://var/run/docker.sock
# Minimal example using remote Docker daemon
plugin: community.docker.docker_containers
docker_host: tcp://my-docker-host:2375
# Example using remote Docker daemon with unverified TLS
plugin: community.docker.docker_containers
docker_host: tcp://my-docker-host:2376
tls: true
# Example using remote Docker daemon with verified TLS and client certificate verification
plugin: community.docker.docker_containers
docker_host: tcp://my-docker-host:2376
validate_certs: true
ca_cert: /somewhere/ca.pem
client_key: /somewhere/key.pem
client_cert: /somewhere/cert.pem
# Example using constructed features to create groups
plugin: community.docker.docker_containers
docker_host: tcp://my-docker-host:2375
strict: false
keyed_groups:
# Add containers with primary network foo to a network_foo group
- prefix: network
key: 'docker_hostconfig.NetworkMode'
# Add Linux hosts to an os_linux group
- prefix: os
key: docker_platform
# Example using SSH connection with an explicit fallback for when port 22 has not been
# exported: use container name as ansible_ssh_host and 22 as ansible_ssh_port
plugin: community.docker.docker_containers
connection_type: ssh
compose:
ansible_ssh_host: ansible_ssh_host | default(docker_name[1:], true)
ansible_ssh_port: ansible_ssh_port | default(22, true)
'''
import re
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_native
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible_collections.community.docker.plugins.module_utils.common_api import (
RequestException,
)
from ansible_collections.community.docker.plugins.module_utils.util import (
DOCKER_COMMON_ARGS_VARS,
)
from ansible_collections.community.docker.plugins.plugin_utils.common_api import (
AnsibleDockerClient,
)
from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, DockerException
MIN_DOCKER_API = None
class InventoryModule(BaseInventoryPlugin, Constructable):
''' Host inventory parser for ansible using Docker daemon as source. '''
NAME = 'community.docker.docker_containers'
def _slugify(self, value):
return 'docker_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
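    # For illustration (hypothetical inputs): _slugify('HostConfig') returns
    # 'docker_hostconfig' and _slugify('NetworkSettings') returns
    # 'docker_networksettings'; any non-word character is replaced by '_'.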
def _populate(self, client):
strict = self.get_option('strict')
ssh_port = self.get_option('private_ssh_port')
default_ip = self.get_option('default_ip')
hostname = self.get_option('docker_host')
verbose_output = self.get_option('verbose_output')
connection_type = self.get_option('connection_type')
add_legacy_groups = self.get_option('add_legacy_groups')
try:
params = {
'limit': -1,
'all': 1,
'size': 0,
'trunc_cmd': 0,
'since': None,
'before': None,
}
containers = client.get_json('/containers/json', params=params)
except APIError as exc:
raise AnsibleError("Error listing containers: %s" % to_native(exc))
if add_legacy_groups:
self.inventory.add_group('running')
self.inventory.add_group('stopped')
extra_facts = {}
if self.get_option('configure_docker_daemon'):
for option_name, var_name in DOCKER_COMMON_ARGS_VARS.items():
value = self.get_option(option_name)
if value is not None:
extra_facts[var_name] = value
for container in containers:
id = container.get('Id')
short_id = id[:13]
try:
name = container.get('Names', list())[0].lstrip('/')
full_name = name
except IndexError:
name = short_id
full_name = id
self.inventory.add_host(name)
facts = dict(
docker_name=name,
docker_short_id=short_id
)
full_facts = dict()
try:
inspect = client.get_json('/containers/{0}/json', id)
except APIError as exc:
raise AnsibleError("Error inspecting container %s - %s" % (name, str(exc)))
state = inspect.get('State') or dict()
config = inspect.get('Config') or dict()
labels = config.get('Labels') or dict()
running = state.get('Running')
# Add container to groups
image_name = config.get('Image')
if image_name and add_legacy_groups:
self.inventory.add_group('image_{0}'.format(image_name))
self.inventory.add_host(name, group='image_{0}'.format(image_name))
stack_name = labels.get('com.docker.stack.namespace')
if stack_name:
full_facts['docker_stack'] = stack_name
if add_legacy_groups:
self.inventory.add_group('stack_{0}'.format(stack_name))
self.inventory.add_host(name, group='stack_{0}'.format(stack_name))
service_name = labels.get('com.docker.swarm.service.name')
if service_name:
full_facts['docker_service'] = service_name
if add_legacy_groups:
self.inventory.add_group('service_{0}'.format(service_name))
self.inventory.add_host(name, group='service_{0}'.format(service_name))
if connection_type == 'ssh':
# Figure out ssh IP and Port
try:
# Lookup the public facing port Nat'ed to ssh port.
network_settings = inspect.get('NetworkSettings') or {}
port_settings = network_settings.get('Ports') or {}
port = port_settings.get('%d/tcp' % (ssh_port, ))[0]
except (IndexError, AttributeError, TypeError):
port = dict()
try:
ip = default_ip if port['HostIp'] == '0.0.0.0' else port['HostIp']
except KeyError:
ip = ''
facts.update(dict(
ansible_ssh_host=ip,
ansible_ssh_port=port.get('HostPort', 0),
))
elif connection_type == 'docker-cli':
facts.update(dict(
ansible_host=full_name,
ansible_connection='community.docker.docker',
))
elif connection_type == 'docker-api':
facts.update(dict(
ansible_host=full_name,
ansible_connection='community.docker.docker_api',
))
facts.update(extra_facts)
full_facts.update(facts)
for key, value in inspect.items():
fact_key = self._slugify(key)
full_facts[fact_key] = value
if verbose_output:
facts.update(full_facts)
for key, value in facts.items():
self.inventory.set_variable(name, key, value)
# Use constructed if applicable
# Composed variables
self._set_composite_vars(self.get_option('compose'), full_facts, name, strict=strict)
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
self._add_host_to_composed_groups(self.get_option('groups'), full_facts, name, strict=strict)
# Create groups based on variable values and add the corresponding hosts to it
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), full_facts, name, strict=strict)
# We need to do this last since we also add a group called `name`.
# When we do this before a set_variable() call, the variables are assigned
# to the group, and not to the host.
if add_legacy_groups:
self.inventory.add_group(id)
self.inventory.add_host(name, group=id)
self.inventory.add_group(name)
self.inventory.add_host(name, group=name)
self.inventory.add_group(short_id)
self.inventory.add_host(name, group=short_id)
self.inventory.add_group(hostname)
self.inventory.add_host(name, group=hostname)
if running is True:
self.inventory.add_host(name, group='running')
else:
self.inventory.add_host(name, group='stopped')
def verify_file(self, path):
"""Return the possibly of a file being consumable by this plugin."""
return (
super(InventoryModule, self).verify_file(path) and
path.endswith(('docker.yaml', 'docker.yml')))
def _create_client(self):
return AnsibleDockerClient(self, min_docker_api_version=MIN_DOCKER_API)
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path, cache)
self._read_config_data(path)
client = self._create_client()
try:
self._populate(client)
except DockerException as e:
raise AnsibleError(
'An unexpected Docker error occurred: {0}'.format(e)
)
except RequestException as e:
raise AnsibleError(
'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(e)
)

View File

@@ -0,0 +1,275 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Ximon Eighteen <ximon.eighteen@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: docker_machine
author: Ximon Eighteen (@ximon18)
short_description: Docker Machine inventory source
requirements:
- L(Docker Machine,https://docs.docker.com/machine/)
extends_documentation_fragment:
- constructed
description:
- Get inventory hosts from Docker Machine.
- Uses a YAML configuration file that ends with docker_machine.(yml|yaml).
        - The plugin sets the standard host variables C(ansible_host), C(ansible_port), C(ansible_user) and C(ansible_ssh_private_key_file).
- The plugin stores the Docker Machine 'env' output variables in I(dm_) prefixed host variables.
options:
plugin:
description: token that ensures this is a source file for the C(docker_machine) plugin.
required: true
choices: ['docker_machine', 'community.docker.docker_machine']
daemon_env:
description:
- Whether docker daemon connection environment variables should be fetched, and how to behave if they cannot be fetched.
- With C(require) and C(require-silently), fetch them and skip any host for which they cannot be fetched.
A warning will be issued for any skipped host if the choice is C(require).
                - With C(optional) and C(optional-silently), fetch them and do not skip hosts for which they cannot be fetched.
A warning will be issued for hosts where they cannot be fetched if the choice is C(optional).
- With C(skip), do not attempt to fetch the docker daemon connection environment variables.
- If fetched successfully, the variables will be prefixed with I(dm_) and stored as host variables.
type: str
choices:
- require
- require-silently
- optional
- optional-silently
- skip
default: require
running_required:
description:
- When C(true), hosts which Docker Machine indicates are in a state other than C(running) will be skipped.
type: bool
default: true
verbose_output:
description:
                - When C(true), include all available node metadata (for example C(Image), C(Region), C(Size)) as a JSON object
named C(docker_machine_node_attributes).
type: bool
default: true
'''
EXAMPLES = '''
# Minimal example
plugin: community.docker.docker_machine
# Example using constructed features to create a group per Docker Machine driver
# (https://docs.docker.com/machine/drivers/), for example:
# $ docker-machine create --driver digitalocean ... mymachine
# $ ansible-inventory -i ./path/to/docker_machine.yml --host=mymachine
# {
# ...
# "digitalocean": {
# "hosts": [
# "mymachine"
# ]
# ...
# }
strict: false
keyed_groups:
- separator: ''
key: docker_machine_node_attributes.DriverName
# Example grouping hosts by Docker Machine tag
strict: false
keyed_groups:
- prefix: tag
key: 'dm_tags'
# Example using compose to override the default SSH behaviour of asking the user to accept the remote host key
compose:
ansible_ssh_common_args: '"-o StrictHostKeyChecking=accept-new"'
'''
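# As a hedged illustration of the C(dm_) host variables described above, a
# machine whose `docker-machine env` output matches the sample shown later in
# this file would get (values taken from that sample):
#
#   dm_DOCKER_TLS_VERIFY: '1'
#   dm_DOCKER_HOST: 'tcp://134.209.204.160:2376'
#   dm_DOCKER_CERT_PATH: '/root/.docker/machine/machines/routinator'
#   dm_DOCKER_MACHINE_NAME: 'routinator'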
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_native
from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.common.process import get_bin_path
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
from ansible.utils.display import Display
import json
import re
import subprocess
display = Display()
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
''' Host inventory parser for ansible using Docker machine as source. '''
NAME = 'community.docker.docker_machine'
DOCKER_MACHINE_PATH = None
def _run_command(self, args):
if not self.DOCKER_MACHINE_PATH:
try:
self.DOCKER_MACHINE_PATH = get_bin_path('docker-machine')
except ValueError as e:
raise AnsibleError(to_native(e))
command = [self.DOCKER_MACHINE_PATH]
command.extend(args)
display.debug('Executing command {0}'.format(command))
try:
result = subprocess.check_output(command)
except subprocess.CalledProcessError as e:
display.warning('Exception {0} caught while executing command {1}, this was the original exception: {2}'.format(type(e).__name__, command, e))
raise e
return to_text(result).strip()
def _get_docker_daemon_variables(self, machine_name):
'''
Capture settings from Docker Machine that would be needed to connect to the remote Docker daemon installed on
the Docker Machine remote host. Note: passing '--shell=sh' is a workaround for 'Error: Unknown shell'.
'''
try:
env_lines = self._run_command(['env', '--shell=sh', machine_name]).splitlines()
except subprocess.CalledProcessError:
# This can happen when the machine is created but provisioning is incomplete
return []
# example output of docker-machine env --shell=sh:
# export DOCKER_TLS_VERIFY="1"
# export DOCKER_HOST="tcp://134.209.204.160:2376"
# export DOCKER_CERT_PATH="/root/.docker/machine/machines/routinator"
# export DOCKER_MACHINE_NAME="routinator"
# # Run this command to configure your shell:
# # eval $(docker-machine env --shell=bash routinator)
# capture any of the DOCKER_xxx variables that were output and create Ansible host vars
# with the same name and value but with a dm_ name prefix.
vars = []
for line in env_lines:
match = re.search('(DOCKER_[^=]+)="([^"]+)"', line)
if match:
env_var_name = match.group(1)
env_var_value = match.group(2)
vars.append((env_var_name, env_var_value))
return vars
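    # For example, given the sample `docker-machine env` output above, this
    # returns [('DOCKER_TLS_VERIFY', '1'),
    # ('DOCKER_HOST', 'tcp://134.209.204.160:2376'),
    # ('DOCKER_CERT_PATH', '/root/.docker/machine/machines/routinator'),
    # ('DOCKER_MACHINE_NAME', 'routinator')].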
def _get_machine_names(self):
        # Filter out machines that are not in the Running state, as we probably
        # can't perform any useful actions on them.
ls_command = ['ls', '-q']
if self.get_option('running_required'):
ls_command.extend(['--filter', 'state=Running'])
try:
ls_lines = self._run_command(ls_command)
except subprocess.CalledProcessError:
return []
return ls_lines.splitlines()
def _inspect_docker_machine_host(self, node):
try:
            inspect_lines = self._run_command(['inspect', node])
except subprocess.CalledProcessError:
return None
return json.loads(inspect_lines)
def _ip_addr_docker_machine_host(self, node):
try:
            ip_addr = self._run_command(['ip', node])
except subprocess.CalledProcessError:
return None
return ip_addr
def _should_skip_host(self, machine_name, env_var_tuples, daemon_env):
if not env_var_tuples:
warning_prefix = 'Unable to fetch Docker daemon env vars from Docker Machine for host {0}'.format(machine_name)
if daemon_env in ('require', 'require-silently'):
if daemon_env == 'require':
display.warning('{0}: host will be skipped'.format(warning_prefix))
return True
else: # 'optional', 'optional-silently'
if daemon_env == 'optional':
display.warning('{0}: host will lack dm_DOCKER_xxx variables'.format(warning_prefix))
return False
def _populate(self):
daemon_env = self.get_option('daemon_env')
try:
for self.node in self._get_machine_names():
self.node_attrs = self._inspect_docker_machine_host(self.node)
if not self.node_attrs:
continue
machine_name = self.node_attrs['Driver']['MachineName']
# query `docker-machine env` to obtain remote Docker daemon connection settings in the form of commands
# that could be used to set environment variables to influence a local Docker client:
if daemon_env == 'skip':
env_var_tuples = []
else:
env_var_tuples = self._get_docker_daemon_variables(machine_name)
if self._should_skip_host(machine_name, env_var_tuples, daemon_env):
continue
# add an entry in the inventory for this host
self.inventory.add_host(machine_name)
# check for valid ip address from inspect output, else explicitly use ip command to find host ip address
# this works around an issue seen with Google Compute Platform where the IP address was not available
                # via the 'inspect' subcommand but was via the 'ip' subcommand.
if self.node_attrs['Driver']['IPAddress']:
ip_addr = self.node_attrs['Driver']['IPAddress']
else:
ip_addr = self._ip_addr_docker_machine_host(self.node)
# set standard Ansible remote host connection settings to details captured from `docker-machine`
# see: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html
self.inventory.set_variable(machine_name, 'ansible_host', ip_addr)
self.inventory.set_variable(machine_name, 'ansible_port', self.node_attrs['Driver']['SSHPort'])
self.inventory.set_variable(machine_name, 'ansible_user', self.node_attrs['Driver']['SSHUser'])
self.inventory.set_variable(machine_name, 'ansible_ssh_private_key_file', self.node_attrs['Driver']['SSHKeyPath'])
# set variables based on Docker Machine tags
tags = self.node_attrs['Driver'].get('Tags') or ''
self.inventory.set_variable(machine_name, 'dm_tags', tags)
# set variables based on Docker Machine env variables
for kv in env_var_tuples:
self.inventory.set_variable(machine_name, 'dm_{0}'.format(kv[0]), kv[1])
if self.get_option('verbose_output'):
self.inventory.set_variable(machine_name, 'docker_machine_node_attributes', self.node_attrs)
# Use constructed if applicable
strict = self.get_option('strict')
# Composed variables
self._set_composite_vars(self.get_option('compose'), self.node_attrs, machine_name, strict=strict)
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
self._add_host_to_composed_groups(self.get_option('groups'), self.node_attrs, machine_name, strict=strict)
# Create groups based on variable values and add the corresponding hosts to it
self._add_host_to_keyed_groups(self.get_option('keyed_groups'), self.node_attrs, machine_name, strict=strict)
except Exception as e:
raise AnsibleError('Unable to fetch hosts from Docker Machine, this was the original exception: %s' %
to_native(e), orig_exc=e)
def verify_file(self, path):
"""Return the possibility of a file being consumable by this plugin."""
return (
super(InventoryModule, self).verify_file(path) and
path.endswith(('docker_machine.yaml', 'docker_machine.yml')))
def parse(self, inventory, loader, path, cache=True):
super(InventoryModule, self).parse(inventory, loader, path, cache)
self._read_config_data(path)
self._populate()

View File

@@ -0,0 +1,264 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: docker_swarm
author:
- Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
short_description: Ansible dynamic inventory plugin for Docker swarm nodes.
requirements:
- python >= 2.7
- L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
extends_documentation_fragment:
- constructed
description:
- Reads inventories from the Docker swarm API.
        - Uses a YAML configuration file docker_swarm.(yml|yaml).
        - "The plugin returns the following groups of swarm nodes: C(all) - all hosts; C(worker) - all worker nodes;
          C(manager) - all manager nodes; C(leader) - the swarm leader node;
          C(nonleaders) - all nodes except the swarm leader."
options:
plugin:
            description: The name of this plugin. It should always be set to C(community.docker.docker_swarm)
                for this plugin to recognize it as its own.
type: str
required: true
choices: [ docker_swarm, community.docker.docker_swarm ]
docker_host:
description:
- Socket of a Docker swarm manager node (C(tcp), C(unix)).
- "Use C(unix://var/run/docker.sock) to connect via local socket."
type: str
required: true
aliases: [ docker_url ]
verbose_output:
            description: Toggle to (not) include all available node metadata (for example C(Platform), C(Architecture), C(OS),
                C(EngineVersion)).
type: bool
default: true
tls:
description: Connect using TLS without verifying the authenticity of the Docker host server.
type: bool
default: false
validate_certs:
description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker
host server.
type: bool
default: false
aliases: [ tls_verify ]
client_key:
description: Path to the client's TLS key file.
type: path
aliases: [ tls_client_key, key_path ]
ca_cert:
description: Use a CA certificate when performing server verification by providing the path to a CA
certificate file.
type: path
aliases: [ tls_ca_cert, cacert_path ]
client_cert:
description: Path to the client's TLS certificate file.
type: path
aliases: [ tls_client_cert, cert_path ]
tls_hostname:
description: When verifying the authenticity of the Docker host server, provide the expected name of
the server.
type: str
ssl_version:
description: Provide a valid SSL version number. Default value determined by ssl.py module.
type: str
api_version:
description:
- The version of the Docker API running on the Docker Host.
- Defaults to the latest version of the API supported by Docker SDK for Python.
type: str
aliases: [ docker_api_version ]
timeout:
description:
- The maximum amount of time in seconds to wait on a response from the API.
- If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT)
will be used instead. If the environment variable is not set, the default value will be used.
type: int
default: 60
aliases: [ time_out ]
use_ssh_client:
description:
- For SSH transports, use the C(ssh) CLI tool instead of paramiko.
- Requires Docker SDK for Python 4.4.0 or newer.
type: bool
default: false
version_added: 1.5.0
        include_host_uri:
            description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the
                swarm node in the format C(tcp://172.16.0.1:2376). This value may be used without additional
                modification as the value of option I(docker_host) in Docker Swarm modules when connecting via the API.
                The port defaults to C(2376) when TLS is in use and to C(2375) otherwise; it can be overridden
                with I(include_host_uri_port).
type: bool
default: false
include_host_uri_port:
            description: Override the detected port number included in I(ansible_host_uri).
type: int
'''
EXAMPLES = '''
# Minimal example using local docker
plugin: community.docker.docker_swarm
docker_host: unix://var/run/docker.sock
# Minimal example using remote docker
plugin: community.docker.docker_swarm
docker_host: tcp://my-docker-host:2375
# Example using remote docker with unverified TLS
plugin: community.docker.docker_swarm
docker_host: tcp://my-docker-host:2376
tls: true
# Example using remote docker with verified TLS and client certificate verification
plugin: community.docker.docker_swarm
docker_host: tcp://my-docker-host:2376
validate_certs: true
ca_cert: /somewhere/ca.pem
client_key: /somewhere/key.pem
client_cert: /somewhere/cert.pem
# Example using constructed features to create groups and set ansible_host
plugin: community.docker.docker_swarm
docker_host: tcp://my-docker-host:2375
strict: False
keyed_groups:
# add for example x86_64 hosts to an arch_x86_64 group
- prefix: arch
key: 'Description.Platform.Architecture'
# add for example linux hosts to an os_linux group
- prefix: os
key: 'Description.Platform.OS'
# create a group per node label
  # for example, a node labeled with "production" ends up in group "label_production"
# hint: labels containing special characters will be converted to safe names
- key: 'Spec.Labels'
prefix: label
'''
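# As a hedged sketch of the include_host_uri behaviour documented above (the
# address is illustrative): with include_host_uri: true and TLS enabled, a node
# at 172.16.0.1 gets ansible_host_uri 'tcp://172.16.0.1:2376'; without TLS the
# port falls back to 2375 unless include_host_uri_port overrides it.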
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.common import get_connect_params
from ansible_collections.community.docker.plugins.module_utils.util import update_tls_hostname
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible.parsing.utils.addresses import parse_address
try:
import docker
HAS_DOCKER = True
except ImportError:
HAS_DOCKER = False
class InventoryModule(BaseInventoryPlugin, Constructable):
''' Host inventory parser for ansible using Docker swarm as source. '''
NAME = 'community.docker.docker_swarm'
def _fail(self, msg):
raise AnsibleError(msg)
def _populate(self):
raw_params = dict(
docker_host=self.get_option('docker_host'),
tls=self.get_option('tls'),
tls_verify=self.get_option('validate_certs'),
key_path=self.get_option('client_key'),
cacert_path=self.get_option('ca_cert'),
cert_path=self.get_option('client_cert'),
tls_hostname=self.get_option('tls_hostname'),
api_version=self.get_option('api_version'),
timeout=self.get_option('timeout'),
ssl_version=self.get_option('ssl_version'),
use_ssh_client=self.get_option('use_ssh_client'),
debug=None,
)
update_tls_hostname(raw_params)
connect_params = get_connect_params(raw_params, fail_function=self._fail)
self.client = docker.DockerClient(**connect_params)
self.inventory.add_group('all')
self.inventory.add_group('manager')
self.inventory.add_group('worker')
self.inventory.add_group('leader')
self.inventory.add_group('nonleaders')
if self.get_option('include_host_uri'):
if self.get_option('include_host_uri_port'):
host_uri_port = str(self.get_option('include_host_uri_port'))
elif self.get_option('tls') or self.get_option('validate_certs'):
host_uri_port = '2376'
else:
host_uri_port = '2375'
try:
self.nodes = self.client.nodes.list()
for self.node in self.nodes:
self.node_attrs = self.client.nodes.get(self.node.id).attrs
self.inventory.add_host(self.node_attrs['ID'])
self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role'])
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host',
self.node_attrs['Status']['Addr'])
if self.get_option('include_host_uri'):
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port)
if self.get_option('verbose_output'):
self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs)
if 'ManagerStatus' in self.node_attrs:
if self.node_attrs['ManagerStatus'].get('Leader'):
                        # This is a workaround for a Docker bug where in some cases the leader IP is 0.0.0.0
# Check moby/moby#35437 for details
swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \
self.node_attrs['Status']['Addr']
if self.get_option('include_host_uri'):
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
'tcp://' + swarm_leader_ip + ':' + host_uri_port)
self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip)
self.inventory.add_host(self.node_attrs['ID'], group='leader')
else:
self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
else:
self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
# Use constructed if applicable
strict = self.get_option('strict')
# Composed variables
self._set_composite_vars(self.get_option('compose'),
self.node_attrs,
self.node_attrs['ID'],
strict=strict)
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
self._add_host_to_composed_groups(self.get_option('groups'),
self.node_attrs,
self.node_attrs['ID'],
strict=strict)
# Create groups based on variable values and add the corresponding hosts to it
self._add_host_to_keyed_groups(self.get_option('keyed_groups'),
self.node_attrs,
self.node_attrs['ID'],
strict=strict)
except Exception as e:
raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' %
to_native(e))
def verify_file(self, path):
"""Return the possibly of a file being consumable by this plugin."""
return (
super(InventoryModule, self).verify_file(path) and
path.endswith(('docker_swarm.yaml', 'docker_swarm.yml')))
def parse(self, inventory, loader, path, cache=True):
if not HAS_DOCKER:
raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: '
'https://github.com/docker/docker-py.')
super(InventoryModule, self).parse(inventory, loader, path, cache)
self._read_config_data(path)
self._populate()

View File

@@ -0,0 +1,97 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import traceback
from ansible.module_utils.six import PY2
REQUESTS_IMPORT_ERROR = None
URLLIB3_IMPORT_ERROR = None
BACKPORTS_SSL_MATCH_HOSTNAME_IMPORT_ERROR = None
try:
from requests import Session
from requests.adapters import HTTPAdapter
from requests.exceptions import HTTPError, InvalidSchema
except ImportError:
REQUESTS_IMPORT_ERROR = traceback.format_exc()
class Session(object):
__attrs__ = []
class HTTPAdapter(object):
__attrs__ = []
class HTTPError(Exception):
pass
class InvalidSchema(Exception):
pass
try:
from requests.packages import urllib3
except ImportError:
try:
import urllib3
except ImportError:
URLLIB3_IMPORT_ERROR = traceback.format_exc()
class _HTTPConnectionPool(object):
pass
class FakeURLLIB3(object):
def __init__(self):
self._collections = self
self.poolmanager = self
self.connection = self
self.connectionpool = self
self.RecentlyUsedContainer = object()
self.PoolManager = object()
self.match_hostname = object()
self.HTTPConnectionPool = _HTTPConnectionPool
urllib3 = FakeURLLIB3()
# Monkey-patching match_hostname with a version that supports
# IP-address checking. Not necessary for Python 3.5 and above
if PY2:
try:
from backports.ssl_match_hostname import match_hostname
urllib3.connection.match_hostname = match_hostname
except ImportError:
BACKPORTS_SSL_MATCH_HOSTNAME_IMPORT_ERROR = traceback.format_exc()
def fail_on_missing_imports():
if REQUESTS_IMPORT_ERROR is not None:
from .errors import MissingRequirementException
raise MissingRequirementException(
'You have to install requests',
'requests', REQUESTS_IMPORT_ERROR)
if URLLIB3_IMPORT_ERROR is not None:
from .errors import MissingRequirementException
raise MissingRequirementException(
'You have to install urllib3',
'urllib3', URLLIB3_IMPORT_ERROR)
if BACKPORTS_SSL_MATCH_HOSTNAME_IMPORT_ERROR is not None:
from .errors import MissingRequirementException
raise MissingRequirementException(
'You have to install backports.ssl-match-hostname',
'backports.ssl-match-hostname', BACKPORTS_SSL_MATCH_HOSTNAME_IMPORT_ERROR)
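# Hypothetical caller-side sketch (not part of this file): consumers import the
# shims above and call fail_on_missing_imports() before using them, e.g.
#
#   from ._import_helper import Session, fail_on_missing_imports
#   fail_on_missing_imports()  # raises MissingRequirementException if requests/urllib3 are absent
#   session = Session()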

View File

@@ -0,0 +1,599 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import logging
import struct
from functools import partial
from ansible.module_utils.six import PY3, binary_type, iteritems, string_types, raise_from
from ansible.module_utils.six.moves.urllib.parse import quote
from .. import auth
from .._import_helper import fail_on_missing_imports
from .._import_helper import HTTPError as _HTTPError
from .._import_helper import InvalidSchema as _InvalidSchema
from .._import_helper import Session as _Session
from ..constants import (DEFAULT_NUM_POOLS, DEFAULT_NUM_POOLS_SSH,
DEFAULT_MAX_POOL_SIZE, DEFAULT_TIMEOUT_SECONDS,
DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM,
MINIMUM_DOCKER_API_VERSION, STREAM_HEADER_SIZE_BYTES,
DEFAULT_DATA_CHUNK_SIZE)
from ..errors import (DockerException, InvalidVersion, TLSParameterError, MissingRequirementException,
create_api_error_from_http_exception)
from ..tls import TLSConfig
from ..transport.npipeconn import NpipeHTTPAdapter
from ..transport.npipesocket import PYWIN32_IMPORT_ERROR
from ..transport.unixconn import UnixHTTPAdapter
from ..transport.sshconn import SSHHTTPAdapter, PARAMIKO_IMPORT_ERROR
from ..transport.ssladapter import SSLHTTPAdapter
from ..utils import config, utils, json_stream
from ..utils.decorators import check_resource, update_headers
from ..utils.proxy import ProxyConfig
from ..utils.socket import consume_socket_output, demux_adaptor, frames_iter
from .daemon import DaemonApiMixin
log = logging.getLogger(__name__)
class APIClient(
_Session,
DaemonApiMixin):
"""
A low-level client for the Docker Engine API.
Example:
>>> import docker
>>> client = docker.APIClient(base_url='unix://var/run/docker.sock')
>>> client.version()
{u'ApiVersion': u'1.33',
u'Arch': u'amd64',
u'BuildTime': u'2017-11-19T18:46:37.000000000+00:00',
u'GitCommit': u'f4ffd2511c',
u'GoVersion': u'go1.9.2',
u'KernelVersion': u'4.14.3-1-ARCH',
u'MinAPIVersion': u'1.12',
u'Os': u'linux',
u'Version': u'17.10.0-ce'}
Args:
base_url (str): URL to the Docker server. For example,
``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
version (str): The version of the API to use. Set to ``auto`` to
automatically detect the server's version. Default: ``1.35``
timeout (int): Default timeout for API calls, in seconds.
tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
``True`` to enable it with default options, or pass a
:py:class:`~docker.tls.TLSConfig` object to use custom
configuration.
user_agent (str): Set a custom user agent for requests to the server.
credstore_env (dict): Override environment variables when calling the
credential store process.
use_ssh_client (bool): If set to `True`, an ssh connection is made
via shelling out to the ssh client. Ensure the ssh client is
installed and configured on the host.
max_pool_size (int): The maximum number of connections
to save in the pool.
"""
__attrs__ = _Session.__attrs__ + ['_auth_configs',
'_general_configs',
'_version',
'base_url',
'timeout']
def __init__(self, base_url=None, version=None,
timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
user_agent=DEFAULT_USER_AGENT, num_pools=None,
credstore_env=None, use_ssh_client=False,
max_pool_size=DEFAULT_MAX_POOL_SIZE):
super(APIClient, self).__init__()
fail_on_missing_imports()
if tls and not base_url:
raise TLSParameterError(
'If using TLS, the base_url argument must be provided.'
)
self.base_url = base_url
self.timeout = timeout
self.headers['User-Agent'] = user_agent
self._general_configs = config.load_general_config()
proxy_config = self._general_configs.get('proxies', {})
try:
proxies = proxy_config[base_url]
except KeyError:
proxies = proxy_config.get('default', {})
self._proxy_configs = ProxyConfig.from_dict(proxies)
self._auth_configs = auth.load_config(
config_dict=self._general_configs, credstore_env=credstore_env,
)
self.credstore_env = credstore_env
base_url = utils.parse_host(
base_url, IS_WINDOWS_PLATFORM, tls=bool(tls)
)
# SSH has a different default for num_pools to all other adapters
num_pools = num_pools or DEFAULT_NUM_POOLS_SSH if \
base_url.startswith('ssh://') else DEFAULT_NUM_POOLS
if base_url.startswith('http+unix://'):
self._custom_adapter = UnixHTTPAdapter(
base_url, timeout, pool_connections=num_pools,
max_pool_size=max_pool_size
)
self.mount('http+docker://', self._custom_adapter)
self._unmount('http://', 'https://')
# host part of URL should be unused, but is resolved by requests
# module in proxy_bypass_macosx_sysconf()
self.base_url = 'http+docker://localhost'
elif base_url.startswith('npipe://'):
if not IS_WINDOWS_PLATFORM:
raise DockerException(
'The npipe:// protocol is only supported on Windows'
)
if PYWIN32_IMPORT_ERROR is not None:
raise MissingRequirementException(
'Install pypiwin32 package to enable npipe:// support',
'pywin32',
PYWIN32_IMPORT_ERROR)
self._custom_adapter = NpipeHTTPAdapter(
base_url, timeout, pool_connections=num_pools,
max_pool_size=max_pool_size
)
self.mount('http+docker://', self._custom_adapter)
self.base_url = 'http+docker://localnpipe'
elif base_url.startswith('ssh://'):
if PARAMIKO_IMPORT_ERROR is not None and not use_ssh_client:
raise MissingRequirementException(
'Install paramiko package to enable ssh:// support',
'paramiko',
PARAMIKO_IMPORT_ERROR)
self._custom_adapter = SSHHTTPAdapter(
base_url, timeout, pool_connections=num_pools,
max_pool_size=max_pool_size, shell_out=use_ssh_client
)
self.mount('http+docker://ssh', self._custom_adapter)
self._unmount('http://', 'https://')
self.base_url = 'http+docker://ssh'
else:
# Use SSLAdapter for the ability to specify SSL version
if isinstance(tls, TLSConfig):
tls.configure_client(self)
elif tls:
self._custom_adapter = SSLHTTPAdapter(
pool_connections=num_pools)
self.mount('https://', self._custom_adapter)
self.base_url = base_url
# version detection needs to be after unix adapter mounting
if version is None or (isinstance(version, string_types) and version.lower() == 'auto'):
self._version = self._retrieve_server_version()
else:
self._version = version
if not isinstance(self._version, string_types):
raise DockerException(
'Version parameter must be a string or None. Found {0}'.format(
type(version).__name__
)
)
if utils.version_lt(self._version, MINIMUM_DOCKER_API_VERSION):
raise InvalidVersion(
'API versions below {0} are no longer supported by this '
'library.'.format(MINIMUM_DOCKER_API_VERSION)
)
def _retrieve_server_version(self):
try:
return self.version(api_version=False)["ApiVersion"]
except KeyError:
raise DockerException(
'Invalid response from docker daemon: key "ApiVersion"'
' is missing.'
)
except Exception as e:
raise DockerException(
'Error while fetching server API version: {0}'.format(e)
)
def _set_request_timeout(self, kwargs):
"""Prepare the kwargs for an HTTP request by inserting the timeout
parameter, if not already present."""
kwargs.setdefault('timeout', self.timeout)
return kwargs
@update_headers
def _post(self, url, **kwargs):
return self.post(url, **self._set_request_timeout(kwargs))
@update_headers
def _get(self, url, **kwargs):
return self.get(url, **self._set_request_timeout(kwargs))
@update_headers
def _head(self, url, **kwargs):
return self.head(url, **self._set_request_timeout(kwargs))
@update_headers
def _put(self, url, **kwargs):
return self.put(url, **self._set_request_timeout(kwargs))
@update_headers
def _delete(self, url, **kwargs):
return self.delete(url, **self._set_request_timeout(kwargs))
def _url(self, pathfmt, *args, **kwargs):
for arg in args:
if not isinstance(arg, string_types):
raise ValueError(
'Expected a string but found {0} ({1}) '
'instead'.format(arg, type(arg))
)
quote_f = partial(quote, safe="/:")
args = map(quote_f, args)
if kwargs.get('versioned_api', True):
return '{0}/v{1}{2}'.format(
self.base_url, self._version, pathfmt.format(*args)
)
else:
return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
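    # For illustration (values hypothetical): with base_url
    # 'http+docker://localhost' and _version '1.41',
    # _url('/containers/{0}/json', 'abcd') yields
    # 'http+docker://localhost/v1.41/containers/abcd/json', while passing
    # versioned_api=False omits the '/v1.41' segment.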
def _raise_for_status(self, response):
"""Raises stored :class:`APIError`, if one occurred."""
try:
response.raise_for_status()
except _HTTPError as e:
raise_from(create_api_error_from_http_exception(e), e)
def _result(self, response, json=False, binary=False):
if json and binary:
raise AssertionError('json and binary must not be both True')
self._raise_for_status(response)
if json:
return response.json()
if binary:
return response.content
return response.text
def _post_json(self, url, data, **kwargs):
        # Go <1.1 can't deserialize null to a string,
        # so we do this disgusting thing here.
data2 = {}
if data is not None and isinstance(data, dict):
for k, v in iteritems(data):
if v is not None:
data2[k] = v
elif data is not None:
data2 = data
if 'headers' not in kwargs:
kwargs['headers'] = {}
kwargs['headers']['Content-Type'] = 'application/json'
return self._post(url, data=json.dumps(data2), **kwargs)
def _attach_params(self, override=None):
return override or {
'stdout': 1,
'stderr': 1,
'stream': 1
}
def _get_raw_response_socket(self, response):
self._raise_for_status(response)
if self.base_url == "http+docker://localnpipe":
sock = response.raw._fp.fp.raw.sock
elif self.base_url.startswith('http+docker://ssh'):
sock = response.raw._fp.fp.channel
elif PY3:
sock = response.raw._fp.fp.raw
if self.base_url.startswith("https://"):
sock = sock._sock
else:
sock = response.raw._fp.fp._sock
try:
# Keep a reference to the response to stop it being garbage
# collected. If the response is garbage collected, it will
# close TLS sockets.
sock._response = response
except AttributeError:
# UNIX sockets can't have attributes set on them, but that's
# fine because we won't be doing TLS over them
pass
return sock
def _stream_helper(self, response, decode=False):
"""Generator for data coming from a chunked-encoded HTTP response."""
if response.raw._fp.chunked:
if decode:
for chunk in json_stream.json_stream(self._stream_helper(response, False)):
yield chunk
else:
reader = response.raw
while not reader.closed:
# this read call will block until we get a chunk
data = reader.read(1)
if not data:
break
if reader._fp.chunk_left:
data += reader.read(reader._fp.chunk_left)
yield data
else:
# Response isn't chunked, meaning we probably
# encountered an error immediately
yield self._result(response, json=decode)
def _multiplexed_buffer_helper(self, response):
"""A generator of multiplexed data blocks read from a buffered
response."""
buf = self._result(response, binary=True)
buf_length = len(buf)
walker = 0
while True:
if buf_length - walker < STREAM_HEADER_SIZE_BYTES:
break
header = buf[walker:walker + STREAM_HEADER_SIZE_BYTES]
dummy, length = struct.unpack_from('>BxxxL', header)
start = walker + STREAM_HEADER_SIZE_BYTES
end = start + length
walker = end
yield buf[start:end]
def _multiplexed_response_stream_helper(self, response):
"""A generator of multiplexed data blocks coming from a response
stream."""
# Disable timeout on the underlying socket to prevent
# Read timed out(s) for long running processes
socket = self._get_raw_response_socket(response)
self._disable_socket_timeout(socket)
while True:
header = response.raw.read(STREAM_HEADER_SIZE_BYTES)
if not header:
break
dummy, length = struct.unpack('>BxxxL', header)
if not length:
continue
data = response.raw.read(length)
if not data:
break
yield data
def _stream_raw_result(self, response, chunk_size=1, decode=True):
''' Stream result for TTY-enabled container and raw binary data'''
self._raise_for_status(response)
# Disable timeout on the underlying socket to prevent
# Read timed out(s) for long running processes
socket = self._get_raw_response_socket(response)
self._disable_socket_timeout(socket)
for out in response.iter_content(chunk_size, decode):
yield out
def _read_from_socket(self, response, stream, tty=True, demux=False):
socket = self._get_raw_response_socket(response)
gen = frames_iter(socket, tty)
if demux:
# The generator will output tuples (stdout, stderr)
gen = (demux_adaptor(*frame) for frame in gen)
else:
# The generator will output strings
gen = (data for (dummy, data) in gen)
if stream:
return gen
else:
# Wait for all the frames, concatenate them, and return the result
return consume_socket_output(gen, demux=demux)
def _disable_socket_timeout(self, socket):
""" Depending on the combination of python version and whether we're
connecting over http or https, we might need to access _sock, which
may or may not exist; or we may need to just settimeout on socket
itself, which also may or may not have settimeout on it. To avoid
missing the correct one, we try both.
We also do not want to set the timeout if it is already disabled, as
you run the risk of changing a socket that was non-blocking to
blocking, for example when using gevent.
"""
sockets = [socket, getattr(socket, '_sock', None)]
for s in sockets:
if not hasattr(s, 'settimeout'):
continue
timeout = -1
if hasattr(s, 'gettimeout'):
timeout = s.gettimeout()
# Don't change the timeout if it is already disabled.
if timeout is None or timeout == 0.0:
continue
s.settimeout(None)
@check_resource('container')
def _check_is_tty(self, container):
cont = self.inspect_container(container)
return cont['Config']['Tty']
def _get_result(self, container, stream, res):
return self._get_result_tty(stream, res, self._check_is_tty(container))
def _get_result_tty(self, stream, res, is_tty):
# We should also use raw streaming (without keep-alives)
# if we're dealing with a tty-enabled container.
if is_tty:
return self._stream_raw_result(res) if stream else \
self._result(res, binary=True)
self._raise_for_status(res)
sep = binary_type()
if stream:
return self._multiplexed_response_stream_helper(res)
else:
return sep.join(
list(self._multiplexed_buffer_helper(res))
)
def _unmount(self, *args):
for proto in args:
self.adapters.pop(proto)
def get_adapter(self, url):
try:
return super(APIClient, self).get_adapter(url)
except _InvalidSchema as e:
if self._custom_adapter:
return self._custom_adapter
else:
raise e
@property
def api_version(self):
return self._version
def reload_config(self, dockercfg_path=None):
"""
Force a reload of the auth configuration
Args:
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
otherwise ``$HOME/.dockercfg``)
Returns:
None
"""
self._auth_configs = auth.load_config(
dockercfg_path, credstore_env=self.credstore_env
)
def _set_auth_headers(self, headers):
log.debug('Looking for auth config')
# If we don't have any auth data so far, try reloading the config
# file one more time in case anything showed up in there.
if not self._auth_configs or self._auth_configs.is_empty:
log.debug("No auth config in memory - loading from filesystem")
self._auth_configs = auth.load_config(
credstore_env=self.credstore_env
)
# Send the full auth configuration (if any exists), since the build
# could use any (or all) of the registries.
if self._auth_configs:
auth_data = self._auth_configs.get_all_credentials()
# See https://github.com/docker/docker-py/issues/1683
if (auth.INDEX_URL not in auth_data and
auth.INDEX_NAME in auth_data):
auth_data[auth.INDEX_URL] = auth_data.get(auth.INDEX_NAME, {})
log.debug(
'Sending auth config (%s)',
', '.join(repr(k) for k in auth_data.keys())
)
if auth_data:
headers['X-Registry-Config'] = auth.encode_header(
auth_data
)
else:
log.debug('No auth config found')
def get_binary(self, pathfmt, *args, **kwargs):
return self._result(self._get(self._url(pathfmt, *args, versioned_api=True), **kwargs), binary=True)
def get_json(self, pathfmt, *args, **kwargs):
return self._result(self._get(self._url(pathfmt, *args, versioned_api=True), **kwargs), json=True)
def get_text(self, pathfmt, *args, **kwargs):
return self._result(self._get(self._url(pathfmt, *args, versioned_api=True), **kwargs))
def get_raw_stream(self, pathfmt, *args, **kwargs):
chunk_size = kwargs.pop('chunk_size', DEFAULT_DATA_CHUNK_SIZE)
res = self._get(self._url(pathfmt, *args, versioned_api=True), stream=True, **kwargs)
self._raise_for_status(res)
return self._stream_raw_result(res, chunk_size, False)
def delete_call(self, pathfmt, *args, **kwargs):
self._raise_for_status(self._delete(self._url(pathfmt, *args, versioned_api=True), **kwargs))
def delete_json(self, pathfmt, *args, **kwargs):
return self._result(self._delete(self._url(pathfmt, *args, versioned_api=True), **kwargs), json=True)
def post_call(self, pathfmt, *args, **kwargs):
self._raise_for_status(self._post(self._url(pathfmt, *args, versioned_api=True), **kwargs))
def post_json(self, pathfmt, *args, **kwargs):
data = kwargs.pop('data', None)
self._raise_for_status(self._post_json(self._url(pathfmt, *args, versioned_api=True), data, **kwargs))
def post_json_to_binary(self, pathfmt, *args, **kwargs):
data = kwargs.pop('data', None)
return self._result(self._post_json(self._url(pathfmt, *args, versioned_api=True), data, **kwargs), binary=True)
def post_json_to_json(self, pathfmt, *args, **kwargs):
data = kwargs.pop('data', None)
return self._result(self._post_json(self._url(pathfmt, *args, versioned_api=True), data, **kwargs), json=True)
    def post_json_to_text(self, pathfmt, *args, **kwargs):
        data = kwargs.pop('data', None)
        # The original body ends after popping 'data'; returning the text
        # result here mirrors the sibling helpers above (an assumed
        # completion, not confirmed by the source).
        return self._result(self._post_json(self._url(pathfmt, *args, versioned_api=True), data, **kwargs))
def post_json_to_stream_socket(self, pathfmt, *args, **kwargs):
data = kwargs.pop('data', None)
headers = (kwargs.pop('headers', None) or {}).copy()
headers.update({
'Connection': 'Upgrade',
'Upgrade': 'tcp',
})
return self._get_raw_response_socket(
self._post_json(self._url(pathfmt, *args, versioned_api=True), data, headers=headers, stream=True, **kwargs))
def post_json_to_stream(self, pathfmt, *args, **kwargs):
data = kwargs.pop('data', None)
headers = (kwargs.pop('headers', None) or {}).copy()
headers.update({
'Connection': 'Upgrade',
'Upgrade': 'tcp',
})
stream = kwargs.pop('stream', False)
demux = kwargs.pop('demux', False)
tty = kwargs.pop('tty', False)
return self._read_from_socket(
self._post_json(self._url(pathfmt, *args, versioned_api=True), data, headers=headers, stream=True, **kwargs),
stream,
tty=tty,
demux=demux
)
def post_to_json(self, pathfmt, *args, **kwargs):
return self._result(self._post(self._url(pathfmt, *args, versioned_api=True), **kwargs), json=True)
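# Hypothetical usage sketch of the typed helpers above (socket path, endpoint,
# and container ID are illustrative):
#
#   client = APIClient(base_url='unix://var/run/docker.sock', version='auto')
#   containers = client.get_json('/containers/json', params={'all': 1})
#   details = client.get_json('/containers/{0}/json', containers[0]['Id'])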

View File

@@ -0,0 +1,196 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from datetime import datetime
from .. import auth
from ..utils.utils import datetime_to_timestamp, convert_filters
from ..utils.decorators import minimum_version
from ..types.daemon import CancellableStream
class DaemonApiMixin(object):
@minimum_version('1.25')
def df(self):
"""
Get data usage information.
Returns:
(dict): A dictionary representing different resource categories
and their respective data usage.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/system/df')
return self._result(self._get(url), True)
def events(self, since=None, until=None, filters=None, decode=None):
"""
Get real-time events from the server. Similar to the ``docker events``
command.
Args:
since (UTC datetime or int): Get events from this point
until (UTC datetime or int): Get events until this point
filters (dict): Filter the events by event time, container or image
decode (bool): If set to true, stream will be decoded into dicts on
the fly. False by default.
Returns:
A :py:class:`docker.types.daemon.CancellableStream` generator
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
            >>> for event in client.events(decode=True):
... print(event)
{u'from': u'image/with:tag',
u'id': u'container-id',
u'status': u'start',
u'time': 1423339459}
...
or
>>> events = client.events()
>>> for event in events:
... print(event)
>>> # and cancel from another thread
>>> events.close()
"""
if isinstance(since, datetime):
since = datetime_to_timestamp(since)
if isinstance(until, datetime):
until = datetime_to_timestamp(until)
if filters:
filters = convert_filters(filters)
params = {
'since': since,
'until': until,
'filters': filters
}
url = self._url('/events')
response = self._get(url, params=params, stream=True, timeout=None)
stream = self._stream_helper(response, decode=decode)
return CancellableStream(stream, response)
def info(self):
"""
Display system-wide information. Identical to the ``docker info``
command.
Returns:
(dict): The info as a dict
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(self._get(self._url("/info")), True)
def login(self, username, password=None, email=None, registry=None,
reauth=False, dockercfg_path=None):
"""
Authenticate with a registry. Similar to the ``docker login`` command.
Args:
username (str): The registry username
password (str): The plaintext password
email (str): The email for the registry account
registry (str): URL to the registry. E.g.
``https://index.docker.io/v1/``
reauth (bool): Whether or not to refresh existing authentication on
the Docker server.
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
otherwise ``$HOME/.dockercfg``)
Returns:
(dict): The response from the login request
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
# If we don't have any auth data so far, try reloading the config file
# one more time in case anything showed up in there.
# If dockercfg_path is passed check to see if the config file exists,
# if so load that config.
if dockercfg_path and os.path.exists(dockercfg_path):
self._auth_configs = auth.load_config(
dockercfg_path, credstore_env=self.credstore_env
)
elif not self._auth_configs or self._auth_configs.is_empty:
self._auth_configs = auth.load_config(
credstore_env=self.credstore_env
)
authcfg = self._auth_configs.resolve_authconfig(registry)
# If we found an existing auth config for this registry and username
# combination, we can return it immediately unless reauth is requested.
if authcfg and authcfg.get('username', None) == username \
and not reauth:
return authcfg
req_data = {
'username': username,
'password': password,
'email': email,
'serveraddress': registry,
}
response = self._post_json(self._url('/auth'), data=req_data)
if response.status_code == 200:
self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data)
return self._result(response, json=True)
def ping(self):
"""
        Checks that the server is responsive. An exception will be raised if it
isn't responding.
Returns:
(bool) The response from the server.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(self._get(self._url('/_ping'))) == 'OK'
def version(self, api_version=True):
"""
Returns version information from the server. Similar to the ``docker
version`` command.
Returns:
(dict): The server version information
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url("/version", versioned_api=api_version)
return self._result(self._get(url), json=True)
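# Hypothetical usage of the mixin above through APIClient (registry URL and
# credentials are illustrative):
#
#   client.ping()                    # True when the daemon answers 'OK'
#   client.login('alice', password='secret', registry='https://index.docker.io/v1/')
#   client.version()['ApiVersion']   # for example '1.41'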

View File

@@ -0,0 +1,388 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import json
import logging
from ansible.module_utils.six import iteritems, string_types
from . import errors
from .credentials.store import Store
from .credentials.errors import StoreError, CredentialsNotFound
from .utils import config
INDEX_NAME = 'docker.io'
INDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)
TOKEN_USERNAME = '<token>'
log = logging.getLogger(__name__)
def resolve_repository_name(repo_name):
if '://' in repo_name:
raise errors.InvalidRepository(
'Repository name cannot contain a scheme ({0})'.format(repo_name)
)
index_name, remote_name = split_repo_name(repo_name)
if index_name[0] == '-' or index_name[-1] == '-':
raise errors.InvalidRepository(
'Invalid index name ({0}). Cannot begin or end with a'
' hyphen.'.format(index_name)
)
return resolve_index_name(index_name), remote_name
def resolve_index_name(index_name):
index_name = convert_to_hostname(index_name)
if index_name == 'index.' + INDEX_NAME:
index_name = INDEX_NAME
return index_name
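# For illustration: resolve_index_name('index.docker.io') returns 'docker.io';
# other registries pass through, so resolve_index_name('myregistry:5000')
# returns 'myregistry:5000' (assuming convert_to_hostname leaves plain
# hostnames untouched).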
def get_config_header(client, registry):
log.debug('Looking for auth config')
if not client._auth_configs or client._auth_configs.is_empty:
log.debug(
"No auth config in memory - loading from filesystem"
)
client._auth_configs = load_config(credstore_env=client.credstore_env)
authcfg = resolve_authconfig(
client._auth_configs, registry, credstore_env=client.credstore_env
)
# Do not fail here if no authentication exists for this
# specific registry as we can have a readonly pull. Just
# put the header if we can.
if authcfg:
log.debug('Found auth config')
# auth_config needs to be a dict in the format used by
# auth.py username , password, serveraddress, email
return encode_header(authcfg)
log.debug('No auth config found')
return None
def split_repo_name(repo_name):
parts = repo_name.split('/', 1)
if len(parts) == 1 or (
'.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'
):
# This is a docker index repo (ex: username/foobar or ubuntu)
return INDEX_NAME, repo_name
return tuple(parts)
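# For example: split_repo_name('ubuntu') returns ('docker.io', 'ubuntu'), while
# split_repo_name('myregistry.local:5000/busybox') returns
# ('myregistry.local:5000', 'busybox') because the first component contains
# '.' or ':'.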
def get_credential_store(authconfig, registry):
if not isinstance(authconfig, AuthConfig):
authconfig = AuthConfig(authconfig)
return authconfig.get_credential_store(registry)
class AuthConfig(dict):
def __init__(self, dct, credstore_env=None):
if 'auths' not in dct:
dct['auths'] = {}
self.update(dct)
self._credstore_env = credstore_env
self._stores = {}
@classmethod
def parse_auth(cls, entries, raise_on_error=False):
"""
Parses authentication entries
Args:
entries: Dict of authentication entries.
raise_on_error: If set to true, an invalid format will raise
InvalidConfigFile
Returns:
Authentication registry.
"""
conf = {}
for registry, entry in iteritems(entries):
if not isinstance(entry, dict):
log.debug('Config entry for key %s is not auth config', registry)
# We sometimes fall back to parsing the whole config as if it
# was the auth config by itself, for legacy purposes. In that
# case, we fail silently and return an empty conf if any of the
# keys is not formatted properly.
if raise_on_error:
raise errors.InvalidConfigFile(
'Invalid configuration for registry {0}'.format(
registry
)
)
return {}
if 'identitytoken' in entry:
log.debug('Found an IdentityToken entry for registry %s', registry)
conf[registry] = {
'IdentityToken': entry['identitytoken']
}
continue # Other values are irrelevant if we have a token
if 'auth' not in entry:
# Starting with engine v1.11 (API 1.23), an empty dictionary is
# a valid value in the auths config.
# https://github.com/docker/compose/issues/3265
log.debug('Auth data for %s is absent. Client might be using a credentials store instead.', registry)
conf[registry] = {}
continue
username, password = decode_auth(entry['auth'])
log.debug('Found entry (registry=%s, username=%s)', repr(registry), repr(username))
conf[registry] = {
'username': username,
'password': password,
'email': entry.get('email'),
'serveraddress': registry,
}
return conf
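# Illustrative sketch (not part of the vendored code): given a typical
# config.json 'auths' mapping, parse_auth() decodes the base64 'auth'
# field into username/password entries. The registry key and the
# credentials below are made up:
#
#   AuthConfig.parse_auth({
#       'https://index.docker.io/v1/': {'auth': 'dXNlcjpzM2NyM3Q='},
#   })
#   # -> {'https://index.docker.io/v1/': {
#   #        'username': 'user', 'password': 's3cr3t', 'email': None,
#   #        'serveraddress': 'https://index.docker.io/v1/'}}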
@classmethod
def load_config(cls, config_path, config_dict, credstore_env=None):
"""
Loads authentication data from a Docker configuration file in the given
root directory, or, if config_path is passed, from the given path.
Lookup priority:
explicit config_path parameter > DOCKER_CONFIG environment
variable > ~/.docker/config.json > ~/.dockercfg
"""
if not config_dict:
config_file = config.find_config_file(config_path)
if not config_file:
return cls({}, credstore_env)
try:
with open(config_file) as f:
config_dict = json.load(f)
except (IOError, KeyError, ValueError) as e:
# Likely missing new Docker config file or it's in an
# unknown format, continue to attempt to read old location
# and format.
log.debug(e)
return cls(_load_legacy_config(config_file), credstore_env)
res = {}
if config_dict.get('auths'):
log.debug("Found 'auths' section")
res.update({
'auths': cls.parse_auth(
config_dict.pop('auths'), raise_on_error=True
)
})
if config_dict.get('credsStore'):
log.debug("Found 'credsStore' section")
res.update({'credsStore': config_dict.pop('credsStore')})
if config_dict.get('credHelpers'):
log.debug("Found 'credHelpers' section")
res.update({'credHelpers': config_dict.pop('credHelpers')})
if res:
return cls(res, credstore_env)
log.debug(
"Couldn't find auth-related section ; attempting to interpret "
"as auth-only file"
)
return cls({'auths': cls.parse_auth(config_dict)}, credstore_env)
@property
def auths(self):
return self.get('auths', {})
@property
def creds_store(self):
return self.get('credsStore', None)
@property
def cred_helpers(self):
return self.get('credHelpers', {})
@property
def is_empty(self):
return (
not self.auths and not self.creds_store and not self.cred_helpers
)
def resolve_authconfig(self, registry=None):
"""
Returns the authentication data from the given auth configuration for a
specific registry. As with the Docker client, legacy entries in the
config with full URLs are stripped down to hostnames before checking
for a match. Returns None if no match was found.
"""
if self.creds_store or self.cred_helpers:
store_name = self.get_credential_store(registry)
if store_name is not None:
log.debug('Using credentials store "%s"', store_name)
cfg = self._resolve_authconfig_credstore(registry, store_name)
if cfg is not None:
return cfg
log.debug('No entry in credstore - fetching from auth dict')
# Default to the public index server
registry = resolve_index_name(registry) if registry else INDEX_NAME
log.debug("Looking for auth entry for %s", repr(registry))
if registry in self.auths:
log.debug("Found %s", repr(registry))
return self.auths[registry]
for key, conf in iteritems(self.auths):
if resolve_index_name(key) == registry:
log.debug("Found %s", repr(key))
return conf
log.debug("No entry found")
return None
def _resolve_authconfig_credstore(self, registry, credstore_name):
if not registry or registry == INDEX_NAME:
# The ecosystem is inconsistent about index.docker.io vs.
# docker.io - in that case, it seems the full URL is necessary.
registry = INDEX_URL
log.debug("Looking for auth entry for %s", repr(registry))
store = self._get_store_instance(credstore_name)
try:
data = store.get(registry)
res = {
'ServerAddress': registry,
}
if data['Username'] == TOKEN_USERNAME:
res['IdentityToken'] = data['Secret']
else:
res.update({
'Username': data['Username'],
'Password': data['Secret'],
})
return res
except CredentialsNotFound:
log.debug('No entry found')
return None
except StoreError as e:
raise errors.DockerException(
'Credentials store error: {0}'.format(repr(e))
)
def _get_store_instance(self, name):
if name not in self._stores:
self._stores[name] = Store(
name, environment=self._credstore_env
)
return self._stores[name]
def get_credential_store(self, registry):
if not registry or registry == INDEX_NAME:
registry = INDEX_URL
return self.cred_helpers.get(registry) or self.creds_store
def get_all_credentials(self):
auth_data = self.auths.copy()
if self.creds_store:
# Retrieve all credentials from the default store
store = self._get_store_instance(self.creds_store)
for k in store.list().keys():
auth_data[k] = self._resolve_authconfig_credstore(
k, self.creds_store
)
auth_data[convert_to_hostname(k)] = auth_data[k]
# credHelpers entries take priority over all others
for reg, store_name in self.cred_helpers.items():
auth_data[reg] = self._resolve_authconfig_credstore(
reg, store_name
)
auth_data[convert_to_hostname(reg)] = auth_data[reg]
return auth_data
def add_auth(self, reg, data):
self['auths'][reg] = data
def resolve_authconfig(authconfig, registry=None, credstore_env=None):
if not isinstance(authconfig, AuthConfig):
authconfig = AuthConfig(authconfig, credstore_env)
return authconfig.resolve_authconfig(registry)
def convert_to_hostname(url):
return url.replace('http://', '').replace('https://', '').split('/', 1)[0]
def decode_auth(auth):
if isinstance(auth, string_types):
auth = auth.encode('ascii')
s = base64.b64decode(auth)
login, pwd = s.split(b':', 1)
return login.decode('utf8'), pwd.decode('utf8')
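# Illustrative sketch (not part of the vendored code): decode_auth()
# splits a base64-encoded 'user:password' pair; the credentials here are
# made up:
#
#   decode_auth('dXNlcjpzM2NyM3Q=')  # base64 of b'user:s3cr3t'
#   # -> (u'user', u's3cr3t')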
def encode_header(auth):
auth_json = json.dumps(auth).encode('ascii')
return base64.urlsafe_b64encode(auth_json)
def parse_auth(entries, raise_on_error=False):
"""
Parses authentication entries
Args:
entries: Dict of authentication entries.
raise_on_error: If set to true, an invalid format will raise
InvalidConfigFile
Returns:
Authentication registry.
"""
return AuthConfig.parse_auth(entries, raise_on_error)
def load_config(config_path=None, config_dict=None, credstore_env=None):
return AuthConfig.load_config(config_path, config_dict, credstore_env)
def _load_legacy_config(config_file):
log.debug("Attempting to parse legacy auth file format")
try:
data = []
with open(config_file) as f:
for line in f.readlines():
data.append(line.strip().split(' = ')[1])
if len(data) < 2:
# Not enough data
raise errors.InvalidConfigFile(
'Invalid or empty configuration file!'
)
username, password = decode_auth(data[0])
return {'auths': {
INDEX_NAME: {
'username': username,
'password': password,
'email': data[1],
'serveraddress': INDEX_URL,
}
}}
except Exception as e:
log.debug(e)
pass
log.debug("All parsing attempts failed - returning empty config")
return {}

View File

@@ -0,0 +1,50 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
DEFAULT_DOCKER_API_VERSION = '1.41'
MINIMUM_DOCKER_API_VERSION = '1.21'
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
CONTAINER_LIMITS_KEYS = [
'memory', 'memswap', 'cpushares', 'cpusetcpus'
]
DEFAULT_HTTP_HOST = "127.0.0.1"
DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock"
DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'
BYTE_UNITS = {
'b': 1,
'k': 1024,
'm': 1024 * 1024,
'g': 1024 * 1024 * 1024
}
IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
WINDOWS_LONGPATH_PREFIX = '\\\\?\\'
DEFAULT_USER_AGENT = "ansible-community.docker"
DEFAULT_NUM_POOLS = 25
# The OpenSSH server default value for MaxSessions is 10 which means we can
# use up to 9, leaving the final session for the underlying SSH connection.
# For more details see: https://github.com/docker/docker-py/issues/2246
DEFAULT_NUM_POOLS_SSH = 9
DEFAULT_MAX_POOL_SIZE = 10
DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
DEFAULT_SWARM_ADDR_POOL = ['10.0.0.0/8']
DEFAULT_SWARM_SUBNET_SIZE = 24
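# Illustrative sketch (not part of the vendored constants): BYTE_UNITS is
# intended for parsing human-readable size strings such as '512m'. A
# minimal, assumed usage:
#
#   def parse_bytes(spec):
#       unit = spec[-1].lower()
#       if unit in BYTE_UNITS:
#           return int(spec[:-1]) * BYTE_UNITS[unit]
#       return int(spec)
#
#   parse_bytes('512m')  # -> 536870912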

View File

@@ -0,0 +1,16 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
PROGRAM_PREFIX = 'docker-credential-'
DEFAULT_LINUX_STORE = 'secretservice'
DEFAULT_OSX_STORE = 'osxkeychain'
DEFAULT_WIN32_STORE = 'wincred'

View File

@@ -0,0 +1,38 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class StoreError(RuntimeError):
pass
class CredentialsNotFound(StoreError):
pass
class InitializationError(StoreError):
pass
def process_store_error(cpe, program):
message = cpe.output.decode('utf-8')
if 'credentials not found in native keychain' in message:
return CredentialsNotFound(
'No matching credentials in {0}'.format(
program
)
)
return StoreError(
'Credentials store {0} exited with "{1}".'.format(
program, cpe.output.decode('utf-8').strip()
)
)

View File

@@ -0,0 +1,119 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import errno
import json
import subprocess
from ansible.module_utils.six import PY3, binary_type
from . import constants
from . import errors
from .utils import create_environment_dict
from .utils import find_executable
class Store(object):
def __init__(self, program, environment=None):
""" Create a store object that acts as an interface to
perform the basic operations for storing, retrieving
and erasing credentials using `program`.
"""
self.program = constants.PROGRAM_PREFIX + program
self.exe = find_executable(self.program)
self.environment = environment
if self.exe is None:
raise errors.InitializationError(
'{0} not installed or not available in PATH'.format(
self.program
)
)
def get(self, server):
""" Retrieve credentials for `server`. If no credentials are found,
a `StoreError` will be raised.
"""
if not isinstance(server, binary_type):
server = server.encode('utf-8')
data = self._execute('get', server)
result = json.loads(data.decode('utf-8'))
# docker-credential-pass will return an object for nonexistent servers
# whereas other helpers will exit with returncode != 0. For
# consistency, if no significant data is returned,
# raise CredentialsNotFound
if result['Username'] == '' and result['Secret'] == '':
raise errors.CredentialsNotFound(
'No matching credentials in {0}'.format(self.program)
)
return result
def store(self, server, username, secret):
""" Store credentials for `server`. Raises a `StoreError` if an error
occurs.
"""
data_input = json.dumps({
'ServerURL': server,
'Username': username,
'Secret': secret
}).encode('utf-8')
return self._execute('store', data_input)
def erase(self, server):
""" Erase credentials for `server`. Raises a `StoreError` if an error
occurs.
"""
if not isinstance(server, binary_type):
server = server.encode('utf-8')
self._execute('erase', server)
def list(self):
""" List stored credentials. Requires v0.4.0+ of the helper.
"""
data = self._execute('list', None)
return json.loads(data.decode('utf-8'))
def _execute(self, subcmd, data_input):
output = None
env = create_environment_dict(self.environment)
try:
if PY3:
output = subprocess.check_output(
[self.exe, subcmd], input=data_input, env=env,
)
else:
process = subprocess.Popen(
[self.exe, subcmd], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, env=env,
)
output, dummy = process.communicate(data_input)
if process.returncode != 0:
raise subprocess.CalledProcessError(
returncode=process.returncode, cmd='', output=output
)
except subprocess.CalledProcessError as e:
raise errors.process_store_error(e, self.program)
except OSError as e:
if e.errno == errno.ENOENT:
raise errors.StoreError(
'{0} not installed or not available in PATH'.format(
self.program
)
)
else:
raise errors.StoreError(
'Unexpected OS error "{0}", errno={1}'.format(
e.strerror, e.errno
)
)
return output
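# Illustrative sketch (not part of the vendored code): typical use of the
# Store wrapper. The helper suffix 'desktop' (docker-credential-desktop)
# and the server URL are assumptions:
#
#   store = Store('desktop')
#   creds = store.get('https://index.docker.io/v1/')
#   # -> {'ServerURL': ..., 'Username': ..., 'Secret': ...}
#   # Raises CredentialsNotFound if the helper has no matching entry.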

View File

@@ -0,0 +1,62 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
from ansible.module_utils.six import PY2
if PY2:
from distutils.spawn import find_executable as which
else:
from shutil import which
def find_executable(executable, path=None):
"""
As distutils.spawn.find_executable, but on Windows, look up
every extension declared in PATHEXT instead of just `.exe`
"""
if not PY2:
# shutil.which() already uses PATHEXT on Windows, so on
# Python 3 we can simply use shutil.which() in all cases.
# (https://github.com/docker/docker-py/commit/42789818bed5d86b487a030e2e60b02bf0cfa284)
return which(executable, path=path)
if sys.platform != 'win32':
return which(executable, path)
if path is None:
path = os.environ['PATH']
paths = path.split(os.pathsep)
extensions = os.environ.get('PATHEXT', '.exe').split(os.pathsep)
base, ext = os.path.splitext(executable)
if not os.path.isfile(executable):
for p in paths:
for ext in extensions:
f = os.path.join(p, base + ext)
if os.path.isfile(f):
return f
return None
else:
return executable
def create_environment_dict(overrides):
"""
Create and return a copy of os.environ with the specified overrides
"""
result = os.environ.copy()
result.update(overrides or {})
return result

View File

@@ -0,0 +1,223 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ._import_helper import HTTPError as _HTTPError
from ansible.module_utils.six import raise_from
class DockerException(Exception):
"""
A base class from which all other exceptions inherit.
If you want to catch all errors that the Docker SDK might raise,
catch this base exception.
"""
def create_api_error_from_http_exception(e):
"""
Create a suitable APIError from requests.exceptions.HTTPError.
"""
response = e.response
try:
explanation = response.json()['message']
except ValueError:
explanation = (response.content or '').strip()
cls = APIError
if response.status_code == 404:
if explanation and ('No such image' in str(explanation) or
'not found: does not exist or no pull access'
in str(explanation) or
'repository does not exist' in str(explanation)):
cls = ImageNotFound
else:
cls = NotFound
raise_from(cls(e, response=response, explanation=explanation), e)
class APIError(_HTTPError, DockerException):
"""
An HTTP error from the API.
"""
def __init__(self, message, response=None, explanation=None):
# requests 1.2 supports response as a keyword argument, but
# requests 1.1 doesn't
super(APIError, self).__init__(message)
self.response = response
self.explanation = explanation
def __str__(self):
message = super(APIError, self).__str__()
if self.is_client_error():
message = '{0} Client Error for {1}: {2}'.format(
self.response.status_code, self.response.url,
self.response.reason)
elif self.is_server_error():
message = '{0} Server Error for {1}: {2}'.format(
self.response.status_code, self.response.url,
self.response.reason)
if self.explanation:
message = '{0} ("{1}")'.format(message, self.explanation)
return message
@property
def status_code(self):
if self.response is not None:
return self.response.status_code
def is_error(self):
return self.is_client_error() or self.is_server_error()
def is_client_error(self):
if self.status_code is None:
return False
return 400 <= self.status_code < 500
def is_server_error(self):
if self.status_code is None:
return False
return 500 <= self.status_code < 600
class NotFound(APIError):
pass
class ImageNotFound(NotFound):
pass
class InvalidVersion(DockerException):
pass
class InvalidRepository(DockerException):
pass
class InvalidConfigFile(DockerException):
pass
class InvalidArgument(DockerException):
pass
class DeprecatedMethod(DockerException):
pass
class TLSParameterError(DockerException):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg + (". TLS configurations should map the Docker CLI "
"client configurations. See "
"https://docs.docker.com/engine/articles/https/ "
"for API details.")
class NullResource(DockerException, ValueError):
pass
class ContainerError(DockerException):
"""
Represents a container that has exited with a non-zero exit code.
"""
def __init__(self, container, exit_status, command, image, stderr):
self.container = container
self.exit_status = exit_status
self.command = command
self.image = image
self.stderr = stderr
err = ": {0}".format(stderr) if stderr is not None else ""
msg = ("Command '{0}' in image '{1}' returned non-zero exit "
"status {2}{3}").format(command, image, exit_status, err)
super(ContainerError, self).__init__(msg)
class StreamParseError(RuntimeError):
def __init__(self, reason):
self.msg = reason
class BuildError(DockerException):
def __init__(self, reason, build_log):
super(BuildError, self).__init__(reason)
self.msg = reason
self.build_log = build_log
class ImageLoadError(DockerException):
pass
def create_unexpected_kwargs_error(name, kwargs):
quoted_kwargs = ["'{0}'".format(k) for k in sorted(kwargs)]
text = ["{0}() ".format(name)]
if len(quoted_kwargs) == 1:
text.append("got an unexpected keyword argument ")
else:
text.append("got unexpected keyword arguments ")
text.append(', '.join(quoted_kwargs))
return TypeError(''.join(text))
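# Illustrative sketch (not part of the vendored code): what
# create_unexpected_kwargs_error() produces; the function name and the
# keyword are made up:
#
#   create_unexpected_kwargs_error('run', {'foo': 1})
#   # -> TypeError("run() got an unexpected keyword argument 'foo'")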
class MissingContextParameter(DockerException):
def __init__(self, param):
self.param = param
def __str__(self):
return ("missing parameter: {0}".format(self.param))
class ContextAlreadyExists(DockerException):
def __init__(self, name):
self.name = name
def __str__(self):
return ("context {0} already exists".format(self.name))
class ContextException(DockerException):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return (self.msg)
class ContextNotFound(DockerException):
def __init__(self, name):
self.name = name
def __str__(self):
return ("context '{0}' not found".format(self.name))
class MissingRequirementException(DockerException):
def __init__(self, msg, requirement, import_exception):
self.msg = msg
self.requirement = requirement
self.import_exception = import_exception
def __str__(self):
return (self.msg)

View File

@@ -0,0 +1,122 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import ssl
import sys
from . import errors
from .transport.ssladapter import SSLHTTPAdapter
class TLSConfig(object):
"""
TLS configuration.
Args:
client_cert (tuple of str): Path to client cert, path to client key.
ca_cert (str): Path to CA cert file.
verify (bool or str): This can be ``False`` or a path to a CA cert
file.
ssl_version (int): A valid `SSL version`_.
assert_hostname (bool): Verify the hostname of the server.
.. _`SSL version`:
https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
"""
cert = None
ca_cert = None
verify = None
ssl_version = None
def __init__(self, client_cert=None, ca_cert=None, verify=None,
ssl_version=None, assert_hostname=None,
assert_fingerprint=None):
# Argument compatibility/mapping with
# https://docs.docker.com/engine/articles/https/
# This diverges from the Docker CLI in that users can specify 'tls'
# here, but also disable any public/default CA pool verification by
# leaving verify=False
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
# If the user provides an SSL version, we should use their preference
if ssl_version:
self.ssl_version = ssl_version
elif (sys.version_info.major, sys.version_info.minor) < (3, 6):
# If the user provides no ssl version, we should default to
# TLSv1_2. This option is the most secure, and will work for the
# majority of users with reasonably up-to-date software. However,
# before doing so, detect openssl version to ensure we can support
# it.
if ssl.OPENSSL_VERSION_INFO[:3] >= (1, 0, 1) and hasattr(
ssl, 'PROTOCOL_TLSv1_2'):
# If the OpenSSL version is high enough to support TLSv1_2,
# then we should use it.
self.ssl_version = getattr(ssl, 'PROTOCOL_TLSv1_2')
else:
# Otherwise, TLS v1.0 seems to be the safest default;
# SSLv23 fails in mysterious ways:
# https://github.com/docker/docker-py/issues/963
self.ssl_version = ssl.PROTOCOL_TLSv1
else:
self.ssl_version = ssl.PROTOCOL_TLS_CLIENT
# "client_cert" must have both or neither cert/key files. In
# either case, Alert the user when both are expected, but any are
# missing.
if client_cert:
try:
tls_cert, tls_key = client_cert
except ValueError:
raise errors.TLSParameterError(
'client_cert must be a tuple of'
' (client certificate, key file)'
)
if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
not os.path.isfile(tls_key)):
raise errors.TLSParameterError(
'Path to a certificate and key files must be provided'
' through the client_cert param'
)
self.cert = (tls_cert, tls_key)
# If verify is set, make sure the cert exists
self.verify = verify
self.ca_cert = ca_cert
if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
raise errors.TLSParameterError(
'Invalid CA certificate provided for `ca_cert`.'
)
def configure_client(self, client):
"""
Configure a client with these TLS options.
"""
client.ssl_version = self.ssl_version
if self.verify and self.ca_cert:
client.verify = self.ca_cert
else:
client.verify = self.verify
if self.cert:
client.cert = self.cert
client.mount('https://', SSLHTTPAdapter(
ssl_version=self.ssl_version,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint,
))
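# Illustrative sketch (not part of the vendored code): constructing a
# TLSConfig the way a client typically would. All file paths and the
# session object are assumptions:
#
#   tls_config = TLSConfig(
#       client_cert=('/certs/cert.pem', '/certs/key.pem'),
#       ca_cert='/certs/ca.pem',
#       verify=True,
#   )
#   tls_config.configure_client(session)  # session: requests-like client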

View File

@@ -0,0 +1,20 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from .._import_helper import HTTPAdapter as _HTTPAdapter
class BaseHTTPAdapter(_HTTPAdapter):
def close(self):
super(BaseHTTPAdapter, self).close()
if hasattr(self, 'pools'):
self.pools.clear()

View File

@@ -0,0 +1,119 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.six import PY3
from ansible.module_utils.six.moves.queue import Empty
from .. import constants
from .._import_helper import HTTPAdapter, urllib3
from .basehttpadapter import BaseHTTPAdapter
from .npipesocket import NpipeSocket
if PY3:
import http.client as httplib
else:
import httplib
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class NpipeHTTPConnection(httplib.HTTPConnection, object):
def __init__(self, npipe_path, timeout=60):
super(NpipeHTTPConnection, self).__init__(
'localhost', timeout=timeout
)
self.npipe_path = npipe_path
self.timeout = timeout
def connect(self):
sock = NpipeSocket()
sock.settimeout(self.timeout)
sock.connect(self.npipe_path)
self.sock = sock
class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, npipe_path, timeout=60, maxsize=10):
super(NpipeHTTPConnectionPool, self).__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.npipe_path = npipe_path
self.timeout = timeout
def _new_conn(self):
return NpipeHTTPConnection(
self.npipe_path, self.timeout
)
# When re-using connections, urllib3 tries to call select() on our
# NpipeSocket instance, causing a crash. To circumvent this, we override
# _get_conn, where that check happens.
def _get_conn(self, timeout):
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
except Empty:
if self.block:
raise urllib3.exceptions.EmptyPoolError(
self,
"Pool reached maximum size and no more "
"connections are allowed."
)
pass # Oh well, we'll create a new connection then
return conn or self._new_conn()
class NpipeHTTPAdapter(BaseHTTPAdapter):
__attrs__ = HTTPAdapter.__attrs__ + ['npipe_path',
'pools',
'timeout',
'max_pool_size']
def __init__(self, base_url, timeout=60,
pool_connections=constants.DEFAULT_NUM_POOLS,
max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
self.npipe_path = base_url.replace('npipe://', '')
self.timeout = timeout
self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
super(NpipeHTTPAdapter, self).__init__()
def get_connection(self, url, proxies=None):
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
pool = NpipeHTTPConnectionPool(
self.npipe_path, self.timeout,
maxsize=self.max_pool_size
)
self.pools[url] = pool
return pool
def request_url(self, request, proxies):
# The select_proxy utility in requests errors out when the provided URL
# doesn't have a hostname, like is the case when using a UNIX socket.
# Since proxies are an irrelevant notion in the case of UNIX sockets
# anyway, we simply return the path URL directly.
# See also: https://github.com/docker/docker-sdk-python/issues/811
return request.path_url
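# Illustrative sketch (not part of the vendored code): wiring the adapter
# into a requests-style session on Windows. The session object and the
# mount prefix are assumptions:
#
#   adapter = NpipeHTTPAdapter('npipe:////./pipe/docker_engine')
#   session.mount('http+docker://', adapter)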

View File

@@ -0,0 +1,236 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import functools
import io
import time
import traceback
from ansible.module_utils.six import PY2
PYWIN32_IMPORT_ERROR = None
try:
import win32file
import win32pipe
except ImportError:
PYWIN32_IMPORT_ERROR = traceback.format_exc()
cERROR_PIPE_BUSY = 0xe7
cSECURITY_SQOS_PRESENT = 0x100000
cSECURITY_ANONYMOUS = 0
MAXIMUM_RETRY_COUNT = 10
def check_closed(f):
@functools.wraps(f)
def wrapped(self, *args, **kwargs):
if self._closed:
raise RuntimeError(
'Can not reuse socket after connection was closed.'
)
return f(self, *args, **kwargs)
return wrapped
class NpipeSocket(object):
""" Partial implementation of the socket API over windows named pipes.
This implementation is only designed to be used as a client socket,
and server-specific methods (bind, listen, accept...) are not
implemented.
"""
def __init__(self, handle=None):
self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
self._handle = handle
self._closed = False
def accept(self):
raise NotImplementedError()
def bind(self, address):
raise NotImplementedError()
def close(self):
self._handle.Close()
self._closed = True
@check_closed
def connect(self, address, retry_count=0):
try:
handle = win32file.CreateFile(
address,
win32file.GENERIC_READ | win32file.GENERIC_WRITE,
0,
None,
win32file.OPEN_EXISTING,
cSECURITY_ANONYMOUS | cSECURITY_SQOS_PRESENT,
0
)
except win32pipe.error as e:
# See Remarks:
# https://msdn.microsoft.com/en-us/library/aa365800.aspx
if e.winerror == cERROR_PIPE_BUSY:
# Another program or thread has grabbed our pipe instance
# before we got to it. Wait for availability and attempt to
# connect again.
retry_count = retry_count + 1
if (retry_count < MAXIMUM_RETRY_COUNT):
time.sleep(1)
return self.connect(address, retry_count)
raise e
self.flags = win32pipe.GetNamedPipeInfo(handle)[0]
self._handle = handle
self._address = address
@check_closed
def connect_ex(self, address):
return self.connect(address)
@check_closed
def detach(self):
self._closed = True
return self._handle
@check_closed
def dup(self):
return NpipeSocket(self._handle)
def getpeername(self):
return self._address
def getsockname(self):
return self._address
def getsockopt(self, level, optname, buflen=None):
raise NotImplementedError()
def ioctl(self, control, option):
raise NotImplementedError()
def listen(self, backlog):
raise NotImplementedError()
def makefile(self, mode=None, bufsize=None):
if mode.strip('b') != 'r':
raise NotImplementedError()
rawio = NpipeFileIOBase(self)
if bufsize is None or bufsize <= 0:
bufsize = io.DEFAULT_BUFFER_SIZE
return io.BufferedReader(rawio, buffer_size=bufsize)
@check_closed
def recv(self, bufsize, flags=0):
err, data = win32file.ReadFile(self._handle, bufsize)
return data
@check_closed
def recvfrom(self, bufsize, flags=0):
data = self.recv(bufsize, flags)
return (data, self._address)
@check_closed
def recvfrom_into(self, buf, nbytes=0, flags=0):
return self.recv_into(buf, nbytes, flags), self._address
@check_closed
def recv_into(self, buf, nbytes=0):
if PY2:
return self._recv_into_py2(buf, nbytes)
readbuf = buf
if not isinstance(buf, memoryview):
readbuf = memoryview(buf)
err, data = win32file.ReadFile(
self._handle,
readbuf[:nbytes] if nbytes else readbuf
)
return len(data)
def _recv_into_py2(self, buf, nbytes):
err, data = win32file.ReadFile(self._handle, nbytes or len(buf))
n = len(data)
buf[:n] = data
return n
@check_closed
def send(self, string, flags=0):
err, nbytes = win32file.WriteFile(self._handle, string)
return nbytes
@check_closed
def sendall(self, string, flags=0):
return self.send(string, flags)
@check_closed
def sendto(self, string, address):
self.connect(address)
return self.send(string)
def setblocking(self, flag):
if flag:
return self.settimeout(None)
return self.settimeout(0)
def settimeout(self, value):
if value is None:
# Blocking mode
self._timeout = win32pipe.NMPWAIT_WAIT_FOREVER
elif not isinstance(value, (float, int)) or value < 0:
raise ValueError('Timeout value out of range')
elif value == 0:
# Non-blocking mode
self._timeout = win32pipe.NMPWAIT_NO_WAIT
else:
# Timeout mode - Value converted to milliseconds
self._timeout = value * 1000
def gettimeout(self):
return self._timeout
def setsockopt(self, level, optname, value):
raise NotImplementedError()
@check_closed
def shutdown(self, how):
return self.close()
class NpipeFileIOBase(io.RawIOBase):
def __init__(self, npipe_socket):
self.sock = npipe_socket
def close(self):
super(NpipeFileIOBase, self).close()
self.sock = None
def fileno(self):
return self.sock.fileno()
def isatty(self):
return False
def readable(self):
return True
def readinto(self, buf):
return self.sock.recv_into(buf)
def seekable(self):
return False
def writable(self):
return False

View File

@@ -0,0 +1,275 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import logging
import os
import signal
import socket
import subprocess
import traceback
from ansible.module_utils.six import PY3
from ansible.module_utils.six.moves.queue import Empty
from ansible.module_utils.six.moves.urllib_parse import urlparse
from .basehttpadapter import BaseHTTPAdapter
from .. import constants
if PY3:
import http.client as httplib
else:
import httplib
from .._import_helper import HTTPAdapter, urllib3
PARAMIKO_IMPORT_ERROR = None
try:
import paramiko
except ImportError:
PARAMIKO_IMPORT_ERROR = traceback.format_exc()
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class SSHSocket(socket.socket):
def __init__(self, host):
super(SSHSocket, self).__init__(
socket.AF_INET, socket.SOCK_STREAM)
self.host = host
self.port = None
self.user = None
if ':' in self.host:
self.host, self.port = self.host.split(':')
if '@' in self.host:
self.user, self.host = self.host.split('@')
self.proc = None
def connect(self, **kwargs):
args = ['ssh']
if self.user:
args = args + ['-l', self.user]
if self.port:
args = args + ['-p', self.port]
args = args + ['--', self.host, 'docker system dial-stdio']
preexec_func = None
if not constants.IS_WINDOWS_PLATFORM:
def f():
signal.signal(signal.SIGINT, signal.SIG_IGN)
preexec_func = f
env = dict(os.environ)
# drop LD_LIBRARY_PATH and SSL_CERT_FILE
env.pop('LD_LIBRARY_PATH', None)
env.pop('SSL_CERT_FILE', None)
self.proc = subprocess.Popen(
args,
env=env,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
preexec_fn=preexec_func)
def _write(self, data):
if not self.proc or self.proc.stdin.closed:
raise Exception('SSH subprocess not initiated. '
'connect() must be called first.')
written = self.proc.stdin.write(data)
self.proc.stdin.flush()
return written
def sendall(self, data):
self._write(data)
def send(self, data):
return self._write(data)
def recv(self, n):
if not self.proc:
raise Exception('SSH subprocess not initiated. '
'connect() must be called first.')
return self.proc.stdout.read(n)
def makefile(self, mode):
if not self.proc:
self.connect()
if PY3:
self.proc.stdout.channel = self
return self.proc.stdout
def close(self):
if not self.proc or self.proc.stdin.closed:
return
self.proc.stdin.write(b'\n\n')
self.proc.stdin.flush()
self.proc.terminate()
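# Illustrative sketch (not part of the vendored code): for a host string
# such as 'user@host:2222' (values made up), SSHSocket.connect() shells
# out to roughly:
#
#   ssh -l user -p 2222 -- host docker system dial-stdio
#
# and then speaks HTTP over the subprocess's stdin/stdout.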
class SSHConnection(httplib.HTTPConnection, object):
def __init__(self, ssh_transport=None, timeout=60, host=None):
super(SSHConnection, self).__init__(
'localhost', timeout=timeout
)
self.ssh_transport = ssh_transport
self.timeout = timeout
self.ssh_host = host
def connect(self):
if self.ssh_transport:
sock = self.ssh_transport.open_session()
sock.settimeout(self.timeout)
sock.exec_command('docker system dial-stdio')
else:
sock = SSHSocket(self.ssh_host)
sock.settimeout(self.timeout)
sock.connect()
self.sock = sock
class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
scheme = 'ssh'
def __init__(self, ssh_client=None, timeout=60, maxsize=10, host=None):
super(SSHConnectionPool, self).__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.ssh_transport = None
self.timeout = timeout
if ssh_client:
self.ssh_transport = ssh_client.get_transport()
self.ssh_host = host
def _new_conn(self):
return SSHConnection(self.ssh_transport, self.timeout, self.ssh_host)
# When re-using connections, urllib3 calls fileno() on our
# SSH channel instance, quickly overloading our fd limit. To avoid this,
# we override _get_conn
def _get_conn(self, timeout):
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
except Empty:
if self.block:
raise urllib3.exceptions.EmptyPoolError(
self,
"Pool reached maximum size and no more "
"connections are allowed."
)
pass # Oh well, we'll create a new connection then
return conn or self._new_conn()
class SSHHTTPAdapter(BaseHTTPAdapter):
__attrs__ = HTTPAdapter.__attrs__ + [
'pools', 'timeout', 'ssh_client', 'ssh_params', 'max_pool_size'
]
def __init__(self, base_url, timeout=60,
pool_connections=constants.DEFAULT_NUM_POOLS,
max_pool_size=constants.DEFAULT_MAX_POOL_SIZE,
shell_out=False):
self.ssh_client = None
if not shell_out:
self._create_paramiko_client(base_url)
self._connect()
self.ssh_host = base_url
if base_url.startswith('ssh://'):
self.ssh_host = base_url[len('ssh://'):]
self.timeout = timeout
self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
super(SSHHTTPAdapter, self).__init__()
def _create_paramiko_client(self, base_url):
logging.getLogger("paramiko").setLevel(logging.WARNING)
self.ssh_client = paramiko.SSHClient()
base_url = urlparse(base_url)
self.ssh_params = {
"hostname": base_url.hostname,
"port": base_url.port,
"username": base_url.username,
}
ssh_config_file = os.path.expanduser("~/.ssh/config")
if os.path.exists(ssh_config_file):
conf = paramiko.SSHConfig()
with open(ssh_config_file) as f:
conf.parse(f)
host_config = conf.lookup(base_url.hostname)
if 'proxycommand' in host_config:
self.ssh_params["sock"] = paramiko.ProxyCommand(
host_config['proxycommand']
)
if 'hostname' in host_config:
self.ssh_params['hostname'] = host_config['hostname']
if base_url.port is None and 'port' in host_config:
self.ssh_params['port'] = host_config['port']
if base_url.username is None and 'user' in host_config:
self.ssh_params['username'] = host_config['user']
if 'identityfile' in host_config:
self.ssh_params['key_filename'] = host_config['identityfile']
self.ssh_client.load_system_host_keys()
self.ssh_client.set_missing_host_key_policy(paramiko.RejectPolicy())
def _connect(self):
if self.ssh_client:
self.ssh_client.connect(**self.ssh_params)
def get_connection(self, url, proxies=None):
if not self.ssh_client:
return SSHConnectionPool(
ssh_client=self.ssh_client,
timeout=self.timeout,
maxsize=self.max_pool_size,
host=self.ssh_host
)
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
# Connection is closed; try a reconnect
if self.ssh_client and not self.ssh_client.get_transport():
self._connect()
pool = SSHConnectionPool(
ssh_client=self.ssh_client,
timeout=self.timeout,
maxsize=self.max_pool_size,
host=self.ssh_host
)
self.pools[url] = pool
return pool
def close(self):
super(SSHHTTPAdapter, self).close()
if self.ssh_client:
self.ssh_client.close()

View File

@@ -0,0 +1,74 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
""" Resolves OpenSSL issues in some servers:
https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
https://github.com/kennethreitz/requests/pull/799
"""
import sys
from ansible_collections.community.docker.plugins.module_utils.version import StrictVersion
from .._import_helper import HTTPAdapter, urllib3
from .basehttpadapter import BaseHTTPAdapter
PoolManager = urllib3.poolmanager.PoolManager
class SSLHTTPAdapter(BaseHTTPAdapter):
'''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
__attrs__ = HTTPAdapter.__attrs__ + ['assert_fingerprint',
'assert_hostname',
'ssl_version']
def __init__(self, ssl_version=None, assert_hostname=None,
assert_fingerprint=None, **kwargs):
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
super(SSLHTTPAdapter, self).__init__(**kwargs)
def init_poolmanager(self, connections, maxsize, block=False):
kwargs = {
'num_pools': connections,
'maxsize': maxsize,
'block': block,
'assert_hostname': self.assert_hostname,
'assert_fingerprint': self.assert_fingerprint,
}
if self.ssl_version and self.can_override_ssl_version():
kwargs['ssl_version'] = self.ssl_version
self.poolmanager = PoolManager(**kwargs)
def get_connection(self, *args, **kwargs):
"""
Ensure assert_hostname is set correctly on our pool
We already take care of a normal poolmanager via init_poolmanager
But we still need to take care of when there is a proxy poolmanager
"""
conn = super(SSLHTTPAdapter, self).get_connection(*args, **kwargs)
if conn.assert_hostname != self.assert_hostname:
conn.assert_hostname = self.assert_hostname
return conn
def can_override_ssl_version(self):
urllib_ver = urllib3.__version__.split('-')[0]
if urllib_ver is None:
return False
if urllib_ver == 'dev':
return True
return StrictVersion(urllib_ver) > StrictVersion('1.5')

View File

@@ -0,0 +1,123 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import socket
from ansible.module_utils.six import PY2
from ansible.module_utils.six.moves import http_client as httplib
from .basehttpadapter import BaseHTTPAdapter
from .. import constants
from .._import_helper import HTTPAdapter, urllib3
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class UnixHTTPResponse(httplib.HTTPResponse, object):
def __init__(self, sock, *args, **kwargs):
disable_buffering = kwargs.pop('disable_buffering', False)
if PY2:
# FIXME: We may need to disable buffering on Py3 as well,
# but there's no clear way to do it at the moment. See:
# https://github.com/docker/docker-py/issues/1799
kwargs['buffering'] = not disable_buffering
super(UnixHTTPResponse, self).__init__(sock, *args, **kwargs)
class UnixHTTPConnection(httplib.HTTPConnection, object):
def __init__(self, base_url, unix_socket, timeout=60):
super(UnixHTTPConnection, self).__init__(
'localhost', timeout=timeout
)
self.base_url = base_url
self.unix_socket = unix_socket
self.timeout = timeout
self.disable_buffering = False
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
sock.connect(self.unix_socket)
self.sock = sock
def putheader(self, header, *values):
super(UnixHTTPConnection, self).putheader(header, *values)
if header == 'Connection' and 'Upgrade' in values:
self.disable_buffering = True
def response_class(self, sock, *args, **kwargs):
if self.disable_buffering:
kwargs['disable_buffering'] = True
return UnixHTTPResponse(sock, *args, **kwargs)
class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
super(UnixHTTPConnectionPool, self).__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.base_url = base_url
self.socket_path = socket_path
self.timeout = timeout
def _new_conn(self):
return UnixHTTPConnection(
self.base_url, self.socket_path, self.timeout
)
class UnixHTTPAdapter(BaseHTTPAdapter):
__attrs__ = HTTPAdapter.__attrs__ + ['pools',
'socket_path',
'timeout',
'max_pool_size']
def __init__(self, socket_url, timeout=60,
pool_connections=constants.DEFAULT_NUM_POOLS,
max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
socket_path = socket_url.replace('http+unix://', '')
if not socket_path.startswith('/'):
socket_path = '/' + socket_path
self.socket_path = socket_path
self.timeout = timeout
self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
super(UnixHTTPAdapter, self).__init__()
def get_connection(self, url, proxies=None):
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
pool = UnixHTTPConnectionPool(
url, self.socket_path, self.timeout,
maxsize=self.max_pool_size
)
self.pools[url] = pool
return pool
def request_url(self, request, proxies):
# The select_proxy utility in requests errors out when the provided URL
# doesn't have a hostname, like is the case when using a UNIX socket.
# Since proxies are an irrelevant notion in the case of UNIX sockets
# anyway, we simply return the path URL directly.
# See also: https://github.com/docker/docker-py/issues/811
return request.path_url
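# Illustrative sketch (not part of the vendored code): mounting the UNIX
# socket adapter. The session object and the mount prefix are
# assumptions:
#
#   adapter = UnixHTTPAdapter('http+unix:///var/run/docker.sock')
#   session.mount('http+docker://', adapter)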

View File

@@ -0,0 +1,83 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import socket
from .._import_helper import urllib3
from ..errors import DockerException
class CancellableStream(object):
"""
Stream wrapper for real-time events, logs, etc. from the server.
Example:
>>> events = client.events()
>>> for event in events:
... print(event)
>>> # and cancel from another thread
>>> events.close()
"""
def __init__(self, stream, response):
self._stream = stream
self._response = response
def __iter__(self):
return self
def __next__(self):
try:
return next(self._stream)
except urllib3.exceptions.ProtocolError:
raise StopIteration
except socket.error:
raise StopIteration
next = __next__
def close(self):
"""
Closes the event streaming.
"""
if not self._response.raw.closed:
# find the underlying socket object
# based on api.client._get_raw_response_socket
sock_fp = self._response.raw._fp.fp
if hasattr(sock_fp, 'raw'):
sock_raw = sock_fp.raw
if hasattr(sock_raw, 'sock'):
sock = sock_raw.sock
elif hasattr(sock_raw, '_sock'):
sock = sock_raw._sock
elif hasattr(sock_fp, 'channel'):
# We're working with a paramiko (SSH) channel, which doesn't
# support cancelable streams with the current implementation
raise DockerException(
'Cancellable streams not supported for the SSH protocol'
)
else:
sock = sock_fp._sock
if hasattr(urllib3.contrib, 'pyopenssl') and isinstance(
sock, urllib3.contrib.pyopenssl.WrappedSocket):
sock = sock.socket
sock.shutdown(socket.SHUT_RDWR)
sock.close()

View File

@@ -0,0 +1,305 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import io
import os
import random
import re
import tarfile
import tempfile
from ansible.module_utils.six import PY3
from . import fnmatch
from ..constants import IS_WINDOWS_PLATFORM, WINDOWS_LONGPATH_PREFIX
_SEP = re.compile('/|\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')
def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
root = os.path.abspath(path)
exclude = exclude or []
dockerfile = dockerfile or (None, None)
extra_files = []
if dockerfile[1] is not None:
dockerignore_contents = '\n'.join(
(exclude or ['.dockerignore']) + [dockerfile[0]]
)
extra_files = [
('.dockerignore', dockerignore_contents),
dockerfile,
]
return create_archive(
files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile[0])),
root=root, fileobj=fileobj, gzip=gzip, extra_files=extra_files
)
def exclude_paths(root, patterns, dockerfile=None):
"""
Given a root directory path and a list of .dockerignore patterns, return
an iterator of all paths (both regular files and directories) in the root
directory that do *not* match any of the patterns.
All paths returned are relative to the root.
"""
if dockerfile is None:
dockerfile = 'Dockerfile'
patterns.append('!' + dockerfile)
pm = PatternMatcher(patterns)
return set(pm.walk(root))
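# Illustrative sketch (not part of the vendored code): .dockerignore-style
# filtering with exclude_paths(). The context layout is made up:
#
#   # context contains: Dockerfile, app.py, notes.txt, build/cache.bin
#   exclude_paths('/ctx', ['*.txt', 'build'])
#   # -> {'Dockerfile', 'app.py'}
#   # (the Dockerfile survives via the implicit '!Dockerfile' pattern
#   # appended above; 'build' and its contents are skipped)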
def build_file_list(root):
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(
longpath.replace(root, '', 1).lstrip('/')
)
return files
def create_archive(root, files=None, fileobj=None, gzip=False,
extra_files=None):
extra_files = extra_files or []
if not fileobj:
fileobj = tempfile.NamedTemporaryFile()
t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
if files is None:
files = build_file_list(root)
extra_names = set(e[0] for e in extra_files)
for path in files:
if path in extra_names:
# Extra files override context files with the same name
continue
full_path = os.path.join(root, path)
i = t.gettarinfo(full_path, arcname=path)
if i is None:
# This happens when we encounter a socket file. We can safely
# ignore it and proceed.
continue
# Workaround https://bugs.python.org/issue32713
if i.mtime < 0 or i.mtime > 8**11 - 1:
i.mtime = int(i.mtime)
if IS_WINDOWS_PLATFORM:
# Windows doesn't keep track of the execute bit, so we make files
# and directories executable by default.
i.mode = i.mode & 0o755 | 0o111
if i.isfile():
try:
with open(full_path, 'rb') as f:
t.addfile(i, f)
except IOError:
raise IOError(
'Can not read file in context: {0}'.format(full_path)
)
else:
# Directories, FIFOs, symlinks... don't need to be read.
t.addfile(i, None)
for name, contents in extra_files:
info = tarfile.TarInfo(name)
contents_encoded = contents.encode('utf-8')
info.size = len(contents_encoded)
t.addfile(info, io.BytesIO(contents_encoded))
t.close()
fileobj.seek(0)
return fileobj
def mkbuildcontext(dockerfile):
f = tempfile.NamedTemporaryFile()
t = tarfile.open(mode='w', fileobj=f)
if isinstance(dockerfile, io.StringIO):
dfinfo = tarfile.TarInfo('Dockerfile')
if PY3:
raise TypeError('Please use io.BytesIO to create in-memory '
'Dockerfiles with Python 3')
else:
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
elif isinstance(dockerfile, io.BytesIO):
dfinfo = tarfile.TarInfo('Dockerfile')
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
else:
dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')
t.addfile(dfinfo, dockerfile)
t.close()
f.seek(0)
return f
def split_path(p):
return [pt for pt in re.split(_SEP, p) if pt and pt != '.']
def normalize_slashes(p):
if IS_WINDOWS_PLATFORM:
return '/'.join(split_path(p))
return p
def walk(root, patterns, default=True):
pm = PatternMatcher(patterns)
return pm.walk(root)
# Heavily based on
# https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go
class PatternMatcher(object):
def __init__(self, patterns):
self.patterns = list(filter(
lambda p: p.dirs, [Pattern(p) for p in patterns]
))
self.patterns.append(Pattern('!.dockerignore'))
def matches(self, filepath):
matched = False
parent_path = os.path.dirname(filepath)
parent_path_dirs = split_path(parent_path)
for pattern in self.patterns:
negative = pattern.exclusion
match = pattern.match(filepath)
if not match and parent_path != '':
if len(pattern.dirs) <= len(parent_path_dirs):
match = pattern.match(
os.path.sep.join(parent_path_dirs[:len(pattern.dirs)])
)
if match:
matched = not negative
return matched
def walk(self, root):
def rec_walk(current_dir):
for f in os.listdir(current_dir):
fpath = os.path.join(
os.path.relpath(current_dir, root), f
)
if fpath.startswith('.' + os.path.sep):
fpath = fpath[2:]
match = self.matches(fpath)
if not match:
yield fpath
cur = os.path.join(root, fpath)
if not os.path.isdir(cur) or os.path.islink(cur):
continue
if match:
# If we want to skip this file and it's a directory
# then we should first check to see if there's an
# excludes pattern (e.g. !dir/file) that starts with this
# dir. If so then we can't skip this dir.
skip = True
for pat in self.patterns:
if not pat.exclusion:
continue
if pat.cleaned_pattern.startswith(
normalize_slashes(fpath)):
skip = False
break
if skip:
continue
for sub in rec_walk(cur):
yield sub
return rec_walk(root)
class Pattern(object):
def __init__(self, pattern_str):
self.exclusion = False
if pattern_str.startswith('!'):
self.exclusion = True
pattern_str = pattern_str[1:]
self.dirs = self.normalize(pattern_str)
self.cleaned_pattern = '/'.join(self.dirs)
@classmethod
def normalize(cls, p):
# Remove trailing spaces
p = p.strip()
# Leading and trailing slashes are not relevant. Yes,
# "foo.py/" must exclude the "foo.py" regular file. "."
# components are not relevant either, even if the whole
# pattern is only ".", as the Docker reference states: "For
# historical reasons, the pattern . is ignored."
# ".." component must be cleared with the potential previous
# component, regardless of whether it exists: "A preprocessing
# step [...] eliminates . and .. elements using Go's
# filepath.".
i = 0
split = split_path(p)
while i < len(split):
if split[i] == '..':
del split[i]
if i > 0:
del split[i - 1]
i -= 1
else:
i += 1
return split
def match(self, filepath):
return fnmatch.fnmatch(normalize_slashes(filepath), self.cleaned_pattern)
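# Illustrative sketch (not part of the vendored code): Pattern.normalize()
# collapses '.' and '..' components much like Go's filepath.Clean:
#
#   Pattern.normalize('./foo/../bar/')        # -> ['bar']
#   Pattern('!docs/*.md').cleaned_pattern     # -> 'docs/*.md'
#   Pattern('!docs/*.md').exclusion           # -> True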
def process_dockerfile(dockerfile, path):
if not dockerfile:
return (None, None)
abs_dockerfile = dockerfile
if not os.path.isabs(dockerfile):
abs_dockerfile = os.path.join(path, dockerfile)
if IS_WINDOWS_PLATFORM and path.startswith(
WINDOWS_LONGPATH_PREFIX):
abs_dockerfile = '{0}{1}'.format(
WINDOWS_LONGPATH_PREFIX,
os.path.normpath(
abs_dockerfile[len(WINDOWS_LONGPATH_PREFIX):]
)
)
if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or
os.path.relpath(abs_dockerfile, path).startswith('..')):
# Dockerfile not in context - read data to insert into tar later
with open(abs_dockerfile) as df:
return (
'.dockerfile.{random:x}'.format(random=random.getrandbits(160)),
df.read()
)
# Dockerfile is inside the context - return path relative to context root
if dockerfile == abs_dockerfile:
# Only calculate relpath if necessary to avoid errors
# on Windows client -> Linux Docker
# see https://github.com/docker/compose/issues/5969
dockerfile = os.path.relpath(abs_dockerfile, path)
return (dockerfile, None)

View File

@@ -0,0 +1,78 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import logging
import os
from ..constants import IS_WINDOWS_PLATFORM
DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
log = logging.getLogger(__name__)
def find_config_file(config_path=None):
paths = list(filter(None, [
config_path, # 1
config_path_from_environment(), # 2
os.path.join(home_dir(), DOCKER_CONFIG_FILENAME), # 3
os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4
]))
log.debug("Trying paths: %s", repr(paths))
for path in paths:
if os.path.exists(path):
log.debug("Found file at path: %s", path)
return path
log.debug("No config file found")
return None
def config_path_from_environment():
config_dir = os.environ.get('DOCKER_CONFIG')
if not config_dir:
return None
return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))
def home_dir():
"""
Get the user's home directory, using the same logic as the Docker Engine
client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.
"""
if IS_WINDOWS_PLATFORM:
return os.environ.get('USERPROFILE', '')
else:
return os.path.expanduser('~')
def load_general_config(config_path=None):
config_file = find_config_file(config_path)
if not config_file:
return {}
try:
with open(config_file) as f:
return json.load(f)
except (IOError, ValueError) as e:
# In the case of a legacy `.dockercfg` file, we won't
# be able to load any JSON data.
log.debug(e)
log.debug("All parsing attempts failed - returning empty config")
return {}

View File

@@ -0,0 +1,59 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import functools
from .. import errors
from . import utils
def check_resource(resource_name):
def decorator(f):
@functools.wraps(f)
def wrapped(self, resource_id=None, *args, **kwargs):
if resource_id is None and kwargs.get(resource_name):
resource_id = kwargs.pop(resource_name)
if isinstance(resource_id, dict):
resource_id = resource_id.get('Id', resource_id.get('ID'))
if not resource_id:
raise errors.NullResource(
'Resource ID was not provided'
)
return f(self, resource_id, *args, **kwargs)
return wrapped
return decorator
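# A minimal usage sketch of check_resource (the class below is hypothetical and
# exists only to demonstrate the decorator): the resource may be passed
# positionally, as a keyword, or as an inspect-style dict carrying 'Id'/'ID'.
class _ExampleResourceApi(object):
    @check_resource('container')
    def restart(self, container, timeout=10):
        return (container, timeout)
def _example_check_resource():
    api = _ExampleResourceApi()
    assert api.restart('abc123') == ('abc123', 10)             # positional ID
    assert api.restart(container='abc123') == ('abc123', 10)   # keyword ID
    assert api.restart({'Id': 'abc123'}) == ('abc123', 10)     # inspect-style dict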
def minimum_version(version):
def decorator(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
if utils.version_lt(self._version, version):
raise errors.InvalidVersion(
'{0} is not available for version < {1}'.format(
f.__name__, version
)
)
return f(self, *args, **kwargs)
return wrapper
return decorator
def update_headers(f):
def inner(self, *args, **kwargs):
if 'HttpHeaders' in self._general_configs:
if not kwargs.get('headers'):
kwargs['headers'] = self._general_configs['HttpHeaders']
else:
kwargs['headers'].update(self._general_configs['HttpHeaders'])
return f(self, *args, **kwargs)
return inner

View File

@@ -0,0 +1,127 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
"""Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case in account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""
import re
__all__ = ["fnmatch", "fnmatchcase", "translate"]
_cache = {}
_MAXCACHE = 100
def _purge():
"""Clear the pattern cache"""
_cache.clear()
def fnmatch(name, pat):
"""Test whether FILENAME matches PATTERN.
Patterns are Unix shell style:
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you don't want this, use fnmatchcase(FILENAME, PATTERN).
"""
name = name.lower()
pat = pat.lower()
return fnmatchcase(name, pat)
def fnmatchcase(name, pat):
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which doesn't case-normalize
its arguments.
"""
try:
re_pat = _cache[pat]
except KeyError:
res = translate(pat)
if len(_cache) >= _MAXCACHE:
_cache.clear()
_cache[pat] = re_pat = re.compile(res)
return re_pat.match(name) is not None
def translate(pat):
"""Translate a shell PATTERN to a regular expression.
There is no way to quote meta-characters.
"""
i, n = 0, len(pat)
res = '^'
while i < n:
c = pat[i]
i = i + 1
if c == '*':
if i < n and pat[i] == '*':
# is some flavor of "**"
i = i + 1
# Treat **/ as ** so eat the "/"
if i < n and pat[i] == '/':
i = i + 1
if i >= n:
# is "**EOF" - to align with .gitignore just accept all
res = res + '.*'
else:
# is "**"
# Note that this allows for any # of /'s (even 0) because
# the .* will eat everything, even /'s
res = res + '(.*/)?'
else:
# is "*" so map it to anything but "/"
res = res + '[^/]*'
elif c == '?':
# "?" is any char except "/"
res = res + '[^/]'
elif c == '[':
j = i
if j < n and pat[j] == '!':
j = j + 1
if j < n and pat[j] == ']':
j = j + 1
while j < n and pat[j] != ']':
j = j + 1
if j >= n:
res = res + '\\['
else:
stuff = pat[i:j].replace('\\', '\\\\')
i = j + 1
if stuff[0] == '!':
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
res = '%s[%s]' % (res, stuff)
else:
res = res + re.escape(c)
return res + '$'
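# A minimal usage sketch (hypothetical helper) of how this dockerignore dialect
# differs from stock fnmatch: '*' never crosses '/', while '**' may.
def _example_translate():
    assert translate('foo/*.py') == r'^foo/[^/]*\.py$'
    assert translate('**/baz') == '^(.*/)?baz$'
    assert fnmatch('foo/bar.py', 'foo/*.py')
    assert not fnmatch('foo/sub/bar.py', 'foo/*.py')  # '*' stops at path separators
    assert fnmatch('a/b/c/baz', '**/baz')             # '**' spans any number of components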

View File

@@ -0,0 +1,89 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import json.decoder
from ansible.module_utils.six import text_type
from ..errors import StreamParseError
json_decoder = json.JSONDecoder()
def stream_as_text(stream):
"""
Given a stream of bytes or text, if any of the items in the stream
are bytes convert them to text.
This function can be removed once we return text streams
instead of byte streams.
"""
for data in stream:
if not isinstance(data, text_type):
data = data.decode('utf-8', 'replace')
yield data
def json_splitter(buffer):
"""Attempt to parse a json object from a buffer. If there is at least one
object, return it and the rest of the buffer, otherwise return None.
"""
buffer = buffer.strip()
try:
obj, index = json_decoder.raw_decode(buffer)
rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
return obj, rest
except ValueError:
return None
def json_stream(stream):
"""Given a stream of text, return a stream of json objects.
This handles streams which are inconsistently buffered (some entries may
be newline delimited, and others are not).
"""
return split_buffer(stream, json_splitter, json_decoder.decode)
def line_splitter(buffer, separator=u'\n'):
index = buffer.find(text_type(separator))
if index == -1:
return None
return buffer[:index + 1], buffer[index + 1:]
def split_buffer(stream, splitter=None, decoder=lambda a: a):
"""Given a generator which yields strings and a splitter function,
joins all input, splits on the separator and yields each chunk.
Unlike string.split(), each chunk includes the trailing
separator, except for the last one if none was found on the end
of the input.
"""
splitter = splitter or line_splitter
buffered = text_type('')
for data in stream_as_text(stream):
buffered += data
while True:
buffer_split = splitter(buffered)
if buffer_split is None:
break
item, buffered = buffer_split
yield item
if buffered:
try:
yield decoder(buffered)
except Exception as e:
raise StreamParseError(e)
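# A minimal usage sketch (hypothetical helper): json_stream() reassembles JSON
# objects across arbitrary chunk boundaries, whether or not newline-delimited.
def _example_json_stream():
    chunks = [b'{"a": 1}\n{"b"', b': 2}{"c": 3}']
    assert list(json_stream(chunks)) == [{'a': 1}, {'b': 2}, {'c': 3}]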

View File

@@ -0,0 +1,95 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
PORT_SPEC = re.compile(
"^" # Match full string
"(" # External part
r"(\[?(?P<host>[a-fA-F\d.:]+)\]?:)?" # Address
r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
")?"
r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
"(?P<proto>/(udp|tcp|sctp))?" # Protocol
"$" # Match full string
)
def add_port_mapping(port_bindings, internal_port, external):
if internal_port in port_bindings:
port_bindings[internal_port].append(external)
else:
port_bindings[internal_port] = [external]
def add_port(port_bindings, internal_port_range, external_range):
if external_range is None:
for internal_port in internal_port_range:
add_port_mapping(port_bindings, internal_port, None)
else:
ports = zip(internal_port_range, external_range)
for internal_port, external_port in ports:
add_port_mapping(port_bindings, internal_port, external_port)
def build_port_bindings(ports):
port_bindings = {}
for port in ports:
internal_port_range, external_range = split_port(port)
add_port(port_bindings, internal_port_range, external_range)
return port_bindings
def _raise_invalid_port(port):
raise ValueError('Invalid port "%s", should be '
'[[remote_ip:]remote_port[-remote_port]:]'
'port[/protocol]' % port)
def port_range(start, end, proto, randomly_available_port=False):
if not start:
return start
if not end:
return [start + proto]
if randomly_available_port:
return ['{0}-{1}'.format(start, end) + proto]
return [str(port) + proto for port in range(int(start), int(end) + 1)]
def split_port(port):
if hasattr(port, 'legacy_repr'):
# This is the worst hack, but it prevents a bug in Compose 1.14.0
# https://github.com/docker/docker-py/issues/1668
# TODO: remove once fixed in Compose stable
port = port.legacy_repr()
port = str(port)
match = PORT_SPEC.match(port)
if match is None:
_raise_invalid_port(port)
parts = match.groupdict()
host = parts['host']
proto = parts['proto'] or ''
internal = port_range(parts['int'], parts['int_end'], proto)
external = port_range(
parts['ext'], parts['ext_end'], '', len(internal) == 1)
if host is None:
if external is not None and len(internal) != len(external):
raise ValueError('Port ranges don\'t match in length')
return internal, external
else:
if not external:
external = [None] * len(internal)
elif len(internal) != len(external):
raise ValueError('Port ranges don\'t match in length')
return internal, [(host, ext_port) for ext_port in external]
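# A minimal usage sketch (hypothetical helper) of the shapes split_port()
# returns for common port specifications:
def _example_split_port():
    assert split_port('80') == (['80'], None)                   # internal only
    assert split_port('8080:80/tcp') == (['80/tcp'], ['8080'])  # external:internal
    assert split_port('127.0.0.1:8080:80') == (['80'], [('127.0.0.1', '8080')])
    assert split_port('1000-1001:2000-2001') == (['2000', '2001'], ['1000', '1001'])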

View File

@@ -0,0 +1,85 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from .utils import format_environment
class ProxyConfig(dict):
'''
Hold the client's proxy configuration
'''
@property
def http(self):
return self.get('http')
@property
def https(self):
return self.get('https')
@property
def ftp(self):
return self.get('ftp')
@property
def no_proxy(self):
return self.get('no_proxy')
@staticmethod
def from_dict(config):
'''
Instantiate a new ProxyConfig from a dictionary that represents a
client configuration, as described in `the documentation`_.
.. _the documentation:
https://docs.docker.com/network/proxy/#configure-the-docker-client
'''
return ProxyConfig(
http=config.get('httpProxy'),
https=config.get('httpsProxy'),
ftp=config.get('ftpProxy'),
no_proxy=config.get('noProxy'),
)
def get_environment(self):
'''
Return a dictionary representing the environment variables used to
set the proxy settings.
'''
env = {}
if self.http:
env['http_proxy'] = env['HTTP_PROXY'] = self.http
if self.https:
env['https_proxy'] = env['HTTPS_PROXY'] = self.https
if self.ftp:
env['ftp_proxy'] = env['FTP_PROXY'] = self.ftp
if self.no_proxy:
env['no_proxy'] = env['NO_PROXY'] = self.no_proxy
return env
def inject_proxy_environment(self, environment):
'''
Given a list of strings representing environment variables, prepend the
environment variables corresponding to the proxy settings.
'''
if not self:
return environment
proxy_env = format_environment(self.get_environment())
if not environment:
return proxy_env
# It is important to prepend our variables, because we want the
# variables defined in "environment" to take precedence.
return proxy_env + environment
def __str__(self):
return 'ProxyConfig(http={0}, https={1}, ftp={2}, no_proxy={3})'.format(
self.http, self.https, self.ftp, self.no_proxy)
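# A minimal usage sketch (hypothetical helper): proxy variables are prepended,
# so entries supplied by the caller stay last and take precedence.
def _example_proxy_config():
    cfg = ProxyConfig.from_dict({'httpProxy': 'http://proxy:3128'})
    env = cfg.inject_proxy_environment(['http_proxy=http://other:8080'])
    assert 'HTTP_PROXY=http://proxy:3128' in env
    assert env[-1] == 'http_proxy=http://other:8080'  # explicit entry wins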

View File

@@ -0,0 +1,193 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import errno
import os
import select
import socket as pysocket
import struct
from ansible.module_utils.six import PY3, binary_type
from ..transport.npipesocket import NpipeSocket
STDOUT = 1
STDERR = 2
class SocketError(Exception):
pass
# NpipeSockets have their own error types
# pywintypes.error: (109, 'ReadFile', 'The pipe has been ended.')
NPIPE_ENDED = 109
def read(socket, n=4096):
"""
Reads at most n bytes from socket
"""
recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
if PY3 and not isinstance(socket, NpipeSocket):
select.select([socket], [], [])
try:
if hasattr(socket, 'recv'):
return socket.recv(n)
if PY3 and isinstance(socket, getattr(pysocket, 'SocketIO')):
return socket.read(n)
return os.read(socket.fileno(), n)
except EnvironmentError as e:
if e.errno not in recoverable_errors:
raise
except Exception as e:
is_pipe_ended = (isinstance(socket, NpipeSocket) and
len(e.args) > 0 and
e.args[0] == NPIPE_ENDED)
if is_pipe_ended:
# npipes don't support duplex sockets, so we interpret
# a PIPE_ENDED error as a close operation (0-length read).
return 0
raise
def read_exactly(socket, n):
"""
Reads exactly n bytes from socket
Raises SocketError if there isn't enough data
"""
data = binary_type()
while len(data) < n:
next_data = read(socket, n - len(data))
if not next_data:
raise SocketError("Unexpected EOF")
data += next_data
return data
def next_frame_header(socket):
"""
Returns the stream and size of the next frame of data waiting to be read
from socket, according to the protocol defined here:
https://docs.docker.com/engine/api/v1.24/#attach-to-a-container
"""
try:
data = read_exactly(socket, 8)
except SocketError:
return (-1, -1)
stream, actual = struct.unpack('>BxxxL', data)
return (stream, actual)
def frames_iter(socket, tty):
"""
Return a generator of frames read from socket. A frame is a tuple where
the first item is the stream number and the second item is a chunk of data.
If the tty setting is enabled, the streams are multiplexed into the stdout
stream.
"""
if tty:
return ((STDOUT, frame) for frame in frames_iter_tty(socket))
else:
return frames_iter_no_tty(socket)
def frames_iter_no_tty(socket):
"""
Returns a generator of data read from the socket when the tty setting is
not enabled.
"""
while True:
(stream, n) = next_frame_header(socket)
if n < 0:
break
while n > 0:
result = read(socket, n)
if result is None:
continue
data_length = len(result)
if data_length == 0:
# We have reached EOF
return
n -= data_length
yield (stream, result)
def frames_iter_tty(socket):
"""
Return a generator of data read from the socket when the tty setting is
enabled.
"""
while True:
result = read(socket)
if len(result) == 0:
# We have reached EOF
return
yield result
def consume_socket_output(frames, demux=False):
"""
Iterate through frames read from the socket and return the result.
Args:
demux (bool):
If False, stdout and stderr are multiplexed, and the result is the
concatenation of all the frames. If True, the streams are
demultiplexed, and the result is a 2-tuple where each item is the
concatenation of frames belonging to the same stream.
"""
if demux is False:
        # If the streams are multiplexed, the generator yields byte strings
        # that we just need to concatenate.
return binary_type().join(frames)
# If the streams are demultiplexed, the generator yields tuples
# (stdout, stderr)
out = [None, None]
for frame in frames:
# It is guaranteed that for each frame, one and only one stream
# is not None.
if frame == (None, None):
            raise AssertionError('frame must not be (None, None), but got %s' % (frame, ))
if frame[0] is not None:
if out[0] is None:
out[0] = frame[0]
else:
out[0] += frame[0]
else:
if out[1] is None:
out[1] = frame[1]
else:
out[1] += frame[1]
return tuple(out)
def demux_adaptor(stream_id, data):
"""
Utility to demultiplex stdout and stderr when reading frames from the
socket.
"""
if stream_id == STDOUT:
return (data, None)
elif stream_id == STDERR:
return (None, data)
else:
raise ValueError('{0} is not a valid stream'.format(stream_id))
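# A minimal usage sketch (hypothetical helper) wiring demux_adaptor() into
# consume_socket_output():
def _example_demux():
    frames = [demux_adaptor(STDOUT, b'out1'), demux_adaptor(STDERR, b'err'),
              demux_adaptor(STDOUT, b'out2')]
    assert consume_socket_output(frames, demux=True) == (b'out1out2', b'err')
    assert consume_socket_output([b'out1', b'err', b'out2'], demux=False) == b'out1errout2'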

View File

@@ -0,0 +1,524 @@
# -*- coding: utf-8 -*-
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import collections
import json
import os
import os.path
import shlex
import string
from datetime import datetime
from ansible_collections.community.docker.plugins.module_utils.version import StrictVersion
from ansible.module_utils.six import PY2, PY3, binary_type, integer_types, iteritems, string_types, text_type
from .. import errors
from ..constants import DEFAULT_HTTP_HOST
from ..constants import DEFAULT_UNIX_SOCKET
from ..constants import DEFAULT_NPIPE
from ..constants import BYTE_UNITS
from ..tls import TLSConfig
if PY2:
from urlparse import urlparse, urlunparse
else:
from urllib.parse import urlparse, urlunparse
URLComponents = collections.namedtuple(
'URLComponents',
'scheme netloc url params query fragment',
)
def create_ipam_pool(*args, **kwargs):
raise errors.DeprecatedMethod(
'utils.create_ipam_pool has been removed. Please use a '
'docker.types.IPAMPool object instead.'
)
def create_ipam_config(*args, **kwargs):
raise errors.DeprecatedMethod(
'utils.create_ipam_config has been removed. Please use a '
'docker.types.IPAMConfig object instead.'
)
def decode_json_header(header):
data = base64.b64decode(header)
if PY3:
data = data.decode('utf-8')
return json.loads(data)
def compare_version(v1, v2):
"""Compare docker versions
>>> v1 = '1.9'
>>> v2 = '1.10'
>>> compare_version(v1, v2)
1
>>> compare_version(v2, v1)
-1
>>> compare_version(v2, v2)
0
"""
s1 = StrictVersion(v1)
s2 = StrictVersion(v2)
if s1 == s2:
return 0
elif s1 > s2:
return -1
else:
return 1
def version_lt(v1, v2):
return compare_version(v1, v2) > 0
def version_gte(v1, v2):
return not version_lt(v1, v2)
def _convert_port_binding(binding):
result = {'HostIp': '', 'HostPort': ''}
if isinstance(binding, tuple):
if len(binding) == 2:
result['HostPort'] = binding[1]
result['HostIp'] = binding[0]
elif isinstance(binding[0], string_types):
result['HostIp'] = binding[0]
else:
result['HostPort'] = binding[0]
elif isinstance(binding, dict):
if 'HostPort' in binding:
result['HostPort'] = binding['HostPort']
if 'HostIp' in binding:
result['HostIp'] = binding['HostIp']
else:
raise ValueError(binding)
else:
result['HostPort'] = binding
if result['HostPort'] is None:
result['HostPort'] = ''
else:
result['HostPort'] = str(result['HostPort'])
return result
def convert_port_bindings(port_bindings):
result = {}
for k, v in iteritems(port_bindings):
key = str(k)
if '/' not in key:
key += '/tcp'
if isinstance(v, list):
result[key] = [_convert_port_binding(binding) for binding in v]
else:
result[key] = [_convert_port_binding(v)]
return result
def convert_volume_binds(binds):
if isinstance(binds, list):
return binds
result = []
for k, v in binds.items():
if isinstance(k, binary_type):
k = k.decode('utf-8')
if isinstance(v, dict):
if 'ro' in v and 'mode' in v:
raise ValueError(
'Binding cannot contain both "ro" and "mode": {0}'
.format(repr(v))
)
bind = v['bind']
if isinstance(bind, binary_type):
bind = bind.decode('utf-8')
if 'ro' in v:
mode = 'ro' if v['ro'] else 'rw'
elif 'mode' in v:
mode = v['mode']
else:
mode = 'rw'
result.append(
text_type('{0}:{1}:{2}').format(k, bind, mode)
)
else:
if isinstance(v, binary_type):
v = v.decode('utf-8')
result.append(
text_type('{0}:{1}:rw').format(k, v)
)
return result
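# A minimal usage sketch (hypothetical helper) of the two accepted bind value
# shapes: a plain target string, or a dict with 'bind' plus 'mode' or 'ro'.
def _example_convert_volume_binds():
    binds = {'/host/logs': '/logs',
             '/host/data': {'bind': '/data', 'mode': 'ro'}}
    assert sorted(convert_volume_binds(binds)) == ['/host/data:/data:ro', '/host/logs:/logs:rw']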
def convert_tmpfs_mounts(tmpfs):
if isinstance(tmpfs, dict):
return tmpfs
if not isinstance(tmpfs, list):
raise ValueError(
'Expected tmpfs value to be either a list or a dict, found: {0}'
.format(type(tmpfs).__name__)
)
result = {}
for mount in tmpfs:
if isinstance(mount, string_types):
if ":" in mount:
name, options = mount.split(":", 1)
else:
name = mount
options = ""
else:
raise ValueError(
"Expected item in tmpfs list to be a string, found: {0}"
.format(type(mount).__name__)
)
result[name] = options
return result
def convert_service_networks(networks):
if not networks:
return networks
if not isinstance(networks, list):
raise TypeError('networks parameter must be a list.')
result = []
for n in networks:
if isinstance(n, string_types):
n = {'Target': n}
result.append(n)
return result
def parse_repository_tag(repo_name):
parts = repo_name.rsplit('@', 1)
if len(parts) == 2:
return tuple(parts)
parts = repo_name.rsplit(':', 1)
if len(parts) == 2 and '/' not in parts[1]:
return tuple(parts)
return repo_name, None
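# A minimal usage sketch (hypothetical helper): tags, digests, and registry
# ports are disambiguated by the '/' check on the part after the last ':'.
def _example_parse_repository_tag():
    assert parse_repository_tag('ubuntu') == ('ubuntu', None)
    assert parse_repository_tag('ubuntu:22.04') == ('ubuntu', '22.04')
    assert parse_repository_tag('localhost:5000/app') == ('localhost:5000/app', None)
    assert parse_repository_tag('app@sha256:abcd') == ('app', 'sha256:abcd')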
def parse_host(addr, is_win32=False, tls=False):
# Sensible defaults
if not addr and is_win32:
return DEFAULT_NPIPE
if not addr or addr.strip() == 'unix://':
return DEFAULT_UNIX_SOCKET
addr = addr.strip()
parsed_url = urlparse(addr)
proto = parsed_url.scheme
if not proto or any(x not in string.ascii_letters + '+' for x in proto):
# https://bugs.python.org/issue754016
parsed_url = urlparse('//' + addr, 'tcp')
proto = 'tcp'
if proto == 'fd':
raise errors.DockerException('fd protocol is not implemented')
# These protos are valid aliases for our library but not for the
# official spec
if proto == 'http' or proto == 'https':
tls = proto == 'https'
proto = 'tcp'
elif proto == 'http+unix':
proto = 'unix'
if proto not in ('tcp', 'unix', 'npipe', 'ssh'):
raise errors.DockerException(
"Invalid bind address protocol: {0}".format(addr)
)
if proto == 'tcp' and not parsed_url.netloc:
# "tcp://" is exceptionally disallowed by convention;
# omitting a hostname for other protocols is fine
raise errors.DockerException(
'Invalid bind address format: {0}'.format(addr)
)
if any([
parsed_url.params, parsed_url.query, parsed_url.fragment,
parsed_url.password
]):
raise errors.DockerException(
'Invalid bind address format: {0}'.format(addr)
)
if parsed_url.path and proto == 'ssh':
raise errors.DockerException(
'Invalid bind address format: no path allowed for this protocol:'
' {0}'.format(addr)
)
else:
path = parsed_url.path
if proto == 'unix' and parsed_url.hostname is not None:
# For legacy reasons, we consider unix://path
# to be valid and equivalent to unix:///path
path = '/'.join((parsed_url.hostname, path))
netloc = parsed_url.netloc
if proto in ('tcp', 'ssh'):
port = parsed_url.port or 0
if port <= 0:
if proto != 'ssh':
raise errors.DockerException(
'Invalid bind address format: port is required:'
' {0}'.format(addr)
)
port = 22
netloc = '{0}:{1}'.format(parsed_url.netloc, port)
if not parsed_url.hostname:
netloc = '{0}:{1}'.format(DEFAULT_HTTP_HOST, port)
# Rewrite schemes to fit library internals (requests adapters)
if proto == 'tcp':
proto = 'http{0}'.format('s' if tls else '')
elif proto == 'unix':
proto = 'http+unix'
if proto in ('http+unix', 'npipe'):
return "{0}://{1}".format(proto, path).rstrip('/')
return urlunparse(URLComponents(
scheme=proto,
netloc=netloc,
url=path,
params='',
query='',
fragment='',
)).rstrip('/')
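# A minimal usage sketch (hypothetical helper) of the scheme rewriting done by
# parse_host():
def _example_parse_host():
    assert parse_host(None) == DEFAULT_UNIX_SOCKET
    assert parse_host('tcp://127.0.0.1:2376', tls=True) == 'https://127.0.0.1:2376'
    assert parse_host('unix:///var/run/docker.sock') == 'http+unix:///var/run/docker.sock'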
def parse_devices(devices):
device_list = []
for device in devices:
if isinstance(device, dict):
device_list.append(device)
continue
if not isinstance(device, string_types):
raise errors.DockerException(
'Invalid device type {0}'.format(type(device))
)
device_mapping = device.split(':')
if device_mapping:
path_on_host = device_mapping[0]
if len(device_mapping) > 1:
path_in_container = device_mapping[1]
else:
path_in_container = path_on_host
if len(device_mapping) > 2:
permissions = device_mapping[2]
else:
permissions = 'rwm'
device_list.append({
'PathOnHost': path_on_host,
'PathInContainer': path_in_container,
'CgroupPermissions': permissions
})
return device_list
def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
if not environment:
environment = os.environ
host = environment.get('DOCKER_HOST')
# empty string for cert path is the same as unset.
cert_path = environment.get('DOCKER_CERT_PATH') or None
# empty string for tls verify counts as "false".
# Any value or 'unset' counts as true.
tls_verify = environment.get('DOCKER_TLS_VERIFY')
if tls_verify == '':
tls_verify = False
else:
tls_verify = tls_verify is not None
enable_tls = cert_path or tls_verify
params = {}
if host:
params['base_url'] = host
if not enable_tls:
return params
if not cert_path:
cert_path = os.path.join(os.path.expanduser('~'), '.docker')
if not tls_verify and assert_hostname is None:
# assert_hostname is a subset of TLS verification,
# so if it's not set already then set it to false.
assert_hostname = False
params['tls'] = TLSConfig(
client_cert=(os.path.join(cert_path, 'cert.pem'),
os.path.join(cert_path, 'key.pem')),
ca_cert=os.path.join(cert_path, 'ca.pem'),
verify=tls_verify,
ssl_version=ssl_version,
assert_hostname=assert_hostname,
)
return params
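# A minimal usage sketch (hypothetical helper): without TLS hints in the
# environment, kwargs_from_env() only propagates DOCKER_HOST (an empty
# DOCKER_TLS_VERIFY counts as false).
def _example_kwargs_from_env():
    env = {'DOCKER_HOST': 'tcp://10.0.0.1:2375', 'DOCKER_TLS_VERIFY': ''}
    assert kwargs_from_env(environment=env) == {'base_url': 'tcp://10.0.0.1:2375'}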
def convert_filters(filters):
result = {}
for k, v in iteritems(filters):
if isinstance(v, bool):
v = 'true' if v else 'false'
if not isinstance(v, list):
v = [v, ]
result[k] = [
str(item) if not isinstance(item, string_types) else item
for item in v
]
return json.dumps(result)
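# A minimal usage sketch (hypothetical helper): filters are JSON-encoded with
# every value normalized to a list of strings.
def _example_convert_filters():
    assert convert_filters({'dangling': True}) == '{"dangling": ["true"]}'
    assert convert_filters({'label': ['a=1', 'b=2']}) == '{"label": ["a=1", "b=2"]}'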
def datetime_to_timestamp(dt):
"""Convert a UTC datetime to a Unix timestamp"""
delta = dt - datetime.utcfromtimestamp(0)
return delta.seconds + delta.days * 24 * 3600
def parse_bytes(s):
if isinstance(s, integer_types + (float,)):
return s
if len(s) == 0:
return 0
if s[-2:-1].isalpha() and s[-1].isalpha():
if s[-1] == "b" or s[-1] == "B":
s = s[:-1]
units = BYTE_UNITS
suffix = s[-1].lower()
# Check if the variable is a string representation of an int
# without a units part. Assuming that the units are bytes.
if suffix.isdigit():
digits_part = s
suffix = 'b'
else:
digits_part = s[:-1]
if suffix in units.keys() or suffix.isdigit():
try:
digits = float(digits_part)
except ValueError:
raise errors.DockerException(
'Failed converting the string value for memory ({0}) to'
' an integer.'.format(digits_part)
)
# Reconvert to long for the final result
s = int(digits * units[suffix])
else:
raise errors.DockerException(
'The specified value for memory ({0}) should specify the'
' units. The postfix should be one of the `b` `k` `m` `g`'
' characters'.format(s)
)
return s
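# A minimal usage sketch (hypothetical helper, assuming the usual 1024-based
# BYTE_UNITS mapping for b/k/m/g):
def _example_parse_bytes():
    assert parse_bytes('512') == 512
    assert parse_bytes('1k') == 1024
    assert parse_bytes('1m') == parse_bytes('1024k')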
def normalize_links(links):
if isinstance(links, dict):
links = iteritems(links)
return ['{0}:{1}'.format(k, v) if v else k for k, v in sorted(links)]
def parse_env_file(env_file):
"""
Reads a line-separated environment file.
The format of each line should be "key=value".
"""
environment = {}
with open(env_file, 'r') as f:
for line in f:
if line[0] == '#':
continue
line = line.strip()
if not line:
continue
parse_line = line.split('=', 1)
if len(parse_line) == 2:
k, v = parse_line
environment[k] = v
else:
raise errors.DockerException(
'Invalid line in environment file {0}:\n{1}'.format(
env_file, line))
return environment
def split_command(command):
if PY2 and not isinstance(command, binary_type):
command = command.encode('utf-8')
return shlex.split(command)
def format_environment(environment):
def format_env(key, value):
if value is None:
return key
if isinstance(value, binary_type):
value = value.decode('utf-8')
return u'{key}={value}'.format(key=key, value=value)
return [format_env(*var) for var in iteritems(environment)]
def format_extra_hosts(extra_hosts, task=False):
# Use format dictated by Swarm API if container is part of a task
if task:
return [
'{0} {1}'.format(v, k) for k, v in sorted(iteritems(extra_hosts))
]
return [
'{0}:{1}'.format(k, v) for k, v in sorted(iteritems(extra_hosts))
]
def create_host_config(self, *args, **kwargs):
raise errors.DeprecatedMethod(
'utils.create_host_config has been removed. Please use a '
'docker.types.HostConfig object instead.'
)

View File

@@ -0,0 +1,56 @@
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import random
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.six import PY2
def generate_insecure_key():
'''Do NOT use this for cryptographic purposes!'''
while True:
# Generate a one-byte key. Right now the functions below do not use more
# than one byte, so this is sufficient.
if PY2:
key = chr(random.randint(0, 255))
else:
key = bytes([random.randint(0, 255)])
# Return anything that is not zero
if key != b'\x00':
return key
def scramble(value, key):
'''Do NOT use this for cryptographic purposes!'''
if len(key) < 1:
raise ValueError('Key must be at least one byte')
value = to_bytes(value)
if PY2:
k = ord(key[0])
value = b''.join([chr(k ^ ord(b)) for b in value])
else:
k = key[0]
value = bytes([k ^ b for b in value])
return '=S=' + to_native(base64.b64encode(value))
def unscramble(value, key):
'''Do NOT use this for cryptographic purposes!'''
if len(key) < 1:
raise ValueError('Key must be at least one byte')
if not value.startswith(u'=S='):
raise ValueError('Value does not start with indicator')
value = base64.b64decode(value[3:])
if PY2:
k = ord(key[0])
value = b''.join([chr(k ^ ord(b)) for b in value])
else:
k = key[0]
value = bytes([k ^ b for b in value])
return to_text(value)
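# A minimal usage sketch (hypothetical helper) of the scramble/unscramble round
# trip: single-byte XOR, base64-encoded, with an '=S=' marker prefix.
def _example_scramble_roundtrip():
    key = generate_insecure_key()
    scrambled = scramble(u'before text', key)
    assert scrambled.startswith('=S=')
    assert unscramble(scrambled, key) == u'before text'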

View File

@@ -0,0 +1,696 @@
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import abc
import os
import platform
import re
import sys
import traceback
from datetime import timedelta
from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
from ansible.module_utils.common.collections import is_sequence
from ansible.module_utils.common._collections_compat import Mapping, Sequence
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
HAS_DOCKER_PY = True
HAS_DOCKER_PY_2 = False
HAS_DOCKER_PY_3 = False
HAS_DOCKER_ERROR = None
HAS_DOCKER_TRACEBACK = None
try:
from requests.exceptions import SSLError
from docker import __version__ as docker_version
from docker.errors import APIError, NotFound, TLSParameterError
from docker.tls import TLSConfig
from docker import auth
if LooseVersion(docker_version) >= LooseVersion('3.0.0'):
HAS_DOCKER_PY_3 = True
from docker import APIClient as Client
elif LooseVersion(docker_version) >= LooseVersion('2.0.0'):
HAS_DOCKER_PY_2 = True
from docker import APIClient as Client
else:
from docker import Client
except ImportError as exc:
HAS_DOCKER_ERROR = str(exc)
HAS_DOCKER_TRACEBACK = traceback.format_exc()
HAS_DOCKER_PY = False
# The next two imports ``docker.models`` and ``docker.ssladapter`` are used
# to ensure the user does not have both ``docker`` and ``docker-py`` modules
# installed, as they utilize the same namespace and are incompatible
try:
# docker (Docker SDK for Python >= 2.0.0)
import docker.models # noqa: F401
HAS_DOCKER_MODELS = True
except ImportError:
HAS_DOCKER_MODELS = False
try:
# docker-py (Docker SDK for Python < 2.0.0)
import docker.ssladapter # noqa: F401
HAS_DOCKER_SSLADAPTER = True
except ImportError:
HAS_DOCKER_SSLADAPTER = False
try:
from requests.exceptions import RequestException
except ImportError:
# Either Docker SDK for Python is no longer using requests, or Docker SDK for Python isn't around either,
# or Docker SDK for Python's dependency requests is missing. In any case, define an exception
# class RequestException so that our code doesn't break.
class RequestException(Exception):
pass
from ansible_collections.community.docker.plugins.module_utils.util import (
DEFAULT_DOCKER_HOST,
DEFAULT_TLS,
DEFAULT_TLS_VERIFY,
DEFAULT_TLS_HOSTNAME,
DEFAULT_TIMEOUT_SECONDS,
DOCKER_COMMON_ARGS,
DOCKER_COMMON_ARGS_VARS,
DOCKER_MUTUALLY_EXCLUSIVE,
DOCKER_REQUIRED_TOGETHER,
DEFAULT_DOCKER_REGISTRY,
BYTE_SUFFIXES,
is_image_name_id,
is_valid_tag,
sanitize_result,
DockerBaseClass,
update_tls_hostname,
compare_dict_allow_more_present,
compare_generic,
DifferenceTracker,
clean_dict_booleans_for_docker_api,
convert_duration_to_nanosecond,
parse_healthcheck,
omit_none_from_dict,
)
MIN_DOCKER_VERSION = "1.8.0"
if not HAS_DOCKER_PY:
docker_version = None
    # No Docker SDK for Python. Create a placeholder client to allow
    # instantiation of AnsibleModule and proper error handling.
class Client(object): # noqa: F811
def __init__(self, **kwargs):
pass
class APIError(Exception): # noqa: F811
pass
class NotFound(Exception): # noqa: F811
pass
def _get_tls_config(fail_function, **kwargs):
try:
tls_config = TLSConfig(**kwargs)
return tls_config
except TLSParameterError as exc:
fail_function("TLS config error: %s" % exc)
def is_using_tls(auth):
return auth['tls_verify'] or auth['tls']
def get_connect_params(auth, fail_function):
if is_using_tls(auth):
auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
result = dict(
base_url=auth['docker_host'],
version=auth['api_version'],
timeout=auth['timeout'],
)
if auth['tls_verify']:
# TLS with verification
tls_config = dict(
verify=True,
assert_hostname=auth['tls_hostname'],
ssl_version=auth['ssl_version'],
fail_function=fail_function,
)
if auth['cert_path'] and auth['key_path']:
tls_config['client_cert'] = (auth['cert_path'], auth['key_path'])
if auth['cacert_path']:
tls_config['ca_cert'] = auth['cacert_path']
result['tls'] = _get_tls_config(**tls_config)
elif auth['tls']:
# TLS without verification
tls_config = dict(
verify=False,
ssl_version=auth['ssl_version'],
fail_function=fail_function,
)
if auth['cert_path'] and auth['key_path']:
tls_config['client_cert'] = (auth['cert_path'], auth['key_path'])
result['tls'] = _get_tls_config(**tls_config)
if auth.get('use_ssh_client'):
if LooseVersion(docker_version) < LooseVersion('4.4.0'):
fail_function("use_ssh_client=True requires Docker SDK for Python 4.4.0 or newer")
result['use_ssh_client'] = True
# No TLS
return result
DOCKERPYUPGRADE_SWITCH_TO_DOCKER = "Try `pip uninstall docker-py` followed by `pip install docker`."
DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade."
DOCKERPYUPGRADE_RECOMMEND_DOCKER = "Use `pip install --upgrade docker-py` to upgrade."
class AnsibleDockerClientBase(Client):
def __init__(self, min_docker_version=None, min_docker_api_version=None):
if min_docker_version is None:
min_docker_version = MIN_DOCKER_VERSION
NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0'))
self.docker_py_version = LooseVersion(docker_version)
if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER:
self.fail("Cannot have both the docker-py and docker python modules (old and new version of Docker "
"SDK for Python) installed together as they use the same namespace and cause a corrupt "
"installation. Please uninstall both packages, and re-install only the docker-py or docker "
"python module (for %s's Python %s). It is recommended to install the docker module. Please "
"note that simply uninstalling one of the modules can leave the other module in a broken "
"state." % (platform.node(), sys.executable))
if not HAS_DOCKER_PY:
msg = missing_required_lib("Docker SDK for Python: docker>=5.0.0 (Python >= 3.6) or "
"docker<5.0.0 (Python 2.7)")
msg = msg + ", for example via `pip install docker` (Python >= 3.6) or " \
+ "`pip install docker==4.4.4` (Python 2.7). The error was: %s"
self.fail(msg % HAS_DOCKER_ERROR, exception=HAS_DOCKER_TRACEBACK)
if self.docker_py_version < LooseVersion(min_docker_version):
msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s."
if not NEEDS_DOCKER_PY2:
# The minimal required version is < 2.0 (and the current version as well).
# Advertise docker (instead of docker-py).
msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
elif docker_version < LooseVersion('2.0'):
msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
else:
msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
self.fail(msg % (docker_version, platform.node(), sys.executable, min_docker_version))
self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail)
try:
super(AnsibleDockerClientBase, self).__init__(**self._connect_params)
self.docker_api_version_str = self.api_version
except APIError as exc:
self.fail("Docker API error: %s" % exc)
except Exception as exc:
self.fail("Error connecting: %s" % exc)
self.docker_api_version = LooseVersion(self.docker_api_version_str)
min_docker_api_version = min_docker_api_version or '1.25'
if self.docker_api_version < LooseVersion(min_docker_api_version):
self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))
def log(self, msg, pretty_print=False):
pass
# if self.debug:
# log_file = open('docker.log', 'a')
# if pretty_print:
# log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
# log_file.write(u'\n')
# else:
# log_file.write(msg + u'\n')
@abc.abstractmethod
def fail(self, msg, **kwargs):
pass
def deprecate(self, msg, version=None, date=None, collection_name=None):
pass
@staticmethod
def _get_value(param_name, param_value, env_variable, default_value, type='str'):
if param_value is not None:
# take module parameter value
if type == 'bool':
if param_value in BOOLEANS_TRUE:
return True
if param_value in BOOLEANS_FALSE:
return False
return bool(param_value)
if type == 'int':
return int(param_value)
return param_value
if env_variable is not None:
env_value = os.environ.get(env_variable)
if env_value is not None:
# take the env variable value
if param_name == 'cert_path':
return os.path.join(env_value, 'cert.pem')
if param_name == 'cacert_path':
return os.path.join(env_value, 'ca.pem')
if param_name == 'key_path':
return os.path.join(env_value, 'key.pem')
if type == 'bool':
if env_value in BOOLEANS_TRUE:
return True
if env_value in BOOLEANS_FALSE:
return False
return bool(env_value)
if type == 'int':
return int(env_value)
return env_value
# take the default
return default_value
@abc.abstractmethod
def _get_params(self):
pass
@property
def auth_params(self):
# Get authentication credentials.
        # Precedence: module parameters -> environment variables -> defaults.
self.log('Getting credentials')
client_params = self._get_params()
params = dict()
for key in DOCKER_COMMON_ARGS:
params[key] = client_params.get(key)
result = dict(
docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
DEFAULT_DOCKER_HOST, type='str'),
tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
'DOCKER_TLS_HOSTNAME', None, type='str'),
api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
'auto', type='str'),
cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None, type='str'),
cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None, type='str'),
key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None, type='str'),
ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None, type='str'),
tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS, type='bool'),
            tls_verify=self._get_value('tls_verify', params['validate_certs'], 'DOCKER_TLS_VERIFY',
DEFAULT_TLS_VERIFY, type='bool'),
timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
DEFAULT_TIMEOUT_SECONDS, type='int'),
use_ssh_client=self._get_value('use_ssh_client', params['use_ssh_client'], None, False, type='bool'),
)
update_tls_hostname(result)
return result
def _handle_ssl_error(self, error):
match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
if match:
self.fail("You asked for verification that Docker daemons certificate's hostname matches %s. "
"The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
"or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
"setting the `tls` parameter to true."
% (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
self.fail("SSL Exception: %s" % (error))
def get_container_by_id(self, container_id):
try:
self.log("Inspecting container Id %s" % container_id)
result = self.inspect_container(container=container_id)
self.log("Completed container inspection")
return result
except NotFound as dummy:
return None
except Exception as exc:
self.fail("Error inspecting container: %s" % exc)
def get_container(self, name=None):
'''
Lookup a container and return the inspection results.
'''
if name is None:
return None
search_name = name
if not name.startswith('/'):
search_name = '/' + name
result = None
try:
for container in self.containers(all=True):
self.log("testing container: %s" % (container['Names']))
if isinstance(container['Names'], list) and search_name in container['Names']:
result = container
break
if container['Id'].startswith(name):
result = container
break
if container['Id'] == name:
result = container
break
except SSLError as exc:
self._handle_ssl_error(exc)
except Exception as exc:
self.fail("Error retrieving container list: %s" % exc)
if result is None:
return None
return self.get_container_by_id(result['Id'])
def get_network(self, name=None, network_id=None):
'''
Lookup a network and return the inspection results.
'''
if name is None and network_id is None:
return None
result = None
if network_id is None:
try:
for network in self.networks():
self.log("testing network: %s" % (network['Name']))
if name == network['Name']:
result = network
break
if network['Id'].startswith(name):
result = network
break
except SSLError as exc:
self._handle_ssl_error(exc)
except Exception as exc:
self.fail("Error retrieving network list: %s" % exc)
if result is not None:
network_id = result['Id']
if network_id is not None:
try:
self.log("Inspecting network Id %s" % network_id)
result = self.inspect_network(network_id)
self.log("Completed network inspection")
except NotFound as dummy:
return None
except Exception as exc:
self.fail("Error inspecting network: %s" % exc)
return result
def find_image(self, name, tag):
'''
Lookup an image (by name and tag) and return the inspection results.
'''
if not name:
return None
self.log("Find image %s:%s" % (name, tag))
images = self._image_lookup(name, tag)
if not images:
            # In API <= 1.20, images pulled from Docker Hub can show up as 'docker.io/<name>'
registry, repo_name = auth.resolve_repository_name(name)
if registry == 'docker.io':
# If docker.io is explicitly there in name, the image
# isn't found in some cases (#41509)
self.log("Check for docker.io image: %s" % repo_name)
images = self._image_lookup(repo_name, tag)
if not images and repo_name.startswith('library/'):
# Sometimes library/xxx images are not found
lookup = repo_name[len('library/'):]
self.log("Check for docker.io image: %s" % lookup)
images = self._image_lookup(lookup, tag)
if not images:
# Last case for some Docker versions: if docker.io wasn't there,
# it can be that the image wasn't found either
# (https://github.com/ansible/ansible/pull/15586)
lookup = "%s/%s" % (registry, repo_name)
self.log("Check for docker.io image: %s" % lookup)
images = self._image_lookup(lookup, tag)
if not images and '/' not in repo_name:
# This seems to be happening with podman-docker
# (https://github.com/ansible-collections/community.docker/issues/291)
lookup = "%s/library/%s" % (registry, repo_name)
self.log("Check for docker.io image: %s" % lookup)
images = self._image_lookup(lookup, tag)
if len(images) > 1:
self.fail("Registry returned more than one result for %s:%s" % (name, tag))
if len(images) == 1:
try:
inspection = self.inspect_image(images[0]['Id'])
except NotFound:
self.log("Image %s:%s not found." % (name, tag))
return None
except Exception as exc:
self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
return inspection
self.log("Image %s:%s not found." % (name, tag))
return None
def find_image_by_id(self, image_id, accept_missing_image=False):
'''
Lookup an image (by ID) and return the inspection results.
'''
if not image_id:
return None
self.log("Find image %s (by ID)" % image_id)
try:
inspection = self.inspect_image(image_id)
except NotFound as exc:
if not accept_missing_image:
self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
self.log("Image %s not found." % image_id)
return None
except Exception as exc:
self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
return inspection
def _image_lookup(self, name, tag):
'''
Including a tag in the name parameter sent to the Docker SDK for Python images method
does not work consistently. Instead, get the result set for name and manually check
if the tag exists.
'''
try:
response = self.images(name=name)
except Exception as exc:
self.fail("Error searching for image %s - %s" % (name, str(exc)))
images = response
if tag:
lookup = "%s:%s" % (name, tag)
lookup_digest = "%s@%s" % (name, tag)
images = []
for image in response:
tags = image.get('RepoTags')
digests = image.get('RepoDigests')
if (tags and lookup in tags) or (digests and lookup_digest in digests):
images = [image]
break
return images
def pull_image(self, name, tag="latest", platform=None):
'''
Pull an image
'''
kwargs = dict(
tag=tag,
stream=True,
decode=True,
)
if platform is not None:
kwargs['platform'] = platform
self.log("Pulling image %s:%s" % (name, tag))
old_tag = self.find_image(name, tag)
try:
for line in self.pull(name, **kwargs):
self.log(line, pretty_print=True)
if line.get('error'):
if line.get('errorDetail'):
error_detail = line.get('errorDetail')
self.fail("Error pulling %s - code: %s message: %s" % (name,
error_detail.get('code'),
error_detail.get('message')))
else:
self.fail("Error pulling %s - %s" % (name, line.get('error')))
except Exception as exc:
self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))
new_tag = self.find_image(name, tag)
return new_tag, old_tag == new_tag
def inspect_distribution(self, image, **kwargs):
'''
Get image digest by directly calling the Docker API when running Docker SDK < 4.0.0
since prior versions did not support accessing private repositories.
'''
if self.docker_py_version < LooseVersion('4.0.0'):
registry = auth.resolve_repository_name(image)[0]
header = auth.get_config_header(self, registry)
if header:
return self._result(self._get(
self._url('/distribution/{0}/json', image),
headers={'X-Registry-Auth': header}
), json=True)
return super(AnsibleDockerClientBase, self).inspect_distribution(image, **kwargs)
class AnsibleDockerClient(AnsibleDockerClientBase):
def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
required_together=None, required_if=None, required_one_of=None, required_by=None,
min_docker_version=None, min_docker_api_version=None, option_minimal_versions=None,
option_minimal_versions_ignore_params=None, fail_results=None):
# Modules can put information in here which will always be returned
# in case client.fail() is called.
self.fail_results = fail_results or {}
merged_arg_spec = dict()
merged_arg_spec.update(DOCKER_COMMON_ARGS)
if argument_spec:
merged_arg_spec.update(argument_spec)
self.arg_spec = merged_arg_spec
mutually_exclusive_params = []
mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
if mutually_exclusive:
mutually_exclusive_params += mutually_exclusive
required_together_params = []
required_together_params += DOCKER_REQUIRED_TOGETHER
if required_together:
required_together_params += required_together
self.module = AnsibleModule(
argument_spec=merged_arg_spec,
supports_check_mode=supports_check_mode,
mutually_exclusive=mutually_exclusive_params,
required_together=required_together_params,
required_if=required_if,
required_one_of=required_one_of,
required_by=required_by or {},
)
self.debug = self.module.params.get('debug')
self.check_mode = self.module.check_mode
super(AnsibleDockerClient, self).__init__(
min_docker_version=min_docker_version,
min_docker_api_version=min_docker_api_version)
if option_minimal_versions is not None:
self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params)
def fail(self, msg, **kwargs):
self.fail_results.update(kwargs)
self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))
def deprecate(self, msg, version=None, date=None, collection_name=None):
self.module.deprecate(msg, version=version, date=date, collection_name=collection_name)
def _get_params(self):
return self.module.params
def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
self.option_minimal_versions = dict()
for option in self.module.argument_spec:
if ignore_params is not None:
if option in ignore_params:
continue
self.option_minimal_versions[option] = dict()
self.option_minimal_versions.update(option_minimal_versions)
for option, data in self.option_minimal_versions.items():
# Test whether option is supported, and store result
support_docker_py = True
support_docker_api = True
if 'docker_py_version' in data:
support_docker_py = self.docker_py_version >= LooseVersion(data['docker_py_version'])
if 'docker_api_version' in data:
support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
data['supported'] = support_docker_py and support_docker_api
# Fail if option is not supported but used
if not data['supported']:
# Test whether option is specified
if 'detect_usage' in data:
used = data['detect_usage'](self)
else:
used = self.module.params.get(option) is not None
if used and 'default' in self.module.argument_spec[option]:
used = self.module.params[option] != self.module.argument_spec[option]['default']
if used:
# If the option is used, compose error message.
if 'usage_msg' in data:
usg = data['usage_msg']
else:
usg = 'set %s option' % (option, )
if not support_docker_api:
msg = 'Docker API version is %s. Minimum version required is %s to %s.'
msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
elif not support_docker_py:
msg = "Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s to %s. "
if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'):
msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
elif self.docker_py_version < LooseVersion('2.0.0'):
msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
else:
msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
msg = msg % (docker_version, platform.node(), sys.executable, data['docker_py_version'], usg)
else:
# should not happen
msg = 'Cannot %s with your configuration.' % (usg, )
self.fail(msg)
def report_warnings(self, result, warnings_key=None):
'''
Checks result of client operation for warnings, and if present, outputs them.
warnings_key should be a list of keys used to crawl the result dictionary.
For example, if warnings_key == ['a', 'b'], the function will consider
result['a']['b'] if these keys exist. If the result is a non-empty string, it
will be reported as a warning. If the result is a list, every entry will be
reported as a warning.
In most cases (if warnings are returned at all), warnings_key should be
['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
'''
if warnings_key is None:
warnings_key = ['Warnings']
for key in warnings_key:
if not isinstance(result, Mapping):
return
result = result.get(key)
if isinstance(result, Sequence):
for warning in result:
self.module.warn('Docker warning: {0}'.format(warning))
elif isinstance(result, string_types) and result:
self.module.warn('Docker warning: {0}'.format(result))

View File

@@ -0,0 +1,592 @@
# Copyright 2016 Red Hat | Ansible
# Copyright (c) 2022 Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import abc
import os
import re
from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
from ansible.module_utils.common._collections_compat import Mapping, Sequence
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
try:
from requests.exceptions import RequestException, SSLError
except ImportError:
# Define an exception class RequestException so that our code doesn't break.
class RequestException(Exception):
pass
from ansible_collections.community.docker.plugins.module_utils._api import auth
from ansible_collections.community.docker.plugins.module_utils._api.api.client import APIClient as Client
from ansible_collections.community.docker.plugins.module_utils._api.errors import (
APIError,
NotFound,
MissingRequirementException,
TLSParameterError,
)
from ansible_collections.community.docker.plugins.module_utils._api.tls import TLSConfig
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
convert_filters,
parse_repository_tag,
)
from ansible_collections.community.docker.plugins.module_utils.util import (
DEFAULT_DOCKER_HOST,
DEFAULT_TLS,
DEFAULT_TLS_VERIFY,
DEFAULT_TLS_HOSTNAME,
DEFAULT_TIMEOUT_SECONDS,
DOCKER_COMMON_ARGS,
DOCKER_MUTUALLY_EXCLUSIVE,
DOCKER_REQUIRED_TOGETHER,
DEFAULT_DOCKER_REGISTRY,
is_image_name_id,
is_valid_tag,
sanitize_result,
update_tls_hostname,
)
def _get_tls_config(fail_function, **kwargs):
try:
tls_config = TLSConfig(**kwargs)
return tls_config
except TLSParameterError as exc:
fail_function("TLS config error: %s" % exc)
def is_using_tls(auth_data):
return auth_data['tls_verify'] or auth_data['tls']
def get_connect_params(auth_data, fail_function):
if is_using_tls(auth_data):
auth_data['docker_host'] = auth_data['docker_host'].replace('tcp://', 'https://')
result = dict(
base_url=auth_data['docker_host'],
version=auth_data['api_version'],
timeout=auth_data['timeout'],
)
if auth_data['tls_verify']:
# TLS with verification
tls_config = dict(
verify=True,
assert_hostname=auth_data['tls_hostname'],
ssl_version=auth_data['ssl_version'],
fail_function=fail_function,
)
if auth_data['cert_path'] and auth_data['key_path']:
tls_config['client_cert'] = (auth_data['cert_path'], auth_data['key_path'])
if auth_data['cacert_path']:
tls_config['ca_cert'] = auth_data['cacert_path']
result['tls'] = _get_tls_config(**tls_config)
elif auth_data['tls']:
# TLS without verification
tls_config = dict(
verify=False,
ssl_version=auth_data['ssl_version'],
fail_function=fail_function,
)
if auth_data['cert_path'] and auth_data['key_path']:
tls_config['client_cert'] = (auth_data['cert_path'], auth_data['key_path'])
result['tls'] = _get_tls_config(**tls_config)
if auth_data.get('use_ssh_client'):
result['use_ssh_client'] = True
# No TLS
return result
class AnsibleDockerClientBase(Client):
def __init__(self, min_docker_api_version=None):
self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail)
try:
super(AnsibleDockerClientBase, self).__init__(**self._connect_params)
self.docker_api_version_str = self.api_version
except MissingRequirementException as exc:
self.fail(missing_required_lib(exc.requirement), exception=exc.import_exception)
except APIError as exc:
self.fail("Docker API error: %s" % exc)
except Exception as exc:
self.fail("Error connecting: %s" % exc)
self.docker_api_version = LooseVersion(self.docker_api_version_str)
min_docker_api_version = min_docker_api_version or '1.25'
if self.docker_api_version < LooseVersion(min_docker_api_version):
self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))
def log(self, msg, pretty_print=False):
pass
# if self.debug:
# log_file = open('docker.log', 'a')
# if pretty_print:
# log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
# log_file.write(u'\n')
# else:
# log_file.write(msg + u'\n')
@abc.abstractmethod
def fail(self, msg, **kwargs):
pass
def deprecate(self, msg, version=None, date=None, collection_name=None):
pass
@staticmethod
def _get_value(param_name, param_value, env_variable, default_value, type='str'):
if param_value is not None:
# take module parameter value
if type == 'bool':
if param_value in BOOLEANS_TRUE:
return True
if param_value in BOOLEANS_FALSE:
return False
return bool(param_value)
if type == 'int':
return int(param_value)
return param_value
if env_variable is not None:
env_value = os.environ.get(env_variable)
if env_value is not None:
# take the env variable value
if param_name == 'cert_path':
return os.path.join(env_value, 'cert.pem')
if param_name == 'cacert_path':
return os.path.join(env_value, 'ca.pem')
if param_name == 'key_path':
return os.path.join(env_value, 'key.pem')
if type == 'bool':
if env_value in BOOLEANS_TRUE:
return True
if env_value in BOOLEANS_FALSE:
return False
return bool(env_value)
if type == 'int':
return int(env_value)
return env_value
# take the default
return default_value
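    # Worked example of the precedence above (values are hypothetical): for
    # param_name='docker_host', a module parameter wins over DOCKER_HOST in
    # the environment, which in turn wins over the default.
    #
    #   _get_value('docker_host', 'tcp://a:2376', 'DOCKER_HOST', DEFAULT_DOCKER_HOST)
    #   # -> 'tcp://a:2376', regardless of the environment
    #   _get_value('docker_host', None, 'DOCKER_HOST', DEFAULT_DOCKER_HOST)
    #   # -> os.environ['DOCKER_HOST'] if set, else DEFAULT_DOCKER_HOST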
@abc.abstractmethod
def _get_params(self):
pass
@property
def auth_params(self):
        # Get authentication credentials.
        # Precedence: module parameters -> environment variables -> defaults.
self.log('Getting credentials')
client_params = self._get_params()
params = dict()
for key in DOCKER_COMMON_ARGS:
params[key] = client_params.get(key)
result = dict(
docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
DEFAULT_DOCKER_HOST, type='str'),
tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
'DOCKER_TLS_HOSTNAME', None, type='str'),
api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
'auto', type='str'),
cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None, type='str'),
cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None, type='str'),
key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None, type='str'),
ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None, type='str'),
tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS, type='bool'),
            tls_verify=self._get_value('tls_verify', params['validate_certs'], 'DOCKER_TLS_VERIFY',
DEFAULT_TLS_VERIFY, type='bool'),
timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
DEFAULT_TIMEOUT_SECONDS, type='int'),
use_ssh_client=self._get_value('use_ssh_client', params['use_ssh_client'], None, False, type='bool'),
)
def depr(*args, **kwargs):
self.deprecate(*args, **kwargs)
update_tls_hostname(result, old_behavior=True, deprecate_function=depr, uses_tls=is_using_tls(result))
return result
def _handle_ssl_error(self, error):
match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
if match:
self.fail("You asked for verification that Docker daemons certificate's hostname matches %s. "
"The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
"or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
"setting the `tls` parameter to true."
% (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
self.fail("SSL Exception: %s" % (error))
def get_container_by_id(self, container_id):
try:
self.log("Inspecting container Id %s" % container_id)
result = self.get_json('/containers/{0}/json', container_id)
self.log("Completed container inspection")
return result
except NotFound as dummy:
return None
except Exception as exc:
self.fail("Error inspecting container: %s" % exc)
def get_container(self, name=None):
        '''
        Look up a container and return the inspection results.
        '''
if name is None:
return None
search_name = name
if not name.startswith('/'):
search_name = '/' + name
result = None
try:
params = {
'limit': -1,
'all': 1,
'size': 0,
'trunc_cmd': 0,
}
containers = self.get_json("/containers/json", params=params)
for container in containers:
self.log("testing container: %s" % (container['Names']))
if isinstance(container['Names'], list) and search_name in container['Names']:
result = container
break
if container['Id'].startswith(name):
result = container
break
if container['Id'] == name:
result = container
break
except SSLError as exc:
self._handle_ssl_error(exc)
except Exception as exc:
self.fail("Error retrieving container list: %s" % exc)
if result is None:
return None
return self.get_container_by_id(result['Id'])
def get_network(self, name=None, network_id=None):
        '''
        Look up a network and return the inspection results.
        '''
if name is None and network_id is None:
return None
result = None
if network_id is None:
try:
networks = self.get_json("/networks")
for network in networks:
self.log("testing network: %s" % (network['Name']))
if name == network['Name']:
result = network
break
if network['Id'].startswith(name):
result = network
break
except SSLError as exc:
self._handle_ssl_error(exc)
except Exception as exc:
self.fail("Error retrieving network list: %s" % exc)
if result is not None:
network_id = result['Id']
if network_id is not None:
try:
self.log("Inspecting network Id %s" % network_id)
result = self.get_json('/networks/{0}', network_id)
self.log("Completed network inspection")
except NotFound as dummy:
return None
except Exception as exc:
self.fail("Error inspecting network: %s" % exc)
return result
def _image_lookup(self, name, tag):
'''
Including a tag in the name parameter sent to the Docker SDK for Python images method
does not work consistently. Instead, get the result set for name and manually check
if the tag exists.
'''
try:
params = {
'only_ids': 0,
'all': 0,
}
if LooseVersion(self.api_version) < LooseVersion('1.25'):
# only use "filter" on API 1.24 and under, as it is deprecated
params['filter'] = name
else:
params['filters'] = convert_filters({'reference': name})
images = self.get_json("/images/json", params=params)
except Exception as exc:
self.fail("Error searching for image %s - %s" % (name, str(exc)))
if tag:
lookup = "%s:%s" % (name, tag)
lookup_digest = "%s@%s" % (name, tag)
response = images
images = []
for image in response:
tags = image.get('RepoTags')
digests = image.get('RepoDigests')
if (tags and lookup in tags) or (digests and lookup_digest in digests):
images = [image]
break
return images
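    # For example (illustrative), on API >= 1.25 a lookup for name='busybox'
    # sends filters={'reference': 'busybox'} to GET /images/json; with
    # tag='1.36' the result list is then narrowed to the image whose RepoTags
    # contains 'busybox:1.36' (or whose RepoDigests contains 'busybox@<digest>'
    # when a digest is passed as the tag).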
def find_image(self, name, tag):
        '''
        Look up an image (by name and tag) and return the inspection results.
        '''
if not name:
return None
self.log("Find image %s:%s" % (name, tag))
images = self._image_lookup(name, tag)
if not images:
            # In API <= 1.20, images pulled from Docker Hub are listed with
            # 'docker.io/<name>' as their name.
registry, repo_name = auth.resolve_repository_name(name)
if registry == 'docker.io':
# If docker.io is explicitly there in name, the image
# isn't found in some cases (#41509)
self.log("Check for docker.io image: %s" % repo_name)
images = self._image_lookup(repo_name, tag)
if not images and repo_name.startswith('library/'):
# Sometimes library/xxx images are not found
lookup = repo_name[len('library/'):]
self.log("Check for docker.io image: %s" % lookup)
images = self._image_lookup(lookup, tag)
if not images:
# Last case for some Docker versions: if docker.io wasn't there,
# it can be that the image wasn't found either
# (https://github.com/ansible/ansible/pull/15586)
lookup = "%s/%s" % (registry, repo_name)
self.log("Check for docker.io image: %s" % lookup)
images = self._image_lookup(lookup, tag)
if not images and '/' not in repo_name:
# This seems to be happening with podman-docker
# (https://github.com/ansible-collections/community.docker/issues/291)
lookup = "%s/library/%s" % (registry, repo_name)
self.log("Check for docker.io image: %s" % lookup)
images = self._image_lookup(lookup, tag)
if len(images) > 1:
self.fail("Registry returned more than one result for %s:%s" % (name, tag))
if len(images) == 1:
try:
return self.get_json('/images/{0}/json', images[0]['Id'])
except NotFound:
self.log("Image %s:%s not found." % (name, tag))
return None
except Exception as exc:
self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
self.log("Image %s:%s not found." % (name, tag))
return None
def find_image_by_id(self, image_id, accept_missing_image=False):
        '''
        Look up an image (by ID) and return the inspection results.
        '''
if not image_id:
return None
self.log("Find image %s (by ID)" % image_id)
try:
return self.get_json('/images/{0}/json', image_id)
except NotFound as exc:
if not accept_missing_image:
self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
self.log("Image %s not found." % image_id)
return None
except Exception as exc:
self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
def pull_image(self, name, tag="latest", platform=None):
'''
Pull an image
'''
self.log("Pulling image %s:%s" % (name, tag))
old_tag = self.find_image(name, tag)
try:
repository, image_tag = parse_repository_tag(name)
registry, repo_name = auth.resolve_repository_name(repository)
params = {
'tag': tag or image_tag or 'latest',
'fromImage': repository,
}
if platform is not None:
params['platform'] = platform
headers = {}
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
response = self._post(
self._url('/images/create'), params=params, headers=headers,
stream=True, timeout=None
)
self._raise_for_status(response)
for line in self._stream_helper(response, decode=True):
self.log(line, pretty_print=True)
if line.get('error'):
if line.get('errorDetail'):
error_detail = line.get('errorDetail')
self.fail("Error pulling %s - code: %s message: %s" % (name,
error_detail.get('code'),
error_detail.get('message')))
else:
self.fail("Error pulling %s - %s" % (name, line.get('error')))
except Exception as exc:
self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))
new_tag = self.find_image(name, tag)
return new_tag, old_tag == new_tag
class AnsibleDockerClient(AnsibleDockerClientBase):
def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
required_together=None, required_if=None, required_one_of=None, required_by=None,
min_docker_api_version=None, option_minimal_versions=None,
option_minimal_versions_ignore_params=None, fail_results=None):
# Modules can put information in here which will always be returned
# in case client.fail() is called.
self.fail_results = fail_results or {}
merged_arg_spec = dict()
merged_arg_spec.update(DOCKER_COMMON_ARGS)
if argument_spec:
merged_arg_spec.update(argument_spec)
self.arg_spec = merged_arg_spec
mutually_exclusive_params = []
mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
if mutually_exclusive:
mutually_exclusive_params += mutually_exclusive
required_together_params = []
required_together_params += DOCKER_REQUIRED_TOGETHER
if required_together:
required_together_params += required_together
self.module = AnsibleModule(
argument_spec=merged_arg_spec,
supports_check_mode=supports_check_mode,
mutually_exclusive=mutually_exclusive_params,
required_together=required_together_params,
required_if=required_if,
required_one_of=required_one_of,
required_by=required_by or {},
)
self.debug = self.module.params.get('debug')
self.check_mode = self.module.check_mode
super(AnsibleDockerClient, self).__init__(min_docker_api_version=min_docker_api_version)
if option_minimal_versions is not None:
self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params)
def fail(self, msg, **kwargs):
self.fail_results.update(kwargs)
self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))
def deprecate(self, msg, version=None, date=None, collection_name=None):
self.module.deprecate(msg, version=version, date=date, collection_name=collection_name)
def _get_params(self):
return self.module.params
def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
self.option_minimal_versions = dict()
for option in self.module.argument_spec:
if ignore_params is not None:
if option in ignore_params:
continue
self.option_minimal_versions[option] = dict()
self.option_minimal_versions.update(option_minimal_versions)
for option, data in self.option_minimal_versions.items():
# Test whether option is supported, and store result
support_docker_api = True
if 'docker_api_version' in data:
support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
data['supported'] = support_docker_api
# Fail if option is not supported but used
if not data['supported']:
# Test whether option is specified
if 'detect_usage' in data:
used = data['detect_usage'](self)
else:
used = self.module.params.get(option) is not None
if used and 'default' in self.module.argument_spec[option]:
used = self.module.params[option] != self.module.argument_spec[option]['default']
if used:
# If the option is used, compose error message.
if 'usage_msg' in data:
usg = data['usage_msg']
else:
usg = 'set %s option' % (option, )
if not support_docker_api:
msg = 'Docker API version is %s. Minimum version required is %s to %s.'
msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
else:
# should not happen
msg = 'Cannot %s with your configuration.' % (usg, )
self.fail(msg)
def report_warnings(self, result, warnings_key=None):
'''
Checks result of client operation for warnings, and if present, outputs them.
warnings_key should be a list of keys used to crawl the result dictionary.
For example, if warnings_key == ['a', 'b'], the function will consider
result['a']['b'] if these keys exist. If the result is a non-empty string, it
will be reported as a warning. If the result is a list, every entry will be
reported as a warning.
In most cases (if warnings are returned at all), warnings_key should be
['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
'''
if warnings_key is None:
warnings_key = ['Warnings']
for key in warnings_key:
if not isinstance(result, Mapping):
return
result = result.get(key)
if isinstance(result, Sequence):
for warning in result:
self.module.warn('Docker warning: {0}'.format(warning))
elif isinstance(result, string_types) and result:
self.module.warn('Docker warning: {0}'.format(result))
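# Usage sketch (hypothetical module; illustrative only): a module built on
# this client typically looks like the following.
#
#   client = AnsibleDockerClient(
#       argument_spec=dict(name=dict(type='str', required=True)),
#       supports_check_mode=True,
#       min_docker_api_version='1.25',
#   )
#   container = client.get_container(client.module.params['name'])
#   client.module.exit_json(changed=False, exists=container is not None)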

View File

@@ -0,0 +1,442 @@
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import base64
import datetime
import io
import json
import os
import os.path
import shutil
import stat
import tarfile
from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
from ansible.module_utils.six import raise_from
from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, NotFound
class DockerFileCopyError(Exception):
pass
class DockerUnexpectedError(DockerFileCopyError):
pass
class DockerFileNotFound(DockerFileCopyError):
pass
def _put_archive(client, container, path, data):
# data can also be file object for streaming. This is because _put uses requests's put().
# See https://requests.readthedocs.io/en/latest/user/advanced/#streaming-uploads
url = client._url('/containers/{0}/archive', container)
res = client._put(url, params={'path': path}, data=data)
client._raise_for_status(res)
return res.status_code == 200
def _symlink_tar_creator(b_in_path, file_stat, out_file, user_id, group_id, mode=None, user_name=None):
if not stat.S_ISLNK(file_stat.st_mode):
raise DockerUnexpectedError('stat information is not for a symlink')
bio = io.BytesIO()
with tarfile.open(fileobj=bio, mode='w|', dereference=False, encoding='utf-8') as tar:
# Note that without both name (bytes) and arcname (unicode), this either fails for
# Python 2.7, Python 3.5/3.6, or Python 3.7+. Only when passing both (in this
# form) it works with Python 2.7, 3.5, 3.6, and 3.7 up to 3.11
tarinfo = tar.gettarinfo(b_in_path, arcname=to_text(out_file))
tarinfo.uid = user_id
tarinfo.uname = ''
if user_name:
tarinfo.uname = user_name
tarinfo.gid = group_id
tarinfo.gname = ''
tarinfo.mode &= 0o700
if mode is not None:
tarinfo.mode = mode
if not tarinfo.issym():
raise DockerUnexpectedError('stat information is not for a symlink')
tar.addfile(tarinfo)
return bio.getvalue()
def _symlink_tar_generator(b_in_path, file_stat, out_file, user_id, group_id, mode=None, user_name=None):
yield _symlink_tar_creator(b_in_path, file_stat, out_file, user_id, group_id, mode, user_name)
def _regular_file_tar_generator(b_in_path, file_stat, out_file, user_id, group_id, mode=None, user_name=None):
if not stat.S_ISREG(file_stat.st_mode):
raise DockerUnexpectedError('stat information is not for a regular file')
tarinfo = tarfile.TarInfo()
tarinfo.name = os.path.splitdrive(to_text(out_file))[1].replace(os.sep, '/').lstrip('/')
tarinfo.mode = (file_stat.st_mode & 0o700) if mode is None else mode
tarinfo.uid = user_id
tarinfo.gid = group_id
tarinfo.size = file_stat.st_size
tarinfo.mtime = file_stat.st_mtime
tarinfo.type = tarfile.REGTYPE
tarinfo.linkname = ''
if user_name:
tarinfo.uname = user_name
tarinfo_buf = tarinfo.tobuf()
total_size = len(tarinfo_buf)
yield tarinfo_buf
size = tarinfo.size
total_size += size
with open(b_in_path, 'rb') as f:
while size > 0:
to_read = min(size, 65536)
buf = f.read(to_read)
if not buf:
break
size -= len(buf)
yield buf
if size:
# If for some reason the file shrunk, fill up to the announced size with zeros.
# (If it enlarged, ignore the remainder.)
yield tarfile.NUL * size
remainder = tarinfo.size % tarfile.BLOCKSIZE
if remainder:
# We need to write a multiple of 512 bytes. Fill up with zeros.
yield tarfile.NUL * (tarfile.BLOCKSIZE - remainder)
total_size += tarfile.BLOCKSIZE - remainder
# End with two zeroed blocks
yield tarfile.NUL * (2 * tarfile.BLOCKSIZE)
total_size += 2 * tarfile.BLOCKSIZE
remainder = total_size % tarfile.RECORDSIZE
if remainder > 0:
yield tarfile.NUL * (tarfile.RECORDSIZE - remainder)
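# Worked example of the padding arithmetic above (illustrative): for a
# 600-byte file, the generator yields one 512-byte header block, 600 bytes of
# data, 512 - (600 % 512) = 424 NUL bytes to complete the last data block, and
# two 512-byte NUL end-of-archive blocks (2560 bytes so far); the record
# padding then adds 10240 - 2560 = 7680 NUL bytes, since tar archives are
# written in RECORDSIZE (10240-byte) units.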
def _regular_content_tar_generator(content, out_file, user_id, group_id, mode, user_name=None):
tarinfo = tarfile.TarInfo()
tarinfo.name = os.path.splitdrive(to_text(out_file))[1].replace(os.sep, '/').lstrip('/')
tarinfo.mode = mode
tarinfo.uid = user_id
tarinfo.gid = group_id
tarinfo.size = len(content)
try:
tarinfo.mtime = int(datetime.datetime.now().timestamp())
except AttributeError:
# Python 2 (or more precisely: Python < 3.3) has no timestamp(). Use the following
# expression for Python 2:
tarinfo.mtime = int((datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds())
tarinfo.type = tarfile.REGTYPE
tarinfo.linkname = ''
if user_name:
tarinfo.uname = user_name
tarinfo_buf = tarinfo.tobuf()
total_size = len(tarinfo_buf)
yield tarinfo_buf
total_size += len(content)
yield content
remainder = tarinfo.size % tarfile.BLOCKSIZE
if remainder:
# We need to write a multiple of 512 bytes. Fill up with zeros.
yield tarfile.NUL * (tarfile.BLOCKSIZE - remainder)
total_size += tarfile.BLOCKSIZE - remainder
# End with two zeroed blocks
yield tarfile.NUL * (2 * tarfile.BLOCKSIZE)
total_size += 2 * tarfile.BLOCKSIZE
remainder = total_size % tarfile.RECORDSIZE
if remainder > 0:
yield tarfile.NUL * (tarfile.RECORDSIZE - remainder)
def put_file(client, container, in_path, out_path, user_id, group_id, mode=None, user_name=None, follow_links=False):
"""Transfer a file from local to Docker container."""
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise DockerFileNotFound(
"file or module does not exist: %s" % to_native(in_path))
b_in_path = to_bytes(in_path, errors='surrogate_or_strict')
out_dir, out_file = os.path.split(out_path)
if follow_links:
file_stat = os.stat(b_in_path)
else:
file_stat = os.lstat(b_in_path)
if stat.S_ISREG(file_stat.st_mode):
stream = _regular_file_tar_generator(b_in_path, file_stat, out_file, user_id, group_id, mode=mode, user_name=user_name)
elif stat.S_ISLNK(file_stat.st_mode):
stream = _symlink_tar_generator(b_in_path, file_stat, out_file, user_id, group_id, mode=mode, user_name=user_name)
else:
raise DockerFileCopyError(
'File{0} {1} is neither a regular file nor a symlink (stat mode {2}).'.format(
' referenced by' if follow_links else '', in_path, oct(file_stat.st_mode)))
ok = _put_archive(client, container, out_dir, stream)
if not ok:
raise DockerUnexpectedError('Unknown error while creating file "{0}" in container "{1}".'.format(out_path, container))
def put_file_content(client, container, content, out_path, user_id, group_id, mode, user_name=None):
"""Transfer a file from local to Docker container."""
out_dir, out_file = os.path.split(out_path)
stream = _regular_content_tar_generator(content, out_file, user_id, group_id, mode, user_name=user_name)
ok = _put_archive(client, container, out_dir, stream)
if not ok:
raise DockerUnexpectedError('Unknown error while creating file "{0}" in container "{1}".'.format(out_path, container))
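# Usage sketch (hypothetical container name; illustrative only):
#
#   put_file_content(client, 'my-container', b'hello\n', '/tmp/hello.txt',
#                    user_id=0, group_id=0, mode=0o644)
#
# This builds a single-entry tar stream in memory and uploads it via
# PUT /containers/my-container/archive with path=/tmp.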
def stat_file(client, container, in_path, follow_links=False, log=None):
"""Fetch information on a file from a Docker container to local.
Return a tuple ``(path, stat_data, link_target)`` where:
:path: is the resolved path in case ``follow_links=True``;
:stat_data: is ``None`` if the file does not exist, or a dictionary with fields
``name`` (string), ``size`` (integer), ``mode`` (integer, see https://pkg.go.dev/io/fs#FileMode),
``mtime`` (string), and ``linkTarget`` (string);
:link_target: is ``None`` if the file is not a symlink or when ``follow_links=False``,
and a string with the symlink target otherwise.
"""
considered_in_paths = set()
while True:
if in_path in considered_in_paths:
            raise DockerFileCopyError('Found infinite symbolic link loop when trying to stat "{0}"'.format(in_path))
considered_in_paths.add(in_path)
if log:
log('FETCH: Stating "%s"' % in_path)
response = client._head(
client._url('/containers/{0}/archive', container),
params={'path': in_path},
)
if response.status_code == 404:
return in_path, None, None
client._raise_for_status(response)
header = response.headers.get('x-docker-container-path-stat')
try:
stat_data = json.loads(base64.b64decode(header))
except Exception as exc:
raise DockerUnexpectedError(
'When retrieving information for {in_path} from {container}, obtained header {header!r} that cannot be loaded as JSON: {exc}'
.format(in_path=in_path, container=container, header=header, exc=exc)
)
# https://pkg.go.dev/io/fs#FileMode: bit 32 - 5 means ModeSymlink
if stat_data['mode'] & (1 << (32 - 5)) != 0:
link_target = stat_data['linkTarget']
if not follow_links:
return in_path, stat_data, link_target
in_path = os.path.join(os.path.split(in_path)[0], link_target)
continue
return in_path, stat_data, None
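# For example (illustrative values), Go's io/fs.FileMode sets bit 27
# (1 << (32 - 5)) for symlinks, so a stat_data entry such as
#
#   {'name': 'link', 'size': 5, 'mode': 134218239, 'linkTarget': '/etc'}
#
# is detected as a symlink because 134218239 & (1 << 27) != 0.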
class _RawGeneratorFileobj(io.RawIOBase):
def __init__(self, stream):
self._stream = stream
self._buf = b''
def readable(self):
return True
def _readinto_from_buf(self, b, index, length):
cpy = min(length - index, len(self._buf))
if cpy:
b[index:index + cpy] = self._buf[:cpy]
self._buf = self._buf[cpy:]
index += cpy
return index
def readinto(self, b):
index = 0
length = len(b)
index = self._readinto_from_buf(b, index, length)
if index == length:
return index
try:
self._buf += next(self._stream)
except StopIteration:
return index
return self._readinto_from_buf(b, index, length)
def _stream_generator_to_fileobj(stream):
'''Given a generator that generates chunks of bytes, create a readable buffered stream.'''
raw = _RawGeneratorFileobj(stream)
return io.BufferedReader(raw)
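# Illustrative sketch: wrapping a chunk generator makes it usable wherever a
# read()-able file object is expected, e.g. by tarfile's streaming mode.
#
#   chunks = iter([b'abc', b'def'])
#   f = _stream_generator_to_fileobj(chunks)
#   f.read(4)  # -> b'abcd'; the buffer pulls chunks from the generator lazily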
def fetch_file_ex(client, container, in_path, process_none, process_regular, process_symlink, process_other, follow_links=False, log=None):
"""Fetch a file (as a tar file entry) from a Docker container to local."""
considered_in_paths = set()
while True:
if in_path in considered_in_paths:
raise DockerFileCopyError('Found infinite symbolic link loop when trying to fetch "{0}"'.format(in_path))
considered_in_paths.add(in_path)
if log:
log('FETCH: Fetching "%s"' % in_path)
try:
stream = client.get_raw_stream(
'/containers/{0}/archive', container,
params={'path': in_path},
headers={'Accept-Encoding': 'identity'},
)
except NotFound:
return process_none(in_path)
with tarfile.open(fileobj=_stream_generator_to_fileobj(stream), mode='r|') as tar:
symlink_member = None
result = None
found = False
for member in tar:
if found:
raise DockerUnexpectedError('Received tarfile contains more than one file!')
found = True
if member.issym():
symlink_member = member
continue
if member.isfile():
result = process_regular(in_path, tar, member)
continue
result = process_other(in_path, member)
if symlink_member:
if not follow_links:
return process_symlink(in_path, symlink_member)
in_path = os.path.join(os.path.split(in_path)[0], symlink_member.linkname)
if log:
log('FETCH: Following symbolic link to "%s"' % in_path)
continue
if found:
return result
raise DockerUnexpectedError('Received tarfile is empty!')
def fetch_file(client, container, in_path, out_path, follow_links=False, log=None):
b_out_path = to_bytes(out_path, errors='surrogate_or_strict')
def process_none(in_path):
raise DockerFileNotFound(
'File {in_path} does not exist in container {container}'
.format(in_path=in_path, container=container)
)
def process_regular(in_path, tar, member):
if not follow_links and os.path.exists(b_out_path):
os.unlink(b_out_path)
in_f = tar.extractfile(member) # in Python 2, this *cannot* be used in `with`...
with open(b_out_path, 'wb') as out_f:
shutil.copyfileobj(in_f, out_f)
return in_path
def process_symlink(in_path, member):
if os.path.exists(b_out_path):
os.unlink(b_out_path)
os.symlink(member.linkname, b_out_path)
return in_path
def process_other(in_path, member):
raise DockerFileCopyError('Remote file "%s" is not a regular file or a symbolic link' % in_path)
return fetch_file_ex(client, container, in_path, process_none, process_regular, process_symlink, process_other, follow_links=follow_links, log=log)
def _execute_command(client, container, command, log=None, check_rc=False):
if log:
log('Executing {command} in {container}'.format(command=command, container=container))
data = {
'Container': container,
'User': '',
'Privileged': False,
'Tty': False,
'AttachStdin': False,
'AttachStdout': True,
'AttachStderr': True,
'Cmd': command,
}
if 'detachKeys' in client._general_configs:
data['detachKeys'] = client._general_configs['detachKeys']
try:
exec_data = client.post_json_to_json('/containers/{0}/exec', container, data=data)
except NotFound as e:
raise_from(
DockerFileCopyError('Could not find container "{container}"'.format(container=container)),
e,
)
except APIError as e:
if e.response is not None and e.response.status_code == 409:
raise_from(
DockerFileCopyError('Cannot execute command in paused container "{container}"'.format(container=container)),
e,
)
raise
exec_id = exec_data['Id']
data = {
'Tty': False,
'Detach': False
}
stdout, stderr = client.post_json_to_stream('/exec/{0}/start', exec_id, stream=False, demux=True, tty=False)
result = client.get_json('/exec/{0}/json', exec_id)
rc = result.get('ExitCode') or 0
stdout = stdout or b''
stderr = stderr or b''
if log:
log('Exit code {rc}, stdout {stdout!r}, stderr {stderr!r}'.format(rc=rc, stdout=stdout, stderr=stderr))
if check_rc and rc != 0:
raise DockerUnexpectedError(
'Obtained unexpected exit code {rc} when running "{command}" in {container}.\nSTDOUT: {stdout}\nSTDERR: {stderr}'
.format(command=' '.join(command), container=container, rc=rc, stdout=stdout, stderr=stderr)
)
return rc, stdout, stderr
def determine_user_group(client, container, log=None):
dummy, stdout, stderr = _execute_command(client, container, ['/bin/sh', '-c', 'id -u && id -g'], check_rc=True, log=log)
stdout_lines = stdout.splitlines()
if len(stdout_lines) != 2:
raise DockerUnexpectedError(
'Expected two-line output to obtain user and group ID for container {container}, but got {lc} lines:\n{stdout}'
.format(container=container, lc=len(stdout_lines), stdout=stdout)
)
user_id, group_id = stdout_lines
try:
return int(user_id), int(group_id)
except ValueError:
raise DockerUnexpectedError(
'Expected two-line output with numeric IDs to obtain user and group ID for container {container}, but got "{l1}" and "{l2}" instead'
.format(container=container, l1=user_id, l2=group_id)
)
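# Usage sketch (hypothetical container name; illustrative only): the copy
# helpers are typically combined like this to write a file with the
# container's default ownership.
#
#   user_id, group_id = determine_user_group(client, 'my-container')
#   put_file(client, 'my-container', '/tmp/src.conf', '/etc/app/app.conf',
#            user_id, group_id, mode=0o644)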

View File

@@ -0,0 +1,157 @@
# Copyright 2022 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import tarfile
from ansible.module_utils.common.text.converters import to_native
class ImageArchiveManifestSummary(object):
'''
Represents data extracted from a manifest.json found in the tar archive output of the
"docker image save some:tag > some.tar" command.
'''
def __init__(self, image_id, repo_tags):
'''
:param image_id: File name portion of Config entry, e.g. abcde12345 from abcde12345.json
:type image_id: str
        :param repo_tags: Docker image names, e.g. ["hello-world:latest"]
:type repo_tags: list
'''
self.image_id = image_id
self.repo_tags = repo_tags
class ImageArchiveInvalidException(Exception):
def __init__(self, message, cause):
'''
:param message: Exception message
:type message: str
:param cause: Inner exception that this exception wraps
:type cause: Exception | None
'''
super(ImageArchiveInvalidException, self).__init__(message)
# Python 2 doesn't support causes
self.cause = cause
def api_image_id(archive_image_id):
'''
Accepts an image hash in the format stored in manifest.json, and returns an equivalent identifier
that represents the same image hash, but in the format presented by the Docker Engine API.
:param archive_image_id: plain image hash
:type archive_image_id: str
:returns: Prefixed hash used by REST api
:rtype: str
'''
return 'sha256:%s' % archive_image_id
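# For example, api_image_id('e38bc07ac18e') returns 'sha256:e38bc07ac18e',
# the identifier format used by endpoints such as GET /images/{id}/json.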
def archived_image_manifest(archive_path):
'''
Attempts to get Image.Id and image name from metadata stored in the image
archive tar file.
The tar should contain a file "manifest.json" with an array with a single entry,
and the entry should have a Config field with the image ID in its file name, as
well as a RepoTags list, which typically has only one entry.
    :raises:
        ImageArchiveInvalidException: A file exists at archive_path, but an image ID could not be extracted from it.
:param archive_path: Tar file to read
:type archive_path: str
    :return: None if there is no file at archive_path, or the extracted manifest summary, whose image_id has no sha256: prefix.
:rtype: ImageArchiveManifestSummary
'''
try:
# FileNotFoundError does not exist in Python 2
if not os.path.isfile(archive_path):
return None
tf = tarfile.open(archive_path, 'r')
try:
try:
ef = tf.extractfile('manifest.json')
try:
text = ef.read().decode('utf-8')
manifest = json.loads(text)
except Exception as exc:
raise ImageArchiveInvalidException(
"Failed to decode and deserialize manifest.json: %s" % to_native(exc),
exc
)
finally:
# In Python 2.6, this does not have __exit__
ef.close()
if len(manifest) != 1:
raise ImageArchiveInvalidException(
"Expected to have one entry in manifest.json but found %s" % len(manifest),
None
)
m0 = manifest[0]
try:
config_file = m0['Config']
except KeyError as exc:
raise ImageArchiveInvalidException(
"Failed to get Config entry from manifest.json: %s" % to_native(exc),
exc
)
# Extracts hash without 'sha256:' prefix
try:
# Strip off .json filename extension, leaving just the hash.
image_id = os.path.splitext(config_file)[0]
except Exception as exc:
raise ImageArchiveInvalidException(
"Failed to extract image id from config file name %s: %s" % (config_file, to_native(exc)),
exc
)
try:
repo_tags = m0['RepoTags']
except KeyError as exc:
raise ImageArchiveInvalidException(
"Failed to get RepoTags entry from manifest.json: %s" % to_native(exc),
exc
)
return ImageArchiveManifestSummary(
image_id=image_id,
repo_tags=repo_tags
)
except ImageArchiveInvalidException:
raise
except Exception as exc:
raise ImageArchiveInvalidException(
"Failed to extract manifest.json from tar file %s: %s" % (archive_path, to_native(exc)),
exc
)
finally:
# In Python 2.6, TarFile does not have __exit__
tf.close()
except ImageArchiveInvalidException:
raise
except Exception as exc:
raise ImageArchiveInvalidException("Failed to open tar file %s: %s" % (archive_path, to_native(exc)), exc)

View File

@@ -0,0 +1,843 @@
# Copyright (c) 2022 Felix Fontein <felix@fontein.de>
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import re
from time import sleep
from ansible.module_utils.common.text.converters import to_native, to_text
from ansible_collections.community.docker.plugins.module_utils.util import (
DifferenceTracker,
DockerBaseClass,
compare_generic,
is_image_name_id,
sanitize_result,
)
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import parse_repository_tag
class Container(DockerBaseClass):
def __init__(self, container, engine_driver):
super(Container, self).__init__()
self.raw = container
self.id = None
self.image = None
self.image_name = None
self.container = container
self.engine_driver = engine_driver
if container:
self.id = engine_driver.get_container_id(container)
self.image = engine_driver.get_image_from_container(container)
self.image_name = engine_driver.get_image_name_from_container(container)
self.log(self.container, pretty_print=True)
@property
def exists(self):
return True if self.container else False
@property
def removing(self):
return self.engine_driver.is_container_removing(self.container) if self.container else False
@property
def running(self):
return self.engine_driver.is_container_running(self.container) if self.container else False
@property
def paused(self):
return self.engine_driver.is_container_paused(self.container) if self.container else False
class ContainerManager(DockerBaseClass):
def __init__(self, module, engine_driver, client, active_options):
self.module = module
self.engine_driver = engine_driver
self.client = client
self.options = active_options
self.all_options = self._collect_all_options(active_options)
self.check_mode = self.module.check_mode
self.param_cleanup = self.module.params['cleanup']
self.param_container_default_behavior = self.module.params['container_default_behavior']
self.param_default_host_ip = self.module.params['default_host_ip']
self.param_debug = self.module.params['debug']
self.param_force_kill = self.module.params['force_kill']
self.param_image = self.module.params['image']
self.param_image_comparison = self.module.params['image_comparison']
self.param_image_label_mismatch = self.module.params['image_label_mismatch']
self.param_image_name_mismatch = self.module.params['image_name_mismatch']
self.param_keep_volumes = self.module.params['keep_volumes']
self.param_kill_signal = self.module.params['kill_signal']
self.param_name = self.module.params['name']
self.param_networks_cli_compatible = self.module.params['networks_cli_compatible']
self.param_output_logs = self.module.params['output_logs']
self.param_paused = self.module.params['paused']
self.param_pull = self.module.params['pull']
self.param_recreate = self.module.params['recreate']
self.param_removal_wait_timeout = self.module.params['removal_wait_timeout']
self.param_restart = self.module.params['restart']
self.param_state = self.module.params['state']
self._parse_comparisons()
self._update_params()
self.results = {'changed': False, 'actions': []}
self.diff = {}
self.diff_tracker = DifferenceTracker()
self.facts = {}
if self.param_default_host_ip:
valid_ip = False
if re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', self.param_default_host_ip):
valid_ip = True
if re.match(r'^\[[0-9a-fA-F:]+\]$', self.param_default_host_ip):
valid_ip = True
if re.match(r'^[0-9a-fA-F:]+$', self.param_default_host_ip):
self.param_default_host_ip = '[{0}]'.format(self.param_default_host_ip)
valid_ip = True
if not valid_ip:
self.fail('The value of default_host_ip must be an empty string, an IPv4 address, '
'or an IPv6 address. Got "{0}" instead.'.format(self.param_default_host_ip))
def _collect_all_options(self, active_options):
all_options = {}
for options in active_options:
for option in options.options:
all_options[option.name] = option
return all_options
def _collect_all_module_params(self):
all_module_options = set()
for option, data in self.module.argument_spec.items():
all_module_options.add(option)
if 'aliases' in data:
for alias in data['aliases']:
all_module_options.add(alias)
return all_module_options
def _parse_comparisons(self):
# Keep track of all module params and all option aliases
all_module_options = self._collect_all_module_params()
comp_aliases = {}
for option_name, option in self.all_options.items():
if option.not_an_ansible_option:
continue
comp_aliases[option_name] = option_name
for alias in option.ansible_aliases:
comp_aliases[alias] = option_name
# Process legacy ignore options
if self.module.params['ignore_image']:
self.all_options['image'].comparison = 'ignore'
if self.module.params['purge_networks']:
self.all_options['networks'].comparison = 'strict'
        # Process comparisons specified by user
if self.module.params.get('comparisons'):
# If '*' appears in comparisons, process it first
if '*' in self.module.params['comparisons']:
value = self.module.params['comparisons']['*']
if value not in ('strict', 'ignore'):
self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!")
for option in self.all_options.values():
if option.name == 'networks':
# `networks` is special: only update if
# some value is actually specified
if self.module.params['networks'] is None:
continue
option.comparison = value
# Now process all other comparisons.
comp_aliases_used = {}
for key, value in self.module.params['comparisons'].items():
if key == '*':
continue
# Find main key
key_main = comp_aliases.get(key)
                if key_main is None:
                    if key in all_module_options:
                        self.fail("The module option '%s' cannot be specified in the comparisons dict, "
                                  "since it does not correspond to the container's state!" % key)
if key not in self.all_options or self.all_options[key].not_an_ansible_option:
self.fail("Unknown module option '%s' in comparisons dict!" % key)
key_main = key
if key_main in comp_aliases_used:
self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main))
comp_aliases_used[key_main] = key
# Check value and update accordingly
if value in ('strict', 'ignore'):
self.all_options[key_main].comparison = value
elif value == 'allow_more_present':
if self.all_options[key_main].comparison_type == 'value':
self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value))
self.all_options[key_main].comparison = value
else:
self.fail("Unknown comparison mode '%s'!" % value)
# Copy values
for option in self.all_options.values():
if option.copy_comparison_from is not None:
option.comparison = self.all_options[option.copy_comparison_from].comparison
# Check legacy values
if self.module.params['ignore_image'] and self.all_options['image'].comparison != 'ignore':
self.module.warn('The ignore_image option has been overridden by the comparisons option!')
if self.module.params['purge_networks'] and self.all_options['networks'].comparison != 'strict':
self.module.warn('The purge_networks option has been overridden by the comparisons option!')
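    # Example comparisons input (illustrative): with
    #
    #   comparisons:
    #     '*': ignore
    #     env: strict
    #     published_ports: allow_more_present
    #
    # every option defaults to 'ignore', environment variables are compared
    # strictly, and extra published ports on the existing container are
    # tolerated; 'allow_more_present' is rejected for plain value options.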
def _update_params(self):
if self.param_networks_cli_compatible is True and self.module.params['networks'] and self.module.params['network_mode'] is None:
# Same behavior as Docker CLI: if networks are specified, use the name of the first network as the value for network_mode
# (assuming no explicit value is specified for network_mode)
self.module.params['network_mode'] = self.module.params['networks'][0]['name']
if self.param_container_default_behavior == 'compatibility':
old_default_values = dict(
auto_remove=False,
detach=True,
init=False,
interactive=False,
memory='0',
paused=False,
privileged=False,
read_only=False,
tty=False,
)
for param, value in old_default_values.items():
if self.module.params[param] is None:
self.module.params[param] = value
def fail(self, *args, **kwargs):
self.client.fail(*args, **kwargs)
def run(self):
if self.param_state in ('stopped', 'started', 'present'):
self.present(self.param_state)
elif self.param_state == 'absent':
self.absent()
if not self.check_mode and not self.param_debug:
self.results.pop('actions')
if self.module._diff or self.param_debug:
self.diff['before'], self.diff['after'] = self.diff_tracker.get_before_after()
self.results['diff'] = self.diff
if self.facts:
self.results['container'] = self.facts
def wait_for_state(self, container_id, complete_states=None, wait_states=None, accept_removal=False, max_wait=None):
delay = 1.0
total_wait = 0
while True:
# Inspect container
result = self.engine_driver.inspect_container_by_id(self.client, container_id)
if result is None:
if accept_removal:
return
                msg = 'Encountered vanished container while waiting for container "{0}"'
self.fail(msg.format(container_id))
# Check container state
state = result.get('State', {}).get('Status')
if complete_states is not None and state in complete_states:
return
if wait_states is not None and state not in wait_states:
                msg = 'Encountered unexpected state "{1}" while waiting for container "{0}"'
self.fail(msg.format(container_id, state))
# Wait
if max_wait is not None:
if total_wait > max_wait:
msg = 'Timeout of {1} seconds exceeded while waiting for container "{0}"'
self.fail(msg.format(container_id, max_wait))
if total_wait + delay > max_wait:
delay = max_wait - total_wait
sleep(delay)
total_wait += delay
# Exponential backoff, but never wait longer than 10 seconds
# (1.1**24 < 10, 1.1**25 > 10, so it will take 25 iterations
# until the maximal 10 seconds delay is reached. By then, the
# code will have slept for ~1.5 minutes.)
delay = min(delay * 1.1, 10)
def _collect_params(self, active_options):
parameters = []
for options in active_options:
values = {}
engine = options.get_engine(self.engine_driver.name)
for option in options.all_options:
if not option.not_an_ansible_option and self.module.params[option.name] is not None:
values[option.name] = self.module.params[option.name]
values = options.preprocess(self.module, values)
engine.preprocess_value(self.module, self.client, self.engine_driver.get_api_version(self.client), options.options, values)
parameters.append((options, values))
return parameters
def present(self, state):
self.parameters = self._collect_params(self.options)
container = self._get_container(self.param_name)
was_running = container.running
was_paused = container.paused
container_created = False
        # If the image parameter was passed, we need to deal with the image
        # version comparison. Otherwise we handle this depending on whether
        # the container already runs or not; if it does and needs to be
        # recreated, we use the existing container's image ID.
image, comparison_image = self._get_image(container)
self.log(image, pretty_print=True)
if not container.exists or container.removing:
# New container
if container.removing:
self.log('Found container in removal phase')
else:
self.log('No container found')
if not self.param_image:
self.fail('Cannot create container when image is not specified!')
self.diff_tracker.add('exists', parameter=True, active=False)
if container.removing and not self.check_mode:
# Wait for container to be removed before trying to create it
self.wait_for_state(
container.id, wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout)
new_container = self.container_create(self.param_image)
if new_container:
container = new_container
container_created = True
else:
# Existing container
different, differences = self.has_different_configuration(container, comparison_image)
image_different = False
if self.all_options['image'].comparison == 'strict':
image_different = self._image_is_different(image, container)
if self.param_image_name_mismatch == 'recreate' and self.param_image is not None and self.param_image != container.image_name:
different = True
self.diff_tracker.add('image_name', parameter=self.param_image, active=container.image_name)
if image_different or different or self.param_recreate:
self.diff_tracker.merge(differences)
self.diff['differences'] = differences.get_legacy_docker_container_diffs()
if image_different:
self.diff['image_different'] = True
self.log("differences")
self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True)
image_to_use = self.param_image
if not image_to_use and container and container.image:
image_to_use = container.image
if not image_to_use:
self.fail('Cannot recreate container when image is not specified or cannot be extracted from current container!')
if container.running:
self.container_stop(container.id)
self.container_remove(container.id)
if not self.check_mode:
self.wait_for_state(
container.id, wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout)
new_container = self.container_create(image_to_use)
if new_container:
container = new_container
container_created = True
comparison_image = image
if container and container.exists:
container = self.update_limits(container, comparison_image)
container = self.update_networks(container, container_created)
if state == 'started' and not container.running:
self.diff_tracker.add('running', parameter=True, active=was_running)
container = self.container_start(container.id)
elif state == 'started' and self.param_restart:
self.diff_tracker.add('running', parameter=True, active=was_running)
self.diff_tracker.add('restarted', parameter=True, active=False)
container = self.container_restart(container.id)
elif state == 'stopped' and container.running:
self.diff_tracker.add('running', parameter=False, active=was_running)
self.container_stop(container.id)
container = self._get_container(container.id)
if state == 'started' and self.param_paused is not None and container.paused != self.param_paused:
self.diff_tracker.add('paused', parameter=self.param_paused, active=was_paused)
if not self.check_mode:
try:
if self.param_paused:
self.engine_driver.pause_container(self.client, container.id)
else:
self.engine_driver.unpause_container(self.client, container.id)
except Exception as exc:
self.fail("Error %s container %s: %s" % (
"pausing" if self.param_paused else "unpausing", container.id, to_native(exc)
))
container = self._get_container(container.id)
self.results['changed'] = True
self.results['actions'].append(dict(set_paused=self.param_paused))
self.facts = container.raw
def absent(self):
container = self._get_container(self.param_name)
if container.exists:
if container.running:
self.diff_tracker.add('running', parameter=False, active=True)
self.container_stop(container.id)
self.diff_tracker.add('exists', parameter=False, active=True)
self.container_remove(container.id)
def _output_logs(self, msg):
self.module.log(msg=msg)
def _get_container(self, container):
'''
        Expects a container ID or name. Returns a Container object.
'''
container = self.engine_driver.inspect_container_by_name(self.client, container)
return Container(container, self.engine_driver)
def _get_container_image(self, container, fallback=None):
if not container.exists or container.removing:
return fallback
image = container.image
if is_image_name_id(image):
image = self.engine_driver.inspect_image_by_id(self.client, image)
else:
repository, tag = parse_repository_tag(image)
if not tag:
tag = "latest"
image = self.engine_driver.inspect_image_by_name(self.client, repository, tag)
return image or fallback
def _get_image(self, container):
image_parameter = self.param_image
if not image_parameter:
self.log('No image specified')
return None, self._get_container_image(container)
if is_image_name_id(image_parameter):
image = self.engine_driver.inspect_image_by_id(self.client, image_parameter)
else:
repository, tag = parse_repository_tag(image_parameter)
if not tag:
tag = "latest"
image = self.engine_driver.inspect_image_by_name(self.client, repository, tag)
if not image or self.param_pull:
if not self.check_mode:
self.log("Pull the image.")
image, alreadyToLatest = self.engine_driver.pull_image(
self.client, repository, tag, platform=self.module.params['platform'])
if alreadyToLatest:
self.results['changed'] = False
else:
self.results['changed'] = True
self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
elif not image:
# If the image isn't there, claim we'll pull.
# (Implicitly: if the image is there, claim it already was latest.)
self.results['changed'] = True
self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
self.log("image")
self.log(image, pretty_print=True)
comparison_image = image
if self.param_image_comparison == 'current-image':
comparison_image = self._get_container_image(container, image)
if comparison_image != image:
self.log("current image")
self.log(comparison_image, pretty_print=True)
return image, comparison_image
def _image_is_different(self, image, container):
if image and image.get('Id'):
if container and container.image:
if image.get('Id') != container.image:
self.diff_tracker.add('image', parameter=image.get('Id'), active=container.image)
return True
return False
def _compose_create_parameters(self, image):
params = {}
for options, values in self.parameters:
engine = options.get_engine(self.engine_driver.name)
if engine.can_set_value(self.engine_driver.get_api_version(self.client)):
engine.set_value(self.module, params, self.engine_driver.get_api_version(self.client), options.options, values)
params['Image'] = image
return params
def _record_differences(self, differences, options, param_values, engine, container, image):
container_values = engine.get_value(self.module, container.raw, self.engine_driver.get_api_version(self.client), options.options)
expected_values = engine.get_expected_values(
self.module, self.client, self.engine_driver.get_api_version(self.client), options.options, image, param_values.copy())
for option in options.options:
if option.name in expected_values:
param_value = expected_values[option.name]
container_value = container_values.get(option.name)
match = compare_generic(param_value, container_value, option.comparison, option.comparison_type)
if not match:
# No match.
if engine.ignore_mismatching_result(self.module, self.client, self.engine_driver.get_api_version(self.client),
option, image, container_value, param_value):
# Ignore the result
continue
# Record the differences
p = param_value
c = container_value
if option.comparison_type == 'set':
# Since the order does not matter, sort so that the diff output is better.
if p is not None:
p = sorted(p)
if c is not None:
c = sorted(c)
elif option.comparison_type == 'set(dict)':
# Since the order does not matter, sort so that the diff output is better.
if option.name == 'expected_mounts':
# For selected values, use one entry as key
def sort_key_fn(x):
return x['target']
else:
# We sort the list of dictionaries by using the sorted items of a dict as its key.
def sort_key_fn(x):
return sorted((a, to_text(b, errors='surrogate_or_strict')) for a, b in x.items())
if p is not None:
p = sorted(p, key=sort_key_fn)
if c is not None:
c = sorted(c, key=sort_key_fn)
differences.add(option.name, parameter=p, active=c)
def has_different_configuration(self, container, image):
differences = DifferenceTracker()
update_differences = DifferenceTracker()
for options, param_values in self.parameters:
engine = options.get_engine(self.engine_driver.name)
if engine.can_update_value(self.engine_driver.get_api_version(self.client)):
self._record_differences(update_differences, options, param_values, engine, container, image)
else:
self._record_differences(differences, options, param_values, engine, container, image)
has_differences = not differences.empty
# Only consider differences of properties that can be updated when there are also other differences
if has_differences:
differences.merge(update_differences)
return has_differences, differences
def has_different_resource_limits(self, container, image):
differences = DifferenceTracker()
for options, param_values in self.parameters:
engine = options.get_engine(self.engine_driver.name)
if not engine.can_update_value(self.engine_driver.get_api_version(self.client)):
continue
self._record_differences(differences, options, param_values, engine, container, image)
has_differences = not differences.empty
return has_differences, differences
def _compose_update_parameters(self):
result = {}
for options, values in self.parameters:
engine = options.get_engine(self.engine_driver.name)
if not engine.can_update_value(self.engine_driver.get_api_version(self.client)):
continue
engine.update_value(self.module, result, self.engine_driver.get_api_version(self.client), options.options, values)
return result
def update_limits(self, container, image):
limits_differ, different_limits = self.has_different_resource_limits(container, image)
if limits_differ:
self.log("limit differences:")
self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True)
self.diff_tracker.merge(different_limits)
if limits_differ and not self.check_mode:
self.container_update(container.id, self._compose_update_parameters())
return self._get_container(container.id)
return container
def has_network_differences(self, container):
'''
Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6
'''
different = False
differences = []
if not self.module.params['networks']:
return different, differences
if not container.container.get('NetworkSettings'):
self.fail("has_missing_networks: Error parsing container properties. NetworkSettings missing.")
connected_networks = container.container['NetworkSettings']['Networks']
for network in self.module.params['networks']:
network_info = connected_networks.get(network['name'])
if network_info is None:
different = True
differences.append(dict(
parameter=network,
container=None
))
else:
diff = False
network_info_ipam = network_info.get('IPAMConfig') or {}
if network.get('ipv4_address') and network['ipv4_address'] != network_info_ipam.get('IPv4Address'):
diff = True
if network.get('ipv6_address') and network['ipv6_address'] != network_info_ipam.get('IPv6Address'):
diff = True
if network.get('aliases'):
if not compare_generic(network['aliases'], network_info.get('Aliases'), 'allow_more_present', 'set'):
diff = True
if network.get('links'):
expected_links = []
for link, alias in network['links']:
expected_links.append("%s:%s" % (link, alias))
if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'):
diff = True
if diff:
different = True
differences.append(dict(
parameter=network,
container=dict(
name=network['name'],
ipv4_address=network_info_ipam.get('IPv4Address'),
ipv6_address=network_info_ipam.get('IPv6Address'),
aliases=network_info.get('Aliases'),
links=network_info.get('Links')
)
))
return different, differences
def has_extra_networks(self, container):
'''
Check if the container is connected to non-requested networks
'''
extra_networks = []
extra = False
if not container.container.get('NetworkSettings'):
self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.")
connected_networks = container.container['NetworkSettings'].get('Networks')
if connected_networks:
for network, network_config in connected_networks.items():
keep = False
if self.module.params['networks']:
for expected_network in self.module.params['networks']:
if expected_network['name'] == network:
keep = True
if not keep:
extra = True
extra_networks.append(dict(name=network, id=network_config['NetworkID']))
return extra, extra_networks
def update_networks(self, container, container_created):
updated_container = container
if self.all_options['networks'].comparison != 'ignore' or container_created:
has_network_differences, network_differences = self.has_network_differences(container)
if has_network_differences:
if self.diff.get('differences'):
self.diff['differences'].append(dict(network_differences=network_differences))
else:
self.diff['differences'] = [dict(network_differences=network_differences)]
for netdiff in network_differences:
self.diff_tracker.add(
'network.{0}'.format(netdiff['parameter']['name']),
parameter=netdiff['parameter'],
active=netdiff['container']
)
self.results['changed'] = True
updated_container = self._add_networks(container, network_differences)
purge_networks = self.all_options['networks'].comparison == 'strict' and self.module.params['networks'] is not None
if not purge_networks and self.module.params['purge_networks']:
purge_networks = True
self.module.deprecate(
'The purge_networks option is used while networks is not specified. In this case purge_networks=true cannot'
' be replaced by `networks: strict` in comparisons, which is necessary once purge_networks is removed.'
' Please modify the docker_container invocation by adding `networks: []`',
version='4.0.0', collection_name='community.docker')
if purge_networks:
has_extra_networks, extra_networks = self.has_extra_networks(container)
if has_extra_networks:
if self.diff.get('differences'):
self.diff['differences'].append(dict(purge_networks=extra_networks))
else:
self.diff['differences'] = [dict(purge_networks=extra_networks)]
for extra_network in extra_networks:
self.diff_tracker.add(
'network.{0}'.format(extra_network['name']),
active=extra_network
)
self.results['changed'] = True
updated_container = self._purge_networks(container, extra_networks)
return updated_container
def _add_networks(self, container, differences):
for diff in differences:
# remove the container from the network, if connected
if diff.get('container'):
self.results['actions'].append(dict(removed_from_network=diff['parameter']['name']))
if not self.check_mode:
try:
self.engine_driver.disconnect_container_from_network(self.client, container.id, diff['parameter']['id'])
except Exception as exc:
self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'],
to_native(exc)))
# connect to the network
self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=diff['parameter']))
if not self.check_mode:
params = {key: value for key, value in diff['parameter'].items() if key not in ('id', 'name')}
try:
self.log("Connecting container to network %s" % diff['parameter']['id'])
self.log(params, pretty_print=True)
self.engine_driver.connect_container_to_network(self.client, container.id, diff['parameter']['id'], params)
except Exception as exc:
self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], to_native(exc)))
return self._get_container(container.id)
def _purge_networks(self, container, networks):
for network in networks:
self.results['actions'].append(dict(removed_from_network=network['name']))
if not self.check_mode:
try:
self.engine_driver.disconnect_container_from_network(self.client, container.id, network['name'])
except Exception as exc:
self.fail("Error disconnecting container from network %s - %s" % (network['name'],
to_native(exc)))
return self._get_container(container.id)
def container_create(self, image):
create_parameters = self._compose_create_parameters(image)
self.log("create container")
self.log("image: %s parameters:" % image)
self.log(create_parameters, pretty_print=True)
self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters))
self.results['changed'] = True
new_container = None
if not self.check_mode:
try:
container_id = self.engine_driver.create_container(self.client, self.param_name, create_parameters)
except Exception as exc:
self.fail("Error creating container: %s" % to_native(exc))
return self._get_container(container_id)
return new_container
def container_start(self, container_id):
self.log("start container %s" % (container_id))
self.results['actions'].append(dict(started=container_id))
self.results['changed'] = True
if not self.check_mode:
try:
self.engine_driver.start_container(self.client, container_id)
except Exception as exc:
self.fail("Error starting container %s: %s" % (container_id, to_native(exc)))
if self.module.params['detach'] is False:
status = self.engine_driver.wait_for_container(self.client, container_id)
self.client.fail_results['status'] = status
self.results['status'] = status
if self.module.params['auto_remove']:
output = "Cannot retrieve result as auto_remove is enabled"
if self.param_output_logs:
self.module.warn('Cannot output_logs if auto_remove is enabled!')
else:
output, real_output = self.engine_driver.get_container_output(self.client, container_id)
if real_output and self.param_output_logs:
self._output_logs(msg=output)
if self.param_cleanup:
self.container_remove(container_id, force=True)
insp = self._get_container(container_id)
if insp.raw:
insp.raw['Output'] = output
else:
insp.raw = dict(Output=output)
if status != 0:
# Set `failed` to True and return output as msg
self.results['failed'] = True
self.results['msg'] = output
return insp
return self._get_container(container_id)
def container_remove(self, container_id, link=False, force=False):
volume_state = (not self.param_keep_volumes)
self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force))
self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force))
self.results['changed'] = True
if not self.check_mode:
try:
self.engine_driver.remove_container(self.client, container_id, remove_volumes=volume_state, link=link, force=force)
except Exception as exc:
self.client.fail("Error removing container %s: %s" % (container_id, to_native(exc)))
def container_update(self, container_id, update_parameters):
if update_parameters:
self.log("update container %s" % (container_id))
self.log(update_parameters, pretty_print=True)
self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters))
self.results['changed'] = True
if not self.check_mode:
try:
self.engine_driver.update_container(self.client, container_id, update_parameters)
except Exception as exc:
self.fail("Error updating container %s: %s" % (container_id, to_native(exc)))
return self._get_container(container_id)
def container_kill(self, container_id):
self.results['actions'].append(dict(killed=container_id, signal=self.param_kill_signal))
self.results['changed'] = True
if not self.check_mode:
try:
self.engine_driver.kill_container(self.client, container_id, kill_signal=self.param_kill_signal)
except Exception as exc:
self.fail("Error killing container %s: %s" % (container_id, to_native(exc)))
def container_restart(self, container_id):
self.results['actions'].append(dict(restarted=container_id, timeout=self.module.params['stop_timeout']))
self.results['changed'] = True
if not self.check_mode:
try:
self.engine_driver.restart_container(self.client, container_id, self.module.params['stop_timeout'] or 10)
except Exception as exc:
self.fail("Error restarting container %s: %s" % (container_id, to_native(exc)))
return self._get_container(container_id)
def container_stop(self, container_id):
if self.param_force_kill:
self.container_kill(container_id)
return
self.results['actions'].append(dict(stopped=container_id, timeout=self.module.params['stop_timeout']))
self.results['changed'] = True
if not self.check_mode:
try:
self.engine_driver.stop_container(self.client, container_id, self.module.params['stop_timeout'])
except Exception as exc:
self.fail("Error stopping container %s: %s" % (container_id, to_native(exc)))
def run_module(engine_driver):
module, active_options, client = engine_driver.setup(
argument_spec=dict(
cleanup=dict(type='bool', default=False),
comparisons=dict(type='dict'),
container_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']),
command_handling=dict(type='str', choices=['compatibility', 'correct'], default='correct'),
default_host_ip=dict(type='str'),
force_kill=dict(type='bool', default=False, aliases=['forcekill']),
ignore_image=dict(type='bool', default=False, removed_in_version='4.0.0', removed_from_collection='community.docker'),
image=dict(type='str'),
image_comparison=dict(type='str', choices=['desired-image', 'current-image'], default='desired-image'),
image_label_mismatch=dict(type='str', choices=['ignore', 'fail'], default='ignore'),
image_name_mismatch=dict(type='str', choices=['ignore', 'recreate'], default='ignore'),
keep_volumes=dict(type='bool', default=True),
kill_signal=dict(type='str'),
name=dict(type='str', required=True),
networks_cli_compatible=dict(type='bool', default=True),
output_logs=dict(type='bool', default=False),
paused=dict(type='bool'),
pull=dict(type='bool', default=False),
purge_networks=dict(type='bool', default=False, removed_in_version='4.0.0', removed_from_collection='community.docker'),
recreate=dict(type='bool', default=False),
removal_wait_timeout=dict(type='float'),
restart=dict(type='bool', default=False),
state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']),
),
required_if=[
('state', 'present', ['image'])
],
)
def execute():
cm = ContainerManager(module, engine_driver, client, active_options)
cm.run()
module.exit_json(**sanitize_result(cm.results))
engine_driver.run(execute, client)

View File

@@ -0,0 +1,211 @@
# Copyright (c) 2019-2021, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import os.path
import socket as pysocket
from ansible.module_utils.basic import missing_required_lib
from ansible.module_utils.six import PY2
try:
from docker.utils import socket as docker_socket
import struct
except Exception:
# missing Docker SDK for Python handled in ansible_collections.community.docker.plugins.module_utils.common
pass
from ansible_collections.community.docker.plugins.module_utils.socket_helper import (
make_unblocking,
shutdown_writing,
write_to_socket,
)
PARAMIKO_POLL_TIMEOUT = 0.01 # 10 milliseconds
class DockerSocketHandlerBase(object):
def __init__(self, sock, selectors, log=None):
make_unblocking(sock)
self._selectors = selectors
if log is not None:
self._log = log
else:
self._log = lambda msg: True
self._paramiko_read_workaround = hasattr(sock, 'send_ready') and 'paramiko' in str(type(sock))
self._sock = sock
self._block_done_callback = None
self._block_buffer = []
self._eof = False
self._read_buffer = b''
self._write_buffer = b''
self._end_of_writing = False
self._current_stream = None
self._current_missing = 0
self._current_buffer = b''
self._selector = self._selectors.DefaultSelector()
self._selector.register(self._sock, self._selectors.EVENT_READ)
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self._selector.close()
def set_block_done_callback(self, block_done_callback):
self._block_done_callback = block_done_callback
if self._block_done_callback is not None:
while self._block_buffer:
elt = self._block_buffer.pop(0)  # pop(0) returns the first element; list.remove(0) would search for the value 0 and return None
self._block_done_callback(*elt)
def _add_block(self, stream_id, data):
if self._block_done_callback is not None:
self._block_done_callback(stream_id, data)
else:
self._block_buffer.append((stream_id, data))
def _read(self):
if self._eof:
return
if hasattr(self._sock, 'recv'):
try:
data = self._sock.recv(262144)
except Exception as e:
# After calling self._sock.shutdown(), OpenSSL's/urllib3's
# WrappedSocket seems to eventually raise ZeroReturnError in
# case of EOF
if 'OpenSSL.SSL.ZeroReturnError' in str(type(e)):
self._eof = True
return
else:
raise
elif not PY2 and isinstance(self._sock, getattr(pysocket, 'SocketIO')):
data = self._sock.read()
else:
data = os.read(self._sock.fileno(), 262144)  # os.read() requires an explicit byte count
if data is None:
# no data available
return
self._log('read {0} bytes'.format(len(data)))
if len(data) == 0:
# Stream EOF
self._eof = True
return
self._read_buffer += data
while len(self._read_buffer) > 0:
if self._current_missing > 0:
n = min(len(self._read_buffer), self._current_missing)
self._current_buffer += self._read_buffer[:n]
self._read_buffer = self._read_buffer[n:]
self._current_missing -= n
if self._current_missing == 0:
self._add_block(self._current_stream, self._current_buffer)
self._current_buffer = b''
if len(self._read_buffer) < 8:
break
self._current_stream, self._current_missing = struct.unpack('>BxxxL', self._read_buffer[:8])
self._read_buffer = self._read_buffer[8:]
if self._current_missing < 0:
# Stream EOF (as reported by docker daemon)
self._eof = True
break
def _handle_end_of_writing(self):
if self._end_of_writing and len(self._write_buffer) == 0:
self._end_of_writing = False
self._log('Shutting socket down for writing')
shutdown_writing(self._sock, self._log)
def _write(self):
if len(self._write_buffer) > 0:
written = write_to_socket(self._sock, self._write_buffer)
self._write_buffer = self._write_buffer[written:]
self._log('wrote {0} bytes, {1} are left'.format(written, len(self._write_buffer)))
if len(self._write_buffer) > 0:
self._selector.modify(self._sock, self._selectors.EVENT_READ | self._selectors.EVENT_WRITE)
else:
self._selector.modify(self._sock, self._selectors.EVENT_READ)
self._handle_end_of_writing()
def select(self, timeout=None, _internal_recursion=False):
if not _internal_recursion and self._paramiko_read_workaround and len(self._write_buffer) > 0:
# When the SSH transport is used, Docker SDK for Python internally uses Paramiko, whose
# Channel object supports select(), but only for reading
# (https://github.com/paramiko/paramiko/issues/695).
if self._sock.send_ready():
self._write()
return True
while timeout is None or timeout > PARAMIKO_POLL_TIMEOUT:
result = self.select(PARAMIKO_POLL_TIMEOUT, _internal_recursion=True)
if self._sock.send_ready():
self._write()  # send_ready() signals writability, so flush the pending write buffer
result += 1
if result > 0:
return True
if timeout is not None:
timeout -= PARAMIKO_POLL_TIMEOUT
self._log('select... ({0})'.format(timeout))
events = self._selector.select(timeout)
for key, event in events:
if key.fileobj == self._sock:
self._log(
'select event read:{0} write:{1}'.format(
event & self._selectors.EVENT_READ != 0,
event & self._selectors.EVENT_WRITE != 0))
if event & self._selectors.EVENT_READ != 0:
self._read()
if event & self._selectors.EVENT_WRITE != 0:
self._write()
result = len(events)
if self._paramiko_read_workaround and len(self._write_buffer) > 0:
if self._sock.send_ready():
self._write()
result += 1
return result > 0
def is_eof(self):
return self._eof
def end_of_writing(self):
self._end_of_writing = True
self._handle_end_of_writing()
def consume(self):
stdout = []
stderr = []
def append_block(stream_id, data):
if stream_id == docker_socket.STDOUT:
stdout.append(data)
elif stream_id == docker_socket.STDERR:
stderr.append(data)
else:
raise ValueError('{0} is not a valid stream ID'.format(stream_id))
self.end_of_writing()
self.set_block_done_callback(append_block)
while not self._eof:
self.select()
return b''.join(stdout), b''.join(stderr)
def write(self, data):
    # renamed parameter from 'str' to avoid shadowing the builtin
    self._write_buffer += data
    if len(self._write_buffer) == len(data):
        # the buffer was empty before this call, so attempt to send right away
        self._write()
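# Illustrative sketch (editor's addition, not part of the upstream file): the frame
# format that DockerSocketHandlerBase._read() parses above. Docker multiplexes attached
# streams as an 8-byte header - one byte of stream ID (0=stdin, 1=stdout, 2=stderr),
# three padding bytes, and a big-endian 32-bit payload length ('>BxxxL') - followed by
# the payload itself.
def _demo_decode_stream_frame():
    import struct
    frame = struct.pack('>BxxxL', 1, 5) + b'hello'  # a stdout frame carrying 5 payload bytes
    stream_id, length = struct.unpack('>BxxxL', frame[:8])
    assert (stream_id, length, frame[8:8 + length]) == (1, 5, b'hello')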
class DockerSocketHandlerModule(DockerSocketHandlerBase):
def __init__(self, sock, module, selectors):
super(DockerSocketHandlerModule, self).__init__(sock, selectors, module.debug)

View File

@@ -0,0 +1,62 @@
# Copyright (c) 2019-2021, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fcntl
import os
import os.path
import socket as pysocket
from ansible.module_utils.six import PY2
def make_file_unblocking(file):
fcntl.fcntl(file.fileno(), fcntl.F_SETFL, fcntl.fcntl(file.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
def make_file_blocking(file):
fcntl.fcntl(file.fileno(), fcntl.F_SETFL, fcntl.fcntl(file.fileno(), fcntl.F_GETFL) & ~os.O_NONBLOCK)
def make_unblocking(sock):
if hasattr(sock, '_sock'):
sock._sock.setblocking(0)
elif hasattr(sock, 'setblocking'):
sock.setblocking(0)
else:
make_file_unblocking(sock)
def _empty_writer(msg):
pass
def shutdown_writing(sock, log=_empty_writer):
if hasattr(sock, 'shutdown_write'):
sock.shutdown_write()
elif hasattr(sock, 'shutdown'):
try:
sock.shutdown(pysocket.SHUT_WR)
except TypeError as e:
# probably: "TypeError: shutdown() takes 1 positional argument but 2 were given"
log('Shutting down for writing not possible; trying shutdown instead: {0}'.format(e))
sock.shutdown()
elif not PY2 and isinstance(sock, getattr(pysocket, 'SocketIO')):
sock._sock.shutdown(pysocket.SHUT_WR)
else:
log('No idea how to signal end of writing')
def write_to_socket(sock, data):
if hasattr(sock, '_send_until_done'):
# WrappedSocket (urllib3/contrib/pyopenssl) doesn't have `send`, but
# only `sendall`, which uses `_send_until_done` under the hood.
return sock._send_until_done(data)
elif hasattr(sock, 'send'):
return sock.send(data)
else:
return os.write(sock.fileno(), data)
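# Illustrative sketch (editor's addition, not part of the upstream file): the helpers
# above pick the right primitive for plain sockets, SocketIO objects, and wrapped
# SSL/Paramiko sockets. With an ordinary socketpair the flow looks like this:
def _demo_socket_helpers():
    pair_a, pair_b = pysocket.socketpair()
    make_unblocking(pair_a)                  # plain sockets take the setblocking(0) branch
    sent = write_to_socket(pair_a, b'ping')  # plain sockets take the send() branch
    shutdown_writing(pair_a)                 # SHUT_WR signals end-of-writing to the peer
    assert pair_b.recv(sent) == b'ping'
    pair_a.close()
    pair_b.close()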

View File

@@ -0,0 +1,281 @@
# Copyright (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) <piotr@it-playground.pl>
# Copyright (c) Thierry Bouvet (@tbouvet)
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from time import sleep
try:
from docker.errors import APIError, NotFound
except ImportError:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
from ansible_collections.community.docker.plugins.module_utils.common import AnsibleDockerClient
class AnsibleDockerSwarmClient(AnsibleDockerClient):
def __init__(self, **kwargs):
super(AnsibleDockerSwarmClient, self).__init__(**kwargs)
def get_swarm_node_id(self):
"""
Get the 'NodeID' of the Swarm node, or 'None' if the host is not part of a Swarm. The NodeID returned
is that of the Docker host the module is executed on.
:return:
NodeID of host or 'None' if not part of Swarm
"""
try:
info = self.info()
except APIError as exc:
self.fail("Failed to get node information for %s" % to_native(exc))
if info:
json_str = json.dumps(info, ensure_ascii=False)
swarm_info = json.loads(json_str)
if swarm_info['Swarm']['NodeID']:
return swarm_info['Swarm']['NodeID']
return None
def check_if_swarm_node(self, node_id=None):
"""
Check whether the host is part of a Docker Swarm. If 'node_id' is not provided, the Docker host's
system information is read and checked for the relevant Swarm key. If 'node_id' is provided, the
method tries to read the node's information, assuming it runs on a Swarm manager. The
get_node_inspect() method handles the exception raised when it is not executed on a Swarm manager.
:param node_id: Node identifier
:return:
bool: True if node is part of Swarm, False otherwise
"""
if node_id is None:
try:
info = self.info()
except APIError:
self.fail("Failed to get host information.")
if info:
json_str = json.dumps(info, ensure_ascii=False)
swarm_info = json.loads(json_str)
if swarm_info['Swarm']['NodeID']:
return True
if swarm_info['Swarm']['LocalNodeState'] in ('active', 'pending', 'locked'):
return True
return False
else:
try:
node_info = self.get_node_inspect(node_id=node_id)
except APIError:
return False  # the node cannot be inspected; treat it as not part of a Swarm
if node_info['ID'] is not None:
return True
return False
def check_if_swarm_manager(self):
"""
Check whether the node's role is set to Manager in the Swarm. The node is the Docker host on which
the module action is performed. inspect_swarm() will fail if the node is not a manager.
:return: True if node is Swarm Manager, False otherwise
"""
try:
self.inspect_swarm()
return True
except APIError:
return False
def fail_task_if_not_swarm_manager(self):
"""
If the host is not a Swarm manager, the Ansible task on this host should end in the 'failed' state.
"""
if not self.check_if_swarm_manager():
self.fail("Error running docker swarm module: must run on swarm manager node")
def check_if_swarm_worker(self):
"""
Check whether the node's role is set to Worker in the Swarm. The node is the Docker host on which the
module action is performed. Via check_if_swarm_node(), it will fail if run on a host that is not part of a Swarm.
:return: True if node is Swarm Worker, False otherwise
"""
if self.check_if_swarm_node() and not self.check_if_swarm_manager():
return True
return False
def check_if_swarm_node_is_down(self, node_id=None, repeat_check=1):
"""
Check whether the node's status on the Swarm manager is 'down'. If node_id is provided, the manager is
queried about the specified node; otherwise it is queried about itself. If run on a Swarm Worker node,
or on a host that is not part of a Swarm, it will fail the playbook.
:param repeat_check: number of check attempts, with a 5-second delay between them; by default check only once
:param node_id: node ID or name; if None, the method will try to get the node_id of the host the module runs on
:return:
True if node is part of swarm but its state is down, False otherwise
"""
if repeat_check < 1:
repeat_check = 1
if node_id is None:
node_id = self.get_swarm_node_id()
for retry in range(0, repeat_check):
if retry > 0:
sleep(5)
node_info = self.get_node_inspect(node_id=node_id)
if node_info['Status']['State'] == 'down':
return True
return False
def get_node_inspect(self, node_id=None, skip_missing=False):
"""
Return Swarm node information for a single node, as produced by the 'docker node inspect' command.
:param skip_missing: if True, the function returns None instead of failing the task
:param node_id: node ID or name; if None, the method will try to get the node_id of the host the module runs on
:return:
Single node information structure
"""
if node_id is None:
node_id = self.get_swarm_node_id()
if node_id is None:
self.fail("Failed to get node information.")
try:
node_info = self.inspect_node(node_id=node_id)
except APIError as exc:
if exc.status_code == 503:
self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
if exc.status_code == 404:
if skip_missing:
return None
self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
except Exception as exc:
self.fail("Error inspecting swarm node: %s" % exc)
json_str = json.dumps(node_info, ensure_ascii=False)
node_info = json.loads(json_str)
if 'ManagerStatus' in node_info:
if node_info['ManagerStatus'].get('Leader'):
# This is a workaround for a bug in Docker where in some cases the Leader IP is 0.0.0.0
# Check moby/moby#35437 for details
count_colons = node_info['ManagerStatus']['Addr'].count(":")
if count_colons == 1:
swarm_leader_ip = node_info['ManagerStatus']['Addr'].split(":", 1)[0] or node_info['Status']['Addr']
else:
swarm_leader_ip = node_info['Status']['Addr']
node_info['Status']['Addr'] = swarm_leader_ip
return node_info
def get_all_nodes_inspect(self):
"""
Return Swarm node information, as produced by the 'docker node inspect' command, for all registered nodes.
:return:
Structure with information about all nodes
"""
try:
node_info = self.nodes()
except APIError as exc:
if exc.status_code == 503:
self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
except Exception as exc:
self.fail("Error inspecting swarm node: %s" % exc)
json_str = json.dumps(node_info, ensure_ascii=False)
node_info = json.loads(json_str)
return node_info
def get_all_nodes_list(self, output='short'):
"""
Returns list of nodes registered in Swarm
:param output: Defines format of returned data
:return:
If 'output' is 'short', the returned data is a list of the hostnames of the nodes registered in the Swarm;
if 'output' is 'long', it is a list of dicts containing the attributes shown by the
'docker node ls' command.
"""
nodes_list = []
nodes_inspect = self.get_all_nodes_inspect()
if nodes_inspect is None:
return None
if output == 'short':
for node in nodes_inspect:
nodes_list.append(node['Description']['Hostname'])
elif output == 'long':
for node in nodes_inspect:
node_property = {}
node_property.update({'ID': node['ID']})
node_property.update({'Hostname': node['Description']['Hostname']})
node_property.update({'Status': node['Status']['State']})
node_property.update({'Availability': node['Spec']['Availability']})
if 'ManagerStatus' in node:
if node['ManagerStatus']['Leader'] is True:
node_property.update({'Leader': True})
node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']})
node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']})
nodes_list.append(node_property)
else:
return None
return nodes_list
def get_node_name_by_id(self, nodeid):
return self.get_node_inspect(nodeid)['Description']['Hostname']
def get_unlock_key(self):
if self.docker_py_version < LooseVersion('2.7.0'):
return None
return super(AnsibleDockerSwarmClient, self).get_unlock_key()
def get_service_inspect(self, service_id, skip_missing=False):
"""
Return Swarm service information for a single service, as produced by the 'docker service inspect' command.
:param service_id: service ID or name
:param skip_missing: if True, the function returns None instead of failing the task
:return:
Single service information structure
"""
try:
service_info = self.inspect_service(service_id)
except NotFound as exc:
if skip_missing is False:
self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
else:
return None
except APIError as exc:
if exc.status_code == 503:
self.fail("Cannot inspect service: To inspect service execute module on Swarm Manager")
self.fail("Error inspecting swarm service: %s" % exc)
except Exception as exc:
self.fail("Error inspecting swarm service: %s" % exc)
json_str = json.dumps(service_info, ensure_ascii=False)
service_info = json.loads(json_str)
return service_info

View File

@@ -0,0 +1,400 @@
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import abc
import os
import platform
import re
import sys
import traceback
from datetime import timedelta
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.common.collections import is_sequence
from ansible.module_utils.common._collections_compat import Sequence
from ansible.module_utils.six.moves.urllib.parse import urlparse
DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
DEFAULT_TLS = False
DEFAULT_TLS_VERIFY = False
DEFAULT_TLS_HOSTNAME = 'localhost' # deprecated
DEFAULT_TIMEOUT_SECONDS = 60
DOCKER_COMMON_ARGS = dict(
docker_host=dict(type='str', default=DEFAULT_DOCKER_HOST, fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']),
tls_hostname=dict(type='str', fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])),
api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']),
timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS, fallback=(env_fallback, ['DOCKER_TIMEOUT'])),
ca_cert=dict(type='path', aliases=['tls_ca_cert', 'cacert_path']),
client_cert=dict(type='path', aliases=['tls_client_cert', 'cert_path']),
client_key=dict(type='path', aliases=['tls_client_key', 'key_path']),
ssl_version=dict(type='str', fallback=(env_fallback, ['DOCKER_SSL_VERSION'])),
tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])),
use_ssh_client=dict(type='bool', default=False),
validate_certs=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY']), aliases=['tls_verify']),
debug=dict(type='bool', default=False)
)
DOCKER_COMMON_ARGS_VARS = dict([
[option_name, 'ansible_docker_%s' % option_name]
for option_name in DOCKER_COMMON_ARGS
if option_name != 'debug'
])
DOCKER_MUTUALLY_EXCLUSIVE = []
DOCKER_REQUIRED_TOGETHER = [
['client_cert', 'client_key']
]
DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/'
BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
def is_image_name_id(name):
"""Check whether the given image name is in fact an image ID (hash)."""
if re.match('^sha256:[0-9a-fA-F]{64}$', name):
return True
return False
def is_valid_tag(tag, allow_empty=False):
"""Check whether the given string is a valid docker tag name."""
if not tag:
return allow_empty
# See here ("Extended description") for a definition what tags can be:
# https://docs.docker.com/engine/reference/commandline/tag/
return bool(re.match('^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$', tag))
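# Illustrative examples (editor's addition, not part of the upstream file): expected
# behavior of the two validators above for a few representative inputs.
def _demo_image_name_checks():
    assert is_image_name_id('sha256:' + 64 * 'f')
    assert not is_image_name_id('ubuntu:22.04')
    assert is_valid_tag('v1.2.3-alpha_1')
    assert not is_valid_tag('.hidden')  # first character must be alphanumeric or '_'
    assert not is_valid_tag('')
    assert is_valid_tag('', allow_empty=True)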
def sanitize_result(data):
"""Sanitize data object for return to Ansible.
When the data object contains types such as docker.types.containers.HostConfig,
Ansible will fail when these are returned via exit_json or fail_json.
HostConfig is derived from dict, but its constructor requires additional
arguments. This function sanitizes data structures by recursively converting
everything derived from dict to dict and everything derived from list (and tuple)
to a list.
"""
if isinstance(data, dict):
return dict((k, sanitize_result(v)) for k, v in data.items())
elif isinstance(data, (list, tuple)):
return [sanitize_result(v) for v in data]
else:
return data
class DockerBaseClass(object):
def __init__(self):
self.debug = False
def log(self, msg, pretty_print=False):
pass
# if self.debug:
# log_file = open('docker.log', 'a')
# if pretty_print:
# log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
# log_file.write(u'\n')
# else:
# log_file.write(msg + u'\n')
def update_tls_hostname(result, old_behavior=False, deprecate_function=None, uses_tls=True):
if result['tls_hostname'] is None:
# get default machine name from the url
parsed_url = urlparse(result['docker_host'])
result['tls_hostname'] = parsed_url.netloc.rsplit(':', 1)[0]
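# Illustrative example (editor's addition, not part of the upstream file): with
# tls_hostname unset, the hostname is derived from the docker_host URL.
def _demo_update_tls_hostname():
    result = {'tls_hostname': None, 'docker_host': 'tcp://192.0.2.10:2376'}
    update_tls_hostname(result)
    assert result['tls_hostname'] == '192.0.2.10'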
def compare_dict_allow_more_present(av, bv):
'''
Compare two dictionaries for whether every entry of the first is in the second.
'''
for key, value in av.items():
if key not in bv:
return False
if bv[key] != value:
return False
return True
def compare_generic(a, b, method, datatype):
'''
Compare values a and b as described by method and datatype.
Returns ``True`` if the values compare equal, and ``False`` if not.
``a`` is usually the module's parameter, while ``b`` is a property
of the current object. ``a`` must not be ``None`` (except for
``datatype == 'value'``).
Valid values for ``method`` are:
- ``ignore`` (always compare as equal);
- ``strict`` (only compare if really equal)
- ``allow_more_present`` (allow b to have elements which a does not have).
Valid values for ``datatype`` are:
- ``value``: for simple values (strings, numbers, ...);
- ``list``: for ``list``s or ``tuple``s where order matters;
- ``set``: for ``list``s, ``tuple``s or ``set``s where order does not
matter;
- ``set(dict)``: for ``list``s, ``tuple``s or ``sets`` where order does
not matter and which contain ``dict``s; ``allow_more_present`` is used
for the ``dict``s, and these are assumed to be dictionaries of values;
- ``dict``: for dictionaries of values.
'''
if method == 'ignore':
return True
# If a or b is None:
if a is None or b is None:
# If both are None: equality
if a == b:
return True
# Otherwise, not equal for values, and equal
# if the other is empty for set/list/dict
if datatype == 'value':
return False
# For allow_more_present, allow a to be None
if method == 'allow_more_present' and a is None:
return True
# Otherwise, the iterable object which is not None must have length 0
return len(b if a is None else a) == 0
# Do proper comparison (both objects not None)
if datatype == 'value':
return a == b
elif datatype == 'list':
if method == 'strict':
return a == b
else:
i = 0
for v in a:
while i < len(b) and b[i] != v:
i += 1
if i == len(b):
return False
i += 1
return True
elif datatype == 'dict':
if method == 'strict':
return a == b
else:
return compare_dict_allow_more_present(a, b)
elif datatype == 'set':
set_a = set(a)
set_b = set(b)
if method == 'strict':
return set_a == set_b
else:
return set_b >= set_a
elif datatype == 'set(dict)':
for av in a:
found = False
for bv in b:
if compare_dict_allow_more_present(av, bv):
found = True
break
if not found:
return False
if method == 'strict':
# If we would know that both a and b do not contain duplicates,
# we could simply compare len(a) to len(b) to finish this test.
# We can assume that b has no duplicates (as it is returned by
# docker), but we don't know for a.
for bv in b:
found = False
for av in a:
if compare_dict_allow_more_present(av, bv):
found = True
break
if not found:
return False
return True
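# Illustrative examples (editor's addition, not part of the upstream file): how method
# and datatype interact in compare_generic(). 'a' plays the role of the module
# parameter, 'b' the state of the existing container.
def _demo_compare_generic():
    assert compare_generic('x', 'y', 'ignore', 'value')               # 'ignore' always matches
    assert compare_generic([1, 2], [2, 1], 'strict', 'set')           # sets disregard order
    assert not compare_generic([1, 2], [2, 1], 'strict', 'list')      # lists do not
    assert compare_generic([1], [1, 2], 'allow_more_present', 'set')  # b may have extra items
    assert compare_generic({'a': 1}, {'a': 1, 'b': 2}, 'allow_more_present', 'dict')
    assert compare_generic([{'a': 1}], [{'a': 1, 'b': 2}], 'strict', 'set(dict)')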
class DifferenceTracker(object):
def __init__(self):
self._diff = []
def add(self, name, parameter=None, active=None):
self._diff.append(dict(
name=name,
parameter=parameter,
active=active,
))
def merge(self, other_tracker):
self._diff.extend(other_tracker._diff)
@property
def empty(self):
return len(self._diff) == 0
def get_before_after(self):
'''
Return texts ``before`` and ``after``.
'''
before = dict()
after = dict()
for item in self._diff:
before[item['name']] = item['active']
after[item['name']] = item['parameter']
return before, after
def has_difference_for(self, name):
'''
Returns a boolean if a difference exists for name
'''
return any(diff for diff in self._diff if diff['name'] == name)
def get_legacy_docker_container_diffs(self):
'''
Return differences in the docker_container legacy format.
'''
result = []
for entry in self._diff:
item = dict()
item[entry['name']] = dict(
parameter=entry['parameter'],
container=entry['active'],
)
result.append(item)
return result
def get_legacy_docker_diffs(self):
'''
Return differences in the docker_container legacy format.
'''
result = [entry['name'] for entry in self._diff]
return result
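# Illustrative example (editor's addition, not part of the upstream file): typical
# DifferenceTracker usage as in the container module above - record differences,
# then render them for diff mode.
def _demo_difference_tracker():
    tracker = DifferenceTracker()
    tracker.add('image', parameter='ubuntu:22.04', active='ubuntu:20.04')
    assert not tracker.empty
    assert tracker.has_difference_for('image')
    before, after = tracker.get_before_after()
    assert before == {'image': 'ubuntu:20.04'} and after == {'image': 'ubuntu:22.04'}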
def clean_dict_booleans_for_docker_api(data, allow_sequences=False):
'''
Go doesn't like Python booleans 'True' or 'False', while Ansible is just
fine with them in YAML. As such, they need to be converted in cases where
we pass dictionaries to the Docker API (e.g. docker_network's
driver_options and docker_prune's filters). When `allow_sequences=True`,
YAML sequences (lists, tuples) are converted to [str] instead of str([...]),
which is the format expected by filters that accept lists, such as labels.
'''
def sanitize(value):
if value is True:
return 'true'
elif value is False:
return 'false'
else:
return str(value)
result = dict()
if data is not None:
for k, v in data.items():
result[str(k)] = [sanitize(e) for e in v] if allow_sequences and is_sequence(v) else sanitize(v)
return result
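# Illustrative example (editor's addition, not part of the upstream file): booleans
# become the lowercase strings Go expects; with allow_sequences=True, lists survive
# as lists of strings.
def _demo_clean_dict_booleans():
    assert clean_dict_booleans_for_docker_api({'a': True, 'b': 7}) == {'a': 'true', 'b': '7'}
    assert clean_dict_booleans_for_docker_api({'label': ['x', True]}, allow_sequences=True) == {'label': ['x', 'true']}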
def convert_duration_to_nanosecond(time_str):
"""
Return time duration in nanosecond.
"""
if not isinstance(time_str, str):
raise ValueError('Missing unit in duration - %s' % time_str)
regex = re.compile(
r'^(((?P<hours>\d+)h)?'
r'((?P<minutes>\d+)m(?!s))?'
r'((?P<seconds>\d+)s)?'
r'((?P<milliseconds>\d+)ms)?'
r'((?P<microseconds>\d+)us)?)$'
)
parts = regex.match(time_str)
if not parts:
raise ValueError('Invalid time duration - %s' % time_str)
parts = parts.groupdict()
time_params = {}
for (name, value) in parts.items():
if value:
time_params[name] = int(value)
delta = timedelta(**time_params)
time_in_nanoseconds = (
delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6
) * 10 ** 3
return time_in_nanoseconds
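# Illustrative example (editor's addition, not part of the upstream file): '1h30m15s'
# is 5415 seconds, i.e. 5415 * 10**9 nanoseconds.
def _demo_convert_duration():
    assert convert_duration_to_nanosecond('1h30m15s') == 5415 * 10 ** 9
    assert convert_duration_to_nanosecond('500ms') == 500 * 10 ** 6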
def normalize_healthcheck_test(test):
if isinstance(test, (tuple, list)):
return [str(e) for e in test]
return ['CMD-SHELL', str(test)]
def normalize_healthcheck(healthcheck, normalize_test=False):
"""
Return dictionary of healthcheck parameters.
"""
result = dict()
# All supported healthcheck parameters
options = ('test', 'interval', 'timeout', 'start_period', 'retries')
duration_options = ('interval', 'timeout', 'start_period')
for key in options:
if key in healthcheck:
value = healthcheck[key]
if value is None:
# due to recursive argument_spec, all keys are always present
# (but have default value None if not specified)
continue
if key in duration_options:
value = convert_duration_to_nanosecond(value)
if not value:
continue
if key == 'retries':
try:
value = int(value)
except ValueError:
raise ValueError(
'Cannot parse number of retries for healthcheck. '
'Expected an integer, got "{0}".'.format(value)
)
if key == 'test' and normalize_test:
value = normalize_healthcheck_test(value)
result[key] = value
return result
def parse_healthcheck(healthcheck):
"""
Return dictionary of healthcheck parameters and boolean if
healthcheck defined in image was requested to be disabled.
"""
if (not healthcheck) or (not healthcheck.get('test')):
return None, None
result = normalize_healthcheck(healthcheck, normalize_test=True)
if result['test'] == ['NONE']:
# If the user explicitly disables the healthcheck, return None
# as the healthcheck object, and set disable_healthcheck to True
return None, True
return result, False
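# Illustrative example (editor's addition, not part of the upstream file): a regular
# healthcheck is normalized (string test -> CMD-SHELL, durations -> nanoseconds),
# while test: NONE requests disabling the healthcheck defined in the image.
def _demo_parse_healthcheck():
    check, disabled = parse_healthcheck({'test': 'curl -f http://localhost/', 'interval': '30s', 'retries': 3})
    assert check == {'test': ['CMD-SHELL', 'curl -f http://localhost/'], 'interval': 30 * 10 ** 9, 'retries': 3}
    assert disabled is False
    assert parse_healthcheck({'test': ['NONE']}) == (None, True)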
def omit_none_from_dict(d):
"""
Return a copy of the dictionary with all keys with value None omitted.
"""
return dict((k, v) for (k, v) in d.items() if v is not None)

View File

@@ -0,0 +1,24 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
"""Provide version object to compare version numbers."""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Once we drop support for ansible-core 2.11, we can remove the try/except.
from ansible.module_utils.six import raise_from
try:
from ansible.module_utils.compat.version import LooseVersion, StrictVersion
except ImportError:
try:
from distutils.version import LooseVersion, StrictVersion
except ImportError as exc:
msg = 'To use this plugin or module with ansible-core 2.11, you need to use Python < 3.12 with distutils.version present'
raise_from(ImportError(msg), exc)
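# Illustrative example (editor's addition, not part of the upstream file): LooseVersion
# compares version components numerically, which is what the collection's version
# gates (such as docker_py_version checks) rely on.
def _demo_loose_version():
    assert LooseVersion('2.7.0') < LooseVersion('2.10.1')  # 7 < 10 numerically, not lexicographically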

Some files were not shown because too many files have changed in this diff.