Init: mediaserver

2023-02-08 12:13:28 +01:00
parent 848bc9739c
commit f7c23d4ba9
31914 changed files with 6175775 additions and 0 deletions

View File: docker/__init__.py

@@ -0,0 +1,9 @@
# flake8: noqa
from .api import APIClient
from .client import DockerClient, from_env
from .context import Context
from .context import ContextAPI
from .tls import TLSConfig
from .version import __version__
__title__ = 'docker'
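
The package root re-exports both entry points: the high-level ``DockerClient`` (via ``from_env``) and the low-level ``APIClient``. A minimal sketch, assuming a daemon reachable through the standard environment variables:

import docker

client = docker.from_env()  # high-level DockerClient built from DOCKER_HOST etc.
api = client.api            # the underlying low-level APIClient
print(docker.__version__)   # '6.0.1' for this vendored copy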

View File: docker/version.py

@@ -0,0 +1,5 @@
# coding: utf-8
# file generated by setuptools_scm
# don't change, don't track in version control
__version__ = version = '6.0.1'
__version_tuple__ = version_tuple = (6, 0, 1)

View File: docker/api/__init__.py

@@ -0,0 +1,2 @@
# flake8: noqa
from .client import APIClient

View File: docker/api/build.py

@@ -0,0 +1,360 @@
import json
import logging
import os
import random
from .. import auth
from .. import constants
from .. import errors
from .. import utils
log = logging.getLogger(__name__)
class BuildApiMixin:
def build(self, path=None, tag=None, quiet=False, fileobj=None,
nocache=False, rm=False, timeout=None,
custom_context=False, encoding=None, pull=False,
forcerm=False, dockerfile=None, container_limits=None,
decode=False, buildargs=None, gzip=False, shmsize=None,
labels=None, cache_from=None, target=None, network_mode=None,
squash=None, extra_hosts=None, platform=None, isolation=None,
use_config_proxy=True):
"""
Similar to the ``docker build`` command. Either ``path`` or ``fileobj``
needs to be set. ``path`` can be a local path (to a directory
containing a Dockerfile) or a remote URL. ``fileobj`` must be a
readable file-like object to a Dockerfile.
If you have a tar file for the Docker build context (including a
Dockerfile) already, pass a readable file-like object to ``fileobj``
and also pass ``custom_context=True``. If the stream is also
compressed, set ``encoding`` to the correct value (e.g. ``gzip``).
Example:
>>> from io import BytesIO
>>> from docker import APIClient
>>> dockerfile = '''
... # Shared Volume
... FROM busybox:buildroot-2014.02
... VOLUME /data
... CMD ["/bin/sh"]
... '''
>>> f = BytesIO(dockerfile.encode('utf-8'))
>>> cli = APIClient(base_url='tcp://127.0.0.1:2375')
>>> response = [line for line in cli.build(
... fileobj=f, rm=True, tag='yourname/volume'
... )]
>>> response
['{"stream":" ---\\u003e a9eb17255234\\n"}',
'{"stream":"Step 1 : VOLUME /data\\n"}',
'{"stream":" ---\\u003e Running in abdc1e6896c6\\n"}',
'{"stream":" ---\\u003e 713bca62012e\\n"}',
'{"stream":"Removing intermediate container abdc1e6896c6\\n"}',
'{"stream":"Step 2 : CMD [\\"/bin/sh\\"]\\n"}',
'{"stream":" ---\\u003e Running in dba30f2a1a7e\\n"}',
'{"stream":" ---\\u003e 032b8b2855fc\\n"}',
'{"stream":"Removing intermediate container dba30f2a1a7e\\n"}',
'{"stream":"Successfully built 032b8b2855fc\\n"}']
Args:
path (str): Path to the directory containing the Dockerfile
fileobj: A file object to use as the Dockerfile. (Or a file-like
object)
tag (str): A tag to add to the final image
quiet (bool): Whether to return the status
nocache (bool): Don't use the cache when set to ``True``
rm (bool): Remove intermediate containers. The ``docker build``
command now defaults to ``--rm=true``, but we have kept the old
default of ``False`` to preserve backward compatibility
timeout (int): HTTP timeout
custom_context (bool): Set to ``True`` if ``fileobj`` is already a
tar archive of the build context
encoding (str): The encoding for a stream. Set to ``gzip`` for
compressing
pull (bool): Downloads any updates to the FROM image in Dockerfiles
forcerm (bool): Always remove intermediate containers, even after
unsuccessful builds
dockerfile (str): path within the build context to the Dockerfile
gzip (bool): If set to ``True``, gzip compression/encoding is used
buildargs (dict): A dictionary of build arguments
container_limits (dict): A dictionary of limits applied to each
container created by the build process. Valid keys:
- memory (int): set memory limit for build
- memswap (int): Total memory (memory + swap), -1 to disable
swap
- cpushares (int): CPU shares (relative weight)
- cpusetcpus (str): CPUs in which to allow execution, e.g.,
``"0-3"``, ``"0,1"``
decode (bool): If set to ``True``, the returned stream will be
decoded into dicts on the fly. Default ``False``
shmsize (int): Size of ``/dev/shm`` in bytes. The size must be
greater than 0. If omitted, the system uses 64 MB
labels (dict): A dictionary of labels to set on the image
cache_from (:py:class:`list`): A list of images used for build
cache resolution
target (str): Name of the build-stage to build in a multi-stage
Dockerfile
network_mode (str): networking mode for the run commands during
build
squash (bool): Squash the resulting image's layers into a
single layer.
extra_hosts (dict): Extra hosts to add to /etc/hosts in building
containers, as a mapping of hostname to IP address.
platform (str): Platform in the format ``os[/arch[/variant]]``
isolation (str): Isolation technology used during build.
Default: ``None``.
use_config_proxy (bool): If ``True``, and if the docker client
configuration file (``~/.docker/config.json`` by default)
contains a proxy configuration, the corresponding environment
variables will be set in the container being built.
Returns:
A generator for the build output.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
``TypeError``
If neither ``path`` nor ``fileobj`` is specified.
"""
remote = context = None
headers = {}
container_limits = container_limits or {}
buildargs = buildargs or {}
if path is None and fileobj is None:
raise TypeError("Either path or fileobj needs to be provided.")
if gzip and encoding is not None:
raise errors.DockerException(
'Can not use custom encoding if gzip is enabled'
)
for key in container_limits.keys():
if key not in constants.CONTAINER_LIMITS_KEYS:
raise errors.DockerException(
f'Invalid container_limits key {key}'
)
if custom_context:
if not fileobj:
raise TypeError("You must specify fileobj with custom_context")
context = fileobj
elif fileobj is not None:
context = utils.mkbuildcontext(fileobj)
elif path.startswith(('http://', 'https://',
'git://', 'github.com/', 'git@')):
remote = path
elif not os.path.isdir(path):
raise TypeError("You must specify a directory to build in path")
else:
dockerignore = os.path.join(path, '.dockerignore')
exclude = None
if os.path.exists(dockerignore):
with open(dockerignore) as f:
exclude = list(filter(
lambda x: x != '' and x[0] != '#',
[line.strip() for line in f.read().splitlines()]
))
dockerfile = process_dockerfile(dockerfile, path)
context = utils.tar(
path, exclude=exclude, dockerfile=dockerfile, gzip=gzip
)
encoding = 'gzip' if gzip else encoding
u = self._url('/build')
params = {
't': tag,
'remote': remote,
'q': quiet,
'nocache': nocache,
'rm': rm,
'forcerm': forcerm,
'pull': pull,
'dockerfile': dockerfile,
}
params.update(container_limits)
if use_config_proxy:
proxy_args = self._proxy_configs.get_environment()
for k, v in proxy_args.items():
buildargs.setdefault(k, v)
if buildargs:
params.update({'buildargs': json.dumps(buildargs)})
if shmsize:
if utils.version_gte(self._version, '1.22'):
params.update({'shmsize': shmsize})
else:
raise errors.InvalidVersion(
'shmsize was only introduced in API version 1.22'
)
if labels:
if utils.version_gte(self._version, '1.23'):
params.update({'labels': json.dumps(labels)})
else:
raise errors.InvalidVersion(
'labels was only introduced in API version 1.23'
)
if cache_from:
if utils.version_gte(self._version, '1.25'):
params.update({'cachefrom': json.dumps(cache_from)})
else:
raise errors.InvalidVersion(
'cache_from was only introduced in API version 1.25'
)
if target:
if utils.version_gte(self._version, '1.29'):
params.update({'target': target})
else:
raise errors.InvalidVersion(
'target was only introduced in API version 1.29'
)
if network_mode:
if utils.version_gte(self._version, '1.25'):
params.update({'networkmode': network_mode})
else:
raise errors.InvalidVersion(
'network_mode was only introduced in API version 1.25'
)
if squash:
if utils.version_gte(self._version, '1.25'):
params.update({'squash': squash})
else:
raise errors.InvalidVersion(
'squash was only introduced in API version 1.25'
)
if extra_hosts is not None:
if utils.version_lt(self._version, '1.27'):
raise errors.InvalidVersion(
'extra_hosts was only introduced in API version 1.27'
)
if isinstance(extra_hosts, dict):
extra_hosts = utils.format_extra_hosts(extra_hosts)
params.update({'extrahosts': extra_hosts})
if platform is not None:
if utils.version_lt(self._version, '1.32'):
raise errors.InvalidVersion(
'platform was only introduced in API version 1.32'
)
params['platform'] = platform
if isolation is not None:
if utils.version_lt(self._version, '1.24'):
raise errors.InvalidVersion(
'isolation was only introduced in API version 1.24'
)
params['isolation'] = isolation
if context is not None:
headers = {'Content-Type': 'application/tar'}
if encoding:
headers['Content-Encoding'] = encoding
self._set_auth_headers(headers)
response = self._post(
u,
data=context,
params=params,
headers=headers,
stream=True,
timeout=timeout,
)
if context is not None and not custom_context:
context.close()
return self._stream_helper(response, decode=decode)
@utils.minimum_version('1.31')
def prune_builds(self):
"""
Delete the builder cache
Returns:
(dict): A dictionary containing information about the operation's
result. The ``SpaceReclaimed`` key indicates the number of
bytes of disk space reclaimed.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url("/build/prune")
return self._result(self._post(url), True)
def _set_auth_headers(self, headers):
log.debug('Looking for auth config')
# If we don't have any auth data so far, try reloading the config
# file one more time in case anything showed up in there.
if not self._auth_configs or self._auth_configs.is_empty:
log.debug("No auth config in memory - loading from filesystem")
self._auth_configs = auth.load_config(
credstore_env=self.credstore_env
)
# Send the full auth configuration (if any exists), since the build
# could use any (or all) of the registries.
if self._auth_configs:
auth_data = self._auth_configs.get_all_credentials()
# See https://github.com/docker/docker-py/issues/1683
if (auth.INDEX_URL not in auth_data and
auth.INDEX_NAME in auth_data):
auth_data[auth.INDEX_URL] = auth_data.get(auth.INDEX_NAME, {})
log.debug(
'Sending auth config ({})'.format(
', '.join(repr(k) for k in auth_data.keys())
)
)
if auth_data:
headers['X-Registry-Config'] = auth.encode_header(
auth_data
)
else:
log.debug('No auth config found')
def process_dockerfile(dockerfile, path):
if not dockerfile:
return (None, None)
abs_dockerfile = dockerfile
if not os.path.isabs(dockerfile):
abs_dockerfile = os.path.join(path, dockerfile)
if constants.IS_WINDOWS_PLATFORM and path.startswith(
constants.WINDOWS_LONGPATH_PREFIX):
abs_dockerfile = '{}{}'.format(
constants.WINDOWS_LONGPATH_PREFIX,
os.path.normpath(
abs_dockerfile[len(constants.WINDOWS_LONGPATH_PREFIX):]
)
)
if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or
os.path.relpath(abs_dockerfile, path).startswith('..')):
# Dockerfile not in context - read data to insert into tar later
with open(abs_dockerfile) as df:
return (
f'.dockerfile.{random.getrandbits(160):x}',
df.read()
)
# Dockerfile is inside the context - return path relative to context root
if dockerfile == abs_dockerfile:
# Only calculate relpath if necessary to avoid errors
# on Windows client -> Linux Docker
# see https://github.com/docker/compose/issues/5969
dockerfile = os.path.relpath(abs_dockerfile, path)
return (dockerfile, None)
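
A minimal usage sketch for ``build()``, assuming a local daemon on the default Unix socket and a hypothetical build context ``./app`` containing a Dockerfile:

import docker

cli = docker.APIClient(base_url='unix://var/run/docker.sock')
# With decode=True each chunk is a dict parsed from the JSON progress
# stream, e.g. {'stream': 'Step 1/3 : FROM busybox\n'}.
for chunk in cli.build(path='./app', tag='example/app:latest',
                       rm=True, decode=True):
    if 'stream' in chunk:
        print(chunk['stream'], end='')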

View File: docker/api/client.py

@@ -0,0 +1,509 @@
import json
import struct
import urllib
from functools import partial
import requests
import requests.exceptions
import websocket
from .. import auth
from ..constants import (DEFAULT_NUM_POOLS, DEFAULT_NUM_POOLS_SSH,
DEFAULT_MAX_POOL_SIZE, DEFAULT_TIMEOUT_SECONDS,
DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM,
MINIMUM_DOCKER_API_VERSION, STREAM_HEADER_SIZE_BYTES)
from ..errors import (DockerException, InvalidVersion, TLSParameterError,
create_api_error_from_http_exception)
from ..tls import TLSConfig
from ..transport import SSLHTTPAdapter, UnixHTTPAdapter
from ..utils import check_resource, config, update_headers, utils
from ..utils.json_stream import json_stream
from ..utils.proxy import ProxyConfig
from ..utils.socket import consume_socket_output, demux_adaptor, frames_iter
from .build import BuildApiMixin
from .config import ConfigApiMixin
from .container import ContainerApiMixin
from .daemon import DaemonApiMixin
from .exec_api import ExecApiMixin
from .image import ImageApiMixin
from .network import NetworkApiMixin
from .plugin import PluginApiMixin
from .secret import SecretApiMixin
from .service import ServiceApiMixin
from .swarm import SwarmApiMixin
from .volume import VolumeApiMixin
try:
from ..transport import NpipeHTTPAdapter
except ImportError:
pass
try:
from ..transport import SSHHTTPAdapter
except ImportError:
pass
class APIClient(
requests.Session,
BuildApiMixin,
ConfigApiMixin,
ContainerApiMixin,
DaemonApiMixin,
ExecApiMixin,
ImageApiMixin,
NetworkApiMixin,
PluginApiMixin,
SecretApiMixin,
ServiceApiMixin,
SwarmApiMixin,
VolumeApiMixin):
"""
A low-level client for the Docker Engine API.
Example:
>>> import docker
>>> client = docker.APIClient(base_url='unix://var/run/docker.sock')
>>> client.version()
{u'ApiVersion': u'1.33',
u'Arch': u'amd64',
u'BuildTime': u'2017-11-19T18:46:37.000000000+00:00',
u'GitCommit': u'f4ffd2511c',
u'GoVersion': u'go1.9.2',
u'KernelVersion': u'4.14.3-1-ARCH',
u'MinAPIVersion': u'1.12',
u'Os': u'linux',
u'Version': u'17.10.0-ce'}
Args:
base_url (str): URL to the Docker server. For example,
``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
version (str): The version of the API to use. Set to ``auto`` to
automatically detect the server's version. Default: ``1.35``
timeout (int): Default timeout for API calls, in seconds.
tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
``True`` to enable it with default options, or pass a
:py:class:`~docker.tls.TLSConfig` object to use custom
configuration.
user_agent (str): Set a custom user agent for requests to the server.
credstore_env (dict): Override environment variables when calling the
credential store process.
use_ssh_client (bool): If set to `True`, an ssh connection is made
via shelling out to the ssh client. Ensure the ssh client is
installed and configured on the host.
max_pool_size (int): The maximum number of connections
to save in the pool.
"""
__attrs__ = requests.Session.__attrs__ + ['_auth_configs',
'_general_configs',
'_version',
'base_url',
'timeout']
def __init__(self, base_url=None, version=None,
timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
user_agent=DEFAULT_USER_AGENT, num_pools=None,
credstore_env=None, use_ssh_client=False,
max_pool_size=DEFAULT_MAX_POOL_SIZE):
super().__init__()
if tls and not base_url:
raise TLSParameterError(
'If using TLS, the base_url argument must be provided.'
)
self.base_url = base_url
self.timeout = timeout
self.headers['User-Agent'] = user_agent
self._general_configs = config.load_general_config()
proxy_config = self._general_configs.get('proxies', {})
try:
proxies = proxy_config[base_url]
except KeyError:
proxies = proxy_config.get('default', {})
self._proxy_configs = ProxyConfig.from_dict(proxies)
self._auth_configs = auth.load_config(
config_dict=self._general_configs, credstore_env=credstore_env,
)
self.credstore_env = credstore_env
base_url = utils.parse_host(
base_url, IS_WINDOWS_PLATFORM, tls=bool(tls)
)
# SSH has a different default for num_pools than all other adapters
num_pools = num_pools or DEFAULT_NUM_POOLS_SSH if \
base_url.startswith('ssh://') else DEFAULT_NUM_POOLS
if base_url.startswith('http+unix://'):
self._custom_adapter = UnixHTTPAdapter(
base_url, timeout, pool_connections=num_pools,
max_pool_size=max_pool_size
)
self.mount('http+docker://', self._custom_adapter)
self._unmount('http://', 'https://')
# host part of URL should be unused, but is resolved by requests
# module in proxy_bypass_macosx_sysconf()
self.base_url = 'http+docker://localhost'
elif base_url.startswith('npipe://'):
if not IS_WINDOWS_PLATFORM:
raise DockerException(
'The npipe:// protocol is only supported on Windows'
)
try:
self._custom_adapter = NpipeHTTPAdapter(
base_url, timeout, pool_connections=num_pools,
max_pool_size=max_pool_size
)
except NameError:
raise DockerException(
'Install pypiwin32 package to enable npipe:// support'
)
self.mount('http+docker://', self._custom_adapter)
self.base_url = 'http+docker://localnpipe'
elif base_url.startswith('ssh://'):
try:
self._custom_adapter = SSHHTTPAdapter(
base_url, timeout, pool_connections=num_pools,
max_pool_size=max_pool_size, shell_out=use_ssh_client
)
except NameError:
raise DockerException(
'Install paramiko package to enable ssh:// support'
)
self.mount('http+docker://ssh', self._custom_adapter)
self._unmount('http://', 'https://')
self.base_url = 'http+docker://ssh'
else:
# Use SSLAdapter for the ability to specify SSL version
if isinstance(tls, TLSConfig):
tls.configure_client(self)
elif tls:
self._custom_adapter = SSLHTTPAdapter(
pool_connections=num_pools)
self.mount('https://', self._custom_adapter)
self.base_url = base_url
# version detection needs to be after unix adapter mounting
if version is None or (isinstance(
version,
str
) and version.lower() == 'auto'):
self._version = self._retrieve_server_version()
else:
self._version = version
if not isinstance(self._version, str):
raise DockerException(
'Version parameter must be a string or None. Found {}'.format(
type(version).__name__
)
)
if utils.version_lt(self._version, MINIMUM_DOCKER_API_VERSION):
raise InvalidVersion(
'API versions below {} are no longer supported by this '
'library.'.format(MINIMUM_DOCKER_API_VERSION)
)
def _retrieve_server_version(self):
try:
return self.version(api_version=False)["ApiVersion"]
except KeyError:
raise DockerException(
'Invalid response from docker daemon: key "ApiVersion"'
' is missing.'
)
except Exception as e:
raise DockerException(
f'Error while fetching server API version: {e}'
)
def _set_request_timeout(self, kwargs):
"""Prepare the kwargs for an HTTP request by inserting the timeout
parameter, if not already present."""
kwargs.setdefault('timeout', self.timeout)
return kwargs
@update_headers
def _post(self, url, **kwargs):
return self.post(url, **self._set_request_timeout(kwargs))
@update_headers
def _get(self, url, **kwargs):
return self.get(url, **self._set_request_timeout(kwargs))
@update_headers
def _put(self, url, **kwargs):
return self.put(url, **self._set_request_timeout(kwargs))
@update_headers
def _delete(self, url, **kwargs):
return self.delete(url, **self._set_request_timeout(kwargs))
def _url(self, pathfmt, *args, **kwargs):
for arg in args:
if not isinstance(arg, str):
raise ValueError(
'Expected a string but found {} ({}) '
'instead'.format(arg, type(arg))
)
quote_f = partial(urllib.parse.quote, safe="/:")
args = map(quote_f, args)
if kwargs.get('versioned_api', True):
return '{}/v{}{}'.format(
self.base_url, self._version, pathfmt.format(*args)
)
else:
return f'{self.base_url}{pathfmt.format(*args)}'
def _raise_for_status(self, response):
"""Raises stored :class:`APIError`, if one occurred."""
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e:
raise create_api_error_from_http_exception(e) from e
def _result(self, response, json=False, binary=False):
assert not (json and binary)
self._raise_for_status(response)
if json:
return response.json()
if binary:
return response.content
return response.text
def _post_json(self, url, data, **kwargs):
# Go <1.1 can't unserialize null to a string
# so we do this disgusting thing here.
data2 = {}
if data is not None and isinstance(data, dict):
for k, v in iter(data.items()):
if v is not None:
data2[k] = v
elif data is not None:
data2 = data
if 'headers' not in kwargs:
kwargs['headers'] = {}
kwargs['headers']['Content-Type'] = 'application/json'
return self._post(url, data=json.dumps(data2), **kwargs)
def _attach_params(self, override=None):
return override or {
'stdout': 1,
'stderr': 1,
'stream': 1
}
@check_resource('container')
def _attach_websocket(self, container, params=None):
url = self._url("/containers/{0}/attach/ws", container)
req = requests.Request("POST", url, params=self._attach_params(params))
full_url = req.prepare().url
full_url = full_url.replace("http://", "ws://", 1)
full_url = full_url.replace("https://", "wss://", 1)
return self._create_websocket_connection(full_url)
def _create_websocket_connection(self, url):
return websocket.create_connection(url)
def _get_raw_response_socket(self, response):
self._raise_for_status(response)
if self.base_url == "http+docker://localnpipe":
sock = response.raw._fp.fp.raw.sock
elif self.base_url.startswith('http+docker://ssh'):
sock = response.raw._fp.fp.channel
else:
sock = response.raw._fp.fp.raw
if self.base_url.startswith("https://"):
sock = sock._sock
try:
# Keep a reference to the response to stop it being garbage
# collected. If the response is garbage collected, it will
# close TLS sockets.
sock._response = response
except AttributeError:
# UNIX sockets can't have attributes set on them, but that's
# fine because we won't be doing TLS over them
pass
return sock
def _stream_helper(self, response, decode=False):
"""Generator for data coming from a chunked-encoded HTTP response."""
if response.raw._fp.chunked:
if decode:
yield from json_stream(self._stream_helper(response, False))
else:
reader = response.raw
while not reader.closed:
# this read call will block until we get a chunk
data = reader.read(1)
if not data:
break
if reader._fp.chunk_left:
data += reader.read(reader._fp.chunk_left)
yield data
else:
# Response isn't chunked, meaning we probably
# encountered an error immediately
yield self._result(response, json=decode)
def _multiplexed_buffer_helper(self, response):
"""A generator of multiplexed data blocks read from a buffered
response."""
buf = self._result(response, binary=True)
buf_length = len(buf)
walker = 0
while True:
if buf_length - walker < STREAM_HEADER_SIZE_BYTES:
break
header = buf[walker:walker + STREAM_HEADER_SIZE_BYTES]
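            # Each frame begins with an 8-byte header: one byte for the
            # stream type (0=stdin, 1=stdout, 2=stderr), three padding
            # bytes, then a big-endian uint32 payload length ('>BxxxL').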
_, length = struct.unpack_from('>BxxxL', header)
start = walker + STREAM_HEADER_SIZE_BYTES
end = start + length
walker = end
yield buf[start:end]
def _multiplexed_response_stream_helper(self, response):
"""A generator of multiplexed data blocks coming from a response
stream."""
# Disable timeout on the underlying socket to prevent
# Read timed out(s) for long running processes
socket = self._get_raw_response_socket(response)
self._disable_socket_timeout(socket)
while True:
header = response.raw.read(STREAM_HEADER_SIZE_BYTES)
if not header:
break
_, length = struct.unpack('>BxxxL', header)
if not length:
continue
data = response.raw.read(length)
if not data:
break
yield data
def _stream_raw_result(self, response, chunk_size=1, decode=True):
''' Stream result for TTY-enabled container and raw binary data'''
self._raise_for_status(response)
# Disable timeout on the underlying socket to prevent
# Read timed out(s) for long running processes
socket = self._get_raw_response_socket(response)
self._disable_socket_timeout(socket)
yield from response.iter_content(chunk_size, decode)
def _read_from_socket(self, response, stream, tty=True, demux=False):
socket = self._get_raw_response_socket(response)
gen = frames_iter(socket, tty)
if demux:
# The generator will output tuples (stdout, stderr)
gen = (demux_adaptor(*frame) for frame in gen)
else:
# The generator will output strings
gen = (data for (_, data) in gen)
if stream:
return gen
else:
# Wait for all the frames, concatenate them, and return the result
return consume_socket_output(gen, demux=demux)
def _disable_socket_timeout(self, socket):
""" Depending on the combination of python version and whether we're
connecting over http or https, we might need to access _sock, which
may or may not exist; or we may need to just settimeout on socket
itself, which also may or may not have settimeout on it. To avoid
missing the correct one, we try both.
We also do not want to set the timeout if it is already disabled, as
you run the risk of changing a socket that was non-blocking to
blocking, for example when using gevent.
"""
sockets = [socket, getattr(socket, '_sock', None)]
for s in sockets:
if not hasattr(s, 'settimeout'):
continue
timeout = -1
if hasattr(s, 'gettimeout'):
timeout = s.gettimeout()
# Don't change the timeout if it is already disabled.
if timeout is None or timeout == 0.0:
continue
s.settimeout(None)
@check_resource('container')
def _check_is_tty(self, container):
cont = self.inspect_container(container)
return cont['Config']['Tty']
def _get_result(self, container, stream, res):
return self._get_result_tty(stream, res, self._check_is_tty(container))
def _get_result_tty(self, stream, res, is_tty):
# We should also use raw streaming (without keep-alives)
# if we're dealing with a tty-enabled container.
if is_tty:
return self._stream_raw_result(res) if stream else \
self._result(res, binary=True)
self._raise_for_status(res)
sep = b''
if stream:
return self._multiplexed_response_stream_helper(res)
else:
return sep.join(
[x for x in self._multiplexed_buffer_helper(res)]
)
def _unmount(self, *args):
for proto in args:
self.adapters.pop(proto)
def get_adapter(self, url):
try:
return super().get_adapter(url)
except requests.exceptions.InvalidSchema as e:
if self._custom_adapter:
return self._custom_adapter
else:
raise e
@property
def api_version(self):
return self._version
def reload_config(self, dockercfg_path=None):
"""
Force a reload of the auth configuration
Args:
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
otherwise ``$HOME/.dockercfg``)
Returns:
None
"""
self._auth_configs = auth.load_config(
dockercfg_path, credstore_env=self.credstore_env
)
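
A short sketch of constructing the low-level client directly; the daemon address is an assumption. Passing ``version='auto'`` exercises the negotiation in ``_retrieve_server_version()`` above:

import docker

api = docker.APIClient(base_url='unix://var/run/docker.sock',
                       version='auto')
print(api.api_version)  # negotiated server API version, e.g. '1.41'
print(api.ping())       # True if the daemon answered /_ping

Note the transport design: ``APIClient`` subclasses ``requests.Session`` and mounts custom adapters under the synthetic ``http+docker://`` scheme, which is how plain ``requests`` ends up speaking HTTP over Unix sockets, Windows named pipes, or SSH tunnels.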

View File: docker/api/config.py

@@ -0,0 +1,92 @@
import base64
from .. import utils
class ConfigApiMixin:
@utils.minimum_version('1.30')
def create_config(self, name, data, labels=None, templating=None):
"""
Create a config
Args:
name (string): Name of the config
data (bytes): Config data to be stored
labels (dict): A mapping of labels to assign to the config
templating (dict): dictionary containing the name of the
templating driver to be used, expressed as
``{ name: <templating_driver_name> }``
Returns (dict): ID of the newly created config
"""
if not isinstance(data, bytes):
data = data.encode('utf-8')
data = base64.b64encode(data)
data = data.decode('ascii')
body = {
'Data': data,
'Name': name,
'Labels': labels,
'Templating': templating
}
url = self._url('/configs/create')
return self._result(
self._post_json(url, data=body), True
)
@utils.minimum_version('1.30')
@utils.check_resource('id')
def inspect_config(self, id):
"""
Retrieve config metadata
Args:
id (string): Full ID of the config to inspect
Returns (dict): A dictionary of metadata
Raises:
:py:class:`docker.errors.NotFound`
if no config with that ID exists
"""
url = self._url('/configs/{0}', id)
return self._result(self._get(url), True)
@utils.minimum_version('1.30')
@utils.check_resource('id')
def remove_config(self, id):
"""
Remove a config
Args:
id (string): Full ID of the config to remove
Returns (boolean): True if successful
Raises:
:py:class:`docker.errors.NotFound`
if no config with that ID exists
"""
url = self._url('/configs/{0}', id)
res = self._delete(url)
self._raise_for_status(res)
return True
@utils.minimum_version('1.30')
def configs(self, filters=None):
"""
List configs
Args:
filters (dict): A map of filters to process on the configs
list. Available filters: ``names``
Returns (list): A list of configs
"""
url = self._url('/configs')
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
return self._result(self._get(url, params=params), True)
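
A sketch of the config lifecycle; configs require API >= 1.30 and a daemon running as a swarm manager. The name and payload are hypothetical:

import docker

api = docker.APIClient(base_url='unix://var/run/docker.sock')
ref = api.create_config('app-settings', b'retries=3\n', labels={'env': 'demo'})
print(api.inspect_config(ref['ID'])['Spec']['Name'])  # 'app-settings'
api.remove_config(ref['ID'])  # True on success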

File diff suppressed because it is too large

View File: docker/api/daemon.py

@@ -0,0 +1,181 @@
import os
from datetime import datetime
from .. import auth, types, utils
class DaemonApiMixin:
@utils.minimum_version('1.25')
def df(self):
"""
Get data usage information.
Returns:
(dict): A dictionary representing different resource categories
and their respective data usage.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/system/df')
return self._result(self._get(url), True)
def events(self, since=None, until=None, filters=None, decode=None):
"""
Get real-time events from the server. Similar to the ``docker events``
command.
Args:
since (UTC datetime or int): Get events from this point
until (UTC datetime or int): Get events until this point
filters (dict): Filter the events by event time, container or image
decode (bool): If set to true, stream will be decoded into dicts on
the fly. False by default.
Returns:
A :py:class:`docker.types.daemon.CancellableStream` generator
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> for event in client.events(decode=True):
... print(event)
{u'from': u'image/with:tag',
u'id': u'container-id',
u'status': u'start',
u'time': 1423339459}
...
or
>>> events = client.events()
>>> for event in events:
... print(event)
>>> # and cancel from another thread
>>> events.close()
"""
if isinstance(since, datetime):
since = utils.datetime_to_timestamp(since)
if isinstance(until, datetime):
until = utils.datetime_to_timestamp(until)
if filters:
filters = utils.convert_filters(filters)
params = {
'since': since,
'until': until,
'filters': filters
}
url = self._url('/events')
response = self._get(url, params=params, stream=True, timeout=None)
stream = self._stream_helper(response, decode=decode)
return types.CancellableStream(stream, response)
def info(self):
"""
Display system-wide information. Identical to the ``docker info``
command.
Returns:
(dict): The info as a dict
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(self._get(self._url("/info")), True)
def login(self, username, password=None, email=None, registry=None,
reauth=False, dockercfg_path=None):
"""
Authenticate with a registry. Similar to the ``docker login`` command.
Args:
username (str): The registry username
password (str): The plaintext password
email (str): The email for the registry account
registry (str): URL to the registry. E.g.
``https://index.docker.io/v1/``
reauth (bool): Whether or not to refresh existing authentication on
the Docker server.
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
otherwise ``$HOME/.dockercfg``)
Returns:
(dict): The response from the login request
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
# If we don't have any auth data so far, try reloading the config file
# one more time in case anything showed up in there.
# If dockercfg_path is passed check to see if the config file exists,
# if so load that config.
if dockercfg_path and os.path.exists(dockercfg_path):
self._auth_configs = auth.load_config(
dockercfg_path, credstore_env=self.credstore_env
)
elif not self._auth_configs or self._auth_configs.is_empty:
self._auth_configs = auth.load_config(
credstore_env=self.credstore_env
)
authcfg = self._auth_configs.resolve_authconfig(registry)
# If we found an existing auth config for this registry and username
# combination, we can return it immediately unless reauth is requested.
if authcfg and authcfg.get('username', None) == username \
and not reauth:
return authcfg
req_data = {
'username': username,
'password': password,
'email': email,
'serveraddress': registry,
}
response = self._post_json(self._url('/auth'), data=req_data)
if response.status_code == 200:
self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data)
return self._result(response, json=True)
def ping(self):
"""
Checks that the server is responsive. An exception will be raised
if it isn't responding.
Returns:
(bool) The response from the server.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(self._get(self._url('/_ping'))) == 'OK'
def version(self, api_version=True):
"""
Returns version information from the server. Similar to the ``docker
version`` command.
Returns:
(dict): The server version information
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url("/version", versioned_api=api_version)
return self._result(self._get(url), json=True)
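
A sketch of consuming the event stream; the filter is hypothetical, and the loop blocks until a matching event arrives:

import docker

api = docker.APIClient(base_url='unix://var/run/docker.sock')
events = api.events(filters={'event': 'start'}, decode=True)
for event in events:
    print(event.get('id'), event.get('status'))
    break  # the CancellableStream can also be closed from another thread
events.close()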

View File: docker/api/exec_api.py

@@ -0,0 +1,167 @@
from .. import errors
from .. import utils
class ExecApiMixin:
@utils.check_resource('container')
def exec_create(self, container, cmd, stdout=True, stderr=True,
stdin=False, tty=False, privileged=False, user='',
environment=None, workdir=None, detach_keys=None):
"""
Sets up an exec instance in a running container.
Args:
container (str): Target container where exec instance will be
created
cmd (str or list): Command to be executed
stdout (bool): Attach to stdout. Default: ``True``
stderr (bool): Attach to stderr. Default: ``True``
stdin (bool): Attach to stdin. Default: ``False``
tty (bool): Allocate a pseudo-TTY. Default: False
privileged (bool): Run as privileged.
user (str): User to execute command as. Default: root
environment (dict or list): A dictionary or a list of strings in
the following format ``["PASSWORD=xxx"]`` or
``{"PASSWORD": "xxx"}``.
workdir (str): Path to working directory for this exec session
detach_keys (str): Override the key sequence for detaching
a container. Format is a single character `[a-Z]`
or `ctrl-<value>` where `<value>` is one of:
`a-z`, `@`, `^`, `[`, `,` or `_`.
~/.docker/config.json is used by default.
Returns:
(dict): A dictionary with an exec ``Id`` key.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if environment is not None and utils.version_lt(self._version, '1.25'):
raise errors.InvalidVersion(
'Setting environment for exec is not supported in API < 1.25'
)
if isinstance(cmd, str):
cmd = utils.split_command(cmd)
if isinstance(environment, dict):
environment = utils.utils.format_environment(environment)
data = {
'Container': container,
'User': user,
'Privileged': privileged,
'Tty': tty,
'AttachStdin': stdin,
'AttachStdout': stdout,
'AttachStderr': stderr,
'Cmd': cmd,
'Env': environment,
}
if workdir is not None:
if utils.version_lt(self._version, '1.35'):
raise errors.InvalidVersion(
'workdir is not supported for API version < 1.35'
)
data['WorkingDir'] = workdir
if detach_keys:
data['detachKeys'] = detach_keys
elif 'detachKeys' in self._general_configs:
data['detachKeys'] = self._general_configs['detachKeys']
url = self._url('/containers/{0}/exec', container)
res = self._post_json(url, data=data)
return self._result(res, True)
def exec_inspect(self, exec_id):
"""
Return low-level information about an exec command.
Args:
exec_id (str): ID of the exec instance
Returns:
(dict): Dictionary of values returned by the endpoint.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
res = self._get(self._url("/exec/{0}/json", exec_id))
return self._result(res, True)
def exec_resize(self, exec_id, height=None, width=None):
"""
Resize the tty session used by the specified exec command.
Args:
exec_id (str): ID of the exec instance
height (int): Height of tty session
width (int): Width of tty session
"""
if isinstance(exec_id, dict):
exec_id = exec_id.get('Id')
params = {'h': height, 'w': width}
url = self._url("/exec/{0}/resize", exec_id)
res = self._post(url, params=params)
self._raise_for_status(res)
@utils.check_resource('exec_id')
def exec_start(self, exec_id, detach=False, tty=False, stream=False,
socket=False, demux=False):
"""
Start a previously set up exec instance.
Args:
exec_id (str): ID of the exec instance
detach (bool): If true, detach from the exec command.
Default: False
tty (bool): Allocate a pseudo-TTY. Default: False
stream (bool): Stream response data. Default: False
socket (bool): Return the connection socket to allow custom
read/write operations.
demux (bool): Return stdout and stderr separately
Returns:
(generator or str or tuple): If ``stream=True``, a generator
yielding response chunks. If ``socket=True``, a socket object for
the connection. A string containing response data otherwise. If
``demux=True``, a tuple with two elements of type bytes: stdout and
stderr.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
# we want opened socket if socket == True
data = {
'Tty': tty,
'Detach': detach
}
headers = {} if detach else {
'Connection': 'Upgrade',
'Upgrade': 'tcp'
}
res = self._post_json(
self._url('/exec/{0}/start', exec_id),
headers=headers,
data=data,
stream=True
)
if detach:
return self._result(res)
if socket:
return self._get_raw_response_socket(res)
return self._read_from_socket(res, stream, tty=tty, demux=demux)
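
A sketch of the two-step exec flow against a hypothetical running container named 'web': create the exec instance, start it, then read the exit code back:

import docker

api = docker.APIClient(base_url='unix://var/run/docker.sock')
exec_ref = api.exec_create('web', ['ls', '/'])
output = api.exec_start(exec_ref['Id'])  # bytes; stdout/stderr combined
print(output.decode())
print(api.exec_inspect(exec_ref['Id'])['ExitCode'])  # 0 on success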

View File: docker/api/image.py

@@ -0,0 +1,601 @@
import logging
import os
from .. import auth, errors, utils
from ..constants import DEFAULT_DATA_CHUNK_SIZE
log = logging.getLogger(__name__)
class ImageApiMixin:
@utils.check_resource('image')
def get_image(self, image, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
"""
Get a tarball of an image. Similar to the ``docker save`` command.
Args:
image (str): Image name to get
chunk_size (int): The number of bytes returned by each iteration
of the generator. If ``None``, data will be streamed as it is
received. Default: 2 MB
Returns:
(generator): A stream of raw archive data.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> image = client.api.get_image("busybox:latest")
>>> f = open('/tmp/busybox-latest.tar', 'wb')
>>> for chunk in image:
...     f.write(chunk)
>>> f.close()
"""
res = self._get(self._url("/images/{0}/get", image), stream=True)
return self._stream_raw_result(res, chunk_size, False)
@utils.check_resource('image')
def history(self, image):
"""
Show the history of an image.
Args:
image (str): The image to show history for
Returns:
(str): The history of the image
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
res = self._get(self._url("/images/{0}/history", image))
return self._result(res, True)
def images(self, name=None, quiet=False, all=False, filters=None):
"""
List images. Similar to the ``docker images`` command.
Args:
name (str): Only show images belonging to the repository ``name``
quiet (bool): Only return numeric IDs as a list.
all (bool): Show intermediate image layers. By default, these are
filtered out.
filters (dict): Filters to be processed on the image list.
Available filters:
- ``dangling`` (bool)
- ``label`` (str|list): format either ``"key"``, ``"key=value"``
or a list of such.
Returns:
(dict or list): A list if ``quiet=True``, otherwise a dict.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {
'only_ids': 1 if quiet else 0,
'all': 1 if all else 0,
}
if name:
if utils.version_lt(self._version, '1.25'):
# only use "filter" on API 1.24 and under, as it is deprecated
params['filter'] = name
else:
if filters:
filters['reference'] = name
else:
filters = {'reference': name}
if filters:
params['filters'] = utils.convert_filters(filters)
res = self._result(self._get(self._url("/images/json"), params=params),
True)
if quiet:
return [x['Id'] for x in res]
return res
def import_image(self, src=None, repository=None, tag=None, image=None,
changes=None, stream_src=False):
"""
Import an image. Similar to the ``docker import`` command.
If ``src`` is a string or unicode string, it will first be treated as a
path to a tarball on the local system. If there is an error reading
from that file, ``src`` will be treated as a URL instead to fetch the
image from. You can also pass an open file handle as ``src``, in which
case the data will be read from that file.
If ``src`` is unset but ``image`` is set, the ``image`` parameter will
be taken as the name of an existing image to import from.
Args:
src (str or file): Path to tarfile, URL, or file-like object
repository (str): The repository to create
tag (str): The tag to apply
image (str): Use another image like the ``FROM`` Dockerfile
parameter
"""
if not (src or image):
raise errors.DockerException(
'Must specify src or image to import from'
)
u = self._url('/images/create')
params = _import_image_params(
repository, tag, image,
src=(src if isinstance(src, str) else None),
changes=changes
)
headers = {'Content-Type': 'application/tar'}
if image or params.get('fromSrc') != '-': # from image or URL
return self._result(
self._post(u, data=None, params=params)
)
elif isinstance(src, str): # from file path
with open(src, 'rb') as f:
return self._result(
self._post(
u, data=f, params=params, headers=headers, timeout=None
)
)
else: # from raw data
if stream_src:
headers['Transfer-Encoding'] = 'chunked'
return self._result(
self._post(u, data=src, params=params, headers=headers)
)
def import_image_from_data(self, data, repository=None, tag=None,
changes=None):
"""
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but
allows importing in-memory bytes data.
Args:
data (bytes collection): Bytes collection containing valid tar data
repository (str): The repository to create
tag (str): The tag to apply
"""
u = self._url('/images/create')
params = _import_image_params(
repository, tag, src='-', changes=changes
)
headers = {'Content-Type': 'application/tar'}
return self._result(
self._post(
u, data=data, params=params, headers=headers, timeout=None
)
)
def import_image_from_file(self, filename, repository=None, tag=None,
changes=None):
"""
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from a tar file on disk.
Args:
filename (str): Full path to a tar file.
repository (str): The repository to create
tag (str): The tag to apply
Raises:
IOError: File does not exist.
"""
return self.import_image(
src=filename, repository=repository, tag=tag, changes=changes
)
def import_image_from_stream(self, stream, repository=None, tag=None,
changes=None):
return self.import_image(
src=stream, stream_src=True, repository=repository, tag=tag,
changes=changes
)
def import_image_from_url(self, url, repository=None, tag=None,
changes=None):
"""
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from a URL.
Args:
url (str): A URL pointing to a tar file.
repository (str): The repository to create
tag (str): The tag to apply
"""
return self.import_image(
src=url, repository=repository, tag=tag, changes=changes
)
def import_image_from_image(self, image, repository=None, tag=None,
changes=None):
"""
Like :py:meth:`~docker.api.image.ImageApiMixin.import_image`, but only
supports importing from another image, like the ``FROM`` Dockerfile
parameter.
Args:
image (str): Image name to import from
repository (str): The repository to create
tag (str): The tag to apply
"""
return self.import_image(
image=image, repository=repository, tag=tag, changes=changes
)
@utils.check_resource('image')
def inspect_image(self, image):
"""
Get detailed information about an image. Similar to the ``docker
inspect`` command, but only for images.
Args:
image (str): The image to inspect
Returns:
(dict): Similar to the output of ``docker inspect``, but as a
single dict
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(
self._get(self._url("/images/{0}/json", image)), True
)
@utils.minimum_version('1.30')
@utils.check_resource('image')
def inspect_distribution(self, image, auth_config=None):
"""
Get image digest and platform information by contacting the registry.
Args:
image (str): The image name to inspect
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
Returns:
(dict): A dict containing distribution data
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
registry, _ = auth.resolve_repository_name(image)
headers = {}
if auth_config is None:
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
else:
log.debug('Sending supplied auth config')
headers['X-Registry-Auth'] = auth.encode_header(auth_config)
url = self._url("/distribution/{0}/json", image)
return self._result(
self._get(url, headers=headers), True
)
def load_image(self, data, quiet=None):
"""
Load an image that was previously saved using
:py:meth:`~docker.api.image.ImageApiMixin.get_image` (or ``docker
save``). Similar to ``docker load``.
Args:
data (binary): Image data to be loaded.
quiet (boolean): Suppress progress details in response.
Returns:
(generator): Progress output as JSON objects. Only available for
API version >= 1.23
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {}
if quiet is not None:
if utils.version_lt(self._version, '1.23'):
raise errors.InvalidVersion(
'quiet is not supported in API version < 1.23'
)
params['quiet'] = quiet
res = self._post(
self._url("/images/load"), data=data, params=params, stream=True
)
if utils.version_gte(self._version, '1.23'):
return self._stream_helper(res, decode=True)
self._raise_for_status(res)
@utils.minimum_version('1.25')
def prune_images(self, filters=None):
"""
Delete unused images
Args:
filters (dict): Filters to process on the prune list.
Available filters:
- dangling (bool): When set to true (or 1), prune only
unused and untagged images.
Returns:
(dict): A dict containing a list of deleted image IDs and
the amount of disk space reclaimed in bytes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url("/images/prune")
params = {}
if filters is not None:
params['filters'] = utils.convert_filters(filters)
return self._result(self._post(url, params=params), True)
def pull(self, repository, tag=None, stream=False, auth_config=None,
decode=False, platform=None, all_tags=False):
"""
Pulls an image. Similar to the ``docker pull`` command.
Args:
repository (str): The repository to pull
tag (str): The tag to pull. If ``tag`` is ``None`` or empty, it
is set to ``latest``.
stream (bool): Stream the output as a generator. Make sure to
consume the generator, otherwise pull might get cancelled.
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
decode (bool): Decode the JSON data from the server into dicts.
Only applies with ``stream=True``
platform (str): Platform in the format ``os[/arch[/variant]]``
all_tags (bool): Pull all image tags, the ``tag`` parameter is
ignored.
Returns:
(generator or str): The output
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> resp = client.api.pull('busybox', stream=True, decode=True)
>>> for line in resp:
...     print(json.dumps(line, indent=4))
{
"status": "Pulling image (latest) from busybox",
"progressDetail": {},
"id": "e72ac664f4f0"
}
{
"status": "Pulling image (latest) from busybox, endpoint: ...",
"progressDetail": {},
"id": "e72ac664f4f0"
}
"""
repository, image_tag = utils.parse_repository_tag(repository)
tag = tag or image_tag or 'latest'
if all_tags:
tag = None
registry, repo_name = auth.resolve_repository_name(repository)
params = {
'tag': tag,
'fromImage': repository
}
headers = {}
if auth_config is None:
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
else:
log.debug('Sending supplied auth config')
headers['X-Registry-Auth'] = auth.encode_header(auth_config)
if platform is not None:
if utils.version_lt(self._version, '1.32'):
raise errors.InvalidVersion(
'platform was only introduced in API version 1.32'
)
params['platform'] = platform
response = self._post(
self._url('/images/create'), params=params, headers=headers,
stream=stream, timeout=None
)
self._raise_for_status(response)
if stream:
return self._stream_helper(response, decode=decode)
return self._result(response)
def push(self, repository, tag=None, stream=False, auth_config=None,
decode=False):
"""
Push an image or a repository to the registry. Similar to the ``docker
push`` command.
Args:
repository (str): The repository to push to
tag (str): An optional tag to push
stream (bool): Stream the output as a blocking generator
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
decode (bool): Decode the JSON data from the server into dicts.
Only applies with ``stream=True``
Returns:
(generator or str): The output from the server.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> resp = client.api.push(
... 'yourname/app',
... stream=True,
... decode=True,
... )
>>> for line in resp:
...     print(line)
{'status': 'Pushing repository yourname/app (1 tags)'}
{'status': 'Pushing','progressDetail': {}, 'id': '511136ea3c5a'}
{'status': 'Image already pushed, skipping', 'progressDetail':{},
'id': '511136ea3c5a'}
...
"""
if not tag:
repository, tag = utils.parse_repository_tag(repository)
registry, repo_name = auth.resolve_repository_name(repository)
u = self._url("/images/{0}/push", repository)
params = {
'tag': tag
}
headers = {}
if auth_config is None:
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
else:
log.debug('Sending supplied auth config')
headers['X-Registry-Auth'] = auth.encode_header(auth_config)
response = self._post_json(
u, None, headers=headers, stream=stream, params=params
)
self._raise_for_status(response)
if stream:
return self._stream_helper(response, decode=decode)
return self._result(response)
@utils.check_resource('image')
def remove_image(self, image, force=False, noprune=False):
"""
Remove an image. Similar to the ``docker rmi`` command.
Args:
image (str): The image to remove
force (bool): Force removal of the image
noprune (bool): Do not delete untagged parents
"""
params = {'force': force, 'noprune': noprune}
res = self._delete(self._url("/images/{0}", image), params=params)
return self._result(res, True)
def search(self, term, limit=None):
"""
Search for images on Docker Hub. Similar to the ``docker search``
command.
Args:
term (str): A term to search for.
limit (int): The maximum number of results to return.
Returns:
(list of dicts): The response of the search.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {'term': term}
if limit is not None:
params['limit'] = limit
return self._result(
self._get(self._url("/images/search"), params=params),
True
)
@utils.check_resource('image')
def tag(self, image, repository, tag=None, force=False):
"""
Tag an image into a repository. Similar to the ``docker tag`` command.
Args:
image (str): The image to tag
repository (str): The repository to set for the tag
tag (str): The tag name
force (bool): Force
Returns:
(bool): ``True`` if successful
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> client.api.tag('ubuntu', 'localhost:5000/ubuntu', 'latest',
...                force=True)
"""
params = {
'tag': tag,
'repo': repository,
'force': 1 if force else 0
}
url = self._url("/images/{0}/tag", image)
res = self._post(url, params=params)
self._raise_for_status(res)
return res.status_code == 201
def is_file(src):
try:
return (
isinstance(src, str) and
os.path.isfile(src)
)
except TypeError: # a data string will make isfile() raise a TypeError
return False
def _import_image_params(repo, tag, image=None, src=None,
changes=None):
params = {
'repo': repo,
'tag': tag,
}
if image:
params['fromImage'] = image
elif src and not is_file(src):
params['fromSrc'] = src
else:
params['fromSrc'] = '-'
if changes:
params['changes'] = changes
return params
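
A sketch combining ``pull()`` and ``get_image()``: pull an image, then save it to a tarball, assuming daemon and registry access:

import docker

api = docker.APIClient(base_url='unix://var/run/docker.sock')
for line in api.pull('busybox', tag='latest', stream=True, decode=True):
    print(line.get('status', ''))
with open('/tmp/busybox.tar', 'wb') as f:
    for chunk in api.get_image('busybox:latest'):
        f.write(chunk)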

View File: docker/api/network.py

@@ -0,0 +1,278 @@
from ..errors import InvalidVersion
from ..utils import check_resource, minimum_version
from ..utils import version_lt
from .. import utils
class NetworkApiMixin:
def networks(self, names=None, ids=None, filters=None):
"""
List networks. Similar to the ``docker network ls`` command.
Args:
names (:py:class:`list`): List of names to filter by
ids (:py:class:`list`): List of ids to filter by
filters (dict): Filters to be processed on the network list.
Available filters:
- ``driver=[<driver-name>]`` Matches a network's driver.
- ``label=[<key>]``, ``label=[<key>=<value>]`` or a list of
such.
- ``type=["custom"|"builtin"]`` Filters networks by type.
Returns:
(dict): List of network objects.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if filters is None:
filters = {}
if names:
filters['name'] = names
if ids:
filters['id'] = ids
params = {'filters': utils.convert_filters(filters)}
url = self._url("/networks")
res = self._get(url, params=params)
return self._result(res, json=True)
def create_network(self, name, driver=None, options=None, ipam=None,
check_duplicate=None, internal=False, labels=None,
enable_ipv6=False, attachable=None, scope=None,
ingress=None):
"""
Create a network. Similar to the ``docker network create`` command.
Args:
name (str): Name of the network
driver (str): Name of the driver used to create the network
options (dict): Driver options as a key-value dictionary
ipam (IPAMConfig): Optional custom IP scheme for the network.
check_duplicate (bool): Request daemon to check for networks with
same name. Default: ``None``.
internal (bool): Restrict external access to the network. Default
``False``.
labels (dict): Map of labels to set on the network. Default
``None``.
enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
attachable (bool): If enabled, and the network is in the global
scope, non-service containers on worker nodes will be able to
connect to the network.
scope (str): Specify the network's scope (``local``, ``global`` or
``swarm``)
ingress (bool): If set, create an ingress network which provides
the routing-mesh in swarm mode.
Returns:
(dict): The created network reference object
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
A network using the bridge driver:
>>> client.api.create_network("network1", driver="bridge")
You can also create more advanced networks with custom IPAM
configurations. For example, setting the subnet to
``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
.. code-block:: python
>>> ipam_pool = docker.types.IPAMPool(
...     subnet='192.168.52.0/24',
...     gateway='192.168.52.254'
... )
>>> ipam_config = docker.types.IPAMConfig(
...     pool_configs=[ipam_pool]
... )
>>> client.api.create_network("network1", driver="bridge",
...                           ipam=ipam_config)
"""
if options is not None and not isinstance(options, dict):
raise TypeError('options must be a dictionary')
data = {
'Name': name,
'Driver': driver,
'Options': options,
'IPAM': ipam,
'CheckDuplicate': check_duplicate,
}
if labels is not None:
if version_lt(self._version, '1.23'):
raise InvalidVersion(
'network labels were introduced in API 1.23'
)
if not isinstance(labels, dict):
raise TypeError('labels must be a dictionary')
data["Labels"] = labels
if enable_ipv6:
if version_lt(self._version, '1.23'):
raise InvalidVersion(
'enable_ipv6 was introduced in API 1.23'
)
data['EnableIPv6'] = True
if internal:
if version_lt(self._version, '1.22'):
raise InvalidVersion('Internal networks are not '
'supported in API version < 1.22')
data['Internal'] = True
if attachable is not None:
if version_lt(self._version, '1.24'):
raise InvalidVersion(
'attachable is not supported in API version < 1.24'
)
data['Attachable'] = attachable
if ingress is not None:
if version_lt(self._version, '1.29'):
raise InvalidVersion(
'ingress is not supported in API version < 1.29'
)
data['Ingress'] = ingress
if scope is not None:
if version_lt(self._version, '1.30'):
raise InvalidVersion(
'scope is not supported in API version < 1.30'
)
data['Scope'] = scope
url = self._url("/networks/create")
res = self._post_json(url, data=data)
return self._result(res, json=True)
@minimum_version('1.25')
def prune_networks(self, filters=None):
"""
Delete unused networks
Args:
filters (dict): Filters to process on the prune list.
Returns:
(dict): A dict containing a list of deleted network names and
the amount of disk space reclaimed in bytes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
url = self._url('/networks/prune')
return self._result(self._post(url, params=params), True)
@check_resource('net_id')
def remove_network(self, net_id):
"""
Remove a network. Similar to the ``docker network rm`` command.
Args:
net_id (str): The network's id
"""
url = self._url("/networks/{0}", net_id)
res = self._delete(url)
self._raise_for_status(res)
@check_resource('net_id')
def inspect_network(self, net_id, verbose=None, scope=None):
"""
Get detailed information about a network.
Args:
net_id (str): ID of network
verbose (bool): Show the service details across the cluster in
swarm mode.
scope (str): Filter the network by scope (``swarm``, ``global``
or ``local``).
"""
params = {}
if verbose is not None:
if version_lt(self._version, '1.28'):
raise InvalidVersion('verbose was introduced in API 1.28')
params['verbose'] = verbose
if scope is not None:
if version_lt(self._version, '1.31'):
raise InvalidVersion('scope was introduced in API 1.31')
params['scope'] = scope
url = self._url("/networks/{0}", net_id)
res = self._get(url, params=params)
return self._result(res, json=True)
@check_resource('container')
def connect_container_to_network(self, container, net_id,
ipv4_address=None, ipv6_address=None,
aliases=None, links=None,
link_local_ips=None, driver_opt=None,
mac_address=None):
"""
Connect a container to a network.
Args:
container (str): container-id/name to be connected to the network
net_id (str): network id
aliases (:py:class:`list`): A list of aliases for this endpoint.
Names in that list can be used within the network to reach the
container. Defaults to ``None``.
links (:py:class:`list`): A list of links for this endpoint.
Containers declared in this list will be linked to this
container. Defaults to ``None``.
ipv4_address (str): The IP address of this container on the
network, using the IPv4 protocol. Defaults to ``None``.
ipv6_address (str): The IP address of this container on the
network, using the IPv6 protocol. Defaults to ``None``.
link_local_ips (:py:class:`list`): A list of link-local
    (IPv4/IPv6) addresses.
driver_opt (dict): A dictionary of options to provide to the
    network driver. Defaults to ``None``.
mac_address (str): The MAC address of this container on the
    network. Defaults to ``None``.
"""
data = {
"Container": container,
"EndpointConfig": self.create_endpoint_config(
aliases=aliases, links=links, ipv4_address=ipv4_address,
ipv6_address=ipv6_address, link_local_ips=link_local_ips,
driver_opt=driver_opt,
mac_address=mac_address
),
}
url = self._url("/networks/{0}/connect", net_id)
res = self._post_json(url, data=data)
self._raise_for_status(res)
@check_resource('container')
def disconnect_container_from_network(self, container, net_id,
force=False):
"""
Disconnect a container from a network.
Args:
container (str): container ID or name to be disconnected from the
network
net_id (str): network ID
force (bool): Force the container to disconnect from a network.
Default: ``False``
"""
data = {"Container": container}
if force:
if version_lt(self._version, '1.22'):
raise InvalidVersion(
'Forced disconnect was introduced in API 1.22'
)
data['Force'] = force
url = self._url("/networks/{0}/disconnect", net_id)
res = self._post_json(url, data=data)
self._raise_for_status(res)
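# A minimal usage sketch for the network mixin above (illustrative only;
# the daemon address and the container name 'web' are hypothetical):
if __name__ == '__main__':
    import docker

    client = docker.APIClient(base_url='unix://var/run/docker.sock')
    ipam = docker.types.IPAMConfig(pool_configs=[
        docker.types.IPAMPool(subnet='192.168.52.0/24',
                              gateway='192.168.52.254')
    ])
    net = client.create_network('backend', driver='bridge', ipam=ipam)
    client.connect_container_to_network('web', net['Id'],
                                        ipv4_address='192.168.52.2')
    client.disconnect_container_from_network('web', net['Id'])
    client.remove_network(net['Id'])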

View File

@@ -0,0 +1,261 @@
from .. import auth, utils
class PluginApiMixin:
@utils.minimum_version('1.25')
@utils.check_resource('name')
def configure_plugin(self, name, options):
"""
Configure a plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
options (dict): A key-value mapping of options
Returns:
``True`` if successful
"""
url = self._url('/plugins/{0}/set', name)
data = options
if isinstance(data, dict):
data = [f'{k}={v}' for k, v in data.items()]
res = self._post_json(url, data=data)
self._raise_for_status(res)
return True
@utils.minimum_version('1.25')
def create_plugin(self, name, plugin_data_dir, gzip=False):
"""
Create a new plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
plugin_data_dir (string): Path to the plugin data directory.
Plugin data directory must contain the ``config.json``
manifest file and the ``rootfs`` directory.
gzip (bool): Compress the context using gzip. Default: False
Returns:
``True`` if successful
"""
url = self._url('/plugins/create')
with utils.create_archive(
root=plugin_data_dir, gzip=gzip,
files=set(utils.build.walk(plugin_data_dir, []))
) as archv:
res = self._post(url, params={'name': name}, data=archv)
self._raise_for_status(res)
return True
@utils.minimum_version('1.25')
def disable_plugin(self, name, force=False):
"""
Disable an installed plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
force (bool): To enable the force query parameter.
Returns:
``True`` if successful
"""
url = self._url('/plugins/{0}/disable', name)
res = self._post(url, params={'force': force})
self._raise_for_status(res)
return True
@utils.minimum_version('1.25')
def enable_plugin(self, name, timeout=0):
"""
Enable an installed plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
timeout (int): Operation timeout (in seconds). Default: 0
Returns:
``True`` if successful
"""
url = self._url('/plugins/{0}/enable', name)
params = {'timeout': timeout}
res = self._post(url, params=params)
self._raise_for_status(res)
return True
@utils.minimum_version('1.25')
def inspect_plugin(self, name):
"""
Retrieve plugin metadata.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
Returns:
A dict containing plugin info
"""
url = self._url('/plugins/{0}/json', name)
return self._result(self._get(url), True)
@utils.minimum_version('1.25')
def pull_plugin(self, remote, privileges, name=None):
"""
Pull and install a plugin. After the plugin is installed, it can be
enabled using :py:meth:`~enable_plugin`.
Args:
remote (string): Remote reference for the plugin to install.
The ``:latest`` tag is optional, and is the default if
omitted.
privileges (:py:class:`list`): A list of privileges the user
consents to grant to the plugin. Can be retrieved using
:py:meth:`~plugin_privileges`.
name (string): Local name for the pulled plugin. The
``:latest`` tag is optional, and is the default if omitted.
Returns:
An iterable object streaming the decoded API logs
"""
url = self._url('/plugins/pull')
params = {
'remote': remote,
}
if name:
params['name'] = name
headers = {}
registry, repo_name = auth.resolve_repository_name(remote)
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
response = self._post_json(
url, params=params, headers=headers, data=privileges,
stream=True
)
self._raise_for_status(response)
return self._stream_helper(response, decode=True)
@utils.minimum_version('1.25')
def plugins(self):
"""
Retrieve a list of installed plugins.
Returns:
A list of dicts, one per plugin
"""
url = self._url('/plugins')
return self._result(self._get(url), True)
@utils.minimum_version('1.25')
def plugin_privileges(self, name):
"""
Retrieve list of privileges to be granted to a plugin.
Args:
name (string): Name of the remote plugin to examine. The
``:latest`` tag is optional, and is the default if omitted.
Returns:
A list of dictionaries representing the plugin's
permissions
"""
params = {
'remote': name,
}
headers = {}
registry, repo_name = auth.resolve_repository_name(name)
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
url = self._url('/plugins/privileges')
return self._result(
self._get(url, params=params, headers=headers), True
)
@utils.minimum_version('1.25')
@utils.check_resource('name')
def push_plugin(self, name):
"""
Push a plugin to the registry.
Args:
name (string): Name of the plugin to upload. The ``:latest``
tag is optional, and is the default if omitted.
Returns:
``True`` if successful
"""
url = self._url('/plugins/{0}/push', name)
headers = {}
registry, repo_name = auth.resolve_repository_name(name)
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
res = self._post(url, headers=headers)
self._raise_for_status(res)
return self._stream_helper(res, decode=True)
@utils.minimum_version('1.25')
@utils.check_resource('name')
def remove_plugin(self, name, force=False):
"""
Remove an installed plugin.
Args:
name (string): Name of the plugin to remove. The ``:latest``
tag is optional, and is the default if omitted.
force (bool): Disable the plugin before removing. This may
result in issues if the plugin is in use by a container.
Returns:
``True`` if successful
"""
url = self._url('/plugins/{0}', name)
res = self._delete(url, params={'force': force})
self._raise_for_status(res)
return True
@utils.minimum_version('1.26')
@utils.check_resource('name')
def upgrade_plugin(self, name, remote, privileges):
"""
Upgrade an installed plugin.
Args:
name (string): Name of the plugin to upgrade. The ``:latest``
tag is optional and is the default if omitted.
remote (string): Remote reference to upgrade to. The
``:latest`` tag is optional and is the default if omitted.
privileges (:py:class:`list`): A list of privileges the user
consents to grant to the plugin. Can be retrieved using
:py:meth:`~plugin_privileges`.
Returns:
An iterable object streaming the decoded API logs
"""
url = self._url('/plugins/{0}/upgrade', name)
params = {
'remote': remote,
}
headers = {}
registry, repo_name = auth.resolve_repository_name(remote)
header = auth.get_config_header(self, registry)
if header:
headers['X-Registry-Auth'] = header
response = self._post_json(
url, params=params, headers=headers, data=privileges,
stream=True
)
self._raise_for_status(response)
return self._stream_helper(response, decode=True)
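# A minimal usage sketch for the plugin mixin above (illustrative only;
# 'vieux/sshfs' is just an example plugin reference):
if __name__ == '__main__':
    import docker

    client = docker.APIClient(base_url='unix://var/run/docker.sock')
    ref = 'vieux/sshfs:latest'
    # grant the privileges the plugin asks for, then pull and enable it
    privileges = client.plugin_privileges(ref)
    for status in client.pull_plugin(ref, privileges):
        print(status)
    client.configure_plugin(ref, {'DEBUG': '1'})
    client.enable_plugin(ref)
    print(client.inspect_plugin(ref)['Enabled'])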

View File

@@ -0,0 +1,99 @@
import base64
from .. import errors
from .. import utils
class SecretApiMixin:
@utils.minimum_version('1.25')
def create_secret(self, name, data, labels=None, driver=None):
"""
Create a secret
Args:
name (string): Name of the secret
data (bytes): Secret data to be stored
labels (dict): A mapping of labels to assign to the secret
driver (DriverConfig): A custom driver configuration. If
unspecified, the default ``internal`` driver will be used
Returns (dict): ID of the newly created secret
"""
if not isinstance(data, bytes):
data = data.encode('utf-8')
data = base64.b64encode(data)
data = data.decode('ascii')
body = {
'Data': data,
'Name': name,
'Labels': labels
}
if driver is not None:
if utils.version_lt(self._version, '1.31'):
raise errors.InvalidVersion(
'Secret driver is only available for API version >= 1.31'
)
body['Driver'] = driver
url = self._url('/secrets/create')
return self._result(
self._post_json(url, data=body), True
)
@utils.minimum_version('1.25')
@utils.check_resource('id')
def inspect_secret(self, id):
"""
Retrieve secret metadata
Args:
id (string): Full ID of the secret to inspect
Returns (dict): A dictionary of metadata
Raises:
:py:class:`docker.errors.NotFound`
if no secret with that ID exists
"""
url = self._url('/secrets/{0}', id)
return self._result(self._get(url), True)
@utils.minimum_version('1.25')
@utils.check_resource('id')
def remove_secret(self, id):
"""
Remove a secret
Args:
id (string): Full ID of the secret to remove
Returns (boolean): True if successful
Raises:
:py:class:`docker.errors.NotFound`
if no secret with that ID exists
"""
url = self._url('/secrets/{0}', id)
res = self._delete(url)
self._raise_for_status(res)
return True
@utils.minimum_version('1.25')
def secrets(self, filters=None):
"""
List secrets
Args:
filters (dict): A map of filters to process on the secrets
list. Available filters: ``names``
Returns (list): A list of secrets
"""
url = self._url('/secrets')
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
return self._result(self._get(url, params=params), True)
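# A minimal usage sketch for the secret mixin above (illustrative only;
# it assumes the daemon is a swarm manager):
if __name__ == '__main__':
    import docker

    client = docker.APIClient(base_url='unix://var/run/docker.sock')
    secret = client.create_secret('db_password', b's3cr3t',
                                  labels={'app': 'demo'})
    print(client.inspect_secret(secret['ID'])['Spec']['Name'])
    print([s['Spec']['Name'] for s in client.secrets()])
    client.remove_secret(secret['ID'])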

View File

@@ -0,0 +1,480 @@
from .. import auth, errors, utils
from ..types import ServiceMode
def _check_api_features(version, task_template, update_config, endpoint_spec,
rollback_config):
def raise_version_error(param, min_version):
raise errors.InvalidVersion(
'{} is not supported in API version < {}'.format(
param, min_version
)
)
if update_config is not None:
if utils.version_lt(version, '1.25'):
if 'MaxFailureRatio' in update_config:
raise_version_error('UpdateConfig.max_failure_ratio', '1.25')
if 'Monitor' in update_config:
raise_version_error('UpdateConfig.monitor', '1.25')
if utils.version_lt(version, '1.28'):
if update_config.get('FailureAction') == 'rollback':
raise_version_error(
'UpdateConfig.failure_action rollback', '1.28'
)
if utils.version_lt(version, '1.29'):
if 'Order' in update_config:
raise_version_error('UpdateConfig.order', '1.29')
if rollback_config is not None:
if utils.version_lt(version, '1.28'):
raise_version_error('rollback_config', '1.28')
if utils.version_lt(version, '1.29'):
if 'Order' in rollback_config:
raise_version_error('RollbackConfig.order', '1.29')
if endpoint_spec is not None:
if utils.version_lt(version, '1.32') and 'Ports' in endpoint_spec:
if any(p.get('PublishMode') for p in endpoint_spec['Ports']):
raise_version_error('EndpointSpec.Ports[].mode', '1.32')
if task_template is not None:
if 'ForceUpdate' in task_template and utils.version_lt(
version, '1.25'):
raise_version_error('force_update', '1.25')
if task_template.get('Placement'):
if utils.version_lt(version, '1.30'):
if task_template['Placement'].get('Platforms'):
raise_version_error('Placement.platforms', '1.30')
if utils.version_lt(version, '1.27'):
if task_template['Placement'].get('Preferences'):
raise_version_error('Placement.preferences', '1.27')
if task_template.get('ContainerSpec'):
container_spec = task_template.get('ContainerSpec')
if utils.version_lt(version, '1.25'):
if container_spec.get('TTY'):
raise_version_error('ContainerSpec.tty', '1.25')
if container_spec.get('Hostname') is not None:
raise_version_error('ContainerSpec.hostname', '1.25')
if container_spec.get('Hosts') is not None:
raise_version_error('ContainerSpec.hosts', '1.25')
if container_spec.get('Groups') is not None:
raise_version_error('ContainerSpec.groups', '1.25')
if container_spec.get('DNSConfig') is not None:
raise_version_error('ContainerSpec.dns_config', '1.25')
if container_spec.get('Healthcheck') is not None:
raise_version_error('ContainerSpec.healthcheck', '1.25')
if utils.version_lt(version, '1.28'):
if container_spec.get('ReadOnly') is not None:
raise_version_error('ContainerSpec.read_only', '1.28')
if container_spec.get('StopSignal') is not None:
raise_version_error('ContainerSpec.stop_signal', '1.28')
if utils.version_lt(version, '1.30'):
if container_spec.get('Configs') is not None:
raise_version_error('ContainerSpec.configs', '1.30')
if container_spec.get('Privileges') is not None:
raise_version_error('ContainerSpec.privileges', '1.30')
if utils.version_lt(version, '1.35'):
if container_spec.get('Isolation') is not None:
raise_version_error('ContainerSpec.isolation', '1.35')
if utils.version_lt(version, '1.38'):
if container_spec.get('Init') is not None:
raise_version_error('ContainerSpec.init', '1.38')
if task_template.get('Resources'):
if utils.version_lt(version, '1.32'):
if task_template['Resources'].get('GenericResources'):
raise_version_error('Resources.generic_resources', '1.32')
def _merge_task_template(current, override):
merged = current.copy()
if override is not None:
for ts_key, ts_value in override.items():
if ts_key == 'ContainerSpec':
if 'ContainerSpec' not in merged:
merged['ContainerSpec'] = {}
for cs_key, cs_value in override['ContainerSpec'].items():
if cs_value is not None:
merged['ContainerSpec'][cs_key] = cs_value
elif ts_value is not None:
merged[ts_key] = ts_value
return merged
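# Illustrative merge semantics (hypothetical values): overriding only the
# image keeps the remaining ContainerSpec keys from the current spec:
#   _merge_task_template(
#       {'ContainerSpec': {'Image': 'nginx:1.22', 'TTY': True}},
#       {'ContainerSpec': {'Image': 'nginx:1.23'}},
#   ) == {'ContainerSpec': {'Image': 'nginx:1.23', 'TTY': True}}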
class ServiceApiMixin:
@utils.minimum_version('1.24')
def create_service(
self, task_template, name=None, labels=None, mode=None,
update_config=None, networks=None, endpoint_config=None,
endpoint_spec=None, rollback_config=None
):
"""
Create a service.
Args:
task_template (TaskTemplate): Specification of the task to start as
part of the new service.
name (string): User-defined name for the service. Optional.
labels (dict): A map of labels to associate with the service.
Optional.
mode (ServiceMode): Scheduling mode for the service (replicated
or global). Defaults to replicated.
update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None``
rollback_config (RollbackConfig): Specification for the rollback
strategy of the service. Default: ``None``
networks (:py:class:`list`): List of network names or IDs or
:py:class:`~docker.types.NetworkAttachmentConfig` to attach the
service to. Default: ``None``.
endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``.
Returns:
A dictionary containing an ``ID`` key for the newly created
service.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
_check_api_features(
self._version, task_template, update_config, endpoint_spec,
rollback_config
)
url = self._url('/services/create')
headers = {}
image = task_template.get('ContainerSpec', {}).get('Image', None)
if image is None:
raise errors.DockerException(
'Missing mandatory Image key in ContainerSpec'
)
if mode and not isinstance(mode, dict):
mode = ServiceMode(mode)
registry, repo_name = auth.resolve_repository_name(image)
auth_header = auth.get_config_header(self, registry)
if auth_header:
headers['X-Registry-Auth'] = auth_header
if utils.version_lt(self._version, '1.25'):
networks = networks or task_template.pop('Networks', None)
data = {
'Name': name,
'Labels': labels,
'TaskTemplate': task_template,
'Mode': mode,
'Networks': utils.convert_service_networks(networks),
'EndpointSpec': endpoint_spec
}
if update_config is not None:
data['UpdateConfig'] = update_config
if rollback_config is not None:
data['RollbackConfig'] = rollback_config
return self._result(
self._post_json(url, data=data, headers=headers), True
)
@utils.minimum_version('1.24')
@utils.check_resource('service')
def inspect_service(self, service, insert_defaults=None):
"""
Return information about a service.
Args:
service (str): Service name or ID.
insert_defaults (boolean): If true, default values will be merged
into the service inspect output.
Returns:
(dict): A dictionary of the server-side representation of the
service, including all relevant properties.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/services/{0}', service)
params = {}
if insert_defaults is not None:
if utils.version_lt(self._version, '1.29'):
raise errors.InvalidVersion(
'insert_defaults is not supported in API version < 1.29'
)
params['insertDefaults'] = insert_defaults
return self._result(self._get(url, params=params), True)
@utils.minimum_version('1.24')
@utils.check_resource('task')
def inspect_task(self, task):
"""
Retrieve information about a task.
Args:
task (str): Task ID
Returns:
(dict): Information about the task.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/tasks/{0}', task)
return self._result(self._get(url), True)
@utils.minimum_version('1.24')
@utils.check_resource('service')
def remove_service(self, service):
"""
Stop and remove a service.
Args:
service (str): Service name or ID
Returns:
``True`` if successful.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/services/{0}', service)
resp = self._delete(url)
self._raise_for_status(resp)
return True
@utils.minimum_version('1.24')
def services(self, filters=None):
"""
List services.
Args:
filters (dict): Filters to process on the nodes list. Valid
filters: ``id``, ``name`` , ``label`` and ``mode``.
Default: ``None``.
Returns:
A list of dictionaries containing data about each service.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {
'filters': utils.convert_filters(filters) if filters else None
}
url = self._url('/services')
return self._result(self._get(url, params=params), True)
@utils.minimum_version('1.25')
@utils.check_resource('service')
def service_logs(self, service, details=False, follow=False, stdout=False,
stderr=False, since=0, timestamps=False, tail='all',
is_tty=None):
"""
Get log stream for a service.
Note: This endpoint works only for services with the ``json-file``
or ``journald`` logging drivers.
Args:
service (str): ID or name of the service
details (bool): Show extra details provided to logs.
Default: ``False``
follow (bool): Keep connection open to read logs as they are
sent by the Engine. Default: ``False``
stdout (bool): Return logs from ``stdout``. Default: ``False``
stderr (bool): Return logs from ``stderr``. Default: ``False``
since (int): UNIX timestamp for the logs starting point.
Default: 0
timestamps (bool): Add timestamps to every log line.
tail (string or int): Number of log lines to be returned,
counting from the current end of the logs. Specify an
integer or ``'all'`` to output all log lines.
Default: ``all``
is_tty (bool): Whether the service's :py:class:`ContainerSpec`
enables the TTY option. If omitted, the method will query
the Engine for the information, causing an additional
roundtrip.
Returns (generator): Logs for the service.
"""
params = {
'details': details,
'follow': follow,
'stdout': stdout,
'stderr': stderr,
'since': since,
'timestamps': timestamps,
'tail': tail
}
url = self._url('/services/{0}/logs', service)
res = self._get(url, params=params, stream=True)
if is_tty is None:
is_tty = self.inspect_service(
service
)['Spec']['TaskTemplate']['ContainerSpec'].get('TTY', False)
return self._get_result_tty(True, res, is_tty)
@utils.minimum_version('1.24')
def tasks(self, filters=None):
"""
Retrieve a list of tasks.
Args:
filters (dict): A map of filters to process on the tasks list.
Valid filters: ``id``, ``name``, ``service``, ``node``,
``label`` and ``desired-state``.
Returns:
(:py:class:`list`): List of task dictionaries.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {
'filters': utils.convert_filters(filters) if filters else None
}
url = self._url('/tasks')
return self._result(self._get(url, params=params), True)
@utils.minimum_version('1.24')
@utils.check_resource('service')
def update_service(self, service, version, task_template=None, name=None,
labels=None, mode=None, update_config=None,
networks=None, endpoint_config=None,
endpoint_spec=None, fetch_current_spec=False,
rollback_config=None):
"""
Update a service.
Args:
service (string): A service identifier (either its name or service
ID).
version (int): The version number of the service object being
updated. This is required to avoid conflicting writes.
task_template (TaskTemplate): Specification of the updated task to
start as part of the service.
name (string): New name for the service. Optional.
labels (dict): A map of labels to associate with the service.
Optional.
mode (ServiceMode): Scheduling mode for the service (replicated
or global). Defaults to replicated.
update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None``.
rollback_config (RollbackConfig): Specification for the rollback
strategy of the service. Default: ``None``
networks (:py:class:`list`): List of network names or IDs or
:py:class:`~docker.types.NetworkAttachmentConfig` to attach the
service to. Default: ``None``.
endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``.
fetch_current_spec (boolean): Use the undefined settings from the
current specification of the service. Default: ``False``
Returns:
A dictionary containing a ``Warnings`` key.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
_check_api_features(
self._version, task_template, update_config, endpoint_spec,
rollback_config
)
if fetch_current_spec:
inspect_defaults = True
if utils.version_lt(self._version, '1.29'):
inspect_defaults = None
current = self.inspect_service(
service, insert_defaults=inspect_defaults
)['Spec']
else:
current = {}
url = self._url('/services/{0}/update', service)
data = {}
headers = {}
data['Name'] = current.get('Name') if name is None else name
data['Labels'] = current.get('Labels') if labels is None else labels
if mode is not None:
if not isinstance(mode, dict):
mode = ServiceMode(mode)
data['Mode'] = mode
else:
data['Mode'] = current.get('Mode')
data['TaskTemplate'] = _merge_task_template(
current.get('TaskTemplate', {}), task_template
)
container_spec = data['TaskTemplate'].get('ContainerSpec', {})
image = container_spec.get('Image', None)
if image is not None:
registry, repo_name = auth.resolve_repository_name(image)
auth_header = auth.get_config_header(self, registry)
if auth_header:
headers['X-Registry-Auth'] = auth_header
if update_config is not None:
data['UpdateConfig'] = update_config
else:
data['UpdateConfig'] = current.get('UpdateConfig')
if rollback_config is not None:
data['RollbackConfig'] = rollback_config
else:
data['RollbackConfig'] = current.get('RollbackConfig')
if networks is not None:
converted_networks = utils.convert_service_networks(networks)
if utils.version_lt(self._version, '1.25'):
data['Networks'] = converted_networks
else:
data['TaskTemplate']['Networks'] = converted_networks
elif utils.version_lt(self._version, '1.25'):
data['Networks'] = current.get('Networks')
elif data['TaskTemplate'].get('Networks') is None:
current_task_template = current.get('TaskTemplate', {})
current_networks = current_task_template.get('Networks')
if current_networks is None:
current_networks = current.get('Networks')
if current_networks is not None:
data['TaskTemplate']['Networks'] = current_networks
if endpoint_spec is not None:
data['EndpointSpec'] = endpoint_spec
else:
data['EndpointSpec'] = current.get('EndpointSpec')
resp = self._post_json(
url, data=data, params={'version': version}, headers=headers
)
return self._result(resp, json=True)
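# A minimal usage sketch for the service mixin above (illustrative only;
# it assumes the daemon is a swarm manager):
if __name__ == '__main__':
    import docker
    from docker.types import (
        ContainerSpec, EndpointSpec, ServiceMode, TaskTemplate
    )

    client = docker.APIClient(base_url='unix://var/run/docker.sock')
    task_template = TaskTemplate(
        container_spec=ContainerSpec(image='nginx:alpine')
    )
    service = client.create_service(
        task_template, name='web',
        mode=ServiceMode('replicated', replicas=2),
        endpoint_spec=EndpointSpec(ports={8080: 80})
    )
    # updates must pass the current version to avoid conflicting writes
    version = client.inspect_service(service['ID'])['Version']['Index']
    client.update_service(service['ID'], version, name='web-renamed')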

View File

@@ -0,0 +1,463 @@
import logging
import http.client as http_client
from ..constants import DEFAULT_SWARM_ADDR_POOL, DEFAULT_SWARM_SUBNET_SIZE
from .. import errors
from .. import types
from .. import utils
log = logging.getLogger(__name__)
class SwarmApiMixin:
def create_swarm_spec(self, *args, **kwargs):
"""
Create a :py:class:`docker.types.SwarmSpec` instance that can be used
as the ``swarm_spec`` argument in
:py:meth:`~docker.api.swarm.SwarmApiMixin.init_swarm`.
Args:
task_history_retention_limit (int): Maximum number of task history
    entries stored.
snapshot_interval (int): Number of log entries between snapshots.
keep_old_snapshots (int): Number of snapshots to keep beyond the
current snapshot.
log_entries_for_slow_followers (int): Number of log entries to
keep around to sync up slow followers after a snapshot is
created.
heartbeat_tick (int): Number of ticks (in seconds) between each
    heartbeat.
election_tick (int): Number of ticks (in seconds) needed without a
    leader to trigger a new election.
dispatcher_heartbeat_period (int): The delay for an agent to send
a heartbeat to the dispatcher.
node_cert_expiry (int): Automatic expiry for nodes certificates.
external_cas (:py:class:`list`): Configuration for forwarding
signing requests to an external certificate authority. Use
a list of :py:class:`docker.types.SwarmExternalCA`.
name (string): Swarm's name
labels (dict): User-defined key/value metadata.
signing_ca_cert (str): The desired signing CA certificate for all
swarm node TLS leaf certificates, in PEM format.
signing_ca_key (str): The desired signing CA key for all swarm
node TLS leaf certificates, in PEM format.
ca_force_rotate (int): An integer whose purpose is to force swarm
to generate a new signing CA certificate and key, if none have
been specified.
autolock_managers (boolean): If set, generate a key and use it to
lock data stored on the managers.
log_driver (DriverConfig): The default log driver to use for tasks
created in the orchestrator.
Returns:
:py:class:`docker.types.SwarmSpec`
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> spec = client.api.create_swarm_spec(
...     snapshot_interval=5000, log_entries_for_slow_followers=1200
... )
>>> client.api.init_swarm(
...     advertise_addr='eth0', listen_addr='0.0.0.0:5000',
...     force_new_cluster=False, swarm_spec=spec
... )
"""
ext_ca = kwargs.pop('external_ca', None)
if ext_ca:
kwargs['external_cas'] = [ext_ca]
return types.SwarmSpec(self._version, *args, **kwargs)
@utils.minimum_version('1.24')
def get_unlock_key(self):
"""
Get the unlock key for this Swarm manager.
Returns:
A ``dict`` containing an ``UnlockKey`` member
"""
return self._result(self._get(self._url('/swarm/unlockkey')), True)
@utils.minimum_version('1.24')
def init_swarm(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
force_new_cluster=False, swarm_spec=None,
default_addr_pool=None, subnet_size=None,
data_path_addr=None, data_path_port=None):
"""
Initialize a new Swarm using the current connected engine as the first
node.
Args:
advertise_addr (string): Externally reachable address advertised
to other nodes. This can either be an address/port combination
in the form ``192.168.1.1:4567``, or an interface followed by a
port number, like ``eth0:4567``. If the port number is omitted,
the port number from the listen address is used. If
``advertise_addr`` is not specified, it will be automatically
detected when possible. Default: None
listen_addr (string): Listen address used for inter-manager
communication, as well as determining the networking interface
used for the VXLAN Tunnel Endpoint (VTEP). This can either be
an address/port combination in the form ``192.168.1.1:4567``,
or an interface followed by a port number, like ``eth0:4567``.
If the port number is omitted, the default swarm listening port
is used. Default: '0.0.0.0:2377'
force_new_cluster (bool): Force creating a new Swarm, even if
already part of one. Default: False
swarm_spec (dict): Configuration settings of the new Swarm. Use
``APIClient.create_swarm_spec`` to generate a valid
configuration. Default: None
default_addr_pool (list of strings): Default Address Pool specifies
default subnet pools for global scope networks. Each pool
should be specified as a CIDR block, like '10.0.0.0/8'.
Default: None
subnet_size (int): SubnetSize specifies the subnet size of the
networks created from the default subnet pool. Default: None
data_path_addr (string): Address or interface to use for data path
traffic. For example, 192.168.1.1, or an interface, like eth0.
data_path_port (int): Port number to use for data path traffic.
Acceptable port range is 1024 to 49151. If set to ``None`` or
0, the default port 4789 will be used. Default: None
Returns:
(str): The ID of the created node.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/swarm/init')
if swarm_spec is not None and not isinstance(swarm_spec, dict):
raise TypeError('swarm_spec must be a dictionary')
if default_addr_pool is not None:
if utils.version_lt(self._version, '1.39'):
raise errors.InvalidVersion(
'Address pool is only available for API version >= 1.39'
)
# subnet_size becomes 0 if not set with default_addr_pool
if subnet_size is None:
subnet_size = DEFAULT_SWARM_SUBNET_SIZE
if subnet_size is not None:
if utils.version_lt(self._version, '1.39'):
raise errors.InvalidVersion(
'Subnet size is only available for API version >= 1.39'
)
# subnet_size is ignored if set without default_addr_pool
if default_addr_pool is None:
default_addr_pool = DEFAULT_SWARM_ADDR_POOL
data = {
'AdvertiseAddr': advertise_addr,
'ListenAddr': listen_addr,
'DefaultAddrPool': default_addr_pool,
'SubnetSize': subnet_size,
'ForceNewCluster': force_new_cluster,
'Spec': swarm_spec,
}
if data_path_addr is not None:
if utils.version_lt(self._version, '1.30'):
raise errors.InvalidVersion(
'Data path address is only available for '
'API version >= 1.30'
)
data['DataPathAddr'] = data_path_addr
if data_path_port is not None:
if utils.version_lt(self._version, '1.40'):
raise errors.InvalidVersion(
'Data path port is only available for '
'API version >= 1.40'
)
data['DataPathPort'] = data_path_port
response = self._post_json(url, data=data)
return self._result(response, json=True)
@utils.minimum_version('1.24')
def inspect_swarm(self):
"""
Retrieve low-level information about the current swarm.
Returns:
A dictionary containing data about the swarm.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/swarm')
return self._result(self._get(url), True)
@utils.check_resource('node_id')
@utils.minimum_version('1.24')
def inspect_node(self, node_id):
"""
Retrieve low-level information about a swarm node
Args:
node_id (string): ID of the node to be inspected.
Returns:
A dictionary containing data about this node.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/nodes/{0}', node_id)
return self._result(self._get(url), True)
@utils.minimum_version('1.24')
def join_swarm(self, remote_addrs, join_token, listen_addr='0.0.0.0:2377',
advertise_addr=None, data_path_addr=None):
"""
Make this Engine join a swarm that has already been created.
Args:
remote_addrs (:py:class:`list`): Addresses of one or more manager
nodes already participating in the Swarm to join.
join_token (string): Secret token for joining this Swarm.
listen_addr (string): Listen address used for inter-manager
communication if the node gets promoted to manager, as well as
determining the networking interface used for the VXLAN Tunnel
Endpoint (VTEP). Default: ``'0.0.0.0:2377'``
advertise_addr (string): Externally reachable address advertised
to other nodes. This can either be an address/port combination
in the form ``192.168.1.1:4567``, or an interface followed by a
port number, like ``eth0:4567``. If the port number is omitted,
the port number from the listen address is used. If
``advertise_addr`` is not specified, it will be automatically
detected when possible. Default: ``None``
data_path_addr (string): Address or interface to use for data path
traffic. For example, 192.168.1.1, or an interface, like eth0.
Returns:
``True`` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
data = {
'RemoteAddrs': remote_addrs,
'ListenAddr': listen_addr,
'JoinToken': join_token,
'AdvertiseAddr': advertise_addr,
}
if data_path_addr is not None:
if utils.version_lt(self._version, '1.30'):
raise errors.InvalidVersion(
'Data path address is only available for '
'API version >= 1.30'
)
data['DataPathAddr'] = data_path_addr
url = self._url('/swarm/join')
response = self._post_json(url, data=data)
self._raise_for_status(response)
return True
@utils.minimum_version('1.24')
def leave_swarm(self, force=False):
"""
Leave a swarm.
Args:
force (bool): Leave the swarm even if this node is a manager.
Default: ``False``
Returns:
``True`` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/swarm/leave')
response = self._post(url, params={'force': force})
# Ignore "this node is not part of a swarm" error
if force and response.status_code == http_client.NOT_ACCEPTABLE:
return True
# FIXME: Temporary workaround for 1.13.0-rc bug
# https://github.com/docker/docker/issues/29192
if force and response.status_code == http_client.SERVICE_UNAVAILABLE:
return True
self._raise_for_status(response)
return True
@utils.minimum_version('1.24')
def nodes(self, filters=None):
"""
List swarm nodes.
Args:
filters (dict): Filters to process on the nodes list. Valid
filters: ``id``, ``name``, ``membership`` and ``role``.
Default: ``None``
Returns:
A list of dictionaries containing data about each swarm node.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/nodes')
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
return self._result(self._get(url, params=params), True)
@utils.check_resource('node_id')
@utils.minimum_version('1.24')
def remove_node(self, node_id, force=False):
"""
Remove a node from the swarm.
Args:
node_id (string): ID of the node to be removed.
force (bool): Force remove an active node. Default: `False`
Raises:
:py:class:`docker.errors.NotFound`
If the node referenced doesn't exist in the swarm.
:py:class:`docker.errors.APIError`
If the server returns an error.
Returns:
`True` if the request was successful.
"""
url = self._url('/nodes/{0}', node_id)
params = {
'force': force
}
res = self._delete(url, params=params)
self._raise_for_status(res)
return True
@utils.minimum_version('1.24')
def unlock_swarm(self, key):
"""
Unlock a locked swarm.
Args:
key (string): The unlock key as provided by
:py:meth:`get_unlock_key`
Raises:
:py:class:`docker.errors.InvalidArgument`
If the key argument is in an incompatible format
:py:class:`docker.errors.APIError`
If the server returns an error.
Returns:
`True` if the request was successful.
Example:
>>> key = client.api.get_unlock_key()
>>> client.api.unlock_swarm(key)
"""
if isinstance(key, dict):
if 'UnlockKey' not in key:
raise errors.InvalidArgument('Invalid unlock key format')
else:
key = {'UnlockKey': key}
url = self._url('/swarm/unlock')
res = self._post_json(url, data=key)
self._raise_for_status(res)
return True
@utils.minimum_version('1.24')
def update_node(self, node_id, version, node_spec=None):
"""
Update the node's configuration
Args:
node_id (string): ID of the node to be updated.
version (int): The version number of the node object being
updated. This is required to avoid conflicting writes.
node_spec (dict): Configuration settings to update. Any values
not provided will be removed. Default: ``None``
Returns:
`True` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> node_spec = {'Availability': 'active',
...              'Name': 'node-name',
...              'Role': 'manager',
...              'Labels': {'foo': 'bar'}}
>>> client.api.update_node(node_id='24ifsmvkjbyhk', version=8,
...                        node_spec=node_spec)
"""
url = self._url('/nodes/{0}/update?version={1}', node_id, str(version))
res = self._post_json(url, data=node_spec)
self._raise_for_status(res)
return True
@utils.minimum_version('1.24')
def update_swarm(self, version, swarm_spec=None,
rotate_worker_token=False,
rotate_manager_token=False,
rotate_manager_unlock_key=False):
"""
Update the Swarm's configuration
Args:
version (int): The version number of the swarm object being
updated. This is required to avoid conflicting writes.
swarm_spec (dict): Configuration settings to update. Use
:py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec` to
generate a valid configuration. Default: ``None``.
rotate_worker_token (bool): Rotate the worker join token. Default:
``False``.
rotate_manager_token (bool): Rotate the manager join token.
Default: ``False``.
rotate_manager_unlock_key (bool): Rotate the manager unlock key.
Default: ``False``.
Returns:
``True`` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/swarm/update')
params = {
'rotateWorkerToken': rotate_worker_token,
'rotateManagerToken': rotate_manager_token,
'version': version
}
if rotate_manager_unlock_key:
if utils.version_lt(self._version, '1.25'):
raise errors.InvalidVersion(
'Rotate manager unlock key '
'is only available for API version >= 1.25'
)
params['rotateManagerUnlockKey'] = rotate_manager_unlock_key
response = self._post_json(url, data=swarm_spec, params=params)
self._raise_for_status(response)
return True
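# A minimal usage sketch for the swarm mixin above (illustrative only;
# addresses and interface names are hypothetical):
if __name__ == '__main__':
    import docker

    client = docker.APIClient(base_url='unix://var/run/docker.sock')
    spec = client.create_swarm_spec(snapshot_interval=5000)
    client.init_swarm(advertise_addr='eth0', swarm_spec=spec)
    tokens = client.inspect_swarm()['JoinTokens']
    # a second engine would join with one of these tokens, e.g.:
    # other.join_swarm(['192.168.1.10:2377'], tokens['Worker'])
    print(len(client.nodes()))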

View File

@@ -0,0 +1,164 @@
from .. import errors
from .. import utils
class VolumeApiMixin:
def volumes(self, filters=None):
"""
List volumes currently registered by the docker daemon. Similar to the
``docker volume ls`` command.
Args:
filters (dict): Server-side list filtering options.
Returns:
(dict): Dictionary with list of volume objects as value of the
``Volumes`` key.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> client.api.volumes()
{'Volumes': [{'Driver': 'local',
'Mountpoint': '/var/lib/docker/volumes/foobar/_data',
'Name': 'foobar'},
{'Driver': 'local',
'Mountpoint': '/var/lib/docker/volumes/baz/_data',
'Name': 'baz'}]}
"""
params = {
'filters': utils.convert_filters(filters) if filters else None
}
url = self._url('/volumes')
return self._result(self._get(url, params=params), True)
def create_volume(self, name=None, driver=None, driver_opts=None,
labels=None):
"""
Create and register a named volume
Args:
name (str): Name of the volume
driver (str): Name of the driver used to create the volume
driver_opts (dict): Driver options as a key-value dictionary
labels (dict): Labels to set on the volume
Returns:
(dict): The created volume reference object
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> volume = client.api.create_volume(
... name='foobar',
... driver='local',
... driver_opts={'foo': 'bar', 'baz': 'false'},
... labels={"key": "value"},
... )
>>> print(volume)
{'Driver': 'local',
'Labels': {'key': 'value'},
'Mountpoint': '/var/lib/docker/volumes/foobar/_data',
'Name': 'foobar',
'Scope': 'local'}
"""
url = self._url('/volumes/create')
if driver_opts is not None and not isinstance(driver_opts, dict):
raise TypeError('driver_opts must be a dictionary')
data = {
'Name': name,
'Driver': driver,
'DriverOpts': driver_opts,
}
if labels is not None:
if utils.compare_version('1.23', self._version) < 0:
raise errors.InvalidVersion(
'volume labels were introduced in API 1.23'
)
if not isinstance(labels, dict):
raise TypeError('labels must be a dictionary')
data["Labels"] = labels
return self._result(self._post_json(url, data=data), True)
def inspect_volume(self, name):
"""
Retrieve volume info by name.
Args:
name (str): volume name
Returns:
(dict): Volume information dictionary
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> client.api.inspect_volume('foobar')
{'Driver': 'local',
'Mountpoint': '/var/lib/docker/volumes/foobar/_data',
'Name': 'foobar'}
"""
url = self._url('/volumes/{0}', name)
return self._result(self._get(url), True)
@utils.minimum_version('1.25')
def prune_volumes(self, filters=None):
"""
Delete unused volumes
Args:
filters (dict): Filters to process on the prune list.
Returns:
(dict): A dict containing a list of deleted volume names and
the amount of disk space reclaimed in bytes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
url = self._url('/volumes/prune')
return self._result(self._post(url, params=params), True)
def remove_volume(self, name, force=False):
"""
Remove a volume. Similar to the ``docker volume rm`` command.
Args:
name (str): The volume's name
force (bool): Force removal of volumes that were already removed
out of band by the volume driver plugin.
Raises:
:py:class:`docker.errors.APIError`
If volume failed to remove.
"""
params = {}
if force:
if utils.version_lt(self._version, '1.25'):
raise errors.InvalidVersion(
'force removal was introduced in API 1.25'
)
params = {'force': force}
url = self._url('/volumes/{0}', name, params=params)
resp = self._delete(url)
self._raise_for_status(resp)
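# A minimal usage sketch for the volume mixin above (illustrative only):
if __name__ == '__main__':
    import docker

    client = docker.APIClient(base_url='unix://var/run/docker.sock')
    client.create_volume(name='scratch', driver='local',
                         labels={'keep': 'false'})
    print(client.inspect_volume('scratch')['Mountpoint'])
    # an unused volume matching the label filter is deleted by prune
    print(client.prune_volumes(filters={'label': ['keep=false']}))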

View File

@@ -0,0 +1,388 @@
import base64
import json
import logging
from . import credentials
from . import errors
from .utils import config
INDEX_NAME = 'docker.io'
INDEX_URL = f'https://index.{INDEX_NAME}/v1/'
TOKEN_USERNAME = '<token>'
log = logging.getLogger(__name__)
def resolve_repository_name(repo_name):
if '://' in repo_name:
raise errors.InvalidRepository(
f'Repository name cannot contain a scheme ({repo_name})'
)
index_name, remote_name = split_repo_name(repo_name)
if index_name[0] == '-' or index_name[-1] == '-':
raise errors.InvalidRepository(
'Invalid index name ({}). Cannot begin or end with a'
' hyphen.'.format(index_name)
)
return resolve_index_name(index_name), remote_name
def resolve_index_name(index_name):
index_name = convert_to_hostname(index_name)
if index_name == 'index.' + INDEX_NAME:
index_name = INDEX_NAME
return index_name
def get_config_header(client, registry):
log.debug('Looking for auth config')
if not client._auth_configs or client._auth_configs.is_empty:
log.debug(
"No auth config in memory - loading from filesystem"
)
client._auth_configs = load_config(credstore_env=client.credstore_env)
authcfg = resolve_authconfig(
client._auth_configs, registry, credstore_env=client.credstore_env
)
# Do not fail here if no authentication exists for this
# specific registry, as we can have a read-only pull. Just
# put the header if we can.
if authcfg:
log.debug('Found auth config')
# auth_config needs to be a dict in the format used by auth.py:
# username, password, serveraddress, email
return encode_header(authcfg)
log.debug('No auth config found')
return None
def split_repo_name(repo_name):
parts = repo_name.split('/', 1)
if len(parts) == 1 or (
'.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'
):
# This is a docker index repo (ex: username/foobar or ubuntu)
return INDEX_NAME, repo_name
return tuple(parts)
def get_credential_store(authconfig, registry):
if not isinstance(authconfig, AuthConfig):
authconfig = AuthConfig(authconfig)
return authconfig.get_credential_store(registry)
class AuthConfig(dict):
def __init__(self, dct, credstore_env=None):
if 'auths' not in dct:
dct['auths'] = {}
self.update(dct)
self._credstore_env = credstore_env
self._stores = {}
@classmethod
def parse_auth(cls, entries, raise_on_error=False):
"""
Parses authentication entries
Args:
entries: Dict of authentication entries.
raise_on_error: If set to true, an invalid format will raise
InvalidConfigFile
Returns:
Authentication registry.
"""
conf = {}
for registry, entry in entries.items():
if not isinstance(entry, dict):
log.debug(
'Config entry for key {} is not auth config'.format(
registry
)
)
# We sometimes fall back to parsing the whole config as if it
# was the auth config by itself, for legacy purposes. In that
# case, we fail silently and return an empty conf if any of the
# keys is not formatted properly.
if raise_on_error:
raise errors.InvalidConfigFile(
'Invalid configuration for registry {}'.format(
registry
)
)
return {}
if 'identitytoken' in entry:
log.debug(
'Found an IdentityToken entry for registry {}'.format(
registry
)
)
conf[registry] = {
'IdentityToken': entry['identitytoken']
}
continue # Other values are irrelevant if we have a token
if 'auth' not in entry:
# Starting with engine v1.11 (API 1.23), an empty dictionary is
# a valid value in the auths config.
# https://github.com/docker/compose/issues/3265
log.debug(
'Auth data for {} is absent. Client might be using a '
'credentials store instead.'.format(registry)
)
conf[registry] = {}
continue
username, password = decode_auth(entry['auth'])
log.debug(
'Found entry (registry={}, username={})'
.format(repr(registry), repr(username))
)
conf[registry] = {
'username': username,
'password': password,
'email': entry.get('email'),
'serveraddress': registry,
}
return conf
@classmethod
def load_config(cls, config_path, config_dict, credstore_env=None):
"""
Loads authentication data from a Docker configuration file in the
default location, or from ``config_path`` if it is given.
Lookup priority:
explicit config_path parameter > DOCKER_CONFIG environment
variable > ~/.docker/config.json > ~/.dockercfg
"""
if not config_dict:
config_file = config.find_config_file(config_path)
if not config_file:
return cls({}, credstore_env)
try:
with open(config_file) as f:
config_dict = json.load(f)
except (OSError, KeyError, ValueError) as e:
# Likely missing new Docker config file or it's in an
# unknown format, continue to attempt to read old location
# and format.
log.debug(e)
return cls(_load_legacy_config(config_file), credstore_env)
res = {}
if config_dict.get('auths'):
log.debug("Found 'auths' section")
res.update({
'auths': cls.parse_auth(
config_dict.pop('auths'), raise_on_error=True
)
})
if config_dict.get('credsStore'):
log.debug("Found 'credsStore' section")
res.update({'credsStore': config_dict.pop('credsStore')})
if config_dict.get('credHelpers'):
log.debug("Found 'credHelpers' section")
res.update({'credHelpers': config_dict.pop('credHelpers')})
if res:
return cls(res, credstore_env)
log.debug(
"Couldn't find auth-related section ; attempting to interpret "
"as auth-only file"
)
return cls({'auths': cls.parse_auth(config_dict)}, credstore_env)
@property
def auths(self):
return self.get('auths', {})
@property
def creds_store(self):
return self.get('credsStore', None)
@property
def cred_helpers(self):
return self.get('credHelpers', {})
@property
def is_empty(self):
return (
not self.auths and not self.creds_store and not self.cred_helpers
)
def resolve_authconfig(self, registry=None):
"""
Returns the authentication data from the given auth configuration for a
specific registry. As with the Docker client, legacy entries in the
config with full URLs are stripped down to hostnames before checking
for a match. Returns None if no match was found.
"""
if self.creds_store or self.cred_helpers:
store_name = self.get_credential_store(registry)
if store_name is not None:
log.debug(
f'Using credentials store "{store_name}"'
)
cfg = self._resolve_authconfig_credstore(registry, store_name)
if cfg is not None:
return cfg
log.debug('No entry in credstore - fetching from auth dict')
# Default to the public index server
registry = resolve_index_name(registry) if registry else INDEX_NAME
log.debug(f"Looking for auth entry for {repr(registry)}")
if registry in self.auths:
log.debug(f"Found {repr(registry)}")
return self.auths[registry]
for key, conf in self.auths.items():
if resolve_index_name(key) == registry:
log.debug(f"Found {repr(key)}")
return conf
log.debug("No entry found")
return None
def _resolve_authconfig_credstore(self, registry, credstore_name):
if not registry or registry == INDEX_NAME:
# The ecosystem is inconsistent about index.docker.io vs docker.io -
# in that case, the full URL is necessary.
registry = INDEX_URL
log.debug(f"Looking for auth entry for {repr(registry)}")
store = self._get_store_instance(credstore_name)
try:
data = store.get(registry)
res = {
'ServerAddress': registry,
}
if data['Username'] == TOKEN_USERNAME:
res['IdentityToken'] = data['Secret']
else:
res.update({
'Username': data['Username'],
'Password': data['Secret'],
})
return res
except credentials.CredentialsNotFound:
log.debug('No entry found')
return None
except credentials.StoreError as e:
raise errors.DockerException(
f'Credentials store error: {repr(e)}'
)
def _get_store_instance(self, name):
if name not in self._stores:
self._stores[name] = credentials.Store(
name, environment=self._credstore_env
)
return self._stores[name]
def get_credential_store(self, registry):
if not registry or registry == INDEX_NAME:
registry = INDEX_URL
return self.cred_helpers.get(registry) or self.creds_store
def get_all_credentials(self):
auth_data = self.auths.copy()
if self.creds_store:
# Retrieve all credentials from the default store
store = self._get_store_instance(self.creds_store)
for k in store.list().keys():
auth_data[k] = self._resolve_authconfig_credstore(
k, self.creds_store
)
auth_data[convert_to_hostname(k)] = auth_data[k]
# credHelpers entries take priority over all others
for reg, store_name in self.cred_helpers.items():
auth_data[reg] = self._resolve_authconfig_credstore(
reg, store_name
)
auth_data[convert_to_hostname(reg)] = auth_data[reg]
return auth_data
def add_auth(self, reg, data):
self['auths'][reg] = data
def resolve_authconfig(authconfig, registry=None, credstore_env=None):
if not isinstance(authconfig, AuthConfig):
authconfig = AuthConfig(authconfig, credstore_env)
return authconfig.resolve_authconfig(registry)
def convert_to_hostname(url):
return url.replace('http://', '').replace('https://', '').split('/', 1)[0]
def decode_auth(auth):
if isinstance(auth, str):
auth = auth.encode('ascii')
s = base64.b64decode(auth)
login, pwd = s.split(b':', 1)
return login.decode('utf8'), pwd.decode('utf8')
def encode_header(auth):
auth_json = json.dumps(auth).encode('ascii')
return base64.urlsafe_b64encode(auth_json)
def parse_auth(entries, raise_on_error=False):
"""
Parses authentication entries
Args:
entries: Dict of authentication entries.
raise_on_error: If set to true, an invalid format will raise
InvalidConfigFile
Returns:
Authentication registry.
"""
return AuthConfig.parse_auth(entries, raise_on_error)
def load_config(config_path=None, config_dict=None, credstore_env=None):
return AuthConfig.load_config(config_path, config_dict, credstore_env)
def _load_legacy_config(config_file):
log.debug("Attempting to parse legacy auth file format")
try:
data = []
with open(config_file) as f:
for line in f.readlines():
data.append(line.strip().split(' = ')[1])
if len(data) < 2:
# Not enough data
raise errors.InvalidConfigFile(
'Invalid or empty configuration file!'
)
username, password = decode_auth(data[0])
return {'auths': {
INDEX_NAME: {
'username': username,
'password': password,
'email': data[1],
'serveraddress': INDEX_URL,
}
}}
except Exception as e:
log.debug(e)
log.debug("All parsing attempts failed - returning empty config")
return {}
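# Illustrative round-trips for the helpers above (values are examples):
if __name__ == '__main__':
    assert resolve_repository_name('ubuntu') == (INDEX_NAME, 'ubuntu')
    assert resolve_repository_name('registry.example.com:5000/app') == \
        ('registry.example.com:5000', 'app')
    assert decode_auth(base64.b64encode(b'user:secret')) == \
        ('user', 'secret')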

View File

@@ -0,0 +1,224 @@
from .api.client import APIClient
from .constants import (DEFAULT_TIMEOUT_SECONDS, DEFAULT_MAX_POOL_SIZE)
from .models.configs import ConfigCollection
from .models.containers import ContainerCollection
from .models.images import ImageCollection
from .models.networks import NetworkCollection
from .models.nodes import NodeCollection
from .models.plugins import PluginCollection
from .models.secrets import SecretCollection
from .models.services import ServiceCollection
from .models.swarm import Swarm
from .models.volumes import VolumeCollection
from .utils import kwargs_from_env
class DockerClient:
"""
A client for communicating with a Docker server.
Example:
>>> import docker
>>> client = docker.DockerClient(base_url='unix://var/run/docker.sock')
Args:
base_url (str): URL to the Docker server. For example,
``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
version (str): The version of the API to use. Set to ``auto`` to
    automatically detect the server's version. Default: ``1.41``
timeout (int): Default timeout for API calls, in seconds.
tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
``True`` to enable it with default options, or pass a
:py:class:`~docker.tls.TLSConfig` object to use custom
configuration.
user_agent (str): Set a custom user agent for requests to the server.
credstore_env (dict): Override environment variables when calling the
credential store process.
use_ssh_client (bool): If set to `True`, an ssh connection is made
via shelling out to the ssh client. Ensure the ssh client is
installed and configured on the host.
max_pool_size (int): The maximum number of connections
to save in the pool.
"""
def __init__(self, *args, **kwargs):
self.api = APIClient(*args, **kwargs)
@classmethod
def from_env(cls, **kwargs):
"""
Return a client configured from environment variables.
The environment variables used are the same as those used by the
Docker command-line client. They are:
.. envvar:: DOCKER_HOST
The URL to the Docker host.
.. envvar:: DOCKER_TLS_VERIFY
Verify the host against a CA certificate.
.. envvar:: DOCKER_CERT_PATH
A path to a directory containing TLS certificates to use when
connecting to the Docker host.
Args:
version (str): The version of the API to use. Set to ``auto`` to
automatically detect the server's version. Default: ``auto``
timeout (int): Default timeout for API calls, in seconds.
max_pool_size (int): The maximum number of connections
to save in the pool.
ssl_version (int): A valid `SSL version`_.
assert_hostname (bool): Verify the hostname of the server.
environment (dict): The environment to read environment variables
from. Default: the value of ``os.environ``
credstore_env (dict): Override environment variables when calling
the credential store process.
use_ssh_client (bool): If set to `True`, an ssh connection is
made via shelling out to the ssh client. Ensure the ssh
client is installed and configured on the host.
Example:
>>> import docker
>>> client = docker.from_env()
.. _`SSL version`:
https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
"""
timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT_SECONDS)
max_pool_size = kwargs.pop('max_pool_size', DEFAULT_MAX_POOL_SIZE)
version = kwargs.pop('version', None)
use_ssh_client = kwargs.pop('use_ssh_client', False)
return cls(
timeout=timeout,
max_pool_size=max_pool_size,
version=version,
use_ssh_client=use_ssh_client,
**kwargs_from_env(**kwargs)
)
# Resources
@property
def configs(self):
"""
An object for managing configs on the server. See the
:doc:`configs documentation <configs>` for full details.
"""
return ConfigCollection(client=self)
@property
def containers(self):
"""
An object for managing containers on the server. See the
:doc:`containers documentation <containers>` for full details.
"""
return ContainerCollection(client=self)
@property
def images(self):
"""
An object for managing images on the server. See the
:doc:`images documentation <images>` for full details.
"""
return ImageCollection(client=self)
@property
def networks(self):
"""
An object for managing networks on the server. See the
:doc:`networks documentation <networks>` for full details.
"""
return NetworkCollection(client=self)
@property
def nodes(self):
"""
An object for managing nodes on the server. See the
:doc:`nodes documentation <nodes>` for full details.
"""
return NodeCollection(client=self)
@property
def plugins(self):
"""
An object for managing plugins on the server. See the
:doc:`plugins documentation <plugins>` for full details.
"""
return PluginCollection(client=self)
@property
def secrets(self):
"""
An object for managing secrets on the server. See the
:doc:`secrets documentation <secrets>` for full details.
"""
return SecretCollection(client=self)
@property
def services(self):
"""
An object for managing services on the server. See the
:doc:`services documentation <services>` for full details.
"""
return ServiceCollection(client=self)
@property
def swarm(self):
"""
An object for managing a swarm on the server. See the
:doc:`swarm documentation <swarm>` for full details.
"""
return Swarm(client=self)
@property
def volumes(self):
"""
An object for managing volumes on the server. See the
:doc:`volumes documentation <volumes>` for full details.
"""
return VolumeCollection(client=self)
# Top-level methods
def events(self, *args, **kwargs):
return self.api.events(*args, **kwargs)
events.__doc__ = APIClient.events.__doc__
def df(self):
return self.api.df()
df.__doc__ = APIClient.df.__doc__
def info(self, *args, **kwargs):
return self.api.info(*args, **kwargs)
info.__doc__ = APIClient.info.__doc__
def login(self, *args, **kwargs):
return self.api.login(*args, **kwargs)
login.__doc__ = APIClient.login.__doc__
def ping(self, *args, **kwargs):
return self.api.ping(*args, **kwargs)
ping.__doc__ = APIClient.ping.__doc__
def version(self, *args, **kwargs):
return self.api.version(*args, **kwargs)
version.__doc__ = APIClient.version.__doc__
def close(self):
return self.api.close()
close.__doc__ = APIClient.close.__doc__
def __getattr__(self, name):
s = [f"'DockerClient' object has no attribute '{name}'"]
        # If a user calls a method that moved to APIClient, point them at it.
if hasattr(APIClient, name):
s.append("In Docker SDK for Python 2.0, this method is now on the "
"object APIClient. See the low-level API section of the "
"documentation for more details.")
raise AttributeError(' '.join(s))
from_env = DockerClient.from_env

View File

@@ -0,0 +1,44 @@
import sys
from .version import __version__
DEFAULT_DOCKER_API_VERSION = '1.41'
MINIMUM_DOCKER_API_VERSION = '1.21'
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
CONTAINER_LIMITS_KEYS = [
'memory', 'memswap', 'cpushares', 'cpusetcpus'
]
DEFAULT_HTTP_HOST = "127.0.0.1"
DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock"
DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'
BYTE_UNITS = {
'b': 1,
'k': 1024,
'm': 1024 * 1024,
'g': 1024 * 1024 * 1024
}
INSECURE_REGISTRY_DEPRECATION_WARNING = \
'The `insecure_registry` argument to {} ' \
'is deprecated and non-functional. Please remove it.'
IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
WINDOWS_LONGPATH_PREFIX = '\\\\?\\'
DEFAULT_USER_AGENT = f"docker-sdk-python/{__version__}"
DEFAULT_NUM_POOLS = 25
# The OpenSSH server default value for MaxSessions is 10 which means we can
# use up to 9, leaving the final session for the underlying SSH connection.
# For more details see: https://github.com/docker/docker-py/issues/2246
DEFAULT_NUM_POOLS_SSH = 9
DEFAULT_MAX_POOL_SIZE = 10
DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
DEFAULT_SWARM_ADDR_POOL = ['10.0.0.0/8']
DEFAULT_SWARM_SUBNET_SIZE = 24
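# Example (illustrative, not part of the original module): converting a
# human-readable size with BYTE_UNITS.
#   >>> 2 * BYTE_UNITS['g']
#   2147483648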

View File

@@ -0,0 +1,3 @@
# flake8: noqa
from .context import Context
from .api import ContextAPI

View File

@@ -0,0 +1,203 @@
import json
import os
from docker import errors
from docker.context.config import get_meta_dir
from docker.context.config import METAFILE
from docker.context.config import get_current_context_name
from docker.context.config import write_context_name_to_docker_config
from docker.context import Context
class ContextAPI:
"""Context API.
Contains methods for context management:
create, list, remove, get, inspect.
"""
DEFAULT_CONTEXT = Context("default", "swarm")
@classmethod
def create_context(
cls, name, orchestrator=None, host=None, tls_cfg=None,
default_namespace=None, skip_tls_verify=False):
"""Creates a new context.
Returns:
(Context): a Context object.
Raises:
:py:class:`docker.errors.MissingContextParameter`
If a context name is not provided.
:py:class:`docker.errors.ContextAlreadyExists`
If a context with the name already exists.
:py:class:`docker.errors.ContextException`
If name is default.
Example:
>>> from docker.context import ContextAPI
>>> ctx = ContextAPI.create_context(name='test')
>>> print(ctx.Metadata)
{
"Name": "test",
"Metadata": {},
"Endpoints": {
"docker": {
"Host": "unix:///var/run/docker.sock",
"SkipTLSVerify": false
}
}
}
"""
if not name:
raise errors.MissingContextParameter("name")
if name == "default":
raise errors.ContextException(
'"default" is a reserved context name')
ctx = Context.load_context(name)
if ctx:
raise errors.ContextAlreadyExists(name)
endpoint = "docker"
if orchestrator and orchestrator != "swarm":
endpoint = orchestrator
ctx = Context(name, orchestrator)
ctx.set_endpoint(
endpoint, host, tls_cfg,
skip_tls_verify=skip_tls_verify,
def_namespace=default_namespace)
ctx.save()
return ctx
@classmethod
def get_context(cls, name=None):
"""Retrieves a context object.
Args:
name (str): The name of the context
Example:
>>> from docker.context import ContextAPI
>>> ctx = ContextAPI.get_context(name='test')
>>> print(ctx.Metadata)
{
"Name": "test",
"Metadata": {},
"Endpoints": {
"docker": {
"Host": "unix:///var/run/docker.sock",
"SkipTLSVerify": false
}
}
}
"""
if not name:
name = get_current_context_name()
if name == "default":
return cls.DEFAULT_CONTEXT
return Context.load_context(name)
@classmethod
def contexts(cls):
"""Context list.
Returns:
            (list of Context): List of context objects.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
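        Example:
            A minimal sketch; the context names shown are illustrative:
            >>> from docker.context import ContextAPI
            >>> [ctx.Name for ctx in ContextAPI.contexts()]
            ['default', 'remote']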
"""
names = []
for dirname, dirnames, fnames in os.walk(get_meta_dir()):
for filename in fnames + dirnames:
if filename == METAFILE:
                    try:
                        with open(os.path.join(dirname, filename)) as f:
                            data = json.load(f)
                        names.append(data["Name"])
                    except Exception as e:
                        raise errors.ContextException(
                            "Failed to load metafile {}: {}".format(
                                filename, e))
contexts = [cls.DEFAULT_CONTEXT]
for name in names:
contexts.append(Context.load_context(name))
return contexts
@classmethod
def get_current_context(cls):
"""Get current context.
Returns:
(Context): current context object.
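        Example:
            A minimal sketch (the returned name is illustrative):
            >>> from docker.context import ContextAPI
            >>> ContextAPI.get_current_context().Name
            'default'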
"""
return cls.get_context()
@classmethod
def set_current_context(cls, name="default"):
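        """Set the current context. Similar to the ``docker context use``
        command.
        Args:
            name (str): The name of the context to make current.
                Default: ``default``
        Raises:
            :py:class:`docker.errors.ContextNotFound`
                If a context with the name does not exist.
            :py:class:`docker.errors.ContextException`
                If the docker config file cannot be updated.
        """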
ctx = cls.get_context(name)
if not ctx:
raise errors.ContextNotFound(name)
err = write_context_name_to_docker_config(name)
if err:
raise errors.ContextException(
f'Failed to set current context: {err}')
@classmethod
def remove_context(cls, name):
"""Remove a context. Similar to the ``docker context rm`` command.
Args:
name (str): The name of the context
Raises:
:py:class:`docker.errors.MissingContextParameter`
If a context name is not provided.
:py:class:`docker.errors.ContextNotFound`
If a context with the name does not exist.
:py:class:`docker.errors.ContextException`
If name is default.
Example:
>>> from docker.context import ContextAPI
>>> ContextAPI.remove_context(name='test')
>>>
"""
if not name:
raise errors.MissingContextParameter("name")
if name == "default":
raise errors.ContextException(
'context "default" cannot be removed')
ctx = Context.load_context(name)
if not ctx:
raise errors.ContextNotFound(name)
if name == get_current_context_name():
write_context_name_to_docker_config(None)
ctx.remove()
@classmethod
def inspect_context(cls, name="default"):
"""Remove a context. Similar to the ``docker context inspect`` command.
Args:
name (str): The name of the context
Raises:
:py:class:`docker.errors.MissingContextParameter`
If a context name is not provided.
:py:class:`docker.errors.ContextNotFound`
If a context with the name does not exist.
Example:
>>> from docker.context import ContextAPI
            >>> ctx = ContextAPI.inspect_context(name='test')
"""
if not name:
raise errors.MissingContextParameter("name")
if name == "default":
return cls.DEFAULT_CONTEXT()
ctx = Context.load_context(name)
if not ctx:
raise errors.ContextNotFound(name)
return ctx()

View File

@@ -0,0 +1,81 @@
import os
import json
import hashlib
from docker import utils
from docker.constants import IS_WINDOWS_PLATFORM
from docker.constants import DEFAULT_UNIX_SOCKET
from docker.utils.config import find_config_file
METAFILE = "meta.json"
def get_current_context_name():
name = "default"
docker_cfg_path = find_config_file()
if docker_cfg_path:
try:
with open(docker_cfg_path) as f:
name = json.load(f).get("currentContext", "default")
except Exception:
return "default"
return name
def write_context_name_to_docker_config(name=None):
if name == 'default':
name = None
docker_cfg_path = find_config_file()
config = {}
if docker_cfg_path:
try:
with open(docker_cfg_path) as f:
config = json.load(f)
except Exception as e:
return e
current_context = config.get("currentContext", None)
if current_context and not name:
del config["currentContext"]
elif name:
config["currentContext"] = name
else:
return
try:
with open(docker_cfg_path, "w") as f:
json.dump(config, f, indent=4)
except Exception as e:
return e
def get_context_id(name):
return hashlib.sha256(name.encode('utf-8')).hexdigest()
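# Example (illustrative): context ids are the hex SHA-256 of the name.
#   >>> get_context_id("test")
#   '9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08'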
def get_context_dir():
return os.path.join(os.path.dirname(find_config_file() or ""), "contexts")
def get_meta_dir(name=None):
meta_dir = os.path.join(get_context_dir(), "meta")
if name:
return os.path.join(meta_dir, get_context_id(name))
return meta_dir
def get_meta_file(name):
return os.path.join(get_meta_dir(name), METAFILE)
def get_tls_dir(name=None, endpoint=""):
context_dir = get_context_dir()
if name:
return os.path.join(context_dir, "tls", get_context_id(name), endpoint)
return os.path.join(context_dir, "tls")
def get_context_host(path=None, tls=False):
host = utils.parse_host(path, IS_WINDOWS_PLATFORM, tls)
if host == DEFAULT_UNIX_SOCKET:
        # remove the http+ prefix from the default docker socket url;
        # str.strip() removes characters, not a prefix, so slice it off
        return host[len("http+"):]
return host

View File

@@ -0,0 +1,243 @@
import os
import json
from shutil import copyfile, rmtree
from docker.tls import TLSConfig
from docker.errors import ContextException
from docker.context.config import get_meta_dir
from docker.context.config import get_meta_file
from docker.context.config import get_tls_dir
from docker.context.config import get_context_host
class Context:
"""A context."""
def __init__(self, name, orchestrator=None, host=None, endpoints=None,
tls=False):
if not name:
raise Exception("Name not provided")
self.name = name
self.context_type = None
self.orchestrator = orchestrator
self.endpoints = {}
self.tls_cfg = {}
self.meta_path = "IN MEMORY"
self.tls_path = "IN MEMORY"
if not endpoints:
# set default docker endpoint if no endpoint is set
default_endpoint = "docker" if (
not orchestrator or orchestrator == "swarm"
) else orchestrator
self.endpoints = {
default_endpoint: {
"Host": get_context_host(host, tls),
"SkipTLSVerify": not tls
}
}
return
# check docker endpoints
for k, v in endpoints.items():
if not isinstance(v, dict):
# unknown format
raise ContextException("""Unknown endpoint format for
context {}: {}""".format(name, v))
self.endpoints[k] = v
if k != "docker":
continue
self.endpoints[k]["Host"] = v.get("Host", get_context_host(
host, tls))
self.endpoints[k]["SkipTLSVerify"] = bool(v.get(
"SkipTLSVerify", not tls))
def set_endpoint(
self, name="docker", host=None, tls_cfg=None,
skip_tls_verify=False, def_namespace=None):
self.endpoints[name] = {
"Host": get_context_host(host, not skip_tls_verify),
"SkipTLSVerify": skip_tls_verify
}
if def_namespace:
self.endpoints[name]["DefaultNamespace"] = def_namespace
if tls_cfg:
self.tls_cfg[name] = tls_cfg
def inspect(self):
return self.__call__()
@classmethod
def load_context(cls, name):
meta = Context._load_meta(name)
if meta:
instance = cls(
meta["Name"],
orchestrator=meta["Metadata"].get("StackOrchestrator", None),
endpoints=meta.get("Endpoints", None))
instance.context_type = meta["Metadata"].get("Type", None)
instance._load_certs()
instance.meta_path = get_meta_dir(name)
return instance
return None
@classmethod
def _load_meta(cls, name):
meta_file = get_meta_file(name)
if not os.path.isfile(meta_file):
return None
metadata = {}
try:
with open(meta_file) as f:
metadata = json.load(f)
except (OSError, KeyError, ValueError) as e:
# unknown format
raise Exception("""Detected corrupted meta file for
context {} : {}""".format(name, e))
# for docker endpoints, set defaults for
# Host and SkipTLSVerify fields
for k, v in metadata["Endpoints"].items():
if k != "docker":
continue
metadata["Endpoints"][k]["Host"] = v.get(
"Host", get_context_host(None, False))
metadata["Endpoints"][k]["SkipTLSVerify"] = bool(
v.get("SkipTLSVerify", True))
return metadata
def _load_certs(self):
certs = {}
tls_dir = get_tls_dir(self.name)
for endpoint in self.endpoints.keys():
if not os.path.isdir(os.path.join(tls_dir, endpoint)):
continue
ca_cert = None
cert = None
key = None
for filename in os.listdir(os.path.join(tls_dir, endpoint)):
if filename.startswith("ca"):
ca_cert = os.path.join(tls_dir, endpoint, filename)
elif filename.startswith("cert"):
cert = os.path.join(tls_dir, endpoint, filename)
elif filename.startswith("key"):
key = os.path.join(tls_dir, endpoint, filename)
if all([ca_cert, cert, key]):
verify = None
if endpoint == "docker" and not self.endpoints["docker"].get(
"SkipTLSVerify", False):
verify = True
certs[endpoint] = TLSConfig(
client_cert=(cert, key), ca_cert=ca_cert, verify=verify)
self.tls_cfg = certs
self.tls_path = tls_dir
def save(self):
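        """Persist this context's metadata (and any TLS material) under the
        context meta and TLS directories, so it can be reloaded later with
        :py:meth:`load_context`.
        """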
meta_dir = get_meta_dir(self.name)
if not os.path.isdir(meta_dir):
os.makedirs(meta_dir)
with open(get_meta_file(self.name), "w") as f:
f.write(json.dumps(self.Metadata))
tls_dir = get_tls_dir(self.name)
for endpoint, tls in self.tls_cfg.items():
if not os.path.isdir(os.path.join(tls_dir, endpoint)):
os.makedirs(os.path.join(tls_dir, endpoint))
ca_file = tls.ca_cert
if ca_file:
copyfile(ca_file, os.path.join(
tls_dir, endpoint, os.path.basename(ca_file)))
if tls.cert:
cert_file, key_file = tls.cert
copyfile(cert_file, os.path.join(
tls_dir, endpoint, os.path.basename(cert_file)))
copyfile(key_file, os.path.join(
tls_dir, endpoint, os.path.basename(key_file)))
self.meta_path = get_meta_dir(self.name)
self.tls_path = get_tls_dir(self.name)
def remove(self):
if os.path.isdir(self.meta_path):
rmtree(self.meta_path)
if os.path.isdir(self.tls_path):
rmtree(self.tls_path)
def __repr__(self):
return f"<{self.__class__.__name__}: '{self.name}'>"
def __str__(self):
return json.dumps(self.__call__(), indent=2)
def __call__(self):
result = self.Metadata
result.update(self.TLSMaterial)
result.update(self.Storage)
return result
def is_docker_host(self):
return self.context_type is None
@property
def Name(self):
return self.name
@property
def Host(self):
if not self.orchestrator or self.orchestrator == "swarm":
endpoint = self.endpoints.get("docker", None)
if endpoint:
return endpoint.get("Host", None)
return None
return self.endpoints[self.orchestrator].get("Host", None)
@property
def Orchestrator(self):
return self.orchestrator
@property
def Metadata(self):
meta = {}
if self.orchestrator:
meta = {"StackOrchestrator": self.orchestrator}
return {
"Name": self.name,
"Metadata": meta,
"Endpoints": self.endpoints
}
@property
def TLSConfig(self):
key = self.orchestrator
if not key or key == "swarm":
key = "docker"
if key in self.tls_cfg.keys():
return self.tls_cfg[key]
return None
@property
def TLSMaterial(self):
certs = {}
for endpoint, tls in self.tls_cfg.items():
cert, key = tls.cert
certs[endpoint] = list(
map(os.path.basename, [tls.ca_cert, cert, key]))
return {
"TLSMaterial": certs
}
@property
def Storage(self):
return {
"Storage": {
"MetadataPath": self.meta_path,
"TLSPath": self.tls_path
}}

View File

@@ -0,0 +1,4 @@
# flake8: noqa
from .store import Store
from .errors import StoreError, CredentialsNotFound
from .constants import *

View File

@@ -0,0 +1,4 @@
PROGRAM_PREFIX = 'docker-credential-'
DEFAULT_LINUX_STORE = 'secretservice'
DEFAULT_OSX_STORE = 'osxkeychain'
DEFAULT_WIN32_STORE = 'wincred'

View File

@@ -0,0 +1,25 @@
class StoreError(RuntimeError):
pass
class CredentialsNotFound(StoreError):
pass
class InitializationError(StoreError):
pass
def process_store_error(cpe, program):
message = cpe.output.decode('utf-8')
if 'credentials not found in native keychain' in message:
return CredentialsNotFound(
'No matching credentials in {}'.format(
program
)
)
return StoreError(
'Credentials store {} exited with "{}".'.format(
program, cpe.output.decode('utf-8').strip()
)
)

View File

@@ -0,0 +1,94 @@
import errno
import json
import shutil
import subprocess
from . import constants
from . import errors
from .utils import create_environment_dict
class Store:
def __init__(self, program, environment=None):
""" Create a store object that acts as an interface to
perform the basic operations for storing, retrieving
and erasing credentials using `program`.
"""
self.program = constants.PROGRAM_PREFIX + program
self.exe = shutil.which(self.program)
self.environment = environment
if self.exe is None:
raise errors.InitializationError(
'{} not installed or not available in PATH'.format(
self.program
)
)
def get(self, server):
""" Retrieve credentials for `server`. If no credentials are found,
a `StoreError` will be raised.
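            Example (helper name and values are illustrative):
                >>> store = Store('desktop')
                >>> store.get('https://index.docker.io/v1/')
                {'ServerURL': 'https://index.docker.io/v1/',
                 'Username': 'jdoe', 'Secret': '...'}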
"""
if not isinstance(server, bytes):
server = server.encode('utf-8')
data = self._execute('get', server)
result = json.loads(data.decode('utf-8'))
# docker-credential-pass will return an object for inexistent servers
# whereas other helpers will exit with returncode != 0. For
# consistency, if no significant data is returned,
# raise CredentialsNotFound
if result['Username'] == '' and result['Secret'] == '':
raise errors.CredentialsNotFound(
f'No matching credentials in {self.program}'
)
return result
def store(self, server, username, secret):
""" Store credentials for `server`. Raises a `StoreError` if an error
occurs.
"""
data_input = json.dumps({
'ServerURL': server,
'Username': username,
'Secret': secret
}).encode('utf-8')
return self._execute('store', data_input)
def erase(self, server):
""" Erase credentials for `server`. Raises a `StoreError` if an error
occurs.
"""
if not isinstance(server, bytes):
server = server.encode('utf-8')
self._execute('erase', server)
def list(self):
""" List stored credentials. Requires v0.4.0+ of the helper.
"""
data = self._execute('list', None)
return json.loads(data.decode('utf-8'))
def _execute(self, subcmd, data_input):
output = None
env = create_environment_dict(self.environment)
try:
output = subprocess.check_output(
[self.exe, subcmd], input=data_input, env=env,
)
except subprocess.CalledProcessError as e:
raise errors.process_store_error(e, self.program)
except OSError as e:
if e.errno == errno.ENOENT:
raise errors.StoreError(
'{} not installed or not available in PATH'.format(
self.program
)
)
else:
raise errors.StoreError(
'Unexpected OS error "{}", errno={}'.format(
e.strerror, e.errno
)
)
return output

View File

@@ -0,0 +1,10 @@
import os
def create_environment_dict(overrides):
"""
Create and return a copy of os.environ with the specified overrides
"""
result = os.environ.copy()
result.update(overrides or {})
return result
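# Example (illustrative): create_environment_dict({'DOCKER_HOST': 'tcp://10.0.0.2:2375'})
# returns a copy of os.environ with DOCKER_HOST overridden.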

View File

@@ -0,0 +1,207 @@
import requests
_image_not_found_explanation_fragments = frozenset(
fragment.lower() for fragment in [
'no such image',
'not found: does not exist or no pull access',
'repository does not exist',
'was found but does not match the specified platform',
]
)
class DockerException(Exception):
"""
A base class from which all other exceptions inherit.
If you want to catch all errors that the Docker SDK might raise,
catch this base exception.
"""
def create_api_error_from_http_exception(e):
"""
    Raise a suitable APIError from requests.exceptions.HTTPError.
"""
response = e.response
try:
explanation = response.json()['message']
except ValueError:
explanation = (response.content or '').strip()
cls = APIError
if response.status_code == 404:
explanation_msg = (explanation or '').lower()
if any(fragment in explanation_msg
for fragment in _image_not_found_explanation_fragments):
cls = ImageNotFound
else:
cls = NotFound
raise cls(e, response=response, explanation=explanation) from e
class APIError(requests.exceptions.HTTPError, DockerException):
"""
An HTTP error from the API.
"""
def __init__(self, message, response=None, explanation=None):
# requests 1.2 supports response as a keyword argument, but
# requests 1.1 doesn't
super().__init__(message)
self.response = response
self.explanation = explanation
def __str__(self):
message = super().__str__()
if self.is_client_error():
message = '{} Client Error for {}: {}'.format(
self.response.status_code, self.response.url,
self.response.reason)
elif self.is_server_error():
message = '{} Server Error for {}: {}'.format(
self.response.status_code, self.response.url,
self.response.reason)
if self.explanation:
message = f'{message} ("{self.explanation}")'
return message
@property
def status_code(self):
if self.response is not None:
return self.response.status_code
def is_error(self):
return self.is_client_error() or self.is_server_error()
def is_client_error(self):
if self.status_code is None:
return False
return 400 <= self.status_code < 500
def is_server_error(self):
if self.status_code is None:
return False
return 500 <= self.status_code < 600
class NotFound(APIError):
pass
class ImageNotFound(NotFound):
pass
class InvalidVersion(DockerException):
pass
class InvalidRepository(DockerException):
pass
class InvalidConfigFile(DockerException):
pass
class InvalidArgument(DockerException):
pass
class DeprecatedMethod(DockerException):
pass
class TLSParameterError(DockerException):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg + (". TLS configurations should map the Docker CLI "
"client configurations. See "
"https://docs.docker.com/engine/articles/https/ "
"for API details.")
class NullResource(DockerException, ValueError):
pass
class ContainerError(DockerException):
"""
Represents a container that has exited with a non-zero exit code.
"""
def __init__(self, container, exit_status, command, image, stderr):
self.container = container
self.exit_status = exit_status
self.command = command
self.image = image
self.stderr = stderr
err = f": {stderr}" if stderr is not None else ""
msg = ("Command '{}' in image '{}' returned non-zero exit "
"status {}{}").format(command, image, exit_status, err)
super().__init__(msg)
class StreamParseError(RuntimeError):
def __init__(self, reason):
self.msg = reason
class BuildError(DockerException):
def __init__(self, reason, build_log):
super().__init__(reason)
self.msg = reason
self.build_log = build_log
class ImageLoadError(DockerException):
pass
def create_unexpected_kwargs_error(name, kwargs):
quoted_kwargs = [f"'{k}'" for k in sorted(kwargs)]
text = [f"{name}() "]
if len(quoted_kwargs) == 1:
text.append("got an unexpected keyword argument ")
else:
text.append("got unexpected keyword arguments ")
text.append(', '.join(quoted_kwargs))
return TypeError(''.join(text))
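# Example (illustrative): create_unexpected_kwargs_error('run', {'foo': 1})
# returns TypeError("run() got an unexpected keyword argument 'foo'").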
class MissingContextParameter(DockerException):
def __init__(self, param):
self.param = param
def __str__(self):
return (f"missing parameter: {self.param}")
class ContextAlreadyExists(DockerException):
def __init__(self, name):
self.name = name
def __str__(self):
return (f"context {self.name} already exists")
class ContextException(DockerException):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return (self.msg)
class ContextNotFound(DockerException):
def __init__(self, name):
self.name = name
def __str__(self):
return (f"context '{self.name}' not found")

View File

@@ -0,0 +1,69 @@
from ..api import APIClient
from .resource import Model, Collection
class Config(Model):
"""A config."""
id_attribute = 'ID'
def __repr__(self):
return f"<{self.__class__.__name__}: '{self.name}'>"
@property
def name(self):
return self.attrs['Spec']['Name']
def remove(self):
"""
Remove this config.
Raises:
:py:class:`docker.errors.APIError`
If config failed to remove.
"""
return self.client.api.remove_config(self.id)
class ConfigCollection(Collection):
"""Configs on the Docker server."""
model = Config
def create(self, **kwargs):
obj = self.client.api.create_config(**kwargs)
return self.prepare_model(obj)
create.__doc__ = APIClient.create_config.__doc__
def get(self, config_id):
"""
Get a config.
Args:
config_id (str): Config ID.
Returns:
(:py:class:`Config`): The config.
Raises:
:py:class:`docker.errors.NotFound`
If the config does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_config(config_id))
def list(self, **kwargs):
"""
List configs. Similar to the ``docker config ls`` command.
Args:
filters (dict): Server-side list filtering options.
Returns:
(list of :py:class:`Config`): The configs.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
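        Example:
            A minimal sketch, assuming ``client`` comes from
            :py:func:`docker.from_env`; the filter value is illustrative:
            >>> client.configs.list(filters={'name': 'app-config'})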
"""
resp = self.client.api.configs(**kwargs)
return [self.prepare_model(obj) for obj in resp]

File diff suppressed because it is too large

View File

@@ -0,0 +1,507 @@
import itertools
import re
import warnings
from ..api import APIClient
from ..constants import DEFAULT_DATA_CHUNK_SIZE
from ..errors import BuildError, ImageLoadError, InvalidArgument
from ..utils import parse_repository_tag
from ..utils.json_stream import json_stream
from .resource import Collection, Model
class Image(Model):
"""
An image on the server.
"""
def __repr__(self):
return "<{}: '{}'>".format(
self.__class__.__name__,
"', '".join(self.tags),
)
@property
def labels(self):
"""
The labels of an image as dictionary.
"""
result = self.attrs['Config'].get('Labels')
return result or {}
@property
def short_id(self):
"""
The ID of the image truncated to 12 characters, plus the ``sha256:``
prefix.
"""
if self.id.startswith('sha256:'):
return self.id[:19]
return self.id[:12]
@property
def tags(self):
"""
The image's tags.
"""
tags = self.attrs.get('RepoTags')
if tags is None:
tags = []
return [tag for tag in tags if tag != '<none>:<none>']
def history(self):
"""
Show the history of an image.
Returns:
            (list of dict): The history of the image.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.history(self.id)
def remove(self, force=False, noprune=False):
"""
Remove this image.
Args:
force (bool): Force removal of the image
noprune (bool): Do not delete untagged parents
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.remove_image(
self.id,
force=force,
noprune=noprune,
)
def save(self, chunk_size=DEFAULT_DATA_CHUNK_SIZE, named=False):
"""
Get a tarball of an image. Similar to the ``docker save`` command.
Args:
chunk_size (int): The generator will return up to that much data
per iteration, but may return less. If ``None``, data will be
streamed as it is received. Default: 2 MB
named (str or bool): If ``False`` (default), the tarball will not
retain repository and tag information for this image. If set
to ``True``, the first tag in the :py:attr:`~tags` list will
be used to identify the image. Alternatively, any element of
the :py:attr:`~tags` list can be used as an argument to use
that specific tag as the saved identifier.
Returns:
(generator): A stream of raw archive data.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> image = cli.images.get("busybox:latest")
>>> f = open('/tmp/busybox-latest.tar', 'wb')
            >>> for chunk in image.save():
            ...     f.write(chunk)
            >>> f.close()
"""
img = self.id
if named:
img = self.tags[0] if self.tags else img
if isinstance(named, str):
if named not in self.tags:
raise InvalidArgument(
f"{named} is not a valid tag for this image"
)
img = named
return self.client.api.get_image(img, chunk_size)
def tag(self, repository, tag=None, **kwargs):
"""
Tag this image into a repository. Similar to the ``docker tag``
command.
Args:
repository (str): The repository to set for the tag
tag (str): The tag name
force (bool): Force
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Returns:
(bool): ``True`` if successful
"""
return self.client.api.tag(self.id, repository, tag=tag, **kwargs)
class RegistryData(Model):
"""
Image metadata stored on the registry, including available platforms.
"""
def __init__(self, image_name, *args, **kwargs):
super().__init__(*args, **kwargs)
self.image_name = image_name
@property
def id(self):
"""
The ID of the object.
"""
return self.attrs['Descriptor']['digest']
@property
def short_id(self):
"""
The ID of the image truncated to 12 characters, plus the ``sha256:``
prefix.
"""
return self.id[:19]
def pull(self, platform=None):
"""
Pull the image digest.
Args:
platform (str): The platform to pull the image for.
Default: ``None``
Returns:
(:py:class:`Image`): A reference to the pulled image.
"""
repository, _ = parse_repository_tag(self.image_name)
return self.collection.pull(repository, tag=self.id, platform=platform)
def has_platform(self, platform):
"""
Check whether the given platform identifier is available for this
digest.
Args:
platform (str or dict): A string using the ``os[/arch[/variant]]``
format, or a platform dictionary.
Returns:
(bool): ``True`` if the platform is recognized as available,
``False`` otherwise.
Raises:
:py:class:`docker.errors.InvalidArgument`
If the platform argument is not a valid descriptor.
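        Example:
            A minimal sketch (image name and result are illustrative):
            >>> rd = client.images.get_registry_data('busybox')
            >>> rd.has_platform('linux/amd64')
            True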
"""
if platform and not isinstance(platform, dict):
parts = platform.split('/')
if len(parts) > 3 or len(parts) < 1:
raise InvalidArgument(
f'"{platform}" is not a valid platform descriptor'
)
platform = {'os': parts[0]}
if len(parts) > 2:
platform['variant'] = parts[2]
if len(parts) > 1:
platform['architecture'] = parts[1]
return normalize_platform(
platform, self.client.version()
) in self.attrs['Platforms']
def reload(self):
self.attrs = self.client.api.inspect_distribution(self.image_name)
reload.__doc__ = Model.reload.__doc__
class ImageCollection(Collection):
model = Image
def build(self, **kwargs):
"""
Build an image and return it. Similar to the ``docker build``
command. Either ``path`` or ``fileobj`` must be set.
If you already have a tar file for the Docker build context (including
a Dockerfile), pass a readable file-like object to ``fileobj``
and also pass ``custom_context=True``. If the stream is also
compressed, set ``encoding`` to the correct value (e.g ``gzip``).
If you want to get the raw output of the build, use the
:py:meth:`~docker.api.build.BuildApiMixin.build` method in the
low-level API.
Args:
path (str): Path to the directory containing the Dockerfile
fileobj: A file object to use as the Dockerfile. (Or a file-like
object)
tag (str): A tag to add to the final image
quiet (bool): Whether to return the status
nocache (bool): Don't use the cache when set to ``True``
rm (bool): Remove intermediate containers. The ``docker build``
command now defaults to ``--rm=true``, but we have kept the old
default of `False` to preserve backward compatibility
timeout (int): HTTP timeout
custom_context (bool): Optional if using ``fileobj``
encoding (str): The encoding for a stream. Set to ``gzip`` for
compressing
pull (bool): Downloads any updates to the FROM image in Dockerfiles
forcerm (bool): Always remove intermediate containers, even after
unsuccessful builds
dockerfile (str): path within the build context to the Dockerfile
buildargs (dict): A dictionary of build arguments
container_limits (dict): A dictionary of limits applied to each
container created by the build process. Valid keys:
- memory (int): set memory limit for build
- memswap (int): Total memory (memory + swap), -1 to disable
swap
- cpushares (int): CPU shares (relative weight)
- cpusetcpus (str): CPUs in which to allow execution, e.g.,
``"0-3"``, ``"0,1"``
shmsize (int): Size of `/dev/shm` in bytes. The size must be
greater than 0. If omitted the system uses 64MB
labels (dict): A dictionary of labels to set on the image
cache_from (list): A list of images used for build cache
resolution
target (str): Name of the build-stage to build in a multi-stage
Dockerfile
network_mode (str): networking mode for the run commands during
build
squash (bool): Squash the resulting images layers into a
single layer.
extra_hosts (dict): Extra hosts to add to /etc/hosts in building
containers, as a mapping of hostname to IP address.
platform (str): Platform in the format ``os[/arch[/variant]]``.
isolation (str): Isolation technology used during build.
Default: `None`.
use_config_proxy (bool): If ``True``, and if the docker client
configuration file (``~/.docker/config.json`` by default)
contains a proxy configuration, the corresponding environment
variables will be set in the container being built.
Returns:
(tuple): The first item is the :py:class:`Image` object for the
image that was built. The second item is a generator of the
build logs as JSON-decoded objects.
Raises:
:py:class:`docker.errors.BuildError`
If there is an error during the build.
:py:class:`docker.errors.APIError`
If the server returns any other error.
``TypeError``
If neither ``path`` nor ``fileobj`` is specified.
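        Example:
            A minimal sketch (path and tag are illustrative):
            >>> image, logs = client.images.build(
            ...     path='.', tag='myapp:latest', rm=True
            ... )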
"""
resp = self.client.api.build(**kwargs)
if isinstance(resp, str):
return self.get(resp)
last_event = None
image_id = None
result_stream, internal_stream = itertools.tee(json_stream(resp))
for chunk in internal_stream:
if 'error' in chunk:
raise BuildError(chunk['error'], result_stream)
if 'stream' in chunk:
match = re.search(
r'(^Successfully built |sha256:)([0-9a-f]+)$',
chunk['stream']
)
if match:
image_id = match.group(2)
last_event = chunk
if image_id:
return (self.get(image_id), result_stream)
raise BuildError(last_event or 'Unknown', result_stream)
def get(self, name):
"""
Gets an image.
Args:
name (str): The name of the image.
Returns:
(:py:class:`Image`): The image.
Raises:
:py:class:`docker.errors.ImageNotFound`
If the image does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_image(name))
def get_registry_data(self, name, auth_config=None):
"""
Gets the registry data for an image.
Args:
name (str): The name of the image.
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
Returns:
(:py:class:`RegistryData`): The data object.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return RegistryData(
image_name=name,
attrs=self.client.api.inspect_distribution(name, auth_config),
client=self.client,
collection=self,
)
def list(self, name=None, all=False, filters=None):
"""
List images on the server.
Args:
name (str): Only show images belonging to the repository ``name``
all (bool): Show intermediate image layers. By default, these are
filtered out.
filters (dict): Filters to be processed on the image list.
Available filters:
- ``dangling`` (bool)
- `label` (str|list): format either ``"key"``, ``"key=value"``
or a list of such.
Returns:
(list of :py:class:`Image`): The images.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.images(name=name, all=all, filters=filters)
return [self.get(r["Id"]) for r in resp]
def load(self, data):
"""
Load an image that was previously saved using
:py:meth:`~docker.models.images.Image.save` (or ``docker save``).
Similar to ``docker load``.
Args:
data (binary): Image data to be loaded.
Returns:
(list of :py:class:`Image`): The images.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
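        Example:
            A minimal sketch, reusing the tarball written in the
            :py:meth:`~docker.models.images.Image.save` example:
            >>> with open('/tmp/busybox-latest.tar', 'rb') as f:
            ...     images = client.images.load(f.read())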
"""
resp = self.client.api.load_image(data)
images = []
for chunk in resp:
if 'stream' in chunk:
match = re.search(
r'(^Loaded image ID: |^Loaded image: )(.+)$',
chunk['stream']
)
if match:
image_id = match.group(2)
images.append(image_id)
if 'error' in chunk:
raise ImageLoadError(chunk['error'])
return [self.get(i) for i in images]
def pull(self, repository, tag=None, all_tags=False, **kwargs):
"""
Pull an image of the given name and return it. Similar to the
``docker pull`` command.
If ``tag`` is ``None`` or empty, it is set to ``latest``.
If ``all_tags`` is set, the ``tag`` parameter is ignored and all image
tags will be pulled.
If you want to get the raw pull output, use the
:py:meth:`~docker.api.image.ImageApiMixin.pull` method in the
low-level API.
Args:
repository (str): The repository to pull
tag (str): The tag to pull
auth_config (dict): Override the credentials that are found in the
config for this request. ``auth_config`` should contain the
``username`` and ``password`` keys to be valid.
platform (str): Platform in the format ``os[/arch[/variant]]``
all_tags (bool): Pull all image tags
Returns:
(:py:class:`Image` or list): The image that has been pulled.
If ``all_tags`` is True, the method will return a list
of :py:class:`Image` objects belonging to this repository.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> # Pull the image tagged `latest` in the busybox repo
>>> image = client.images.pull('busybox')
>>> # Pull all tags in the busybox repo
>>> images = client.images.pull('busybox', all_tags=True)
"""
repository, image_tag = parse_repository_tag(repository)
tag = tag or image_tag or 'latest'
if 'stream' in kwargs:
warnings.warn(
'`stream` is not a valid parameter for this method'
' and will be overridden'
)
del kwargs['stream']
pull_log = self.client.api.pull(
repository, tag=tag, stream=True, all_tags=all_tags, **kwargs
)
for _ in pull_log:
# We don't do anything with the logs, but we need
# to keep the connection alive and wait for the image
# to be pulled.
pass
if not all_tags:
return self.get('{0}{2}{1}'.format(
repository, tag, '@' if tag.startswith('sha256:') else ':'
))
return self.list(repository)
def push(self, repository, tag=None, **kwargs):
return self.client.api.push(repository, tag=tag, **kwargs)
push.__doc__ = APIClient.push.__doc__
def remove(self, *args, **kwargs):
self.client.api.remove_image(*args, **kwargs)
remove.__doc__ = APIClient.remove_image.__doc__
def search(self, *args, **kwargs):
return self.client.api.search(*args, **kwargs)
search.__doc__ = APIClient.search.__doc__
def prune(self, filters=None):
return self.client.api.prune_images(filters=filters)
prune.__doc__ = APIClient.prune_images.__doc__
def prune_builds(self, *args, **kwargs):
return self.client.api.prune_builds(*args, **kwargs)
prune_builds.__doc__ = APIClient.prune_builds.__doc__
def normalize_platform(platform, engine_info):
if platform is None:
platform = {}
if 'os' not in platform:
platform['os'] = engine_info['Os']
if 'architecture' not in platform:
platform['architecture'] = engine_info['Arch']
return platform
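# Example (illustrative): missing fields are filled in from the engine info.
#   >>> normalize_platform({'os': 'linux'}, {'Os': 'linux', 'Arch': 'amd64'})
#   {'os': 'linux', 'architecture': 'amd64'}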

View File

@@ -0,0 +1,218 @@
from ..api import APIClient
from ..utils import version_gte
from .containers import Container
from .resource import Model, Collection
class Network(Model):
"""
A Docker network.
"""
@property
def name(self):
"""
The name of the network.
"""
return self.attrs.get('Name')
@property
def containers(self):
"""
The containers that are connected to the network, as a list of
:py:class:`~docker.models.containers.Container` objects.
"""
return [
self.client.containers.get(cid) for cid in
(self.attrs.get('Containers') or {}).keys()
]
def connect(self, container, *args, **kwargs):
"""
Connect a container to this network.
Args:
container (str): Container to connect to this network, as either
an ID, name, or :py:class:`~docker.models.containers.Container`
object.
aliases (:py:class:`list`): A list of aliases for this endpoint.
Names in that list can be used within the network to reach the
container. Defaults to ``None``.
links (:py:class:`list`): A list of links for this endpoint.
                Containers declared in this list will be linked to this
container. Defaults to ``None``.
ipv4_address (str): The IP address of this container on the
network, using the IPv4 protocol. Defaults to ``None``.
ipv6_address (str): The IP address of this container on the
network, using the IPv6 protocol. Defaults to ``None``.
link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6)
addresses.
driver_opt (dict): A dictionary of options to provide to the
network driver. Defaults to ``None``.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
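        Example:
            A minimal sketch (network, container and alias names are
            illustrative):
            >>> net = client.networks.create('mynet')
            >>> net.connect('my-container', aliases=['web'])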
"""
if isinstance(container, Container):
container = container.id
return self.client.api.connect_container_to_network(
container, self.id, *args, **kwargs
)
def disconnect(self, container, *args, **kwargs):
"""
Disconnect a container from this network.
Args:
container (str): Container to disconnect from this network, as
either an ID, name, or
:py:class:`~docker.models.containers.Container` object.
force (bool): Force the container to disconnect from a network.
Default: ``False``
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if isinstance(container, Container):
container = container.id
return self.client.api.disconnect_container_from_network(
container, self.id, *args, **kwargs
)
def remove(self):
"""
Remove this network.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.remove_network(self.id)
class NetworkCollection(Collection):
"""
Networks on the Docker server.
"""
model = Network
def create(self, name, *args, **kwargs):
"""
        Create a network. Similar to the ``docker network create`` command.
Args:
name (str): Name of the network
driver (str): Name of the driver used to create the network
options (dict): Driver options as a key-value dictionary
ipam (IPAMConfig): Optional custom IP scheme for the network.
check_duplicate (bool): Request daemon to check for networks with
same name. Default: ``None``.
internal (bool): Restrict external access to the network. Default
``False``.
labels (dict): Map of labels to set on the network. Default
``None``.
enable_ipv6 (bool): Enable IPv6 on the network. Default ``False``.
attachable (bool): If enabled, and the network is in the global
scope, non-service containers on worker nodes will be able to
connect to the network.
scope (str): Specify the network's scope (``local``, ``global`` or
``swarm``)
ingress (bool): If set, create an ingress network which provides
the routing-mesh in swarm mode.
Returns:
(:py:class:`Network`): The network that was created.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
A network using the bridge driver:
>>> client.networks.create("network1", driver="bridge")
You can also create more advanced networks with custom IPAM
configurations. For example, setting the subnet to
``192.168.52.0/24`` and gateway address to ``192.168.52.254``.
.. code-block:: python
>>> ipam_pool = docker.types.IPAMPool(
subnet='192.168.52.0/24',
gateway='192.168.52.254'
)
>>> ipam_config = docker.types.IPAMConfig(
pool_configs=[ipam_pool]
)
>>> client.networks.create(
"network1",
driver="bridge",
ipam=ipam_config
)
"""
resp = self.client.api.create_network(name, *args, **kwargs)
return self.get(resp['Id'])
def get(self, network_id, *args, **kwargs):
"""
Get a network by its ID.
Args:
network_id (str): The ID of the network.
verbose (bool): Retrieve the service details across the cluster in
swarm mode.
scope (str): Filter the network by scope (``swarm``, ``global``
or ``local``).
Returns:
(:py:class:`Network`) The network.
Raises:
:py:class:`docker.errors.NotFound`
If the network does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(
self.client.api.inspect_network(network_id, *args, **kwargs)
)
def list(self, *args, **kwargs):
"""
        List networks. Similar to the ``docker network ls`` command.
Args:
names (:py:class:`list`): List of names to filter by.
ids (:py:class:`list`): List of ids to filter by.
filters (dict): Filters to be processed on the network list.
Available filters:
- ``driver=[<driver-name>]`` Matches a network's driver.
- `label` (str|list): format either ``"key"``, ``"key=value"``
or a list of such.
- ``type=["custom"|"builtin"]`` Filters networks by type.
greedy (bool): Fetch more details for each network individually.
You might want this to get the containers attached to them.
Returns:
(list of :py:class:`Network`) The networks on the server.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
greedy = kwargs.pop('greedy', False)
resp = self.client.api.networks(*args, **kwargs)
networks = [self.prepare_model(item) for item in resp]
if greedy and version_gte(self.client.api._version, '1.28'):
for net in networks:
net.reload()
return networks
def prune(self, filters=None):
return self.client.api.prune_networks(filters=filters)
prune.__doc__ = APIClient.prune_networks.__doc__

View File

@@ -0,0 +1,107 @@
from .resource import Model, Collection
class Node(Model):
"""A node in a swarm."""
id_attribute = 'ID'
@property
def version(self):
"""
        The version number of the node. If this is not the same as the
server, the :py:meth:`update` function will not work and you will
need to call :py:meth:`reload` before calling it again.
"""
return self.attrs.get('Version').get('Index')
def update(self, node_spec):
"""
Update the node's configuration.
Args:
node_spec (dict): Configuration settings to update. Any values
not provided will be removed. Default: ``None``
Returns:
`True` if the request went through.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> node_spec = {'Availability': 'active',
'Name': 'node-name',
'Role': 'manager',
'Labels': {'foo': 'bar'}
}
>>> node.update(node_spec)
"""
return self.client.api.update_node(self.id, self.version, node_spec)
def remove(self, force=False):
"""
Remove this node from the swarm.
Args:
force (bool): Force remove an active node. Default: `False`
Returns:
`True` if the request was successful.
Raises:
:py:class:`docker.errors.NotFound`
If the node doesn't exist in the swarm.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.remove_node(self.id, force=force)
class NodeCollection(Collection):
"""Nodes on the Docker server."""
model = Node
def get(self, node_id):
"""
Get a node.
Args:
node_id (string): ID of the node to be inspected.
Returns:
A :py:class:`Node` object.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_node(node_id))
def list(self, *args, **kwargs):
"""
List swarm nodes.
Args:
filters (dict): Filters to process on the nodes list. Valid
filters: ``id``, ``name``, ``membership`` and ``role``.
Default: ``None``
Returns:
A list of :py:class:`Node` objects.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> client.nodes.list(filters={'role': 'manager'})
"""
return [
self.prepare_model(n)
for n in self.client.api.nodes(*args, **kwargs)
]

View File

@@ -0,0 +1,206 @@
from .. import errors
from .resource import Collection, Model
class Plugin(Model):
"""
A plugin on the server.
"""
def __repr__(self):
return f"<{self.__class__.__name__}: '{self.name}'>"
@property
def name(self):
"""
The plugin's name.
"""
return self.attrs.get('Name')
@property
def enabled(self):
"""
Whether the plugin is enabled.
"""
return self.attrs.get('Enabled')
@property
def settings(self):
"""
A dictionary representing the plugin's configuration.
"""
return self.attrs.get('Settings')
def configure(self, options):
"""
Update the plugin's settings.
Args:
options (dict): A key-value mapping of options.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
self.client.api.configure_plugin(self.name, options)
self.reload()
def disable(self, force=False):
"""
Disable the plugin.
Args:
force (bool): Force disable. Default: False
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
self.client.api.disable_plugin(self.name, force)
self.reload()
def enable(self, timeout=0):
"""
Enable the plugin.
Args:
timeout (int): Timeout in seconds. Default: 0
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
self.client.api.enable_plugin(self.name, timeout)
self.reload()
def push(self):
"""
Push the plugin to a remote registry.
Returns:
A dict iterator streaming the status of the upload.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.push_plugin(self.name)
def remove(self, force=False):
"""
Remove the plugin from the server.
Args:
force (bool): Remove even if the plugin is enabled.
Default: False
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.remove_plugin(self.name, force=force)
def upgrade(self, remote=None):
"""
Upgrade the plugin.
Args:
remote (string): Remote reference to upgrade to. The
``:latest`` tag is optional and is the default if omitted.
Default: this plugin's name.
Returns:
A generator streaming the decoded API logs
"""
if self.enabled:
            raise errors.DockerException(
'Plugin must be disabled before upgrading.'
)
if remote is None:
remote = self.name
privileges = self.client.api.plugin_privileges(remote)
yield from self.client.api.upgrade_plugin(
self.name,
remote,
privileges,
)
self.reload()
class PluginCollection(Collection):
model = Plugin
def create(self, name, plugin_data_dir, gzip=False):
"""
Create a new plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
plugin_data_dir (string): Path to the plugin data directory.
Plugin data directory must contain the ``config.json``
manifest file and the ``rootfs`` directory.
gzip (bool): Compress the context using gzip. Default: False
Returns:
(:py:class:`Plugin`): The newly created plugin.
"""
self.client.api.create_plugin(name, plugin_data_dir, gzip)
return self.get(name)
def get(self, name):
"""
Gets a plugin.
Args:
name (str): The name of the plugin.
Returns:
(:py:class:`Plugin`): The plugin.
Raises:
:py:class:`docker.errors.NotFound` If the plugin does not
exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_plugin(name))
def install(self, remote_name, local_name=None):
"""
Pull and install a plugin.
Args:
remote_name (string): Remote reference for the plugin to
install. The ``:latest`` tag is optional, and is the
default if omitted.
local_name (string): Local name for the pulled plugin.
The ``:latest`` tag is optional, and is the default if
omitted. Optional.
Returns:
(:py:class:`Plugin`): The installed plugin
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
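        Example:
            A minimal sketch (the plugin reference is illustrative):
            >>> plugin = client.plugins.install('vieux/sshfs:latest')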
"""
privileges = self.client.api.plugin_privileges(remote_name)
it = self.client.api.pull_plugin(remote_name, privileges, local_name)
for data in it:
            pass  # drain the pull output so the install completes
return self.get(local_name or remote_name)
def list(self):
"""
List plugins installed on the server.
Returns:
(list of :py:class:`Plugin`): The plugins.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.plugins()
return [self.prepare_model(r) for r in resp]

View File

@@ -0,0 +1,92 @@
class Model:
"""
A base class for representing a single object on the server.
"""
id_attribute = 'Id'
def __init__(self, attrs=None, client=None, collection=None):
#: A client pointing at the server that this object is on.
self.client = client
#: The collection that this model is part of.
self.collection = collection
#: The raw representation of this object from the API
self.attrs = attrs
if self.attrs is None:
self.attrs = {}
def __repr__(self):
return f"<{self.__class__.__name__}: {self.short_id}>"
def __eq__(self, other):
return isinstance(other, self.__class__) and self.id == other.id
def __hash__(self):
return hash(f"{self.__class__.__name__}:{self.id}")
@property
def id(self):
"""
The ID of the object.
"""
return self.attrs.get(self.id_attribute)
@property
def short_id(self):
"""
The ID of the object, truncated to 12 characters.
"""
return self.id[:12]
def reload(self):
"""
Load this object from the server again and update ``attrs`` with the
new data.
"""
new_model = self.collection.get(self.id)
self.attrs = new_model.attrs
class Collection:
"""
A base class for representing all objects of a particular type on the
server.
"""
#: The type of object this collection represents, set by subclasses
model = None
def __init__(self, client=None):
#: The client pointing at the server that this collection of objects
#: is on.
self.client = client
def __call__(self, *args, **kwargs):
raise TypeError(
"'{}' object is not callable. You might be trying to use the old "
"(pre-2.0) API - use docker.APIClient if so."
.format(self.__class__.__name__))
def list(self):
raise NotImplementedError
def get(self, key):
raise NotImplementedError
def create(self, attrs=None):
raise NotImplementedError
def prepare_model(self, attrs):
"""
Create a model from a set of attributes.
"""
if isinstance(attrs, Model):
attrs.client = self.client
attrs.collection = self
return attrs
elif isinstance(attrs, dict):
return self.model(attrs=attrs, client=self.client, collection=self)
else:
raise Exception("Can't create %s from %s" %
(self.model.__name__, attrs))

View File

@@ -0,0 +1,70 @@
from ..api import APIClient
from .resource import Model, Collection
class Secret(Model):
"""A secret."""
id_attribute = 'ID'
def __repr__(self):
return f"<{self.__class__.__name__}: '{self.name}'>"
@property
def name(self):
return self.attrs['Spec']['Name']
def remove(self):
"""
Remove this secret.
Raises:
:py:class:`docker.errors.APIError`
If secret failed to remove.
"""
return self.client.api.remove_secret(self.id)
class SecretCollection(Collection):
"""Secrets on the Docker server."""
model = Secret
def create(self, **kwargs):
obj = self.client.api.create_secret(**kwargs)
obj.setdefault("Spec", {})["Name"] = kwargs.get("name")
return self.prepare_model(obj)
create.__doc__ = APIClient.create_secret.__doc__
def get(self, secret_id):
"""
Get a secret.
Args:
secret_id (str): Secret ID.
Returns:
(:py:class:`Secret`): The secret.
Raises:
:py:class:`docker.errors.NotFound`
If the secret does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_secret(secret_id))
def list(self, **kwargs):
"""
List secrets. Similar to the ``docker secret ls`` command.
Args:
filters (dict): Server-side list filtering options.
Returns:
(list of :py:class:`Secret`): The secrets.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.secrets(**kwargs)
return [self.prepare_model(obj) for obj in resp]

View File

@@ -0,0 +1,386 @@
import copy
from docker.errors import create_unexpected_kwargs_error, InvalidArgument
from docker.types import TaskTemplate, ContainerSpec, Placement, ServiceMode
from .resource import Model, Collection
class Service(Model):
"""A service."""
id_attribute = 'ID'
@property
def name(self):
"""The service's name."""
return self.attrs['Spec']['Name']
@property
def version(self):
"""
The version number of the service. If this is not the same as the
server, the :py:meth:`update` function will not work and you will
need to call :py:meth:`reload` before calling it again.
"""
return self.attrs.get('Version').get('Index')
def remove(self):
"""
Stop and remove the service.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.remove_service(self.id)
def tasks(self, filters=None):
"""
List the tasks in this service.
Args:
filters (dict): A map of filters to process on the tasks list.
Valid filters: ``id``, ``name``, ``node``,
``label``, and ``desired-state``.
Returns:
:py:class:`list`: List of task dictionaries.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
if filters is None:
filters = {}
filters['service'] = self.id
return self.client.api.tasks(filters=filters)
def update(self, **kwargs):
"""
Update a service's configuration. Similar to the ``docker service
update`` command.
Takes the same parameters as :py:meth:`~ServiceCollection.create`.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
# Image is required, so if it hasn't been set, use current image
if 'image' not in kwargs:
spec = self.attrs['Spec']['TaskTemplate']['ContainerSpec']
kwargs['image'] = spec['Image']
if kwargs.get('force_update') is True:
task_template = self.attrs['Spec']['TaskTemplate']
current_value = int(task_template.get('ForceUpdate', 0))
kwargs['force_update'] = current_value + 1
create_kwargs = _get_create_service_kwargs('update', kwargs)
return self.client.api.update_service(
self.id,
self.version,
**create_kwargs
)
def logs(self, **kwargs):
"""
Get log stream for the service.
Note: This method works only for services with the ``json-file``
or ``journald`` logging drivers.
Args:
details (bool): Show extra details provided to logs.
Default: ``False``
follow (bool): Keep connection open to read logs as they are
sent by the Engine. Default: ``False``
stdout (bool): Return logs from ``stdout``. Default: ``False``
stderr (bool): Return logs from ``stderr``. Default: ``False``
            since (int): UNIX timestamp for the logs starting point.
Default: 0
timestamps (bool): Add timestamps to every log line.
tail (string or int): Number of log lines to be returned,
counting from the current end of the logs. Specify an
integer or ``'all'`` to output all log lines.
Default: ``all``
Returns:
generator: Logs for the service.
"""
is_tty = self.attrs['Spec']['TaskTemplate']['ContainerSpec'].get(
'TTY', False
)
return self.client.api.service_logs(self.id, is_tty=is_tty, **kwargs)
def scale(self, replicas):
"""
        Scale the service's containers.
Args:
replicas (int): The number of containers that should be running.
Returns:
bool: ``True`` if successful.
"""
        if 'Global' in self.attrs['Spec']['Mode']:
            raise InvalidArgument('Cannot scale a global service')
service_mode = ServiceMode('replicated', replicas)
return self.client.api.update_service(self.id, self.version,
mode=service_mode,
fetch_current_spec=True)
def force_update(self):
"""
Force update the service even if no changes require it.
Returns:
bool: ``True`` if successful.
"""
return self.update(force_update=True, fetch_current_spec=True)
class ServiceCollection(Collection):
"""Services on the Docker server."""
model = Service
def create(self, image, command=None, **kwargs):
"""
Create a service. Similar to the ``docker service create`` command.
Args:
image (str): The image name to use for the containers.
command (list of str or str): Command to run.
args (list of str): Arguments to the command.
constraints (list of str): :py:class:`~docker.types.Placement`
constraints.
preferences (list of tuple): :py:class:`~docker.types.Placement`
preferences.
            maxreplicas (int): Maximum number of replicas per node. See
                :py:class:`~docker.types.Placement`.
platforms (list of tuple): A list of platform constraints
expressed as ``(arch, os)`` tuples.
container_labels (dict): Labels to apply to the container.
endpoint_spec (EndpointSpec): Properties that can be configured to
access and load balance a service. Default: ``None``.
env (list of str): Environment variables, in the form
``KEY=val``.
hostname (string): Hostname to set on the container.
init (boolean): Run an init inside the container that forwards
signals and reaps processes
isolation (string): Isolation technology used by the service's
containers. Only used for Windows containers.
labels (dict): Labels to apply to the service.
log_driver (str): Log driver to use for containers.
log_driver_options (dict): Log driver options.
mode (ServiceMode): Scheduling mode for the service.
                Default: ``None``
mounts (list of str): Mounts for the containers, in the form
``source:target:options``, where options is either
``ro`` or ``rw``.
name (str): Name to give to the service.
networks (:py:class:`list`): List of network names or IDs or
:py:class:`~docker.types.NetworkAttachmentConfig` to attach the
service to. Default: ``None``.
resources (Resources): Resource limits and reservations.
restart_policy (RestartPolicy): Restart policy for containers.
secrets (list of :py:class:`~docker.types.SecretReference`): List
of secrets accessible to containers for this service.
stop_grace_period (int): Amount of time to wait for
containers to terminate before forcefully killing them.
update_config (UpdateConfig): Specification for the update strategy
of the service. Default: ``None``
rollback_config (RollbackConfig): Specification for the rollback
strategy of the service. Default: ``None``
user (str): User to run commands as.
workdir (str): Working directory for commands to run.
tty (boolean): Whether a pseudo-TTY should be allocated.
groups (:py:class:`list`): A list of additional groups that the
container process will run as.
open_stdin (boolean): Open ``stdin``
read_only (boolean): Mount the container's root filesystem as read
only.
stop_signal (string): Set signal to stop the service's containers
healthcheck (Healthcheck): Healthcheck
configuration for this service.
hosts (:py:class:`dict`): A set of host to IP mappings to add to
the container's `hosts` file.
dns_config (DNSConfig): Specification for DNS
related configurations in resolver configuration file.
configs (:py:class:`list`): List of
:py:class:`~docker.types.ConfigReference` that will be exposed
to the service.
privileges (Privileges): Security options for the service's
containers.
cap_add (:py:class:`list`): A list of kernel capabilities to add to
the default set for the container.
cap_drop (:py:class:`list`): A list of kernel capabilities to drop
from the default set for the container.
sysctls (:py:class:`dict`): A dict of sysctl values to add to the
container
Returns:
:py:class:`Service`: The created service.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
kwargs['image'] = image
kwargs['command'] = command
create_kwargs = _get_create_service_kwargs('create', kwargs)
service_id = self.client.api.create_service(**create_kwargs)
return self.get(service_id)
def get(self, service_id, insert_defaults=None):
"""
Get a service.
Args:
service_id (str): The ID of the service.
insert_defaults (boolean): If true, default values will be merged
into the output.
Returns:
:py:class:`Service`: The service.
Raises:
:py:class:`docker.errors.NotFound`
If the service does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
:py:class:`docker.errors.InvalidVersion`
If one of the arguments is not supported with the current
API version.
"""
return self.prepare_model(
self.client.api.inspect_service(service_id, insert_defaults)
)
def list(self, **kwargs):
"""
List services.
Args:
filters (dict): Filters to process on the nodes list. Valid
filters: ``id``, ``name`` , ``label`` and ``mode``.
Default: ``None``.
Returns:
list of :py:class:`Service`: The services.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return [
self.prepare_model(s)
for s in self.client.api.services(**kwargs)
]
# kwargs to copy straight over to ContainerSpec
CONTAINER_SPEC_KWARGS = [
'args',
'cap_add',
'cap_drop',
'command',
'configs',
'dns_config',
'env',
'groups',
'healthcheck',
'hostname',
'hosts',
'image',
'init',
'isolation',
'labels',
'mounts',
'open_stdin',
'privileges',
'read_only',
'secrets',
'stop_grace_period',
'stop_signal',
'tty',
'user',
'workdir',
'sysctls',
]
# kwargs to copy straight over to TaskTemplate
TASK_TEMPLATE_KWARGS = [
'networks',
'resources',
'restart_policy',
]
# kwargs to copy straight over to create_service
CREATE_SERVICE_KWARGS = [
'name',
'labels',
'mode',
'update_config',
'rollback_config',
'endpoint_spec',
]
PLACEMENT_KWARGS = [
'constraints',
'preferences',
'platforms',
'maxreplicas',
]
def _get_create_service_kwargs(func_name, kwargs):
# Copy over things which can be copied directly
create_kwargs = {}
for key in copy.copy(kwargs):
if key in CREATE_SERVICE_KWARGS:
create_kwargs[key] = kwargs.pop(key)
container_spec_kwargs = {}
for key in copy.copy(kwargs):
if key in CONTAINER_SPEC_KWARGS:
container_spec_kwargs[key] = kwargs.pop(key)
task_template_kwargs = {}
for key in copy.copy(kwargs):
if key in TASK_TEMPLATE_KWARGS:
task_template_kwargs[key] = kwargs.pop(key)
if 'container_labels' in kwargs:
container_spec_kwargs['labels'] = kwargs.pop('container_labels')
placement = {}
for key in copy.copy(kwargs):
if key in PLACEMENT_KWARGS:
placement[key] = kwargs.pop(key)
placement = Placement(**placement)
task_template_kwargs['placement'] = placement
if 'log_driver' in kwargs:
task_template_kwargs['log_driver'] = {
'Name': kwargs.pop('log_driver'),
'Options': kwargs.pop('log_driver_options', {})
}
if func_name == 'update':
if 'force_update' in kwargs:
task_template_kwargs['force_update'] = kwargs.pop('force_update')
# fetch the current spec by default if updating the service
# through the model
fetch_current_spec = kwargs.pop('fetch_current_spec', True)
create_kwargs['fetch_current_spec'] = fetch_current_spec
# All kwargs should have been consumed by this point, so raise
# error if any are left
if kwargs:
raise create_unexpected_kwargs_error(func_name, kwargs)
container_spec = ContainerSpec(**container_spec_kwargs)
task_template_kwargs['container_spec'] = container_spec
create_kwargs['task_template'] = TaskTemplate(**task_template_kwargs)
return create_kwargs
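# A minimal usage sketch, assuming a swarm-mode daemon and network access to
# pull the image:
if __name__ == '__main__':
    import docker

    client = docker.from_env()
    service = client.services.create(image='nginx:alpine', name='web')
    service.scale(3)
    print(len(service.tasks()))
    service.remove()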

View File

@@ -0,0 +1,189 @@
from docker.api import APIClient
from docker.errors import APIError
from .resource import Model
class Swarm(Model):
"""
    The server's Swarm state. This is a singleton that must be reloaded to get
the current state of the Swarm.
"""
id_attribute = 'ID'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.client:
try:
self.reload()
except APIError as e:
# FIXME: https://github.com/docker/docker/issues/29192
if e.response.status_code not in (406, 503):
raise
@property
def version(self):
"""
The version number of the swarm. If this is not the same as the
server, the :py:meth:`update` function will not work and you will
need to call :py:meth:`reload` before calling it again.
"""
return self.attrs.get('Version').get('Index')
def get_unlock_key(self):
return self.client.api.get_unlock_key()
get_unlock_key.__doc__ = APIClient.get_unlock_key.__doc__
def init(self, advertise_addr=None, listen_addr='0.0.0.0:2377',
force_new_cluster=False, default_addr_pool=None,
subnet_size=None, data_path_addr=None, data_path_port=None,
**kwargs):
"""
Initialize a new swarm on this Engine.
Args:
advertise_addr (str): Externally reachable address advertised to
other nodes. This can either be an address/port combination in
the form ``192.168.1.1:4567``, or an interface followed by a
port number, like ``eth0:4567``. If the port number is omitted,
the port number from the listen address is used.
If not specified, it will be automatically detected when
possible.
listen_addr (str): Listen address used for inter-manager
communication, as well as determining the networking interface
used for the VXLAN Tunnel Endpoint (VTEP). This can either be
an address/port combination in the form ``192.168.1.1:4567``,
or an interface followed by a port number, like ``eth0:4567``.
If the port number is omitted, the default swarm listening port
is used. Default: ``0.0.0.0:2377``
force_new_cluster (bool): Force creating a new Swarm, even if
already part of one. Default: False
default_addr_pool (list of str): Default Address Pool specifies
default subnet pools for global scope networks. Each pool
should be specified as a CIDR block, like '10.0.0.0/8'.
Default: None
subnet_size (int): SubnetSize specifies the subnet size of the
networks created from the default subnet pool. Default: None
data_path_addr (string): Address or interface to use for data path
traffic. For example, 192.168.1.1, or an interface, like eth0.
data_path_port (int): Port number to use for data path traffic.
Acceptable port range is 1024 to 49151. If set to ``None`` or
0, the default port 4789 will be used. Default: None
            task_history_retention_limit (int): Maximum number of task
                history entries stored.
            snapshot_interval (int): Number of log entries between snapshots.
keep_old_snapshots (int): Number of snapshots to keep beyond the
current snapshot.
log_entries_for_slow_followers (int): Number of log entries to
keep around to sync up slow followers after a snapshot is
created.
heartbeat_tick (int): Amount of ticks (in seconds) between each
heartbeat.
election_tick (int): Amount of ticks (in seconds) needed without a
leader to trigger a new election.
dispatcher_heartbeat_period (int): The delay for an agent to send
a heartbeat to the dispatcher.
node_cert_expiry (int): Automatic expiry for nodes certificates.
external_ca (dict): Configuration for forwarding signing requests
to an external certificate authority. Use
``docker.types.SwarmExternalCA``.
name (string): Swarm's name
labels (dict): User-defined key/value metadata.
signing_ca_cert (str): The desired signing CA certificate for all
swarm node TLS leaf certificates, in PEM format.
signing_ca_key (str): The desired signing CA key for all swarm
node TLS leaf certificates, in PEM format.
ca_force_rotate (int): An integer whose purpose is to force swarm
to generate a new signing CA certificate and key, if none have
been specified.
autolock_managers (boolean): If set, generate a key and use it to
lock data stored on the managers.
log_driver (DriverConfig): The default log driver to use for tasks
created in the orchestrator.
Returns:
(str): The ID of the created node.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> client.swarm.init(
advertise_addr='eth0', listen_addr='0.0.0.0:5000',
                force_new_cluster=False, default_addr_pool=['10.20.0.0/16'],
subnet_size=24, snapshot_interval=5000,
log_entries_for_slow_followers=1200
)
"""
init_kwargs = {
'advertise_addr': advertise_addr,
'listen_addr': listen_addr,
'force_new_cluster': force_new_cluster,
'default_addr_pool': default_addr_pool,
'subnet_size': subnet_size,
'data_path_addr': data_path_addr,
'data_path_port': data_path_port,
}
init_kwargs['swarm_spec'] = self.client.api.create_swarm_spec(**kwargs)
node_id = self.client.api.init_swarm(**init_kwargs)
self.reload()
return node_id
def join(self, *args, **kwargs):
return self.client.api.join_swarm(*args, **kwargs)
join.__doc__ = APIClient.join_swarm.__doc__
def leave(self, *args, **kwargs):
return self.client.api.leave_swarm(*args, **kwargs)
leave.__doc__ = APIClient.leave_swarm.__doc__
def reload(self):
"""
Inspect the swarm on the server and store the response in
:py:attr:`attrs`.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
self.attrs = self.client.api.inspect_swarm()
def unlock(self, key):
return self.client.api.unlock_swarm(key)
unlock.__doc__ = APIClient.unlock_swarm.__doc__
def update(self, rotate_worker_token=False, rotate_manager_token=False,
rotate_manager_unlock_key=False, **kwargs):
"""
Update the swarm's configuration.
It takes the same arguments as :py:meth:`init`, except
``advertise_addr``, ``listen_addr``, and ``force_new_cluster``. In
addition, it takes these arguments:
Args:
rotate_worker_token (bool): Rotate the worker join token. Default:
``False``.
rotate_manager_token (bool): Rotate the manager join token.
Default: ``False``.
rotate_manager_unlock_key (bool): Rotate the manager unlock key.
Default: ``False``.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
        # The API appears to require node_cert_expiry; default to 90 days,
        # expressed in nanoseconds, when the caller does not provide one.
        if kwargs.get('node_cert_expiry') is None:
            kwargs['node_cert_expiry'] = 7776000000000000  # 90 days in ns
return self.client.api.update_swarm(
version=self.version,
swarm_spec=self.client.api.create_swarm_spec(**kwargs),
rotate_worker_token=rotate_worker_token,
rotate_manager_token=rotate_manager_token,
rotate_manager_unlock_key=rotate_manager_unlock_key
)
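# A minimal usage sketch, assuming the daemon is not yet part of a swarm;
# 'eth0' is a placeholder interface name:
if __name__ == '__main__':
    import docker

    client = docker.from_env()
    node_id = client.swarm.init(advertise_addr='eth0')
    client.swarm.update(rotate_worker_token=True)
    print(node_id, client.swarm.version)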

View File

@@ -0,0 +1,99 @@
from ..api import APIClient
from .resource import Model, Collection
class Volume(Model):
"""A volume."""
id_attribute = 'Name'
@property
def name(self):
"""The name of the volume."""
return self.attrs['Name']
def remove(self, force=False):
"""
Remove this volume.
Args:
force (bool): Force removal of volumes that were already removed
out of band by the volume driver plugin.
Raises:
:py:class:`docker.errors.APIError`
If volume failed to remove.
"""
return self.client.api.remove_volume(self.id, force=force)
class VolumeCollection(Collection):
"""Volumes on the Docker server."""
model = Volume
def create(self, name=None, **kwargs):
"""
Create a volume.
Args:
name (str): Name of the volume. If not specified, the engine
generates a name.
driver (str): Name of the driver used to create the volume
driver_opts (dict): Driver options as a key-value dictionary
labels (dict): Labels to set on the volume
Returns:
(:py:class:`Volume`): The volume created.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> volume = client.volumes.create(name='foobar', driver='local',
driver_opts={'foo': 'bar', 'baz': 'false'},
labels={"key": "value"})
"""
obj = self.client.api.create_volume(name, **kwargs)
return self.prepare_model(obj)
def get(self, volume_id):
"""
Get a volume.
Args:
volume_id (str): Volume name.
Returns:
(:py:class:`Volume`): The volume.
Raises:
:py:class:`docker.errors.NotFound`
If the volume does not exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_volume(volume_id))
def list(self, **kwargs):
"""
List volumes. Similar to the ``docker volume ls`` command.
Args:
filters (dict): Server-side list filtering options.
Returns:
(list of :py:class:`Volume`): The volumes.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.volumes(**kwargs)
if not resp.get('Volumes'):
return []
return [self.prepare_model(obj) for obj in resp['Volumes']]
def prune(self, filters=None):
return self.client.api.prune_volumes(filters=filters)
prune.__doc__ = APIClient.prune_volumes.__doc__
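# A minimal usage sketch against a local daemon:
if __name__ == '__main__':
    import docker

    client = docker.from_env()
    volume = client.volumes.create(name='scratch', driver='local')
    print(client.volumes.get('scratch').attrs['Mountpoint'])
    volume.remove(force=True)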

View File

@@ -0,0 +1,94 @@
import os
import ssl
from . import errors
from .transport import SSLHTTPAdapter
class TLSConfig:
"""
TLS configuration.
Args:
client_cert (tuple of str): Path to client cert, path to client key.
ca_cert (str): Path to CA cert file.
verify (bool or str): This can be a bool or a path to a CA cert
file to verify against. If ``True``, verify using ca_cert;
if ``False`` or not specified, do not verify.
ssl_version (int): A valid `SSL version`_.
assert_hostname (bool): Verify the hostname of the server.
.. _`SSL version`:
https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
"""
cert = None
ca_cert = None
verify = None
ssl_version = None
def __init__(self, client_cert=None, ca_cert=None, verify=None,
ssl_version=None, assert_hostname=None,
assert_fingerprint=None):
# Argument compatibility/mapping with
# https://docs.docker.com/engine/articles/https/
# This diverges from the Docker CLI in that users can specify 'tls'
# here, but also disable any public/default CA pool verification by
# leaving verify=False
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
# If the user provides an SSL version, we should use their preference
if ssl_version:
self.ssl_version = ssl_version
else:
self.ssl_version = ssl.PROTOCOL_TLS_CLIENT
# "client_cert" must have both or neither cert/key files. In
# either case, Alert the user when both are expected, but any are
# missing.
if client_cert:
try:
tls_cert, tls_key = client_cert
except ValueError:
raise errors.TLSParameterError(
'client_cert must be a tuple of'
' (client certificate, key file)'
)
if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
not os.path.isfile(tls_key)):
raise errors.TLSParameterError(
                    'Paths to the certificate and key files must be'
                    ' provided through the client_cert param'
)
self.cert = (tls_cert, tls_key)
# If verify is set, make sure the cert exists
self.verify = verify
self.ca_cert = ca_cert
if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
raise errors.TLSParameterError(
'Invalid CA certificate provided for `ca_cert`.'
)
def configure_client(self, client):
"""
Configure a client with these TLS options.
"""
client.ssl_version = self.ssl_version
if self.verify and self.ca_cert:
client.verify = self.ca_cert
else:
client.verify = self.verify
if self.cert:
client.cert = self.cert
client.mount('https://', SSLHTTPAdapter(
ssl_version=self.ssl_version,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint,
))
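# A minimal usage sketch; the certificate paths and host address are
# placeholders, and the files must exist on disk:
if __name__ == '__main__':
    import docker

    tls_config = TLSConfig(
        client_cert=('/certs/cert.pem', '/certs/key.pem'),
        ca_cert='/certs/ca.pem',
        verify=True,
    )
    client = docker.DockerClient(base_url='tcp://192.168.1.10:2376',
                                 tls=tls_config)
    print(client.ping())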

View File

@@ -0,0 +1,13 @@
# flake8: noqa
from .unixconn import UnixHTTPAdapter
from .ssladapter import SSLHTTPAdapter
try:
from .npipeconn import NpipeHTTPAdapter
from .npipesocket import NpipeSocket
except ImportError:
pass
try:
from .sshconn import SSHHTTPAdapter
except ImportError:
pass

View File

@@ -0,0 +1,8 @@
import requests.adapters
class BaseHTTPAdapter(requests.adapters.HTTPAdapter):
def close(self):
super().close()
if hasattr(self, 'pools'):
self.pools.clear()

View File

@@ -0,0 +1,107 @@
import queue
import requests.adapters
from docker.transport.basehttpadapter import BaseHTTPAdapter
from .. import constants
from .npipesocket import NpipeSocket
import http.client as httplib
try:
import requests.packages.urllib3 as urllib3
except ImportError:
import urllib3
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class NpipeHTTPConnection(httplib.HTTPConnection):
def __init__(self, npipe_path, timeout=60):
super().__init__(
'localhost', timeout=timeout
)
self.npipe_path = npipe_path
self.timeout = timeout
def connect(self):
sock = NpipeSocket()
sock.settimeout(self.timeout)
sock.connect(self.npipe_path)
self.sock = sock
class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, npipe_path, timeout=60, maxsize=10):
super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.npipe_path = npipe_path
self.timeout = timeout
def _new_conn(self):
return NpipeHTTPConnection(
self.npipe_path, self.timeout
)
# When re-using connections, urllib3 tries to call select() on our
# NpipeSocket instance, causing a crash. To circumvent this, we override
# _get_conn, where that check happens.
def _get_conn(self, timeout):
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
except queue.Empty:
if self.block:
raise urllib3.exceptions.EmptyPoolError(
self,
"Pool reached maximum size and no more "
"connections are allowed."
)
# Oh well, we'll create a new connection then
return conn or self._new_conn()
class NpipeHTTPAdapter(BaseHTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['npipe_path',
'pools',
'timeout',
'max_pool_size']
def __init__(self, base_url, timeout=60,
pool_connections=constants.DEFAULT_NUM_POOLS,
max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
self.npipe_path = base_url.replace('npipe://', '')
self.timeout = timeout
self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
super().__init__()
def get_connection(self, url, proxies=None):
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
pool = NpipeHTTPConnectionPool(
self.npipe_path, self.timeout,
maxsize=self.max_pool_size
)
self.pools[url] = pool
return pool
def request_url(self, request, proxies):
        # The select_proxy utility in requests errors out when the provided URL
        # doesn't have a hostname, as is the case when using a named pipe.
        # Since proxies are an irrelevant notion in the case of named pipes
        # anyway, we simply return the path URL directly.
        # See also: https://github.com/docker/docker-sdk-python/issues/811
return request.path_url
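# A minimal usage sketch (Windows only); the adapter is selected
# automatically when the client is pointed at the default engine pipe:
if __name__ == '__main__':
    import docker

    client = docker.APIClient(base_url='npipe:////./pipe/docker_engine')
    print(client.version())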

View File

@@ -0,0 +1,213 @@
import functools
import time
import io
import win32file
import win32pipe
cERROR_PIPE_BUSY = 0xe7
cSECURITY_SQOS_PRESENT = 0x100000
cSECURITY_ANONYMOUS = 0
MAXIMUM_RETRY_COUNT = 10
def check_closed(f):
@functools.wraps(f)
def wrapped(self, *args, **kwargs):
if self._closed:
raise RuntimeError(
'Can not reuse socket after connection was closed.'
)
return f(self, *args, **kwargs)
return wrapped
class NpipeSocket:
""" Partial implementation of the socket API over windows named pipes.
This implementation is only designed to be used as a client socket,
and server-specific methods (bind, listen, accept...) are not
implemented.
"""
def __init__(self, handle=None):
self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
self._handle = handle
self._closed = False
def accept(self):
raise NotImplementedError()
def bind(self, address):
raise NotImplementedError()
def close(self):
self._handle.Close()
self._closed = True
@check_closed
def connect(self, address, retry_count=0):
try:
handle = win32file.CreateFile(
address,
win32file.GENERIC_READ | win32file.GENERIC_WRITE,
0,
None,
win32file.OPEN_EXISTING,
cSECURITY_ANONYMOUS | cSECURITY_SQOS_PRESENT,
0
)
except win32pipe.error as e:
# See Remarks:
# https://msdn.microsoft.com/en-us/library/aa365800.aspx
if e.winerror == cERROR_PIPE_BUSY:
# Another program or thread has grabbed our pipe instance
# before we got to it. Wait for availability and attempt to
# connect again.
retry_count = retry_count + 1
if (retry_count < MAXIMUM_RETRY_COUNT):
time.sleep(1)
return self.connect(address, retry_count)
raise e
self.flags = win32pipe.GetNamedPipeInfo(handle)[0]
self._handle = handle
self._address = address
@check_closed
def connect_ex(self, address):
return self.connect(address)
@check_closed
def detach(self):
self._closed = True
return self._handle
@check_closed
def dup(self):
return NpipeSocket(self._handle)
def getpeername(self):
return self._address
def getsockname(self):
return self._address
def getsockopt(self, level, optname, buflen=None):
raise NotImplementedError()
def ioctl(self, control, option):
raise NotImplementedError()
def listen(self, backlog):
raise NotImplementedError()
def makefile(self, mode=None, bufsize=None):
if mode.strip('b') != 'r':
raise NotImplementedError()
rawio = NpipeFileIOBase(self)
if bufsize is None or bufsize <= 0:
bufsize = io.DEFAULT_BUFFER_SIZE
return io.BufferedReader(rawio, buffer_size=bufsize)
@check_closed
def recv(self, bufsize, flags=0):
err, data = win32file.ReadFile(self._handle, bufsize)
return data
@check_closed
def recvfrom(self, bufsize, flags=0):
data = self.recv(bufsize, flags)
return (data, self._address)
@check_closed
def recvfrom_into(self, buf, nbytes=0, flags=0):
return self.recv_into(buf, nbytes, flags), self._address
@check_closed
def recv_into(self, buf, nbytes=0):
readbuf = buf
if not isinstance(buf, memoryview):
readbuf = memoryview(buf)
err, data = win32file.ReadFile(
self._handle,
readbuf[:nbytes] if nbytes else readbuf
)
return len(data)
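    # Python 2 compatibility leftover; not called anywhere on Python 3.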
def _recv_into_py2(self, buf, nbytes):
err, data = win32file.ReadFile(self._handle, nbytes or len(buf))
n = len(data)
buf[:n] = data
return n
@check_closed
def send(self, string, flags=0):
err, nbytes = win32file.WriteFile(self._handle, string)
return nbytes
@check_closed
def sendall(self, string, flags=0):
return self.send(string, flags)
@check_closed
def sendto(self, string, address):
self.connect(address)
return self.send(string)
def setblocking(self, flag):
if flag:
return self.settimeout(None)
return self.settimeout(0)
def settimeout(self, value):
if value is None:
# Blocking mode
self._timeout = win32pipe.NMPWAIT_WAIT_FOREVER
elif not isinstance(value, (float, int)) or value < 0:
raise ValueError('Timeout value out of range')
elif value == 0:
# Non-blocking mode
self._timeout = win32pipe.NMPWAIT_NO_WAIT
else:
# Timeout mode - Value converted to milliseconds
self._timeout = value * 1000
def gettimeout(self):
return self._timeout
def setsockopt(self, level, optname, value):
raise NotImplementedError()
@check_closed
def shutdown(self, how):
return self.close()
class NpipeFileIOBase(io.RawIOBase):
def __init__(self, npipe_socket):
self.sock = npipe_socket
def close(self):
super().close()
self.sock = None
def fileno(self):
return self.sock.fileno()
def isatty(self):
return False
def readable(self):
return True
def readinto(self, buf):
return self.sock.recv_into(buf)
def seekable(self):
return False
def writable(self):
return False
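# A minimal usage sketch (Windows only), talking to the engine's default
# named pipe directly through the socket shim:
if __name__ == '__main__':
    sock = NpipeSocket()
    sock.settimeout(10)
    sock.connect(r'\\.\pipe\docker_engine')
    sock.close()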

View File

@@ -0,0 +1,254 @@
import paramiko
import queue
import urllib.parse
import requests.adapters
import logging
import os
import signal
import socket
import subprocess
from docker.transport.basehttpadapter import BaseHTTPAdapter
from .. import constants
import http.client as httplib
try:
import requests.packages.urllib3 as urllib3
except ImportError:
import urllib3
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class SSHSocket(socket.socket):
def __init__(self, host):
super().__init__(
socket.AF_INET, socket.SOCK_STREAM)
self.host = host
self.port = None
self.user = None
if ':' in self.host:
self.host, self.port = self.host.split(':')
if '@' in self.host:
self.user, self.host = self.host.split('@')
self.proc = None
def connect(self, **kwargs):
args = ['ssh']
if self.user:
args = args + ['-l', self.user]
if self.port:
args = args + ['-p', self.port]
args = args + ['--', self.host, 'docker system dial-stdio']
preexec_func = None
if not constants.IS_WINDOWS_PLATFORM:
def f():
signal.signal(signal.SIGINT, signal.SIG_IGN)
preexec_func = f
env = dict(os.environ)
# drop LD_LIBRARY_PATH and SSL_CERT_FILE
env.pop('LD_LIBRARY_PATH', None)
env.pop('SSL_CERT_FILE', None)
self.proc = subprocess.Popen(
args,
env=env,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
preexec_fn=preexec_func)
def _write(self, data):
if not self.proc or self.proc.stdin.closed:
            raise Exception('SSH subprocess not initiated. '
                            'connect() must be called first.')
written = self.proc.stdin.write(data)
self.proc.stdin.flush()
return written
def sendall(self, data):
self._write(data)
def send(self, data):
return self._write(data)
def recv(self, n):
if not self.proc:
            raise Exception('SSH subprocess not initiated. '
                            'connect() must be called first.')
return self.proc.stdout.read(n)
def makefile(self, mode):
if not self.proc:
self.connect()
self.proc.stdout.channel = self
return self.proc.stdout
def close(self):
if not self.proc or self.proc.stdin.closed:
return
self.proc.stdin.write(b'\n\n')
self.proc.stdin.flush()
self.proc.terminate()
class SSHConnection(httplib.HTTPConnection):
def __init__(self, ssh_transport=None, timeout=60, host=None):
super().__init__(
'localhost', timeout=timeout
)
self.ssh_transport = ssh_transport
self.timeout = timeout
self.ssh_host = host
def connect(self):
if self.ssh_transport:
sock = self.ssh_transport.open_session()
sock.settimeout(self.timeout)
sock.exec_command('docker system dial-stdio')
else:
sock = SSHSocket(self.ssh_host)
sock.settimeout(self.timeout)
sock.connect()
self.sock = sock
class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
scheme = 'ssh'
def __init__(self, ssh_client=None, timeout=60, maxsize=10, host=None):
super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.ssh_transport = None
self.timeout = timeout
if ssh_client:
self.ssh_transport = ssh_client.get_transport()
self.ssh_host = host
def _new_conn(self):
return SSHConnection(self.ssh_transport, self.timeout, self.ssh_host)
# When re-using connections, urllib3 calls fileno() on our
# SSH channel instance, quickly overloading our fd limit. To avoid this,
# we override _get_conn
def _get_conn(self, timeout):
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
except queue.Empty:
if self.block:
raise urllib3.exceptions.EmptyPoolError(
self,
"Pool reached maximum size and no more "
"connections are allowed."
)
# Oh well, we'll create a new connection then
return conn or self._new_conn()
class SSHHTTPAdapter(BaseHTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + [
'pools', 'timeout', 'ssh_client', 'ssh_params', 'max_pool_size'
]
def __init__(self, base_url, timeout=60,
pool_connections=constants.DEFAULT_NUM_POOLS,
max_pool_size=constants.DEFAULT_MAX_POOL_SIZE,
shell_out=False):
self.ssh_client = None
if not shell_out:
self._create_paramiko_client(base_url)
self._connect()
self.ssh_host = base_url
if base_url.startswith('ssh://'):
self.ssh_host = base_url[len('ssh://'):]
self.timeout = timeout
self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
super().__init__()
def _create_paramiko_client(self, base_url):
logging.getLogger("paramiko").setLevel(logging.WARNING)
self.ssh_client = paramiko.SSHClient()
base_url = urllib.parse.urlparse(base_url)
self.ssh_params = {
"hostname": base_url.hostname,
"port": base_url.port,
"username": base_url.username
}
ssh_config_file = os.path.expanduser("~/.ssh/config")
if os.path.exists(ssh_config_file):
conf = paramiko.SSHConfig()
with open(ssh_config_file) as f:
conf.parse(f)
host_config = conf.lookup(base_url.hostname)
if 'proxycommand' in host_config:
self.ssh_params["sock"] = paramiko.ProxyCommand(
host_config['proxycommand']
)
if 'hostname' in host_config:
self.ssh_params['hostname'] = host_config['hostname']
if base_url.port is None and 'port' in host_config:
self.ssh_params['port'] = host_config['port']
if base_url.username is None and 'user' in host_config:
self.ssh_params['username'] = host_config['user']
if 'identityfile' in host_config:
self.ssh_params['key_filename'] = host_config['identityfile']
self.ssh_client.load_system_host_keys()
self.ssh_client.set_missing_host_key_policy(paramiko.RejectPolicy())
def _connect(self):
if self.ssh_client:
self.ssh_client.connect(**self.ssh_params)
def get_connection(self, url, proxies=None):
if not self.ssh_client:
return SSHConnectionPool(
ssh_client=self.ssh_client,
timeout=self.timeout,
maxsize=self.max_pool_size,
host=self.ssh_host
)
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
            # Connection is closed; try a reconnect
if self.ssh_client and not self.ssh_client.get_transport():
self._connect()
pool = SSHConnectionPool(
ssh_client=self.ssh_client,
timeout=self.timeout,
maxsize=self.max_pool_size,
host=self.ssh_host
)
self.pools[url] = pool
return pool
def close(self):
super().close()
if self.ssh_client:
self.ssh_client.close()
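# A minimal usage sketch; 'user@remote-host' is a placeholder. With
# use_ssh_client=True the adapter shells out to the local ssh binary
# (SSHSocket) instead of using paramiko:
if __name__ == '__main__':
    import docker

    client = docker.DockerClient(base_url='ssh://user@remote-host',
                                 use_ssh_client=True)
    print(client.info()['Name'])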

View File

@@ -0,0 +1,65 @@
""" Resolves OpenSSL issues in some servers:
https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
https://github.com/kennethreitz/requests/pull/799
"""
from packaging.version import Version
from requests.adapters import HTTPAdapter
from docker.transport.basehttpadapter import BaseHTTPAdapter
try:
import requests.packages.urllib3 as urllib3
except ImportError:
import urllib3
PoolManager = urllib3.poolmanager.PoolManager
class SSLHTTPAdapter(BaseHTTPAdapter):
'''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
__attrs__ = HTTPAdapter.__attrs__ + ['assert_fingerprint',
'assert_hostname',
'ssl_version']
def __init__(self, ssl_version=None, assert_hostname=None,
assert_fingerprint=None, **kwargs):
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
super().__init__(**kwargs)
def init_poolmanager(self, connections, maxsize, block=False):
kwargs = {
'num_pools': connections,
'maxsize': maxsize,
'block': block,
'assert_hostname': self.assert_hostname,
'assert_fingerprint': self.assert_fingerprint,
}
if self.ssl_version and self.can_override_ssl_version():
kwargs['ssl_version'] = self.ssl_version
self.poolmanager = PoolManager(**kwargs)
def get_connection(self, *args, **kwargs):
"""
Ensure assert_hostname is set correctly on our pool
We already take care of a normal poolmanager via init_poolmanager
But we still need to take care of when there is a proxy poolmanager
"""
conn = super().get_connection(*args, **kwargs)
if conn.assert_hostname != self.assert_hostname:
conn.assert_hostname = self.assert_hostname
return conn
def can_override_ssl_version(self):
        urllib_ver = urllib3.__version__.split('-')[0]
        if not urllib_ver:
            return False
if urllib_ver == 'dev':
return True
return Version(urllib_ver) > Version('1.5')
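# A minimal usage sketch: the adapter mounts on any requests session, which
# is how TLSConfig.configure_client wires it up:
if __name__ == '__main__':
    import requests

    session = requests.Session()
    session.mount('https://', SSLHTTPAdapter(assert_hostname=False))
    print(session.get('https://example.com').status_code)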

View File

@@ -0,0 +1,96 @@
import requests.adapters
import socket
import http.client as httplib
from docker.transport.basehttpadapter import BaseHTTPAdapter
from .. import constants
try:
import requests.packages.urllib3 as urllib3
except ImportError:
import urllib3
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class UnixHTTPConnection(httplib.HTTPConnection):
def __init__(self, base_url, unix_socket, timeout=60):
super().__init__(
'localhost', timeout=timeout
)
self.base_url = base_url
self.unix_socket = unix_socket
self.timeout = timeout
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
sock.connect(self.unix_socket)
self.sock = sock
def putheader(self, header, *values):
super().putheader(header, *values)
def response_class(self, sock, *args, **kwargs):
return httplib.HTTPResponse(sock, *args, **kwargs)
class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.base_url = base_url
self.socket_path = socket_path
self.timeout = timeout
def _new_conn(self):
return UnixHTTPConnection(
self.base_url, self.socket_path, self.timeout
)
class UnixHTTPAdapter(BaseHTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools',
'socket_path',
'timeout',
'max_pool_size']
def __init__(self, socket_url, timeout=60,
pool_connections=constants.DEFAULT_NUM_POOLS,
max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
socket_path = socket_url.replace('http+unix://', '')
if not socket_path.startswith('/'):
socket_path = '/' + socket_path
self.socket_path = socket_path
self.timeout = timeout
self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
super().__init__()
def get_connection(self, url, proxies=None):
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
pool = UnixHTTPConnectionPool(
url, self.socket_path, self.timeout,
maxsize=self.max_pool_size
)
self.pools[url] = pool
return pool
def request_url(self, request, proxies):
# The select_proxy utility in requests errors out when the provided URL
        # doesn't have a hostname, as is the case when using a UNIX socket.
# Since proxies are an irrelevant notion in the case of UNIX sockets
# anyway, we simply return the path URL directly.
# See also: https://github.com/docker/docker-py/issues/811
return request.path_url
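# A minimal usage sketch; unix socket URLs are the default transport on
# Linux:
if __name__ == '__main__':
    import docker

    client = docker.APIClient(base_url='unix:///var/run/docker.sock')
    print(client.version()['ApiVersion'])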

View File

@@ -0,0 +1,14 @@
# flake8: noqa
from .containers import (
ContainerConfig, HostConfig, LogConfig, Ulimit, DeviceRequest
)
from .daemon import CancellableStream
from .healthcheck import Healthcheck
from .networks import EndpointConfig, IPAMConfig, IPAMPool, NetworkingConfig
from .services import (
ConfigReference, ContainerSpec, DNSConfig, DriverConfig, EndpointSpec,
Mount, Placement, PlacementPreference, Privileges, Resources,
RestartPolicy, RollbackConfig, SecretReference, ServiceMode, TaskTemplate,
UpdateConfig, NetworkAttachmentConfig
)
from .swarm import SwarmSpec, SwarmExternalCA
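# A minimal usage sketch, composing the low-level service types by hand the
# way ServiceCollection.create does internally (assumes a swarm-mode daemon):
if __name__ == '__main__':
    import docker

    client = docker.APIClient()
    spec = ContainerSpec(image='nginx:alpine')
    template = TaskTemplate(
        container_spec=spec,
        resources=Resources(mem_limit=64 * 1024 * 1024),
    )
    client.create_service(template, name='web')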

Some files were not shown because too many files have changed in this diff