Init: mediaserver

This commit is contained in:
2023-02-08 12:13:28 +01:00
parent 848bc9739c
commit f7c23d4ba9
31914 changed files with 6175775 additions and 0 deletions

View File

@@ -0,0 +1 @@
__version__ = '1.29.2'

View File

@@ -0,0 +1,3 @@
from compose.cli.main import main
main()

View File

@@ -0,0 +1,64 @@
import enum
import os
from ..const import IS_WINDOWS_PLATFORM
NAMES = [
'grey',
'red',
'green',
'yellow',
'blue',
'magenta',
'cyan',
'white'
]
@enum.unique
class AnsiMode(enum.Enum):
"""Enumeration for when to output ANSI colors."""
NEVER = "never"
ALWAYS = "always"
AUTO = "auto"
def use_ansi_codes(self, stream):
if self is AnsiMode.ALWAYS:
return True
if self is AnsiMode.NEVER or os.environ.get('CLICOLOR') == '0':
return False
return stream.isatty()
def get_pairs():
for i, name in enumerate(NAMES):
yield (name, str(30 + i))
yield ('intense_' + name, str(30 + i) + ';1')
def ansi(code):
return '\033[{}m'.format(code)
def ansi_color(code, s):
return '{}{}{}'.format(ansi(code), s, ansi(0))
def make_color_fn(code):
return lambda s: ansi_color(code, s)
if IS_WINDOWS_PLATFORM:
import colorama
colorama.init(strip=False)
for (name, code) in get_pairs():
globals()[name] = make_color_fn(code)
def rainbow():
cs = ['cyan', 'yellow', 'green', 'magenta', 'blue',
'intense_cyan', 'intense_yellow', 'intense_green',
'intense_magenta', 'intense_blue']
for c in cs:
yield globals()[c]
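
A minimal usage sketch (not part of this file): the loop over get_pairs() above injects one function per color name into the module globals, so callers import red, intense_green, and friends as ordinary names.

# Hypothetical caller; assumes this module is importable as compose.cli.colors.
from compose.cli import colors

print(colors.red('error'))          # '\033[31m' + 'error' + '\033[0m'
print(colors.intense_green('ok'))   # '\033[32;1m' + 'ok' + '\033[0m'

# rainbow() yields ten color functions; compose uses it to pick log-prefix colors.
for color_fn, name in zip(colors.rainbow(), ['web_1', 'db_1']):
    print(color_fn(name))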

View File

@@ -0,0 +1,210 @@
import logging
import os
import re
from . import errors
from .. import config
from .. import parallel
from ..config.environment import Environment
from ..const import LABEL_CONFIG_FILES
from ..const import LABEL_ENVIRONMENT_FILE
from ..const import LABEL_WORKING_DIR
from ..project import Project
from .docker_client import get_client
from .docker_client import load_context
from .docker_client import make_context
from .errors import UserError
log = logging.getLogger(__name__)
SILENT_COMMANDS = {
'events',
'exec',
'kill',
'logs',
'pause',
'ps',
'restart',
'rm',
'start',
'stop',
'top',
'unpause',
}
def project_from_options(project_dir, options, additional_options=None):
additional_options = additional_options or {}
override_dir = get_project_dir(options)
environment_file = options.get('--env-file')
environment = Environment.from_env_file(override_dir or project_dir, environment_file)
environment.silent = options.get('COMMAND', None) in SILENT_COMMANDS
set_parallel_limit(environment)
# get the context for the run
context = None
context_name = options.get('--context', None)
if context_name:
context = load_context(context_name)
if not context:
raise UserError("Context '{}' not found".format(context_name))
host = options.get('--host', None)
if host is not None:
if context:
raise UserError(
"-H, --host and -c, --context are mutually exclusive. Only one should be set.")
host = host.lstrip('=')
context = make_context(host, options, environment)
return get_project(
project_dir,
get_config_path_from_options(options, environment),
project_name=options.get('--project-name'),
verbose=options.get('--verbose'),
context=context,
environment=environment,
override_dir=override_dir,
interpolate=(not additional_options.get('--no-interpolate')),
environment_file=environment_file,
enabled_profiles=get_profiles_from_options(options, environment)
)
def set_parallel_limit(environment):
parallel_limit = environment.get('COMPOSE_PARALLEL_LIMIT')
if parallel_limit:
try:
parallel_limit = int(parallel_limit)
except ValueError:
raise errors.UserError(
'COMPOSE_PARALLEL_LIMIT must be an integer (found: "{}")'.format(
environment.get('COMPOSE_PARALLEL_LIMIT')
)
)
if parallel_limit <= 1:
raise errors.UserError('COMPOSE_PARALLEL_LIMIT can not be less than 2')
parallel.GlobalLimit.set_global_limit(parallel_limit)
def get_project_dir(options):
override_dir = None
files = get_config_path_from_options(options, os.environ)
if files:
if files[0] == '-':
return '.'
override_dir = os.path.dirname(files[0])
return options.get('--project-directory') or override_dir
def get_config_from_options(base_dir, options, additional_options=None):
additional_options = additional_options or {}
override_dir = get_project_dir(options)
environment_file = options.get('--env-file')
environment = Environment.from_env_file(override_dir or base_dir, environment_file)
config_path = get_config_path_from_options(options, environment)
return config.load(
config.find(base_dir, config_path, environment, override_dir),
not additional_options.get('--no-interpolate')
)
def get_config_path_from_options(options, environment):
def unicode_paths(paths):
return [p.decode('utf-8') if isinstance(p, bytes) else p for p in paths]
file_option = options.get('--file')
if file_option:
return unicode_paths(file_option)
config_files = environment.get('COMPOSE_FILE')
if config_files:
pathsep = environment.get('COMPOSE_PATH_SEPARATOR', os.pathsep)
return unicode_paths(config_files.split(pathsep))
return None
def get_profiles_from_options(options, environment):
profile_option = options.get('--profile')
if profile_option:
return profile_option
profiles = environment.get('COMPOSE_PROFILES')
if profiles:
return profiles.split(',')
return []
def get_project(project_dir, config_path=None, project_name=None, verbose=False,
context=None, environment=None, override_dir=None,
interpolate=True, environment_file=None, enabled_profiles=None):
if not environment:
environment = Environment.from_env_file(project_dir)
config_details = config.find(project_dir, config_path, environment, override_dir)
project_name = get_project_name(
config_details.working_dir, project_name, environment
)
config_data = config.load(config_details, interpolate)
api_version = environment.get('COMPOSE_API_VERSION')
client = get_client(
verbose=verbose, version=api_version, context=context, environment=environment
)
with errors.handle_connection_errors(client):
return Project.from_config(
project_name,
config_data,
client,
environment.get('DOCKER_DEFAULT_PLATFORM'),
execution_context_labels(config_details, environment_file),
enabled_profiles,
)
def execution_context_labels(config_details, environment_file):
extra_labels = [
'{}={}'.format(LABEL_WORKING_DIR, os.path.abspath(config_details.working_dir))
]
if not use_config_from_stdin(config_details):
extra_labels.append('{}={}'.format(LABEL_CONFIG_FILES, config_files_label(config_details)))
if environment_file is not None:
extra_labels.append('{}={}'.format(
LABEL_ENVIRONMENT_FILE,
os.path.normpath(environment_file))
)
return extra_labels
def use_config_from_stdin(config_details):
for c in config_details.config_files:
if not c.filename:
return True
return False
def config_files_label(config_details):
return ",".join(
os.path.normpath(c.filename) for c in config_details.config_files
)
def get_project_name(working_dir, project_name=None, environment=None):
def normalize_name(name):
return re.sub(r'[^-_a-z0-9]', '', name.lower())
if not environment:
environment = Environment.from_env_file(working_dir)
project_name = project_name or environment.get('COMPOSE_PROJECT_NAME')
if project_name:
return normalize_name(project_name)
project = os.path.basename(os.path.abspath(working_dir))
if project:
return normalize_name(project)
return 'default'
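
A hedged sketch of the lookup order in get_config_path_from_options: an explicit --file option wins, then COMPOSE_FILE split on COMPOSE_PATH_SEPARATOR (os.pathsep by default), else None. The values below are hypothetical.

env = {'COMPOSE_FILE': 'docker-compose.yml:docker-compose.override.yml',
       'COMPOSE_PATH_SEPARATOR': ':'}
get_config_path_from_options({'--file': None}, env)
# -> ['docker-compose.yml', 'docker-compose.override.yml']
get_config_path_from_options({'--file': ['custom.yml']}, env)
# -> ['custom.yml']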

View File

@@ -0,0 +1,173 @@
import logging
import os.path
import ssl
from docker import APIClient
from docker import Context
from docker import ContextAPI
from docker import TLSConfig
from docker.errors import TLSParameterError
from docker.utils import kwargs_from_env
from docker.utils.config import home_dir
from . import verbose_proxy
from ..config.environment import Environment
from ..const import HTTP_TIMEOUT
from ..utils import unquote_path
from .errors import UserError
from .utils import generate_user_agent
from .utils import get_version_info
log = logging.getLogger(__name__)
def default_cert_path():
return os.path.join(home_dir(), '.docker')
def make_context(host, options, environment):
tls = tls_config_from_options(options, environment)
ctx = Context("compose", host=host, tls=tls.verify if tls else False)
if tls:
ctx.set_endpoint("docker", host, tls, skip_tls_verify=not tls.verify)
return ctx
def load_context(name=None):
return ContextAPI.get_context(name)
def get_client(environment, verbose=False, version=None, context=None):
client = docker_client(
version=version, context=context,
environment=environment, tls_version=get_tls_version(environment)
)
if verbose:
version_info = client.version().items()
log.info(get_version_info('full'))
log.info("Docker base_url: %s", client.base_url)
log.info("Docker version: %s",
", ".join("%s=%s" % item for item in version_info))
return verbose_proxy.VerboseProxy('docker', client)
return client
def get_tls_version(environment):
compose_tls_version = environment.get('COMPOSE_TLS_VERSION', None)
if not compose_tls_version:
return None
tls_attr_name = "PROTOCOL_{}".format(compose_tls_version)
if not hasattr(ssl, tls_attr_name):
log.warning(
'The "{}" protocol is unavailable. You may need to update your '
'version of Python or OpenSSL. Falling back to TLSv1 (default).'
.format(compose_tls_version)
)
return None
return getattr(ssl, tls_attr_name)
def tls_config_from_options(options, environment=None):
environment = environment or Environment()
cert_path = environment.get('DOCKER_CERT_PATH') or None
tls = options.get('--tls', False)
ca_cert = unquote_path(options.get('--tlscacert'))
cert = unquote_path(options.get('--tlscert'))
key = unquote_path(options.get('--tlskey'))
# verify is a special case - with docopt `--tlsverify` = False means it
# wasn't used, so we set it if either the environment or the flag is True
# see https://github.com/docker/compose/issues/5632
verify = options.get('--tlsverify') or environment.get_boolean('DOCKER_TLS_VERIFY')
skip_hostname_check = options.get('--skip-hostname-check', False)
if cert_path is not None and not any((ca_cert, cert, key)):
# FIXME: Modify TLSConfig to take a cert_path argument and do this internally
cert = os.path.join(cert_path, 'cert.pem')
key = os.path.join(cert_path, 'key.pem')
ca_cert = os.path.join(cert_path, 'ca.pem')
if verify and not any((ca_cert, cert, key)):
# Default location for cert files is ~/.docker
ca_cert = os.path.join(default_cert_path(), 'ca.pem')
cert = os.path.join(default_cert_path(), 'cert.pem')
key = os.path.join(default_cert_path(), 'key.pem')
tls_version = get_tls_version(environment)
advanced_opts = any([ca_cert, cert, key, verify, tls_version])
if tls is True and not advanced_opts:
return True
elif advanced_opts: # --tls is a noop
client_cert = None
if cert or key:
client_cert = (cert, key)
return TLSConfig(
client_cert=client_cert, verify=verify, ca_cert=ca_cert,
assert_hostname=False if skip_hostname_check else None,
ssl_version=tls_version
)
return None
def docker_client(environment, version=None, context=None, tls_version=None):
"""
Returns a docker-py client configured using environment variables
according to the same logic as the official Docker client.
"""
try:
kwargs = kwargs_from_env(environment=environment, ssl_version=tls_version)
except TLSParameterError:
raise UserError(
"TLS configuration is invalid - make sure your DOCKER_TLS_VERIFY "
"and DOCKER_CERT_PATH are set correctly.\n"
"You might need to run `eval \"$(docker-machine env default)\"`")
if not context:
# check env for DOCKER_HOST and certs path
host = kwargs.get("base_url", None)
tls = kwargs.get("tls", None)
verify = False if not tls else tls.verify
if host:
context = Context("compose", host=host, tls=verify)
else:
context = ContextAPI.get_current_context()
if tls:
context.set_endpoint("docker", host=host, tls_cfg=tls, skip_tls_verify=not verify)
if not context.is_docker_host():
raise UserError(
"The platform targeted with the current context is not supported.\n"
"Make sure the context in use targets a Docker Engine.\n")
kwargs['base_url'] = context.Host
if context.TLSConfig:
kwargs['tls'] = context.TLSConfig
if version:
kwargs['version'] = version
timeout = environment.get('COMPOSE_HTTP_TIMEOUT')
if timeout:
kwargs['timeout'] = int(timeout)
else:
kwargs['timeout'] = HTTP_TIMEOUT
kwargs['user_agent'] = generate_user_agent()
# Workaround for
# https://pyinstaller.readthedocs.io/en/v3.3.1/runtime-information.html#ld-library-path-libpath-considerations
if 'LD_LIBRARY_PATH_ORIG' in environment:
kwargs['credstore_env'] = {
'LD_LIBRARY_PATH': environment.get('LD_LIBRARY_PATH_ORIG'),
}
use_paramiko_ssh = int(environment.get('COMPOSE_PARAMIKO_SSH', 0))
client = APIClient(use_ssh_client=not use_paramiko_ssh, **kwargs)
client._original_base_url = kwargs.get('base_url')
return client
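
A hedged sketch of tls_config_from_options: a bare --tls with no certificate options returns plain True, while any "advanced" option produces a docker.TLSConfig. The option dict mimics docopt output and is hypothetical.

opts = {'--tls': True, '--tlscacert': None, '--tlscert': None,
        '--tlskey': None, '--tlsverify': False, '--skip-hostname-check': False}
tls_config_from_options(opts, Environment())  # -> True: encrypt, but don't verify

# Setting '--tlsverify': True (or DOCKER_TLS_VERIFY=1) returns a full TLSConfig
# instead, defaulting ca.pem/cert.pem/key.pem to ~/.docker; note that TLSConfig
# validates that those files actually exist.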

View File

@@ -0,0 +1,62 @@
from inspect import getdoc
from docopt import docopt
from docopt import DocoptExit
def docopt_full_help(docstring, *args, **kwargs):
try:
return docopt(docstring, *args, **kwargs)
except DocoptExit:
raise SystemExit(docstring)
class DocoptDispatcher:
def __init__(self, command_class, options):
self.command_class = command_class
self.options = options
@classmethod
def get_command_and_options(cls, doc_entity, argv, options):
command_help = getdoc(doc_entity)
opt = docopt_full_help(command_help, argv, **options)
command = opt['COMMAND']
return command_help, opt, command
def parse(self, argv):
command_help, options, command = DocoptDispatcher.get_command_and_options(
self.command_class, argv, self.options)
if command is None:
raise SystemExit(command_help)
handler = get_handler(self.command_class, command)
docstring = getdoc(handler)
if docstring is None:
raise NoSuchCommand(command, self)
command_options = docopt_full_help(docstring, options['ARGS'], options_first=True)
return options, handler, command_options
def get_handler(command_class, command):
command = command.replace('-', '_')
# we want to keep the "exec" command, since that's what the docker client has,
# but in Python `exec` is a keyword
if command == "exec":
command = "exec_command"
if not hasattr(command_class, command):
raise NoSuchCommand(command, command_class)
return getattr(command_class, command)
class NoSuchCommand(Exception):
def __init__(self, command, supercommand):
super().__init__("No such command: %s" % command)
self.command = command
self.supercommand = supercommand
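
A minimal, hypothetical command class (not part of this commit) illustrating the dispatch contract: the class docstring is the top-level docopt usage, and each handler's own docstring is parsed again for the subcommand.

class HelloCommand:
    """Usage:
      hello [COMMAND] [ARGS...]
    """
    def greet(self, options):
        """Print a greeting.

        Usage: greet
        """
        print("hello")

dispatcher = DocoptDispatcher(HelloCommand, {'options_first': True})
options, handler, command_options = dispatcher.parse(['greet'])
handler(HelloCommand(), command_options)  # prints "hello"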

View File

@@ -0,0 +1,165 @@
import contextlib
import logging
import socket
from distutils.spawn import find_executable
from textwrap import dedent
from docker.errors import APIError
from requests.exceptions import ConnectionError as RequestsConnectionError
from requests.exceptions import ReadTimeout
from requests.exceptions import SSLError
from requests.packages.urllib3.exceptions import ReadTimeoutError
from ..const import API_VERSION_TO_ENGINE_VERSION
from .utils import binarystr_to_unicode
from .utils import is_docker_for_mac_installed
from .utils import is_mac
from .utils import is_ubuntu
from .utils import is_windows
log = logging.getLogger(__name__)
class UserError(Exception):
def __init__(self, msg):
self.msg = dedent(msg).strip()
def __str__(self):
return self.msg
class ConnectionError(Exception):
pass
@contextlib.contextmanager
def handle_connection_errors(client):
try:
yield
except SSLError as e:
log.error('SSL error: %s' % e)
raise ConnectionError()
except RequestsConnectionError as e:
if e.args and isinstance(e.args[0], ReadTimeoutError):
log_timeout_error(client.timeout)
raise ConnectionError()
exit_with_error(get_conn_error_message(client.base_url))
except APIError as e:
log_api_error(e, client.api_version)
raise ConnectionError()
except (ReadTimeout, socket.timeout):
log_timeout_error(client.timeout)
raise ConnectionError()
except Exception as e:
if is_windows():
import pywintypes
if isinstance(e, pywintypes.error):
log_windows_pipe_error(e)
raise ConnectionError()
raise
def log_windows_pipe_error(exc):
if exc.winerror == 2:
log.error("Couldn't connect to Docker daemon. You might need to start Docker for Windows.")
elif exc.winerror == 232: # https://github.com/docker/compose/issues/5005
log.error(
"The current Compose file version is not compatible with your engine version. "
"Please upgrade your Compose file to a more recent version, or set "
"a COMPOSE_API_VERSION in your environment."
)
else:
log.error(
"Windows named pipe error: {} (code: {})".format(
binarystr_to_unicode(exc.strerror), exc.winerror
)
)
def log_timeout_error(timeout):
log.error(
"An HTTP request took too long to complete. Retry with --verbose to "
"obtain debug information.\n"
"If you encounter this issue regularly because of slow network "
"conditions, consider setting COMPOSE_HTTP_TIMEOUT to a higher "
"value (current value: %s)." % timeout)
def log_api_error(e, client_version):
explanation = binarystr_to_unicode(e.explanation)
if 'client is newer than server' not in explanation:
log.error(explanation)
return
version = API_VERSION_TO_ENGINE_VERSION.get(client_version)
if not version:
# They've set a custom API version
log.error(explanation)
return
log.error(
"The Docker Engine version is less than the minimum required by "
"Compose. Your current project requires a Docker Engine of "
"version {version} or greater.".format(version=version)
)
def exit_with_error(msg):
log.error(dedent(msg).strip())
raise ConnectionError()
def get_conn_error_message(url):
try:
if find_executable('docker') is None:
return docker_not_found_msg("Couldn't connect to Docker daemon.")
if is_docker_for_mac_installed():
return conn_error_docker_for_mac
if find_executable('docker-machine') is not None:
return conn_error_docker_machine
except UnicodeDecodeError:
# https://github.com/docker/compose/issues/5442
# Ignore the error and print the generic message instead.
pass
return conn_error_generic.format(url=url)
def docker_not_found_msg(problem):
return "{} You might need to install Docker:\n\n{}".format(
problem, docker_install_url())
def docker_install_url():
if is_mac():
return docker_install_url_mac
elif is_ubuntu():
return docker_install_url_ubuntu
elif is_windows():
return docker_install_url_windows
else:
return docker_install_url_generic
docker_install_url_mac = "https://docs.docker.com/engine/installation/mac/"
docker_install_url_ubuntu = "https://docs.docker.com/engine/installation/ubuntulinux/"
docker_install_url_windows = "https://docs.docker.com/engine/installation/windows/"
docker_install_url_generic = "https://docs.docker.com/engine/installation/"
conn_error_docker_machine = """
Couldn't connect to Docker daemon - you might need to run `docker-machine start default`.
"""
conn_error_docker_for_mac = """
Couldn't connect to Docker daemon. You might need to start Docker for Mac.
"""
conn_error_generic = """
Couldn't connect to Docker daemon at {url} - is it running?
If it's at a non-standard location, specify the URL with the DOCKER_HOST environment variable.
"""

View File

@@ -0,0 +1,54 @@
import logging
from shutil import get_terminal_size
import texttable
from compose.cli import colors
def get_tty_width():
try:
# get_terminal_size can't determine the size if compose is piped
# to another command. But in such a case it doesn't make sense to
# try to format the output by terminal size, as this output is
# consumed by another command. So let's pretend we have a huge
# terminal so the output stays single-lined.
width, _ = get_terminal_size(fallback=(999, 0))
return int(width)
except OSError:
return 0
class Formatter:
"""Format tabular data for printing."""
@staticmethod
def table(headers, rows):
table = texttable.Texttable(max_width=get_tty_width())
table.set_cols_dtype(['t' for h in headers])
table.add_rows([headers] + rows)
table.set_deco(table.HEADER)
table.set_chars(['-', '|', '+', '-'])
return table.draw()
class ConsoleWarningFormatter(logging.Formatter):
"""A logging.Formatter which prints WARNING and ERROR messages with
a prefix of the log level, colored appropriately for the log level.
"""
def get_level_message(self, record):
separator = ': '
if record.levelno >= logging.ERROR:
return colors.red(record.levelname) + separator
if record.levelno >= logging.WARNING:
return colors.yellow(record.levelname) + separator
return ''
def format(self, record):
if isinstance(record.msg, bytes):
record.msg = record.msg.decode('utf-8')
message = super().format(record)
return '{}{}'.format(self.get_level_message(record), message)
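
A small sketch of Formatter.table; set_deco(table.HEADER) draws only the rule under the header row (column spacing below is approximate).

print(Formatter.table(
    ['Name', 'State', 'Ports'],
    [['web_1', 'Up', '0.0.0.0:8080->80/tcp'],
     ['db_1', 'Exit 0', '']]))
# Name    State    Ports
# --------------------------------------
# web_1   Up       0.0.0.0:8080->80/tcp
# db_1    Exit 0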

View File

@@ -0,0 +1,255 @@
import _thread as thread
import sys
from collections import namedtuple
from itertools import cycle
from operator import attrgetter
from queue import Empty
from queue import Queue
from threading import Thread
from docker.errors import APIError
from . import colors
from compose.cli.signals import ShutdownException
from compose.utils import split_buffer
class LogPresenter:
def __init__(self, prefix_width, color_func, keep_prefix=True):
self.prefix_width = prefix_width
self.color_func = color_func
self.keep_prefix = keep_prefix
def present(self, container, line):
to_log = '{line}'.format(line=line)
if self.keep_prefix:
prefix = container.name_without_project.ljust(self.prefix_width)
to_log = '{prefix} '.format(prefix=self.color_func(prefix + ' |')) + to_log
return to_log
def build_log_presenters(service_names, monochrome, keep_prefix=True):
"""Return an iterable of functions.
Each function can be used to format the logs output of a container.
"""
prefix_width = max_name_width(service_names)
def no_color(text):
return text
for color_func in cycle([no_color] if monochrome else colors.rainbow()):
yield LogPresenter(prefix_width, color_func, keep_prefix)
def max_name_width(service_names, max_index_width=3):
"""Calculate the maximum width of container names so we can make the log
prefixes line up like so:
db_1 | Listening
web_1 | Listening
"""
return max(len(name) for name in service_names) + max_index_width
class LogPrinter:
"""Print logs from many containers to a single output stream."""
def __init__(self,
containers,
presenters,
event_stream,
output=sys.stdout,
cascade_stop=False,
log_args=None):
self.containers = containers
self.presenters = presenters
self.event_stream = event_stream
self.output = output
self.cascade_stop = cascade_stop
self.log_args = log_args or {}
def run(self):
if not self.containers:
return
queue = Queue()
thread_args = queue, self.log_args
thread_map = build_thread_map(self.containers, self.presenters, thread_args)
start_producer_thread((
thread_map,
self.event_stream,
self.presenters,
thread_args))
for line in consume_queue(queue, self.cascade_stop):
remove_stopped_threads(thread_map)
if self.cascade_stop:
matching_container = [cont.name for cont in self.containers if cont.name == line]
if line in matching_container:
# Returning the name of the container that started the cascade_stop
# so we can return the correct exit code
return line
if not line:
if not thread_map:
# There are no running containers left to tail, so exit
return
# We got an empty line because of a timeout, but there are still
# active containers to tail, so continue
continue
self.write(line)
def write(self, line):
try:
self.output.write(line)
except UnicodeEncodeError:
# This may happen if the user's locale settings don't support UTF-8
# and UTF-8 characters are present in the log line. The following
# will output a "degraded" log with unsupported characters
# replaced by `?`
self.output.write(line.encode('ascii', 'replace').decode())
self.output.flush()
def remove_stopped_threads(thread_map):
for container_id, tailer_thread in list(thread_map.items()):
if not tailer_thread.is_alive():
thread_map.pop(container_id, None)
def build_thread(container, presenter, queue, log_args):
tailer = Thread(
target=tail_container_logs,
args=(container, presenter, queue, log_args))
tailer.daemon = True
tailer.start()
return tailer
def build_thread_map(initial_containers, presenters, thread_args):
return {
container.id: build_thread(container, next(presenters), *thread_args)
# Container order is unspecified, so they are sorted by name in order to make
# container:presenter (log color) assignment deterministic when given a list of containers
# with the same names.
for container in sorted(initial_containers, key=attrgetter('name'))
}
class QueueItem(namedtuple('_QueueItem', 'item is_stop exc')):
@classmethod
def new(cls, item):
return cls(item, None, None)
@classmethod
def exception(cls, exc):
return cls(None, None, exc)
@classmethod
def stop(cls, item=None):
return cls(item, True, None)
def tail_container_logs(container, presenter, queue, log_args):
try:
for item in build_log_generator(container, log_args):
queue.put(QueueItem.new(presenter.present(container, item)))
except Exception as e:
queue.put(QueueItem.exception(e))
return
if log_args.get('follow'):
queue.put(QueueItem.new(presenter.color_func(wait_on_exit(container))))
queue.put(QueueItem.stop(container.name))
def build_log_generator(container, log_args):
# if the container doesn't have a log_stream we need to attach to the
# container before the log printer starts running
if container.log_stream is None:
stream = container.logs(stdout=True, stderr=True, stream=True, **log_args)
else:
stream = container.log_stream
return split_buffer(stream)
def wait_on_exit(container):
try:
exit_code = container.wait()
return "{} exited with code {}\n".format(container.name, exit_code)
except APIError as e:
return "Unexpected API error for {} (HTTP code {})\nResponse body:\n{}\n".format(
container.name, e.response.status_code,
e.response.text or '[empty]'
)
def start_producer_thread(thread_args):
producer = Thread(target=watch_events, args=thread_args)
producer.daemon = True
producer.start()
def watch_events(thread_map, event_stream, presenters, thread_args):
crashed_containers = set()
for event in event_stream:
if event['action'] == 'stop':
thread_map.pop(event['id'], None)
if event['action'] == 'die':
thread_map.pop(event['id'], None)
crashed_containers.add(event['id'])
if event['action'] != 'start':
continue
if event['id'] in thread_map:
if thread_map[event['id']].is_alive():
continue
# Container was stopped and started, we need a new thread
thread_map.pop(event['id'], None)
# Container crashed so we should reattach to it
if event['id'] in crashed_containers:
container = event['container']
if not container.is_restarting:
try:
container.attach_log_stream()
except APIError:
# Just ignore errors when reattaching to already crashed containers
pass
crashed_containers.remove(event['id'])
thread_map[event['id']] = build_thread(
event['container'],
next(presenters),
*thread_args
)
def consume_queue(queue, cascade_stop):
"""Consume the queue by reading lines off of it and yielding them."""
while True:
try:
item = queue.get(timeout=0.1)
except Empty:
yield None
continue
# See https://github.com/docker/compose/issues/189
except thread.error:
raise ShutdownException()
if item.exc:
raise item.exc
if item.is_stop and not cascade_stop:
continue
yield item.item
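
A minimal sketch of the queue protocol between the tailer threads and consume_queue: QueueItem.new wraps a log line, QueueItem.stop marks a finished container, and a get() timeout surfaces as None so the caller can decide whether to keep waiting.

q = Queue()
q.put(QueueItem.new('web_1 | listening on :8000\n'))
q.put(QueueItem.stop('web_1'))

for line in consume_queue(q, cascade_stop=False):
    if line is None:      # queue drained; LogPrinter.run() would keep waiting
        break
    print(line, end='')   # -> web_1 | listening on :8000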

File diff suppressed because it is too large

View File

@@ -0,0 +1,41 @@
import signal
from ..const import IS_WINDOWS_PLATFORM
class ShutdownException(Exception):
pass
class HangUpException(Exception):
pass
def shutdown(signal, frame):
raise ShutdownException()
def set_signal_handler(handler):
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
def set_signal_handler_to_shutdown():
set_signal_handler(shutdown)
def hang_up(signal, frame):
raise HangUpException()
def set_signal_handler_to_hang_up():
# on Windows a ValueError is raised when trying to set a signal handler for SIGHUP
if not IS_WINDOWS_PLATFORM:
signal.signal(signal.SIGHUP, hang_up)
def ignore_sigpipe():
# Restore default behavior for SIGPIPE instead of raising
# an exception when encountered.
if not IS_WINDOWS_PLATFORM:
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
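
A brief sketch of the shutdown flow: install the handler, then treat ShutdownException as the normal exit path of a long-running loop (as compose does around its log-tailing loop).

import time

set_signal_handler_to_shutdown()
try:
    while True:   # stand-in for tailing logs, waiting on containers, etc.
        time.sleep(1)
except ShutdownException:
    print("shutting down cleanly")  # reached on SIGINT/SIGTERM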

View File

@@ -0,0 +1,144 @@
import math
import os
import platform
import ssl
import subprocess
import sys
import distro
import docker
import compose
from ..const import IS_WINDOWS_PLATFORM
def yesno(prompt, default=None):
"""
Prompt the user for a yes or no.
Can optionally specify a default value, which will only be
used if they enter a blank line.
Unrecognised input (anything other than "y", "n", "yes",
"no" or "") will return None.
"""
answer = input(prompt).strip().lower()
if answer == "y" or answer == "yes":
return True
elif answer == "n" or answer == "no":
return False
elif answer == "":
return default
else:
return None
def input(prompt):
"""
Version of input (raw_input in Python 2) which forces a flush of sys.stdout
to avoid problems where the prompt fails to appear due to line buffering.
"""
sys.stdout.write(prompt)
sys.stdout.flush()
return sys.stdin.readline().rstrip('\n')
def call_silently(*args, **kwargs):
"""
Like subprocess.call(), but redirects stdout and stderr to /dev/null.
"""
with open(os.devnull, 'w') as shutup:
try:
return subprocess.call(*args, stdout=shutup, stderr=shutup, **kwargs)
except OSError:
# On Windows, subprocess.call() can still raise exceptions. Normalize
# to POSIXy behaviour by returning a nonzero exit code.
return 1
def is_mac():
return platform.system() == 'Darwin'
def is_ubuntu():
return platform.system() == 'Linux' and distro.linux_distribution()[0] == 'Ubuntu'
def is_windows():
return IS_WINDOWS_PLATFORM
def get_version_info(scope):
versioninfo = 'docker-compose version {}, build {}'.format(
compose.__version__,
get_build_version())
if scope == 'compose':
return versioninfo
if scope == 'full':
return (
"{}\n"
"docker-py version: {}\n"
"{} version: {}\n"
"OpenSSL version: {}"
).format(
versioninfo,
docker.version,
platform.python_implementation(),
platform.python_version(),
ssl.OPENSSL_VERSION)
raise ValueError("{} is not a valid version scope".format(scope))
def get_build_version():
filename = os.path.join(os.path.dirname(compose.__file__), 'GITSHA')
if not os.path.exists(filename):
return 'unknown'
with open(filename) as fh:
return fh.read().strip()
def is_docker_for_mac_installed():
return is_mac() and os.path.isdir('/Applications/Docker.app')
def generate_user_agent():
parts = [
"docker-compose/{}".format(compose.__version__),
"docker-py/{}".format(docker.__version__),
]
try:
p_system = platform.system()
p_release = platform.release()
except OSError:
pass
else:
parts.append("{}/{}".format(p_system, p_release))
return " ".join(parts)
def human_readable_file_size(size):
suffixes = ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', ]
order = int(math.log(size, 1000)) if size else 0
if order >= len(suffixes):
order = len(suffixes) - 1
return '{:.4g} {}'.format(
size / pow(10, order * 3),
suffixes[order]
)
def binarystr_to_unicode(s):
if not isinstance(s, bytes):
return s
if IS_WINDOWS_PLATFORM:
try:
return s.decode('windows-1250')
except UnicodeDecodeError:
pass
return s.decode('utf-8', 'replace')
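
Worked values for human_readable_file_size, which uses decimal (power-of-1000) units as the suffix list above suggests:

human_readable_file_size(0)           # -> '0 B'
human_readable_file_size(999)         # -> '999 B'
human_readable_file_size(1234)        # -> '1.234 kB'
human_readable_file_size(5 * 10**9)   # -> '5 GB'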

View File

@@ -0,0 +1,55 @@
import functools
import logging
import pprint
from itertools import chain
def format_call(args, kwargs):
args = (repr(a) for a in args)
kwargs = ("{!s}={!r}".format(*item) for item in kwargs.items())
return "({})".format(", ".join(chain(args, kwargs)))
def format_return(result, max_lines):
if isinstance(result, (list, tuple, set)):
return "({} with {} items)".format(type(result).__name__, len(result))
if result:
lines = pprint.pformat(result).split('\n')
extra = '\n...' if len(lines) > max_lines else ''
return '\n'.join(lines[:max_lines]) + extra
return result
class VerboseProxy:
"""Proxy all function calls to another class and log method name, arguments
and return values for each call.
"""
def __init__(self, obj_name, obj, log_name=None, max_lines=10):
self.obj_name = obj_name
self.obj = obj
self.max_lines = max_lines
self.log = logging.getLogger(log_name or __name__)
def __getattr__(self, name):
attr = getattr(self.obj, name)
if not callable(attr):
return attr
return functools.partial(self.proxy_callable, name)
def proxy_callable(self, call_name, *args, **kwargs):
self.log.info("%s %s <- %s",
self.obj_name,
call_name,
format_call(args, kwargs))
result = getattr(self.obj, call_name)(*args, **kwargs)
self.log.info("%s %s -> %s",
self.obj_name,
call_name,
format_return(result, self.max_lines))
return result
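
A short sketch of VerboseProxy: any object can be wrapped, and every method call is logged on the way in and out. compose uses this to wrap the docker client when --verbose is set; the dict below is just a stand-in.

import logging
logging.basicConfig(level=logging.INFO)

proxy = VerboseProxy('docker', {'a': 1})
proxy.keys()
# INFO ... docker keys <- ()
# INFO ... docker keys -> dict_keys(['a'])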

View File

@@ -0,0 +1,12 @@
# flake8: noqa
from . import environment
from .config import ConfigurationError
from .config import DOCKER_CONFIG_KEYS
from .config import find
from .config import is_url
from .config import load
from .config import merge_environment
from .config import merge_labels
from .config import parse_environment
from .config import parse_labels
from .config import resolve_build_args

View File

@@ -0,0 +1,812 @@
{
"$schema": "http://json-schema.org/draft/2019-09/schema#",
"id": "compose_spec.json",
"type": "object",
"title": "Compose Specification",
"description": "The Compose file is a YAML file defining a multi-containers based application.",
"properties": {
"version": {
"type": "string",
"description": "Version of the Compose specification used. Tools not implementing required version MUST reject the configuration file."
},
"services": {
"id": "#/properties/services",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/service"
}
},
"additionalProperties": false
},
"networks": {
"id": "#/properties/networks",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/network"
}
}
},
"volumes": {
"id": "#/properties/volumes",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/volume"
}
},
"additionalProperties": false
},
"secrets": {
"id": "#/properties/secrets",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/secret"
}
},
"additionalProperties": false
},
"configs": {
"id": "#/properties/configs",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/config"
}
},
"additionalProperties": false
}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false,
"definitions": {
"service": {
"id": "#/definitions/service",
"type": "object",
"properties": {
"deploy": {"$ref": "#/definitions/deployment"},
"build": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"context": {"type": "string"},
"dockerfile": {"type": "string"},
"args": {"$ref": "#/definitions/list_or_dict"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"cache_from": {"type": "array", "items": {"type": "string"}},
"network": {"type": "string"},
"target": {"type": "string"},
"shm_size": {"type": ["integer", "string"]},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"},
"isolation": {"type": "string"}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
}
]
},
"blkio_config": {
"type": "object",
"properties": {
"device_read_bps": {
"type": "array",
"items": {"$ref": "#/definitions/blkio_limit"}
},
"device_read_iops": {
"type": "array",
"items": {"$ref": "#/definitions/blkio_limit"}
},
"device_write_bps": {
"type": "array",
"items": {"$ref": "#/definitions/blkio_limit"}
},
"device_write_iops": {
"type": "array",
"items": {"$ref": "#/definitions/blkio_limit"}
},
"weight": {"type": "integer"},
"weight_device": {
"type": "array",
"items": {"$ref": "#/definitions/blkio_weight"}
}
},
"additionalProperties": false
},
"cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cgroup_parent": {"type": "string"},
"command": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"configs": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
}
]
}
},
"container_name": {"type": "string"},
"cpu_count": {"type": "integer", "minimum": 0},
"cpu_percent": {"type": "integer", "minimum": 0, "maximum": 100},
"cpu_shares": {"type": ["number", "string"]},
"cpu_quota": {"type": ["number", "string"]},
"cpu_period": {"type": ["number", "string"]},
"cpu_rt_period": {"type": ["number", "string"]},
"cpu_rt_runtime": {"type": ["number", "string"]},
"cpus": {"type": ["number", "string"]},
"cpuset": {"type": "string"},
"credential_spec": {
"type": "object",
"properties": {
"config": {"type": "string"},
"file": {"type": "string"},
"registry": {"type": "string"}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
},
"depends_on": {
"oneOf": [
{"$ref": "#/definitions/list_of_strings"},
{
"type": "object",
"additionalProperties": false,
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"type": "object",
"additionalProperties": false,
"properties": {
"condition": {
"type": "string",
"enum": ["service_started", "service_healthy", "service_completed_successfully"]
}
},
"required": ["condition"]
}
}
}
]
},
"device_cgroup_rules": {"$ref": "#/definitions/list_of_strings"},
"devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"dns": {"$ref": "#/definitions/string_or_list"},
"dns_opt": {"type": "array","items": {"type": "string"}, "uniqueItems": true},
"dns_search": {"$ref": "#/definitions/string_or_list"},
"domainname": {"type": "string"},
"entrypoint": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"env_file": {"$ref": "#/definitions/string_or_list"},
"environment": {"$ref": "#/definitions/list_or_dict"},
"expose": {
"type": "array",
"items": {
"type": ["string", "number"],
"format": "expose"
},
"uniqueItems": true
},
"extends": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"service": {"type": "string"},
"file": {"type": "string"}
},
"required": ["service"],
"additionalProperties": false
}
]
},
"external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"},
"group_add": {
"type": "array",
"items": {
"type": ["string", "number"]
},
"uniqueItems": true
},
"healthcheck": {"$ref": "#/definitions/healthcheck"},
"hostname": {"type": "string"},
"image": {"type": "string"},
"init": {"type": "boolean"},
"ipc": {"type": "string"},
"isolation": {"type": "string"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"logging": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"options": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number", "null"]}
}
}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
},
"mac_address": {"type": "string"},
"mem_limit": {"type": ["number", "string"]},
"mem_reservation": {"type": ["string", "integer"]},
"mem_swappiness": {"type": "integer"},
"memswap_limit": {"type": ["number", "string"]},
"network_mode": {"type": "string"},
"networks": {
"oneOf": [
{"$ref": "#/definitions/list_of_strings"},
{
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"oneOf": [
{
"type": "object",
"properties": {
"aliases": {"$ref": "#/definitions/list_of_strings"},
"ipv4_address": {"type": "string"},
"ipv6_address": {"type": "string"},
"link_local_ips": {"$ref": "#/definitions/list_of_strings"},
"priority": {"type": "number"}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
},
{"type": "null"}
]
}
},
"additionalProperties": false
}
]
},
"oom_kill_disable": {"type": "boolean"},
"oom_score_adj": {"type": "integer", "minimum": -1000, "maximum": 1000},
"pid": {"type": ["string", "null"]},
"pids_limit": {"type": ["number", "string"]},
"platform": {"type": "string"},
"ports": {
"type": "array",
"items": {
"oneOf": [
{"type": "number", "format": "ports"},
{"type": "string", "format": "ports"},
{
"type": "object",
"properties": {
"mode": {"type": "string"},
"target": {"type": "integer"},
"published": {"type": "integer"},
"protocol": {"type": "string"}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
}
]
},
"uniqueItems": true
},
"privileged": {"type": "boolean"},
"profiles": {"$ref": "#/definitions/list_of_strings"},
"pull_policy": {"type": "string", "enum": [
"always", "never", "if_not_present", "build"
]},
"read_only": {"type": "boolean"},
"restart": {"type": "string"},
"runtime": {
"type": "string"
},
"scale": {
"type": "integer"
},
"security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"shm_size": {"type": ["number", "string"]},
"secrets": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"properties": {
"source": {"type": "string"},
"target": {"type": "string"},
"uid": {"type": "string"},
"gid": {"type": "string"},
"mode": {"type": "number"}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
}
]
}
},
"sysctls": {"$ref": "#/definitions/list_or_dict"},
"stdin_open": {"type": "boolean"},
"stop_grace_period": {"type": "string", "format": "duration"},
"stop_signal": {"type": "string"},
"storage_opt": {"type": "object"},
"tmpfs": {"$ref": "#/definitions/string_or_list"},
"tty": {"type": "boolean"},
"ulimits": {
"type": "object",
"patternProperties": {
"^[a-z]+$": {
"oneOf": [
{"type": "integer"},
{
"type": "object",
"properties": {
"hard": {"type": "integer"},
"soft": {"type": "integer"}
},
"required": ["soft", "hard"],
"additionalProperties": false,
"patternProperties": {"^x-": {}}
}
]
}
}
},
"user": {"type": "string"},
"userns_mode": {"type": "string"},
"volumes": {
"type": "array",
"items": {
"oneOf": [
{"type": "string"},
{
"type": "object",
"required": ["type"],
"properties": {
"type": {"type": "string"},
"source": {"type": "string"},
"target": {"type": "string"},
"read_only": {"type": "boolean"},
"consistency": {"type": "string"},
"bind": {
"type": "object",
"properties": {
"propagation": {"type": "string"}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
},
"volume": {
"type": "object",
"properties": {
"nocopy": {"type": "boolean"}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
},
"tmpfs": {
"type": "object",
"properties": {
"size": {
"type": "integer",
"minimum": 0
}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
}
]
},
"uniqueItems": true
},
"volumes_from": {
"type": "array",
"items": {"type": "string"},
"uniqueItems": true
},
"working_dir": {"type": "string"}
},
"patternProperties": {"^x-": {}},
"additionalProperties": false
},
"healthcheck": {
"id": "#/definitions/healthcheck",
"type": "object",
"properties": {
"disable": {"type": "boolean"},
"interval": {"type": "string", "format": "duration"},
"retries": {"type": "number"},
"test": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"timeout": {"type": "string", "format": "duration"},
"start_period": {"type": "string", "format": "duration"}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
},
"deployment": {
"id": "#/definitions/deployment",
"type": ["object", "null"],
"properties": {
"mode": {"type": "string"},
"endpoint_mode": {"type": "string"},
"replicas": {"type": "integer"},
"labels": {"$ref": "#/definitions/list_or_dict"},
"rollback_config": {
"type": "object",
"properties": {
"parallelism": {"type": "integer"},
"delay": {"type": "string", "format": "duration"},
"failure_action": {"type": "string"},
"monitor": {"type": "string", "format": "duration"},
"max_failure_ratio": {"type": "number"},
"order": {"type": "string", "enum": [
"start-first", "stop-first"
]}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
},
"update_config": {
"type": "object",
"properties": {
"parallelism": {"type": "integer"},
"delay": {"type": "string", "format": "duration"},
"failure_action": {"type": "string"},
"monitor": {"type": "string", "format": "duration"},
"max_failure_ratio": {"type": "number"},
"order": {"type": "string", "enum": [
"start-first", "stop-first"
]}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
},
"resources": {
"type": "object",
"properties": {
"limits": {
"type": "object",
"properties": {
"cpus": {"type": ["number", "string"]},
"memory": {"type": "string"}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
},
"reservations": {
"type": "object",
"properties": {
"cpus": {"type": ["number", "string"]},
"memory": {"type": "string"},
"generic_resources": {"$ref": "#/definitions/generic_resources"},
"devices": {"$ref": "#/definitions/devices"}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
},
"restart_policy": {
"type": "object",
"properties": {
"condition": {"type": "string"},
"delay": {"type": "string", "format": "duration"},
"max_attempts": {"type": "integer"},
"window": {"type": "string", "format": "duration"}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
},
"placement": {
"type": "object",
"properties": {
"constraints": {"type": "array", "items": {"type": "string"}},
"preferences": {
"type": "array",
"items": {
"type": "object",
"properties": {
"spread": {"type": "string"}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
}
},
"max_replicas_per_node": {"type": "integer"}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
},
"generic_resources": {
"id": "#/definitions/generic_resources",
"type": "array",
"items": {
"type": "object",
"properties": {
"discrete_resource_spec": {
"type": "object",
"properties": {
"kind": {"type": "string"},
"value": {"type": "number"}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
}
},
"devices": {
"id": "#/definitions/devices",
"type": "array",
"items": {
"type": "object",
"properties": {
"capabilities": {"$ref": "#/definitions/list_of_strings"},
"count": {"type": ["string", "integer"]},
"device_ids": {"$ref": "#/definitions/list_of_strings"},
"driver":{"type": "string"},
"options":{"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
}
},
"network": {
"id": "#/definitions/network",
"type": ["object", "null"],
"properties": {
"name": {"type": "string"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"ipam": {
"type": "object",
"properties": {
"driver": {"type": "string"},
"config": {
"type": "array",
"items": {
"type": "object",
"properties": {
"subnet": {"type": "string", "format": "subnet_ip_address"},
"ip_range": {"type": "string"},
"gateway": {"type": "string"},
"aux_addresses": {
"type": "object",
"additionalProperties": false,
"patternProperties": {"^.+$": {"type": "string"}}
}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
}
},
"options": {
"type": "object",
"additionalProperties": false,
"patternProperties": {"^.+$": {"type": "string"}}
}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {
"deprecated": true,
"type": "string"
}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
},
"internal": {"type": "boolean"},
"enable_ipv6": {"type": "boolean"},
"attachable": {"type": "boolean"},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
},
"volume": {
"id": "#/definitions/volume",
"type": ["object", "null"],
"properties": {
"name": {"type": "string"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {
"deprecated": true,
"type": "string"
}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
},
"labels": {"$ref": "#/definitions/list_or_dict"}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
},
"secret": {
"id": "#/definitions/secret",
"type": "object",
"properties": {
"name": {"type": "string"},
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {"type": "string"}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"},
"driver": {"type": "string"},
"driver_opts": {
"type": "object",
"patternProperties": {
"^.+$": {"type": ["string", "number"]}
}
},
"template_driver": {"type": "string"}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
},
"config": {
"id": "#/definitions/config",
"type": "object",
"properties": {
"name": {"type": "string"},
"file": {"type": "string"},
"external": {
"type": ["boolean", "object"],
"properties": {
"name": {
"deprecated": true,
"type": "string"
}
}
},
"labels": {"$ref": "#/definitions/list_or_dict"},
"template_driver": {"type": "string"}
},
"additionalProperties": false,
"patternProperties": {"^x-": {}}
},
"string_or_list": {
"oneOf": [
{"type": "string"},
{"$ref": "#/definitions/list_of_strings"}
]
},
"list_of_strings": {
"type": "array",
"items": {"type": "string"},
"uniqueItems": true
},
"list_or_dict": {
"oneOf": [
{
"type": "object",
"patternProperties": {
".+": {
"type": ["string", "number", "null"]
}
},
"additionalProperties": false
},
{"type": "array", "items": {"type": "string"}, "uniqueItems": true}
]
},
"blkio_limit": {
"type": "object",
"properties": {
"path": {"type": "string"},
"rate": {"type": ["integer", "string"]}
},
"additionalProperties": false
},
"blkio_weight": {
"type": "object",
"properties": {
"path": {"type": "string"},
"weight": {"type": "integer"}
},
"additionalProperties": false
},
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
"anyOf": [
{"required": ["build"]},
{"required": ["image"]}
],
"properties": {
"build": {
"required": ["context"]
}
}
}
}
}
}
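
A hedged sketch of validating a configuration dict against this schema with the third-party jsonschema package (file path and service values are hypothetical; compose itself layers extra format checkers on top of plain validation).

import json
import jsonschema  # assumption: the jsonschema package is installed

with open('compose_spec.json') as f:
    schema = json.load(f)

config = {
    "services": {
        "web": {"image": "nginx:alpine", "ports": ["8080:80"]}
    }
}
jsonschema.validate(config, schema)  # raises ValidationError if invalid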

File diff suppressed because it is too large

View File

@@ -0,0 +1,203 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"id": "config_schema_v1.json",
"type": "object",
"patternProperties": {
"^[a-zA-Z0-9._-]+$": {
"$ref": "#/definitions/service"
}
},
"additionalProperties": false,
"definitions": {
"service": {
"id": "#/definitions/service",
"type": "object",
"properties": {
"build": {"type": "string"},
"cap_add": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cap_drop": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"cgroup_parent": {"type": "string"},
"command": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"container_name": {"type": "string"},
"cpu_shares": {"type": ["number", "string"]},
"cpu_quota": {"type": ["number", "string"]},
"cpuset": {"type": "string"},
"devices": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"dns": {"$ref": "#/definitions/string_or_list"},
"dns_search": {"$ref": "#/definitions/string_or_list"},
"dockerfile": {"type": "string"},
"domainname": {"type": "string"},
"entrypoint": {
"oneOf": [
{"type": "string"},
{"type": "array", "items": {"type": "string"}}
]
},
"env_file": {"$ref": "#/definitions/string_or_list"},
"environment": {"$ref": "#/definitions/list_or_dict"},
"expose": {
"type": "array",
"items": {
"type": ["string", "number"],
"format": "expose"
},
"uniqueItems": true
},
"extends": {
"oneOf": [
{
"type": "string"
},
{
"type": "object",
"properties": {
"service": {"type": "string"},
"file": {"type": "string"}
},
"required": ["service"],
"additionalProperties": false
}
]
},
"extra_hosts": {"$ref": "#/definitions/list_or_dict"},
"external_links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"hostname": {"type": "string"},
"image": {"type": "string"},
"ipc": {"type": "string"},
"labels": {"$ref": "#/definitions/labels"},
"links": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"log_driver": {"type": "string"},
"log_opt": {"type": "object"},
"mac_address": {"type": "string"},
"mem_limit": {"type": ["number", "string"]},
"memswap_limit": {"type": ["number", "string"]},
"mem_swappiness": {"type": "integer"},
"net": {"type": "string"},
"pid": {"type": ["string", "null"]},
"ports": {
"type": "array",
"items": {
"type": ["string", "number"],
"format": "ports"
},
"uniqueItems": true
},
"privileged": {"type": "boolean"},
"read_only": {"type": "boolean"},
"restart": {"type": "string"},
"security_opt": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"shm_size": {"type": ["number", "string"]},
"stdin_open": {"type": "boolean"},
"stop_signal": {"type": "string"},
"tty": {"type": "boolean"},
"ulimits": {
"type": "object",
"patternProperties": {
"^[a-z]+$": {
"oneOf": [
{"type": "integer"},
{
"type":"object",
"properties": {
"hard": {"type": "integer"},
"soft": {"type": "integer"}
},
"required": ["soft", "hard"],
"additionalProperties": false
}
]
}
}
},
"user": {"type": "string"},
"volumes": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"volume_driver": {"type": "string"},
"volumes_from": {"type": "array", "items": {"type": "string"}, "uniqueItems": true},
"working_dir": {"type": "string"}
},
"dependencies": {
"memswap_limit": ["mem_limit"]
},
"additionalProperties": false
},
"string_or_list": {
"oneOf": [
{"type": "string"},
{"$ref": "#/definitions/list_of_strings"}
]
},
"list_of_strings": {
"type": "array",
"items": {"type": "string"},
"uniqueItems": true
},
"list_or_dict": {
"oneOf": [
{
"type": "object",
"patternProperties": {
".+": {
"type": ["string", "number", "null"]
}
},
"additionalProperties": false
},
{"type": "array", "items": {"type": "string"}, "uniqueItems": true}
]
},
"labels": {
"oneOf": [
{
"type": "object",
"patternProperties": {
".+": {
"type": "string"
}
},
"additionalProperties": false
},
{"type": "array", "items": {"type": "string"}, "uniqueItems": true}
]
},
"constraints": {
"service": {
"id": "#/definitions/constraints/service",
"anyOf": [
{
"required": ["build"],
"not": {"required": ["image"]}
},
{
"required": ["image"],
"not": {"anyOf": [
{"required": ["build"]},
{"required": ["dockerfile"]}
]}
}
]
}
}
}
}

View File

@@ -0,0 +1,126 @@
import logging
import os
import re
import dotenv
from ..const import IS_WINDOWS_PLATFORM
from .errors import ConfigurationError
from .errors import EnvFileNotFound
log = logging.getLogger(__name__)
def split_env(env):
if isinstance(env, bytes):
env = env.decode('utf-8', 'replace')
key = value = None
if '=' in env:
key, value = env.split('=', 1)
else:
key = env
if re.search(r'\s', key):
raise ConfigurationError(
"environment variable name '{}' may not contain whitespace.".format(key)
)
return key, value
def env_vars_from_file(filename, interpolate=True):
"""
Read in a line-delimited file of environment variables.
"""
if not os.path.exists(filename):
raise EnvFileNotFound("Couldn't find env file: {}".format(filename))
elif not os.path.isfile(filename):
raise EnvFileNotFound("{} is not a file.".format(filename))
env = dotenv.dotenv_values(dotenv_path=filename, encoding='utf-8-sig', interpolate=interpolate)
for k, v in env.items():
env[k] = v if interpolate else v.replace('$', '$$')
return env
class Environment(dict):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.missing_keys = []
self.silent = False
@classmethod
def from_env_file(cls, base_dir, env_file=None):
def _initialize():
result = cls()
if base_dir is None:
return result
if env_file:
env_file_path = os.path.join(os.getcwd(), env_file)
return cls(env_vars_from_file(env_file_path))
env_file_path = os.path.join(base_dir, '.env')
try:
return cls(env_vars_from_file(env_file_path))
except EnvFileNotFound:
pass
return result
instance = _initialize()
instance.update(os.environ)
return instance
@classmethod
def from_command_line(cls, parsed_env_opts):
result = cls()
for k, v in parsed_env_opts.items():
# Values from the command line take priority, unless they're unset,
# in which case they fall back to the system's environment
if v is None and k in os.environ:
result[k] = os.environ[k]
else:
result[k] = v
return result
def __getitem__(self, key):
try:
return super().__getitem__(key)
except KeyError:
if IS_WINDOWS_PLATFORM:
try:
return super().__getitem__(key.upper())
except KeyError:
pass
if not self.silent and key not in self.missing_keys:
log.warning(
"The {} variable is not set. Defaulting to a blank string."
.format(key)
)
self.missing_keys.append(key)
return ""
def __contains__(self, key):
result = super().__contains__(key)
if IS_WINDOWS_PLATFORM:
return (
result or super().__contains__(key.upper())
)
return result
def get(self, key, *args, **kwargs):
if IS_WINDOWS_PLATFORM:
return super().get(
key,
super().get(key.upper(), *args, **kwargs)
)
return super().get(key, *args, **kwargs)
def get_boolean(self, key, default=False):
# Convert a value to a boolean using "common sense" rules.
# Unset, empty, "0" and "false" (i-case) yield False.
# All other values yield True.
value = self.get(key)
if not value:
return default
if value.lower() in ['0', 'false']:
return False
return True
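
A small sketch of the lookup rules above: get_boolean's "common sense" coercion, and the warn-and-default behavior of __getitem__ (variable names hypothetical).

env = Environment({'DOCKER_TLS_VERIFY': '1', 'DEBUG': 'false'})
env.get_boolean('DOCKER_TLS_VERIFY')    # -> True
env.get_boolean('DEBUG')                # -> False ('0' and 'false' are falsy)
env.get_boolean('UNSET', default=True)  # -> True (falls back to the default)
env['MISSING']  # logs "The MISSING variable is not set. Defaulting to a blank
                # string." and returns ''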

View File

@@ -0,0 +1,55 @@
VERSION_EXPLANATION = (
'You might be seeing this error because you\'re using the wrong Compose file version. '
'Either specify a supported version (e.g "2.2" or "3.3") and place '
'your service definitions under the `services` key, or omit the `version` key '
'and place your service definitions at the root of the file to use '
'version 1.\nFor more on the Compose file format versions, see '
'https://docs.docker.com/compose/compose-file/')
class ConfigurationError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class EnvFileNotFound(ConfigurationError):
pass
class DependencyError(ConfigurationError):
pass
class CircularReference(ConfigurationError):
def __init__(self, trail):
self.trail = trail
@property
def msg(self):
lines = [
"{} in {}".format(service_name, filename)
for (filename, service_name) in self.trail
]
return "Circular reference:\n {}".format("\n extends ".join(lines))
class ComposeFileNotFound(ConfigurationError):
def __init__(self, supported_filenames):
        super().__init__("""
        Can't find a suitable configuration file in this directory or any
        parent. Are you in the right directory?

        Supported filenames: %s
        """ % ", ".join(supported_filenames))
class DuplicateOverrideFileFound(ConfigurationError):
def __init__(self, override_filenames):
self.override_filenames = override_filenames
super().__init__(
"Multiple override files found: {}. You may only use a single "
"override file.".format(", ".join(override_filenames))
)
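
A small sketch of how CircularReference renders its trail of (filename, service) pairs into the final message:

from compose.config.errors import CircularReference

err = CircularReference([
    ('docker-compose.yml', 'web'),
    ('common.yml', 'base'),
    ('docker-compose.yml', 'web'),
])
print(err.msg)
# Circular reference:
#   web in docker-compose.yml
#   extends base in common.yml
#   extends web in docker-compose.yml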

View File

@@ -0,0 +1,296 @@
import logging
import re
from string import Template
from .errors import ConfigurationError
from compose.const import COMPOSEFILE_V1 as V1
from compose.utils import parse_bytes
from compose.utils import parse_nanoseconds_int
log = logging.getLogger(__name__)
class Interpolator:
def __init__(self, templater, mapping):
self.templater = templater
self.mapping = mapping
def interpolate(self, string):
try:
return self.templater(string).substitute(self.mapping)
except ValueError:
raise InvalidInterpolation(string)
def interpolate_environment_variables(version, config, section, environment):
if version == V1:
interpolator = Interpolator(Template, environment)
else:
interpolator = Interpolator(TemplateWithDefaults, environment)
def process_item(name, config_dict):
return {
key: interpolate_value(name, key, val, section, interpolator)
for key, val in (config_dict or {}).items()
}
return {
name: process_item(name, config_dict or {})
for name, config_dict in config.items()
}
def get_config_path(config_key, section, name):
return '{}/{}/{}'.format(section, name, config_key)
def interpolate_value(name, config_key, value, section, interpolator):
try:
return recursive_interpolate(value, interpolator, get_config_path(config_key, section, name))
except InvalidInterpolation as e:
raise ConfigurationError(
'Invalid interpolation format for "{config_key}" option '
'in {section} "{name}": "{string}"'.format(
config_key=config_key,
name=name,
section=section,
string=e.string))
except UnsetRequiredSubstitution as e:
raise ConfigurationError(
'Missing mandatory value for "{config_key}" option interpolating {value} '
'in {section} "{name}": {err}'.format(config_key=config_key,
value=value,
name=name,
section=section,
err=e.err)
)
def recursive_interpolate(obj, interpolator, config_path):
def append(config_path, key):
return '{}/{}'.format(config_path, key)
if isinstance(obj, str):
return converter.convert(config_path, interpolator.interpolate(obj))
if isinstance(obj, dict):
return {
key: recursive_interpolate(val, interpolator, append(config_path, key))
for key, val in obj.items()
}
if isinstance(obj, list):
return [recursive_interpolate(val, interpolator, config_path) for val in obj]
return converter.convert(config_path, obj)
class TemplateWithDefaults(Template):
pattern = r"""
{delim}(?:
(?P<escaped>{delim}) |
(?P<named>{id}) |
{{(?P<braced>{bid})}} |
(?P<invalid>)
)
""".format(
delim=re.escape('$'),
id=r'[_a-z][_a-z0-9]*',
bid=r'[_a-z][_a-z0-9]*(?:(?P<sep>:?[-?])[^}]*)?',
)
@staticmethod
def process_braced_group(braced, sep, mapping):
if ':-' == sep:
var, _, default = braced.partition(':-')
return mapping.get(var) or default
elif '-' == sep:
var, _, default = braced.partition('-')
return mapping.get(var, default)
elif ':?' == sep:
var, _, err = braced.partition(':?')
result = mapping.get(var)
if not result:
err = err or var
raise UnsetRequiredSubstitution(err)
return result
elif '?' == sep:
var, _, err = braced.partition('?')
if var in mapping:
return mapping.get(var)
err = err or var
raise UnsetRequiredSubstitution(err)
# Modified from python2.7/string.py
def substitute(self, mapping):
# Helper function for .sub()
def convert(mo):
named = mo.group('named') or mo.group('braced')
braced = mo.group('braced')
if braced is not None:
sep = mo.group('sep')
if sep:
return self.process_braced_group(braced, sep, mapping)
if named is not None:
val = mapping[named]
if isinstance(val, bytes):
val = val.decode('utf-8')
return '{}'.format(val)
if mo.group('escaped') is not None:
return self.delimiter
if mo.group('invalid') is not None:
self._invalid(mo)
raise ValueError('Unrecognized named group in pattern',
self.pattern)
return self.pattern.sub(convert, self.template)
class InvalidInterpolation(Exception):
def __init__(self, string):
self.string = string
class UnsetRequiredSubstitution(Exception):
def __init__(self, custom_err_msg):
self.err = custom_err_msg
PATH_JOKER = '[^/]+'
FULL_JOKER = '.+'
def re_path(*args):
return re.compile('^{}$'.format('/'.join(args)))
def re_path_basic(section, name):
return re_path(section, PATH_JOKER, name)
def service_path(*args):
return re_path('service', PATH_JOKER, *args)
def to_boolean(s):
if not isinstance(s, str):
return s
s = s.lower()
if s in ['y', 'yes', 'true', 'on']:
return True
elif s in ['n', 'no', 'false', 'off']:
return False
raise ValueError('"{}" is not a valid boolean value'.format(s))
def to_int(s):
if not isinstance(s, str):
return s
    # We must be able to handle octal representation, notably for `mode` values
if re.match('^0[0-9]+$', s.strip()):
s = '0o' + s[1:]
try:
return int(s, base=0)
except ValueError:
raise ValueError('"{}" is not a valid integer'.format(s))
def to_float(s):
if not isinstance(s, str):
return s
try:
return float(s)
except ValueError:
raise ValueError('"{}" is not a valid float'.format(s))
def to_str(o):
if isinstance(o, (bool, float, int)):
return '{}'.format(o)
return o
def bytes_to_int(s):
v = parse_bytes(s)
if v is None:
raise ValueError('"{}" is not a valid byte value'.format(s))
return v
def to_microseconds(v):
if not isinstance(v, str):
return v
return int(parse_nanoseconds_int(v) / 1000)
class ConversionMap:
map = {
service_path('blkio_config', 'weight'): to_int,
service_path('blkio_config', 'weight_device', 'weight'): to_int,
service_path('build', 'labels', FULL_JOKER): to_str,
service_path('cpus'): to_float,
service_path('cpu_count'): to_int,
service_path('cpu_quota'): to_microseconds,
service_path('cpu_period'): to_microseconds,
service_path('cpu_rt_period'): to_microseconds,
service_path('cpu_rt_runtime'): to_microseconds,
service_path('configs', 'mode'): to_int,
service_path('secrets', 'mode'): to_int,
service_path('healthcheck', 'retries'): to_int,
service_path('healthcheck', 'disable'): to_boolean,
service_path('deploy', 'labels', PATH_JOKER): to_str,
service_path('deploy', 'replicas'): to_int,
service_path('deploy', 'placement', 'max_replicas_per_node'): to_int,
service_path('deploy', 'resources', 'limits', "cpus"): to_float,
service_path('deploy', 'update_config', 'parallelism'): to_int,
service_path('deploy', 'update_config', 'max_failure_ratio'): to_float,
service_path('deploy', 'rollback_config', 'parallelism'): to_int,
service_path('deploy', 'rollback_config', 'max_failure_ratio'): to_float,
service_path('deploy', 'restart_policy', 'max_attempts'): to_int,
service_path('mem_swappiness'): to_int,
service_path('labels', FULL_JOKER): to_str,
service_path('oom_kill_disable'): to_boolean,
service_path('oom_score_adj'): to_int,
service_path('ports', 'target'): to_int,
service_path('ports', 'published'): to_int,
service_path('scale'): to_int,
service_path('ulimits', PATH_JOKER): to_int,
service_path('ulimits', PATH_JOKER, 'soft'): to_int,
service_path('ulimits', PATH_JOKER, 'hard'): to_int,
service_path('privileged'): to_boolean,
service_path('read_only'): to_boolean,
service_path('stdin_open'): to_boolean,
service_path('tty'): to_boolean,
service_path('volumes', 'read_only'): to_boolean,
service_path('volumes', 'volume', 'nocopy'): to_boolean,
service_path('volumes', 'tmpfs', 'size'): bytes_to_int,
re_path_basic('network', 'attachable'): to_boolean,
re_path_basic('network', 'external'): to_boolean,
re_path_basic('network', 'internal'): to_boolean,
re_path('network', PATH_JOKER, 'labels', FULL_JOKER): to_str,
re_path_basic('volume', 'external'): to_boolean,
re_path('volume', PATH_JOKER, 'labels', FULL_JOKER): to_str,
re_path_basic('secret', 'external'): to_boolean,
re_path('secret', PATH_JOKER, 'labels', FULL_JOKER): to_str,
re_path_basic('config', 'external'): to_boolean,
re_path('config', PATH_JOKER, 'labels', FULL_JOKER): to_str,
}
def convert(self, path, value):
for rexp in self.map.keys():
if rexp.match(path):
try:
return self.map[rexp](value)
except ValueError as e:
raise ConfigurationError(
'Error while attempting to convert {} to appropriate type: {}'.format(
path.replace('/', '.'), e
)
)
return value
converter = ConversionMap()
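
A minimal sketch of the extended ${VAR...} forms handled by TemplateWithDefaults, wired through Interpolator with a plain dict as the mapping:

from compose.config.interpolation import Interpolator, TemplateWithDefaults

interp = Interpolator(TemplateWithDefaults, {'TAG': '1.29', 'EMPTY': ''})

print(interp.interpolate('app:${TAG}'))         # app:1.29
print(interp.interpolate('${MISSING-latest}'))  # latest (unset -> default)
print(interp.interpolate('${EMPTY:-latest}'))   # latest (:- treats empty as unset)
print(interp.interpolate('$$TAG'))              # $TAG (escaped dollar)
# '${MISSING:?no tag}' would raise UnsetRequiredSubstitution('no tag')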

View File

@@ -0,0 +1,149 @@
import yaml
from compose.config import types
from compose.const import COMPOSE_SPEC as VERSION
from compose.const import COMPOSEFILE_V1 as V1
def serialize_config_type(dumper, data):
representer = dumper.represent_str
return representer(data.repr())
def serialize_dict_type(dumper, data):
return dumper.represent_dict(data.repr())
def serialize_string(dumper, data):
""" Ensure boolean-like strings are quoted in the output """
representer = dumper.represent_str
if isinstance(data, bytes):
data = data.decode('utf-8')
if data.lower() in ('y', 'n', 'yes', 'no', 'on', 'off', 'true', 'false'):
        # Empirically only y/n appears to be an issue, but this might change
        # depending on which PyYAML version is being used. Err on the safe side.
return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='"')
return representer(data)
def serialize_string_escape_dollar(dumper, data):
""" Ensure boolean-like strings are quoted in the output and escape $ characters """
data = data.replace('$', '$$')
return serialize_string(dumper, data)
yaml.SafeDumper.add_representer(types.MountSpec, serialize_dict_type)
yaml.SafeDumper.add_representer(types.VolumeFromSpec, serialize_config_type)
yaml.SafeDumper.add_representer(types.VolumeSpec, serialize_config_type)
yaml.SafeDumper.add_representer(types.SecurityOpt, serialize_config_type)
yaml.SafeDumper.add_representer(types.ServiceSecret, serialize_dict_type)
yaml.SafeDumper.add_representer(types.ServiceConfig, serialize_dict_type)
yaml.SafeDumper.add_representer(types.ServicePort, serialize_dict_type)
def denormalize_config(config, image_digests=None):
result = {'version': str(config.config_version)}
denormalized_services = [
denormalize_service_dict(
service_dict,
config.version,
image_digests[service_dict['name']] if image_digests else None)
for service_dict in config.services
]
result['services'] = {
service_dict.pop('name'): service_dict
for service_dict in denormalized_services
}
for key in ('networks', 'volumes', 'secrets', 'configs'):
config_dict = getattr(config, key)
if not config_dict:
continue
result[key] = config_dict.copy()
for name, conf in result[key].items():
if 'external_name' in conf:
del conf['external_name']
if 'name' in conf:
if 'external' in conf:
conf['external'] = bool(conf['external'])
return result
def serialize_config(config, image_digests=None, escape_dollar=True):
    if escape_dollar:
        yaml.SafeDumper.add_representer(str, serialize_string_escape_dollar)
    else:
        yaml.SafeDumper.add_representer(str, serialize_string)
return yaml.safe_dump(
denormalize_config(config, image_digests),
default_flow_style=False,
indent=2,
width=80,
allow_unicode=True
)
def serialize_ns_time_value(value):
result = (value, 'ns')
table = [
(1000., 'us'),
(1000., 'ms'),
(1000., 's'),
(60., 'm'),
(60., 'h')
]
for stage in table:
tmp = value / stage[0]
if tmp == int(value / stage[0]):
value = tmp
result = (int(value), stage[1])
else:
break
return '{}{}'.format(*result)
def denormalize_service_dict(service_dict, version, image_digest=None):
service_dict = service_dict.copy()
if image_digest:
service_dict['image'] = image_digest
if 'restart' in service_dict:
service_dict['restart'] = types.serialize_restart_spec(
service_dict['restart']
)
if version == V1 and 'network_mode' not in service_dict:
service_dict['network_mode'] = 'bridge'
if 'healthcheck' in service_dict:
if 'interval' in service_dict['healthcheck']:
service_dict['healthcheck']['interval'] = serialize_ns_time_value(
service_dict['healthcheck']['interval']
)
if 'timeout' in service_dict['healthcheck']:
service_dict['healthcheck']['timeout'] = serialize_ns_time_value(
service_dict['healthcheck']['timeout']
)
if 'start_period' in service_dict['healthcheck']:
service_dict['healthcheck']['start_period'] = serialize_ns_time_value(
service_dict['healthcheck']['start_period']
)
if 'ports' in service_dict:
service_dict['ports'] = [
p.legacy_repr() if p.external_ip or version < VERSION else p
for p in service_dict['ports']
]
if 'volumes' in service_dict and (version == V1):
service_dict['volumes'] = [
v.legacy_repr() if isinstance(v, types.MountSpec) else v for v in service_dict['volumes']
]
return service_dict
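
A short sketch of how serialize_ns_time_value denormalizes nanosecond durations: it keeps promoting through the unit table until a division stops being exact.

from compose.config.serialize import serialize_ns_time_value

print(serialize_ns_time_value(5000000))        # 5ms
print(serialize_ns_time_value(90000000000))    # 90s (1.5m is not a whole unit)
print(serialize_ns_time_value(3600000000000))  # 1h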

View File

@@ -0,0 +1,71 @@
from compose.config.errors import DependencyError
def get_service_name_from_network_mode(network_mode):
return get_source_name_from_network_mode(network_mode, 'service')
def get_container_name_from_network_mode(network_mode):
return get_source_name_from_network_mode(network_mode, 'container')
def get_source_name_from_network_mode(network_mode, source_type):
if not network_mode:
return
if not network_mode.startswith(source_type+':'):
return
_, net_name = network_mode.split(':', 1)
return net_name
def get_service_names(links):
return [link.split(':', 1)[0] for link in links]
def get_service_names_from_volumes_from(volumes_from):
return [volume_from.source for volume_from in volumes_from]
def get_service_dependents(service_dict, services):
name = service_dict['name']
return [
service for service in services
if (name in get_service_names(service.get('links', [])) or
name in get_service_names_from_volumes_from(service.get('volumes_from', [])) or
name == get_service_name_from_network_mode(service.get('network_mode')) or
name == get_service_name_from_network_mode(service.get('pid')) or
name == get_service_name_from_network_mode(service.get('ipc')) or
name in service.get('depends_on', []))
]
def sort_service_dicts(services):
# Topological sort (Cormen/Tarjan algorithm).
unmarked = services[:]
temporary_marked = set()
sorted_services = []
def visit(n):
if n['name'] in temporary_marked:
if n['name'] in get_service_names(n.get('links', [])):
                raise DependencyError('A service cannot link to itself: %s' % n['name'])
            if n['name'] in n.get('volumes_from', []):
                raise DependencyError('A service cannot mount itself as a volume: %s' % n['name'])
            if n['name'] in n.get('depends_on', []):
                raise DependencyError('A service cannot depend on itself: %s' % n['name'])
raise DependencyError('Circular dependency between %s' % ' and '.join(temporary_marked))
if n in unmarked:
temporary_marked.add(n['name'])
for m in get_service_dependents(n, services):
visit(m)
temporary_marked.remove(n['name'])
unmarked.remove(n)
sorted_services.insert(0, n)
while unmarked:
visit(unmarked[-1])
return sorted_services
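
A minimal sketch of the topological sort above on plain service dicts; dependencies always end up before their dependents:

from compose.config.sort_services import sort_service_dicts

services = [
    {'name': 'web', 'depends_on': ['db']},
    {'name': 'proxy', 'links': ['web:backend']},
    {'name': 'db'},
]
print([s['name'] for s in sort_service_dicts(services)])
# ['db', 'web', 'proxy']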

View File

@@ -0,0 +1,500 @@
"""
Types for objects parsed from the configuration.
"""
import json
import ntpath
import os
import re
from collections import namedtuple
from docker.utils.ports import build_port_bindings
from ..const import COMPOSEFILE_V1 as V1
from ..utils import unquote_path
from .errors import ConfigurationError
from compose.const import IS_WINDOWS_PLATFORM
from compose.utils import splitdrive
win32_root_path_pattern = re.compile(r'^[A-Za-z]\:\\.*')
class VolumeFromSpec(namedtuple('_VolumeFromSpec', 'source mode type')):
# TODO: drop service_names arg when v1 is removed
@classmethod
def parse(cls, volume_from_config, service_names, version):
func = cls.parse_v1 if version == V1 else cls.parse_v2
return func(service_names, volume_from_config)
@classmethod
def parse_v1(cls, service_names, volume_from_config):
parts = volume_from_config.split(':')
if len(parts) > 2:
raise ConfigurationError(
"volume_from {} has incorrect format, should be "
"service[:mode]".format(volume_from_config))
if len(parts) == 1:
source = parts[0]
mode = 'rw'
else:
source, mode = parts
type = 'service' if source in service_names else 'container'
return cls(source, mode, type)
@classmethod
def parse_v2(cls, service_names, volume_from_config):
parts = volume_from_config.split(':')
if len(parts) > 3:
raise ConfigurationError(
"volume_from {} has incorrect format, should be one of "
"'<service name>[:<mode>]' or "
"'container:<container name>[:<mode>]'".format(volume_from_config))
if len(parts) == 1:
source = parts[0]
return cls(source, 'rw', 'service')
if len(parts) == 2:
if parts[0] == 'container':
type, source = parts
return cls(source, 'rw', type)
source, mode = parts
return cls(source, mode, 'service')
if len(parts) == 3:
type, source, mode = parts
if type not in ('service', 'container'):
raise ConfigurationError(
"Unknown volumes_from type '{}' in '{}'".format(
type,
volume_from_config))
return cls(source, mode, type)
def repr(self):
return '{v.type}:{v.source}:{v.mode}'.format(v=self)
def parse_restart_spec(restart_config):
if not restart_config:
return None
parts = restart_config.split(':')
if len(parts) > 2:
raise ConfigurationError(
"Restart %s has incorrect format, should be "
"mode[:max_retry]" % restart_config)
if len(parts) == 2:
name, max_retry_count = parts
else:
name, = parts
max_retry_count = 0
return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
def serialize_restart_spec(restart_spec):
if not restart_spec:
return ''
parts = [restart_spec['Name']]
if restart_spec['MaximumRetryCount']:
parts.append(str(restart_spec['MaximumRetryCount']))
return ':'.join(parts)
def parse_extra_hosts(extra_hosts_config):
if not extra_hosts_config:
return {}
if isinstance(extra_hosts_config, dict):
return dict(extra_hosts_config)
if isinstance(extra_hosts_config, list):
extra_hosts_dict = {}
for extra_hosts_line in extra_hosts_config:
# TODO: validate string contains ':' ?
host, ip = extra_hosts_line.split(':', 1)
extra_hosts_dict[host.strip()] = ip.strip()
return extra_hosts_dict
def normalize_path_for_engine(path):
"""Windows paths, c:\\my\\path\\shiny, need to be changed to be compatible with
the Engine. Volume paths are expected to be linux style /c/my/path/shiny/
"""
drive, tail = splitdrive(path)
if drive:
path = '/' + drive.lower().rstrip(':') + tail
return path.replace('\\', '/')
def normpath(path, win_host=False):
""" Custom path normalizer that handles Compose-specific edge cases like
UNIX paths on Windows hosts and vice-versa. """
sysnorm = ntpath.normpath if win_host else os.path.normpath
# If a path looks like a UNIX absolute path on Windows, it probably is;
# we'll need to revert the backslashes to forward slashes after normalization
flip_slashes = path.startswith('/') and IS_WINDOWS_PLATFORM
path = sysnorm(path)
if flip_slashes:
path = path.replace('\\', '/')
return path
class MountSpec:
options_map = {
'volume': {
'nocopy': 'no_copy'
},
'bind': {
'propagation': 'propagation'
},
'tmpfs': {
'size': 'tmpfs_size'
}
}
_fields = ['type', 'source', 'target', 'read_only', 'consistency']
@classmethod
def parse(cls, mount_dict, normalize=False, win_host=False):
if mount_dict.get('source'):
if mount_dict['type'] == 'tmpfs':
raise ConfigurationError('tmpfs mounts can not specify a source')
mount_dict['source'] = normpath(mount_dict['source'], win_host)
if normalize:
mount_dict['source'] = normalize_path_for_engine(mount_dict['source'])
return cls(**mount_dict)
def __init__(self, type, source=None, target=None, read_only=None, consistency=None, **kwargs):
self.type = type
self.source = source
self.target = target
self.read_only = read_only
self.consistency = consistency
self.options = None
if self.type in kwargs:
self.options = kwargs[self.type]
def as_volume_spec(self):
mode = 'ro' if self.read_only else 'rw'
return VolumeSpec(external=self.source, internal=self.target, mode=mode)
def legacy_repr(self):
return self.as_volume_spec().repr()
def repr(self):
res = {}
for field in self._fields:
if getattr(self, field, None):
res[field] = getattr(self, field)
if self.options:
res[self.type] = self.options
return res
@property
def is_named_volume(self):
return self.type == 'volume' and self.source
@property
def is_tmpfs(self):
return self.type == 'tmpfs'
@property
def external(self):
return self.source
class VolumeSpec(namedtuple('_VolumeSpec', 'external internal mode')):
win32 = False
@classmethod
def _parse_unix(cls, volume_config):
parts = volume_config.split(':')
if len(parts) > 3:
raise ConfigurationError(
"Volume %s has incorrect format, should be "
"external:internal[:mode]" % volume_config)
if len(parts) == 1:
external = None
internal = os.path.normpath(parts[0])
else:
external = os.path.normpath(parts[0])
internal = os.path.normpath(parts[1])
mode = 'rw'
if len(parts) == 3:
mode = parts[2]
return cls(external, internal, mode)
@classmethod
def _parse_win32(cls, volume_config, normalize):
        # Relative paths on Windows expand to include the drive, e.g. C:\,
        # so we join the first two parts back together to count as one
mode = 'rw'
def separate_next_section(volume_config):
drive, tail = splitdrive(volume_config)
parts = tail.split(':', 1)
if drive:
parts[0] = drive + parts[0]
return parts
parts = separate_next_section(volume_config)
if len(parts) == 1:
internal = parts[0]
external = None
else:
external = parts[0]
parts = separate_next_section(parts[1])
external = normpath(external, True)
internal = parts[0]
if len(parts) > 1:
if ':' in parts[1]:
raise ConfigurationError(
"Volume %s has incorrect format, should be "
"external:internal[:mode]" % volume_config
)
mode = parts[1]
if normalize:
external = normalize_path_for_engine(external) if external else None
result = cls(external, internal, mode)
result.win32 = True
return result
@classmethod
def parse(cls, volume_config, normalize=False, win_host=False):
"""Parse a volume_config path and split it into external:internal[:mode]
parts to be returned as a valid VolumeSpec.
"""
if IS_WINDOWS_PLATFORM or win_host:
return cls._parse_win32(volume_config, normalize)
else:
return cls._parse_unix(volume_config)
def repr(self):
external = self.external + ':' if self.external else ''
mode = ':' + self.mode if self.external else ''
return '{ext}{v.internal}{mode}'.format(mode=mode, ext=external, v=self)
@property
def is_named_volume(self):
res = self.external and not self.external.startswith(('.', '/', '~'))
if not self.win32:
return res
return (
res and not self.external.startswith('\\') and
not win32_root_path_pattern.match(self.external)
)
class ServiceLink(namedtuple('_ServiceLink', 'target alias')):
@classmethod
def parse(cls, link_spec):
target, _, alias = link_spec.partition(':')
if not alias:
alias = target
return cls(target, alias)
def repr(self):
if self.target == self.alias:
return self.target
return '{s.target}:{s.alias}'.format(s=self)
@property
def merge_field(self):
return self.alias
class ServiceConfigBase(namedtuple('_ServiceConfigBase', 'source target uid gid mode name')):
@classmethod
def parse(cls, spec):
if isinstance(spec, str):
return cls(spec, None, None, None, None, None)
return cls(
spec.get('source'),
spec.get('target'),
spec.get('uid'),
spec.get('gid'),
spec.get('mode'),
spec.get('name')
)
@property
def merge_field(self):
return self.source
def repr(self):
return {
k: v for k, v in zip(self._fields, self) if v is not None
}
class ServiceSecret(ServiceConfigBase):
pass
class ServiceConfig(ServiceConfigBase):
pass
class ServicePort(namedtuple('_ServicePort', 'target published protocol mode external_ip')):
def __new__(cls, target, published, *args, **kwargs):
try:
if target:
target = int(target)
except ValueError:
raise ConfigurationError('Invalid target port: {}'.format(target))
if published:
if isinstance(published, str) and '-' in published: # "x-y:z" format
a, b = published.split('-', 1)
if not a.isdigit() or not b.isdigit():
raise ConfigurationError('Invalid published port: {}'.format(published))
else:
try:
published = int(published)
except ValueError:
raise ConfigurationError('Invalid published port: {}'.format(published))
return super().__new__(
cls, target, published, *args, **kwargs
)
@classmethod
def parse(cls, spec):
if isinstance(spec, cls):
# When extending a service with ports, the port definitions have already been parsed
return [spec]
if not isinstance(spec, dict):
result = []
try:
for k, v in build_port_bindings([spec]).items():
if '/' in k:
target, proto = k.split('/', 1)
else:
target, proto = (k, None)
for pub in v:
if pub is None:
result.append(
cls(target, None, proto, None, None)
)
elif isinstance(pub, tuple):
result.append(
cls(target, pub[1], proto, None, pub[0])
)
else:
result.append(
cls(target, pub, proto, None, None)
)
except ValueError as e:
raise ConfigurationError(str(e))
return result
return [cls(
spec.get('target'),
spec.get('published'),
spec.get('protocol'),
spec.get('mode'),
None
)]
@property
def merge_field(self):
return (self.target, self.published, self.external_ip, self.protocol)
def repr(self):
return {
k: v for k, v in zip(self._fields, self) if v is not None
}
def legacy_repr(self):
return normalize_port_dict(self.repr())
class GenericResource(namedtuple('_GenericResource', 'kind value')):
@classmethod
def parse(cls, dct):
if 'discrete_resource_spec' not in dct:
raise ConfigurationError(
'generic_resource entry must include a discrete_resource_spec key'
)
if 'kind' not in dct['discrete_resource_spec']:
raise ConfigurationError(
'generic_resource entry must include a discrete_resource_spec.kind subkey'
)
return cls(
dct['discrete_resource_spec']['kind'],
dct['discrete_resource_spec'].get('value')
)
def repr(self):
return {
'discrete_resource_spec': {
'kind': self.kind,
'value': self.value,
}
}
@property
def merge_field(self):
return self.kind
def normalize_port_dict(port):
return '{external_ip}{has_ext_ip}{published}{is_pub}{target}/{protocol}'.format(
published=port.get('published', ''),
is_pub=(':' if port.get('published') is not None or port.get('external_ip') else ''),
target=port.get('target'),
protocol=port.get('protocol', 'tcp'),
external_ip=port.get('external_ip', ''),
has_ext_ip=(':' if port.get('external_ip') else ''),
)
class SecurityOpt(namedtuple('_SecurityOpt', 'value src_file')):
@classmethod
def parse(cls, value):
if not isinstance(value, str):
return value
# based on https://github.com/docker/cli/blob/9de1b162f/cli/command/container/opts.go#L673-L697
con = value.split('=', 2)
if len(con) == 1 and con[0] != 'no-new-privileges':
if ':' not in value:
raise ConfigurationError('Invalid security_opt: {}'.format(value))
con = value.split(':', 2)
if con[0] == 'seccomp' and con[1] != 'unconfined':
try:
with open(unquote_path(con[1])) as f:
seccomp_data = json.load(f)
except (OSError, ValueError) as e:
raise ConfigurationError('Error reading seccomp profile: {}'.format(e))
return cls(
'seccomp={}'.format(json.dumps(seccomp_data)), con[1]
)
return cls(value, None)
def repr(self):
if self.src_file is not None:
return 'seccomp:{}'.format(self.src_file)
return self.value
@property
def merge_field(self):
return self.value
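
A short sketch of the shorthand parsers above on a UNIX host (results differ on Windows, where _parse_win32 handles drive letters):

from compose.config.types import ServicePort, VolumeSpec

vol = VolumeSpec.parse('./data:/var/lib/data:ro')
print(vol.external, vol.internal, vol.mode)  # data /var/lib/data ro (normpath strips ./)
print(vol.repr())                            # data:/var/lib/data:ro

ports = ServicePort.parse('127.0.0.1:8080:80/tcp')
print(ports[0].repr())
# {'target': 80, 'published': 8080, 'protocol': 'tcp',
#  'external_ip': '127.0.0.1'}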

View File

@@ -0,0 +1,569 @@
import json
import logging
import os
import re
import sys
from docker.utils.ports import split_port
from jsonschema import Draft4Validator
from jsonschema import FormatChecker
from jsonschema import RefResolver
from jsonschema import ValidationError
from ..const import COMPOSEFILE_V1 as V1
from ..const import NANOCPUS_SCALE
from .errors import ConfigurationError
from .errors import VERSION_EXPLANATION
from .sort_services import get_service_name_from_network_mode
log = logging.getLogger(__name__)
DOCKER_CONFIG_HINTS = {
'cpu_share': 'cpu_shares',
'add_host': 'extra_hosts',
'hosts': 'extra_hosts',
'extra_host': 'extra_hosts',
'device': 'devices',
'link': 'links',
'memory_swap': 'memswap_limit',
'port': 'ports',
'privilege': 'privileged',
'priviliged': 'privileged',
'privilige': 'privileged',
'volume': 'volumes',
'workdir': 'working_dir',
}
VALID_NAME_CHARS = r'[a-zA-Z0-9\._\-]'
VALID_EXPOSE_FORMAT = r'^\d+(\-\d+)?(\/[a-zA-Z]+)?$'
VALID_IPV4_SEG = r'(\d{1,2}|1\d{2}|2[0-4]\d|25[0-5])'
VALID_IPV4_ADDR = r"({IPV4_SEG}\.){{3}}{IPV4_SEG}".format(IPV4_SEG=VALID_IPV4_SEG)
VALID_REGEX_IPV4_CIDR = r"^{IPV4_ADDR}/(\d|[1-2]\d|3[0-2])$".format(IPV4_ADDR=VALID_IPV4_ADDR)
VALID_IPV6_SEG = r'[0-9a-fA-F]{1,4}'
VALID_REGEX_IPV6_CIDR = "".join(r"""
^
(
(({IPV6_SEG}:){{7}}{IPV6_SEG})|
(({IPV6_SEG}:){{1,7}}:)|
(({IPV6_SEG}:){{1,6}}(:{IPV6_SEG}){{1,1}})|
(({IPV6_SEG}:){{1,5}}(:{IPV6_SEG}){{1,2}})|
(({IPV6_SEG}:){{1,4}}(:{IPV6_SEG}){{1,3}})|
(({IPV6_SEG}:){{1,3}}(:{IPV6_SEG}){{1,4}})|
(({IPV6_SEG}:){{1,2}}(:{IPV6_SEG}){{1,5}})|
(({IPV6_SEG}:){{1,1}}(:{IPV6_SEG}){{1,6}})|
(:((:{IPV6_SEG}){{1,7}}|:))|
(fe80:(:{IPV6_SEG}){{0,4}}%[0-9a-zA-Z]{{1,}})|
(::(ffff(:0{{1,4}}){{0,1}}:){{0,1}}{IPV4_ADDR})|
(({IPV6_SEG}:){{1,4}}:{IPV4_ADDR})
)
/(\d|[1-9]\d|1[0-1]\d|12[0-8])
$
""".format(IPV6_SEG=VALID_IPV6_SEG, IPV4_ADDR=VALID_IPV4_ADDR).split())
@FormatChecker.cls_checks(format="ports", raises=ValidationError)
def format_ports(instance):
try:
split_port(instance)
except ValueError as e:
raise ValidationError(str(e))
return True
@FormatChecker.cls_checks(format="expose", raises=ValidationError)
def format_expose(instance):
if isinstance(instance, str):
if not re.match(VALID_EXPOSE_FORMAT, instance):
raise ValidationError(
"should be of the format 'PORT[/PROTOCOL]'")
return True
@FormatChecker.cls_checks("subnet_ip_address", raises=ValidationError)
def format_subnet_ip_address(instance):
if isinstance(instance, str):
if not re.match(VALID_REGEX_IPV4_CIDR, instance) and \
not re.match(VALID_REGEX_IPV6_CIDR, instance):
raise ValidationError("should use the CIDR format")
return True
def match_named_volumes(service_dict, project_volumes):
service_volumes = service_dict.get('volumes', [])
for volume_spec in service_volumes:
if volume_spec.is_named_volume and volume_spec.external not in project_volumes:
raise ConfigurationError(
'Named volume "{}" is used in service "{}" but no'
' declaration was found in the volumes section.'.format(
volume_spec.repr(), service_dict.get('name')
)
)
def python_type_to_yaml_type(type_):
type_name = type(type_).__name__
return {
'dict': 'mapping',
'list': 'array',
'int': 'number',
'float': 'number',
'bool': 'boolean',
'unicode': 'string',
'str': 'string',
'bytes': 'string',
}.get(type_name, type_name)
def validate_config_section(filename, config, section):
"""Validate the structure of a configuration section. This must be done
before interpolation so it's separate from schema validation.
"""
if not isinstance(config, dict):
raise ConfigurationError(
"In file '{filename}', {section} must be a mapping, not "
"{type}.".format(
filename=filename,
section=section,
type=anglicize_json_type(python_type_to_yaml_type(config))))
for key, value in config.items():
if not isinstance(key, str):
raise ConfigurationError(
"In file '{filename}', the {section} name {name} must be a "
"quoted string, i.e. '{name}'.".format(
filename=filename,
section=section,
name=key))
if not isinstance(value, (dict, type(None))):
raise ConfigurationError(
"In file '{filename}', {section} '{name}' must be a mapping not "
"{type}.".format(
filename=filename,
section=section,
name=key,
type=anglicize_json_type(python_type_to_yaml_type(value))))
def validate_top_level_object(config_file):
if not isinstance(config_file.config, dict):
raise ConfigurationError(
"Top level object in '{}' needs to be an object not '{}'.".format(
config_file.filename,
type(config_file.config)))
def validate_ulimits(service_config):
ulimit_config = service_config.config.get('ulimits', {})
for limit_name, soft_hard_values in ulimit_config.items():
if isinstance(soft_hard_values, dict):
if not soft_hard_values['soft'] <= soft_hard_values['hard']:
raise ConfigurationError(
"Service '{s.name}' has invalid ulimit '{ulimit}'. "
"'soft' value can not be greater than 'hard' value ".format(
s=service_config,
ulimit=ulimit_config))
def validate_extends_file_path(service_name, extends_options, filename):
"""
The service to be extended must either be defined in the config key 'file',
or within 'filename'.
"""
error_prefix = "Invalid 'extends' configuration for %s:" % service_name
if 'file' not in extends_options and filename is None:
raise ConfigurationError(
"%s you need to specify a 'file', e.g. 'file: something.yml'" % error_prefix
)
def validate_network_mode(service_config, service_names):
network_mode = service_config.config.get('network_mode')
if not network_mode:
return
if 'networks' in service_config.config:
raise ConfigurationError("'network_mode' and 'networks' cannot be combined")
dependency = get_service_name_from_network_mode(network_mode)
if not dependency:
return
if dependency not in service_names:
raise ConfigurationError(
"Service '{s.name}' uses the network stack of service '{dep}' which "
"is undefined.".format(s=service_config, dep=dependency))
def validate_pid_mode(service_config, service_names):
pid_mode = service_config.config.get('pid')
if not pid_mode:
return
dependency = get_service_name_from_network_mode(pid_mode)
if not dependency:
return
if dependency not in service_names:
raise ConfigurationError(
"Service '{s.name}' uses the PID namespace of service '{dep}' which "
"is undefined.".format(s=service_config, dep=dependency)
)
def validate_ipc_mode(service_config, service_names):
ipc_mode = service_config.config.get('ipc')
if not ipc_mode:
return
dependency = get_service_name_from_network_mode(ipc_mode)
if not dependency:
return
if dependency not in service_names:
raise ConfigurationError(
"Service '{s.name}' uses the IPC namespace of service '{dep}' which "
"is undefined.".format(s=service_config, dep=dependency)
)
def validate_links(service_config, service_names):
for link in service_config.config.get('links', []):
if link.split(':')[0] not in service_names:
raise ConfigurationError(
"Service '{s.name}' has a link to service '{link}' which is "
"undefined.".format(s=service_config, link=link))
def validate_depends_on(service_config, service_names):
deps = service_config.config.get('depends_on', {})
for dependency in deps.keys():
if dependency not in service_names:
raise ConfigurationError(
"Service '{s.name}' depends on service '{dep}' which is "
"undefined.".format(s=service_config, dep=dependency)
)
def validate_credential_spec(service_config):
credential_spec = service_config.config.get('credential_spec')
if not credential_spec:
return
if 'registry' not in credential_spec and 'file' not in credential_spec:
raise ConfigurationError(
"Service '{s.name}' is missing 'credential_spec.file' or "
"credential_spec.registry'".format(s=service_config)
)
def get_unsupported_config_msg(path, error_key):
msg = "Unsupported config option for {}: '{}'".format(path_string(path), error_key)
if error_key in DOCKER_CONFIG_HINTS:
msg += " (did you mean '{}'?)".format(DOCKER_CONFIG_HINTS[error_key])
return msg
def anglicize_json_type(json_type):
if json_type.startswith(('a', 'e', 'i', 'o', 'u')):
return 'an ' + json_type
return 'a ' + json_type
def is_service_dict_schema(schema_id):
return schema_id in ('config_schema_v1.json', '#/properties/services')
def handle_error_for_schema_with_id(error, path):
schema_id = error.schema['id']
if is_service_dict_schema(schema_id) and error.validator == 'additionalProperties':
return "Invalid service name '{}' - only {} characters are allowed".format(
# The service_name is one of the keys in the json object
[i for i in list(error.instance) if not i or any(filter(
lambda c: not re.match(VALID_NAME_CHARS, c), i
))][0],
VALID_NAME_CHARS
)
if error.validator == 'additionalProperties':
if schema_id == '#/definitions/service':
invalid_config_key = parse_key_from_error_msg(error)
return get_unsupported_config_msg(path, invalid_config_key)
if schema_id.startswith('config_schema_'):
invalid_config_key = parse_key_from_error_msg(error)
return ('Invalid top-level property "{key}". Valid top-level '
'sections for this Compose file are: {properties}, and '
'extensions starting with "x-".\n\n{explanation}').format(
key=invalid_config_key,
properties=', '.join(error.schema['properties'].keys()),
explanation=VERSION_EXPLANATION
)
if not error.path:
return '{}\n\n{}'.format(error.message, VERSION_EXPLANATION)
def handle_generic_error(error, path):
msg_format = None
error_msg = error.message
if error.validator == 'oneOf':
msg_format = "{path} {msg}"
config_key, error_msg = _parse_oneof_validator(error)
if config_key:
path.append(config_key)
elif error.validator == 'type':
msg_format = "{path} contains an invalid type, it should be {msg}"
error_msg = _parse_valid_types_from_validator(error.validator_value)
elif error.validator == 'required':
error_msg = ", ".join(error.validator_value)
msg_format = "{path} is invalid, {msg} is required."
elif error.validator == 'dependencies':
config_key = list(error.validator_value.keys())[0]
required_keys = ",".join(error.validator_value[config_key])
msg_format = "{path} is invalid: {msg}"
path.append(config_key)
error_msg = "when defining '{}' you must set '{}' as well".format(
config_key,
required_keys)
elif error.cause:
error_msg = str(error.cause)
msg_format = "{path} is invalid: {msg}"
elif error.path:
msg_format = "{path} value {msg}"
if msg_format:
return msg_format.format(path=path_string(path), msg=error_msg)
return error.message
def parse_key_from_error_msg(error):
try:
return error.message.split("'")[1]
except IndexError:
return error.message.split('(')[1].split(' ')[0].strip("'")
def path_string(path):
return ".".join(c for c in path if isinstance(c, str))
def _parse_valid_types_from_validator(validator):
"""A validator value can be either an array of valid types or a string of
a valid type. Parse the valid types and prefix with the correct article.
"""
if not isinstance(validator, list):
return anglicize_json_type(validator)
if len(validator) == 1:
return anglicize_json_type(validator[0])
return "{}, or {}".format(
", ".join([anglicize_json_type(validator[0])] + validator[1:-1]),
anglicize_json_type(validator[-1]))
def _parse_oneof_validator(error):
"""oneOf has multiple schemas, so we need to reason about which schema, sub
schema or constraint the validation is failing on.
Inspecting the context value of a ValidationError gives us information about
which sub schema failed and which kind of error it is.
"""
types = []
for context in error.context:
if context.validator == 'oneOf':
_, error_msg = _parse_oneof_validator(context)
return path_string(context.path), error_msg
if context.validator == 'required':
return (None, context.message)
if context.validator == 'additionalProperties':
invalid_config_key = parse_key_from_error_msg(context)
return (None, "contains unsupported option: '{}'".format(invalid_config_key))
if context.validator == 'uniqueItems':
return (
path_string(context.path) if context.path else None,
"contains non-unique items, please remove duplicates from {}".format(
context.instance),
)
if context.path:
return (
path_string(context.path),
"contains {}, which is an invalid type, it should be {}".format(
json.dumps(context.instance),
_parse_valid_types_from_validator(context.validator_value)),
)
if context.validator == 'type':
types.append(context.validator_value)
valid_types = _parse_valid_types_from_validator(types)
return (None, "contains an invalid type, it should be {}".format(valid_types))
def process_service_constraint_errors(error, service_name, version):
if version == V1:
if 'image' in error.instance and 'build' in error.instance:
return (
"Service {} has both an image and build path specified. "
"A service can either be built to image or use an existing "
"image, not both.".format(service_name))
if 'image' in error.instance and 'dockerfile' in error.instance:
return (
"Service {} has both an image and alternate Dockerfile. "
"A service can either be built to image or use an existing "
"image, not both.".format(service_name))
if 'image' not in error.instance and 'build' not in error.instance:
return (
"Service {} has neither an image nor a build context specified. "
"At least one must be provided.".format(service_name))
def process_config_schema_errors(error):
path = list(error.path)
if 'id' in error.schema:
error_msg = handle_error_for_schema_with_id(error, path)
if error_msg:
return error_msg
return handle_generic_error(error, path)
def keys_to_str(config_file):
"""
    Non-string keys may break the validator when patterned fields are used.
"""
d = {}
for k, v in config_file.items():
d[str(k)] = v
if isinstance(v, dict):
d[str(k)] = keys_to_str(v)
return d
def validate_against_config_schema(config_file, version):
schema = load_jsonschema(version)
config = keys_to_str(config_file.config)
format_checker = FormatChecker(["ports", "expose", "subnet_ip_address"])
validator = Draft4Validator(
schema,
resolver=RefResolver(get_resolver_path(), schema),
format_checker=format_checker)
handle_errors(
validator.iter_errors(config),
process_config_schema_errors,
config_file.filename)
def validate_service_constraints(config, service_name, config_file):
def handler(errors):
return process_service_constraint_errors(
errors, service_name, config_file.version)
schema = load_jsonschema(config_file.version)
validator = Draft4Validator(schema['definitions']['constraints']['service'])
handle_errors(validator.iter_errors(config), handler, None)
def validate_cpu(service_config):
cpus = service_config.config.get('cpus')
if not cpus:
return
nano_cpus = cpus * NANOCPUS_SCALE
if isinstance(nano_cpus, float) and not nano_cpus.is_integer():
raise ConfigurationError(
"cpus must have nine or less digits after decimal point")
def get_schema_path():
return os.path.dirname(os.path.abspath(__file__))
def load_jsonschema(version):
name = "compose_spec"
if version == V1:
name = "config_schema_v1"
filename = os.path.join(
get_schema_path(),
"{}.json".format(name))
if not os.path.exists(filename):
raise ConfigurationError(
'Version in "{}" is unsupported. {}'
.format(filename, VERSION_EXPLANATION))
with open(filename) as fh:
return json.load(fh)
def get_resolver_path():
schema_path = get_schema_path()
if sys.platform == "win32":
scheme = "///"
# TODO: why is this necessary?
schema_path = schema_path.replace('\\', '/')
else:
scheme = "//"
return "file:{}{}/".format(scheme, schema_path)
def handle_errors(errors, format_error_func, filename):
"""jsonschema returns an error tree full of information to explain what has
gone wrong. Process each error and pull out relevant information and re-write
helpful error messages that are relevant.
"""
errors = sorted(errors, key=str)
if not errors:
return
error_msg = '\n'.join(format_error_func(error) for error in errors)
raise ConfigurationError(
"The Compose file{file_msg} is invalid because:\n{error_msg}".format(
file_msg=" '{}'".format(filename) if filename else "",
error_msg=error_msg))
def validate_healthcheck(service_config):
healthcheck = service_config.config.get('healthcheck', {})
if 'test' in healthcheck and isinstance(healthcheck['test'], list):
if len(healthcheck['test']) == 0:
raise ConfigurationError(
'Service "{}" defines an invalid healthcheck: '
'"test" is an empty list'
.format(service_config.name))
# when disable is true config.py::process_healthcheck adds "test: ['NONE']" to service_config
elif healthcheck['test'][0] == 'NONE' and len(healthcheck) > 1:
raise ConfigurationError(
'Service "{}" defines an invalid healthcheck: '
'"disable: true" cannot be combined with other options'
.format(service_config.name))
elif healthcheck['test'][0] not in ('NONE', 'CMD', 'CMD-SHELL'):
raise ConfigurationError(
'Service "{}" defines an invalid healthcheck: '
'when "test" is a list the first item must be either NONE, CMD or CMD-SHELL'
.format(service_config.name))
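
A minimal sketch of the custom format checkers registered above; each returns True on success and raises jsonschema.ValidationError with a short hint otherwise:

from jsonschema import ValidationError
from compose.config.validation import format_expose, format_subnet_ip_address

print(format_expose('8000-8010/tcp'))             # True
print(format_subnet_ip_address('172.16.0.0/16'))  # True
try:
    format_subnet_ip_address('172.16.0.0')        # no prefix length
except ValidationError as e:
    print(e.message)                              # should use the CIDR format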

View File

@@ -0,0 +1,40 @@
import sys
from .version import ComposeVersion
DEFAULT_TIMEOUT = 10
HTTP_TIMEOUT = 60
IS_WINDOWS_PLATFORM = (sys.platform == "win32")
IS_LINUX_PLATFORM = (sys.platform == "linux")
LABEL_CONTAINER_NUMBER = 'com.docker.compose.container-number'
LABEL_ONE_OFF = 'com.docker.compose.oneoff'
LABEL_PROJECT = 'com.docker.compose.project'
LABEL_WORKING_DIR = 'com.docker.compose.project.working_dir'
LABEL_CONFIG_FILES = 'com.docker.compose.project.config_files'
LABEL_ENVIRONMENT_FILE = 'com.docker.compose.project.environment_file'
LABEL_SERVICE = 'com.docker.compose.service'
LABEL_NETWORK = 'com.docker.compose.network'
LABEL_VERSION = 'com.docker.compose.version'
LABEL_SLUG = 'com.docker.compose.slug'
LABEL_VOLUME = 'com.docker.compose.volume'
LABEL_CONFIG_HASH = 'com.docker.compose.config-hash'
NANOCPUS_SCALE = 1000000000
PARALLEL_LIMIT = 64
SECRETS_PATH = '/run/secrets'
WINDOWS_LONGPATH_PREFIX = '\\\\?\\'
COMPOSEFILE_V1 = ComposeVersion('1')
COMPOSE_SPEC = ComposeVersion('3.9')
# Minimum Docker Engine API version needed to support
# the features of each Compose schema version
API_VERSIONS = {
COMPOSEFILE_V1: '1.21',
COMPOSE_SPEC: '1.38',
}
API_VERSION_TO_ENGINE_VERSION = {
API_VERSIONS[COMPOSEFILE_V1]: '1.9.0',
API_VERSIONS[COMPOSE_SPEC]: '18.06.0',
}

View File

@@ -0,0 +1,322 @@
from functools import reduce
from docker.errors import ImageNotFound
from .const import LABEL_CONTAINER_NUMBER
from .const import LABEL_ONE_OFF
from .const import LABEL_PROJECT
from .const import LABEL_SERVICE
from .const import LABEL_SLUG
from .const import LABEL_VERSION
from .utils import truncate_id
from .version import ComposeVersion
class Container:
"""
Represents a Docker container, constructed from the output of
GET /containers/:id:/json.
"""
def __init__(self, client, dictionary, has_been_inspected=False):
self.client = client
self.dictionary = dictionary
self.has_been_inspected = has_been_inspected
self.log_stream = None
@classmethod
def from_ps(cls, client, dictionary, **kwargs):
"""
Construct a container object from the output of GET /containers/json.
"""
name = get_container_name(dictionary)
if name is None:
return None
new_dictionary = {
'Id': dictionary['Id'],
'Image': dictionary['Image'],
'Name': '/' + name,
}
return cls(client, new_dictionary, **kwargs)
@classmethod
def from_id(cls, client, id):
return cls(client, client.inspect_container(id), has_been_inspected=True)
@classmethod
def create(cls, client, **options):
response = client.create_container(**options)
return cls.from_id(client, response['Id'])
@property
def id(self):
return self.dictionary['Id']
@property
def image(self):
return self.dictionary['Image']
@property
def image_config(self):
return self.client.inspect_image(self.image)
@property
def short_id(self):
return self.id[:12]
@property
def name(self):
return self.dictionary['Name'][1:]
@property
def project(self):
return self.labels.get(LABEL_PROJECT)
@property
def service(self):
return self.labels.get(LABEL_SERVICE)
@property
def name_without_project(self):
if self.name.startswith('{}_{}'.format(self.project, self.service)):
return '{}_{}'.format(self.service, self.number if self.number is not None else self.slug)
else:
return self.name
@property
def number(self):
if self.one_off:
# One-off containers are no longer assigned numbers and use slugs instead.
return None
number = self.labels.get(LABEL_CONTAINER_NUMBER)
if not number:
raise ValueError("Container {} does not have a {} label".format(
self.short_id, LABEL_CONTAINER_NUMBER))
return int(number)
@property
def slug(self):
if not self.full_slug:
return None
return truncate_id(self.full_slug)
@property
def full_slug(self):
return self.labels.get(LABEL_SLUG)
@property
def one_off(self):
return self.labels.get(LABEL_ONE_OFF) == 'True'
@property
def ports(self):
self.inspect_if_not_inspected()
return self.get('NetworkSettings.Ports') or {}
@property
def human_readable_ports(self):
def format_port(private, public):
if not public:
return [private]
return [
'{HostIp}:{HostPort}->{private}'.format(private=private, **pub)
for pub in public
]
return ', '.join(
','.join(format_port(*item))
for item in sorted(self.ports.items())
)
@property
def labels(self):
return self.get('Config.Labels') or {}
@property
def stop_signal(self):
return self.get('Config.StopSignal')
@property
def log_config(self):
return self.get('HostConfig.LogConfig') or None
@property
def human_readable_state(self):
if self.is_paused:
return 'Paused'
if self.is_restarting:
return 'Restarting'
if self.is_running:
return 'Ghost' if self.get('State.Ghost') else self.human_readable_health_status
else:
return 'Exit %s' % self.get('State.ExitCode')
@property
def human_readable_command(self):
entrypoint = self.get('Config.Entrypoint') or []
cmd = self.get('Config.Cmd') or []
return ' '.join(entrypoint + cmd)
@property
def environment(self):
def parse_env(var):
if '=' in var:
return var.split("=", 1)
return var, None
return dict(parse_env(var) for var in self.get('Config.Env') or [])
@property
def exit_code(self):
return self.get('State.ExitCode')
@property
def is_running(self):
return self.get('State.Running')
@property
def is_restarting(self):
return self.get('State.Restarting')
@property
def is_paused(self):
return self.get('State.Paused')
@property
def log_driver(self):
return self.get('HostConfig.LogConfig.Type')
@property
def human_readable_health_status(self):
""" Generate UP status string with up time and health
"""
status_string = 'Up'
container_status = self.get('State.Health.Status')
if container_status == 'starting':
status_string += ' (health: starting)'
elif container_status is not None:
status_string += ' (%s)' % container_status
return status_string
def attach_log_stream(self):
self.log_stream = self.attach(stdout=True, stderr=True, stream=True)
def get(self, key):
"""Return a value from the container or None if the value is not set.
:param key: a string using dotted notation for nested dictionary
lookups
"""
self.inspect_if_not_inspected()
def get_value(dictionary, key):
return (dictionary or {}).get(key)
return reduce(get_value, key.split('.'), self.dictionary)
def get_local_port(self, port, protocol='tcp'):
port = self.ports.get("{}/{}".format(port, protocol))
return "{HostIp}:{HostPort}".format(**port[0]) if port else None
def get_mount(self, mount_dest):
for mount in self.get('Mounts'):
if mount['Destination'] == mount_dest:
return mount
return None
def start(self, **options):
return self.client.start(self.id, **options)
def stop(self, **options):
return self.client.stop(self.id, **options)
def pause(self, **options):
return self.client.pause(self.id, **options)
def unpause(self, **options):
return self.client.unpause(self.id, **options)
def kill(self, **options):
return self.client.kill(self.id, **options)
def restart(self, **options):
return self.client.restart(self.id, **options)
def remove(self, **options):
return self.client.remove_container(self.id, **options)
def create_exec(self, command, **options):
return self.client.exec_create(self.id, command, **options)
def start_exec(self, exec_id, **options):
return self.client.exec_start(exec_id, **options)
def rename_to_tmp_name(self):
"""Rename the container to a hopefully unique temporary container name
by prepending the short id.
"""
if not self.name.startswith(self.short_id):
self.client.rename(
self.id, '{}_{}'.format(self.short_id, self.name)
)
def inspect_if_not_inspected(self):
if not self.has_been_inspected:
self.inspect()
def wait(self):
return self.client.wait(self.id).get('StatusCode', 127)
def logs(self, *args, **kwargs):
return self.client.logs(self.id, *args, **kwargs)
def inspect(self):
self.dictionary = self.client.inspect_container(self.id)
self.has_been_inspected = True
return self.dictionary
def image_exists(self):
try:
self.client.inspect_image(self.image)
except ImageNotFound:
return False
return True
def reset_image(self, img_id):
""" If this container's image has been removed, temporarily replace the old image ID
with `img_id`.
"""
if not self.image_exists():
self.dictionary['Image'] = img_id
def attach(self, *args, **kwargs):
return self.client.attach(self.id, *args, **kwargs)
def has_legacy_proj_name(self, project_name):
return (
ComposeVersion(self.labels.get(LABEL_VERSION)) < ComposeVersion('1.21.0') and
self.project != project_name
)
def __repr__(self):
return '<Container: {} ({})>'.format(self.name, self.id[:6])
def __eq__(self, other):
if type(self) != type(other):
return False
return self.id == other.id
def __hash__(self):
return self.id.__hash__()
def get_container_name(container):
if not container.get('Name') and not container.get('Names'):
return None
# inspect
if 'Name' in container:
return container['Name']
# ps
shortest_name = min(container['Names'], key=lambda n: len(n.split('/')))
return shortest_name.split('/')[-1]
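
A small sketch of the dotted-path get() lookup above; has_been_inspected=True keeps the sketch from needing a real client, and the payload is a trimmed-down inspect response:

from compose.container import Container

payload = {
    'Id': 'abc123def456',
    'Name': '/myproj_web_1',
    'Config': {'Labels': {'com.docker.compose.service': 'web'},
               'Entrypoint': ['python'], 'Cmd': ['app.py']},
    'State': {'Running': True, 'Paused': False, 'Restarting': False},
}
c = Container(client=None, dictionary=payload, has_been_inspected=True)
print(c.short_id)                # abc123def456 (first 12 characters)
print(c.service)                 # web
print(c.get('State.Running'))    # True
print(c.human_readable_command)  # python app.py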

View File

@@ -0,0 +1,34 @@
class OperationFailedError(Exception):
def __init__(self, reason):
self.msg = reason
class StreamParseError(RuntimeError):
def __init__(self, reason):
self.msg = reason
class HealthCheckException(Exception):
def __init__(self, reason):
self.msg = reason
class HealthCheckFailed(HealthCheckException):
def __init__(self, container_id):
super().__init__(
'Container "{}" is unhealthy.'.format(container_id)
)
class NoHealthCheckConfigured(HealthCheckException):
def __init__(self, service_name):
super().__init__(
'Service "{}" is missing a healthcheck configuration'.format(
service_name
)
)
class CompletedUnsuccessfully(Exception):
def __init__(self, container_id, exit_code):
self.msg = 'Container "{}" exited with code {}.'.format(container_id, exit_code)

View File

@@ -0,0 +1,64 @@
import os
from enum import Enum
import requests
from docker import ContextAPI
from docker.transport import UnixHTTPAdapter
from compose.const import IS_WINDOWS_PLATFORM
if IS_WINDOWS_PLATFORM:
from docker.transport import NpipeHTTPAdapter
class Status(Enum):
SUCCESS = "success"
FAILURE = "failure"
CANCELED = "canceled"
class MetricsSource:
CLI = "docker-compose"
if IS_WINDOWS_PLATFORM:
METRICS_SOCKET_FILE = 'npipe://\\\\.\\pipe\\docker_cli'
else:
METRICS_SOCKET_FILE = 'http+unix:///var/run/docker-cli.sock'
class MetricsCommand(requests.Session):
"""
Representation of a command in the metrics.
"""
def __init__(self, command,
context_type=None, status=Status.SUCCESS,
source=MetricsSource.CLI, uri=None):
super().__init__()
self.command = ("compose " + command).strip() if command else "compose --help"
self.context = context_type or ContextAPI.get_current_context().context_type or 'moby'
self.source = source
self.status = status.value
self.uri = uri or os.environ.get("METRICS_SOCKET_FILE", METRICS_SOCKET_FILE)
if IS_WINDOWS_PLATFORM:
self.mount("http+unix://", NpipeHTTPAdapter(self.uri))
else:
self.mount("http+unix://", UnixHTTPAdapter(self.uri))
def send_metrics(self):
try:
return self.post("http+unix://localhost/usage",
json=self.to_map(),
timeout=.05,
headers={'Content-Type': 'application/json'})
except Exception as e:
return e
def to_map(self):
return {
'command': self.command,
'context': self.context,
'source': self.source,
'status': self.status,
}
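
A minimal sketch of the payload a MetricsCommand posts; passing context_type explicitly sidesteps the ContextAPI lookup, and send_metrics() returns the exception rather than raising when the CLI socket is absent:

from compose.metrics.client import MetricsCommand, Status

cmd = MetricsCommand('up', context_type='moby', status=Status.SUCCESS)
print(cmd.to_map())
# {'command': 'compose up', 'context': 'moby',
#  'source': 'docker-compose', 'status': 'success'}
cmd.send_metrics()  # fire-and-forget: connection errors are swallowed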

View File

@@ -0,0 +1,21 @@
import functools
from compose.metrics.client import MetricsCommand
from compose.metrics.client import Status
class metrics:
def __init__(self, command_name=None):
self.command_name = command_name
def __call__(self, fn):
@functools.wraps(fn,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES)
def wrapper(*args, **kwargs):
if not self.command_name:
self.command_name = fn.__name__
result = fn(*args, **kwargs)
MetricsCommand(self.command_name, status=Status.SUCCESS).send_metrics()
return result
return wrapper
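
A short sketch of the decorator in use; the 'up' name and handler below are hypothetical, and the underlying MetricsCommand consults the current Docker context when it fires:

from compose.metrics.decorator import metrics

@metrics('up')
def up_command(options):
    # A hypothetical CLI handler; any callable works.
    return 'started'

up_command({})  # runs the handler, then posts MetricsCommand('up', ...)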

View File

@@ -0,0 +1,332 @@
import logging
import re
from collections import OrderedDict
from operator import itemgetter
from docker.errors import NotFound
from docker.types import IPAMConfig
from docker.types import IPAMPool
from docker.utils import version_gte
from docker.utils import version_lt
from . import __version__
from .config import ConfigurationError
from .const import LABEL_NETWORK
from .const import LABEL_PROJECT
from .const import LABEL_VERSION
log = logging.getLogger(__name__)
OPTS_EXCEPTIONS = [
'com.docker.network.driver.overlay.vxlanid_list',
'com.docker.network.windowsshim.hnsid',
'com.docker.network.windowsshim.networkname'
]
class Network:
def __init__(self, client, project, name, driver=None, driver_opts=None,
ipam=None, external=False, internal=False, enable_ipv6=False,
labels=None, custom_name=False):
self.client = client
self.project = project
self.name = name
self.driver = driver
self.driver_opts = driver_opts
self.ipam = create_ipam_config_from_dict(ipam)
self.external = external
self.internal = internal
self.enable_ipv6 = enable_ipv6
self.labels = labels
self.custom_name = custom_name
self.legacy = None
def ensure(self):
if self.external:
if self.driver == 'overlay':
# Swarm nodes do not register overlay networks that were
# created on a different node unless they're in use.
# See docker/compose#4399
return
try:
self.inspect()
log.debug(
'Network {} declared as external. No new '
'network will be created.'.format(self.name)
)
except NotFound:
raise ConfigurationError(
'Network {name} declared as external, but could'
' not be found. Please create the network manually'
' using `{command} {name}` and try again.'.format(
name=self.full_name,
command='docker network create'
)
)
return
self._set_legacy_flag()
try:
data = self.inspect(legacy=self.legacy)
check_remote_network_config(data, self)
except NotFound:
driver_name = 'the default driver'
if self.driver:
driver_name = 'driver "{}"'.format(self.driver)
log.info(
'Creating network "{}" with {}'.format(self.full_name, driver_name)
)
self.client.create_network(
name=self.full_name,
driver=self.driver,
options=self.driver_opts,
ipam=self.ipam,
internal=self.internal,
enable_ipv6=self.enable_ipv6,
labels=self._labels,
attachable=version_gte(self.client._version, '1.24') or None,
check_duplicate=True,
)
def remove(self):
if self.external:
log.info("Network %s is external, skipping", self.true_name)
return
log.info("Removing network {}".format(self.true_name))
self.client.remove_network(self.true_name)
def inspect(self, legacy=False):
if legacy:
return self.client.inspect_network(self.legacy_full_name)
return self.client.inspect_network(self.full_name)
@property
def legacy_full_name(self):
if self.custom_name:
return self.name
return '{}_{}'.format(
re.sub(r'[_-]', '', self.project), self.name
)
@property
def full_name(self):
if self.custom_name:
return self.name
return '{}_{}'.format(self.project, self.name)
@property
def true_name(self):
self._set_legacy_flag()
if self.legacy:
return self.legacy_full_name
return self.full_name
@property
def _labels(self):
if version_lt(self.client._version, '1.23'):
return None
labels = self.labels.copy() if self.labels else {}
labels.update({
LABEL_PROJECT: self.project,
LABEL_NETWORK: self.name,
LABEL_VERSION: __version__,
})
return labels
def _set_legacy_flag(self):
if self.legacy is not None:
return
try:
data = self.inspect(legacy=True)
self.legacy = data is not None
except NotFound:
self.legacy = False
def create_ipam_config_from_dict(ipam_dict):
if not ipam_dict:
return None
return IPAMConfig(
driver=ipam_dict.get('driver') or 'default',
pool_configs=[
IPAMPool(
subnet=config.get('subnet'),
iprange=config.get('ip_range'),
gateway=config.get('gateway'),
aux_addresses=config.get('aux_addresses'),
)
for config in ipam_dict.get('config', [])
],
options=ipam_dict.get('options')
)
class NetworkConfigChangedError(ConfigurationError):
def __init__(self, net_name, property_name):
super().__init__(
'Network "{}" needs to be recreated - {} has changed'.format(
net_name, property_name
)
)
def check_remote_ipam_config(remote, local):
remote_ipam = remote.get('IPAM')
ipam_dict = create_ipam_config_from_dict(local.ipam)
if local.ipam.get('driver') and local.ipam.get('driver') != remote_ipam.get('Driver'):
raise NetworkConfigChangedError(local.true_name, 'IPAM driver')
if len(ipam_dict['Config']) != 0:
if len(ipam_dict['Config']) != len(remote_ipam['Config']):
raise NetworkConfigChangedError(local.true_name, 'IPAM configs')
        # sorted() requires a callable key; sort on the subnet, tolerating
        # pool configs where it is unset
        remote_configs = sorted(remote_ipam['Config'], key=lambda c: c.get('Subnet') or '')
        local_configs = sorted(ipam_dict['Config'], key=lambda c: c.get('Subnet') or '')
while local_configs:
lc = local_configs.pop()
rc = remote_configs.pop()
if lc.get('Subnet') != rc.get('Subnet'):
raise NetworkConfigChangedError(local.true_name, 'IPAM config subnet')
if lc.get('Gateway') is not None and lc.get('Gateway') != rc.get('Gateway'):
raise NetworkConfigChangedError(local.true_name, 'IPAM config gateway')
if lc.get('IPRange') != rc.get('IPRange'):
raise NetworkConfigChangedError(local.true_name, 'IPAM config ip_range')
            if sorted(lc.get('AuxiliaryAddresses') or []) != sorted(rc.get('AuxiliaryAddresses') or []):
raise NetworkConfigChangedError(local.true_name, 'IPAM config aux_addresses')
remote_opts = remote_ipam.get('Options') or {}
local_opts = local.ipam.get('Options') or {}
for k in set.union(set(remote_opts.keys()), set(local_opts.keys())):
if remote_opts.get(k) != local_opts.get(k):
raise NetworkConfigChangedError(local.true_name, 'IPAM option "{}"'.format(k))
def check_remote_network_config(remote, local):
if local.driver and remote.get('Driver') != local.driver:
raise NetworkConfigChangedError(local.true_name, 'driver')
local_opts = local.driver_opts or {}
remote_opts = remote.get('Options') or {}
for k in set.union(set(remote_opts.keys()), set(local_opts.keys())):
if k in OPTS_EXCEPTIONS:
continue
if remote_opts.get(k) != local_opts.get(k):
raise NetworkConfigChangedError(local.true_name, 'option "{}"'.format(k))
if local.ipam is not None:
check_remote_ipam_config(remote, local)
if local.internal is not None and local.internal != remote.get('Internal', False):
raise NetworkConfigChangedError(local.true_name, 'internal')
if local.enable_ipv6 is not None and local.enable_ipv6 != remote.get('EnableIPv6', False):
raise NetworkConfigChangedError(local.true_name, 'enable_ipv6')
local_labels = local.labels or {}
remote_labels = remote.get('Labels') or {}
for k in set.union(set(remote_labels.keys()), set(local_labels.keys())):
if k.startswith('com.docker.'): # We are only interested in user-specified labels
continue
if remote_labels.get(k) != local_labels.get(k):
log.warning(
'Network {}: label "{}" has changed. It may need to be'
' recreated.'.format(local.true_name, k)
)
def build_networks(name, config_data, client):
network_config = config_data.networks or {}
networks = {
network_name: Network(
client=client, project=name,
name=data.get('name', network_name),
driver=data.get('driver'),
driver_opts=data.get('driver_opts'),
ipam=data.get('ipam'),
external=bool(data.get('external', False)),
internal=data.get('internal'),
enable_ipv6=data.get('enable_ipv6'),
labels=data.get('labels'),
custom_name=data.get('name') is not None,
)
for network_name, data in network_config.items()
}
if 'default' not in networks:
networks['default'] = Network(client, name, 'default')
return networks
class ProjectNetworks:
def __init__(self, networks, use_networking):
self.networks = networks or {}
self.use_networking = use_networking
@classmethod
def from_services(cls, services, networks, use_networking):
service_networks = {
network: networks.get(network)
for service in services
for network in get_network_names_for_service(service)
}
unused = set(networks) - set(service_networks) - {'default'}
if unused:
log.warning(
"Some networks were defined but are not used by any service: "
"{}".format(", ".join(unused)))
return cls(service_networks, use_networking)
def remove(self):
if not self.use_networking:
return
for network in self.networks.values():
try:
network.remove()
except NotFound:
log.warning("Network %s not found.", network.true_name)
def initialize(self):
if not self.use_networking:
return
for network in self.networks.values():
network.ensure()
def get_network_defs_for_service(service_dict):
if 'network_mode' in service_dict:
return {}
networks = service_dict.get('networks', {'default': None})
return {
net: (config or {})
for net, config in networks.items()
}
def get_network_names_for_service(service_dict):
return get_network_defs_for_service(service_dict).keys()
def get_networks(service_dict, network_definitions):
networks = {}
for name, netdef in get_network_defs_for_service(service_dict).items():
network = network_definitions.get(name)
if network:
networks[network.true_name] = netdef
else:
raise ConfigurationError(
'Service "{}" uses an undefined network "{}"'
.format(service_dict['name'], name))
if any([v.get('priority') for v in networks.values()]):
return OrderedDict(sorted(
networks.items(),
key=lambda t: t[1].get('priority') or 0, reverse=True
))
else:
# Ensure Compose will pick a consistent primary network if no
# priority is set
return OrderedDict(sorted(networks.items(), key=itemgetter(0)))
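
# Editor's note: a hedged naming sketch, not part of the original module. It
# shows how project and network names combine; ``client=None`` is safe here
# because only the name properties are used, never the Docker API.
if __name__ == '__main__':
    net = Network(client=None, project='my_app', name='backend')
    print(net.full_name)         # my_app_backend
    print(net.legacy_full_name)  # myapp_backend (legacy style, separators stripped)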

View File

@@ -0,0 +1,366 @@
import _thread as thread
import logging
import operator
import sys
from queue import Empty
from queue import Queue
from threading import Lock
from threading import Semaphore
from threading import Thread
from docker.errors import APIError
from docker.errors import ImageNotFound
from compose.cli.colors import AnsiMode
from compose.cli.colors import green
from compose.cli.colors import red
from compose.cli.signals import ShutdownException
from compose.const import PARALLEL_LIMIT
from compose.errors import CompletedUnsuccessfully
from compose.errors import HealthCheckFailed
from compose.errors import NoHealthCheckConfigured
from compose.errors import OperationFailedError
log = logging.getLogger(__name__)
STOP = object()
class GlobalLimit:
"""Simple class to hold a global semaphore limiter for a project. This class
should be treated as a singleton that is instantiated when the project is.
"""
global_limiter = Semaphore(PARALLEL_LIMIT)
@classmethod
def set_global_limit(cls, value):
if value is None:
value = PARALLEL_LIMIT
cls.global_limiter = Semaphore(value)
def parallel_execute_watch(events, writer, errors, results, msg, get_name, fail_check):
""" Watch events from a parallel execution, update status and fill errors and results.
Returns exception to re-raise.
"""
error_to_reraise = None
for obj, result, exception in events:
if exception is None:
if fail_check is not None and fail_check(obj):
writer.write(msg, get_name(obj), 'failed', red)
else:
writer.write(msg, get_name(obj), 'done', green)
results.append(result)
elif isinstance(exception, ImageNotFound):
# This is to bubble up ImageNotFound exceptions to the client so we
# can prompt the user if they want to rebuild.
errors[get_name(obj)] = exception.explanation
writer.write(msg, get_name(obj), 'error', red)
error_to_reraise = exception
elif isinstance(exception, APIError):
errors[get_name(obj)] = exception.explanation
writer.write(msg, get_name(obj), 'error', red)
elif isinstance(exception, (OperationFailedError, HealthCheckFailed, NoHealthCheckConfigured,
CompletedUnsuccessfully)):
errors[get_name(obj)] = exception.msg
writer.write(msg, get_name(obj), 'error', red)
elif isinstance(exception, UpstreamError):
writer.write(msg, get_name(obj), 'error', red)
else:
errors[get_name(obj)] = exception
error_to_reraise = exception
return error_to_reraise
def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None, fail_check=None):
"""Runs func on objects in parallel while ensuring that func is
ran on object only after it is ran on all its dependencies.
get_deps called on object must return a collection with its dependencies.
get_name called on object must return its name.
fail_check is an additional failure check for cases that should display as a failure
in the CLI logs, but don't raise an exception (such as attempting to start 0 containers)
"""
objects = list(objects)
stream = sys.stderr
writer = ParallelStreamWriter.get_or_assign_instance(ParallelStreamWriter(stream))
for obj in objects:
writer.add_object(msg, get_name(obj))
for obj in objects:
writer.write_initial(msg, get_name(obj))
events = parallel_execute_iter(objects, func, get_deps, limit)
errors = {}
results = []
error_to_reraise = parallel_execute_watch(
events, writer, errors, results, msg, get_name, fail_check
)
for obj_name, error in errors.items():
stream.write("\nERROR: for {} {}\n".format(obj_name, error))
if error_to_reraise:
raise error_to_reraise
return results, errors
def _no_deps(x):
return []
class State:
"""
Holds the state of a partially-complete parallel operation.
state.started: objects being processed
state.finished: objects which have been processed
state.failed: objects which either failed or whose dependencies failed
"""
def __init__(self, objects):
self.objects = objects
self.started = set()
self.finished = set()
self.failed = set()
def is_done(self):
return len(self.finished) + len(self.failed) >= len(self.objects)
def pending(self):
return set(self.objects) - self.started - self.finished - self.failed
class NoLimit:
def __enter__(self):
pass
def __exit__(self, *ex):
pass
def parallel_execute_iter(objects, func, get_deps, limit):
"""
    Runs func on objects in parallel while ensuring that func is
    run on an object only after it has run on all of the object's dependencies.
Returns an iterator of tuples which look like:
# if func returned normally when run on object
(object, result, None)
# if func raised an exception when run on object
(object, None, exception)
# if func raised an exception when run on one of object's dependencies
(object, None, UpstreamError())
"""
if get_deps is None:
get_deps = _no_deps
if limit is None:
limiter = NoLimit()
else:
limiter = Semaphore(limit)
results = Queue()
state = State(objects)
while True:
feed_queue(objects, func, get_deps, results, state, limiter)
try:
event = results.get(timeout=0.1)
except Empty:
continue
# See https://github.com/docker/compose/issues/189
except thread.error:
raise ShutdownException()
if event is STOP:
break
obj, _, exception = event
if exception is None:
log.debug('Finished processing: {}'.format(obj))
state.finished.add(obj)
else:
log.debug('Failed: {}'.format(obj))
state.failed.add(obj)
yield event
def producer(obj, func, results, limiter):
"""
The entry point for a producer thread which runs func on a single object.
Places a tuple on the results queue once func has either returned or raised.
"""
with limiter, GlobalLimit.global_limiter:
try:
result = func(obj)
results.put((obj, result, None))
except Exception as e:
results.put((obj, None, e))
def feed_queue(objects, func, get_deps, results, state, limiter):
"""
Starts producer threads for any objects which are ready to be processed
(i.e. they have no dependencies which haven't been successfully processed).
Shortcuts any objects whose dependencies have failed and places an
(object, None, UpstreamError()) tuple on the results queue.
"""
pending = state.pending()
log.debug('Pending: {}'.format(pending))
for obj in pending:
deps = get_deps(obj)
try:
if any(dep[0] in state.failed for dep in deps):
log.debug('{} has upstream errors - not processing'.format(obj))
results.put((obj, None, UpstreamError()))
state.failed.add(obj)
elif all(
dep not in objects or (
dep in state.finished and (not ready_check or ready_check(dep))
) for dep, ready_check in deps
):
log.debug('Starting producer thread for {}'.format(obj))
t = Thread(target=producer, args=(obj, func, results, limiter))
t.daemon = True
t.start()
state.started.add(obj)
except (HealthCheckFailed, NoHealthCheckConfigured) as e:
log.debug(
'Healthcheck for service(s) upstream of {} failed - '
'not processing'.format(obj)
)
results.put((obj, None, e))
except CompletedUnsuccessfully as e:
log.debug(
                'Service(s) upstream of {} did not complete successfully - '
'not processing'.format(obj)
)
results.put((obj, None, e))
if state.is_done():
results.put(STOP)
class UpstreamError(Exception):
pass
class ParallelStreamWriter:
"""Write out messages for operations happening in parallel.
    Each operation has its own line, and ANSI escape codes are used
    to jump to the correct line and overwrite it in place.
"""
default_ansi_mode = AnsiMode.AUTO
write_lock = Lock()
instance = None
instance_lock = Lock()
@classmethod
def get_instance(cls):
return cls.instance
@classmethod
def get_or_assign_instance(cls, writer):
cls.instance_lock.acquire()
try:
if cls.instance is None:
cls.instance = writer
return cls.instance
finally:
cls.instance_lock.release()
@classmethod
def set_default_ansi_mode(cls, ansi_mode):
cls.default_ansi_mode = ansi_mode
def __init__(self, stream, ansi_mode=None):
if ansi_mode is None:
ansi_mode = self.default_ansi_mode
self.stream = stream
self.use_ansi_codes = ansi_mode.use_ansi_codes(stream)
self.lines = []
self.width = 0
def add_object(self, msg, obj_index):
if msg is None:
return
self.lines.append(msg + obj_index)
self.width = max(self.width, len(msg + ' ' + obj_index))
def write_initial(self, msg, obj_index):
if msg is None:
return
return self._write_noansi(msg, obj_index, '')
def _write_ansi(self, msg, obj_index, status):
self.write_lock.acquire()
position = self.lines.index(msg + obj_index)
diff = len(self.lines) - position
# move up
self.stream.write("%c[%dA" % (27, diff))
# erase
self.stream.write("%c[2K\r" % 27)
self.stream.write("{:<{width}} ... {}\r".format(msg + ' ' + obj_index,
status, width=self.width))
# move back down
self.stream.write("%c[%dB" % (27, diff))
self.stream.flush()
self.write_lock.release()
def _write_noansi(self, msg, obj_index, status):
self.stream.write(
"{:<{width}} ... {}\r\n".format(
msg + ' ' + obj_index, status, width=self.width
)
)
self.stream.flush()
def write(self, msg, obj_index, status, color_func):
if msg is None:
return
if self.use_ansi_codes:
self._write_ansi(msg, obj_index, color_func(status))
else:
self._write_noansi(msg, obj_index, status)
def parallel_operation(containers, operation, options, message):
parallel_execute(
containers,
operator.methodcaller(operation, **options),
operator.attrgetter('name'),
message,
)
def parallel_remove(containers, options):
stopped_containers = [c for c in containers if not c.is_running]
parallel_operation(stopped_containers, 'remove', options, 'Removing')
def parallel_pause(containers, options):
parallel_operation(containers, 'pause', options, 'Pausing')
def parallel_unpause(containers, options):
parallel_operation(containers, 'unpause', options, 'Unpausing')
def parallel_kill(containers, options):
parallel_operation(containers, 'kill', options, 'Killing')
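
# Editor's note: a self-contained sketch, not part of the original module,
# showing the dependency-ordered iterator. Objects are plain strings here;
# get_deps must return (dependency, ready_check) pairs, as feed_queue unpacks.
if __name__ == '__main__':
    deps = {'db': [], 'web': [('db', None)]}
    events = parallel_execute_iter(
        ['db', 'web'],
        lambda name: name.upper(),
        lambda name: deps[name],
        limit=None,
    )
    for obj, result, exc in events:
        print(obj, result, exc)  # 'db' always completes before 'web' starts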

View File

@@ -0,0 +1,123 @@
from compose import utils
class StreamOutputError(Exception):
pass
def write_to_stream(s, stream):
try:
stream.write(s)
except UnicodeEncodeError:
encoding = getattr(stream, 'encoding', 'ascii')
stream.write(s.encode(encoding, errors='replace').decode(encoding))
def stream_output(output, stream):
is_terminal = hasattr(stream, 'isatty') and stream.isatty()
lines = {}
diff = 0
for event in utils.json_stream(output):
yield event
is_progress_event = 'progress' in event or 'progressDetail' in event
if not is_progress_event:
print_output_event(event, stream, is_terminal)
stream.flush()
continue
if not is_terminal:
continue
# if it's a progress event and we have a terminal, then display the progress bars
image_id = event.get('id')
if not image_id:
continue
if image_id not in lines:
lines[image_id] = len(lines)
write_to_stream("\n", stream)
diff = len(lines) - lines[image_id]
# move cursor up `diff` rows
write_to_stream("%c[%dA" % (27, diff), stream)
print_output_event(event, stream, is_terminal)
if 'id' in event:
# move cursor back down
write_to_stream("%c[%dB" % (27, diff), stream)
stream.flush()
def print_output_event(event, stream, is_terminal):
if 'errorDetail' in event:
raise StreamOutputError(event['errorDetail']['message'])
terminator = ''
if is_terminal and 'stream' not in event:
# erase current line
write_to_stream("%c[2K\r" % 27, stream)
terminator = "\r"
elif 'progressDetail' in event:
return
if 'time' in event:
write_to_stream("[%s] " % event['time'], stream)
if 'id' in event:
write_to_stream("%s: " % event['id'], stream)
if 'from' in event:
write_to_stream("(from %s) " % event['from'], stream)
status = event.get('status', '')
if 'progress' in event:
write_to_stream("{} {}{}".format(status, event['progress'], terminator), stream)
elif 'progressDetail' in event:
detail = event['progressDetail']
total = detail.get('total')
if 'current' in detail and total:
percentage = float(detail['current']) / float(total) * 100
write_to_stream('{} ({:.1f}%){}'.format(status, percentage, terminator), stream)
else:
write_to_stream('{}{}'.format(status, terminator), stream)
elif 'stream' in event:
write_to_stream("{}{}".format(event['stream'], terminator), stream)
else:
write_to_stream("{}{}\n".format(status, terminator), stream)
def get_digest_from_pull(events):
digest = None
for event in events:
status = event.get('status')
        if not status or 'Digest' not in status:
            continue
        digest = status.split(':', 1)[1].strip()
return digest
def get_digest_from_push(events):
for event in events:
digest = event.get('aux', {}).get('Digest')
if digest:
return digest
return None
def read_status(event):
status = event['status'].lower()
if 'progressDetail' in event:
detail = event['progressDetail']
if 'current' in detail and 'total' in detail:
percentage = float(detail['current']) / float(detail['total'])
status = '{} ({:.1%})'.format(status, percentage)
return status
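
# Editor's note: hedged usage sketch, not part of the original file. The fake
# pull events below are illustrative; stream_output is a generator, so it must
# be iterated for anything to be written to the stream.
if __name__ == '__main__':
    import sys
    fake_pull = [
        b'{"status": "Pulling from library/alpine", "id": "latest"}\n',
        b'{"status": "Download complete", "id": "abc123"}\n',
    ]
    for _event in stream_output(fake_pull, sys.stdout):
        pass  # each event is parsed, echoed to stdout, and re-yielded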

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,92 @@
#!/usr/bin/env python
'''
timeparse.py
(c) Will Roberts <wildwilhelm@gmail.com> 1 February, 2014
This is a vendored and modified copy of:
github.com/wroberts/pytimeparse @ cc0550d
It has been modified to mimic the behaviour of
https://golang.org/pkg/time/#ParseDuration
'''
# MIT LICENSE
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
HOURS = r'(?P<hours>[\d.]+)h'
MINS = r'(?P<mins>[\d.]+)m'
SECS = r'(?P<secs>[\d.]+)s'
MILLI = r'(?P<milli>[\d.]+)ms'
MICRO = r'(?P<micro>[\d.]+)(?:us|µs)'
NANO = r'(?P<nano>[\d.]+)ns'
def opt(x):
return r'(?:{x})?'.format(x=x)
TIMEFORMAT = r'{HOURS}{MINS}{SECS}{MILLI}{MICRO}{NANO}'.format(
HOURS=opt(HOURS),
MINS=opt(MINS),
SECS=opt(SECS),
MILLI=opt(MILLI),
MICRO=opt(MICRO),
NANO=opt(NANO),
)
MULTIPLIERS = {
'hours': 60 * 60,
'mins': 60,
'secs': 1,
'milli': 1.0 / 1000,
'micro': 1.0 / 1000.0 / 1000,
'nano': 1.0 / 1000.0 / 1000.0 / 1000.0,
}
def timeparse(sval):
"""Parse a time expression, returning it as a number of seconds. If
possible, the return value will be an `int`; if this is not
possible, the return will be a `float`. Returns `None` if a time
expression cannot be parsed from the given string.
Arguments:
- `sval`: the string value to parse
>>> timeparse('1m24s')
84
    >>> timeparse('1.2m')
    72.0
    >>> timeparse('1.2s')
    1.2
"""
match = re.match(r'\s*' + TIMEFORMAT + r'\s*$', sval, re.I)
if not match or not match.group(0).strip():
return
mdict = match.groupdict()
return sum(
MULTIPLIERS[k] * cast(v) for (k, v) in mdict.items() if v is not None)
def cast(value):
return int(value) if value.isdigit() else float(value)
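
# Editor's note: quick self-checks, not in the original file, mirroring the
# Go-style durations this vendored parser was modified to accept.
if __name__ == '__main__':
    assert timeparse('1h30m') == 5400
    assert timeparse('500ms') == 0.5
    assert timeparse('2 days') is None  # verbose units are not supported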

View File

@@ -0,0 +1,191 @@
import hashlib
import json.decoder
import logging
import ntpath
import random
from docker.errors import DockerException
from docker.utils import parse_bytes as sdk_parse_bytes
from .errors import StreamParseError
from .timeparse import MULTIPLIERS
from .timeparse import timeparse
json_decoder = json.JSONDecoder()
log = logging.getLogger(__name__)
def stream_as_text(stream):
"""Given a stream of bytes or text, if any of the items in the stream
are bytes convert them to text.
This function can be removed once docker-py returns text streams instead
of byte streams.
"""
for data in stream:
if not isinstance(data, str):
data = data.decode('utf-8', 'replace')
yield data
def line_splitter(buffer, separator='\n'):
index = buffer.find(str(separator))
if index == -1:
return None
return buffer[:index + 1], buffer[index + 1:]
def split_buffer(stream, splitter=None, decoder=lambda a: a):
"""Given a generator which yields strings and a splitter function,
joins all input, splits on the separator and yields each chunk.
Unlike string.split(), each chunk includes the trailing
separator, except for the last one if none was found on the end
of the input.
"""
splitter = splitter or line_splitter
buffered = ''
for data in stream_as_text(stream):
buffered += data
while True:
buffer_split = splitter(buffered)
if buffer_split is None:
break
item, buffered = buffer_split
yield item
if buffered:
try:
yield decoder(buffered)
except Exception as e:
log.error(
'Compose tried decoding the following data chunk, but failed:'
'\n%s' % repr(buffered)
)
raise StreamParseError(e)
def json_splitter(buffer):
"""Attempt to parse a json object from a buffer. If there is at least one
object, return it and the rest of the buffer, otherwise return None.
"""
buffer = buffer.strip()
try:
obj, index = json_decoder.raw_decode(buffer)
rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
return obj, rest
except ValueError:
return None
def json_stream(stream):
"""Given a stream of text, return a stream of json objects.
This handles streams which are inconsistently buffered (some entries may
be newline delimited, and others are not).
"""
return split_buffer(stream, json_splitter, json_decoder.decode)
def json_hash(obj):
dump = json.dumps(obj, sort_keys=True, separators=(',', ':'), default=lambda x: x.repr())
h = hashlib.sha256()
h.update(dump.encode('utf8'))
return h.hexdigest()
def microseconds_from_time_nano(time_nano):
return int(time_nano % 1000000000 / 1000)
def nanoseconds_from_time_seconds(time_seconds):
return int(time_seconds / MULTIPLIERS['nano'])
def parse_seconds_float(value):
return timeparse(value or '')
def parse_nanoseconds_int(value):
parsed = timeparse(value or '')
if parsed is None:
return None
return nanoseconds_from_time_seconds(parsed)
def build_string_dict(source_dict):
return {k: str(v if v is not None else '') for k, v in source_dict.items()}
def splitdrive(path):
if len(path) == 0:
return ('', '')
if path[0] in ['.', '\\', '/', '~']:
return ('', path)
return ntpath.splitdrive(path)
def parse_bytes(n):
try:
return sdk_parse_bytes(n)
except DockerException:
return None
def unquote_path(s):
if not s:
return s
if s[0] == '"' and s[-1] == '"':
return s[1:-1]
return s
def generate_random_id():
while True:
        # hex() has no trailing 'L' in Python 3, so keep every digit
        val = hex(random.getrandbits(32 * 8))[2:]
try:
int(truncate_id(val))
continue
except ValueError:
return val
def truncate_id(value):
if ':' in value:
value = value[value.index(':') + 1:]
if len(value) > 12:
return value[:12]
return value
def unique_everseen(iterable, key=lambda x: x):
"List unique elements, preserving order. Remember all elements ever seen."
seen = set()
for element in iterable:
unique_key = key(element)
if unique_key not in seen:
seen.add(unique_key)
yield element
def truncate_string(s, max_chars=35):
if len(s) > max_chars:
return s[:max_chars - 2] + '...'
return s
def filter_attached_for_up(items, service_names, attach_dependencies=False,
item_to_service_name=lambda x: x):
"""This function contains the logic of choosing which services to
attach when doing docker-compose up. It may be used both with containers
and services, and any other entities that map to service names -
this mapping is provided by item_to_service_name."""
if attach_dependencies or not service_names:
return items
return [
item
for item in items if item_to_service_name(item) in service_names
]
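
# Editor's note: a minimal sketch, not part of the original module, of the
# buffering helpers above. The payload is split across chunks on purpose to
# show that json_stream reassembles objects regardless of buffering.
if __name__ == '__main__':
    chunks = ['{"status": "pull', 'ing"}{"status": "done"}\n']
    print(list(json_stream(chunks)))
    # [{'status': 'pulling'}, {'status': 'done'}]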

View File

@@ -0,0 +1,7 @@
from distutils.version import LooseVersion
class ComposeVersion(LooseVersion):
""" A hashable version object """
def __hash__(self):
return hash(self.vstring)
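
# Editor's note: illustrative check, not part of the original file. Hashing on
# vstring keeps equal versions interchangeable as dict keys and set members.
if __name__ == '__main__':
    v = ComposeVersion('3.8')
    print(v > ComposeVersion('3.4'))        # True
    print(len({v, ComposeVersion('3.8')}))  # 1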

View File

@@ -0,0 +1,213 @@
import logging
import re
from itertools import chain
from docker.errors import NotFound
from docker.utils import version_lt
from . import __version__
from .config import ConfigurationError
from .config.types import VolumeSpec
from .const import LABEL_PROJECT
from .const import LABEL_VERSION
from .const import LABEL_VOLUME
log = logging.getLogger(__name__)
class Volume:
def __init__(self, client, project, name, driver=None, driver_opts=None,
external=False, labels=None, custom_name=False):
self.client = client
self.project = project
self.name = name
self.driver = driver
self.driver_opts = driver_opts
self.external = external
self.labels = labels
self.custom_name = custom_name
self.legacy = None
def create(self):
return self.client.create_volume(
self.full_name, self.driver, self.driver_opts, labels=self._labels
)
def remove(self):
if self.external:
log.info("Volume %s is external, skipping", self.true_name)
return
log.info("Removing volume %s", self.true_name)
return self.client.remove_volume(self.true_name)
def inspect(self, legacy=None):
if legacy:
return self.client.inspect_volume(self.legacy_full_name)
return self.client.inspect_volume(self.full_name)
def exists(self):
self._set_legacy_flag()
try:
self.inspect(legacy=self.legacy)
except NotFound:
return False
return True
@property
def full_name(self):
if self.custom_name:
return self.name
return '{}_{}'.format(self.project.lstrip('-_'), self.name)
@property
def legacy_full_name(self):
if self.custom_name:
return self.name
return '{}_{}'.format(
re.sub(r'[_-]', '', self.project), self.name
)
@property
def true_name(self):
self._set_legacy_flag()
if self.legacy:
return self.legacy_full_name
return self.full_name
@property
def _labels(self):
if version_lt(self.client._version, '1.23'):
return None
labels = self.labels.copy() if self.labels else {}
labels.update({
LABEL_PROJECT: self.project,
LABEL_VOLUME: self.name,
LABEL_VERSION: __version__,
})
return labels
def _set_legacy_flag(self):
if self.legacy is not None:
return
try:
data = self.inspect(legacy=True)
self.legacy = data is not None
except NotFound:
self.legacy = False
class ProjectVolumes:
def __init__(self, volumes):
self.volumes = volumes
@classmethod
def from_config(cls, name, config_data, client):
config_volumes = config_data.volumes or {}
volumes = {
vol_name: Volume(
client=client,
project=name,
name=data.get('name', vol_name),
driver=data.get('driver'),
driver_opts=data.get('driver_opts'),
custom_name=data.get('name') is not None,
labels=data.get('labels'),
external=bool(data.get('external', False))
)
for vol_name, data in config_volumes.items()
}
return cls(volumes)
def remove(self):
for volume in self.volumes.values():
try:
volume.remove()
except NotFound:
log.warning("Volume %s not found.", volume.true_name)
def initialize(self):
try:
for volume in self.volumes.values():
volume_exists = volume.exists()
if volume.external:
log.debug(
'Volume {} declared as external. No new '
'volume will be created.'.format(volume.name)
)
if not volume_exists:
raise ConfigurationError(
'Volume {name} declared as external, but could'
' not be found. Please create the volume manually'
' using `{command}{name}` and try again.'.format(
name=volume.full_name,
command='docker volume create --name='
)
)
continue
if not volume_exists:
log.info(
'Creating volume "{}" with {} driver'.format(
volume.full_name, volume.driver or 'default'
)
)
volume.create()
else:
check_remote_volume_config(volume.inspect(legacy=volume.legacy), volume)
except NotFound:
raise ConfigurationError(
'Volume {} specifies nonexistent driver {}'.format(volume.name, volume.driver)
)
def namespace_spec(self, volume_spec):
if not volume_spec.is_named_volume:
return volume_spec
if isinstance(volume_spec, VolumeSpec):
volume = self.volumes[volume_spec.external]
return volume_spec._replace(external=volume.true_name)
else:
volume_spec.source = self.volumes[volume_spec.source].true_name
return volume_spec
class VolumeConfigChangedError(ConfigurationError):
def __init__(self, local, property_name, local_value, remote_value):
super().__init__(
'Configuration for volume {vol_name} specifies {property_name} '
'{local_value}, but a volume with the same name uses a different '
'{property_name} ({remote_value}). If you wish to use the new '
'configuration, please remove the existing volume "{full_name}" '
'first:\n$ docker volume rm {full_name}'.format(
vol_name=local.name, property_name=property_name,
local_value=local_value, remote_value=remote_value,
full_name=local.true_name
)
)
def check_remote_volume_config(remote, local):
if local.driver and remote.get('Driver') != local.driver:
raise VolumeConfigChangedError(local, 'driver', local.driver, remote.get('Driver'))
local_opts = local.driver_opts or {}
remote_opts = remote.get('Options') or {}
for k in set(chain(remote_opts, local_opts)):
if k.startswith('com.docker.'): # These options are set internally
continue
if remote_opts.get(k) != local_opts.get(k):
raise VolumeConfigChangedError(
local, '"{}" driver_opt'.format(k), local_opts.get(k), remote_opts.get(k),
)
local_labels = local.labels or {}
remote_labels = remote.get('Labels') or {}
for k in set(chain(remote_labels, local_labels)):
if k.startswith('com.docker.'): # We are only interested in user-specified labels
continue
if remote_labels.get(k) != local_labels.get(k):
log.warning(
'Volume {}: label "{}" has changed. It may need to be'
' recreated.'.format(local.name, k)
)
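
# Editor's note: a hedged naming sketch, not part of the original module,
# mirroring the network example; ``client=None`` is safe because only the
# name properties are used, never the Docker API.
if __name__ == '__main__':
    vol = Volume(client=None, project='_my-app', name='data')
    print(vol.full_name)         # my-app_data (leading '-'/'_' stripped)
    print(vol.legacy_full_name)  # myapp_data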