Init: mediaserver

2023-02-08 12:13:28 +01:00
parent 848bc9739c
commit f7c23d4ba9
31914 changed files with 6175775 additions and 0 deletions

docker/transport/__init__.py

@@ -0,0 +1,13 @@
# flake8: noqa
from .unixconn import UnixHTTPAdapter
from .ssladapter import SSLHTTPAdapter
try:
from .npipeconn import NpipeHTTPAdapter
from .npipesocket import NpipeSocket
except ImportError:
pass
try:
from .sshconn import SSHHTTPAdapter
except ImportError:
pass
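
The try/except blocks above make the named-pipe and SSH transports optional, since pywin32 and paramiko may not be installed. A minimal sketch (not part of this commit) of how a caller can feature-test which adapters were actually exported:

import docker.transport as transport

if hasattr(transport, 'NpipeHTTPAdapter'):
    print('named-pipe transport available (pywin32 installed)')
if hasattr(transport, 'SSHHTTPAdapter'):
    print('SSH transport available (paramiko installed)')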

docker/transport/basehttpadapter.py

@@ -0,0 +1,8 @@
import requests.adapters
class BaseHTTPAdapter(requests.adapters.HTTPAdapter):
def close(self):
super().close()
if hasattr(self, 'pools'):
self.pools.clear()
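
The sole job of this base class is disposing of the connection pools that subclasses cache in self.pools. That works because RecentlyUsedContainer invokes its dispose_func both when an entry is evicted and on clear(); a minimal sketch of that idiom, with print standing in for the real dispose function:

import urllib3._collections

# Stand-in for the adapters' pool cache: capacity 2, dispose on eviction.
pools = urllib3._collections.RecentlyUsedContainer(
    2, dispose_func=lambda p: print('disposing', p))
pools['a'] = 'pool-a'
pools['b'] = 'pool-b'
pools['c'] = 'pool-c'  # evicts and disposes the least recently used entry
pools.clear()          # disposes everything left, which is what close() triggers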

docker/transport/npipeconn.py

@@ -0,0 +1,107 @@
import queue
import requests.adapters
from docker.transport.basehttpadapter import BaseHTTPAdapter
from .. import constants
from .npipesocket import NpipeSocket
import http.client as httplib
try:
import requests.packages.urllib3 as urllib3
except ImportError:
import urllib3
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class NpipeHTTPConnection(httplib.HTTPConnection):
def __init__(self, npipe_path, timeout=60):
super().__init__(
'localhost', timeout=timeout
)
self.npipe_path = npipe_path
self.timeout = timeout
def connect(self):
sock = NpipeSocket()
sock.settimeout(self.timeout)
sock.connect(self.npipe_path)
self.sock = sock
class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, npipe_path, timeout=60, maxsize=10):
super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.npipe_path = npipe_path
self.timeout = timeout
def _new_conn(self):
return NpipeHTTPConnection(
self.npipe_path, self.timeout
)
# When re-using connections, urllib3 tries to call select() on our
# NpipeSocket instance, causing a crash. To circumvent this, we override
# _get_conn, where that check happens.
def _get_conn(self, timeout):
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
except queue.Empty:
if self.block:
raise urllib3.exceptions.EmptyPoolError(
self,
"Pool reached maximum size and no more "
"connections are allowed."
)
# Oh well, we'll create a new connection then
return conn or self._new_conn()
class NpipeHTTPAdapter(BaseHTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['npipe_path',
'pools',
'timeout',
'max_pool_size']
def __init__(self, base_url, timeout=60,
pool_connections=constants.DEFAULT_NUM_POOLS,
max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
self.npipe_path = base_url.replace('npipe://', '')
self.timeout = timeout
self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
super().__init__()
def get_connection(self, url, proxies=None):
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
pool = NpipeHTTPConnectionPool(
self.npipe_path, self.timeout,
maxsize=self.max_pool_size
)
self.pools[url] = pool
return pool
def request_url(self, request, proxies):
# The select_proxy utility in requests errors out when the provided URL
        # doesn't have a hostname, as is the case when using a UNIX socket.
        # Since proxies are an irrelevant notion in the case of UNIX sockets
        # anyway, we simply return the path URL directly.
        # See also: https://github.com/docker/docker-py/issues/811
return request.path_url
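
A hypothetical usage sketch (Windows only, pywin32 required), assuming a requests version whose HTTPAdapter still dispatches through get_connection: mount the adapter on a Session and query the Docker engine over its default named pipe. The 'http+docker://localhost' base is just a syntactically valid placeholder URL so requests routes the call through this adapter:

import requests
from docker.transport import NpipeHTTPAdapter

session = requests.Session()
session.mount('http+docker://',
              NpipeHTTPAdapter('npipe:////./pipe/docker_engine'))
# The hostname is ignored; request_url() strips the request down to its path.
print(session.get('http+docker://localhost/version').json())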

docker/transport/npipesocket.py

@@ -0,0 +1,213 @@
import functools
import time
import io
import win32file
import win32pipe
cERROR_PIPE_BUSY = 0xe7  # ERROR_PIPE_BUSY (231): all pipe instances are busy
cSECURITY_SQOS_PRESENT = 0x100000
cSECURITY_ANONYMOUS = 0
MAXIMUM_RETRY_COUNT = 10
def check_closed(f):
@functools.wraps(f)
def wrapped(self, *args, **kwargs):
if self._closed:
raise RuntimeError(
'Can not reuse socket after connection was closed.'
)
return f(self, *args, **kwargs)
return wrapped
class NpipeSocket:
""" Partial implementation of the socket API over windows named pipes.
This implementation is only designed to be used as a client socket,
and server-specific methods (bind, listen, accept...) are not
implemented.
"""
def __init__(self, handle=None):
self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
self._handle = handle
self._closed = False
def accept(self):
raise NotImplementedError()
def bind(self, address):
raise NotImplementedError()
def close(self):
self._handle.Close()
self._closed = True
@check_closed
def connect(self, address, retry_count=0):
try:
handle = win32file.CreateFile(
address,
win32file.GENERIC_READ | win32file.GENERIC_WRITE,
0,
None,
win32file.OPEN_EXISTING,
cSECURITY_ANONYMOUS | cSECURITY_SQOS_PRESENT,
0
)
except win32pipe.error as e:
# See Remarks:
# https://msdn.microsoft.com/en-us/library/aa365800.aspx
if e.winerror == cERROR_PIPE_BUSY:
# Another program or thread has grabbed our pipe instance
# before we got to it. Wait for availability and attempt to
# connect again.
retry_count = retry_count + 1
                if retry_count < MAXIMUM_RETRY_COUNT:
time.sleep(1)
return self.connect(address, retry_count)
raise e
self.flags = win32pipe.GetNamedPipeInfo(handle)[0]
self._handle = handle
self._address = address
@check_closed
def connect_ex(self, address):
return self.connect(address)
@check_closed
def detach(self):
self._closed = True
return self._handle
@check_closed
def dup(self):
return NpipeSocket(self._handle)
def getpeername(self):
return self._address
def getsockname(self):
return self._address
def getsockopt(self, level, optname, buflen=None):
raise NotImplementedError()
def ioctl(self, control, option):
raise NotImplementedError()
def listen(self, backlog):
raise NotImplementedError()
    def makefile(self, mode='r', bufsize=None):
        if mode.strip('b') != 'r':
raise NotImplementedError()
rawio = NpipeFileIOBase(self)
if bufsize is None or bufsize <= 0:
bufsize = io.DEFAULT_BUFFER_SIZE
return io.BufferedReader(rawio, buffer_size=bufsize)
@check_closed
def recv(self, bufsize, flags=0):
err, data = win32file.ReadFile(self._handle, bufsize)
return data
@check_closed
def recvfrom(self, bufsize, flags=0):
data = self.recv(bufsize, flags)
return (data, self._address)
@check_closed
def recvfrom_into(self, buf, nbytes=0, flags=0):
return self.recv_into(buf, nbytes, flags), self._address
@check_closed
def recv_into(self, buf, nbytes=0):
readbuf = buf
if not isinstance(buf, memoryview):
readbuf = memoryview(buf)
err, data = win32file.ReadFile(
self._handle,
readbuf[:nbytes] if nbytes else readbuf
)
return len(data)
    # Python 2 era helper; nothing calls this on Python 3.
    def _recv_into_py2(self, buf, nbytes):
err, data = win32file.ReadFile(self._handle, nbytes or len(buf))
n = len(data)
buf[:n] = data
return n
@check_closed
def send(self, string, flags=0):
err, nbytes = win32file.WriteFile(self._handle, string)
return nbytes
@check_closed
def sendall(self, string, flags=0):
return self.send(string, flags)
@check_closed
def sendto(self, string, address):
self.connect(address)
return self.send(string)
def setblocking(self, flag):
if flag:
return self.settimeout(None)
return self.settimeout(0)
def settimeout(self, value):
if value is None:
# Blocking mode
self._timeout = win32pipe.NMPWAIT_WAIT_FOREVER
elif not isinstance(value, (float, int)) or value < 0:
raise ValueError('Timeout value out of range')
elif value == 0:
# Non-blocking mode
self._timeout = win32pipe.NMPWAIT_NO_WAIT
else:
# Timeout mode - Value converted to milliseconds
self._timeout = value * 1000
def gettimeout(self):
return self._timeout
def setsockopt(self, level, optname, value):
raise NotImplementedError()
@check_closed
def shutdown(self, how):
return self.close()
class NpipeFileIOBase(io.RawIOBase):
def __init__(self, npipe_socket):
self.sock = npipe_socket
def close(self):
super().close()
self.sock = None
def fileno(self):
return self.sock.fileno()
def isatty(self):
return False
def readable(self):
return True
def readinto(self, buf):
return self.sock.recv_into(buf)
def seekable(self):
return False
def writable(self):
return False
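
Because NpipeSocket mimics the client half of the socket API, it can also be driven by hand. A hypothetical smoke test (Windows only) that bypasses the adapter machinery and speaks raw HTTP/1.0 to the engine's pipe:

from docker.transport.npipesocket import NpipeSocket

sock = NpipeSocket()
sock.settimeout(10)
sock.connect(r'\\.\pipe\docker_engine')
sock.sendall(b'GET /_ping HTTP/1.0\r\nHost: localhost\r\n\r\n')
print(sock.recv(4096).decode('ascii', 'replace'))  # expect an HTTP 200 status line
sock.close()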

docker/transport/sshconn.py

@@ -0,0 +1,254 @@
import paramiko
import queue
import urllib.parse
import requests.adapters
import logging
import os
import signal
import socket
import subprocess
from docker.transport.basehttpadapter import BaseHTTPAdapter
from .. import constants
import http.client as httplib
try:
import requests.packages.urllib3 as urllib3
except ImportError:
import urllib3
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class SSHSocket(socket.socket):
def __init__(self, host):
super().__init__(
socket.AF_INET, socket.SOCK_STREAM)
self.host = host
self.port = None
self.user = None
if ':' in self.host:
self.host, self.port = self.host.split(':')
if '@' in self.host:
self.user, self.host = self.host.split('@')
self.proc = None
def connect(self, **kwargs):
args = ['ssh']
if self.user:
args = args + ['-l', self.user]
if self.port:
args = args + ['-p', self.port]
args = args + ['--', self.host, 'docker system dial-stdio']
preexec_func = None
if not constants.IS_WINDOWS_PLATFORM:
def f():
signal.signal(signal.SIGINT, signal.SIG_IGN)
preexec_func = f
env = dict(os.environ)
# drop LD_LIBRARY_PATH and SSL_CERT_FILE
env.pop('LD_LIBRARY_PATH', None)
env.pop('SSL_CERT_FILE', None)
self.proc = subprocess.Popen(
args,
env=env,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
preexec_fn=preexec_func)
def _write(self, data):
if not self.proc or self.proc.stdin.closed:
            raise Exception('SSH subprocess not initiated. '
                            'connect() must be called first.')
written = self.proc.stdin.write(data)
self.proc.stdin.flush()
return written
def sendall(self, data):
self._write(data)
def send(self, data):
return self._write(data)
def recv(self, n):
if not self.proc:
            raise Exception('SSH subprocess not initiated. '
                            'connect() must be called first.')
return self.proc.stdout.read(n)
def makefile(self, mode):
if not self.proc:
self.connect()
self.proc.stdout.channel = self
return self.proc.stdout
def close(self):
if not self.proc or self.proc.stdin.closed:
return
self.proc.stdin.write(b'\n\n')
self.proc.stdin.flush()
self.proc.terminate()
class SSHConnection(httplib.HTTPConnection):
def __init__(self, ssh_transport=None, timeout=60, host=None):
super().__init__(
'localhost', timeout=timeout
)
self.ssh_transport = ssh_transport
self.timeout = timeout
self.ssh_host = host
def connect(self):
if self.ssh_transport:
sock = self.ssh_transport.open_session()
sock.settimeout(self.timeout)
sock.exec_command('docker system dial-stdio')
else:
sock = SSHSocket(self.ssh_host)
sock.settimeout(self.timeout)
sock.connect()
self.sock = sock
class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
scheme = 'ssh'
def __init__(self, ssh_client=None, timeout=60, maxsize=10, host=None):
super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.ssh_transport = None
self.timeout = timeout
if ssh_client:
self.ssh_transport = ssh_client.get_transport()
self.ssh_host = host
def _new_conn(self):
return SSHConnection(self.ssh_transport, self.timeout, self.ssh_host)
# When re-using connections, urllib3 calls fileno() on our
# SSH channel instance, quickly overloading our fd limit. To avoid this,
# we override _get_conn
def _get_conn(self, timeout):
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
except queue.Empty:
if self.block:
raise urllib3.exceptions.EmptyPoolError(
self,
"Pool reached maximum size and no more "
"connections are allowed."
)
# Oh well, we'll create a new connection then
return conn or self._new_conn()
class SSHHTTPAdapter(BaseHTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + [
'pools', 'timeout', 'ssh_client', 'ssh_params', 'max_pool_size'
]
def __init__(self, base_url, timeout=60,
pool_connections=constants.DEFAULT_NUM_POOLS,
max_pool_size=constants.DEFAULT_MAX_POOL_SIZE,
shell_out=False):
self.ssh_client = None
if not shell_out:
self._create_paramiko_client(base_url)
self._connect()
self.ssh_host = base_url
if base_url.startswith('ssh://'):
self.ssh_host = base_url[len('ssh://'):]
self.timeout = timeout
self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
super().__init__()
def _create_paramiko_client(self, base_url):
logging.getLogger("paramiko").setLevel(logging.WARNING)
self.ssh_client = paramiko.SSHClient()
base_url = urllib.parse.urlparse(base_url)
self.ssh_params = {
"hostname": base_url.hostname,
"port": base_url.port,
"username": base_url.username
}
ssh_config_file = os.path.expanduser("~/.ssh/config")
if os.path.exists(ssh_config_file):
conf = paramiko.SSHConfig()
with open(ssh_config_file) as f:
conf.parse(f)
host_config = conf.lookup(base_url.hostname)
if 'proxycommand' in host_config:
self.ssh_params["sock"] = paramiko.ProxyCommand(
host_config['proxycommand']
)
if 'hostname' in host_config:
self.ssh_params['hostname'] = host_config['hostname']
if base_url.port is None and 'port' in host_config:
self.ssh_params['port'] = host_config['port']
if base_url.username is None and 'user' in host_config:
self.ssh_params['username'] = host_config['user']
if 'identityfile' in host_config:
self.ssh_params['key_filename'] = host_config['identityfile']
self.ssh_client.load_system_host_keys()
self.ssh_client.set_missing_host_key_policy(paramiko.RejectPolicy())
def _connect(self):
if self.ssh_client:
self.ssh_client.connect(**self.ssh_params)
def get_connection(self, url, proxies=None):
if not self.ssh_client:
return SSHConnectionPool(
ssh_client=self.ssh_client,
timeout=self.timeout,
maxsize=self.max_pool_size,
host=self.ssh_host
)
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
            # Connection is closed; try a reconnect.
if self.ssh_client and not self.ssh_client.get_transport():
self._connect()
pool = SSHConnectionPool(
ssh_client=self.ssh_client,
timeout=self.timeout,
maxsize=self.max_pool_size,
host=self.ssh_host
)
self.pools[url] = pool
return pool
def close(self):
super().close()
if self.ssh_client:
self.ssh_client.close()
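
A hypothetical usage sketch: by default the adapter opens a paramiko session per connection and runs `docker system dial-stdio` on the remote end; passing shell_out=True makes it shell out to the local ssh binary via SSHSocket instead. 'user@host' is a placeholder:

import requests
from docker.transport import SSHHTTPAdapter

session = requests.Session()
session.mount('http+docker://',
              SSHHTTPAdapter('ssh://user@host', timeout=30))
print(session.get('http+docker://localhost/_ping').text)  # "OK" on success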

docker/transport/ssladapter.py

@@ -0,0 +1,65 @@
""" Resolves OpenSSL issues in some servers:
https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
https://github.com/kennethreitz/requests/pull/799
"""
from packaging.version import Version
from requests.adapters import HTTPAdapter
from docker.transport.basehttpadapter import BaseHTTPAdapter
try:
import requests.packages.urllib3 as urllib3
except ImportError:
import urllib3
PoolManager = urllib3.poolmanager.PoolManager
class SSLHTTPAdapter(BaseHTTPAdapter):
'''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
__attrs__ = HTTPAdapter.__attrs__ + ['assert_fingerprint',
'assert_hostname',
'ssl_version']
def __init__(self, ssl_version=None, assert_hostname=None,
assert_fingerprint=None, **kwargs):
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
super().__init__(**kwargs)
def init_poolmanager(self, connections, maxsize, block=False):
kwargs = {
'num_pools': connections,
'maxsize': maxsize,
'block': block,
'assert_hostname': self.assert_hostname,
'assert_fingerprint': self.assert_fingerprint,
}
if self.ssl_version and self.can_override_ssl_version():
kwargs['ssl_version'] = self.ssl_version
self.poolmanager = PoolManager(**kwargs)
def get_connection(self, *args, **kwargs):
"""
Ensure assert_hostname is set correctly on our pool
We already take care of a normal poolmanager via init_poolmanager
But we still need to take care of when there is a proxy poolmanager
"""
conn = super().get_connection(*args, **kwargs)
if conn.assert_hostname != self.assert_hostname:
conn.assert_hostname = self.assert_hostname
return conn
def can_override_ssl_version(self):
urllib_ver = urllib3.__version__.split('-')[0]
if urllib_ver is None:
return False
if urllib_ver == 'dev':
return True
return Version(urllib_ver) > Version('1.5')
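
A hypothetical usage sketch: pinning a server certificate by fingerprint while disabling hostname assertion, both of which init_poolmanager() forwards to urllib3's HTTPS connection pools. The digest below is a placeholder, not a real fingerprint:

import requests
from docker.transport import SSLHTTPAdapter

session = requests.Session()
session.mount('https://', SSLHTTPAdapter(
    assert_hostname=False,
    # placeholder SHA-256 digest; substitute the server certificate's fingerprint
    assert_fingerprint=('00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff:'
                        '00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff'),
))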

docker/transport/unixconn.py

@@ -0,0 +1,96 @@
import requests.adapters
import socket
import http.client as httplib
from docker.transport.basehttpadapter import BaseHTTPAdapter
from .. import constants
try:
import requests.packages.urllib3 as urllib3
except ImportError:
import urllib3
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class UnixHTTPConnection(httplib.HTTPConnection):
def __init__(self, base_url, unix_socket, timeout=60):
super().__init__(
'localhost', timeout=timeout
)
self.base_url = base_url
self.unix_socket = unix_socket
self.timeout = timeout
def connect(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
sock.connect(self.unix_socket)
self.sock = sock
def putheader(self, header, *values):
super().putheader(header, *values)
def response_class(self, sock, *args, **kwargs):
return httplib.HTTPResponse(sock, *args, **kwargs)
class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
super().__init__(
'localhost', timeout=timeout, maxsize=maxsize
)
self.base_url = base_url
self.socket_path = socket_path
self.timeout = timeout
def _new_conn(self):
return UnixHTTPConnection(
self.base_url, self.socket_path, self.timeout
)
class UnixHTTPAdapter(BaseHTTPAdapter):
__attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools',
'socket_path',
'timeout',
'max_pool_size']
def __init__(self, socket_url, timeout=60,
pool_connections=constants.DEFAULT_NUM_POOLS,
max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
socket_path = socket_url.replace('http+unix://', '')
if not socket_path.startswith('/'):
socket_path = '/' + socket_path
self.socket_path = socket_path
self.timeout = timeout
self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
super().__init__()
def get_connection(self, url, proxies=None):
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
pool = UnixHTTPConnectionPool(
url, self.socket_path, self.timeout,
maxsize=self.max_pool_size
)
self.pools[url] = pool
return pool
def request_url(self, request, proxies):
# The select_proxy utility in requests errors out when the provided URL
        # doesn't have a hostname, as is the case when using a UNIX socket.
# Since proxies are an irrelevant notion in the case of UNIX sockets
# anyway, we simply return the path URL directly.
# See also: https://github.com/docker/docker-py/issues/811
return request.path_url
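
Finally, the same Session-mounting pattern as the named-pipe example above, as a hypothetical end-to-end check against the local engine socket. This assumes a requests version that still dispatches through HTTPAdapter.get_connection; newer releases route through get_connection_with_tls_context, which this code predates:

import requests
from docker.transport import UnixHTTPAdapter

session = requests.Session()
session.mount('http+docker://',
              UnixHTTPAdapter('http+unix:///var/run/docker.sock'))
resp = session.get('http+docker://localhost/_ping')
print(resp.status_code, resp.text)  # 200 OK when the daemon is up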