Vendor Galaxy Roles and Collections

Stefan Bethke 2026-02-06 22:07:16 +01:00
commit 2aed20393f
3553 changed files with 387444 additions and 2 deletions


@@ -0,0 +1,102 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import traceback
import typing as t
REQUESTS_IMPORT_ERROR: str | None # pylint: disable=invalid-name
try:
from requests import Session # noqa: F401, pylint: disable=unused-import
from requests.adapters import ( # noqa: F401, pylint: disable=unused-import
HTTPAdapter,
)
from requests.exceptions import ( # noqa: F401, pylint: disable=unused-import
HTTPError,
InvalidSchema,
)
except ImportError:
REQUESTS_IMPORT_ERROR = traceback.format_exc() # pylint: disable=invalid-name
class Session: # type: ignore
__attrs__: list[t.Never] = []
class HTTPAdapter: # type: ignore
__attrs__: list[t.Never] = []
class HTTPError(Exception): # type: ignore
pass
class InvalidSchema(Exception): # type: ignore
pass
else:
REQUESTS_IMPORT_ERROR = None # pylint: disable=invalid-name
URLLIB3_IMPORT_ERROR: str | None = None # pylint: disable=invalid-name
try:
from requests.packages import urllib3 # pylint: disable=unused-import
from requests.packages.urllib3 import ( # type: ignore # pylint: disable=unused-import # isort: skip
connection as urllib3_connection,
)
except ImportError:
try:
import urllib3 # pylint: disable=unused-import
from urllib3 import (
connection as urllib3_connection, # pylint: disable=unused-import
)
except ImportError:
URLLIB3_IMPORT_ERROR = traceback.format_exc() # pylint: disable=invalid-name
class _HTTPConnectionPool:
pass
class _HTTPConnection:
pass
class FakeURLLIB3:
def __init__(self) -> None:
self._collections = self
self.poolmanager = self
self.connection = self
self.connectionpool = self
self.RecentlyUsedContainer = object() # pylint: disable=invalid-name
self.PoolManager = object() # pylint: disable=invalid-name
self.match_hostname = object()
self.HTTPConnectionPool = ( # pylint: disable=invalid-name
_HTTPConnectionPool
)
class FakeURLLIB3Connection:
def __init__(self) -> None:
self.HTTPConnection = _HTTPConnection # pylint: disable=invalid-name
urllib3 = FakeURLLIB3()
urllib3_connection = FakeURLLIB3Connection()
def fail_on_missing_imports() -> None:
if REQUESTS_IMPORT_ERROR is not None:
from .errors import MissingRequirementException # pylint: disable=cyclic-import
raise MissingRequirementException(
"You have to install requests", "requests", REQUESTS_IMPORT_ERROR
)
if URLLIB3_IMPORT_ERROR is not None:
from .errors import MissingRequirementException # pylint: disable=cyclic-import
raise MissingRequirementException(
"You have to install urllib3", "urllib3", URLLIB3_IMPORT_ERROR
)
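# Illustrative usage (a sketch, not part of the vendored file): consumers
# import the guarded names unconditionally and call fail_on_missing_imports()
# once before first use, so a missing dependency surfaces as a clear
# MissingRequirementException instead of a NameError:
#
#     from ._import_helper import Session, fail_on_missing_imports
#
#     fail_on_missing_imports()  # no-op when requests and urllib3 are available
#     session = Session()        # the real requests.Session, not the stub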

File diff suppressed because it is too large.


@@ -0,0 +1,406 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import base64
import json
import logging
import typing as t
from . import errors
from .credentials.errors import CredentialsNotFound, StoreError
from .credentials.store import Store
from .utils import config
if t.TYPE_CHECKING:
from ansible_collections.community.docker.plugins.module_utils._api.api.client import (
APIClient,
)
INDEX_NAME = "docker.io"
INDEX_URL = f"https://index.{INDEX_NAME}/v1/"
TOKEN_USERNAME = "<token>"
log = logging.getLogger(__name__)
def resolve_repository_name(repo_name: str) -> tuple[str, str]:
if "://" in repo_name:
raise errors.InvalidRepository(
f"Repository name cannot contain a scheme ({repo_name})"
)
index_name, remote_name = split_repo_name(repo_name)
if index_name[0] == "-" or index_name[-1] == "-":
raise errors.InvalidRepository(
f"Invalid index name ({index_name}). Cannot begin or end with a hyphen."
)
return resolve_index_name(index_name), remote_name
def resolve_index_name(index_name: str) -> str:
index_name = convert_to_hostname(index_name)
if index_name == "index." + INDEX_NAME:
index_name = INDEX_NAME
return index_name
def get_config_header(client: APIClient, registry: str) -> bytes | None:
log.debug("Looking for auth config")
if not client._auth_configs or client._auth_configs.is_empty:
log.debug("No auth config in memory - loading from filesystem")
client._auth_configs = load_config(credstore_env=client.credstore_env)
authcfg = resolve_authconfig(
client._auth_configs, registry, credstore_env=client.credstore_env
)
# Do not fail here if no authentication exists for this
# specific registry, as we can have a read-only pull. Just
# set the header if we can.
if authcfg:
log.debug("Found auth config")
# auth_config needs to be a dict in the format used by
# auth.py: username, password, serveraddress, email
return encode_header(authcfg)
log.debug("No auth config found")
return None
def split_repo_name(repo_name: str) -> tuple[str, str]:
parts = repo_name.split("/", 1)
if len(parts) == 1 or (
"." not in parts[0] and ":" not in parts[0] and parts[0] != "localhost"
):
# This is a docker index repo (ex: username/foobar or ubuntu)
return INDEX_NAME, repo_name
return tuple(parts) # type: ignore
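# Worked examples (derived from the logic above):
#
#     split_repo_name("ubuntu")                      # -> ("docker.io", "ubuntu")
#     split_repo_name("user/app")                    # -> ("docker.io", "user/app")
#     split_repo_name("quay.io/org/app")             # -> ("quay.io", "org/app")
#     resolve_repository_name("localhost:5000/app")  # -> ("localhost:5000", "app")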
def get_credential_store(
authconfig: dict[str, t.Any] | AuthConfig, registry: str
) -> str | None:
if not isinstance(authconfig, AuthConfig):
authconfig = AuthConfig(authconfig)
return authconfig.get_credential_store(registry)
class AuthConfig(dict):
def __init__(
self, dct: dict[str, t.Any], credstore_env: dict[str, str] | None = None
):
if "auths" not in dct:
dct["auths"] = {}
self.update(dct)
self._credstore_env = credstore_env
self._stores: dict[str, Store] = {}
@classmethod
def parse_auth(
cls, entries: dict[str, dict[str, t.Any]], raise_on_error: bool = False
) -> dict[str, dict[str, t.Any]]:
"""
Parses authentication entries
Args:
entries: Dict of authentication entries.
raise_on_error: If set to true, an invalid format will raise
InvalidConfigFile
Returns:
Authentication registry.
"""
conf: dict[str, dict[str, t.Any]] = {}
for registry, entry in entries.items():
if not isinstance(entry, dict):
log.debug("Config entry for key %s is not auth config", registry) # type: ignore
# We sometimes fall back to parsing the whole config as if it
# was the auth config by itself, for legacy purposes. In that
# case, we fail silently and return an empty conf if any of the
# keys is not formatted properly.
if raise_on_error:
raise errors.InvalidConfigFile(
f"Invalid configuration for registry {registry}"
)
return {}
if "identitytoken" in entry:
log.debug("Found an IdentityToken entry for registry %s", registry)
conf[registry] = {"IdentityToken": entry["identitytoken"]}
continue # Other values are irrelevant if we have a token
if "auth" not in entry:
# Starting with engine v1.11 (API 1.23), an empty dictionary is
# a valid value in the auths config.
# https://github.com/docker/compose/issues/3265
log.debug(
"Auth data for %s is absent. Client might be using a credentials store instead.",
registry,
)
conf[registry] = {}
continue
username, password = decode_auth(entry["auth"])
log.debug(
"Found entry (registry=%s, username=%s)", repr(registry), repr(username)
)
conf[registry] = {
"username": username,
"password": password,
"email": entry.get("email"),
"serveraddress": registry,
}
return conf
@classmethod
def load_config(
cls,
config_path: str | None,
config_dict: dict[str, t.Any] | None,
credstore_env: dict[str, str] | None = None,
) -> t.Self:
"""
Loads authentication data from a Docker configuration file in the given
root directory or if config_path is passed use given path.
Lookup priority:
explicit config_path parameter > DOCKER_CONFIG environment
variable > ~/.docker/config.json > ~/.dockercfg
"""
if not config_dict:
config_file = config.find_config_file(config_path)
if not config_file:
return cls({}, credstore_env)
try:
with open(config_file, "rt", encoding="utf-8") as f:
config_dict = json.load(f)
except (IOError, KeyError, ValueError) as e:
# Likely missing new Docker config file or it is in an
# unknown format, continue to attempt to read old location
# and format.
log.debug(e)
return cls(_load_legacy_config(config_file), credstore_env)
res = {}
if config_dict.get("auths"):
log.debug("Found 'auths' section")
res.update(
{"auths": cls.parse_auth(config_dict.pop("auths"), raise_on_error=True)}
)
if config_dict.get("credsStore"):
log.debug("Found 'credsStore' section")
res.update({"credsStore": config_dict.pop("credsStore")})
if config_dict.get("credHelpers"):
log.debug("Found 'credHelpers' section")
res.update({"credHelpers": config_dict.pop("credHelpers")})
if res:
return cls(res, credstore_env)
log.debug(
"Could not find auth-related section ; attempting to interpret "
"as auth-only file"
)
return cls({"auths": cls.parse_auth(config_dict)}, credstore_env)
@property
def auths(self) -> dict[str, dict[str, t.Any]]:
return self.get("auths", {})
@property
def creds_store(self) -> str | None:
return self.get("credsStore", None)
@property
def cred_helpers(self) -> dict[str, t.Any]:
return self.get("credHelpers", {})
@property
def is_empty(self) -> bool:
return not self.auths and not self.creds_store and not self.cred_helpers
def resolve_authconfig(
self, registry: str | None = None
) -> dict[str, t.Any] | None:
"""
Returns the authentication data from the given auth configuration for a
specific registry. As with the Docker client, legacy entries in the
config with full URLs are stripped down to hostnames before checking
for a match. Returns None if no match was found.
"""
if self.creds_store or self.cred_helpers:
store_name = self.get_credential_store(registry)
if store_name is not None:
log.debug('Using credentials store "%s"', store_name)
cfg = self._resolve_authconfig_credstore(registry, store_name)
if cfg is not None:
return cfg
log.debug("No entry in credstore - fetching from auth dict")
# Default to the public index server
registry = resolve_index_name(registry) if registry else INDEX_NAME
log.debug("Looking for auth entry for %s", repr(registry))
if registry in self.auths:
log.debug("Found %s", repr(registry))
return self.auths[registry]
for key, conf in self.auths.items():
if resolve_index_name(key) == registry:
log.debug("Found %s", repr(key))
return conf
log.debug("No entry found")
return None
def _resolve_authconfig_credstore(
self, registry: str | None, credstore_name: str
) -> dict[str, t.Any] | None:
if not registry or registry == INDEX_NAME:
# The ecosystem is inconsistent about index.docker.io vs.
# docker.io - in that case, it seems the full URL is necessary.
registry = INDEX_URL
log.debug("Looking for auth entry for %s", repr(registry))
store = self._get_store_instance(credstore_name)
try:
data = store.get(registry)
res = {
"ServerAddress": registry,
}
if data["Username"] == TOKEN_USERNAME:
res["IdentityToken"] = data["Secret"]
else:
res.update(
{
"Username": data["Username"],
"Password": data["Secret"],
}
)
return res
except CredentialsNotFound:
log.debug("No entry found")
return None
except StoreError as e:
raise errors.DockerException(f"Credentials store error: {e}") from e
def _get_store_instance(self, name: str) -> Store:
if name not in self._stores:
self._stores[name] = Store(name, environment=self._credstore_env)
return self._stores[name]
def get_credential_store(self, registry: str | None) -> str | None:
if not registry or registry == INDEX_NAME:
registry = INDEX_URL
return self.cred_helpers.get(registry) or self.creds_store
def get_all_credentials(self) -> dict[str, dict[str, t.Any] | None]:
auth_data: dict[str, dict[str, t.Any] | None] = self.auths.copy() # type: ignore
if self.creds_store:
# Retrieve all credentials from the default store
store = self._get_store_instance(self.creds_store)
for k in store.list():
auth_data[k] = self._resolve_authconfig_credstore(k, self.creds_store)
auth_data[convert_to_hostname(k)] = auth_data[k]
# credHelpers entries take priority over all others
for reg, store_name in self.cred_helpers.items():
auth_data[reg] = self._resolve_authconfig_credstore(reg, store_name)
auth_data[convert_to_hostname(reg)] = auth_data[reg]
return auth_data
def add_auth(self, reg: str, data: dict[str, t.Any]) -> None:
self["auths"][reg] = data
def resolve_authconfig(
authconfig: AuthConfig | dict[str, t.Any],
registry: str | None = None,
credstore_env: dict[str, str] | None = None,
) -> dict[str, t.Any] | None:
if not isinstance(authconfig, AuthConfig):
authconfig = AuthConfig(authconfig, credstore_env)
return authconfig.resolve_authconfig(registry)
def convert_to_hostname(url: str) -> str:
return url.replace("http://", "").replace("https://", "").split("/", 1)[0]
def decode_auth(auth: str | bytes) -> tuple[str, str]:
if isinstance(auth, str):
auth = auth.encode("ascii")
s = base64.b64decode(auth)
login, pwd = s.split(b":", 1)
return login.decode("utf8"), pwd.decode("utf8")
def encode_header(auth: dict[str, t.Any]) -> bytes:
auth_json = json.dumps(auth).encode("ascii")
return base64.urlsafe_b64encode(auth_json)
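# Round-trip sketch: decode_auth() splits a base64-encoded "username:password"
# pair, while encode_header() packs an auth dict into the URL-safe base64 JSON
# form Docker expects in the X-Registry-Auth header:
#
#     decode_auth("dXNlcjpzM2NyM3Q=")  # -> ("user", "s3cr3t")
#     encode_header({"username": "user", "password": "s3cr3t"})
#     # -> URL-safe base64 encoding of the JSON-serialized dict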
def parse_auth(
entries: dict[str, dict[str, t.Any]], raise_on_error: bool = False
) -> dict[str, dict[str, t.Any]]:
"""
Parses authentication entries
Args:
entries: Dict of authentication entries.
raise_on_error: If set to true, an invalid format will raise
InvalidConfigFile
Returns:
Authentication registry.
"""
return AuthConfig.parse_auth(entries, raise_on_error)
def load_config(
config_path: str | None = None,
config_dict: dict[str, t.Any] | None = None,
credstore_env: dict[str, str] | None = None,
) -> AuthConfig:
return AuthConfig.load_config(config_path, config_dict, credstore_env)
def _load_legacy_config(config_file: str) -> dict[str, dict[str, t.Any]]:
log.debug("Attempting to parse legacy auth file format")
try:
data = []
with open(config_file, "rt", encoding="utf-8") as f:
for line in f.readlines():
data.append(line.strip().split(" = ")[1])
if len(data) < 2:
# Not enough data
raise errors.InvalidConfigFile("Invalid or empty configuration file!")
username, password = decode_auth(data[0])
return {
"auths": {
INDEX_NAME: {
"username": username,
"password": password,
"email": data[1],
"serveraddress": INDEX_URL,
}
}
}
except Exception as e: # pylint: disable=broad-exception-caught
log.debug(e)
log.debug("All parsing attempts failed - returning empty config")
return {}
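# For reference, the legacy ~/.dockercfg format parsed above looks roughly like
# this (only the values after " = " are consumed, in order):
#
#     auth = dXNlcjpzM2NyM3Q=
#     email = user@example.com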


@@ -0,0 +1,40 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import sys
MINIMUM_DOCKER_API_VERSION = "1.21"
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
CONTAINER_LIMITS_KEYS = ["memory", "memswap", "cpushares", "cpusetcpus"]
DEFAULT_HTTP_HOST = "127.0.0.1"
DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock"
DEFAULT_NPIPE = "npipe:////./pipe/docker_engine"
BYTE_UNITS = {"b": 1, "k": 1024, "m": 1024 * 1024, "g": 1024 * 1024 * 1024}
IS_WINDOWS_PLATFORM = sys.platform == "win32"
WINDOWS_LONGPATH_PREFIX = "\\\\?\\"
DEFAULT_USER_AGENT = "ansible-community.docker"
DEFAULT_NUM_POOLS = 25
# The OpenSSH server default value for MaxSessions is 10 which means we can
# use up to 9, leaving the final session for the underlying SSH connection.
# For more details see: https://github.com/docker/docker-py/issues/2246
DEFAULT_NUM_POOLS_SSH = 9
DEFAULT_MAX_POOL_SIZE = 10
DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
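# Illustrative use of BYTE_UNITS (a sketch; parse_size is a hypothetical helper,
# not part of this module):
#
#     def parse_size(value: str) -> int:
#         unit = value[-1].lower()
#         if unit in BYTE_UNITS:
#             return int(value[:-1]) * BYTE_UNITS[unit]
#         return int(value)
#
#     parse_size("512m")  # -> 536870912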


@@ -0,0 +1,253 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2025 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import json
import os
import typing as t
from .. import errors
from .config import (
METAFILE,
get_current_context_name,
get_meta_dir,
write_context_name_to_docker_config,
)
from .context import Context
if t.TYPE_CHECKING:
from ..tls import TLSConfig
def create_default_context() -> Context:
host = None
if os.environ.get("DOCKER_HOST"):
host = os.environ.get("DOCKER_HOST")
return Context(
"default", "swarm", host, description="Current DOCKER_HOST based configuration"
)
class ContextAPI:
"""Context API.
Contains methods for context management:
create, list, remove, get, inspect.
"""
DEFAULT_CONTEXT = None
@classmethod
def get_default_context(cls) -> Context:
context = cls.DEFAULT_CONTEXT
if context is None:
context = create_default_context()
cls.DEFAULT_CONTEXT = context
return context
@classmethod
def create_context(
cls,
name: str,
orchestrator: str | None = None,
host: str | None = None,
tls_cfg: TLSConfig | None = None,
default_namespace: str | None = None,
skip_tls_verify: bool = False,
) -> Context:
"""Creates a new context.
Returns:
(Context): a Context object.
Raises:
:py:class:`docker.errors.MissingContextParameter`
If a context name is not provided.
:py:class:`docker.errors.ContextAlreadyExists`
If a context with the name already exists.
:py:class:`docker.errors.ContextException`
If name is default.
Example:
>>> from docker.context import ContextAPI
>>> ctx = ContextAPI.create_context(name='test')
>>> print(ctx.Metadata)
{
"Name": "test",
"Metadata": {},
"Endpoints": {
"docker": {
"Host": "unix:///var/run/docker.sock",
"SkipTLSVerify": false
}
}
}
"""
if not name:
raise errors.MissingContextParameter("name")
if name == "default":
raise errors.ContextException('"default" is a reserved context name')
ctx = Context.load_context(name)
if ctx:
raise errors.ContextAlreadyExists(name)
endpoint = "docker"
if orchestrator and orchestrator != "swarm":
endpoint = orchestrator
ctx = Context(name, orchestrator)
ctx.set_endpoint(
endpoint,
host,
tls_cfg,
skip_tls_verify=skip_tls_verify,
def_namespace=default_namespace,
)
ctx.save()
return ctx
@classmethod
def get_context(cls, name: str | None = None) -> Context | None:
"""Retrieves a context object.
Args:
name (str): The name of the context
Example:
>>> from docker.context import ContextAPI
>>> ctx = ContextAPI.get_context(name='test')
>>> print(ctx.Metadata)
{
"Name": "test",
"Metadata": {},
"Endpoints": {
"docker": {
"Host": "unix:///var/run/docker.sock",
"SkipTLSVerify": false
}
}
}
"""
if not name:
name = get_current_context_name()
if name == "default":
return cls.get_default_context()
return Context.load_context(name)
@classmethod
def contexts(cls) -> list[Context]:
"""Context list.
Returns:
(list of Context): List of context objects.
Raises:
:py:class:`docker.errors.APIError`
If something goes wrong.
"""
names = []
for dirname, dummy, fnames in os.walk(get_meta_dir()):
for filename in fnames:
if filename == METAFILE:
filepath = os.path.join(dirname, filename)
try:
with open(filepath, "rt", encoding="utf-8") as f:
data = json.load(f)
name = data["Name"]
if name == "default":
raise ValueError('"default" is a reserved context name')
names.append(name)
except Exception as e:
raise errors.ContextException(
f"Failed to load metafile {filepath}: {e}"
) from e
contexts = [cls.get_default_context()]
for name in names:
context = Context.load_context(name)
if not context:
raise errors.ContextException(f"Context {name} cannot be found")
contexts.append(context)
return contexts
@classmethod
def get_current_context(cls) -> Context | None:
"""Get current context.
Returns:
(Context): current context object.
"""
return cls.get_context()
@classmethod
def set_current_context(cls, name: str = "default") -> None:
ctx = cls.get_context(name)
if not ctx:
raise errors.ContextNotFound(name)
err = write_context_name_to_docker_config(name)
if err:
raise errors.ContextException(f"Failed to set current context: {err}")
@classmethod
def remove_context(cls, name: str) -> None:
"""Remove a context. Similar to the ``docker context rm`` command.
Args:
name (str): The name of the context
Raises:
:py:class:`docker.errors.MissingContextParameter`
If a context name is not provided.
:py:class:`docker.errors.ContextNotFound`
If a context with the name does not exist.
:py:class:`docker.errors.ContextException`
If name is default.
Example:
>>> from docker.context import ContextAPI
>>> ContextAPI.remove_context(name='test')
>>>
"""
if not name:
raise errors.MissingContextParameter("name")
if name == "default":
raise errors.ContextException('context "default" cannot be removed')
ctx = Context.load_context(name)
if not ctx:
raise errors.ContextNotFound(name)
if name == get_current_context_name():
write_context_name_to_docker_config(None)
ctx.remove()
@classmethod
def inspect_context(cls, name: str = "default") -> dict[str, t.Any]:
"""Inspect a context. Similar to the ``docker context inspect`` command.
Args:
name (str): The name of the context
Raises:
:py:class:`docker.errors.MissingContextParameter`
If a context name is not provided.
:py:class:`docker.errors.ContextNotFound`
If a context with the name does not exist.
Example:
>>> from docker.context import ContextAPI
>>> ContextAPI.inspect_context(name='test')
>>>
"""
if not name:
raise errors.MissingContextParameter("name")
if name == "default":
return cls.get_default_context()()
ctx = Context.load_context(name)
if not ctx:
raise errors.ContextNotFound(name)
return ctx()
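# Sketch of a typical flow (mirrors the docstring examples above):
#
#     ContextAPI.create_context("test", host="tcp://127.0.0.1:2375")
#     ContextAPI.set_current_context("test")
#     [c.Name for c in ContextAPI.contexts()]  # includes "default" and "test"
#     ContextAPI.remove_context("test")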


@@ -0,0 +1,107 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2025 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import hashlib
import json
import os
from ..constants import DEFAULT_UNIX_SOCKET, IS_WINDOWS_PLATFORM
from ..utils.config import find_config_file, get_default_config_file
from ..utils.utils import parse_host
METAFILE = "meta.json"
def get_current_context_name_with_source() -> tuple[str, str]:
if os.environ.get("DOCKER_HOST"):
return "default", "DOCKER_HOST environment variable set"
if os.environ.get("DOCKER_CONTEXT"):
return os.environ["DOCKER_CONTEXT"], "DOCKER_CONTEXT environment variable set"
docker_cfg_path = find_config_file()
if docker_cfg_path:
try:
with open(docker_cfg_path, "rt", encoding="utf-8") as f:
return (
json.load(f).get("currentContext", "default"),
f"configuration file {docker_cfg_path}",
)
except Exception: # pylint: disable=broad-exception-caught
pass
return "default", "fallback value"
def get_current_context_name() -> str:
return get_current_context_name_with_source()[0]
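# Resolution order implemented above, highest precedence first:
#   1. DOCKER_HOST set      -> "default"
#   2. DOCKER_CONTEXT set   -> its value
#   3. config file readable -> its "currentContext" key (or "default")
#   4. otherwise            -> "default"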
def write_context_name_to_docker_config(name: str | None = None) -> Exception | None:
if name == "default":
name = None
docker_cfg_path = find_config_file()
config = {}
if docker_cfg_path:
try:
with open(docker_cfg_path, "rt", encoding="utf-8") as f:
config = json.load(f)
except Exception as e: # pylint: disable=broad-exception-caught
return e
current_context = config.get("currentContext", None)
if current_context and not name:
del config["currentContext"]
elif name:
config["currentContext"] = name
else:
return None
if not docker_cfg_path:
docker_cfg_path = get_default_config_file()
try:
with open(docker_cfg_path, "wt", encoding="utf-8") as f:
json.dump(config, f, indent=4)
return None
except Exception as e: # pylint: disable=broad-exception-caught
return e
def get_context_id(name: str) -> str:
return hashlib.sha256(name.encode("utf-8")).hexdigest()
def get_context_dir() -> str:
docker_cfg_path = find_config_file() or get_default_config_file()
return os.path.join(os.path.dirname(docker_cfg_path), "contexts")
def get_meta_dir(name: str | None = None) -> str:
meta_dir = os.path.join(get_context_dir(), "meta")
if name:
return os.path.join(meta_dir, get_context_id(name))
return meta_dir
def get_meta_file(name: str) -> str:
return os.path.join(get_meta_dir(name), METAFILE)
def get_tls_dir(name: str | None = None, endpoint: str = "") -> str:
context_dir = get_context_dir()
if name:
return os.path.join(context_dir, "tls", get_context_id(name), endpoint)
return os.path.join(context_dir, "tls")
def get_context_host(path: str | None = None, tls: bool = False) -> str:
host = parse_host(path, IS_WINDOWS_PLATFORM, tls)
if host == DEFAULT_UNIX_SOCKET and host.startswith("http+"):
# remove http+ from default docker socket url
host = host[5:]
return host


@@ -0,0 +1,286 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2025 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import json
import os
import typing as t
from shutil import copyfile, rmtree
from ..errors import ContextException
from ..tls import TLSConfig
from .config import (
get_context_host,
get_meta_dir,
get_meta_file,
get_tls_dir,
)
IN_MEMORY = "IN MEMORY"
class Context:
"""A context."""
def __init__(
self,
name: str,
orchestrator: str | None = None,
host: str | None = None,
endpoints: dict[str, dict[str, t.Any]] | None = None,
skip_tls_verify: bool = False,
tls: bool = False,
description: str | None = None,
) -> None:
if not name:
raise ValueError("Name not provided")
self.name = name
self.context_type = None
self.orchestrator = orchestrator
self.endpoints = {}
self.tls_cfg: dict[str, TLSConfig] = {}
self.meta_path = IN_MEMORY
self.tls_path = IN_MEMORY
self.description = description
if not endpoints:
# set default docker endpoint if no endpoint is set
default_endpoint = (
"docker"
if (not orchestrator or orchestrator == "swarm")
else orchestrator
)
self.endpoints = {
default_endpoint: {
"Host": get_context_host(host, skip_tls_verify or tls),
"SkipTLSVerify": skip_tls_verify,
}
}
return
# check docker endpoints
for k, v in endpoints.items():
if not isinstance(v, dict):
# unknown format
raise ContextException(
f"Unknown endpoint format for context {name}: {v}",
)
self.endpoints[k] = v
if k != "docker":
continue
self.endpoints[k]["Host"] = v.get(
"Host", get_context_host(host, skip_tls_verify or tls)
)
self.endpoints[k]["SkipTLSVerify"] = bool(
v.get("SkipTLSVerify", skip_tls_verify)
)
def set_endpoint(
self,
name: str = "docker",
host: str | None = None,
tls_cfg: TLSConfig | None = None,
skip_tls_verify: bool = False,
def_namespace: str | None = None,
) -> None:
self.endpoints[name] = {
"Host": get_context_host(host, not skip_tls_verify or tls_cfg is not None),
"SkipTLSVerify": skip_tls_verify,
}
if def_namespace:
self.endpoints[name]["DefaultNamespace"] = def_namespace
if tls_cfg:
self.tls_cfg[name] = tls_cfg
def inspect(self) -> dict[str, t.Any]:
return self()
@classmethod
def load_context(cls, name: str) -> t.Self | None:
meta = Context._load_meta(name)
if meta:
instance = cls(
meta["Name"],
orchestrator=meta["Metadata"].get("StackOrchestrator", None),
endpoints=meta.get("Endpoints", None),
description=meta["Metadata"].get("Description"),
)
instance.context_type = meta["Metadata"].get("Type", None)
instance._load_certs()
instance.meta_path = get_meta_dir(name)
return instance
return None
@classmethod
def _load_meta(cls, name: str) -> dict[str, t.Any] | None:
meta_file = get_meta_file(name)
if not os.path.isfile(meta_file):
return None
metadata: dict[str, t.Any] = {}
try:
with open(meta_file, "rt", encoding="utf-8") as f:
metadata = json.load(f)
except (OSError, KeyError, ValueError) as e:
# unknown format
raise RuntimeError(
f"Detected corrupted meta file for context {name} : {e}"
) from e
# for docker endpoints, set defaults for
# Host and SkipTLSVerify fields
for k, v in metadata["Endpoints"].items():
if k != "docker":
continue
metadata["Endpoints"][k]["Host"] = v.get(
"Host", get_context_host(None, False)
)
metadata["Endpoints"][k]["SkipTLSVerify"] = bool(
v.get("SkipTLSVerify", True)
)
return metadata
def _load_certs(self) -> None:
certs = {}
tls_dir = get_tls_dir(self.name)
for endpoint in self.endpoints:
if not os.path.isdir(os.path.join(tls_dir, endpoint)):
continue
ca_cert = None
cert = None
key = None
for filename in os.listdir(os.path.join(tls_dir, endpoint)):
if filename.startswith("ca"):
ca_cert = os.path.join(tls_dir, endpoint, filename)
elif filename.startswith("cert"):
cert = os.path.join(tls_dir, endpoint, filename)
elif filename.startswith("key"):
key = os.path.join(tls_dir, endpoint, filename)
if all([cert, key]) or ca_cert:
verify = None
if endpoint == "docker" and not self.endpoints["docker"].get(
"SkipTLSVerify", False
):
verify = True
certs[endpoint] = TLSConfig(
client_cert=(cert, key) if cert and key else None,
ca_cert=ca_cert,
verify=verify,
)
self.tls_cfg = certs
self.tls_path = tls_dir
def save(self) -> None:
meta_dir = get_meta_dir(self.name)
if not os.path.isdir(meta_dir):
os.makedirs(meta_dir)
with open(get_meta_file(self.name), "wt", encoding="utf-8") as f:
f.write(json.dumps(self.Metadata))
tls_dir = get_tls_dir(self.name)
for endpoint, tls in self.tls_cfg.items():
if not os.path.isdir(os.path.join(tls_dir, endpoint)):
os.makedirs(os.path.join(tls_dir, endpoint))
ca_file = tls.ca_cert
if ca_file:
copyfile(
ca_file, os.path.join(tls_dir, endpoint, os.path.basename(ca_file))
)
if tls.cert:
cert_file, key_file = tls.cert
copyfile(
cert_file,
os.path.join(tls_dir, endpoint, os.path.basename(cert_file)),
)
copyfile(
key_file,
os.path.join(tls_dir, endpoint, os.path.basename(key_file)),
)
self.meta_path = get_meta_dir(self.name)
self.tls_path = get_tls_dir(self.name)
def remove(self) -> None:
if os.path.isdir(self.meta_path):
rmtree(self.meta_path)
if os.path.isdir(self.tls_path):
rmtree(self.tls_path)
def __repr__(self) -> str:
return f"<{self.__class__.__name__}: '{self.name}'>"
def __str__(self) -> str:
return json.dumps(self.__call__(), indent=2)
def __call__(self) -> dict[str, t.Any]:
result = self.Metadata
result.update(self.TLSMaterial)
result.update(self.Storage)
return result
def is_docker_host(self) -> bool:
return self.context_type is None
@property
def Name(self) -> str: # pylint: disable=invalid-name
return self.name
@property
def Host(self) -> str | None: # pylint: disable=invalid-name
if not self.orchestrator or self.orchestrator == "swarm":
endpoint = self.endpoints.get("docker", None)
if endpoint:
return endpoint.get("Host", None) # type: ignore
return None
return self.endpoints[self.orchestrator].get("Host", None) # type: ignore
@property
def Orchestrator(self) -> str | None: # pylint: disable=invalid-name
return self.orchestrator
@property
def Metadata(self) -> dict[str, t.Any]: # pylint: disable=invalid-name
meta: dict[str, t.Any] = {}
if self.orchestrator:
meta = {"StackOrchestrator": self.orchestrator}
return {"Name": self.name, "Metadata": meta, "Endpoints": self.endpoints}
@property
def TLSConfig(self) -> TLSConfig | None: # pylint: disable=invalid-name
key = self.orchestrator
if not key or key == "swarm":
key = "docker"
if key in self.tls_cfg:
return self.tls_cfg[key]
return None
@property
def TLSMaterial(self) -> dict[str, t.Any]: # pylint: disable=invalid-name
certs: dict[str, t.Any] = {}
for endpoint, tls in self.tls_cfg.items():
paths = [tls.ca_cert, *tls.cert] if tls.cert else [tls.ca_cert]
certs[endpoint] = [
os.path.basename(path) if path else None for path in paths
]
return {"TLSMaterial": certs}
@property
def Storage(self) -> dict[str, t.Any]: # pylint: disable=invalid-name
return {"Storage": {"MetadataPath": self.meta_path, "TLSPath": self.tls_path}}


@@ -0,0 +1,17 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
PROGRAM_PREFIX = "docker-credential-"
DEFAULT_LINUX_STORE = "secretservice"
DEFAULT_OSX_STORE = "osxkeychain"
DEFAULT_WIN32_STORE = "wincred"


@@ -0,0 +1,38 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import typing as t
if t.TYPE_CHECKING:
from subprocess import CalledProcessError
class StoreError(RuntimeError):
pass
class CredentialsNotFound(StoreError):
pass
class InitializationError(StoreError):
pass
def process_store_error(cpe: CalledProcessError, program: str) -> StoreError:
message = cpe.output.decode("utf-8")
if "credentials not found in native keychain" in message:
return CredentialsNotFound(f"No matching credentials in {program}")
return StoreError(
f'Credentials store {program} exited with "{cpe.output.decode("utf-8").strip()}".'
)


@@ -0,0 +1,102 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import errno
import json
import subprocess
import typing as t
from . import constants, errors
from .utils import create_environment_dict, find_executable
class Store:
def __init__(self, program: str, environment: dict[str, str] | None = None) -> None:
"""Create a store object that acts as an interface to
perform the basic operations for storing, retrieving
and erasing credentials using `program`.
"""
self.program = constants.PROGRAM_PREFIX + program
self.exe = find_executable(self.program)
self.environment = environment
if self.exe is None:
raise errors.InitializationError(
f"{self.program} not installed or not available in PATH"
)
def get(self, server: str | bytes) -> dict[str, t.Any]:
"""Retrieve credentials for `server`. If no credentials are found,
a `StoreError` will be raised.
"""
if not isinstance(server, bytes):
server = server.encode("utf-8")
data = self._execute("get", server)
result = json.loads(data.decode("utf-8"))
# docker-credential-pass will return an object for nonexistent servers
# whereas other helpers will exit with returncode != 0. For
# consistency, if no significant data is returned,
# raise CredentialsNotFound
if result["Username"] == "" and result["Secret"] == "":
raise errors.CredentialsNotFound(
f"No matching credentials in {self.program}"
)
return result
def store(self, server: str, username: str, secret: str) -> bytes:
"""Store credentials for `server`. Raises a `StoreError` if an error
occurs.
"""
data_input = json.dumps(
{"ServerURL": server, "Username": username, "Secret": secret}
).encode("utf-8")
return self._execute("store", data_input)
def erase(self, server: str | bytes) -> None:
"""Erase credentials for `server`. Raises a `StoreError` if an error
occurs.
"""
if not isinstance(server, bytes):
server = server.encode("utf-8")
self._execute("erase", server)
def list(self) -> t.Any:
"""List stored credentials. Requires v0.4.0+ of the helper."""
data = self._execute("list", None)
return json.loads(data.decode("utf-8"))
def _execute(self, subcmd: str, data_input: bytes | None) -> bytes:
if self.exe is None:
raise errors.StoreError(
f"{self.program} not installed or not available in PATH"
)
output = None
env = create_environment_dict(self.environment)
try:
output = subprocess.check_output(
[self.exe, subcmd],
input=data_input,
env=env,
)
except subprocess.CalledProcessError as e:
raise errors.process_store_error(e, self.program) from e
except OSError as e:
if e.errno == errno.ENOENT:
raise errors.StoreError(
f"{self.program} not installed or not available in PATH"
) from e
raise errors.StoreError(
f'Unexpected OS error "{e.strerror}", errno={e.errno}'
) from e
return output
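# Hypothetical usage (assumes a credential helper such as
# docker-credential-secretservice is installed and on PATH):
#
#     store = Store("secretservice")
#     store.store("https://index.docker.io/v1/", "user", "s3cr3t")
#     store.get("https://index.docker.io/v1/")
#     # -> {"ServerURL": "https://index.docker.io/v1/",
#     #     "Username": "user", "Secret": "s3cr3t"}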


@@ -0,0 +1,35 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import os
from shutil import which
def find_executable(executable: str, path: str | None = None) -> str | None:
"""
Like distutils.spawn.find_executable, but on Windows looks up
every extension declared in PATHEXT instead of just `.exe`.
"""
# shutil.which() already uses PATHEXT on Windows, so on
# Python 3 we can simply use shutil.which() in all cases.
# (https://github.com/docker/docker-py/commit/42789818bed5d86b487a030e2e60b02bf0cfa284)
return which(executable, path=path)
def create_environment_dict(overrides: dict[str, str] | None) -> dict[str, str]:
"""
Create and return a copy of os.environ with the specified overrides
"""
result = os.environ.copy()
result.update(overrides or {})
return result


@@ -0,0 +1,244 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import typing as t
from ansible.module_utils.common.text.converters import to_text
from ._import_helper import HTTPError as _HTTPError
if t.TYPE_CHECKING:
from requests import Response
class DockerException(Exception):
"""
A base class from which all other exceptions inherit.
If you want to catch all errors that the Docker SDK might raise,
catch this base exception.
"""
def create_api_error_from_http_exception(e: _HTTPError) -> t.NoReturn:
"""
Create a suitable APIError from requests.exceptions.HTTPError.
"""
response = e.response
try:
explanation = response.json()["message"]
except ValueError:
explanation = to_text((response.content or "").strip())
cls = APIError
if response.status_code == 404:
if explanation and (
"No such image" in str(explanation)
or "not found: does not exist or no pull access" in str(explanation)
or "repository does not exist" in str(explanation)
):
cls = ImageNotFound
else:
cls = NotFound
raise cls(e, response=response, explanation=explanation) from e
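# In short: a 404 whose message mentions a missing image maps to ImageNotFound,
# any other 404 maps to NotFound, and everything else stays a plain APIError.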
class APIError(_HTTPError, DockerException):
"""
An HTTP error from the API.
"""
def __init__(
self,
message: str | Exception,
response: Response | None = None,
explanation: str | None = None,
) -> None:
# requests 1.2 supports response as a keyword argument, but
# requests 1.1 does not
super().__init__(message)
self.response = response
self.explanation = explanation or ""
def __str__(self) -> str:
message = super().__str__()
if self.is_client_error():
message = f"{self.response.status_code} Client Error for {self.response.url}: {self.response.reason}"
elif self.is_server_error():
message = f"{self.response.status_code} Server Error for {self.response.url}: {self.response.reason}"
if self.explanation:
message = f'{message} ("{self.explanation}")'
return message
@property
def status_code(self) -> int | None:
if self.response is not None:
return self.response.status_code
return None
def is_error(self) -> bool:
return self.is_client_error() or self.is_server_error()
def is_client_error(self) -> bool:
if self.status_code is None:
return False
return 400 <= self.status_code < 500
def is_server_error(self) -> bool:
if self.status_code is None:
return False
return 500 <= self.status_code < 600
class NotFound(APIError):
pass
class ImageNotFound(NotFound):
pass
class InvalidVersion(DockerException):
pass
class InvalidRepository(DockerException):
pass
class InvalidConfigFile(DockerException):
pass
class InvalidArgument(DockerException):
pass
class DeprecatedMethod(DockerException):
pass
class TLSParameterError(DockerException):
def __init__(self, msg: str) -> None:
self.msg = msg
def __str__(self) -> str:
return self.msg + (
". TLS configurations should map the Docker CLI "
"client configurations. See "
"https://docs.docker.com/engine/articles/https/ "
"for API details."
)
class NullResource(DockerException, ValueError):
pass
class ContainerError(DockerException):
"""
Represents a container that has exited with a non-zero exit code.
"""
def __init__(
self,
container: str,
exit_status: int,
command: list[str],
image: str,
stderr: str | None,
):
self.container = container
self.exit_status = exit_status
self.command = command
self.image = image
self.stderr = stderr
err = f": {stderr}" if stderr is not None else ""
msg = f"Command '{command}' in image '{image}' returned non-zero exit status {exit_status}{err}"
super().__init__(msg)
class StreamParseError(RuntimeError):
def __init__(self, reason: Exception) -> None:
self.msg = reason
class BuildError(DockerException):
def __init__(self, reason: str, build_log: str) -> None:
super().__init__(reason)
self.msg = reason
self.build_log = build_log
class ImageLoadError(DockerException):
pass
def create_unexpected_kwargs_error(name: str, kwargs: dict[str, t.Any]) -> TypeError:
quoted_kwargs = [f"'{k}'" for k in sorted(kwargs)]
text = [f"{name}() "]
if len(quoted_kwargs) == 1:
text.append("got an unexpected keyword argument ")
else:
text.append("got unexpected keyword arguments ")
text.append(", ".join(quoted_kwargs))
return TypeError("".join(text))
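# Example of the resulting message (keyword names are sorted):
#
#     create_unexpected_kwargs_error("run", {"foo": 1, "bar": 2})
#     # -> TypeError("run() got unexpected keyword arguments 'bar', 'foo'")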
class MissingContextParameter(DockerException):
def __init__(self, param: str) -> None:
self.param = param
def __str__(self) -> str:
return f"missing parameter: {self.param}"
class ContextAlreadyExists(DockerException):
def __init__(self, name: str) -> None:
self.name = name
def __str__(self) -> str:
return f"context {self.name} already exists"
class ContextException(DockerException):
def __init__(self, msg: str) -> None:
self.msg = msg
def __str__(self) -> str:
return self.msg
class ContextNotFound(DockerException):
def __init__(self, name: str) -> None:
self.name = name
def __str__(self) -> str:
return f"context '{self.name}' not found"
class MissingRequirementException(DockerException):
def __init__(
self, msg: str, requirement: str, import_exception: ImportError | str
) -> None:
self.msg = msg
self.requirement = requirement
self.import_exception = import_exception
def __str__(self) -> str:
return self.msg


@@ -0,0 +1,107 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import os
import typing as t
from . import errors
from .transport.ssladapter import SSLHTTPAdapter
if t.TYPE_CHECKING:
from ansible_collections.community.docker.plugins.module_utils._api.api.client import (
APIClient,
)
class TLSConfig:
"""
TLS configuration.
Args:
client_cert (tuple of str): Path to client cert, path to client key.
ca_cert (str): Path to CA cert file.
verify (bool or str): This can be ``False`` or a path to a CA cert
file.
assert_hostname (bool): Verify the hostname of the server.
"""
cert: tuple[str, str] | None = None
ca_cert: str | None = None
verify: bool | None = None
def __init__(
self,
client_cert: tuple[str, str] | None = None,
ca_cert: str | None = None,
verify: bool | None = None,
assert_hostname: bool | None = None,
):
# Argument compatibility/mapping with
# https://docs.docker.com/engine/articles/https/
# This diverges from the Docker CLI in that users can specify 'tls'
# here, but also disable any public/default CA pool verification by
# leaving verify=False
self.assert_hostname = assert_hostname
# "client_cert" must have both or neither cert/key files. In
# either case, Alert the user when both are expected, but any are
# missing.
if client_cert:
try:
tls_cert, tls_key = client_cert
except ValueError:
raise errors.TLSParameterError(
"client_cert must be a tuple of (client certificate, key file)"
) from None
if not (tls_cert and tls_key) or (
not os.path.isfile(tls_cert) or not os.path.isfile(tls_key)
):
raise errors.TLSParameterError(
"Path to a certificate and key files must be provided"
" through the client_cert param"
)
self.cert = (tls_cert, tls_key)
# If verify is set, make sure the cert exists
self.verify = verify
self.ca_cert = ca_cert
if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
raise errors.TLSParameterError(
"Invalid CA certificate provided for `ca_cert`."
)
def configure_client(self, client: APIClient) -> None:
"""
Configure a client with these TLS options.
"""
if self.verify and self.ca_cert:
client.verify = self.ca_cert
else:
client.verify = self.verify
if self.cert:
client.cert = self.cert
client.mount(
"https://",
SSLHTTPAdapter(
assert_hostname=self.assert_hostname,
),
)
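# Illustrative construction (a sketch; the paths are placeholders and must
# exist on disk, otherwise TLSParameterError is raised):
#
#     tls_config = TLSConfig(
#         client_cert=("/certs/cert.pem", "/certs/key.pem"),
#         ca_cert="/certs/ca.pem",
#         verify=True,
#     )
#     tls_config.configure_client(client)  # client: an APIClient instance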


@@ -0,0 +1,35 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
from .._import_helper import HTTPAdapter as _HTTPAdapter
class BaseHTTPAdapter(_HTTPAdapter):
def close(self) -> None:
# pylint finds our HTTPAdapter stub instead of requests.adapters.HTTPAdapter:
# pylint: disable-next=no-member
super().close()
if hasattr(self, "pools"):
self.pools.clear()
# Hotfix for requests 2.32.0 and 2.32.1: its commit
# https://github.com/psf/requests/commit/c0813a2d910ea6b4f8438b91d315b8d181302356
# changes requests.adapters.HTTPAdapter to no longer call get_connection() from
# send(), but instead call _get_connection().
def _get_connection(self, request, *args, **kwargs): # type: ignore
return self.get_connection(request.url, kwargs.get("proxies"))
# Fix for requests 2.32.2+:
# https://github.com/psf/requests/commit/c98e4d133ef29c46a9b68cd783087218a8075e05
def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None): # type: ignore
return self.get_connection(request.url, proxies)


@@ -0,0 +1,123 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import typing as t
from queue import Empty
from .. import constants
from .._import_helper import HTTPAdapter, urllib3, urllib3_connection
from .basehttpadapter import BaseHTTPAdapter
from .npipesocket import NpipeSocket
if t.TYPE_CHECKING:
from collections.abc import Mapping
from requests import PreparedRequest
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class NpipeHTTPConnection(urllib3_connection.HTTPConnection):
def __init__(self, npipe_path: str, timeout: int | float = 60) -> None:
super().__init__("localhost", timeout=timeout)
self.npipe_path = npipe_path
self.timeout = timeout
def connect(self) -> None:
sock = NpipeSocket()
sock.settimeout(self.timeout)
sock.connect(self.npipe_path)
self.sock = sock
class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(
self, npipe_path: str, timeout: int | float = 60, maxsize: int = 10
) -> None:
super().__init__("localhost", timeout=timeout, maxsize=maxsize)
self.npipe_path = npipe_path
self.timeout = timeout
def _new_conn(self) -> NpipeHTTPConnection:
return NpipeHTTPConnection(self.npipe_path, self.timeout)
# When re-using connections, urllib3 tries to call select() on our
# NpipeSocket instance, causing a crash. To circumvent this, we override
# _get_conn, where that check happens.
def _get_conn(self, timeout: int | float) -> NpipeHTTPConnection:
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError as exc: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from exc
except Empty as exc:
if self.block:
raise urllib3.exceptions.EmptyPoolError(
self,
"Pool reached maximum size and no more connections are allowed.",
) from exc
# Oh well, we'll create a new connection then
return conn or self._new_conn()
class NpipeHTTPAdapter(BaseHTTPAdapter):
__attrs__ = HTTPAdapter.__attrs__ + [
"npipe_path",
"pools",
"timeout",
"max_pool_size",
]
def __init__(
self,
base_url: str,
timeout: int | float = 60,
pool_connections: int = constants.DEFAULT_NUM_POOLS,
max_pool_size: int = constants.DEFAULT_MAX_POOL_SIZE,
) -> None:
self.npipe_path = base_url.replace("npipe://", "")
self.timeout = timeout
self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
super().__init__()
def get_connection(
self, url: str | bytes, proxies: Mapping[str, str] | None = None
) -> NpipeHTTPConnectionPool:
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
pool = NpipeHTTPConnectionPool(
self.npipe_path, self.timeout, maxsize=self.max_pool_size
)
self.pools[url] = pool
return pool
def request_url(
self, request: PreparedRequest, proxies: Mapping[str, str] | None
) -> str:
# The select_proxy utility in requests errors out when the provided URL
# does not have a hostname, like is the case when using a UNIX socket.
# Since proxies are an irrelevant notion in the case of UNIX sockets
# anyway, we simply return the path URL directly.
# See also: https://github.com/docker/docker-sdk-python/issues/811
return request.path_url
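# Hypothetical usage on Windows (the "http+docker://" mount prefix is an
# assumption for illustration; the actual prefix is chosen by the API client):
#
#     session = Session()
#     session.mount(
#         "http+docker://", NpipeHTTPAdapter("npipe:////./pipe/docker_engine")
#     )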


@@ -0,0 +1,277 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import functools
import io
import time
import traceback
import typing as t
PYWIN32_IMPORT_ERROR: str | None # pylint: disable=invalid-name
try:
import pywintypes
import win32api
import win32event
import win32file
import win32pipe
except ImportError:
PYWIN32_IMPORT_ERROR = traceback.format_exc() # pylint: disable=invalid-name
else:
PYWIN32_IMPORT_ERROR = None # pylint: disable=invalid-name
if t.TYPE_CHECKING:
from collections.abc import Buffer, Callable
_Self = t.TypeVar("_Self")
_P = t.ParamSpec("_P")
_R = t.TypeVar("_R")
ERROR_PIPE_BUSY = 0xE7
SECURITY_SQOS_PRESENT = 0x100000
SECURITY_ANONYMOUS = 0
MAXIMUM_RETRY_COUNT = 10
def check_closed(
f: Callable[t.Concatenate[_Self, _P], _R],
) -> Callable[t.Concatenate[_Self, _P], _R]:
@functools.wraps(f)
def wrapped(self: _Self, *args: _P.args, **kwargs: _P.kwargs) -> _R:
if self._closed: # type: ignore
raise RuntimeError("Cannot reuse socket after connection was closed.")
return f(self, *args, **kwargs)
return wrapped
class NpipeSocket:
"""Partial implementation of the socket API over windows named pipes.
This implementation is only designed to be used as a client socket,
and server-specific methods (bind, listen, accept...) are not
implemented.
"""
def __init__(self, handle: t.Any | None = None) -> None:
self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
self._handle = handle
self._address: str | None = None
self._closed = False
self.flags: int | None = None
def accept(self) -> t.NoReturn:
raise NotImplementedError()
def bind(self, address: t.Any) -> t.NoReturn:
raise NotImplementedError()
def close(self) -> None:
if self._handle is None:
raise ValueError("Handle not present")
self._handle.Close()
self._closed = True
@check_closed
def connect(self, address: str, retry_count: int = 0) -> None:
try:
handle = win32file.CreateFile(
address,
win32file.GENERIC_READ | win32file.GENERIC_WRITE,
0,
None,
win32file.OPEN_EXISTING,
(
SECURITY_ANONYMOUS
| SECURITY_SQOS_PRESENT
| win32file.FILE_FLAG_OVERLAPPED
),
0,
)
except win32pipe.error as e:
# See Remarks:
# https://msdn.microsoft.com/en-us/library/aa365800.aspx
if e.winerror == ERROR_PIPE_BUSY:
# Another program or thread has grabbed our pipe instance
# before we got to it. Wait for availability and attempt to
# connect again.
retry_count = retry_count + 1
if retry_count < MAXIMUM_RETRY_COUNT:
time.sleep(1)
return self.connect(address, retry_count)
raise e
self.flags = win32pipe.GetNamedPipeInfo(handle)[0] # type: ignore
self._handle = handle
self._address = address
@check_closed
def connect_ex(self, address: str) -> None:
self.connect(address)
@check_closed
def detach(self) -> t.Any:
self._closed = True
return self._handle
@check_closed
def dup(self) -> NpipeSocket:
return NpipeSocket(self._handle)
def getpeername(self) -> str | None:
return self._address
def getsockname(self) -> str | None:
return self._address
def getsockopt(
self, level: t.Any, optname: t.Any, buflen: t.Any = None
) -> t.NoReturn:
raise NotImplementedError()
def ioctl(self, control: t.Any, option: t.Any) -> t.NoReturn:
raise NotImplementedError()
def listen(self, backlog: t.Any) -> t.NoReturn:
raise NotImplementedError()
def makefile(self, mode: str, bufsize: int | None = None) -> t.IO[bytes]:
if mode.strip("b") != "r":
raise NotImplementedError()
rawio = NpipeFileIOBase(self)
if bufsize is None or bufsize <= 0:
bufsize = io.DEFAULT_BUFFER_SIZE
return io.BufferedReader(rawio, buffer_size=bufsize)
@check_closed
    def recv(self, bufsize: int, flags: int = 0) -> bytes:
if self._handle is None:
raise ValueError("Handle not present")
dummy_err, data = win32file.ReadFile(self._handle, bufsize)
return data
@check_closed
    def recvfrom(self, bufsize: int, flags: int = 0) -> tuple[bytes, str | None]:
data = self.recv(bufsize, flags)
return (data, self._address)
@check_closed
def recvfrom_into(
self, buf: Buffer, nbytes: int = 0, flags: int = 0
) -> tuple[int, str | None]:
return self.recv_into(buf, nbytes), self._address
@check_closed
def recv_into(self, buf: Buffer, nbytes: int = 0) -> int:
if self._handle is None:
raise ValueError("Handle not present")
readbuf = buf if isinstance(buf, memoryview) else memoryview(buf)
event = win32event.CreateEvent(None, True, True, None)
try:
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = event
dummy_err, dummy_data = win32file.ReadFile( # type: ignore
self._handle, readbuf[:nbytes] if nbytes else readbuf, overlapped
)
wait_result = win32event.WaitForSingleObject(event, self._timeout)
if wait_result == win32event.WAIT_TIMEOUT:
win32file.CancelIo(self._handle)
raise TimeoutError
return win32file.GetOverlappedResult(self._handle, overlapped, 0)
finally:
win32api.CloseHandle(event)
@check_closed
def send(self, string: Buffer, flags: int = 0) -> int:
if self._handle is None:
raise ValueError("Handle not present")
event = win32event.CreateEvent(None, True, True, None)
try:
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = event
win32file.WriteFile(self._handle, string, overlapped) # type: ignore
wait_result = win32event.WaitForSingleObject(event, self._timeout)
if wait_result == win32event.WAIT_TIMEOUT:
win32file.CancelIo(self._handle)
raise TimeoutError
return win32file.GetOverlappedResult(self._handle, overlapped, 0)
finally:
win32api.CloseHandle(event)
@check_closed
def sendall(self, string: Buffer, flags: int = 0) -> int:
return self.send(string, flags)
@check_closed
def sendto(self, string: Buffer, address: str) -> int:
self.connect(address)
return self.send(string)
def setblocking(self, flag: bool) -> None:
if flag:
return self.settimeout(None)
return self.settimeout(0)
def settimeout(self, value: int | float | None) -> None:
if value is None:
# Blocking mode
self._timeout = win32event.INFINITE
elif not isinstance(value, (float, int)) or value < 0:
raise ValueError("Timeout value out of range")
else:
# Timeout mode - Value converted to milliseconds
self._timeout = int(value * 1000)
def gettimeout(self) -> int | float | None:
return self._timeout
def setsockopt(self, level: t.Any, optname: t.Any, value: t.Any) -> t.NoReturn:
raise NotImplementedError()
@check_closed
def shutdown(self, how: t.Any) -> None:
return self.close()
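# Usage sketch (illustrative only; requires pywin32 on Windows, and the pipe
# path shown is Docker Desktop's conventional default, not something this
# file defines):
#
#     sock = NpipeSocket()
#     sock.settimeout(30)
#     sock.connect(r"\\.\pipe\docker_engine")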
class NpipeFileIOBase(io.RawIOBase):
def __init__(self, npipe_socket: NpipeSocket | None) -> None:
self.sock = npipe_socket
def close(self) -> None:
super().close()
self.sock = None
def fileno(self) -> int:
if self.sock is None:
raise RuntimeError("socket is closed")
# TODO: This is definitely a bug, NpipeSocket.fileno() does not exist!
return self.sock.fileno() # type: ignore
def isatty(self) -> bool:
return False
def readable(self) -> bool:
return True
def readinto(self, buf: Buffer) -> int:
if self.sock is None:
raise RuntimeError("socket is closed")
return self.sock.recv_into(buf)
def seekable(self) -> bool:
return False
def writable(self) -> bool:
return False

View file

@ -0,0 +1,311 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import logging
import os
import signal
import socket
import subprocess
import traceback
import typing as t
from queue import Empty
from urllib.parse import urlparse
from .. import constants
from .._import_helper import HTTPAdapter, urllib3, urllib3_connection
from .basehttpadapter import BaseHTTPAdapter
PARAMIKO_IMPORT_ERROR: str | None # pylint: disable=invalid-name
try:
import paramiko
except ImportError:
PARAMIKO_IMPORT_ERROR = traceback.format_exc() # pylint: disable=invalid-name
else:
PARAMIKO_IMPORT_ERROR = None # pylint: disable=invalid-name
if t.TYPE_CHECKING:
from collections.abc import Buffer, Mapping
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class SSHSocket(socket.socket):
def __init__(self, host: str) -> None:
super().__init__(socket.AF_INET, socket.SOCK_STREAM)
self.host = host
self.port = None
self.user = None
if ":" in self.host:
self.host, self.port = self.host.split(":")
if "@" in self.host:
self.user, self.host = self.host.split("@")
self.proc: subprocess.Popen | None = None
def connect(self, *args_: t.Any, **kwargs: t.Any) -> None:
args = ["ssh"]
if self.user:
args = args + ["-l", self.user]
if self.port:
args = args + ["-p", self.port]
args = args + ["--", self.host, "docker system dial-stdio"]
preexec_func = None
if not constants.IS_WINDOWS_PLATFORM:
def f() -> None:
signal.signal(signal.SIGINT, signal.SIG_IGN)
preexec_func = f
env = dict(os.environ)
# drop LD_LIBRARY_PATH and SSL_CERT_FILE
env.pop("LD_LIBRARY_PATH", None)
env.pop("SSL_CERT_FILE", None)
self.proc = subprocess.Popen( # pylint: disable=consider-using-with
args,
env=env,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
preexec_fn=preexec_func,
)
def _write(self, data: Buffer) -> int:
if not self.proc:
raise RuntimeError(
"SSH subprocess not initiated. connect() must be called first."
)
assert self.proc.stdin is not None
if self.proc.stdin.closed:
raise RuntimeError(
"SSH subprocess not initiated. connect() must be called first after close()."
)
written = self.proc.stdin.write(data)
self.proc.stdin.flush()
return written
def sendall(self, data: Buffer, *args: t.Any, **kwargs: t.Any) -> None:
self._write(data)
def send(self, data: Buffer, *args: t.Any, **kwargs: t.Any) -> int:
return self._write(data)
def recv(self, n: int, *args: t.Any, **kwargs: t.Any) -> bytes:
if not self.proc:
raise RuntimeError(
"SSH subprocess not initiated. connect() must be called first."
)
assert self.proc.stdout is not None
return self.proc.stdout.read(n)
def makefile(self, mode: str, *args: t.Any, **kwargs: t.Any) -> t.IO: # type: ignore
if not self.proc:
self.connect()
assert self.proc is not None
assert self.proc.stdout is not None
self.proc.stdout.channel = self # type: ignore
return self.proc.stdout
def close(self) -> None:
if not self.proc:
return
assert self.proc.stdin is not None
if self.proc.stdin.closed:
return
self.proc.stdin.write(b"\n\n")
self.proc.stdin.flush()
self.proc.terminate()
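# Illustrative sketch of how SSHSocket parses its address and what connect()
# spawns (user and host below are placeholders):
#
#     sock = SSHSocket("admin@build-host:2222")
#     # sock.user == "admin", sock.host == "build-host", sock.port == "2222"
#     sock.connect()
#     # runs: ssh -l admin -p 2222 -- build-host "docker system dial-stdio"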
class SSHConnection(urllib3_connection.HTTPConnection):
def __init__(
self,
*,
ssh_transport: paramiko.Transport | None = None,
timeout: int | float = 60,
host: str,
) -> None:
super().__init__("localhost", timeout=timeout)
self.ssh_transport = ssh_transport
self.timeout = timeout
self.ssh_host = host
self.sock: paramiko.Channel | SSHSocket | None = None
def connect(self) -> None:
if self.ssh_transport:
channel = self.ssh_transport.open_session()
channel.settimeout(self.timeout)
channel.exec_command("docker system dial-stdio")
self.sock = channel
else:
sock = SSHSocket(self.ssh_host)
sock.settimeout(self.timeout)
sock.connect()
self.sock = sock
class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
scheme = "ssh"
def __init__(
self,
*,
ssh_client: paramiko.SSHClient | None = None,
timeout: int | float = 60,
maxsize: int = 10,
host: str,
) -> None:
super().__init__("localhost", timeout=timeout, maxsize=maxsize)
self.ssh_transport: paramiko.Transport | None = None
self.timeout = timeout
if ssh_client:
self.ssh_transport = ssh_client.get_transport()
self.ssh_host = host
def _new_conn(self) -> SSHConnection:
return SSHConnection(
ssh_transport=self.ssh_transport,
timeout=self.timeout,
host=self.ssh_host,
)
# When re-using connections, urllib3 calls fileno() on our
# SSH channel instance, quickly overloading our fd limit. To avoid this,
# we override _get_conn
def _get_conn(self, timeout: int | float) -> SSHConnection:
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError as exc: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from exc
except Empty as exc:
if self.block:
raise urllib3.exceptions.EmptyPoolError(
self,
"Pool reached maximum size and no more connections are allowed.",
) from exc
# Oh well, we'll create a new connection then
return conn or self._new_conn()
class SSHHTTPAdapter(BaseHTTPAdapter):
__attrs__ = HTTPAdapter.__attrs__ + [
"pools",
"timeout",
"ssh_client",
"ssh_params",
"max_pool_size",
]
def __init__(
self,
base_url: str,
timeout: int | float = 60,
pool_connections: int = constants.DEFAULT_NUM_POOLS,
max_pool_size: int = constants.DEFAULT_MAX_POOL_SIZE,
shell_out: bool = False,
) -> None:
self.ssh_client: paramiko.SSHClient | None = None
if not shell_out:
self._create_paramiko_client(base_url)
self._connect()
self.ssh_host = base_url
if base_url.startswith("ssh://"):
self.ssh_host = base_url[len("ssh://") :]
self.timeout = timeout
self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
super().__init__()
def _create_paramiko_client(self, base_url: str) -> None:
logging.getLogger("paramiko").setLevel(logging.WARNING)
self.ssh_client = paramiko.SSHClient()
base_url_p = urlparse(base_url)
assert base_url_p.hostname is not None
self.ssh_params: dict[str, t.Any] = {
"hostname": base_url_p.hostname,
"port": base_url_p.port,
"username": base_url_p.username,
}
ssh_config_file = os.path.expanduser("~/.ssh/config")
if os.path.exists(ssh_config_file):
conf = paramiko.SSHConfig()
with open(ssh_config_file, "rt", encoding="utf-8") as f:
conf.parse(f)
host_config = conf.lookup(base_url_p.hostname)
if "proxycommand" in host_config:
self.ssh_params["sock"] = paramiko.ProxyCommand(
host_config["proxycommand"]
)
if "hostname" in host_config:
self.ssh_params["hostname"] = host_config["hostname"]
if base_url_p.port is None and "port" in host_config:
self.ssh_params["port"] = host_config["port"]
if base_url_p.username is None and "user" in host_config:
self.ssh_params["username"] = host_config["user"]
if "identityfile" in host_config:
self.ssh_params["key_filename"] = host_config["identityfile"]
self.ssh_client.load_system_host_keys()
self.ssh_client.set_missing_host_key_policy(paramiko.RejectPolicy())
def _connect(self) -> None:
if self.ssh_client:
self.ssh_client.connect(**self.ssh_params)
def get_connection(
self, url: str | bytes, proxies: Mapping[str, str] | None = None
) -> SSHConnectionPool:
if not self.ssh_client:
return SSHConnectionPool(
ssh_client=self.ssh_client,
timeout=self.timeout,
maxsize=self.max_pool_size,
host=self.ssh_host,
)
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
            # Connection is closed; try a reconnect
if self.ssh_client and not self.ssh_client.get_transport():
self._connect()
pool = SSHConnectionPool(
ssh_client=self.ssh_client,
timeout=self.timeout,
maxsize=self.max_pool_size,
host=self.ssh_host,
)
self.pools[url] = pool
return pool
def close(self) -> None:
super().close()
if self.ssh_client:
self.ssh_client.close()

View file

@ -0,0 +1,71 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import typing as t
from .._import_helper import HTTPAdapter, urllib3
from .basehttpadapter import BaseHTTPAdapter
# Resolves OpenSSL issues in some servers:
# https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
# https://github.com/kennethreitz/requests/pull/799
PoolManager = urllib3.poolmanager.PoolManager
class SSLHTTPAdapter(BaseHTTPAdapter):
"""An HTTPS Transport Adapter that uses an arbitrary SSL version."""
__attrs__ = HTTPAdapter.__attrs__ + ["assert_hostname"]
def __init__(
self,
assert_hostname: bool | None = None,
**kwargs: t.Any,
) -> None:
self.assert_hostname = assert_hostname
super().__init__(**kwargs)
def init_poolmanager(
self, connections: int, maxsize: int, block: bool = False, **kwargs: t.Any
) -> None:
kwargs = {
"num_pools": connections,
"maxsize": maxsize,
"block": block,
}
if self.assert_hostname is not None:
kwargs["assert_hostname"] = self.assert_hostname
self.poolmanager = PoolManager(**kwargs)
def get_connection(self, *args: t.Any, **kwargs: t.Any) -> urllib3.ConnectionPool:
"""
        Ensure assert_hostname is set correctly on our pool.
        We already take care of the normal poolmanager via init_poolmanager,
        but we still need to handle the case of a proxy poolmanager.
Note that this method is no longer called for newer requests versions.
"""
# pylint finds our HTTPAdapter stub instead of requests.adapters.HTTPAdapter:
# pylint: disable-next=no-member
conn = super().get_connection(*args, **kwargs)
if (
self.assert_hostname is not None
and conn.assert_hostname != self.assert_hostname # type: ignore
):
conn.assert_hostname = self.assert_hostname # type: ignore
return conn
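# Usage sketch: this adapter plugs into a plain requests Session; the
# hostname assertion is only forwarded to urllib3 when explicitly set
# (the session below is illustrative, not taken from this file):
#
#     from requests import Session
#     session = Session()
#     session.mount("https://", SSLHTTPAdapter(assert_hostname=False))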

View file

@ -0,0 +1,126 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import socket
import typing as t
from .. import constants
from .._import_helper import HTTPAdapter, urllib3, urllib3_connection
from .basehttpadapter import BaseHTTPAdapter
if t.TYPE_CHECKING:
from collections.abc import Mapping
from requests import PreparedRequest
from ..._socket_helper import SocketLike
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class UnixHTTPConnection(urllib3_connection.HTTPConnection):
def __init__(
self, base_url: str | bytes, unix_socket: str, timeout: int | float = 60
) -> None:
super().__init__("localhost", timeout=timeout)
self.base_url = base_url
self.unix_socket = unix_socket
self.timeout = timeout
self.disable_buffering = False
def connect(self) -> None:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
sock.connect(self.unix_socket)
self.sock = sock
def putheader(self, header: str, *values: str) -> None:
super().putheader(header, *values)
if header == "Connection" and "Upgrade" in values:
self.disable_buffering = True
def response_class(self, sock: SocketLike, *args: t.Any, **kwargs: t.Any) -> t.Any:
# FIXME: We may need to disable buffering on Py3,
# but there's no clear way to do it at the moment. See:
# https://github.com/docker/docker-py/issues/1799
return super().response_class(sock, *args, **kwargs)
class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(
self,
base_url: str | bytes,
socket_path: str,
timeout: int | float = 60,
maxsize: int = 10,
) -> None:
super().__init__("localhost", timeout=timeout, maxsize=maxsize)
self.base_url = base_url
self.socket_path = socket_path
self.timeout = timeout
def _new_conn(self) -> UnixHTTPConnection:
return UnixHTTPConnection(self.base_url, self.socket_path, self.timeout)
class UnixHTTPAdapter(BaseHTTPAdapter):
__attrs__ = HTTPAdapter.__attrs__ + [
"pools",
"socket_path",
"timeout",
"max_pool_size",
]
def __init__(
self,
socket_url: str,
timeout: int | float = 60,
pool_connections: int = constants.DEFAULT_NUM_POOLS,
max_pool_size: int = constants.DEFAULT_MAX_POOL_SIZE,
) -> None:
socket_path = socket_url.replace("http+unix://", "")
if not socket_path.startswith("/"):
socket_path = "/" + socket_path
self.socket_path = socket_path
self.timeout = timeout
self.max_pool_size = max_pool_size
def f(p: t.Any) -> None:
p.close()
self.pools = RecentlyUsedContainer(pool_connections, dispose_func=f)
super().__init__()
def get_connection(
self, url: str | bytes, proxies: Mapping[str, str] | None = None
) -> UnixHTTPConnectionPool:
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
pool = UnixHTTPConnectionPool(
url, self.socket_path, self.timeout, maxsize=self.max_pool_size
)
self.pools[url] = pool
return pool
def request_url(self, request: PreparedRequest, proxies: Mapping[str, str]) -> str:
# The select_proxy utility in requests errors out when the provided URL
        # does not have a hostname, as is the case when using a UNIX socket.
# Since proxies are an irrelevant notion in the case of UNIX sockets
# anyway, we simply return the path URL directly.
# See also: https://github.com/docker/docker-py/issues/811
return request.path_url
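# Usage sketch (illustrative; the scheme prefix is an assumption mirroring
# how the API client mounts this adapter, it is not defined in this file):
#
#     from requests import Session
#     session = Session()
#     session.mount("http+unix://", UnixHTTPAdapter("http+unix:///var/run/docker.sock"))
#     # request_url() above then drops the dummy host, so only the path
#     # (e.g. "/_ping") is sent over the UNIX socket.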

View file

@ -0,0 +1,90 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import socket
import typing as t
from .._import_helper import urllib3
from ..errors import DockerException
if t.TYPE_CHECKING:
from requests import Response
_T = t.TypeVar("_T")
class CancellableStream(t.Generic[_T]):
"""
Stream wrapper for real-time events, logs, etc. from the server.
Example:
>>> events = client.events()
>>> for event in events:
... print(event)
>>> # and cancel from another thread
>>> events.close()
"""
def __init__(self, stream: t.Generator[_T], response: Response) -> None:
self._stream = stream
self._response = response
def __iter__(self) -> t.Self:
return self
def __next__(self) -> _T:
try:
return next(self._stream)
except urllib3.exceptions.ProtocolError as exc:
raise StopIteration from exc
except socket.error as exc:
raise StopIteration from exc
next = __next__
def close(self) -> None:
"""
Closes the event streaming.
"""
if not self._response.raw.closed:
# find the underlying socket object
# based on api.client._get_raw_response_socket
sock_fp = self._response.raw._fp.fp # type: ignore
if hasattr(sock_fp, "raw"):
sock_raw = sock_fp.raw
if hasattr(sock_raw, "sock"):
sock = sock_raw.sock
elif hasattr(sock_raw, "_sock"):
sock = sock_raw._sock
elif hasattr(sock_fp, "channel"):
# We are working with a paramiko (SSH) channel, which does not
# support cancelable streams with the current implementation
raise DockerException(
"Cancellable streams not supported for the SSH protocol"
)
else:
sock = sock_fp._sock # type: ignore
if hasattr(urllib3.contrib, "pyopenssl") and isinstance(
sock, urllib3.contrib.pyopenssl.WrappedSocket
):
sock = sock.socket
sock.shutdown(socket.SHUT_RDWR)
sock.close()

View file

@ -0,0 +1,310 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import io
import os
import random
import re
import tarfile
import tempfile
import typing as t
from ..constants import IS_WINDOWS_PLATFORM, WINDOWS_LONGPATH_PREFIX
from . import fnmatch
if t.TYPE_CHECKING:
from collections.abc import Sequence
_SEP = re.compile("/|\\\\") if IS_WINDOWS_PLATFORM else re.compile("/")
def tar(
path: str,
exclude: list[str] | None = None,
dockerfile: tuple[str, str | None] | tuple[None, None] | None = None,
fileobj: t.IO[bytes] | None = None,
gzip: bool = False,
) -> t.IO[bytes]:
root = os.path.abspath(path)
exclude = exclude or []
dockerfile = dockerfile or (None, None)
extra_files: list[tuple[str, str]] = []
if dockerfile[1] is not None:
assert dockerfile[0] is not None
dockerignore_contents = "\n".join(
(exclude or [".dockerignore"]) + [dockerfile[0]]
)
extra_files = [
(".dockerignore", dockerignore_contents),
dockerfile, # type: ignore
]
return create_archive(
files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile[0])),
root=root,
fileobj=fileobj,
gzip=gzip,
extra_files=extra_files,
)
def exclude_paths(
root: str, patterns: list[str], dockerfile: str | None = None
) -> set[str]:
"""
    Given a root directory path and a list of .dockerignore patterns, return
    a set of all paths (both regular files and directories) in the root
    directory that do *not* match any of the patterns.
All paths returned are relative to the root.
"""
if dockerfile is None:
dockerfile = "Dockerfile"
patterns.append("!" + dockerfile)
pm = PatternMatcher(patterns)
return set(pm.walk(root))
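# Illustrative sketch, assuming a context directory containing app.py,
# app.pyc and Dockerfile: the implicit "!Dockerfile" pattern appended above
# keeps the Dockerfile even though nothing else matches it:
#
#     exclude_paths("/ctx", ["*.pyc"])
#     # -> {"app.py", "Dockerfile"}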
def build_file_list(root: str) -> list[str]:
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(longpath.replace(root, "", 1).lstrip("/"))
return files
def create_archive(
root: str,
files: Sequence[str] | None = None,
fileobj: t.IO[bytes] | None = None,
gzip: bool = False,
extra_files: Sequence[tuple[str, str]] | None = None,
) -> t.IO[bytes]:
extra_files = extra_files or []
if not fileobj:
# pylint: disable-next=consider-using-with
fileobj = tempfile.NamedTemporaryFile() # noqa: SIM115
with tarfile.open(mode="w:gz" if gzip else "w", fileobj=fileobj) as tarf:
if files is None:
files = build_file_list(root)
extra_names = set(e[0] for e in extra_files)
for path in files:
if path in extra_names:
# Extra files override context files with the same name
continue
full_path = os.path.join(root, path)
i = tarf.gettarinfo(full_path, arcname=path)
if i is None:
# This happens when we encounter a socket file. We can safely
# ignore it and proceed.
continue # type: ignore
# Workaround https://bugs.python.org/issue32713
if i.mtime < 0 or i.mtime > 8**11 - 1:
i.mtime = int(i.mtime)
if IS_WINDOWS_PLATFORM:
# Windows does not keep track of the execute bit, so we make files
# and directories executable by default.
i.mode = i.mode & 0o755 | 0o111
if i.isfile():
try:
with open(full_path, "rb") as f:
tarf.addfile(i, f)
except IOError as exc:
raise IOError(f"Can not read file in context: {full_path}") from exc
else:
# Directories, FIFOs, symlinks... do not need to be read.
tarf.addfile(i, None)
for name, contents in extra_files:
info = tarfile.TarInfo(name)
contents_encoded = contents.encode("utf-8")
info.size = len(contents_encoded)
tarf.addfile(info, io.BytesIO(contents_encoded))
fileobj.seek(0)
return fileobj
def mkbuildcontext(dockerfile: io.BytesIO | t.IO[bytes]) -> t.IO[bytes]:
# pylint: disable-next=consider-using-with
f = tempfile.NamedTemporaryFile() # noqa: SIM115
try:
with tarfile.open(mode="w", fileobj=f) as tarf:
if isinstance(dockerfile, io.StringIO): # type: ignore
raise TypeError("Please use io.BytesIO to create in-memory Dockerfiles")
if isinstance(dockerfile, io.BytesIO):
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
else:
dfinfo = tarf.gettarinfo(fileobj=dockerfile, arcname="Dockerfile")
tarf.addfile(dfinfo, dockerfile)
f.seek(0)
    except Exception:
f.close()
raise
return f
def split_path(p: str) -> list[str]:
return [pt for pt in re.split(_SEP, p) if pt and pt != "."]
def normalize_slashes(p: str) -> str:
if IS_WINDOWS_PLATFORM:
return "/".join(split_path(p))
return p
def walk(root: str, patterns: Sequence[str], default: bool = True) -> t.Generator[str]:
pm = PatternMatcher(patterns)
return pm.walk(root)
# Heavily based on
# https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go
class PatternMatcher:
def __init__(self, patterns: Sequence[str]) -> None:
self.patterns = list(filter(lambda p: p.dirs, [Pattern(p) for p in patterns]))
self.patterns.append(Pattern("!.dockerignore"))
def matches(self, filepath: str) -> bool:
matched = False
parent_path = os.path.dirname(filepath)
parent_path_dirs = split_path(parent_path)
for pattern in self.patterns:
negative = pattern.exclusion
match = pattern.match(filepath)
if (
not match
and parent_path != ""
and len(pattern.dirs) <= len(parent_path_dirs)
):
match = pattern.match(
os.path.sep.join(parent_path_dirs[: len(pattern.dirs)])
)
if match:
matched = not negative
return matched
def walk(self, root: str) -> t.Generator[str]:
def rec_walk(current_dir: str) -> t.Generator[str]:
for f in os.listdir(current_dir):
fpath = os.path.join(os.path.relpath(current_dir, root), f)
if fpath.startswith("." + os.path.sep):
fpath = fpath[2:]
match = self.matches(fpath)
if not match:
yield fpath
cur = os.path.join(root, fpath)
if not os.path.isdir(cur) or os.path.islink(cur):
continue
if match:
# If we want to skip this file and it is a directory
# then we should first check to see if there's an
# excludes pattern (e.g. !dir/file) that starts with this
# dir. If so then we cannot skip this dir.
skip = True
for pat in self.patterns:
if not pat.exclusion:
continue
if pat.cleaned_pattern.startswith(normalize_slashes(fpath)):
skip = False
break
if skip:
continue
yield from rec_walk(cur)
return rec_walk(root)
class Pattern:
def __init__(self, pattern_str: str) -> None:
self.exclusion = False
if pattern_str.startswith("!"):
self.exclusion = True
pattern_str = pattern_str[1:]
self.dirs = self.normalize(pattern_str)
self.cleaned_pattern = "/".join(self.dirs)
@classmethod
def normalize(cls, p: str) -> list[str]:
# Remove trailing spaces
p = p.strip()
# Leading and trailing slashes are not relevant. Yes,
# "foo.py/" must exclude the "foo.py" regular file. "."
# components are not relevant either, even if the whole
# pattern is only ".", as the Docker reference states: "For
# historical reasons, the pattern . is ignored."
# ".." component must be cleared with the potential previous
# component, regardless of whether it exists: "A preprocessing
# step [...] eliminates . and .. elements using Go's
# filepath.".
i = 0
split = split_path(p)
while i < len(split):
if split[i] == "..":
del split[i]
if i > 0:
del split[i - 1]
i -= 1
else:
i += 1
return split
def match(self, filepath: str) -> bool:
return fnmatch.fnmatch(normalize_slashes(filepath), self.cleaned_pattern)
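# Illustrative sketch of Pattern semantics (values are what the classes
# above produce):
#
#     p = Pattern("!docs/**")
#     # p.exclusion == True, p.cleaned_pattern == "docs/**"
#     p.match("docs/readme.md")  # -> True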
def process_dockerfile(
dockerfile: str | None, path: str
) -> tuple[str, str | None] | tuple[None, None]:
if not dockerfile:
return (None, None)
abs_dockerfile = dockerfile
if not os.path.isabs(dockerfile):
abs_dockerfile = os.path.join(path, dockerfile)
if IS_WINDOWS_PLATFORM and path.startswith(WINDOWS_LONGPATH_PREFIX):
abs_dockerfile = f"{WINDOWS_LONGPATH_PREFIX}{os.path.normpath(abs_dockerfile[len(WINDOWS_LONGPATH_PREFIX) :])}"
if os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[
0
] or os.path.relpath(abs_dockerfile, path).startswith(".."):
# Dockerfile not in context - read data to insert into tar later
with open(abs_dockerfile, "rt", encoding="utf-8") as df:
return (f".dockerfile.{random.getrandbits(160):x}", df.read())
# Dockerfile is inside the context - return path relative to context root
if dockerfile == abs_dockerfile:
# Only calculate relpath if necessary to avoid errors
# on Windows client -> Linux Docker
# see https://github.com/docker/compose/issues/5969
dockerfile = os.path.relpath(abs_dockerfile, path)
return (dockerfile, None)

View file

@ -0,0 +1,89 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import json
import logging
import os
import typing as t
from ..constants import IS_WINDOWS_PLATFORM
DOCKER_CONFIG_FILENAME = os.path.join(".docker", "config.json")
LEGACY_DOCKER_CONFIG_FILENAME = ".dockercfg"
log = logging.getLogger(__name__)
def get_default_config_file() -> str:
return os.path.join(home_dir(), DOCKER_CONFIG_FILENAME)
def find_config_file(config_path: str | None = None) -> str | None:
homedir = home_dir()
paths = list(
filter(
None,
[
config_path, # 1
config_path_from_environment(), # 2
os.path.join(homedir, DOCKER_CONFIG_FILENAME), # 3
os.path.join(homedir, LEGACY_DOCKER_CONFIG_FILENAME), # 4
],
)
)
log.debug("Trying paths: %s", repr(paths))
for path in paths:
if os.path.exists(path):
log.debug("Found file at path: %s", path)
return path
log.debug("No config file found")
return None
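# Illustrative sketch of the precedence above (paths are hypothetical):
# with DOCKER_CONFIG=/opt/docker set and no explicit argument,
# find_config_file() returns "/opt/docker/config.json" if that file exists,
# then falls back to "~/.docker/config.json" and finally "~/.dockercfg":
#
#     find_config_file()  # -> "/opt/docker/config.json" (if it exists)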
def config_path_from_environment() -> str | None:
config_dir = os.environ.get("DOCKER_CONFIG")
if not config_dir:
return None
return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))
def home_dir() -> str:
"""
Get the user's home directory, using the same logic as the Docker Engine
client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.
"""
if IS_WINDOWS_PLATFORM:
return os.environ.get("USERPROFILE", "")
return os.path.expanduser("~")
def load_general_config(config_path: str | None = None) -> dict[str, t.Any]:
config_file = find_config_file(config_path)
if not config_file:
return {}
try:
with open(config_file, "rt", encoding="utf-8") as f:
return json.load(f)
except (IOError, ValueError) as e:
# In the case of a legacy `.dockercfg` file, we will not
# be able to load any JSON data.
log.debug(e)
log.debug("All parsing attempts failed - returning empty config")
return {}

View file

@ -0,0 +1,67 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import functools
import typing as t
from .. import errors
from . import utils
if t.TYPE_CHECKING:
from collections.abc import Callable
from ..api.client import APIClient
_Self = t.TypeVar("_Self")
_P = t.ParamSpec("_P")
_R = t.TypeVar("_R")
def minimum_version(
version: str,
) -> Callable[
[Callable[t.Concatenate[_Self, _P], _R]],
Callable[t.Concatenate[_Self, _P], _R],
]:
def decorator(
f: Callable[t.Concatenate[_Self, _P], _R],
) -> Callable[t.Concatenate[_Self, _P], _R]:
@functools.wraps(f)
def wrapper(self: _Self, *args: _P.args, **kwargs: _P.kwargs) -> _R:
# We use _Self instead of APIClient since this is used for mixins for APIClient.
# This unfortunately means that self._version does not exist in the mixin,
# it only exists after mixing in. This is why we ignore types here.
if utils.version_lt(self._version, version): # type: ignore
raise errors.InvalidVersion(
f"{f.__name__} is not available for version < {version}"
)
return f(self, *args, **kwargs)
return wrapper
return decorator
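# Usage sketch (the mixin, method name and version below are hypothetical):
# applied to an APIClient mixin method, the wrapper raises
# errors.InvalidVersion when the client's negotiated API version is older:
#
#     class ExecApiMixin:
#         @minimum_version("1.25")
#         def exec_start(self, exec_id):
#             ...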
def update_headers(
f: Callable[t.Concatenate[APIClient, _P], _R],
) -> Callable[t.Concatenate[APIClient, _P], _R]:
def inner(self: APIClient, *args: _P.args, **kwargs: _P.kwargs) -> _R:
if "HttpHeaders" in self._general_configs:
if not kwargs.get("headers"):
kwargs["headers"] = self._general_configs["HttpHeaders"]
else:
# We cannot (yet) model that kwargs["headers"] should be a dictionary
kwargs["headers"].update(self._general_configs["HttpHeaders"]) # type: ignore
return f(self, *args, **kwargs)
return inner

View file

@ -0,0 +1,128 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
"""Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case into account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""
from __future__ import annotations
import re
__all__ = ["fnmatch", "fnmatchcase", "translate"]
_cache: dict[str, re.Pattern] = {}
_MAXCACHE = 100
def _purge() -> None:
"""Clear the pattern cache"""
_cache.clear()
def fnmatch(name: str, pat: str) -> bool:
"""Test whether FILENAME matches PATTERN.
Patterns are Unix shell style:
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you do not want this, use fnmatchcase(FILENAME, PATTERN).
"""
name = name.lower()
pat = pat.lower()
return fnmatchcase(name, pat)
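# Illustrative sketch: fnmatch() lower-cases both arguments before
# delegating, so matching is effectively case-insensitive:
#
#     >>> fnmatch("Foo.PY", "*.py")
#     True
#     >>> fnmatchcase("Foo.PY", "*.py")
#     False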
def fnmatchcase(name: str, pat: str) -> bool:
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which does not case-normalize
its arguments.
"""
try:
re_pat = _cache[pat]
except KeyError:
res = translate(pat)
if len(_cache) >= _MAXCACHE:
_cache.clear()
_cache[pat] = re_pat = re.compile(res)
return re_pat.match(name) is not None
def translate(pat: str) -> str:
"""Translate a shell PATTERN to a regular expression.
There is no way to quote meta-characters.
"""
i, n = 0, len(pat)
res = "^"
while i < n:
c = pat[i]
i = i + 1
if c == "*":
if i < n and pat[i] == "*":
# is some flavor of "**"
i = i + 1
# Treat **/ as ** so eat the "/"
if i < n and pat[i] == "/":
i = i + 1
if i >= n:
# is "**EOF" - to align with .gitignore just accept all
res = res + ".*"
else:
# is "**"
# Note that this allows for any # of /'s (even 0) because
# the .* will eat everything, even /'s
res = res + "(.*/)?"
else:
# is "*" so map it to anything but "/"
res = res + "[^/]*"
elif c == "?":
# "?" is any char except "/"
res = res + "[^/]"
elif c == "[":
j = i
if j < n and pat[j] == "!":
j = j + 1
if j < n and pat[j] == "]":
j = j + 1
while j < n and pat[j] != "]":
j = j + 1
if j >= n:
res = res + "\\["
else:
stuff = pat[i:j].replace("\\", "\\\\")
i = j + 1
if stuff[0] == "!":
stuff = "^" + stuff[1:]
elif stuff[0] == "^":
stuff = "\\" + stuff
res = f"{res}[{stuff}]"
else:
res = res + re.escape(c)
return res + "$"
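# Illustrative sketch of the translation rules above:
#
#     >>> translate("*.py")
#     '^[^/]*\\.py$'
#     >>> translate("**/foo")
#     '^(.*/)?foo$'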

View file

@ -0,0 +1,100 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import json
import json.decoder
import typing as t
from ..errors import StreamParseError
if t.TYPE_CHECKING:
import re
from collections.abc import Callable
_T = t.TypeVar("_T")
json_decoder = json.JSONDecoder()
def stream_as_text(stream: t.Generator[bytes | str]) -> t.Generator[str]:
"""
    Given a stream of bytes or text, if any of the items in the stream
    are bytes, convert them to text.
This function can be removed once we return text streams
instead of byte streams.
"""
for data in stream:
if not isinstance(data, str):
data = data.decode("utf-8", "replace")
yield data
def json_splitter(buffer: str) -> tuple[t.Any, str] | None:
"""Attempt to parse a json object from a buffer. If there is at least one
object, return it and the rest of the buffer, otherwise return None.
"""
buffer = buffer.strip()
try:
obj, index = json_decoder.raw_decode(buffer)
ws: re.Pattern = json.decoder.WHITESPACE # type: ignore[attr-defined]
m = ws.match(buffer, index)
rest = buffer[m.end() :] if m else buffer[index:]
return obj, rest
except ValueError:
return None
def json_stream(stream: t.Generator[str | bytes]) -> t.Generator[t.Any]:
"""Given a stream of text, return a stream of json objects.
This handles streams which are inconsistently buffered (some entries may
be newline delimited, and others are not).
"""
return split_buffer(stream, json_splitter, json_decoder.decode)
def line_splitter(buffer: str, separator: str = "\n") -> tuple[str, str] | None:
index = buffer.find(str(separator))
if index == -1:
return None
return buffer[: index + 1], buffer[index + 1 :]
def split_buffer(
stream: t.Generator[str | bytes],
splitter: Callable[[str], tuple[_T, str] | None],
decoder: Callable[[str], _T],
) -> t.Generator[_T | str]:
"""Given a generator which yields strings and a splitter function,
joins all input, splits on the separator and yields each chunk.
Unlike string.split(), each chunk includes the trailing
separator, except for the last one if none was found on the end
of the input.
"""
buffered = ""
for data in stream_as_text(stream):
buffered += data
while True:
buffer_split = splitter(buffered)
if buffer_split is None:
break
item, buffered = buffer_split
yield item
if buffered:
try:
yield decoder(buffered)
except Exception as e:
raise StreamParseError(e) from e
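# Illustrative sketch: json_stream() reassembles objects that arrive split
# across chunks (bytes are decoded by stream_as_text() first):
#
#     >>> list(json_stream(iter([b'{"a": 1}\n{"b"', b': 2}\n'])))
#     [{'a': 1}, {'b': 2}]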

View file

@ -0,0 +1,136 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import re
import typing as t
if t.TYPE_CHECKING:
from collections.abc import Collection, Sequence
PORT_SPEC = re.compile(
"^" # Match full string
"(" # External part
r"(\[?(?P<host>[a-fA-F\d.:]+)\]?:)?" # Address
r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
")?"
r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
"(?P<proto>/(udp|tcp|sctp))?" # Protocol
"$" # Match full string
)
def add_port_mapping(
port_bindings: dict[str, list[str | tuple[str, str | None] | None]],
internal_port: str,
external: str | tuple[str, str | None] | None,
) -> None:
if internal_port in port_bindings:
port_bindings[internal_port].append(external)
else:
port_bindings[internal_port] = [external]
def add_port(
port_bindings: dict[str, list[str | tuple[str, str | None] | None]],
internal_port_range: list[str],
external_range: list[str] | list[tuple[str, str | None]] | None,
) -> None:
if external_range is None:
for internal_port in internal_port_range:
add_port_mapping(port_bindings, internal_port, None)
else:
for internal_port, external_port in zip(internal_port_range, external_range):
        # mypy loses the exact type of external_port elements for some reason...
add_port_mapping(port_bindings, internal_port, external_port) # type: ignore
def build_port_bindings(
ports: Collection[str],
) -> dict[str, list[str | tuple[str, str | None] | None]]:
port_bindings: dict[str, list[str | tuple[str, str | None] | None]] = {}
for port in ports:
internal_port_range, external_range = split_port(port)
add_port(port_bindings, internal_port_range, external_range)
return port_bindings
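# Illustrative sketch (output is what the helpers above produce):
#
#     >>> build_port_bindings(["127.0.0.1:8080:80", "90"])
#     {'80': [('127.0.0.1', '8080')], '90': [None]}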
def _raise_invalid_port(port: str) -> t.NoReturn:
raise ValueError(
f'Invalid port "{port}", should be '
"[[remote_ip:]remote_port[-remote_port]:]"
"port[/protocol]"
)
@t.overload
def port_range(
start: str,
end: str | None,
proto: str,
randomly_available_port: bool = False,
) -> list[str]: ...
@t.overload
def port_range(
start: str | None,
end: str | None,
proto: str,
randomly_available_port: bool = False,
) -> list[str] | None: ...
def port_range(
start: str | None,
end: str | None,
proto: str,
randomly_available_port: bool = False,
) -> list[str] | None:
    if start is None:
        return None
if end is None:
return [f"{start}{proto}"]
if randomly_available_port:
return [f"{start}-{end}{proto}"]
return [f"{port}{proto}" for port in range(int(start), int(end) + 1)]
def split_port(
port: str | int,
) -> tuple[list[str], list[str] | list[tuple[str, str | None]] | None]:
port = str(port)
match = PORT_SPEC.match(port)
if match is None:
_raise_invalid_port(port)
parts = match.groupdict()
host: str | None = parts["host"]
proto: str = parts["proto"] or ""
int_p: str = parts["int"]
ext_p: str = parts["ext"]
internal: list[str] = port_range(int_p, parts["int_end"], proto) # type: ignore
external = port_range(ext_p or None, parts["ext_end"], "", len(internal) == 1)
if host is None:
if (external is not None and len(internal) != len(external)) or ext_p == "":
raise ValueError("Port ranges don't match in length")
return internal, external
external_or_none: Sequence[str | None]
if not external:
external_or_none = [None] * len(internal)
else:
external_or_none = external
if len(internal) != len(external_or_none):
raise ValueError("Port ranges don't match in length")
return internal, [(host, ext_port) for ext_port in external_or_none]
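# Illustrative sketch of split_port() on a few common specs:
#
#     >>> split_port("127.0.0.1:8080:80/tcp")
#     (['80/tcp'], [('127.0.0.1', '8080')])
#     >>> split_port("8080:80")
#     (['80'], ['8080'])
#     >>> split_port(9000)
#     (['9000'], None)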

View file

@ -0,0 +1,98 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import typing as t
from .utils import format_environment
class ProxyConfig(dict):
"""
Hold the client's proxy configuration
"""
@property
def http(self) -> str | None:
return self.get("http")
@property
def https(self) -> str | None:
return self.get("https")
@property
def ftp(self) -> str | None:
return self.get("ftp")
@property
def no_proxy(self) -> str | None:
return self.get("no_proxy")
@staticmethod
def from_dict(config: dict[str, str]) -> ProxyConfig:
"""
Instantiate a new ProxyConfig from a dictionary that represents a
client configuration, as described in `the documentation`_.
.. _the documentation:
https://docs.docker.com/network/proxy/#configure-the-docker-client
"""
return ProxyConfig(
http=config.get("httpProxy"),
https=config.get("httpsProxy"),
ftp=config.get("ftpProxy"),
no_proxy=config.get("noProxy"),
)
def get_environment(self) -> dict[str, str]:
"""
Return a dictionary representing the environment variables used to
set the proxy settings.
"""
env = {}
if self.http:
env["http_proxy"] = env["HTTP_PROXY"] = self.http
if self.https:
env["https_proxy"] = env["HTTPS_PROXY"] = self.https
if self.ftp:
env["ftp_proxy"] = env["FTP_PROXY"] = self.ftp
if self.no_proxy:
env["no_proxy"] = env["NO_PROXY"] = self.no_proxy
return env
@t.overload
def inject_proxy_environment(self, environment: list[str]) -> list[str]: ...
@t.overload
def inject_proxy_environment(
self, environment: list[str] | None
) -> list[str] | None: ...
def inject_proxy_environment(
self, environment: list[str] | None
) -> list[str] | None:
"""
Given a list of strings representing environment variables, prepend the
environment variables corresponding to the proxy settings.
"""
if not self:
return environment
proxy_env = format_environment(self.get_environment())
if not environment:
return proxy_env
# It is important to prepend our variables, because we want the
# variables defined in "environment" to take precedence.
return proxy_env + environment
def __str__(self) -> str:
return f"ProxyConfig(http={self.http}, https={self.https}, ftp={self.ftp}, no_proxy={self.no_proxy})"

View file

@ -0,0 +1,242 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import errno
import os
import select
import socket as pysocket
import struct
import typing as t
from ..transport.npipesocket import NpipeSocket
if t.TYPE_CHECKING:
from collections.abc import Sequence
from ..._socket_helper import SocketLike
STDOUT = 1
STDERR = 2
class SocketError(Exception):
pass
# NpipeSockets have their own error types
# pywintypes.error: (109, 'ReadFile', 'The pipe has been ended.')
NPIPE_ENDED = 109
def read(socket: SocketLike, n: int = 4096) -> bytes | None:
"""
Reads at most n bytes from socket
"""
recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
if not isinstance(socket, NpipeSocket): # type: ignore[unreachable]
if not hasattr(select, "poll"):
            # select.select() is limited to 1024 file descriptors
select.select([socket], [], [])
else:
poll = select.poll()
poll.register(socket, select.POLLIN | select.POLLPRI)
poll.poll()
try:
if hasattr(socket, "recv"):
return socket.recv(n)
if isinstance(socket, pysocket.SocketIO): # type: ignore
return socket.read(n) # type: ignore[unreachable]
return os.read(socket.fileno(), n)
except EnvironmentError as e:
if e.errno not in recoverable_errors:
raise
return None # TODO ???
except Exception as e:
is_pipe_ended = (
isinstance(socket, NpipeSocket) # type: ignore[unreachable]
and len(e.args) > 0
and e.args[0] == NPIPE_ENDED
)
if is_pipe_ended:
# npipes do not support duplex sockets, so we interpret
# a PIPE_ENDED error as a close operation (0-length read).
return b""
raise
def read_exactly(socket: SocketLike, n: int) -> bytes:
"""
    Reads exactly n bytes from socket.
    Raises SocketError if there is not enough data.
"""
data = b""
while len(data) < n:
next_data = read(socket, n - len(data))
if not next_data:
raise SocketError("Unexpected EOF")
data += next_data
return data
def next_frame_header(socket: SocketLike) -> tuple[int, int]:
"""
Returns the stream and size of the next frame of data waiting to be read
from socket, according to the protocol defined here:
https://docs.docker.com/engine/api/v1.24/#attach-to-a-container
"""
try:
data = read_exactly(socket, 8)
except SocketError:
return (-1, -1)
stream, actual = struct.unpack(">BxxxL", data)
return (stream, actual)
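# Illustrative sketch: a frame header is one stream byte, three padding
# bytes, and a big-endian 32-bit payload length:
#
#     >>> struct.unpack(">BxxxL", b"\x01\x00\x00\x00\x00\x00\x00\x05")
#     (1, 5)
#     # i.e. five bytes of STDOUT payload follow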
def frames_iter(socket: SocketLike, tty: bool) -> t.Generator[tuple[int, bytes]]:
"""
Return a generator of frames read from socket. A frame is a tuple where
the first item is the stream number and the second item is a chunk of data.
If the tty setting is enabled, the streams are multiplexed into the stdout
stream.
"""
if tty:
return ((STDOUT, frame) for frame in frames_iter_tty(socket))
return frames_iter_no_tty(socket)
def frames_iter_no_tty(socket: SocketLike) -> t.Generator[tuple[int, bytes]]:
"""
Returns a generator of data read from the socket when the tty setting is
not enabled.
"""
while True:
(stream, n) = next_frame_header(socket)
if n < 0:
break
while n > 0:
result = read(socket, n)
if result is None:
continue
data_length = len(result)
if data_length == 0:
# We have reached EOF
return
n -= data_length
yield (stream, result)
def frames_iter_tty(socket: SocketLike) -> t.Generator[bytes]:
"""
Return a generator of data read from the socket when the tty setting is
enabled.
"""
while True:
result = read(socket)
if not result:
# We have reached EOF
return
yield result
@t.overload
def consume_socket_output(
frames: Sequence[bytes] | t.Generator[bytes], demux: t.Literal[False] = False
) -> bytes: ...
@t.overload
def consume_socket_output(
frames: (
Sequence[tuple[bytes | None, bytes | None]]
| t.Generator[tuple[bytes | None, bytes | None]]
),
demux: t.Literal[True],
) -> tuple[bytes, bytes]: ...
@t.overload
def consume_socket_output(
frames: (
Sequence[bytes]
| Sequence[tuple[bytes | None, bytes | None]]
| t.Generator[bytes]
| t.Generator[tuple[bytes | None, bytes | None]]
),
demux: bool = False,
) -> bytes | tuple[bytes, bytes]: ...
def consume_socket_output(
frames: (
Sequence[bytes]
| Sequence[tuple[bytes | None, bytes | None]]
| t.Generator[bytes]
| t.Generator[tuple[bytes | None, bytes | None]]
),
demux: bool = False,
) -> bytes | tuple[bytes, bytes]:
"""
Iterate through frames read from the socket and return the result.
Args:
demux (bool):
If False, stdout and stderr are multiplexed, and the result is the
concatenation of all the frames. If True, the streams are
demultiplexed, and the result is a 2-tuple where each item is the
concatenation of frames belonging to the same stream.
"""
if demux is False:
        # If the streams are multiplexed, the generator yields byte strings
        # that we just need to concatenate.
return b"".join(frames) # type: ignore
# If the streams are demultiplexed, the generator yields tuples
# (stdout, stderr)
out: list[bytes | None] = [None, None]
frame: tuple[bytes | None, bytes | None]
for frame in frames: # type: ignore
# It is guaranteed that for each frame, one and only one stream
# is not None.
if frame == (None, None):
raise AssertionError(f"frame must be (None, None), but got {frame}")
if frame[0] is not None:
if out[0] is None:
out[0] = frame[0]
else:
out[0] += frame[0]
else:
if out[1] is None:
out[1] = frame[1]
else:
out[1] += frame[1] # type: ignore[operator]
return tuple(out) # type: ignore
def demux_adaptor(stream_id: int, data: bytes) -> tuple[bytes | None, bytes | None]:
"""
Utility to demultiplex stdout and stderr when reading frames from the
socket.
"""
if stream_id == STDOUT:
return (data, None)
if stream_id == STDERR:
return (None, data)
raise ValueError(f"{stream_id} is not a valid stream")

View file

@ -0,0 +1,519 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import base64
import collections
import json
import os
import os.path
import shlex
import string
import typing as t
from urllib.parse import urlparse, urlunparse
from ansible_collections.community.docker.plugins.module_utils._version import (
StrictVersion,
)
from .. import errors
from ..constants import (
BYTE_UNITS,
DEFAULT_HTTP_HOST,
DEFAULT_NPIPE,
DEFAULT_UNIX_SOCKET,
)
from ..tls import TLSConfig
if t.TYPE_CHECKING:
from collections.abc import Mapping, Sequence
URLComponents = collections.namedtuple(
"URLComponents",
"scheme netloc url params query fragment",
)
def decode_json_header(header: str | bytes) -> dict[str, t.Any]:
data = base64.b64decode(header).decode("utf-8")
return json.loads(data)
def compare_version(v1: str, v2: str) -> t.Literal[-1, 0, 1]:
"""Compare docker versions
>>> v1 = '1.9'
>>> v2 = '1.10'
>>> compare_version(v1, v2)
1
>>> compare_version(v2, v1)
-1
>>> compare_version(v2, v2)
0
"""
s1 = StrictVersion(v1)
s2 = StrictVersion(v2)
if s1 == s2:
return 0
if s1 > s2:
return -1
return 1
def version_lt(v1: str, v2: str) -> bool:
return compare_version(v1, v2) > 0
def version_gte(v1: str, v2: str) -> bool:
return not version_lt(v1, v2)
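# Illustrative sketch: note the inverted sign convention of
# compare_version(), as documented in its docstring above:
#
#     >>> compare_version("1.9", "1.10")
#     1
#     >>> version_lt("1.9", "1.10")
#     True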
def _convert_port_binding(
binding: (
tuple[str, str | int | None]
| tuple[str | int | None]
| dict[str, str]
| str
| int
),
) -> dict[str, str]:
result = {"HostIp": "", "HostPort": ""}
host_port: str | int | None = ""
if isinstance(binding, tuple):
if len(binding) == 2:
host_port = binding[1] # type: ignore
result["HostIp"] = binding[0]
elif isinstance(binding[0], str):
result["HostIp"] = binding[0]
else:
host_port = binding[0]
elif isinstance(binding, dict):
if "HostPort" in binding:
host_port = binding["HostPort"]
if "HostIp" in binding:
result["HostIp"] = binding["HostIp"]
else:
raise ValueError(binding)
else:
host_port = binding
result["HostPort"] = str(host_port) if host_port is not None else ""
return result
def convert_port_bindings(
port_bindings: dict[
str | int,
tuple[str, str | int | None]
| tuple[str | int | None]
| dict[str, str]
| str
| int
| list[
tuple[str, str | int | None]
| tuple[str | int | None]
| dict[str, str]
| str
| int
],
],
) -> dict[str, list[dict[str, str]]]:
result = {}
for k, v in port_bindings.items():
key = str(k)
if "/" not in key:
key += "/tcp"
if isinstance(v, list):
result[key] = [_convert_port_binding(binding) for binding in v]
else:
result[key] = [_convert_port_binding(v)]
return result
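# Illustrative sketch (output is the API-shaped structure built above):
#
#     >>> convert_port_bindings({8080: ("127.0.0.1", 80)})
#     {'8080/tcp': [{'HostIp': '127.0.0.1', 'HostPort': '80'}]}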
def convert_volume_binds(
binds: (
list[str]
| Mapping[
str | bytes, dict[str, str | bytes] | dict[str, str] | bytes | str | int
]
),
) -> list[str]:
if isinstance(binds, list):
return binds # type: ignore
result = []
for k, v in binds.items():
if isinstance(k, bytes):
k = k.decode("utf-8")
if isinstance(v, dict):
if "ro" in v and "mode" in v:
raise ValueError(f'Binding cannot contain both "ro" and "mode": {v!r}')
bind = v["bind"]
if isinstance(bind, bytes):
bind = bind.decode("utf-8")
if "ro" in v:
mode = "ro" if v["ro"] else "rw"
elif "mode" in v:
mode = v["mode"] # type: ignore # TODO
else:
mode = "rw"
# NOTE: this is only relevant for Linux hosts
# (does not apply in Docker Desktop)
propagation_modes = [
"rshared",
"shared",
"rslave",
"slave",
"rprivate",
"private",
]
if "propagation" in v and v["propagation"] in propagation_modes:
if mode:
mode = ",".join([mode, v["propagation"]]) # type: ignore # TODO
else:
mode = v["propagation"] # type: ignore # TODO
result.append(f"{k}:{bind}:{mode}")
else:
if isinstance(v, bytes):
v = v.decode("utf-8")
result.append(f"{k}:{v}:rw")
return result
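# Illustrative example (paths invented): dict-style binds are rendered as
# "host:container:mode" strings, with "rw" as the default mode:
#
#     convert_volume_binds({"/srv/logs": {"bind": "/var/log", "mode": "ro"}})
#     # -> ['/srv/logs:/var/log:ro']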
def convert_tmpfs_mounts(tmpfs: dict[str, str] | list[str]) -> dict[str, str]:
if isinstance(tmpfs, dict):
return tmpfs
if not isinstance(tmpfs, list):
raise ValueError(
f"Expected tmpfs value to be either a list or a dict, found: {type(tmpfs).__name__}"
)
result = {}
for mount in tmpfs:
if isinstance(mount, str):
if ":" in mount:
name, options = mount.split(":", 1)
else:
name = mount
options = ""
else:
raise ValueError(
f"Expected item in tmpfs list to be a string, found: {type(mount).__name__}"
)
result[name] = options
return result
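# Example (mount points invented): list entries are split on the first ":"
# into mount point and options; entries without options map to "":
#
#     convert_tmpfs_mounts(["/run:size=64m", "/tmp"])
#     # -> {'/run': 'size=64m', '/tmp': ''}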
def convert_service_networks(
networks: list[str | dict[str, str]],
) -> list[dict[str, str]]:
if not networks:
return networks # type: ignore
if not isinstance(networks, list):
raise TypeError("networks parameter must be a list.")
result = []
for n in networks:
if isinstance(n, str):
n = {"Target": n}
result.append(n)
return result
def parse_repository_tag(repo_name: str) -> tuple[str, str | None]:
parts = repo_name.rsplit("@", 1)
if len(parts) == 2:
return tuple(parts) # type: ignore
parts = repo_name.rsplit(":", 1)
if len(parts) == 2 and "/" not in parts[1]:
return tuple(parts) # type: ignore
return repo_name, None
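# Illustrative behavior (image names invented): a trailing ":tag" is only
# split off when it cannot be a registry port, and "@digest" wins over ":":
#
#     parse_repository_tag("ubuntu:20.04")           # -> ('ubuntu', '20.04')
#     parse_repository_tag("localhost:5000/ubuntu")  # -> ('localhost:5000/ubuntu', None)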
def parse_host(addr: str | None, is_win32: bool = False, tls: bool = False) -> str:
# Sensible defaults
if not addr and is_win32:
return DEFAULT_NPIPE
if not addr or addr.strip() == "unix://":
return DEFAULT_UNIX_SOCKET
addr = addr.strip()
parsed_url = urlparse(addr)
proto = parsed_url.scheme
if not proto or any(x not in string.ascii_letters + "+" for x in proto):
# https://bugs.python.org/issue754016
parsed_url = urlparse("//" + addr, "tcp")
proto = "tcp"
if proto == "fd":
raise errors.DockerException("fd protocol is not implemented")
# These protos are valid aliases for our library but not for the
# official spec
if proto in ("http", "https"):
tls = proto == "https"
proto = "tcp"
elif proto == "http+unix":
proto = "unix"
if proto not in ("tcp", "unix", "npipe", "ssh"):
raise errors.DockerException(f"Invalid bind address protocol: {addr}")
if proto == "tcp" and not parsed_url.netloc:
# "tcp://" is exceptionally disallowed by convention;
# omitting a hostname for other protocols is fine
raise errors.DockerException(f"Invalid bind address format: {addr}")
if any(
[parsed_url.params, parsed_url.query, parsed_url.fragment, parsed_url.password]
):
raise errors.DockerException(f"Invalid bind address format: {addr}")
if parsed_url.path and proto == "ssh":
raise errors.DockerException(
f"Invalid bind address format: no path allowed for this protocol: {addr}"
)
path = parsed_url.path
if proto == "unix" and parsed_url.hostname is not None:
# For legacy reasons, we consider unix://path
# to be valid and equivalent to unix:///path
path = f"{parsed_url.hostname}/{path}"
netloc = parsed_url.netloc
if proto in ("tcp", "ssh"):
port = parsed_url.port or 0
if port <= 0:
            port = 22 if proto == "ssh" else (2376 if tls else 2375)
netloc = f"{parsed_url.netloc}:{port}"
if not parsed_url.hostname:
netloc = f"{DEFAULT_HTTP_HOST}:{port}"
# Rewrite schemes to fit library internals (requests adapters)
if proto == "tcp":
proto = f"http{'s' if tls else ''}"
elif proto == "unix":
proto = "http+unix"
if proto in ("http+unix", "npipe"):
return f"{proto}://{path}".rstrip("/")
return urlunparse(
URLComponents(
scheme=proto,
netloc=netloc,
url=path,
params="",
query="",
fragment="",
)
).rstrip("/")
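# A few hedged examples of the normalization above (addresses invented);
# empty input falls back to DEFAULT_UNIX_SOCKET on non-Windows hosts:
#
#     parse_host("tcp://127.0.0.1")            # -> 'http://127.0.0.1:2375'
#     parse_host("tcp://127.0.0.1", tls=True)  # -> 'https://127.0.0.1:2376'
#     parse_host("ssh://example.com")          # -> 'ssh://example.com:22'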
def parse_devices(devices: Sequence[dict[str, str] | str]) -> list[dict[str, str]]:
device_list = []
for device in devices:
if isinstance(device, dict):
device_list.append(device)
continue
if not isinstance(device, str):
raise errors.DockerException(f"Invalid device type {type(device)}")
device_mapping = device.split(":")
if device_mapping:
path_on_host = device_mapping[0]
if len(device_mapping) > 1:
path_in_container = device_mapping[1]
else:
path_in_container = path_on_host
if len(device_mapping) > 2:
permissions = device_mapping[2]
else:
permissions = "rwm"
device_list.append(
{
"PathOnHost": path_on_host,
"PathInContainer": path_in_container,
"CgroupPermissions": permissions,
}
)
return device_list
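# Example (device paths invented): "host[:container[:permissions]]" strings
# expand with the host path and "rwm" as defaults:
#
#     parse_devices(["/dev/snd", "/dev/sda:/dev/xvda:r"])
#     # -> [{'PathOnHost': '/dev/snd', 'PathInContainer': '/dev/snd',
#     #      'CgroupPermissions': 'rwm'},
#     #     {'PathOnHost': '/dev/sda', 'PathInContainer': '/dev/xvda',
#     #      'CgroupPermissions': 'r'}]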
def kwargs_from_env(
assert_hostname: bool | None = None,
environment: Mapping[str, str] | None = None,
) -> dict[str, t.Any]:
if not environment:
environment = os.environ
host = environment.get("DOCKER_HOST")
# empty string for cert path is the same as unset.
cert_path = environment.get("DOCKER_CERT_PATH") or None
# empty string for tls verify counts as "false".
    # Any other value counts as true; leaving it unset counts as false.
tls_verify_str = environment.get("DOCKER_TLS_VERIFY")
if tls_verify_str == "":
tls_verify = False
else:
tls_verify = tls_verify_str is not None
enable_tls = cert_path or tls_verify
params: dict[str, t.Any] = {}
if host:
params["base_url"] = host
if not enable_tls:
return params
if not cert_path:
cert_path = os.path.join(os.path.expanduser("~"), ".docker")
if not tls_verify and assert_hostname is None:
# assert_hostname is a subset of TLS verification,
# so if it is not set already then set it to false.
assert_hostname = False
params["tls"] = TLSConfig(
client_cert=(
os.path.join(cert_path, "cert.pem"),
os.path.join(cert_path, "key.pem"),
),
ca_cert=os.path.join(cert_path, "ca.pem"),
verify=tls_verify,
assert_hostname=assert_hostname,
)
return params
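# A minimal sketch of the resulting kwargs, assuming an invented environment;
# the TLSConfig certificate paths are derived from DOCKER_CERT_PATH as above:
#
#     kwargs_from_env(environment={
#         "DOCKER_HOST": "tcp://192.0.2.10:2376",
#         "DOCKER_TLS_VERIFY": "1",
#         "DOCKER_CERT_PATH": "/home/user/.docker",
#     })
#     # -> {'base_url': 'tcp://192.0.2.10:2376', 'tls': TLSConfig(...)}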
def convert_filters(
filters: Mapping[str, bool | str | int | list[int] | list[str] | list[str | int]],
) -> str:
result = {}
for k, v in filters.items():
if isinstance(v, bool):
v = "true" if v else "false"
if not isinstance(v, list):
v = [
v,
]
result[k] = [str(item) if not isinstance(item, str) else item for item in v]
return json.dumps(result)
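# Example (filter values invented): booleans are lowered to "true"/"false"
# and scalars wrapped in lists, matching the JSON shape the Engine API
# expects for filter parameters:
#
#     convert_filters({"dangling": True, "label": ["stage=prod"]})
#     # -> '{"dangling": ["true"], "label": ["stage=prod"]}'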
def parse_bytes(s: int | float | str) -> int | float:
if isinstance(s, (int, float)):
return s
if len(s) == 0:
return 0
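    # Strip the trailing "b"/"B" from two-letter suffixes such as "kb" or
    # "MB", so that only the multiplier character remains for the lookup.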
if s[-2:-1].isalpha() and s[-1].isalpha() and (s[-1] == "b" or s[-1] == "B"):
s = s[:-1]
units = BYTE_UNITS
suffix = s[-1].lower()
# Check if the variable is a string representation of an int
# without a units part. Assuming that the units are bytes.
if suffix.isdigit():
digits_part = s
suffix = "b"
else:
digits_part = s[:-1]
if suffix in units or suffix.isdigit():
try:
digits = float(digits_part)
except ValueError as exc:
raise errors.DockerException(
f"Failed converting the string value for memory ({digits_part}) to an integer."
) from exc
        # Convert back to an int for the final result
s = int(digits * units[suffix])
else:
raise errors.DockerException(
f"The specified value for memory ({s}) should specify the units. The postfix should be one of the `b` `k` `m` `g` characters"
)
return s
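# Examples (values invented; BYTE_UNITS uses binary multiples, so 1k == 1024):
#
#     parse_bytes("512m")  # -> 536870912
#     parse_bytes("1g")    # -> 1073741824
#     parse_bytes("1024")  # -> 1024 (a bare number is taken as bytes)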
def normalize_links(links: dict[str, str] | Sequence[tuple[str, str]]) -> list[str]:
if isinstance(links, dict):
sorted_links = sorted(links.items())
else:
sorted_links = sorted(links)
return [f"{k}:{v}" if v else k for k, v in sorted_links]
def parse_env_file(env_file: str | os.PathLike) -> dict[str, str]:
"""
Reads a line-separated environment file.
The format of each line should be "key=value".
"""
environment = {}
with open(env_file, "rt", encoding="utf-8") as f:
for line in f:
if line[0] == "#":
continue
line = line.strip()
if not line:
continue
parse_line = line.split("=", 1)
if len(parse_line) == 2:
k, v = parse_line
environment[k] = v
else:
raise errors.DockerException(
f"Invalid line in environment file {env_file}:\n{line}"
)
return environment
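# Sketch with an invented file: given an env file containing
#
#     # database settings
#     DB_HOST=localhost
#
# parse_env_file() on that path returns {'DB_HOST': 'localhost'}.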
def split_command(command: str) -> list[str]:
return shlex.split(command)
def format_environment(environment: Mapping[str, str | bytes | None]) -> list[str]:
def format_env(key: str, value: str | bytes | None) -> str:
if value is None:
return key
if isinstance(value, bytes):
value = value.decode("utf-8")
return f"{key}={value}"
return [format_env(*var) for var in environment.items()]
def format_extra_hosts(extra_hosts: Mapping[str, str], task: bool = False) -> list[str]:
# Use format dictated by Swarm API if container is part of a task
if task:
return [f"{v} {k}" for k, v in sorted(extra_hosts.items())]
return [f"{k}:{v}" for k, v in sorted(extra_hosts.items())]