Vendor Galaxy Roles and Collections
Some checks failed
Ansible Lint (push): failing after 5m45s
Ansible Lint (pull_request): failing after 4m59s

Commit 2aed20393f by Stefan Bethke, 2026-02-06 22:07:16 +01:00
3553 changed files with 387444 additions and 2 deletions


@@ -0,0 +1,102 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import traceback
import typing as t
REQUESTS_IMPORT_ERROR: str | None # pylint: disable=invalid-name
try:
from requests import Session # noqa: F401, pylint: disable=unused-import
from requests.adapters import ( # noqa: F401, pylint: disable=unused-import
HTTPAdapter,
)
from requests.exceptions import ( # noqa: F401, pylint: disable=unused-import
HTTPError,
InvalidSchema,
)
except ImportError:
REQUESTS_IMPORT_ERROR = traceback.format_exc() # pylint: disable=invalid-name
class Session: # type: ignore
__attrs__: list[t.Never] = []
class HTTPAdapter: # type: ignore
__attrs__: list[t.Never] = []
class HTTPError(Exception): # type: ignore
pass
class InvalidSchema(Exception): # type: ignore
pass
else:
REQUESTS_IMPORT_ERROR = None # pylint: disable=invalid-name
URLLIB3_IMPORT_ERROR: str | None = None # pylint: disable=invalid-name
try:
from requests.packages import urllib3 # pylint: disable=unused-import
from requests.packages.urllib3 import ( # type: ignore # pylint: disable=unused-import # isort: skip
connection as urllib3_connection,
)
except ImportError:
try:
import urllib3 # pylint: disable=unused-import
from urllib3 import (
connection as urllib3_connection, # pylint: disable=unused-import
)
except ImportError:
URLLIB3_IMPORT_ERROR = traceback.format_exc() # pylint: disable=invalid-name
class _HTTPConnectionPool:
pass
class _HTTPConnection:
pass
class FakeURLLIB3:
def __init__(self) -> None:
self._collections = self
self.poolmanager = self
self.connection = self
self.connectionpool = self
self.RecentlyUsedContainer = object() # pylint: disable=invalid-name
self.PoolManager = object() # pylint: disable=invalid-name
self.match_hostname = object()
self.HTTPConnectionPool = ( # pylint: disable=invalid-name
_HTTPConnectionPool
)
class FakeURLLIB3Connection:
def __init__(self) -> None:
self.HTTPConnection = _HTTPConnection # pylint: disable=invalid-name
urllib3 = FakeURLLIB3()
urllib3_connection = FakeURLLIB3Connection()
def fail_on_missing_imports() -> None:
if REQUESTS_IMPORT_ERROR is not None:
from .errors import MissingRequirementException # pylint: disable=cyclic-import
raise MissingRequirementException(
"You have to install requests", "requests", REQUESTS_IMPORT_ERROR
)
if URLLIB3_IMPORT_ERROR is not None:
from .errors import MissingRequirementException # pylint: disable=cyclic-import
raise MissingRequirementException(
"You have to install urllib3", "urllib3", URLLIB3_IMPORT_ERROR
)
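
The guard above captures the import failure instead of raising at module load; consumers call fail_on_missing_imports() right before first use and get a MissingRequirementException carrying the original traceback. A minimal consumer sketch (the collection-internal import path is the one this vendored layout implies):

# Sketch only: defer the hard failure until requests is actually needed.
from ansible_collections.community.docker.plugins.module_utils._api._import_helper import (
    Session,
    fail_on_missing_imports,
)

def make_session():
    fail_on_missing_imports()  # raises MissingRequirementException if requests/urllib3 are absent
    return Session()  # the real requests.Session, not the stub, once imports succeeded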

File diff suppressed because it is too large.


@@ -0,0 +1,406 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import base64
import json
import logging
import typing as t
from . import errors
from .credentials.errors import CredentialsNotFound, StoreError
from .credentials.store import Store
from .utils import config
if t.TYPE_CHECKING:
from ansible_collections.community.docker.plugins.module_utils._api.api.client import (
APIClient,
)
INDEX_NAME = "docker.io"
INDEX_URL = f"https://index.{INDEX_NAME}/v1/"
TOKEN_USERNAME = "<token>"
log = logging.getLogger(__name__)
def resolve_repository_name(repo_name: str) -> tuple[str, str]:
if "://" in repo_name:
raise errors.InvalidRepository(
f"Repository name cannot contain a scheme ({repo_name})"
)
index_name, remote_name = split_repo_name(repo_name)
if index_name[0] == "-" or index_name[-1] == "-":
raise errors.InvalidRepository(
f"Invalid index name ({index_name}). Cannot begin or end with a hyphen."
)
return resolve_index_name(index_name), remote_name
def resolve_index_name(index_name: str) -> str:
index_name = convert_to_hostname(index_name)
if index_name == "index." + INDEX_NAME:
index_name = INDEX_NAME
return index_name
def get_config_header(client: APIClient, registry: str) -> bytes | None:
log.debug("Looking for auth config")
if not client._auth_configs or client._auth_configs.is_empty:
log.debug("No auth config in memory - loading from filesystem")
client._auth_configs = load_config(credstore_env=client.credstore_env)
authcfg = resolve_authconfig(
client._auth_configs, registry, credstore_env=client.credstore_env
)
# Do not fail here if no authentication exists for this
# specific registry as we can have a readonly pull. Just
# put the header if we can.
if authcfg:
log.debug("Found auth config")
# auth_config needs to be a dict in the format used by auth.py:
# username, password, serveraddress, email
return encode_header(authcfg)
log.debug("No auth config found")
return None
def split_repo_name(repo_name: str) -> tuple[str, str]:
parts = repo_name.split("/", 1)
if len(parts) == 1 or (
"." not in parts[0] and ":" not in parts[0] and parts[0] != "localhost"
):
# This is a docker index repo (ex: username/foobar or ubuntu)
return INDEX_NAME, repo_name
return tuple(parts) # type: ignore
def get_credential_store(
authconfig: dict[str, t.Any] | AuthConfig, registry: str
) -> str | None:
if not isinstance(authconfig, AuthConfig):
authconfig = AuthConfig(authconfig)
return authconfig.get_credential_store(registry)
class AuthConfig(dict):
def __init__(
self, dct: dict[str, t.Any], credstore_env: dict[str, str] | None = None
):
if "auths" not in dct:
dct["auths"] = {}
self.update(dct)
self._credstore_env = credstore_env
self._stores: dict[str, Store] = {}
@classmethod
def parse_auth(
cls, entries: dict[str, dict[str, t.Any]], raise_on_error: bool = False
) -> dict[str, dict[str, t.Any]]:
"""
Parses authentication entries
Args:
entries: Dict of authentication entries.
raise_on_error: If set to true, an invalid format will raise
InvalidConfigFile
Returns:
Authentication registry.
"""
conf: dict[str, dict[str, t.Any]] = {}
for registry, entry in entries.items():
if not isinstance(entry, dict):
log.debug("Config entry for key %s is not auth config", registry) # type: ignore
# We sometimes fall back to parsing the whole config as if it
# was the auth config by itself, for legacy purposes. In that
# case, we fail silently and return an empty conf if any of the
# keys is not formatted properly.
if raise_on_error:
raise errors.InvalidConfigFile(
f"Invalid configuration for registry {registry}"
)
return {}
if "identitytoken" in entry:
log.debug("Found an IdentityToken entry for registry %s", registry)
conf[registry] = {"IdentityToken": entry["identitytoken"]}
continue # Other values are irrelevant if we have a token
if "auth" not in entry:
# Starting with engine v1.11 (API 1.23), an empty dictionary is
# a valid value in the auths config.
# https://github.com/docker/compose/issues/3265
log.debug(
"Auth data for %s is absent. Client might be using a credentials store instead.",
registry,
)
conf[registry] = {}
continue
username, password = decode_auth(entry["auth"])
log.debug(
"Found entry (registry=%s, username=%s)", repr(registry), repr(username)
)
conf[registry] = {
"username": username,
"password": password,
"email": entry.get("email"),
"serveraddress": registry,
}
return conf
@classmethod
def load_config(
cls,
config_path: str | None,
config_dict: dict[str, t.Any] | None,
credstore_env: dict[str, str] | None = None,
) -> t.Self:
"""
Loads authentication data from a Docker configuration file in the given
root directory or if config_path is passed use given path.
Lookup priority:
explicit config_path parameter > DOCKER_CONFIG environment
variable > ~/.docker/config.json > ~/.dockercfg
"""
if not config_dict:
config_file = config.find_config_file(config_path)
if not config_file:
return cls({}, credstore_env)
try:
with open(config_file, "rt", encoding="utf-8") as f:
config_dict = json.load(f)
except (IOError, KeyError, ValueError) as e:
# Likely missing new Docker config file or it is in an
# unknown format, continue to attempt to read old location
# and format.
log.debug(e)
return cls(_load_legacy_config(config_file), credstore_env)
res = {}
if config_dict.get("auths"):
log.debug("Found 'auths' section")
res.update(
{"auths": cls.parse_auth(config_dict.pop("auths"), raise_on_error=True)}
)
if config_dict.get("credsStore"):
log.debug("Found 'credsStore' section")
res.update({"credsStore": config_dict.pop("credsStore")})
if config_dict.get("credHelpers"):
log.debug("Found 'credHelpers' section")
res.update({"credHelpers": config_dict.pop("credHelpers")})
if res:
return cls(res, credstore_env)
log.debug(
"Could not find auth-related section ; attempting to interpret "
"as auth-only file"
)
return cls({"auths": cls.parse_auth(config_dict)}, credstore_env)
@property
def auths(self) -> dict[str, dict[str, t.Any]]:
return self.get("auths", {})
@property
def creds_store(self) -> str | None:
return self.get("credsStore", None)
@property
def cred_helpers(self) -> dict[str, t.Any]:
return self.get("credHelpers", {})
@property
def is_empty(self) -> bool:
return not self.auths and not self.creds_store and not self.cred_helpers
def resolve_authconfig(
self, registry: str | None = None
) -> dict[str, t.Any] | None:
"""
Returns the authentication data from the given auth configuration for a
specific registry. As with the Docker client, legacy entries in the
config with full URLs are stripped down to hostnames before checking
for a match. Returns None if no match was found.
"""
if self.creds_store or self.cred_helpers:
store_name = self.get_credential_store(registry)
if store_name is not None:
log.debug('Using credentials store "%s"', store_name)
cfg = self._resolve_authconfig_credstore(registry, store_name)
if cfg is not None:
return cfg
log.debug("No entry in credstore - fetching from auth dict")
# Default to the public index server
registry = resolve_index_name(registry) if registry else INDEX_NAME
log.debug("Looking for auth entry for %s", repr(registry))
if registry in self.auths:
log.debug("Found %s", repr(registry))
return self.auths[registry]
for key, conf in self.auths.items():
if resolve_index_name(key) == registry:
log.debug("Found %s", repr(key))
return conf
log.debug("No entry found")
return None
def _resolve_authconfig_credstore(
self, registry: str | None, credstore_name: str
) -> dict[str, t.Any] | None:
if not registry or registry == INDEX_NAME:
# The ecosystem is a little schizophrenic with index.docker.io VS
# docker.io - in that case, it seems the full URL is necessary.
registry = INDEX_URL
log.debug("Looking for auth entry for %s", repr(registry))
store = self._get_store_instance(credstore_name)
try:
data = store.get(registry)
res = {
"ServerAddress": registry,
}
if data["Username"] == TOKEN_USERNAME:
res["IdentityToken"] = data["Secret"]
else:
res.update(
{
"Username": data["Username"],
"Password": data["Secret"],
}
)
return res
except CredentialsNotFound:
log.debug("No entry found")
return None
except StoreError as e:
raise errors.DockerException(f"Credentials store error: {e}") from e
def _get_store_instance(self, name: str) -> Store:
if name not in self._stores:
self._stores[name] = Store(name, environment=self._credstore_env)
return self._stores[name]
def get_credential_store(self, registry: str | None) -> str | None:
if not registry or registry == INDEX_NAME:
registry = INDEX_URL
return self.cred_helpers.get(registry) or self.creds_store
def get_all_credentials(self) -> dict[str, dict[str, t.Any] | None]:
auth_data: dict[str, dict[str, t.Any] | None] = self.auths.copy() # type: ignore
if self.creds_store:
# Retrieve all credentials from the default store
store = self._get_store_instance(self.creds_store)
for k in store.list():
auth_data[k] = self._resolve_authconfig_credstore(k, self.creds_store)
auth_data[convert_to_hostname(k)] = auth_data[k]
# credHelpers entries take priority over all others
for reg, store_name in self.cred_helpers.items():
auth_data[reg] = self._resolve_authconfig_credstore(reg, store_name)
auth_data[convert_to_hostname(reg)] = auth_data[reg]
return auth_data
def add_auth(self, reg: str, data: dict[str, t.Any]) -> None:
self["auths"][reg] = data
def resolve_authconfig(
authconfig: AuthConfig | dict[str, t.Any],
registry: str | None = None,
credstore_env: dict[str, str] | None = None,
) -> dict[str, t.Any] | None:
if not isinstance(authconfig, AuthConfig):
authconfig = AuthConfig(authconfig, credstore_env)
return authconfig.resolve_authconfig(registry)
def convert_to_hostname(url: str) -> str:
return url.replace("http://", "").replace("https://", "").split("/", 1)[0]
def decode_auth(auth: str | bytes) -> tuple[str, str]:
if isinstance(auth, str):
auth = auth.encode("ascii")
s = base64.b64decode(auth)
login, pwd = s.split(b":", 1)
return login.decode("utf8"), pwd.decode("utf8")
def encode_header(auth: dict[str, t.Any]) -> bytes:
auth_json = json.dumps(auth).encode("ascii")
return base64.urlsafe_b64encode(auth_json)
def parse_auth(
entries: dict[str, dict[str, t.Any]], raise_on_error: bool = False
) -> dict[str, dict[str, t.Any]]:
"""
Parses authentication entries
Args:
entries: Dict of authentication entries.
raise_on_error: If set to true, an invalid format will raise
InvalidConfigFile
Returns:
Authentication registry.
"""
return AuthConfig.parse_auth(entries, raise_on_error)
def load_config(
config_path: str | None = None,
config_dict: dict[str, t.Any] | None = None,
credstore_env: dict[str, str] | None = None,
) -> AuthConfig:
return AuthConfig.load_config(config_path, config_dict, credstore_env)
def _load_legacy_config(config_file: str) -> dict[str, dict[str, t.Any]]:
log.debug("Attempting to parse legacy auth file format")
try:
data = []
with open(config_file, "rt", encoding="utf-8") as f:
for line in f.readlines():
data.append(line.strip().split(" = ")[1])
if len(data) < 2:
# Not enough data
raise errors.InvalidConfigFile("Invalid or empty configuration file!")
username, password = decode_auth(data[0])
return {
"auths": {
INDEX_NAME: {
"username": username,
"password": password,
"email": data[1],
"serveraddress": INDEX_URL,
}
}
}
except Exception as e: # pylint: disable=broad-exception-caught
log.debug(e)
log.debug("All parsing attempts failed - returning empty config")
return {}
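
An "auth" entry in config.json is just base64(username + ":" + password), which decode_auth reverses and encode_header re-wraps as URL-safe base64 JSON. A self-contained round trip showing the format:

import base64

# "user:s3cret" encodes to b'dXNlcjpzM2NyZXQ=' in a config.json "auth" field.
encoded = base64.b64encode(b"user:s3cret")
login, pwd = base64.b64decode(encoded).split(b":", 1)
assert (login, pwd) == (b"user", b"s3cret")
# split_repo_name: "ubuntu" -> ("docker.io", "ubuntu"),
# "localhost:5000/foo" -> ("localhost:5000", "foo")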


@@ -0,0 +1,40 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import sys
MINIMUM_DOCKER_API_VERSION = "1.21"
DEFAULT_TIMEOUT_SECONDS = 60
STREAM_HEADER_SIZE_BYTES = 8
CONTAINER_LIMITS_KEYS = ["memory", "memswap", "cpushares", "cpusetcpus"]
DEFAULT_HTTP_HOST = "127.0.0.1"
DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock"
DEFAULT_NPIPE = "npipe:////./pipe/docker_engine"
BYTE_UNITS = {"b": 1, "k": 1024, "m": 1024 * 1024, "g": 1024 * 1024 * 1024}
IS_WINDOWS_PLATFORM = sys.platform == "win32"
WINDOWS_LONGPATH_PREFIX = "\\\\?\\"
DEFAULT_USER_AGENT = "ansible-community.docker"
DEFAULT_NUM_POOLS = 25
# The OpenSSH server default value for MaxSessions is 10 which means we can
# use up to 9, leaving the final session for the underlying SSH connection.
# For more details see: https://github.com/docker/docker-py/issues/2246
DEFAULT_NUM_POOLS_SSH = 9
DEFAULT_MAX_POOL_SIZE = 10
DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
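
BYTE_UNITS backs human-readable size parsing elsewhere in the collection; the suffix selects a power-of-two multiplier. A minimal sketch of the arithmetic (the helper name to_bytes is hypothetical):

# Hypothetical helper illustrating the BYTE_UNITS table.
BYTE_UNITS = {"b": 1, "k": 1024, "m": 1024 * 1024, "g": 1024 * 1024 * 1024}

def to_bytes(spec: str) -> int:
    # Assumes "<digits><one-letter suffix>", e.g. "512m".
    return int(spec[:-1]) * BYTE_UNITS[spec[-1].lower()]

assert to_bytes("512m") == 536870912  # 512 * 1024 * 1024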


@@ -0,0 +1,253 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2025 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import json
import os
import typing as t
from .. import errors
from .config import (
METAFILE,
get_current_context_name,
get_meta_dir,
write_context_name_to_docker_config,
)
from .context import Context
if t.TYPE_CHECKING:
from ..tls import TLSConfig
def create_default_context() -> Context:
host = None
if os.environ.get("DOCKER_HOST"):
host = os.environ.get("DOCKER_HOST")
return Context(
"default", "swarm", host, description="Current DOCKER_HOST based configuration"
)
class ContextAPI:
"""Context API.
Contains methods for context management:
create, list, remove, get, inspect.
"""
DEFAULT_CONTEXT = None
@classmethod
def get_default_context(cls) -> Context:
context = cls.DEFAULT_CONTEXT
if context is None:
context = create_default_context()
cls.DEFAULT_CONTEXT = context
return context
@classmethod
def create_context(
cls,
name: str,
orchestrator: str | None = None,
host: str | None = None,
tls_cfg: TLSConfig | None = None,
default_namespace: str | None = None,
skip_tls_verify: bool = False,
) -> Context:
"""Creates a new context.
Returns:
(Context): a Context object.
Raises:
:py:class:`docker.errors.MissingContextParameter`
If a context name is not provided.
:py:class:`docker.errors.ContextAlreadyExists`
If a context with the name already exists.
:py:class:`docker.errors.ContextException`
If name is default.
Example:
>>> from docker.context import ContextAPI
>>> ctx = ContextAPI.create_context(name='test')
>>> print(ctx.Metadata)
{
"Name": "test",
"Metadata": {},
"Endpoints": {
"docker": {
"Host": "unix:///var/run/docker.sock",
"SkipTLSVerify": false
}
}
}
"""
if not name:
raise errors.MissingContextParameter("name")
if name == "default":
raise errors.ContextException('"default" is a reserved context name')
ctx = Context.load_context(name)
if ctx:
raise errors.ContextAlreadyExists(name)
endpoint = "docker"
if orchestrator and orchestrator != "swarm":
endpoint = orchestrator
ctx = Context(name, orchestrator)
ctx.set_endpoint(
endpoint,
host,
tls_cfg,
skip_tls_verify=skip_tls_verify,
def_namespace=default_namespace,
)
ctx.save()
return ctx
@classmethod
def get_context(cls, name: str | None = None) -> Context | None:
"""Retrieves a context object.
Args:
name (str): The name of the context
Example:
>>> from docker.context import ContextAPI
>>> ctx = ContextAPI.get_context(name='test')
>>> print(ctx.Metadata)
{
"Name": "test",
"Metadata": {},
"Endpoints": {
"docker": {
"Host": "unix:///var/run/docker.sock",
"SkipTLSVerify": false
}
}
}
"""
if not name:
name = get_current_context_name()
if name == "default":
return cls.get_default_context()
return Context.load_context(name)
@classmethod
def contexts(cls) -> list[Context]:
"""Context list.
Returns:
(Context): List of context objects.
Raises:
:py:class:`docker.errors.APIError`
If something goes wrong.
"""
names = []
for dirname, dummy, fnames in os.walk(get_meta_dir()):
for filename in fnames:
if filename == METAFILE:
filepath = os.path.join(dirname, filename)
try:
with open(filepath, "rt", encoding="utf-8") as f:
data = json.load(f)
name = data["Name"]
if name == "default":
raise ValueError('"default" is a reserved context name')
names.append(name)
except Exception as e:
raise errors.ContextException(
f"Failed to load metafile {filepath}: {e}"
) from e
contexts = [cls.get_default_context()]
for name in names:
context = Context.load_context(name)
if not context:
raise errors.ContextException(f"Context {name} cannot be found")
contexts.append(context)
return contexts
@classmethod
def get_current_context(cls) -> Context | None:
"""Get current context.
Returns:
(Context): current context object.
"""
return cls.get_context()
@classmethod
def set_current_context(cls, name: str = "default") -> None:
ctx = cls.get_context(name)
if not ctx:
raise errors.ContextNotFound(name)
err = write_context_name_to_docker_config(name)
if err:
raise errors.ContextException(f"Failed to set current context: {err}")
@classmethod
def remove_context(cls, name: str) -> None:
"""Remove a context. Similar to the ``docker context rm`` command.
Args:
name (str): The name of the context
Raises:
:py:class:`docker.errors.MissingContextParameter`
If a context name is not provided.
:py:class:`docker.errors.ContextNotFound`
If a context with the name does not exist.
:py:class:`docker.errors.ContextException`
If name is default.
Example:
>>> from docker.context import ContextAPI
>>> ContextAPI.remove_context(name='test')
>>>
"""
if not name:
raise errors.MissingContextParameter("name")
if name == "default":
raise errors.ContextException('context "default" cannot be removed')
ctx = Context.load_context(name)
if not ctx:
raise errors.ContextNotFound(name)
if name == get_current_context_name():
write_context_name_to_docker_config(None)
ctx.remove()
@classmethod
def inspect_context(cls, name: str = "default") -> dict[str, t.Any]:
"""Inspect a context. Similar to the ``docker context inspect`` command.
Args:
name (str): The name of the context
Raises:
:py:class:`docker.errors.MissingContextParameter`
If a context name is not provided.
:py:class:`docker.errors.ContextNotFound`
If a context with the name does not exist.
Example:
>>> from docker.context import ContextAPI
>>> ContextAPI.inspect_context(name='test')
>>>
"""
if not name:
raise errors.MissingContextParameter("name")
if name == "default":
return cls.get_default_context()()
ctx = Context.load_context(name)
if not ctx:
raise errors.ContextNotFound(name)
return ctx()
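
A short usage sketch of ContextAPI against this vendored layout (the import path is the collection-internal one); name resolution follows DOCKER_HOST, then DOCKER_CONTEXT, then the config file, as get_current_context_name() implements it:

from ansible_collections.community.docker.plugins.module_utils._api.context.api import (
    ContextAPI,
)

ctx = ContextAPI.get_current_context()  # may be the synthetic "default" context
if ctx is not None:
    print(ctx.Name, ctx.Host)
for c in ContextAPI.contexts():  # "default" plus every context with a metafile
    print(c.Name)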


@@ -0,0 +1,107 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2025 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import hashlib
import json
import os
from ..constants import DEFAULT_UNIX_SOCKET, IS_WINDOWS_PLATFORM
from ..utils.config import find_config_file, get_default_config_file
from ..utils.utils import parse_host
METAFILE = "meta.json"
def get_current_context_name_with_source() -> tuple[str, str]:
if os.environ.get("DOCKER_HOST"):
return "default", "DOCKER_HOST environment variable set"
if os.environ.get("DOCKER_CONTEXT"):
return os.environ["DOCKER_CONTEXT"], "DOCKER_CONTEXT environment variable set"
docker_cfg_path = find_config_file()
if docker_cfg_path:
try:
with open(docker_cfg_path, "rt", encoding="utf-8") as f:
return (
json.load(f).get("currentContext", "default"),
f"configuration file {docker_cfg_path}",
)
except Exception: # pylint: disable=broad-exception-caught
pass
return "default", "fallback value"
def get_current_context_name() -> str:
return get_current_context_name_with_source()[0]
def write_context_name_to_docker_config(name: str | None = None) -> Exception | None:
if name == "default":
name = None
docker_cfg_path = find_config_file()
config = {}
if docker_cfg_path:
try:
with open(docker_cfg_path, "rt", encoding="utf-8") as f:
config = json.load(f)
except Exception as e: # pylint: disable=broad-exception-caught
return e
current_context = config.get("currentContext", None)
if current_context and not name:
del config["currentContext"]
elif name:
config["currentContext"] = name
else:
return None
if not docker_cfg_path:
docker_cfg_path = get_default_config_file()
try:
with open(docker_cfg_path, "wt", encoding="utf-8") as f:
json.dump(config, f, indent=4)
return None
except Exception as e: # pylint: disable=broad-exception-caught
return e
def get_context_id(name: str) -> str:
return hashlib.sha256(name.encode("utf-8")).hexdigest()
def get_context_dir() -> str:
docker_cfg_path = find_config_file() or get_default_config_file()
return os.path.join(os.path.dirname(docker_cfg_path), "contexts")
def get_meta_dir(name: str | None = None) -> str:
meta_dir = os.path.join(get_context_dir(), "meta")
if name:
return os.path.join(meta_dir, get_context_id(name))
return meta_dir
def get_meta_file(name: str) -> str:
return os.path.join(get_meta_dir(name), METAFILE)
def get_tls_dir(name: str | None = None, endpoint: str = "") -> str:
context_dir = get_context_dir()
if name:
return os.path.join(context_dir, "tls", get_context_id(name), endpoint)
return os.path.join(context_dir, "tls")
def get_context_host(path: str | None = None, tls: bool = False) -> str:
host = parse_host(path, IS_WINDOWS_PLATFORM, tls)
if host == DEFAULT_UNIX_SOCKET and host.startswith("http+"):
# remove http+ from default docker socket url
host = host[5:]
return host
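
Context metadata therefore lives at <config dir>/contexts/meta/<sha256(name)>/meta.json, with TLS material in a parallel tls/ tree. The directory name is nothing more than the SHA-256 hex digest of the context name:

import hashlib

# A context named "test" is stored under this directory name:
print(hashlib.sha256(b"test").hexdigest())
# 9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08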


@@ -0,0 +1,286 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2025 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import json
import os
import typing as t
from shutil import copyfile, rmtree
from ..errors import ContextException
from ..tls import TLSConfig
from .config import (
get_context_host,
get_meta_dir,
get_meta_file,
get_tls_dir,
)
IN_MEMORY = "IN MEMORY"
class Context:
"""A context."""
def __init__(
self,
name: str,
orchestrator: str | None = None,
host: str | None = None,
endpoints: dict[str, dict[str, t.Any]] | None = None,
skip_tls_verify: bool = False,
tls: bool = False,
description: str | None = None,
) -> None:
if not name:
raise ValueError("Name not provided")
self.name = name
self.context_type = None
self.orchestrator = orchestrator
self.endpoints = {}
self.tls_cfg: dict[str, TLSConfig] = {}
self.meta_path = IN_MEMORY
self.tls_path = IN_MEMORY
self.description = description
if not endpoints:
# set default docker endpoint if no endpoint is set
default_endpoint = (
"docker"
if (not orchestrator or orchestrator == "swarm")
else orchestrator
)
self.endpoints = {
default_endpoint: {
"Host": get_context_host(host, skip_tls_verify or tls),
"SkipTLSVerify": skip_tls_verify,
}
}
return
# check docker endpoints
for k, v in endpoints.items():
if not isinstance(v, dict):
# unknown format
raise ContextException(
f"Unknown endpoint format for context {name}: {v}",
)
self.endpoints[k] = v
if k != "docker":
continue
self.endpoints[k]["Host"] = v.get(
"Host", get_context_host(host, skip_tls_verify or tls)
)
self.endpoints[k]["SkipTLSVerify"] = bool(
v.get("SkipTLSVerify", skip_tls_verify)
)
def set_endpoint(
self,
name: str = "docker",
host: str | None = None,
tls_cfg: TLSConfig | None = None,
skip_tls_verify: bool = False,
def_namespace: str | None = None,
) -> None:
self.endpoints[name] = {
"Host": get_context_host(host, not skip_tls_verify or tls_cfg is not None),
"SkipTLSVerify": skip_tls_verify,
}
if def_namespace:
self.endpoints[name]["DefaultNamespace"] = def_namespace
if tls_cfg:
self.tls_cfg[name] = tls_cfg
def inspect(self) -> dict[str, t.Any]:
return self()
@classmethod
def load_context(cls, name: str) -> t.Self | None:
meta = Context._load_meta(name)
if meta:
instance = cls(
meta["Name"],
orchestrator=meta["Metadata"].get("StackOrchestrator", None),
endpoints=meta.get("Endpoints", None),
description=meta["Metadata"].get("Description"),
)
instance.context_type = meta["Metadata"].get("Type", None)
instance._load_certs()
instance.meta_path = get_meta_dir(name)
return instance
return None
@classmethod
def _load_meta(cls, name: str) -> dict[str, t.Any] | None:
meta_file = get_meta_file(name)
if not os.path.isfile(meta_file):
return None
metadata: dict[str, t.Any] = {}
try:
with open(meta_file, "rt", encoding="utf-8") as f:
metadata = json.load(f)
except (OSError, KeyError, ValueError) as e:
# unknown format
raise RuntimeError(
f"Detected corrupted meta file for context {name} : {e}"
) from e
# for docker endpoints, set defaults for
# Host and SkipTLSVerify fields
for k, v in metadata["Endpoints"].items():
if k != "docker":
continue
metadata["Endpoints"][k]["Host"] = v.get(
"Host", get_context_host(None, False)
)
metadata["Endpoints"][k]["SkipTLSVerify"] = bool(
v.get("SkipTLSVerify", True)
)
return metadata
def _load_certs(self) -> None:
certs = {}
tls_dir = get_tls_dir(self.name)
for endpoint in self.endpoints:
if not os.path.isdir(os.path.join(tls_dir, endpoint)):
continue
ca_cert = None
cert = None
key = None
for filename in os.listdir(os.path.join(tls_dir, endpoint)):
if filename.startswith("ca"):
ca_cert = os.path.join(tls_dir, endpoint, filename)
elif filename.startswith("cert"):
cert = os.path.join(tls_dir, endpoint, filename)
elif filename.startswith("key"):
key = os.path.join(tls_dir, endpoint, filename)
if all([cert, key]) or ca_cert:
verify = None
if endpoint == "docker" and not self.endpoints["docker"].get(
"SkipTLSVerify", False
):
verify = True
certs[endpoint] = TLSConfig(
client_cert=(cert, key) if cert and key else None,
ca_cert=ca_cert,
verify=verify,
)
self.tls_cfg = certs
self.tls_path = tls_dir
def save(self) -> None:
meta_dir = get_meta_dir(self.name)
if not os.path.isdir(meta_dir):
os.makedirs(meta_dir)
with open(get_meta_file(self.name), "wt", encoding="utf-8") as f:
f.write(json.dumps(self.Metadata))
tls_dir = get_tls_dir(self.name)
for endpoint, tls in self.tls_cfg.items():
if not os.path.isdir(os.path.join(tls_dir, endpoint)):
os.makedirs(os.path.join(tls_dir, endpoint))
ca_file = tls.ca_cert
if ca_file:
copyfile(
ca_file, os.path.join(tls_dir, endpoint, os.path.basename(ca_file))
)
if tls.cert:
cert_file, key_file = tls.cert
copyfile(
cert_file,
os.path.join(tls_dir, endpoint, os.path.basename(cert_file)),
)
copyfile(
key_file,
os.path.join(tls_dir, endpoint, os.path.basename(key_file)),
)
self.meta_path = get_meta_dir(self.name)
self.tls_path = get_tls_dir(self.name)
def remove(self) -> None:
if os.path.isdir(self.meta_path):
rmtree(self.meta_path)
if os.path.isdir(self.tls_path):
rmtree(self.tls_path)
def __repr__(self) -> str:
return f"<{self.__class__.__name__}: '{self.name}'>"
def __str__(self) -> str:
return json.dumps(self.__call__(), indent=2)
def __call__(self) -> dict[str, t.Any]:
result = self.Metadata
result.update(self.TLSMaterial)
result.update(self.Storage)
return result
def is_docker_host(self) -> bool:
return self.context_type is None
@property
def Name(self) -> str: # pylint: disable=invalid-name
return self.name
@property
def Host(self) -> str | None: # pylint: disable=invalid-name
if not self.orchestrator or self.orchestrator == "swarm":
endpoint = self.endpoints.get("docker", None)
if endpoint:
return endpoint.get("Host", None) # type: ignore
return None
return self.endpoints[self.orchestrator].get("Host", None) # type: ignore
@property
def Orchestrator(self) -> str | None: # pylint: disable=invalid-name
return self.orchestrator
@property
def Metadata(self) -> dict[str, t.Any]: # pylint: disable=invalid-name
meta: dict[str, t.Any] = {}
if self.orchestrator:
meta = {"StackOrchestrator": self.orchestrator}
return {"Name": self.name, "Metadata": meta, "Endpoints": self.endpoints}
@property
def TLSConfig(self) -> TLSConfig | None: # pylint: disable=invalid-name
key = self.orchestrator
if not key or key == "swarm":
key = "docker"
if key in self.tls_cfg:
return self.tls_cfg[key]
return None
@property
def TLSMaterial(self) -> dict[str, t.Any]: # pylint: disable=invalid-name
certs: dict[str, t.Any] = {}
for endpoint, tls in self.tls_cfg.items():
paths = [tls.ca_cert, *tls.cert] if tls.cert else [tls.ca_cert]
certs[endpoint] = [
os.path.basename(path) if path else None for path in paths
]
return {"TLSMaterial": certs}
@property
def Storage(self) -> dict[str, t.Any]: # pylint: disable=invalid-name
return {"Storage": {"MetadataPath": self.meta_path, "TLSPath": self.tls_path}}


@@ -0,0 +1,17 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
PROGRAM_PREFIX = "docker-credential-"
DEFAULT_LINUX_STORE = "secretservice"
DEFAULT_OSX_STORE = "osxkeychain"
DEFAULT_WIN32_STORE = "wincred"


@@ -0,0 +1,38 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import typing as t
if t.TYPE_CHECKING:
from subprocess import CalledProcessError
class StoreError(RuntimeError):
pass
class CredentialsNotFound(StoreError):
pass
class InitializationError(StoreError):
pass
def process_store_error(cpe: CalledProcessError, program: str) -> StoreError:
message = cpe.output.decode("utf-8")
if "credentials not found in native keychain" in message:
return CredentialsNotFound(f"No matching credentials in {program}")
return StoreError(
f'Credentials store {program} exited with "{cpe.output.decode("utf-8").strip()}".'
)
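
A quick demonstration of the mapping above, written as if appended to this module (the helper name is a placeholder):

from subprocess import CalledProcessError

cpe = CalledProcessError(
    1,
    ["docker-credential-pass", "get"],
    output=b"credentials not found in native keychain",
)
err = process_store_error(cpe, "docker-credential-pass")
assert isinstance(err, CredentialsNotFound)  # any other message -> StoreError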


@@ -0,0 +1,102 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import errno
import json
import subprocess
import typing as t
from . import constants, errors
from .utils import create_environment_dict, find_executable
class Store:
def __init__(self, program: str, environment: dict[str, str] | None = None) -> None:
"""Create a store object that acts as an interface to
perform the basic operations for storing, retrieving
and erasing credentials using `program`.
"""
self.program = constants.PROGRAM_PREFIX + program
self.exe = find_executable(self.program)
self.environment = environment
if self.exe is None:
raise errors.InitializationError(
f"{self.program} not installed or not available in PATH"
)
def get(self, server: str | bytes) -> dict[str, t.Any]:
"""Retrieve credentials for `server`. If no credentials are found,
a `StoreError` will be raised.
"""
if not isinstance(server, bytes):
server = server.encode("utf-8")
data = self._execute("get", server)
result = json.loads(data.decode("utf-8"))
# docker-credential-pass will return an object for nonexistent servers
# whereas other helpers will exit with returncode != 0. For
# consistency, if no significant data is returned,
# raise CredentialsNotFound
if result["Username"] == "" and result["Secret"] == "":
raise errors.CredentialsNotFound(
f"No matching credentials in {self.program}"
)
return result
def store(self, server: str, username: str, secret: str) -> bytes:
"""Store credentials for `server`. Raises a `StoreError` if an error
occurs.
"""
data_input = json.dumps(
{"ServerURL": server, "Username": username, "Secret": secret}
).encode("utf-8")
return self._execute("store", data_input)
def erase(self, server: str | bytes) -> None:
"""Erase credentials for `server`. Raises a `StoreError` if an error
occurs.
"""
if not isinstance(server, bytes):
server = server.encode("utf-8")
self._execute("erase", server)
def list(self) -> t.Any:
"""List stored credentials. Requires v0.4.0+ of the helper."""
data = self._execute("list", None)
return json.loads(data.decode("utf-8"))
def _execute(self, subcmd: str, data_input: bytes | None) -> bytes:
if self.exe is None:
raise errors.StoreError(
f"{self.program} not installed or not available in PATH"
)
output = None
env = create_environment_dict(self.environment)
try:
output = subprocess.check_output(
[self.exe, subcmd],
input=data_input,
env=env,
)
except subprocess.CalledProcessError as e:
raise errors.process_store_error(e, self.program) from e
except OSError as e:
if e.errno == errno.ENOENT:
raise errors.StoreError(
f"{self.program} not installed or not available in PATH"
) from e
raise errors.StoreError(
f'Unexpected OS error "{e.strerror}", errno={e.errno}'
) from e
return output
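
The protocol is simple: the payload goes to the docker-credential-<name> helper on stdin and JSON comes back on stdout. A hedged usage sketch, written as a continuation of this module and assuming the `pass` helper is on PATH:

store = Store("pass")  # shells out to docker-credential-pass
store.store("https://index.docker.io/v1/", "user", "s3cret")
creds = store.get("https://index.docker.io/v1/")
# e.g. {"ServerURL": "https://index.docker.io/v1/", "Username": "user", "Secret": "s3cret"}
store.erase("https://index.docker.io/v1/")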


@@ -0,0 +1,35 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import os
from shutil import which
def find_executable(executable: str, path: str | None = None) -> str | None:
"""
As distutils.spawn.find_executable, but on Windows, look up
every extension declared in PATHEXT instead of just `.exe`
"""
# shutil.which() already uses PATHEXT on Windows, so on
# Python 3 we can simply use shutil.which() in all cases.
# (https://github.com/docker/docker-py/commit/42789818bed5d86b487a030e2e60b02bf0cfa284)
return which(executable, path=path)
def create_environment_dict(overrides: dict[str, str] | None) -> dict[str, str]:
"""
Create and return a copy of os.environ with the specified overrides
"""
result = os.environ.copy()
result.update(overrides or {})
return result
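
A quick illustration of create_environment_dict, written against the function above; Store uses it to pass helper-specific variables without mutating os.environ (the variable name is just an example):

env = create_environment_dict({"PASSWORD_STORE_DIR": "/tmp/store"})
assert env["PASSWORD_STORE_DIR"] == "/tmp/store"
# os.environ itself is left unmodified; only the returned copy changes.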


@@ -0,0 +1,244 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import typing as t
from ansible.module_utils.common.text.converters import to_text
from ._import_helper import HTTPError as _HTTPError
if t.TYPE_CHECKING:
from requests import Response
class DockerException(Exception):
"""
A base class from which all other exceptions inherit.
If you want to catch all errors that the Docker SDK might raise,
catch this base exception.
"""
def create_api_error_from_http_exception(e: _HTTPError) -> t.NoReturn:
"""
Create a suitable APIError from requests.exceptions.HTTPError.
"""
response = e.response
try:
explanation = response.json()["message"]
except ValueError:
explanation = to_text((response.content or "").strip())
cls = APIError
if response.status_code == 404:
if explanation and (
"No such image" in str(explanation)
or "not found: does not exist or no pull access" in str(explanation)
or "repository does not exist" in str(explanation)
):
cls = ImageNotFound
else:
cls = NotFound
raise cls(e, response=response, explanation=explanation) from e
class APIError(_HTTPError, DockerException):
"""
An HTTP error from the API.
"""
def __init__(
self,
message: str | Exception,
response: Response | None = None,
explanation: str | None = None,
) -> None:
# requests 1.2 supports response as a keyword argument, but
# requests 1.1 does not
super().__init__(message)
self.response = response
self.explanation = explanation or ""
def __str__(self) -> str:
message = super().__str__()
if self.is_client_error():
message = f"{self.response.status_code} Client Error for {self.response.url}: {self.response.reason}"
elif self.is_server_error():
message = f"{self.response.status_code} Server Error for {self.response.url}: {self.response.reason}"
if self.explanation:
message = f'{message} ("{self.explanation}")'
return message
@property
def status_code(self) -> int | None:
if self.response is not None:
return self.response.status_code
return None
def is_error(self) -> bool:
return self.is_client_error() or self.is_server_error()
def is_client_error(self) -> bool:
if self.status_code is None:
return False
return 400 <= self.status_code < 500
def is_server_error(self) -> bool:
if self.status_code is None:
return False
return 500 <= self.status_code < 600
class NotFound(APIError):
pass
class ImageNotFound(NotFound):
pass
class InvalidVersion(DockerException):
pass
class InvalidRepository(DockerException):
pass
class InvalidConfigFile(DockerException):
pass
class InvalidArgument(DockerException):
pass
class DeprecatedMethod(DockerException):
pass
class TLSParameterError(DockerException):
def __init__(self, msg: str) -> None:
self.msg = msg
def __str__(self) -> str:
return self.msg + (
". TLS configurations should map the Docker CLI "
"client configurations. See "
"https://docs.docker.com/engine/articles/https/ "
"for API details."
)
class NullResource(DockerException, ValueError):
pass
class ContainerError(DockerException):
"""
Represents a container that has exited with a non-zero exit code.
"""
def __init__(
self,
container: str,
exit_status: int,
command: list[str],
image: str,
stderr: str | None,
):
self.container = container
self.exit_status = exit_status
self.command = command
self.image = image
self.stderr = stderr
err = f": {stderr}" if stderr is not None else ""
msg = f"Command '{command}' in image '{image}' returned non-zero exit status {exit_status}{err}"
super().__init__(msg)
class StreamParseError(RuntimeError):
def __init__(self, reason: Exception) -> None:
self.msg = reason
class BuildError(DockerException):
def __init__(self, reason: str, build_log: str) -> None:
super().__init__(reason)
self.msg = reason
self.build_log = build_log
class ImageLoadError(DockerException):
pass
def create_unexpected_kwargs_error(name: str, kwargs: dict[str, t.Any]) -> TypeError:
quoted_kwargs = [f"'{k}'" for k in sorted(kwargs)]
text = [f"{name}() "]
if len(quoted_kwargs) == 1:
text.append("got an unexpected keyword argument ")
else:
text.append("got unexpected keyword arguments ")
text.append(", ".join(quoted_kwargs))
return TypeError("".join(text))
class MissingContextParameter(DockerException):
def __init__(self, param: str) -> None:
self.param = param
def __str__(self) -> str:
return f"missing parameter: {self.param}"
class ContextAlreadyExists(DockerException):
def __init__(self, name: str) -> None:
self.name = name
def __str__(self) -> str:
return f"context {self.name} already exists"
class ContextException(DockerException):
def __init__(self, msg: str) -> None:
self.msg = msg
def __str__(self) -> str:
return self.msg
class ContextNotFound(DockerException):
def __init__(self, name: str) -> None:
self.name = name
def __str__(self) -> str:
return f"context '{self.name}' not found"
class MissingRequirementException(DockerException):
def __init__(
self, msg: str, requirement: str, import_exception: ImportError | str
) -> None:
self.msg = msg
self.requirement = requirement
self.import_exception = import_exception
def __str__(self) -> str:
return self.msg
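
Typical caller-side handling branches on the subclass and the status helpers; create_api_error_from_http_exception is what upgrades a raw requests.HTTPError into the right class. A sketch (the client and its pull() call are hypothetical stand-ins):

try:
    client.pull("no/such-image")  # hypothetical API call that 404s
except ImageNotFound:
    print("image does not exist")
except APIError as e:
    if e.is_server_error():
        print(f"daemon-side failure: {e.explanation}")
    else:
        raise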


@@ -0,0 +1,107 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import os
import typing as t
from . import errors
from .transport.ssladapter import SSLHTTPAdapter
if t.TYPE_CHECKING:
from ansible_collections.community.docker.plugins.module_utils._api.api.client import (
APIClient,
)
class TLSConfig:
"""
TLS configuration.
Args:
client_cert (tuple of str): Path to client cert, path to client key.
ca_cert (str): Path to CA cert file.
verify (bool or str): This can be ``False`` or a path to a CA cert
file.
assert_hostname (bool): Verify the hostname of the server.
"""
cert: tuple[str, str] | None = None
ca_cert: str | None = None
verify: bool | None = None
def __init__(
self,
client_cert: tuple[str, str] | None = None,
ca_cert: str | None = None,
verify: bool | None = None,
assert_hostname: bool | None = None,
):
# Argument compatibility/mapping with
# https://docs.docker.com/engine/articles/https/
# This diverges from the Docker CLI in that users can specify 'tls'
# here, but also disable any public/default CA pool verification by
# leaving verify=False
self.assert_hostname = assert_hostname
# "client_cert" must have both or neither cert/key files. In
# either case, alert the user when both are expected but either is
# missing.
if client_cert:
try:
tls_cert, tls_key = client_cert
except ValueError:
raise errors.TLSParameterError(
"client_cert must be a tuple of (client certificate, key file)"
) from None
if not (tls_cert and tls_key) or (
not os.path.isfile(tls_cert) or not os.path.isfile(tls_key)
):
raise errors.TLSParameterError(
"Path to a certificate and key files must be provided"
" through the client_cert param"
)
self.cert = (tls_cert, tls_key)
# If verify is set, make sure the cert exists
self.verify = verify
self.ca_cert = ca_cert
if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
raise errors.TLSParameterError(
"Invalid CA certificate provided for `ca_cert`."
)
def configure_client(self, client: APIClient) -> None:
"""
Configure a client with these TLS options.
"""
if self.verify and self.ca_cert:
client.verify = self.ca_cert
else:
client.verify = self.verify
if self.cert:
client.cert = self.cert
client.mount(
"https://",
SSLHTTPAdapter(
assert_hostname=self.assert_hostname,
),
)
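
A construction sketch mirroring the CLI's --tlsverify/--tlscacert/--tlscert/--tlskey flags (paths are placeholders and must exist on disk, or TLSParameterError is raised):

tls_config = TLSConfig(
    client_cert=("/certs/cert.pem", "/certs/key.pem"),
    ca_cert="/certs/ca.pem",
    verify=True,
)
# tls_config.configure_client(api_client) then sets client.verify and
# client.cert, and mounts SSLHTTPAdapter for https:// URLs.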


@@ -0,0 +1,35 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
from .._import_helper import HTTPAdapter as _HTTPAdapter
class BaseHTTPAdapter(_HTTPAdapter):
def close(self) -> None:
# pylint finds our HTTPAdapter stub instead of requests.adapters.HTTPAdapter:
# pylint: disable-next=no-member
super().close()
if hasattr(self, "pools"):
self.pools.clear()
# Hotfix for requests 2.32.0 and 2.32.1: its commit
# https://github.com/psf/requests/commit/c0813a2d910ea6b4f8438b91d315b8d181302356
# changes requests.adapters.HTTPAdapter to no longer call get_connection() from
# send(), but instead call _get_connection().
def _get_connection(self, request, *args, **kwargs): # type: ignore
return self.get_connection(request.url, kwargs.get("proxies"))
# Fix for requests 2.32.2+:
# https://github.com/psf/requests/commit/c98e4d133ef29c46a9b68cd783087218a8075e05
def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None): # type: ignore
return self.get_connection(request.url, proxies)
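
The effect of the two shims is that subclasses only ever override get_connection(); whichever hook a given requests release calls from send() is routed back there. A sketch of such a subclass (the pool type is left to the concrete adapter):

class ExampleAdapter(BaseHTTPAdapter):
    def get_connection(self, url, proxies=None):
        # Return a urllib3 connection pool for `url`; reached on every
        # requests version thanks to the shims above.
        return super().get_connection(url, proxies)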


@@ -0,0 +1,123 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import typing as t
from queue import Empty
from .. import constants
from .._import_helper import HTTPAdapter, urllib3, urllib3_connection
from .basehttpadapter import BaseHTTPAdapter
from .npipesocket import NpipeSocket
if t.TYPE_CHECKING:
from collections.abc import Mapping
from requests import PreparedRequest
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class NpipeHTTPConnection(urllib3_connection.HTTPConnection):
def __init__(self, npipe_path: str, timeout: int | float = 60) -> None:
super().__init__("localhost", timeout=timeout)
self.npipe_path = npipe_path
self.timeout = timeout
def connect(self) -> None:
sock = NpipeSocket()
sock.settimeout(self.timeout)
sock.connect(self.npipe_path)
self.sock = sock
class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(
self, npipe_path: str, timeout: int | float = 60, maxsize: int = 10
) -> None:
super().__init__("localhost", timeout=timeout, maxsize=maxsize)
self.npipe_path = npipe_path
self.timeout = timeout
def _new_conn(self) -> NpipeHTTPConnection:
return NpipeHTTPConnection(self.npipe_path, self.timeout)
# When re-using connections, urllib3 tries to call select() on our
# NpipeSocket instance, causing a crash. To circumvent this, we override
# _get_conn, where that check happens.
def _get_conn(self, timeout: int | float) -> NpipeHTTPConnection:
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError as exc: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from exc
except Empty as exc:
if self.block:
raise urllib3.exceptions.EmptyPoolError(
self,
"Pool reached maximum size and no more connections are allowed.",
) from exc
# Oh well, we'll create a new connection then
return conn or self._new_conn()
class NpipeHTTPAdapter(BaseHTTPAdapter):
__attrs__ = HTTPAdapter.__attrs__ + [
"npipe_path",
"pools",
"timeout",
"max_pool_size",
]
def __init__(
self,
base_url: str,
timeout: int | float = 60,
pool_connections: int = constants.DEFAULT_NUM_POOLS,
max_pool_size: int = constants.DEFAULT_MAX_POOL_SIZE,
) -> None:
self.npipe_path = base_url.replace("npipe://", "")
self.timeout = timeout
self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
super().__init__()
def get_connection(
self, url: str | bytes, proxies: Mapping[str, str] | None = None
) -> NpipeHTTPConnectionPool:
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
pool = NpipeHTTPConnectionPool(
self.npipe_path, self.timeout, maxsize=self.max_pool_size
)
self.pools[url] = pool
return pool
def request_url(
self, request: PreparedRequest, proxies: Mapping[str, str] | None
) -> str:
# The select_proxy utility in requests errors out when the provided URL
# does not have a hostname, as is the case when using a named pipe.
# Since proxies are an irrelevant notion in the case of named pipes
# anyway, we simply return the path URL directly.
# See also: https://github.com/docker/docker-sdk-python/issues/811
return request.path_url
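
The adapter drops into an ordinary requests.Session; every URL under the mounted prefix is then carried over the named pipe. A Windows-only sketch (the http+docker:// prefix mirrors how docker-py mounts its transport adapters, assumed here):

import requests

session = requests.Session()
session.mount(
    "http+docker://",
    NpipeHTTPAdapter("npipe:////./pipe/docker_engine", timeout=60),
)
resp = session.get("http+docker://localnpipe/version")  # served via the pipe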


@@ -0,0 +1,277 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import functools
import io
import time
import traceback
import typing as t
PYWIN32_IMPORT_ERROR: str | None # pylint: disable=invalid-name
try:
import pywintypes
import win32api
import win32event
import win32file
import win32pipe
except ImportError:
PYWIN32_IMPORT_ERROR = traceback.format_exc() # pylint: disable=invalid-name
else:
PYWIN32_IMPORT_ERROR = None # pylint: disable=invalid-name
if t.TYPE_CHECKING:
from collections.abc import Buffer, Callable
_Self = t.TypeVar("_Self")
_P = t.ParamSpec("_P")
_R = t.TypeVar("_R")
ERROR_PIPE_BUSY = 0xE7
SECURITY_SQOS_PRESENT = 0x100000
SECURITY_ANONYMOUS = 0
MAXIMUM_RETRY_COUNT = 10
def check_closed(
f: Callable[t.Concatenate[_Self, _P], _R],
) -> Callable[t.Concatenate[_Self, _P], _R]:
@functools.wraps(f)
def wrapped(self: _Self, *args: _P.args, **kwargs: _P.kwargs) -> _R:
if self._closed: # type: ignore
raise RuntimeError("Can not reuse socket after connection was closed.")
return f(self, *args, **kwargs)
return wrapped
class NpipeSocket:
"""Partial implementation of the socket API over windows named pipes.
This implementation is only designed to be used as a client socket,
and server-specific methods (bind, listen, accept...) are not
implemented.
"""
def __init__(self, handle: t.Any | None = None) -> None:
self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
self._handle = handle
self._address: str | None = None
self._closed = False
self.flags: int | None = None
def accept(self) -> t.NoReturn:
raise NotImplementedError()
def bind(self, address: t.Any) -> t.NoReturn:
raise NotImplementedError()
def close(self) -> None:
if self._handle is None:
raise ValueError("Handle not present")
self._handle.Close()
self._closed = True
@check_closed
def connect(self, address: str, retry_count: int = 0) -> None:
try:
handle = win32file.CreateFile(
address,
win32file.GENERIC_READ | win32file.GENERIC_WRITE,
0,
None,
win32file.OPEN_EXISTING,
(
SECURITY_ANONYMOUS
| SECURITY_SQOS_PRESENT
| win32file.FILE_FLAG_OVERLAPPED
),
0,
)
except win32pipe.error as e:
# See Remarks:
# https://msdn.microsoft.com/en-us/library/aa365800.aspx
if e.winerror == ERROR_PIPE_BUSY:
# Another program or thread has grabbed our pipe instance
# before we got to it. Wait for availability and attempt to
# connect again.
retry_count = retry_count + 1
if retry_count < MAXIMUM_RETRY_COUNT:
time.sleep(1)
return self.connect(address, retry_count)
raise e
self.flags = win32pipe.GetNamedPipeInfo(handle)[0] # type: ignore
self._handle = handle
self._address = address
@check_closed
def connect_ex(self, address: str) -> None:
self.connect(address)
@check_closed
def detach(self) -> t.Any:
self._closed = True
return self._handle
@check_closed
def dup(self) -> NpipeSocket:
return NpipeSocket(self._handle)
def getpeername(self) -> str | None:
return self._address
def getsockname(self) -> str | None:
return self._address
def getsockopt(
self, level: t.Any, optname: t.Any, buflen: t.Any = None
) -> t.NoReturn:
raise NotImplementedError()
def ioctl(self, control: t.Any, option: t.Any) -> t.NoReturn:
raise NotImplementedError()
def listen(self, backlog: t.Any) -> t.NoReturn:
raise NotImplementedError()
def makefile(self, mode: str, bufsize: int | None = None) -> t.IO[bytes]:
if mode.strip("b") != "r":
raise NotImplementedError()
rawio = NpipeFileIOBase(self)
if bufsize is None or bufsize <= 0:
bufsize = io.DEFAULT_BUFFER_SIZE
return io.BufferedReader(rawio, buffer_size=bufsize)
@check_closed
    def recv(self, bufsize: int, flags: int = 0) -> bytes:
if self._handle is None:
raise ValueError("Handle not present")
dummy_err, data = win32file.ReadFile(self._handle, bufsize)
return data
@check_closed
    def recvfrom(self, bufsize: int, flags: int = 0) -> tuple[bytes, str | None]:
data = self.recv(bufsize, flags)
return (data, self._address)
@check_closed
def recvfrom_into(
self, buf: Buffer, nbytes: int = 0, flags: int = 0
) -> tuple[int, str | None]:
return self.recv_into(buf, nbytes), self._address
@check_closed
def recv_into(self, buf: Buffer, nbytes: int = 0) -> int:
if self._handle is None:
raise ValueError("Handle not present")
readbuf = buf if isinstance(buf, memoryview) else memoryview(buf)
event = win32event.CreateEvent(None, True, True, None)
try:
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = event
dummy_err, dummy_data = win32file.ReadFile( # type: ignore
self._handle, readbuf[:nbytes] if nbytes else readbuf, overlapped
)
wait_result = win32event.WaitForSingleObject(event, self._timeout)
if wait_result == win32event.WAIT_TIMEOUT:
win32file.CancelIo(self._handle)
raise TimeoutError
return win32file.GetOverlappedResult(self._handle, overlapped, 0)
finally:
win32api.CloseHandle(event)
@check_closed
def send(self, string: Buffer, flags: int = 0) -> int:
if self._handle is None:
raise ValueError("Handle not present")
event = win32event.CreateEvent(None, True, True, None)
try:
overlapped = pywintypes.OVERLAPPED()
overlapped.hEvent = event
win32file.WriteFile(self._handle, string, overlapped) # type: ignore
wait_result = win32event.WaitForSingleObject(event, self._timeout)
if wait_result == win32event.WAIT_TIMEOUT:
win32file.CancelIo(self._handle)
raise TimeoutError
return win32file.GetOverlappedResult(self._handle, overlapped, 0)
finally:
win32api.CloseHandle(event)
@check_closed
def sendall(self, string: Buffer, flags: int = 0) -> int:
return self.send(string, flags)
@check_closed
def sendto(self, string: Buffer, address: str) -> int:
self.connect(address)
return self.send(string)
def setblocking(self, flag: bool) -> None:
if flag:
return self.settimeout(None)
return self.settimeout(0)
def settimeout(self, value: int | float | None) -> None:
if value is None:
# Blocking mode
self._timeout = win32event.INFINITE
elif not isinstance(value, (float, int)) or value < 0:
raise ValueError("Timeout value out of range")
else:
# Timeout mode - Value converted to milliseconds
self._timeout = int(value * 1000)
def gettimeout(self) -> int | float | None:
return self._timeout
def setsockopt(self, level: t.Any, optname: t.Any, value: t.Any) -> t.NoReturn:
raise NotImplementedError()
@check_closed
def shutdown(self, how: t.Any) -> None:
return self.close()
class NpipeFileIOBase(io.RawIOBase):
def __init__(self, npipe_socket: NpipeSocket | None) -> None:
self.sock = npipe_socket
def close(self) -> None:
super().close()
self.sock = None
def fileno(self) -> int:
if self.sock is None:
raise RuntimeError("socket is closed")
# TODO: This is definitely a bug, NpipeSocket.fileno() does not exist!
return self.sock.fileno() # type: ignore
def isatty(self) -> bool:
return False
def readable(self) -> bool:
return True
def readinto(self, buf: Buffer) -> int:
if self.sock is None:
raise RuntimeError("socket is closed")
return self.sock.recv_into(buf)
def seekable(self) -> bool:
return False
def writable(self) -> bool:
return False

View file

@ -0,0 +1,311 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import logging
import os
import signal
import socket
import subprocess
import traceback
import typing as t
from queue import Empty
from urllib.parse import urlparse
from .. import constants
from .._import_helper import HTTPAdapter, urllib3, urllib3_connection
from .basehttpadapter import BaseHTTPAdapter
PARAMIKO_IMPORT_ERROR: str | None # pylint: disable=invalid-name
try:
import paramiko
except ImportError:
PARAMIKO_IMPORT_ERROR = traceback.format_exc() # pylint: disable=invalid-name
else:
PARAMIKO_IMPORT_ERROR = None # pylint: disable=invalid-name
if t.TYPE_CHECKING:
from collections.abc import Buffer, Mapping
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class SSHSocket(socket.socket):
def __init__(self, host: str) -> None:
super().__init__(socket.AF_INET, socket.SOCK_STREAM)
self.host = host
self.port = None
self.user = None
if ":" in self.host:
self.host, self.port = self.host.split(":")
if "@" in self.host:
self.user, self.host = self.host.split("@")
self.proc: subprocess.Popen | None = None
def connect(self, *args_: t.Any, **kwargs: t.Any) -> None:
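        # Shell out to the local ssh client and run "docker system dial-stdio"
        # on the remote host; the subprocess' stdin and stdout then act as the
        # raw byte stream backing this socket.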
args = ["ssh"]
if self.user:
args = args + ["-l", self.user]
if self.port:
args = args + ["-p", self.port]
args = args + ["--", self.host, "docker system dial-stdio"]
preexec_func = None
if not constants.IS_WINDOWS_PLATFORM:
def f() -> None:
signal.signal(signal.SIGINT, signal.SIG_IGN)
preexec_func = f
env = dict(os.environ)
# drop LD_LIBRARY_PATH and SSL_CERT_FILE
env.pop("LD_LIBRARY_PATH", None)
env.pop("SSL_CERT_FILE", None)
self.proc = subprocess.Popen( # pylint: disable=consider-using-with
args,
env=env,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
preexec_fn=preexec_func,
)
def _write(self, data: Buffer) -> int:
if not self.proc:
raise RuntimeError(
"SSH subprocess not initiated. connect() must be called first."
)
assert self.proc.stdin is not None
if self.proc.stdin.closed:
raise RuntimeError(
"SSH subprocess not initiated. connect() must be called first after close()."
)
written = self.proc.stdin.write(data)
self.proc.stdin.flush()
return written
def sendall(self, data: Buffer, *args: t.Any, **kwargs: t.Any) -> None:
self._write(data)
def send(self, data: Buffer, *args: t.Any, **kwargs: t.Any) -> int:
return self._write(data)
def recv(self, n: int, *args: t.Any, **kwargs: t.Any) -> bytes:
if not self.proc:
raise RuntimeError(
"SSH subprocess not initiated. connect() must be called first."
)
assert self.proc.stdout is not None
return self.proc.stdout.read(n)
def makefile(self, mode: str, *args: t.Any, **kwargs: t.Any) -> t.IO: # type: ignore
if not self.proc:
self.connect()
assert self.proc is not None
assert self.proc.stdout is not None
self.proc.stdout.channel = self # type: ignore
return self.proc.stdout
def close(self) -> None:
if not self.proc:
return
assert self.proc.stdin is not None
if self.proc.stdin.closed:
return
self.proc.stdin.write(b"\n\n")
self.proc.stdin.flush()
self.proc.terminate()
class SSHConnection(urllib3_connection.HTTPConnection):
def __init__(
self,
*,
ssh_transport: paramiko.Transport | None = None,
timeout: int | float = 60,
host: str,
) -> None:
super().__init__("localhost", timeout=timeout)
self.ssh_transport = ssh_transport
self.timeout = timeout
self.ssh_host = host
self.sock: paramiko.Channel | SSHSocket | None = None
def connect(self) -> None:
if self.ssh_transport:
channel = self.ssh_transport.open_session()
channel.settimeout(self.timeout)
channel.exec_command("docker system dial-stdio")
self.sock = channel
else:
sock = SSHSocket(self.ssh_host)
sock.settimeout(self.timeout)
sock.connect()
self.sock = sock
class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
scheme = "ssh"
def __init__(
self,
*,
ssh_client: paramiko.SSHClient | None = None,
timeout: int | float = 60,
maxsize: int = 10,
host: str,
) -> None:
super().__init__("localhost", timeout=timeout, maxsize=maxsize)
self.ssh_transport: paramiko.Transport | None = None
self.timeout = timeout
if ssh_client:
self.ssh_transport = ssh_client.get_transport()
self.ssh_host = host
def _new_conn(self) -> SSHConnection:
return SSHConnection(
ssh_transport=self.ssh_transport,
timeout=self.timeout,
host=self.ssh_host,
)
# When re-using connections, urllib3 calls fileno() on our
# SSH channel instance, quickly overloading our fd limit. To avoid this,
# we override _get_conn
def _get_conn(self, timeout: int | float) -> SSHConnection:
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError as exc: # self.pool is None
raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from exc
except Empty as exc:
if self.block:
raise urllib3.exceptions.EmptyPoolError(
self,
"Pool reached maximum size and no more connections are allowed.",
) from exc
# Oh well, we'll create a new connection then
return conn or self._new_conn()
class SSHHTTPAdapter(BaseHTTPAdapter):
__attrs__ = HTTPAdapter.__attrs__ + [
"pools",
"timeout",
"ssh_client",
"ssh_params",
"max_pool_size",
]
def __init__(
self,
base_url: str,
timeout: int | float = 60,
pool_connections: int = constants.DEFAULT_NUM_POOLS,
max_pool_size: int = constants.DEFAULT_MAX_POOL_SIZE,
shell_out: bool = False,
) -> None:
self.ssh_client: paramiko.SSHClient | None = None
if not shell_out:
self._create_paramiko_client(base_url)
self._connect()
self.ssh_host = base_url
if base_url.startswith("ssh://"):
self.ssh_host = base_url[len("ssh://") :]
self.timeout = timeout
self.max_pool_size = max_pool_size
self.pools = RecentlyUsedContainer(
pool_connections, dispose_func=lambda p: p.close()
)
super().__init__()
def _create_paramiko_client(self, base_url: str) -> None:
logging.getLogger("paramiko").setLevel(logging.WARNING)
self.ssh_client = paramiko.SSHClient()
base_url_p = urlparse(base_url)
assert base_url_p.hostname is not None
self.ssh_params: dict[str, t.Any] = {
"hostname": base_url_p.hostname,
"port": base_url_p.port,
"username": base_url_p.username,
}
ssh_config_file = os.path.expanduser("~/.ssh/config")
if os.path.exists(ssh_config_file):
conf = paramiko.SSHConfig()
with open(ssh_config_file, "rt", encoding="utf-8") as f:
conf.parse(f)
host_config = conf.lookup(base_url_p.hostname)
if "proxycommand" in host_config:
self.ssh_params["sock"] = paramiko.ProxyCommand(
host_config["proxycommand"]
)
if "hostname" in host_config:
self.ssh_params["hostname"] = host_config["hostname"]
if base_url_p.port is None and "port" in host_config:
self.ssh_params["port"] = host_config["port"]
if base_url_p.username is None and "user" in host_config:
self.ssh_params["username"] = host_config["user"]
if "identityfile" in host_config:
self.ssh_params["key_filename"] = host_config["identityfile"]
self.ssh_client.load_system_host_keys()
self.ssh_client.set_missing_host_key_policy(paramiko.RejectPolicy())
def _connect(self) -> None:
if self.ssh_client:
self.ssh_client.connect(**self.ssh_params)
def get_connection(
self, url: str | bytes, proxies: Mapping[str, str] | None = None
) -> SSHConnectionPool:
if not self.ssh_client:
return SSHConnectionPool(
ssh_client=self.ssh_client,
timeout=self.timeout,
maxsize=self.max_pool_size,
host=self.ssh_host,
)
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
            # Connection is closed; try a reconnect
if self.ssh_client and not self.ssh_client.get_transport():
self._connect()
pool = SSHConnectionPool(
ssh_client=self.ssh_client,
timeout=self.timeout,
maxsize=self.max_pool_size,
host=self.ssh_host,
)
self.pools[url] = pool
return pool
def close(self) -> None:
super().close()
if self.ssh_client:
self.ssh_client.close()

View file

@ -0,0 +1,71 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import typing as t
from .._import_helper import HTTPAdapter, urllib3
from .basehttpadapter import BaseHTTPAdapter
# Resolves OpenSSL issues in some servers:
# https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
# https://github.com/kennethreitz/requests/pull/799
PoolManager = urllib3.poolmanager.PoolManager
class SSLHTTPAdapter(BaseHTTPAdapter):
"""An HTTPS Transport Adapter that uses an arbitrary SSL version."""
__attrs__ = HTTPAdapter.__attrs__ + ["assert_hostname"]
def __init__(
self,
assert_hostname: bool | None = None,
**kwargs: t.Any,
) -> None:
self.assert_hostname = assert_hostname
super().__init__(**kwargs)
def init_poolmanager(
self, connections: int, maxsize: int, block: bool = False, **kwargs: t.Any
) -> None:
kwargs = {
"num_pools": connections,
"maxsize": maxsize,
"block": block,
}
if self.assert_hostname is not None:
kwargs["assert_hostname"] = self.assert_hostname
self.poolmanager = PoolManager(**kwargs)
def get_connection(self, *args: t.Any, **kwargs: t.Any) -> urllib3.ConnectionPool:
"""
        Ensure assert_hostname is set correctly on our pool.

        We already take care of a normal poolmanager via init_poolmanager,
        but we still need to take care of the case when there is a proxy
        poolmanager. Note that this method is no longer called for newer
        requests versions.
"""
# pylint finds our HTTPAdapter stub instead of requests.adapters.HTTPAdapter:
# pylint: disable-next=no-member
conn = super().get_connection(*args, **kwargs)
if (
self.assert_hostname is not None
and conn.assert_hostname != self.assert_hostname # type: ignore
):
conn.assert_hostname = self.assert_hostname # type: ignore
return conn

View file

@ -0,0 +1,126 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import socket
import typing as t
from .. import constants
from .._import_helper import HTTPAdapter, urllib3, urllib3_connection
from .basehttpadapter import BaseHTTPAdapter
if t.TYPE_CHECKING:
from collections.abc import Mapping
from requests import PreparedRequest
from ..._socket_helper import SocketLike
RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
class UnixHTTPConnection(urllib3_connection.HTTPConnection):
def __init__(
self, base_url: str | bytes, unix_socket: str, timeout: int | float = 60
) -> None:
super().__init__("localhost", timeout=timeout)
self.base_url = base_url
self.unix_socket = unix_socket
self.timeout = timeout
self.disable_buffering = False
def connect(self) -> None:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.settimeout(self.timeout)
sock.connect(self.unix_socket)
self.sock = sock
def putheader(self, header: str, *values: str) -> None:
super().putheader(header, *values)
if header == "Connection" and "Upgrade" in values:
self.disable_buffering = True
def response_class(self, sock: SocketLike, *args: t.Any, **kwargs: t.Any) -> t.Any:
# FIXME: We may need to disable buffering on Py3,
# but there's no clear way to do it at the moment. See:
# https://github.com/docker/docker-py/issues/1799
return super().response_class(sock, *args, **kwargs)
class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
def __init__(
self,
base_url: str | bytes,
socket_path: str,
timeout: int | float = 60,
maxsize: int = 10,
) -> None:
super().__init__("localhost", timeout=timeout, maxsize=maxsize)
self.base_url = base_url
self.socket_path = socket_path
self.timeout = timeout
def _new_conn(self) -> UnixHTTPConnection:
return UnixHTTPConnection(self.base_url, self.socket_path, self.timeout)
class UnixHTTPAdapter(BaseHTTPAdapter):
__attrs__ = HTTPAdapter.__attrs__ + [
"pools",
"socket_path",
"timeout",
"max_pool_size",
]
def __init__(
self,
socket_url: str,
timeout: int | float = 60,
pool_connections: int = constants.DEFAULT_NUM_POOLS,
max_pool_size: int = constants.DEFAULT_MAX_POOL_SIZE,
) -> None:
socket_path = socket_url.replace("http+unix://", "")
if not socket_path.startswith("/"):
socket_path = "/" + socket_path
self.socket_path = socket_path
self.timeout = timeout
self.max_pool_size = max_pool_size
def f(p: t.Any) -> None:
p.close()
self.pools = RecentlyUsedContainer(pool_connections, dispose_func=f)
super().__init__()
def get_connection(
self, url: str | bytes, proxies: Mapping[str, str] | None = None
) -> UnixHTTPConnectionPool:
with self.pools.lock:
pool = self.pools.get(url)
if pool:
return pool
pool = UnixHTTPConnectionPool(
url, self.socket_path, self.timeout, maxsize=self.max_pool_size
)
self.pools[url] = pool
return pool
def request_url(self, request: PreparedRequest, proxies: Mapping[str, str]) -> str:
# The select_proxy utility in requests errors out when the provided URL
        # does not have a hostname, as is the case when using a UNIX socket.
# Since proxies are an irrelevant notion in the case of UNIX sockets
# anyway, we simply return the path URL directly.
# See also: https://github.com/docker/docker-py/issues/811
return request.path_url

View file

@ -0,0 +1,90 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import socket
import typing as t
from .._import_helper import urllib3
from ..errors import DockerException
if t.TYPE_CHECKING:
from requests import Response
_T = t.TypeVar("_T")
class CancellableStream(t.Generic[_T]):
"""
Stream wrapper for real-time events, logs, etc. from the server.
Example:
>>> events = client.events()
>>> for event in events:
... print(event)
>>> # and cancel from another thread
>>> events.close()
"""
def __init__(self, stream: t.Generator[_T], response: Response) -> None:
self._stream = stream
self._response = response
def __iter__(self) -> t.Self:
return self
def __next__(self) -> _T:
try:
return next(self._stream)
except urllib3.exceptions.ProtocolError as exc:
raise StopIteration from exc
except socket.error as exc:
raise StopIteration from exc
next = __next__
def close(self) -> None:
"""
Closes the event streaming.
"""
if not self._response.raw.closed:
# find the underlying socket object
# based on api.client._get_raw_response_socket
sock_fp = self._response.raw._fp.fp # type: ignore
if hasattr(sock_fp, "raw"):
sock_raw = sock_fp.raw
if hasattr(sock_raw, "sock"):
sock = sock_raw.sock
elif hasattr(sock_raw, "_sock"):
sock = sock_raw._sock
elif hasattr(sock_fp, "channel"):
# We are working with a paramiko (SSH) channel, which does not
# support cancelable streams with the current implementation
raise DockerException(
"Cancellable streams not supported for the SSH protocol"
)
else:
sock = sock_fp._sock # type: ignore
if hasattr(urllib3.contrib, "pyopenssl") and isinstance(
sock, urllib3.contrib.pyopenssl.WrappedSocket
):
sock = sock.socket
sock.shutdown(socket.SHUT_RDWR)
sock.close()

View file

@ -0,0 +1,310 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import io
import os
import random
import re
import tarfile
import tempfile
import typing as t
from ..constants import IS_WINDOWS_PLATFORM, WINDOWS_LONGPATH_PREFIX
from . import fnmatch
if t.TYPE_CHECKING:
from collections.abc import Sequence
_SEP = re.compile("/|\\\\") if IS_WINDOWS_PLATFORM else re.compile("/")
def tar(
path: str,
exclude: list[str] | None = None,
dockerfile: tuple[str, str | None] | tuple[None, None] | None = None,
fileobj: t.IO[bytes] | None = None,
gzip: bool = False,
) -> t.IO[bytes]:
root = os.path.abspath(path)
exclude = exclude or []
dockerfile = dockerfile or (None, None)
extra_files: list[tuple[str, str]] = []
if dockerfile[1] is not None:
assert dockerfile[0] is not None
dockerignore_contents = "\n".join(
(exclude or [".dockerignore"]) + [dockerfile[0]]
)
extra_files = [
(".dockerignore", dockerignore_contents),
dockerfile, # type: ignore
]
return create_archive(
files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile[0])),
root=root,
fileobj=fileobj,
gzip=gzip,
extra_files=extra_files,
)
def exclude_paths(
root: str, patterns: list[str], dockerfile: str | None = None
) -> set[str]:
"""
Given a root directory path and a list of .dockerignore patterns, return
an iterator of all paths (both regular files and directories) in the root
directory that do *not* match any of the patterns.
All paths returned are relative to the root.
"""
if dockerfile is None:
dockerfile = "Dockerfile"
patterns.append("!" + dockerfile)
pm = PatternMatcher(patterns)
return set(pm.walk(root))
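# Example (illustrative): with patterns ["*.pyc", "!keep.pyc"], exclude_paths()
# returns every path under root except *.pyc files, while keep.pyc (and the
# Dockerfile itself) is still included.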
def build_file_list(root: str) -> list[str]:
files = []
for dirname, dirnames, fnames in os.walk(root):
for filename in fnames + dirnames:
longpath = os.path.join(dirname, filename)
files.append(longpath.replace(root, "", 1).lstrip("/"))
return files
def create_archive(
root: str,
files: Sequence[str] | None = None,
fileobj: t.IO[bytes] | None = None,
gzip: bool = False,
extra_files: Sequence[tuple[str, str]] | None = None,
) -> t.IO[bytes]:
extra_files = extra_files or []
if not fileobj:
# pylint: disable-next=consider-using-with
fileobj = tempfile.NamedTemporaryFile() # noqa: SIM115
with tarfile.open(mode="w:gz" if gzip else "w", fileobj=fileobj) as tarf:
if files is None:
files = build_file_list(root)
extra_names = set(e[0] for e in extra_files)
for path in files:
if path in extra_names:
# Extra files override context files with the same name
continue
full_path = os.path.join(root, path)
i = tarf.gettarinfo(full_path, arcname=path)
if i is None:
# This happens when we encounter a socket file. We can safely
# ignore it and proceed.
continue # type: ignore
# Workaround https://bugs.python.org/issue32713
if i.mtime < 0 or i.mtime > 8**11 - 1:
i.mtime = int(i.mtime)
if IS_WINDOWS_PLATFORM:
# Windows does not keep track of the execute bit, so we make files
# and directories executable by default.
i.mode = i.mode & 0o755 | 0o111
if i.isfile():
try:
with open(full_path, "rb") as f:
tarf.addfile(i, f)
except IOError as exc:
raise IOError(f"Can not read file in context: {full_path}") from exc
else:
# Directories, FIFOs, symlinks... do not need to be read.
tarf.addfile(i, None)
for name, contents in extra_files:
info = tarfile.TarInfo(name)
contents_encoded = contents.encode("utf-8")
info.size = len(contents_encoded)
tarf.addfile(info, io.BytesIO(contents_encoded))
fileobj.seek(0)
return fileobj
def mkbuildcontext(dockerfile: io.BytesIO | t.IO[bytes]) -> t.IO[bytes]:
# pylint: disable-next=consider-using-with
f = tempfile.NamedTemporaryFile() # noqa: SIM115
try:
with tarfile.open(mode="w", fileobj=f) as tarf:
if isinstance(dockerfile, io.StringIO): # type: ignore
raise TypeError("Please use io.BytesIO to create in-memory Dockerfiles")
if isinstance(dockerfile, io.BytesIO):
dfinfo = tarfile.TarInfo("Dockerfile")
dfinfo.size = len(dockerfile.getvalue())
dockerfile.seek(0)
else:
dfinfo = tarf.gettarinfo(fileobj=dockerfile, arcname="Dockerfile")
tarf.addfile(dfinfo, dockerfile)
f.seek(0)
    except Exception:
f.close()
raise
return f
def split_path(p: str) -> list[str]:
return [pt for pt in re.split(_SEP, p) if pt and pt != "."]
def normalize_slashes(p: str) -> str:
if IS_WINDOWS_PLATFORM:
return "/".join(split_path(p))
return p
def walk(root: str, patterns: Sequence[str], default: bool = True) -> t.Generator[str]:
pm = PatternMatcher(patterns)
return pm.walk(root)
# Heavily based on
# https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go
class PatternMatcher:
def __init__(self, patterns: Sequence[str]) -> None:
self.patterns = list(filter(lambda p: p.dirs, [Pattern(p) for p in patterns]))
self.patterns.append(Pattern("!.dockerignore"))
def matches(self, filepath: str) -> bool:
matched = False
parent_path = os.path.dirname(filepath)
parent_path_dirs = split_path(parent_path)
for pattern in self.patterns:
negative = pattern.exclusion
match = pattern.match(filepath)
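            # If the pattern did not match the file itself, check whether it
            # matches one of the file's parent directories: a pattern that
            # matches a directory also applies to everything below it.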
if (
not match
and parent_path != ""
and len(pattern.dirs) <= len(parent_path_dirs)
):
match = pattern.match(
os.path.sep.join(parent_path_dirs[: len(pattern.dirs)])
)
if match:
matched = not negative
return matched
def walk(self, root: str) -> t.Generator[str]:
def rec_walk(current_dir: str) -> t.Generator[str]:
for f in os.listdir(current_dir):
fpath = os.path.join(os.path.relpath(current_dir, root), f)
if fpath.startswith("." + os.path.sep):
fpath = fpath[2:]
match = self.matches(fpath)
if not match:
yield fpath
cur = os.path.join(root, fpath)
if not os.path.isdir(cur) or os.path.islink(cur):
continue
if match:
# If we want to skip this file and it is a directory
# then we should first check to see if there's an
# excludes pattern (e.g. !dir/file) that starts with this
# dir. If so then we cannot skip this dir.
skip = True
for pat in self.patterns:
if not pat.exclusion:
continue
if pat.cleaned_pattern.startswith(normalize_slashes(fpath)):
skip = False
break
if skip:
continue
yield from rec_walk(cur)
return rec_walk(root)
class Pattern:
def __init__(self, pattern_str: str) -> None:
self.exclusion = False
if pattern_str.startswith("!"):
self.exclusion = True
pattern_str = pattern_str[1:]
self.dirs = self.normalize(pattern_str)
self.cleaned_pattern = "/".join(self.dirs)
@classmethod
def normalize(cls, p: str) -> list[str]:
# Remove trailing spaces
p = p.strip()
# Leading and trailing slashes are not relevant. Yes,
# "foo.py/" must exclude the "foo.py" regular file. "."
# components are not relevant either, even if the whole
# pattern is only ".", as the Docker reference states: "For
# historical reasons, the pattern . is ignored."
# ".." component must be cleared with the potential previous
# component, regardless of whether it exists: "A preprocessing
# step [...] eliminates . and .. elements using Go's
# filepath.".
i = 0
split = split_path(p)
while i < len(split):
if split[i] == "..":
del split[i]
if i > 0:
del split[i - 1]
i -= 1
else:
i += 1
return split
def match(self, filepath: str) -> bool:
return fnmatch.fnmatch(normalize_slashes(filepath), self.cleaned_pattern)
def process_dockerfile(
dockerfile: str | None, path: str
) -> tuple[str, str | None] | tuple[None, None]:
if not dockerfile:
return (None, None)
abs_dockerfile = dockerfile
if not os.path.isabs(dockerfile):
abs_dockerfile = os.path.join(path, dockerfile)
if IS_WINDOWS_PLATFORM and path.startswith(WINDOWS_LONGPATH_PREFIX):
abs_dockerfile = f"{WINDOWS_LONGPATH_PREFIX}{os.path.normpath(abs_dockerfile[len(WINDOWS_LONGPATH_PREFIX) :])}"
if os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[
0
] or os.path.relpath(abs_dockerfile, path).startswith(".."):
# Dockerfile not in context - read data to insert into tar later
with open(abs_dockerfile, "rt", encoding="utf-8") as df:
return (f".dockerfile.{random.getrandbits(160):x}", df.read())
# Dockerfile is inside the context - return path relative to context root
if dockerfile == abs_dockerfile:
# Only calculate relpath if necessary to avoid errors
# on Windows client -> Linux Docker
# see https://github.com/docker/compose/issues/5969
dockerfile = os.path.relpath(abs_dockerfile, path)
return (dockerfile, None)

View file

@ -0,0 +1,89 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import json
import logging
import os
import typing as t
from ..constants import IS_WINDOWS_PLATFORM
DOCKER_CONFIG_FILENAME = os.path.join(".docker", "config.json")
LEGACY_DOCKER_CONFIG_FILENAME = ".dockercfg"
log = logging.getLogger(__name__)
def get_default_config_file() -> str:
return os.path.join(home_dir(), DOCKER_CONFIG_FILENAME)
def find_config_file(config_path: str | None = None) -> str | None:
homedir = home_dir()
paths = list(
filter(
None,
[
config_path, # 1
config_path_from_environment(), # 2
os.path.join(homedir, DOCKER_CONFIG_FILENAME), # 3
os.path.join(homedir, LEGACY_DOCKER_CONFIG_FILENAME), # 4
],
)
)
log.debug("Trying paths: %s", repr(paths))
for path in paths:
if os.path.exists(path):
log.debug("Found file at path: %s", path)
return path
log.debug("No config file found")
return None
def config_path_from_environment() -> str | None:
config_dir = os.environ.get("DOCKER_CONFIG")
if not config_dir:
return None
return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))
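# Example (illustrative): with DOCKER_CONFIG=/opt/docker in the environment,
# this returns "/opt/docker/config.json".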
def home_dir() -> str:
"""
Get the user's home directory, using the same logic as the Docker Engine
client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.
"""
if IS_WINDOWS_PLATFORM:
return os.environ.get("USERPROFILE", "")
return os.path.expanduser("~")
def load_general_config(config_path: str | None = None) -> dict[str, t.Any]:
config_file = find_config_file(config_path)
if not config_file:
return {}
try:
with open(config_file, "rt", encoding="utf-8") as f:
return json.load(f)
except (IOError, ValueError) as e:
# In the case of a legacy `.dockercfg` file, we will not
# be able to load any JSON data.
log.debug(e)
log.debug("All parsing attempts failed - returning empty config")
return {}

View file

@ -0,0 +1,67 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import functools
import typing as t
from .. import errors
from . import utils
if t.TYPE_CHECKING:
from collections.abc import Callable
from ..api.client import APIClient
_Self = t.TypeVar("_Self")
_P = t.ParamSpec("_P")
_R = t.TypeVar("_R")
def minimum_version(
version: str,
) -> Callable[
[Callable[t.Concatenate[_Self, _P], _R]],
Callable[t.Concatenate[_Self, _P], _R],
]:
def decorator(
f: Callable[t.Concatenate[_Self, _P], _R],
) -> Callable[t.Concatenate[_Self, _P], _R]:
@functools.wraps(f)
def wrapper(self: _Self, *args: _P.args, **kwargs: _P.kwargs) -> _R:
# We use _Self instead of APIClient since this is used for mixins for APIClient.
# This unfortunately means that self._version does not exist in the mixin,
# it only exists after mixing in. This is why we ignore types here.
if utils.version_lt(self._version, version): # type: ignore
raise errors.InvalidVersion(
f"{f.__name__} is not available for version < {version}"
)
return f(self, *args, **kwargs)
return wrapper
return decorator
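# Example usage (illustrative; DummyApiMixin is hypothetical):
#
#     class DummyApiMixin:
#         @minimum_version("1.25")
#         def do_something(self, arg):
#             ...
#
# Calling do_something() on a client whose API version is below 1.25 raises
# errors.InvalidVersion instead of contacting the daemon.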
def update_headers(
f: Callable[t.Concatenate[APIClient, _P], _R],
) -> Callable[t.Concatenate[APIClient, _P], _R]:
def inner(self: APIClient, *args: _P.args, **kwargs: _P.kwargs) -> _R:
if "HttpHeaders" in self._general_configs:
if not kwargs.get("headers"):
kwargs["headers"] = self._general_configs["HttpHeaders"]
else:
# We cannot (yet) model that kwargs["headers"] should be a dictionary
kwargs["headers"].update(self._general_configs["HttpHeaders"]) # type: ignore
return f(self, *args, **kwargs)
return inner

View file

@ -0,0 +1,128 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
"""Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case in account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""
from __future__ import annotations
import re
__all__ = ["fnmatch", "fnmatchcase", "translate"]
_cache: dict[str, re.Pattern] = {}
_MAXCACHE = 100
def _purge() -> None:
"""Clear the pattern cache"""
_cache.clear()
def fnmatch(name: str, pat: str) -> bool:
"""Test whether FILENAME matches PATTERN.
Patterns are Unix shell style:
    * matches everything except "/"
    ** matches everything, including "/"
    ? matches any single character except "/"
[seq] matches any character in seq
[!seq] matches any char not in seq
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you do not want this, use fnmatchcase(FILENAME, PATTERN).
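
    Examples (illustrative)::

        >>> fnmatch("a/b/c.txt", "**/*.txt")
        True
        >>> fnmatch("a/b/c.txt", "*.txt")
        False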
"""
name = name.lower()
pat = pat.lower()
return fnmatchcase(name, pat)
def fnmatchcase(name: str, pat: str) -> bool:
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which does not case-normalize
its arguments.
"""
try:
re_pat = _cache[pat]
except KeyError:
res = translate(pat)
if len(_cache) >= _MAXCACHE:
_cache.clear()
_cache[pat] = re_pat = re.compile(res)
return re_pat.match(name) is not None
def translate(pat: str) -> str:
"""Translate a shell PATTERN to a regular expression.
There is no way to quote meta-characters.
"""
i, n = 0, len(pat)
res = "^"
while i < n:
c = pat[i]
i = i + 1
if c == "*":
if i < n and pat[i] == "*":
# is some flavor of "**"
i = i + 1
# Treat **/ as ** so eat the "/"
if i < n and pat[i] == "/":
i = i + 1
if i >= n:
# is "**EOF" - to align with .gitignore just accept all
res = res + ".*"
else:
# is "**"
# Note that this allows for any # of /'s (even 0) because
# the .* will eat everything, even /'s
res = res + "(.*/)?"
else:
# is "*" so map it to anything but "/"
res = res + "[^/]*"
elif c == "?":
# "?" is any char except "/"
res = res + "[^/]"
elif c == "[":
j = i
if j < n and pat[j] == "!":
j = j + 1
if j < n and pat[j] == "]":
j = j + 1
while j < n and pat[j] != "]":
j = j + 1
if j >= n:
res = res + "\\["
else:
stuff = pat[i:j].replace("\\", "\\\\")
i = j + 1
if stuff[0] == "!":
stuff = "^" + stuff[1:]
elif stuff[0] == "^":
stuff = "\\" + stuff
res = f"{res}[{stuff}]"
else:
res = res + re.escape(c)
return res + "$"

View file

@ -0,0 +1,100 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import json
import json.decoder
import typing as t
from ..errors import StreamParseError
if t.TYPE_CHECKING:
import re
from collections.abc import Callable
_T = t.TypeVar("_T")
json_decoder = json.JSONDecoder()
def stream_as_text(stream: t.Generator[bytes | str]) -> t.Generator[str]:
"""
Given a stream of bytes or text, if any of the items in the stream
are bytes convert them to text.
This function can be removed once we return text streams
instead of byte streams.
"""
for data in stream:
if not isinstance(data, str):
data = data.decode("utf-8", "replace")
yield data
def json_splitter(buffer: str) -> tuple[t.Any, str] | None:
"""Attempt to parse a json object from a buffer. If there is at least one
object, return it and the rest of the buffer, otherwise return None.
"""
buffer = buffer.strip()
try:
obj, index = json_decoder.raw_decode(buffer)
ws: re.Pattern = json.decoder.WHITESPACE # type: ignore[attr-defined]
m = ws.match(buffer, index)
rest = buffer[m.end() :] if m else buffer[index:]
return obj, rest
except ValueError:
return None
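# Example (illustrative): json_splitter('{"a": 1} {"b": 2}') returns
# ({'a': 1}, '{"b": 2}'), while json_splitter('{"a": 1') returns None.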
def json_stream(stream: t.Generator[str | bytes]) -> t.Generator[t.Any]:
"""Given a stream of text, return a stream of json objects.
This handles streams which are inconsistently buffered (some entries may
be newline delimited, and others are not).
"""
return split_buffer(stream, json_splitter, json_decoder.decode)
def line_splitter(buffer: str, separator: str = "\n") -> tuple[str, str] | None:
index = buffer.find(str(separator))
if index == -1:
return None
return buffer[: index + 1], buffer[index + 1 :]
def split_buffer(
stream: t.Generator[str | bytes],
splitter: Callable[[str], tuple[_T, str] | None],
decoder: Callable[[str], _T],
) -> t.Generator[_T | str]:
"""Given a generator which yields strings and a splitter function,
joins all input, splits on the separator and yields each chunk.
Unlike string.split(), each chunk includes the trailing
separator, except for the last one if none was found on the end
of the input.
"""
buffered = ""
for data in stream_as_text(stream):
buffered += data
while True:
buffer_split = splitter(buffered)
if buffer_split is None:
break
item, buffered = buffer_split
yield item
if buffered:
try:
yield decoder(buffered)
except Exception as e:
raise StreamParseError(e) from e

View file

@ -0,0 +1,136 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import re
import typing as t
if t.TYPE_CHECKING:
from collections.abc import Collection, Sequence
PORT_SPEC = re.compile(
"^" # Match full string
"(" # External part
r"(\[?(?P<host>[a-fA-F\d.:]+)\]?:)?" # Address
r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
")?"
r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
"(?P<proto>/(udp|tcp|sctp))?" # Protocol
"$" # Match full string
)
def add_port_mapping(
port_bindings: dict[str, list[str | tuple[str, str | None] | None]],
internal_port: str,
external: str | tuple[str, str | None] | None,
) -> None:
if internal_port in port_bindings:
port_bindings[internal_port].append(external)
else:
port_bindings[internal_port] = [external]
def add_port(
port_bindings: dict[str, list[str | tuple[str, str | None] | None]],
internal_port_range: list[str],
external_range: list[str] | list[tuple[str, str | None]] | None,
) -> None:
if external_range is None:
for internal_port in internal_port_range:
add_port_mapping(port_bindings, internal_port, None)
else:
for internal_port, external_port in zip(internal_port_range, external_range):
            # mypy loses the exact type of external_port elements for some reason...
add_port_mapping(port_bindings, internal_port, external_port) # type: ignore
def build_port_bindings(
ports: Collection[str],
) -> dict[str, list[str | tuple[str, str | None] | None]]:
port_bindings: dict[str, list[str | tuple[str, str | None] | None]] = {}
for port in ports:
internal_port_range, external_range = split_port(port)
add_port(port_bindings, internal_port_range, external_range)
return port_bindings
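# Example (illustrative):
#
#     build_port_bindings(["127.0.0.1:8080:80", "9000:9000/udp"])
#     # -> {"80": [("127.0.0.1", "8080")], "9000/udp": ["9000"]}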
def _raise_invalid_port(port: str) -> t.NoReturn:
raise ValueError(
f'Invalid port "{port}", should be '
"[[remote_ip:]remote_port[-remote_port]:]"
"port[/protocol]"
)
@t.overload
def port_range(
start: str,
end: str | None,
proto: str,
randomly_available_port: bool = False,
) -> list[str]: ...
@t.overload
def port_range(
start: str | None,
end: str | None,
proto: str,
randomly_available_port: bool = False,
) -> list[str] | None: ...
def port_range(
start: str | None,
end: str | None,
proto: str,
randomly_available_port: bool = False,
) -> list[str] | None:
if start is None:
return start
if end is None:
return [f"{start}{proto}"]
if randomly_available_port:
return [f"{start}-{end}{proto}"]
return [f"{port}{proto}" for port in range(int(start), int(end) + 1)]
def split_port(
port: str | int,
) -> tuple[list[str], list[str] | list[tuple[str, str | None]] | None]:
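    """
    Split a port specification into internal ports and external bindings.

    Example (illustrative)::

        >>> split_port("127.0.0.1:8080:80/tcp")
        (['80/tcp'], [('127.0.0.1', '8080')])
    """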
port = str(port)
match = PORT_SPEC.match(port)
if match is None:
_raise_invalid_port(port)
parts = match.groupdict()
host: str | None = parts["host"]
proto: str = parts["proto"] or ""
int_p: str = parts["int"]
ext_p: str = parts["ext"]
internal: list[str] = port_range(int_p, parts["int_end"], proto) # type: ignore
external = port_range(ext_p or None, parts["ext_end"], "", len(internal) == 1)
if host is None:
if (external is not None and len(internal) != len(external)) or ext_p == "":
raise ValueError("Port ranges don't match in length")
return internal, external
external_or_none: Sequence[str | None]
if not external:
external_or_none = [None] * len(internal)
else:
external_or_none = external
if len(internal) != len(external_or_none):
raise ValueError("Port ranges don't match in length")
return internal, [(host, ext_port) for ext_port in external_or_none]

View file

@ -0,0 +1,98 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import typing as t
from .utils import format_environment
class ProxyConfig(dict):
"""
Hold the client's proxy configuration
"""
@property
def http(self) -> str | None:
return self.get("http")
@property
def https(self) -> str | None:
return self.get("https")
@property
def ftp(self) -> str | None:
return self.get("ftp")
@property
def no_proxy(self) -> str | None:
return self.get("no_proxy")
@staticmethod
def from_dict(config: dict[str, str]) -> ProxyConfig:
"""
Instantiate a new ProxyConfig from a dictionary that represents a
client configuration, as described in `the documentation`_.
.. _the documentation:
https://docs.docker.com/network/proxy/#configure-the-docker-client
"""
return ProxyConfig(
http=config.get("httpProxy"),
https=config.get("httpsProxy"),
ftp=config.get("ftpProxy"),
no_proxy=config.get("noProxy"),
)
def get_environment(self) -> dict[str, str]:
"""
Return a dictionary representing the environment variables used to
set the proxy settings.
"""
env = {}
if self.http:
env["http_proxy"] = env["HTTP_PROXY"] = self.http
if self.https:
env["https_proxy"] = env["HTTPS_PROXY"] = self.https
if self.ftp:
env["ftp_proxy"] = env["FTP_PROXY"] = self.ftp
if self.no_proxy:
env["no_proxy"] = env["NO_PROXY"] = self.no_proxy
return env
@t.overload
def inject_proxy_environment(self, environment: list[str]) -> list[str]: ...
@t.overload
def inject_proxy_environment(
self, environment: list[str] | None
) -> list[str] | None: ...
def inject_proxy_environment(
self, environment: list[str] | None
) -> list[str] | None:
"""
Given a list of strings representing environment variables, prepend the
environment variables corresponding to the proxy settings.
"""
if not self:
return environment
proxy_env = format_environment(self.get_environment())
if not environment:
return proxy_env
# It is important to prepend our variables, because we want the
# variables defined in "environment" to take precedence.
return proxy_env + environment
def __str__(self) -> str:
return f"ProxyConfig(http={self.http}, https={self.https}, ftp={self.ftp}, no_proxy={self.no_proxy})"

View file

@ -0,0 +1,242 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import errno
import os
import select
import socket as pysocket
import struct
import typing as t
from ..transport.npipesocket import NpipeSocket
if t.TYPE_CHECKING:
from collections.abc import Sequence
from ..._socket_helper import SocketLike
STDOUT = 1
STDERR = 2
class SocketError(Exception):
pass
# NpipeSockets have their own error types
# pywintypes.error: (109, 'ReadFile', 'The pipe has been ended.')
NPIPE_ENDED = 109
def read(socket: SocketLike, n: int = 4096) -> bytes | None:
"""
Reads at most n bytes from socket
"""
recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
if not isinstance(socket, NpipeSocket): # type: ignore[unreachable]
if not hasattr(select, "poll"):
            # select() is limited to file descriptors below FD_SETSIZE
            # (usually 1024); it is only used when poll() is unavailable.
select.select([socket], [], [])
else:
poll = select.poll()
poll.register(socket, select.POLLIN | select.POLLPRI)
poll.poll()
try:
if hasattr(socket, "recv"):
return socket.recv(n)
if isinstance(socket, pysocket.SocketIO): # type: ignore
return socket.read(n) # type: ignore[unreachable]
return os.read(socket.fileno(), n)
except EnvironmentError as e:
if e.errno not in recoverable_errors:
raise
return None # TODO ???
except Exception as e:
is_pipe_ended = (
isinstance(socket, NpipeSocket) # type: ignore[unreachable]
and len(e.args) > 0
and e.args[0] == NPIPE_ENDED
)
if is_pipe_ended:
# npipes do not support duplex sockets, so we interpret
# a PIPE_ENDED error as a close operation (0-length read).
return b""
raise
def read_exactly(socket: SocketLike, n: int) -> bytes:
"""
Reads exactly n bytes from socket
Raises SocketError if there is not enough data
"""
data = b""
while len(data) < n:
next_data = read(socket, n - len(data))
if not next_data:
raise SocketError("Unexpected EOF")
data += next_data
return data
def next_frame_header(socket: SocketLike) -> tuple[int, int]:
"""
Returns the stream and size of the next frame of data waiting to be read
from socket, according to the protocol defined here:
https://docs.docker.com/engine/api/v1.24/#attach-to-a-container
"""
try:
data = read_exactly(socket, 8)
except SocketError:
return (-1, -1)
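    # The 8-byte frame header is one byte of stream id (0 = stdin,
    # 1 = stdout, 2 = stderr), three padding bytes, and a big-endian
    # 32-bit payload length.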
stream, actual = struct.unpack(">BxxxL", data)
return (stream, actual)
def frames_iter(socket: SocketLike, tty: bool) -> t.Generator[tuple[int, bytes]]:
"""
Return a generator of frames read from socket. A frame is a tuple where
the first item is the stream number and the second item is a chunk of data.
If the tty setting is enabled, the streams are multiplexed into the stdout
stream.
"""
if tty:
return ((STDOUT, frame) for frame in frames_iter_tty(socket))
return frames_iter_no_tty(socket)
def frames_iter_no_tty(socket: SocketLike) -> t.Generator[tuple[int, bytes]]:
"""
Returns a generator of data read from the socket when the tty setting is
not enabled.
"""
while True:
(stream, n) = next_frame_header(socket)
if n < 0:
break
while n > 0:
result = read(socket, n)
if result is None:
continue
data_length = len(result)
if data_length == 0:
# We have reached EOF
return
n -= data_length
yield (stream, result)
def frames_iter_tty(socket: SocketLike) -> t.Generator[bytes]:
"""
Return a generator of data read from the socket when the tty setting is
enabled.
"""
while True:
result = read(socket)
if not result:
# We have reached EOF
return
yield result
@t.overload
def consume_socket_output(
frames: Sequence[bytes] | t.Generator[bytes], demux: t.Literal[False] = False
) -> bytes: ...
@t.overload
def consume_socket_output(
frames: (
Sequence[tuple[bytes | None, bytes | None]]
| t.Generator[tuple[bytes | None, bytes | None]]
),
demux: t.Literal[True],
) -> tuple[bytes, bytes]: ...
@t.overload
def consume_socket_output(
frames: (
Sequence[bytes]
| Sequence[tuple[bytes | None, bytes | None]]
| t.Generator[bytes]
| t.Generator[tuple[bytes | None, bytes | None]]
),
demux: bool = False,
) -> bytes | tuple[bytes, bytes]: ...
def consume_socket_output(
frames: (
Sequence[bytes]
| Sequence[tuple[bytes | None, bytes | None]]
| t.Generator[bytes]
| t.Generator[tuple[bytes | None, bytes | None]]
),
demux: bool = False,
) -> bytes | tuple[bytes, bytes]:
"""
Iterate through frames read from the socket and return the result.
Args:
demux (bool):
If False, stdout and stderr are multiplexed, and the result is the
concatenation of all the frames. If True, the streams are
demultiplexed, and the result is a 2-tuple where each item is the
concatenation of frames belonging to the same stream.
"""
if demux is False:
        # If the streams are multiplexed, the generator yields byte strings
        # that we just need to concatenate.
return b"".join(frames) # type: ignore
# If the streams are demultiplexed, the generator yields tuples
# (stdout, stderr)
out: list[bytes | None] = [None, None]
frame: tuple[bytes | None, bytes | None]
for frame in frames: # type: ignore
# It is guaranteed that for each frame, one and only one stream
# is not None.
if frame == (None, None):
            raise AssertionError("frame must not be (None, None)")
if frame[0] is not None:
if out[0] is None:
out[0] = frame[0]
else:
out[0] += frame[0]
else:
if out[1] is None:
out[1] = frame[1]
else:
out[1] += frame[1] # type: ignore[operator]
return tuple(out) # type: ignore
def demux_adaptor(stream_id: int, data: bytes) -> tuple[bytes | None, bytes | None]:
"""
Utility to demultiplex stdout and stderr when reading frames from the
socket.
"""
if stream_id == STDOUT:
return (data, None)
if stream_id == STDERR:
return (None, data)
raise ValueError(f"{stream_id} is not a valid stream")

View file

@ -0,0 +1,519 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import base64
import collections
import json
import os
import os.path
import shlex
import string
import typing as t
from urllib.parse import urlparse, urlunparse
from ansible_collections.community.docker.plugins.module_utils._version import (
StrictVersion,
)
from .. import errors
from ..constants import (
BYTE_UNITS,
DEFAULT_HTTP_HOST,
DEFAULT_NPIPE,
DEFAULT_UNIX_SOCKET,
)
from ..tls import TLSConfig
if t.TYPE_CHECKING:
from collections.abc import Mapping, Sequence
URLComponents = collections.namedtuple(
"URLComponents",
"scheme netloc url params query fragment",
)
def decode_json_header(header: str | bytes) -> dict[str, t.Any]:
data = base64.b64decode(header).decode("utf-8")
return json.loads(data)
def compare_version(v1: str, v2: str) -> t.Literal[-1, 0, 1]:
"""Compare docker versions
>>> v1 = '1.9'
>>> v2 = '1.10'
>>> compare_version(v1, v2)
1
>>> compare_version(v2, v1)
-1
>>> compare_version(v2, v2)
0
"""
s1 = StrictVersion(v1)
s2 = StrictVersion(v2)
if s1 == s2:
return 0
if s1 > s2:
return -1
return 1
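# Note that compare_version() returns 1 when v1 is *older* than v2 (see the
# doctest above), so a positive result below means "less than".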
def version_lt(v1: str, v2: str) -> bool:
return compare_version(v1, v2) > 0
def version_gte(v1: str, v2: str) -> bool:
return not version_lt(v1, v2)
def _convert_port_binding(
binding: (
tuple[str, str | int | None]
| tuple[str | int | None]
| dict[str, str]
| str
| int
),
) -> dict[str, str]:
result = {"HostIp": "", "HostPort": ""}
host_port: str | int | None = ""
if isinstance(binding, tuple):
if len(binding) == 2:
host_port = binding[1] # type: ignore
result["HostIp"] = binding[0]
elif isinstance(binding[0], str):
result["HostIp"] = binding[0]
else:
host_port = binding[0]
elif isinstance(binding, dict):
if "HostPort" in binding:
host_port = binding["HostPort"]
if "HostIp" in binding:
result["HostIp"] = binding["HostIp"]
else:
raise ValueError(binding)
else:
host_port = binding
result["HostPort"] = str(host_port) if host_port is not None else ""
return result
def convert_port_bindings(
port_bindings: dict[
str | int,
tuple[str, str | int | None]
| tuple[str | int | None]
| dict[str, str]
| str
| int
| list[
tuple[str, str | int | None]
| tuple[str | int | None]
| dict[str, str]
| str
| int
],
],
) -> dict[str, list[dict[str, str]]]:
result = {}
for k, v in port_bindings.items():
key = str(k)
if "/" not in key:
key += "/tcp"
if isinstance(v, list):
result[key] = [_convert_port_binding(binding) for binding in v]
else:
result[key] = [_convert_port_binding(v)]
return result
def convert_volume_binds(
binds: (
list[str]
| Mapping[
str | bytes, dict[str, str | bytes] | dict[str, str] | bytes | str | int
]
),
) -> list[str]:
if isinstance(binds, list):
return binds # type: ignore
result = []
for k, v in binds.items():
if isinstance(k, bytes):
k = k.decode("utf-8")
if isinstance(v, dict):
if "ro" in v and "mode" in v:
raise ValueError(f'Binding cannot contain both "ro" and "mode": {v!r}')
bind = v["bind"]
if isinstance(bind, bytes):
bind = bind.decode("utf-8")
if "ro" in v:
mode = "ro" if v["ro"] else "rw"
elif "mode" in v:
mode = v["mode"] # type: ignore # TODO
else:
mode = "rw"
# NOTE: this is only relevant for Linux hosts
# (does not apply in Docker Desktop)
propagation_modes = [
"rshared",
"shared",
"rslave",
"slave",
"rprivate",
"private",
]
if "propagation" in v and v["propagation"] in propagation_modes:
if mode:
mode = ",".join([mode, v["propagation"]]) # type: ignore # TODO
else:
mode = v["propagation"] # type: ignore # TODO
result.append(f"{k}:{bind}:{mode}")
else:
if isinstance(v, bytes):
v = v.decode("utf-8")
result.append(f"{k}:{v}:rw")
return result
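# Illustrative examples for convert_volume_binds(), derived from the logic above:
#   convert_volume_binds({"/host/data": {"bind": "/data", "mode": "ro"}})
#   -> ["/host/data:/data:ro"]
#   convert_volume_binds({"named_volume": "/data"})
#   -> ["named_volume:/data:rw"]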
def convert_tmpfs_mounts(tmpfs: dict[str, str] | list[str]) -> dict[str, str]:
if isinstance(tmpfs, dict):
return tmpfs
if not isinstance(tmpfs, list):
raise ValueError(
f"Expected tmpfs value to be either a list or a dict, found: {type(tmpfs).__name__}"
)
result = {}
for mount in tmpfs:
if isinstance(mount, str):
if ":" in mount:
name, options = mount.split(":", 1)
else:
name = mount
options = ""
else:
raise ValueError(
f"Expected item in tmpfs list to be a string, found: {type(mount).__name__}"
)
result[name] = options
return result
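# Illustrative example for convert_tmpfs_mounts(), derived from the logic above:
#   convert_tmpfs_mounts(["/run:size=64m", "/tmp"])
#   -> {"/run": "size=64m", "/tmp": ""}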
def convert_service_networks(
networks: list[str | dict[str, str]],
) -> list[dict[str, str]]:
if not networks:
return networks # type: ignore
if not isinstance(networks, list):
raise TypeError("networks parameter must be a list.")
result = []
for n in networks:
if isinstance(n, str):
n = {"Target": n}
result.append(n)
return result
def parse_repository_tag(repo_name: str) -> tuple[str, str | None]:
parts = repo_name.rsplit("@", 1)
if len(parts) == 2:
return tuple(parts) # type: ignore
parts = repo_name.rsplit(":", 1)
if len(parts) == 2 and "/" not in parts[1]:
return tuple(parts) # type: ignore
return repo_name, None
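# Illustrative examples for parse_repository_tag(), derived from the logic above:
#   parse_repository_tag("ubuntu:22.04")          -> ("ubuntu", "22.04")
#   parse_repository_tag("ubuntu@sha256:abc123")  -> ("ubuntu", "sha256:abc123")
#   parse_repository_tag("localhost:5000/ubuntu") -> ("localhost:5000/ubuntu", None)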
def parse_host(addr: str | None, is_win32: bool = False, tls: bool = False) -> str:
# Sensible defaults
if not addr and is_win32:
return DEFAULT_NPIPE
if not addr or addr.strip() == "unix://":
return DEFAULT_UNIX_SOCKET
addr = addr.strip()
parsed_url = urlparse(addr)
proto = parsed_url.scheme
if not proto or any(x not in string.ascii_letters + "+" for x in proto):
# https://bugs.python.org/issue754016
parsed_url = urlparse("//" + addr, "tcp")
proto = "tcp"
if proto == "fd":
raise errors.DockerException("fd protocol is not implemented")
# These protos are valid aliases for our library but not for the
# official spec
if proto in ("http", "https"):
tls = proto == "https"
proto = "tcp"
elif proto == "http+unix":
proto = "unix"
if proto not in ("tcp", "unix", "npipe", "ssh"):
raise errors.DockerException(f"Invalid bind address protocol: {addr}")
if proto == "tcp" and not parsed_url.netloc:
# "tcp://" is exceptionally disallowed by convention;
# omitting a hostname for other protocols is fine
raise errors.DockerException(f"Invalid bind address format: {addr}")
if any(
[parsed_url.params, parsed_url.query, parsed_url.fragment, parsed_url.password]
):
raise errors.DockerException(f"Invalid bind address format: {addr}")
if parsed_url.path and proto == "ssh":
raise errors.DockerException(
f"Invalid bind address format: no path allowed for this protocol: {addr}"
)
path = parsed_url.path
if proto == "unix" and parsed_url.hostname is not None:
# For legacy reasons, we consider unix://path
# to be valid and equivalent to unix:///path
path = f"{parsed_url.hostname}/{path}"
    netloc = parsed_url.netloc
    if proto in ("tcp", "ssh"):
        port = parsed_url.port or 0
        if port <= 0:
            # Fall back to the conventional default ports: 22 for SSH,
            # 2376 for TLS-secured TCP and 2375 for plain TCP.
            port = 22 if proto == "ssh" else (2376 if tls else 2375)
            netloc = f"{parsed_url.netloc}:{port}"
        if not parsed_url.hostname:
            netloc = f"{DEFAULT_HTTP_HOST}:{port}"
# Rewrite schemes to fit library internals (requests adapters)
if proto == "tcp":
proto = f"http{'s' if tls else ''}"
elif proto == "unix":
proto = "http+unix"
if proto in ("http+unix", "npipe"):
return f"{proto}://{path}".rstrip("/")
return urlunparse(
URLComponents(
scheme=proto,
netloc=netloc,
url=path,
params="",
query="",
fragment="",
)
).rstrip("/")
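# Illustrative examples for parse_host() on a non-Windows host, derived from the
# logic above (default ports assume 2375 for plain TCP and 2376 for TLS):
#   parse_host(None)                          -> DEFAULT_UNIX_SOCKET
#   parse_host("tcp://1.2.3.4")               -> "http://1.2.3.4:2375"
#   parse_host("tcp://1.2.3.4", tls=True)     -> "https://1.2.3.4:2376"
#   parse_host("ssh://user@example.com")      -> "ssh://user@example.com:22"
#   parse_host("unix:///var/run/docker.sock") -> "http+unix:///var/run/docker.sock"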
def parse_devices(devices: Sequence[dict[str, str] | str]) -> list[dict[str, str]]:
device_list = []
for device in devices:
if isinstance(device, dict):
device_list.append(device)
continue
if not isinstance(device, str):
raise errors.DockerException(f"Invalid device type {type(device)}")
device_mapping = device.split(":")
if device_mapping:
path_on_host = device_mapping[0]
if len(device_mapping) > 1:
path_in_container = device_mapping[1]
else:
path_in_container = path_on_host
if len(device_mapping) > 2:
permissions = device_mapping[2]
else:
permissions = "rwm"
device_list.append(
{
"PathOnHost": path_on_host,
"PathInContainer": path_in_container,
"CgroupPermissions": permissions,
}
)
return device_list
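# Illustrative examples for parse_devices(), derived from the logic above:
#   parse_devices(["/dev/sda:/dev/xvda:r"])
#   -> [{"PathOnHost": "/dev/sda", "PathInContainer": "/dev/xvda", "CgroupPermissions": "r"}]
#   parse_devices(["/dev/snd"])
#   -> [{"PathOnHost": "/dev/snd", "PathInContainer": "/dev/snd", "CgroupPermissions": "rwm"}]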
def kwargs_from_env(
assert_hostname: bool | None = None,
environment: Mapping[str, str] | None = None,
) -> dict[str, t.Any]:
if not environment:
environment = os.environ
host = environment.get("DOCKER_HOST")
# empty string for cert path is the same as unset.
cert_path = environment.get("DOCKER_CERT_PATH") or None
    # empty string for tls verify counts as "false".
    # Any other value counts as true; unset counts as false.
tls_verify_str = environment.get("DOCKER_TLS_VERIFY")
if tls_verify_str == "":
tls_verify = False
else:
tls_verify = tls_verify_str is not None
enable_tls = cert_path or tls_verify
params: dict[str, t.Any] = {}
if host:
params["base_url"] = host
if not enable_tls:
return params
if not cert_path:
cert_path = os.path.join(os.path.expanduser("~"), ".docker")
if not tls_verify and assert_hostname is None:
# assert_hostname is a subset of TLS verification,
# so if it is not set already then set it to false.
assert_hostname = False
params["tls"] = TLSConfig(
client_cert=(
os.path.join(cert_path, "cert.pem"),
os.path.join(cert_path, "key.pem"),
),
ca_cert=os.path.join(cert_path, "ca.pem"),
verify=tls_verify,
assert_hostname=assert_hostname,
)
return params
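# Illustrative example for kwargs_from_env(), derived from the logic above
# (paths are hypothetical):
#   kwargs_from_env(environment={
#       "DOCKER_HOST": "tcp://1.2.3.4:2376",
#       "DOCKER_TLS_VERIFY": "1",
#       "DOCKER_CERT_PATH": "/home/user/.docker",
#   })
#   -> {"base_url": "tcp://1.2.3.4:2376",
#       "tls": TLSConfig(client_cert=("/home/user/.docker/cert.pem",
#                                     "/home/user/.docker/key.pem"),
#                        ca_cert="/home/user/.docker/ca.pem",
#                        verify=True, assert_hostname=None)}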
def convert_filters(
filters: Mapping[str, bool | str | int | list[int] | list[str] | list[str | int]],
) -> str:
result = {}
for k, v in filters.items():
if isinstance(v, bool):
v = "true" if v else "false"
if not isinstance(v, list):
v = [
v,
]
result[k] = [str(item) if not isinstance(item, str) else item for item in v]
return json.dumps(result)
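# Illustrative examples for convert_filters(), derived from the logic above:
#   convert_filters({"dangling": True})        -> '{"dangling": ["true"]}'
#   convert_filters({"label": ["a=b", "c=d"]}) -> '{"label": ["a=b", "c=d"]}'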
def parse_bytes(s: int | float | str) -> int | float:
if isinstance(s, (int, float)):
return s
if len(s) == 0:
return 0
if s[-2:-1].isalpha() and s[-1].isalpha() and (s[-1] == "b" or s[-1] == "B"):
s = s[:-1]
units = BYTE_UNITS
suffix = s[-1].lower()
    # Check if the variable is a string representation of an int
    # without a units part. If so, assume the value is in bytes.
if suffix.isdigit():
digits_part = s
suffix = "b"
else:
digits_part = s[:-1]
if suffix in units or suffix.isdigit():
try:
digits = float(digits_part)
except ValueError as exc:
raise errors.DockerException(
f"Failed converting the string value for memory ({digits_part}) to an integer."
) from exc
        # Convert to an integer for the final result
s = int(digits * units[suffix])
else:
raise errors.DockerException(
f"The specified value for memory ({s}) should specify the units. The postfix should be one of the `b` `k` `m` `g` characters"
)
return s
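# Illustrative examples for parse_bytes(), assuming the usual Docker SDK
# BYTE_UNITS mapping of b/k/m/g to powers of 1024:
#   parse_bytes("512m") -> 536870912
#   parse_bytes("1gb")  -> 1073741824  (a trailing "b" after the unit is dropped)
#   parse_bytes("42")   -> 42          (no unit: the value is taken as bytes)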
def normalize_links(links: dict[str, str] | Sequence[tuple[str, str]]) -> list[str]:
if isinstance(links, dict):
sorted_links = sorted(links.items())
else:
sorted_links = sorted(links)
return [f"{k}:{v}" if v else k for k, v in sorted_links]
def parse_env_file(env_file: str | os.PathLike) -> dict[str, str]:
"""
Reads a line-separated environment file.
The format of each line should be "key=value".
"""
environment = {}
with open(env_file, "rt", encoding="utf-8") as f:
for line in f:
if line[0] == "#":
continue
line = line.strip()
if not line:
continue
parse_line = line.split("=", 1)
if len(parse_line) == 2:
k, v = parse_line
environment[k] = v
else:
raise errors.DockerException(
f"Invalid line in environment file {env_file}:\n{line}"
)
return environment
def split_command(command: str) -> list[str]:
return shlex.split(command)
def format_environment(environment: Mapping[str, str | bytes | None]) -> list[str]:
def format_env(key: str, value: str | bytes | None) -> str:
if value is None:
return key
if isinstance(value, bytes):
value = value.decode("utf-8")
return f"{key}={value}"
return [format_env(*var) for var in environment.items()]
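# Illustrative example for format_environment(), derived from the logic above:
#   format_environment({"TERM": "xterm", "EMPTY": "", "UNSET": None})
#   -> ["TERM=xterm", "EMPTY=", "UNSET"]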
def format_extra_hosts(extra_hosts: Mapping[str, str], task: bool = False) -> list[str]:
# Use format dictated by Swarm API if container is part of a task
if task:
return [f"{v} {k}" for k, v in sorted(extra_hosts.items())]
return [f"{k}:{v}" for k, v in sorted(extra_hosts.items())]


@@ -0,0 +1,555 @@
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import abc
import os
import platform
import re
import sys
import traceback
import typing as t
from collections.abc import Mapping, Sequence
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE
from ansible_collections.community.docker.plugins.module_utils._util import (
DEFAULT_DOCKER_HOST,
DEFAULT_TIMEOUT_SECONDS,
DEFAULT_TLS,
DEFAULT_TLS_VERIFY,
DOCKER_COMMON_ARGS,
DOCKER_MUTUALLY_EXCLUSIVE,
DOCKER_REQUIRED_TOGETHER,
sanitize_result,
update_tls_hostname,
)
from ansible_collections.community.docker.plugins.module_utils._version import (
LooseVersion,
)
HAS_DOCKER_PY_2 = False # pylint: disable=invalid-name
HAS_DOCKER_PY_3 = False # pylint: disable=invalid-name
HAS_DOCKER_ERROR: None | str # pylint: disable=invalid-name
HAS_DOCKER_TRACEBACK: None | str # pylint: disable=invalid-name
docker_version: str | None # pylint: disable=invalid-name
try:
from docker import __version__ as docker_version
    from docker.errors import APIError, NotFound, TLSParameterError
from docker.tls import TLSConfig
if LooseVersion(docker_version) >= LooseVersion("3.0.0"):
HAS_DOCKER_PY_3 = True # pylint: disable=invalid-name
from docker import APIClient as Client
elif LooseVersion(docker_version) >= LooseVersion("2.0.0"):
HAS_DOCKER_PY_2 = True # pylint: disable=invalid-name
from docker import APIClient as Client
else:
from docker import Client # type: ignore
except ImportError as exc:
HAS_DOCKER_ERROR = str(exc) # pylint: disable=invalid-name
HAS_DOCKER_TRACEBACK = traceback.format_exc() # pylint: disable=invalid-name
HAS_DOCKER_PY = False # pylint: disable=invalid-name
docker_version = None # pylint: disable=invalid-name
else:
HAS_DOCKER_PY = True # pylint: disable=invalid-name
HAS_DOCKER_ERROR = None # pylint: disable=invalid-name
HAS_DOCKER_TRACEBACK = None # pylint: disable=invalid-name
try:
from requests.exceptions import ( # noqa: F401, pylint: disable=unused-import
RequestException,
)
except ImportError:
# Either Docker SDK for Python is no longer using requests, or Docker SDK for Python is not around either,
# or Docker SDK for Python's dependency requests is missing. In any case, define an exception
# class RequestException so that our code does not break.
class RequestException(Exception): # type: ignore
pass
if t.TYPE_CHECKING:
from collections.abc import Callable
MIN_DOCKER_VERSION = "2.0.0"
if not HAS_DOCKER_PY:
    # No Docker SDK for Python. Create a placeholder client to allow
    # instantiation of AnsibleModule and proper error handling
class Client: # type: ignore # noqa: F811, pylint: disable=function-redefined
def __init__(self, **kwargs: t.Any) -> None:
pass
class APIError(Exception): # type: ignore # noqa: F811, pylint: disable=function-redefined
pass
class NotFound(Exception): # type: ignore # noqa: F811, pylint: disable=function-redefined
pass
def _get_tls_config(
fail_function: Callable[[str], t.NoReturn], **kwargs: t.Any
) -> TLSConfig:
if "assert_hostname" in kwargs and LooseVersion(docker_version) >= LooseVersion(
"7.0.0b1"
):
assert_hostname = kwargs.pop("assert_hostname")
if assert_hostname is not None:
fail_function(
"tls_hostname is not compatible with Docker SDK for Python 7.0.0+. You are using"
f" Docker SDK for Python {docker_version}. The tls_hostname option (value: {assert_hostname})"
" has either been set directly or with the environment variable DOCKER_TLS_HOSTNAME."
" Make sure it is not set, or switch to an older version of Docker SDK for Python."
)
# Filter out all None parameters
kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
try:
return TLSConfig(**kwargs)
except TLSParameterError as exc:
fail_function(f"TLS config error: {exc}")
def is_using_tls(auth_data: dict[str, t.Any]) -> bool:
return auth_data["tls_verify"] or auth_data["tls"]
def get_connect_params(
auth_data: dict[str, t.Any], fail_function: Callable[[str], t.NoReturn]
) -> dict[str, t.Any]:
if is_using_tls(auth_data):
auth_data["docker_host"] = auth_data["docker_host"].replace(
"tcp://", "https://"
)
result = {
"base_url": auth_data["docker_host"],
"version": auth_data["api_version"],
"timeout": auth_data["timeout"],
}
if auth_data["tls_verify"]:
# TLS with verification
tls_config: dict[str, t.Any] = {
"verify": True,
}
if auth_data["tls_hostname"] is not None:
tls_config["assert_hostname"] = auth_data["tls_hostname"]
if auth_data["cert_path"] and auth_data["key_path"]:
tls_config["client_cert"] = (auth_data["cert_path"], auth_data["key_path"])
if auth_data["cacert_path"]:
tls_config["ca_cert"] = auth_data["cacert_path"]
result["tls"] = _get_tls_config(fail_function=fail_function, **tls_config)
elif auth_data["tls"]:
# TLS without verification
tls_config = {
"verify": False,
}
if auth_data["cert_path"] and auth_data["key_path"]:
tls_config["client_cert"] = (auth_data["cert_path"], auth_data["key_path"])
result["tls"] = _get_tls_config(fail_function=fail_function, **tls_config)
if auth_data.get("use_ssh_client"):
if LooseVersion(docker_version) < LooseVersion("4.4.0"):
fail_function(
"use_ssh_client=True requires Docker SDK for Python 4.4.0 or newer"
)
result["use_ssh_client"] = True
# No TLS
return result
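# Illustrative example for get_connect_params(), derived from the logic above
# (auth_data keys that do not affect this example are elided):
#   get_connect_params(
#       {"docker_host": "tcp://1.2.3.4:2376", "tls": False, "tls_verify": True,
#        "tls_hostname": None, "cert_path": None, "key_path": None,
#        "cacert_path": None, "api_version": "auto", "timeout": 60},
#       fail_function=fail,
#   )
#   -> {"base_url": "https://1.2.3.4:2376", "version": "auto", "timeout": 60,
#       "tls": TLSConfig(verify=True)}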
DOCKERPYUPGRADE_SWITCH_TO_DOCKER = (
"Try `pip uninstall docker-py` followed by `pip install docker`."
)
DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade."
class AnsibleDockerClientBase(Client):
def __init__(
self,
min_docker_version: str | None = None,
min_docker_api_version: str | None = None,
) -> None:
        if min_docker_version is None:
            min_docker_version = MIN_DOCKER_VERSION
        if not HAS_DOCKER_PY:
            # Fail before evaluating docker_version, which is None when the import failed
            msg = missing_required_lib("Docker SDK for Python: docker>=5.0.0")
            msg = f"{msg}, for example via `pip install docker`. The error was: {HAS_DOCKER_ERROR}"
            self.fail(msg, exception=HAS_DOCKER_TRACEBACK)
        self.docker_py_version = LooseVersion(docker_version)
        if self.docker_py_version < LooseVersion(min_docker_version):
            msg = (
                f"Error: Docker SDK for Python version is {docker_version} ({platform.node()}'s Python {sys.executable})."
                f" Minimum version required is {min_docker_version}. "
            )
            if self.docker_py_version < LooseVersion("2.0"):
                msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
            else:
                msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
            self.fail(msg)
self._connect_params = get_connect_params(
self.auth_params, fail_function=self.fail
)
try:
super().__init__(**self._connect_params)
self.docker_api_version_str = self.api_version
except APIError as exc:
self.fail(f"Docker API error: {exc}")
except Exception as exc: # pylint: disable=broad-exception-caught
self.fail(f"Error connecting: {exc}")
self.docker_api_version = LooseVersion(self.docker_api_version_str)
min_docker_api_version = min_docker_api_version or "1.25"
if self.docker_api_version < LooseVersion(min_docker_api_version):
self.fail(
f"Docker API version is {self.docker_api_version_str}. Minimum version required is {min_docker_api_version}."
)
def log(self, msg: t.Any, pretty_print: bool = False) -> None:
pass
# if self.debug:
# from .util import log_debug
# log_debug(msg, pretty_print=pretty_print)
@abc.abstractmethod
def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
pass
@abc.abstractmethod
def deprecate(
self,
msg: str,
version: str | None = None,
date: str | None = None,
collection_name: str | None = None,
) -> None:
pass
@staticmethod
def _get_value(
param_name: str,
param_value: t.Any,
env_variable: str | None,
default_value: t.Any | None,
value_type: t.Literal["str", "bool", "int"] = "str",
) -> t.Any:
if param_value is not None:
# take module parameter value
if value_type == "bool":
if param_value in BOOLEANS_TRUE:
return True
if param_value in BOOLEANS_FALSE:
return False
return bool(param_value)
if value_type == "int":
return int(param_value)
return param_value
if env_variable is not None:
env_value = os.environ.get(env_variable)
if env_value is not None:
# take the env variable value
if param_name == "cert_path":
return os.path.join(env_value, "cert.pem")
if param_name == "cacert_path":
return os.path.join(env_value, "ca.pem")
if param_name == "key_path":
return os.path.join(env_value, "key.pem")
if value_type == "bool":
if env_value in BOOLEANS_TRUE:
return True
if env_value in BOOLEANS_FALSE:
return False
return bool(env_value)
if value_type == "int":
return int(env_value)
return env_value
# take the default
return default_value
@abc.abstractmethod
def _get_params(self) -> dict[str, t.Any]:
pass
@property
def auth_params(self) -> dict[str, t.Any]:
        # Get authentication credentials.
        # Precedence: module parameters -> environment variables -> defaults.
self.log("Getting credentials")
client_params = self._get_params()
params = {}
for key in DOCKER_COMMON_ARGS:
params[key] = client_params.get(key)
result = {
"docker_host": self._get_value(
"docker_host",
params["docker_host"],
"DOCKER_HOST",
DEFAULT_DOCKER_HOST,
value_type="str",
),
"tls_hostname": self._get_value(
"tls_hostname",
params["tls_hostname"],
"DOCKER_TLS_HOSTNAME",
None,
value_type="str",
),
"api_version": self._get_value(
"api_version",
params["api_version"],
"DOCKER_API_VERSION",
"auto",
value_type="str",
),
"cacert_path": self._get_value(
"cacert_path",
params["ca_path"],
"DOCKER_CERT_PATH",
None,
value_type="str",
),
"cert_path": self._get_value(
"cert_path",
params["client_cert"],
"DOCKER_CERT_PATH",
None,
value_type="str",
),
"key_path": self._get_value(
"key_path",
params["client_key"],
"DOCKER_CERT_PATH",
None,
value_type="str",
),
"tls": self._get_value(
"tls", params["tls"], "DOCKER_TLS", DEFAULT_TLS, value_type="bool"
),
"tls_verify": self._get_value(
"validate_certs",
params["validate_certs"],
"DOCKER_TLS_VERIFY",
DEFAULT_TLS_VERIFY,
value_type="bool",
),
"timeout": self._get_value(
"timeout",
params["timeout"],
"DOCKER_TIMEOUT",
DEFAULT_TIMEOUT_SECONDS,
value_type="int",
),
"use_ssh_client": self._get_value(
"use_ssh_client",
params["use_ssh_client"],
None,
False,
value_type="bool",
),
}
if LooseVersion(docker_version) < LooseVersion("7.0.0b1"):
update_tls_hostname(result)
return result
def _handle_ssl_error(self, error: Exception) -> t.NoReturn:
        match = re.match(r"hostname.*doesn't match ('.*')", str(error))
        if match:
            hostname = self.auth_params["tls_hostname"]
            self.fail(
                f"You asked for verification that the Docker daemon's certificate hostname matches {hostname}. "
                f"The actual certificate's hostname is {match.group(1)}. Most likely you need to set DOCKER_TLS_HOSTNAME "
                f"or pass `tls_hostname` with a value of {match.group(1)}. You may also use TLS without verification by "
                "setting the `tls` parameter to true."
            )
self.fail(f"SSL Exception: {error}")
class AnsibleDockerClient(AnsibleDockerClientBase):
def __init__(
self,
argument_spec: dict[str, t.Any] | None = None,
supports_check_mode: bool = False,
mutually_exclusive: Sequence[Sequence[str]] | None = None,
required_together: Sequence[Sequence[str]] | None = None,
required_if: (
Sequence[
tuple[str, t.Any, Sequence[str]]
| tuple[str, t.Any, Sequence[str], bool]
]
| None
) = None,
required_one_of: Sequence[Sequence[str]] | None = None,
required_by: dict[str, Sequence[str]] | None = None,
min_docker_version: str | None = None,
min_docker_api_version: str | None = None,
option_minimal_versions: dict[str, t.Any] | None = None,
option_minimal_versions_ignore_params: Sequence[str] | None = None,
fail_results: dict[str, t.Any] | None = None,
):
# Modules can put information in here which will always be returned
# in case client.fail() is called.
self.fail_results = fail_results or {}
merged_arg_spec = {}
merged_arg_spec.update(DOCKER_COMMON_ARGS)
if argument_spec:
merged_arg_spec.update(argument_spec)
self.arg_spec = merged_arg_spec
mutually_exclusive_params: list[Sequence[str]] = []
mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
if mutually_exclusive:
mutually_exclusive_params += mutually_exclusive
required_together_params: list[Sequence[str]] = []
required_together_params += DOCKER_REQUIRED_TOGETHER
if required_together:
required_together_params += required_together
self.module = AnsibleModule(
argument_spec=merged_arg_spec,
supports_check_mode=supports_check_mode,
mutually_exclusive=mutually_exclusive_params,
required_together=required_together_params,
required_if=required_if,
required_one_of=required_one_of,
required_by=required_by or {},
)
self.debug = self.module.params.get("debug")
self.check_mode = self.module.check_mode
super().__init__(
min_docker_version=min_docker_version,
min_docker_api_version=min_docker_api_version,
)
if option_minimal_versions is not None:
self._get_minimal_versions(
option_minimal_versions, option_minimal_versions_ignore_params
)
def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
self.fail_results.update(kwargs)
self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))
def deprecate(
self,
msg: str,
version: str | None = None,
date: str | None = None,
collection_name: str | None = None,
) -> None:
self.module.deprecate(
msg, version=version, date=date, collection_name=collection_name
)
def _get_params(self) -> dict[str, t.Any]:
return self.module.params
def _get_minimal_versions(
self,
option_minimal_versions: dict[str, t.Any],
ignore_params: Sequence[str] | None = None,
) -> None:
self.option_minimal_versions: dict[str, dict[str, t.Any]] = {}
for option in self.module.argument_spec:
if ignore_params is not None and option in ignore_params:
continue
self.option_minimal_versions[option] = {}
self.option_minimal_versions.update(option_minimal_versions)
for option, data in self.option_minimal_versions.items():
# Test whether option is supported, and store result
support_docker_py = True
support_docker_api = True
if "docker_py_version" in data:
support_docker_py = self.docker_py_version >= LooseVersion(
data["docker_py_version"]
)
if "docker_api_version" in data:
support_docker_api = self.docker_api_version >= LooseVersion(
data["docker_api_version"]
)
data["supported"] = support_docker_py and support_docker_api
# Fail if option is not supported but used
if not data["supported"]:
# Test whether option is specified
if "detect_usage" in data:
used = data["detect_usage"](self)
else:
used = self.module.params.get(option) is not None
if used and "default" in self.module.argument_spec[option]:
used = (
self.module.params[option]
!= self.module.argument_spec[option]["default"]
)
if used:
# If the option is used, compose error message.
if "usage_msg" in data:
usg = data["usage_msg"]
else:
usg = f"set {option} option"
if not support_docker_api:
msg = f"Docker API version is {self.docker_api_version_str}. Minimum version required is {data['docker_api_version']} to {usg}."
elif not support_docker_py:
msg = (
f"Docker SDK for Python version is {docker_version} ({platform.node()}'s Python {sys.executable})."
f" Minimum version required is {data['docker_py_version']} to {usg}. {DOCKERPYUPGRADE_UPGRADE_DOCKER}"
)
else:
# should not happen
msg = f"Cannot {usg} with your configuration."
self.fail(msg)
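    # Illustrative shape of the option_minimal_versions argument (the option
    # name and values here are hypothetical):
    #   {
    #       "my_option": {
    #           "docker_py_version": "3.5.0",
    #           "docker_api_version": "1.30",
    #           "detect_usage": lambda client: client.module.params["my_option"] is not None,
    #           "usage_msg": "use the my_option option",
    #       },
    #   }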
def report_warnings(
self, result: t.Any, warnings_key: Sequence[str] | None = None
) -> None:
"""
Checks result of client operation for warnings, and if present, outputs them.
warnings_key should be a list of keys used to crawl the result dictionary.
For example, if warnings_key == ['a', 'b'], the function will consider
result['a']['b'] if these keys exist. If the result is a non-empty string, it
will be reported as a warning. If the result is a list, every entry will be
reported as a warning.
In most cases (if warnings are returned at all), warnings_key should be
['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
"""
if warnings_key is None:
warnings_key = ["Warnings"]
for key in warnings_key:
if not isinstance(result, Mapping):
return
result = result.get(key)
if isinstance(result, Sequence):
for warning in result:
self.module.warn(f"Docker warning: {warning}")
elif isinstance(result, str) and result:
self.module.warn(f"Docker warning: {result}")


@@ -0,0 +1,729 @@
# Copyright 2016 Red Hat | Ansible
# Copyright (c) 2022 Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import abc
import os
import re
import typing as t
from collections.abc import Mapping, Sequence
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE
from ansible_collections.community.docker.plugins.module_utils._version import (
LooseVersion,
)
try:
from requests.exceptions import ( # noqa: F401, pylint: disable=unused-import
RequestException,
SSLError,
)
except ImportError:
# Define an exception class RequestException so that our code does not break.
class RequestException(Exception): # type: ignore
pass
from ansible_collections.community.docker.plugins.module_utils._api import auth
from ansible_collections.community.docker.plugins.module_utils._api.api.client import (
APIClient as Client,
)
from ansible_collections.community.docker.plugins.module_utils._api.errors import (
APIError,
MissingRequirementException,
NotFound,
TLSParameterError,
)
from ansible_collections.community.docker.plugins.module_utils._api.tls import TLSConfig
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
convert_filters,
parse_repository_tag,
)
from ansible_collections.community.docker.plugins.module_utils._util import (
DEFAULT_DOCKER_HOST,
DEFAULT_TIMEOUT_SECONDS,
DEFAULT_TLS,
DEFAULT_TLS_VERIFY,
DOCKER_COMMON_ARGS,
DOCKER_MUTUALLY_EXCLUSIVE,
DOCKER_REQUIRED_TOGETHER,
sanitize_result,
update_tls_hostname,
)
if t.TYPE_CHECKING:
from collections.abc import Callable
def _get_tls_config(
fail_function: Callable[[str], t.NoReturn], **kwargs: t.Any
) -> TLSConfig:
try:
return TLSConfig(**kwargs)
except TLSParameterError as exc:
fail_function(f"TLS config error: {exc}")
def is_using_tls(auth_data: dict[str, t.Any]) -> bool:
return auth_data["tls_verify"] or auth_data["tls"]
def get_connect_params(
auth_data: dict[str, t.Any], fail_function: Callable[[str], t.NoReturn]
) -> dict[str, t.Any]:
if is_using_tls(auth_data):
auth_data["docker_host"] = auth_data["docker_host"].replace(
"tcp://", "https://"
)
result = {
"base_url": auth_data["docker_host"],
"version": auth_data["api_version"],
"timeout": auth_data["timeout"],
}
if auth_data["tls_verify"]:
# TLS with verification
tls_config = {
"verify": True,
"assert_hostname": auth_data["tls_hostname"],
"fail_function": fail_function,
}
if auth_data["cert_path"] and auth_data["key_path"]:
tls_config["client_cert"] = (auth_data["cert_path"], auth_data["key_path"])
if auth_data["cacert_path"]:
tls_config["ca_cert"] = auth_data["cacert_path"]
result["tls"] = _get_tls_config(**tls_config)
elif auth_data["tls"]:
# TLS without verification
tls_config = {
"verify": False,
"fail_function": fail_function,
}
if auth_data["cert_path"] and auth_data["key_path"]:
tls_config["client_cert"] = (auth_data["cert_path"], auth_data["key_path"])
result["tls"] = _get_tls_config(**tls_config)
if auth_data.get("use_ssh_client"):
result["use_ssh_client"] = True
# No TLS
return result
class AnsibleDockerClientBase(Client):
def __init__(self, min_docker_api_version: str | None = None) -> None:
self._connect_params = get_connect_params(
self.auth_params, fail_function=self.fail
)
try:
super().__init__(**self._connect_params)
self.docker_api_version_str = self.api_version
except MissingRequirementException as exc:
self.fail(
missing_required_lib(exc.requirement), exception=exc.import_exception
)
except APIError as exc:
self.fail(f"Docker API error: {exc}")
except Exception as exc: # pylint: disable=broad-exception-caught
self.fail(f"Error connecting: {exc}")
self.docker_api_version = LooseVersion(self.docker_api_version_str)
min_docker_api_version = min_docker_api_version or "1.25"
if self.docker_api_version < LooseVersion(min_docker_api_version):
self.fail(
f"Docker API version is {self.docker_api_version_str}. Minimum version required is {min_docker_api_version}."
)
def log(self, msg: t.Any, pretty_print: bool = False) -> None:
pass
# if self.debug:
# from .util import log_debug
# log_debug(msg, pretty_print=pretty_print)
@abc.abstractmethod
def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
pass
@abc.abstractmethod
def deprecate(
self,
msg: str,
version: str | None = None,
date: str | None = None,
collection_name: str | None = None,
) -> None:
pass
@staticmethod
def _get_value(
param_name: str,
param_value: t.Any,
env_variable: str | None,
default_value: t.Any | None,
value_type: t.Literal["str", "bool", "int"] = "str",
) -> t.Any:
if param_value is not None:
# take module parameter value
if value_type == "bool":
if param_value in BOOLEANS_TRUE:
return True
if param_value in BOOLEANS_FALSE:
return False
return bool(param_value)
if value_type == "int":
return int(param_value)
return param_value
if env_variable is not None:
env_value = os.environ.get(env_variable)
if env_value is not None:
# take the env variable value
if param_name == "cert_path":
return os.path.join(env_value, "cert.pem")
if param_name == "cacert_path":
return os.path.join(env_value, "ca.pem")
if param_name == "key_path":
return os.path.join(env_value, "key.pem")
if value_type == "bool":
if env_value in BOOLEANS_TRUE:
return True
if env_value in BOOLEANS_FALSE:
return False
return bool(env_value)
if value_type == "int":
return int(env_value)
return env_value
# take the default
return default_value
@abc.abstractmethod
def _get_params(self) -> dict[str, t.Any]:
pass
@property
def auth_params(self) -> dict[str, t.Any]:
        # Get authentication credentials.
        # Precedence: module parameters -> environment variables -> defaults.
self.log("Getting credentials")
client_params = self._get_params()
params = {}
for key in DOCKER_COMMON_ARGS:
params[key] = client_params.get(key)
result = {
"docker_host": self._get_value(
"docker_host",
params["docker_host"],
"DOCKER_HOST",
DEFAULT_DOCKER_HOST,
value_type="str",
),
"tls_hostname": self._get_value(
"tls_hostname",
params["tls_hostname"],
"DOCKER_TLS_HOSTNAME",
None,
value_type="str",
),
"api_version": self._get_value(
"api_version",
params["api_version"],
"DOCKER_API_VERSION",
"auto",
value_type="str",
),
"cacert_path": self._get_value(
"cacert_path",
params["ca_path"],
"DOCKER_CERT_PATH",
None,
value_type="str",
),
"cert_path": self._get_value(
"cert_path",
params["client_cert"],
"DOCKER_CERT_PATH",
None,
value_type="str",
),
"key_path": self._get_value(
"key_path",
params["client_key"],
"DOCKER_CERT_PATH",
None,
value_type="str",
),
"tls": self._get_value(
"tls", params["tls"], "DOCKER_TLS", DEFAULT_TLS, value_type="bool"
),
"tls_verify": self._get_value(
"validate_certs",
params["validate_certs"],
"DOCKER_TLS_VERIFY",
DEFAULT_TLS_VERIFY,
value_type="bool",
),
"timeout": self._get_value(
"timeout",
params["timeout"],
"DOCKER_TIMEOUT",
DEFAULT_TIMEOUT_SECONDS,
value_type="int",
),
"use_ssh_client": self._get_value(
"use_ssh_client",
params["use_ssh_client"],
None,
False,
value_type="bool",
),
}
def depr(*args: t.Any, **kwargs: t.Any) -> None:
self.deprecate(*args, **kwargs)
update_tls_hostname(
result,
old_behavior=True,
deprecate_function=depr,
uses_tls=is_using_tls(result),
)
return result
def _handle_ssl_error(self, error: Exception) -> t.NoReturn:
        match = re.match(r"hostname.*doesn't match ('.*')", str(error))
        if match:
            hostname = self.auth_params["tls_hostname"]
            self.fail(
                f"You asked for verification that the Docker daemon's certificate hostname matches {hostname}. "
                f"The actual certificate's hostname is {match.group(1)}. Most likely you need to set DOCKER_TLS_HOSTNAME "
                f"or pass `tls_hostname` with a value of {match.group(1)}. You may also use TLS without verification by "
                "setting the `tls` parameter to true."
            )
self.fail(f"SSL Exception: {error}")
def get_container_by_id(self, container_id: str) -> dict[str, t.Any] | None:
try:
self.log(f"Inspecting container Id {container_id}")
result = self.get_json("/containers/{0}/json", container_id)
self.log("Completed container inspection")
return result
except NotFound:
return None
except Exception as exc: # pylint: disable=broad-exception-caught
self.fail(f"Error inspecting container: {exc}")
def get_container(self, name: str | None) -> dict[str, t.Any] | None:
"""
Lookup a container and return the inspection results.
"""
if name is None:
return None
search_name = name
if not name.startswith("/"):
search_name = "/" + name
result = None
try:
params = {
"limit": -1,
"all": 1,
"size": 0,
"trunc_cmd": 0,
}
containers = self.get_json("/containers/json", params=params)
for container in containers:
self.log(f"testing container: {container['Names']}")
if (
isinstance(container["Names"], list)
and search_name in container["Names"]
):
result = container
break
if container["Id"].startswith(name):
result = container
break
if container["Id"] == name:
result = container
break
except SSLError as exc:
self._handle_ssl_error(exc)
except Exception as exc: # pylint: disable=broad-exception-caught
self.fail(f"Error retrieving container list: {exc}")
if result is None:
return None
return self.get_container_by_id(result["Id"])
def get_network(
self, name: str | None = None, network_id: str | None = None
) -> dict[str, t.Any] | None:
"""
Lookup a network and return the inspection results.
"""
if name is None and network_id is None:
return None
result = None
if network_id is None:
try:
networks = self.get_json("/networks")
for network in networks:
self.log(f"testing network: {network['Name']}")
if name == network["Name"]:
result = network
break
if network["Id"].startswith(name):
result = network
break
except SSLError as exc:
self._handle_ssl_error(exc)
except Exception as exc: # pylint: disable=broad-exception-caught
self.fail(f"Error retrieving network list: {exc}")
if result is not None:
network_id = result["Id"]
if network_id is not None:
try:
self.log(f"Inspecting network Id {network_id}")
result = self.get_json("/networks/{0}", network_id)
self.log("Completed network inspection")
except NotFound:
return None
except Exception as exc: # pylint: disable=broad-exception-caught
self.fail(f"Error inspecting network: {exc}")
return result
def _image_lookup(self, name: str, tag: str | None) -> list[dict[str, t.Any]]:
"""
Including a tag in the name parameter sent to the Docker SDK for Python images method
does not work consistently. Instead, get the result set for name and manually check
if the tag exists.
"""
try:
params: dict[str, t.Any] = {
"only_ids": 0,
"all": 0,
}
if LooseVersion(self.api_version) < LooseVersion("1.25"):
# only use "filter" on API 1.24 and under, as it is deprecated
params["filter"] = name
else:
params["filters"] = convert_filters({"reference": name})
images = self.get_json("/images/json", params=params)
except Exception as exc: # pylint: disable=broad-exception-caught
self.fail(f"Error searching for image {name} - {exc}")
if tag:
lookup = f"{name}:{tag}"
lookup_digest = f"{name}@{tag}"
response = images
images = []
for image in response:
tags = image.get("RepoTags")
digests = image.get("RepoDigests")
if (tags and lookup in tags) or (digests and lookup_digest in digests):
images = [image]
break
return images
def find_image(self, name: str, tag: str | None) -> dict[str, t.Any] | None:
"""
Lookup an image (by name and tag) and return the inspection results.
"""
if not name:
return None
self.log(f"Find image {name}:{tag}")
images = self._image_lookup(name, tag)
if not images:
# In API <= 1.20 seeing 'docker.io/<name>' as the name of images pulled from docker hub
registry, repo_name = auth.resolve_repository_name(name)
if registry == "docker.io":
# If docker.io is explicitly there in name, the image
# is not found in some cases (#41509)
self.log(f"Check for docker.io image: {repo_name}")
images = self._image_lookup(repo_name, tag)
if not images and repo_name.startswith("library/"):
# Sometimes library/xxx images are not found
lookup = repo_name[len("library/") :]
self.log(f"Check for docker.io image: {lookup}")
images = self._image_lookup(lookup, tag)
if not images:
# Last case for some Docker versions: if docker.io was not there,
# it can be that the image was not found either
# (https://github.com/ansible/ansible/pull/15586)
lookup = f"{registry}/{repo_name}"
self.log(f"Check for docker.io image: {lookup}")
images = self._image_lookup(lookup, tag)
if not images and "/" not in repo_name:
# This seems to be happening with podman-docker
# (https://github.com/ansible-collections/community.docker/issues/291)
lookup = f"{registry}/library/{repo_name}"
self.log(f"Check for docker.io image: {lookup}")
images = self._image_lookup(lookup, tag)
if len(images) > 1:
self.fail(f"Daemon returned more than one result for {name}:{tag}")
if len(images) == 1:
try:
return self.get_json("/images/{0}/json", images[0]["Id"])
except NotFound:
self.log(f"Image {name}:{tag} not found.")
return None
except Exception as exc: # pylint: disable=broad-exception-caught
self.fail(f"Error inspecting image {name}:{tag} - {exc}")
self.log(f"Image {name}:{tag} not found.")
return None
def find_image_by_id(
self, image_id: str, accept_missing_image: bool = False
) -> dict[str, t.Any] | None:
"""
Lookup an image (by ID) and return the inspection results.
"""
if not image_id:
return None
self.log(f"Find image {image_id} (by ID)")
try:
return self.get_json("/images/{0}/json", image_id)
except NotFound as exc:
if not accept_missing_image:
self.fail(f"Error inspecting image ID {image_id} - {exc}")
self.log(f"Image {image_id} not found.")
return None
except Exception as exc: # pylint: disable=broad-exception-caught
self.fail(f"Error inspecting image ID {image_id} - {exc}")
@staticmethod
def _compare_images(
img1: dict[str, t.Any] | None, img2: dict[str, t.Any] | None
) -> bool:
if img1 is None or img2 is None:
return img1 == img2
filter_keys = {"Metadata"}
img1_filtered = {k: v for k, v in img1.items() if k not in filter_keys}
img2_filtered = {k: v for k, v in img2.items() if k not in filter_keys}
return img1_filtered == img2_filtered
def pull_image(
self, name: str, tag: str = "latest", image_platform: str | None = None
) -> tuple[dict[str, t.Any] | None, bool]:
"""
Pull an image
"""
self.log(f"Pulling image {name}:{tag}")
old_image = self.find_image(name, tag)
try:
repository, image_tag = parse_repository_tag(name)
registry, dummy_repo_name = auth.resolve_repository_name(repository)
params = {
"tag": tag or image_tag or "latest",
"fromImage": repository,
}
if image_platform is not None:
params["platform"] = image_platform
headers = {}
header = auth.get_config_header(self, registry)
if header:
headers["X-Registry-Auth"] = header
response = self._post(
self._url("/images/create"),
params=params,
headers=headers,
stream=True,
timeout=None,
)
self._raise_for_status(response)
for line in self._stream_helper(response, decode=True):
self.log(line, pretty_print=True)
if line.get("error"):
if line.get("errorDetail"):
error_detail = line.get("errorDetail")
self.fail(
f"Error pulling {name} - code: {error_detail.get('code')} message: {error_detail.get('message')}"
)
else:
self.fail(f"Error pulling {name} - {line.get('error')}")
except Exception as exc: # pylint: disable=broad-exception-caught
self.fail(f"Error pulling image {name}:{tag} - {exc}")
new_image = self.find_image(name, tag)
return new_image, self._compare_images(old_image, new_image)
class AnsibleDockerClient(AnsibleDockerClientBase):
def __init__(
self,
argument_spec: dict[str, t.Any] | None = None,
supports_check_mode: bool = False,
mutually_exclusive: Sequence[Sequence[str]] | None = None,
required_together: Sequence[Sequence[str]] | None = None,
required_if: (
Sequence[
tuple[str, t.Any, Sequence[str]]
| tuple[str, t.Any, Sequence[str], bool]
]
| None
) = None,
required_one_of: Sequence[Sequence[str]] | None = None,
required_by: dict[str, Sequence[str]] | None = None,
min_docker_api_version: str | None = None,
option_minimal_versions: dict[str, t.Any] | None = None,
option_minimal_versions_ignore_params: Sequence[str] | None = None,
fail_results: dict[str, t.Any] | None = None,
):
# Modules can put information in here which will always be returned
# in case client.fail() is called.
self.fail_results = fail_results or {}
merged_arg_spec = {}
merged_arg_spec.update(DOCKER_COMMON_ARGS)
if argument_spec:
merged_arg_spec.update(argument_spec)
self.arg_spec = merged_arg_spec
mutually_exclusive_params: list[Sequence[str]] = []
mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
if mutually_exclusive:
mutually_exclusive_params += mutually_exclusive
required_together_params: list[Sequence[str]] = []
required_together_params += DOCKER_REQUIRED_TOGETHER
if required_together:
required_together_params += required_together
self.module = AnsibleModule(
argument_spec=merged_arg_spec,
supports_check_mode=supports_check_mode,
mutually_exclusive=mutually_exclusive_params,
required_together=required_together_params,
required_if=required_if,
required_one_of=required_one_of,
required_by=required_by or {},
)
self.debug = self.module.params.get("debug")
self.check_mode = self.module.check_mode
super().__init__(min_docker_api_version=min_docker_api_version)
if option_minimal_versions is not None:
self._get_minimal_versions(
option_minimal_versions, option_minimal_versions_ignore_params
)
def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
self.fail_results.update(kwargs)
self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))
def deprecate(
self,
msg: str,
version: str | None = None,
date: str | None = None,
collection_name: str | None = None,
) -> None:
self.module.deprecate(
msg, version=version, date=date, collection_name=collection_name
)
def _get_params(self) -> dict[str, t.Any]:
return self.module.params
def _get_minimal_versions(
self,
option_minimal_versions: dict[str, t.Any],
ignore_params: Sequence[str] | None = None,
) -> None:
self.option_minimal_versions: dict[str, dict[str, t.Any]] = {}
for option in self.module.argument_spec:
if ignore_params is not None and option in ignore_params:
continue
self.option_minimal_versions[option] = {}
self.option_minimal_versions.update(option_minimal_versions)
for option, data in self.option_minimal_versions.items():
# Test whether option is supported, and store result
support_docker_api = True
if "docker_api_version" in data:
support_docker_api = self.docker_api_version >= LooseVersion(
data["docker_api_version"]
)
data["supported"] = support_docker_api
# Fail if option is not supported but used
if not data["supported"]:
# Test whether option is specified
if "detect_usage" in data:
used = data["detect_usage"](self)
else:
used = self.module.params.get(option) is not None
if used and "default" in self.module.argument_spec[option]:
used = (
self.module.params[option]
!= self.module.argument_spec[option]["default"]
)
if used:
# If the option is used, compose error message.
if "usage_msg" in data:
usg = data["usage_msg"]
else:
usg = f"set {option} option"
if not support_docker_api:
msg = f"Docker API version is {self.docker_api_version_str}. Minimum version required is {data['docker_api_version']} to {usg}."
else:
# should not happen
msg = f"Cannot {usg} with your configuration."
self.fail(msg)
def report_warnings(
self, result: t.Any, warnings_key: Sequence[str] | None = None
) -> None:
"""
Checks result of client operation for warnings, and if present, outputs them.
warnings_key should be a list of keys used to crawl the result dictionary.
For example, if warnings_key == ['a', 'b'], the function will consider
result['a']['b'] if these keys exist. If the result is a non-empty string, it
will be reported as a warning. If the result is a list, every entry will be
reported as a warning.
In most cases (if warnings are returned at all), warnings_key should be
['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
"""
if warnings_key is None:
warnings_key = ["Warnings"]
for key in warnings_key:
if not isinstance(result, Mapping):
return
result = result.get(key)
if isinstance(result, Sequence):
for warning in result:
self.module.warn(f"Docker warning: {warning}")
elif isinstance(result, str) and result:
self.module.warn(f"Docker warning: {result}")


@@ -0,0 +1,489 @@
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import abc
import json
import shlex
import typing as t
from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.text.converters import to_text
from ansible_collections.community.docker.plugins.module_utils._api.auth import (
resolve_repository_name,
)
from ansible_collections.community.docker.plugins.module_utils._util import (
DEFAULT_DOCKER_HOST,
DEFAULT_TLS,
DEFAULT_TLS_VERIFY,
DOCKER_MUTUALLY_EXCLUSIVE,
DOCKER_REQUIRED_TOGETHER,
sanitize_result,
)
from ansible_collections.community.docker.plugins.module_utils._version import (
LooseVersion,
)
if t.TYPE_CHECKING:
from collections.abc import Mapping, Sequence
DOCKER_COMMON_ARGS = {
"docker_cli": {"type": "path"},
"docker_host": {
"type": "str",
"fallback": (env_fallback, ["DOCKER_HOST"]),
"aliases": ["docker_url"],
},
"tls_hostname": {
"type": "str",
"fallback": (env_fallback, ["DOCKER_TLS_HOSTNAME"]),
},
"api_version": {
"type": "str",
"default": "auto",
"fallback": (env_fallback, ["DOCKER_API_VERSION"]),
"aliases": ["docker_api_version"],
},
"ca_path": {"type": "path", "aliases": ["ca_cert", "tls_ca_cert", "cacert_path"]},
"client_cert": {"type": "path", "aliases": ["tls_client_cert", "cert_path"]},
"client_key": {"type": "path", "aliases": ["tls_client_key", "key_path"]},
"tls": {
"type": "bool",
"default": DEFAULT_TLS,
"fallback": (env_fallback, ["DOCKER_TLS"]),
},
"validate_certs": {
"type": "bool",
"default": DEFAULT_TLS_VERIFY,
"fallback": (env_fallback, ["DOCKER_TLS_VERIFY"]),
"aliases": ["tls_verify"],
},
# "debug": {"type": "bool", "default: False},
"cli_context": {"type": "str"},
}
class DockerException(Exception):
pass
class AnsibleDockerClientBase:
docker_api_version_str: str | None
docker_api_version: LooseVersion | None
def __init__(
self,
common_args: dict[str, t.Any],
min_docker_api_version: str | None = None,
needs_api_version: bool = True,
) -> None:
self._environment: dict[str, str] = {}
if common_args["tls_hostname"]:
self._environment["DOCKER_TLS_HOSTNAME"] = common_args["tls_hostname"]
if common_args["api_version"] and common_args["api_version"] != "auto":
self._environment["DOCKER_API_VERSION"] = common_args["api_version"]
cli = common_args.get("docker_cli")
if cli is None:
try:
cli = get_bin_path("docker")
except ValueError:
self.fail(
"Cannot find docker CLI in path. Please provide it explicitly with the docker_cli parameter"
)
self._cli = cli
self._cli_base = [self._cli]
docker_host = common_args["docker_host"]
if not docker_host and not common_args["cli_context"]:
docker_host = DEFAULT_DOCKER_HOST
if docker_host:
self._cli_base.extend(["--host", docker_host])
if common_args["validate_certs"]:
self._cli_base.append("--tlsverify")
elif common_args["tls"]:
self._cli_base.append("--tls")
if common_args["ca_path"]:
self._cli_base.extend(["--tlscacert", common_args["ca_path"]])
if common_args["client_cert"]:
self._cli_base.extend(["--tlscert", common_args["client_cert"]])
if common_args["client_key"]:
self._cli_base.extend(["--tlskey", common_args["client_key"]])
if common_args["cli_context"]:
self._cli_base.extend(["--context", common_args["cli_context"]])
# `--format json` was only added as a shorthand for `--format {{ json . }}` in Docker 23.0
dummy, self._version, dummy2 = self.call_cli_json(
"version", "--format", "{{ json . }}", check_rc=True
)
self._info: dict[str, t.Any] | None = None
        if needs_api_version:
            # Validate the response structure before indexing into it; otherwise
            # a malformed response would raise instead of producing a clear error.
            api_version_string = None
            if isinstance(self._version.get("Server"), dict):
                api_version_string = self._version["Server"].get(
                    "ApiVersion"
                ) or self._version["Server"].get("APIVersion")
            if not isinstance(api_version_string, str):
                self.fail(
                    "Cannot determine Docker Daemon information. Are you maybe using podman instead of docker?"
                )
            self.docker_api_version_str = to_text(api_version_string)
            self.docker_api_version = LooseVersion(self.docker_api_version_str)
min_docker_api_version = min_docker_api_version or "1.25"
if self.docker_api_version < LooseVersion(min_docker_api_version):
self.fail(
f"Docker API version is {self.docker_api_version_str}. Minimum version required is {min_docker_api_version}."
)
else:
self.docker_api_version_str = None
self.docker_api_version = None
if min_docker_api_version is not None:
self.fail(
"Internal error: cannot have needs_api_version=False with min_docker_api_version not None"
)
def log(self, msg: str, pretty_print: bool = False) -> None:
pass
# if self.debug:
# from .util import log_debug
# log_debug(msg, pretty_print=pretty_print)
def get_cli(self) -> str:
return self._cli
    def get_version_info(self) -> dict[str, t.Any]:
        return self._version
def _compose_cmd(self, args: t.Sequence[str]) -> list[str]:
return self._cli_base + list(args)
def _compose_cmd_str(self, args: t.Sequence[str]) -> str:
return " ".join(shlex.quote(a) for a in self._compose_cmd(args))
@abc.abstractmethod
def call_cli(
self,
*args: str,
check_rc: bool = False,
data: bytes | None = None,
cwd: str | None = None,
environ_update: dict[str, str] | None = None,
) -> tuple[int, bytes, bytes]:
pass
def call_cli_json(
self,
*args: str,
check_rc: bool = False,
data: bytes | None = None,
cwd: str | None = None,
environ_update: dict[str, str] | None = None,
warn_on_stderr: bool = False,
) -> tuple[int, t.Any, bytes]:
rc, stdout, stderr = self.call_cli(
*args, check_rc=check_rc, data=data, cwd=cwd, environ_update=environ_update
)
if warn_on_stderr and stderr:
self.warn(to_text(stderr))
try:
data = json.loads(stdout)
except Exception as exc: # pylint: disable=broad-exception-caught
self.fail(
f"Error while parsing JSON output of {self._compose_cmd_str(args)}: {exc}\nJSON output: {to_text(stdout)}\n\nError output:\n{to_text(stderr)}",
cmd=self._compose_cmd_str(args),
rc=rc,
stdout=stdout,
stderr=stderr,
)
return rc, data, stderr
def call_cli_json_stream(
self,
*args: str,
check_rc: bool = False,
data: bytes | None = None,
cwd: str | None = None,
environ_update: dict[str, str] | None = None,
warn_on_stderr: bool = False,
) -> tuple[int, list[t.Any], bytes]:
rc, stdout, stderr = self.call_cli(
*args, check_rc=check_rc, data=data, cwd=cwd, environ_update=environ_update
)
if warn_on_stderr and stderr:
self.warn(to_text(stderr))
result = []
try:
for line in stdout.splitlines():
line = line.strip()
if line.startswith(b"{"):
result.append(json.loads(line))
except Exception as exc: # pylint: disable=broad-exception-caught
self.fail(
f"Error while parsing JSON output of {self._compose_cmd_str(args)}: {exc}\nJSON output: {to_text(stdout)}\n\nError output:\n{to_text(stderr)}",
cmd=self._compose_cmd_str(args),
rc=rc,
stdout=stdout,
stderr=stderr,
)
return rc, result, stderr
@abc.abstractmethod
def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
pass
@abc.abstractmethod
def warn(self, msg: str) -> None:
pass
@abc.abstractmethod
def deprecate(
self,
msg: str,
version: str | None = None,
date: str | None = None,
collection_name: str | None = None,
) -> None:
pass
def get_cli_info(self) -> dict[str, t.Any]:
if self._info is None:
dummy, self._info, dummy2 = self.call_cli_json(
"info", "--format", "{{ json . }}", check_rc=True
)
return self._info
def get_client_plugin_info(self, component: str) -> dict[str, t.Any] | None:
cli_info = self.get_cli_info()
if not isinstance(cli_info.get("ClientInfo"), dict):
self.fail(
"Cannot determine Docker client information. Are you maybe using podman instead of docker?"
)
for plugin in cli_info["ClientInfo"].get("Plugins") or []:
if plugin.get("Name") == component:
return plugin
return None
def _image_lookup(self, name: str, tag: str) -> list[dict[str, t.Any]]:
"""
Including a tag in the name parameter sent to the Docker SDK for Python images method
does not work consistently. Instead, get the result set for name and manually check
if the tag exists.
"""
dummy, images, dummy2 = self.call_cli_json_stream(
"image",
"ls",
"--format",
"{{ json . }}",
"--no-trunc",
"--filter",
f"reference={name}",
check_rc=True,
)
if tag:
response = images
images = []
for image in response:
if image.get("Tag") == tag or image.get("Digest") == tag:
images = [image]
break
return images
@t.overload
def find_image(self, name: None, tag: str) -> None: ...
@t.overload
def find_image(self, name: str, tag: str) -> dict[str, t.Any] | None: ...
def find_image(self, name: str | None, tag: str) -> dict[str, t.Any] | None:
"""
Lookup an image (by name and tag) and return the inspection results.
"""
if not name:
return None
self.log(f"Find image {name}:{tag}")
images = self._image_lookup(name, tag)
if not images:
# In API <= 1.20 seeing 'docker.io/<name>' as the name of images pulled from docker hub
registry, repo_name = resolve_repository_name(name)
if registry == "docker.io":
# If docker.io is explicitly there in name, the image
# is not found in some cases (#41509)
self.log(f"Check for docker.io image: {repo_name}")
images = self._image_lookup(repo_name, tag)
if not images and repo_name.startswith("library/"):
# Sometimes library/xxx images are not found
lookup = repo_name[len("library/") :]
self.log(f"Check for docker.io image: {lookup}")
images = self._image_lookup(lookup, tag)
if not images:
# Last case for some Docker versions: if docker.io was not there,
# it can be that the image was not found either
# (https://github.com/ansible/ansible/pull/15586)
lookup = f"{registry}/{repo_name}"
self.log(f"Check for docker.io image: {lookup}")
images = self._image_lookup(lookup, tag)
if not images and "/" not in repo_name:
# This seems to be happening with podman-docker
# (https://github.com/ansible-collections/community.docker/issues/291)
lookup = f"{registry}/library/{repo_name}"
self.log(f"Check for docker.io image: {lookup}")
images = self._image_lookup(lookup, tag)
if len(images) > 1:
self.fail(f"Daemon returned more than one result for {name}:{tag}")
if len(images) == 1:
rc, image, stderr = self.call_cli_json("image", "inspect", images[0]["ID"])
if not image:
self.log(f"Image {name}:{tag} not found.")
return None
if rc != 0:
self.fail(f"Error inspecting image {name}:{tag} - {to_text(stderr)}")
return image[0]
self.log(f"Image {name}:{tag} not found.")
return None
@t.overload
def find_image_by_id(
self, image_id: None, accept_missing_image: bool = False
) -> None: ...
@t.overload
def find_image_by_id(
self, image_id: str | None, accept_missing_image: bool = False
) -> dict[str, t.Any] | None: ...
def find_image_by_id(
self, image_id: str | None, accept_missing_image: bool = False
) -> dict[str, t.Any] | None:
"""
Lookup an image (by ID) and return the inspection results.
"""
if not image_id:
return None
self.log(f"Find image {image_id} (by ID)")
rc, image, stderr = self.call_cli_json("image", "inspect", image_id)
if not image:
if not accept_missing_image:
self.fail(f"Error inspecting image ID {image_id} - {to_text(stderr)}")
self.log(f"Image {image_id} not found.")
return None
if rc != 0:
self.fail(f"Error inspecting image ID {image_id} - {to_text(stderr)}")
return image[0]
class AnsibleModuleDockerClient(AnsibleDockerClientBase):
def __init__(
self,
argument_spec: dict[str, t.Any] | None = None,
supports_check_mode: bool = False,
mutually_exclusive: Sequence[Sequence[str]] | None = None,
required_together: Sequence[Sequence[str]] | None = None,
required_if: (
Sequence[
tuple[str, t.Any, Sequence[str]]
| tuple[str, t.Any, Sequence[str], bool]
]
| None
) = None,
required_one_of: Sequence[Sequence[str]] | None = None,
required_by: Mapping[str, Sequence[str]] | None = None,
min_docker_api_version: str | None = None,
fail_results: dict[str, t.Any] | None = None,
needs_api_version: bool = True,
) -> None:
# Modules can put information in here which will always be returned
# in case client.fail() is called.
self.fail_results = fail_results or {}
merged_arg_spec = {}
merged_arg_spec.update(DOCKER_COMMON_ARGS)
if argument_spec:
merged_arg_spec.update(argument_spec)
self.arg_spec = merged_arg_spec
mutually_exclusive_params: list[Sequence[str]] = [
("docker_host", "cli_context")
]
mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
if mutually_exclusive:
mutually_exclusive_params += mutually_exclusive
required_together_params: list[Sequence[str]] = []
required_together_params += DOCKER_REQUIRED_TOGETHER
if required_together:
required_together_params += required_together
self.module = AnsibleModule(
argument_spec=merged_arg_spec,
supports_check_mode=supports_check_mode,
mutually_exclusive=mutually_exclusive_params,
required_together=required_together_params,
required_if=required_if,
required_one_of=required_one_of,
required_by=required_by or {},
)
self.debug = False # self.module.params['debug']
self.check_mode = self.module.check_mode
self.diff = self.module._diff
        common_args = {k: self.module.params[k] for k in DOCKER_COMMON_ARGS}
super().__init__(
common_args,
min_docker_api_version=min_docker_api_version,
needs_api_version=needs_api_version,
)
def call_cli(
self,
*args: str,
check_rc: bool = False,
data: bytes | None = None,
cwd: str | None = None,
environ_update: dict[str, str] | None = None,
) -> tuple[int, bytes, bytes]:
environment = self._environment.copy()
if environ_update:
environment.update(environ_update)
rc, stdout, stderr = self.module.run_command(
self._compose_cmd(args),
binary_data=True,
check_rc=check_rc,
cwd=cwd,
data=data,
encoding=None,
environ_update=environment,
expand_user_and_vars=False,
ignore_invalid_cwd=False,
)
return rc, stdout, stderr
def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
self.fail_results.update(kwargs)
self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))
def warn(self, msg: str) -> None:
self.module.warn(msg)
def deprecate(
self,
msg: str,
version: str | None = None,
date: str | None = None,
collection_name: str | None = None,
) -> None:
self.module.deprecate(
msg, version=version, date=date, collection_name=collection_name
)

File diff suppressed because it is too large

View file

@ -0,0 +1,590 @@
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import base64
import datetime
import io
import json
import os
import os.path
import shutil
import stat
import tarfile
import typing as t
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible_collections.community.docker.plugins.module_utils._api.errors import (
APIError,
NotFound,
)
if t.TYPE_CHECKING:
from collections.abc import Callable
from _typeshed import WriteableBuffer
from ansible_collections.community.docker.plugins.module_utils._api.api.client import (
APIClient,
)
class DockerFileCopyError(Exception):
pass
class DockerUnexpectedError(DockerFileCopyError):
pass
class DockerFileNotFound(DockerFileCopyError):
pass
def _put_archive(
client: APIClient, container: str, path: str, data: bytes | t.Generator[bytes]
) -> bool:
    # data can also be a file object for streaming. This is because _put uses requests' put().
# See https://requests.readthedocs.io/en/latest/user/advanced/#streaming-uploads
url = client._url("/containers/{0}/archive", container)
res = client._put(url, params={"path": path}, data=data)
client._raise_for_status(res)
return res.status_code == 200
def _symlink_tar_creator(
b_in_path: bytes,
file_stat: os.stat_result,
out_file: str | bytes,
user_id: int,
group_id: int,
mode: int | None = None,
user_name: str | None = None,
) -> bytes:
if not stat.S_ISLNK(file_stat.st_mode):
raise DockerUnexpectedError("stat information is not for a symlink")
bio = io.BytesIO()
with tarfile.open(
fileobj=bio, mode="w|", dereference=False, encoding="utf-8"
) as tar:
# Note that without both name (bytes) and arcname (unicode), this either fails for
# Python 2.7, Python 3.5/3.6, or Python 3.7+. Only when passing both (in this
# form) it works with Python 2.7, 3.5, 3.6, and 3.7 up to 3.11
tarinfo = tar.gettarinfo(b_in_path, arcname=to_text(out_file))
tarinfo.uid = user_id
tarinfo.uname = ""
if user_name:
tarinfo.uname = user_name
tarinfo.gid = group_id
tarinfo.gname = ""
tarinfo.mode &= 0o700
if mode is not None:
tarinfo.mode = mode
if not tarinfo.issym():
raise DockerUnexpectedError("stat information is not for a symlink")
tar.addfile(tarinfo)
return bio.getvalue()
def _symlink_tar_generator(
b_in_path: bytes,
file_stat: os.stat_result,
out_file: str | bytes,
user_id: int,
group_id: int,
mode: int | None = None,
user_name: str | None = None,
) -> t.Generator[bytes]:
yield _symlink_tar_creator(
b_in_path, file_stat, out_file, user_id, group_id, mode, user_name
)
def _regular_file_tar_generator(
b_in_path: bytes,
file_stat: os.stat_result,
out_file: str | bytes,
user_id: int,
group_id: int,
mode: int | None = None,
user_name: str | None = None,
) -> t.Generator[bytes]:
if not stat.S_ISREG(file_stat.st_mode):
raise DockerUnexpectedError("stat information is not for a regular file")
tarinfo = tarfile.TarInfo()
tarinfo.name = (
os.path.splitdrive(to_text(out_file))[1].replace(os.sep, "/").lstrip("/")
)
tarinfo.mode = (file_stat.st_mode & 0o700) if mode is None else mode
tarinfo.uid = user_id
tarinfo.gid = group_id
tarinfo.size = file_stat.st_size
tarinfo.mtime = file_stat.st_mtime
tarinfo.type = tarfile.REGTYPE
tarinfo.linkname = ""
if user_name:
tarinfo.uname = user_name
tarinfo_buf = tarinfo.tobuf()
total_size = len(tarinfo_buf)
yield tarinfo_buf
size = tarinfo.size
total_size += size
with open(b_in_path, "rb") as f:
while size > 0:
to_read = min(size, 65536)
buf = f.read(to_read)
if not buf:
break
size -= len(buf)
yield buf
if size:
            # If for some reason the file shrank, fill up to the announced size with zeros.
# (If it enlarged, ignore the remainder.)
yield tarfile.NUL * size
remainder = tarinfo.size % tarfile.BLOCKSIZE
if remainder:
# We need to write a multiple of 512 bytes. Fill up with zeros.
yield tarfile.NUL * (tarfile.BLOCKSIZE - remainder)
total_size += tarfile.BLOCKSIZE - remainder
# End with two zeroed blocks
yield tarfile.NUL * (2 * tarfile.BLOCKSIZE)
total_size += 2 * tarfile.BLOCKSIZE
remainder = total_size % tarfile.RECORDSIZE
if remainder > 0:
yield tarfile.NUL * (tarfile.RECORDSIZE - remainder)
def _regular_content_tar_generator(
content: bytes,
out_file: str | bytes,
user_id: int,
group_id: int,
mode: int,
user_name: str | None = None,
) -> t.Generator[bytes]:
tarinfo = tarfile.TarInfo()
tarinfo.name = (
os.path.splitdrive(to_text(out_file))[1].replace(os.sep, "/").lstrip("/")
)
tarinfo.mode = mode
tarinfo.uid = user_id
tarinfo.gid = group_id
tarinfo.size = len(content)
tarinfo.mtime = int(datetime.datetime.now().timestamp())
tarinfo.type = tarfile.REGTYPE
tarinfo.linkname = ""
if user_name:
tarinfo.uname = user_name
tarinfo_buf = tarinfo.tobuf()
total_size = len(tarinfo_buf)
yield tarinfo_buf
total_size += len(content)
yield content
remainder = tarinfo.size % tarfile.BLOCKSIZE
if remainder:
# We need to write a multiple of 512 bytes. Fill up with zeros.
yield tarfile.NUL * (tarfile.BLOCKSIZE - remainder)
total_size += tarfile.BLOCKSIZE - remainder
# End with two zeroed blocks
yield tarfile.NUL * (2 * tarfile.BLOCKSIZE)
total_size += 2 * tarfile.BLOCKSIZE
remainder = total_size % tarfile.RECORDSIZE
if remainder > 0:
yield tarfile.NUL * (tarfile.RECORDSIZE - remainder)
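# Illustrative sketch (not part of the vendored file): the padding arithmetic
# used by the generators above, worked through for a 100-byte payload and a
# short file name (one 512-byte header block):
def _example_tar_padding() -> None:
    assert tarfile.BLOCKSIZE == 512 and tarfile.RECORDSIZE == 10240
    header = tarfile.BLOCKSIZE  # tobuf() yields one block for short names
    content = 100
    pad = tarfile.BLOCKSIZE - (content % tarfile.BLOCKSIZE)  # 412 NUL bytes
    end = 2 * tarfile.BLOCKSIZE  # two zeroed end-of-archive blocks
    total = header + content + pad + end  # 1536 bytes
    # The stream is finally padded up to a full record of 10240 bytes:
    record_pad = (tarfile.RECORDSIZE - total % tarfile.RECORDSIZE) % tarfile.RECORDSIZE
    assert total + record_pad == tarfile.RECORDSIZE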
def put_file(
client: APIClient,
container: str,
in_path: str,
out_path: str,
user_id: int,
group_id: int,
mode: int | None = None,
user_name: str | None = None,
follow_links: bool = False,
) -> None:
"""Transfer a file from local to Docker container."""
if not os.path.exists(to_bytes(in_path, errors="surrogate_or_strict")):
raise DockerFileNotFound(f"file or module does not exist: {to_text(in_path)}")
b_in_path = to_bytes(in_path, errors="surrogate_or_strict")
out_dir, out_file = os.path.split(out_path)
if follow_links:
file_stat = os.stat(b_in_path)
else:
file_stat = os.lstat(b_in_path)
if stat.S_ISREG(file_stat.st_mode):
stream = _regular_file_tar_generator(
b_in_path,
file_stat,
out_file,
user_id,
group_id,
mode=mode,
user_name=user_name,
)
elif stat.S_ISLNK(file_stat.st_mode):
stream = _symlink_tar_generator(
b_in_path,
file_stat,
out_file,
user_id,
group_id,
mode=mode,
user_name=user_name,
)
else:
file_part = " referenced by" if follow_links else ""
raise DockerFileCopyError(
f"File{file_part} {in_path} is neither a regular file nor a symlink (stat mode {oct(file_stat.st_mode)})."
)
ok = _put_archive(client, container, out_dir, stream)
if not ok:
raise DockerUnexpectedError(
f'Unknown error while creating file "{out_path}" in container "{container}".'
)
def put_file_content(
client: APIClient,
container: str,
content: bytes,
out_path: str,
user_id: int,
group_id: int,
mode: int,
user_name: str | None = None,
) -> None:
"""Transfer a file from local to Docker container."""
out_dir, out_file = os.path.split(out_path)
stream = _regular_content_tar_generator(
content, out_file, user_id, group_id, mode, user_name=user_name
)
ok = _put_archive(client, container, out_dir, stream)
if not ok:
raise DockerUnexpectedError(
f'Unknown error while creating file "{out_path}" in container "{container}".'
)
def stat_file(
client: APIClient,
container: str,
in_path: str,
follow_links: bool = False,
log: Callable[[str], None] | None = None,
) -> tuple[str, dict[str, t.Any] | None, str | None]:
"""Fetch information on a file from a Docker container to local.
Return a tuple ``(path, stat_data, link_target)`` where:
:path: is the resolved path in case ``follow_links=True``;
:stat_data: is ``None`` if the file does not exist, or a dictionary with fields
``name`` (string), ``size`` (integer), ``mode`` (integer, see https://pkg.go.dev/io/fs#FileMode),
``mtime`` (string), and ``linkTarget`` (string);
:link_target: is ``None`` if the file is not a symlink or when ``follow_links=False``,
and a string with the symlink target otherwise.
"""
considered_in_paths = set()
while True:
if in_path in considered_in_paths:
raise DockerFileCopyError(
f"Found infinite symbolic link loop when trying to stating {in_path!r}"
)
considered_in_paths.add(in_path)
if log:
log(f"FETCH: Stating {in_path!r}")
response = client._head(
client._url("/containers/{0}/archive", container),
params={"path": in_path},
)
if response.status_code == 404:
return in_path, None, None
client._raise_for_status(response)
header = response.headers.get("x-docker-container-path-stat")
try:
if header is None:
raise ValueError("x-docker-container-path-stat header not present")
stat_data = json.loads(base64.b64decode(header))
except Exception as exc:
raise DockerUnexpectedError(
f"When retrieving information for {in_path} from {container}, obtained header {header!r} that cannot be loaded as JSON: {exc}"
) from exc
# https://pkg.go.dev/io/fs#FileMode: bit 32 - 5 means ModeSymlink
if stat_data["mode"] & (1 << (32 - 5)) != 0:
link_target = stat_data["linkTarget"]
if not follow_links:
return in_path, stat_data, link_target
in_path = os.path.join(os.path.split(in_path)[0], link_target)
continue
return in_path, stat_data, None
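# Illustrative note (not part of the vendored file): Go's io/fs.FileMode keeps
# ModeSymlink in bit 32-5, i.e. bit 27, so the check above is equivalent to
# testing stat_data["mode"] & 0x08000000 != 0.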
class _RawGeneratorFileobj(io.RawIOBase):
def __init__(self, stream: t.Generator[bytes]):
self._stream = stream
self._buf = b""
def readable(self) -> bool:
return True
def _readinto_from_buf(self, b: WriteableBuffer, index: int, length: int) -> int:
cpy = min(length - index, len(self._buf))
if cpy:
b[index : index + cpy] = self._buf[:cpy] # type: ignore # TODO!
self._buf = self._buf[cpy:]
index += cpy
return index
def readinto(self, b: WriteableBuffer) -> int:
index = 0
length = len(b) # type: ignore # TODO!
index = self._readinto_from_buf(b, index, length)
if index == length:
return index
try:
self._buf += next(self._stream)
except StopIteration:
return index
return self._readinto_from_buf(b, index, length)
def _stream_generator_to_fileobj(stream: t.Generator[bytes]) -> io.BufferedReader:
"""Given a generator that generates chunks of bytes, create a readable buffered stream."""
raw = _RawGeneratorFileobj(stream)
return io.BufferedReader(raw)
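# Minimal usage sketch (illustrative, not part of the vendored file):
def _example_stream_to_fileobj() -> None:
    def chunks() -> t.Generator[bytes]:
        yield b"hello "
        yield b"world"

    reader = _stream_generator_to_fileobj(chunks())
    # BufferedReader.read() drains the generator through the raw wrapper:
    assert reader.read() == b"hello world"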
_T = t.TypeVar("_T")
def fetch_file_ex(
client: APIClient,
container: str,
in_path: str,
process_none: Callable[[str], _T],
process_regular: Callable[[str, tarfile.TarFile, tarfile.TarInfo], _T],
process_symlink: Callable[[str, tarfile.TarInfo], _T],
process_other: Callable[[str, tarfile.TarInfo], _T],
follow_links: bool = False,
log: Callable[[str], None] | None = None,
) -> _T:
"""Fetch a file (as a tar file entry) from a Docker container to local."""
considered_in_paths: set[str] = set()
while True:
if in_path in considered_in_paths:
raise DockerFileCopyError(
f'Found infinite symbolic link loop when trying to fetch "{in_path}"'
)
considered_in_paths.add(in_path)
if log:
log(f'FETCH: Fetching "{in_path}"')
try:
stream = client.get_raw_stream(
"/containers/{0}/archive",
container,
params={"path": in_path},
headers={"Accept-Encoding": "identity"},
)
except NotFound:
return process_none(in_path)
with tarfile.open(
fileobj=_stream_generator_to_fileobj(stream), mode="r|"
) as tar:
symlink_member: tarfile.TarInfo | None = None
result: _T | None = None
found = False
for member in tar:
if found:
raise DockerUnexpectedError(
"Received tarfile contains more than one file!"
)
found = True
if member.issym():
symlink_member = member
continue
if member.isfile():
result = process_regular(in_path, tar, member)
continue
result = process_other(in_path, member)
if symlink_member:
if not follow_links:
return process_symlink(in_path, symlink_member)
in_path = os.path.join(
os.path.split(in_path)[0], symlink_member.linkname
)
if log:
log(f'FETCH: Following symbolic link to "{in_path}"')
continue
if found:
return result # type: ignore
raise DockerUnexpectedError("Received tarfile is empty!")
def fetch_file(
client: APIClient,
container: str,
in_path: str,
out_path: str,
follow_links: bool = False,
log: Callable[[str], None] | None = None,
) -> str:
b_out_path = to_bytes(out_path, errors="surrogate_or_strict")
def process_none(in_path: str) -> str:
raise DockerFileNotFound(
f"File {in_path} does not exist in container {container}"
)
def process_regular(
in_path: str, tar: tarfile.TarFile, member: tarfile.TarInfo
) -> str:
if not follow_links and os.path.exists(b_out_path):
os.unlink(b_out_path)
reader = tar.extractfile(member)
if reader:
with reader as in_f, open(b_out_path, "wb") as out_f:
shutil.copyfileobj(in_f, out_f)
return in_path
def process_symlink(in_path: str, member: tarfile.TarInfo) -> str:
if os.path.exists(b_out_path):
os.unlink(b_out_path)
os.symlink(member.linkname, b_out_path)
return in_path
def process_other(in_path: str, member: tarfile.TarInfo) -> str:
raise DockerFileCopyError(
f'Remote file "{in_path}" is not a regular file or a symbolic link'
)
return fetch_file_ex(
client,
container,
in_path,
process_none,
process_regular,
process_symlink,
process_other,
follow_links=follow_links,
log=log,
)
def _execute_command(
client: APIClient,
container: str,
command: list[str],
log: Callable[[str], None] | None = None,
check_rc: bool = False,
) -> tuple[int, bytes, bytes]:
if log:
log(f"Executing {command} in {container}")
data = {
"Container": container,
"User": "",
"Privileged": False,
"Tty": False,
"AttachStdin": False,
"AttachStdout": True,
"AttachStderr": True,
"Cmd": command,
}
if "detachKeys" in client._general_configs:
data["detachKeys"] = client._general_configs["detachKeys"]
try:
exec_data = client.post_json_to_json(
"/containers/{0}/exec", container, data=data
)
except NotFound as e:
raise DockerFileCopyError(f'Could not find container "{container}"') from e
except APIError as e:
if e.response is not None and e.response.status_code == 409:
raise DockerFileCopyError(
f'Cannot execute command in paused container "{container}"'
) from e
raise
exec_id = exec_data["Id"]
data = {"Tty": False, "Detach": False}
stdout, stderr = client.post_json_to_stream(
"/exec/{0}/start", exec_id, stream=False, demux=True, tty=False
)
result = client.get_json("/exec/{0}/json", exec_id)
rc: int = result.get("ExitCode") or 0
stdout = stdout or b""
stderr = stderr or b""
if log:
log(f"Exit code {rc}, stdout {stdout!r}, stderr {stderr!r}")
if check_rc and rc != 0:
command_str = " ".join(command)
raise DockerUnexpectedError(
f'Obtained unexpected exit code {rc} when running "{command_str}" in {container}.\nSTDOUT: {stdout!r}\nSTDERR: {stderr!r}'
)
return rc, stdout, stderr
def determine_user_group(
client: APIClient, container: str, log: Callable[[str], None] | None = None
) -> tuple[int, int]:
dummy_rc, stdout, dummy_stderr = _execute_command(
client, container, ["/bin/sh", "-c", "id -u && id -g"], check_rc=True, log=log
)
stdout_lines = stdout.splitlines()
if len(stdout_lines) != 2:
raise DockerUnexpectedError(
f"Expected two-line output to obtain user and group ID for container {container}, but got {len(stdout_lines)} lines:\n{stdout!r}"
)
user_id, group_id = stdout_lines
try:
return int(user_id), int(group_id)
except ValueError as exc:
raise DockerUnexpectedError(
f"Expected two-line output with numeric IDs to obtain user and group ID for container {container}, but got {user_id!r} and {group_id!r} instead"
) from exc
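# Illustrative example (not part of the vendored file): running
# /bin/sh -c "id -u && id -g" as root typically yields stdout b"0\n0\n",
# which the parsing above turns into (0, 0).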

View file

@ -0,0 +1,166 @@
# Copyright 2022 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import json
import os
import tarfile
class ImageArchiveManifestSummary:
"""
Represents data extracted from a manifest.json found in the tar archive output of the
"docker image save some:tag > some.tar" command.
"""
def __init__(self, image_id: str, repo_tags: list[str]) -> None:
"""
:param image_id: File name portion of Config entry, e.g. abcde12345 from abcde12345.json
        :param repo_tags: Docker image names, e.g. ["hello-world:latest"]
"""
self.image_id = image_id
self.repo_tags = repo_tags
class ImageArchiveInvalidException(Exception):
pass
def api_image_id(archive_image_id: str) -> str:
"""
Accepts an image hash in the format stored in manifest.json, and returns an equivalent identifier
that represents the same image hash, but in the format presented by the Docker Engine API.
:param archive_image_id: plain image hash
:returns: Prefixed hash used by REST api
"""
return f"sha256:{archive_image_id}"
def load_archived_image_manifest(
archive_path: str,
) -> list[ImageArchiveManifestSummary] | None:
"""
Attempts to get image IDs and image names from metadata stored in the image
archive tar file.
The tar should contain a file "manifest.json" with an array with one or more entries,
and every entry should have a Config field with the image ID in its file name, as
well as a RepoTags list, which typically has only one entry.
:raises:
        ImageArchiveInvalidException: A file exists at archive_path, but an image ID could not be extracted from it.
:param archive_path: Tar file to read
:return: None, if no file at archive_path, or a list of ImageArchiveManifestSummary objects.
"""
try:
        # Check existence up front and return None instead of letting tarfile.open() fail
if not os.path.isfile(archive_path):
return None
with tarfile.open(archive_path, "r") as tf:
try:
try:
reader = tf.extractfile("manifest.json")
if reader is None:
raise ImageArchiveInvalidException(
"Failed to read manifest.json"
)
with reader as ef:
manifest = json.load(ef)
except ImageArchiveInvalidException:
raise
except Exception as exc:
raise ImageArchiveInvalidException(
f"Failed to decode and deserialize manifest.json: {exc}"
) from exc
if len(manifest) == 0:
raise ImageArchiveInvalidException(
"Expected to have at least one entry in manifest.json but found none"
)
result = []
for index, meta in enumerate(manifest):
try:
config_file = meta["Config"]
except KeyError as exc:
raise ImageArchiveInvalidException(
f"Failed to get Config entry from {index + 1}th manifest in manifest.json: {exc}"
) from exc
# Extracts hash without 'sha256:' prefix
try:
# Strip off .json filename extension, leaving just the hash.
image_id = os.path.splitext(config_file)[0]
except Exception as exc:
raise ImageArchiveInvalidException(
f"Failed to extract image id from config file name {config_file}: {exc}"
) from exc
for prefix in ("blobs/sha256/",): # Moby 25.0.0, Docker API 1.44
if image_id.startswith(prefix):
image_id = image_id[len(prefix) :]
try:
repo_tags = meta["RepoTags"]
except KeyError as exc:
raise ImageArchiveInvalidException(
f"Failed to get RepoTags entry from {index + 1}th manifest in manifest.json: {exc}"
) from exc
result.append(
ImageArchiveManifestSummary(
image_id=image_id, repo_tags=repo_tags
)
)
return result
except ImageArchiveInvalidException:
raise
except Exception as exc:
raise ImageArchiveInvalidException(
f"Failed to extract manifest.json from tar file {archive_path}: {exc}"
) from exc
except ImageArchiveInvalidException:
raise
except Exception as exc:
raise ImageArchiveInvalidException(
f"Failed to open tar file {archive_path}: {exc}"
) from exc
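# Illustrative example (not part of the vendored file): a typical manifest.json
# entry written by "docker image save hello-world:latest". Older engines use a
# Config value like "<image-id>.json"; Moby 25+ (API 1.44) uses
# "blobs/sha256/<image-id>". Both forms are handled above by stripping the
# ".json" extension and the "blobs/sha256/" prefix:
#   [{"Config": "blobs/sha256/<image-id>", "RepoTags": ["hello-world:latest"], ...}]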
def archived_image_manifest(archive_path: str) -> ImageArchiveManifestSummary | None:
"""
Attempts to get Image.Id and image name from metadata stored in the image
archive tar file.
The tar should contain a file "manifest.json" with an array with a single entry,
and the entry should have a Config field with the image ID in its file name, as
well as a RepoTags list, which typically has only one entry.
:raises:
        ImageArchiveInvalidException: A file exists at archive_path, but an image ID could not be extracted from it.
:param archive_path: Tar file to read
:return: None, if no file at archive_path, or the extracted image ID, which will not have a sha256: prefix.
"""
results = load_archived_image_manifest(archive_path)
if results is None:
return None
if len(results) == 1:
return results[0]
raise ImageArchiveInvalidException(
f"Expected to have one entry in manifest.json but found {len(results)}"
)

View file

@ -0,0 +1,116 @@
# Copyright (c) 2025 Felix Fontein
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import re
import typing as t
from dataclasses import dataclass
_PATH_RE = re.compile(
r"^[a-z0-9]+((\.|_|__|-+)[a-z0-9]+)*(\/[a-z0-9]+((\.|_|__|-+)[a-z0-9]+)*)*$"
)
_TAG_RE = re.compile(r"^[a-zA-Z0-9_][a-zA-Z0-9._-]{0,127}$")
_DIGEST_RE = re.compile(r"^sha256:[0-9a-fA-F]{64}$")
def is_digest(name: str, allow_empty: bool = False) -> bool:
"""Check whether the given name is in fact an image ID (hash)."""
if not name:
return allow_empty
return _DIGEST_RE.match(name) is not None
def is_tag(name: str, allow_empty: bool = False) -> bool:
"""Check whether the given name can be an image tag."""
if not name:
return allow_empty
return _TAG_RE.match(name) is not None
@dataclass
class ImageName:
registry: str | None
path: str
tag: str | None
digest: str | None
@classmethod
def parse(cls, name: str) -> t.Self:
registry: str | None = None
tag: str | None = None
digest: str | None = None
parts = name.rsplit("@", 1)
if len(parts) == 2:
name, digest = parts
parts = name.rsplit(":", 1)
if len(parts) == 2 and "/" not in parts[1]:
name, tag = parts
parts = name.split("/", 1)
if len(parts) == 2 and (
"." in parts[0] or ":" in parts[0] or parts[0] == "localhost"
):
registry, name = parts
return cls(registry, name, tag, digest)
def validate(self) -> t.Self:
if self.registry:
if self.registry[0] == "-" or self.registry[-1] == "-":
raise ValueError(
f'Invalid registry name ({self.registry}): must not begin or end with a "-".'
)
if self.registry[-1] == ":":
raise ValueError(
f'Invalid registry name ({self.registry}): must not end with ":".'
)
if not _PATH_RE.match(self.path):
raise ValueError(f"Invalid path ({self.path}).")
if self.tag and not is_tag(self.tag):
raise ValueError(f"Invalid tag ({self.tag}).")
if self.digest and not is_digest(self.digest):
raise ValueError(f"Invalid digest ({self.digest}).")
return self
def combine(self) -> str:
parts = []
if self.registry:
parts.append(self.registry)
if self.path:
parts.append("/")
parts.append(self.path)
if self.tag:
parts.append(":")
parts.append(self.tag)
if self.digest:
parts.append("@")
parts.append(self.digest)
return "".join(parts)
def normalize(self) -> ImageName:
registry = self.registry
path = self.path
if registry in ("", None, "index.docker.io", "registry.hub.docker.com"):
registry = "docker.io"
if registry == "docker.io" and "/" not in path and path:
path = f"library/{path}"
return ImageName(registry, path, self.tag, self.digest)
def get_hostname_and_port(self) -> tuple[str, int]:
if self.registry is None:
raise ValueError(
"Cannot get hostname when there is no registry. Normalize first!"
)
if self.registry == "docker.io":
return "index.docker.io", 443
parts = self.registry.split(":", 1)
if len(parts) == 2:
try:
port = int(parts[1])
except (TypeError, ValueError) as exc:
raise ValueError(f"Cannot parse port {parts[1]!r}") from exc
return parts[0], port
return self.registry, 443
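# Minimal usage sketch (illustrative, not part of the vendored file):
def _example_image_name() -> None:
    name = ImageName.parse("busybox:1.36").validate().normalize()
    assert name.registry == "docker.io"
    assert name.path == "library/busybox"
    assert name.combine() == "docker.io/library/busybox:1.36"
    assert name.get_hostname_and_port() == ("index.docker.io", 443)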

View file

@ -0,0 +1,211 @@
# Copyright (c) 2024, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
"""
Parse go logfmt messages.
See https://pkg.go.dev/github.com/kr/logfmt?utm_source=godoc for information on the format.
"""
from __future__ import annotations
import typing as t
from enum import Enum
# The format is defined in https://pkg.go.dev/github.com/kr/logfmt?utm_source=godoc
# (look for "EBNFish")
class InvalidLogFmt(Exception):
pass
class _Mode(Enum):
GARBAGE = 0
KEY = 1
EQUAL = 2
IDENT_VALUE = 3
QUOTED_VALUE = 4
_ESCAPE_DICT = {
'"': '"',
"\\": "\\",
"'": "'",
"/": "/",
"b": "\b",
"f": "\f",
"n": "\n",
"r": "\r",
"t": "\t",
}
_HEX_DICT = {
"0": 0,
"1": 1,
"2": 2,
"3": 3,
"4": 4,
"5": 5,
"6": 6,
"7": 7,
"8": 8,
"9": 9,
"a": 0xA,
"b": 0xB,
"c": 0xC,
"d": 0xD,
"e": 0xE,
"f": 0xF,
"A": 0xA,
"B": 0xB,
"C": 0xC,
"D": 0xD,
"E": 0xE,
"F": 0xF,
}
def _is_ident(cur: str) -> bool:
return cur > " " and cur not in ('"', "=")
class _Parser:
def __init__(self, line: str) -> None:
self.line = line
self.index = 0
self.length = len(line)
def done(self) -> bool:
return self.index >= self.length
def cur(self) -> str:
return self.line[self.index]
def next(self) -> None:
self.index += 1
def prev(self) -> None:
self.index -= 1
def parse_unicode_sequence(self) -> str:
if self.index + 6 > self.length:
raise InvalidLogFmt("Not enough space for unicode escape")
if self.line[self.index : self.index + 2] != "\\u":
raise InvalidLogFmt("Invalid unicode escape start")
v = 0
        for pos in range(self.index + 2, self.index + 6):
            v <<= 4
            try:
                v += _HEX_DICT[self.line[pos]]
            except KeyError:
                raise InvalidLogFmt(
                    f"Invalid unicode escape digit {self.line[pos]!r}"
                ) from None
self.index += 6
return chr(v)
def parse_line(line: str, logrus_mode: bool = False) -> dict[str, t.Any]:
result: dict[str, t.Any] = {}
parser = _Parser(line)
key: list[str] = []
value: list[str] = []
mode = _Mode.GARBAGE
def handle_kv(has_no_value: bool = False) -> None:
k = "".join(key)
v = None if has_no_value else "".join(value)
result[k] = v
del key[:]
del value[:]
def parse_garbage(cur: str) -> _Mode:
if _is_ident(cur):
return _Mode.KEY
parser.next()
return _Mode.GARBAGE
def parse_key(cur: str) -> _Mode:
if _is_ident(cur):
key.append(cur)
parser.next()
return _Mode.KEY
if cur == "=":
parser.next()
return _Mode.EQUAL
if logrus_mode:
raise InvalidLogFmt('Key must always be followed by "=" in logrus mode')
handle_kv(has_no_value=True)
parser.next()
return _Mode.GARBAGE
def parse_equal(cur: str) -> _Mode:
if _is_ident(cur):
value.append(cur)
parser.next()
return _Mode.IDENT_VALUE
if cur == '"':
parser.next()
return _Mode.QUOTED_VALUE
handle_kv()
parser.next()
return _Mode.GARBAGE
def parse_ident_value(cur: str) -> _Mode:
if _is_ident(cur):
value.append(cur)
parser.next()
return _Mode.IDENT_VALUE
handle_kv()
parser.next()
return _Mode.GARBAGE
def parse_quoted_value(cur: str) -> _Mode:
if cur == "\\":
parser.next()
if parser.done():
raise InvalidLogFmt("Unterminated escape sequence in quoted string")
cur = parser.cur()
if cur in _ESCAPE_DICT:
value.append(_ESCAPE_DICT[cur])
elif cur != "u":
es = f"\\{cur}"
raise InvalidLogFmt(f"Unknown escape sequence {es!r}")
            else:
                parser.prev()
                value.append(parser.parse_unicode_sequence())
                # parse_unicode_sequence() already advanced past the whole
                # \uXXXX sequence, so do not call parser.next() again.
                return _Mode.QUOTED_VALUE
parser.next()
return _Mode.QUOTED_VALUE
if cur == '"':
handle_kv()
parser.next()
return _Mode.GARBAGE
if cur < " ":
raise InvalidLogFmt("Control characters in quoted string are not allowed")
value.append(cur)
parser.next()
return _Mode.QUOTED_VALUE
parsers = {
_Mode.GARBAGE: parse_garbage,
_Mode.KEY: parse_key,
_Mode.EQUAL: parse_equal,
_Mode.IDENT_VALUE: parse_ident_value,
_Mode.QUOTED_VALUE: parse_quoted_value,
}
while not parser.done():
mode = parsers[mode](parser.cur())
if mode == _Mode.KEY and logrus_mode:
raise InvalidLogFmt('Key must always be followed by "=" in logrus mode')
if mode in (_Mode.KEY, _Mode.EQUAL):
handle_kv(has_no_value=True)
elif mode == _Mode.IDENT_VALUE:
handle_kv()
elif mode == _Mode.QUOTED_VALUE:
raise InvalidLogFmt("Unterminated quoted string")
return result
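# Minimal usage sketch (illustrative, not part of the vendored file):
def _example_parse_line() -> None:
    parsed = parse_line('level=info msg="hello world" done')
    # Outside logrus mode, a trailing key without "=" is kept with value None:
    assert parsed == {"level": "info", "msg": "hello world", "done": None}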

View file

@ -0,0 +1,259 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on containerd's platforms Go module
# (https://github.com/containerd/containerd/tree/main/platforms)
#
# Copyright (c) 2023 Felix Fontein <felix@fontein.de>
# Copyright The containerd Authors
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import re
import typing as t
_VALID_STR = re.compile("^[A-Za-z0-9_-]+$")
def _validate_part(string: str, part: str, part_name: str) -> str:
if not part:
raise ValueError(f'Invalid platform string "{string}": {part_name} is empty')
if not _VALID_STR.match(part):
raise ValueError(
f'Invalid platform string "{string}": {part_name} has invalid characters'
)
return part
# See https://github.com/containerd/containerd/blob/main/platforms/database.go#L32-L38
_KNOWN_OS = (
"aix",
"android",
"darwin",
"dragonfly",
"freebsd",
"hurd",
"illumos",
"ios",
"js",
"linux",
"nacl",
"netbsd",
"openbsd",
"plan9",
"solaris",
"windows",
"zos",
)
# See https://github.com/containerd/containerd/blob/main/platforms/database.go#L54-L60
_KNOWN_ARCH = (
"386",
"amd64",
"amd64p32",
"arm",
"armbe",
"arm64",
"arm64be",
"ppc64",
"ppc64le",
"loong64",
"mips",
"mipsle",
"mips64",
"mips64le",
"mips64p32",
"mips64p32le",
"ppc",
"riscv",
"riscv64",
"s390",
"s390x",
"sparc",
"sparc64",
"wasm",
)
def _normalize_os(os_str: str) -> str:
# See normalizeOS() in https://github.com/containerd/containerd/blob/main/platforms/database.go
os_str = os_str.lower()
if os_str == "macos":
os_str = "darwin"
return os_str
_NORMALIZE_ARCH = {
("i386", None): ("386", ""),
("x86_64", "v1"): ("amd64", ""),
("x86-64", "v1"): ("amd64", ""),
("amd64", "v1"): ("amd64", ""),
("x86_64", None): ("amd64", None),
("x86-64", None): ("amd64", None),
("amd64", None): ("amd64", None),
("aarch64", "8"): ("arm64", ""),
("arm64", "8"): ("arm64", ""),
("aarch64", "v8"): ("arm64", ""),
("arm64", "v8"): ("arm64", ""),
("aarch64", None): ("arm64", None),
("arm64", None): ("arm64", None),
("armhf", None): ("arm", "v7"),
("armel", None): ("arm", "v6"),
("arm", ""): ("arm", "v7"),
("arm", "5"): ("arm", "v5"),
("arm", "6"): ("arm", "v6"),
("arm", "7"): ("arm", "v7"),
("arm", "8"): ("arm", "v8"),
("arm", None): ("arm", None),
}
def _normalize_arch(arch_str: str, variant_str: str) -> tuple[str, str]:
# See normalizeArch() in https://github.com/containerd/containerd/blob/main/platforms/database.go
arch_str = arch_str.lower()
variant_str = variant_str.lower()
res = _NORMALIZE_ARCH.get((arch_str, variant_str))
if res is None:
res = _NORMALIZE_ARCH.get((arch_str, None))
if res is None:
return arch_str, variant_str
arch_str = res[0]
if res[1] is not None:
variant_str = res[1]
return arch_str, variant_str
class _Platform:
def __init__(
self, os: str | None = None, arch: str | None = None, variant: str | None = None
) -> None:
self.os = os
self.arch = arch
self.variant = variant
if variant is not None:
if arch is None:
raise ValueError("If variant is given, architecture must be given too")
if os is None:
raise ValueError("If variant is given, os must be given too")
@classmethod
def parse_platform_string(
cls,
string: str | None,
daemon_os: str | None = None,
daemon_arch: str | None = None,
) -> t.Self:
# See Parse() in https://github.com/containerd/containerd/blob/main/platforms/platforms.go
if string is None:
return cls()
if not string:
raise ValueError("Platform string must be non-empty")
parts = string.split("/", 2)
arch = None
variant = None
if len(parts) == 1:
_validate_part(string, string, "OS/architecture")
# The part is either OS or architecture
os = _normalize_os(string)
if os in _KNOWN_OS:
if daemon_arch is not None:
arch, variant = _normalize_arch(daemon_arch, "")
return cls(os=os, arch=arch, variant=variant)
arch, variant = _normalize_arch(os, "")
if arch in _KNOWN_ARCH:
return cls(
os=_normalize_os(daemon_os) if daemon_os else None,
arch=arch or None,
variant=variant or None,
)
raise ValueError(
f'Invalid platform string "{string}": unknown OS or architecture'
)
os = _validate_part(string, parts[0], "OS")
if not os:
raise ValueError(f'Invalid platform string "{string}": OS is empty')
arch = (
_validate_part(string, parts[1], "architecture") if len(parts) > 1 else None
)
if arch is not None and not arch:
raise ValueError(
f'Invalid platform string "{string}": architecture is empty'
)
variant = (
_validate_part(string, parts[2], "variant") if len(parts) > 2 else None
)
if variant is not None and not variant:
raise ValueError(f'Invalid platform string "{string}": variant is empty')
assert arch is not None # otherwise variant would be None as well
arch, variant = _normalize_arch(arch, variant or "")
if len(parts) == 2 and arch == "arm" and variant == "v7":
variant = None
if len(parts) == 3 and arch == "arm64" and variant == "":
variant = "v8"
return cls(os=_normalize_os(os), arch=arch, variant=variant or None)
def __str__(self) -> str:
if self.variant:
assert (
self.os is not None and self.arch is not None
) # ensured in constructor
parts: list[str] = [self.os, self.arch, self.variant]
elif self.os:
if self.arch:
parts = [self.os, self.arch]
else:
parts = [self.os]
elif self.arch is not None:
parts = [self.arch]
else:
parts = []
return "/".join(parts)
def __repr__(self) -> str:
return (
f"_Platform(os={self.os!r}, arch={self.arch!r}, variant={self.variant!r})"
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, _Platform):
return NotImplemented
return (
self.os == other.os
and self.arch == other.arch
and self.variant == other.variant
)
def normalize_platform_string(
string: str, daemon_os: str | None = None, daemon_arch: str | None = None
) -> str:
return str(
_Platform.parse_platform_string(
string, daemon_os=daemon_os, daemon_arch=daemon_arch
)
)
def compose_platform_string(
os: str | None = None,
arch: str | None = None,
variant: str | None = None,
daemon_os: str | None = None,
daemon_arch: str | None = None,
) -> str:
if os is None and daemon_os is not None:
os = _normalize_os(daemon_os)
if arch is None and daemon_arch is not None:
arch, variant = _normalize_arch(daemon_arch, variant or "")
variant = variant or None
return str(_Platform(os=os, arch=arch, variant=variant or None))
def compare_platform_strings(string1: str, string2: str) -> bool:
return _Platform.parse_platform_string(string1) == _Platform.parse_platform_string(
string2
)
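# Minimal usage sketch (illustrative, not part of the vendored file):
def _example_platform_strings() -> None:
    assert normalize_platform_string("linux/x86_64") == "linux/amd64"
    assert normalize_platform_string("linux/aarch64") == "linux/arm64"
    # "armhf" normalizes to arm/v7, and a two-part arm/v7 drops the variant:
    assert compare_platform_strings("linux/armhf", "linux/arm")
    assert compose_platform_string(os="linux", daemon_arch="x86_64") == "linux/amd64"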

View file

@ -0,0 +1,46 @@
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import base64
import random
from ansible.module_utils.common.text.converters import to_bytes, to_text
def generate_insecure_key() -> bytes:
"""Do NOT use this for cryptographic purposes!"""
while True:
# Generate a one-byte key. Right now the functions below do not use more
# than one byte, so this is sufficient.
key = bytes([random.randint(0, 255)])
# Return anything that is not zero
if key != b"\x00":
return key
def scramble(value: str, key: bytes) -> str:
"""Do NOT use this for cryptographic purposes!"""
if len(key) < 1:
raise ValueError("Key must be at least one byte")
b_value = to_bytes(value)
k = key[0]
b_value = bytes([k ^ b for b in b_value])
return f"=S={to_text(base64.b64encode(b_value))}"
def unscramble(value: str, key: bytes) -> str:
"""Do NOT use this for cryptographic purposes!"""
if len(key) < 1:
raise ValueError("Key must be at least one byte")
if not value.startswith("=S="):
raise ValueError("Value does not start with indicator")
b_value = base64.b64decode(value[3:])
k = key[0]
b_value = bytes([k ^ b for b in b_value])
return to_text(b_value)
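# Minimal round-trip sketch (illustrative, not part of the vendored file):
def _example_scramble_roundtrip() -> None:
    key = generate_insecure_key()
    scrambled = scramble("secret", key)
    assert scrambled.startswith("=S=")
    assert unscramble(scrambled, key) == "secret"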

View file

@ -0,0 +1,240 @@
# Copyright (c) 2019-2021, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import os
import os.path
import selectors
import socket as pysocket
import struct
import typing as t
from ansible_collections.community.docker.plugins.module_utils._api.utils import (
socket as docker_socket,
)
from ansible_collections.community.docker.plugins.module_utils._socket_helper import (
make_unblocking,
shutdown_writing,
write_to_socket,
)
if t.TYPE_CHECKING:
from collections.abc import Callable
from types import TracebackType
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.docker.plugins.module_utils._socket_helper import (
SocketLike,
)
PARAMIKO_POLL_TIMEOUT = 0.01 # 10 milliseconds
def _empty_writer(msg: str) -> None:
pass
class DockerSocketHandlerBase:
def __init__(
self, sock: SocketLike, log: Callable[[str], None] | None = None
) -> None:
make_unblocking(sock)
self._log = log or _empty_writer
self._paramiko_read_workaround = hasattr(
sock, "send_ready"
) and "paramiko" in str(type(sock))
self._sock = sock
self._block_done_callback: Callable[[int, bytes], None] | None = None
self._block_buffer: list[tuple[int, bytes]] = []
self._eof = False
self._read_buffer = b""
self._write_buffer = b""
self._end_of_writing = False
self._current_stream: int | None = None
self._current_missing = 0
self._current_buffer = b""
self._selector = selectors.DefaultSelector()
self._selector.register(self._sock, selectors.EVENT_READ)
def __enter__(self) -> t.Self:
return self
def __exit__(
self,
type_: type[BaseException] | None,
value: BaseException | None,
tb: TracebackType | None,
) -> None:
self._selector.close()
def set_block_done_callback(
self, block_done_callback: Callable[[int, bytes], None]
) -> None:
self._block_done_callback = block_done_callback
if self._block_done_callback is not None:
while self._block_buffer:
elt = self._block_buffer.pop(0)
self._block_done_callback(*elt)
def _add_block(self, stream_id: int, data: bytes) -> None:
if self._block_done_callback is not None:
self._block_done_callback(stream_id, data)
else:
self._block_buffer.append((stream_id, data))
def _read(self) -> None:
if self._eof:
return
data: bytes | None
if hasattr(self._sock, "recv"):
try:
data = self._sock.recv(262144)
except Exception as e: # pylint: disable=broad-exception-caught
# After calling self._sock.shutdown(), OpenSSL's/urllib3's
# WrappedSocket seems to eventually raise ZeroReturnError in
# case of EOF
if "OpenSSL.SSL.ZeroReturnError" in str(type(e)):
self._eof = True
return
raise
elif isinstance(self._sock, pysocket.SocketIO): # type: ignore[unreachable]
data = self._sock.read() # type: ignore
else:
            data = os.read(self._sock.fileno(), 262144)  # type: ignore # os.read() requires a length argument; TODO does this really work?!
if data is None:
# no data available
return # type: ignore[unreachable]
self._log(f"read {len(data)} bytes")
if len(data) == 0:
# Stream EOF
self._eof = True
return
self._read_buffer += data
while len(self._read_buffer) > 0:
if self._current_missing > 0:
n = min(len(self._read_buffer), self._current_missing)
self._current_buffer += self._read_buffer[:n]
self._read_buffer = self._read_buffer[n:]
self._current_missing -= n
if self._current_missing == 0:
assert self._current_stream is not None
self._add_block(self._current_stream, self._current_buffer)
self._current_buffer = b""
if len(self._read_buffer) < 8:
break
self._current_stream, self._current_missing = struct.unpack(
">BxxxL", self._read_buffer[:8]
)
self._read_buffer = self._read_buffer[8:]
if self._current_missing < 0:
# Stream EOF (as reported by docker daemon)
self._eof = True
break
def _handle_end_of_writing(self) -> None:
if self._end_of_writing and len(self._write_buffer) == 0:
self._end_of_writing = False
self._log("Shutting socket down for writing")
shutdown_writing(self._sock, self._log)
def _write(self) -> None:
if len(self._write_buffer) > 0:
written = write_to_socket(self._sock, self._write_buffer)
self._write_buffer = self._write_buffer[written:]
self._log(f"wrote {written} bytes, {len(self._write_buffer)} are left")
if len(self._write_buffer) > 0:
self._selector.modify(
self._sock, selectors.EVENT_READ | selectors.EVENT_WRITE
)
else:
self._selector.modify(self._sock, selectors.EVENT_READ)
self._handle_end_of_writing()
def select(
self, timeout: int | float | None = None, _internal_recursion: bool = False
) -> bool:
if (
not _internal_recursion
and self._paramiko_read_workaround
and len(self._write_buffer) > 0
):
# When the SSH transport is used, Docker SDK for Python internally uses Paramiko, whose
# Channel object supports select(), but only for reading
# (https://github.com/paramiko/paramiko/issues/695).
if self._sock.send_ready(): # type: ignore
self._write()
return True
while timeout is None or timeout > PARAMIKO_POLL_TIMEOUT:
result = int(
self.select(PARAMIKO_POLL_TIMEOUT, _internal_recursion=True)
)
if self._sock.send_ready(): # type: ignore
                    self._write()
result += 1
if result > 0:
return True
if timeout is not None:
timeout -= PARAMIKO_POLL_TIMEOUT
self._log(f"select... ({timeout})")
events = self._selector.select(timeout)
for key, event in events:
if key.fileobj == self._sock:
ev_read = event & selectors.EVENT_READ != 0
ev_write = event & selectors.EVENT_WRITE != 0
self._log(f"select event read:{ev_read} write:{ev_write}")
if event & selectors.EVENT_READ != 0:
self._read()
if event & selectors.EVENT_WRITE != 0:
self._write()
result = len(events)
if self._paramiko_read_workaround and len(self._write_buffer) > 0 and self._sock.send_ready(): # type: ignore
self._write()
result += 1
return result > 0
def is_eof(self) -> bool:
return self._eof
def end_of_writing(self) -> None:
self._end_of_writing = True
self._handle_end_of_writing()
def consume(self) -> tuple[bytes, bytes]:
stdout = []
stderr = []
def append_block(stream_id: int, data: bytes) -> None:
if stream_id == docker_socket.STDOUT:
stdout.append(data)
elif stream_id == docker_socket.STDERR:
stderr.append(data)
else:
raise ValueError(f"{stream_id} is not a valid stream ID")
self.end_of_writing()
self.set_block_done_callback(append_block)
while not self._eof:
self.select()
return b"".join(stdout), b"".join(stderr)
def write(self, str_to_write: bytes) -> None:
self._write_buffer += str_to_write
if len(self._write_buffer) == len(str_to_write):
self._write()
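# Illustrative note (not part of the vendored file): the 8-byte header parsed
# with struct.unpack(">BxxxL", ...) in _read() above is a Docker attach-stream
# frame: one byte stream ID (0=stdin, 1=stdout, 2=stderr), three padding bytes,
# and a big-endian 32-bit payload length. For example:
#   struct.unpack(">BxxxL", b"\x01\x00\x00\x00\x00\x00\x00\x05") == (1, 5)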
class DockerSocketHandlerModule(DockerSocketHandlerBase):
def __init__(self, sock: SocketLike, module: AnsibleModule) -> None:
super().__init__(sock, module.debug)

View file

@ -0,0 +1,79 @@
# Copyright (c) 2019-2021, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import fcntl
import os
import os.path
import socket as pysocket
import typing as t
from collections.abc import Callable
if t.TYPE_CHECKING:
SocketLike = pysocket.socket
def make_file_unblocking(file: SocketLike) -> None:
fcntl.fcntl(
file.fileno(),
fcntl.F_SETFL,
fcntl.fcntl(file.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK,
)
def make_file_blocking(file: SocketLike) -> None:
fcntl.fcntl(
file.fileno(),
fcntl.F_SETFL,
fcntl.fcntl(file.fileno(), fcntl.F_GETFL) & ~os.O_NONBLOCK,
)
def make_unblocking(sock: SocketLike) -> None:
if hasattr(sock, "_sock"):
sock._sock.setblocking(0)
elif hasattr(sock, "setblocking"):
sock.setblocking(0) # type: ignore # TODO: CHECK!
else:
make_file_unblocking(sock)
def _empty_writer(msg: str) -> None:
pass
def shutdown_writing(
sock: SocketLike, log: Callable[[str], None] = _empty_writer
) -> None:
# FIXME: This does **not work with SSLSocket**! Apparently SSLSocket does not allow to send
# a close_notify TLS alert without completely shutting down the connection.
    # Calling sock.shutdown(pysocket.SHUT_WR) simply turns off TLS encryption and from that
# point on the raw encrypted data is returned when sock.recv() is called. :-(
if hasattr(sock, "shutdown_write"):
sock.shutdown_write()
elif hasattr(sock, "shutdown"):
try:
sock.shutdown(pysocket.SHUT_WR)
except TypeError as e:
# probably: "TypeError: shutdown() takes 1 positional argument but 2 were given"
log(f"Shutting down for writing not possible; trying shutdown instead: {e}")
sock.shutdown() # type: ignore
elif isinstance(sock, pysocket.SocketIO): # type: ignore
sock._sock.shutdown(pysocket.SHUT_WR) # type: ignore[unreachable]
else:
log("No idea how to signal end of writing")
def write_to_socket(sock: SocketLike, data: bytes) -> int:
if hasattr(sock, "_send_until_done"):
# WrappedSocket (urllib3/contrib/pyopenssl) does not have `send`, but
# only `sendall`, which uses `_send_until_done` under the hood.
return sock._send_until_done(data)
if hasattr(sock, "send"):
return sock.send(data)
return os.write(sock.fileno(), data)

View file

@ -0,0 +1,312 @@
# Copyright (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) <piotr@it-playground.pl>
# Copyright (c) Thierry Bouvet (@tbouvet)
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import json
import typing as t
from time import sleep
try:
from docker.errors import APIError, NotFound
except ImportError:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
from ansible_collections.community.docker.plugins.module_utils._common import (
AnsibleDockerClient,
)
from ansible_collections.community.docker.plugins.module_utils._version import (
LooseVersion,
)
class AnsibleDockerSwarmClient(AnsibleDockerClient):
def get_swarm_node_id(self) -> str | None:
"""
Get the 'NodeID' of the Swarm node or 'None' if host is not in Swarm. It returns the NodeID
of Docker host the module is executed on
:return:
NodeID of host or 'None' if not part of Swarm
"""
try:
info = self.info()
except APIError as exc:
self.fail(f"Failed to get node information for {exc}")
if info:
json_str = json.dumps(info, ensure_ascii=False)
swarm_info = json.loads(json_str)
if swarm_info["Swarm"]["NodeID"]:
return swarm_info["Swarm"]["NodeID"]
return None
def check_if_swarm_node(self, node_id: str | None = None) -> bool | None:
"""
Checking if host is part of Docker Swarm. If 'node_id' is not provided it reads the Docker host
system information looking if specific key in output exists. If 'node_id' is provided then it tries to
read node information assuming it is run on Swarm manager. The get_node_inspect() method handles exception if
it is not executed on Swarm manager
:param node_id: Node identifier
:return:
bool: True if node is part of Swarm, False otherwise
"""
if node_id is None:
try:
info = self.info()
except APIError:
self.fail("Failed to get host information.")
if info:
json_str = json.dumps(info, ensure_ascii=False)
swarm_info = json.loads(json_str)
if swarm_info["Swarm"]["NodeID"]:
return True
return swarm_info["Swarm"]["LocalNodeState"] in (
"active",
"pending",
"locked",
)
return False
try:
node_info = self.get_node_inspect(node_id=node_id)
except APIError:
return None
return node_info["ID"] is not None
def check_if_swarm_manager(self) -> bool:
"""
Checks if node role is set as Manager in Swarm. The node is the docker host on which module action
is performed. The inspect_swarm() will fail if node is not a manager
:return: True if node is Swarm Manager, False otherwise
"""
try:
self.inspect_swarm()
return True
except APIError:
return False
def fail_task_if_not_swarm_manager(self) -> None:
"""
If host is not a swarm manager then Ansible task on this host should end with 'failed' state
"""
if not self.check_if_swarm_manager():
self.fail(
"Error running docker swarm module: must run on swarm manager node"
)
def check_if_swarm_worker(self) -> bool:
"""
Checks if node role is set as Worker in Swarm. The node is the docker host on which module action
is performed. Will fail if run on host that is not part of Swarm via check_if_swarm_node()
:return: True if node is Swarm Worker, False otherwise
"""
return bool(self.check_if_swarm_node() and not self.check_if_swarm_manager())
def check_if_swarm_node_is_down(
self, node_id: str | None = None, repeat_check: int = 1
) -> bool:
"""
Checks if node status on Swarm manager is 'down'. If node_id is provided it query manager about
node specified in parameter, otherwise it query manager itself. If run on Swarm Worker node or
host that is not part of Swarm it will fail the playbook
:param repeat_check: number of check attempts with 5 seconds delay between them, by default check only once
:param node_id: node ID or name, if None then method will try to get node_id of host module run on
:return:
True if node is part of swarm but its state is down, False otherwise
"""
repeat_check = max(1, repeat_check)
if node_id is None:
node_id = self.get_swarm_node_id()
for retry in range(0, repeat_check):
if retry > 0:
sleep(5)
node_info = self.get_node_inspect(node_id=node_id)
if node_info["Status"]["State"] == "down":
return True
return False
@t.overload
def get_node_inspect(
self, node_id: str | None = None, skip_missing: t.Literal[False] = False
) -> dict[str, t.Any]: ...
@t.overload
def get_node_inspect(
self, node_id: str | None = None, skip_missing: bool = False
) -> dict[str, t.Any] | None: ...
def get_node_inspect(
self, node_id: str | None = None, skip_missing: bool = False
) -> dict[str, t.Any] | None:
"""
Returns Swarm node info as in 'docker node inspect' command about single node
:param skip_missing: if True then function will return None instead of failing the task
:param node_id: node ID or name, if None then method will try to get node_id of host module run on
:return:
Single node information structure
"""
if node_id is None:
node_id = self.get_swarm_node_id()
if node_id is None:
self.fail("Failed to get node information.")
try:
node_info = self.inspect_node(node_id=node_id)
except APIError as exc:
if exc.status_code == 503:
self.fail(
"Cannot inspect node: To inspect node execute module on Swarm Manager"
)
if exc.status_code == 404 and skip_missing:
return None
self.fail(f"Error while reading from Swarm manager: {exc}")
except Exception as exc: # pylint: disable=broad-exception-caught
self.fail(f"Error inspecting swarm node: {exc}")
json_str = json.dumps(node_info, ensure_ascii=False)
node_info = json.loads(json_str)
if "ManagerStatus" in node_info and node_info["ManagerStatus"].get("Leader"):
            # This is a workaround for a Docker bug where in some cases the leader IP is 0.0.0.0.
            # See moby/moby#35437 for details.
count_colons = node_info["ManagerStatus"]["Addr"].count(":")
if count_colons == 1:
swarm_leader_ip = (
node_info["ManagerStatus"]["Addr"].split(":", 1)[0]
or node_info["Status"]["Addr"]
)
else:
swarm_leader_ip = node_info["Status"]["Addr"]
node_info["Status"]["Addr"] = swarm_leader_ip
return node_info
def get_all_nodes_inspect(self) -> list[dict[str, t.Any]]:
"""
Returns Swarm node info as in 'docker node inspect' command about all registered nodes
:return:
Structure with information about all nodes
"""
try:
node_info = self.nodes()
except APIError as exc:
if exc.status_code == 503:
self.fail(
"Cannot inspect node: To inspect node execute module on Swarm Manager"
)
self.fail(f"Error while reading from Swarm manager: {exc}")
except Exception as exc: # pylint: disable=broad-exception-caught
self.fail(f"Error inspecting swarm node: {exc}")
json_str = json.dumps(node_info, ensure_ascii=False)
node_info = json.loads(json_str)
return node_info
@t.overload
def get_all_nodes_list(self, output: t.Literal["short"] = "short") -> list[str]: ...
@t.overload
def get_all_nodes_list(
self, output: t.Literal["long"]
) -> list[dict[str, t.Any]]: ...
def get_all_nodes_list(
self, output: t.Literal["short", "long"] = "short"
) -> list[str] | list[dict[str, t.Any]]:
"""
Returns list of nodes registered in Swarm
:param output: Defines format of returned data
:return:
If 'output' is 'short' then return data is list of nodes hostnames registered in Swarm,
if 'output' is 'long' then returns data is list of dict containing the attributes as in
output of command 'docker node ls'
"""
nodes_inspect = self.get_all_nodes_inspect()
if output == "short":
nodes_list = []
for node in nodes_inspect:
nodes_list.append(node["Description"]["Hostname"])
return nodes_list
if output == "long":
nodes_info_list = []
for node in nodes_inspect:
node_property: dict[str, t.Any] = {}
node_property["ID"] = node["ID"]
node_property["Hostname"] = node["Description"]["Hostname"]
node_property["Status"] = node["Status"]["State"]
node_property["Availability"] = node["Spec"]["Availability"]
if "ManagerStatus" in node:
if node["ManagerStatus"]["Leader"] is True:
node_property["Leader"] = True
node_property["ManagerStatus"] = node["ManagerStatus"][
"Reachability"
]
node_property["EngineVersion"] = node["Description"]["Engine"][
"EngineVersion"
]
nodes_info_list.append(node_property)
return nodes_info_list
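    # Illustrative example (not part of the vendored file): with output="long",
    # each returned entry looks roughly like
    #   {"ID": "abc...", "Hostname": "node-1", "Status": "ready",
    #    "Availability": "active", "EngineVersion": "24.0.7",
    #    "Leader": True, "ManagerStatus": "reachable"}
    # where "ManagerStatus" is only present for managers and "Leader" only for
    # the current leader.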
def get_node_name_by_id(self, nodeid: str) -> str:
return self.get_node_inspect(nodeid)["Description"]["Hostname"]
def get_unlock_key(self) -> dict[str, t.Any] | None:
if self.docker_py_version < LooseVersion("2.7.0"):
return None
return super().get_unlock_key()
def get_service_inspect(
self, service_id: str, skip_missing: bool = False
) -> dict[str, t.Any] | None:
"""
Returns Swarm service info as in 'docker service inspect' command about single service
:param service_id: service ID or name
:param skip_missing: if True then function will return None instead of failing the task
:return:
Single service information structure
"""
try:
service_info = self.inspect_service(service_id)
except NotFound as exc:
if skip_missing is False:
self.fail(f"Error while reading from Swarm manager: {exc}")
else:
return None
except APIError as exc:
if exc.status_code == 503:
self.fail(
"Cannot inspect service: To inspect service execute module on Swarm Manager"
)
self.fail(f"Error inspecting swarm service: {exc}")
except Exception as exc: # pylint: disable=broad-exception-caught
self.fail(f"Error inspecting swarm service: {exc}")
json_str = json.dumps(service_info, ensure_ascii=False)
service_info = json.loads(json_str)
return service_info

View file

@@ -0,0 +1,551 @@
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
from __future__ import annotations
import ipaddress
import json
import re
import typing as t
from datetime import timedelta
from urllib.parse import urlparse
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.common.collections import is_sequence
from ansible.module_utils.common.text.converters import to_text
if t.TYPE_CHECKING:
from collections.abc import Callable
from ansible.module_utils.basic import AnsibleModule
from ._common import AnsibleDockerClientBase as CADCB
from ._common_api import AnsibleDockerClientBase as CAPIADCB
from ._common_cli import AnsibleDockerClientBase as CCLIADCB
Client = t.Union[CADCB, CAPIADCB, CCLIADCB] # noqa: UP007
DEFAULT_DOCKER_HOST = "unix:///var/run/docker.sock"
DEFAULT_TLS = False
DEFAULT_TLS_VERIFY = False
DEFAULT_TLS_HOSTNAME = "localhost" # deprecated
DEFAULT_TIMEOUT_SECONDS = 60
DOCKER_COMMON_ARGS = {
"docker_host": {
"type": "str",
"default": DEFAULT_DOCKER_HOST,
"fallback": (env_fallback, ["DOCKER_HOST"]),
"aliases": ["docker_url"],
},
"tls_hostname": {
"type": "str",
"fallback": (env_fallback, ["DOCKER_TLS_HOSTNAME"]),
},
"api_version": {
"type": "str",
"default": "auto",
"fallback": (env_fallback, ["DOCKER_API_VERSION"]),
"aliases": ["docker_api_version"],
},
"timeout": {
"type": "int",
"default": DEFAULT_TIMEOUT_SECONDS,
"fallback": (env_fallback, ["DOCKER_TIMEOUT"]),
},
"ca_path": {"type": "path", "aliases": ["ca_cert", "tls_ca_cert", "cacert_path"]},
"client_cert": {"type": "path", "aliases": ["tls_client_cert", "cert_path"]},
"client_key": {"type": "path", "aliases": ["tls_client_key", "key_path"]},
"tls": {
"type": "bool",
"default": DEFAULT_TLS,
"fallback": (env_fallback, ["DOCKER_TLS"]),
},
"use_ssh_client": {"type": "bool", "default": False},
"validate_certs": {
"type": "bool",
"default": DEFAULT_TLS_VERIFY,
"fallback": (env_fallback, ["DOCKER_TLS_VERIFY"]),
"aliases": ["tls_verify"],
},
"debug": {"type": "bool", "default": False},
}
DOCKER_COMMON_ARGS_VARS = {
option_name: f"ansible_docker_{option_name}"
for option_name in DOCKER_COMMON_ARGS
if option_name != "debug"
}
DOCKER_MUTUALLY_EXCLUSIVE: list[tuple[str, ...] | list[str]] = []
DOCKER_REQUIRED_TOGETHER: list[tuple[str, ...] | list[str]] = [
["client_cert", "client_key"]
]
DEFAULT_DOCKER_REGISTRY = "https://index.docker.io/v1/"
BYTE_SUFFIXES = ["B", "KB", "MB", "GB", "TB", "PB"]
def is_image_name_id(name: str) -> bool:
"""Check whether the given image name is in fact an image ID (hash)."""
return bool(re.match("^sha256:[0-9a-fA-F]{64}$", name))
def is_valid_tag(tag: str, allow_empty: bool = False) -> bool:
"""Check whether the given string is a valid docker tag name."""
if not tag:
return allow_empty
# See here ("Extended description") for a definition what tags can be:
# https://docs.docker.com/engine/reference/commandline/tag/
return bool(re.match("^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$", tag))
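# A quick sanity check of the two validators above (illustrative values):
# is_image_name_id("sha256:" + "0" * 64) is True, is_image_name_id("busybox") is False;
# is_valid_tag("v1.0_rc-2") is True, while is_valid_tag("-latest") is False because a
# tag must start with an alphanumeric character or an underscore.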
def sanitize_result(data: t.Any) -> t.Any:
"""Sanitize data object for return to Ansible.
When the data object contains types such as docker.types.containers.HostConfig,
Ansible will fail when these are returned via exit_json or fail_json.
HostConfig is derived from dict, but its constructor requires additional
arguments. This function sanitizes data structures by recursively converting
everything derived from dict to dict and everything derived from list (and tuple)
to a list.
"""
if isinstance(data, dict):
return dict((k, sanitize_result(v)) for k, v in data.items())
if isinstance(data, (list, tuple)):
return [sanitize_result(v) for v in data]
return data
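# For example, sanitize_result({"HostConfig": ("a", "b")}) returns {"HostConfig": ["a", "b"]}:
# tuples become plain lists and dict subclasses become plain dicts, recursively.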
def log_debug(msg: t.Any, pretty_print: bool = False) -> None:
"""Write a log message to docker.log.
If ``pretty_print=True``, the message will be pretty-printed as JSON.
"""
with open("docker.log", "at", encoding="utf-8") as log_file:
if pretty_print:
log_file.write(
json.dumps(msg, sort_keys=True, indent=4, separators=(",", ": "))
)
log_file.write("\n")
else:
log_file.write(f"{msg}\n")
class DockerBaseClass:
def __init__(self) -> None:
self.debug = False
def log(self, msg: t.Any, pretty_print: bool = False) -> None:
pass
# if self.debug:
# log_debug(msg, pretty_print=pretty_print)
def update_tls_hostname(
result: dict[str, t.Any],
old_behavior: bool = False,
deprecate_function: Callable[[str], None] | None = None,
uses_tls: bool = True,
) -> None:
if result["tls_hostname"] is None:
# get default machine name from the url
parsed_url = urlparse(result["docker_host"])
result["tls_hostname"] = parsed_url.netloc.rsplit(":", 1)[0]
def compare_dict_allow_more_present(av: dict, bv: dict) -> bool:
"""
Compare two dictionaries for whether every entry of the first is in the second.
"""
for key, value in av.items():
if key not in bv:
return False
if bv[key] != value:
return False
return True
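# For example, compare_dict_allow_more_present({"a": 1}, {"a": 1, "b": 2}) is True,
# while swapping the arguments yields False because "b" is missing from the second dict.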
def compare_generic(
a: t.Any,
b: t.Any,
method: t.Literal["ignore", "strict", "allow_more_present"],
datatype: t.Literal["value", "list", "set", "set(dict)", "dict"],
) -> bool:
"""
Compare values a and b as described by method and datatype.
Returns ``True`` if the values compare equal, and ``False`` if not.
``a`` is usually the module's parameter, while ``b`` is a property
of the current object. ``a`` must not be ``None`` (except for
``datatype == 'value'``).
Valid values for ``method`` are:
- ``ignore`` (always compare as equal);
- ``strict`` (only compare if really equal)
- ``allow_more_present`` (allow b to have elements which a does not have).
Valid values for ``datatype`` are:
- ``value``: for simple values (strings, numbers, ...);
- ``list``: for ``list``s or ``tuple``s where order matters;
- ``set``: for ``list``s, ``tuple``s or ``set``s where order does not
matter;
- ``set(dict)``: for ``list``s, ``tuple``s or ``set``s where order does
not matter and which contain ``dict``s; ``allow_more_present`` is used
for the ``dict``s, and these are assumed to be dictionaries of values;
- ``dict``: for dictionaries of values.
"""
if method == "ignore":
return True
# If a or b is None:
if a is None or b is None:
# If both are None: equality
if a == b:
return True
# Otherwise they are not equal for datatype 'value'; for set/list/dict,
# they compare equal only if the non-None side is empty
if datatype == "value":
return False
# For allow_more_present, allow a to be None
if method == "allow_more_present" and a is None:
return True
# Otherwise, the iterable object which is not None must have length 0
return len(b if a is None else a) == 0
# Do proper comparison (both objects not None)
if datatype == "value":
return a == b
if datatype == "list":
if method == "strict":
return a == b
i = 0
for v in a:
while i < len(b) and b[i] != v:
i += 1
if i == len(b):
return False
i += 1
return True
if datatype == "dict":
if method == "strict":
return a == b
return compare_dict_allow_more_present(a, b)
if datatype == "set":
set_a = set(a)
set_b = set(b)
if method == "strict":
return set_a == set_b
return set_b >= set_a
if datatype == "set(dict)":
for av in a:
found = False
for bv in b:
if compare_dict_allow_more_present(av, bv):
found = True
break
if not found:
return False
if method == "strict":
# If we knew that both a and b contained no duplicates, we could
# simply compare len(a) to len(b) to finish this test. We can assume
# that b has no duplicates (as it is returned by Docker), but we do
# not know this for a.
for bv in b:
found = False
for av in a:
if compare_dict_allow_more_present(av, bv):
found = True
break
if not found:
return False
return True
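# A few illustrative calls:
# compare_generic(["a"], ["x", "a", "y"], "allow_more_present", "set") -> True
# compare_generic(["a", "b"], ["b", "a"], "strict", "set") -> True (order ignored)
# compare_generic(["a", "b"], ["b", "a"], "strict", "list") -> False (order matters)
# compare_generic({"a": 1}, {"a": 1, "b": 2}, "allow_more_present", "dict") -> True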
class DifferenceTracker:
def __init__(self) -> None:
self._diff: list[dict[str, t.Any]] = []
def add(self, name: str, parameter: t.Any = None, active: t.Any = None) -> None:
self._diff.append(
{
"name": name,
"parameter": parameter,
"active": active,
}
)
def merge(self, other_tracker: DifferenceTracker) -> None:
self._diff.extend(other_tracker._diff)
@property
def empty(self) -> bool:
return len(self._diff) == 0
def get_before_after(self) -> tuple[dict[str, t.Any], dict[str, t.Any]]:
"""
Return the ``before`` and ``after`` dictionaries.
"""
before = {}
after = {}
for item in self._diff:
before[item["name"]] = item["active"]
after[item["name"]] = item["parameter"]
return before, after
def has_difference_for(self, name: str) -> bool:
"""
Returns whether a difference exists for the given name
"""
return any(diff for diff in self._diff if diff["name"] == name)
def get_legacy_docker_container_diffs(self) -> list[dict[str, t.Any]]:
"""
Return differences in the docker_container legacy format.
"""
result = []
for entry in self._diff:
item = {}
item[entry["name"]] = {
"parameter": entry["parameter"],
"container": entry["active"],
}
result.append(item)
return result
def get_legacy_docker_diffs(self) -> list[str]:
"""
Return differences in the docker_container legacy format.
"""
result = [entry["name"] for entry in self._diff]
return result
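# Example usage (illustrative values): after
# tracker.add("image", parameter="nginx:1.27", active="nginx:1.25"),
# tracker.get_before_after() returns ({"image": "nginx:1.25"}, {"image": "nginx:1.27"})
# and tracker.has_difference_for("image") is True.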
def sanitize_labels(
labels: dict[str, t.Any] | None,
labels_field: str,
client: Client | None = None,
module: AnsibleModule | None = None,
) -> None:
def fail(msg: str) -> t.NoReturn:
if client is not None:
client.fail(msg)
if module is not None:
module.fail_json(msg=msg)
raise ValueError(msg)
if labels is None:
return
for k, v in list(labels.items()):
if not isinstance(k, str):
fail(f"The key {k!r} of {labels_field} is not a string!")
if isinstance(v, (bool, float)):
fail(
f"The value {v!r} for {k!r} of {labels_field} is not a string or something than can be safely converted to a string!"
)
labels[k] = to_text(v)
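# For example (with module an AnsibleModule instance),
# sanitize_labels({"answer": 42}, "labels", module=module) rewrites the value to "42"
# in place, while a bool such as True fails instead of being silently converted.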
@t.overload
def clean_dict_booleans_for_docker_api(
data: dict[str, t.Any], *, allow_sequences: t.Literal[False] = False
) -> dict[str, str]: ...
@t.overload
def clean_dict_booleans_for_docker_api(
data: dict[str, t.Any], *, allow_sequences: bool
) -> dict[str, str | list[str]]: ...
def clean_dict_booleans_for_docker_api(
data: dict[str, t.Any] | None, *, allow_sequences: bool = False
) -> dict[str, str] | dict[str, str | list[str]]:
"""
Go does not like Python booleans 'True' or 'False', while Ansible is just
fine with them in YAML. As such, they need to be converted in cases where
we pass dictionaries to the Docker API (e.g. docker_network's
driver_options and docker_prune's filters). When `allow_sequences=True`
YAML sequences (lists, tuples) are converted to [str] instead of str([...])
which is the expected format of filters which accept lists such as labels.
"""
def sanitize(value: t.Any) -> str:
if value is True:
return "true"
if value is False:
return "false"
return str(value)
result = {}
if data is not None:
for k, v in data.items():
result[str(k)] = (
[sanitize(e) for e in v]
if allow_sequences and is_sequence(v)
else sanitize(v)
)
return result
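# For instance, clean_dict_booleans_for_docker_api({"dangling": True, "depth": 3})
# returns {"dangling": "true", "depth": "3"}; with allow_sequences=True,
# {"label": ["foo", True]} becomes {"label": ["foo", "true"]}.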
def convert_duration_to_nanosecond(time_str: str) -> int:
"""
Return the time duration in nanoseconds.
"""
if not isinstance(time_str, str):
raise ValueError(f"Missing unit in duration - {time_str}")
regex = re.compile(
r"^(((?P<hours>\d+)h)?"
r"((?P<minutes>\d+)m(?!s))?"
r"((?P<seconds>\d+)s)?"
r"((?P<milliseconds>\d+)ms)?"
r"((?P<microseconds>\d+)us)?)$"
)
parts = regex.match(time_str)
if not parts:
raise ValueError(f"Invalid time duration - {time_str}")
parts_dict = parts.groupdict()
time_params = {}
for name, value in parts_dict.items():
if value:
time_params[name] = int(value)
delta = timedelta(**time_params)
time_in_nanoseconds = (
delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10**6
) * 10**3
return time_in_nanoseconds
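# For example, convert_duration_to_nanosecond("1m30s") returns 90 * 10**9 and
# "500ms" returns 500 * 10**6; a bare "90" raises ValueError since a unit is mandatory.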
def normalize_healthcheck_test(test: t.Any) -> list[str]:
if isinstance(test, (tuple, list)):
return [str(e) for e in test]
return ["CMD-SHELL", str(test)]
def normalize_healthcheck(
healthcheck: dict[str, t.Any], normalize_test: bool = False
) -> dict[str, t.Any]:
"""
Return a dictionary of normalized healthcheck parameters.
"""
result = {}
# All supported healthcheck parameters
options = (
"test",
"test_cli_compatible",
"interval",
"timeout",
"start_period",
"start_interval",
"retries",
)
duration_options = ("interval", "timeout", "start_period", "start_interval")
for key in options:
if key in healthcheck:
value = healthcheck[key]
if value is None:
# due to recursive argument_spec, all keys are always present
# (but have default value None if not specified)
continue
if key in duration_options:
value = convert_duration_to_nanosecond(value)
if not value and not (
healthcheck.get("test_cli_compatible") and key == "test"
):
continue
if key == "retries":
try:
value = int(value)
except ValueError as exc:
raise ValueError(
f'Cannot parse number of retries for healthcheck. Expected an integer, got "{value}".'
) from exc
if key == "test" and value and normalize_test:
value = normalize_healthcheck_test(value)
result[key] = value
return result
def parse_healthcheck(
healthcheck: dict[str, t.Any] | None,
) -> tuple[dict[str, t.Any] | None, bool | None]:
"""
Return a dictionary of healthcheck parameters and a boolean indicating whether
the healthcheck defined in the image was requested to be disabled.
"""
if (not healthcheck) or (not healthcheck.get("test")):
return None, None
result = normalize_healthcheck(healthcheck, normalize_test=True)
if result["test"] == ["NONE"]:
# If the user explicitly disables the healthcheck, return None
# as the healthcheck object, and set disable_healthcheck to True
return None, True
return result, False
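# Illustration (example command): parse_healthcheck({"test": "curl -f http://localhost/"})
# returns ({"test": ["CMD-SHELL", "curl -f http://localhost/"]}, False), while
# parse_healthcheck({"test": ["NONE"]}) returns (None, True) to signal that the
# healthcheck defined in the image should be disabled.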
def omit_none_from_dict(d: dict[str, t.Any]) -> dict[str, t.Any]:
"""
Return a copy of the dictionary with all keys with value None omitted.
"""
return {k: v for (k, v) in d.items() if v is not None}
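# For example, omit_none_from_dict({"a": 1, "b": None}) returns {"a": 1}.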
@t.overload
def normalize_ip_address(ip_address: str) -> str: ...
@t.overload
def normalize_ip_address(ip_address: str | None) -> str | None: ...
def normalize_ip_address(ip_address: str | None) -> str | None:
"""
Given an IP address as a string, normalize it so that it can be
used to compare IP addresses as strings.
"""
if ip_address is None:
return None
try:
return ipaddress.ip_address(ip_address).compressed
except ValueError:
# Fallback for invalid addresses: simply return the input
return ip_address
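# For example, normalize_ip_address("2001:0db8::0001") returns "2001:db8::1", so
# differently spelled but equal addresses compare equal as strings.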
@t.overload
def normalize_ip_network(network: str) -> str: ...
@t.overload
def normalize_ip_network(network: str | None) -> str | None: ...
def normalize_ip_network(network: str | None) -> str | None:
"""
Given a network in CIDR notation as a string, normalize it so that it can be
used to compare networks as strings.
"""
if network is None:
return None
try:
return ipaddress.ip_network(network).compressed
except ValueError:
# Fallback for invalid networks: simply return the input
return network
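# Likewise, normalize_ip_network("2001:db8:0:0::/64") returns "2001:db8::/64".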

View file

@@ -0,0 +1,15 @@
# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!
"""Provide version object to compare version numbers."""
from __future__ import annotations
from ansible.module_utils.compat.version import ( # noqa: F401, pylint: disable=unused-import
LooseVersion,
StrictVersion,
)