forked from CCCHH/ansible-infra
Vendor Galaxy Roles and Collections
parent c1e1897cda
commit 2aed20393f

3553 changed files with 387444 additions and 2 deletions

@@ -0,0 +1,49 @@
# Copyright (c) 2022, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

import base64
import typing as t

from ansible import constants as C
from ansible.plugins.action import ActionBase
from ansible.utils.vars import merge_hash

from ansible_collections.community.docker.plugins.module_utils._scramble import (
    unscramble,
)


class ActionModule(ActionBase):
    # Set to True when transferring files to the remote
    TRANSFERS_FILES = False

    def run(
        self, tmp: str | None = None, task_vars: dict[str, t.Any] | None = None
    ) -> dict[str, t.Any]:
        self._supports_check_mode = True
        self._supports_async = True

        result = super().run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        # pylint: disable-next=no-member
        max_file_size_for_diff: int = C.MAX_FILE_SIZE_FOR_DIFF  # type: ignore
        self._task.args["_max_file_size_for_diff"] = max_file_size_for_diff

        result = merge_hash(
            result,
            self._execute_module(task_vars=task_vars, wrap_async=self._task.async_val),
        )

        if "diff" in result and result["diff"].get("scrambled_diff"):
            # Scrambling is not done for security, but to avoid no_log screwing up the diff
            diff = result["diff"]
            key = base64.b64decode(diff.pop("scrambled_diff"))
            for k in ("before", "after"):
                if k in diff:
                    diff[k] = unscramble(diff[k], key)

        return result
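
The wrapped module is not named in this diff, but the `_max_file_size_for_diff` argument and the scramble/unscramble round-trip match community.docker's docker_container_copy_into action plugin. A usage sketch assuming that module (container name and paths are hypothetical); the plugin transparently unscrambles the diff so `--diff` output stays readable even with no_log involved:

- name: Copy a file into a running container, keeping a usable diff
  community.docker.docker_container_copy_into:
    container: mycontainer          # hypothetical container name
    path: /tmp/input.txt            # hypothetical file on the controller
    container_path: /tmp/input.txt
  diff: true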

@@ -0,0 +1,623 @@
# Based on the chroot connection plugin by Maykel Moya
#
# (c) 2014, Lorin Hochstein
# (c) 2015, Leendert Brouwer (https://github.com/objectified)
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
author:
  - Lorin Hochstein (!UNKNOWN)
  - Leendert Brouwer (!UNKNOWN)
name: docker
short_description: Run tasks in docker containers
description:
  - Run commands or put/fetch files to an existing docker container.
  - Uses the Docker CLI to execute commands in the container. If you prefer to directly connect to the Docker daemon, use
    the P(community.docker.docker_api#connection) connection plugin.
options:
  remote_addr:
    description:
      - The name of the container you want to access.
    default: inventory_hostname
    vars:
      - name: inventory_hostname
      - name: ansible_host
      - name: ansible_docker_host
  remote_user:
    description:
      - The user to execute as inside the container.
      - If Docker is too old to allow this (< 1.7), the one set by Docker itself will be used.
    vars:
      - name: ansible_user
      - name: ansible_docker_user
    ini:
      - section: defaults
        key: remote_user
    env:
      - name: ANSIBLE_REMOTE_USER
    cli:
      - name: user
    keyword:
      - name: remote_user
  docker_extra_args:
    description:
      - Extra arguments to pass to the docker command line.
    default: ''
    vars:
      - name: ansible_docker_extra_args
    ini:
      - section: docker_connection
        key: extra_cli_args
  container_timeout:
    default: 10
    description:
      - Controls how long we can wait to access reading output from the container once execution started.
    env:
      - name: ANSIBLE_TIMEOUT
      - name: ANSIBLE_DOCKER_TIMEOUT
        version_added: 2.2.0
    ini:
      - key: timeout
        section: defaults
      - key: timeout
        section: docker_connection
        version_added: 2.2.0
    vars:
      - name: ansible_docker_timeout
        version_added: 2.2.0
    cli:
      - name: timeout
    type: integer
  extra_env:
    description:
      - Provide extra environment variables to set when running commands in the Docker container.
      - This option can currently only be provided as Ansible variables due to limitations of ansible-core's configuration
        manager.
    vars:
      - name: ansible_docker_extra_env
    type: dict
    version_added: 3.12.0
  working_dir:
    description:
      - The directory inside the container to run commands in.
      - Requires Docker CLI version 18.06 or later.
    env:
      - name: ANSIBLE_DOCKER_WORKING_DIR
    ini:
      - key: working_dir
        section: docker_connection
    vars:
      - name: ansible_docker_working_dir
    type: string
    version_added: 3.12.0
  privileged:
    description:
      - Whether commands should be run with extended privileges.
      - B(Note) that this allows command to potentially break out of the container. Use with care!
    env:
      - name: ANSIBLE_DOCKER_PRIVILEGED
    ini:
      - key: privileged
        section: docker_connection
    vars:
      - name: ansible_docker_privileged
    type: boolean
    default: false
    version_added: 3.12.0
"""

import fcntl
import os
import os.path
import re
import selectors
import subprocess
import typing as t
from shlex import quote

from ansible.errors import AnsibleConnectionFailure, AnsibleError, AnsibleFileNotFound
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.plugins.connection import BUFSIZE, ConnectionBase
from ansible.utils.display import Display

from ansible_collections.community.docker.plugins.module_utils._version import (
    LooseVersion,
)

display = Display()


class Connection(ConnectionBase):
    """Local docker based connections"""

    transport = "community.docker.docker"
    has_pipelining = True

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        super().__init__(*args, **kwargs)

        # Note: docker supports running as non-root in some configurations.
        # (For instance, setting the UNIX socket file to be readable and
        # writable by a specific UNIX group and then putting users into that
        # group). Therefore we do not check that the user is root when using
        # this connection. But if the user is getting a permission denied
        # error it probably means that docker on their system is only
        # configured to be connected to by root and they are not running as
        # root.

        self._docker_args: list[bytes | str] = []
        self._container_user_cache: dict[str, str | None] = {}
        self._version: str | None = None
        self.remote_user: str | None = None
        self.timeout: int | float | None = None

        # Windows uses Powershell modules
        if getattr(self._shell, "_IS_WINDOWS", False):
            self.module_implementation_preferences = (".ps1", ".exe", "")

        if "docker_command" in kwargs:
            self.docker_cmd = kwargs["docker_command"]
        else:
            try:
                self.docker_cmd = get_bin_path("docker")
            except ValueError as exc:
                raise AnsibleError("docker command not found in PATH") from exc

    @staticmethod
    def _sanitize_version(version: str) -> str:
        version = re.sub("[^0-9a-zA-Z.]", "", version)
        version = re.sub("^v", "", version)
        return version

    def _old_docker_version(self) -> tuple[list[str], str, bytes, int]:
        cmd_args = self._docker_args

        old_version_subcommand = ["version"]

        old_docker_cmd = [self.docker_cmd] + cmd_args + old_version_subcommand
        with subprocess.Popen(
            old_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        ) as p:
            cmd_output, err = p.communicate()

        return old_docker_cmd, to_text(cmd_output), err, p.returncode

    def _new_docker_version(self) -> tuple[list[str], str, bytes, int]:
        # no result yet, must be newer Docker version
        cmd_args = self._docker_args

        new_version_subcommand = ["version", "--format", "'{{.Server.Version}}'"]

        new_docker_cmd = [self.docker_cmd] + cmd_args + new_version_subcommand
        with subprocess.Popen(
            new_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        ) as p:
            cmd_output, err = p.communicate()
        return new_docker_cmd, to_text(cmd_output), err, p.returncode

    def _get_docker_version(self) -> str:
        cmd, cmd_output, err, returncode = self._old_docker_version()
        if returncode == 0:
            for line in to_text(cmd_output, errors="surrogate_or_strict").split("\n"):
                if line.startswith("Server version:"):  # old docker versions
                    return self._sanitize_version(line.split()[2])

        cmd, cmd_output, err, returncode = self._new_docker_version()
        if returncode:
            raise AnsibleError(
                f"Docker version check ({to_text(cmd)}) failed: {to_text(err)}"
            )

        return self._sanitize_version(to_text(cmd_output, errors="surrogate_or_strict"))

    def _get_docker_remote_user(self) -> str | None:
        """Get the default user configured in the docker container"""
        container = self.get_option("remote_addr")
        if container in self._container_user_cache:
            return self._container_user_cache[container]
        with subprocess.Popen(
            [self.docker_cmd, "inspect", "--format", "{{.Config.User}}", container],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        ) as p:
            out_b, err_b = p.communicate()
            out = to_text(out_b, errors="surrogate_or_strict")

        if p.returncode != 0:
            display.warning(
                f"unable to retrieve default user from docker container: {out} {to_text(err_b)}"
            )
            self._container_user_cache[container] = None
            return None

        # The default exec user is root, unless it was changed in the Dockerfile with USER
        user = out.strip() or "root"
        self._container_user_cache[container] = user
        return user

    def _build_exec_cmd(self, cmd: list[bytes | str]) -> list[bytes | str]:
        """Build the local docker exec command to run cmd on remote_host

        If remote_user is available and is supported by the docker
        version we are using, it will be provided to docker exec.
        """

        local_cmd = [self.docker_cmd]

        if self._docker_args:
            local_cmd += self._docker_args

        local_cmd += [b"exec"]

        if self.remote_user is not None:
            local_cmd += [b"-u", self.remote_user]

        if self.get_option("extra_env"):
            for k, v in self.get_option("extra_env").items():
                for val, what in ((k, "Key"), (v, "Value")):
                    if not isinstance(val, str):
                        raise AnsibleConnectionFailure(
                            f"Non-string {what.lower()} found for extra_env option. Ambiguous env options must be "
                            "wrapped in quotes to avoid them being interpreted when directly specified "
                            "in YAML, or explicitly converted to strings when the option is templated. "
                            f"{what}: {val!r}"
                        )
                local_cmd += [
                    b"-e",
                    b"%s=%s"
                    % (
                        to_bytes(k, errors="surrogate_or_strict"),
                        to_bytes(v, errors="surrogate_or_strict"),
                    ),
                ]

        if self.get_option("working_dir") is not None:
            local_cmd += [
                b"-w",
                to_bytes(self.get_option("working_dir"), errors="surrogate_or_strict"),
            ]
            if self.docker_version != "dev" and LooseVersion(
                self.docker_version
            ) < LooseVersion("18.06"):
                # https://github.com/docker/cli/pull/732, first appeared in release 18.06.0
                raise AnsibleConnectionFailure(
                    f"Providing the working directory requires Docker CLI version 18.06 or newer. You have Docker CLI version {self.docker_version}."
                )

        if self.get_option("privileged"):
            local_cmd += [b"--privileged"]

        # -i is needed to keep stdin open which allows pipelining to work
        local_cmd += [b"-i", self.get_option("remote_addr")] + cmd

        return local_cmd

    def _set_docker_args(self) -> None:
        # TODO: this is mostly for backwards compatibility, play_context is used as fallback for older versions
        # docker arguments
        del self._docker_args[:]
        extra_args = self.get_option("docker_extra_args") or getattr(
            self._play_context, "docker_extra_args", ""
        )
        if extra_args:
            self._docker_args += extra_args.split(" ")

    def _set_conn_data(self) -> None:
        """initialize for the connection, cannot do only in init since all data is not ready at that point"""

        self._set_docker_args()

        self.remote_user = self.get_option("remote_user")
        if self.remote_user is None and self._play_context.remote_user is not None:
            self.remote_user = self._play_context.remote_user

        # timeout, use unless default and pc is different, backwards compat
        self.timeout = self.get_option("container_timeout")
        if self.timeout == 10 and self.timeout != self._play_context.timeout:
            self.timeout = self._play_context.timeout

    @property
    def docker_version(self) -> str:
        if not self._version:
            self._set_docker_args()

            self._version = self._get_docker_version()
            if self._version == "dev":
                display.warning(
                    'Docker version number is "dev". Will assume latest version.'
                )
            if self._version != "dev" and LooseVersion(self._version) < LooseVersion(
                "1.3"
            ):
                raise AnsibleError(
                    "docker connection type requires docker 1.3 or higher"
                )
        return self._version

    def _get_actual_user(self) -> str | None:
        if self.remote_user is not None:
            # An explicit user is provided
            if self.docker_version == "dev" or LooseVersion(
                self.docker_version
            ) >= LooseVersion("1.7"):
                # Support for specifying the exec user was added in docker 1.7
                return self.remote_user
            self.remote_user = None
            actual_user = self._get_docker_remote_user()
            if actual_user != self.get_option("remote_user"):
                display.warning(
                    f"docker {self.docker_version} does not support remote_user, using container default: {actual_user or '?'}"
                )
            return actual_user
        if self._display.verbosity > 2:
            # Since we are not setting the actual_user, look it up so we have it for logging later
            # Only do this if display verbosity is high enough that we'll need the value
            # This saves overhead from calling into docker when we do not need to.
            return self._get_docker_remote_user()
        return None

    def _connect(self) -> t.Self:
        """Connect to the container. Nothing to do"""
        super()._connect()  # type: ignore[safe-super]
        if not self._connected:
            self._set_conn_data()
            actual_user = self._get_actual_user()
            display.vvv(
                f"ESTABLISH DOCKER CONNECTION FOR USER: {actual_user or '?'}",
                host=self.get_option("remote_addr"),
            )
            self._connected = True
        return self

    def exec_command(
        self, cmd: str, in_data: bytes | None = None, sudoable: bool = False
    ) -> tuple[int, bytes, bytes]:
        """Run a command on the docker host"""

        self._set_conn_data()

        super().exec_command(cmd, in_data=in_data, sudoable=sudoable)  # type: ignore[safe-super]

        local_cmd = self._build_exec_cmd([self._play_context.executable, "-c", cmd])

        display.vvv(f"EXEC {to_text(local_cmd)}", host=self.get_option("remote_addr"))
        display.debug("opening command with Popen()")

        local_cmd = [to_bytes(i, errors="surrogate_or_strict") for i in local_cmd]

        with subprocess.Popen(
            local_cmd,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        ) as p:
            assert p.stdin is not None
            assert p.stdout is not None
            assert p.stderr is not None
            display.debug("done running command with Popen()")

            if self.become and self.become.expect_prompt() and sudoable:
                fcntl.fcntl(
                    p.stdout,
                    fcntl.F_SETFL,
                    fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK,
                )
                fcntl.fcntl(
                    p.stderr,
                    fcntl.F_SETFL,
                    fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK,
                )
                selector = selectors.DefaultSelector()
                selector.register(p.stdout, selectors.EVENT_READ)
                selector.register(p.stderr, selectors.EVENT_READ)

                become_output = b""
                try:
                    while not self.become.check_success(
                        become_output
                    ) and not self.become.check_password_prompt(become_output):
                        events = selector.select(self.timeout)
                        if not events:
                            stdout, stderr = p.communicate()
                            raise AnsibleError(
                                "timeout waiting for privilege escalation password prompt:\n"
                                + to_text(become_output)
                            )

                        chunks = b""
                        for key, dummy_event in events:
                            if key.fileobj == p.stdout:
                                chunk = p.stdout.read()
                                if chunk:
                                    chunks += chunk
                            elif key.fileobj == p.stderr:
                                chunk = p.stderr.read()
                                if chunk:
                                    chunks += chunk

                        if not chunks:
                            stdout, stderr = p.communicate()
                            raise AnsibleError(
                                "privilege output closed while waiting for password prompt:\n"
                                + to_text(become_output)
                            )
                        become_output += chunks
                finally:
                    selector.close()

                if not self.become.check_success(become_output):
                    become_pass = self.become.get_option(
                        "become_pass", playcontext=self._play_context
                    )
                    p.stdin.write(
                        to_bytes(become_pass, errors="surrogate_or_strict") + b"\n"
                    )
                fcntl.fcntl(
                    p.stdout,
                    fcntl.F_SETFL,
                    fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK,
                )
                fcntl.fcntl(
                    p.stderr,
                    fcntl.F_SETFL,
                    fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK,
                )

            display.debug("getting output with communicate()")
            stdout, stderr = p.communicate(in_data)
            display.debug("done communicating")

        display.debug("done with docker.exec_command()")
        return (p.returncode, stdout, stderr)

    def _prefix_login_path(self, remote_path: str) -> str:
        """Make sure that we put files into a standard path

        If a path is relative, then we need to choose where to put it.
        ssh chooses $HOME but we are not guaranteed that a home dir will
        exist in any given chroot. So for now we are choosing "/" instead.
        This also happens to be the former default.

        Can revisit using $HOME instead if it is a problem
        """
        if getattr(self._shell, "_IS_WINDOWS", False):
            import ntpath

            return ntpath.normpath(remote_path)
        if not remote_path.startswith(os.path.sep):
            remote_path = os.path.join(os.path.sep, remote_path)
        return os.path.normpath(remote_path)

    def put_file(self, in_path: str, out_path: str) -> None:
        """Transfer a file from local to docker container"""
        self._set_conn_data()
        super().put_file(in_path, out_path)  # type: ignore[safe-super]
        display.vvv(f"PUT {in_path} TO {out_path}", host=self.get_option("remote_addr"))

        out_path = self._prefix_login_path(out_path)
        if not os.path.exists(to_bytes(in_path, errors="surrogate_or_strict")):
            raise AnsibleFileNotFound(
                f"file or module does not exist: {to_text(in_path)}"
            )

        out_path = quote(out_path)
        # Older docker does not have native support for copying files into
        # running containers, so we use docker exec to implement this
        # Although docker version 1.8 and later provide support, the
        # owner and group of the files are always set to root
        with open(to_bytes(in_path, errors="surrogate_or_strict"), "rb") as in_file:
            if not os.fstat(in_file.fileno()).st_size:
                count = " count=0"
            else:
                count = ""
            args = self._build_exec_cmd(
                [
                    self._play_context.executable,
                    "-c",
                    f"dd of={out_path} bs={BUFSIZE}{count}",
                ]
            )
            args = [to_bytes(i, errors="surrogate_or_strict") for i in args]
            try:
                # pylint: disable-next=consider-using-with
                p = subprocess.Popen(
                    args, stdin=in_file, stdout=subprocess.PIPE, stderr=subprocess.PIPE
                )
            except OSError as exc:
                raise AnsibleError(
                    "docker connection requires dd command in the container to put files"
                ) from exc
            stdout, stderr = p.communicate()

            if p.returncode != 0:
                raise AnsibleError(
                    f"failed to transfer file {to_text(in_path)} to {to_text(out_path)}:\n{to_text(stdout)}\n{to_text(stderr)}"
                )

    def fetch_file(self, in_path: str, out_path: str) -> None:
        """Fetch a file from container to local."""
        self._set_conn_data()
        super().fetch_file(in_path, out_path)  # type: ignore[safe-super]
        display.vvv(
            f"FETCH {in_path} TO {out_path}", host=self.get_option("remote_addr")
        )

        in_path = self._prefix_login_path(in_path)
        # out_path is the final file path, but docker takes a directory, not a
        # file path
        out_dir = os.path.dirname(out_path)

        args = [
            self.docker_cmd,
            "cp",
            f"{self.get_option('remote_addr')}:{in_path}",
            out_dir,
        ]
        args = [to_bytes(i, errors="surrogate_or_strict") for i in args]

        with subprocess.Popen(
            args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        ) as p:
            p.communicate()

        if getattr(self._shell, "_IS_WINDOWS", False):
            import ntpath

            actual_out_path = ntpath.join(out_dir, ntpath.basename(in_path))
        else:
            actual_out_path = os.path.join(out_dir, os.path.basename(in_path))

        if p.returncode != 0:
            # Older docker does not have native support for fetching files command `cp`
            # If `cp` fails, try to use `dd` instead
            args = self._build_exec_cmd(
                [
                    self._play_context.executable,
                    "-c",
                    f"dd if={in_path} bs={BUFSIZE}",
                ]
            )
            args = [to_bytes(i, errors="surrogate_or_strict") for i in args]
            with open(
                to_bytes(actual_out_path, errors="surrogate_or_strict"), "wb"
            ) as out_file:
                try:
                    # pylint: disable-next=consider-using-with
                    pp = subprocess.Popen(
                        args,
                        stdin=subprocess.PIPE,
                        stdout=out_file,
                        stderr=subprocess.PIPE,
                    )
                except OSError as exc:
                    raise AnsibleError(
                        "docker connection requires dd command in the container to put files"
                    ) from exc
                stdout, stderr = pp.communicate()

                if pp.returncode != 0:
                    raise AnsibleError(
                        f"failed to fetch file {in_path} to {out_path}:\n{stdout!r}\n{stderr!r}"
                    )

        # Rename if needed
        if actual_out_path != out_path:
            os.rename(
                to_bytes(actual_out_path, errors="strict"),
                to_bytes(out_path, errors="strict"),
            )

    def close(self) -> None:
        """Terminate the connection. Nothing to do for Docker"""
        super().close()  # type: ignore[safe-super]
        self._connected = False

    def reset(self) -> None:
        # Clear container user cache
        self._container_user_cache = {}
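
A minimal inventory sketch for this CLI-based plugin (container name, user, and extra args are hypothetical); each running container is addressed as an ordinary inventory host:

all:
  hosts:
    mycontainer:
      ansible_connection: community.docker.docker
      ansible_user: appuser                                       # optional; needs Docker >= 1.7
      ansible_docker_extra_args: "--host=tcp://192.0.2.10:2375"   # optional; passed to the docker CLI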

@@ -0,0 +1,479 @@
# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
author:
  - Felix Fontein (@felixfontein)
name: docker_api
short_description: Run tasks in docker containers
version_added: 1.1.0
description:
  - Run commands or put/fetch files to an existing docker container.
  - Uses the L(requests library,https://pypi.org/project/requests/) to interact directly with the Docker daemon instead of
    using the Docker CLI. Use the P(community.docker.docker#connection) connection plugin if you want to use the Docker CLI.
notes:
  - Does B(not work with TCP TLS sockets)! This is caused by the inability to send C(close_notify) without closing the connection
    with Python's C(SSLSocket)s. See U(https://github.com/ansible-collections/community.docker/issues/605) for more information.
extends_documentation_fragment:
  - community.docker._docker.api_documentation
  - community.docker._docker.var_names
options:
  remote_user:
    type: str
    description:
      - The user to execute as inside the container.
    vars:
      - name: ansible_user
      - name: ansible_docker_user
    ini:
      - section: defaults
        key: remote_user
    env:
      - name: ANSIBLE_REMOTE_USER
    cli:
      - name: user
    keyword:
      - name: remote_user
  remote_addr:
    type: str
    description:
      - The name of the container you want to access.
    default: inventory_hostname
    vars:
      - name: inventory_hostname
      - name: ansible_host
      - name: ansible_docker_host
  container_timeout:
    default: 10
    description:
      - Controls how long we can wait to access reading output from the container once execution started.
    env:
      - name: ANSIBLE_TIMEOUT
      - name: ANSIBLE_DOCKER_TIMEOUT
        version_added: 2.2.0
    ini:
      - key: timeout
        section: defaults
      - key: timeout
        section: docker_connection
        version_added: 2.2.0
    vars:
      - name: ansible_docker_timeout
        version_added: 2.2.0
    cli:
      - name: timeout
    type: integer
  extra_env:
    description:
      - Provide extra environment variables to set when running commands in the Docker container.
      - This option can currently only be provided as Ansible variables due to limitations of ansible-core's configuration
        manager.
    vars:
      - name: ansible_docker_extra_env
    type: dict
    version_added: 3.12.0
  working_dir:
    description:
      - The directory inside the container to run commands in.
      - Requires Docker API version 1.35 or later.
    env:
      - name: ANSIBLE_DOCKER_WORKING_DIR
    ini:
      - key: working_dir
        section: docker_connection
    vars:
      - name: ansible_docker_working_dir
    type: string
    version_added: 3.12.0
  privileged:
    description:
      - Whether commands should be run with extended privileges.
      - B(Note) that this allows command to potentially break out of the container. Use with care!
    env:
      - name: ANSIBLE_DOCKER_PRIVILEGED
    ini:
      - key: privileged
        section: docker_connection
    vars:
      - name: ansible_docker_privileged
    type: boolean
    default: false
    version_added: 3.12.0
"""

import os
import os.path
import typing as t

from ansible.errors import AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.plugins.connection import ConnectionBase
from ansible.utils.display import Display

from ansible_collections.community.docker.plugins.module_utils._api.errors import (
    APIError,
    DockerException,
    NotFound,
)
from ansible_collections.community.docker.plugins.module_utils._common_api import (
    RequestException,
)
from ansible_collections.community.docker.plugins.module_utils._copy import (
    DockerFileCopyError,
    DockerFileNotFound,
    fetch_file,
    put_file,
)
from ansible_collections.community.docker.plugins.module_utils._version import (
    LooseVersion,
)
from ansible_collections.community.docker.plugins.plugin_utils._common_api import (
    AnsibleDockerClient,
)
from ansible_collections.community.docker.plugins.plugin_utils._socket_handler import (
    DockerSocketHandler,
)

if t.TYPE_CHECKING:
    from collections.abc import Callable

    _T = t.TypeVar("_T")


MIN_DOCKER_API = None


display = Display()


class Connection(ConnectionBase):
    """Local docker based connections"""

    transport = "community.docker.docker_api"
    has_pipelining = True

    def _call_client(
        self,
        f: Callable[[AnsibleDockerClient], _T],
        not_found_can_be_resource: bool = False,
    ) -> _T:
        if self.client is None:
            raise AssertionError("Client must be present")
        remote_addr = self.get_option("remote_addr")
        try:
            return f(self.client)
        except NotFound as e:
            if not_found_can_be_resource:
                raise AnsibleConnectionFailure(
                    f'Could not find container "{remote_addr}" or resource in it ({e})'
                ) from e
            raise AnsibleConnectionFailure(
                f'Could not find container "{remote_addr}" ({e})'
            ) from e
        except APIError as e:
            if e.response is not None and e.response.status_code == 409:
                raise AnsibleConnectionFailure(
                    f'The container "{remote_addr}" has been paused ({e})'
                ) from e
            self.client.fail(
                f'An unexpected Docker error occurred for container "{remote_addr}": {e}'
            )
        except DockerException as e:
            self.client.fail(
                f'An unexpected Docker error occurred for container "{remote_addr}": {e}'
            )
        except RequestException as e:
            self.client.fail(
                f'An unexpected requests error occurred for container "{remote_addr}" when trying to talk to the Docker daemon: {e}'
            )

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        super().__init__(*args, **kwargs)

        self.client: AnsibleDockerClient | None = None
        self.ids: dict[str | None, tuple[int, int]] = {}

        # Windows uses Powershell modules
        if getattr(self._shell, "_IS_WINDOWS", False):
            self.module_implementation_preferences = (".ps1", ".exe", "")

        self.actual_user: str | None = None

    def _connect(self) -> Connection:
        """Connect to the container. Nothing to do"""
        super()._connect()  # type: ignore[safe-super]
        if not self._connected:
            self.actual_user = self.get_option("remote_user")
            display.vvv(
                f"ESTABLISH DOCKER CONNECTION FOR USER: {self.actual_user or '?'}",
                host=self.get_option("remote_addr"),
            )
            if self.client is None:
                self.client = AnsibleDockerClient(
                    self, min_docker_api_version=MIN_DOCKER_API
                )
            self._connected = True

            if self.actual_user is None and display.verbosity > 2:
                # Since we are not setting the actual_user, look it up so we have it for logging later
                # Only do this if display verbosity is high enough that we'll need the value
                # This saves overhead from calling into docker when we do not need to
                display.vvv("Trying to determine actual user")
                result = self._call_client(
                    lambda client: client.get_json(
                        "/containers/{0}/json", self.get_option("remote_addr")
                    )
                )
                if result.get("Config"):
                    self.actual_user = result["Config"].get("User")
                    if self.actual_user is not None:
                        display.vvv(f"Actual user is '{self.actual_user}'")

        return self

    def exec_command(
        self, cmd: str, in_data: bytes | None = None, sudoable: bool = False
    ) -> tuple[int, bytes, bytes]:
        """Run a command on the docker host"""

        super().exec_command(cmd, in_data=in_data, sudoable=sudoable)  # type: ignore[safe-super]

        if self.client is None:
            raise AssertionError("Client must be present")

        command = [self._play_context.executable, "-c", cmd]

        do_become = self.become and self.become.expect_prompt() and sudoable

        stdin_part = (
            f", with stdin ({len(in_data)} bytes)" if in_data is not None else ""
        )
        become_part = ", with become prompt" if do_become else ""
        display.vvv(
            f"EXEC {to_text(command)}{stdin_part}{become_part}",
            host=self.get_option("remote_addr"),
        )

        need_stdin = bool((in_data is not None) or do_become)

        data = {
            "Container": self.get_option("remote_addr"),
            "User": self.get_option("remote_user") or "",
            "Privileged": self.get_option("privileged"),
            "Tty": False,
            "AttachStdin": need_stdin,
            "AttachStdout": True,
            "AttachStderr": True,
            "Cmd": command,
        }

        if "detachKeys" in self.client._general_configs:
            data["detachKeys"] = self.client._general_configs["detachKeys"]

        if self.get_option("extra_env"):
            data["Env"] = []
            for k, v in self.get_option("extra_env").items():
                for val, what in ((k, "Key"), (v, "Value")):
                    if not isinstance(val, str):
                        raise AnsibleConnectionFailure(
                            f"Non-string {what.lower()} found for extra_env option. Ambiguous env options must be "
                            "wrapped in quotes to avoid them being interpreted when directly specified "
                            "in YAML, or explicitly converted to strings when the option is templated. "
                            f"{what}: {val!r}"
                        )
                data["Env"].append(f"{k}={v}")

        if self.get_option("working_dir") is not None:
            data["WorkingDir"] = self.get_option("working_dir")
            if self.client.docker_api_version < LooseVersion("1.35"):
                raise AnsibleConnectionFailure(
                    "Providing the working directory requires Docker API version 1.35 or newer."
                    f" The Docker daemon the connection is using has API version {self.client.docker_api_version_str}."
                )

        exec_data = self._call_client(
            lambda client: client.post_json_to_json(
                "/containers/{0}/exec", self.get_option("remote_addr"), data=data
            )
        )
        exec_id = exec_data["Id"]

        data = {"Tty": False, "Detach": False}
        if need_stdin:
            exec_socket = self._call_client(
                lambda client: client.post_json_to_stream_socket(
                    "/exec/{0}/start", exec_id, data=data
                )
            )
            try:
                with DockerSocketHandler(
                    display, exec_socket, container=self.get_option("remote_addr")
                ) as exec_socket_handler:
                    if do_become:
                        assert self.become is not None

                        become_output = [b""]

                        def append_become_output(stream_id: int, data: bytes) -> None:
                            become_output[0] += data

                        exec_socket_handler.set_block_done_callback(
                            append_become_output
                        )

                        while not self.become.check_success(
                            become_output[0]
                        ) and not self.become.check_password_prompt(become_output[0]):
                            if not exec_socket_handler.select(
                                self.get_option("container_timeout")
                            ):
                                stdout, stderr = exec_socket_handler.consume()
                                raise AnsibleConnectionFailure(
                                    "timeout waiting for privilege escalation password prompt:\n"
                                    + to_text(become_output[0])
                                )

                            if exec_socket_handler.is_eof():
                                raise AnsibleConnectionFailure(
                                    "privilege output closed while waiting for password prompt:\n"
                                    + to_text(become_output[0])
                                )

                        if not self.become.check_success(become_output[0]):
                            become_pass = self.become.get_option(
                                "become_pass", playcontext=self._play_context
                            )
                            exec_socket_handler.write(
                                to_bytes(become_pass, errors="surrogate_or_strict")
                                + b"\n"
                            )

                    if in_data is not None:
                        exec_socket_handler.write(in_data)

                    stdout, stderr = exec_socket_handler.consume()
            finally:
                exec_socket.close()
        else:
            stdout, stderr = self._call_client(
                lambda client: client.post_json_to_stream(
                    "/exec/{0}/start",
                    exec_id,
                    stream=False,
                    demux=True,
                    tty=False,
                    data=data,
                )
            )

        result = self._call_client(
            lambda client: client.get_json("/exec/{0}/json", exec_id)
        )

        return result.get("ExitCode") or 0, stdout or b"", stderr or b""

    def _prefix_login_path(self, remote_path: str) -> str:
        """Make sure that we put files into a standard path

        If a path is relative, then we need to choose where to put it.
        ssh chooses $HOME but we are not guaranteed that a home dir will
        exist in any given chroot. So for now we are choosing "/" instead.
        This also happens to be the former default.

        Can revisit using $HOME instead if it is a problem
        """
        if getattr(self._shell, "_IS_WINDOWS", False):
            import ntpath

            return ntpath.normpath(remote_path)
        if not remote_path.startswith(os.path.sep):
            remote_path = os.path.join(os.path.sep, remote_path)
        return os.path.normpath(remote_path)

    def put_file(self, in_path: str, out_path: str) -> None:
        """Transfer a file from local to docker container"""
        super().put_file(in_path, out_path)  # type: ignore[safe-super]
        display.vvv(f"PUT {in_path} TO {out_path}", host=self.get_option("remote_addr"))

        if self.client is None:
            raise AssertionError("Client must be present")

        out_path = self._prefix_login_path(out_path)

        if self.actual_user not in self.ids:
            dummy, ids, dummy2 = self.exec_command("id -u && id -g")
            remote_addr = self.get_option("remote_addr")
            try:
                b_user_id, b_group_id = ids.splitlines()
                user_id, group_id = int(b_user_id), int(b_group_id)
                self.ids[self.actual_user] = user_id, group_id
                display.vvvv(
                    f'PUT: Determined uid={user_id} and gid={group_id} for user "{self.actual_user}"',
                    host=remote_addr,
                )
            except Exception as e:
                raise AnsibleConnectionFailure(
                    f'Error while determining user and group ID of current user in container "{remote_addr}": {e}\nGot value: {ids!r}'
                ) from e

        user_id, group_id = self.ids[self.actual_user]
        try:
            self._call_client(
                lambda client: put_file(
                    client,
                    container=self.get_option("remote_addr"),
                    in_path=in_path,
                    out_path=out_path,
                    user_id=user_id,
                    group_id=group_id,
                    user_name=self.actual_user,
                    follow_links=True,
                ),
                not_found_can_be_resource=True,
            )
        except DockerFileNotFound as exc:
            raise AnsibleFileNotFound(to_text(exc)) from exc
        except DockerFileCopyError as exc:
            raise AnsibleConnectionFailure(to_text(exc)) from exc

    def fetch_file(self, in_path: str, out_path: str) -> None:
        """Fetch a file from container to local."""
        super().fetch_file(in_path, out_path)  # type: ignore[safe-super]
        display.vvv(
            f"FETCH {in_path} TO {out_path}", host=self.get_option("remote_addr")
        )

        if self.client is None:
            raise AssertionError("Client must be present")

        in_path = self._prefix_login_path(in_path)

        try:
            self._call_client(
                lambda client: fetch_file(
                    client,
                    container=self.get_option("remote_addr"),
                    in_path=in_path,
                    out_path=out_path,
                    follow_links=True,
                    log=lambda msg: display.vvvv(
                        msg, host=self.get_option("remote_addr")
                    ),
                ),
                not_found_can_be_resource=True,
            )
        except DockerFileNotFound as exc:
            raise AnsibleFileNotFound(to_text(exc)) from exc
        except DockerFileCopyError as exc:
            raise AnsibleConnectionFailure(to_text(exc)) from exc

    def close(self) -> None:
        """Terminate the connection. Nothing to do for Docker"""
        super().close()  # type: ignore[safe-super]
        self._connected = False

    def reset(self) -> None:
        self.ids.clear()
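
The variable names mirror the CLI-based plugin, so switching an inventory between the two is mostly a one-line change. A sketch (container name is hypothetical):

all:
  hosts:
    mycontainer:
      ansible_connection: community.docker.docker_api
      ansible_docker_timeout: 30           # the container_timeout option, via its variable name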

@@ -0,0 +1,286 @@
# Copyright (c) 2021 Jeff Goldschrafe <jeff@holyhandgrenade.org>
# Based on Ansible local connection plugin by:
# Copyright (c) 2012 Michael DeHaan <michael.dehaan@gmail.com>
# Copyright (c) 2015, 2017 Toshio Kuratomi <tkuratomi@ansible.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
name: nsenter
short_description: execute on host running controller container
version_added: 1.9.0
description:
  - This connection plugin allows Ansible, running in a privileged container, to execute tasks on the container host instead
    of in the container itself.
  - This is useful for running Ansible in a pull model, while still keeping the Ansible control node containerized.
  - It relies on having privileged access to run C(nsenter) in the host's PID namespace, allowing it to enter the namespaces
    of the provided PID (default PID 1, or init/systemd).
author: Jeff Goldschrafe (@jgoldschrafe)
options:
  nsenter_pid:
    description:
      - PID to attach with using nsenter.
      - The default should be fine unless you are attaching as a non-root user.
    type: int
    default: 1
    vars:
      - name: ansible_nsenter_pid
    env:
      - name: ANSIBLE_NSENTER_PID
    ini:
      - section: nsenter_connection
        key: nsenter_pid
notes:
  - The remote user is ignored; this plugin always runs as root.
  - "This plugin requires the Ansible controller container to be launched in the following way: (1) The container image contains
    the C(nsenter) program; (2) The container is launched in privileged mode; (3) The container is launched in the host's
    PID namespace (C(--pid host))."
"""

import fcntl
import os
import pty
import selectors
import shlex
import subprocess
import typing as t

import ansible.constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.plugins.connection import ConnectionBase
from ansible.utils.display import Display
from ansible.utils.path import unfrackpath

display = Display()


class Connection(ConnectionBase):
    """Connections to a container host using nsenter"""

    transport = "community.docker.nsenter"
    has_pipelining = False

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        super().__init__(*args, **kwargs)
        self.cwd = None
        self._nsenter_pid = None

    def _connect(self) -> t.Self:
        self._nsenter_pid = self.get_option("nsenter_pid")

        # Because nsenter requires very high privileges, our remote user
        # is always assumed to be root.
        self._play_context.remote_user = "root"

        if not self._connected:
            display.vvv(
                f"ESTABLISH NSENTER CONNECTION FOR USER: {self._play_context.remote_user}",
                host=self._play_context.remote_addr,
            )
            self._connected = True
        return self

    def exec_command(
        self, cmd: str, in_data: bytes | None = None, sudoable: bool = True
    ) -> tuple[int, bytes, bytes]:
        super().exec_command(cmd, in_data=in_data, sudoable=sudoable)  # type: ignore[safe-super]

        display.debug("in nsenter.exec_command()")

        # pylint: disable-next=no-member
        def_executable: str | None = C.DEFAULT_EXECUTABLE  # type: ignore[attr-defined]
        executable = def_executable.split()[0] if def_executable else None

        if not os.path.exists(to_bytes(executable, errors="surrogate_or_strict")):
            raise AnsibleError(
                f"failed to find the executable specified {executable}."
                " Please verify if the executable exists and re-try."
            )

        # Rewrite the provided command to prefix it with nsenter
        nsenter_cmd_parts = [
            "nsenter",
            "--ipc",
            "--mount",
            "--net",
            "--pid",
            "--uts",
            "--preserve-credentials",
            f"--target={self._nsenter_pid}",
            "--",
        ]

        cmd_parts = nsenter_cmd_parts + [cmd]
        cmd_b = to_bytes(" ".join(cmd_parts))

        display.vvv(f"EXEC {to_text(cmd_b)}", host=self._play_context.remote_addr)
        display.debug("opening command with Popen()")

        master = None
        stdin = subprocess.PIPE

        # This plugin does not support pipelining. This diverges from the behavior of
        # the core "local" connection plugin that this one derives from.
        if sudoable and self.become and self.become.expect_prompt():
            # Create a pty if sudoable for privilege escalation that needs it.
            # Falls back to using a standard pipe if this fails, which may
            # cause the command to fail in certain situations where we are escalating
            # privileges or the command otherwise needs a pty.
            try:
                master, stdin = pty.openpty()
            except (IOError, OSError) as e:
                display.debug(f"Unable to open pty: {e}")

        with subprocess.Popen(
            cmd_b,
            shell=True,
            executable=executable,
            cwd=self.cwd,
            stdin=stdin,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        ) as p:
            assert p.stderr is not None
            assert p.stdin is not None
            assert p.stdout is not None
            # if we created a master, we can close the other half of the pty now, otherwise master is stdin
            if master is not None:
                os.close(stdin)

            display.debug("done running command with Popen()")

            if self.become and self.become.expect_prompt() and sudoable:
                fcntl.fcntl(
                    p.stdout,
                    fcntl.F_SETFL,
                    fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK,
                )
                fcntl.fcntl(
                    p.stderr,
                    fcntl.F_SETFL,
                    fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK,
                )
                selector = selectors.DefaultSelector()
                selector.register(p.stdout, selectors.EVENT_READ)
                selector.register(p.stderr, selectors.EVENT_READ)

                become_output = b""
                try:
                    while not self.become.check_success(
                        become_output
                    ) and not self.become.check_password_prompt(become_output):
                        events = selector.select(self._play_context.timeout)
                        if not events:
                            stdout, stderr = p.communicate()
                            raise AnsibleError(
                                "timeout waiting for privilege escalation password prompt:\n"
                                + to_text(become_output)
                            )

                        chunks = b""
                        for key, dummy_event in events:
                            if key.fileobj == p.stdout:
                                chunk = p.stdout.read()
                                if chunk:
                                    chunks += chunk
                            elif key.fileobj == p.stderr:
                                chunk = p.stderr.read()
                                if chunk:
                                    chunks += chunk

                        if not chunks:
                            stdout, stderr = p.communicate()
                            raise AnsibleError(
                                "privilege output closed while waiting for password prompt:\n"
                                + to_text(become_output)
                            )
                        become_output += chunks
                finally:
                    selector.close()

                if not self.become.check_success(become_output):
                    become_pass = self.become.get_option(
                        "become_pass", playcontext=self._play_context
                    )
                    if master is None:
                        p.stdin.write(
                            to_bytes(become_pass, errors="surrogate_or_strict") + b"\n"
                        )
                    else:
                        os.write(
                            master,
                            to_bytes(become_pass, errors="surrogate_or_strict") + b"\n",
                        )

                fcntl.fcntl(
                    p.stdout,
                    fcntl.F_SETFL,
                    fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK,
                )
                fcntl.fcntl(
                    p.stderr,
                    fcntl.F_SETFL,
                    fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK,
                )

            display.debug("getting output with communicate()")
            stdout, stderr = p.communicate(in_data)
            display.debug("done communicating")

        # finally, close the other half of the pty, if it was created
        if master:
            os.close(master)

        display.debug("done with nsenter.exec_command()")
        return (p.returncode, stdout, stderr)

    def put_file(self, in_path: str, out_path: str) -> None:
        super().put_file(in_path, out_path)  # type: ignore[safe-super]

        in_path = unfrackpath(in_path, basedir=self.cwd)
        out_path = unfrackpath(out_path, basedir=self.cwd)

        display.vvv(f"PUT {in_path} to {out_path}", host=self._play_context.remote_addr)
        try:
            with open(to_bytes(in_path, errors="surrogate_or_strict"), "rb") as in_file:
                in_data = in_file.read()
                rc, dummy_out, err = self.exec_command(
                    cmd=f"tee {shlex.quote(out_path)}", in_data=in_data
                )
                if rc != 0:
                    raise AnsibleError(
                        f"failed to transfer file to {out_path}: {to_text(err)}"
                    )
        except IOError as e:
            raise AnsibleError(f"failed to transfer file to {out_path}: {e}") from e

    def fetch_file(self, in_path: str, out_path: str) -> None:
        super().fetch_file(in_path, out_path)  # type: ignore[safe-super]

        in_path = unfrackpath(in_path, basedir=self.cwd)
        out_path = unfrackpath(out_path, basedir=self.cwd)

        try:
            rc, out, err = self.exec_command(cmd=f"cat {shlex.quote(in_path)}")
            display.vvv(
                f"FETCH {in_path} TO {out_path}", host=self._play_context.remote_addr
            )
            if rc != 0:
                raise AnsibleError(
                    f"failed to transfer file to {in_path}: {to_text(err)}"
                )
            with open(
                to_bytes(out_path, errors="surrogate_or_strict"), "wb"
            ) as out_file:
                out_file.write(out)
        except IOError as e:
            raise AnsibleError(
                f"failed to transfer file to {to_text(out_path)}: {e}"
            ) from e

    def close(self) -> None:
        """terminate the connection; nothing to do here"""
        self._connected = False
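
A sketch of how this plugin is typically wired up, assuming the controller container was started as the notes above require (for example with `docker run --privileged --pid host ...`):

all:
  hosts:
    container_host:
      ansible_connection: community.docker.nsenter
      ansible_nsenter_pid: 1               # default; targets the host's init/systemd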

@@ -0,0 +1,110 @@
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations


class ModuleDocFragment:

    # Standard documentation fragment
    DOCUMENTATION = r"""
options: {}
attributes:
  check_mode:
    description: Can run in C(check_mode) and return changed status prediction without modifying target.
  diff_mode:
    description: Will return details on what has changed (or possibly needs changing in C(check_mode)), when in diff mode.
  idempotent:
    description:
      - When run twice in a row outside check mode, with the same arguments, the second invocation indicates no change.
      - This assumes that the system controlled/queried by the module has not changed in a relevant way.
"""

    # Should be used together with the standard fragment
    IDEMPOTENT_NOT_MODIFY_STATE = r"""
options: {}
attributes:
  idempotent:
    support: full
    details:
      - This action does not modify state.
"""

    # Should be used together with the standard fragment
    INFO_MODULE = r"""
options: {}
attributes:
  check_mode:
    support: full
    details:
      - This action does not modify state.
  diff_mode:
    support: N/A
    details:
      - This action does not modify state.
"""

    ACTIONGROUP_DOCKER = r"""
options: {}
attributes:
  action_group:
    description: Use C(group/docker) or C(group/community.docker.docker) in C(module_defaults) to set defaults for this module.
    support: full
    membership:
      - community.docker.docker
      - docker
"""

    CONN = r"""
options: {}
attributes:
  become:
    description: Is usable alongside C(become) keywords.
  connection:
    description: Uses the target's configured connection information to execute code on it.
  delegation:
    description: Can be used in conjunction with C(delegate_to) and related keywords.
"""

    FACTS = r"""
options: {}
attributes:
  facts:
    description: Action returns an C(ansible_facts) dictionary that will update existing host facts.
"""

    # Should be used together with the standard fragment and the FACTS fragment
    FACTS_MODULE = r"""
options: {}
attributes:
  check_mode:
    support: full
    details:
      - This action does not modify state.
  diff_mode:
    support: N/A
    details:
      - This action does not modify state.
  facts:
    support: full
"""

    FILES = r"""
options: {}
attributes:
  safe_file_operations:
    description: Uses Ansible's strict file operation functions to ensure proper permissions and avoid data corruption.
"""

    FLOW = r"""
options: {}
attributes:
  action:
    description: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
  async:
    description: Supports being used with the C(async) keyword.
"""
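
These fragments are consumed via extends_documentation_fragment in a module's DOCUMENTATION. A sketch; the exact fragment name depends on how the collection registers this file (shown here as community.docker.attributes, with sub-fragments addressed by lower-cased attribute name):

extends_documentation_fragment:
  - community.docker.attributes
  - community.docker.attributes.actiongroup_docker
attributes:
  check_mode:
    support: full
  diff_mode:
    support: full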
@ -0,0 +1,82 @@
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations


class ModuleDocFragment:

    # Docker doc fragment
    DOCUMENTATION = r"""
options:
  project_src:
    description:
      - Path to a directory containing a Compose file (C(compose.yml), C(compose.yaml), C(docker-compose.yml), or C(docker-compose.yaml)).
      - If O(files) is provided, will look for these files in this directory instead.
      - Mutually exclusive with O(definition). One of O(project_src) and O(definition) must be provided.
    type: path
  project_name:
    description:
      - Provide a project name. If not provided, the project name is taken from the basename of O(project_src).
      - Required when O(definition) is provided.
    type: str
  files:
    description:
      - List of Compose file names relative to O(project_src) to be used instead of the main Compose file (C(compose.yml),
        C(compose.yaml), C(docker-compose.yml), or C(docker-compose.yaml)).
      - Files are loaded and merged in the order given.
      - Mutually exclusive with O(definition).
    type: list
    elements: path
    version_added: 3.7.0
  definition:
    description:
      - Compose file describing one or more services, networks and volumes.
      - Mutually exclusive with O(project_src) and O(files). One of O(project_src) and O(definition) must be provided.
      - If provided, PyYAML must be available to this module, and O(project_name) must be specified.
      - Note that a temporary directory will be created and deleted afterwards when using this option.
    type: dict
    version_added: 3.9.0
  env_files:
    description:
      - By default environment files are loaded from a C(.env) file located directly under the O(project_src) directory.
      - O(env_files) can be used to specify the path of one or multiple custom environment files instead.
      - The path is relative to the O(project_src) directory.
    type: list
    elements: path
  profiles:
    description:
      - List of profiles to enable when starting services.
      - Equivalent to C(docker compose --profile).
    type: list
    elements: str
  check_files_existing:
    description:
      - If set to V(false), the module will not check whether one of the files C(compose.yaml), C(compose.yml), C(docker-compose.yaml),
        or C(docker-compose.yml) exists in O(project_src) if O(files) is not provided.
      - This can be useful if environment files with C(COMPOSE_FILE) are used to configure a different filename. The module
        currently does not check for C(COMPOSE_FILE) in environment files or the current environment.
    type: bool
    default: true
    version_added: 3.9.0
requirements:
  - "PyYAML if O(definition) is used"
notes:
  - |-
    The Docker compose CLI plugin has no stable output format (see for example U(https://github.com/docker/compose/issues/10872)),
    and for the main operations also no machine friendly output format. The module tries to accommodate this with various
    version-dependent behavior adjustments and with testing older and newer versions of the Docker compose CLI plugin.
    Currently the module is tested with multiple plugin versions between 2.18.1 and 2.23.3. The exact list of plugin versions
    will change over time. New releases of the Docker compose CLI plugin can break this module at any time.
"""

    # The following needs to be kept in sync with the compose_v2 module utils
    MINIMUM_VERSION = r"""
options: {}
requirements:
  - "Docker CLI with Docker compose plugin 2.18.0 or later"
"""
@@ -0,0 +1,389 @@
# Copyright (c) Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations


class ModuleDocFragment:

    # Docker doc fragment
    DOCUMENTATION = r"""
options:
  docker_host:
    description:
      - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the TCP connection
        string. For example, V(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection, the module will automatically
        replace C(tcp) in the connection URL with C(https).
      - If the value is not specified in the task, the value of environment variable E(DOCKER_HOST) will be used instead.
        If the environment variable is not set, the default value will be used.
    type: str
    default: unix:///var/run/docker.sock
    aliases:
      - docker_url
  tls_hostname:
    description:
      - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_HOSTNAME) will be used instead.
        If the environment variable is not set, the default value will be used.
      - Note that this option had a default value V(localhost) in older versions. It was removed in community.docker 3.0.0.
      - B(Note:) this option is no longer supported for Docker SDK for Python 7.0.0+. Specifying it with Docker SDK for Python
        7.0.0 or newer will lead to an error.
    type: str
  api_version:
    description:
      - The version of the Docker API running on the Docker Host.
      - Defaults to the latest version of the API supported by Docker SDK for Python and the docker daemon.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_API_VERSION) will be used instead.
        If the environment variable is not set, the default value will be used.
    type: str
    default: auto
    aliases:
      - docker_api_version
  timeout:
    description:
      - The maximum amount of time in seconds to wait on a response from the API.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TIMEOUT) will be used instead.
        If the environment variable is not set, the default value will be used.
    type: int
    default: 60
  ca_path:
    description:
      - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set, the file C(ca.pem)
        from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
      - This option was called O(ca_cert) and got renamed to O(ca_path) in community.docker 3.6.0. The old name has been added
        as an alias and can still be used.
    type: path
    aliases:
      - ca_cert
      - tls_ca_cert
      - cacert_path
  client_cert:
    description:
      - Path to the client's TLS certificate file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set, the file C(cert.pem)
        from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
    type: path
    aliases:
      - tls_client_cert
      - cert_path
  client_key:
    description:
      - Path to the client's TLS key file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set, the file C(key.pem)
        from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
    type: path
    aliases:
      - tls_client_key
      - key_path
  tls:
    description:
      - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server. Note that
        if O(validate_certs) is set to V(true) as well, it will take precedence.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS) will be used instead. If
        the environment variable is not set, the default value will be used.
    type: bool
    default: false
  use_ssh_client:
    description:
      - For SSH transports, use the C(ssh) CLI tool instead of paramiko.
      - Requires Docker SDK for Python 4.4.0 or newer.
    type: bool
    default: false
    version_added: 1.5.0
  validate_certs:
    description:
      - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_VERIFY) will be used instead.
        If the environment variable is not set, the default value will be used.
    type: bool
    default: false
    aliases:
      - tls_verify
  debug:
    description:
      - Debug mode.
    type: bool
    default: false

notes:
  - Connect to the Docker daemon by providing parameters with each task or by defining environment variables. You can define
    E(DOCKER_HOST), E(DOCKER_TLS_HOSTNAME), E(DOCKER_API_VERSION), E(DOCKER_CERT_PATH), E(DOCKER_TLS), E(DOCKER_TLS_VERIFY)
    and E(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped with the product that sets up the environment.
    It will set these variables for you. See U(https://docs.docker.com/machine/reference/env/) for more details.
  - When connecting to Docker daemon with TLS, you might need to install additional Python packages. For the Docker SDK for
    Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(ansible.builtin.pip).
  - Note that the Docker SDK for Python only allows specifying the path to the Docker configuration for very few functions.
    In general, it will use C($HOME/.docker/config.json) if the E(DOCKER_CONFIG) environment variable is not specified, and
    use C($DOCKER_CONFIG/config.json) otherwise.
"""

    # For plugins: allow defining common options with Ansible variables
    VAR_NAMES = r"""
options:
  docker_host:
    vars:
      - name: ansible_docker_docker_host
  tls_hostname:
    vars:
      - name: ansible_docker_tls_hostname
  api_version:
    vars:
      - name: ansible_docker_api_version
  timeout:
    vars:
      - name: ansible_docker_timeout
  ca_path:
    vars:
      - name: ansible_docker_ca_cert
      - name: ansible_docker_ca_path
        version_added: 3.6.0
  client_cert:
    vars:
      - name: ansible_docker_client_cert
  client_key:
    vars:
      - name: ansible_docker_client_key
  tls:
    vars:
      - name: ansible_docker_tls
  validate_certs:
    vars:
      - name: ansible_docker_validate_certs
"""

    # Additional, more specific stuff for minimal Docker SDK for Python version >= 2.0.
    DOCKER_PY_2_DOCUMENTATION = r"""
options: {}
notes:
  - This module uses the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
    communicate with the Docker daemon.
requirements:
  - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
    Python module has been superseded by L(docker,https://pypi.org/project/docker/)
    (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
    This module does B(not) work with docker-py."
"""

    # Docker doc fragment when using the vendored API access code
    API_DOCUMENTATION = r"""
options:
  docker_host:
    description:
      - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
        TCP connection string. For example, V(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
        the module will automatically replace C(tcp) in the connection URL with C(https).
      - If the value is not specified in the task, the value of environment variable E(DOCKER_HOST) will be used
        instead. If the environment variable is not set, the default value will be used.
    type: str
    default: unix:///var/run/docker.sock
    aliases:
      - docker_url
  tls_hostname:
    description:
      - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_HOSTNAME) will
        be used instead. If the environment variable is not set, the default value will be used.
      - Note that this option had a default value V(localhost) in older versions. It was removed in community.docker 3.0.0.
    type: str
  api_version:
    description:
      - The version of the Docker API running on the Docker Host.
      - Defaults to the latest version of the API supported by this collection and the docker daemon.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_API_VERSION) will be
        used instead. If the environment variable is not set, the default value will be used.
    type: str
    default: auto
    aliases:
      - docker_api_version
  timeout:
    description:
      - The maximum amount of time in seconds to wait on a response from the API.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TIMEOUT) will be used
        instead. If the environment variable is not set, the default value will be used.
    type: int
    default: 60
  ca_path:
    description:
      - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
        the file C(ca.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
      - This option was called O(ca_cert) and got renamed to O(ca_path) in community.docker 3.6.0. The old name has
        been added as an alias and can still be used.
    type: path
    aliases:
      - ca_cert
      - tls_ca_cert
      - cacert_path
  client_cert:
    description:
      - Path to the client's TLS certificate file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
        the file C(cert.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
    type: path
    aliases:
      - tls_client_cert
      - cert_path
  client_key:
    description:
      - Path to the client's TLS key file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
        the file C(key.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
    type: path
    aliases:
      - tls_client_key
      - key_path
  tls:
    description:
      - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
        server. Note that if O(validate_certs) is set to V(true) as well, it will take precedence.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS) will be used
        instead. If the environment variable is not set, the default value will be used.
    type: bool
    default: false
  use_ssh_client:
    description:
      - For SSH transports, use the C(ssh) CLI tool instead of paramiko.
    type: bool
    default: false
    version_added: 1.5.0
  validate_certs:
    description:
      - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_VERIFY) will be
        used instead. If the environment variable is not set, the default value will be used.
    type: bool
    default: false
    aliases:
      - tls_verify
  debug:
    description:
      - Debug mode.
    type: bool
    default: false

notes:
  - Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
    You can define E(DOCKER_HOST), E(DOCKER_TLS_HOSTNAME), E(DOCKER_API_VERSION), E(DOCKER_CERT_PATH),
    E(DOCKER_TLS), E(DOCKER_TLS_VERIFY) and E(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
    with the product that sets up the environment. It will set these variables for you. See
    U(https://docs.docker.com/machine/reference/env/) for more details.
  # - Note that the Docker SDK for Python only allows specifying the path to the Docker configuration for very few functions.
  #   In general, it will use C($HOME/.docker/config.json) if the E(DOCKER_CONFIG) environment variable is not specified,
  #   and use C($DOCKER_CONFIG/config.json) otherwise.
  - This module does B(not) use the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
    communicate with the Docker daemon. It uses code derived from the Docker SDK for Python that is included in this
    collection.
requirements:
  - requests
  - pywin32 (when using named pipes on Windows)
  - paramiko (when using SSH with O(use_ssh_client=false))
  - pyOpenSSL (when using TLS)
"""

    # Docker doc fragment when using the Docker CLI
    CLI_DOCUMENTATION = r"""
options:
  docker_cli:
    description:
      - Path to the Docker CLI. If not provided, will search for Docker CLI on the E(PATH).
    type: path
  docker_host:
    description:
      - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
        TCP connection string. For example, V(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
        the module will automatically replace C(tcp) in the connection URL with C(https).
      - If the value is not specified in the task, the value of environment variable E(DOCKER_HOST) will be used
        instead. If the environment variable is not set, the default value will be used.
      - Mutually exclusive with O(cli_context). If neither O(docker_host) nor O(cli_context) are provided, the
        value V(unix:///var/run/docker.sock) is used.
    type: str
    aliases:
      - docker_url
  tls_hostname:
    description:
      - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_HOSTNAME) will
        be used instead. If the environment variable is not set, the default value will be used.
    type: str
  api_version:
    description:
      - The version of the Docker API running on the Docker Host.
      - Defaults to the latest version of the API supported by this collection and the docker daemon.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_API_VERSION) will be
        used instead. If the environment variable is not set, the default value will be used.
    type: str
    default: auto
    aliases:
      - docker_api_version
  ca_path:
    description:
      - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
        the file C(ca.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
    type: path
    aliases:
      - ca_cert
      - tls_ca_cert
      - cacert_path
  client_cert:
    description:
      - Path to the client's TLS certificate file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
        the file C(cert.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
    type: path
    aliases:
      - tls_client_cert
      - cert_path
  client_key:
    description:
      - Path to the client's TLS key file.
      - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set,
        the file C(key.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used.
    type: path
    aliases:
      - tls_client_key
      - key_path
  tls:
    description:
      - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
        server. Note that if O(validate_certs) is set to V(true) as well, it will take precedence.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS) will be used
        instead. If the environment variable is not set, the default value will be used.
    type: bool
    default: false
  validate_certs:
    description:
      - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_VERIFY) will be
        used instead. If the environment variable is not set, the default value will be used.
    type: bool
    default: false
    aliases:
      - tls_verify
  # debug:
  #   description:
  #     - Debug mode
  #   type: bool
  #   default: false
  cli_context:
    description:
      - The Docker CLI context to use.
      - Mutually exclusive with O(docker_host).
    type: str

notes:
  - Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
    You can define E(DOCKER_HOST), E(DOCKER_TLS_HOSTNAME), E(DOCKER_API_VERSION), E(DOCKER_CERT_PATH),
    E(DOCKER_TLS), E(DOCKER_TLS_VERIFY) and E(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
    with the product that sets up the environment. It will set these variables for you. See
    U(https://docs.docker.com/machine/reference/env/) for more details.
  - This module does B(not) use the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
    communicate with the Docker daemon. It directly calls the Docker CLI program.
"""
@@ -0,0 +1,423 @@
# Copyright (c) 2020, Felix Fontein <felix@fontein.de>
# For the parts taken from the docker inventory script:
# Copyright (c) 2016, Paul Durivage <paul.durivage@gmail.com>
# Copyright (c) 2016, Chris Houseknecht <house@redhat.com>
# Copyright (c) 2016, James Tanner <jtanner@redhat.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
name: docker_containers
short_description: Ansible dynamic inventory plugin for Docker containers
version_added: 1.1.0
author:
  - Felix Fontein (@felixfontein)
extends_documentation_fragment:
  - ansible.builtin.constructed
  - community.docker._docker.api_documentation
  - community.library_inventory_filtering_v1.inventory_filter
description:
  - Reads inventories from the Docker API.
  - Uses a YAML configuration file that ends with V(docker.(yml|yaml\)).
notes:
  - The configuration file must be a YAML file whose filename ends with V(docker.yml) or V(docker.yaml). Other filenames will
    not be accepted.
options:
  plugin:
    description:
      - The name of this plugin, it should always be set to V(community.docker.docker_containers) for this plugin to recognize
        it as its own.
    type: str
    required: true
    choices: [community.docker.docker_containers]

  connection_type:
    description:
      - Which connection type to use to connect to the containers.
      - One way to connect to containers is to use SSH (V(ssh)). For this, the options O(default_ip) and O(private_ssh_port)
        are used. This requires that an SSH daemon is running inside the containers.
      - Alternatively, V(docker-cli) selects the P(community.docker.docker#connection) connection plugin, and V(docker-api)
        (default) selects the P(community.docker.docker_api#connection) connection plugin.
      - When V(docker-api) is used, all Docker daemon configuration values are passed from the inventory plugin to the connection
        plugin. This can be controlled with O(configure_docker_daemon).
      - Note that the P(community.docker.docker_api#connection) connection plugin does B(not work with TCP TLS sockets)!
        See U(https://github.com/ansible-collections/community.docker/issues/605) for more information.
    type: str
    default: docker-api
    choices:
      - ssh
      - docker-cli
      - docker-api

  configure_docker_daemon:
    description:
      - Whether to pass all Docker daemon configuration from the inventory plugin to the connection plugin.
      - Only used when O(connection_type=docker-api).
    type: bool
    default: true
    version_added: 1.8.0

  verbose_output:
    description:
      - Toggle to (not) include all available inspection metadata.
      - Note that all top-level keys will be transformed to the format C(docker_xxx). For example, C(HostConfig) is converted
        to C(docker_hostconfig).
      - If this is V(false), these values can only be used during O(compose), O(groups), and O(keyed_groups).
      - The C(docker) inventory script always added these variables, so for compatibility set this to V(true).
    type: bool
    default: false

  default_ip:
    description:
      - The IP address to assign to ansible_host when the container's SSH port is mapped to interface '0.0.0.0'.
      - Only used if O(connection_type) is V(ssh).
    type: str
    default: 127.0.0.1

  private_ssh_port:
    description:
      - The port containers use for SSH.
      - Only used if O(connection_type) is V(ssh).
    type: int
    default: 22

  add_legacy_groups:
    description:
      - 'Add the same groups as the C(docker) inventory script does. These are the following:'
      - 'C(<container id>): contains the container of this ID.'
      - 'C(<container name>): contains the container that has this name.'
      - 'C(<container short id>): contains the containers that have this short ID (first 13 letters of ID).'
      - 'C(image_<image name>): contains the containers that have the image C(<image name>).'
      - 'C(stack_<stack name>): contains the containers that belong to the stack C(<stack name>).'
      - 'C(service_<service name>): contains the containers that belong to the service C(<service name>).'
      - 'C(<docker_host>): contains the containers which belong to the Docker daemon O(docker_host). Useful if you run this
        plugin against multiple Docker daemons.'
      - 'C(running): contains all containers that are running.'
      - 'C(stopped): contains all containers that are not running.'
      - If this is not set to V(true), you should use keyed groups to add the containers to groups. See the examples for how
        to do that.
    type: bool
    default: false

  filters:
    version_added: 3.5.0
"""

EXAMPLES = """
---
# Minimal example using local Docker daemon
plugin: community.docker.docker_containers
docker_host: unix:///var/run/docker.sock

---
# Minimal example using remote Docker daemon
plugin: community.docker.docker_containers
docker_host: tcp://my-docker-host:2375

---
# Example using remote Docker daemon with unverified TLS
plugin: community.docker.docker_containers
docker_host: tcp://my-docker-host:2376
tls: true

---
# Example using remote Docker daemon with verified TLS and client certificate verification
plugin: community.docker.docker_containers
docker_host: tcp://my-docker-host:2376
validate_certs: true
ca_path: /somewhere/ca.pem
client_key: /somewhere/key.pem
client_cert: /somewhere/cert.pem

---
# Example using constructed features to create groups
plugin: community.docker.docker_containers
docker_host: tcp://my-docker-host:2375
strict: false
keyed_groups:
  # Add containers with primary network foo to a network_foo group
  - prefix: network
    key: 'docker_hostconfig.NetworkMode'
  # Add Linux hosts to an os_linux group
  - prefix: os
    key: docker_platform

---
# Example using SSH connection with an explicit fallback for when port 22 has not been
# exported: use container name as ansible_ssh_host and 22 as ansible_ssh_port
plugin: community.docker.docker_containers
connection_type: ssh
compose:
  ansible_ssh_host: ansible_ssh_host | default(docker_name[1:], true)
  ansible_ssh_port: ansible_ssh_port | default(22, true)

---
# Only consider containers which have a label 'foo', or whose name starts with 'a'
plugin: community.docker.docker_containers
filters:
  # Accept all containers which have a label called 'foo'
  - include: >-
      "foo" in docker_config.Labels
  # Next accept all containers whose inventory_hostname starts with 'a'
  - include: >-
      inventory_hostname.startswith("a")
  # Exclude all containers that did not match any of the above filters
  - exclude: true
"""

import re
import typing as t

from ansible.errors import AnsibleError
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
from ansible_collections.community.library_inventory_filtering_v1.plugins.plugin_utils.inventory_filter import (
    filter_host,
    parse_filters,
)

from ansible_collections.community.docker.plugins.module_utils._api.errors import (
    APIError,
    DockerException,
)
from ansible_collections.community.docker.plugins.module_utils._common_api import (
    RequestException,
)
from ansible_collections.community.docker.plugins.module_utils._util import (
    DOCKER_COMMON_ARGS_VARS,
)
from ansible_collections.community.docker.plugins.plugin_utils._common_api import (
    AnsibleDockerClient,
)
from ansible_collections.community.docker.plugins.plugin_utils._unsafe import (
    make_unsafe,
)

if t.TYPE_CHECKING:
    from ansible.inventory.data import InventoryData
    from ansible.parsing.dataloader import DataLoader


MIN_DOCKER_API = None


class InventoryModule(BaseInventoryPlugin, Constructable):
    """Host inventory parser for ansible using Docker daemon as source."""

    NAME = "community.docker.docker_containers"

    def _slugify(self, value: str) -> str:
        slug = re.sub(r"[^\w-]", "_", value).lower().lstrip("_")
        return f"docker_{slug}"

    def _populate(self, client: AnsibleDockerClient) -> None:
        strict = self.get_option("strict")

        ssh_port = self.get_option("private_ssh_port")
        default_ip = self.get_option("default_ip")
        hostname = self.get_option("docker_host")
        verbose_output = self.get_option("verbose_output")
        connection_type = self.get_option("connection_type")
        add_legacy_groups = self.get_option("add_legacy_groups")

        if self.inventory is None:
            raise AssertionError("Inventory must be there")

        try:
            params = {
                "limit": -1,
                "all": 1,
                "size": 0,
                "trunc_cmd": 0,
                "since": None,
                "before": None,
            }
            containers = client.get_json("/containers/json", params=params)
        except APIError as exc:
            raise AnsibleError(f"Error listing containers: {exc}") from exc

        if add_legacy_groups:
            self.inventory.add_group("running")
            self.inventory.add_group("stopped")

        extra_facts = {}
        if self.get_option("configure_docker_daemon"):
            for option_name, var_name in DOCKER_COMMON_ARGS_VARS.items():
                value = self.get_option(option_name)
                if value is not None:
                    extra_facts[var_name] = value

        filters = parse_filters(self.get_option("filters"))
        for container in containers:
            container_id = container.get("Id")
            short_container_id = container_id[:13]

            try:
                name = container.get("Names", [])[0].lstrip("/")
                full_name = name
            except IndexError:
                name = short_container_id
                full_name = container_id

            facts = {
                "docker_name": make_unsafe(name),
                "docker_short_id": make_unsafe(short_container_id),
            }
            full_facts = {}

            try:
                inspect = client.get_json("/containers/{0}/json", container_id)
            except APIError as exc:
                raise AnsibleError(
                    f"Error inspecting container {name} - {exc}"
                ) from exc

            state = inspect.get("State") or {}
            config = inspect.get("Config") or {}
            labels = config.get("Labels") or {}

            running = state.get("Running")

            groups = []

            # Add container to groups
            image_name = config.get("Image")
            if image_name and add_legacy_groups:
                groups.append(f"image_{image_name}")

            stack_name = labels.get("com.docker.stack.namespace")
            if stack_name:
                full_facts["docker_stack"] = stack_name
                if add_legacy_groups:
                    groups.append(f"stack_{stack_name}")

            service_name = labels.get("com.docker.swarm.service.name")
            if service_name:
                full_facts["docker_service"] = service_name
                if add_legacy_groups:
                    groups.append(f"service_{service_name}")

            ansible_connection = None
            if connection_type == "ssh":
                # Figure out ssh IP and Port
                try:
                    # Lookup the public facing port Nat'ed to ssh port.
                    network_settings = inspect.get("NetworkSettings") or {}
                    port_settings = network_settings.get("Ports") or {}
                    port = port_settings.get(f"{ssh_port}/tcp")[0]  # type: ignore[index]
                except (IndexError, AttributeError, TypeError):
                    port = {}

                try:
                    ip = default_ip if port["HostIp"] == "0.0.0.0" else port["HostIp"]
                except KeyError:
                    ip = ""

                facts.update(
                    {
                        "ansible_ssh_host": ip,
                        "ansible_ssh_port": port.get("HostPort", 0),
                    }
                )
            elif connection_type == "docker-cli":
                facts.update(
                    {
                        "ansible_host": full_name,
                    }
                )
                ansible_connection = "community.docker.docker"
            elif connection_type == "docker-api":
                facts.update(
                    {
                        "ansible_host": full_name,
                    }
                )
                facts.update(extra_facts)
                ansible_connection = "community.docker.docker_api"

            full_facts.update(facts)
            for key, value in inspect.items():
                fact_key = self._slugify(key)
                full_facts[fact_key] = value

            full_facts = make_unsafe(full_facts)

            if ansible_connection:
                for d in (facts, full_facts):
                    if "ansible_connection" not in d:
                        d["ansible_connection"] = ansible_connection

            if not filter_host(self, name, full_facts, filters):
                continue

            if verbose_output:
                facts.update(full_facts)

            self.inventory.add_host(name)
            for group in groups:
                self.inventory.add_group(group)
                self.inventory.add_host(name, group=group)

            for key, value in facts.items():
                self.inventory.set_variable(name, key, value)

            # Use constructed if applicable
            # Composed variables
            self._set_composite_vars(
                self.get_option("compose"), full_facts, name, strict=strict
            )
            # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
            self._add_host_to_composed_groups(
                self.get_option("groups"), full_facts, name, strict=strict
            )
            # Create groups based on variable values and add the corresponding hosts to it
            self._add_host_to_keyed_groups(
                self.get_option("keyed_groups"), full_facts, name, strict=strict
            )

            # We need to do this last since we also add a group called `name`.
            # When we do this before a set_variable() call, the variables are assigned
            # to the group, and not to the host.
            if add_legacy_groups:
                self.inventory.add_group(container_id)
                self.inventory.add_host(name, group=container_id)
                self.inventory.add_group(name)
                self.inventory.add_host(name, group=name)
                self.inventory.add_group(short_container_id)
                self.inventory.add_host(name, group=short_container_id)
                self.inventory.add_group(hostname)
                self.inventory.add_host(name, group=hostname)

                if running is True:
                    self.inventory.add_host(name, group="running")
                else:
                    self.inventory.add_host(name, group="stopped")

    def verify_file(self, path: str) -> bool:
        """Return the possibility of a file being consumable by this plugin."""
        return super().verify_file(path) and path.endswith(
            ("docker.yaml", "docker.yml")
        )

    def _create_client(self) -> AnsibleDockerClient:
        return AnsibleDockerClient(self, min_docker_api_version=MIN_DOCKER_API)

    def parse(
        self,
        inventory: InventoryData,
        loader: DataLoader,
        path: str,
        cache: bool = True,
    ) -> None:
        super().parse(inventory, loader, path, cache)
        self._read_config_data(path)
        client = self._create_client()
        try:
            self._populate(client)
        except DockerException as e:
            raise AnsibleError(f"An unexpected Docker error occurred: {e}") from e
        except RequestException as e:
            raise AnsibleError(
                f"An unexpected requests error occurred when trying to talk to the Docker daemon: {e}"
            ) from e
@@ -0,0 +1,359 @@
# Copyright (c) 2019, Ximon Eighteen <ximon.eighteen@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
name: docker_machine
author: Ximon Eighteen (@ximon18)
short_description: Docker Machine inventory source
requirements:
  - L(Docker Machine,https://docs.docker.com/machine/)
extends_documentation_fragment:
  - ansible.builtin.constructed
  - community.library_inventory_filtering_v1.inventory_filter
description:
  - Get inventory hosts from Docker Machine.
  - Uses a YAML configuration file that ends with V(docker_machine.(yml|yaml\)).
  - The plugin sets standard host variables C(ansible_host), C(ansible_port), C(ansible_user) and C(ansible_ssh_private_key_file).
  - The plugin stores the Docker Machine 'env' output variables in C(dm_) prefixed host variables.
notes:
  - The configuration file must be a YAML file whose filename ends with V(docker_machine.yml) or V(docker_machine.yaml). Other
    filenames will not be accepted.
options:
  plugin:
    description: Token that ensures this is a source file for the C(docker_machine) plugin.
    required: true
    choices: ['docker_machine', 'community.docker.docker_machine']
  daemon_env:
    description:
      - Whether docker daemon connection environment variables should be fetched, and how to behave if they cannot be fetched.
      - With V(require) and V(require-silently), fetch them and skip any host for which they cannot be fetched. A warning
        will be issued for any skipped host if the choice is V(require).
      - With V(optional) and V(optional-silently), fetch them and do not skip hosts for which they cannot be fetched. A warning
        will be issued for hosts where they cannot be fetched if the choice is V(optional).
      - With V(skip), do not attempt to fetch the docker daemon connection environment variables.
      - If fetched successfully, the variables will be prefixed with C(dm_) and stored as host variables.
    type: str
    choices:
      - require
      - require-silently
      - optional
      - optional-silently
      - skip
    default: require
  running_required:
    description:
      - When V(true), hosts which Docker Machine indicates are in a state other than C(running) will be skipped.
    type: bool
    default: true
  verbose_output:
    description:
      - When V(true), include all available nodes metadata (for example C(Image), C(Region), C(Size)) as a JSON object named
        C(docker_machine_node_attributes).
    type: bool
    default: true
  filters:
    version_added: 3.5.0
"""

EXAMPLES = """
---
# Minimal example
plugin: community.docker.docker_machine

---
# Example using constructed features to create a group per Docker Machine driver
# (https://docs.docker.com/machine/drivers/), for example:
# $ docker-machine create --driver digitalocean ... mymachine
# $ ansible-inventory -i ./path/to/docker-machine.yml --host=mymachine
# {
#   ...
#   "digitalocean": {
#     "hosts": [
#       "mymachine"
#     ]
#   ...
# }
plugin: community.docker.docker_machine
strict: false
keyed_groups:
  - separator: ''
    key: docker_machine_node_attributes.DriverName

---
# Example grouping hosts by Docker Machine tag
plugin: community.docker.docker_machine
strict: false
keyed_groups:
  - prefix: tag
    key: 'dm_tags'

---
# Example using compose to override the default SSH behaviour of asking the user to accept the remote host key
plugin: community.docker.docker_machine
compose:
  ansible_ssh_common_args: '"-o StrictHostKeyChecking=accept-new"'
"""

import json
import re
import subprocess
import typing as t

from ansible.errors import AnsibleError
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.text.converters import to_text
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, Constructable
from ansible.utils.display import Display
from ansible_collections.community.library_inventory_filtering_v1.plugins.plugin_utils.inventory_filter import (
    filter_host,
    parse_filters,
)

from ansible_collections.community.docker.plugins.plugin_utils._unsafe import (
    make_unsafe,
)

if t.TYPE_CHECKING:
    from ansible.inventory.data import InventoryData
    from ansible.parsing.dataloader import DataLoader

DaemonEnv = t.Literal[
    "require", "require-silently", "optional", "optional-silently", "skip"
]


display = Display()


class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
    """Host inventory parser for ansible using Docker machine as source."""

    NAME = "community.docker.docker_machine"

    docker_machine_path: str | None = None

    def _run_command(self, args: list[str]) -> str:
        if not self.docker_machine_path:
            try:
                self.docker_machine_path = get_bin_path("docker-machine")
            except ValueError as e:
                raise AnsibleError(to_text(e)) from e

        command = [self.docker_machine_path]
        command.extend(args)
        display.debug(f"Executing command {command}")
        try:
            result = subprocess.check_output(command)
        except subprocess.CalledProcessError as e:
            display.warning(
                f"Exception {type(e).__name__} caught while executing command {command}, this was the original exception: {e}"
            )
            raise e

        return to_text(result).strip()

    def _get_docker_daemon_variables(self, machine_name: str) -> list[tuple[str, str]]:
        """
        Capture settings from Docker Machine that would be needed to connect to the remote Docker daemon installed on
        the Docker Machine remote host. Note: passing '--shell=sh' is a workaround for 'Error: Unknown shell'.
        """
        try:
            env_lines = self._run_command(
                ["env", "--shell=sh", machine_name]
            ).splitlines()
        except subprocess.CalledProcessError:
            # This can happen when the machine is created but provisioning is incomplete
            return []

        # example output of docker-machine env --shell=sh:
        # export DOCKER_TLS_VERIFY="1"
        # export DOCKER_HOST="tcp://134.209.204.160:2376"
        # export DOCKER_CERT_PATH="/root/.docker/machine/machines/routinator"
        # export DOCKER_MACHINE_NAME="routinator"
        # # Run this command to configure your shell:
        # # eval $(docker-machine env --shell=bash routinator)

        # capture any of the DOCKER_xxx variables that were output and create Ansible host vars
        # with the same name and value but with a dm_ name prefix.
        env_vars = []
        for line in env_lines:
            match = re.search('(DOCKER_[^=]+)="([^"]+)"', line)
            if match:
                env_var_name = match.group(1)
                env_var_value = match.group(2)
                env_vars.append((env_var_name, env_var_value))

        return env_vars

    def _get_machine_names(self) -> list[str]:
        # Filter out machines that are not in the Running state, as we probably cannot perform any useful actions
        # on them.
        ls_command = ["ls", "-q"]
        if self.get_option("running_required"):
            ls_command.extend(["--filter", "state=Running"])

        try:
            ls_lines = self._run_command(ls_command)
        except subprocess.CalledProcessError:
            return []

        return ls_lines.splitlines()

    def _inspect_docker_machine_host(self, node: str) -> t.Any | None:
        try:
            inspect_lines = self._run_command(["inspect", node])
        except subprocess.CalledProcessError:
            return None

        return json.loads(inspect_lines)

    def _ip_addr_docker_machine_host(self, node: str) -> t.Any | None:
        try:
            ip_addr = self._run_command(["ip", node])
        except subprocess.CalledProcessError:
            return None

        return ip_addr

    def _should_skip_host(
        self,
        machine_name: str,
        env_var_tuples: list[tuple[str, str]],
        daemon_env: DaemonEnv,
    ) -> bool:
        if not env_var_tuples:
            warning_prefix = f"Unable to fetch Docker daemon env vars from Docker Machine for host {machine_name}"
            if daemon_env in ("require", "require-silently"):
                if daemon_env == "require":
                    display.warning(f"{warning_prefix}: host will be skipped")
                return True
            if daemon_env == "optional":
                display.warning(
                    f"{warning_prefix}: host will lack dm_DOCKER_xxx variables"
                )
            # daemon_env is 'optional-silently'
        return False

    def _populate(self) -> None:
        if self.inventory is None:
            raise AssertionError("Inventory must be there")

        daemon_env: DaemonEnv = self.get_option("daemon_env")
        filters = parse_filters(self.get_option("filters"))
        try:
            for node in self._get_machine_names():
                node_attrs = self._inspect_docker_machine_host(node)
                if not node_attrs:
                    continue

                unsafe_node_attrs = make_unsafe(node_attrs)

                machine_name = unsafe_node_attrs["Driver"]["MachineName"]
                if not filter_host(self, machine_name, unsafe_node_attrs, filters):
                    continue

                # query `docker-machine env` to obtain remote Docker daemon connection settings in the form of commands
                # that could be used to set environment variables to influence a local Docker client:
                if daemon_env == "skip":
                    env_var_tuples = []
                else:
                    env_var_tuples = self._get_docker_daemon_variables(machine_name)
                    if self._should_skip_host(machine_name, env_var_tuples, daemon_env):
                        continue

                # add an entry in the inventory for this host
                self.inventory.add_host(machine_name)

                # check for valid ip address from inspect output, else explicitly use ip command to find host ip address
                # this works around an issue seen with Google Compute Platform where the IP address was not available
                # via the 'inspect' subcommand but was via the 'ip' subcommand.
                if unsafe_node_attrs["Driver"]["IPAddress"]:
                    ip_addr = unsafe_node_attrs["Driver"]["IPAddress"]
                else:
                    ip_addr = self._ip_addr_docker_machine_host(node)

                # set standard Ansible remote host connection settings to details captured from `docker-machine`
                # see: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html
                self.inventory.set_variable(
                    machine_name, "ansible_host", make_unsafe(ip_addr)
                )
                self.inventory.set_variable(
                    machine_name, "ansible_port", unsafe_node_attrs["Driver"]["SSHPort"]
                )
                self.inventory.set_variable(
                    machine_name, "ansible_user", unsafe_node_attrs["Driver"]["SSHUser"]
                )
                self.inventory.set_variable(
                    machine_name,
                    "ansible_ssh_private_key_file",
                    unsafe_node_attrs["Driver"]["SSHKeyPath"],
                )

                # set variables based on Docker Machine tags
                tags = unsafe_node_attrs["Driver"].get("Tags") or ""
                self.inventory.set_variable(machine_name, "dm_tags", make_unsafe(tags))

                # set variables based on Docker Machine env variables
                for kv in env_var_tuples:
                    self.inventory.set_variable(
                        machine_name, f"dm_{kv[0]}", make_unsafe(kv[1])
                    )

                if self.get_option("verbose_output"):
                    self.inventory.set_variable(
                        machine_name,
                        "docker_machine_node_attributes",
                        unsafe_node_attrs,
                    )

                # Use constructed if applicable
                strict = self.get_option("strict")

                # Composed variables
                self._set_composite_vars(
                    self.get_option("compose"),
                    unsafe_node_attrs,
                    machine_name,
                    strict=strict,
                )

                # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
                self._add_host_to_composed_groups(
                    self.get_option("groups"),
                    unsafe_node_attrs,
                    machine_name,
                    strict=strict,
                )

                # Create groups based on variable values and add the corresponding hosts to it
                self._add_host_to_keyed_groups(
                    self.get_option("keyed_groups"),
                    unsafe_node_attrs,
                    machine_name,
                    strict=strict,
                )

        except Exception as e:
            raise AnsibleError(
                f"Unable to fetch hosts from Docker Machine, this was the original exception: {e}"
            ) from e

    def verify_file(self, path: str) -> bool:
        """Return the possibility of a file being consumable by this plugin."""
        return super().verify_file(path) and path.endswith(
            ("docker_machine.yaml", "docker_machine.yml")
        )

    def parse(
        self,
        inventory: InventoryData,
        loader: DataLoader,
        path: str,
        cache: bool = True,
    ) -> None:
        super().parse(inventory, loader, path, cache)
        self._read_config_data(path)
        self._populate()
@@ -0,0 +1,338 @@
# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
name: docker_swarm
author:
  - Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
short_description: Ansible dynamic inventory plugin for Docker swarm nodes
requirements:
  - L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
extends_documentation_fragment:
  - ansible.builtin.constructed
  - community.library_inventory_filtering_v1.inventory_filter
description:
  - Reads inventories from the Docker swarm API.
  - Uses a YAML configuration file that ends with V(docker_swarm.(yml|yaml\)).
  - 'The plugin returns the following groups of swarm nodes: C(all) - all hosts; C(workers) - all worker nodes; C(managers) -
    all manager nodes; C(leader) - the swarm leader node; C(nonleaders) - all nodes except the swarm leader.'
notes:
  - The configuration file must be a YAML file whose filename ends with V(docker_swarm.yml) or V(docker_swarm.yaml). Other
    filenames will not be accepted.
options:
  plugin:
    description: The name of this plugin, it should always be set to V(community.docker.docker_swarm) for this plugin to recognize
      it as its own.
    type: str
    required: true
    choices: [docker_swarm, community.docker.docker_swarm]
  docker_host:
    description:
      - Socket of a Docker swarm manager node (C(tcp), C(unix)).
      - Use V(unix:///var/run/docker.sock) to connect through a local socket.
    type: str
    required: true
    aliases: [docker_url]
  verbose_output:
    description: Toggle to (not) include all available nodes metadata (for example C(Platform), C(Architecture), C(OS), C(EngineVersion)).
    type: bool
    default: true
  tls:
    description: Connect using TLS without verifying the authenticity of the Docker host server.
    type: bool
    default: false
  validate_certs:
    description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker host server.
    type: bool
    default: false
    aliases: [tls_verify]
  client_key:
    description: Path to the client's TLS key file.
    type: path
    aliases: [tls_client_key, key_path]
  ca_path:
    description:
      - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
      - This option was called O(ca_cert) and got renamed to O(ca_path) in community.docker 3.6.0. The old name has been added
        as an alias and can still be used.
    type: path
    aliases: [ca_cert, tls_ca_cert, cacert_path]
  client_cert:
    description: Path to the client's TLS certificate file.
    type: path
    aliases: [tls_client_cert, cert_path]
  tls_hostname:
    description: When verifying the authenticity of the Docker host server, provide the expected name of the server.
    type: str
  api_version:
    description:
      - The version of the Docker API running on the Docker Host.
      - Defaults to the latest version of the API supported by Docker SDK for Python.
    type: str
    aliases: [docker_api_version]
  timeout:
    description:
      - The maximum amount of time in seconds to wait on a response from the API.
      - If the value is not specified in the task, the value of environment variable E(DOCKER_TIMEOUT) will be used instead.
        If the environment variable is not set, the default value will be used.
    type: int
    default: 60
    aliases: [time_out]
  use_ssh_client:
    description:
      - For SSH transports, use the C(ssh) CLI tool instead of paramiko.
      - Requires Docker SDK for Python 4.4.0 or newer.
    type: bool
    default: false
    version_added: 1.5.0
  include_host_uri:
    description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the swarm leader
      in format of V(tcp://172.16.0.1:2376). This value may be used without additional modification as value of option O(docker_host)
      in Docker Swarm modules when connecting through the API. The port always defaults to V(2376).
    type: bool
    default: false
  include_host_uri_port:
    description: Override the detected port number included in C(ansible_host_uri).
    type: int
  filters:
    version_added: 3.5.0
"""
|
||||
|
||||
EXAMPLES = """
|
||||
---
|
||||
# Minimal example using local docker
|
||||
plugin: community.docker.docker_swarm
|
||||
docker_host: unix:///var/run/docker.sock
|
||||
|
||||
---
|
||||
# Minimal example using remote docker
|
||||
plugin: community.docker.docker_swarm
|
||||
docker_host: tcp://my-docker-host:2375
|
||||
|
||||
---
|
||||
# Example using remote docker with unverified TLS
|
||||
plugin: community.docker.docker_swarm
|
||||
docker_host: tcp://my-docker-host:2376
|
||||
tls: true
|
||||
|
||||
---
|
||||
# Example using remote docker with verified TLS and client certificate verification
|
||||
plugin: community.docker.docker_swarm
|
||||
docker_host: tcp://my-docker-host:2376
|
||||
validate_certs: true
|
||||
ca_path: /somewhere/ca.pem
|
||||
client_key: /somewhere/key.pem
|
||||
client_cert: /somewhere/cert.pem
|
||||
|
||||
---
|
||||
# Example using constructed features to create groups and set ansible_host
|
||||
plugin: community.docker.docker_swarm
|
||||
docker_host: tcp://my-docker-host:2375
|
||||
strict: false
|
||||
keyed_groups:
|
||||
# add for example x86_64 hosts to an arch_x86_64 group
|
||||
- prefix: arch
|
||||
key: 'Description.Platform.Architecture'
|
||||
# add for example linux hosts to an os_linux group
|
||||
- prefix: os
|
||||
key: 'Description.Platform.OS'
|
||||
# create a group per node label
|
||||
# for example a node labeled with "production" ends up in group "label_production"
|
||||
# hint: labels containing special characters will be converted to safe names
|
||||
- key: 'Spec.Labels'
|
||||
prefix: label
|
||||
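---
# Example returning ansible_host_uri with a custom port
# (host name and port value below are illustrative)
plugin: community.docker.docker_swarm
docker_host: tcp://my-docker-host:2375
include_host_uri: true
include_host_uri_port: 2377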
"""
|
||||
|
||||
import typing as t
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.parsing.utils.addresses import parse_address
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||
from ansible_collections.community.library_inventory_filtering_v1.plugins.plugin_utils.inventory_filter import (
|
||||
filter_host,
|
||||
parse_filters,
|
||||
)
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._common import (
|
||||
get_connect_params,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._util import (
|
||||
update_tls_hostname,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.plugin_utils._unsafe import (
|
||||
make_unsafe,
|
||||
)
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from ansible.inventory.data import InventoryData
|
||||
from ansible.parsing.dataloader import DataLoader
|
||||
|
||||
|
||||
try:
|
||||
import docker
|
||||
|
||||
HAS_DOCKER = True
|
||||
except ImportError:
|
||||
HAS_DOCKER = False
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable):
|
||||
"""Host inventory parser for ansible using Docker swarm as source."""
|
||||
|
||||
NAME = "community.docker.docker_swarm"
|
||||
|
||||
def _fail(self, msg: str) -> t.NoReturn:
|
||||
raise AnsibleError(msg)
|
||||
|
||||
def _populate(self) -> None:
|
||||
if self.inventory is None:
|
||||
raise AssertionError("Inventory must be there")
|
||||
|
||||
raw_params = {
|
||||
"docker_host": self.get_option("docker_host"),
|
||||
"tls": self.get_option("tls"),
|
||||
"tls_verify": self.get_option("validate_certs"),
|
||||
"key_path": self.get_option("client_key"),
|
||||
"cacert_path": self.get_option("ca_path"),
|
||||
"cert_path": self.get_option("client_cert"),
|
||||
"tls_hostname": self.get_option("tls_hostname"),
|
||||
"api_version": self.get_option("api_version"),
|
||||
"timeout": self.get_option("timeout"),
|
||||
"use_ssh_client": self.get_option("use_ssh_client"),
|
||||
"debug": None,
|
||||
}
|
||||
update_tls_hostname(raw_params)
|
||||
connect_params = get_connect_params(raw_params, fail_function=self._fail)
|
||||
client = docker.DockerClient(**connect_params)
|
||||
self.inventory.add_group("all")
|
||||
self.inventory.add_group("manager")
|
||||
self.inventory.add_group("worker")
|
||||
self.inventory.add_group("leader")
|
||||
self.inventory.add_group("nonleaders")
|
||||
|
||||
filters = parse_filters(self.get_option("filters"))
|
||||
|
||||
if self.get_option("include_host_uri"):
|
||||
if self.get_option("include_host_uri_port"):
|
||||
host_uri_port = str(self.get_option("include_host_uri_port"))
|
||||
elif self.get_option("tls") or self.get_option("validate_certs"):
|
||||
host_uri_port = "2376"
|
||||
else:
|
||||
host_uri_port = "2375"
|
||||
|
||||
try:
|
||||
nodes = client.nodes.list()
|
||||
for node in nodes:
|
||||
node_attrs = client.nodes.get(node.id).attrs
|
||||
unsafe_node_attrs = make_unsafe(node_attrs)
|
||||
if not filter_host(
|
||||
self, unsafe_node_attrs["ID"], unsafe_node_attrs, filters
|
||||
):
|
||||
continue
|
||||
self.inventory.add_host(unsafe_node_attrs["ID"])
|
||||
self.inventory.add_host(
|
||||
unsafe_node_attrs["ID"], group=unsafe_node_attrs["Spec"]["Role"]
|
||||
)
|
||||
self.inventory.set_variable(
|
||||
unsafe_node_attrs["ID"],
|
||||
"ansible_host",
|
||||
unsafe_node_attrs["Status"]["Addr"],
|
||||
)
|
||||
if self.get_option("include_host_uri"):
|
||||
self.inventory.set_variable(
|
||||
unsafe_node_attrs["ID"],
|
||||
"ansible_host_uri",
|
||||
make_unsafe(
|
||||
"tcp://"
|
||||
+ unsafe_node_attrs["Status"]["Addr"]
|
||||
+ ":"
|
||||
+ host_uri_port
|
||||
),
|
||||
)
|
||||
if self.get_option("verbose_output"):
|
||||
self.inventory.set_variable(
|
||||
unsafe_node_attrs["ID"],
|
||||
"docker_swarm_node_attributes",
|
||||
unsafe_node_attrs,
|
||||
)
|
||||
if "ManagerStatus" in unsafe_node_attrs:
|
||||
if unsafe_node_attrs["ManagerStatus"].get("Leader"):
|
||||
# This is a workaround for a Docker bug where in some cases the leader IP is 0.0.0.0
|
||||
# Check moby/moby#35437 for details
|
||||
swarm_leader_ip = (
|
||||
parse_address(node_attrs["ManagerStatus"]["Addr"])[0]
|
||||
or unsafe_node_attrs["Status"]["Addr"]
|
||||
)
|
||||
if self.get_option("include_host_uri"):
|
||||
self.inventory.set_variable(
|
||||
unsafe_node_attrs["ID"],
|
||||
"ansible_host_uri",
|
||||
make_unsafe(
|
||||
"tcp://" + swarm_leader_ip + ":" + host_uri_port
|
||||
),
|
||||
)
|
||||
self.inventory.set_variable(
|
||||
unsafe_node_attrs["ID"],
|
||||
"ansible_host",
|
||||
make_unsafe(swarm_leader_ip),
|
||||
)
|
||||
self.inventory.add_host(unsafe_node_attrs["ID"], group="leader")
|
||||
else:
|
||||
self.inventory.add_host(
|
||||
unsafe_node_attrs["ID"], group="nonleaders"
|
||||
)
|
||||
else:
|
||||
self.inventory.add_host(unsafe_node_attrs["ID"], group="nonleaders")
|
||||
# Use constructed if applicable
|
||||
strict = self.get_option("strict")
|
||||
# Composed variables
|
||||
self._set_composite_vars(
|
||||
self.get_option("compose"),
|
||||
unsafe_node_attrs,
|
||||
unsafe_node_attrs["ID"],
|
||||
strict=strict,
|
||||
)
|
||||
# Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
|
||||
self._add_host_to_composed_groups(
|
||||
self.get_option("groups"),
|
||||
unsafe_node_attrs,
|
||||
unsafe_node_attrs["ID"],
|
||||
strict=strict,
|
||||
)
|
||||
# Create groups based on variable values and add the corresponding hosts to it
|
||||
self._add_host_to_keyed_groups(
|
||||
self.get_option("keyed_groups"),
|
||||
unsafe_node_attrs,
|
||||
unsafe_node_attrs["ID"],
|
||||
strict=strict,
|
||||
)
|
||||
except Exception as e:
|
||||
raise AnsibleError(
|
||||
f"Unable to fetch hosts from Docker swarm API, this was the original exception: {e}"
|
||||
) from e
|
||||
|
||||
def verify_file(self, path: str) -> bool:
|
||||
"""Return the possibly of a file being consumable by this plugin."""
|
||||
return super().verify_file(path) and path.endswith(
|
||||
("docker_swarm.yaml", "docker_swarm.yml")
|
||||
)
|
||||
|
||||
def parse(
|
||||
self,
|
||||
inventory: InventoryData,
|
||||
loader: DataLoader,
|
||||
path: str,
|
||||
cache: bool = True,
|
||||
) -> None:
|
||||
if not HAS_DOCKER:
|
||||
raise AnsibleError(
|
||||
"The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: "
|
||||
"https://github.com/docker/docker-py."
|
||||
)
|
||||
super().parse(inventory, loader, path, cache)
|
||||
self._read_config_data(path)
|
||||
self._populate()
|
||||
|
|
@ -0,0 +1,102 @@
|
|||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2022 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
|
||||
# Do not use this from other collections or standalone plugins/modules!
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import traceback
|
||||
import typing as t
|
||||
|
||||
REQUESTS_IMPORT_ERROR: str | None # pylint: disable=invalid-name
|
||||
try:
|
||||
from requests import Session # noqa: F401, pylint: disable=unused-import
|
||||
from requests.adapters import ( # noqa: F401, pylint: disable=unused-import
|
||||
HTTPAdapter,
|
||||
)
|
||||
from requests.exceptions import ( # noqa: F401, pylint: disable=unused-import
|
||||
HTTPError,
|
||||
InvalidSchema,
|
||||
)
|
||||
except ImportError:
|
||||
REQUESTS_IMPORT_ERROR = traceback.format_exc() # pylint: disable=invalid-name
|
||||
|
||||
class Session: # type: ignore
|
||||
__attrs__: list[t.Never] = []
|
||||
|
||||
class HTTPAdapter: # type: ignore
|
||||
__attrs__: list[t.Never] = []
|
||||
|
||||
class HTTPError(Exception): # type: ignore
|
||||
pass
|
||||
|
||||
class InvalidSchema(Exception): # type: ignore
|
||||
pass
|
||||
|
||||
else:
|
||||
REQUESTS_IMPORT_ERROR = None # pylint: disable=invalid-name
|
||||
|
||||
|
||||
URLLIB3_IMPORT_ERROR: str | None = None # pylint: disable=invalid-name
|
||||
try:
|
||||
from requests.packages import urllib3 # pylint: disable=unused-import
|
||||
|
||||
from requests.packages.urllib3 import ( # type: ignore # pylint: disable=unused-import # isort: skip
|
||||
connection as urllib3_connection,
|
||||
)
|
||||
except ImportError:
|
||||
try:
|
||||
import urllib3 # pylint: disable=unused-import
|
||||
from urllib3 import (
|
||||
connection as urllib3_connection, # pylint: disable=unused-import
|
||||
)
|
||||
except ImportError:
|
||||
URLLIB3_IMPORT_ERROR = traceback.format_exc() # pylint: disable=invalid-name
|
||||
|
||||
class _HTTPConnectionPool:
|
||||
pass
|
||||
|
||||
class _HTTPConnection:
|
||||
pass
|
||||
|
||||
class FakeURLLIB3:
|
||||
def __init__(self) -> None:
|
||||
self._collections = self
|
||||
self.poolmanager = self
|
||||
self.connection = self
|
||||
self.connectionpool = self
|
||||
|
||||
self.RecentlyUsedContainer = object() # pylint: disable=invalid-name
|
||||
self.PoolManager = object() # pylint: disable=invalid-name
|
||||
self.match_hostname = object()
|
||||
self.HTTPConnectionPool = ( # pylint: disable=invalid-name
|
||||
_HTTPConnectionPool
|
||||
)
|
||||
|
||||
class FakeURLLIB3Connection:
|
||||
def __init__(self) -> None:
|
||||
self.HTTPConnection = _HTTPConnection # pylint: disable=invalid-name
|
||||
|
||||
urllib3 = FakeURLLIB3()
|
||||
urllib3_connection = FakeURLLIB3Connection()
|
||||
|
||||
|
||||
def fail_on_missing_imports() -> None:
|
||||
if REQUESTS_IMPORT_ERROR is not None:
|
||||
from .errors import MissingRequirementException # pylint: disable=cyclic-import
|
||||
|
||||
raise MissingRequirementException(
|
||||
"You have to install requests", "requests", REQUESTS_IMPORT_ERROR
|
||||
)
|
||||
if URLLIB3_IMPORT_ERROR is not None:
|
||||
from .errors import MissingRequirementException # pylint: disable=cyclic-import
|
||||
|
||||
raise MissingRequirementException(
|
||||
"You have to install urllib3", "urllib3", URLLIB3_IMPORT_ERROR
|
||||
)
|
||||
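# Usage sketch (illustrative, not part of the vendored file): consumers of this
# helper typically call fail_on_missing_imports() once, early, so that a missing
# dependency surfaces as a clean MissingRequirementException instead of an
# obscure error on one of the fake fallback classes:
#
#     from ._import_helper import fail_on_missing_imports
#
#     fail_on_missing_imports()  # raises if requests or urllib3 is absent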
File diff suppressed because it is too large
|
|
@ -0,0 +1,406 @@
|
|||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2022 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
|
||||
# Do not use this from other collections or standalone plugins/modules!
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import json
|
||||
import logging
|
||||
import typing as t
|
||||
|
||||
from . import errors
|
||||
from .credentials.errors import CredentialsNotFound, StoreError
|
||||
from .credentials.store import Store
|
||||
from .utils import config
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from ansible_collections.community.docker.plugins.module_utils._api.api.client import (
|
||||
APIClient,
|
||||
)
|
||||
|
||||
|
||||
INDEX_NAME = "docker.io"
|
||||
INDEX_URL = f"https://index.{INDEX_NAME}/v1/"
|
||||
TOKEN_USERNAME = "<token>"
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def resolve_repository_name(repo_name: str) -> tuple[str, str]:
|
||||
if "://" in repo_name:
|
||||
raise errors.InvalidRepository(
|
||||
f"Repository name cannot contain a scheme ({repo_name})"
|
||||
)
|
||||
|
||||
index_name, remote_name = split_repo_name(repo_name)
|
||||
if index_name[0] == "-" or index_name[-1] == "-":
|
||||
raise errors.InvalidRepository(
|
||||
f"Invalid index name ({index_name}). Cannot begin or end with a hyphen."
|
||||
)
|
||||
return resolve_index_name(index_name), remote_name
|
||||
|
||||
|
||||
def resolve_index_name(index_name: str) -> str:
|
||||
index_name = convert_to_hostname(index_name)
|
||||
if index_name == "index." + INDEX_NAME:
|
||||
index_name = INDEX_NAME
|
||||
return index_name
|
||||
|
||||
|
||||
def get_config_header(client: APIClient, registry: str) -> bytes | None:
|
||||
log.debug("Looking for auth config")
|
||||
if not client._auth_configs or client._auth_configs.is_empty:
|
||||
log.debug("No auth config in memory - loading from filesystem")
|
||||
client._auth_configs = load_config(credstore_env=client.credstore_env)
|
||||
authcfg = resolve_authconfig(
|
||||
client._auth_configs, registry, credstore_env=client.credstore_env
|
||||
)
|
||||
# Do not fail here if no authentication exists for this
|
||||
# specific registry as we can have a readonly pull. Just
|
||||
# put the header if we can.
|
||||
if authcfg:
|
||||
log.debug("Found auth config")
|
||||
# auth_config needs to be a dict in the format used by
|
||||
# auth.py: username, password, serveraddress, email
|
||||
return encode_header(authcfg)
|
||||
log.debug("No auth config found")
|
||||
return None
|
||||
|
||||
|
||||
def split_repo_name(repo_name: str) -> tuple[str, str]:
|
||||
parts = repo_name.split("/", 1)
|
||||
if len(parts) == 1 or (
|
||||
"." not in parts[0] and ":" not in parts[0] and parts[0] != "localhost"
|
||||
):
|
||||
# This is a docker index repo (ex: username/foobar or ubuntu)
|
||||
return INDEX_NAME, repo_name
|
||||
return tuple(parts) # type: ignore
|
||||
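# Behaviour sketch (illustrative): split_repo_name() treats the first path
# component as a registry only if it looks like a hostname:
#
#     split_repo_name("ubuntu")      # -> ("docker.io", "ubuntu")
#     split_repo_name("user/repo")   # -> ("docker.io", "user/repo")
#     split_repo_name("registry.example.com/user/repo")
#                                    # -> ("registry.example.com", "user/repo")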
|
||||
|
||||
def get_credential_store(
|
||||
authconfig: dict[str, t.Any] | AuthConfig, registry: str
|
||||
) -> str | None:
|
||||
if not isinstance(authconfig, AuthConfig):
|
||||
authconfig = AuthConfig(authconfig)
|
||||
return authconfig.get_credential_store(registry)
|
||||
|
||||
|
||||
class AuthConfig(dict):
|
||||
def __init__(
|
||||
self, dct: dict[str, t.Any], credstore_env: dict[str, str] | None = None
|
||||
):
|
||||
if "auths" not in dct:
|
||||
dct["auths"] = {}
|
||||
self.update(dct)
|
||||
self._credstore_env = credstore_env
|
||||
self._stores: dict[str, Store] = {}
|
||||
|
||||
@classmethod
|
||||
def parse_auth(
|
||||
cls, entries: dict[str, dict[str, t.Any]], raise_on_error: bool = False
|
||||
) -> dict[str, dict[str, t.Any]]:
|
||||
"""
|
||||
Parses authentication entries
|
||||
|
||||
Args:
|
||||
entries: Dict of authentication entries.
|
||||
raise_on_error: If set to true, an invalid format will raise
|
||||
InvalidConfigFile
|
||||
|
||||
Returns:
|
||||
Authentication registry.
|
||||
"""
|
||||
|
||||
conf: dict[str, dict[str, t.Any]] = {}
|
||||
for registry, entry in entries.items():
|
||||
if not isinstance(entry, dict):
|
||||
log.debug("Config entry for key %s is not auth config", registry) # type: ignore
|
||||
# We sometimes fall back to parsing the whole config as if it
|
||||
# was the auth config by itself, for legacy purposes. In that
|
||||
# case, we fail silently and return an empty conf if any of the
|
||||
# keys is not formatted properly.
|
||||
if raise_on_error:
|
||||
raise errors.InvalidConfigFile(
|
||||
f"Invalid configuration for registry {registry}"
|
||||
)
|
||||
return {}
|
||||
if "identitytoken" in entry:
|
||||
log.debug("Found an IdentityToken entry for registry %s", registry)
|
||||
conf[registry] = {"IdentityToken": entry["identitytoken"]}
|
||||
continue # Other values are irrelevant if we have a token
|
||||
|
||||
if "auth" not in entry:
|
||||
# Starting with engine v1.11 (API 1.23), an empty dictionary is
|
||||
# a valid value in the auths config.
|
||||
# https://github.com/docker/compose/issues/3265
|
||||
log.debug(
|
||||
"Auth data for %s is absent. Client might be using a credentials store instead.",
|
||||
registry,
|
||||
)
|
||||
conf[registry] = {}
|
||||
continue
|
||||
|
||||
username, password = decode_auth(entry["auth"])
|
||||
log.debug(
|
||||
"Found entry (registry=%s, username=%s)", repr(registry), repr(username)
|
||||
)
|
||||
|
||||
conf[registry] = {
|
||||
"username": username,
|
||||
"password": password,
|
||||
"email": entry.get("email"),
|
||||
"serveraddress": registry,
|
||||
}
|
||||
return conf
|
||||
|
||||
@classmethod
|
||||
def load_config(
|
||||
cls,
|
||||
config_path: str | None,
|
||||
config_dict: dict[str, t.Any] | None,
|
||||
credstore_env: dict[str, str] | None = None,
|
||||
) -> t.Self:
|
||||
"""
|
||||
Loads authentication data from a Docker configuration file in the given
|
||||
root directory, or from the given path if config_path is passed.
|
||||
Lookup priority:
|
||||
explicit config_path parameter > DOCKER_CONFIG environment
|
||||
variable > ~/.docker/config.json > ~/.dockercfg
|
||||
"""
|
||||
|
||||
if not config_dict:
|
||||
config_file = config.find_config_file(config_path)
|
||||
|
||||
if not config_file:
|
||||
return cls({}, credstore_env)
|
||||
try:
|
||||
with open(config_file, "rt", encoding="utf-8") as f:
|
||||
config_dict = json.load(f)
|
||||
except (IOError, KeyError, ValueError) as e:
|
||||
# Likely missing new Docker config file or it is in an
|
||||
# unknown format, continue to attempt to read old location
|
||||
# and format.
|
||||
log.debug(e)
|
||||
return cls(_load_legacy_config(config_file), credstore_env)
|
||||
|
||||
res = {}
|
||||
if config_dict.get("auths"):
|
||||
log.debug("Found 'auths' section")
|
||||
res.update(
|
||||
{"auths": cls.parse_auth(config_dict.pop("auths"), raise_on_error=True)}
|
||||
)
|
||||
if config_dict.get("credsStore"):
|
||||
log.debug("Found 'credsStore' section")
|
||||
res.update({"credsStore": config_dict.pop("credsStore")})
|
||||
if config_dict.get("credHelpers"):
|
||||
log.debug("Found 'credHelpers' section")
|
||||
res.update({"credHelpers": config_dict.pop("credHelpers")})
|
||||
if res:
|
||||
return cls(res, credstore_env)
|
||||
|
||||
log.debug(
|
||||
"Could not find auth-related section ; attempting to interpret "
|
||||
"as auth-only file"
|
||||
)
|
||||
return cls({"auths": cls.parse_auth(config_dict)}, credstore_env)
|
||||
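# Example (illustrative) of a config.json that load_config() understands; the
# registry name, credentials, and helper names below are made up:
#
#     {
#         "auths": {"registry.example.com": {"auth": "dXNlcjpzM2NyM3Q="}},
#         "credsStore": "secretservice",
#         "credHelpers": {"other-registry.example.com": "pass"}
#     }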
|
||||
@property
|
||||
def auths(self) -> dict[str, dict[str, t.Any]]:
|
||||
return self.get("auths", {})
|
||||
|
||||
@property
|
||||
def creds_store(self) -> str | None:
|
||||
return self.get("credsStore", None)
|
||||
|
||||
@property
|
||||
def cred_helpers(self) -> dict[str, t.Any]:
|
||||
return self.get("credHelpers", {})
|
||||
|
||||
@property
|
||||
def is_empty(self) -> bool:
|
||||
return not self.auths and not self.creds_store and not self.cred_helpers
|
||||
|
||||
def resolve_authconfig(
|
||||
self, registry: str | None = None
|
||||
) -> dict[str, t.Any] | None:
|
||||
"""
|
||||
Returns the authentication data from the given auth configuration for a
|
||||
specific registry. As with the Docker client, legacy entries in the
|
||||
config with full URLs are stripped down to hostnames before checking
|
||||
for a match. Returns None if no match was found.
|
||||
"""
|
||||
|
||||
if self.creds_store or self.cred_helpers:
|
||||
store_name = self.get_credential_store(registry)
|
||||
if store_name is not None:
|
||||
log.debug('Using credentials store "%s"', store_name)
|
||||
cfg = self._resolve_authconfig_credstore(registry, store_name)
|
||||
if cfg is not None:
|
||||
return cfg
|
||||
log.debug("No entry in credstore - fetching from auth dict")
|
||||
|
||||
# Default to the public index server
|
||||
registry = resolve_index_name(registry) if registry else INDEX_NAME
|
||||
log.debug("Looking for auth entry for %s", repr(registry))
|
||||
|
||||
if registry in self.auths:
|
||||
log.debug("Found %s", repr(registry))
|
||||
return self.auths[registry]
|
||||
|
||||
for key, conf in self.auths.items():
|
||||
if resolve_index_name(key) == registry:
|
||||
log.debug("Found %s", repr(key))
|
||||
return conf
|
||||
|
||||
log.debug("No entry found")
|
||||
return None
|
||||
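# Lookup sketch (illustrative): legacy entries keyed by a full URL still match,
# because keys are normalized via resolve_index_name() before comparison:
#
#     cfg = AuthConfig({"auths": {"https://registry.example.com/v1/": {"username": "u"}}})
#     cfg.resolve_authconfig("registry.example.com")  # -> {"username": "u"}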
|
||||
def _resolve_authconfig_credstore(
|
||||
self, registry: str | None, credstore_name: str
|
||||
) -> dict[str, t.Any] | None:
|
||||
if not registry or registry == INDEX_NAME:
|
||||
# The ecosystem is a little inconsistent about index.docker.io vs.
|
||||
# docker.io - in that case, it seems the full URL is necessary.
|
||||
registry = INDEX_URL
|
||||
log.debug("Looking for auth entry for %s", repr(registry))
|
||||
store = self._get_store_instance(credstore_name)
|
||||
try:
|
||||
data = store.get(registry)
|
||||
res = {
|
||||
"ServerAddress": registry,
|
||||
}
|
||||
if data["Username"] == TOKEN_USERNAME:
|
||||
res["IdentityToken"] = data["Secret"]
|
||||
else:
|
||||
res.update(
|
||||
{
|
||||
"Username": data["Username"],
|
||||
"Password": data["Secret"],
|
||||
}
|
||||
)
|
||||
return res
|
||||
except CredentialsNotFound:
|
||||
log.debug("No entry found")
|
||||
return None
|
||||
except StoreError as e:
|
||||
raise errors.DockerException(f"Credentials store error: {e}") from e
|
||||
|
||||
def _get_store_instance(self, name: str) -> Store:
|
||||
if name not in self._stores:
|
||||
self._stores[name] = Store(name, environment=self._credstore_env)
|
||||
return self._stores[name]
|
||||
|
||||
def get_credential_store(self, registry: str | None) -> str | None:
|
||||
if not registry or registry == INDEX_NAME:
|
||||
registry = INDEX_URL
|
||||
|
||||
return self.cred_helpers.get(registry) or self.creds_store
|
||||
|
||||
def get_all_credentials(self) -> dict[str, dict[str, t.Any] | None]:
|
||||
auth_data: dict[str, dict[str, t.Any] | None] = self.auths.copy() # type: ignore
|
||||
if self.creds_store:
|
||||
# Retrieve all credentials from the default store
|
||||
store = self._get_store_instance(self.creds_store)
|
||||
for k in store.list():
|
||||
auth_data[k] = self._resolve_authconfig_credstore(k, self.creds_store)
|
||||
auth_data[convert_to_hostname(k)] = auth_data[k]
|
||||
|
||||
# credHelpers entries take priority over all others
|
||||
for reg, store_name in self.cred_helpers.items():
|
||||
auth_data[reg] = self._resolve_authconfig_credstore(reg, store_name)
|
||||
auth_data[convert_to_hostname(reg)] = auth_data[reg]
|
||||
|
||||
return auth_data
|
||||
|
||||
def add_auth(self, reg: str, data: dict[str, t.Any]) -> None:
|
||||
self["auths"][reg] = data
|
||||
|
||||
|
||||
def resolve_authconfig(
|
||||
authconfig: AuthConfig | dict[str, t.Any],
|
||||
registry: str | None = None,
|
||||
credstore_env: dict[str, str] | None = None,
|
||||
) -> dict[str, t.Any] | None:
|
||||
if not isinstance(authconfig, AuthConfig):
|
||||
authconfig = AuthConfig(authconfig, credstore_env)
|
||||
return authconfig.resolve_authconfig(registry)
|
||||
|
||||
|
||||
def convert_to_hostname(url: str) -> str:
|
||||
return url.replace("http://", "").replace("https://", "").split("/", 1)[0]
|
||||
|
||||
|
||||
def decode_auth(auth: str | bytes) -> tuple[str, str]:
|
||||
if isinstance(auth, str):
|
||||
auth = auth.encode("ascii")
|
||||
s = base64.b64decode(auth)
|
||||
login, pwd = s.split(b":", 1)
|
||||
return login.decode("utf8"), pwd.decode("utf8")
|
||||
|
||||
|
||||
def encode_header(auth: dict[str, t.Any]) -> bytes:
|
||||
auth_json = json.dumps(auth).encode("ascii")
|
||||
return base64.urlsafe_b64encode(auth_json)
|
||||
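# Round-trip sketch (illustrative): the "auth" field of a registry entry holds
# base64("username:password"), and encode_header() produces the URL-safe
# base64-encoded JSON payload used for the X-Registry-Auth request header:
#
#     decode_auth("dXNlcjpzM2NyM3Q=")      # -> ("user", "s3cr3t")
#     encode_header({"username": "user"})  # -> URL-safe base64 of the JSON payload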
|
||||
|
||||
def parse_auth(
|
||||
entries: dict[str, dict[str, t.Any]], raise_on_error: bool = False
|
||||
) -> dict[str, dict[str, t.Any]]:
|
||||
"""
|
||||
Parses authentication entries
|
||||
|
||||
Args:
|
||||
entries: Dict of authentication entries.
|
||||
raise_on_error: If set to true, an invalid format will raise
|
||||
InvalidConfigFile
|
||||
|
||||
Returns:
|
||||
Authentication registry.
|
||||
"""
|
||||
|
||||
return AuthConfig.parse_auth(entries, raise_on_error)
|
||||
|
||||
|
||||
def load_config(
|
||||
config_path: str | None = None,
|
||||
config_dict: dict[str, t.Any] | None = None,
|
||||
credstore_env: dict[str, str] | None = None,
|
||||
) -> AuthConfig:
|
||||
return AuthConfig.load_config(config_path, config_dict, credstore_env)
|
||||
|
||||
|
||||
def _load_legacy_config(config_file: str) -> dict[str, dict[str, t.Any]]:
|
||||
log.debug("Attempting to parse legacy auth file format")
|
||||
try:
|
||||
data = []
|
||||
with open(config_file, "rt", encoding="utf-8") as f:
|
||||
for line in f.readlines():
|
||||
data.append(line.strip().split(" = ")[1])
|
||||
if len(data) < 2:
|
||||
# Not enough data
|
||||
raise errors.InvalidConfigFile("Invalid or empty configuration file!")
|
||||
|
||||
username, password = decode_auth(data[0])
|
||||
return {
|
||||
"auths": {
|
||||
INDEX_NAME: {
|
||||
"username": username,
|
||||
"password": password,
|
||||
"email": data[1],
|
||||
"serveraddress": INDEX_URL,
|
||||
}
|
||||
}
|
||||
}
|
||||
except Exception as e: # pylint: disable=broad-exception-caught
|
||||
log.debug(e)
|
||||
|
||||
log.debug("All parsing attempts failed - returning empty config")
|
||||
return {}
|
||||
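# Legacy ~/.dockercfg sketch (illustrative): two "key = value" lines, the first
# carrying base64("username:password") and the second an email address:
#
#     auth = dXNlcjpzM2NyM3Q=
#     email = user@example.com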
|
|
@ -0,0 +1,40 @@
|
|||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2022 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
|
||||
# Do not use this from other collections or standalone plugins/modules!
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import sys
|
||||
|
||||
MINIMUM_DOCKER_API_VERSION = "1.21"
|
||||
DEFAULT_TIMEOUT_SECONDS = 60
|
||||
STREAM_HEADER_SIZE_BYTES = 8
|
||||
CONTAINER_LIMITS_KEYS = ["memory", "memswap", "cpushares", "cpusetcpus"]
|
||||
|
||||
DEFAULT_HTTP_HOST = "127.0.0.1"
|
||||
DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock"
|
||||
DEFAULT_NPIPE = "npipe:////./pipe/docker_engine"
|
||||
|
||||
BYTE_UNITS = {"b": 1, "k": 1024, "m": 1024 * 1024, "g": 1024 * 1024 * 1024}
|
||||
|
||||
IS_WINDOWS_PLATFORM = sys.platform == "win32"
|
||||
WINDOWS_LONGPATH_PREFIX = "\\\\?\\"
|
||||
|
||||
DEFAULT_USER_AGENT = "ansible-community.docker"
|
||||
DEFAULT_NUM_POOLS = 25
|
||||
|
||||
# The OpenSSH server default value for MaxSessions is 10 which means we can
|
||||
# use up to 9, leaving the final session for the underlying SSH connection.
|
||||
# For more details see: https://github.com/docker/docker-py/issues/2246
|
||||
DEFAULT_NUM_POOLS_SSH = 9
|
||||
|
||||
DEFAULT_MAX_POOL_SIZE = 10
|
||||
|
||||
DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
|
||||
|
|
@ -0,0 +1,253 @@
|
|||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2025 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
|
||||
# Do not use this from other collections or standalone plugins/modules!
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import typing as t
|
||||
|
||||
from .. import errors
|
||||
from .config import (
|
||||
METAFILE,
|
||||
get_current_context_name,
|
||||
get_meta_dir,
|
||||
write_context_name_to_docker_config,
|
||||
)
|
||||
from .context import Context
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from ..tls import TLSConfig
|
||||
|
||||
|
||||
def create_default_context() -> Context:
|
||||
host = None
|
||||
if os.environ.get("DOCKER_HOST"):
|
||||
host = os.environ.get("DOCKER_HOST")
|
||||
return Context(
|
||||
"default", "swarm", host, description="Current DOCKER_HOST based configuration"
|
||||
)
|
||||
|
||||
|
||||
class ContextAPI:
|
||||
"""Context API.
|
||||
Contains methods for context management:
|
||||
create, list, remove, get, inspect.
|
||||
"""
|
||||
|
||||
DEFAULT_CONTEXT = None
|
||||
|
||||
@classmethod
|
||||
def get_default_context(cls) -> Context:
|
||||
context = cls.DEFAULT_CONTEXT
|
||||
if context is None:
|
||||
context = create_default_context()
|
||||
cls.DEFAULT_CONTEXT = context
|
||||
return context
|
||||
|
||||
@classmethod
|
||||
def create_context(
|
||||
cls,
|
||||
name: str,
|
||||
orchestrator: str | None = None,
|
||||
host: str | None = None,
|
||||
tls_cfg: TLSConfig | None = None,
|
||||
default_namespace: str | None = None,
|
||||
skip_tls_verify: bool = False,
|
||||
) -> Context:
|
||||
"""Creates a new context.
|
||||
Returns:
|
||||
(Context): a Context object.
|
||||
Raises:
|
||||
:py:class:`docker.errors.MissingContextParameter`
|
||||
If a context name is not provided.
|
||||
:py:class:`docker.errors.ContextAlreadyExists`
|
||||
If a context with the name already exists.
|
||||
:py:class:`docker.errors.ContextException`
|
||||
If name is default.
|
||||
|
||||
Example:
|
||||
|
||||
>>> from docker.context import ContextAPI
|
||||
>>> ctx = ContextAPI.create_context(name='test')
|
||||
>>> print(ctx.Metadata)
|
||||
{
|
||||
"Name": "test",
|
||||
"Metadata": {},
|
||||
"Endpoints": {
|
||||
"docker": {
|
||||
"Host": "unix:///var/run/docker.sock",
|
||||
"SkipTLSVerify": false
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
if not name:
|
||||
raise errors.MissingContextParameter("name")
|
||||
if name == "default":
|
||||
raise errors.ContextException('"default" is a reserved context name')
|
||||
ctx = Context.load_context(name)
|
||||
if ctx:
|
||||
raise errors.ContextAlreadyExists(name)
|
||||
endpoint = "docker"
|
||||
if orchestrator and orchestrator != "swarm":
|
||||
endpoint = orchestrator
|
||||
ctx = Context(name, orchestrator)
|
||||
ctx.set_endpoint(
|
||||
endpoint,
|
||||
host,
|
||||
tls_cfg,
|
||||
skip_tls_verify=skip_tls_verify,
|
||||
def_namespace=default_namespace,
|
||||
)
|
||||
ctx.save()
|
||||
return ctx
|
||||
|
||||
@classmethod
|
||||
def get_context(cls, name: str | None = None) -> Context | None:
|
||||
"""Retrieves a context object.
|
||||
Args:
|
||||
name (str): The name of the context
|
||||
|
||||
Example:
|
||||
|
||||
>>> from docker.context import ContextAPI
|
||||
>>> ctx = ContextAPI.get_context(name='test')
|
||||
>>> print(ctx.Metadata)
|
||||
{
|
||||
"Name": "test",
|
||||
"Metadata": {},
|
||||
"Endpoints": {
|
||||
"docker": {
|
||||
"Host": "unix:///var/run/docker.sock",
|
||||
"SkipTLSVerify": false
|
||||
}
|
||||
}
|
||||
}
|
||||
"""
|
||||
if not name:
|
||||
name = get_current_context_name()
|
||||
if name == "default":
|
||||
return cls.get_default_context()
|
||||
return Context.load_context(name)
|
||||
|
||||
@classmethod
|
||||
def contexts(cls) -> list[Context]:
|
||||
"""Context list.
|
||||
Returns:
|
||||
(list[Context]): List of context objects.
|
||||
Raises:
|
||||
:py:class:`docker.errors.APIError`
|
||||
If something goes wrong.
|
||||
"""
|
||||
names = []
|
||||
for dirname, dummy, fnames in os.walk(get_meta_dir()):
|
||||
for filename in fnames:
|
||||
if filename == METAFILE:
|
||||
filepath = os.path.join(dirname, filename)
|
||||
try:
|
||||
with open(filepath, "rt", encoding="utf-8") as f:
|
||||
data = json.load(f)
|
||||
name = data["Name"]
|
||||
if name == "default":
|
||||
raise ValueError('"default" is a reserved context name')
|
||||
names.append(name)
|
||||
except Exception as e:
|
||||
raise errors.ContextException(
|
||||
f"Failed to load metafile {filepath}: {e}"
|
||||
) from e
|
||||
|
||||
contexts = [cls.get_default_context()]
|
||||
for name in names:
|
||||
context = Context.load_context(name)
|
||||
if not context:
|
||||
raise errors.ContextException(f"Context {name} cannot be found")
|
||||
contexts.append(context)
|
||||
return contexts
|
||||
|
||||
@classmethod
|
||||
def get_current_context(cls) -> Context | None:
|
||||
"""Get current context.
|
||||
Returns:
|
||||
(Context): current context object.
|
||||
"""
|
||||
return cls.get_context()
|
||||
|
||||
@classmethod
|
||||
def set_current_context(cls, name: str = "default") -> None:
|
||||
ctx = cls.get_context(name)
|
||||
if not ctx:
|
||||
raise errors.ContextNotFound(name)
|
||||
|
||||
err = write_context_name_to_docker_config(name)
|
||||
if err:
|
||||
raise errors.ContextException(f"Failed to set current context: {err}")
|
||||
|
||||
@classmethod
|
||||
def remove_context(cls, name: str) -> None:
|
||||
"""Remove a context. Similar to the ``docker context rm`` command.
|
||||
|
||||
Args:
|
||||
name (str): The name of the context
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.MissingContextParameter`
|
||||
If a context name is not provided.
|
||||
:py:class:`docker.errors.ContextNotFound`
|
||||
If a context with the name does not exist.
|
||||
:py:class:`docker.errors.ContextException`
|
||||
If name is default.
|
||||
|
||||
Example:
|
||||
|
||||
>>> from docker.context import ContextAPI
|
||||
>>> ContextAPI.remove_context(name='test')
|
||||
>>>
|
||||
"""
|
||||
if not name:
|
||||
raise errors.MissingContextParameter("name")
|
||||
if name == "default":
|
||||
raise errors.ContextException('context "default" cannot be removed')
|
||||
ctx = Context.load_context(name)
|
||||
if not ctx:
|
||||
raise errors.ContextNotFound(name)
|
||||
if name == get_current_context_name():
|
||||
write_context_name_to_docker_config(None)
|
||||
ctx.remove()
|
||||
|
||||
@classmethod
|
||||
def inspect_context(cls, name: str = "default") -> dict[str, t.Any]:
|
||||
"""Inspect a context. Similar to the ``docker context inspect`` command.
|
||||
|
||||
Args:
|
||||
name (str): The name of the context
|
||||
|
||||
Raises:
|
||||
:py:class:`docker.errors.MissingContextParameter`
|
||||
If a context name is not provided.
|
||||
:py:class:`docker.errors.ContextNotFound`
|
||||
If a context with the name does not exist.
|
||||
|
||||
Example:
|
||||
|
||||
>>> from docker.context import ContextAPI
|
||||
>>> ctx = ContextAPI.inspect_context(name='test')
|
||||
>>>
|
||||
"""
|
||||
if not name:
|
||||
raise errors.MissingContextParameter("name")
|
||||
if name == "default":
|
||||
# Calling a Context object returns its dict representation.
return cls.get_default_context()()
|
||||
ctx = Context.load_context(name)
|
||||
if not ctx:
|
||||
raise errors.ContextNotFound(name)
|
||||
|
||||
return ctx()
|
||||
|
|
@ -0,0 +1,107 @@
|
|||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2025 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
|
||||
# Do not use this from other collections or standalone plugins/modules!
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import hashlib
|
||||
import json
|
||||
import os
|
||||
|
||||
from ..constants import DEFAULT_UNIX_SOCKET, IS_WINDOWS_PLATFORM
|
||||
from ..utils.config import find_config_file, get_default_config_file
|
||||
from ..utils.utils import parse_host
|
||||
|
||||
METAFILE = "meta.json"
|
||||
|
||||
|
||||
def get_current_context_name_with_source() -> tuple[str, str]:
|
||||
if os.environ.get("DOCKER_HOST"):
|
||||
return "default", "DOCKER_HOST environment variable set"
|
||||
if os.environ.get("DOCKER_CONTEXT"):
|
||||
return os.environ["DOCKER_CONTEXT"], "DOCKER_CONTEXT environment variable set"
|
||||
docker_cfg_path = find_config_file()
|
||||
if docker_cfg_path:
|
||||
try:
|
||||
with open(docker_cfg_path, "rt", encoding="utf-8") as f:
|
||||
return (
|
||||
json.load(f).get("currentContext", "default"),
|
||||
f"configuration file {docker_cfg_path}",
|
||||
)
|
||||
except Exception: # pylint: disable=broad-exception-caught
|
||||
pass
|
||||
return "default", "fallback value"
|
||||
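# Resolution sketch (illustrative): DOCKER_HOST wins, then DOCKER_CONTEXT, then
# the "currentContext" key of config.json, then the fallback:
#
#     DOCKER_HOST=tcp://1.2.3.4:2376  ->  ("default", "DOCKER_HOST environment variable set")
#     DOCKER_CONTEXT=remote           ->  ("remote", "DOCKER_CONTEXT environment variable set")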
|
||||
|
||||
def get_current_context_name() -> str:
|
||||
return get_current_context_name_with_source()[0]
|
||||
|
||||
|
||||
def write_context_name_to_docker_config(name: str | None = None) -> Exception | None:
|
||||
if name == "default":
|
||||
name = None
|
||||
docker_cfg_path = find_config_file()
|
||||
config = {}
|
||||
if docker_cfg_path:
|
||||
try:
|
||||
with open(docker_cfg_path, "rt", encoding="utf-8") as f:
|
||||
config = json.load(f)
|
||||
except Exception as e: # pylint: disable=broad-exception-caught
|
||||
return e
|
||||
current_context = config.get("currentContext", None)
|
||||
if current_context and not name:
|
||||
del config["currentContext"]
|
||||
elif name:
|
||||
config["currentContext"] = name
|
||||
else:
|
||||
return None
|
||||
if not docker_cfg_path:
|
||||
docker_cfg_path = get_default_config_file()
|
||||
try:
|
||||
with open(docker_cfg_path, "wt", encoding="utf-8") as f:
|
||||
json.dump(config, f, indent=4)
|
||||
return None
|
||||
except Exception as e: # pylint: disable=broad-exception-caught
|
||||
return e
|
||||
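# Usage sketch (illustrative): the function returns an exception instead of
# raising, so callers re-raise explicitly:
#
#     err = write_context_name_to_docker_config("remote")  # set currentContext
#     if err:
#         raise err
#     write_context_name_to_docker_config(None)  # clear it again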
|
||||
|
||||
def get_context_id(name: str) -> str:
|
||||
return hashlib.sha256(name.encode("utf-8")).hexdigest()
|
||||
|
||||
|
||||
def get_context_dir() -> str:
|
||||
docker_cfg_path = find_config_file() or get_default_config_file()
|
||||
return os.path.join(os.path.dirname(docker_cfg_path), "contexts")
|
||||
|
||||
|
||||
def get_meta_dir(name: str | None = None) -> str:
|
||||
meta_dir = os.path.join(get_context_dir(), "meta")
|
||||
if name:
|
||||
return os.path.join(meta_dir, get_context_id(name))
|
||||
return meta_dir
|
||||
|
||||
|
||||
def get_meta_file(name: str) -> str:
|
||||
return os.path.join(get_meta_dir(name), METAFILE)
|
||||
|
||||
|
||||
def get_tls_dir(name: str | None = None, endpoint: str = "") -> str:
|
||||
context_dir = get_context_dir()
|
||||
if name:
|
||||
return os.path.join(context_dir, "tls", get_context_id(name), endpoint)
|
||||
return os.path.join(context_dir, "tls")
|
||||
|
||||
|
||||
def get_context_host(path: str | None = None, tls: bool = False) -> str:
|
||||
host = parse_host(path, IS_WINDOWS_PLATFORM, tls)
|
||||
if host == DEFAULT_UNIX_SOCKET and host.startswith("http+"):
|
||||
# remove http+ from default docker socket url
|
||||
host = host[5:]
|
||||
return host
|
||||
|
|
@ -0,0 +1,286 @@
|
|||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2025 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
|
||||
# Do not use this from other collections or standalone plugins/modules!
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import typing as t
|
||||
from shutil import copyfile, rmtree
|
||||
|
||||
from ..errors import ContextException
|
||||
from ..tls import TLSConfig
|
||||
from .config import (
|
||||
get_context_host,
|
||||
get_meta_dir,
|
||||
get_meta_file,
|
||||
get_tls_dir,
|
||||
)
|
||||
|
||||
IN_MEMORY = "IN MEMORY"
|
||||
|
||||
|
||||
class Context:
|
||||
"""A context."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name: str,
|
||||
orchestrator: str | None = None,
|
||||
host: str | None = None,
|
||||
endpoints: dict[str, dict[str, t.Any]] | None = None,
|
||||
skip_tls_verify: bool = False,
|
||||
tls: bool = False,
|
||||
description: str | None = None,
|
||||
) -> None:
|
||||
if not name:
|
||||
raise ValueError("Name not provided")
|
||||
self.name = name
|
||||
self.context_type = None
|
||||
self.orchestrator = orchestrator
|
||||
self.endpoints = {}
|
||||
self.tls_cfg: dict[str, TLSConfig] = {}
|
||||
self.meta_path = IN_MEMORY
|
||||
self.tls_path = IN_MEMORY
|
||||
self.description = description
|
||||
|
||||
if not endpoints:
|
||||
# set default docker endpoint if no endpoint is set
|
||||
default_endpoint = (
|
||||
"docker"
|
||||
if (not orchestrator or orchestrator == "swarm")
|
||||
else orchestrator
|
||||
)
|
||||
|
||||
self.endpoints = {
|
||||
default_endpoint: {
|
||||
"Host": get_context_host(host, skip_tls_verify or tls),
|
||||
"SkipTLSVerify": skip_tls_verify,
|
||||
}
|
||||
}
|
||||
return
|
||||
|
||||
# check docker endpoints
|
||||
for k, v in endpoints.items():
|
||||
if not isinstance(v, dict):
|
||||
# unknown format
|
||||
raise ContextException(
|
||||
f"Unknown endpoint format for context {name}: {v}",
|
||||
)
|
||||
|
||||
self.endpoints[k] = v
|
||||
if k != "docker":
|
||||
continue
|
||||
|
||||
self.endpoints[k]["Host"] = v.get(
|
||||
"Host", get_context_host(host, skip_tls_verify or tls)
|
||||
)
|
||||
self.endpoints[k]["SkipTLSVerify"] = bool(
|
||||
v.get("SkipTLSVerify", skip_tls_verify)
|
||||
)
|
||||
|
||||
def set_endpoint(
|
||||
self,
|
||||
name: str = "docker",
|
||||
host: str | None = None,
|
||||
tls_cfg: TLSConfig | None = None,
|
||||
skip_tls_verify: bool = False,
|
||||
def_namespace: str | None = None,
|
||||
) -> None:
|
||||
self.endpoints[name] = {
|
||||
"Host": get_context_host(host, not skip_tls_verify or tls_cfg is not None),
|
||||
"SkipTLSVerify": skip_tls_verify,
|
||||
}
|
||||
if def_namespace:
|
||||
self.endpoints[name]["DefaultNamespace"] = def_namespace
|
||||
|
||||
if tls_cfg:
|
||||
self.tls_cfg[name] = tls_cfg
|
||||
|
||||
def inspect(self) -> dict[str, t.Any]:
|
||||
return self()
|
||||
|
||||
@classmethod
|
||||
def load_context(cls, name: str) -> t.Self | None:
|
||||
meta = Context._load_meta(name)
|
||||
if meta:
|
||||
instance = cls(
|
||||
meta["Name"],
|
||||
orchestrator=meta["Metadata"].get("StackOrchestrator", None),
|
||||
endpoints=meta.get("Endpoints", None),
|
||||
description=meta["Metadata"].get("Description"),
|
||||
)
|
||||
instance.context_type = meta["Metadata"].get("Type", None)
|
||||
instance._load_certs()
|
||||
instance.meta_path = get_meta_dir(name)
|
||||
return instance
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def _load_meta(cls, name: str) -> dict[str, t.Any] | None:
|
||||
meta_file = get_meta_file(name)
|
||||
if not os.path.isfile(meta_file):
|
||||
return None
|
||||
|
||||
metadata: dict[str, t.Any] = {}
|
||||
try:
|
||||
with open(meta_file, "rt", encoding="utf-8") as f:
|
||||
metadata = json.load(f)
|
||||
except (OSError, KeyError, ValueError) as e:
|
||||
# unknown format
|
||||
raise RuntimeError(
|
||||
f"Detected corrupted meta file for context {name} : {e}"
|
||||
) from e
|
||||
|
||||
# for docker endpoints, set defaults for
|
||||
# Host and SkipTLSVerify fields
|
||||
for k, v in metadata["Endpoints"].items():
|
||||
if k != "docker":
|
||||
continue
|
||||
metadata["Endpoints"][k]["Host"] = v.get(
|
||||
"Host", get_context_host(None, False)
|
||||
)
|
||||
metadata["Endpoints"][k]["SkipTLSVerify"] = bool(
|
||||
v.get("SkipTLSVerify", True)
|
||||
)
|
||||
|
||||
return metadata
|
||||
|
||||
def _load_certs(self) -> None:
|
||||
certs = {}
|
||||
tls_dir = get_tls_dir(self.name)
|
||||
for endpoint in self.endpoints:
|
||||
if not os.path.isdir(os.path.join(tls_dir, endpoint)):
|
||||
continue
|
||||
ca_cert = None
|
||||
cert = None
|
||||
key = None
|
||||
for filename in os.listdir(os.path.join(tls_dir, endpoint)):
|
||||
if filename.startswith("ca"):
|
||||
ca_cert = os.path.join(tls_dir, endpoint, filename)
|
||||
elif filename.startswith("cert"):
|
||||
cert = os.path.join(tls_dir, endpoint, filename)
|
||||
elif filename.startswith("key"):
|
||||
key = os.path.join(tls_dir, endpoint, filename)
|
||||
if all([cert, key]) or ca_cert:
|
||||
verify = None
|
||||
if endpoint == "docker" and not self.endpoints["docker"].get(
|
||||
"SkipTLSVerify", False
|
||||
):
|
||||
verify = True
|
||||
certs[endpoint] = TLSConfig(
|
||||
client_cert=(cert, key) if cert and key else None,
|
||||
ca_cert=ca_cert,
|
||||
verify=verify,
|
||||
)
|
||||
self.tls_cfg = certs
|
||||
self.tls_path = tls_dir
|
||||
|
||||
def save(self) -> None:
|
||||
meta_dir = get_meta_dir(self.name)
|
||||
if not os.path.isdir(meta_dir):
|
||||
os.makedirs(meta_dir)
|
||||
with open(get_meta_file(self.name), "wt", encoding="utf-8") as f:
|
||||
f.write(json.dumps(self.Metadata))
|
||||
|
||||
tls_dir = get_tls_dir(self.name)
|
||||
for endpoint, tls in self.tls_cfg.items():
|
||||
if not os.path.isdir(os.path.join(tls_dir, endpoint)):
|
||||
os.makedirs(os.path.join(tls_dir, endpoint))
|
||||
|
||||
ca_file = tls.ca_cert
|
||||
if ca_file:
|
||||
copyfile(
|
||||
ca_file, os.path.join(tls_dir, endpoint, os.path.basename(ca_file))
|
||||
)
|
||||
|
||||
if tls.cert:
|
||||
cert_file, key_file = tls.cert
|
||||
copyfile(
|
||||
cert_file,
|
||||
os.path.join(tls_dir, endpoint, os.path.basename(cert_file)),
|
||||
)
|
||||
copyfile(
|
||||
key_file,
|
||||
os.path.join(tls_dir, endpoint, os.path.basename(key_file)),
|
||||
)
|
||||
|
||||
self.meta_path = get_meta_dir(self.name)
|
||||
self.tls_path = get_tls_dir(self.name)
|
||||
|
||||
def remove(self) -> None:
|
||||
if os.path.isdir(self.meta_path):
|
||||
rmtree(self.meta_path)
|
||||
if os.path.isdir(self.tls_path):
|
||||
rmtree(self.tls_path)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"<{self.__class__.__name__}: '{self.name}'>"
|
||||
|
||||
def __str__(self) -> str:
|
||||
return json.dumps(self.__call__(), indent=2)
|
||||
|
||||
def __call__(self) -> dict[str, t.Any]:
|
||||
result = self.Metadata
|
||||
result.update(self.TLSMaterial)
|
||||
result.update(self.Storage)
|
||||
return result
|
||||
|
||||
def is_docker_host(self) -> bool:
|
||||
return self.context_type is None
|
||||
|
||||
@property
|
||||
def Name(self) -> str: # pylint: disable=invalid-name
|
||||
return self.name
|
||||
|
||||
@property
|
||||
def Host(self) -> str | None: # pylint: disable=invalid-name
|
||||
if not self.orchestrator or self.orchestrator == "swarm":
|
||||
endpoint = self.endpoints.get("docker", None)
|
||||
if endpoint:
|
||||
return endpoint.get("Host", None) # type: ignore
|
||||
return None
|
||||
|
||||
return self.endpoints[self.orchestrator].get("Host", None) # type: ignore
|
||||
|
||||
@property
|
||||
def Orchestrator(self) -> str | None: # pylint: disable=invalid-name
|
||||
return self.orchestrator
|
||||
|
||||
@property
|
||||
def Metadata(self) -> dict[str, t.Any]: # pylint: disable=invalid-name
|
||||
meta: dict[str, t.Any] = {}
|
||||
if self.orchestrator:
|
||||
meta = {"StackOrchestrator": self.orchestrator}
|
||||
return {"Name": self.name, "Metadata": meta, "Endpoints": self.endpoints}
|
||||
|
||||
@property
|
||||
def TLSConfig(self) -> TLSConfig | None: # pylint: disable=invalid-name
|
||||
key = self.orchestrator
|
||||
if not key or key == "swarm":
|
||||
key = "docker"
|
||||
if key in self.tls_cfg:
|
||||
return self.tls_cfg[key]
|
||||
return None
|
||||
|
||||
@property
|
||||
def TLSMaterial(self) -> dict[str, t.Any]: # pylint: disable=invalid-name
|
||||
certs: dict[str, t.Any] = {}
|
||||
for endpoint, tls in self.tls_cfg.items():
|
||||
paths = [tls.ca_cert, *tls.cert] if tls.cert else [tls.ca_cert]
|
||||
certs[endpoint] = [
|
||||
os.path.basename(path) if path else None for path in paths
|
||||
]
|
||||
return {"TLSMaterial": certs}
|
||||
|
||||
@property
|
||||
def Storage(self) -> dict[str, t.Any]: # pylint: disable=invalid-name
|
||||
return {"Storage": {"MetadataPath": self.meta_path, "TLSPath": self.tls_path}}
|
||||
|
|
@ -0,0 +1,17 @@
|
|||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2022 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
|
||||
# Do not use this from other collections or standalone plugins/modules!
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
PROGRAM_PREFIX = "docker-credential-"
|
||||
DEFAULT_LINUX_STORE = "secretservice"
|
||||
DEFAULT_OSX_STORE = "osxkeychain"
|
||||
DEFAULT_WIN32_STORE = "wincred"
|
||||
|
|
@ -0,0 +1,38 @@
|
|||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2022 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
|
||||
# Do not use this from other collections or standalone plugins/modules!
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import typing as t
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from subprocess import CalledProcessError
|
||||
|
||||
|
||||
class StoreError(RuntimeError):
|
||||
pass
|
||||
|
||||
|
||||
class CredentialsNotFound(StoreError):
|
||||
pass
|
||||
|
||||
|
||||
class InitializationError(StoreError):
|
||||
pass
|
||||
|
||||
|
||||
def process_store_error(cpe: CalledProcessError, program: str) -> StoreError:
|
||||
message = cpe.output.decode("utf-8")
|
||||
if "credentials not found in native keychain" in message:
|
||||
return CredentialsNotFound(f"No matching credentials in {program}")
|
||||
return StoreError(
|
||||
f'Credentials store {program} exited with "{message.strip()}".'
|
||||
)
|
||||
|
|
@ -0,0 +1,102 @@
|
|||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
|
||||
#
|
||||
# Copyright (c) 2016-2022 Docker, Inc.
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
|
||||
# Do not use this from other collections or standalone plugins/modules!
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import errno
|
||||
import json
|
||||
import subprocess
|
||||
import typing as t
|
||||
|
||||
from . import constants, errors
|
||||
from .utils import create_environment_dict, find_executable
|
||||
|
||||
|
||||
class Store:
|
||||
def __init__(self, program: str, environment: dict[str, str] | None = None) -> None:
|
||||
"""Create a store object that acts as an interface to
|
||||
perform the basic operations for storing, retrieving
|
||||
and erasing credentials using `program`.
|
||||
"""
|
||||
self.program = constants.PROGRAM_PREFIX + program
|
||||
self.exe = find_executable(self.program)
|
||||
self.environment = environment
|
||||
if self.exe is None:
|
||||
raise errors.InitializationError(
|
||||
f"{self.program} not installed or not available in PATH"
|
||||
)
|
||||
|
||||
def get(self, server: str | bytes) -> dict[str, t.Any]:
|
||||
"""Retrieve credentials for `server`. If no credentials are found,
|
||||
a `StoreError` will be raised.
|
||||
"""
|
||||
if not isinstance(server, bytes):
|
||||
server = server.encode("utf-8")
|
||||
data = self._execute("get", server)
|
||||
result = json.loads(data.decode("utf-8"))
|
||||
|
||||
# docker-credential-pass will return an object for nonexistent servers
|
||||
# whereas other helpers will exit with returncode != 0. For
|
||||
# consistency, if no significant data is returned,
|
||||
# raise CredentialsNotFound
|
||||
if result["Username"] == "" and result["Secret"] == "":
|
||||
raise errors.CredentialsNotFound(
|
||||
f"No matching credentials in {self.program}"
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
def store(self, server: str, username: str, secret: str) -> bytes:
|
||||
"""Store credentials for `server`. Raises a `StoreError` if an error
|
||||
occurs.
|
||||
"""
|
||||
data_input = json.dumps(
|
||||
{"ServerURL": server, "Username": username, "Secret": secret}
|
||||
).encode("utf-8")
|
||||
return self._execute("store", data_input)
|
||||
|
||||
def erase(self, server: str | bytes) -> None:
|
||||
"""Erase credentials for `server`. Raises a `StoreError` if an error
|
||||
occurs.
|
||||
"""
|
||||
if not isinstance(server, bytes):
|
||||
server = server.encode("utf-8")
|
||||
self._execute("erase", server)
|
||||
|
||||
def list(self) -> t.Any:
|
||||
"""List stored credentials. Requires v0.4.0+ of the helper."""
|
||||
data = self._execute("list", None)
|
||||
return json.loads(data.decode("utf-8"))
|
||||
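# Usage sketch (illustrative, assumes a docker-credential-secretservice helper
# is installed on PATH; server and credentials are made up):
#
#     s = Store("secretservice")
#     s.store("https://registry.example.com", "user", "s3cr3t")
#     s.get("https://registry.example.com")
#     # -> {"ServerURL": "https://registry.example.com", "Username": "user", "Secret": "s3cr3t"}
#     s.erase("https://registry.example.com")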
|
||||
def _execute(self, subcmd: str, data_input: bytes | None) -> bytes:
|
||||
if self.exe is None:
|
||||
raise errors.StoreError(
|
||||
f"{self.program} not installed or not available in PATH"
|
||||
)
|
||||
output = None
|
||||
env = create_environment_dict(self.environment)
|
||||
try:
|
||||
output = subprocess.check_output(
|
||||
[self.exe, subcmd],
|
||||
input=data_input,
|
||||
env=env,
|
||||
)
|
||||
except subprocess.CalledProcessError as e:
|
||||
raise errors.process_store_error(e, self.program) from e
|
||||
except OSError as e:
|
||||
if e.errno == errno.ENOENT:
|
||||
raise errors.StoreError(
|
||||
f"{self.program} not installed or not available in PATH"
|
||||
) from e
|
||||
raise errors.StoreError(
|
||||
f'Unexpected OS error "{e.strerror}", errno={e.errno}'
|
||||
) from e
|
||||
return output
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import os
from shutil import which


def find_executable(executable: str, path: str | None = None) -> str | None:
    """
    As distutils.spawn.find_executable, but on Windows, look up
    every extension declared in PATHEXT instead of just `.exe`
    """
    # shutil.which() already uses PATHEXT on Windows, so on
    # Python 3 we can simply use shutil.which() in all cases.
    # (https://github.com/docker/docker-py/commit/42789818bed5d86b487a030e2e60b02bf0cfa284)
    return which(executable, path=path)


def create_environment_dict(overrides: dict[str, str] | None) -> dict[str, str]:
    """
    Create and return a copy of os.environ with the specified overrides
    """
    result = os.environ.copy()
    result.update(overrides or {})
    return result
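Editor's note: the override semantics of create_environment_dict in one self-contained check (the DOCKER_HOST value is illustrative):

import os

env = create_environment_dict({"DOCKER_HOST": "ssh://example.invalid"})
assert env["DOCKER_HOST"] == "ssh://example.invalid"  # override wins
assert set(os.environ) <= set(env)                    # the rest of os.environ is preserved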
@@ -0,0 +1,244 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import typing as t

from ansible.module_utils.common.text.converters import to_text

from ._import_helper import HTTPError as _HTTPError

if t.TYPE_CHECKING:
    from requests import Response


class DockerException(Exception):
    """
    A base class from which all other exceptions inherit.

    If you want to catch all errors that the Docker SDK might raise,
    catch this base exception.
    """


def create_api_error_from_http_exception(e: _HTTPError) -> t.NoReturn:
    """
    Create a suitable APIError from requests.exceptions.HTTPError.
    """
    response = e.response
    try:
        explanation = response.json()["message"]
    except ValueError:
        explanation = to_text((response.content or "").strip())
    cls = APIError
    if response.status_code == 404:
        if explanation and (
            "No such image" in str(explanation)
            or "not found: does not exist or no pull access" in str(explanation)
            or "repository does not exist" in str(explanation)
        ):
            cls = ImageNotFound
        else:
            cls = NotFound
    raise cls(e, response=response, explanation=explanation) from e


class APIError(_HTTPError, DockerException):
    """
    An HTTP error from the API.
    """

    def __init__(
        self,
        message: str | Exception,
        response: Response | None = None,
        explanation: str | None = None,
    ) -> None:
        # requests 1.2 supports response as a keyword argument, but
        # requests 1.1 does not
        super().__init__(message)
        self.response = response
        self.explanation = explanation or ""

    def __str__(self) -> str:
        message = super().__str__()

        if self.is_client_error():
            message = f"{self.response.status_code} Client Error for {self.response.url}: {self.response.reason}"

        elif self.is_server_error():
            message = f"{self.response.status_code} Server Error for {self.response.url}: {self.response.reason}"

        if self.explanation:
            message = f'{message} ("{self.explanation}")'

        return message

    @property
    def status_code(self) -> int | None:
        if self.response is not None:
            return self.response.status_code
        return None

    def is_error(self) -> bool:
        return self.is_client_error() or self.is_server_error()

    def is_client_error(self) -> bool:
        if self.status_code is None:
            return False
        return 400 <= self.status_code < 500

    def is_server_error(self) -> bool:
        if self.status_code is None:
            return False
        return 500 <= self.status_code < 600


class NotFound(APIError):
    pass


class ImageNotFound(NotFound):
    pass


class InvalidVersion(DockerException):
    pass


class InvalidRepository(DockerException):
    pass


class InvalidConfigFile(DockerException):
    pass


class InvalidArgument(DockerException):
    pass


class DeprecatedMethod(DockerException):
    pass


class TLSParameterError(DockerException):
    def __init__(self, msg: str) -> None:
        self.msg = msg

    def __str__(self) -> str:
        return self.msg + (
            ". TLS configurations should map the Docker CLI "
            "client configurations. See "
            "https://docs.docker.com/engine/articles/https/ "
            "for API details."
        )


class NullResource(DockerException, ValueError):
    pass


class ContainerError(DockerException):
    """
    Represents a container that has exited with a non-zero exit code.
    """

    def __init__(
        self,
        container: str,
        exit_status: int,
        command: list[str],
        image: str,
        stderr: str | None,
    ):
        self.container = container
        self.exit_status = exit_status
        self.command = command
        self.image = image
        self.stderr = stderr

        err = f": {stderr}" if stderr is not None else ""
        msg = f"Command '{command}' in image '{image}' returned non-zero exit status {exit_status}{err}"

        super().__init__(msg)


class StreamParseError(RuntimeError):
    def __init__(self, reason: Exception) -> None:
        self.msg = reason


class BuildError(DockerException):
    def __init__(self, reason: str, build_log: str) -> None:
        super().__init__(reason)
        self.msg = reason
        self.build_log = build_log


class ImageLoadError(DockerException):
    pass


def create_unexpected_kwargs_error(name: str, kwargs: dict[str, t.Any]) -> TypeError:
    quoted_kwargs = [f"'{k}'" for k in sorted(kwargs)]
    text = [f"{name}() "]
    if len(quoted_kwargs) == 1:
        text.append("got an unexpected keyword argument ")
    else:
        text.append("got unexpected keyword arguments ")
    text.append(", ".join(quoted_kwargs))
    return TypeError("".join(text))


class MissingContextParameter(DockerException):
    def __init__(self, param: str) -> None:
        self.param = param

    def __str__(self) -> str:
        return f"missing parameter: {self.param}"


class ContextAlreadyExists(DockerException):
    def __init__(self, name: str) -> None:
        self.name = name

    def __str__(self) -> str:
        return f"context {self.name} already exists"


class ContextException(DockerException):
    def __init__(self, msg: str) -> None:
        self.msg = msg

    def __str__(self) -> str:
        return self.msg


class ContextNotFound(DockerException):
    def __init__(self, name: str) -> None:
        self.name = name

    def __str__(self) -> str:
        return f"context '{self.name}' not found"


class MissingRequirementException(DockerException):
    def __init__(
        self, msg: str, requirement: str, import_exception: ImportError | str
    ) -> None:
        self.msg = msg
        self.requirement = requirement
        self.import_exception = import_exception

    def __str__(self) -> str:
        return self.msg
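Editor's note: a sketch of how the hierarchy above is meant to be caught, from most to least specific. do_docker_things() is a hypothetical caller into this API layer, not a function from this file.

try:
    do_docker_things()  # hypothetical caller
except ImageNotFound as exc:
    print(f"image missing: {exc.explanation}")
except APIError as exc:
    if exc.is_server_error():
        print(f"daemon-side failure: {exc}")
except DockerException as exc:
    print(f"any other SDK-level failure: {exc}")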
@@ -0,0 +1,107 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import os
import typing as t

from . import errors
from .transport.ssladapter import SSLHTTPAdapter

if t.TYPE_CHECKING:
    from ansible_collections.community.docker.plugins.module_utils._api.api.client import (
        APIClient,
    )


class TLSConfig:
    """
    TLS configuration.

    Args:
        client_cert (tuple of str): Path to client cert, path to client key.
        ca_cert (str): Path to CA cert file.
        verify (bool or str): This can be ``False`` or a path to a CA cert
            file.
        assert_hostname (bool): Verify the hostname of the server.

    .. _`SSL version`:
        https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
    """

    cert: tuple[str, str] | None = None
    ca_cert: str | None = None
    verify: bool | None = None

    def __init__(
        self,
        client_cert: tuple[str, str] | None = None,
        ca_cert: str | None = None,
        verify: bool | None = None,
        assert_hostname: bool | None = None,
    ):
        # Argument compatibility/mapping with
        # https://docs.docker.com/engine/articles/https/
        # This diverges from the Docker CLI in that users can specify 'tls'
        # here, but also disable any public/default CA pool verification by
        # leaving verify=False

        self.assert_hostname = assert_hostname

        # "client_cert" must have both or neither cert/key files. In
        # either case, alert the user when both are expected, but any are
        # missing.

        if client_cert:
            try:
                tls_cert, tls_key = client_cert
            except ValueError:
                raise errors.TLSParameterError(
                    "client_cert must be a tuple of (client certificate, key file)"
                ) from None

            if not (tls_cert and tls_key) or (
                not os.path.isfile(tls_cert) or not os.path.isfile(tls_key)
            ):
                raise errors.TLSParameterError(
                    "Path to a certificate and key files must be provided"
                    " through the client_cert param"
                )
            self.cert = (tls_cert, tls_key)

        # If verify is set, make sure the cert exists
        self.verify = verify
        self.ca_cert = ca_cert
        if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
            raise errors.TLSParameterError(
                "Invalid CA certificate provided for `ca_cert`."
            )

    def configure_client(self, client: APIClient) -> None:
        """
        Configure a client with these TLS options.
        """

        if self.verify and self.ca_cert:
            client.verify = self.ca_cert
        else:
            client.verify = self.verify

        if self.cert:
            client.cert = self.cert

        client.mount(
            "https://",
            SSLHTTPAdapter(
                assert_hostname=self.assert_hostname,
            ),
        )
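Editor's note: a sketch of constructing and applying the config above. The certificate paths are illustrative, and client is assumed to be an APIClient (a requests.Session subclass in this API layer) exposing .verify, .cert, and .mount().

# Illustrative paths; any existing cert/key/CA files would do.
tls_config = TLSConfig(
    client_cert=("/certs/cert.pem", "/certs/key.pem"),
    ca_cert="/certs/ca.pem",
    verify=True,
    assert_hostname=False,
)
tls_config.configure_client(client)  # client assumed to be an APIClient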
@@ -0,0 +1,35 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

from .._import_helper import HTTPAdapter as _HTTPAdapter


class BaseHTTPAdapter(_HTTPAdapter):
    def close(self) -> None:
        # pylint finds our HTTPAdapter stub instead of requests.adapters.HTTPAdapter:
        # pylint: disable-next=no-member
        super().close()
        if hasattr(self, "pools"):
            self.pools.clear()

    # Hotfix for requests 2.32.0 and 2.32.1: its commit
    # https://github.com/psf/requests/commit/c0813a2d910ea6b4f8438b91d315b8d181302356
    # changes requests.adapters.HTTPAdapter to no longer call get_connection() from
    # send(), but instead call _get_connection().
    def _get_connection(self, request, *args, **kwargs):  # type: ignore
        return self.get_connection(request.url, kwargs.get("proxies"))

    # Fix for requests 2.32.2+:
    # https://github.com/psf/requests/commit/c98e4d133ef29c46a9b68cd783087218a8075e05
    def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None):  # type: ignore
        return self.get_connection(request.url, proxies)
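Editor's note: the point of the two shims above is that subclasses only ever implement get_connection(); newer requests versions are re-routed to it. A purely illustrative subclass, not part of the collection:

class _EchoAdapter(BaseHTTPAdapter):  # hypothetical, for illustration only
    def get_connection(self, url, proxies=None):
        print(f"resolving pool for {url}")  # single override point for all requests versions
        return super().get_connection(url, proxies)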
@@ -0,0 +1,123 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import typing as t
from queue import Empty

from .. import constants
from .._import_helper import HTTPAdapter, urllib3, urllib3_connection
from .basehttpadapter import BaseHTTPAdapter
from .npipesocket import NpipeSocket

if t.TYPE_CHECKING:
    from collections.abc import Mapping

    from requests import PreparedRequest


RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer


class NpipeHTTPConnection(urllib3_connection.HTTPConnection):
    def __init__(self, npipe_path: str, timeout: int | float = 60) -> None:
        super().__init__("localhost", timeout=timeout)
        self.npipe_path = npipe_path
        self.timeout = timeout

    def connect(self) -> None:
        sock = NpipeSocket()
        sock.settimeout(self.timeout)
        sock.connect(self.npipe_path)
        self.sock = sock


class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    def __init__(
        self, npipe_path: str, timeout: int | float = 60, maxsize: int = 10
    ) -> None:
        super().__init__("localhost", timeout=timeout, maxsize=maxsize)
        self.npipe_path = npipe_path
        self.timeout = timeout

    def _new_conn(self) -> NpipeHTTPConnection:
        return NpipeHTTPConnection(self.npipe_path, self.timeout)

    # When re-using connections, urllib3 tries to call select() on our
    # NpipeSocket instance, causing a crash. To circumvent this, we override
    # _get_conn, where that check happens.
    def _get_conn(self, timeout: int | float) -> NpipeHTTPConnection:
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)

        except AttributeError as exc:  # self.pool is None
            raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from exc

        except Empty as exc:
            if self.block:
                raise urllib3.exceptions.EmptyPoolError(
                    self,
                    "Pool reached maximum size and no more connections are allowed.",
                ) from exc
            # Oh well, we'll create a new connection then

        return conn or self._new_conn()


class NpipeHTTPAdapter(BaseHTTPAdapter):
    __attrs__ = HTTPAdapter.__attrs__ + [
        "npipe_path",
        "pools",
        "timeout",
        "max_pool_size",
    ]

    def __init__(
        self,
        base_url: str,
        timeout: int | float = 60,
        pool_connections: int = constants.DEFAULT_NUM_POOLS,
        max_pool_size: int = constants.DEFAULT_MAX_POOL_SIZE,
    ) -> None:
        self.npipe_path = base_url.replace("npipe://", "")
        self.timeout = timeout
        self.max_pool_size = max_pool_size
        self.pools = RecentlyUsedContainer(
            pool_connections, dispose_func=lambda p: p.close()
        )
        super().__init__()

    def get_connection(
        self, url: str | bytes, proxies: Mapping[str, str] | None = None
    ) -> NpipeHTTPConnectionPool:
        with self.pools.lock:
            pool = self.pools.get(url)
            if pool:
                return pool

            pool = NpipeHTTPConnectionPool(
                self.npipe_path, self.timeout, maxsize=self.max_pool_size
            )
            self.pools[url] = pool

        return pool

    def request_url(
        self, request: PreparedRequest, proxies: Mapping[str, str] | None
    ) -> str:
        # The select_proxy utility in requests errors out when the provided URL
        # does not have a hostname, like is the case when using a UNIX socket.
        # Since proxies are an irrelevant notion in the case of UNIX sockets
        # anyway, we simply return the path URL directly.
        # See also: https://github.com/docker/docker-sdk-python/issues/811
        return request.path_url
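Editor's note: a sketch of how this adapter is typically mounted, assuming a Windows host with Docker Desktop. The "http+docker://" scheme and "localnpipe" host are conventions borrowed from the Docker SDK, used here only for illustration.

import requests

session = requests.Session()
session.mount("http+docker://", NpipeHTTPAdapter("npipe:////./pipe/docker_engine"))
# The hostname is a placeholder; request_url() strips it and sends only the path.
resp = session.get("http+docker://localnpipe/version")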
@@ -0,0 +1,277 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import functools
import io
import time
import traceback
import typing as t

PYWIN32_IMPORT_ERROR: str | None  # pylint: disable=invalid-name
try:
    import pywintypes
    import win32api
    import win32event
    import win32file
    import win32pipe
except ImportError:
    PYWIN32_IMPORT_ERROR = traceback.format_exc()  # pylint: disable=invalid-name
else:
    PYWIN32_IMPORT_ERROR = None  # pylint: disable=invalid-name

if t.TYPE_CHECKING:
    from collections.abc import Buffer, Callable

    _Self = t.TypeVar("_Self")
    _P = t.ParamSpec("_P")
    _R = t.TypeVar("_R")


ERROR_PIPE_BUSY = 0xE7
SECURITY_SQOS_PRESENT = 0x100000
SECURITY_ANONYMOUS = 0

MAXIMUM_RETRY_COUNT = 10


def check_closed(
    f: Callable[t.Concatenate[_Self, _P], _R],
) -> Callable[t.Concatenate[_Self, _P], _R]:
    @functools.wraps(f)
    def wrapped(self: _Self, *args: _P.args, **kwargs: _P.kwargs) -> _R:
        if self._closed:  # type: ignore
            raise RuntimeError("Can not reuse socket after connection was closed.")
        return f(self, *args, **kwargs)

    return wrapped


class NpipeSocket:
    """Partial implementation of the socket API over windows named pipes.
    This implementation is only designed to be used as a client socket,
    and server-specific methods (bind, listen, accept...) are not
    implemented.
    """

    def __init__(self, handle: t.Any | None = None) -> None:
        self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
        self._handle = handle
        self._address: str | None = None
        self._closed = False
        self.flags: int | None = None

    def accept(self) -> t.NoReturn:
        raise NotImplementedError()

    def bind(self, address: t.Any) -> t.NoReturn:
        raise NotImplementedError()

    def close(self) -> None:
        if self._handle is None:
            raise ValueError("Handle not present")
        self._handle.Close()
        self._closed = True

    @check_closed
    def connect(self, address: str, retry_count: int = 0) -> None:
        try:
            handle = win32file.CreateFile(
                address,
                win32file.GENERIC_READ | win32file.GENERIC_WRITE,
                0,
                None,
                win32file.OPEN_EXISTING,
                (
                    SECURITY_ANONYMOUS
                    | SECURITY_SQOS_PRESENT
                    | win32file.FILE_FLAG_OVERLAPPED
                ),
                0,
            )
        except win32pipe.error as e:
            # See Remarks:
            # https://msdn.microsoft.com/en-us/library/aa365800.aspx
            if e.winerror == ERROR_PIPE_BUSY:
                # Another program or thread has grabbed our pipe instance
                # before we got to it. Wait for availability and attempt to
                # connect again.
                retry_count = retry_count + 1
                if retry_count < MAXIMUM_RETRY_COUNT:
                    time.sleep(1)
                    return self.connect(address, retry_count)
            raise e

        self.flags = win32pipe.GetNamedPipeInfo(handle)[0]  # type: ignore

        self._handle = handle
        self._address = address

    @check_closed
    def connect_ex(self, address: str) -> None:
        self.connect(address)

    @check_closed
    def detach(self) -> t.Any:
        self._closed = True
        return self._handle

    @check_closed
    def dup(self) -> NpipeSocket:
        return NpipeSocket(self._handle)

    def getpeername(self) -> str | None:
        return self._address

    def getsockname(self) -> str | None:
        return self._address

    def getsockopt(
        self, level: t.Any, optname: t.Any, buflen: t.Any = None
    ) -> t.NoReturn:
        raise NotImplementedError()

    def ioctl(self, control: t.Any, option: t.Any) -> t.NoReturn:
        raise NotImplementedError()

    def listen(self, backlog: t.Any) -> t.NoReturn:
        raise NotImplementedError()

    def makefile(self, mode: str, bufsize: int | None = None) -> t.IO[bytes]:
        if mode.strip("b") != "r":
            raise NotImplementedError()
        rawio = NpipeFileIOBase(self)
        if bufsize is None or bufsize <= 0:
            bufsize = io.DEFAULT_BUFFER_SIZE
        return io.BufferedReader(rawio, buffer_size=bufsize)

    @check_closed
    def recv(self, bufsize: int, flags: int = 0) -> str:
        if self._handle is None:
            raise ValueError("Handle not present")
        dummy_err, data = win32file.ReadFile(self._handle, bufsize)
        return data

    @check_closed
    def recvfrom(self, bufsize: int, flags: int = 0) -> tuple[str, str | None]:
        data = self.recv(bufsize, flags)
        return (data, self._address)

    @check_closed
    def recvfrom_into(
        self, buf: Buffer, nbytes: int = 0, flags: int = 0
    ) -> tuple[int, str | None]:
        return self.recv_into(buf, nbytes), self._address

    @check_closed
    def recv_into(self, buf: Buffer, nbytes: int = 0) -> int:
        if self._handle is None:
            raise ValueError("Handle not present")
        readbuf = buf if isinstance(buf, memoryview) else memoryview(buf)

        event = win32event.CreateEvent(None, True, True, None)
        try:
            overlapped = pywintypes.OVERLAPPED()
            overlapped.hEvent = event
            dummy_err, dummy_data = win32file.ReadFile(  # type: ignore
                self._handle, readbuf[:nbytes] if nbytes else readbuf, overlapped
            )
            wait_result = win32event.WaitForSingleObject(event, self._timeout)
            if wait_result == win32event.WAIT_TIMEOUT:
                win32file.CancelIo(self._handle)
                raise TimeoutError
            return win32file.GetOverlappedResult(self._handle, overlapped, 0)
        finally:
            win32api.CloseHandle(event)

    @check_closed
    def send(self, string: Buffer, flags: int = 0) -> int:
        if self._handle is None:
            raise ValueError("Handle not present")
        event = win32event.CreateEvent(None, True, True, None)
        try:
            overlapped = pywintypes.OVERLAPPED()
            overlapped.hEvent = event
            win32file.WriteFile(self._handle, string, overlapped)  # type: ignore
            wait_result = win32event.WaitForSingleObject(event, self._timeout)
            if wait_result == win32event.WAIT_TIMEOUT:
                win32file.CancelIo(self._handle)
                raise TimeoutError
            return win32file.GetOverlappedResult(self._handle, overlapped, 0)
        finally:
            win32api.CloseHandle(event)

    @check_closed
    def sendall(self, string: Buffer, flags: int = 0) -> int:
        return self.send(string, flags)

    @check_closed
    def sendto(self, string: Buffer, address: str) -> int:
        self.connect(address)
        return self.send(string)

    def setblocking(self, flag: bool) -> None:
        if flag:
            return self.settimeout(None)
        return self.settimeout(0)

    def settimeout(self, value: int | float | None) -> None:
        if value is None:
            # Blocking mode
            self._timeout = win32event.INFINITE
        elif not isinstance(value, (float, int)) or value < 0:
            raise ValueError("Timeout value out of range")
        else:
            # Timeout mode - Value converted to milliseconds
            self._timeout = int(value * 1000)

    def gettimeout(self) -> int | float | None:
        return self._timeout

    def setsockopt(self, level: t.Any, optname: t.Any, value: t.Any) -> t.NoReturn:
        raise NotImplementedError()

    @check_closed
    def shutdown(self, how: t.Any) -> None:
        return self.close()


class NpipeFileIOBase(io.RawIOBase):
    def __init__(self, npipe_socket: NpipeSocket | None) -> None:
        self.sock = npipe_socket

    def close(self) -> None:
        super().close()
        self.sock = None

    def fileno(self) -> int:
        if self.sock is None:
            raise RuntimeError("socket is closed")
        # TODO: This is definitely a bug, NpipeSocket.fileno() does not exist!
        return self.sock.fileno()  # type: ignore

    def isatty(self) -> bool:
        return False

    def readable(self) -> bool:
        return True

    def readinto(self, buf: Buffer) -> int:
        if self.sock is None:
            raise RuntimeError("socket is closed")
        return self.sock.recv_into(buf)

    def seekable(self) -> bool:
        return False

    def writable(self) -> bool:
        return False
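Editor's note: a minimal sketch of driving NpipeSocket directly, assuming a Windows host with pywin32 and a running Docker engine exposing the default pipe. The raw HTTP/1.0 ping is illustrative only; real callers go through NpipeHTTPConnection above.

sock = NpipeSocket()
sock.settimeout(5)
sock.connect(r"\\.\pipe\docker_engine")
reader = sock.makefile("rb")                   # read side via NpipeFileIOBase
sock.sendall(b"GET /_ping HTTP/1.0\r\n\r\n")
print(reader.read(64))                         # expect an HTTP 200 status line
sock.close()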
@@ -0,0 +1,311 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import logging
import os
import signal
import socket
import subprocess
import traceback
import typing as t
from queue import Empty
from urllib.parse import urlparse

from .. import constants
from .._import_helper import HTTPAdapter, urllib3, urllib3_connection
from .basehttpadapter import BaseHTTPAdapter

PARAMIKO_IMPORT_ERROR: str | None  # pylint: disable=invalid-name
try:
    import paramiko
except ImportError:
    PARAMIKO_IMPORT_ERROR = traceback.format_exc()  # pylint: disable=invalid-name
else:
    PARAMIKO_IMPORT_ERROR = None  # pylint: disable=invalid-name

if t.TYPE_CHECKING:
    from collections.abc import Buffer, Mapping


RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer


class SSHSocket(socket.socket):
    def __init__(self, host: str) -> None:
        super().__init__(socket.AF_INET, socket.SOCK_STREAM)
        self.host = host
        self.port = None
        self.user = None
        if ":" in self.host:
            self.host, self.port = self.host.split(":")
        if "@" in self.host:
            self.user, self.host = self.host.split("@")

        self.proc: subprocess.Popen | None = None

    def connect(self, *args_: t.Any, **kwargs: t.Any) -> None:
        args = ["ssh"]
        if self.user:
            args = args + ["-l", self.user]

        if self.port:
            args = args + ["-p", self.port]

        args = args + ["--", self.host, "docker system dial-stdio"]

        preexec_func = None
        if not constants.IS_WINDOWS_PLATFORM:

            def f() -> None:
                signal.signal(signal.SIGINT, signal.SIG_IGN)

            preexec_func = f

        env = dict(os.environ)

        # drop LD_LIBRARY_PATH and SSL_CERT_FILE
        env.pop("LD_LIBRARY_PATH", None)
        env.pop("SSL_CERT_FILE", None)

        self.proc = subprocess.Popen(  # pylint: disable=consider-using-with
            args,
            env=env,
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            preexec_fn=preexec_func,
        )

    def _write(self, data: Buffer) -> int:
        if not self.proc:
            raise RuntimeError(
                "SSH subprocess not initiated. connect() must be called first."
            )
        assert self.proc.stdin is not None
        if self.proc.stdin.closed:
            raise RuntimeError(
                "SSH subprocess not initiated. connect() must be called first after close()."
            )
        written = self.proc.stdin.write(data)
        self.proc.stdin.flush()
        return written

    def sendall(self, data: Buffer, *args: t.Any, **kwargs: t.Any) -> None:
        self._write(data)

    def send(self, data: Buffer, *args: t.Any, **kwargs: t.Any) -> int:
        return self._write(data)

    def recv(self, n: int, *args: t.Any, **kwargs: t.Any) -> bytes:
        if not self.proc:
            raise RuntimeError(
                "SSH subprocess not initiated. connect() must be called first."
            )
        assert self.proc.stdout is not None
        return self.proc.stdout.read(n)

    def makefile(self, mode: str, *args: t.Any, **kwargs: t.Any) -> t.IO:  # type: ignore
        if not self.proc:
            self.connect()
        assert self.proc is not None
        assert self.proc.stdout is not None
        self.proc.stdout.channel = self  # type: ignore

        return self.proc.stdout

    def close(self) -> None:
        if not self.proc:
            return
        assert self.proc.stdin is not None
        if self.proc.stdin.closed:
            return
        self.proc.stdin.write(b"\n\n")
        self.proc.stdin.flush()
        self.proc.terminate()


class SSHConnection(urllib3_connection.HTTPConnection):
    def __init__(
        self,
        *,
        ssh_transport: paramiko.Transport | None = None,
        timeout: int | float = 60,
        host: str,
    ) -> None:
        super().__init__("localhost", timeout=timeout)
        self.ssh_transport = ssh_transport
        self.timeout = timeout
        self.ssh_host = host
        self.sock: paramiko.Channel | SSHSocket | None = None

    def connect(self) -> None:
        if self.ssh_transport:
            channel = self.ssh_transport.open_session()
            channel.settimeout(self.timeout)
            channel.exec_command("docker system dial-stdio")
            self.sock = channel
        else:
            sock = SSHSocket(self.ssh_host)
            sock.settimeout(self.timeout)
            sock.connect()
            self.sock = sock


class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    scheme = "ssh"

    def __init__(
        self,
        *,
        ssh_client: paramiko.SSHClient | None = None,
        timeout: int | float = 60,
        maxsize: int = 10,
        host: str,
    ) -> None:
        super().__init__("localhost", timeout=timeout, maxsize=maxsize)
        self.ssh_transport: paramiko.Transport | None = None
        self.timeout = timeout
        if ssh_client:
            self.ssh_transport = ssh_client.get_transport()
        self.ssh_host = host

    def _new_conn(self) -> SSHConnection:
        return SSHConnection(
            ssh_transport=self.ssh_transport,
            timeout=self.timeout,
            host=self.ssh_host,
        )

    # When re-using connections, urllib3 calls fileno() on our
    # SSH channel instance, quickly overloading our fd limit. To avoid this,
    # we override _get_conn.
    def _get_conn(self, timeout: int | float) -> SSHConnection:
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)

        except AttributeError as exc:  # self.pool is None
            raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from exc

        except Empty as exc:
            if self.block:
                raise urllib3.exceptions.EmptyPoolError(
                    self,
                    "Pool reached maximum size and no more connections are allowed.",
                ) from exc
            # Oh well, we'll create a new connection then

        return conn or self._new_conn()


class SSHHTTPAdapter(BaseHTTPAdapter):
    __attrs__ = HTTPAdapter.__attrs__ + [
        "pools",
        "timeout",
        "ssh_client",
        "ssh_params",
        "max_pool_size",
    ]

    def __init__(
        self,
        base_url: str,
        timeout: int | float = 60,
        pool_connections: int = constants.DEFAULT_NUM_POOLS,
        max_pool_size: int = constants.DEFAULT_MAX_POOL_SIZE,
        shell_out: bool = False,
    ) -> None:
        self.ssh_client: paramiko.SSHClient | None = None
        if not shell_out:
            self._create_paramiko_client(base_url)
            self._connect()

        self.ssh_host = base_url
        if base_url.startswith("ssh://"):
            self.ssh_host = base_url[len("ssh://") :]

        self.timeout = timeout
        self.max_pool_size = max_pool_size
        self.pools = RecentlyUsedContainer(
            pool_connections, dispose_func=lambda p: p.close()
        )
        super().__init__()

    def _create_paramiko_client(self, base_url: str) -> None:
        logging.getLogger("paramiko").setLevel(logging.WARNING)
        self.ssh_client = paramiko.SSHClient()
        base_url_p = urlparse(base_url)
        assert base_url_p.hostname is not None
        self.ssh_params: dict[str, t.Any] = {
            "hostname": base_url_p.hostname,
            "port": base_url_p.port,
            "username": base_url_p.username,
        }
        ssh_config_file = os.path.expanduser("~/.ssh/config")
        if os.path.exists(ssh_config_file):
            conf = paramiko.SSHConfig()
            with open(ssh_config_file, "rt", encoding="utf-8") as f:
                conf.parse(f)
            host_config = conf.lookup(base_url_p.hostname)
            if "proxycommand" in host_config:
                self.ssh_params["sock"] = paramiko.ProxyCommand(
                    host_config["proxycommand"]
                )
            if "hostname" in host_config:
                self.ssh_params["hostname"] = host_config["hostname"]
            if base_url_p.port is None and "port" in host_config:
                self.ssh_params["port"] = host_config["port"]
            if base_url_p.username is None and "user" in host_config:
                self.ssh_params["username"] = host_config["user"]
            if "identityfile" in host_config:
                self.ssh_params["key_filename"] = host_config["identityfile"]

        self.ssh_client.load_system_host_keys()
        self.ssh_client.set_missing_host_key_policy(paramiko.RejectPolicy())

    def _connect(self) -> None:
        if self.ssh_client:
            self.ssh_client.connect(**self.ssh_params)

    def get_connection(
        self, url: str | bytes, proxies: Mapping[str, str] | None = None
    ) -> SSHConnectionPool:
        if not self.ssh_client:
            return SSHConnectionPool(
                ssh_client=self.ssh_client,
                timeout=self.timeout,
                maxsize=self.max_pool_size,
                host=self.ssh_host,
            )
        with self.pools.lock:
            pool = self.pools.get(url)
            if pool:
                return pool

            # Connection is closed, try a reconnect
            if self.ssh_client and not self.ssh_client.get_transport():
                self._connect()

            pool = SSHConnectionPool(
                ssh_client=self.ssh_client,
                timeout=self.timeout,
                maxsize=self.max_pool_size,
                host=self.ssh_host,
            )
            self.pools[url] = pool

        return pool

    def close(self) -> None:
        super().close()
        if self.ssh_client:
            self.ssh_client.close()
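Editor's note: a sketch of reaching a remote daemon over SSH with the adapter above. shell_out=True tunnels through the local `ssh` binary via SSHSocket; shell_out=False drives paramiko directly. Host, port, and the "http+docker://ssh" base URL are illustrative assumptions.

import requests

session = requests.Session()
session.mount(
    "http+docker://",
    SSHHTTPAdapter("ssh://user@build-host:22", shell_out=True),
)
print(session.get("http+docker://ssh/version").json())  # hypothetical base URL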
@@ -0,0 +1,71 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import typing as t

from .._import_helper import HTTPAdapter, urllib3
from .basehttpadapter import BaseHTTPAdapter

# Resolves OpenSSL issues in some servers:
# https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
# https://github.com/kennethreitz/requests/pull/799


PoolManager = urllib3.poolmanager.PoolManager


class SSLHTTPAdapter(BaseHTTPAdapter):
    """An HTTPS Transport Adapter that uses an arbitrary SSL version."""

    __attrs__ = HTTPAdapter.__attrs__ + ["assert_hostname"]

    def __init__(
        self,
        assert_hostname: bool | None = None,
        **kwargs: t.Any,
    ) -> None:
        self.assert_hostname = assert_hostname
        super().__init__(**kwargs)

    def init_poolmanager(
        self, connections: int, maxsize: int, block: bool = False, **kwargs: t.Any
    ) -> None:
        kwargs = {
            "num_pools": connections,
            "maxsize": maxsize,
            "block": block,
        }
        if self.assert_hostname is not None:
            kwargs["assert_hostname"] = self.assert_hostname

        self.poolmanager = PoolManager(**kwargs)

    def get_connection(self, *args: t.Any, **kwargs: t.Any) -> urllib3.ConnectionPool:
        """
        Ensure assert_hostname is set correctly on our pool.

        We already take care of a normal poolmanager via init_poolmanager,
        but we still need to take care of when there is a proxy poolmanager.

        Note that this method is no longer called for newer requests versions.
        """
        # pylint finds our HTTPAdapter stub instead of requests.adapters.HTTPAdapter:
        # pylint: disable-next=no-member
        conn = super().get_connection(*args, **kwargs)
        if (
            self.assert_hostname is not None
            and conn.assert_hostname != self.assert_hostname  # type: ignore
        ):
            conn.assert_hostname = self.assert_hostname  # type: ignore
        return conn
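Editor's note: a sketch of the one knob this adapter adds, assert_hostname, for the common case of a daemon reached by IP address while the certificate chain is still verified. The address and CA path are illustrative.

import requests

session = requests.Session()
session.mount("https://", SSLHTTPAdapter(assert_hostname=False))
session.verify = "/certs/ca.pem"                 # illustrative CA bundle
session.get("https://203.0.113.10:2376/_ping")   # cert chain checked, hostname not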
@@ -0,0 +1,126 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import socket
import typing as t

from .. import constants
from .._import_helper import HTTPAdapter, urllib3, urllib3_connection
from .basehttpadapter import BaseHTTPAdapter

if t.TYPE_CHECKING:
    from collections.abc import Mapping

    from requests import PreparedRequest

    from ..._socket_helper import SocketLike


RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer


class UnixHTTPConnection(urllib3_connection.HTTPConnection):
    def __init__(
        self, base_url: str | bytes, unix_socket: str, timeout: int | float = 60
    ) -> None:
        super().__init__("localhost", timeout=timeout)
        self.base_url = base_url
        self.unix_socket = unix_socket
        self.timeout = timeout
        self.disable_buffering = False

    def connect(self) -> None:
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.settimeout(self.timeout)
        sock.connect(self.unix_socket)
        self.sock = sock

    def putheader(self, header: str, *values: str) -> None:
        super().putheader(header, *values)
        if header == "Connection" and "Upgrade" in values:
            self.disable_buffering = True

    def response_class(self, sock: SocketLike, *args: t.Any, **kwargs: t.Any) -> t.Any:
        # FIXME: We may need to disable buffering on Py3,
        # but there's no clear way to do it at the moment. See:
        # https://github.com/docker/docker-py/issues/1799
        return super().response_class(sock, *args, **kwargs)


class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    def __init__(
        self,
        base_url: str | bytes,
        socket_path: str,
        timeout: int | float = 60,
        maxsize: int = 10,
    ) -> None:
        super().__init__("localhost", timeout=timeout, maxsize=maxsize)
        self.base_url = base_url
        self.socket_path = socket_path
        self.timeout = timeout

    def _new_conn(self) -> UnixHTTPConnection:
        return UnixHTTPConnection(self.base_url, self.socket_path, self.timeout)


class UnixHTTPAdapter(BaseHTTPAdapter):
    __attrs__ = HTTPAdapter.__attrs__ + [
        "pools",
        "socket_path",
        "timeout",
        "max_pool_size",
    ]

    def __init__(
        self,
        socket_url: str,
        timeout: int | float = 60,
        pool_connections: int = constants.DEFAULT_NUM_POOLS,
        max_pool_size: int = constants.DEFAULT_MAX_POOL_SIZE,
    ) -> None:
        socket_path = socket_url.replace("http+unix://", "")
        if not socket_path.startswith("/"):
            socket_path = "/" + socket_path
        self.socket_path = socket_path
        self.timeout = timeout
        self.max_pool_size = max_pool_size

        def f(p: t.Any) -> None:
            p.close()

        self.pools = RecentlyUsedContainer(pool_connections, dispose_func=f)
        super().__init__()

    def get_connection(
        self, url: str | bytes, proxies: Mapping[str, str] | None = None
    ) -> UnixHTTPConnectionPool:
        with self.pools.lock:
            pool = self.pools.get(url)
            if pool:
                return pool

            pool = UnixHTTPConnectionPool(
                url, self.socket_path, self.timeout, maxsize=self.max_pool_size
            )
            self.pools[url] = pool

        return pool

    def request_url(self, request: PreparedRequest, proxies: Mapping[str, str]) -> str:
        # The select_proxy utility in requests errors out when the provided URL
        # does not have a hostname, like is the case when using a UNIX socket.
        # Since proxies are an irrelevant notion in the case of UNIX sockets
        # anyway, we simply return the path URL directly.
        # See also: https://github.com/docker/docker-py/issues/811
        return request.path_url
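Editor's note: the canonical local-daemon setup as a sketch; assumes a daemon listening on /var/run/docker.sock. The "http+docker://localhost" base URL is a placeholder, since request_url() discards the hostname and sends only the path over the socket.

import requests

session = requests.Session()
session.mount(
    "http+docker://",
    UnixHTTPAdapter("http+unix:///var/run/docker.sock"),
)
print(session.get("http+docker://localhost/_ping").text)  # "OK" on a healthy daemon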
@@ -0,0 +1,90 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import socket
import typing as t

from .._import_helper import urllib3
from ..errors import DockerException

if t.TYPE_CHECKING:
    from requests import Response

_T = t.TypeVar("_T")


class CancellableStream(t.Generic[_T]):
    """
    Stream wrapper for real-time events, logs, etc. from the server.

    Example:
        >>> events = client.events()
        >>> for event in events:
        ...     print(event)
        >>> # and cancel from another thread
        >>> events.close()
    """

    def __init__(self, stream: t.Generator[_T], response: Response) -> None:
        self._stream = stream
        self._response = response

    def __iter__(self) -> t.Self:
        return self

    def __next__(self) -> _T:
        try:
            return next(self._stream)
        except urllib3.exceptions.ProtocolError as exc:
            raise StopIteration from exc
        except socket.error as exc:
            raise StopIteration from exc

    next = __next__

    def close(self) -> None:
        """
        Closes the event streaming.
        """

        if not self._response.raw.closed:
            # find the underlying socket object
            # based on api.client._get_raw_response_socket

            sock_fp = self._response.raw._fp.fp  # type: ignore

            if hasattr(sock_fp, "raw"):
                sock_raw = sock_fp.raw

                if hasattr(sock_raw, "sock"):
                    sock = sock_raw.sock

                elif hasattr(sock_raw, "_sock"):
                    sock = sock_raw._sock

            elif hasattr(sock_fp, "channel"):
                # We are working with a paramiko (SSH) channel, which does not
                # support cancelable streams with the current implementation
                raise DockerException(
                    "Cancellable streams not supported for the SSH protocol"
                )
            else:
                sock = sock_fp._sock  # type: ignore

            if hasattr(urllib3.contrib, "pyopenssl") and isinstance(
                sock, urllib3.contrib.pyopenssl.WrappedSocket
            ):
                sock = sock.socket

            sock.shutdown(socket.SHUT_RDWR)
            sock.close()
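Editor's note: a sketch of wrapping a streaming response in CancellableStream. Here resp is assumed to be a streaming requests.Response from the daemon, and handle()/should_stop() are hypothetical consumer hooks; close() severs the underlying socket so iteration ends with StopIteration.

def _lines(resp):
    yield from resp.iter_lines()

stream = CancellableStream(_lines(resp), resp)
for event in stream:
    handle(event)       # hypothetical consumer
    if should_stop():   # hypothetical predicate
        stream.close()  # typically called from another thread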
@@ -0,0 +1,310 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import io
import os
import random
import re
import tarfile
import tempfile
import typing as t

from ..constants import IS_WINDOWS_PLATFORM, WINDOWS_LONGPATH_PREFIX
from . import fnmatch

if t.TYPE_CHECKING:
    from collections.abc import Sequence


_SEP = re.compile("/|\\\\") if IS_WINDOWS_PLATFORM else re.compile("/")


def tar(
    path: str,
    exclude: list[str] | None = None,
    dockerfile: tuple[str, str | None] | tuple[None, None] | None = None,
    fileobj: t.IO[bytes] | None = None,
    gzip: bool = False,
) -> t.IO[bytes]:
    root = os.path.abspath(path)
    exclude = exclude or []
    dockerfile = dockerfile or (None, None)
    extra_files: list[tuple[str, str]] = []
    if dockerfile[1] is not None:
        assert dockerfile[0] is not None
        dockerignore_contents = "\n".join(
            (exclude or [".dockerignore"]) + [dockerfile[0]]
        )
        extra_files = [
            (".dockerignore", dockerignore_contents),
            dockerfile,  # type: ignore
        ]
    return create_archive(
        files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile[0])),
        root=root,
        fileobj=fileobj,
        gzip=gzip,
        extra_files=extra_files,
    )


def exclude_paths(
    root: str, patterns: list[str], dockerfile: str | None = None
) -> set[str]:
    """
    Given a root directory path and a list of .dockerignore patterns, return
    an iterator of all paths (both regular files and directories) in the root
    directory that do *not* match any of the patterns.

    All paths returned are relative to the root.
    """

    if dockerfile is None:
        dockerfile = "Dockerfile"

    patterns.append("!" + dockerfile)
    pm = PatternMatcher(patterns)
    return set(pm.walk(root))


def build_file_list(root: str) -> list[str]:
    files = []
    for dirname, dirnames, fnames in os.walk(root):
        for filename in fnames + dirnames:
            longpath = os.path.join(dirname, filename)
            files.append(longpath.replace(root, "", 1).lstrip("/"))

    return files


def create_archive(
    root: str,
    files: Sequence[str] | None = None,
    fileobj: t.IO[bytes] | None = None,
    gzip: bool = False,
    extra_files: Sequence[tuple[str, str]] | None = None,
) -> t.IO[bytes]:
    extra_files = extra_files or []
    if not fileobj:
        # pylint: disable-next=consider-using-with
        fileobj = tempfile.NamedTemporaryFile()  # noqa: SIM115

    with tarfile.open(mode="w:gz" if gzip else "w", fileobj=fileobj) as tarf:
        if files is None:
            files = build_file_list(root)
        extra_names = set(e[0] for e in extra_files)
        for path in files:
            if path in extra_names:
                # Extra files override context files with the same name
                continue
            full_path = os.path.join(root, path)

            i = tarf.gettarinfo(full_path, arcname=path)
            if i is None:
                # This happens when we encounter a socket file. We can safely
                # ignore it and proceed.
                continue  # type: ignore

            # Workaround https://bugs.python.org/issue32713
            if i.mtime < 0 or i.mtime > 8**11 - 1:
                i.mtime = int(i.mtime)

            if IS_WINDOWS_PLATFORM:
                # Windows does not keep track of the execute bit, so we make files
                # and directories executable by default.
                i.mode = i.mode & 0o755 | 0o111

            if i.isfile():
                try:
                    with open(full_path, "rb") as f:
                        tarf.addfile(i, f)
                except IOError as exc:
                    raise IOError(f"Can not read file in context: {full_path}") from exc
            else:
                # Directories, FIFOs, symlinks... do not need to be read.
                tarf.addfile(i, None)

        for name, contents in extra_files:
            info = tarfile.TarInfo(name)
            contents_encoded = contents.encode("utf-8")
            info.size = len(contents_encoded)
            tarf.addfile(info, io.BytesIO(contents_encoded))

    fileobj.seek(0)
    return fileobj


def mkbuildcontext(dockerfile: io.BytesIO | t.IO[bytes]) -> t.IO[bytes]:
    # pylint: disable-next=consider-using-with
    f = tempfile.NamedTemporaryFile()  # noqa: SIM115
    try:
        with tarfile.open(mode="w", fileobj=f) as tarf:
            if isinstance(dockerfile, io.StringIO):  # type: ignore
                raise TypeError("Please use io.BytesIO to create in-memory Dockerfiles")
            if isinstance(dockerfile, io.BytesIO):
                dfinfo = tarfile.TarInfo("Dockerfile")
                dfinfo.size = len(dockerfile.getvalue())
                dockerfile.seek(0)
            else:
                dfinfo = tarf.gettarinfo(fileobj=dockerfile, arcname="Dockerfile")
            tarf.addfile(dfinfo, dockerfile)
        f.seek(0)
    except Exception:  # noqa: E722
        f.close()
        raise
    return f


def split_path(p: str) -> list[str]:
    return [pt for pt in re.split(_SEP, p) if pt and pt != "."]


def normalize_slashes(p: str) -> str:
    if IS_WINDOWS_PLATFORM:
        return "/".join(split_path(p))
    return p


def walk(root: str, patterns: Sequence[str], default: bool = True) -> t.Generator[str]:
    pm = PatternMatcher(patterns)
    return pm.walk(root)


# Heavily based on
# https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go
class PatternMatcher:
    def __init__(self, patterns: Sequence[str]) -> None:
        self.patterns = list(filter(lambda p: p.dirs, [Pattern(p) for p in patterns]))
        self.patterns.append(Pattern("!.dockerignore"))

    def matches(self, filepath: str) -> bool:
        matched = False
        parent_path = os.path.dirname(filepath)
        parent_path_dirs = split_path(parent_path)

        for pattern in self.patterns:
            negative = pattern.exclusion
            match = pattern.match(filepath)
            if (
                not match
                and parent_path != ""
                and len(pattern.dirs) <= len(parent_path_dirs)
            ):
                match = pattern.match(
                    os.path.sep.join(parent_path_dirs[: len(pattern.dirs)])
                )

            if match:
                matched = not negative

        return matched

    def walk(self, root: str) -> t.Generator[str]:
        def rec_walk(current_dir: str) -> t.Generator[str]:
            for f in os.listdir(current_dir):
                fpath = os.path.join(os.path.relpath(current_dir, root), f)
                if fpath.startswith("." + os.path.sep):
                    fpath = fpath[2:]
                match = self.matches(fpath)
                if not match:
                    yield fpath

                cur = os.path.join(root, fpath)
                if not os.path.isdir(cur) or os.path.islink(cur):
                    continue

                if match:
                    # If we want to skip this file and it is a directory
                    # then we should first check to see if there's an
                    # excludes pattern (e.g. !dir/file) that starts with this
                    # dir. If so then we cannot skip this dir.
                    skip = True

                    for pat in self.patterns:
                        if not pat.exclusion:
                            continue
                        if pat.cleaned_pattern.startswith(normalize_slashes(fpath)):
                            skip = False
                            break
                    if skip:
                        continue
                yield from rec_walk(cur)

        return rec_walk(root)


class Pattern:
    def __init__(self, pattern_str: str) -> None:
        self.exclusion = False
        if pattern_str.startswith("!"):
            self.exclusion = True
            pattern_str = pattern_str[1:]

        self.dirs = self.normalize(pattern_str)
        self.cleaned_pattern = "/".join(self.dirs)

    @classmethod
    def normalize(cls, p: str) -> list[str]:
        # Remove trailing spaces
        p = p.strip()

        # Leading and trailing slashes are not relevant. Yes,
        # "foo.py/" must exclude the "foo.py" regular file. "."
        # components are not relevant either, even if the whole
        # pattern is only ".", as the Docker reference states: "For
        # historical reasons, the pattern . is ignored."
        # ".." component must be cleared with the potential previous
        # component, regardless of whether it exists: "A preprocessing
        # step [...] eliminates . and .. elements using Go's
        # filepath.".
        i = 0
        split = split_path(p)
        while i < len(split):
            if split[i] == "..":
                del split[i]
                if i > 0:
                    del split[i - 1]
                    i -= 1
            else:
                i += 1
        return split

    def match(self, filepath: str) -> bool:
        return fnmatch.fnmatch(normalize_slashes(filepath), self.cleaned_pattern)


def process_dockerfile(
    dockerfile: str | None, path: str
) -> tuple[str, str | None] | tuple[None, None]:
    if not dockerfile:
        return (None, None)

    abs_dockerfile = dockerfile
    if not os.path.isabs(dockerfile):
        abs_dockerfile = os.path.join(path, dockerfile)
        if IS_WINDOWS_PLATFORM and path.startswith(WINDOWS_LONGPATH_PREFIX):
            abs_dockerfile = f"{WINDOWS_LONGPATH_PREFIX}{os.path.normpath(abs_dockerfile[len(WINDOWS_LONGPATH_PREFIX) :])}"
    if os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[
        0
    ] or os.path.relpath(abs_dockerfile, path).startswith(".."):
        # Dockerfile not in context - read data to insert into tar later
        with open(abs_dockerfile, "rt", encoding="utf-8") as df:
            return (f".dockerfile.{random.getrandbits(160):x}", df.read())

    # Dockerfile is inside the context - return path relative to context root
    if dockerfile == abs_dockerfile:
        # Only calculate relpath if necessary to avoid errors
        # on Windows client -> Linux Docker
        # see https://github.com/docker/compose/issues/5969
        dockerfile = os.path.relpath(abs_dockerfile, path)
    return (dockerfile, None)
|
||||
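

# --- Editorial sketch (not part of the vendored upstream file) ---
# A minimal illustration of how PatternMatcher applies .dockerignore semantics:
# matches() returns True for paths excluded from the build context, and a
# negated pattern ("!...") can re-include a previously excluded path.
def _example_pattern_matcher() -> None:  # pragma: no cover
    pm = PatternMatcher(["docs", "!docs/README.md"])
    assert pm.matches("docs/conf.py")  # excluded by "docs"
    assert not pm.matches("docs/README.md")  # re-included by the negation
    assert not pm.matches("src/main.py")  # no pattern applies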
@@ -0,0 +1,89 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import json
import logging
import os
import typing as t

from ..constants import IS_WINDOWS_PLATFORM


DOCKER_CONFIG_FILENAME = os.path.join(".docker", "config.json")
LEGACY_DOCKER_CONFIG_FILENAME = ".dockercfg"

log = logging.getLogger(__name__)


def get_default_config_file() -> str:
    return os.path.join(home_dir(), DOCKER_CONFIG_FILENAME)


def find_config_file(config_path: str | None = None) -> str | None:
    homedir = home_dir()
    paths = list(
        filter(
            None,
            [
                config_path,  # 1
                config_path_from_environment(),  # 2
                os.path.join(homedir, DOCKER_CONFIG_FILENAME),  # 3
                os.path.join(homedir, LEGACY_DOCKER_CONFIG_FILENAME),  # 4
            ],
        )
    )

    log.debug("Trying paths: %s", repr(paths))

    for path in paths:
        if os.path.exists(path):
            log.debug("Found file at path: %s", path)
            return path

    log.debug("No config file found")

    return None


def config_path_from_environment() -> str | None:
    config_dir = os.environ.get("DOCKER_CONFIG")
    if not config_dir:
        return None
    return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))


def home_dir() -> str:
    """
    Get the user's home directory, using the same logic as the Docker Engine
    client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.
    """
    if IS_WINDOWS_PLATFORM:
        return os.environ.get("USERPROFILE", "")
    return os.path.expanduser("~")


def load_general_config(config_path: str | None = None) -> dict[str, t.Any]:
    config_file = find_config_file(config_path)

    if not config_file:
        return {}

    try:
        with open(config_file, "rt", encoding="utf-8") as f:
            return json.load(f)
    except (IOError, ValueError) as e:
        # In the case of a legacy `.dockercfg` file, we will not
        # be able to load any JSON data.
        log.debug(e)

    log.debug("All parsing attempts failed - returning empty config")
    return {}
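

# --- Editorial sketch (not part of the vendored upstream file) ---
# Illustrates the lookup order above: an explicit DOCKER_CONFIG directory wins
# over the per-user ~/.docker/config.json. The temporary directory only makes
# the example self-contained.
def _example_find_config_file() -> None:  # pragma: no cover
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, os.path.basename(DOCKER_CONFIG_FILENAME))
        with open(path, "w", encoding="utf-8") as f:
            f.write("{}")
        os.environ["DOCKER_CONFIG"] = tmp
        try:
            assert find_config_file() == path
            assert load_general_config() == {}
        finally:
            del os.environ["DOCKER_CONFIG"]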
@@ -0,0 +1,67 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import functools
import typing as t

from .. import errors
from . import utils

if t.TYPE_CHECKING:
    from collections.abc import Callable

    from ..api.client import APIClient

_Self = t.TypeVar("_Self")
_P = t.ParamSpec("_P")
_R = t.TypeVar("_R")


def minimum_version(
    version: str,
) -> Callable[
    [Callable[t.Concatenate[_Self, _P], _R]],
    Callable[t.Concatenate[_Self, _P], _R],
]:
    def decorator(
        f: Callable[t.Concatenate[_Self, _P], _R],
    ) -> Callable[t.Concatenate[_Self, _P], _R]:
        @functools.wraps(f)
        def wrapper(self: _Self, *args: _P.args, **kwargs: _P.kwargs) -> _R:
            # We use _Self instead of APIClient since this is used for mixins for APIClient.
            # This unfortunately means that self._version does not exist in the mixin,
            # it only exists after mixing in. This is why we ignore types here.
            if utils.version_lt(self._version, version):  # type: ignore
                raise errors.InvalidVersion(
                    f"{f.__name__} is not available for version < {version}"
                )
            return f(self, *args, **kwargs)

        return wrapper

    return decorator


def update_headers(
    f: Callable[t.Concatenate[APIClient, _P], _R],
) -> Callable[t.Concatenate[APIClient, _P], _R]:
    def inner(self: APIClient, *args: _P.args, **kwargs: _P.kwargs) -> _R:
        if "HttpHeaders" in self._general_configs:
            if not kwargs.get("headers"):
                kwargs["headers"] = self._general_configs["HttpHeaders"]
            else:
                # We cannot (yet) model that kwargs["headers"] should be a dictionary
                kwargs["headers"].update(self._general_configs["HttpHeaders"])  # type: ignore
        return f(self, *args, **kwargs)

    return inner
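

# --- Editorial sketch (not part of the vendored upstream file) ---
# How minimum_version() is meant to be used on an APIClient mixin: _version
# only exists after mixing in, which is why the decorator ignores types there.
# The class name and version numbers below are illustrative only.
class _ExampleMixin:  # pragma: no cover
    _version = "1.25"

    @minimum_version("1.24")
    def supported(self) -> str:
        return "ok"

    @minimum_version("1.99")
    def unsupported(self) -> str:  # raises errors.InvalidVersion when called
        return "never reached"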
@@ -0,0 +1,128 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

"""Filename matching with shell patterns.

fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case into account.

The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.

The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""

from __future__ import annotations

import re

__all__ = ["fnmatch", "fnmatchcase", "translate"]

_cache: dict[str, re.Pattern] = {}
_MAXCACHE = 100


def _purge() -> None:
    """Clear the pattern cache"""
    _cache.clear()


def fnmatch(name: str, pat: str) -> bool:
    """Test whether FILENAME matches PATTERN.

    Patterns are Unix shell style:

    * matches everything
    ? matches any single character
    [seq] matches any character in seq
    [!seq] matches any char not in seq

    An initial period in FILENAME is not special.
    Both FILENAME and PATTERN are first case-normalized
    if the operating system requires it.
    If you do not want this, use fnmatchcase(FILENAME, PATTERN).
    """

    name = name.lower()
    pat = pat.lower()
    return fnmatchcase(name, pat)


def fnmatchcase(name: str, pat: str) -> bool:
    """Test whether FILENAME matches PATTERN, including case.
    This is a version of fnmatch() which does not case-normalize
    its arguments.
    """

    try:
        re_pat = _cache[pat]
    except KeyError:
        res = translate(pat)
        if len(_cache) >= _MAXCACHE:
            _cache.clear()
        _cache[pat] = re_pat = re.compile(res)
    return re_pat.match(name) is not None


def translate(pat: str) -> str:
    """Translate a shell PATTERN to a regular expression.

    There is no way to quote meta-characters.
    """
    i, n = 0, len(pat)
    res = "^"
    while i < n:
        c = pat[i]
        i = i + 1
        if c == "*":
            if i < n and pat[i] == "*":
                # is some flavor of "**"
                i = i + 1
                # Treat **/ as ** so eat the "/"
                if i < n and pat[i] == "/":
                    i = i + 1
                if i >= n:
                    # is "**EOF" - to align with .gitignore just accept all
                    res = res + ".*"
                else:
                    # is "**"
                    # Note that this allows for any # of /'s (even 0) because
                    # the .* will eat everything, even /'s
                    res = res + "(.*/)?"
            else:
                # is "*" so map it to anything but "/"
                res = res + "[^/]*"
        elif c == "?":
            # "?" is any char except "/"
            res = res + "[^/]"
        elif c == "[":
            j = i
            if j < n and pat[j] == "!":
                j = j + 1
            if j < n and pat[j] == "]":
                j = j + 1
            while j < n and pat[j] != "]":
                j = j + 1
            if j >= n:
                res = res + "\\["
            else:
                stuff = pat[i:j].replace("\\", "\\\\")
                i = j + 1
                if stuff[0] == "!":
                    stuff = "^" + stuff[1:]
                elif stuff[0] == "^":
                    stuff = "\\" + stuff
                res = f"{res}[{stuff}]"
        else:
            res = res + re.escape(c)

    return res + "$"
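

# --- Editorial sketch (not part of the vendored upstream file) ---
# What translate() produces for the Docker-specific pattern extensions:
# "*" stops at "/", while "**" may cross directory boundaries.
def _example_translate() -> None:  # pragma: no cover
    assert translate("*.py") == r"^[^/]*\.py$"
    assert translate("**/foo") == r"^(.*/)?foo$"
    assert fnmatch("docs/readme.md", "docs/*")
    assert not fnmatch("docs/sub/readme.md", "docs/*")
    assert fnmatch("docs/sub/readme.md", "docs/**")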
@@ -0,0 +1,100 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import json
import json.decoder
import typing as t

from ..errors import StreamParseError

if t.TYPE_CHECKING:
    import re
    from collections.abc import Callable

_T = t.TypeVar("_T")


json_decoder = json.JSONDecoder()


def stream_as_text(stream: t.Generator[bytes | str]) -> t.Generator[str]:
    """
    Given a stream of bytes or text, if any of the items in the stream
    are bytes convert them to text.
    This function can be removed once we return text streams
    instead of byte streams.
    """
    for data in stream:
        if not isinstance(data, str):
            data = data.decode("utf-8", "replace")
        yield data


def json_splitter(buffer: str) -> tuple[t.Any, str] | None:
    """Attempt to parse a json object from a buffer. If there is at least one
    object, return it and the rest of the buffer, otherwise return None.
    """
    buffer = buffer.strip()
    try:
        obj, index = json_decoder.raw_decode(buffer)
        ws: re.Pattern = json.decoder.WHITESPACE  # type: ignore[attr-defined]
        m = ws.match(buffer, index)
        rest = buffer[m.end() :] if m else buffer[index:]
        return obj, rest
    except ValueError:
        return None


def json_stream(stream: t.Generator[str | bytes]) -> t.Generator[t.Any]:
    """Given a stream of text, return a stream of json objects.
    This handles streams which are inconsistently buffered (some entries may
    be newline delimited, and others are not).
    """
    return split_buffer(stream, json_splitter, json_decoder.decode)


def line_splitter(buffer: str, separator: str = "\n") -> tuple[str, str] | None:
    index = buffer.find(str(separator))
    if index == -1:
        return None
    return buffer[: index + 1], buffer[index + 1 :]


def split_buffer(
    stream: t.Generator[str | bytes],
    splitter: Callable[[str], tuple[_T, str] | None],
    decoder: Callable[[str], _T],
) -> t.Generator[_T | str]:
    """Given a generator which yields strings and a splitter function,
    joins all input, splits on the separator and yields each chunk.
    Unlike string.split(), each chunk includes the trailing
    separator, except for the last one if none was found on the end
    of the input.
    """
    buffered = ""

    for data in stream_as_text(stream):
        buffered += data
        while True:
            buffer_split = splitter(buffered)
            if buffer_split is None:
                break

            item, buffered = buffer_split
            yield item

    if buffered:
        try:
            yield decoder(buffered)
        except Exception as e:
            raise StreamParseError(e) from e
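

# --- Editorial sketch (not part of the vendored upstream file) ---
# json_stream() reassembles JSON documents that arrive split across
# arbitrarily buffered chunks, as the Docker daemon tends to send them.
def _example_json_stream() -> None:  # pragma: no cover
    chunks = iter(['{"stream": "Step 1"}{"str', 'eam": "Step 2"}'])
    assert list(json_stream(chunks)) == [
        {"stream": "Step 1"},
        {"stream": "Step 2"},
    ]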
@@ -0,0 +1,136 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import re
import typing as t

if t.TYPE_CHECKING:
    from collections.abc import Collection, Sequence


PORT_SPEC = re.compile(
    "^"  # Match full string
    "("  # External part
    r"(\[?(?P<host>[a-fA-F\d.:]+)\]?:)?"  # Address
    r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:"  # External range
    ")?"
    r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?"  # Internal range
    "(?P<proto>/(udp|tcp|sctp))?"  # Protocol
    "$"  # Match full string
)


def add_port_mapping(
    port_bindings: dict[str, list[str | tuple[str, str | None] | None]],
    internal_port: str,
    external: str | tuple[str, str | None] | None,
) -> None:
    if internal_port in port_bindings:
        port_bindings[internal_port].append(external)
    else:
        port_bindings[internal_port] = [external]


def add_port(
    port_bindings: dict[str, list[str | tuple[str, str | None] | None]],
    internal_port_range: list[str],
    external_range: list[str] | list[tuple[str, str | None]] | None,
) -> None:
    if external_range is None:
        for internal_port in internal_port_range:
            add_port_mapping(port_bindings, internal_port, None)
    else:
        for internal_port, external_port in zip(internal_port_range, external_range):
            # mypy loses the exact type of external_port elements for some reason...
            add_port_mapping(port_bindings, internal_port, external_port)  # type: ignore


def build_port_bindings(
    ports: Collection[str],
) -> dict[str, list[str | tuple[str, str | None] | None]]:
    port_bindings: dict[str, list[str | tuple[str, str | None] | None]] = {}
    for port in ports:
        internal_port_range, external_range = split_port(port)
        add_port(port_bindings, internal_port_range, external_range)
    return port_bindings


def _raise_invalid_port(port: str) -> t.NoReturn:
    raise ValueError(
        f'Invalid port "{port}", should be '
        "[[remote_ip:]remote_port[-remote_port]:]"
        "port[/protocol]"
    )


@t.overload
def port_range(
    start: str,
    end: str | None,
    proto: str,
    randomly_available_port: bool = False,
) -> list[str]: ...


@t.overload
def port_range(
    start: str | None,
    end: str | None,
    proto: str,
    randomly_available_port: bool = False,
) -> list[str] | None: ...


def port_range(
    start: str | None,
    end: str | None,
    proto: str,
    randomly_available_port: bool = False,
) -> list[str] | None:
    if start is None:
        return start
    if end is None:
        return [f"{start}{proto}"]
    if randomly_available_port:
        return [f"{start}-{end}{proto}"]
    return [f"{port}{proto}" for port in range(int(start), int(end) + 1)]


def split_port(
    port: str | int,
) -> tuple[list[str], list[str] | list[tuple[str, str | None]] | None]:
    port = str(port)
    match = PORT_SPEC.match(port)
    if match is None:
        _raise_invalid_port(port)
    parts = match.groupdict()

    host: str | None = parts["host"]
    proto: str = parts["proto"] or ""
    int_p: str = parts["int"]
    ext_p: str = parts["ext"]
    internal: list[str] = port_range(int_p, parts["int_end"], proto)  # type: ignore
    external = port_range(ext_p or None, parts["ext_end"], "", len(internal) == 1)

    if host is None:
        if (external is not None and len(internal) != len(external)) or ext_p == "":
            raise ValueError("Port ranges don't match in length")
        return internal, external
    external_or_none: Sequence[str | None]
    if not external:
        external_or_none = [None] * len(internal)
    else:
        external_or_none = external
    if len(internal) != len(external_or_none):
        raise ValueError("Port ranges don't match in length")
    return internal, [(host, ext_port) for ext_port in external_or_none]
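

# --- Editorial sketch (not part of the vendored upstream file) ---
# How split_port() decomposes the common port specification forms accepted
# by PORT_SPEC; the expected values follow from port_range() above.
def _example_split_port() -> None:  # pragma: no cover
    assert split_port("80") == (["80"], None)
    assert split_port("8080:80/tcp") == (["80/tcp"], ["8080"])
    assert split_port("127.0.0.1:8080:80") == (["80"], [("127.0.0.1", "8080")])
    assert split_port("8080-8081:80-81") == (["80", "81"], ["8080", "8081"])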
@@ -0,0 +1,98 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import typing as t

from .utils import format_environment


class ProxyConfig(dict):
    """
    Hold the client's proxy configuration
    """

    @property
    def http(self) -> str | None:
        return self.get("http")

    @property
    def https(self) -> str | None:
        return self.get("https")

    @property
    def ftp(self) -> str | None:
        return self.get("ftp")

    @property
    def no_proxy(self) -> str | None:
        return self.get("no_proxy")

    @staticmethod
    def from_dict(config: dict[str, str]) -> ProxyConfig:
        """
        Instantiate a new ProxyConfig from a dictionary that represents a
        client configuration, as described in `the documentation`_.

        .. _the documentation:
            https://docs.docker.com/network/proxy/#configure-the-docker-client
        """
        return ProxyConfig(
            http=config.get("httpProxy"),
            https=config.get("httpsProxy"),
            ftp=config.get("ftpProxy"),
            no_proxy=config.get("noProxy"),
        )

    def get_environment(self) -> dict[str, str]:
        """
        Return a dictionary representing the environment variables used to
        set the proxy settings.
        """
        env = {}
        if self.http:
            env["http_proxy"] = env["HTTP_PROXY"] = self.http
        if self.https:
            env["https_proxy"] = env["HTTPS_PROXY"] = self.https
        if self.ftp:
            env["ftp_proxy"] = env["FTP_PROXY"] = self.ftp
        if self.no_proxy:
            env["no_proxy"] = env["NO_PROXY"] = self.no_proxy
        return env

    @t.overload
    def inject_proxy_environment(self, environment: list[str]) -> list[str]: ...

    @t.overload
    def inject_proxy_environment(
        self, environment: list[str] | None
    ) -> list[str] | None: ...

    def inject_proxy_environment(
        self, environment: list[str] | None
    ) -> list[str] | None:
        """
        Given a list of strings representing environment variables, prepend the
        environment variables corresponding to the proxy settings.
        """
        if not self:
            return environment

        proxy_env = format_environment(self.get_environment())
        if not environment:
            return proxy_env
        # It is important to prepend our variables, because we want the
        # variables defined in "environment" to take precedence.
        return proxy_env + environment

    def __str__(self) -> str:
        return f"ProxyConfig(http={self.http}, https={self.https}, ftp={self.ftp}, no_proxy={self.no_proxy})"
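

# --- Editorial sketch (not part of the vendored upstream file) ---
# ProxyConfig round-trip: client config keys (httpProxy, noProxy, ...) become
# both lower- and upper-case environment variables, prepended so that
# explicitly requested variables keep precedence.
def _example_proxy_config() -> None:  # pragma: no cover
    cfg = ProxyConfig.from_dict(
        {"httpProxy": "http://proxy:3128", "noProxy": "localhost"}
    )
    env = cfg.inject_proxy_environment(["FOO=bar"])
    assert env[-1] == "FOO=bar"
    assert "http_proxy=http://proxy:3128" in env
    assert "NO_PROXY=localhost" in env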
@@ -0,0 +1,242 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import errno
import os
import select
import socket as pysocket
import struct
import typing as t

from ..transport.npipesocket import NpipeSocket

if t.TYPE_CHECKING:
    from collections.abc import Sequence

    from ..._socket_helper import SocketLike


STDOUT = 1
STDERR = 2


class SocketError(Exception):
    pass


# NpipeSockets have their own error types
# pywintypes.error: (109, 'ReadFile', 'The pipe has been ended.')
NPIPE_ENDED = 109


def read(socket: SocketLike, n: int = 4096) -> bytes | None:
    """
    Reads at most n bytes from socket
    """

    recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)

    if not isinstance(socket, NpipeSocket):  # type: ignore[unreachable]
        if not hasattr(select, "poll"):
            # Limited to 1024
            select.select([socket], [], [])
        else:
            poll = select.poll()
            poll.register(socket, select.POLLIN | select.POLLPRI)
            poll.poll()

    try:
        if hasattr(socket, "recv"):
            return socket.recv(n)
        if isinstance(socket, pysocket.SocketIO):  # type: ignore
            return socket.read(n)  # type: ignore[unreachable]
        return os.read(socket.fileno(), n)
    except EnvironmentError as e:
        if e.errno not in recoverable_errors:
            raise
        return None  # TODO ???
    except Exception as e:
        is_pipe_ended = (
            isinstance(socket, NpipeSocket)  # type: ignore[unreachable]
            and len(e.args) > 0
            and e.args[0] == NPIPE_ENDED
        )
        if is_pipe_ended:
            # npipes do not support duplex sockets, so we interpret
            # a PIPE_ENDED error as a close operation (0-length read).
            return b""
        raise


def read_exactly(socket: SocketLike, n: int) -> bytes:
    """
    Reads exactly n bytes from socket
    Raises SocketError if there is not enough data
    """
    data = b""
    while len(data) < n:
        next_data = read(socket, n - len(data))
        if not next_data:
            raise SocketError("Unexpected EOF")
        data += next_data
    return data


def next_frame_header(socket: SocketLike) -> tuple[int, int]:
    """
    Returns the stream and size of the next frame of data waiting to be read
    from socket, according to the protocol defined here:

    https://docs.docker.com/engine/api/v1.24/#attach-to-a-container
    """
    try:
        data = read_exactly(socket, 8)
    except SocketError:
        return (-1, -1)

    stream, actual = struct.unpack(">BxxxL", data)
    return (stream, actual)


def frames_iter(socket: SocketLike, tty: bool) -> t.Generator[tuple[int, bytes]]:
    """
    Return a generator of frames read from socket. A frame is a tuple where
    the first item is the stream number and the second item is a chunk of data.

    If the tty setting is enabled, the streams are multiplexed into the stdout
    stream.
    """
    if tty:
        return ((STDOUT, frame) for frame in frames_iter_tty(socket))
    return frames_iter_no_tty(socket)


def frames_iter_no_tty(socket: SocketLike) -> t.Generator[tuple[int, bytes]]:
    """
    Returns a generator of data read from the socket when the tty setting is
    not enabled.
    """
    while True:
        (stream, n) = next_frame_header(socket)
        if n < 0:
            break
        while n > 0:
            result = read(socket, n)
            if result is None:
                continue
            data_length = len(result)
            if data_length == 0:
                # We have reached EOF
                return
            n -= data_length
            yield (stream, result)


def frames_iter_tty(socket: SocketLike) -> t.Generator[bytes]:
    """
    Return a generator of data read from the socket when the tty setting is
    enabled.
    """
    while True:
        result = read(socket)
        if not result:
            # We have reached EOF
            return
        yield result
@t.overload
def consume_socket_output(
    frames: Sequence[bytes] | t.Generator[bytes], demux: t.Literal[False] = False
) -> bytes: ...


@t.overload
def consume_socket_output(
    frames: (
        Sequence[tuple[bytes | None, bytes | None]]
        | t.Generator[tuple[bytes | None, bytes | None]]
    ),
    demux: t.Literal[True],
) -> tuple[bytes, bytes]: ...


@t.overload
def consume_socket_output(
    frames: (
        Sequence[bytes]
        | Sequence[tuple[bytes | None, bytes | None]]
        | t.Generator[bytes]
        | t.Generator[tuple[bytes | None, bytes | None]]
    ),
    demux: bool = False,
) -> bytes | tuple[bytes, bytes]: ...


def consume_socket_output(
    frames: (
        Sequence[bytes]
        | Sequence[tuple[bytes | None, bytes | None]]
        | t.Generator[bytes]
        | t.Generator[tuple[bytes | None, bytes | None]]
    ),
    demux: bool = False,
) -> bytes | tuple[bytes, bytes]:
    """
    Iterate through frames read from the socket and return the result.

    Args:

        demux (bool):
            If False, stdout and stderr are multiplexed, and the result is the
            concatenation of all the frames. If True, the streams are
            demultiplexed, and the result is a 2-tuple where each item is the
            concatenation of frames belonging to the same stream.
    """
    if demux is False:
        # If the streams are multiplexed, the generator returns strings, that
        # we just need to concatenate.
        return b"".join(frames)  # type: ignore

    # If the streams are demultiplexed, the generator yields tuples
    # (stdout, stderr)
    out: list[bytes | None] = [None, None]
    frame: tuple[bytes | None, bytes | None]
    for frame in frames:  # type: ignore
        # It is guaranteed that for each frame, one and only one stream
        # is not None.
        if frame == (None, None):
            raise AssertionError(f"frame must not be (None, None), but got {frame}")
        if frame[0] is not None:
            if out[0] is None:
                out[0] = frame[0]
            else:
                out[0] += frame[0]
        else:
            if out[1] is None:
                out[1] = frame[1]
            else:
                out[1] += frame[1]  # type: ignore[operator]
    return tuple(out)  # type: ignore


def demux_adaptor(stream_id: int, data: bytes) -> tuple[bytes | None, bytes | None]:
    """
    Utility to demultiplex stdout and stderr when reading frames from the
    socket.
    """
    if stream_id == STDOUT:
        return (data, None)
    if stream_id == STDERR:
        return (None, data)
    raise ValueError(f"{stream_id} is not a valid stream")
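

# --- Editorial sketch (not part of the vendored upstream file) ---
# consume_socket_output() in both modes: multiplexed frames are simply
# concatenated, while demultiplexed (stdout, stderr) frames are folded
# per stream, matching what demux_adaptor() produces.
def _example_consume_socket_output() -> None:  # pragma: no cover
    assert consume_socket_output([b"a", b"b"]) == b"ab"
    frames = [
        demux_adaptor(STDOUT, b"out1"),
        demux_adaptor(STDERR, b"err"),
        demux_adaptor(STDOUT, b"out2"),
    ]
    assert consume_socket_output(frames, demux=True) == (b"out1out2", b"err")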
@@ -0,0 +1,519 @@
# This code is part of the Ansible collection community.docker, but is an independent component.
# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
#
# Copyright (c) 2016-2022 Docker, Inc.
#
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
# SPDX-License-Identifier: Apache-2.0

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import base64
import collections
import json
import os
import os.path
import shlex
import string
import typing as t
from urllib.parse import urlparse, urlunparse

from ansible_collections.community.docker.plugins.module_utils._version import (
    StrictVersion,
)

from .. import errors
from ..constants import (
    BYTE_UNITS,
    DEFAULT_HTTP_HOST,
    DEFAULT_NPIPE,
    DEFAULT_UNIX_SOCKET,
)
from ..tls import TLSConfig

if t.TYPE_CHECKING:
    from collections.abc import Mapping, Sequence


URLComponents = collections.namedtuple(
    "URLComponents",
    "scheme netloc url params query fragment",
)


def decode_json_header(header: str | bytes) -> dict[str, t.Any]:
    data = base64.b64decode(header).decode("utf-8")
    return json.loads(data)


def compare_version(v1: str, v2: str) -> t.Literal[-1, 0, 1]:
    """Compare docker versions

    >>> v1 = '1.9'
    >>> v2 = '1.10'
    >>> compare_version(v1, v2)
    1
    >>> compare_version(v2, v1)
    -1
    >>> compare_version(v2, v2)
    0
    """
    s1 = StrictVersion(v1)
    s2 = StrictVersion(v2)
    if s1 == s2:
        return 0
    if s1 > s2:
        return -1
    return 1


def version_lt(v1: str, v2: str) -> bool:
    return compare_version(v1, v2) > 0


def version_gte(v1: str, v2: str) -> bool:
    return not version_lt(v1, v2)


def _convert_port_binding(
    binding: (
        tuple[str, str | int | None]
        | tuple[str | int | None]
        | dict[str, str]
        | str
        | int
    ),
) -> dict[str, str]:
    result = {"HostIp": "", "HostPort": ""}
    host_port: str | int | None = ""
    if isinstance(binding, tuple):
        if len(binding) == 2:
            host_port = binding[1]  # type: ignore
            result["HostIp"] = binding[0]
        elif isinstance(binding[0], str):
            result["HostIp"] = binding[0]
        else:
            host_port = binding[0]
    elif isinstance(binding, dict):
        if "HostPort" in binding:
            host_port = binding["HostPort"]
            if "HostIp" in binding:
                result["HostIp"] = binding["HostIp"]
        else:
            raise ValueError(binding)
    else:
        host_port = binding

    result["HostPort"] = str(host_port) if host_port is not None else ""
    return result


def convert_port_bindings(
    port_bindings: dict[
        str | int,
        tuple[str, str | int | None]
        | tuple[str | int | None]
        | dict[str, str]
        | str
        | int
        | list[
            tuple[str, str | int | None]
            | tuple[str | int | None]
            | dict[str, str]
            | str
            | int
        ],
    ],
) -> dict[str, list[dict[str, str]]]:
    result = {}
    for k, v in port_bindings.items():
        key = str(k)
        if "/" not in key:
            key += "/tcp"
        if isinstance(v, list):
            result[key] = [_convert_port_binding(binding) for binding in v]
        else:
            result[key] = [_convert_port_binding(v)]
    return result


def convert_volume_binds(
    binds: (
        list[str]
        | Mapping[
            str | bytes, dict[str, str | bytes] | dict[str, str] | bytes | str | int
        ]
    ),
) -> list[str]:
    if isinstance(binds, list):
        return binds  # type: ignore

    result = []
    for k, v in binds.items():
        if isinstance(k, bytes):
            k = k.decode("utf-8")

        if isinstance(v, dict):
            if "ro" in v and "mode" in v:
                raise ValueError(f'Binding cannot contain both "ro" and "mode": {v!r}')

            bind = v["bind"]
            if isinstance(bind, bytes):
                bind = bind.decode("utf-8")

            if "ro" in v:
                mode = "ro" if v["ro"] else "rw"
            elif "mode" in v:
                mode = v["mode"]  # type: ignore # TODO
            else:
                mode = "rw"

            # NOTE: this is only relevant for Linux hosts
            # (does not apply in Docker Desktop)
            propagation_modes = [
                "rshared",
                "shared",
                "rslave",
                "slave",
                "rprivate",
                "private",
            ]
            if "propagation" in v and v["propagation"] in propagation_modes:
                if mode:
                    mode = ",".join([mode, v["propagation"]])  # type: ignore # TODO
                else:
                    mode = v["propagation"]  # type: ignore # TODO

            result.append(f"{k}:{bind}:{mode}")
        else:
            if isinstance(v, bytes):
                v = v.decode("utf-8")
            result.append(f"{k}:{v}:rw")
    return result


def convert_tmpfs_mounts(tmpfs: dict[str, str] | list[str]) -> dict[str, str]:
    if isinstance(tmpfs, dict):
        return tmpfs

    if not isinstance(tmpfs, list):
        raise ValueError(
            f"Expected tmpfs value to be either a list or a dict, found: {type(tmpfs).__name__}"
        )

    result = {}
    for mount in tmpfs:
        if isinstance(mount, str):
            if ":" in mount:
                name, options = mount.split(":", 1)
            else:
                name = mount
                options = ""

        else:
            raise ValueError(
                f"Expected item in tmpfs list to be a string, found: {type(mount).__name__}"
            )

        result[name] = options
    return result


def convert_service_networks(
    networks: list[str | dict[str, str]],
) -> list[dict[str, str]]:
    if not networks:
        return networks  # type: ignore
    if not isinstance(networks, list):
        raise TypeError("networks parameter must be a list.")

    result = []
    for n in networks:
        if isinstance(n, str):
            n = {"Target": n}
        result.append(n)
    return result


def parse_repository_tag(repo_name: str) -> tuple[str, str | None]:
    parts = repo_name.rsplit("@", 1)
    if len(parts) == 2:
        return tuple(parts)  # type: ignore
    parts = repo_name.rsplit(":", 1)
    if len(parts) == 2 and "/" not in parts[1]:
        return tuple(parts)  # type: ignore
    return repo_name, None
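

# --- Editorial sketch (not part of the vendored upstream file) ---
# parse_repository_tag() splits on the last ":" only when it is not part of a
# registry host, and treats "@" digests like tags.
def _example_parse_repository_tag() -> None:  # pragma: no cover
    assert parse_repository_tag("ubuntu") == ("ubuntu", None)
    assert parse_repository_tag("ubuntu:22.04") == ("ubuntu", "22.04")
    assert parse_repository_tag("localhost:5000/ubuntu") == (
        "localhost:5000/ubuntu",
        None,
    )
    assert parse_repository_tag("repo@sha256:0123abcd") == ("repo", "sha256:0123abcd")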
def parse_host(addr: str | None, is_win32: bool = False, tls: bool = False) -> str:
    # Sensible defaults
    if not addr and is_win32:
        return DEFAULT_NPIPE
    if not addr or addr.strip() == "unix://":
        return DEFAULT_UNIX_SOCKET

    addr = addr.strip()

    parsed_url = urlparse(addr)
    proto = parsed_url.scheme
    if not proto or any(x not in string.ascii_letters + "+" for x in proto):
        # https://bugs.python.org/issue754016
        parsed_url = urlparse("//" + addr, "tcp")
        proto = "tcp"

    if proto == "fd":
        raise errors.DockerException("fd protocol is not implemented")

    # These protos are valid aliases for our library but not for the
    # official spec
    if proto in ("http", "https"):
        tls = proto == "https"
        proto = "tcp"
    elif proto == "http+unix":
        proto = "unix"

    if proto not in ("tcp", "unix", "npipe", "ssh"):
        raise errors.DockerException(f"Invalid bind address protocol: {addr}")

    if proto == "tcp" and not parsed_url.netloc:
        # "tcp://" is exceptionally disallowed by convention;
        # omitting a hostname for other protocols is fine
        raise errors.DockerException(f"Invalid bind address format: {addr}")

    if any(
        [parsed_url.params, parsed_url.query, parsed_url.fragment, parsed_url.password]
    ):
        raise errors.DockerException(f"Invalid bind address format: {addr}")

    if parsed_url.path and proto == "ssh":
        raise errors.DockerException(
            f"Invalid bind address format: no path allowed for this protocol: {addr}"
        )
    path = parsed_url.path
    if proto == "unix" and parsed_url.hostname is not None:
        # For legacy reasons, we consider unix://path
        # to be valid and equivalent to unix:///path
        path = f"{parsed_url.hostname}/{path}"

    netloc = parsed_url.netloc
    if proto in ("tcp", "ssh"):
        port = parsed_url.port or 0
        if port <= 0:
            # Default ports: 22 for SSH, 2376 for TLS-secured tcp, 2375 for plain tcp
            port = 22 if proto == "ssh" else (2376 if tls else 2375)
            netloc = f"{parsed_url.netloc}:{port}"

        if not parsed_url.hostname:
            netloc = f"{DEFAULT_HTTP_HOST}:{port}"

    # Rewrite schemes to fit library internals (requests adapters)
    if proto == "tcp":
        proto = f"http{'s' if tls else ''}"
    elif proto == "unix":
        proto = "http+unix"

    if proto in ("http+unix", "npipe"):
        return f"{proto}://{path}".rstrip("/")
    return urlunparse(
        URLComponents(
            scheme=proto,
            netloc=netloc,
            url=path,
            params="",
            query="",
            fragment="",
        )
    ).rstrip("/")
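

# --- Editorial sketch (not part of the vendored upstream file) ---
# parse_host() normalization, assuming the conventional Docker defaults of
# 2375 for plain tcp and 2376 for TLS:
def _example_parse_host() -> None:  # pragma: no cover
    assert parse_host(None) == DEFAULT_UNIX_SOCKET
    assert parse_host("127.0.0.1") == "http://127.0.0.1:2375"
    assert parse_host("tcp://127.0.0.1:2376", tls=True) == "https://127.0.0.1:2376"
    assert parse_host("ssh://user@example.com") == "ssh://user@example.com:22"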
def parse_devices(devices: Sequence[dict[str, str] | str]) -> list[dict[str, str]]:
    device_list = []
    for device in devices:
        if isinstance(device, dict):
            device_list.append(device)
            continue
        if not isinstance(device, str):
            raise errors.DockerException(f"Invalid device type {type(device)}")
        device_mapping = device.split(":")
        if device_mapping:
            path_on_host = device_mapping[0]
            if len(device_mapping) > 1:
                path_in_container = device_mapping[1]
            else:
                path_in_container = path_on_host
            if len(device_mapping) > 2:
                permissions = device_mapping[2]
            else:
                permissions = "rwm"
            device_list.append(
                {
                    "PathOnHost": path_on_host,
                    "PathInContainer": path_in_container,
                    "CgroupPermissions": permissions,
                }
            )
    return device_list


def kwargs_from_env(
    assert_hostname: bool | None = None,
    environment: Mapping[str, str] | None = None,
) -> dict[str, t.Any]:
    if not environment:
        environment = os.environ
    host = environment.get("DOCKER_HOST")

    # empty string for cert path is the same as unset.
    cert_path = environment.get("DOCKER_CERT_PATH") or None

    # empty string for tls verify counts as "false".
    # Any other value counts as true; unset counts as false.
    tls_verify_str = environment.get("DOCKER_TLS_VERIFY")
    if tls_verify_str == "":
        tls_verify = False
    else:
        tls_verify = tls_verify_str is not None
    enable_tls = cert_path or tls_verify

    params: dict[str, t.Any] = {}

    if host:
        params["base_url"] = host

    if not enable_tls:
        return params

    if not cert_path:
        cert_path = os.path.join(os.path.expanduser("~"), ".docker")

    if not tls_verify and assert_hostname is None:
        # assert_hostname is a subset of TLS verification,
        # so if it is not set already then set it to false.
        assert_hostname = False

    params["tls"] = TLSConfig(
        client_cert=(
            os.path.join(cert_path, "cert.pem"),
            os.path.join(cert_path, "key.pem"),
        ),
        ca_cert=os.path.join(cert_path, "ca.pem"),
        verify=tls_verify,
        assert_hostname=assert_hostname,
    )

    return params


def convert_filters(
    filters: Mapping[str, bool | str | int | list[int] | list[str] | list[str | int]],
) -> str:
    result = {}
    for k, v in filters.items():
        if isinstance(v, bool):
            v = "true" if v else "false"
        if not isinstance(v, list):
            v = [v]
        result[k] = [str(item) if not isinstance(item, str) else item for item in v]
    return json.dumps(result)


def parse_bytes(s: int | float | str) -> int | float:
    if isinstance(s, (int, float)):
        return s
    if len(s) == 0:
        return 0

    if s[-2:-1].isalpha() and s[-1].isalpha() and (s[-1] == "b" or s[-1] == "B"):
        s = s[:-1]
    units = BYTE_UNITS
    suffix = s[-1].lower()

    # Check if the variable is a string representation of an int
    # without a units part. Assuming that the units are bytes.
    if suffix.isdigit():
        digits_part = s
        suffix = "b"
    else:
        digits_part = s[:-1]

    if suffix in units or suffix.isdigit():
        try:
            digits = float(digits_part)
        except ValueError as exc:
            raise errors.DockerException(
                f"Failed converting the string value for memory ({digits_part}) to an integer."
            ) from exc

        # Reconvert to long for the final result
        s = int(digits * units[suffix])
    else:
        raise errors.DockerException(
            f"The specified value for memory ({s}) should specify the units. The postfix should be one of the `b` `k` `m` `g` characters"
        )

    return s
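

# --- Editorial sketch (not part of the vendored upstream file) ---
# parse_bytes() accepts plain integers (bytes) and b/k/m/g suffixes, with an
# optional trailing "b"; this assumes BYTE_UNITS maps k/m/g to powers of 1024.
def _example_parse_bytes() -> None:  # pragma: no cover
    assert parse_bytes("512") == 512
    assert parse_bytes("1k") == 1024
    assert parse_bytes("128m") == 128 * 1024 * 1024
    assert parse_bytes("1gb") == 1024**3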
def normalize_links(links: dict[str, str] | Sequence[tuple[str, str]]) -> list[str]:
    if isinstance(links, dict):
        sorted_links = sorted(links.items())
    else:
        sorted_links = sorted(links)

    return [f"{k}:{v}" if v else k for k, v in sorted_links]


def parse_env_file(env_file: str | os.PathLike) -> dict[str, str]:
    """
    Reads a line-separated environment file.
    The format of each line should be "key=value".
    """
    environment = {}

    with open(env_file, "rt", encoding="utf-8") as f:
        for line in f:
            if line[0] == "#":
                continue

            line = line.strip()
            if not line:
                continue

            parse_line = line.split("=", 1)
            if len(parse_line) == 2:
                k, v = parse_line
                environment[k] = v
            else:
                raise errors.DockerException(
                    f"Invalid line in environment file {env_file}:\n{line}"
                )

    return environment


def split_command(command: str) -> list[str]:
    return shlex.split(command)


def format_environment(environment: Mapping[str, str | bytes | None]) -> list[str]:
    def format_env(key: str, value: str | bytes | None) -> str:
        if value is None:
            return key
        if isinstance(value, bytes):
            value = value.decode("utf-8")

        return f"{key}={value}"

    return [format_env(*var) for var in environment.items()]


def format_extra_hosts(extra_hosts: Mapping[str, str], task: bool = False) -> list[str]:
    # Use format dictated by Swarm API if container is part of a task
    if task:
        return [f"{v} {k}" for k, v in sorted(extra_hosts.items())]

    return [f"{k}:{v}" for k, v in sorted(extra_hosts.items())]
@@ -0,0 +1,555 @@
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import abc
import os
import platform
import re
import sys
import traceback
import typing as t
from collections.abc import Mapping, Sequence

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE

from ansible_collections.community.docker.plugins.module_utils._util import (
    DEFAULT_DOCKER_HOST,
    DEFAULT_TIMEOUT_SECONDS,
    DEFAULT_TLS,
    DEFAULT_TLS_VERIFY,
    DOCKER_COMMON_ARGS,
    DOCKER_MUTUALLY_EXCLUSIVE,
    DOCKER_REQUIRED_TOGETHER,
    sanitize_result,
    update_tls_hostname,
)
from ansible_collections.community.docker.plugins.module_utils._version import (
    LooseVersion,
)

HAS_DOCKER_PY_2 = False  # pylint: disable=invalid-name
HAS_DOCKER_PY_3 = False  # pylint: disable=invalid-name
HAS_DOCKER_ERROR: None | str  # pylint: disable=invalid-name
HAS_DOCKER_TRACEBACK: None | str  # pylint: disable=invalid-name
docker_version: str | None  # pylint: disable=invalid-name

try:
    from docker import __version__ as docker_version
    from docker.errors import APIError, TLSParameterError
    from docker.tls import TLSConfig

    if LooseVersion(docker_version) >= LooseVersion("3.0.0"):
        HAS_DOCKER_PY_3 = True  # pylint: disable=invalid-name
        from docker import APIClient as Client
    elif LooseVersion(docker_version) >= LooseVersion("2.0.0"):
        HAS_DOCKER_PY_2 = True  # pylint: disable=invalid-name
        from docker import APIClient as Client
    else:
        from docker import Client  # type: ignore

except ImportError as exc:
    HAS_DOCKER_ERROR = str(exc)  # pylint: disable=invalid-name
    HAS_DOCKER_TRACEBACK = traceback.format_exc()  # pylint: disable=invalid-name
    HAS_DOCKER_PY = False  # pylint: disable=invalid-name
    docker_version = None  # pylint: disable=invalid-name
else:
    HAS_DOCKER_PY = True  # pylint: disable=invalid-name
    HAS_DOCKER_ERROR = None  # pylint: disable=invalid-name
    HAS_DOCKER_TRACEBACK = None  # pylint: disable=invalid-name


try:
    from requests.exceptions import (  # noqa: F401, pylint: disable=unused-import
        RequestException,
    )
except ImportError:
    # Either Docker SDK for Python is no longer using requests, or Docker SDK for Python is not around either,
    # or Docker SDK for Python's dependency requests is missing. In any case, define an exception
    # class RequestException so that our code does not break.
    class RequestException(Exception):  # type: ignore
        pass


if t.TYPE_CHECKING:
    from collections.abc import Callable


MIN_DOCKER_VERSION = "2.0.0"


if not HAS_DOCKER_PY:
    # No Docker SDK for Python. Create a placeholder client to allow
    # instantiation of AnsibleModule and proper error handling
    class Client:  # type: ignore # noqa: F811, pylint: disable=function-redefined
        def __init__(self, **kwargs: t.Any) -> None:
            pass

    class APIError(Exception):  # type: ignore # noqa: F811, pylint: disable=function-redefined
        pass

    class NotFound(Exception):  # type: ignore # noqa: F811, pylint: disable=function-redefined
        pass


def _get_tls_config(
    fail_function: Callable[[str], t.NoReturn], **kwargs: t.Any
) -> TLSConfig:
    if "assert_hostname" in kwargs and LooseVersion(docker_version) >= LooseVersion(
        "7.0.0b1"
    ):
        assert_hostname = kwargs.pop("assert_hostname")
        if assert_hostname is not None:
            fail_function(
                "tls_hostname is not compatible with Docker SDK for Python 7.0.0+. You are using"
                f" Docker SDK for Python {docker_version}. The tls_hostname option (value: {assert_hostname})"
                " has either been set directly or with the environment variable DOCKER_TLS_HOSTNAME."
                " Make sure it is not set, or switch to an older version of Docker SDK for Python."
            )
    # Filter out all None parameters
    kwargs = dict((k, v) for k, v in kwargs.items() if v is not None)
    try:
        return TLSConfig(**kwargs)
    except TLSParameterError as exc:
        fail_function(f"TLS config error: {exc}")


def is_using_tls(auth_data: dict[str, t.Any]) -> bool:
    return auth_data["tls_verify"] or auth_data["tls"]


def get_connect_params(
    auth_data: dict[str, t.Any], fail_function: Callable[[str], t.NoReturn]
) -> dict[str, t.Any]:
    if is_using_tls(auth_data):
        auth_data["docker_host"] = auth_data["docker_host"].replace(
            "tcp://", "https://"
        )

    result = {
        "base_url": auth_data["docker_host"],
        "version": auth_data["api_version"],
        "timeout": auth_data["timeout"],
    }

    if auth_data["tls_verify"]:
        # TLS with verification
        tls_config: dict[str, t.Any] = {
            "verify": True,
        }
        if auth_data["tls_hostname"] is not None:
            tls_config["assert_hostname"] = auth_data["tls_hostname"]
        if auth_data["cert_path"] and auth_data["key_path"]:
            tls_config["client_cert"] = (auth_data["cert_path"], auth_data["key_path"])
        if auth_data["cacert_path"]:
            tls_config["ca_cert"] = auth_data["cacert_path"]
        result["tls"] = _get_tls_config(fail_function=fail_function, **tls_config)
    elif auth_data["tls"]:
        # TLS without verification
        tls_config = {
            "verify": False,
        }
        if auth_data["cert_path"] and auth_data["key_path"]:
            tls_config["client_cert"] = (auth_data["cert_path"], auth_data["key_path"])
        result["tls"] = _get_tls_config(fail_function=fail_function, **tls_config)

    if auth_data.get("use_ssh_client"):
        if LooseVersion(docker_version) < LooseVersion("4.4.0"):
            fail_function(
                "use_ssh_client=True requires Docker SDK for Python 4.4.0 or newer"
            )
        result["use_ssh_client"] = True

    # No TLS
    return result
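

# --- Editorial sketch (not part of the vendored upstream file) ---
# The shape of get_connect_params() output for a plain (non-TLS) connection;
# the fail function and host value are illustrative only.
def _example_get_connect_params() -> None:  # pragma: no cover
    def fail(msg: str) -> t.NoReturn:
        raise RuntimeError(msg)

    auth = {
        "docker_host": "tcp://127.0.0.1:2375",
        "api_version": "auto",
        "timeout": 60,
        "tls": False,
        "tls_verify": False,
        "tls_hostname": None,
        "cert_path": None,
        "key_path": None,
        "cacert_path": None,
        "use_ssh_client": False,
    }
    assert get_connect_params(auth, fail_function=fail) == {
        "base_url": "tcp://127.0.0.1:2375",
        "version": "auto",
        "timeout": 60,
    }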
DOCKERPYUPGRADE_SWITCH_TO_DOCKER = (
|
||||
"Try `pip uninstall docker-py` followed by `pip install docker`."
|
||||
)
|
||||
DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade."
|
||||
|
||||
|
||||
class AnsibleDockerClientBase(Client):
    def __init__(
        self,
        min_docker_version: str | None = None,
        min_docker_api_version: str | None = None,
    ) -> None:
        if min_docker_version is None:
            min_docker_version = MIN_DOCKER_VERSION

        self.docker_py_version = LooseVersion(docker_version)

        if not HAS_DOCKER_PY:
            msg = missing_required_lib("Docker SDK for Python: docker>=5.0.0")
            msg = f"{msg}, for example via `pip install docker`. The error was: {HAS_DOCKER_ERROR}"
            self.fail(msg, exception=HAS_DOCKER_TRACEBACK)

        if self.docker_py_version < LooseVersion(min_docker_version):
            msg = (
                f"Error: Docker SDK for Python version is {docker_version} ({platform.node()}'s Python {sys.executable})."
                f" Minimum version required is {min_docker_version}."
            )
            if docker_version < LooseVersion("2.0"):
                msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
            else:
                msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
            self.fail(msg)

        self._connect_params = get_connect_params(
            self.auth_params, fail_function=self.fail
        )

        try:
            super().__init__(**self._connect_params)
            self.docker_api_version_str = self.api_version
        except APIError as exc:
            self.fail(f"Docker API error: {exc}")
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error connecting: {exc}")

        self.docker_api_version = LooseVersion(self.docker_api_version_str)
        min_docker_api_version = min_docker_api_version or "1.25"
        if self.docker_api_version < LooseVersion(min_docker_api_version):
            self.fail(
                f"Docker API version is {self.docker_api_version_str}. Minimum version required is {min_docker_api_version}."
            )

    def log(self, msg: t.Any, pretty_print: bool = False) -> None:
        pass
        # if self.debug:
        #     from .util import log_debug
        #     log_debug(msg, pretty_print=pretty_print)

    @abc.abstractmethod
    def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
        pass

    @abc.abstractmethod
    def deprecate(
        self,
        msg: str,
        version: str | None = None,
        date: str | None = None,
        collection_name: str | None = None,
    ) -> None:
        pass

    @staticmethod
    def _get_value(
        param_name: str,
        param_value: t.Any,
        env_variable: str | None,
        default_value: t.Any | None,
        value_type: t.Literal["str", "bool", "int"] = "str",
    ) -> t.Any:
        if param_value is not None:
            # take module parameter value
            if value_type == "bool":
                if param_value in BOOLEANS_TRUE:
                    return True
                if param_value in BOOLEANS_FALSE:
                    return False
                return bool(param_value)
            if value_type == "int":
                return int(param_value)
            return param_value

        if env_variable is not None:
            env_value = os.environ.get(env_variable)
            if env_value is not None:
                # take the env variable value
                if param_name == "cert_path":
                    return os.path.join(env_value, "cert.pem")
                if param_name == "cacert_path":
                    return os.path.join(env_value, "ca.pem")
                if param_name == "key_path":
                    return os.path.join(env_value, "key.pem")
                if value_type == "bool":
                    if env_value in BOOLEANS_TRUE:
                        return True
                    if env_value in BOOLEANS_FALSE:
                        return False
                    return bool(env_value)
                if value_type == "int":
                    return int(env_value)
                return env_value

        # take the default
        return default_value

    @abc.abstractmethod
    def _get_params(self) -> dict[str, t.Any]:
        pass

    @property
    def auth_params(self) -> dict[str, t.Any]:
        # Get authentication credentials.
        # Precedence: module parameters -> environment variables -> defaults.

        self.log("Getting credentials")

        client_params = self._get_params()

        params = {}
        for key in DOCKER_COMMON_ARGS:
            params[key] = client_params.get(key)

        result = {
            "docker_host": self._get_value(
                "docker_host",
                params["docker_host"],
                "DOCKER_HOST",
                DEFAULT_DOCKER_HOST,
                value_type="str",
            ),
            "tls_hostname": self._get_value(
                "tls_hostname",
                params["tls_hostname"],
                "DOCKER_TLS_HOSTNAME",
                None,
                value_type="str",
            ),
            "api_version": self._get_value(
                "api_version",
                params["api_version"],
                "DOCKER_API_VERSION",
                "auto",
                value_type="str",
            ),
            "cacert_path": self._get_value(
                "cacert_path",
                params["ca_path"],
                "DOCKER_CERT_PATH",
                None,
                value_type="str",
            ),
            "cert_path": self._get_value(
                "cert_path",
                params["client_cert"],
                "DOCKER_CERT_PATH",
                None,
                value_type="str",
            ),
            "key_path": self._get_value(
                "key_path",
                params["client_key"],
                "DOCKER_CERT_PATH",
                None,
                value_type="str",
            ),
            "tls": self._get_value(
                "tls", params["tls"], "DOCKER_TLS", DEFAULT_TLS, value_type="bool"
            ),
            "tls_verify": self._get_value(
                "validate_certs",
                params["validate_certs"],
                "DOCKER_TLS_VERIFY",
                DEFAULT_TLS_VERIFY,
                value_type="bool",
            ),
            "timeout": self._get_value(
                "timeout",
                params["timeout"],
                "DOCKER_TIMEOUT",
                DEFAULT_TIMEOUT_SECONDS,
                value_type="int",
            ),
            "use_ssh_client": self._get_value(
                "use_ssh_client",
                params["use_ssh_client"],
                None,
                False,
                value_type="bool",
            ),
        }

        if LooseVersion(docker_version) < LooseVersion("7.0.0b1"):
            update_tls_hostname(result)

        return result

    def _handle_ssl_error(self, error: Exception) -> t.NoReturn:
        match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
        if match:
            hostname = self.auth_params["tls_hostname"]
            self.fail(
                f"You asked for verification that the Docker daemon's certificate hostname matches {hostname}. "
                f"The actual certificate's hostname is {match.group(1)}. Most likely you need to set DOCKER_TLS_HOSTNAME "
                f"or pass `tls_hostname` with a value of {match.group(1)}. You may also use TLS without verification by "
                "setting the `tls` parameter to true."
            )
        self.fail(f"SSL Exception: {error}")


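# Illustration only (not part of the vendored module): _handle_ssl_error() above
# pulls the expected hostname out of a typical OpenSSL verification message.
# A minimal sketch with a hypothetical error string:
def _example_hostname_mismatch() -> str | None:
    error = "hostname '203.0.113.5' doesn't match 'docker.example.com'"
    match = re.match(r"hostname.*doesn\'t match (\'.*\')", error)
    # group(1) keeps the quotes, e.g. "'docker.example.com'"
    return match.group(1) if match else None

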
class AnsibleDockerClient(AnsibleDockerClientBase):
    def __init__(
        self,
        argument_spec: dict[str, t.Any] | None = None,
        supports_check_mode: bool = False,
        mutually_exclusive: Sequence[Sequence[str]] | None = None,
        required_together: Sequence[Sequence[str]] | None = None,
        required_if: (
            Sequence[
                tuple[str, t.Any, Sequence[str]]
                | tuple[str, t.Any, Sequence[str], bool]
            ]
            | None
        ) = None,
        required_one_of: Sequence[Sequence[str]] | None = None,
        required_by: dict[str, Sequence[str]] | None = None,
        min_docker_version: str | None = None,
        min_docker_api_version: str | None = None,
        option_minimal_versions: dict[str, t.Any] | None = None,
        option_minimal_versions_ignore_params: Sequence[str] | None = None,
        fail_results: dict[str, t.Any] | None = None,
    ):
        # Modules can put information in here which will always be returned
        # in case client.fail() is called.
        self.fail_results = fail_results or {}

        merged_arg_spec = {}
        merged_arg_spec.update(DOCKER_COMMON_ARGS)
        if argument_spec:
            merged_arg_spec.update(argument_spec)
        self.arg_spec = merged_arg_spec

        mutually_exclusive_params: list[Sequence[str]] = []
        mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
        if mutually_exclusive:
            mutually_exclusive_params += mutually_exclusive

        required_together_params: list[Sequence[str]] = []
        required_together_params += DOCKER_REQUIRED_TOGETHER
        if required_together:
            required_together_params += required_together

        self.module = AnsibleModule(
            argument_spec=merged_arg_spec,
            supports_check_mode=supports_check_mode,
            mutually_exclusive=mutually_exclusive_params,
            required_together=required_together_params,
            required_if=required_if,
            required_one_of=required_one_of,
            required_by=required_by or {},
        )

        self.debug = self.module.params.get("debug")
        self.check_mode = self.module.check_mode

        super().__init__(
            min_docker_version=min_docker_version,
            min_docker_api_version=min_docker_api_version,
        )

        if option_minimal_versions is not None:
            self._get_minimal_versions(
                option_minimal_versions, option_minimal_versions_ignore_params
            )

    def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
        self.fail_results.update(kwargs)
        self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))

    def deprecate(
        self,
        msg: str,
        version: str | None = None,
        date: str | None = None,
        collection_name: str | None = None,
    ) -> None:
        self.module.deprecate(
            msg, version=version, date=date, collection_name=collection_name
        )

    def _get_params(self) -> dict[str, t.Any]:
        return self.module.params

    def _get_minimal_versions(
        self,
        option_minimal_versions: dict[str, t.Any],
        ignore_params: Sequence[str] | None = None,
    ) -> None:
        self.option_minimal_versions: dict[str, dict[str, t.Any]] = {}
        for option in self.module.argument_spec:
            if ignore_params is not None and option in ignore_params:
                continue
            self.option_minimal_versions[option] = {}
        self.option_minimal_versions.update(option_minimal_versions)

        for option, data in self.option_minimal_versions.items():
            # Test whether option is supported, and store result
            support_docker_py = True
            support_docker_api = True
            if "docker_py_version" in data:
                support_docker_py = self.docker_py_version >= LooseVersion(
                    data["docker_py_version"]
                )
            if "docker_api_version" in data:
                support_docker_api = self.docker_api_version >= LooseVersion(
                    data["docker_api_version"]
                )
            data["supported"] = support_docker_py and support_docker_api
            # Fail if option is not supported but used
            if not data["supported"]:
                # Test whether option is specified
                if "detect_usage" in data:
                    used = data["detect_usage"](self)
                else:
                    used = self.module.params.get(option) is not None
                    if used and "default" in self.module.argument_spec[option]:
                        used = (
                            self.module.params[option]
                            != self.module.argument_spec[option]["default"]
                        )
                if used:
                    # If the option is used, compose error message.
                    if "usage_msg" in data:
                        usg = data["usage_msg"]
                    else:
                        usg = f"set {option} option"
                    if not support_docker_api:
                        msg = f"Docker API version is {self.docker_api_version_str}. Minimum version required is {data['docker_api_version']} to {usg}."
                    elif not support_docker_py:
                        msg = (
                            f"Docker SDK for Python version is {docker_version} ({platform.node()}'s Python {sys.executable})."
                            f" Minimum version required is {data['docker_py_version']} to {usg}. {DOCKERPYUPGRADE_UPGRADE_DOCKER}"
                        )
                    else:
                        # should not happen
                        msg = f"Cannot {usg} with your configuration."
                    self.fail(msg)

    def report_warnings(
        self, result: t.Any, warnings_key: Sequence[str] | None = None
    ) -> None:
        """
        Checks result of client operation for warnings, and if present, outputs them.

        warnings_key should be a list of keys used to crawl the result dictionary.
        For example, if warnings_key == ['a', 'b'], the function will consider
        result['a']['b'] if these keys exist. If the result is a non-empty string, it
        will be reported as a warning. If the result is a list, every entry will be
        reported as a warning.

        In most cases (if warnings are returned at all), warnings_key should be
        ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
        """
        if warnings_key is None:
            warnings_key = ["Warnings"]
        for key in warnings_key:
            if not isinstance(result, Mapping):
                return
            result = result.get(key)
        # Check for a string first: str is itself a Sequence, so testing the
        # Sequence case first would warn once per character.
        if isinstance(result, str) and result:
            self.module.warn(f"Docker warning: {result}")
        elif isinstance(result, Sequence):
            for warning in result:
                self.module.warn(f"Docker warning: {warning}")

@ -0,0 +1,729 @@
# Copyright 2016 Red Hat | Ansible
# Copyright (c) 2022 Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import abc
import os
import re
import typing as t
from collections.abc import Mapping, Sequence

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE, BOOLEANS_TRUE

from ansible_collections.community.docker.plugins.module_utils._version import (
    LooseVersion,
)

try:
    from requests.exceptions import (  # noqa: F401, pylint: disable=unused-import
        RequestException,
        SSLError,
    )
except ImportError:
    # Define an exception class RequestException so that our code does not break.
    class RequestException(Exception):  # type: ignore
        pass


from ansible_collections.community.docker.plugins.module_utils._api import auth
from ansible_collections.community.docker.plugins.module_utils._api.api.client import (
    APIClient as Client,
)
from ansible_collections.community.docker.plugins.module_utils._api.errors import (
    APIError,
    MissingRequirementException,
    NotFound,
    TLSParameterError,
)
from ansible_collections.community.docker.plugins.module_utils._api.tls import TLSConfig
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
    convert_filters,
    parse_repository_tag,
)
from ansible_collections.community.docker.plugins.module_utils._util import (
    DEFAULT_DOCKER_HOST,
    DEFAULT_TIMEOUT_SECONDS,
    DEFAULT_TLS,
    DEFAULT_TLS_VERIFY,
    DOCKER_COMMON_ARGS,
    DOCKER_MUTUALLY_EXCLUSIVE,
    DOCKER_REQUIRED_TOGETHER,
    sanitize_result,
    update_tls_hostname,
)

if t.TYPE_CHECKING:
    from collections.abc import Callable


def _get_tls_config(
    fail_function: Callable[[str], t.NoReturn], **kwargs: t.Any
) -> TLSConfig:
    try:
        return TLSConfig(**kwargs)
    except TLSParameterError as exc:
        fail_function(f"TLS config error: {exc}")


def is_using_tls(auth_data: dict[str, t.Any]) -> bool:
    return auth_data["tls_verify"] or auth_data["tls"]


def get_connect_params(
    auth_data: dict[str, t.Any], fail_function: Callable[[str], t.NoReturn]
) -> dict[str, t.Any]:
    if is_using_tls(auth_data):
        auth_data["docker_host"] = auth_data["docker_host"].replace(
            "tcp://", "https://"
        )

    result = {
        "base_url": auth_data["docker_host"],
        "version": auth_data["api_version"],
        "timeout": auth_data["timeout"],
    }

    if auth_data["tls_verify"]:
        # TLS with verification
        tls_config = {
            "verify": True,
            "assert_hostname": auth_data["tls_hostname"],
            "fail_function": fail_function,
        }
        if auth_data["cert_path"] and auth_data["key_path"]:
            tls_config["client_cert"] = (auth_data["cert_path"], auth_data["key_path"])
        if auth_data["cacert_path"]:
            tls_config["ca_cert"] = auth_data["cacert_path"]
        result["tls"] = _get_tls_config(**tls_config)
    elif auth_data["tls"]:
        # TLS without verification
        tls_config = {
            "verify": False,
            "fail_function": fail_function,
        }
        if auth_data["cert_path"] and auth_data["key_path"]:
            tls_config["client_cert"] = (auth_data["cert_path"], auth_data["key_path"])
        result["tls"] = _get_tls_config(**tls_config)

    if auth_data.get("use_ssh_client"):
        result["use_ssh_client"] = True

    # No TLS
    return result

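# Illustration only (not part of the vendored module): get_connect_params()
# above rewrites a tcp:// daemon address to https:// whenever TLS is in play,
# and only attaches a TLSConfig when verification or plain TLS was requested.
# A minimal sketch with hypothetical auth data:
def _example_connect_params() -> dict[str, t.Any]:
    def _fail(msg: str) -> t.NoReturn:
        raise RuntimeError(msg)

    auth_data = {
        "docker_host": "tcp://192.0.2.10:2376",  # hypothetical daemon address
        "api_version": "auto",
        "timeout": 60,
        "tls": False,
        "tls_verify": True,
        "tls_hostname": None,
        "cert_path": None,
        "key_path": None,
        "cacert_path": None,
        "use_ssh_client": False,
    }
    # tls_verify is set, so base_url becomes "https://192.0.2.10:2376" and
    # result["tls"] is a TLSConfig built with verify=True.
    return get_connect_params(auth_data, fail_function=_fail)

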
class AnsibleDockerClientBase(Client):
    def __init__(self, min_docker_api_version: str | None = None) -> None:
        self._connect_params = get_connect_params(
            self.auth_params, fail_function=self.fail
        )

        try:
            super().__init__(**self._connect_params)
            self.docker_api_version_str = self.api_version
        except MissingRequirementException as exc:
            self.fail(
                missing_required_lib(exc.requirement), exception=exc.import_exception
            )
        except APIError as exc:
            self.fail(f"Docker API error: {exc}")
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error connecting: {exc}")

        self.docker_api_version = LooseVersion(self.docker_api_version_str)
        min_docker_api_version = min_docker_api_version or "1.25"
        if self.docker_api_version < LooseVersion(min_docker_api_version):
            self.fail(
                f"Docker API version is {self.docker_api_version_str}. Minimum version required is {min_docker_api_version}."
            )

    def log(self, msg: t.Any, pretty_print: bool = False) -> None:
        pass
        # if self.debug:
        #     from .util import log_debug
        #     log_debug(msg, pretty_print=pretty_print)

    @abc.abstractmethod
    def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
        pass

    @abc.abstractmethod
    def deprecate(
        self,
        msg: str,
        version: str | None = None,
        date: str | None = None,
        collection_name: str | None = None,
    ) -> None:
        pass

    @staticmethod
    def _get_value(
        param_name: str,
        param_value: t.Any,
        env_variable: str | None,
        default_value: t.Any | None,
        value_type: t.Literal["str", "bool", "int"] = "str",
    ) -> t.Any:
        if param_value is not None:
            # take module parameter value
            if value_type == "bool":
                if param_value in BOOLEANS_TRUE:
                    return True
                if param_value in BOOLEANS_FALSE:
                    return False
                return bool(param_value)
            if value_type == "int":
                return int(param_value)
            return param_value

        if env_variable is not None:
            env_value = os.environ.get(env_variable)
            if env_value is not None:
                # take the env variable value
                if param_name == "cert_path":
                    return os.path.join(env_value, "cert.pem")
                if param_name == "cacert_path":
                    return os.path.join(env_value, "ca.pem")
                if param_name == "key_path":
                    return os.path.join(env_value, "key.pem")
                if value_type == "bool":
                    if env_value in BOOLEANS_TRUE:
                        return True
                    if env_value in BOOLEANS_FALSE:
                        return False
                    return bool(env_value)
                if value_type == "int":
                    return int(env_value)
                return env_value

        # take the default
        return default_value

    @abc.abstractmethod
    def _get_params(self) -> dict[str, t.Any]:
        pass

    @property
    def auth_params(self) -> dict[str, t.Any]:
        # Get authentication credentials.
        # Precedence: module parameters -> environment variables -> defaults.

        self.log("Getting credentials")

        client_params = self._get_params()

        params = {}
        for key in DOCKER_COMMON_ARGS:
            params[key] = client_params.get(key)

        result = {
            "docker_host": self._get_value(
                "docker_host",
                params["docker_host"],
                "DOCKER_HOST",
                DEFAULT_DOCKER_HOST,
                value_type="str",
            ),
            "tls_hostname": self._get_value(
                "tls_hostname",
                params["tls_hostname"],
                "DOCKER_TLS_HOSTNAME",
                None,
                value_type="str",
            ),
            "api_version": self._get_value(
                "api_version",
                params["api_version"],
                "DOCKER_API_VERSION",
                "auto",
                value_type="str",
            ),
            "cacert_path": self._get_value(
                "cacert_path",
                params["ca_path"],
                "DOCKER_CERT_PATH",
                None,
                value_type="str",
            ),
            "cert_path": self._get_value(
                "cert_path",
                params["client_cert"],
                "DOCKER_CERT_PATH",
                None,
                value_type="str",
            ),
            "key_path": self._get_value(
                "key_path",
                params["client_key"],
                "DOCKER_CERT_PATH",
                None,
                value_type="str",
            ),
            "tls": self._get_value(
                "tls", params["tls"], "DOCKER_TLS", DEFAULT_TLS, value_type="bool"
            ),
            "tls_verify": self._get_value(
                "validate_certs",
                params["validate_certs"],
                "DOCKER_TLS_VERIFY",
                DEFAULT_TLS_VERIFY,
                value_type="bool",
            ),
            "timeout": self._get_value(
                "timeout",
                params["timeout"],
                "DOCKER_TIMEOUT",
                DEFAULT_TIMEOUT_SECONDS,
                value_type="int",
            ),
            "use_ssh_client": self._get_value(
                "use_ssh_client",
                params["use_ssh_client"],
                None,
                False,
                value_type="bool",
            ),
        }

        def depr(*args: t.Any, **kwargs: t.Any) -> None:
            self.deprecate(*args, **kwargs)

        update_tls_hostname(
            result,
            old_behavior=True,
            deprecate_function=depr,
            uses_tls=is_using_tls(result),
        )

        return result

    def _handle_ssl_error(self, error: Exception) -> t.NoReturn:
        match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
        if match:
            hostname = self.auth_params["tls_hostname"]
            self.fail(
                f"You asked for verification that the Docker daemon's certificate hostname matches {hostname}. "
                f"The actual certificate's hostname is {match.group(1)}. Most likely you need to set DOCKER_TLS_HOSTNAME "
                f"or pass `tls_hostname` with a value of {match.group(1)}. You may also use TLS without verification by "
                "setting the `tls` parameter to true."
            )
        self.fail(f"SSL Exception: {error}")

    def get_container_by_id(self, container_id: str) -> dict[str, t.Any] | None:
        try:
            self.log(f"Inspecting container Id {container_id}")
            result = self.get_json("/containers/{0}/json", container_id)
            self.log("Completed container inspection")
            return result
        except NotFound:
            return None
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error inspecting container: {exc}")

    def get_container(self, name: str | None) -> dict[str, t.Any] | None:
        """
        Lookup a container and return the inspection results.
        """
        if name is None:
            return None

        search_name = name
        if not name.startswith("/"):
            search_name = "/" + name

        result = None
        try:
            params = {
                "limit": -1,
                "all": 1,
                "size": 0,
                "trunc_cmd": 0,
            }
            containers = self.get_json("/containers/json", params=params)
            for container in containers:
                self.log(f"testing container: {container['Names']}")
                if (
                    isinstance(container["Names"], list)
                    and search_name in container["Names"]
                ):
                    result = container
                    break
                if container["Id"].startswith(name):
                    result = container
                    break
                if container["Id"] == name:
                    result = container
                    break
        except SSLError as exc:
            self._handle_ssl_error(exc)
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error retrieving container list: {exc}")

        if result is None:
            return None

        return self.get_container_by_id(result["Id"])

    def get_network(
        self, name: str | None = None, network_id: str | None = None
    ) -> dict[str, t.Any] | None:
        """
        Lookup a network and return the inspection results.
        """
        if name is None and network_id is None:
            return None

        result = None

        if network_id is None:
            try:
                networks = self.get_json("/networks")
                for network in networks:
                    self.log(f"testing network: {network['Name']}")
                    if name == network["Name"]:
                        result = network
                        break
                    if network["Id"].startswith(name):
                        result = network
                        break
            except SSLError as exc:
                self._handle_ssl_error(exc)
            except Exception as exc:  # pylint: disable=broad-exception-caught
                self.fail(f"Error retrieving network list: {exc}")

            if result is not None:
                network_id = result["Id"]

        if network_id is not None:
            try:
                self.log(f"Inspecting network Id {network_id}")
                result = self.get_json("/networks/{0}", network_id)
                self.log("Completed network inspection")
            except NotFound:
                return None
            except Exception as exc:  # pylint: disable=broad-exception-caught
                self.fail(f"Error inspecting network: {exc}")

        return result

    def _image_lookup(self, name: str, tag: str | None) -> list[dict[str, t.Any]]:
        """
        Including a tag in the name parameter sent to the Docker SDK for Python images method
        does not work consistently. Instead, get the result set for name and manually check
        if the tag exists.
        """
        try:
            params: dict[str, t.Any] = {
                "only_ids": 0,
                "all": 0,
            }
            if LooseVersion(self.api_version) < LooseVersion("1.25"):
                # only use "filter" on API 1.24 and under, as it is deprecated
                params["filter"] = name
            else:
                params["filters"] = convert_filters({"reference": name})
            images = self.get_json("/images/json", params=params)
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error searching for image {name} - {exc}")
        if tag:
            lookup = f"{name}:{tag}"
            lookup_digest = f"{name}@{tag}"
            response = images
            images = []
            for image in response:
                tags = image.get("RepoTags")
                digests = image.get("RepoDigests")
                if (tags and lookup in tags) or (digests and lookup_digest in digests):
                    images = [image]
                    break
        return images

    def find_image(self, name: str, tag: str | None) -> dict[str, t.Any] | None:
        """
        Lookup an image (by name and tag) and return the inspection results.
        """
        if not name:
            return None

        self.log(f"Find image {name}:{tag}")
        images = self._image_lookup(name, tag)
        if not images:
            # In API <= 1.20 seeing 'docker.io/<name>' as the name of images pulled from docker hub
            registry, repo_name = auth.resolve_repository_name(name)
            if registry == "docker.io":
                # If docker.io is explicitly there in name, the image
                # is not found in some cases (#41509)
                self.log(f"Check for docker.io image: {repo_name}")
                images = self._image_lookup(repo_name, tag)
                if not images and repo_name.startswith("library/"):
                    # Sometimes library/xxx images are not found
                    lookup = repo_name[len("library/") :]
                    self.log(f"Check for docker.io image: {lookup}")
                    images = self._image_lookup(lookup, tag)
                if not images:
                    # Last case for some Docker versions: if docker.io was not there,
                    # it can be that the image was not found either
                    # (https://github.com/ansible/ansible/pull/15586)
                    lookup = f"{registry}/{repo_name}"
                    self.log(f"Check for docker.io image: {lookup}")
                    images = self._image_lookup(lookup, tag)
                if not images and "/" not in repo_name:
                    # This seems to be happening with podman-docker
                    # (https://github.com/ansible-collections/community.docker/issues/291)
                    lookup = f"{registry}/library/{repo_name}"
                    self.log(f"Check for docker.io image: {lookup}")
                    images = self._image_lookup(lookup, tag)

        if len(images) > 1:
            self.fail(f"Daemon returned more than one result for {name}:{tag}")

        if len(images) == 1:
            try:
                return self.get_json("/images/{0}/json", images[0]["Id"])
            except NotFound:
                self.log(f"Image {name}:{tag} not found.")
                return None
            except Exception as exc:  # pylint: disable=broad-exception-caught
                self.fail(f"Error inspecting image {name}:{tag} - {exc}")

        self.log(f"Image {name}:{tag} not found.")
        return None

    def find_image_by_id(
        self, image_id: str, accept_missing_image: bool = False
    ) -> dict[str, t.Any] | None:
        """
        Lookup an image (by ID) and return the inspection results.
        """
        if not image_id:
            return None

        self.log(f"Find image {image_id} (by ID)")
        try:
            return self.get_json("/images/{0}/json", image_id)
        except NotFound as exc:
            if not accept_missing_image:
                self.fail(f"Error inspecting image ID {image_id} - {exc}")
            self.log(f"Image {image_id} not found.")
            return None
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error inspecting image ID {image_id} - {exc}")

    @staticmethod
    def _compare_images(
        img1: dict[str, t.Any] | None, img2: dict[str, t.Any] | None
    ) -> bool:
        if img1 is None or img2 is None:
            return img1 == img2
        filter_keys = {"Metadata"}
        img1_filtered = {k: v for k, v in img1.items() if k not in filter_keys}
        img2_filtered = {k: v for k, v in img2.items() if k not in filter_keys}
        return img1_filtered == img2_filtered

    def pull_image(
        self, name: str, tag: str = "latest", image_platform: str | None = None
    ) -> tuple[dict[str, t.Any] | None, bool]:
        """
        Pull an image
        """
        self.log(f"Pulling image {name}:{tag}")
        old_image = self.find_image(name, tag)
        try:
            repository, image_tag = parse_repository_tag(name)
            registry, dummy_repo_name = auth.resolve_repository_name(repository)
            params = {
                "tag": tag or image_tag or "latest",
                "fromImage": repository,
            }
            if image_platform is not None:
                params["platform"] = image_platform

            headers = {}
            header = auth.get_config_header(self, registry)
            if header:
                headers["X-Registry-Auth"] = header

            response = self._post(
                self._url("/images/create"),
                params=params,
                headers=headers,
                stream=True,
                timeout=None,
            )
            self._raise_for_status(response)
            for line in self._stream_helper(response, decode=True):
                self.log(line, pretty_print=True)
                if line.get("error"):
                    if line.get("errorDetail"):
                        error_detail = line.get("errorDetail")
                        self.fail(
                            f"Error pulling {name} - code: {error_detail.get('code')} message: {error_detail.get('message')}"
                        )
                    else:
                        self.fail(f"Error pulling {name} - {line.get('error')}")
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error pulling image {name}:{tag} - {exc}")

        new_image = self.find_image(name, tag)

        return new_image, self._compare_images(old_image, new_image)

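# Illustration only (not part of the vendored module): pull_image() above reads
# the daemon's JSON progress stream from /images/create line by line. A minimal
# sketch of the error shape it checks for (all values hypothetical):
_EXAMPLE_PULL_ERROR_LINE: dict[str, t.Any] = {
    "error": "manifest unknown",
    "errorDetail": {"code": 404, "message": "manifest unknown"},
}
# pull_image() reports errorDetail's code/message when present and falls back
# to the bare "error" value otherwise; successful progress lines carry no "error".

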
class AnsibleDockerClient(AnsibleDockerClientBase):
    def __init__(
        self,
        argument_spec: dict[str, t.Any] | None = None,
        supports_check_mode: bool = False,
        mutually_exclusive: Sequence[Sequence[str]] | None = None,
        required_together: Sequence[Sequence[str]] | None = None,
        required_if: (
            Sequence[
                tuple[str, t.Any, Sequence[str]]
                | tuple[str, t.Any, Sequence[str], bool]
            ]
            | None
        ) = None,
        required_one_of: Sequence[Sequence[str]] | None = None,
        required_by: dict[str, Sequence[str]] | None = None,
        min_docker_api_version: str | None = None,
        option_minimal_versions: dict[str, t.Any] | None = None,
        option_minimal_versions_ignore_params: Sequence[str] | None = None,
        fail_results: dict[str, t.Any] | None = None,
    ):
        # Modules can put information in here which will always be returned
        # in case client.fail() is called.
        self.fail_results = fail_results or {}

        merged_arg_spec = {}
        merged_arg_spec.update(DOCKER_COMMON_ARGS)
        if argument_spec:
            merged_arg_spec.update(argument_spec)
        self.arg_spec = merged_arg_spec

        mutually_exclusive_params: list[Sequence[str]] = []
        mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
        if mutually_exclusive:
            mutually_exclusive_params += mutually_exclusive

        required_together_params: list[Sequence[str]] = []
        required_together_params += DOCKER_REQUIRED_TOGETHER
        if required_together:
            required_together_params += required_together

        self.module = AnsibleModule(
            argument_spec=merged_arg_spec,
            supports_check_mode=supports_check_mode,
            mutually_exclusive=mutually_exclusive_params,
            required_together=required_together_params,
            required_if=required_if,
            required_one_of=required_one_of,
            required_by=required_by or {},
        )

        self.debug = self.module.params.get("debug")
        self.check_mode = self.module.check_mode

        super().__init__(min_docker_api_version=min_docker_api_version)

        if option_minimal_versions is not None:
            self._get_minimal_versions(
                option_minimal_versions, option_minimal_versions_ignore_params
            )

    def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
        self.fail_results.update(kwargs)
        self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))

    def deprecate(
        self,
        msg: str,
        version: str | None = None,
        date: str | None = None,
        collection_name: str | None = None,
    ) -> None:
        self.module.deprecate(
            msg, version=version, date=date, collection_name=collection_name
        )

    def _get_params(self) -> dict[str, t.Any]:
        return self.module.params

    def _get_minimal_versions(
        self,
        option_minimal_versions: dict[str, t.Any],
        ignore_params: Sequence[str] | None = None,
    ) -> None:
        self.option_minimal_versions: dict[str, dict[str, t.Any]] = {}
        for option in self.module.argument_spec:
            if ignore_params is not None and option in ignore_params:
                continue
            self.option_minimal_versions[option] = {}
        self.option_minimal_versions.update(option_minimal_versions)

        for option, data in self.option_minimal_versions.items():
            # Test whether option is supported, and store result
            support_docker_api = True
            if "docker_api_version" in data:
                support_docker_api = self.docker_api_version >= LooseVersion(
                    data["docker_api_version"]
                )
            data["supported"] = support_docker_api
            # Fail if option is not supported but used
            if not data["supported"]:
                # Test whether option is specified
                if "detect_usage" in data:
                    used = data["detect_usage"](self)
                else:
                    used = self.module.params.get(option) is not None
                    if used and "default" in self.module.argument_spec[option]:
                        used = (
                            self.module.params[option]
                            != self.module.argument_spec[option]["default"]
                        )
                if used:
                    # If the option is used, compose error message.
                    if "usage_msg" in data:
                        usg = data["usage_msg"]
                    else:
                        usg = f"set {option} option"
                    if not support_docker_api:
                        msg = f"Docker API version is {self.docker_api_version_str}. Minimum version required is {data['docker_api_version']} to {usg}."
                    else:
                        # should not happen
                        msg = f"Cannot {usg} with your configuration."
                    self.fail(msg)

    def report_warnings(
        self, result: t.Any, warnings_key: Sequence[str] | None = None
    ) -> None:
        """
        Checks result of client operation for warnings, and if present, outputs them.

        warnings_key should be a list of keys used to crawl the result dictionary.
        For example, if warnings_key == ['a', 'b'], the function will consider
        result['a']['b'] if these keys exist. If the result is a non-empty string, it
        will be reported as a warning. If the result is a list, every entry will be
        reported as a warning.

        In most cases (if warnings are returned at all), warnings_key should be
        ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
        """
        if warnings_key is None:
            warnings_key = ["Warnings"]
        for key in warnings_key:
            if not isinstance(result, Mapping):
                return
            result = result.get(key)
        # Check for a string first: str is itself a Sequence, so testing the
        # Sequence case first would warn once per character.
        if isinstance(result, str) and result:
            self.module.warn(f"Docker warning: {result}")
        elif isinstance(result, Sequence):
            for warning in result:
                self.module.warn(f"Docker warning: {warning}")

@ -0,0 +1,489 @@
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import abc
import json
import shlex
import typing as t

from ansible.module_utils.basic import AnsibleModule, env_fallback
from ansible.module_utils.common.process import get_bin_path
from ansible.module_utils.common.text.converters import to_text

from ansible_collections.community.docker.plugins.module_utils._api.auth import (
    resolve_repository_name,
)
from ansible_collections.community.docker.plugins.module_utils._util import (
    DEFAULT_DOCKER_HOST,
    DEFAULT_TLS,
    DEFAULT_TLS_VERIFY,
    DOCKER_MUTUALLY_EXCLUSIVE,
    DOCKER_REQUIRED_TOGETHER,
    sanitize_result,
)
from ansible_collections.community.docker.plugins.module_utils._version import (
    LooseVersion,
)

if t.TYPE_CHECKING:
    from collections.abc import Mapping, Sequence


DOCKER_COMMON_ARGS = {
    "docker_cli": {"type": "path"},
    "docker_host": {
        "type": "str",
        "fallback": (env_fallback, ["DOCKER_HOST"]),
        "aliases": ["docker_url"],
    },
    "tls_hostname": {
        "type": "str",
        "fallback": (env_fallback, ["DOCKER_TLS_HOSTNAME"]),
    },
    "api_version": {
        "type": "str",
        "default": "auto",
        "fallback": (env_fallback, ["DOCKER_API_VERSION"]),
        "aliases": ["docker_api_version"],
    },
    "ca_path": {"type": "path", "aliases": ["ca_cert", "tls_ca_cert", "cacert_path"]},
    "client_cert": {"type": "path", "aliases": ["tls_client_cert", "cert_path"]},
    "client_key": {"type": "path", "aliases": ["tls_client_key", "key_path"]},
    "tls": {
        "type": "bool",
        "default": DEFAULT_TLS,
        "fallback": (env_fallback, ["DOCKER_TLS"]),
    },
    "validate_certs": {
        "type": "bool",
        "default": DEFAULT_TLS_VERIFY,
        "fallback": (env_fallback, ["DOCKER_TLS_VERIFY"]),
        "aliases": ["tls_verify"],
    },
    # "debug": {"type": "bool", "default": False},
    "cli_context": {"type": "str"},
}

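# Illustration only (not part of the vendored module): every option above can be
# fed from the environment through env_fallback, so a module spec built on top
# of DOCKER_COMMON_ARGS picks up e.g. DOCKER_HOST automatically. A minimal
# sketch of extending the common spec with a hypothetical module option:
def _example_argument_spec() -> dict[str, t.Any]:
    spec = dict(DOCKER_COMMON_ARGS)
    spec["name"] = {"type": "str", "required": True}  # hypothetical option
    return spec

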
class DockerException(Exception):
    pass


class AnsibleDockerClientBase:
    docker_api_version_str: str | None
    docker_api_version: LooseVersion | None

    def __init__(
        self,
        common_args: dict[str, t.Any],
        min_docker_api_version: str | None = None,
        needs_api_version: bool = True,
    ) -> None:
        self._environment: dict[str, str] = {}
        if common_args["tls_hostname"]:
            self._environment["DOCKER_TLS_HOSTNAME"] = common_args["tls_hostname"]
        if common_args["api_version"] and common_args["api_version"] != "auto":
            self._environment["DOCKER_API_VERSION"] = common_args["api_version"]
        cli = common_args.get("docker_cli")
        if cli is None:
            try:
                cli = get_bin_path("docker")
            except ValueError:
                self.fail(
                    "Cannot find docker CLI in path. Please provide it explicitly with the docker_cli parameter"
                )
        self._cli = cli
        self._cli_base = [self._cli]
        docker_host = common_args["docker_host"]
        if not docker_host and not common_args["cli_context"]:
            docker_host = DEFAULT_DOCKER_HOST
        if docker_host:
            self._cli_base.extend(["--host", docker_host])
        if common_args["validate_certs"]:
            self._cli_base.append("--tlsverify")
        elif common_args["tls"]:
            self._cli_base.append("--tls")
        if common_args["ca_path"]:
            self._cli_base.extend(["--tlscacert", common_args["ca_path"]])
        if common_args["client_cert"]:
            self._cli_base.extend(["--tlscert", common_args["client_cert"]])
        if common_args["client_key"]:
            self._cli_base.extend(["--tlskey", common_args["client_key"]])
        if common_args["cli_context"]:
            self._cli_base.extend(["--context", common_args["cli_context"]])

        # `--format json` was only added as a shorthand for `--format {{ json . }}` in Docker 23.0
        dummy, self._version, dummy2 = self.call_cli_json(
            "version", "--format", "{{ json . }}", check_rc=True
        )
        self._info: dict[str, t.Any] | None = None

        if needs_api_version:
            # Validate the "Server" structure before indexing into it, so a
            # missing or malformed server section fails cleanly instead of
            # raising an exception.
            if not isinstance(self._version.get("Server"), dict):
                self.fail(
                    "Cannot determine Docker Daemon information. Are you maybe using podman instead of docker?"
                )
            api_version_string = self._version["Server"].get(
                "ApiVersion"
            ) or self._version["Server"].get("APIVersion")
            if not isinstance(api_version_string, str):
                self.fail(
                    "Cannot determine Docker Daemon information. Are you maybe using podman instead of docker?"
                )
            self.docker_api_version_str = to_text(api_version_string)
            self.docker_api_version = LooseVersion(self.docker_api_version_str)
            min_docker_api_version = min_docker_api_version or "1.25"
            if self.docker_api_version < LooseVersion(min_docker_api_version):
                self.fail(
                    f"Docker API version is {self.docker_api_version_str}. Minimum version required is {min_docker_api_version}."
                )
        else:
            self.docker_api_version_str = None
            self.docker_api_version = None
            if min_docker_api_version is not None:
                self.fail(
                    "Internal error: cannot have needs_api_version=False with min_docker_api_version not None"
                )

    def log(self, msg: str, pretty_print: bool = False) -> None:
        pass
        # if self.debug:
        #     from .util import log_debug
        #     log_debug(msg, pretty_print=pretty_print)

    def get_cli(self) -> str:
        return self._cli

    def get_version_info(self) -> dict[str, t.Any]:
        # The parsed output of `docker version`, not a plain string.
        return self._version

    def _compose_cmd(self, args: t.Sequence[str]) -> list[str]:
        return self._cli_base + list(args)

    def _compose_cmd_str(self, args: t.Sequence[str]) -> str:
        return " ".join(shlex.quote(a) for a in self._compose_cmd(args))

    @abc.abstractmethod
    def call_cli(
        self,
        *args: str,
        check_rc: bool = False,
        data: bytes | None = None,
        cwd: str | None = None,
        environ_update: dict[str, str] | None = None,
    ) -> tuple[int, bytes, bytes]:
        pass

    def call_cli_json(
        self,
        *args: str,
        check_rc: bool = False,
        data: bytes | None = None,
        cwd: str | None = None,
        environ_update: dict[str, str] | None = None,
        warn_on_stderr: bool = False,
    ) -> tuple[int, t.Any, bytes]:
        rc, stdout, stderr = self.call_cli(
            *args, check_rc=check_rc, data=data, cwd=cwd, environ_update=environ_update
        )
        if warn_on_stderr and stderr:
            self.warn(to_text(stderr))
        try:
            data = json.loads(stdout)
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(
                f"Error while parsing JSON output of {self._compose_cmd_str(args)}: {exc}\nJSON output: {to_text(stdout)}\n\nError output:\n{to_text(stderr)}",
                cmd=self._compose_cmd_str(args),
                rc=rc,
                stdout=stdout,
                stderr=stderr,
            )
        return rc, data, stderr

    def call_cli_json_stream(
        self,
        *args: str,
        check_rc: bool = False,
        data: bytes | None = None,
        cwd: str | None = None,
        environ_update: dict[str, str] | None = None,
        warn_on_stderr: bool = False,
    ) -> tuple[int, list[t.Any], bytes]:
        rc, stdout, stderr = self.call_cli(
            *args, check_rc=check_rc, data=data, cwd=cwd, environ_update=environ_update
        )
        if warn_on_stderr and stderr:
            self.warn(to_text(stderr))
        result = []
        try:
            for line in stdout.splitlines():
                line = line.strip()
                if line.startswith(b"{"):
                    result.append(json.loads(line))
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(
                f"Error while parsing JSON output of {self._compose_cmd_str(args)}: {exc}\nJSON output: {to_text(stdout)}\n\nError output:\n{to_text(stderr)}",
                cmd=self._compose_cmd_str(args),
                rc=rc,
                stdout=stdout,
                stderr=stderr,
            )
        return rc, result, stderr

    @abc.abstractmethod
    def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
        pass

    @abc.abstractmethod
    def warn(self, msg: str) -> None:
        pass

    @abc.abstractmethod
    def deprecate(
        self,
        msg: str,
        version: str | None = None,
        date: str | None = None,
        collection_name: str | None = None,
    ) -> None:
        pass

    def get_cli_info(self) -> dict[str, t.Any]:
        if self._info is None:
            dummy, self._info, dummy2 = self.call_cli_json(
                "info", "--format", "{{ json . }}", check_rc=True
            )
        return self._info

    def get_client_plugin_info(self, component: str) -> dict[str, t.Any] | None:
        cli_info = self.get_cli_info()
        if not isinstance(cli_info.get("ClientInfo"), dict):
            self.fail(
                "Cannot determine Docker client information. Are you maybe using podman instead of docker?"
            )
        for plugin in cli_info["ClientInfo"].get("Plugins") or []:
            if plugin.get("Name") == component:
                return plugin
        return None

    def _image_lookup(self, name: str, tag: str) -> list[dict[str, t.Any]]:
        """
        Including a tag in the name parameter sent to the Docker SDK for Python images method
        does not work consistently. Instead, get the result set for name and manually check
        if the tag exists.
        """
        dummy, images, dummy2 = self.call_cli_json_stream(
            "image",
            "ls",
            "--format",
            "{{ json . }}",
            "--no-trunc",
            "--filter",
            f"reference={name}",
            check_rc=True,
        )
        if tag:
            response = images
            images = []
            for image in response:
                if image.get("Tag") == tag or image.get("Digest") == tag:
                    images = [image]
                    break
        return images

    @t.overload
    def find_image(self, name: None, tag: str) -> None: ...

    @t.overload
    def find_image(self, name: str, tag: str) -> dict[str, t.Any] | None: ...

    def find_image(self, name: str | None, tag: str) -> dict[str, t.Any] | None:
        """
        Lookup an image (by name and tag) and return the inspection results.
        """
        if not name:
            return None

        self.log(f"Find image {name}:{tag}")
        images = self._image_lookup(name, tag)
        if not images:
            # In API <= 1.20 seeing 'docker.io/<name>' as the name of images pulled from docker hub
            registry, repo_name = resolve_repository_name(name)
            if registry == "docker.io":
                # If docker.io is explicitly there in name, the image
                # is not found in some cases (#41509)
                self.log(f"Check for docker.io image: {repo_name}")
                images = self._image_lookup(repo_name, tag)
                if not images and repo_name.startswith("library/"):
                    # Sometimes library/xxx images are not found
                    lookup = repo_name[len("library/") :]
                    self.log(f"Check for docker.io image: {lookup}")
                    images = self._image_lookup(lookup, tag)
                if not images:
                    # Last case for some Docker versions: if docker.io was not there,
                    # it can be that the image was not found either
                    # (https://github.com/ansible/ansible/pull/15586)
                    lookup = f"{registry}/{repo_name}"
                    self.log(f"Check for docker.io image: {lookup}")
                    images = self._image_lookup(lookup, tag)
                if not images and "/" not in repo_name:
                    # This seems to be happening with podman-docker
                    # (https://github.com/ansible-collections/community.docker/issues/291)
                    lookup = f"{registry}/library/{repo_name}"
                    self.log(f"Check for docker.io image: {lookup}")
                    images = self._image_lookup(lookup, tag)

        if len(images) > 1:
            self.fail(f"Daemon returned more than one result for {name}:{tag}")

        if len(images) == 1:
            rc, image, stderr = self.call_cli_json("image", "inspect", images[0]["ID"])
            if not image:
                self.log(f"Image {name}:{tag} not found.")
                return None
            if rc != 0:
                self.fail(f"Error inspecting image {name}:{tag} - {to_text(stderr)}")
            return image[0]

        self.log(f"Image {name}:{tag} not found.")
        return None

    @t.overload
    def find_image_by_id(
        self, image_id: None, accept_missing_image: bool = False
    ) -> None: ...

    @t.overload
    def find_image_by_id(
        self, image_id: str | None, accept_missing_image: bool = False
    ) -> dict[str, t.Any] | None: ...

    def find_image_by_id(
        self, image_id: str | None, accept_missing_image: bool = False
    ) -> dict[str, t.Any] | None:
        """
        Lookup an image (by ID) and return the inspection results.
        """
        if not image_id:
            return None

        self.log(f"Find image {image_id} (by ID)")
        rc, image, stderr = self.call_cli_json("image", "inspect", image_id)
        if not image:
            if not accept_missing_image:
                self.fail(f"Error inspecting image ID {image_id} - {to_text(stderr)}")
            self.log(f"Image {image_id} not found.")
            return None
        if rc != 0:
            self.fail(f"Error inspecting image ID {image_id} - {to_text(stderr)}")
        return image[0]

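# Illustration only (not part of the vendored module): call_cli_json_stream()
# above tolerates non-JSON noise by parsing only lines that start with "{",
# which is the shape `--format '{{ json . }}'` produces: one object per line.
# A minimal sketch over hypothetical CLI output:
def _example_parse_json_lines(stdout: bytes) -> list[t.Any]:
    parsed = []
    for line in stdout.splitlines():
        line = line.strip()
        if line.startswith(b"{"):
            parsed.append(json.loads(line))
    return parsed

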
class AnsibleModuleDockerClient(AnsibleDockerClientBase):
    def __init__(
        self,
        argument_spec: dict[str, t.Any] | None = None,
        supports_check_mode: bool = False,
        mutually_exclusive: Sequence[Sequence[str]] | None = None,
        required_together: Sequence[Sequence[str]] | None = None,
        required_if: (
            Sequence[
                tuple[str, t.Any, Sequence[str]]
                | tuple[str, t.Any, Sequence[str], bool]
            ]
            | None
        ) = None,
        required_one_of: Sequence[Sequence[str]] | None = None,
        required_by: Mapping[str, Sequence[str]] | None = None,
        min_docker_api_version: str | None = None,
        fail_results: dict[str, t.Any] | None = None,
        needs_api_version: bool = True,
    ) -> None:
        # Modules can put information in here which will always be returned
        # in case client.fail() is called.
        self.fail_results = fail_results or {}

        merged_arg_spec = {}
        merged_arg_spec.update(DOCKER_COMMON_ARGS)
        if argument_spec:
            merged_arg_spec.update(argument_spec)
        self.arg_spec = merged_arg_spec

        mutually_exclusive_params: list[Sequence[str]] = [
            ("docker_host", "cli_context")
        ]
        mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
        if mutually_exclusive:
            mutually_exclusive_params += mutually_exclusive

        required_together_params: list[Sequence[str]] = []
        required_together_params += DOCKER_REQUIRED_TOGETHER
        if required_together:
            required_together_params += required_together

        self.module = AnsibleModule(
            argument_spec=merged_arg_spec,
            supports_check_mode=supports_check_mode,
            mutually_exclusive=mutually_exclusive_params,
            required_together=required_together_params,
            required_if=required_if,
            required_one_of=required_one_of,
            required_by=required_by or {},
        )

        self.debug = False  # self.module.params['debug']
        self.check_mode = self.module.check_mode
        self.diff = self.module._diff

        common_args = dict((k, self.module.params[k]) for k in DOCKER_COMMON_ARGS)
        super().__init__(
            common_args,
            min_docker_api_version=min_docker_api_version,
            needs_api_version=needs_api_version,
        )

    def call_cli(
        self,
        *args: str,
        check_rc: bool = False,
        data: bytes | None = None,
        cwd: str | None = None,
        environ_update: dict[str, str] | None = None,
    ) -> tuple[int, bytes, bytes]:
        environment = self._environment.copy()
        if environ_update:
            environment.update(environ_update)
        rc, stdout, stderr = self.module.run_command(
            self._compose_cmd(args),
            binary_data=True,
            check_rc=check_rc,
            cwd=cwd,
            data=data,
            encoding=None,
            environ_update=environment,
            expand_user_and_vars=False,
            ignore_invalid_cwd=False,
        )
        return rc, stdout, stderr

    def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
        self.fail_results.update(kwargs)
        self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))

    def warn(self, msg: str) -> None:
        self.module.warn(msg)

    def deprecate(
        self,
        msg: str,
        version: str | None = None,
        date: str | None = None,
        collection_name: str | None = None,
    ) -> None:
        self.module.deprecate(
            msg, version=version, date=date, collection_name=collection_name
        )
File diff suppressed because it is too large
@ -0,0 +1,590 @@
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import base64
import datetime
import io
import json
import os
import os.path
import shutil
import stat
import tarfile
import typing as t

from ansible.module_utils.common.text.converters import to_bytes, to_text

from ansible_collections.community.docker.plugins.module_utils._api.errors import (
    APIError,
    NotFound,
)

if t.TYPE_CHECKING:
    from collections.abc import Callable

    from _typeshed import WriteableBuffer

    from ansible_collections.community.docker.plugins.module_utils._api.api.client import (
        APIClient,
    )


class DockerFileCopyError(Exception):
    pass


class DockerUnexpectedError(DockerFileCopyError):
    pass


class DockerFileNotFound(DockerFileCopyError):
    pass


def _put_archive(
    client: APIClient, container: str, path: str, data: bytes | t.Generator[bytes]
) -> bool:
    # data can also be a file object for streaming. This is because _put uses requests's put().
    # See https://requests.readthedocs.io/en/latest/user/advanced/#streaming-uploads
    url = client._url("/containers/{0}/archive", container)
    res = client._put(url, params={"path": path}, data=data)
    client._raise_for_status(res)
    return res.status_code == 200

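# Illustration only (not part of the vendored module): _put_archive() above
# accepts either a complete tar archive as bytes or a generator of chunks,
# because requests streams iterable request bodies. A minimal sketch with a
# hypothetical client, container name, and destination path:
def _example_streamed_upload(client: APIClient, container: str) -> bool:
    def chunks() -> t.Generator[bytes]:
        # Chunks of an already-built tar archive would be yielded here.
        yield b""
    return _put_archive(client, container, "/tmp", chunks())

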
def _symlink_tar_creator(
|
||||
b_in_path: bytes,
|
||||
file_stat: os.stat_result,
|
||||
out_file: str | bytes,
|
||||
user_id: int,
|
||||
group_id: int,
|
||||
mode: int | None = None,
|
||||
user_name: str | None = None,
|
||||
) -> bytes:
|
||||
if not stat.S_ISLNK(file_stat.st_mode):
|
||||
raise DockerUnexpectedError("stat information is not for a symlink")
|
||||
bio = io.BytesIO()
|
||||
with tarfile.open(
|
||||
fileobj=bio, mode="w|", dereference=False, encoding="utf-8"
|
||||
) as tar:
|
||||
# Note that without both name (bytes) and arcname (unicode), this either fails for
|
||||
# Python 2.7, Python 3.5/3.6, or Python 3.7+. Only when passing both (in this
|
||||
# form) it works with Python 2.7, 3.5, 3.6, and 3.7 up to 3.11
|
||||
tarinfo = tar.gettarinfo(b_in_path, arcname=to_text(out_file))
|
||||
tarinfo.uid = user_id
|
||||
tarinfo.uname = ""
|
||||
if user_name:
|
||||
tarinfo.uname = user_name
|
||||
tarinfo.gid = group_id
|
||||
tarinfo.gname = ""
|
||||
tarinfo.mode &= 0o700
|
||||
if mode is not None:
|
||||
tarinfo.mode = mode
|
||||
if not tarinfo.issym():
|
||||
raise DockerUnexpectedError("stat information is not for a symlink")
|
||||
tar.addfile(tarinfo)
|
||||
return bio.getvalue()
|
||||
|
||||
|
||||
def _symlink_tar_generator(
|
||||
b_in_path: bytes,
|
||||
file_stat: os.stat_result,
|
||||
out_file: str | bytes,
|
||||
user_id: int,
|
||||
group_id: int,
|
||||
mode: int | None = None,
|
||||
user_name: str | None = None,
|
||||
) -> t.Generator[bytes]:
|
||||
yield _symlink_tar_creator(
|
||||
b_in_path, file_stat, out_file, user_id, group_id, mode, user_name
|
||||
)
|
||||
|
||||
|
||||
def _regular_file_tar_generator(
|
||||
b_in_path: bytes,
|
||||
file_stat: os.stat_result,
|
||||
out_file: str | bytes,
|
||||
user_id: int,
|
||||
group_id: int,
|
||||
mode: int | None = None,
|
||||
user_name: str | None = None,
|
||||
) -> t.Generator[bytes]:
|
||||
if not stat.S_ISREG(file_stat.st_mode):
|
||||
raise DockerUnexpectedError("stat information is not for a regular file")
|
||||
tarinfo = tarfile.TarInfo()
|
||||
tarinfo.name = (
|
||||
os.path.splitdrive(to_text(out_file))[1].replace(os.sep, "/").lstrip("/")
|
||||
)
|
||||
tarinfo.mode = (file_stat.st_mode & 0o700) if mode is None else mode
|
||||
tarinfo.uid = user_id
|
||||
tarinfo.gid = group_id
|
||||
tarinfo.size = file_stat.st_size
|
||||
tarinfo.mtime = file_stat.st_mtime
|
||||
tarinfo.type = tarfile.REGTYPE
|
||||
tarinfo.linkname = ""
|
||||
if user_name:
|
||||
tarinfo.uname = user_name
|
||||
|
||||
tarinfo_buf = tarinfo.tobuf()
|
||||
total_size = len(tarinfo_buf)
|
||||
yield tarinfo_buf
|
||||
|
||||
size = tarinfo.size
|
||||
total_size += size
|
||||
with open(b_in_path, "rb") as f:
|
||||
while size > 0:
|
||||
to_read = min(size, 65536)
|
||||
buf = f.read(to_read)
|
||||
if not buf:
|
||||
break
|
||||
size -= len(buf)
|
||||
yield buf
|
||||
if size:
|
||||
# If for some reason the file shrank, fill up to the announced size with zeros.
|
||||
# (If it enlarged, ignore the remainder.)
|
||||
yield tarfile.NUL * size
|
||||
|
||||
remainder = tarinfo.size % tarfile.BLOCKSIZE
|
||||
if remainder:
|
||||
# We need to write a multiple of 512 bytes. Fill up with zeros.
|
||||
yield tarfile.NUL * (tarfile.BLOCKSIZE - remainder)
|
||||
total_size += tarfile.BLOCKSIZE - remainder
|
||||
|
||||
# End with two zeroed blocks
|
||||
yield tarfile.NUL * (2 * tarfile.BLOCKSIZE)
|
||||
total_size += 2 * tarfile.BLOCKSIZE
|
||||
|
||||
remainder = total_size % tarfile.RECORDSIZE
|
||||
if remainder > 0:
|
||||
yield tarfile.NUL * (tarfile.RECORDSIZE - remainder)
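# Illustrative sketch (not part of the original module) of the padding rule
# implemented above: the payload is padded to BLOCKSIZE (512), the archive
# ends with two zero blocks, and the total is padded up to RECORDSIZE (10240).
def _expected_stream_length(payload_len: int) -> int:
    total = tarfile.BLOCKSIZE + payload_len  # one 512-byte header block
    if payload_len % tarfile.BLOCKSIZE:
        total += tarfile.BLOCKSIZE - payload_len % tarfile.BLOCKSIZE
    total += 2 * tarfile.BLOCKSIZE  # end-of-archive marker
    if total % tarfile.RECORDSIZE:
        total += tarfile.RECORDSIZE - total % tarfile.RECORDSIZE
    return total

assert _expected_stream_length(1) == tarfile.RECORDSIZE  # 10240 bytes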
|
||||
|
||||
|
||||
def _regular_content_tar_generator(
|
||||
content: bytes,
|
||||
out_file: str | bytes,
|
||||
user_id: int,
|
||||
group_id: int,
|
||||
mode: int,
|
||||
user_name: str | None = None,
|
||||
) -> t.Generator[bytes]:
|
||||
tarinfo = tarfile.TarInfo()
|
||||
tarinfo.name = (
|
||||
os.path.splitdrive(to_text(out_file))[1].replace(os.sep, "/").lstrip("/")
|
||||
)
|
||||
tarinfo.mode = mode
|
||||
tarinfo.uid = user_id
|
||||
tarinfo.gid = group_id
|
||||
tarinfo.size = len(content)
|
||||
tarinfo.mtime = int(datetime.datetime.now().timestamp())
|
||||
tarinfo.type = tarfile.REGTYPE
|
||||
tarinfo.linkname = ""
|
||||
if user_name:
|
||||
tarinfo.uname = user_name
|
||||
|
||||
tarinfo_buf = tarinfo.tobuf()
|
||||
total_size = len(tarinfo_buf)
|
||||
yield tarinfo_buf
|
||||
|
||||
total_size += len(content)
|
||||
yield content
|
||||
|
||||
remainder = tarinfo.size % tarfile.BLOCKSIZE
|
||||
if remainder:
|
||||
# We need to write a multiple of 512 bytes. Fill up with zeros.
|
||||
yield tarfile.NUL * (tarfile.BLOCKSIZE - remainder)
|
||||
total_size += tarfile.BLOCKSIZE - remainder
|
||||
|
||||
# End with two zeroed blocks
|
||||
yield tarfile.NUL * (2 * tarfile.BLOCKSIZE)
|
||||
total_size += 2 * tarfile.BLOCKSIZE
|
||||
|
||||
remainder = total_size % tarfile.RECORDSIZE
|
||||
if remainder > 0:
|
||||
yield tarfile.NUL * (tarfile.RECORDSIZE - remainder)
|
||||
|
||||
|
||||
def put_file(
|
||||
client: APIClient,
|
||||
container: str,
|
||||
in_path: str,
|
||||
out_path: str,
|
||||
user_id: int,
|
||||
group_id: int,
|
||||
mode: int | None = None,
|
||||
user_name: str | None = None,
|
||||
follow_links: bool = False,
|
||||
) -> None:
|
||||
"""Transfer a file from local to Docker container."""
|
||||
if not os.path.exists(to_bytes(in_path, errors="surrogate_or_strict")):
|
||||
raise DockerFileNotFound(f"file or module does not exist: {to_text(in_path)}")
|
||||
|
||||
b_in_path = to_bytes(in_path, errors="surrogate_or_strict")
|
||||
|
||||
out_dir, out_file = os.path.split(out_path)
|
||||
|
||||
if follow_links:
|
||||
file_stat = os.stat(b_in_path)
|
||||
else:
|
||||
file_stat = os.lstat(b_in_path)
|
||||
|
||||
if stat.S_ISREG(file_stat.st_mode):
|
||||
stream = _regular_file_tar_generator(
|
||||
b_in_path,
|
||||
file_stat,
|
||||
out_file,
|
||||
user_id,
|
||||
group_id,
|
||||
mode=mode,
|
||||
user_name=user_name,
|
||||
)
|
||||
elif stat.S_ISLNK(file_stat.st_mode):
|
||||
stream = _symlink_tar_generator(
|
||||
b_in_path,
|
||||
file_stat,
|
||||
out_file,
|
||||
user_id,
|
||||
group_id,
|
||||
mode=mode,
|
||||
user_name=user_name,
|
||||
)
|
||||
else:
|
||||
file_part = " referenced by" if follow_links else ""
|
||||
raise DockerFileCopyError(
|
||||
f"File{file_part} {in_path} is neither a regular file nor a symlink (stat mode {oct(file_stat.st_mode)})."
|
||||
)
|
||||
|
||||
ok = _put_archive(client, container, out_dir, stream)
|
||||
if not ok:
|
||||
raise DockerUnexpectedError(
|
||||
f'Unknown error while creating file "{out_path}" in container "{container}".'
|
||||
)
|
||||
|
||||
|
||||
def put_file_content(
|
||||
client: APIClient,
|
||||
container: str,
|
||||
content: bytes,
|
||||
out_path: str,
|
||||
user_id: int,
|
||||
group_id: int,
|
||||
mode: int,
|
||||
user_name: str | None = None,
|
||||
) -> None:
|
||||
"""Transfer a file from local to Docker container."""
|
||||
out_dir, out_file = os.path.split(out_path)
|
||||
|
||||
stream = _regular_content_tar_generator(
|
||||
content, out_file, user_id, group_id, mode, user_name=user_name
|
||||
)
|
||||
|
||||
ok = _put_archive(client, container, out_dir, stream)
|
||||
if not ok:
|
||||
raise DockerUnexpectedError(
|
||||
f'Unknown error while creating file "{out_path}" in container "{container}".'
|
||||
)
|
||||
|
||||
|
||||
def stat_file(
|
||||
client: APIClient,
|
||||
container: str,
|
||||
in_path: str,
|
||||
follow_links: bool = False,
|
||||
log: Callable[[str], None] | None = None,
|
||||
) -> tuple[str, dict[str, t.Any] | None, str | None]:
|
||||
"""Fetch information on a file from a Docker container to local.
|
||||
|
||||
Return a tuple ``(path, stat_data, link_target)`` where:
|
||||
|
||||
:path: is the resolved path in case ``follow_links=True``;
|
||||
:stat_data: is ``None`` if the file does not exist, or a dictionary with fields
|
||||
``name`` (string), ``size`` (integer), ``mode`` (integer, see https://pkg.go.dev/io/fs#FileMode),
|
||||
``mtime`` (string), and ``linkTarget`` (string);
|
||||
:link_target: is ``None`` if the file is not a symlink or when ``follow_links=False``,
|
||||
and a string with the symlink target otherwise.
|
||||
"""
|
||||
considered_in_paths = set()
|
||||
|
||||
while True:
|
||||
if in_path in considered_in_paths:
|
||||
raise DockerFileCopyError(
|
||||
f"Found infinite symbolic link loop when trying to stating {in_path!r}"
|
||||
)
|
||||
considered_in_paths.add(in_path)
|
||||
|
||||
if log:
|
||||
log(f"FETCH: Stating {in_path!r}")
|
||||
|
||||
response = client._head(
|
||||
client._url("/containers/{0}/archive", container),
|
||||
params={"path": in_path},
|
||||
)
|
||||
if response.status_code == 404:
|
||||
return in_path, None, None
|
||||
client._raise_for_status(response)
|
||||
header = response.headers.get("x-docker-container-path-stat")
|
||||
try:
|
||||
if header is None:
|
||||
raise ValueError("x-docker-container-path-stat header not present")
|
||||
stat_data = json.loads(base64.b64decode(header))
|
||||
except Exception as exc:
|
||||
raise DockerUnexpectedError(
|
||||
f"When retrieving information for {in_path} from {container}, obtained header {header!r} that cannot be loaded as JSON: {exc}"
|
||||
) from exc
|
||||
|
||||
# https://pkg.go.dev/io/fs#FileMode: bit 32 - 5 means ModeSymlink
|
||||
if stat_data["mode"] & (1 << (32 - 5)) != 0:
|
||||
link_target = stat_data["linkTarget"]
|
||||
if not follow_links:
|
||||
return in_path, stat_data, link_target
|
||||
in_path = os.path.join(os.path.split(in_path)[0], link_target)
|
||||
continue
|
||||
|
||||
return in_path, stat_data, None
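# Quick illustration (not part of the original module) of the Go io/fs
# FileMode convention used above: ModeSymlink is bit 32 - 5, i.e. bit 27.
_MODE_SYMLINK = 1 << (32 - 5)
assert (0o777 | _MODE_SYMLINK) & _MODE_SYMLINK != 0  # treated as a symlink
assert 0o777 & _MODE_SYMLINK == 0  # a plain permission mode is not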
|
||||
|
||||
|
||||
class _RawGeneratorFileobj(io.RawIOBase):
|
||||
def __init__(self, stream: t.Generator[bytes]):
|
||||
self._stream = stream
|
||||
self._buf = b""
|
||||
|
||||
def readable(self) -> bool:
|
||||
return True
|
||||
|
||||
def _readinto_from_buf(self, b: WriteableBuffer, index: int, length: int) -> int:
|
||||
cpy = min(length - index, len(self._buf))
|
||||
if cpy:
|
||||
b[index : index + cpy] = self._buf[:cpy] # type: ignore # TODO!
|
||||
self._buf = self._buf[cpy:]
|
||||
index += cpy
|
||||
return index
|
||||
|
||||
def readinto(self, b: WriteableBuffer) -> int:
|
||||
index = 0
|
||||
length = len(b) # type: ignore # TODO!
|
||||
|
||||
index = self._readinto_from_buf(b, index, length)
|
||||
if index == length:
|
||||
return index
|
||||
|
||||
try:
|
||||
self._buf += next(self._stream)
|
||||
except StopIteration:
|
||||
return index
|
||||
|
||||
return self._readinto_from_buf(b, index, length)
|
||||
|
||||
|
||||
def _stream_generator_to_fileobj(stream: t.Generator[bytes]) -> io.BufferedReader:
|
||||
"""Given a generator that generates chunks of bytes, create a readable buffered stream."""
|
||||
raw = _RawGeneratorFileobj(stream)
|
||||
return io.BufferedReader(raw)
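# Usage sketch (not part of the original module): wrap a bytes generator so a
# consumer such as tarfile can read it as one sequential stream.
def _demo_chunks() -> t.Generator[bytes]:
    yield b"part1-"
    yield b"part2"

assert _stream_generator_to_fileobj(_demo_chunks()).read() == b"part1-part2"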
|
||||
|
||||
|
||||
_T = t.TypeVar("_T")
|
||||
|
||||
|
||||
def fetch_file_ex(
|
||||
client: APIClient,
|
||||
container: str,
|
||||
in_path: str,
|
||||
process_none: Callable[[str], _T],
|
||||
process_regular: Callable[[str, tarfile.TarFile, tarfile.TarInfo], _T],
|
||||
process_symlink: Callable[[str, tarfile.TarInfo], _T],
|
||||
process_other: Callable[[str, tarfile.TarInfo], _T],
|
||||
follow_links: bool = False,
|
||||
log: Callable[[str], None] | None = None,
|
||||
) -> _T:
|
||||
"""Fetch a file (as a tar file entry) from a Docker container to local."""
|
||||
considered_in_paths: set[str] = set()
|
||||
|
||||
while True:
|
||||
if in_path in considered_in_paths:
|
||||
raise DockerFileCopyError(
|
||||
f'Found infinite symbolic link loop when trying to fetch "{in_path}"'
|
||||
)
|
||||
considered_in_paths.add(in_path)
|
||||
|
||||
if log:
|
||||
log(f'FETCH: Fetching "{in_path}"')
|
||||
try:
|
||||
stream = client.get_raw_stream(
|
||||
"/containers/{0}/archive",
|
||||
container,
|
||||
params={"path": in_path},
|
||||
headers={"Accept-Encoding": "identity"},
|
||||
)
|
||||
except NotFound:
|
||||
return process_none(in_path)
|
||||
|
||||
with tarfile.open(
|
||||
fileobj=_stream_generator_to_fileobj(stream), mode="r|"
|
||||
) as tar:
|
||||
symlink_member: tarfile.TarInfo | None = None
|
||||
result: _T | None = None
|
||||
found = False
|
||||
for member in tar:
|
||||
if found:
|
||||
raise DockerUnexpectedError(
|
||||
"Received tarfile contains more than one file!"
|
||||
)
|
||||
found = True
|
||||
if member.issym():
|
||||
symlink_member = member
|
||||
continue
|
||||
if member.isfile():
|
||||
result = process_regular(in_path, tar, member)
|
||||
continue
|
||||
result = process_other(in_path, member)
|
||||
if symlink_member:
|
||||
if not follow_links:
|
||||
return process_symlink(in_path, symlink_member)
|
||||
in_path = os.path.join(
|
||||
os.path.split(in_path)[0], symlink_member.linkname
|
||||
)
|
||||
if log:
|
||||
log(f'FETCH: Following symbolic link to "{in_path}"')
|
||||
continue
|
||||
if found:
|
||||
return result # type: ignore
|
||||
raise DockerUnexpectedError("Received tarfile is empty!")
|
||||
|
||||
|
||||
def fetch_file(
|
||||
client: APIClient,
|
||||
container: str,
|
||||
in_path: str,
|
||||
out_path: str,
|
||||
follow_links: bool = False,
|
||||
log: Callable[[str], None] | None = None,
|
||||
) -> str:
|
||||
b_out_path = to_bytes(out_path, errors="surrogate_or_strict")
|
||||
|
||||
def process_none(in_path: str) -> str:
|
||||
raise DockerFileNotFound(
|
||||
f"File {in_path} does not exist in container {container}"
|
||||
)
|
||||
|
||||
def process_regular(
|
||||
in_path: str, tar: tarfile.TarFile, member: tarfile.TarInfo
|
||||
) -> str:
|
||||
if not follow_links and os.path.exists(b_out_path):
|
||||
os.unlink(b_out_path)
|
||||
|
||||
reader = tar.extractfile(member)
|
||||
if reader:
|
||||
with reader as in_f, open(b_out_path, "wb") as out_f:
|
||||
shutil.copyfileobj(in_f, out_f)
|
||||
return in_path
|
||||
|
||||
def process_symlink(in_path: str, member: tarfile.TarInfo) -> str:
|
||||
if os.path.exists(b_out_path):
|
||||
os.unlink(b_out_path)
|
||||
|
||||
os.symlink(member.linkname, b_out_path)
|
||||
return in_path
|
||||
|
||||
def process_other(in_path: str, member: tarfile.TarInfo) -> str:
|
||||
raise DockerFileCopyError(
|
||||
f'Remote file "{in_path}" is not a regular file or a symbolic link'
|
||||
)
|
||||
|
||||
return fetch_file_ex(
|
||||
client,
|
||||
container,
|
||||
in_path,
|
||||
process_none,
|
||||
process_regular,
|
||||
process_symlink,
|
||||
process_other,
|
||||
follow_links=follow_links,
|
||||
log=log,
|
||||
)
|
||||
|
||||
|
||||
def _execute_command(
|
||||
client: APIClient,
|
||||
container: str,
|
||||
command: list[str],
|
||||
log: Callable[[str], None] | None = None,
|
||||
check_rc: bool = False,
|
||||
) -> tuple[int, bytes, bytes]:
|
||||
if log:
|
||||
log(f"Executing {command} in {container}")
|
||||
|
||||
data = {
|
||||
"Container": container,
|
||||
"User": "",
|
||||
"Privileged": False,
|
||||
"Tty": False,
|
||||
"AttachStdin": False,
|
||||
"AttachStdout": True,
|
||||
"AttachStderr": True,
|
||||
"Cmd": command,
|
||||
}
|
||||
|
||||
if "detachKeys" in client._general_configs:
|
||||
data["detachKeys"] = client._general_configs["detachKeys"]
|
||||
|
||||
try:
|
||||
exec_data = client.post_json_to_json(
|
||||
"/containers/{0}/exec", container, data=data
|
||||
)
|
||||
except NotFound as e:
|
||||
raise DockerFileCopyError(f'Could not find container "{container}"') from e
|
||||
except APIError as e:
|
||||
if e.response is not None and e.response.status_code == 409:
|
||||
raise DockerFileCopyError(
|
||||
f'Cannot execute command in paused container "{container}"'
|
||||
) from e
|
||||
raise
|
||||
exec_id = exec_data["Id"]
|
||||
|
||||
data = {"Tty": False, "Detach": False}
|
||||
stdout, stderr = client.post_json_to_stream(
|
||||
"/exec/{0}/start", exec_id, stream=False, demux=True, tty=False
|
||||
)
|
||||
|
||||
result = client.get_json("/exec/{0}/json", exec_id)
|
||||
|
||||
rc: int = result.get("ExitCode") or 0
|
||||
stdout = stdout or b""
|
||||
stderr = stderr or b""
|
||||
|
||||
if log:
|
||||
log(f"Exit code {rc}, stdout {stdout!r}, stderr {stderr!r}")
|
||||
|
||||
if check_rc and rc != 0:
|
||||
command_str = " ".join(command)
|
||||
raise DockerUnexpectedError(
|
||||
f'Obtained unexpected exit code {rc} when running "{command_str}" in {container}.\nSTDOUT: {stdout!r}\nSTDERR: {stderr!r}'
|
||||
)
|
||||
|
||||
return rc, stdout, stderr
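# Flow sketch (illustrative, not part of the original module): the exec
# lifecycle used above maps to three Engine API calls:
#
#     POST /containers/{id}/exec   -> create the exec instance
#     POST /exec/{id}/start        -> run it, streaming demuxed stdout/stderr
#     GET  /exec/{id}/json         -> fetch the exit code
#
# Hypothetical usage, assuming an APIClient instance `client` and a running
# container named "web":
#
#     rc, out, err = _execute_command(client, "web", ["/bin/sh", "-c", "id -u"])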
|
||||
|
||||
|
||||
def determine_user_group(
|
||||
client: APIClient, container: str, log: Callable[[str], None] | None = None
|
||||
) -> tuple[int, int]:
|
||||
dummy_rc, stdout, dummy_stderr = _execute_command(
|
||||
client, container, ["/bin/sh", "-c", "id -u && id -g"], check_rc=True, log=log
|
||||
)
|
||||
|
||||
stdout_lines = stdout.splitlines()
|
||||
if len(stdout_lines) != 2:
|
||||
raise DockerUnexpectedError(
|
||||
f"Expected two-line output to obtain user and group ID for container {container}, but got {len(stdout_lines)} lines:\n{stdout!r}"
|
||||
)
|
||||
|
||||
user_id, group_id = stdout_lines
|
||||
try:
|
||||
return int(user_id), int(group_id)
|
||||
except ValueError as exc:
|
||||
raise DockerUnexpectedError(
|
||||
f"Expected two-line output with numeric IDs to obtain user and group ID for container {container}, but got {user_id!r} and {group_id!r} instead"
|
||||
) from exc
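# Illustrative sketch (not part of the original module) of the two-line
# convention determine_user_group() relies on: `id -u && id -g` prints the
# UID on the first line and the GID on the second.
_demo_stdout = b"1000\n1000\n"
assert tuple(int(line) for line in _demo_stdout.splitlines()) == (1000, 1000)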
|
||||
|
|
@@ -0,0 +1,166 @@
|
|||
# Copyright 2022 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
|
||||
# Do not use this from other collections or standalone plugins/modules!
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import tarfile
|
||||
|
||||
|
||||
class ImageArchiveManifestSummary:
|
||||
"""
|
||||
Represents data extracted from a manifest.json found in the tar archive output of the
|
||||
"docker image save some:tag > some.tar" command.
|
||||
"""
|
||||
|
||||
def __init__(self, image_id: str, repo_tags: list[str]) -> None:
|
||||
"""
|
||||
:param image_id: File name portion of Config entry, e.g. abcde12345 from abcde12345.json
|
||||
:param repo_tags: Docker image names, e.g. ["hello-world:latest"]
|
||||
"""
|
||||
|
||||
self.image_id = image_id
|
||||
self.repo_tags = repo_tags
|
||||
|
||||
|
||||
class ImageArchiveInvalidException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def api_image_id(archive_image_id: str) -> str:
|
||||
"""
|
||||
Accepts an image hash in the format stored in manifest.json, and returns an equivalent identifier
|
||||
that represents the same image hash, but in the format presented by the Docker Engine API.
|
||||
|
||||
:param archive_image_id: plain image hash
|
||||
:returns: Prefixed hash used by REST api
|
||||
"""
|
||||
|
||||
return f"sha256:{archive_image_id}"
|
||||
|
||||
|
||||
def load_archived_image_manifest(
|
||||
archive_path: str,
|
||||
) -> list[ImageArchiveManifestSummary] | None:
|
||||
"""
|
||||
Attempts to get image IDs and image names from metadata stored in the image
|
||||
archive tar file.
|
||||
|
||||
The tar should contain a file "manifest.json" with an array with one or more entries,
|
||||
and every entry should have a Config field with the image ID in its file name, as
|
||||
well as a RepoTags list, which typically has only one entry.
|
||||
|
||||
:raises:
|
||||
ImageArchiveInvalidException: A file exists at archive_path, but an image ID could not be extracted from it.
|
||||
|
||||
:param archive_path: Tar file to read
|
||||
:return: None if there is no file at archive_path, otherwise a list of ImageArchiveManifestSummary objects.
|
||||
"""
|
||||
|
||||
try:
|
||||
# Check for existence explicitly instead of relying on FileNotFoundError from open()
|
||||
if not os.path.isfile(archive_path):
|
||||
return None
|
||||
|
||||
with tarfile.open(archive_path, "r") as tf:
|
||||
try:
|
||||
try:
|
||||
reader = tf.extractfile("manifest.json")
|
||||
if reader is None:
|
||||
raise ImageArchiveInvalidException(
|
||||
"Failed to read manifest.json"
|
||||
)
|
||||
with reader as ef:
|
||||
manifest = json.load(ef)
|
||||
except ImageArchiveInvalidException:
|
||||
raise
|
||||
except Exception as exc:
|
||||
raise ImageArchiveInvalidException(
|
||||
f"Failed to decode and deserialize manifest.json: {exc}"
|
||||
) from exc
|
||||
|
||||
if len(manifest) == 0:
|
||||
raise ImageArchiveInvalidException(
|
||||
"Expected to have at least one entry in manifest.json but found none"
|
||||
)
|
||||
|
||||
result = []
|
||||
for index, meta in enumerate(manifest):
|
||||
try:
|
||||
config_file = meta["Config"]
|
||||
except KeyError as exc:
|
||||
raise ImageArchiveInvalidException(
|
||||
f"Failed to get Config entry from {index + 1}th manifest in manifest.json: {exc}"
|
||||
) from exc
|
||||
|
||||
# Extracts hash without 'sha256:' prefix
|
||||
try:
|
||||
# Strip off .json filename extension, leaving just the hash.
|
||||
image_id = os.path.splitext(config_file)[0]
|
||||
except Exception as exc:
|
||||
raise ImageArchiveInvalidException(
|
||||
f"Failed to extract image id from config file name {config_file}: {exc}"
|
||||
) from exc
|
||||
|
||||
for prefix in ("blobs/sha256/",): # Moby 25.0.0, Docker API 1.44
|
||||
if image_id.startswith(prefix):
|
||||
image_id = image_id[len(prefix) :]
|
||||
|
||||
try:
|
||||
repo_tags = meta["RepoTags"]
|
||||
except KeyError as exc:
|
||||
raise ImageArchiveInvalidException(
|
||||
f"Failed to get RepoTags entry from {index + 1}th manifest in manifest.json: {exc}"
|
||||
) from exc
|
||||
|
||||
result.append(
|
||||
ImageArchiveManifestSummary(
|
||||
image_id=image_id, repo_tags=repo_tags
|
||||
)
|
||||
)
|
||||
return result
|
||||
|
||||
except ImageArchiveInvalidException:
|
||||
raise
|
||||
except Exception as exc:
|
||||
raise ImageArchiveInvalidException(
|
||||
f"Failed to extract manifest.json from tar file {archive_path}: {exc}"
|
||||
) from exc
|
||||
|
||||
except ImageArchiveInvalidException:
|
||||
raise
|
||||
except Exception as exc:
|
||||
raise ImageArchiveInvalidException(
|
||||
f"Failed to open tar file {archive_path}: {exc}"
|
||||
) from exc
|
||||
|
||||
|
||||
def archived_image_manifest(archive_path: str) -> ImageArchiveManifestSummary | None:
|
||||
"""
|
||||
Attempts to get Image.Id and image name from metadata stored in the image
|
||||
archive tar file.
|
||||
|
||||
The tar should contain a file "manifest.json" with an array with a single entry,
|
||||
and the entry should have a Config field with the image ID in its file name, as
|
||||
well as a RepoTags list, which typically has only one entry.
|
||||
|
||||
:raises:
|
||||
ImageArchiveInvalidException: A file exists at archive_path, but an image ID could not be extracted from it.
|
||||
|
||||
:param archive_path: Tar file to read
|
||||
:return: None if there is no file at archive_path, otherwise the single ImageArchiveManifestSummary entry.
|
||||
"""
|
||||
|
||||
results = load_archived_image_manifest(archive_path)
|
||||
if results is None:
|
||||
return None
|
||||
if len(results) == 1:
|
||||
return results[0]
|
||||
raise ImageArchiveInvalidException(
|
||||
f"Expected to have one entry in manifest.json but found {len(results)}"
|
||||
)
|
||||
|
|
@@ -0,0 +1,116 @@
|
|||
# Copyright (c) 2025 Felix Fontein
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
|
||||
# Do not use this from other collections or standalone plugins/modules!
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
import typing as t
|
||||
from dataclasses import dataclass
|
||||
|
||||
_PATH_RE = re.compile(
|
||||
r"^[a-z0-9]+((\.|_|__|-+)[a-z0-9]+)*(\/[a-z0-9]+((\.|_|__|-+)[a-z0-9]+)*)*$"
|
||||
)
|
||||
_TAG_RE = re.compile(r"^[a-zA-Z0-9_][a-zA-Z0-9._-]{0,127}$")
|
||||
_DIGEST_RE = re.compile(r"^sha256:[0-9a-fA-F]{64}$")
|
||||
|
||||
|
||||
def is_digest(name: str, allow_empty: bool = False) -> bool:
|
||||
"""Check whether the given name is in fact an image ID (hash)."""
|
||||
if not name:
|
||||
return allow_empty
|
||||
return _DIGEST_RE.match(name) is not None
|
||||
|
||||
|
||||
def is_tag(name: str, allow_empty: bool = False) -> bool:
|
||||
"""Check whether the given name can be an image tag."""
|
||||
if not name:
|
||||
return allow_empty
|
||||
return _TAG_RE.match(name) is not None
|
||||
|
||||
|
||||
@dataclass
|
||||
class ImageName:
|
||||
registry: str | None
|
||||
path: str
|
||||
tag: str | None
|
||||
digest: str | None
|
||||
|
||||
@classmethod
|
||||
def parse(cls, name: str) -> t.Self:
|
||||
registry: str | None = None
|
||||
tag: str | None = None
|
||||
digest: str | None = None
|
||||
parts = name.rsplit("@", 1)
|
||||
if len(parts) == 2:
|
||||
name, digest = parts
|
||||
parts = name.rsplit(":", 1)
|
||||
if len(parts) == 2 and "/" not in parts[1]:
|
||||
name, tag = parts
|
||||
parts = name.split("/", 1)
|
||||
if len(parts) == 2 and (
|
||||
"." in parts[0] or ":" in parts[0] or parts[0] == "localhost"
|
||||
):
|
||||
registry, name = parts
|
||||
return cls(registry, name, tag, digest)
|
||||
|
||||
def validate(self) -> t.Self:
|
||||
if self.registry:
|
||||
if self.registry[0] == "-" or self.registry[-1] == "-":
|
||||
raise ValueError(
|
||||
f'Invalid registry name ({self.registry}): must not begin or end with a "-".'
|
||||
)
|
||||
if self.registry[-1] == ":":
|
||||
raise ValueError(
|
||||
f'Invalid registry name ({self.registry}): must not end with ":".'
|
||||
)
|
||||
if not _PATH_RE.match(self.path):
|
||||
raise ValueError(f"Invalid path ({self.path}).")
|
||||
if self.tag and not is_tag(self.tag):
|
||||
raise ValueError(f"Invalid tag ({self.tag}).")
|
||||
if self.digest and not is_digest(self.digest):
|
||||
raise ValueError(f"Invalid digest ({self.digest}).")
|
||||
return self
|
||||
|
||||
def combine(self) -> str:
|
||||
parts = []
|
||||
if self.registry:
|
||||
parts.append(self.registry)
|
||||
if self.path:
|
||||
parts.append("/")
|
||||
parts.append(self.path)
|
||||
if self.tag:
|
||||
parts.append(":")
|
||||
parts.append(self.tag)
|
||||
if self.digest:
|
||||
parts.append("@")
|
||||
parts.append(self.digest)
|
||||
return "".join(parts)
|
||||
|
||||
def normalize(self) -> ImageName:
|
||||
registry = self.registry
|
||||
path = self.path
|
||||
if registry in ("", None, "index.docker.io", "registry.hub.docker.com"):
|
||||
registry = "docker.io"
|
||||
if registry == "docker.io" and "/" not in path and path:
|
||||
path = f"library/{path}"
|
||||
return ImageName(registry, path, self.tag, self.digest)
|
||||
|
||||
def get_hostname_and_port(self) -> tuple[str, int]:
|
||||
if self.registry is None:
|
||||
raise ValueError(
|
||||
"Cannot get hostname when there is no registry. Normalize first!"
|
||||
)
|
||||
if self.registry == "docker.io":
|
||||
return "index.docker.io", 443
|
||||
parts = self.registry.split(":", 1)
|
||||
if len(parts) == 2:
|
||||
try:
|
||||
port = int(parts[1])
|
||||
except (TypeError, ValueError) as exc:
|
||||
raise ValueError(f"Cannot parse port {parts[1]!r}") from exc
|
||||
return parts[0], port
|
||||
return self.registry, 443
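# Illustrative parses (not part of the original module): a registry is only
# split off when the first component looks like a hostname, and Docker Hub
# short names gain the "docker.io" registry and the "library/" namespace.
_ref = ImageName.parse("localhost:5000/app:1.0")
assert (_ref.registry, _ref.path, _ref.tag) == ("localhost:5000", "app", "1.0")
_hub = ImageName.parse("nginx").normalize()
assert _hub.combine() == "docker.io/library/nginx"
assert _hub.get_hostname_and_port() == ("index.docker.io", 443)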
|
||||
|
|
@@ -0,0 +1,211 @@
|
|||
# Copyright (c) 2024, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
|
||||
# Do not use this from other collections or standalone plugins/modules!
|
||||
|
||||
"""
|
||||
Parse Go logfmt messages.
|
||||
|
||||
See https://pkg.go.dev/github.com/kr/logfmt?utm_source=godoc for information on the format.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import typing as t
|
||||
from enum import Enum
|
||||
|
||||
# The format is defined in https://pkg.go.dev/github.com/kr/logfmt?utm_source=godoc
|
||||
# (look for "EBNFish")
|
||||
|
||||
|
||||
class InvalidLogFmt(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class _Mode(Enum):
|
||||
GARBAGE = 0
|
||||
KEY = 1
|
||||
EQUAL = 2
|
||||
IDENT_VALUE = 3
|
||||
QUOTED_VALUE = 4
|
||||
|
||||
|
||||
_ESCAPE_DICT = {
|
||||
'"': '"',
|
||||
"\\": "\\",
|
||||
"'": "'",
|
||||
"/": "/",
|
||||
"b": "\b",
|
||||
"f": "\f",
|
||||
"n": "\n",
|
||||
"r": "\r",
|
||||
"t": "\t",
|
||||
}
|
||||
|
||||
_HEX_DICT = {
|
||||
"0": 0,
|
||||
"1": 1,
|
||||
"2": 2,
|
||||
"3": 3,
|
||||
"4": 4,
|
||||
"5": 5,
|
||||
"6": 6,
|
||||
"7": 7,
|
||||
"8": 8,
|
||||
"9": 9,
|
||||
"a": 0xA,
|
||||
"b": 0xB,
|
||||
"c": 0xC,
|
||||
"d": 0xD,
|
||||
"e": 0xE,
|
||||
"f": 0xF,
|
||||
"A": 0xA,
|
||||
"B": 0xB,
|
||||
"C": 0xC,
|
||||
"D": 0xD,
|
||||
"E": 0xE,
|
||||
"F": 0xF,
|
||||
}
|
||||
|
||||
|
||||
def _is_ident(cur: str) -> bool:
|
||||
return cur > " " and cur not in ('"', "=")
|
||||
|
||||
|
||||
class _Parser:
|
||||
def __init__(self, line: str) -> None:
|
||||
self.line = line
|
||||
self.index = 0
|
||||
self.length = len(line)
|
||||
|
||||
def done(self) -> bool:
|
||||
return self.index >= self.length
|
||||
|
||||
def cur(self) -> str:
|
||||
return self.line[self.index]
|
||||
|
||||
def next(self) -> None:
|
||||
self.index += 1
|
||||
|
||||
def prev(self) -> None:
|
||||
self.index -= 1
|
||||
|
||||
def parse_unicode_sequence(self) -> str:
|
||||
if self.index + 6 > self.length:
|
||||
raise InvalidLogFmt("Not enough space for unicode escape")
|
||||
if self.line[self.index : self.index + 2] != "\\u":
|
||||
raise InvalidLogFmt("Invalid unicode escape start")
|
||||
v = 0
|
||||
for digit_index in range(self.index + 2, self.index + 6):
|
||||
v <<= 4
|
||||
try:
|
||||
v += _HEX_DICT[self.line[digit_index]]
|
||||
except KeyError:
|
||||
raise InvalidLogFmt(
|
||||
f"Invalid unicode escape digit {self.line[self.index]!r}"
|
||||
) from None
|
||||
self.index += 6
|
||||
return chr(v)
|
||||
|
||||
|
||||
def parse_line(line: str, logrus_mode: bool = False) -> dict[str, t.Any]:
|
||||
result: dict[str, t.Any] = {}
|
||||
parser = _Parser(line)
|
||||
key: list[str] = []
|
||||
value: list[str] = []
|
||||
mode = _Mode.GARBAGE
|
||||
|
||||
def handle_kv(has_no_value: bool = False) -> None:
|
||||
k = "".join(key)
|
||||
v = None if has_no_value else "".join(value)
|
||||
result[k] = v
|
||||
del key[:]
|
||||
del value[:]
|
||||
|
||||
def parse_garbage(cur: str) -> _Mode:
|
||||
if _is_ident(cur):
|
||||
return _Mode.KEY
|
||||
parser.next()
|
||||
return _Mode.GARBAGE
|
||||
|
||||
def parse_key(cur: str) -> _Mode:
|
||||
if _is_ident(cur):
|
||||
key.append(cur)
|
||||
parser.next()
|
||||
return _Mode.KEY
|
||||
if cur == "=":
|
||||
parser.next()
|
||||
return _Mode.EQUAL
|
||||
if logrus_mode:
|
||||
raise InvalidLogFmt('Key must always be followed by "=" in logrus mode')
|
||||
handle_kv(has_no_value=True)
|
||||
parser.next()
|
||||
return _Mode.GARBAGE
|
||||
|
||||
def parse_equal(cur: str) -> _Mode:
|
||||
if _is_ident(cur):
|
||||
value.append(cur)
|
||||
parser.next()
|
||||
return _Mode.IDENT_VALUE
|
||||
if cur == '"':
|
||||
parser.next()
|
||||
return _Mode.QUOTED_VALUE
|
||||
handle_kv()
|
||||
parser.next()
|
||||
return _Mode.GARBAGE
|
||||
|
||||
def parse_ident_value(cur: str) -> _Mode:
|
||||
if _is_ident(cur):
|
||||
value.append(cur)
|
||||
parser.next()
|
||||
return _Mode.IDENT_VALUE
|
||||
handle_kv()
|
||||
parser.next()
|
||||
return _Mode.GARBAGE
|
||||
|
||||
def parse_quoted_value(cur: str) -> _Mode:
|
||||
if cur == "\\":
|
||||
parser.next()
|
||||
if parser.done():
|
||||
raise InvalidLogFmt("Unterminated escape sequence in quoted string")
|
||||
cur = parser.cur()
|
||||
if cur in _ESCAPE_DICT:
|
||||
value.append(_ESCAPE_DICT[cur])
|
||||
elif cur != "u":
|
||||
es = f"\\{cur}"
|
||||
raise InvalidLogFmt(f"Unknown escape sequence {es!r}")
|
||||
else:
|
||||
parser.prev()
|
||||
value.append(parser.parse_unicode_sequence())
|
||||
parser.next()
|
||||
return _Mode.QUOTED_VALUE
|
||||
if cur == '"':
|
||||
handle_kv()
|
||||
parser.next()
|
||||
return _Mode.GARBAGE
|
||||
if cur < " ":
|
||||
raise InvalidLogFmt("Control characters in quoted string are not allowed")
|
||||
value.append(cur)
|
||||
parser.next()
|
||||
return _Mode.QUOTED_VALUE
|
||||
|
||||
parsers = {
|
||||
_Mode.GARBAGE: parse_garbage,
|
||||
_Mode.KEY: parse_key,
|
||||
_Mode.EQUAL: parse_equal,
|
||||
_Mode.IDENT_VALUE: parse_ident_value,
|
||||
_Mode.QUOTED_VALUE: parse_quoted_value,
|
||||
}
|
||||
while not parser.done():
|
||||
mode = parsers[mode](parser.cur())
|
||||
if mode == _Mode.KEY and logrus_mode:
|
||||
raise InvalidLogFmt('Key must always be followed by "=" in logrus mode')
|
||||
if mode in (_Mode.KEY, _Mode.EQUAL):
|
||||
handle_kv(has_no_value=True)
|
||||
elif mode == _Mode.IDENT_VALUE:
|
||||
handle_kv()
|
||||
elif mode == _Mode.QUOTED_VALUE:
|
||||
raise InvalidLogFmt("Unterminated quoted string")
|
||||
return result
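# Usage sketch (not part of the original module): parse a logrus-style line
# into a key/value dictionary.
_parsed = parse_line('time="2024-01-01T00:00:00Z" level=info msg="hello world"')
assert _parsed == {"time": "2024-01-01T00:00:00Z", "level": "info", "msg": "hello world"}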
|
||||
File diff suppressed because it is too large
File diff suppressed because it is too large
File diff suppressed because it is too large
|
|
@@ -0,0 +1,259 @@
|
|||
# This code is part of the Ansible collection community.docker, but is an independent component.
|
||||
# This particular file, and this file only, is based on containerd's platforms Go module
|
||||
# (https://github.com/containerd/containerd/tree/main/platforms)
|
||||
#
|
||||
# Copyright (c) 2023 Felix Fontein <felix@fontein.de>
|
||||
# Copyright The containerd Authors
|
||||
#
|
||||
# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
|
||||
# SPDX-License-Identifier: Apache-2.0
|
||||
|
||||
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
|
||||
# Do not use this from other collections or standalone plugins/modules!
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
import typing as t
|
||||
|
||||
_VALID_STR = re.compile("^[A-Za-z0-9_-]+$")
|
||||
|
||||
|
||||
def _validate_part(string: str, part: str, part_name: str) -> str:
|
||||
if not part:
|
||||
raise ValueError(f'Invalid platform string "{string}": {part_name} is empty')
|
||||
if not _VALID_STR.match(part):
|
||||
raise ValueError(
|
||||
f'Invalid platform string "{string}": {part_name} has invalid characters'
|
||||
)
|
||||
return part
|
||||
|
||||
|
||||
# See https://github.com/containerd/containerd/blob/main/platforms/database.go#L32-L38
|
||||
_KNOWN_OS = (
|
||||
"aix",
|
||||
"android",
|
||||
"darwin",
|
||||
"dragonfly",
|
||||
"freebsd",
|
||||
"hurd",
|
||||
"illumos",
|
||||
"ios",
|
||||
"js",
|
||||
"linux",
|
||||
"nacl",
|
||||
"netbsd",
|
||||
"openbsd",
|
||||
"plan9",
|
||||
"solaris",
|
||||
"windows",
|
||||
"zos",
|
||||
)
|
||||
|
||||
# See https://github.com/containerd/containerd/blob/main/platforms/database.go#L54-L60
|
||||
_KNOWN_ARCH = (
|
||||
"386",
|
||||
"amd64",
|
||||
"amd64p32",
|
||||
"arm",
|
||||
"armbe",
|
||||
"arm64",
|
||||
"arm64be",
|
||||
"ppc64",
|
||||
"ppc64le",
|
||||
"loong64",
|
||||
"mips",
|
||||
"mipsle",
|
||||
"mips64",
|
||||
"mips64le",
|
||||
"mips64p32",
|
||||
"mips64p32le",
|
||||
"ppc",
|
||||
"riscv",
|
||||
"riscv64",
|
||||
"s390",
|
||||
"s390x",
|
||||
"sparc",
|
||||
"sparc64",
|
||||
"wasm",
|
||||
)
|
||||
|
||||
|
||||
def _normalize_os(os_str: str) -> str:
|
||||
# See normalizeOS() in https://github.com/containerd/containerd/blob/main/platforms/database.go
|
||||
os_str = os_str.lower()
|
||||
if os_str == "macos":
|
||||
os_str = "darwin"
|
||||
return os_str
|
||||
|
||||
|
||||
_NORMALIZE_ARCH = {
|
||||
("i386", None): ("386", ""),
|
||||
("x86_64", "v1"): ("amd64", ""),
|
||||
("x86-64", "v1"): ("amd64", ""),
|
||||
("amd64", "v1"): ("amd64", ""),
|
||||
("x86_64", None): ("amd64", None),
|
||||
("x86-64", None): ("amd64", None),
|
||||
("amd64", None): ("amd64", None),
|
||||
("aarch64", "8"): ("arm64", ""),
|
||||
("arm64", "8"): ("arm64", ""),
|
||||
("aarch64", "v8"): ("arm64", ""),
|
||||
("arm64", "v8"): ("arm64", ""),
|
||||
("aarch64", None): ("arm64", None),
|
||||
("arm64", None): ("arm64", None),
|
||||
("armhf", None): ("arm", "v7"),
|
||||
("armel", None): ("arm", "v6"),
|
||||
("arm", ""): ("arm", "v7"),
|
||||
("arm", "5"): ("arm", "v5"),
|
||||
("arm", "6"): ("arm", "v6"),
|
||||
("arm", "7"): ("arm", "v7"),
|
||||
("arm", "8"): ("arm", "v8"),
|
||||
("arm", None): ("arm", None),
|
||||
}
|
||||
|
||||
|
||||
def _normalize_arch(arch_str: str, variant_str: str) -> tuple[str, str]:
|
||||
# See normalizeArch() in https://github.com/containerd/containerd/blob/main/platforms/database.go
|
||||
arch_str = arch_str.lower()
|
||||
variant_str = variant_str.lower()
|
||||
res = _NORMALIZE_ARCH.get((arch_str, variant_str))
|
||||
if res is None:
|
||||
res = _NORMALIZE_ARCH.get((arch_str, None))
|
||||
if res is None:
|
||||
return arch_str, variant_str
|
||||
arch_str = res[0]
|
||||
if res[1] is not None:
|
||||
variant_str = res[1]
|
||||
return arch_str, variant_str
|
||||
|
||||
|
||||
class _Platform:
|
||||
def __init__(
|
||||
self, os: str | None = None, arch: str | None = None, variant: str | None = None
|
||||
) -> None:
|
||||
self.os = os
|
||||
self.arch = arch
|
||||
self.variant = variant
|
||||
if variant is not None:
|
||||
if arch is None:
|
||||
raise ValueError("If variant is given, architecture must be given too")
|
||||
if os is None:
|
||||
raise ValueError("If variant is given, os must be given too")
|
||||
|
||||
@classmethod
|
||||
def parse_platform_string(
|
||||
cls,
|
||||
string: str | None,
|
||||
daemon_os: str | None = None,
|
||||
daemon_arch: str | None = None,
|
||||
) -> t.Self:
|
||||
# See Parse() in https://github.com/containerd/containerd/blob/main/platforms/platforms.go
|
||||
if string is None:
|
||||
return cls()
|
||||
if not string:
|
||||
raise ValueError("Platform string must be non-empty")
|
||||
parts = string.split("/", 2)
|
||||
arch = None
|
||||
variant = None
|
||||
if len(parts) == 1:
|
||||
_validate_part(string, string, "OS/architecture")
|
||||
# The part is either OS or architecture
|
||||
os = _normalize_os(string)
|
||||
if os in _KNOWN_OS:
|
||||
if daemon_arch is not None:
|
||||
arch, variant = _normalize_arch(daemon_arch, "")
|
||||
return cls(os=os, arch=arch, variant=variant)
|
||||
arch, variant = _normalize_arch(os, "")
|
||||
if arch in _KNOWN_ARCH:
|
||||
return cls(
|
||||
os=_normalize_os(daemon_os) if daemon_os else None,
|
||||
arch=arch or None,
|
||||
variant=variant or None,
|
||||
)
|
||||
raise ValueError(
|
||||
f'Invalid platform string "{string}": unknown OS or architecture'
|
||||
)
|
||||
os = _validate_part(string, parts[0], "OS")
|
||||
if not os:
|
||||
raise ValueError(f'Invalid platform string "{string}": OS is empty')
|
||||
arch = (
|
||||
_validate_part(string, parts[1], "architecture") if len(parts) > 1 else None
|
||||
)
|
||||
if arch is not None and not arch:
|
||||
raise ValueError(
|
||||
f'Invalid platform string "{string}": architecture is empty'
|
||||
)
|
||||
variant = (
|
||||
_validate_part(string, parts[2], "variant") if len(parts) > 2 else None
|
||||
)
|
||||
if variant is not None and not variant:
|
||||
raise ValueError(f'Invalid platform string "{string}": variant is empty')
|
||||
assert arch is not None # otherwise variant would be None as well
|
||||
arch, variant = _normalize_arch(arch, variant or "")
|
||||
if len(parts) == 2 and arch == "arm" and variant == "v7":
|
||||
variant = None
|
||||
if len(parts) == 3 and arch == "arm64" and variant == "":
|
||||
variant = "v8"
|
||||
return cls(os=_normalize_os(os), arch=arch, variant=variant or None)
|
||||
|
||||
def __str__(self) -> str:
|
||||
if self.variant:
|
||||
assert (
|
||||
self.os is not None and self.arch is not None
|
||||
) # ensured in constructor
|
||||
parts: list[str] = [self.os, self.arch, self.variant]
|
||||
elif self.os:
|
||||
if self.arch:
|
||||
parts = [self.os, self.arch]
|
||||
else:
|
||||
parts = [self.os]
|
||||
elif self.arch is not None:
|
||||
parts = [self.arch]
|
||||
else:
|
||||
parts = []
|
||||
return "/".join(parts)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return (
|
||||
f"_Platform(os={self.os!r}, arch={self.arch!r}, variant={self.variant!r})"
|
||||
)
|
||||
|
||||
def __eq__(self, other: object) -> bool:
|
||||
if not isinstance(other, _Platform):
|
||||
return NotImplemented
|
||||
return (
|
||||
self.os == other.os
|
||||
and self.arch == other.arch
|
||||
and self.variant == other.variant
|
||||
)
|
||||
|
||||
|
||||
def normalize_platform_string(
|
||||
string: str, daemon_os: str | None = None, daemon_arch: str | None = None
|
||||
) -> str:
|
||||
return str(
|
||||
_Platform.parse_platform_string(
|
||||
string, daemon_os=daemon_os, daemon_arch=daemon_arch
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def compose_platform_string(
|
||||
os: str | None = None,
|
||||
arch: str | None = None,
|
||||
variant: str | None = None,
|
||||
daemon_os: str | None = None,
|
||||
daemon_arch: str | None = None,
|
||||
) -> str:
|
||||
if os is None and daemon_os is not None:
|
||||
os = _normalize_os(daemon_os)
|
||||
if arch is None and daemon_arch is not None:
|
||||
arch, variant = _normalize_arch(daemon_arch, variant or "")
|
||||
variant = variant or None
|
||||
return str(_Platform(os=os, arch=arch, variant=variant or None))
|
||||
|
||||
|
||||
def compare_platform_strings(string1: str, string2: str) -> bool:
|
||||
return _Platform.parse_platform_string(string1) == _Platform.parse_platform_string(
|
||||
string2
|
||||
)
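# Normalization sketches (not part of the original module), mirroring
# containerd's alias handling: common names map to canonical GOOS/GOARCH.
assert normalize_platform_string("x86_64") == "amd64"
assert normalize_platform_string("linux/aarch64") == "linux/arm64"
assert compare_platform_strings("linux/amd64", "linux/x86-64")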
|
||||
|
|
@@ -0,0 +1,46 @@
|
|||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
|
||||
# Do not use this from other collections or standalone plugins/modules!
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import base64
|
||||
import random
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_bytes, to_text
|
||||
|
||||
|
||||
def generate_insecure_key() -> bytes:
|
||||
"""Do NOT use this for cryptographic purposes!"""
|
||||
while True:
|
||||
# Generate a one-byte key. Right now the functions below do not use more
|
||||
# than one byte, so this is sufficient.
|
||||
key = bytes([random.randint(0, 255)])
|
||||
# Return anything that is not zero
|
||||
if key != b"\x00":
|
||||
return key
|
||||
|
||||
|
||||
def scramble(value: str, key: bytes) -> str:
|
||||
"""Do NOT use this for cryptographic purposes!"""
|
||||
if len(key) < 1:
|
||||
raise ValueError("Key must be at least one byte")
|
||||
b_value = to_bytes(value)
|
||||
k = key[0]
|
||||
b_value = bytes([k ^ b for b in b_value])
|
||||
return f"=S={to_text(base64.b64encode(b_value))}"
|
||||
|
||||
|
||||
def unscramble(value: str, key: bytes) -> str:
|
||||
"""Do NOT use this for cryptographic purposes!"""
|
||||
if len(key) < 1:
|
||||
raise ValueError("Key must be at least one byte")
|
||||
if not value.startswith("=S="):
|
||||
raise ValueError("Value does not start with indicator")
|
||||
b_value = base64.b64decode(value[3:])
|
||||
k = key[0]
|
||||
b_value = bytes([k ^ b for b in b_value])
|
||||
return to_text(b_value)
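# Round-trip sketch (not part of the original module): scrambling only
# obfuscates, so unscrambling with the same key restores the input.
_key = generate_insecure_key()
assert unscramble(scramble("secret", _key), _key) == "secret"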
|
||||
|
|
@@ -0,0 +1,240 @@
|
|||
# Copyright (c) 2019-2021, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
|
||||
# Do not use this from other collections or standalone plugins/modules!
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import os.path
|
||||
import selectors
|
||||
import socket as pysocket
|
||||
import struct
|
||||
import typing as t
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._api.utils import (
|
||||
socket as docker_socket,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._socket_helper import (
|
||||
make_unblocking,
|
||||
shutdown_writing,
|
||||
write_to_socket,
|
||||
)
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from collections.abc import Callable
|
||||
from types import TracebackType
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._socket_helper import (
|
||||
SocketLike,
|
||||
)
|
||||
|
||||
|
||||
PARAMIKO_POLL_TIMEOUT = 0.01 # 10 milliseconds
|
||||
|
||||
|
||||
def _empty_writer(msg: str) -> None:
|
||||
pass
|
||||
|
||||
|
||||
class DockerSocketHandlerBase:
|
||||
def __init__(
|
||||
self, sock: SocketLike, log: Callable[[str], None] | None = None
|
||||
) -> None:
|
||||
make_unblocking(sock)
|
||||
|
||||
self._log = log or _empty_writer
|
||||
self._paramiko_read_workaround = hasattr(
|
||||
sock, "send_ready"
|
||||
) and "paramiko" in str(type(sock))
|
||||
|
||||
self._sock = sock
|
||||
self._block_done_callback: Callable[[int, bytes], None] | None = None
|
||||
self._block_buffer: list[tuple[int, bytes]] = []
|
||||
self._eof = False
|
||||
self._read_buffer = b""
|
||||
self._write_buffer = b""
|
||||
self._end_of_writing = False
|
||||
|
||||
self._current_stream: int | None = None
|
||||
self._current_missing = 0
|
||||
self._current_buffer = b""
|
||||
|
||||
self._selector = selectors.DefaultSelector()
|
||||
self._selector.register(self._sock, selectors.EVENT_READ)
|
||||
|
||||
def __enter__(self) -> t.Self:
|
||||
return self
|
||||
|
||||
def __exit__(
|
||||
self,
|
||||
type_: type[BaseException] | None,
|
||||
value: BaseException | None,
|
||||
tb: TracebackType | None,
|
||||
) -> None:
|
||||
self._selector.close()
|
||||
|
||||
def set_block_done_callback(
|
||||
self, block_done_callback: Callable[[int, bytes], None]
|
||||
) -> None:
|
||||
self._block_done_callback = block_done_callback
|
||||
if self._block_done_callback is not None:
|
||||
while self._block_buffer:
|
||||
elt = self._block_buffer.pop(0)
|
||||
self._block_done_callback(*elt)
|
||||
|
||||
def _add_block(self, stream_id: int, data: bytes) -> None:
|
||||
if self._block_done_callback is not None:
|
||||
self._block_done_callback(stream_id, data)
|
||||
else:
|
||||
self._block_buffer.append((stream_id, data))
|
||||
|
||||
def _read(self) -> None:
|
||||
if self._eof:
|
||||
return
|
||||
data: bytes | None
|
||||
if hasattr(self._sock, "recv"):
|
||||
try:
|
||||
data = self._sock.recv(262144)
|
||||
except Exception as e: # pylint: disable=broad-exception-caught
|
||||
# After calling self._sock.shutdown(), OpenSSL's/urllib3's
|
||||
# WrappedSocket seems to eventually raise ZeroReturnError in
|
||||
# case of EOF
|
||||
if "OpenSSL.SSL.ZeroReturnError" in str(type(e)):
|
||||
self._eof = True
|
||||
return
|
||||
raise
|
||||
elif isinstance(self._sock, pysocket.SocketIO): # type: ignore[unreachable]
|
||||
data = self._sock.read() # type: ignore
|
||||
else:
|
||||
data = os.read(self._sock.fileno(), 262144)  # type: ignore # os.read() requires an explicit byte count
|
||||
if data is None:
|
||||
# no data available
|
||||
return # type: ignore[unreachable]
|
||||
self._log(f"read {len(data)} bytes")
|
||||
if len(data) == 0:
|
||||
# Stream EOF
|
||||
self._eof = True
|
||||
return
|
||||
self._read_buffer += data
|
||||
while len(self._read_buffer) > 0:
|
||||
if self._current_missing > 0:
|
||||
n = min(len(self._read_buffer), self._current_missing)
|
||||
self._current_buffer += self._read_buffer[:n]
|
||||
self._read_buffer = self._read_buffer[n:]
|
||||
self._current_missing -= n
|
||||
if self._current_missing == 0:
|
||||
assert self._current_stream is not None
|
||||
self._add_block(self._current_stream, self._current_buffer)
|
||||
self._current_buffer = b""
|
||||
if len(self._read_buffer) < 8:
|
||||
break
|
||||
self._current_stream, self._current_missing = struct.unpack(
|
||||
">BxxxL", self._read_buffer[:8]
|
||||
)
|
||||
self._read_buffer = self._read_buffer[8:]
|
||||
if self._current_missing < 0:
|
||||
# Stream EOF (as reported by docker daemon)
|
||||
self._eof = True
|
||||
break
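# Frame-format note (illustrative, not part of the original module): the
# daemon multiplexes stdout/stderr into frames of an 8-byte header followed
# by the payload. The header is one stream-ID byte (0=stdin, 1=stdout,
# 2=stderr), three padding bytes, and a big-endian 32-bit payload length:
#
#     struct.unpack(">BxxxL", bytes([1, 0, 0, 0]) + struct.pack(">L", 5))
#     # -> (1, 5): stream 1 (stdout), 5 payload bytes follow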
|
||||
|
||||
def _handle_end_of_writing(self) -> None:
|
||||
if self._end_of_writing and len(self._write_buffer) == 0:
|
||||
self._end_of_writing = False
|
||||
self._log("Shutting socket down for writing")
|
||||
shutdown_writing(self._sock, self._log)
|
||||
|
||||
def _write(self) -> None:
|
||||
if len(self._write_buffer) > 0:
|
||||
written = write_to_socket(self._sock, self._write_buffer)
|
||||
self._write_buffer = self._write_buffer[written:]
|
||||
self._log(f"wrote {written} bytes, {len(self._write_buffer)} are left")
|
||||
if len(self._write_buffer) > 0:
|
||||
self._selector.modify(
|
||||
self._sock, selectors.EVENT_READ | selectors.EVENT_WRITE
|
||||
)
|
||||
else:
|
||||
self._selector.modify(self._sock, selectors.EVENT_READ)
|
||||
self._handle_end_of_writing()
|
||||
|
||||
def select(
|
||||
self, timeout: int | float | None = None, _internal_recursion: bool = False
|
||||
) -> bool:
|
||||
if (
|
||||
not _internal_recursion
|
||||
and self._paramiko_read_workaround
|
||||
and len(self._write_buffer) > 0
|
||||
):
|
||||
# When the SSH transport is used, Docker SDK for Python internally uses Paramiko, whose
|
||||
# Channel object supports select(), but only for reading
|
||||
# (https://github.com/paramiko/paramiko/issues/695).
|
||||
if self._sock.send_ready(): # type: ignore
|
||||
self._write()
|
||||
return True
|
||||
while timeout is None or timeout > PARAMIKO_POLL_TIMEOUT:
|
||||
result = int(
|
||||
self.select(PARAMIKO_POLL_TIMEOUT, _internal_recursion=True)
|
||||
)
|
||||
if self._sock.send_ready(): # type: ignore
|
||||
self._write()
|
||||
result += 1
|
||||
if result > 0:
|
||||
return True
|
||||
if timeout is not None:
|
||||
timeout -= PARAMIKO_POLL_TIMEOUT
|
||||
self._log(f"select... ({timeout})")
|
||||
events = self._selector.select(timeout)
|
||||
for key, event in events:
|
||||
if key.fileobj == self._sock:
|
||||
ev_read = event & selectors.EVENT_READ != 0
|
||||
ev_write = event & selectors.EVENT_WRITE != 0
|
||||
self._log(f"select event read:{ev_read} write:{ev_write}")
|
||||
if event & selectors.EVENT_READ != 0:
|
||||
self._read()
|
||||
if event & selectors.EVENT_WRITE != 0:
|
||||
self._write()
|
||||
result = len(events)
|
||||
if self._paramiko_read_workaround and len(self._write_buffer) > 0 and self._sock.send_ready(): # type: ignore
|
||||
self._write()
|
||||
result += 1
|
||||
return result > 0
|
||||
|
||||
def is_eof(self) -> bool:
|
||||
return self._eof
|
||||
|
||||
def end_of_writing(self) -> None:
|
||||
self._end_of_writing = True
|
||||
self._handle_end_of_writing()
|
||||
|
||||
def consume(self) -> tuple[bytes, bytes]:
|
||||
stdout = []
|
||||
stderr = []
|
||||
|
||||
def append_block(stream_id: int, data: bytes) -> None:
|
||||
if stream_id == docker_socket.STDOUT:
|
||||
stdout.append(data)
|
||||
elif stream_id == docker_socket.STDERR:
|
||||
stderr.append(data)
|
||||
else:
|
||||
raise ValueError(f"{stream_id} is not a valid stream ID")
|
||||
|
||||
self.end_of_writing()
|
||||
|
||||
self.set_block_done_callback(append_block)
|
||||
while not self._eof:
|
||||
self.select()
|
||||
return b"".join(stdout), b"".join(stderr)
|
||||
|
||||
def write(self, str_to_write: bytes) -> None:
|
||||
self._write_buffer += str_to_write
|
||||
if len(self._write_buffer) == len(str_to_write):
|
||||
self._write()
|
||||
|
||||
|
||||
class DockerSocketHandlerModule(DockerSocketHandlerBase):
|
||||
def __init__(self, sock: SocketLike, module: AnsibleModule) -> None:
|
||||
super().__init__(sock, module.debug)
|
||||
|
|
@@ -0,0 +1,79 @@
|
|||
# Copyright (c) 2019-2021, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
|
||||
# Do not use this from other collections or standalone plugins/modules!
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import fcntl
|
||||
import os
|
||||
import os.path
|
||||
import socket as pysocket
|
||||
import typing as t
|
||||
from collections.abc import Callable
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
SocketLike = pysocket.socket
|
||||
|
||||
|
||||
def make_file_unblocking(file: SocketLike) -> None:
|
||||
fcntl.fcntl(
|
||||
file.fileno(),
|
||||
fcntl.F_SETFL,
|
||||
fcntl.fcntl(file.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK,
|
||||
)
|
||||
|
||||
|
||||
def make_file_blocking(file: SocketLike) -> None:
|
||||
fcntl.fcntl(
|
||||
file.fileno(),
|
||||
fcntl.F_SETFL,
|
||||
fcntl.fcntl(file.fileno(), fcntl.F_GETFL) & ~os.O_NONBLOCK,
|
||||
)
|
||||
|
||||
|
||||
def make_unblocking(sock: SocketLike) -> None:
|
||||
if hasattr(sock, "_sock"):
|
||||
sock._sock.setblocking(0)
|
||||
elif hasattr(sock, "setblocking"):
|
||||
sock.setblocking(0) # type: ignore # TODO: CHECK!
|
||||
else:
|
||||
make_file_unblocking(sock)
|
||||
|
||||
|
||||
def _empty_writer(msg: str) -> None:
|
||||
pass
|
||||
|
||||
|
||||
def shutdown_writing(
|
||||
sock: SocketLike, log: Callable[[str], None] = _empty_writer
|
||||
) -> None:
|
||||
# FIXME: This does **not work with SSLSocket**! Apparently SSLSocket does not allow to send
|
||||
# a close_notify TLS alert without completely shutting down the connection.
|
||||
# Calling sock.shutdown(pysocket.SHUT_WR) simply turns off TLS encryption and from that
|
||||
# point on the raw encrypted data is returned when sock.recv() is called. :-(
|
||||
if hasattr(sock, "shutdown_write"):
|
||||
sock.shutdown_write()
|
||||
elif hasattr(sock, "shutdown"):
|
||||
try:
|
||||
sock.shutdown(pysocket.SHUT_WR)
|
||||
except TypeError as e:
|
||||
# probably: "TypeError: shutdown() takes 1 positional argument but 2 were given"
|
||||
log(f"Shutting down for writing not possible; trying shutdown instead: {e}")
|
||||
sock.shutdown() # type: ignore
|
||||
elif isinstance(sock, pysocket.SocketIO): # type: ignore
|
||||
sock._sock.shutdown(pysocket.SHUT_WR) # type: ignore[unreachable]
|
||||
else:
|
||||
log("No idea how to signal end of writing")
|
||||
|
||||
|
||||
def write_to_socket(sock: SocketLike, data: bytes) -> int:
|
||||
if hasattr(sock, "_send_until_done"):
|
||||
# WrappedSocket (urllib3/contrib/pyopenssl) does not have `send`, but
|
||||
# only `sendall`, which uses `_send_until_done` under the hood.
|
||||
return sock._send_until_done(data)
|
||||
if hasattr(sock, "send"):
|
||||
return sock.send(data)
|
||||
return os.write(sock.fileno(), data)
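# Usage sketch (not part of the original module): after make_file_unblocking()
# the descriptor carries the O_NONBLOCK flag.
_a, _b = pysocket.socketpair()
make_file_unblocking(_a)
assert fcntl.fcntl(_a.fileno(), fcntl.F_GETFL) & os.O_NONBLOCK
_a.close()
_b.close()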
|
||||
|
|
@@ -0,0 +1,312 @@
|
|||
# Copyright (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) <piotr@it-playground.pl>
|
||||
# Copyright (c) Thierry Bouvet (@tbouvet)
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
|
||||
# Do not use this from other collections or standalone plugins/modules!
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import typing as t
|
||||
from time import sleep
|
||||
|
||||
try:
|
||||
from docker.errors import APIError, NotFound
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._common import (
|
||||
AnsibleDockerClient,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._version import (
|
||||
LooseVersion,
|
||||
)
|
||||
|
||||
|
||||
class AnsibleDockerSwarmClient(AnsibleDockerClient):
|
||||
def get_swarm_node_id(self) -> str | None:
|
||||
"""
|
||||
Get the 'NodeID' of the Swarm node or 'None' if host is not in Swarm. It returns the NodeID
|
||||
of the Docker host the module is executed on.
|
||||
:return:
|
||||
NodeID of host or 'None' if not part of Swarm
|
||||
"""
|
||||
|
||||
try:
|
||||
info = self.info()
|
||||
except APIError as exc:
|
||||
self.fail(f"Failed to get node information for {exc}")
|
||||
|
||||
if info:
|
||||
json_str = json.dumps(info, ensure_ascii=False)
|
||||
swarm_info = json.loads(json_str)
|
||||
if swarm_info["Swarm"]["NodeID"]:
|
||||
return swarm_info["Swarm"]["NodeID"]
|
||||
return None
|
||||
|
||||
def check_if_swarm_node(self, node_id: str | None = None) -> bool | None:
|
||||
"""
|
||||
Check whether the host is part of a Docker Swarm. If 'node_id' is not provided, the Docker host's
|
||||
system information is inspected for the relevant Swarm key. If 'node_id' is provided, the method tries to
|
||||
read node information, assuming it runs on a Swarm manager. The get_node_inspect() method handles the exception if
|
||||
it is not executed on a Swarm manager.
|
||||
|
||||
:param node_id: Node identifier
|
||||
:return:
|
||||
bool: True if node is part of Swarm, False otherwise
|
||||
"""
|
||||
|
||||
if node_id is None:
|
||||
try:
|
||||
info = self.info()
|
||||
except APIError:
|
||||
self.fail("Failed to get host information.")
|
||||
|
||||
if info:
|
||||
json_str = json.dumps(info, ensure_ascii=False)
|
||||
swarm_info = json.loads(json_str)
|
||||
if swarm_info["Swarm"]["NodeID"]:
|
||||
return True
|
||||
return swarm_info["Swarm"]["LocalNodeState"] in (
|
||||
"active",
|
||||
"pending",
|
||||
"locked",
|
||||
)
|
||||
return False
|
||||
try:
|
||||
node_info = self.get_node_inspect(node_id=node_id)
|
||||
except APIError:
|
||||
return None
|
||||
|
||||
return node_info["ID"] is not None
|
||||
|
||||
def check_if_swarm_manager(self) -> bool:
|
||||
"""
|
||||
Checks if node role is set as Manager in Swarm. The node is the docker host on which module action
|
||||
is performed. The inspect_swarm() will fail if node is not a manager
|
||||
|
||||
:return: True if node is Swarm Manager, False otherwise
|
||||
"""
|
||||
|
||||
try:
|
||||
self.inspect_swarm()
|
||||
return True
|
||||
except APIError:
|
||||
return False
|
||||
|
||||
def fail_task_if_not_swarm_manager(self) -> None:
|
||||
"""
|
||||
If host is not a swarm manager then Ansible task on this host should end with 'failed' state
|
||||
"""
|
||||
if not self.check_if_swarm_manager():
|
||||
self.fail(
|
||||
"Error running docker swarm module: must run on swarm manager node"
|
||||
)
|
||||
|
||||
def check_if_swarm_worker(self) -> bool:
|
||||
"""
|
||||
Checks if node role is set as Worker in Swarm. The node is the docker host on which module action
|
||||
is performed. Will fail if run on host that is not part of Swarm via check_if_swarm_node()
|
||||
|
||||
:return: True if node is Swarm Worker, False otherwise
|
||||
"""
|
||||
|
||||
return bool(self.check_if_swarm_node() and not self.check_if_swarm_manager())
|
||||
|
||||
def check_if_swarm_node_is_down(
|
||||
self, node_id: str | None = None, repeat_check: int = 1
|
||||
) -> bool:
|
||||
"""
|
||||
Checks if node status on Swarm manager is 'down'. If node_id is provided it query manager about
|
||||
node specified in parameter, otherwise it query manager itself. If run on Swarm Worker node or
|
||||
host that is not part of Swarm it will fail the playbook
|
||||
|
||||
:param repeat_check: number of check attempts with 5 seconds delay between them, by default check only once
|
||||
:param node_id: node ID or name, if None then method will try to get node_id of host module run on
|
||||
:return:
|
||||
True if node is part of swarm but its state is down, False otherwise
|
||||
"""
|
||||
|
||||
repeat_check = max(1, repeat_check)
|
||||
|
||||
if node_id is None:
|
||||
node_id = self.get_swarm_node_id()
|
||||
|
||||
for retry in range(0, repeat_check):
|
||||
if retry > 0:
|
||||
sleep(5)
|
||||
node_info = self.get_node_inspect(node_id=node_id)
|
||||
if node_info["Status"]["State"] == "down":
|
||||
return True
|
||||
return False
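
    # Usage sketch for the retry behavior above (hypothetical client instance):
    # with repeat_check=3 the node state is polled up to three times, sleeping
    # 5 seconds before the second and third attempts:
    #
    #     client.check_if_swarm_node_is_down(node_id="manager-1", repeat_check=3)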

    @t.overload
    def get_node_inspect(
        self, node_id: str | None = None, skip_missing: t.Literal[False] = False
    ) -> dict[str, t.Any]: ...

    @t.overload
    def get_node_inspect(
        self, node_id: str | None = None, skip_missing: bool = False
    ) -> dict[str, t.Any] | None: ...

    def get_node_inspect(
        self, node_id: str | None = None, skip_missing: bool = False
    ) -> dict[str, t.Any] | None:
        """
        Return Swarm node info for a single node, as in the 'docker node inspect' command.

        :param skip_missing: if True then function will return None instead of failing the task
        :param node_id: node ID or name, if None then method will try to get node_id of host module run on
        :return:
            Single node information structure
        """

        if node_id is None:
            node_id = self.get_swarm_node_id()

        if node_id is None:
            self.fail("Failed to get node information.")

        try:
            node_info = self.inspect_node(node_id=node_id)
        except APIError as exc:
            if exc.status_code == 503:
                self.fail(
                    "Cannot inspect node: To inspect node execute module on Swarm Manager"
                )
            if exc.status_code == 404 and skip_missing:
                return None
            self.fail(f"Error while reading from Swarm manager: {exc}")
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error inspecting swarm node: {exc}")

        json_str = json.dumps(node_info, ensure_ascii=False)
        node_info = json.loads(json_str)

        if "ManagerStatus" in node_info and node_info["ManagerStatus"].get("Leader"):
            # This is a workaround for a bug in Docker where in some cases the Leader IP is 0.0.0.0.
            # Check moby/moby#35437 for details.
            count_colons = node_info["ManagerStatus"]["Addr"].count(":")
            if count_colons == 1:
                swarm_leader_ip = (
                    node_info["ManagerStatus"]["Addr"].split(":", 1)[0]
                    or node_info["Status"]["Addr"]
                )
            else:
                swarm_leader_ip = node_info["Status"]["Addr"]
            node_info["Status"]["Addr"] = swarm_leader_ip
        return node_info
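
    # Sketch of the workaround above (hypothetical values): if the leader node
    # reports Status.Addr as "0.0.0.0" while ManagerStatus.Addr is
    # "192.0.2.10:2377", the host part "192.0.2.10" is copied into Status.Addr
    # so that callers see a usable leader IP.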

    def get_all_nodes_inspect(self) -> list[dict[str, t.Any]]:
        """
        Return Swarm node info for all registered nodes, as in the 'docker node inspect' command.

        :return:
            Structure with information about all nodes
        """
        try:
            node_info = self.nodes()
        except APIError as exc:
            if exc.status_code == 503:
                self.fail(
                    "Cannot inspect node: To inspect node execute module on Swarm Manager"
                )
            self.fail(f"Error while reading from Swarm manager: {exc}")
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error inspecting swarm node: {exc}")

        json_str = json.dumps(node_info, ensure_ascii=False)
        node_info = json.loads(json_str)
        return node_info

    @t.overload
    def get_all_nodes_list(self, output: t.Literal["short"] = "short") -> list[str]: ...

    @t.overload
    def get_all_nodes_list(
        self, output: t.Literal["long"]
    ) -> list[dict[str, t.Any]]: ...

    def get_all_nodes_list(
        self, output: t.Literal["short", "long"] = "short"
    ) -> list[str] | list[dict[str, t.Any]]:
        """
        Return the list of nodes registered in the Swarm.

        :param output: Defines format of returned data
        :return:
            If 'output' is 'short', the returned data is a list of the hostnames of the nodes
            registered in the Swarm; if 'output' is 'long', it is a list of dicts containing the
            attributes as in the output of the 'docker node ls' command.
        """
        nodes_inspect = self.get_all_nodes_inspect()

        if output == "short":
            nodes_list = []
            for node in nodes_inspect:
                nodes_list.append(node["Description"]["Hostname"])
            return nodes_list
        if output == "long":
            nodes_info_list = []
            for node in nodes_inspect:
                node_property: dict[str, t.Any] = {}

                node_property["ID"] = node["ID"]
                node_property["Hostname"] = node["Description"]["Hostname"]
                node_property["Status"] = node["Status"]["State"]
                node_property["Availability"] = node["Spec"]["Availability"]
                if "ManagerStatus" in node:
                    if node["ManagerStatus"]["Leader"] is True:
                        node_property["Leader"] = True
                    node_property["ManagerStatus"] = node["ManagerStatus"][
                        "Reachability"
                    ]
                node_property["EngineVersion"] = node["Description"]["Engine"][
                    "EngineVersion"
                ]

                nodes_info_list.append(node_property)
            return nodes_info_list

    def get_node_name_by_id(self, nodeid: str) -> str:
        return self.get_node_inspect(nodeid)["Description"]["Hostname"]

    def get_unlock_key(self) -> dict[str, t.Any] | None:
        if self.docker_py_version < LooseVersion("2.7.0"):
            return None
        return super().get_unlock_key()

    def get_service_inspect(
        self, service_id: str, skip_missing: bool = False
    ) -> dict[str, t.Any] | None:
        """
        Return Swarm service info for a single service, as in the 'docker service inspect' command.

        :param service_id: service ID or name
        :param skip_missing: if True then function will return None instead of failing the task
        :return:
            Single service information structure
        """
        try:
            service_info = self.inspect_service(service_id)
        except NotFound as exc:
            if skip_missing is False:
                self.fail(f"Error while reading from Swarm manager: {exc}")
            else:
                return None
        except APIError as exc:
            if exc.status_code == 503:
                self.fail(
                    "Cannot inspect service: To inspect service execute module on Swarm Manager"
                )
            self.fail(f"Error inspecting swarm service: {exc}")
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(f"Error inspecting swarm service: {exc}")

        json_str = json.dumps(service_info, ensure_ascii=False)
        service_info = json.loads(json_str)
        return service_info

@ -0,0 +1,551 @@
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import ipaddress
import json
import re
import typing as t
from datetime import timedelta
from urllib.parse import urlparse

from ansible.module_utils.basic import env_fallback
from ansible.module_utils.common.collections import is_sequence
from ansible.module_utils.common.text.converters import to_text

if t.TYPE_CHECKING:
    from collections.abc import Callable

    from ansible.module_utils.basic import AnsibleModule

    from ._common import AnsibleDockerClientBase as CADCB
    from ._common_api import AnsibleDockerClientBase as CAPIADCB
    from ._common_cli import AnsibleDockerClientBase as CCLIADCB

    Client = t.Union[CADCB, CAPIADCB, CCLIADCB]  # noqa: UP007


DEFAULT_DOCKER_HOST = "unix:///var/run/docker.sock"
DEFAULT_TLS = False
DEFAULT_TLS_VERIFY = False
DEFAULT_TLS_HOSTNAME = "localhost"  # deprecated
DEFAULT_TIMEOUT_SECONDS = 60

DOCKER_COMMON_ARGS = {
    "docker_host": {
        "type": "str",
        "default": DEFAULT_DOCKER_HOST,
        "fallback": (env_fallback, ["DOCKER_HOST"]),
        "aliases": ["docker_url"],
    },
    "tls_hostname": {
        "type": "str",
        "fallback": (env_fallback, ["DOCKER_TLS_HOSTNAME"]),
    },
    "api_version": {
        "type": "str",
        "default": "auto",
        "fallback": (env_fallback, ["DOCKER_API_VERSION"]),
        "aliases": ["docker_api_version"],
    },
    "timeout": {
        "type": "int",
        "default": DEFAULT_TIMEOUT_SECONDS,
        "fallback": (env_fallback, ["DOCKER_TIMEOUT"]),
    },
    "ca_path": {"type": "path", "aliases": ["ca_cert", "tls_ca_cert", "cacert_path"]},
    "client_cert": {"type": "path", "aliases": ["tls_client_cert", "cert_path"]},
    "client_key": {"type": "path", "aliases": ["tls_client_key", "key_path"]},
    "tls": {
        "type": "bool",
        "default": DEFAULT_TLS,
        "fallback": (env_fallback, ["DOCKER_TLS"]),
    },
    "use_ssh_client": {"type": "bool", "default": False},
    "validate_certs": {
        "type": "bool",
        "default": DEFAULT_TLS_VERIFY,
        "fallback": (env_fallback, ["DOCKER_TLS_VERIFY"]),
        "aliases": ["tls_verify"],
    },
    "debug": {"type": "bool", "default": False},
}

DOCKER_COMMON_ARGS_VARS = {
    option_name: f"ansible_docker_{option_name}"
    for option_name in DOCKER_COMMON_ARGS
    if option_name != "debug"
}

DOCKER_MUTUALLY_EXCLUSIVE: list[tuple[str, ...] | list[str]] = []

DOCKER_REQUIRED_TOGETHER: list[tuple[str, ...] | list[str]] = [
    ["client_cert", "client_key"]
]

DEFAULT_DOCKER_REGISTRY = "https://index.docker.io/v1/"
BYTE_SUFFIXES = ["B", "KB", "MB", "GB", "TB", "PB"]


def is_image_name_id(name: str) -> bool:
    """Check whether the given image name is in fact an image ID (hash)."""
    return bool(re.match("^sha256:[0-9a-fA-F]{64}$", name))


def is_valid_tag(tag: str, allow_empty: bool = False) -> bool:
    """Check whether the given string is a valid docker tag name."""
    if not tag:
        return allow_empty
    # See here ("Extended description") for a definition of what tags can be:
    # https://docs.docker.com/engine/reference/commandline/tag/
    return bool(re.match("^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$", tag))
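
# Illustrative checks that follow from the regular expressions above (values
# chosen for demonstration only):
#
#     is_image_name_id("sha256:" + "0" * 64)  # True: "sha256:" plus 64 hex digits
#     is_image_name_id("ubuntu:22.04")        # False: a repository/tag, not an ID
#     is_valid_tag("v1.2.3-rc.1")             # True
#     is_valid_tag(".hidden")                 # False: must start with [a-zA-Z0-9_]
#     is_valid_tag("", allow_empty=True)      # True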


def sanitize_result(data: t.Any) -> t.Any:
    """Sanitize data object for return to Ansible.

    When the data object contains types such as docker.types.containers.HostConfig,
    Ansible will fail when these are returned via exit_json or fail_json.
    HostConfig is derived from dict, but its constructor requires additional
    arguments. This function sanitizes data structures by recursively converting
    everything derived from dict to dict and everything derived from list (and tuple)
    to a list.
    """
    if isinstance(data, dict):
        return dict((k, sanitize_result(v)) for k, v in data.items())
    if isinstance(data, (list, tuple)):
        return [sanitize_result(v) for v in data]
    return data


def log_debug(msg: t.Any, pretty_print: bool = False) -> None:
    """Write a log message to docker.log.

    If ``pretty_print=True``, the message will be pretty-printed as JSON.
    """
    with open("docker.log", "at", encoding="utf-8") as log_file:
        if pretty_print:
            log_file.write(
                json.dumps(msg, sort_keys=True, indent=4, separators=(",", ": "))
            )
            log_file.write("\n")
        else:
            log_file.write(f"{msg}\n")


class DockerBaseClass:
    def __init__(self) -> None:
        self.debug = False

    def log(self, msg: t.Any, pretty_print: bool = False) -> None:
        pass
        # if self.debug:
        #     log_debug(msg, pretty_print=pretty_print)


def update_tls_hostname(
    result: dict[str, t.Any],
    old_behavior: bool = False,
    deprecate_function: Callable[[str], None] | None = None,
    uses_tls: bool = True,
) -> None:
    if result["tls_hostname"] is None:
        # get default machine name from the url
        parsed_url = urlparse(result["docker_host"])
        result["tls_hostname"] = parsed_url.netloc.rsplit(":", 1)[0]


def compare_dict_allow_more_present(av: dict, bv: dict) -> bool:
    """
    Compare two dictionaries for whether every entry of the first is in the second.
    """
    for key, value in av.items():
        if key not in bv:
            return False
        if bv[key] != value:
            return False
    return True


def compare_generic(
    a: t.Any,
    b: t.Any,
    method: t.Literal["ignore", "strict", "allow_more_present"],
    datatype: t.Literal["value", "list", "set", "set(dict)", "dict"],
) -> bool:
    """
    Compare values a and b as described by method and datatype.

    Returns ``True`` if the values compare equal, and ``False`` if not.

    ``a`` is usually the module's parameter, while ``b`` is a property
    of the current object. ``a`` must not be ``None`` (except for
    ``datatype == 'value'``).

    Valid values for ``method`` are:
    - ``ignore`` (always compare as equal);
    - ``strict`` (only compare if really equal);
    - ``allow_more_present`` (allow b to have elements which a does not have).

    Valid values for ``datatype`` are:
    - ``value``: for simple values (strings, numbers, ...);
    - ``list``: for ``list``s or ``tuple``s where order matters;
    - ``set``: for ``list``s, ``tuple``s or ``set``s where order does not
      matter;
    - ``set(dict)``: for ``list``s, ``tuple``s or ``set``s where order does
      not matter and which contain ``dict``s; ``allow_more_present`` is used
      for the ``dict``s, and these are assumed to be dictionaries of values;
    - ``dict``: for dictionaries of values.
    """
    if method == "ignore":
        return True
    # If a or b is None:
    if a is None or b is None:
        # If both are None: equality
        if a == b:
            return True
        # Otherwise, not equal for values, and equal
        # if the other is empty for set/list/dict
        if datatype == "value":
            return False
        # For allow_more_present, allow a to be None
        if method == "allow_more_present" and a is None:
            return True
        # Otherwise, the iterable object which is not None must have length 0
        return len(b if a is None else a) == 0
    # Do proper comparison (both objects not None)
    if datatype == "value":
        return a == b
    if datatype == "list":
        if method == "strict":
            return a == b
        i = 0
        for v in a:
            while i < len(b) and b[i] != v:
                i += 1
            if i == len(b):
                return False
            i += 1
        return True
    if datatype == "dict":
        if method == "strict":
            return a == b
        return compare_dict_allow_more_present(a, b)
    if datatype == "set":
        set_a = set(a)
        set_b = set(b)
        if method == "strict":
            return set_a == set_b
        return set_b >= set_a
    if datatype == "set(dict)":
        for av in a:
            found = False
            for bv in b:
                if compare_dict_allow_more_present(av, bv):
                    found = True
                    break
            if not found:
                return False
        if method == "strict":
            # If we would know that both a and b do not contain duplicates,
            # we could simply compare len(a) to len(b) to finish this test.
            # We can assume that b has no duplicates (as it is returned by
            # docker), but we do not know for a.
            for bv in b:
                found = False
                for av in a:
                    if compare_dict_allow_more_present(av, bv):
                        found = True
                        break
                if not found:
                    return False
        return True


class DifferenceTracker:
    def __init__(self) -> None:
        self._diff: list[dict[str, t.Any]] = []

    def add(self, name: str, parameter: t.Any = None, active: t.Any = None) -> None:
        self._diff.append(
            {
                "name": name,
                "parameter": parameter,
                "active": active,
            }
        )

    def merge(self, other_tracker: DifferenceTracker) -> None:
        self._diff.extend(other_tracker._diff)

    @property
    def empty(self) -> bool:
        return len(self._diff) == 0

    def get_before_after(self) -> tuple[dict[str, t.Any], dict[str, t.Any]]:
        """
        Return texts ``before`` and ``after``.
        """
        before = {}
        after = {}
        for item in self._diff:
            before[item["name"]] = item["active"]
            after[item["name"]] = item["parameter"]
        return before, after

    def has_difference_for(self, name: str) -> bool:
        """
        Return whether a difference exists for the given name.
        """
        return any(diff for diff in self._diff if diff["name"] == name)

    def get_legacy_docker_container_diffs(self) -> list[dict[str, t.Any]]:
        """
        Return differences in the docker_container legacy format.
        """
        result = []
        for entry in self._diff:
            item = {}
            item[entry["name"]] = {
                "parameter": entry["parameter"],
                "container": entry["active"],
            }
            result.append(item)
        return result

    def get_legacy_docker_diffs(self) -> list[str]:
        """
        Return differences in the docker_container legacy format.
        """
        result = [entry["name"] for entry in self._diff]
        return result


def sanitize_labels(
    labels: dict[str, t.Any] | None,
    labels_field: str,
    client: Client | None = None,
    module: AnsibleModule | None = None,
) -> None:
    def fail(msg: str) -> t.NoReturn:
        if client is not None:
            client.fail(msg)
        if module is not None:
            module.fail_json(msg=msg)
        raise ValueError(msg)

    if labels is None:
        return
    for k, v in list(labels.items()):
        if not isinstance(k, str):
            fail(f"The key {k!r} of {labels_field} is not a string!")
        if isinstance(v, (bool, float)):
            fail(
                f"The value {v!r} for {k!r} of {labels_field} is not a string or something that can be safely converted to a string!"
            )
        labels[k] = to_text(v)


@t.overload
def clean_dict_booleans_for_docker_api(
    data: dict[str, t.Any], *, allow_sequences: t.Literal[False] = False
) -> dict[str, str]: ...


@t.overload
def clean_dict_booleans_for_docker_api(
    data: dict[str, t.Any], *, allow_sequences: bool
) -> dict[str, str | list[str]]: ...


def clean_dict_booleans_for_docker_api(
    data: dict[str, t.Any] | None, *, allow_sequences: bool = False
) -> dict[str, str] | dict[str, str | list[str]]:
    """
    Go does not like the Python booleans 'True' or 'False', while Ansible is just
    fine with them in YAML. As such, they need to be converted in cases where
    we pass dictionaries to the Docker API (e.g. docker_network's
    driver_options and docker_prune's filters). When `allow_sequences=True`,
    YAML sequences (lists, tuples) are converted to [str] instead of str([...]),
    which is the format expected by filters that accept lists, such as labels.
    """

    def sanitize(value: t.Any) -> str:
        if value is True:
            return "true"
        if value is False:
            return "false"
        return str(value)

    result = {}
    if data is not None:
        for k, v in data.items():
            result[str(k)] = (
                [sanitize(e) for e in v]
                if allow_sequences and is_sequence(v)
                else sanitize(v)
            )
    return result
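
# Example of the conversion (illustrative input):
#
#     clean_dict_booleans_for_docker_api({"enable_icc": True, "mtu": 1500})
#     # -> {"enable_icc": "true", "mtu": "1500"}
#     clean_dict_booleans_for_docker_api({"label": ["a", True]}, allow_sequences=True)
#     # -> {"label": ["a", "true"]}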


def convert_duration_to_nanosecond(time_str: str) -> int:
    """
    Return the time duration in nanoseconds.
    """
    if not isinstance(time_str, str):
        raise ValueError(f"Missing unit in duration - {time_str}")

    regex = re.compile(
        r"^(((?P<hours>\d+)h)?"
        r"((?P<minutes>\d+)m(?!s))?"
        r"((?P<seconds>\d+)s)?"
        r"((?P<milliseconds>\d+)ms)?"
        r"((?P<microseconds>\d+)us)?)$"
    )
    parts = regex.match(time_str)

    if not parts:
        raise ValueError(f"Invalid time duration - {time_str}")

    parts_dict = parts.groupdict()
    time_params = {}
    for name, value in parts_dict.items():
        if value:
            time_params[name] = int(value)

    delta = timedelta(**time_params)
    time_in_nanoseconds = (
        delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10**6
    ) * 10**3

    return time_in_nanoseconds
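
# Values that follow from the regex and the timedelta arithmetic above
# (illustrative):
#
#     convert_duration_to_nanosecond("1m30s")  # -> 90_000_000_000
#     convert_duration_to_nanosecond("500ms")  # -> 500_000_000
#     convert_duration_to_nanosecond("90")     # ValueError: digits without a unit do not match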


def normalize_healthcheck_test(test: t.Any) -> list[str]:
    if isinstance(test, (tuple, list)):
        return [str(e) for e in test]
    return ["CMD-SHELL", str(test)]


def normalize_healthcheck(
    healthcheck: dict[str, t.Any], normalize_test: bool = False
) -> dict[str, t.Any]:
    """
    Return a dictionary of healthcheck parameters.
    """
    result = {}

    # All supported healthcheck parameters
    options = (
        "test",
        "test_cli_compatible",
        "interval",
        "timeout",
        "start_period",
        "start_interval",
        "retries",
    )

    duration_options = ("interval", "timeout", "start_period", "start_interval")

    for key in options:
        if key in healthcheck:
            value = healthcheck[key]
            if value is None:
                # due to recursive argument_spec, all keys are always present
                # (but have default value None if not specified)
                continue
            if key in duration_options:
                value = convert_duration_to_nanosecond(value)
            if not value and not (
                healthcheck.get("test_cli_compatible") and key == "test"
            ):
                continue
            if key == "retries":
                try:
                    value = int(value)
                except ValueError as exc:
                    raise ValueError(
                        f'Cannot parse number of retries for healthcheck. Expected an integer, got "{value}".'
                    ) from exc
            if key == "test" and value and normalize_test:
                value = normalize_healthcheck_test(value)
            result[key] = value

    return result


def parse_healthcheck(
    healthcheck: dict[str, t.Any] | None,
) -> tuple[dict[str, t.Any] | None, bool | None]:
    """
    Return a dictionary of healthcheck parameters, plus a boolean indicating
    whether the healthcheck defined in the image was requested to be disabled.
    """
    if (not healthcheck) or (not healthcheck.get("test")):
        return None, None

    result = normalize_healthcheck(healthcheck, normalize_test=True)

    if result["test"] == ["NONE"]:
        # If the user explicitly disables the healthcheck, return None
        # as the healthcheck object, and set disable_healthcheck to True
        return None, True

    return result, False
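
# Behavior sketch (illustrative values):
#
#     parse_healthcheck({"test": "curl -f http://localhost/", "interval": "30s"})
#     # -> ({"test": ["CMD-SHELL", "curl -f http://localhost/"], "interval": 30_000_000_000}, False)
#     parse_healthcheck({"test": ["NONE"]})  # -> (None, True): disable the image's healthcheck
#     parse_healthcheck(None)                # -> (None, None): nothing specified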


def omit_none_from_dict(d: dict[str, t.Any]) -> dict[str, t.Any]:
    """
    Return a copy of the dictionary with all keys with value None omitted.
    """
    return {k: v for (k, v) in d.items() if v is not None}


@t.overload
def normalize_ip_address(ip_address: str) -> str: ...


@t.overload
def normalize_ip_address(ip_address: str | None) -> str | None: ...


def normalize_ip_address(ip_address: str | None) -> str | None:
    """
    Given an IP address as a string, normalize it so that it can be
    used to compare IP addresses as strings.
    """
    if ip_address is None:
        return None
    try:
        return ipaddress.ip_address(ip_address).compressed
    except ValueError:
        # Fallback for invalid addresses: simply return the input
        return ip_address


@t.overload
def normalize_ip_network(network: str) -> str: ...


@t.overload
def normalize_ip_network(network: str | None) -> str | None: ...


def normalize_ip_network(network: str | None) -> str | None:
    """
    Given a network in CIDR notation as a string, normalize it so that it can be
    used to compare networks as strings.
    """
    if network is None:
        return None
    try:
        return ipaddress.ip_network(network).compressed
    except ValueError:
        # Fallback for invalid networks: simply return the input
        return network
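
# Normalization examples (standard ipaddress behavior):
#
#     normalize_ip_address("2001:0db8:0000:0000:0000:0000:0000:0001")  # -> "2001:db8::1"
#     normalize_ip_address("not an address")                           # returned unchanged (fallback)
#     normalize_ip_network("2001:0db8::/32")                           # -> "2001:db8::/32"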

@ -0,0 +1,15 @@
# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

"""Provide version object to compare version numbers."""

from __future__ import annotations

from ansible.module_utils.compat.version import (  # noqa: F401, pylint: disable=unused-import
    LooseVersion,
    StrictVersion,
)

@ -0,0 +1,148 @@
#!/usr/bin/python
#
# Copyright (c) 2020 Matt Clay <mclay@redhat.com>
# Copyright (c) 2020 Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
module: current_container_facts
short_description: Return facts about whether the module runs in a container
version_added: 1.1.0
description:
  - Return facts about whether the module runs in a Docker or podman container.
  - This module attempts a best-effort detection. There might be special cases where it does not work; if you encounter one,
    make sure that this is still a problem with the latest community.docker release, and if it is,
    L(please file an issue, https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&template=bug_report.md).
author:
  - Felix Fontein (@felixfontein)
extends_documentation_fragment:
  - community.docker._attributes
  - community.docker._attributes.facts
  - community.docker._attributes.facts_module
  - community.docker._attributes.idempotent_not_modify_state
"""

EXAMPLES = r"""
---
- name: Get facts on current container
  community.docker.current_container_facts:

- name: Print information on current container when running in a container
  ansible.builtin.debug:
    msg: "Container ID is {{ ansible_module_container_id }}"
  when: ansible_module_running_in_container
"""

RETURN = r"""
ansible_facts:
  description: Ansible facts returned by the module.
  type: dict
  returned: always
  contains:
    ansible_module_running_in_container:
      description:
        - Whether the module was able to detect that it runs in a container or not.
      returned: always
      type: bool
    ansible_module_container_id:
      description:
        - The detected container ID.
        - Contains an empty string if no container was detected.
      returned: always
      type: str
    ansible_module_container_type:
      description:
        - The detected container environment.
        - Contains an empty string if no container was detected, or a non-empty string identifying the container environment.
        - V(docker) indicates that the module ran inside a regular Docker container.
        - V(azure_pipelines) indicates that the module ran on Azure Pipelines. This seems to no longer be reported.
        - V(github_actions) indicates that the module ran inside a Docker container on GitHub Actions. It is supported since
          community.docker 2.4.0.
        - V(podman) indicates that the module ran inside a regular Podman container. It is supported since community.docker
          3.3.0.
      returned: always
      type: str
      choices:
        - ''
        - docker
        - azure_pipelines
        - github_actions
        - podman
"""

import os
import re

from ansible.module_utils.basic import AnsibleModule


def main() -> None:
    module = AnsibleModule({}, supports_check_mode=True)

    cpuset_path = "/proc/self/cpuset"
    mountinfo_path = "/proc/self/mountinfo"

    container_id = ""
    container_type = ""

    contents = None
    if os.path.exists(cpuset_path):
        # File content varies based on the environment:
        #   No Container: /
        #   Docker: /docker/c86f3732b5ba3d28bb83b6e14af767ab96abbc52de31313dcb1176a62d91a507
        #   Azure Pipelines (Docker): /azpl_job/0f2edfed602dd6ec9f2e42c867f4d5ee640ebf4c058e6d3196d4393bb8fd0891
        #   Podman: /../../../../../..
        # While this was true and worked well for a long time, this seems to be no longer accurate
        # with newer Docker / Podman versions and/or with cgroupv2. That's why the /proc/self/mountinfo
        # detection further down is done when this test is inconclusive.
        with open(cpuset_path, "rb") as f:
            contents = f.read().decode("utf-8")

        cgroup_path, cgroup_name = os.path.split(contents.strip())

        if cgroup_path == "/docker":
            container_id = cgroup_name
            container_type = "docker"

        if cgroup_path == "/azpl_job":
            container_id = cgroup_name
            container_type = "azure_pipelines"

        if cgroup_path == "/actions_job":
            container_id = cgroup_name
            container_type = "github_actions"

    if not container_id and os.path.exists(mountinfo_path):
        with open(mountinfo_path, "rb") as f:
            contents = f.read().decode("utf-8")

        # As to why this works, see the explanations by Matt Clay in
        # https://github.com/ansible/ansible/blob/80d2f8da02052f64396da6b8caaf820eedbf18e2/test/lib/ansible_test/_internal/docker_util.py#L571-L610

        for line in contents.splitlines():
            parts = line.split()
            if len(parts) >= 5 and parts[4] == "/etc/hostname":
                m = re.match(".*/([a-f0-9]{64})/hostname$", parts[3])
                if m:
                    container_id = m.group(1)
                    container_type = "docker"

                m = re.match(".*/([a-f0-9]{64})/userdata/hostname$", parts[3])
                if m:
                    container_id = m.group(1)
                    container_type = "podman"

    module.exit_json(
        ansible_facts={
            "ansible_module_running_in_container": container_id != "",
            "ansible_module_container_id": container_id,
            "ansible_module_container_type": container_type,
        }
    )


if __name__ == "__main__":
    main()

@ -0,0 +1,748 @@
#!/usr/bin/python
#
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
# Copyright (c) 2023, Léo El Amri (@lel-amri)
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
module: docker_compose_v2

short_description: Manage multi-container Docker applications with Docker Compose CLI plugin

version_added: 3.6.0

description:
  - Uses Docker Compose to start or shutdown services.
extends_documentation_fragment:
  - community.docker._compose_v2
  - community.docker._compose_v2.minimum_version
  - community.docker._docker.cli_documentation
  - community.docker._attributes
  - community.docker._attributes.actiongroup_docker

attributes:
  check_mode:
    support: full
    details:
      - In check mode, pulling the image does not result in a changed result.
  diff_mode:
    support: none
  idempotent:
    support: partial
    details:
      - If O(state=restarted) or O(recreate=always) the module is not idempotent.

options:
  state:
    description:
      - Desired state of the project.
      - V(present) is equivalent to running C(docker compose up).
      - V(stopped) is equivalent to running C(docker compose stop).
      - V(absent) is equivalent to running C(docker compose down).
      - V(restarted) is equivalent to running C(docker compose restart).
    type: str
    default: present
    choices:
      - absent
      - stopped
      - restarted
      - present
  pull:
    description:
      - Whether to pull images before running. This is used when C(docker compose up) is run.
      - V(always) ensures that the images are always pulled, even when already present on the Docker daemon.
      - V(missing) only pulls them when they are not present on the Docker daemon.
      - V(never) never pulls images. If they are not present, the module will fail when trying to create the containers that
        need them.
      - V(policy) uses the Compose file's C(pull_policy) defined for the service to figure out what to do.
    type: str
    choices:
      - always
      - missing
      - never
      - policy
    default: policy
  build:
    description:
      - Whether to build images before starting containers. This is used when C(docker compose up) is run.
      - V(always) always builds before starting containers. This is equivalent to the C(--build) option of C(docker compose
        up).
      - V(never) never builds before starting containers. This is equivalent to the C(--no-build) option of C(docker compose
        up).
      - V(policy) uses the policy as defined in the Compose file.
    type: str
    choices:
      - always
      - never
      - policy
    default: policy
  dependencies:
    description:
      - When O(state) is V(present) or V(restarted), specify whether or not to include linked services.
    type: bool
    default: true
  ignore_build_events:
    description:
      - Ignores image building events for change detection.
      - If O(state=present) and O(ignore_build_events=true) and O(build=always), a rebuild that does not trigger a container
        restart no longer results in RV(ignore:changed=true).
      - Note that Docker Compose 2.31.0 is the first Compose 2.x version to emit build events. For older versions, the behavior
        is always as if O(ignore_build_events=true).
    type: bool
    default: true
    version_added: 4.2.0
  recreate:
    description:
      - By default containers will be recreated when their configuration differs from the service definition.
      - Setting to V(never) ignores configuration differences and leaves existing containers unchanged.
      - Setting to V(always) forces recreation of all existing containers.
    type: str
    default: auto
    choices:
      - always
      - never
      - auto
  renew_anon_volumes:
    description:
      - Whether to recreate instead of reuse anonymous volumes from previous containers.
      - V(true) is equivalent to the C(--renew-anon-volumes) option of C(docker compose up).
    type: bool
    default: false
    version_added: 4.0.0
  remove_images:
    description:
      - Use with O(state=absent) to remove all images or only local images.
    type: str
    choices:
      - all
      - local
  remove_volumes:
    description:
      - Use with O(state=absent) to remove data volumes.
    type: bool
    default: false
  remove_orphans:
    description:
      - Remove containers for services not defined in the Compose file.
    type: bool
    default: false
  timeout:
    description:
      - Timeout in seconds for container shutdown when attached or when containers are already running.
    type: int
  services:
    description:
      - Specifies a subset of services to be targeted.
    type: list
    elements: str
  scale:
    description:
      - Define how to scale services when running C(docker compose up).
      - Provide a dictionary of key/value pairs where the key is the name of the service and the value is an integer count
        for the number of containers.
    type: dict
    version_added: 3.7.0
  wait:
    description:
      - When running C(docker compose up), pass C(--wait) to wait for services to be running/healthy.
      - A timeout can be set with the O(wait_timeout) option.
    type: bool
    default: false
    version_added: 3.8.0
  wait_timeout:
    description:
      - When O(wait=true), wait at most this amount of seconds.
    type: int
    version_added: 3.8.0
  assume_yes:
    description:
      - When O(assume_yes=true), pass C(-y)/C(--yes) to assume "yes" as the answer to all prompts and run non-interactively.
      - Right now, a prompt is shown whenever a non-matching volume should be re-created. O(assume_yes=false)
        results in the question being answered with "no", which will simply re-use the existing volume.
      - This option is only available on Docker Compose 2.32.0 or newer.
    type: bool
    default: false
    version_added: 4.5.0

author:
  - Felix Fontein (@felixfontein)

seealso:
  - module: community.docker.docker_compose_v2_pull
"""

EXAMPLES = r"""
---
# Examples use the django example at https://docs.docker.com/compose/django. Follow it to create the
# flask directory

- name: Run using a project directory
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Tear down existing services
      community.docker.docker_compose_v2:
        project_src: flask
        state: absent

    - name: Create and start services
      community.docker.docker_compose_v2:
        project_src: flask
      register: output

    - name: Show results
      ansible.builtin.debug:
        var: output

    - name: Run `docker compose up` again
      community.docker.docker_compose_v2:
        project_src: flask
      register: output

    - name: Show results
      ansible.builtin.debug:
        var: output

    - ansible.builtin.assert:
        that: not output.changed

    - name: Stop all services
      community.docker.docker_compose_v2:
        project_src: flask
        state: stopped
      register: output

    - name: Show results
      ansible.builtin.debug:
        var: output

    - name: Verify that web and db services are not running
      ansible.builtin.assert:
        that:
          - web_container.State != 'running'
          - db_container.State != 'running'
      vars:
        web_container: >-
          {{ output.containers | selectattr("Service", "equalto", "web") | first }}
        db_container: >-
          {{ output.containers | selectattr("Service", "equalto", "db") | first }}

    - name: Restart services
      community.docker.docker_compose_v2:
        project_src: flask
        state: restarted
      register: output

    - name: Show results
      ansible.builtin.debug:
        var: output

    - name: Verify that web and db services are running
      ansible.builtin.assert:
        that:
          - web_container.State == 'running'
          - db_container.State == 'running'
      vars:
        web_container: >-
          {{ output.containers | selectattr("Service", "equalto", "web") | first }}
        db_container: >-
          {{ output.containers | selectattr("Service", "equalto", "db") | first }}
"""

RETURN = r"""
containers:
  description:
    - A list of containers associated with the service.
  returned: success
  type: list
  elements: dict
  contains:
    Command:
      description:
        - The container's command.
      type: raw
    CreatedAt:
      description:
        - The timestamp when the container was created.
      type: str
      sample: "2024-01-02 12:20:41 +0100 CET"
    ExitCode:
      description:
        - The container's exit code.
      type: int
    Health:
      description:
        - The container's health check.
      type: raw
    ID:
      description:
        - The container's ID.
      type: str
      sample: "44a7d607219a60b7db0a4817fb3205dce46e91df2cb4b78a6100b6e27b0d3135"
    Image:
      description:
        - The container's image.
      type: str
    Labels:
      description:
        - Labels for this container.
      type: dict
    LocalVolumes:
      description:
        - The local volumes count.
      type: str
    Mounts:
      description:
        - Mounts.
      type: str
    Name:
      description:
        - The container's primary name.
      type: str
    Names:
      description:
        - List of names of the container.
      type: list
      elements: str
    Networks:
      description:
        - List of networks attached to this container.
      type: list
      elements: str
    Ports:
      description:
        - List of port assignments as a string.
      type: str
    Publishers:
      description:
        - List of port assignments.
      type: list
      elements: dict
      contains:
        URL:
          description:
            - Interface the port is bound to.
          type: str
        TargetPort:
          description:
            - The container's port the published port maps to.
          type: int
        PublishedPort:
          description:
            - The port that is published.
          type: int
        Protocol:
          description:
            - The protocol.
          type: str
          choices:
            - tcp
            - udp
    RunningFor:
      description:
        - Amount of time the container has been running.
      type: str
    Service:
      description:
        - The name of the service.
      type: str
    Size:
      description:
        - The container's size.
      type: str
      sample: "0B"
    State:
      description:
        - The container's state.
      type: str
      sample: running
    Status:
      description:
        - The container's status.
      type: str
      sample: Up About a minute
images:
  description:
    - A list of images associated with the service.
  returned: success
  type: list
  elements: dict
  contains:
    ID:
      description:
        - The image's ID.
      type: str
      sample: sha256:c8bccc0af9571ec0d006a43acb5a8d08c4ce42b6cc7194dd6eb167976f501ef1
    ContainerName:
      description:
        - Name of the container this image is used by.
      type: str
    Repository:
      description:
        - The repository this image belongs to.
      type: str
    Tag:
      description:
        - The tag of the image.
      type: str
    Size:
      description:
        - The image's size in bytes.
      type: int
actions:
  description:
    - A list of actions that have been applied.
  returned: success
  type: list
  elements: dict
  contains:
    what:
      description:
        - What kind of resource was changed.
      type: str
      sample: container
      choices:
        - container
        - image
        - network
        - service
        - unknown
        - volume
    id:
      description:
        - The ID of the resource that was changed.
      type: str
      sample: container
    status:
      description:
        - The status change that happened.
      type: str
      sample: Creating
      choices:
        - Starting
        - Exiting
        - Restarting
        - Creating
        - Stopping
        - Killing
        - Removing
        - Recreating
        - Pulling
        - Building
"""

import traceback
import typing as t

from ansible.module_utils.common.validation import check_type_int

from ansible_collections.community.docker.plugins.module_utils._common_cli import (
    AnsibleModuleDockerClient,
    DockerException,
)
from ansible_collections.community.docker.plugins.module_utils._compose_v2 import (
    BaseComposeManager,
    common_compose_argspec_ex,
    is_failed,
)
from ansible_collections.community.docker.plugins.module_utils._version import (
    LooseVersion,
)


class ServicesManager(BaseComposeManager):
    def __init__(self, client: AnsibleModuleDockerClient) -> None:
        super().__init__(client)
        parameters = self.client.module.params

        self.state: t.Literal["absent", "present", "stopped", "restarted"] = parameters[
            "state"
        ]
        self.dependencies: bool = parameters["dependencies"]
        self.pull: t.Literal["always", "missing", "never", "policy"] = parameters[
            "pull"
        ]
        self.build: t.Literal["always", "never", "policy"] = parameters["build"]
        self.ignore_build_events: bool = parameters["ignore_build_events"]
        self.recreate: t.Literal["always", "never", "auto"] = parameters["recreate"]
        self.remove_images: t.Literal["all", "local"] | None = parameters[
            "remove_images"
        ]
        self.remove_volumes: bool = parameters["remove_volumes"]
        self.remove_orphans: bool = parameters["remove_orphans"]
        self.renew_anon_volumes: bool = parameters["renew_anon_volumes"]
        self.timeout: int | None = parameters["timeout"]
        self.services: list[str] = parameters["services"] or []
        self.scale: dict[str, t.Any] = parameters["scale"] or {}
        self.wait: bool = parameters["wait"]
        self.wait_timeout: int | None = parameters["wait_timeout"]
        self.yes: bool = parameters["assume_yes"]
        if self.compose_version < LooseVersion("2.32.0") and self.yes:
            self.fail(
                f"assume_yes=true needs Docker Compose 2.32.0 or newer, not version {self.compose_version}"
            )

        for key, value in self.scale.items():
            if not isinstance(key, str):
                self.fail(f"The key {key!r} for `scale` is not a string")
            try:
                value = check_type_int(value)
            except TypeError:
                self.fail(f"The value {value!r} for `scale[{key!r}]` is not an integer")
            if value < 0:
                self.fail(f"The value {value!r} for `scale[{key!r}]` is negative")
            self.scale[key] = value

    def run(self) -> dict[str, t.Any]:
        if self.state == "present":
            result = self.cmd_up()
        elif self.state == "stopped":
            result = self.cmd_stop()
        elif self.state == "restarted":
            result = self.cmd_restart()
        elif self.state == "absent":
            result = self.cmd_down()
        else:
            raise AssertionError("Unexpected state")

        result["containers"] = self.list_containers()
        result["images"] = self.list_images()
        self.cleanup_result(result)
        return result

    def get_up_cmd(self, dry_run: bool, no_start: bool = False) -> list[str]:
        args = self.get_base_args() + ["up", "--detach", "--no-color", "--quiet-pull"]
        if self.pull != "policy":
            args.extend(["--pull", self.pull])
        if self.remove_orphans:
            args.append("--remove-orphans")
        if self.recreate == "always":
            args.append("--force-recreate")
        if self.recreate == "never":
            args.append("--no-recreate")
        if self.renew_anon_volumes:
            args.append("--renew-anon-volumes")
        if not self.dependencies:
            args.append("--no-deps")
        if self.timeout is not None:
            args.extend(["--timeout", f"{self.timeout}"])
        if self.build == "always":
            args.append("--build")
        elif self.build == "never":
            args.append("--no-build")
        for key, value in sorted(self.scale.items()):
            args.extend(["--scale", f"{key}={value}"])
        if self.wait:
            args.append("--wait")
            if self.wait_timeout is not None:
                args.extend(["--wait-timeout", str(self.wait_timeout)])
        if no_start:
            args.append("--no-start")
        if dry_run:
            args.append("--dry-run")
        if self.yes:
            # Note that for Docker Compose 2.32.x and 2.33.x, the long form is '--y' and not '--yes'.
            # This was fixed in Docker Compose 2.34.0 (https://github.com/docker/compose/releases/tag/v2.34.0).
            args.append(
                "-y" if self.compose_version < LooseVersion("2.34.0") else "--yes"
            )
        args.append("--")
        for service in self.services:
            args.append(service)
        return args
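
    # Sketch of a resulting command line (the base args come from get_base_args()
    # and depend on configuration; shown as "docker compose" for illustration):
    # with pull="always", services=["web"], and scale={"web": 2}, get_up_cmd(False)
    # yields roughly:
    #   docker compose up --detach --no-color --quiet-pull --pull always --scale web=2 -- web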

    def cmd_up(self) -> dict[str, t.Any]:
        result: dict[str, t.Any] = {}
        args = self.get_up_cmd(self.check_mode)
        rc, stdout, stderr = self.client.call_cli(*args, cwd=self.project_src)
        events = self.parse_events(stderr, dry_run=self.check_mode, nonzero_rc=rc != 0)
        self.emit_warnings(events)
        self.update_result(
            result,
            events,
            stdout,
            stderr,
            ignore_service_pull_events=True,
            ignore_build_events=self.ignore_build_events,
        )
        self.update_failed(result, events, args, stdout, stderr, rc)
        return result

    def get_stop_cmd(self, dry_run: bool) -> list[str]:
        args = self.get_base_args() + ["stop"]
        if self.timeout is not None:
            args.extend(["--timeout", f"{self.timeout}"])
        if dry_run:
            args.append("--dry-run")
        args.append("--")
        for service in self.services:
            args.append(service)
        return args

    def _are_containers_stopped(self) -> bool:
        return all(
            container["State"] in ("created", "exited", "stopped", "killed")
            for container in self.list_containers_raw()
        )

    def cmd_stop(self) -> dict[str, t.Any]:
        # Since 'docker compose stop' **always** claims it is stopping containers, even if they are already
        # stopped, we have to make this a bit more complicated.

        result: dict[str, t.Any] = {}
        # Make sure all containers are created
        args_1 = self.get_up_cmd(self.check_mode, no_start=True)
        rc_1, stdout_1, stderr_1 = self.client.call_cli(*args_1, cwd=self.project_src)
        events_1 = self.parse_events(
            stderr_1, dry_run=self.check_mode, nonzero_rc=rc_1 != 0
        )
        self.emit_warnings(events_1)
        self.update_result(
            result,
            events_1,
            stdout_1,
            stderr_1,
            ignore_service_pull_events=True,
            ignore_build_events=self.ignore_build_events,
        )
        is_failed_1 = is_failed(events_1, rc_1)
        if not is_failed_1 and not self._are_containers_stopped():
            # Make sure all containers are stopped
            args_2 = self.get_stop_cmd(self.check_mode)
            rc_2, stdout_2, stderr_2 = self.client.call_cli(
                *args_2, cwd=self.project_src
            )
            events_2 = self.parse_events(
                stderr_2, dry_run=self.check_mode, nonzero_rc=rc_2 != 0
            )
            self.emit_warnings(events_2)
            self.update_result(result, events_2, stdout_2, stderr_2)
        else:
            args_2 = []
            rc_2, stdout_2, stderr_2 = 0, b"", b""
            events_2 = []
        # Compose result
        self.update_failed(
            result,
            events_1 + events_2,
            args_1 if is_failed_1 else args_2,
            stdout_1 if is_failed_1 else stdout_2,
            stderr_1 if is_failed_1 else stderr_2,
            rc_1 if is_failed_1 else rc_2,
        )
        return result

    def get_restart_cmd(self, dry_run: bool) -> list[str]:
        args = self.get_base_args() + ["restart"]
        if not self.dependencies:
            args.append("--no-deps")
        if self.timeout is not None:
            args.extend(["--timeout", f"{self.timeout}"])
        if dry_run:
            args.append("--dry-run")
        args.append("--")
        for service in self.services:
            args.append(service)
        return args

    def cmd_restart(self) -> dict[str, t.Any]:
        result: dict[str, t.Any] = {}
        args = self.get_restart_cmd(self.check_mode)
        rc, stdout, stderr = self.client.call_cli(*args, cwd=self.project_src)
        events = self.parse_events(stderr, dry_run=self.check_mode, nonzero_rc=rc != 0)
        self.emit_warnings(events)
        self.update_result(result, events, stdout, stderr)
        self.update_failed(result, events, args, stdout, stderr, rc)
        return result

    def get_down_cmd(self, dry_run: bool) -> list[str]:
        args = self.get_base_args() + ["down"]
        if self.remove_orphans:
            args.append("--remove-orphans")
        if self.remove_images:
            args.extend(["--rmi", self.remove_images])
        if self.remove_volumes:
            args.append("--volumes")
        if self.timeout is not None:
            args.extend(["--timeout", f"{self.timeout}"])
        if dry_run:
            args.append("--dry-run")
        args.append("--")
        for service in self.services:
            args.append(service)
        return args

    def cmd_down(self) -> dict[str, t.Any]:
        result: dict[str, t.Any] = {}
        args = self.get_down_cmd(self.check_mode)
        rc, stdout, stderr = self.client.call_cli(*args, cwd=self.project_src)
        events = self.parse_events(stderr, dry_run=self.check_mode, nonzero_rc=rc != 0)
        self.emit_warnings(events)
        self.update_result(result, events, stdout, stderr)
        self.update_failed(result, events, args, stdout, stderr, rc)
        return result


def main() -> None:
    argument_spec = {
        "state": {
            "type": "str",
            "default": "present",
            "choices": ["absent", "present", "stopped", "restarted"],
        },
        "dependencies": {"type": "bool", "default": True},
        "pull": {
            "type": "str",
            "choices": ["always", "missing", "never", "policy"],
            "default": "policy",
        },
        "build": {
            "type": "str",
            "choices": ["always", "never", "policy"],
            "default": "policy",
        },
        "recreate": {
            "type": "str",
            "default": "auto",
            "choices": ["always", "never", "auto"],
        },
        "renew_anon_volumes": {"type": "bool", "default": False},
        "remove_images": {"type": "str", "choices": ["all", "local"]},
        "remove_volumes": {"type": "bool", "default": False},
        "remove_orphans": {"type": "bool", "default": False},
        "timeout": {"type": "int"},
        "services": {"type": "list", "elements": "str"},
        "scale": {"type": "dict"},
        "wait": {"type": "bool", "default": False},
        "wait_timeout": {"type": "int"},
        "ignore_build_events": {"type": "bool", "default": True},
        "assume_yes": {"type": "bool", "default": False},
    }
    argspec_ex = common_compose_argspec_ex()
    argument_spec.update(argspec_ex.pop("argspec"))

    client = AnsibleModuleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        needs_api_version=False,
        **argspec_ex,
    )

    try:
        manager = ServicesManager(client)
        result = manager.run()
        manager.cleanup()
        client.module.exit_json(**result)
    except DockerException as e:
        client.fail(
            f"An unexpected Docker error occurred: {e}",
            exception=traceback.format_exc(),
        )


if __name__ == "__main__":
    main()
@@ -0,0 +1,308 @@
|
|||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
module: docker_compose_v2_exec
|
||||
|
||||
short_description: Run command in a container of a Compose service
|
||||
|
||||
version_added: 3.13.0
|
||||
|
||||
description:
|
||||
- Uses Docker Compose to run a command in a service's container.
|
||||
- This can be used to run one-off commands in an existing service's container, and encapsulates C(docker compose exec).
|
||||
extends_documentation_fragment:
|
||||
- community.docker._compose_v2
|
||||
- community.docker._compose_v2.minimum_version
|
||||
- community.docker._docker.cli_documentation
|
||||
- community.docker._attributes
|
||||
- community.docker._attributes.actiongroup_docker
|
||||
|
||||
attributes:
|
||||
check_mode:
|
||||
support: none
|
||||
diff_mode:
|
||||
support: none
|
||||
idempotent:
|
||||
support: N/A
|
||||
details:
|
||||
- Whether the executed command is idempotent depends on the command.
|
||||
|
||||
options:
|
||||
service:
|
||||
description:
|
||||
- The service to run the command in.
|
||||
type: str
|
||||
required: true
|
||||
index:
|
||||
description:
|
||||
- The index of the container to run the command in if the service has multiple replicas.
|
||||
type: int
|
||||
argv:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- The command to execute.
|
||||
- Since this is a list of arguments, no quoting is needed.
|
||||
- Exactly one of O(argv) or O(command) must be specified.
|
||||
command:
|
||||
type: str
|
||||
description:
|
||||
- The command to execute.
|
||||
- Exactly one of O(argv) or O(command) must be specified.
|
||||
chdir:
|
||||
type: str
|
||||
description:
|
||||
- The directory to run the command in.
|
||||
detach:
|
||||
description:
|
||||
- Whether to run the command synchronously (O(detach=false), default) or asynchronously (O(detach=true)).
|
||||
- If set to V(true), O(stdin) cannot be provided, and the return values RV(stdout), RV(stderr), and RV(rc) are not returned.
|
||||
type: bool
|
||||
default: false
|
||||
user:
|
||||
type: str
|
||||
description:
|
||||
- If specified, the user to execute this command with.
|
||||
stdin:
|
||||
type: str
|
||||
description:
|
||||
- Set the stdin of the command directly to the specified value.
|
||||
- Can only be used if O(detach=false).
|
||||
stdin_add_newline:
|
||||
type: bool
|
||||
default: true
|
||||
description:
|
||||
- If set to V(true), appends a newline to O(stdin).
|
||||
strip_empty_ends:
|
||||
type: bool
|
||||
default: true
|
||||
description:
|
||||
- Strip empty lines from the end of stdout/stderr in result.
|
||||
privileged:
|
||||
type: bool
|
||||
default: false
|
||||
description:
|
||||
- Whether to give extended privileges to the process.
|
||||
tty:
|
||||
type: bool
|
||||
default: true
|
||||
description:
|
||||
- Whether to allocate a TTY.
|
||||
env:
|
||||
description:
|
||||
- Dictionary of environment variables with their respective values to be passed to the command run inside the container.
|
||||
- Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (for example V("true"))
|
||||
in order to avoid data loss.
|
||||
- Please note that if you are passing values in with Jinja2 templates, like V("{{ value }}"), you need to add V(| string)
|
||||
to prevent Ansible from converting strings such as V("true") back to booleans. The correct way is to use V("{{ value |
|
||||
string }}").
|
||||
type: dict
|
||||
|
||||
author:
|
||||
- Felix Fontein (@felixfontein)
|
||||
|
||||
seealso:
|
||||
- module: community.docker.docker_compose_v2
|
||||
|
||||
notes:
|
||||
- If you need to evaluate environment variables of the container in O(command) or O(argv), you need to pass the command
|
||||
through a shell, like O(command=/bin/sh -c "echo $ENV_VARIABLE"). The same needs to be done in case you want to use glob patterns
|
||||
or other shell features such as redirects.
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
---
|
||||
- name: Run a simple command (command)
|
||||
community.docker.docker_compose_v2_exec:
|
||||
service: foo
|
||||
command: /bin/bash -c "ls -lah"
|
||||
chdir: /root
|
||||
register: result
|
||||
|
||||
- name: Print stdout
|
||||
ansible.builtin.debug:
|
||||
var: result.stdout
|
||||
|
||||
- name: Run a simple command (argv)
|
||||
community.docker.docker_compose_v2_exec:
|
||||
service: foo
|
||||
argv:
|
||||
- /bin/bash
|
||||
- "-c"
|
||||
- "ls -lah > /dev/stderr"
|
||||
chdir: /root
|
||||
register: result
|
||||
|
||||
- name: Print stderr lines
|
||||
ansible.builtin.debug:
|
||||
var: result.stderr_lines
|
||||
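
# As the notes above describe, container environment variables are only
# evaluated when the command is passed through a shell. A minimal sketch of
# that pattern; the service name and ENV_VARIABLE are illustrative
# assumptions.
- name: Evaluate an environment variable of the container
  community.docker.docker_compose_v2_exec:
    service: foo
    command: /bin/sh -c "echo $ENV_VARIABLE"
  register: result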
"""
|
||||
|
||||
RETURN = r"""
|
||||
stdout:
|
||||
type: str
|
||||
returned: success and O(detach=false)
|
||||
description:
|
||||
- The standard output of the container command.
|
||||
stderr:
|
||||
type: str
|
||||
returned: success and O(detach=false)
|
||||
description:
|
||||
- The standard error output of the container command.
|
||||
rc:
|
||||
type: int
|
||||
returned: success and O(detach=false)
|
||||
sample: 0
|
||||
description:
|
||||
- The exit code of the command.
|
||||
"""
|
||||
|
||||
import shlex
|
||||
import traceback
|
||||
import typing as t
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_text
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._common_cli import (
|
||||
AnsibleModuleDockerClient,
|
||||
DockerException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._compose_v2 import (
|
||||
BaseComposeManager,
|
||||
common_compose_argspec_ex,
|
||||
)
|
||||
|
||||
|
||||
class ExecManager(BaseComposeManager):
|
||||
def __init__(self, client: AnsibleModuleDockerClient) -> None:
|
||||
super().__init__(client)
|
||||
parameters = self.client.module.params
|
||||
|
||||
self.service: str = parameters["service"]
|
||||
self.index: int | None = parameters["index"]
|
||||
self.chdir: str | None = parameters["chdir"]
|
||||
self.detach: bool = parameters["detach"]
|
||||
self.user: str | None = parameters["user"]
|
||||
self.stdin: str | None = parameters["stdin"]
|
||||
self.strip_empty_ends: bool = parameters["strip_empty_ends"]
|
||||
self.privileged: bool = parameters["privileged"]
|
||||
self.tty: bool = parameters["tty"]
|
||||
self.env: dict[str, t.Any] = parameters["env"]
|
||||
|
||||
self.argv: list[str]
|
||||
if parameters["command"] is not None:
|
||||
self.argv = shlex.split(parameters["command"])
|
||||
else:
|
||||
self.argv = parameters["argv"]
|
||||
|
||||
if self.detach and self.stdin is not None:
|
||||
self.fail("If detach=true, stdin cannot be provided.")
|
||||
|
||||
stdin_add_newline: bool = parameters["stdin_add_newline"]
|
||||
if self.stdin is not None and stdin_add_newline:
|
||||
self.stdin += "\n"
|
||||
|
||||
if self.env is not None:
|
||||
for name, value in self.env.items():
|
||||
if not isinstance(value, str):
|
||||
self.fail(
|
||||
"Non-string value found for env option. Ambiguous env options must be "
|
||||
"wrapped in quotes to avoid them being interpreted when directly specified "
|
||||
"in YAML, or explicitly converted to strings when the option is templated. "
|
||||
f"Key: {name}"
|
||||
)
|
||||
|
||||
def get_exec_cmd(self, dry_run: bool) -> list[str]:
|
||||
args = self.get_base_args(plain_progress=True) + ["exec"]
|
||||
if self.index is not None:
|
||||
args.extend(["--index", str(self.index)])
|
||||
if self.chdir is not None:
|
||||
args.extend(["--workdir", self.chdir])
|
||||
if self.detach:
|
||||
args.extend(["--detach"])
|
||||
if self.user is not None:
|
||||
args.extend(["--user", self.user])
|
||||
if self.privileged:
|
||||
args.append("--privileged")
|
||||
if not self.tty:
|
||||
args.append("--no-TTY")
|
||||
if self.env:
|
||||
for name, value in list(self.env.items()):
|
||||
args.append("--env")
|
||||
args.append(f"{name}={value}")
|
||||
args.append("--")
|
||||
args.append(self.service)
|
||||
args.extend(self.argv)
|
||||
return args
|
||||
|
||||
def run(self) -> dict[str, t.Any]:
|
||||
args = self.get_exec_cmd(self.check_mode)
|
||||
kwargs: dict[str, t.Any] = {
|
||||
"cwd": self.project_src,
|
||||
}
|
||||
if self.stdin is not None:
|
||||
kwargs["data"] = self.stdin.encode("utf-8")
|
||||
if self.detach:
|
||||
kwargs["check_rc"] = True
|
||||
rc, stdout_b, stderr_b = self.client.call_cli(*args, **kwargs)
|
||||
if self.detach:
|
||||
return {}
|
||||
stdout = to_text(stdout_b)
|
||||
stderr = to_text(stderr_b)
|
||||
if self.strip_empty_ends:
|
||||
stdout = stdout.rstrip("\r\n")
|
||||
stderr = stderr.rstrip("\r\n")
|
||||
return {
|
||||
"changed": True,
|
||||
"rc": rc,
|
||||
"stdout": stdout,
|
||||
"stderr": stderr,
|
||||
}
|
||||
|
||||
|
||||
def main() -> None:
|
||||
argument_spec = {
|
||||
"service": {"type": "str", "required": True},
|
||||
"index": {"type": "int"},
|
||||
"argv": {"type": "list", "elements": "str"},
|
||||
"command": {"type": "str"},
|
||||
"chdir": {"type": "str"},
|
||||
"detach": {"type": "bool", "default": False},
|
||||
"user": {"type": "str"},
|
||||
"stdin": {"type": "str"},
|
||||
"stdin_add_newline": {"type": "bool", "default": True},
|
||||
"strip_empty_ends": {"type": "bool", "default": True},
|
||||
"privileged": {"type": "bool", "default": False},
|
||||
"tty": {"type": "bool", "default": True},
|
||||
"env": {"type": "dict"},
|
||||
}
|
||||
argspec_ex = common_compose_argspec_ex()
|
||||
argument_spec.update(argspec_ex.pop("argspec"))
|
||||
|
||||
client = AnsibleModuleDockerClient(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=False,
|
||||
needs_api_version=False,
|
||||
**argspec_ex,
|
||||
)
|
||||
|
||||
try:
|
||||
manager = ExecManager(client)
|
||||
result = manager.run()
|
||||
manager.cleanup()
|
||||
client.module.exit_json(**result)
|
||||
except DockerException as e:
|
||||
client.fail(
|
||||
f"An unexpected Docker error occurred: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@@ -0,0 +1,216 @@
|
|||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
module: docker_compose_v2_pull
|
||||
|
||||
short_description: Pull a Docker compose project
|
||||
|
||||
version_added: 3.6.0
|
||||
|
||||
description:
|
||||
- Uses Docker Compose to pull images for a project.
|
||||
extends_documentation_fragment:
|
||||
- community.docker._compose_v2
|
||||
- community.docker._compose_v2.minimum_version
|
||||
- community.docker._docker.cli_documentation
|
||||
- community.docker._attributes
|
||||
- community.docker._attributes.actiongroup_docker
|
||||
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
details:
|
||||
- If O(policy=always), the module will always indicate a change. Docker Compose does not give any information on whether
|
||||
pulling would update the image or not.
|
||||
diff_mode:
|
||||
support: none
|
||||
idempotent:
|
||||
support: full
|
||||
|
||||
options:
|
||||
policy:
|
||||
description:
|
||||
- Whether to pull images before running. This is used when C(docker compose up) is run.
|
||||
- V(always) ensures that the images are always pulled, even when already present on the Docker daemon.
|
||||
- V(missing) only pulls them when they are not present on the Docker daemon. This is only supported since Docker Compose
|
||||
2.22.0.
|
||||
type: str
|
||||
choices:
|
||||
- always
|
||||
- missing
|
||||
default: always
|
||||
ignore_buildable:
|
||||
description:
|
||||
- If set to V(true), the module will not pull images that can be built.
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 3.12.0
|
||||
include_deps:
|
||||
description:
|
||||
- If set to V(true), also pull services that are declared as dependencies.
|
||||
- This only makes sense if O(services) is used.
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 3.12.0
|
||||
services:
|
||||
description:
|
||||
- Specifies a subset of services to be targeted.
|
||||
type: list
|
||||
elements: str
|
||||
version_added: 3.12.0
|
||||
|
||||
author:
|
||||
- Felix Fontein (@felixfontein)
|
||||
|
||||
seealso:
|
||||
- module: community.docker.docker_compose_v2
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
---
|
||||
- name: Pull images for flask project
|
||||
community.docker.docker_compose_v2_pull:
|
||||
project_src: /path/to/flask
|
||||
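
# A sketch combining the options documented above: pull only missing images
# (requires Docker Compose 2.22.0+) for a subset of services, including their
# declared dependencies. The project path and service names are illustrative
# assumptions.
- name: Pull only missing images for selected services and their dependencies
  community.docker.docker_compose_v2_pull:
    project_src: /path/to/flask
    policy: missing
    include_deps: true
    services:
      - web
      - worker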
"""
|
||||
|
||||
RETURN = r"""
|
||||
actions:
|
||||
description:
|
||||
- A list of actions that have been applied.
|
||||
returned: success
|
||||
type: list
|
||||
elements: dict
|
||||
contains:
|
||||
what:
|
||||
description:
|
||||
- What kind of resource was changed.
|
||||
type: str
|
||||
sample: container
|
||||
choices:
|
||||
- image
|
||||
- unknown
|
||||
id:
|
||||
description:
|
||||
- The ID of the resource that was changed.
|
||||
type: str
|
||||
sample: container
|
||||
status:
|
||||
description:
|
||||
- The status change that happened.
|
||||
type: str
|
||||
sample: Pulling
|
||||
choices:
|
||||
- Pulling
|
||||
"""
|
||||
|
||||
import traceback
|
||||
import typing as t
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._common_cli import (
|
||||
AnsibleModuleDockerClient,
|
||||
DockerException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._compose_v2 import (
|
||||
BaseComposeManager,
|
||||
common_compose_argspec_ex,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._version import (
|
||||
LooseVersion,
|
||||
)
|
||||
|
||||
|
||||
class PullManager(BaseComposeManager):
|
||||
def __init__(self, client: AnsibleModuleDockerClient) -> None:
|
||||
super().__init__(client)
|
||||
parameters = self.client.module.params
|
||||
|
||||
self.policy: t.Literal["always", "missing"] = parameters["policy"]
|
||||
self.ignore_buildable: bool = parameters["ignore_buildable"]
|
||||
self.include_deps: bool = parameters["include_deps"]
|
||||
self.services: list[str] = parameters["services"] or []
|
||||
|
||||
if self.policy != "always" and self.compose_version < LooseVersion("2.22.0"):
|
||||
# https://github.com/docker/compose/pull/10981 - 2.22.0
|
||||
self.fail(
|
||||
f"A pull policy other than always is only supported since Docker Compose 2.22.0. {self.client.get_cli()} has version {self.compose_version}"
|
||||
)
|
||||
if self.ignore_buildable and self.compose_version < LooseVersion("2.15.0"):
|
||||
# https://github.com/docker/compose/pull/10134 - 2.15.0
|
||||
self.fail(
|
||||
f"--ignore-buildable is only supported since Docker Compose 2.15.0. {self.client.get_cli()} has version {self.compose_version}"
|
||||
)
|
||||
|
||||
def get_pull_cmd(self, dry_run: bool) -> list[str]:
|
||||
args = self.get_base_args() + ["pull"]
|
||||
if self.policy != "always":
|
||||
args.extend(["--policy", self.policy])
|
||||
if self.ignore_buildable:
|
||||
args.append("--ignore-buildable")
|
||||
if self.include_deps:
|
||||
args.append("--include-deps")
|
||||
if dry_run:
|
||||
args.append("--dry-run")
|
||||
args.append("--")
|
||||
for service in self.services:
|
||||
args.append(service)
|
||||
return args
|
||||
|
||||
def run(self) -> dict[str, t.Any]:
|
||||
result: dict[str, t.Any] = {}
|
||||
args = self.get_pull_cmd(self.check_mode)
|
||||
rc, stdout, stderr = self.client.call_cli(*args, cwd=self.project_src)
|
||||
events = self.parse_events(stderr, dry_run=self.check_mode, nonzero_rc=rc != 0)
|
||||
self.emit_warnings(events)
|
||||
self.update_result(
|
||||
result,
|
||||
events,
|
||||
stdout,
|
||||
stderr,
|
||||
ignore_service_pull_events=self.policy != "missing" and not self.check_mode,
|
||||
)
|
||||
self.update_failed(result, events, args, stdout, stderr, rc)
|
||||
self.cleanup_result(result)
|
||||
return result
|
||||
|
||||
|
||||
def main() -> None:
|
||||
argument_spec = {
|
||||
"policy": {
|
||||
"type": "str",
|
||||
"choices": ["always", "missing"],
|
||||
"default": "always",
|
||||
},
|
||||
"ignore_buildable": {"type": "bool", "default": False},
|
||||
"include_deps": {"type": "bool", "default": False},
|
||||
"services": {"type": "list", "elements": "str"},
|
||||
}
|
||||
argspec_ex = common_compose_argspec_ex()
|
||||
argument_spec.update(argspec_ex.pop("argspec"))
|
||||
|
||||
client = AnsibleModuleDockerClient(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
needs_api_version=False,
|
||||
**argspec_ex,
|
||||
)
|
||||
|
||||
try:
|
||||
manager = PullManager(client)
|
||||
result = manager.run()
|
||||
manager.cleanup()
|
||||
client.module.exit_json(**result)
|
||||
except DockerException as e:
|
||||
client.fail(
|
||||
f"An unexpected docker error occurred: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@@ -0,0 +1,441 @@
|
|||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
module: docker_compose_v2_run
|
||||
|
||||
short_description: Run command in a new container of a Compose service
|
||||
|
||||
version_added: 3.13.0
|
||||
|
||||
description:
|
||||
- Uses Docker Compose to run a command in a new container for a service.
|
||||
- This encapsulates C(docker compose run).
|
||||
extends_documentation_fragment:
|
||||
- community.docker._compose_v2
|
||||
- community.docker._compose_v2.minimum_version
|
||||
- community.docker._docker.cli_documentation
|
||||
- community.docker._attributes
|
||||
- community.docker._attributes.actiongroup_docker
|
||||
|
||||
attributes:
|
||||
check_mode:
|
||||
support: none
|
||||
diff_mode:
|
||||
support: none
|
||||
idempotent:
|
||||
support: N/A
|
||||
details:
|
||||
- Whether the executed command is idempotent depends on the command.
|
||||
|
||||
options:
|
||||
service:
|
||||
description:
|
||||
- The service to run the command in.
|
||||
type: str
|
||||
required: true
|
||||
argv:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- The command to execute.
|
||||
- Since this is a list of arguments, no quoting is needed.
|
||||
- O(argv) and O(command) are mutually exclusive.
|
||||
command:
|
||||
type: str
|
||||
description:
|
||||
- The command to execute.
|
||||
- O(argv) and O(command) are mutually exclusive.
|
||||
build:
|
||||
description:
|
||||
- Build image before starting container.
|
||||
- Note that building can insert information into RV(stdout) or RV(stderr).
|
||||
type: bool
|
||||
default: false
|
||||
cap_add:
|
||||
description:
|
||||
- Linux capabilities to add to the container.
|
||||
type: list
|
||||
elements: str
|
||||
cap_drop:
|
||||
description:
|
||||
- Linux capabilities to drop from the container.
|
||||
type: list
|
||||
elements: str
|
||||
entrypoint:
|
||||
description:
|
||||
- Override the entrypoint of the container image.
|
||||
type: str
|
||||
interactive:
|
||||
description:
|
||||
- Whether to keep STDIN open even if not attached.
|
||||
type: bool
|
||||
default: true
|
||||
labels:
|
||||
description:
|
||||
- Add or override labels on the container.
|
||||
type: list
|
||||
elements: str
|
||||
name:
|
||||
description:
|
||||
- Assign a name to the container.
|
||||
type: str
|
||||
no_deps:
|
||||
description:
|
||||
- Do not start linked services.
|
||||
type: bool
|
||||
default: false
|
||||
publish:
|
||||
description:
|
||||
- Publish a container's port(s) to the host.
|
||||
type: list
|
||||
elements: str
|
||||
quiet_pull:
|
||||
description:
|
||||
- Pull without printing progress information.
|
||||
- Note that pulling can insert information into RV(stdout) or RV(stderr).
|
||||
type: bool
|
||||
default: false
|
||||
remove_orphans:
|
||||
description:
|
||||
- Remove containers for services not defined in the Compose file.
|
||||
type: bool
|
||||
default: false
|
||||
cleanup:
|
||||
description:
|
||||
- Automatically remove the container when it exits.
|
||||
- Corresponds to the C(--rm) option of C(docker compose run).
|
||||
type: bool
|
||||
default: false
|
||||
service_ports:
|
||||
description:
|
||||
- Run command with all service's ports enabled and mapped to the host.
|
||||
type: bool
|
||||
default: false
|
||||
use_aliases:
|
||||
description:
|
||||
- Use the service's network aliases in the network(s) the container connects to.
|
||||
type: bool
|
||||
default: false
|
||||
volumes:
|
||||
description:
|
||||
- Bind mount one or more volumes.
|
||||
type: list
|
||||
elements: str
|
||||
chdir:
|
||||
type: str
|
||||
description:
|
||||
- The directory to run the command in.
|
||||
detach:
|
||||
description:
|
||||
- Whether to run the command synchronously (O(detach=false), default) or asynchronously (O(detach=true)).
|
||||
- If set to V(true), O(stdin) cannot be provided, and the return values RV(stdout), RV(stderr), and RV(rc) are not returned.
|
||||
Instead, the return value RV(container_id) is provided.
|
||||
type: bool
|
||||
default: false
|
||||
user:
|
||||
type: str
|
||||
description:
|
||||
- If specified, the user to execute this command with.
|
||||
stdin:
|
||||
type: str
|
||||
description:
|
||||
- Set the stdin of the command directly to the specified value.
|
||||
- Can only be used if O(detach=false).
|
||||
stdin_add_newline:
|
||||
type: bool
|
||||
default: true
|
||||
description:
|
||||
- If set to V(true), appends a newline to O(stdin).
|
||||
strip_empty_ends:
|
||||
type: bool
|
||||
default: true
|
||||
description:
|
||||
- Strip empty lines from the end of stdout/stderr in result.
|
||||
tty:
|
||||
type: bool
|
||||
default: true
|
||||
description:
|
||||
- Whether to allocate a TTY.
|
||||
env:
|
||||
description:
|
||||
- Dictionary of environment variables with their respective values to be passed to the command run inside the container.
|
||||
- Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (for example V("true"))
|
||||
in order to avoid data loss.
|
||||
- Please note that if you are passing values in with Jinja2 templates, like V("{{ value }}"), you need to add V(| string)
|
||||
to prevent Ansible from converting strings such as V("true") back to booleans. The correct way is to use V("{{ value |
|
||||
string }}").
|
||||
type: dict
|
||||
|
||||
author:
|
||||
- Felix Fontein (@felixfontein)
|
||||
|
||||
seealso:
|
||||
- module: community.docker.docker_compose_v2
|
||||
|
||||
notes:
|
||||
- If you need to evaluate environment variables of the container in O(command) or O(argv), you need to pass the command
|
||||
through a shell, like O(command=/bin/sh -c "echo $ENV_VARIABLE"). The same needs to be done in case you want to use glob patterns
|
||||
or other shell features such as redirects.
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
---
|
||||
- name: Run a simple command (command)
|
||||
community.docker.docker_compose_v2_run:
|
||||
service: foo
|
||||
command: /bin/bash -c "ls -lah"
|
||||
chdir: /root
|
||||
register: result
|
||||
|
||||
- name: Print stdout
|
||||
ansible.builtin.debug:
|
||||
var: result.stdout
|
||||
|
||||
- name: Run a simple command (argv)
|
||||
community.docker.docker_compose_v2_run:
|
||||
service: foo
|
||||
argv:
|
||||
- /bin/bash
|
||||
- "-c"
|
||||
- "ls -lah > /dev/stderr"
|
||||
chdir: /root
|
||||
register: result
|
||||
|
||||
- name: Print stderr lines
|
||||
ansible.builtin.debug:
|
||||
var: result.stderr_lines
|
||||
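
# A sketch of the detach/cleanup behavior documented above: with detach=true
# the module returns container_id instead of stdout/stderr/rc, and
# cleanup=true passes --rm so the container is removed when it exits. The
# service name and command are illustrative assumptions.
- name: Start a one-off container in the background and remove it on exit
  community.docker.docker_compose_v2_run:
    service: foo
    argv:
      - /bin/sleep
      - "3600"
    detach: true
    cleanup: true
  register: result

- name: Print the ID of the created container
  ansible.builtin.debug:
    var: result.container_id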
"""
|
||||
|
||||
RETURN = r"""
|
||||
container_id:
|
||||
type: str
|
||||
returned: success and O(detach=true)
|
||||
description:
|
||||
- The ID of the created container.
|
||||
stdout:
|
||||
type: str
|
||||
returned: success and O(detach=false)
|
||||
description:
|
||||
- The standard output of the container command.
|
||||
stderr:
|
||||
type: str
|
||||
returned: success and O(detach=false)
|
||||
description:
|
||||
- The standard error output of the container command.
|
||||
rc:
|
||||
type: int
|
||||
returned: success and O(detach=false)
|
||||
sample: 0
|
||||
description:
|
||||
- The exit code of the command.
|
||||
"""
|
||||
|
||||
import shlex
|
||||
import traceback
|
||||
import typing as t
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_text
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._common_cli import (
|
||||
AnsibleModuleDockerClient,
|
||||
DockerException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._compose_v2 import (
|
||||
BaseComposeManager,
|
||||
common_compose_argspec_ex,
|
||||
)
|
||||
|
||||
|
||||
class ExecManager(BaseComposeManager):
|
||||
def __init__(self, client: AnsibleModuleDockerClient) -> None:
|
||||
super().__init__(client)
|
||||
parameters = self.client.module.params
|
||||
|
||||
self.service: str = parameters["service"]
|
||||
self.build: bool = parameters["build"]
|
||||
self.cap_add: list[str] | None = parameters["cap_add"]
|
||||
self.cap_drop: list[str] | None = parameters["cap_drop"]
|
||||
self.entrypoint: str | None = parameters["entrypoint"]
|
||||
self.interactive: bool = parameters["interactive"]
|
||||
self.labels: list[str] | None = parameters["labels"]
|
||||
self.name: str | None = parameters["name"]
|
||||
self.no_deps: bool = parameters["no_deps"]
|
||||
self.publish: list[str] | None = parameters["publish"]
|
||||
self.quiet_pull: bool = parameters["quiet_pull"]
|
||||
self.remove_orphans: bool = parameters["remove_orphans"]
|
||||
self.do_cleanup: bool = parameters["cleanup"]
|
||||
self.service_ports: bool = parameters["service_ports"]
|
||||
self.use_aliases: bool = parameters["use_aliases"]
|
||||
self.volumes: list[str] | None = parameters["volumes"]
|
||||
self.chdir: str | None = parameters["chdir"]
|
||||
self.detach: bool = parameters["detach"]
|
||||
self.user: str | None = parameters["user"]
|
||||
self.stdin: str | None = parameters["stdin"]
|
||||
self.strip_empty_ends: bool = parameters["strip_empty_ends"]
|
||||
self.tty: bool = parameters["tty"]
|
||||
self.env: dict[str, t.Any] | None = parameters["env"]
|
||||
|
||||
self.argv: list[str]
|
||||
if parameters["command"] is not None:
|
||||
self.argv = shlex.split(parameters["command"])
|
||||
else:
|
||||
self.argv = parameters["argv"]
|
||||
|
||||
if self.detach and self.stdin is not None:
|
||||
self.fail("If detach=true, stdin cannot be provided.")
|
||||
|
||||
stdin_add_newline: bool = parameters["stdin_add_newline"]
|
||||
if self.stdin is not None and stdin_add_newline:
|
||||
self.stdin += "\n"
|
||||
|
||||
if self.env is not None:
|
||||
for name, value in self.env.items():
|
||||
if not isinstance(value, str):
|
||||
self.fail(
|
||||
"Non-string value found for env option. Ambiguous env options must be "
|
||||
"wrapped in quotes to avoid them being interpreted when directly specified "
|
||||
"in YAML, or explicitly converted to strings when the option is templated. "
|
||||
f"Key: {name}"
|
||||
)
|
||||
|
||||
def get_run_cmd(self, dry_run: bool) -> list[str]:
|
||||
args = self.get_base_args(plain_progress=True) + ["run"]
|
||||
if self.build:
|
||||
args.append("--build")
|
||||
if self.cap_add:
|
||||
for cap in self.cap_add:
|
||||
args.extend(["--cap-add", cap])
|
||||
if self.cap_drop:
|
||||
for cap in self.cap_drop:
|
||||
args.extend(["--cap-drop", cap])
|
||||
if self.entrypoint is not None:
|
||||
args.extend(["--entrypoint", self.entrypoint])
|
||||
if not self.interactive:
|
||||
args.append("--no-interactive")
|
||||
if self.labels:
|
||||
for label in self.labels:
|
||||
args.extend(["--label", label])
|
||||
if self.name is not None:
|
||||
args.extend(["--name", self.name])
|
||||
if self.no_deps:
|
||||
args.append("--no-deps")
|
||||
if self.publish:
|
||||
for publish in self.publish:
|
||||
args.extend(["--publish", publish])
|
||||
if self.quiet_pull:
|
||||
args.append("--quiet-pull")
|
||||
if self.remove_orphans:
|
||||
args.append("--remove-orphans")
|
||||
if self.do_cleanup:
|
||||
args.append("--rm")
|
||||
if self.service_ports:
|
||||
args.append("--service-ports")
|
||||
if self.use_aliases:
|
||||
args.append("--use-aliases")
|
||||
if self.volumes:
|
||||
for volume in self.volumes:
|
||||
args.extend(["--volume", volume])
|
||||
if self.chdir is not None:
|
||||
args.extend(["--workdir", self.chdir])
|
||||
if self.detach:
|
||||
args.extend(["--detach"])
|
||||
if self.user is not None:
|
||||
args.extend(["--user", self.user])
|
||||
if not self.tty:
|
||||
args.append("--no-TTY")
|
||||
if self.env:
|
||||
for name, value in list(self.env.items()):
|
||||
args.append("--env")
|
||||
args.append(f"{name}={value}")
|
||||
args.append("--")
|
||||
args.append(self.service)
|
||||
if self.argv:
|
||||
args.extend(self.argv)
|
||||
return args
|
||||
|
||||
def run(self) -> dict[str, t.Any]:
|
||||
args = self.get_run_cmd(self.check_mode)
|
||||
kwargs: dict[str, t.Any] = {
|
||||
"cwd": self.project_src,
|
||||
}
|
||||
if self.stdin is not None:
|
||||
kwargs["data"] = self.stdin.encode("utf-8")
|
||||
if self.detach:
|
||||
kwargs["check_rc"] = True
|
||||
rc, stdout_b, stderr_b = self.client.call_cli(*args, **kwargs)
|
||||
if self.detach:
|
||||
return {
|
||||
"container_id": to_text(stdout_b.strip()),
|
||||
}
|
||||
stdout = to_text(stdout_b)
|
||||
stderr = to_text(stderr_b)
|
||||
if self.strip_empty_ends:
|
||||
stdout = stdout.rstrip("\r\n")
|
||||
stderr = stderr.rstrip("\r\n")
|
||||
return {
|
||||
"changed": True,
|
||||
"rc": rc,
|
||||
"stdout": stdout,
|
||||
"stderr": stderr,
|
||||
}
|
||||
|
||||
|
||||
def main() -> None:
|
||||
argument_spec = {
|
||||
"service": {"type": "str", "required": True},
|
||||
"argv": {"type": "list", "elements": "str"},
|
||||
"command": {"type": "str"},
|
||||
"build": {"type": "bool", "default": False},
|
||||
"cap_add": {"type": "list", "elements": "str"},
|
||||
"cap_drop": {"type": "list", "elements": "str"},
|
||||
"entrypoint": {"type": "str"},
|
||||
"interactive": {"type": "bool", "default": True},
|
||||
"labels": {"type": "list", "elements": "str"},
|
||||
"name": {"type": "str"},
|
||||
"no_deps": {"type": "bool", "default": False},
|
||||
"publish": {"type": "list", "elements": "str"},
|
||||
"quiet_pull": {"type": "bool", "default": False},
|
||||
"remove_orphans": {"type": "bool", "default": False},
|
||||
"cleanup": {"type": "bool", "default": False},
|
||||
"service_ports": {"type": "bool", "default": False},
|
||||
"use_aliases": {"type": "bool", "default": False},
|
||||
"volumes": {"type": "list", "elements": "str"},
|
||||
"chdir": {"type": "str"},
|
||||
"detach": {"type": "bool", "default": False},
|
||||
"user": {"type": "str"},
|
||||
"stdin": {"type": "str"},
|
||||
"stdin_add_newline": {"type": "bool", "default": True},
|
||||
"strip_empty_ends": {"type": "bool", "default": True},
|
||||
"tty": {"type": "bool", "default": True},
|
||||
"env": {"type": "dict"},
|
||||
}
|
||||
argspec_ex = common_compose_argspec_ex()
|
||||
argument_spec.update(argspec_ex.pop("argspec"))
|
||||
|
||||
client = AnsibleModuleDockerClient(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=False,
|
||||
needs_api_version=False,
|
||||
**argspec_ex,
|
||||
)
|
||||
|
||||
try:
|
||||
manager = ExecManager(client)
|
||||
result = manager.run()
|
||||
manager.cleanup()
|
||||
client.module.exit_json(**result)
|
||||
except DockerException as e:
|
||||
client.fail(
|
||||
f"An unexpected Docker error occurred: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@@ -0,0 +1,447 @@
|
|||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
module: docker_config
|
||||
|
||||
short_description: Manage docker configs
|
||||
|
||||
description:
|
||||
- Create and remove Docker configs in a Swarm environment. Similar to C(docker config create) and C(docker config rm).
|
||||
- Adds to the metadata of new configs C(ansible_key), a hashed representation of the data, which is then used in
|
||||
future runs to test if a config has changed. If C(ansible_key) is not present, then a config will not be updated unless
|
||||
the O(force) option is set.
|
||||
- Updates to configs are performed by removing the config and creating it again.
|
||||
extends_documentation_fragment:
|
||||
- community.docker._docker
|
||||
- community.docker._docker.docker_py_2_documentation
|
||||
- community.docker._attributes
|
||||
- community.docker._attributes.actiongroup_docker
|
||||
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
idempotent:
|
||||
support: partial
|
||||
details:
|
||||
- If O(force=true) the module is not idempotent.
|
||||
|
||||
options:
|
||||
data:
|
||||
description:
|
||||
- The value of the config.
|
||||
- Mutually exclusive with O(data_src). One of O(data) and O(data_src) is required if O(state=present).
|
||||
type: str
|
||||
data_is_b64:
|
||||
description:
|
||||
- If set to V(true), the data is assumed to be Base64 encoded and will be decoded before being used.
|
||||
- To use binary O(data), it is better to keep it Base64 encoded and let it be decoded by this option.
|
||||
type: bool
|
||||
default: false
|
||||
data_src:
|
||||
description:
|
||||
- The file on the target from which to read the config.
|
||||
- Mutually exclusive with O(data). One of O(data) and O(data_src) is required if O(state=present).
|
||||
type: path
|
||||
version_added: 1.10.0
|
||||
labels:
|
||||
description:
|
||||
- A map of key:value metadata, where both the C(key) and C(value) are expected to be strings.
|
||||
- If new meta data is provided, or existing meta data is modified, the config will be updated by removing it and creating
|
||||
it again.
|
||||
type: dict
|
||||
force:
|
||||
description:
|
||||
- Use with O(state=present) to always remove and recreate an existing config.
|
||||
- If V(true), an existing config will be replaced, even if it has not been changed.
|
||||
type: bool
|
||||
default: false
|
||||
rolling_versions:
|
||||
description:
|
||||
- If set to V(true), configs are created with an increasing version number appended to their name.
|
||||
- Adds a label containing the version number to the managed configs with the name C(ansible_version).
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 2.2.0
|
||||
versions_to_keep:
|
||||
description:
|
||||
- When using O(rolling_versions), the number of old versions of the config to keep.
|
||||
- Extraneous old configs are deleted after the new one is created.
|
||||
- Set to V(-1) to keep everything, or to V(0) or V(1) to keep only the current one.
|
||||
type: int
|
||||
default: 5
|
||||
version_added: 2.2.0
|
||||
name:
|
||||
description:
|
||||
- The name of the config.
|
||||
type: str
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- Set to V(present) if the config should exist, or to V(absent) if it should not.
|
||||
type: str
|
||||
default: present
|
||||
choices:
|
||||
- absent
|
||||
- present
|
||||
template_driver:
|
||||
description:
|
||||
- Set to V(golang) to use a Go template in O(data) or a Go template file in O(data_src).
|
||||
type: str
|
||||
choices:
|
||||
- golang
|
||||
version_added: 2.5.0
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.6.0"
|
||||
- "Docker API >= 1.30"
|
||||
|
||||
author:
|
||||
- Chris Houseknecht (@chouseknecht)
|
||||
- John Hu (@ushuz)
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
---
|
||||
- name: Create config foo (from a file on the control machine)
|
||||
community.docker.docker_config:
|
||||
name: foo
|
||||
# If the file is JSON or binary, Ansible might modify it (because
|
||||
# it is first decoded and later re-encoded). Base64-encoding the
|
||||
# file directly after reading it prevents this from happening.
|
||||
data: "{{ lookup('file', '/path/to/config/file') | b64encode }}"
|
||||
data_is_b64: true
|
||||
state: present
|
||||
|
||||
- name: Create config foo (from a file on the target machine)
|
||||
community.docker.docker_config:
|
||||
name: foo
|
||||
data_src: /path/to/config/file
|
||||
state: present
|
||||
|
||||
- name: Change the config data
|
||||
community.docker.docker_config:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
labels:
|
||||
bar: baz
|
||||
one: '1'
|
||||
state: present
|
||||
|
||||
- name: Add a new label
|
||||
community.docker.docker_config:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
labels:
|
||||
bar: baz
|
||||
one: '1'
|
||||
# Adding a new label will cause a remove/create of the config
|
||||
two: '2'
|
||||
state: present
|
||||
|
||||
- name: No change
|
||||
community.docker.docker_config:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
labels:
|
||||
bar: baz
|
||||
one: '1'
|
||||
# Even though 'two' is missing, there is no change to the existing config
|
||||
state: present
|
||||
|
||||
- name: Update an existing label
|
||||
community.docker.docker_config:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
labels:
|
||||
bar: monkey # Changing a label will cause a remove/create of the config
|
||||
one: '1'
|
||||
state: present
|
||||
|
||||
- name: Force the (re-)creation of the config
|
||||
community.docker.docker_config:
|
||||
name: foo
|
||||
data: Goodnight everyone!
|
||||
force: true
|
||||
state: present
|
||||
|
||||
- name: Remove config foo
|
||||
community.docker.docker_config:
|
||||
name: foo
|
||||
state: absent
|
||||
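
# A sketch of the rolling_versions behavior documented above: each change
# creates foo_v1, foo_v2, and so on, and old versions beyond versions_to_keep
# are removed. The config name and data are illustrative assumptions.
- name: Create a versioned config and keep the last three versions
  community.docker.docker_config:
    name: foo
    data: Goodnight everyone!
    rolling_versions: true
    versions_to_keep: 3
    state: present
  register: result

- name: Print the versioned config name (for example foo_v1)
  ansible.builtin.debug:
    var: result.config_name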
"""
|
||||
|
||||
RETURN = r"""
|
||||
config_id:
|
||||
description:
|
||||
- The ID assigned by Docker to the config object.
|
||||
returned: success and O(state=present)
|
||||
type: str
|
||||
sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
|
||||
config_name:
|
||||
description:
|
||||
- The name of the created config object.
|
||||
returned: success and O(state=present)
|
||||
type: str
|
||||
sample: 'awesome_config'
|
||||
version_added: 2.2.0
|
||||
"""
|
||||
|
||||
import base64
|
||||
import hashlib
|
||||
import traceback
|
||||
import typing as t
|
||||
|
||||
try:
|
||||
from docker.errors import APIError, DockerException
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_bytes
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._common import (
|
||||
AnsibleDockerClient,
|
||||
RequestException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._util import (
|
||||
DockerBaseClass,
|
||||
compare_generic,
|
||||
sanitize_labels,
|
||||
)
|
||||
|
||||
|
||||
class ConfigManager(DockerBaseClass):
|
||||
def __init__(self, client: AnsibleDockerClient, results: dict[str, t.Any]) -> None:
|
||||
super().__init__()
|
||||
|
||||
self.client = client
|
||||
self.results = results
|
||||
self.check_mode = self.client.check_mode
|
||||
|
||||
parameters = self.client.module.params
|
||||
self.name = parameters.get("name")
|
||||
self.state = parameters.get("state")
|
||||
self.data = parameters.get("data")
|
||||
if self.data is not None:
|
||||
if parameters.get("data_is_b64"):
|
||||
self.data = base64.b64decode(self.data)
|
||||
else:
|
||||
self.data = to_bytes(self.data)
|
||||
data_src = parameters.get("data_src")
|
||||
if data_src is not None:
|
||||
try:
|
||||
with open(data_src, "rb") as f:
|
||||
self.data = f.read()
|
||||
except Exception as exc: # pylint: disable=broad-exception-caught
|
||||
self.client.fail(f"Error while reading {data_src}: {exc}")
|
||||
self.labels = parameters.get("labels")
|
||||
self.force = parameters.get("force")
|
||||
self.rolling_versions = parameters.get("rolling_versions")
|
||||
self.versions_to_keep = parameters.get("versions_to_keep")
|
||||
self.template_driver = parameters.get("template_driver")
|
||||
|
||||
if self.rolling_versions:
|
||||
self.version = 0
|
||||
self.data_key: str | None = None
|
||||
self.configs: list[dict[str, t.Any]] = []
|
||||
|
||||
def __call__(self) -> None:
|
||||
self.get_config()
|
||||
if self.state == "present":
|
||||
self.data_key = hashlib.sha224(self.data).hexdigest()
|
||||
self.present()
|
||||
self.remove_old_versions()
|
||||
elif self.state == "absent":
|
||||
self.absent()
|
||||
|
||||
def get_version(self, config: dict[str, t.Any]) -> int:
|
||||
try:
|
||||
return int(
|
||||
config.get("Spec", {}).get("Labels", {}).get("ansible_version", 0)
|
||||
)
|
||||
except ValueError:
|
||||
return 0
|
||||
|
||||
def remove_old_versions(self) -> None:
|
||||
if not self.rolling_versions or self.versions_to_keep < 0:
|
||||
return
|
||||
if not self.check_mode:
|
||||
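# get_config() sorts self.configs oldest-first by version, so popping
# index 0 removes the oldest versions until at most
# max(versions_to_keep, 1) remain.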
while len(self.configs) > max(self.versions_to_keep, 1):
|
||||
self.remove_config(self.configs.pop(0))
|
||||
|
||||
def get_config(self) -> None:
|
||||
"""Find an existing config."""
|
||||
try:
|
||||
configs = self.client.configs(filters={"name": self.name})
|
||||
except APIError as exc:
|
||||
self.client.fail(f"Error accessing config {self.name}: {exc}")
|
||||
|
||||
if self.rolling_versions:
|
||||
self.configs = [
|
||||
config
|
||||
for config in configs
|
||||
if config["Spec"]["Name"].startswith(f"{self.name}_v")
|
||||
]
|
||||
self.configs.sort(key=self.get_version)
|
||||
else:
|
||||
self.configs = [
|
||||
config for config in configs if config["Spec"]["Name"] == self.name
|
||||
]
|
||||
|
||||
def create_config(self) -> str | None:
|
||||
"""Create a new config"""
|
||||
config_id: str | dict[str, t.Any] | None = None
|
||||
# We cannot see the data after creation, so we add a label we can use for idempotency checks
|
||||
labels = {"ansible_key": self.data_key}
|
||||
if self.rolling_versions:
|
||||
self.version += 1
|
||||
labels["ansible_version"] = str(self.version)
|
||||
self.name = f"{self.name}_v{self.version}"
|
||||
if self.labels:
|
||||
labels.update(self.labels)
|
||||
|
||||
try:
|
||||
if not self.check_mode:
|
||||
# only use templating argument when self.template_driver is defined
|
||||
kwargs = {}
|
||||
if self.template_driver:
|
||||
kwargs["templating"] = {"name": self.template_driver}
|
||||
config_id = self.client.create_config(
|
||||
self.name, self.data, labels=labels, **kwargs
|
||||
)
|
||||
self.configs += self.client.configs(filters={"id": config_id})
|
||||
except APIError as exc:
|
||||
self.client.fail(f"Error creating config: {exc}")
|
||||
|
||||
if isinstance(config_id, dict):
|
||||
return config_id["ID"]
|
||||
|
||||
return config_id
|
||||
|
||||
def remove_config(self, config: dict[str, t.Any]) -> None:
|
||||
try:
|
||||
if not self.check_mode:
|
||||
self.client.remove_config(config["ID"])
|
||||
except APIError as exc:
|
||||
self.client.fail(f"Error removing config {config['Spec']['Name']}: {exc}")
|
||||
|
||||
def present(self) -> None:
|
||||
"""Handles state == 'present', creating or updating the config"""
|
||||
if self.configs:
|
||||
config = self.configs[-1]
|
||||
self.results["config_id"] = config["ID"]
|
||||
self.results["config_name"] = config["Spec"]["Name"]
|
||||
data_changed = False
|
||||
template_driver_changed = False
|
||||
attrs = config.get("Spec", {})
|
||||
if attrs.get("Labels", {}).get("ansible_key"):
|
||||
if attrs["Labels"]["ansible_key"] != self.data_key:
|
||||
data_changed = True
|
||||
else:
|
||||
if not self.force:
|
||||
self.client.module.warn(
|
||||
"'ansible_key' label not found. Config will not be changed unless the force parameter is set to 'true'"
|
||||
)
|
||||
# template_driver has changed if it was set in the previous config
|
||||
# and now it differs, or if it was not set but now it is.
|
||||
if attrs.get("Templating", {}).get("Name"):
|
||||
if attrs["Templating"]["Name"] != self.template_driver:
|
||||
template_driver_changed = True
|
||||
elif self.template_driver:
|
||||
template_driver_changed = True
|
||||
labels_changed = not compare_generic(
|
||||
self.labels, attrs.get("Labels"), "allow_more_present", "dict"
|
||||
)
|
||||
if self.rolling_versions:
|
||||
self.version = self.get_version(config)
|
||||
if data_changed or template_driver_changed or labels_changed or self.force:
|
||||
# if something changed or force, delete and re-create the config
|
||||
if not self.rolling_versions:
|
||||
self.absent()
|
||||
config_id = self.create_config()
|
||||
self.results["changed"] = True
|
||||
self.results["config_id"] = config_id
|
||||
self.results["config_name"] = self.name
|
||||
else:
|
||||
self.results["changed"] = True
|
||||
self.results["config_id"] = self.create_config()
|
||||
self.results["config_name"] = self.name
|
||||
|
||||
def absent(self) -> None:
|
||||
"""Handles state == 'absent', removing the config"""
|
||||
if self.configs:
|
||||
for config in self.configs:
|
||||
self.remove_config(config)
|
||||
self.results["changed"] = True
|
||||
|
||||
|
||||
def main() -> None:
|
||||
argument_spec = {
|
||||
"name": {"type": "str", "required": True},
|
||||
"state": {
|
||||
"type": "str",
|
||||
"default": "present",
|
||||
"choices": ["absent", "present"],
|
||||
},
|
||||
"data": {"type": "str"},
|
||||
"data_is_b64": {"type": "bool", "default": False},
|
||||
"data_src": {"type": "path"},
|
||||
"labels": {"type": "dict"},
|
||||
"force": {"type": "bool", "default": False},
|
||||
"rolling_versions": {"type": "bool", "default": False},
|
||||
"versions_to_keep": {"type": "int", "default": 5},
|
||||
"template_driver": {"type": "str", "choices": ["golang"]},
|
||||
}
|
||||
|
||||
required_if = [
|
||||
("state", "present", ["data", "data_src"], True),
|
||||
]
|
||||
|
||||
mutually_exclusive = [
|
||||
("data", "data_src"),
|
||||
]
|
||||
|
||||
option_minimal_versions = {
|
||||
"template_driver": {"docker_py_version": "5.0.3", "docker_api_version": "1.37"},
|
||||
}
|
||||
|
||||
client = AnsibleDockerClient(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
required_if=required_if,
|
||||
mutually_exclusive=mutually_exclusive,
|
||||
min_docker_version="2.6.0",
|
||||
min_docker_api_version="1.30",
|
||||
option_minimal_versions=option_minimal_versions,
|
||||
)
|
||||
sanitize_labels(client.module.params["labels"], "labels", client)
|
||||
|
||||
try:
|
||||
results = {
|
||||
"changed": False,
|
||||
}
|
||||
|
||||
ConfigManager(client, results)()
|
||||
client.module.exit_json(**results)
|
||||
except DockerException as e:
|
||||
client.fail(
|
||||
f"An unexpected Docker error occurred: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
f"An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
File diff suppressed because it is too large
File diff suppressed because it is too large
|
|
@@ -0,0 +1,345 @@
|
|||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
module: docker_container_exec
|
||||
|
||||
short_description: Execute command in a docker container
|
||||
|
||||
version_added: 1.5.0
|
||||
|
||||
description:
|
||||
- Executes a command in a Docker container.
|
||||
extends_documentation_fragment:
|
||||
- community.docker._docker.api_documentation
|
||||
- community.docker._attributes
|
||||
- community.docker._attributes.actiongroup_docker
|
||||
|
||||
attributes:
|
||||
check_mode:
|
||||
support: none
|
||||
diff_mode:
|
||||
support: none
|
||||
idempotent:
|
||||
support: N/A
|
||||
details:
|
||||
- Whether the executed command is idempotent depends on the command.
|
||||
|
||||
options:
|
||||
container:
|
||||
type: str
|
||||
required: true
|
||||
description:
|
||||
- The name of the container to execute the command in.
|
||||
argv:
|
||||
type: list
|
||||
elements: str
|
||||
description:
|
||||
- The command to execute.
|
||||
- Since this is a list of arguments, no quoting is needed.
|
||||
- Exactly one of O(argv) or O(command) must be specified.
|
||||
command:
|
||||
type: str
|
||||
description:
|
||||
- The command to execute.
|
||||
- Exactly one of O(argv) or O(command) must be specified.
|
||||
chdir:
|
||||
type: str
|
||||
description:
|
||||
- The directory to run the command in.
|
||||
detach:
|
||||
description:
|
||||
- Whether to run the command synchronously (O(detach=false), default) or asynchronously (O(detach=true)).
|
||||
- If set to V(true), O(stdin) cannot be provided, and the return values RV(stdout), RV(stderr), and RV(rc) are not returned.
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 2.1.0
|
||||
user:
|
||||
type: str
|
||||
description:
|
||||
- If specified, the user to execute this command with.
|
||||
stdin:
|
||||
type: str
|
||||
description:
|
||||
- Set the stdin of the command directly to the specified value.
|
||||
- Can only be used if O(detach=false).
|
||||
stdin_add_newline:
|
||||
type: bool
|
||||
default: true
|
||||
description:
|
||||
- If set to V(true), appends a newline to O(stdin).
|
||||
strip_empty_ends:
|
||||
type: bool
|
||||
default: true
|
||||
description:
|
||||
- Strip empty lines from the end of stdout/stderr in result.
|
||||
tty:
|
||||
type: bool
|
||||
default: false
|
||||
description:
|
||||
- Whether to allocate a TTY.
|
||||
env:
|
||||
description:
|
||||
- Dictionary of environment variables with their respective values to be passed to the command run inside the container.
|
||||
- Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (for example V("true"))
|
||||
in order to avoid data loss.
|
||||
- Please note that if you are passing values in with Jinja2 templates, like V("{{ value }}"), you need to add V(| string)
|
||||
to prevent Ansible from converting strings such as V("true") back to booleans. The correct way is to use V("{{ value |
|
||||
string }}").
|
||||
type: dict
|
||||
version_added: 2.1.0
|
||||
|
||||
notes:
|
||||
- Does B(not work with TCP TLS sockets) when using O(stdin). This is caused by the inability to send C(close_notify) without
|
||||
closing the connection with Python's C(SSLSocket)s. See U(https://github.com/ansible-collections/community.docker/issues/605)
|
||||
for more information.
|
||||
- If you need to evaluate environment variables of the container in O(command) or O(argv), you need to pass the command
|
||||
through a shell, like O(command=/bin/sh -c "echo $ENV_VARIABLE"). The same needs to be done in case you want to use glob patterns
|
||||
or other shell features such as redirects.
|
||||
author:
|
||||
- "Felix Fontein (@felixfontein)"
|
||||
|
||||
requirements:
|
||||
- "Docker API >= 1.25"
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
---
|
||||
- name: Run a simple command (command)
|
||||
community.docker.docker_container_exec:
|
||||
container: foo
|
||||
command: /bin/bash -c "ls -lah"
|
||||
chdir: /root
|
||||
register: result
|
||||
|
||||
- name: Print stdout
|
||||
ansible.builtin.debug:
|
||||
var: result.stdout
|
||||
|
||||
- name: Run a simple command (argv)
|
||||
community.docker.docker_container_exec:
|
||||
container: foo
|
||||
argv:
|
||||
- /bin/bash
|
||||
- "-c"
|
||||
- "ls -lah > /dev/stderr"
|
||||
chdir: /root
|
||||
register: result
|
||||
|
||||
- name: Print stderr lines
|
||||
ansible.builtin.debug:
|
||||
var: result.stderr_lines
|
||||
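
# A sketch of the stdin option documented above: the value is fed to the
# command's standard input, with a newline appended by default
# (stdin_add_newline). The container name and command are illustrative
# assumptions.
- name: Run a command that reads from stdin
  community.docker.docker_container_exec:
    container: foo
    command: /bin/cat
    stdin: Hello from Ansible
  register: result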
"""
|
||||
|
||||
RETURN = r"""
|
||||
stdout:
|
||||
type: str
|
||||
returned: success and O(detach=false)
|
||||
description:
|
||||
- The standard output of the container command.
|
||||
stderr:
|
||||
type: str
|
||||
returned: success and O(detach=false)
|
||||
description:
|
||||
- The standard error output of the container command.
|
||||
rc:
|
||||
type: int
|
||||
returned: success and O(detach=false)
|
||||
sample: 0
|
||||
description:
|
||||
- The exit code of the command.
|
||||
exec_id:
|
||||
type: str
|
||||
returned: success and O(detach=true)
|
||||
sample: 249d9e3075655baf705ed8f40488c5e9434049cf3431976f1bfdb73741c574c5
|
||||
description:
|
||||
- The execution ID of the command.
|
||||
version_added: 2.1.0
|
||||
"""
|
||||
|
||||
import shlex
|
||||
import traceback
|
||||
import typing as t
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_bytes, to_text
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._api.errors import (
|
||||
APIError,
|
||||
DockerException,
|
||||
NotFound,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
|
||||
format_environment,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._common_api import (
|
||||
AnsibleDockerClient,
|
||||
RequestException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._socket_handler import (
|
||||
DockerSocketHandlerModule,
|
||||
)
|
||||
|
||||
|
||||
def main() -> None:
|
||||
argument_spec = {
|
||||
"container": {"type": "str", "required": True},
|
||||
"argv": {"type": "list", "elements": "str"},
|
||||
"command": {"type": "str"},
|
||||
"chdir": {"type": "str"},
|
||||
"detach": {"type": "bool", "default": False},
|
||||
"user": {"type": "str"},
|
||||
"stdin": {"type": "str"},
|
||||
"stdin_add_newline": {"type": "bool", "default": True},
|
||||
"strip_empty_ends": {"type": "bool", "default": True},
|
||||
"tty": {"type": "bool", "default": False},
|
||||
"env": {"type": "dict"},
|
||||
}
|
||||
|
||||
option_minimal_versions = {
|
||||
"chdir": {"docker_api_version": "1.35"},
|
||||
}
|
||||
|
||||
client = AnsibleDockerClient(
|
||||
argument_spec=argument_spec,
|
||||
option_minimal_versions=option_minimal_versions,
|
||||
mutually_exclusive=[("argv", "command")],
|
||||
required_one_of=[("argv", "command")],
|
||||
)
|
||||
|
||||
container: str = client.module.params["container"]
|
||||
argv: list[str] | None = client.module.params["argv"]
|
||||
command: str | None = client.module.params["command"]
|
||||
chdir: str | None = client.module.params["chdir"]
|
||||
detach: bool = client.module.params["detach"]
|
||||
user: str | None = client.module.params["user"]
|
||||
stdin: str | None = client.module.params["stdin"]
|
||||
strip_empty_ends: bool = client.module.params["strip_empty_ends"]
|
||||
tty: bool = client.module.params["tty"]
|
||||
env: dict[str, t.Any] | None = client.module.params["env"]
|
||||
|
||||
if env is not None:
|
||||
for name, value in env.items():
|
||||
if not isinstance(value, str):
|
||||
client.module.fail_json(
|
||||
msg="Non-string value found for env option. Ambiguous env options must be "
|
||||
"wrapped in quotes to avoid them being interpreted when directly specified "
|
||||
"in YAML, or explicitly converted to strings when the option is templated. "
|
||||
f"Key: {name}"
|
||||
)
|
||||
|
||||
if command is not None:
|
||||
argv = shlex.split(command)
|
||||
assert argv is not None
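    # Illustrative note (added): shlex.split honors shell-style quoting, for example
    # shlex.split('/bin/bash -c "ls -lah"') == ['/bin/bash', '-c', 'ls -lah'].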

    if detach and stdin is not None:
        client.module.fail_json(msg="If detach=true, stdin cannot be provided.")

    if stdin is not None and client.module.params["stdin_add_newline"]:
        stdin += "\n"
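
    # Descriptive note (added): the Docker API runs an exec in two steps. First,
    # POST /containers/{name}/exec creates the exec instance; then
    # POST /exec/{id}/start runs it, either detached or attached.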

    try:
        data = {
            "Container": container,
            "User": user or "",
            "Privileged": False,
            "Tty": False,
            "AttachStdin": bool(stdin),
            "AttachStdout": True,
            "AttachStderr": True,
            "Cmd": argv,
            "Env": format_environment(env) if env is not None else None,
        }
        if chdir is not None:
            data["WorkingDir"] = chdir

        exec_data = client.post_json_to_json(
            "/containers/{0}/exec", container, data=data
        )
        exec_id: str = exec_data["Id"]

        data = {
            "Tty": tty,
            "Detach": detach,
        }
        if detach:
            client.post_json_to_text("/exec/{0}/start", exec_id, data=data)
            client.module.exit_json(changed=True, exec_id=exec_id)

        else:
            stdout: bytes | None
            stderr: bytes | None
            if stdin and not detach:
                exec_socket = client.post_json_to_stream_socket(
                    "/exec/{0}/start", exec_id, data=data
                )
                try:
                    with DockerSocketHandlerModule(
                        exec_socket, client.module
                    ) as exec_socket_handler:
                        if stdin:
                            exec_socket_handler.write(to_bytes(stdin))

                        stdout, stderr = exec_socket_handler.consume()
                finally:
                    exec_socket.close()
            elif tty:
                stdout, stderr = client.post_json_to_stream(
                    "/exec/{0}/start",
                    exec_id,
                    data=data,
                    stream=False,
                    tty=True,
                    demux=True,
                )
            else:
                stdout, stderr = client.post_json_to_stream(
                    "/exec/{0}/start",
                    exec_id,
                    data=data,
                    stream=False,
                    tty=False,
                    demux=True,
                )

            result = client.get_json("/exec/{0}/json", exec_id)

            stdout_t = to_text(stdout or b"")
            stderr_t = to_text(stderr or b"")
            if strip_empty_ends:
                stdout_t = stdout_t.rstrip("\r\n")
                stderr_t = stderr_t.rstrip("\r\n")

            client.module.exit_json(
                changed=True,
                stdout=stdout_t,
                stderr=stderr_t,
                rc=result.get("ExitCode") or 0,
            )
    except NotFound:
        client.fail(f'Could not find container "{container}"')
    except APIError as e:
        if e.response is not None and e.response.status_code == 409:
            client.fail(f'The container "{container}" has been paused ({e})')
        client.fail(
            f"An unexpected Docker error occurred: {e}",
            exception=traceback.format_exc(),
        )
    except DockerException as e:
        client.fail(
            f"An unexpected Docker error occurred: {e}",
            exception=traceback.format_exc(),
        )
    except RequestException as e:
        client.fail(
            f"An unexpected requests error occurred when trying to talk to the Docker daemon: {e}",
            exception=traceback.format_exc(),
        )


if __name__ == "__main__":
    main()

@ -0,0 +1,120 @@
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
module: docker_container_info

short_description: Retrieves facts about a docker container

description:
  - Retrieves facts about a docker container.
  - Essentially returns the output of C(docker inspect <name>), similar to what M(community.docker.docker_container) returns
    for a non-absent container.
extends_documentation_fragment:
  - community.docker._docker.api_documentation
  - community.docker._attributes
  - community.docker._attributes.actiongroup_docker
  - community.docker._attributes.info_module
  - community.docker._attributes.idempotent_not_modify_state

options:
  name:
    description:
      - The name of the container to inspect.
      - When identifying an existing container, the name may be a container name or a long or short container ID (see the
        examples below).
    type: str
    required: true

author:
  - "Felix Fontein (@felixfontein)"

requirements:
  - "Docker API >= 1.25"
"""

EXAMPLES = r"""
---
- name: Get info on container
  community.docker.docker_container_info:
    name: mydata
  register: result

- name: Does container exist?
  ansible.builtin.debug:
    msg: "The container {{ 'exists' if result.exists else 'does not exist' }}"

- name: Print information about container
  ansible.builtin.debug:
    var: result.container
  when: result.exists
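
# Illustrative sketch (added; the short ID is hypothetical): containers can
# also be addressed by a long or short container ID instead of a name.
- name: Get info on container by short ID
  community.docker.docker_container_info:
    name: 8e47bf643eb9
  register: result_by_id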
"""

RETURN = r"""
exists:
  description:
    - Returns whether the container exists.
  type: bool
  returned: always
  sample: true
container:
  description:
    - Facts representing the current state of the container. Matches the docker inspection output.
    - Will be V(none) if container does not exist.
  returned: always
  type: dict
  sample: '{ "AppArmorProfile": "", "Args": [], "Config": { "AttachStderr": false, "AttachStdin": false, "AttachStdout": false,
    "Cmd": [ "/usr/bin/supervisord" ], "Domainname": "", "Entrypoint": null, "Env": [ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
    ], "ExposedPorts": { "443/tcp": {}, "80/tcp": {} }, "Hostname": "8e47bf643eb9", "Image": "lnmp_nginx:v1", "Labels": {},
    "OnBuild": null, "OpenStdin": false, "StdinOnce": false, "Tty": false, "User": "", "Volumes": { "/tmp/lnmp/nginx-sites/logs/":
    {} }, ... }'
"""

import traceback

from ansible_collections.community.docker.plugins.module_utils._api.errors import (
    DockerException,
)
from ansible_collections.community.docker.plugins.module_utils._common_api import (
    AnsibleDockerClient,
    RequestException,
)


def main() -> None:
    argument_spec = {
        "name": {"type": "str", "required": True},
    }

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    container_id: str = client.module.params["name"]
    try:
        container = client.get_container(container_id)

        client.module.exit_json(
            changed=False,
            exists=bool(container),
            container=container,
        )
    except DockerException as e:
        client.fail(
            f"An unexpected Docker error occurred: {e}",
            exception=traceback.format_exc(),
        )
    except RequestException as e:
        client.fail(
            f"An unexpected requests error occurred when trying to talk to the Docker daemon: {e}",
            exception=traceback.format_exc(),
        )


if __name__ == "__main__":
    main()

@ -0,0 +1,325 @@
#!/usr/bin/python
#
# Copyright 2025 Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
module: docker_context_info

short_description: Retrieve information on Docker contexts for the current user

version_added: 4.4.0

description:
  - Return information on Docker contexts.
  - This includes some generic information, as well as a RV(contexts[].config) dictionary that can be used for module defaults for all community.docker modules
    that use the C(community.docker.docker) module defaults group.
extends_documentation_fragment:
  - community.docker._attributes
  - community.docker._attributes.info_module
  - community.docker._attributes.idempotent_not_modify_state

options:
  only_current:
    description:
      - If set to V(true), RV(contexts) will contain only the current context and no others.
      - If set to V(false) (default), RV(contexts) will list all contexts, unless O(name) is specified.
      - Mutually exclusive with O(name).
    type: bool
    default: false
  name:
    description:
      - A specific Docker CLI context to query.
      - The module will fail if this context does not exist. If you simply want to query whether a context exists,
        do not specify this parameter and use Jinja2 to search the resulting list for a context of the given name instead
        (see the sketch at the end of the examples below).
      - Mutually exclusive with O(only_current).
    type: str
  cli_context:
    description:
      - Override for the default context's name.
      - This is preferably used for context selection when O(only_current=true),
        and it is used to compute the return values RV(contexts[].current) and RV(current_context_name).
    type: str

author:
  - "Felix Fontein (@felixfontein)"
"""

EXAMPLES = r"""
---
- name: Get info on contexts
  community.docker.docker_context_info:
  register: result

- name: Show all contexts
  ansible.builtin.debug:
    msg: "{{ result.contexts }}"

- name: Get current context
  community.docker.docker_context_info:
    only_current: true
  register: docker_current_context

- name: Run community.docker modules with current context
  module_defaults:
    group/community.docker.docker: "{{ docker_current_context.contexts[0].config }}"
  block:
    - name: Task using the current context
      community.docker.docker_container:
        image: ubuntu:latest
        name: ubuntu
        state: started
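
# A minimal sketch (added; the context name "remote" is illustrative): search
# the context list with Jinja2 instead of O(name) to avoid failing when the
# context does not exist.
- name: Determine whether a context named "remote" exists
  ansible.builtin.set_fact:
    remote_context_exists: "{{ result.contexts | selectattr('name', 'equalto', 'remote') | list | length > 0 }}"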
"""

RETURN = r"""
contexts:
  description:
    - A list of all contexts (O(only_current=false), O(name) not specified),
      only the current context (O(only_current=true)),
      or the requested context (O(name) specified).
  type: list
  elements: dict
  returned: success
  contains:
    current:
      description:
        - Whether this context is the current one.
      type: bool
      returned: success
      sample: true
    name:
      description:
        - The context's name.
      type: str
      returned: success
      sample: default
    description:
      description:
        - The context's description, if available.
      type: str
      returned: success
      sample: My context
    meta_path:
      description:
        - The path to the context's meta directory.
        - Not present for RV(contexts[].name=default).
      type: str
      returned: success
      sample: /home/felix/.docker/contexts/meta/0123456789abcdef01234567890abcdef0123456789abcdef0123456789abcde
    tls_path:
      description:
        - The path to the context's TLS config directory.
        - Not present for RV(contexts[].name=default).
      type: str
      returned: success
      sample: /home/user/.docker/contexts/tls/0123456789abcdef01234567890abcdef0123456789abcdef0123456789abcde/
    config:
      description:
        - In case the context is for Docker, contains option values to configure the community.docker modules to use this context.
        - Note that the exact options returned here and their values might change over time if incompatibilities with existing modules are found.
          The goal is that this configuration works fine with all modules in this collection, but we do not have the capabilities to
          test all possible configuration options at the moment.
      type: dict
      returned: success
      sample: {}
      contains:
        docker_host:
          description:
            - The Docker daemon to connect to.
          type: str
          returned: success and context is for Docker
          sample: unix:///var/run/docker.sock
        tls:
          description:
            - Whether the Docker context should use an unvalidated TLS connection.
          type: bool
          returned: success and context is for Docker
          sample: false
        ca_path:
          description:
            - The CA certificate used to validate the Docker daemon's certificate.
          type: str
          returned: success, context is for Docker, TLS config is present, and CA cert is present
          sample: /path/to/ca-cert.pem
        client_cert:
          description:
            - The client certificate to authenticate with to the Docker daemon.
          type: str
          returned: success, context is for Docker, TLS config is present, and client cert info is present
          sample: /path/to/client-cert.pem
        client_key:
          description:
            - The client certificate's key to authenticate with to the Docker daemon.
          type: str
          returned: success, context is for Docker, TLS config is present, and client cert info is present
          sample: /path/to/client-key.pem
        validate_certs:
          description:
            - Whether the Docker context should use a validated TLS connection.
          type: bool
          returned: success, context is for Docker, and TLS config is present
          sample: true

current_context_name:
  description:
    - The name of the current Docker context.
  type: str
  returned: success
  sample: default
"""

import traceback
import typing as t

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_text

from ansible_collections.community.docker.plugins.module_utils._api.context.api import (
    ContextAPI,
)
from ansible_collections.community.docker.plugins.module_utils._api.context.config import (
    get_current_context_name_with_source,
)
from ansible_collections.community.docker.plugins.module_utils._api.context.context import (
    IN_MEMORY,
    Context,
)
from ansible_collections.community.docker.plugins.module_utils._api.errors import (
    ContextException,
    DockerException,
)

if t.TYPE_CHECKING:
    from ansible_collections.community.docker.plugins.module_utils._api.tls import (
        TLSConfig,
    )


def tls_context_to_json(context: TLSConfig | None) -> dict[str, t.Any] | None:
    if context is None:
        return None
    return {
        "client_cert": context.cert[0] if context.cert else None,
        "client_key": context.cert[1] if context.cert else None,
        "ca_cert": context.ca_cert,
        "verify": context.verify,
        # 'ssl_version': context.ssl_version, -- this isn't used anymore
    }


def context_to_json(context: Context, current: bool) -> dict[str, t.Any]:
    module_config: dict[str, t.Any] = {}
    if "docker" in context.endpoints:
        endpoint = context.endpoints["docker"]
        if isinstance(endpoint.get("Host"), str):
            host_str = to_text(endpoint["Host"])

            # Adjust the protocol name so that it works with the Docker CLI tool as well
            proto = None
            idx = host_str.find("://")
            if idx >= 0:
                proto = host_str[:idx]
                host_str = host_str[idx + 3 :]
            if proto in ("http", "https"):
                proto = "tcp"
            if proto == "http+unix":
                proto = "unix"
            if proto:
                host_str = f"{proto}://{host_str}"

            # Create config for the modules
            module_config["docker_host"] = host_str
            if context.tls_cfg.get("docker"):
                tls_cfg = context.tls_cfg["docker"]
                if tls_cfg.ca_cert:
                    module_config["ca_path"] = tls_cfg.ca_cert
                if tls_cfg.cert:
                    module_config["client_cert"] = tls_cfg.cert[0]
                    module_config["client_key"] = tls_cfg.cert[1]
                module_config["validate_certs"] = tls_cfg.verify
                module_config["tls"] = True
            else:
                module_config["tls"] = bool(endpoint.get("SkipTLSVerify"))
    return {
        "current": current,
        "name": context.name,
        "description": context.description,
        "meta_path": None if context.meta_path is IN_MEMORY else context.meta_path,
        "tls_path": None if context.tls_path is IN_MEMORY else context.tls_path,
        "config": module_config,
    }


def main() -> None:
    argument_spec = {
        "only_current": {"type": "bool", "default": False},
        "name": {"type": "str"},
        "cli_context": {"type": "str"},
    }

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[
            ("only_current", "name"),
        ],
    )

    only_current: bool = module.params["only_current"]
    name: str | None = module.params["name"]
    cli_context: str | None = module.params["cli_context"]
    try:
        if cli_context:
            current_context_name, current_context_source = (
                cli_context,
                "cli_context module option",
            )
        else:
            current_context_name, current_context_source = (
                get_current_context_name_with_source()
            )
        if name:
            context_or_none = ContextAPI.get_context(name)
            if not context_or_none:
                module.fail_json(msg=f"There is no context of name {name!r}")
            contexts = [context_or_none]
        elif only_current:
            context_or_none = ContextAPI.get_context(current_context_name)
            if not context_or_none:
                module.fail_json(
                    msg=f"There is no context of name {current_context_name!r}, which is configured as the default context ({current_context_source})",
                )
            contexts = [context_or_none]
        else:
            contexts = ContextAPI.contexts()

        json_contexts = sorted(
            [
                context_to_json(context, context.name == current_context_name)
                for context in contexts
            ],
            key=lambda entry: entry["name"],
        )

        module.exit_json(
            changed=False,
            contexts=json_contexts,
            current_context_name=current_context_name,
        )
    except ContextException as e:
        module.fail_json(
            msg=f"Error when handling Docker contexts: {e}",
            exception=traceback.format_exc(),
        )
    except DockerException as e:
        module.fail_json(
            msg=f"An unexpected Docker error occurred: {e}",
            exception=traceback.format_exc(),
        )


if __name__ == "__main__":
    main()

@ -0,0 +1,404 @@
#!/usr/bin/python
#
# Copyright (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
module: docker_host_info

short_description: Retrieves facts about the docker host and lists of objects of its services

description:
  - Retrieves facts about a docker host.
  - Essentially returns the output of C(docker system info).
  - The module also allows listing object names for containers, images, networks and volumes. It also allows querying information
    on disk usage.
  - The output differs depending on the API version of the docker daemon.
  - If the docker daemon cannot be contacted or does not meet the API version requirements, the module will fail.
extends_documentation_fragment:
  - community.docker._docker.api_documentation
  - community.docker._attributes
  - community.docker._attributes.actiongroup_docker
  - community.docker._attributes.idempotent_not_modify_state

attributes:
  check_mode:
    support: full
    details:
      - This action does not modify state.
  diff_mode:
    support: N/A
    details:
      - This action does not modify state.

options:
  containers:
    description:
      - Whether to list containers.
    type: bool
    default: false
  containers_all:
    description:
      - By default, only running containers are returned.
      - This corresponds to the C(--all) option to C(docker container list).
    type: bool
    default: false
    version_added: 3.4.0
  containers_filters:
    description:
      - A dictionary of filter values used for selecting containers to list.
      - 'For example, C(until: 24h).'
      - A daemon-side filter sketch is shown at the end of the examples below.
      - C(label) is a special case of filter which can be a string C(<key>) matching when a label is present, a string C(<key>=<value>)
        matching when a label has a particular value, or a list of strings C(<key>)/C(<key>=<value>).
      - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering) for
        more information on possible filters.
    type: dict
  images:
    description:
      - Whether to list images.
    type: bool
    default: false
  images_filters:
    description:
      - A dictionary of filter values used for selecting images to list.
      - 'For example, C(dangling: true).'
      - C(label) is a special case of filter which can be a string C(<key>) matching when a label is present, a string C(<key>=<value>)
        matching when a label has a particular value, or a list of strings C(<key>)/C(<key>=<value>).
      - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering) for more
        information on possible filters.
    type: dict
  networks:
    description:
      - Whether to list networks.
    type: bool
    default: false
  networks_filters:
    description:
      - A dictionary of filter values used for selecting networks to list.
      - C(label) is a special case of filter which can be a string C(<key>) matching when a label is present, a string C(<key>=<value>)
        matching when a label has a particular value, or a list of strings C(<key>)/C(<key>=<value>).
      - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering) for
        more information on possible filters.
    type: dict
  volumes:
    description:
      - Whether to list volumes.
    type: bool
    default: false
  volumes_filters:
    description:
      - A dictionary of filter values used for selecting volumes to list.
      - C(label) is a special case of filter which can be a string C(<key>) matching when a label is present, a string C(<key>=<value>)
        matching when a label has a particular value, or a list of strings C(<key>)/C(<key>=<value>).
      - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering) for more
        information on possible filters.
    type: dict
  disk_usage:
    description:
      - Summary information on used disk space by all Docker layers.
      - The output is a sum of images, volumes, containers and build cache.
    type: bool
    default: false
  verbose_output:
    description:
      - When set to V(true) and O(networks), O(volumes), O(images), O(containers), or O(disk_usage) is set to V(true), then
        the output will contain verbose information about objects matching the full output of the API method. For details see
        the documentation of your version of Docker API at U(https://docs.docker.com/engine/api/).
      - When this option is not set, the output contains only a subset of the information returned by the API for each type
        of object.
    type: bool
    default: false

author:
  - Piotr Wojciechowski (@WojciechowskiPiotr)

requirements:
  - "Docker API >= 1.25"
"""

EXAMPLES = r"""
---
- name: Get info on docker host
  community.docker.docker_host_info:
  register: result

- name: Get info on docker host and list images
  community.docker.docker_host_info:
    images: true
  register: result

- name: Get info on docker host and list images matching the filter
  community.docker.docker_host_info:
    images: true
    images_filters:
      label: "mylabel"
  register: result

- name: Get info on docker host and verbose list images
  community.docker.docker_host_info:
    images: true
    verbose_output: true
  register: result

- name: Get info on docker host and used disk space
  community.docker.docker_host_info:
    disk_usage: true
  register: result

- name: Get info on docker host and list containers matching the filter
  community.docker.docker_host_info:
    containers: true
    containers_filters:
      label:
        - key1=value1
        - key2=value2
  register: result

- name: Show host information
  ansible.builtin.debug:
    var: result.host_info
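
# A hedged sketch (added): filters are passed through to the daemon's container
# list endpoint, so daemon-side filters such as C(status) can be used as well.
- name: Get info on docker host and list exited containers
  community.docker.docker_host_info:
    containers: true
    containers_all: true
    containers_filters:
      status: exited
  register: result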
"""

RETURN = r"""
can_talk_to_docker:
  description:
    - Will be V(true) if the module can talk to the docker daemon.
  returned: both on success and on error
  type: bool

host_info:
  description:
    - Facts representing the basic state of the docker host. Matches the C(docker system info) output.
  returned: always
  type: dict
volumes:
  description:
    - List of dict objects containing the basic information about each volume. Keys match the C(docker volume ls) output
      unless O(verbose_output=true). See description for O(verbose_output).
  returned: When O(volumes=true)
  type: list
  elements: dict
networks:
  description:
    - List of dict objects containing the basic information about each network. Keys match the C(docker network ls) output
      unless O(verbose_output=true). See description for O(verbose_output).
  returned: When O(networks=true)
  type: list
  elements: dict
containers:
  description:
    - List of dict objects containing the basic information about each container. Keys match the C(docker container ls)
      output unless O(verbose_output=true). See description for O(verbose_output).
  returned: When O(containers=true)
  type: list
  elements: dict
images:
  description:
    - List of dict objects containing the basic information about each image. Keys match the C(docker image ls) output unless
      O(verbose_output=true). See description for O(verbose_output).
  returned: When O(images=true)
  type: list
  elements: dict
disk_usage:
  description:
    - Information on summary disk usage by images, containers and volumes on the docker host, unless O(verbose_output=true). See
      description for O(verbose_output).
  returned: When O(disk_usage=true)
  type: dict
"""

import traceback
import typing as t

from ansible_collections.community.docker.plugins.module_utils._api.errors import (
    APIError,
    DockerException,
)
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
    convert_filters,
)
from ansible_collections.community.docker.plugins.module_utils._common_api import (
    AnsibleDockerClient,
    RequestException,
)
from ansible_collections.community.docker.plugins.module_utils._util import (
    DockerBaseClass,
    clean_dict_booleans_for_docker_api,
)


class DockerHostManager(DockerBaseClass):
    def __init__(self, client: AnsibleDockerClient, results: dict[str, t.Any]) -> None:
        super().__init__()

        self.client = client
        self.results = results
        self.verbose_output = self.client.module.params["verbose_output"]

        listed_objects = ["volumes", "networks", "containers", "images"]

        self.results["host_info"] = self.get_docker_host_info()
        # At this point we definitely know that we can talk to the Docker daemon
        self.results["can_talk_to_docker"] = True
        self.client.fail_results["can_talk_to_docker"] = True

        if self.client.module.params["disk_usage"]:
            self.results["disk_usage"] = self.get_docker_disk_usage_facts()

        for docker_object in listed_objects:
            if self.client.module.params[docker_object]:
                returned_name = docker_object
                filter_name = f"{docker_object}_filters"
                filters = clean_dict_booleans_for_docker_api(
                    client.module.params.get(filter_name), allow_sequences=True
                )
                self.results[returned_name] = self.get_docker_items_list(
                    docker_object, filters
                )

    def get_docker_host_info(self) -> dict[str, t.Any]:
        try:
            return self.client.info()
        except APIError as exc:
            self.client.fail(f"Error inspecting docker host: {exc}")

    def get_docker_disk_usage_facts(self) -> dict[str, t.Any]:
        try:
            if self.verbose_output:
                return self.client.df()
            return {"LayersSize": self.client.df()["LayersSize"]}
        except APIError as exc:
            self.client.fail(f"Error inspecting docker host: {exc}")

    def get_docker_items_list(
        self,
        docker_object: str,
        filters: dict[str, t.Any] | None = None,
        verbose: bool = False,
    ) -> list[dict[str, t.Any]]:
        items = []

        header_containers = [
            "Id",
            "Image",
            "Command",
            "Created",
            "Status",
            "Ports",
            "Names",
        ]
        header_volumes = ["Driver", "Name"]
        header_images = ["Id", "RepoTags", "Created", "Size"]
        header_networks = ["Id", "Driver", "Name", "Scope"]

        filter_arg = {}
        if filters:
            filter_arg["filters"] = filters
        try:
            if docker_object == "containers":
                params = {
                    "limit": -1,
                    "all": 1 if self.client.module.params["containers_all"] else 0,
                    "size": 0,
                    "trunc_cmd": 0,
                    "filters": convert_filters(filters) if filters else None,
                }
                items = self.client.get_json("/containers/json", params=params)
            elif docker_object == "networks":
                params = {"filters": convert_filters(filters or {})}
                items = self.client.get_json("/networks", params=params)
            elif docker_object == "images":
                params = {
                    "only_ids": 0,
                    "all": 0,
                    "filters": convert_filters(filters) if filters else None,
                }
                items = self.client.get_json("/images/json", params=params)
            elif docker_object == "volumes":
                params = {
                    "filters": convert_filters(filters) if filters else None,
                }
                items = self.client.get_json("/volumes", params=params)
                items = items["Volumes"]
        except APIError as exc:
            self.client.fail(
                f"Error inspecting docker host for object '{docker_object}': {exc}"
            )

        if self.verbose_output:
            return items

        items_list = []
        for item in items:
            item_record = {}

            if docker_object == "containers":
                for key in header_containers:
                    item_record[key] = item.get(key)
            elif docker_object == "networks":
                for key in header_networks:
                    item_record[key] = item.get(key)
            elif docker_object == "images":
                for key in header_images:
                    item_record[key] = item.get(key)
            elif docker_object == "volumes":
                for key in header_volumes:
                    item_record[key] = item.get(key)
            items_list.append(item_record)

        return items_list


def main() -> None:
    argument_spec = {
        "containers": {"type": "bool", "default": False},
        "containers_all": {"type": "bool", "default": False},
        "containers_filters": {"type": "dict"},
        "images": {"type": "bool", "default": False},
        "images_filters": {"type": "dict"},
        "networks": {"type": "bool", "default": False},
        "networks_filters": {"type": "dict"},
        "volumes": {"type": "bool", "default": False},
        "volumes_filters": {"type": "dict"},
        "disk_usage": {"type": "bool", "default": False},
        "verbose_output": {"type": "bool", "default": False},
    }

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        fail_results={
            "can_talk_to_docker": False,
        },
    )
    if (
        client.module.params["api_version"] is None
        or client.module.params["api_version"].lower() == "auto"
    ):
        # At this point we know that we can talk to Docker, since we asked it for the API version
        client.fail_results["can_talk_to_docker"] = True

    try:
        results = {
            "changed": False,
        }

        DockerHostManager(client, results)
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail(
            f"An unexpected Docker error occurred: {e}",
            exception=traceback.format_exc(),
        )
    except RequestException as e:
        client.fail(
            f"An unexpected requests error occurred when trying to talk to the Docker daemon: {e}",
            exception=traceback.format_exc(),
        )


if __name__ == "__main__":
    main()

ansible_collections/community/docker/plugins/modules/docker_image.py (1239 lines): file diff suppressed because it is too large

@ -0,0 +1,647 @@
#!/usr/bin/python
#
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
module: docker_image_build

short_description: Build Docker images using Docker buildx

version_added: 3.6.0

description:
  - This module allows you to build Docker images using Docker's buildx plugin (BuildKit).
  - Note that the module is B(not idempotent) in the sense of classical Ansible modules. The only idempotence check is whether
    the built image already exists. This check can be disabled with the O(rebuild) option.
extends_documentation_fragment:
  - community.docker._docker.cli_documentation
  - community.docker._attributes
  - community.docker._attributes.actiongroup_docker

attributes:
  check_mode:
    support: full
  diff_mode:
    support: none
  idempotent:
    support: partial
    details:
      - If O(rebuild=always) the module is not idempotent.

options:
  name:
    description:
      - 'Image name. Name format will be one of: C(name), C(repository/name), C(registry_server:port/name). When pushing or
        pulling an image, the name can optionally include the tag by appending C(:tag_name).'
      - Note that image IDs (hashes) and names with digest cannot be used.
    type: str
    required: true
  tag:
    description:
      - Tag for the image name O(name) that is to be tagged.
      - If O(name)'s format is C(name:tag), then the tag value from O(name) will take precedence.
    type: str
    default: latest
  path:
    description:
      - The path for the build environment.
    type: path
    required: true
  dockerfile:
    description:
      - Provide an alternate name for the Dockerfile to use when building an image.
      - This can also include a relative path (relative to O(path)).
    type: str
  cache_from:
    description:
      - List of image names to consider as cache source.
    type: list
    elements: str
  pull:
    description:
      - When building an image, download any updates to the C(FROM) image in the Dockerfile.
    type: bool
    default: false
  network:
    description:
      - The network to use for C(RUN) build instructions.
    type: str
  nocache:
    description:
      - Do not use cache when building an image.
    type: bool
    default: false
  etc_hosts:
    description:
      - Extra hosts to add to C(/etc/hosts) in building containers, as a mapping of hostname to IP address.
      - Instead of an IP address, the special value V(host-gateway) can also be used, which resolves to the host's gateway
        IP and allows building containers to connect to services running on the host.
    type: dict
  args:
    description:
      - Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
      - Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
    type: dict
  target:
    description:
      - When building an image, specifies an intermediate build stage by name to use as the final stage for the resulting image.
    type: str
  platform:
    description:
      - Platforms in the format C(os[/arch[/variant]]).
      - Since community.docker 3.10.0 this can be a list of platforms, instead of just a single platform.
    type: list
    elements: str
  shm_size:
    description:
      - Size of C(/dev/shm) in format C(<number>[<unit>]). The number is a positive integer. Unit can be V(B) (byte), V(K) (kibibyte,
        1024B), V(M) (mebibyte), V(G) (gibibyte), V(T) (tebibyte), or V(P) (pebibyte).
      - Omitting the unit defaults to bytes. If you omit the size entirely, the Docker daemon uses V(64M).
    type: str
  labels:
    description:
      - Dictionary of key-value pairs.
    type: dict
  rebuild:
    description:
      - Defines the behavior of the module if the image to build (as specified in O(name) and O(tag)) already exists.
    type: str
    choices:
      - never
      - always
    default: never
  secrets:
    description:
      - Secrets to expose to the build.
      - A file-based secret sketch is shown at the end of the examples below.
    type: list
    elements: dict
    version_added: 3.10.0
    suboptions:
      id:
        description:
          - The secret identifier.
          - The secret will be made available as a file in the container under C(/run/secrets/<id>).
        type: str
        required: true
      type:
        description:
          - Type of the secret.
        type: str
        choices:
          file:
            - Reads the secret from a file on the target.
            - The file must be specified in O(secrets[].src).
          env:
            - Reads the secret from an environment variable on the target.
            - The environment variable must be named in O(secrets[].env).
            - Note that this requires the Buildkit plugin to have version 0.6.0 or newer.
          value:
            - Provides the secret from a given value O(secrets[].value).
            - B(Note) that the secret will be passed as an environment variable to C(docker buildx). Use another means of
              transport if you consider this not safe enough.
            - Note that this requires the Buildkit plugin to have version 0.6.0 or newer.
        required: true
      src:
        description:
          - Source path of the secret.
          - Only supported and required for O(secrets[].type=file).
        type: path
      env:
        description:
          - Name of the environment variable to read the secret from.
          - Only supported and required for O(secrets[].type=env).
        type: str
      value:
        description:
          - Value of the secret.
          - B(Note) that the secret will be passed as an environment variable to C(docker buildx). Use another means of transport
            if you consider this not safe enough.
          - Only supported and required for O(secrets[].type=value).
        type: str
  outputs:
    description:
      - Output destinations.
      - You can provide a list of exporters to export the built image in various places. Note that not all exporters might
        be supported by the build driver used.
      - Note that depending on how this option is used, no image with name O(name) and tag O(tag) might be created, which
        can cause the basic idempotency this module offers to not work.
      - Providing an empty list to this option is equivalent to not specifying it at all. The default behavior is a single
        entry with O(outputs[].type=image).
      - B(Note) that since community.docker 4.2.0, an entry for O(name)/O(tag) is added if O(outputs) has at least one entry
        and no entry has type O(outputs[].type=image) and includes O(name)/O(tag) in O(outputs[].name). This is because the
        module would otherwise pass C(--tag name:image) to the buildx plugin, which for some reason overwrites all images
        in O(outputs) by the C(name:image) provided in O(name)/O(tag).
    type: list
    elements: dict
    version_added: 3.10.0
    suboptions:
      type:
        description:
          - The type of exporter to use.
        type: str
        choices:
          local:
            - This export type writes all result files to a directory on the client. The new files will be owned by the current
              user. On multi-platform builds, all results will be put in subdirectories by their platform.
            - The destination has to be provided in O(outputs[].dest).
          tar:
            - This export type writes all result files as a single tarball on the client. On multi-platform builds,
              all results will be put in subdirectories by their platform.
            - The destination has to be provided in O(outputs[].dest).
          oci:
            - This export type writes the result image or manifest list as an L(OCI image layout,
              https://github.com/opencontainers/image-spec/blob/v1.0.1/image-layout.md)
              tarball on the client.
            - The destination has to be provided in O(outputs[].dest).
          docker:
            - This export type writes the single-platform result image as a Docker image specification tarball on the client.
              Tarballs created by this exporter are also OCI compatible.
            - The destination can be provided in O(outputs[].dest). If not specified, the tar will be loaded automatically
              to the local image store.
            - The Docker context where to import the result can be provided in O(outputs[].context).
          image:
            - This exporter writes the build result as an image or a manifest list. When using this driver, the image will
              appear in C(docker images).
            - The image name can be provided in O(outputs[].name). If it is not provided, O(name) and O(tag) will be used.
            - Optionally, the image can be automatically pushed to a registry by setting O(outputs[].push=true).
        required: true
      dest:
        description:
          - The destination path.
          - Required for O(outputs[].type=local), O(outputs[].type=tar), O(outputs[].type=oci).
          - Optional for O(outputs[].type=docker).
        type: path
      context:
        description:
          - Name for the Docker context where to import the result.
          - Optional for O(outputs[].type=docker).
        type: str
      name:
        description:
          - Name(s) under which the image is stored.
          - If not provided, O(name) and O(tag) will be used.
          - Optional for O(outputs[].type=image).
          - This can be a list of strings since community.docker 4.2.0.
        type: list
        elements: str
      push:
        description:
          - Whether to push the built image to a registry.
          - Only used for O(outputs[].type=image).
        type: bool
        default: false
requirements:
  - "Docker CLI with Docker buildx plugin"

author:
  - Felix Fontein (@felixfontein)

seealso:
  - module: community.docker.docker_image_push
  - module: community.docker.docker_image_tag
"""

EXAMPLES = r"""
---
- name: Build Python 3.12 image
  community.docker.docker_image_build:
    name: localhost/python/3.12:latest
    path: /home/user/images/python
    dockerfile: Dockerfile-3.12

- name: Build multi-platform image
  community.docker.docker_image_build:
    name: multi-platform-image
    tag: "1.5.2"
    path: /home/user/images/multi-platform
    platform:
      - linux/amd64
      - linux/arm64/v8
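
# A hedged sketch (added; the secret id "github_token" and paths are
# illustrative): expose a file-based build secret, which the Dockerfile can
# consume via C(RUN --mount=type=secret,id=github_token ...).
- name: Build image with a file-based build secret
  community.docker.docker_image_build:
    name: localhost/app
    tag: v1
    path: /home/user/images/app
    secrets:
      - id: github_token
        type: file
        src: /home/user/secrets/github_token.txt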
"""

RETURN = r"""
image:
  description: Image inspection results for the affected image.
  returned: success
  type: dict
  sample: {}

command:
  description: The command executed.
  returned: success and for some failures
  type: list
  elements: str
  version_added: 4.2.0
"""

import base64
import os
import traceback
import typing as t

from ansible.module_utils.common.text.converters import to_text
from ansible.module_utils.common.text.formatters import human_to_bytes

from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
    parse_repository_tag,
)
from ansible_collections.community.docker.plugins.module_utils._common_cli import (
    AnsibleModuleDockerClient,
    DockerException,
)
from ansible_collections.community.docker.plugins.module_utils._util import (
    DockerBaseClass,
    clean_dict_booleans_for_docker_api,
    is_image_name_id,
    is_valid_tag,
)
from ansible_collections.community.docker.plugins.module_utils._version import (
    LooseVersion,
)

if t.TYPE_CHECKING:
    from ansible.module_utils.basic import AnsibleModule


def convert_to_bytes(
    value: str | None,
    module: AnsibleModule,
    name: str,
    unlimited_value: int | None = None,
) -> int | None:
    if value is None:
        return value
    try:
        if unlimited_value is not None and value in ("unlimited", str(unlimited_value)):
            return unlimited_value
        return human_to_bytes(value)
    except ValueError as exc:
        module.fail_json(msg=f"Failed to convert {name} to bytes: {exc}")
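
# Illustrative note (added): human_to_bytes uses binary units, so for example
# convert_to_bytes("64M", module, "shm_size") returns 64 * 1024 * 1024 == 67108864.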


def dict_to_list(dictionary: dict[str, t.Any], concat: str = "=") -> list[str]:
    return [f"{k}{concat}{v}" for k, v in sorted(dictionary.items())]


def _quote_csv(text: str) -> str:
    if text.strip() == text and all(i not in text for i in '",\r\n'):
        return text
    text = text.replace('"', '""')
    return f'"{text}"'
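
# Illustrative note (added): buildx --output values are comma-separated
# key=value pairs, so values containing commas or quotes must be CSV-quoted.
# For example, _quote_csv('a,b') == '"a,b"' and _quote_csv('say "hi"') == '"say ""hi"""'.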


class ImageBuilder(DockerBaseClass):
    def __init__(self, client: AnsibleModuleDockerClient) -> None:
        super().__init__()
        self.client = client
        self.check_mode = self.client.check_mode
        parameters = self.client.module.params

        self.cache_from = parameters["cache_from"]
        self.pull = parameters["pull"]
        self.network = parameters["network"]
        self.nocache = parameters["nocache"]
        self.etc_hosts = clean_dict_booleans_for_docker_api(parameters["etc_hosts"])
        self.args = clean_dict_booleans_for_docker_api(parameters["args"])
        self.target = parameters["target"]
        self.platform = parameters["platform"]
        self.shm_size = convert_to_bytes(
            parameters["shm_size"], self.client.module, "shm_size"
        )
        self.labels = clean_dict_booleans_for_docker_api(parameters["labels"])
        self.rebuild = parameters["rebuild"]
        self.secrets = parameters["secrets"]
        self.outputs = parameters["outputs"]

        buildx = self.client.get_client_plugin_info("buildx")
        if buildx is None:
            self.fail(
                f"Docker CLI {self.client.get_cli()} does not have the buildx plugin installed"
            )
        buildx_version = buildx["Version"].lstrip("v")

        if self.secrets:
            for secret in self.secrets:
                if secret["type"] in ("env", "value") and LooseVersion(
                    buildx_version
                ) < LooseVersion("0.6.0"):
                    self.fail(
                        f"The Docker buildx plugin has version {buildx_version}, but 0.6.0 is needed for secrets of type=env and type=value"
                    )
        if (
            self.outputs
            and len(self.outputs) > 1
            and LooseVersion(buildx_version) < LooseVersion("0.13.0")
        ):
            self.fail(
                f"The Docker buildx plugin has version {buildx_version}, but 0.13.0 is needed to specify more than one output"
            )

        self.path = parameters["path"]
        if not os.path.isdir(self.path):
            self.fail(f'"{self.path}" is not an existing directory')
        self.dockerfile = parameters["dockerfile"]
        if self.dockerfile and not os.path.isfile(
            os.path.join(self.path, self.dockerfile)
        ):
            self.fail(
                f'"{os.path.join(self.path, self.dockerfile)}" is not an existing file'
            )

        self.name = parameters["name"]
        self.tag = parameters["tag"]
        if not is_valid_tag(self.tag, allow_empty=True):
            self.fail(f'"{self.tag}" is not a valid docker tag')
        if is_image_name_id(self.name):
            self.fail("Image name must not be a digest")

        # If name contains a tag, it takes precedence over the tag parameter.
        repo, repo_tag = parse_repository_tag(self.name)
        if repo_tag:
            self.name = repo
            self.tag = repo_tag

        if is_image_name_id(self.tag):
            self.fail("Image name must not contain a digest, but have a tag")

        if self.outputs:
            found = False
            name_tag = f"{self.name}:{self.tag}"
            for output in self.outputs:
                if output["type"] == "image":
                    if not output["name"]:
                        # Since we no longer pass --tag if --output is provided, we need to set this manually
                        output["name"] = [name_tag]
                    if output["name"] and name_tag in output["name"]:
                        found = True
            if not found:
                self.outputs.append(
                    {
                        "type": "image",
                        "name": [name_tag],
                        "push": False,
                    }
                )
                if LooseVersion(buildx_version) < LooseVersion("0.13.0"):
                    self.fail(
                        f"The output does not include an image with name {name_tag}, and the Docker"
                        f" buildx plugin has version {buildx_version} which only supports one output."
                    )

    def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
        self.client.fail(msg, **kwargs)

    def add_list_arg(self, args: list[str], option: str, values: list[str]) -> None:
        for value in values:
            args.extend([option, value])

    def add_args(self, args: list[str]) -> dict[str, t.Any]:
        environ_update = {}
        if not self.outputs:
            args.extend(["--tag", f"{self.name}:{self.tag}"])
        if self.dockerfile:
            args.extend(["--file", os.path.join(self.path, self.dockerfile)])
        if self.cache_from:
            self.add_list_arg(args, "--cache-from", self.cache_from)
        if self.pull:
            args.append("--pull")
        if self.network:
            args.extend(["--network", self.network])
        if self.nocache:
            args.append("--no-cache")
        if self.etc_hosts:
            self.add_list_arg(args, "--add-host", dict_to_list(self.etc_hosts, ":"))
        if self.args:
            self.add_list_arg(args, "--build-arg", dict_to_list(self.args))
        if self.target:
            args.extend(["--target", self.target])
        if self.platform:
            for platform in self.platform:
                args.extend(["--platform", platform])
        if self.shm_size:
            args.extend(["--shm-size", str(self.shm_size)])
        if self.labels:
            self.add_list_arg(args, "--label", dict_to_list(self.labels))
        if self.secrets:
            random_prefix = None
            for index, secret in enumerate(self.secrets):
                sid = secret["id"]
                if secret["type"] == "file":
                    src = secret["src"]
                    args.extend(["--secret", f"id={sid},type=file,src={src}"])
                if secret["type"] == "env":
                    # The "env" option names the environment variable to read the secret from
                    env = secret["env"]
                    args.extend(["--secret", f"id={sid},type=env,env={env}"])
                if secret["type"] == "value":
                    # We pass values on using environment variables. The user has been warned in the documentation
                    # that they should only use this mechanism when being comfortable with it.
                    if random_prefix is None:
                        # Use /dev/urandom to generate some entropy to make the environment variable's name unguessable
                        random_prefix = (
                            base64.b64encode(os.urandom(16))
                            .decode("utf-8")
                            .replace("=", "")
                        )
                    env_name = (
                        f"ANSIBLE_DOCKER_COMPOSE_ENV_SECRET_{random_prefix}_{index}"
                    )
                    environ_update[env_name] = secret["value"]
                    args.extend(["--secret", f"id={sid},type=env,env={env_name}"])
        if self.outputs:
            for output in self.outputs:
                subargs = []
                if output["type"] == "local":
                    dest = output["dest"]
                    subargs.extend(["type=local", f"dest={dest}"])
                if output["type"] == "tar":
                    dest = output["dest"]
                    subargs.extend(["type=tar", f"dest={dest}"])
                if output["type"] == "oci":
                    dest = output["dest"]
                    subargs.extend(["type=oci", f"dest={dest}"])
                if output["type"] == "docker":
                    subargs.append("type=docker")
                    dest = output["dest"]
                    if output["dest"] is not None:
                        subargs.append(f"dest={dest}")
                    if output["context"] is not None:
                        context = output["context"]
                        subargs.append(f"context={context}")
                if output["type"] == "image":
                    subargs.append("type=image")
                    if output["name"] is not None:
                        name = ",".join(output["name"])
                        subargs.append(f"name={name}")
                    if output["push"]:
                        subargs.append("push=true")
                if subargs:
                    args.extend(
                        ["--output", ",".join(_quote_csv(subarg) for subarg in subargs)]
                    )
        return environ_update

    def build_image(self) -> dict[str, t.Any]:
        image = self.client.find_image(self.name, self.tag)
        results: dict[str, t.Any] = {
            "changed": False,
            "actions": [],
            "image": image or {},
        }

        if image and self.rebuild == "never":
            return results

        results["changed"] = True
        if not self.check_mode:
            args = ["buildx", "build", "--progress", "plain"]
            environ_update = self.add_args(args)
            args.extend(["--", self.path])
            rc, stdout, stderr = self.client.call_cli(
                *args, environ_update=environ_update
            )
            if rc != 0:
                self.fail(
                    f"Building {self.name}:{self.tag} failed",
                    stdout=to_text(stdout),
                    stderr=to_text(stderr),
                    command=args,
                )
            results["stdout"] = to_text(stdout)
            results["stderr"] = to_text(stderr)
            results["image"] = self.client.find_image(self.name, self.tag) or {}
            results["command"] = args

        return results


def main() -> None:
    argument_spec = {
        "name": {"type": "str", "required": True},
        "tag": {"type": "str", "default": "latest"},
        "path": {"type": "path", "required": True},
        "dockerfile": {"type": "str"},
        "cache_from": {"type": "list", "elements": "str"},
        "pull": {"type": "bool", "default": False},
        "network": {"type": "str"},
        "nocache": {"type": "bool", "default": False},
        "etc_hosts": {"type": "dict"},
        "args": {"type": "dict"},
        "target": {"type": "str"},
        "platform": {"type": "list", "elements": "str"},
        "shm_size": {"type": "str"},
        "labels": {"type": "dict"},
        "rebuild": {"type": "str", "choices": ["never", "always"], "default": "never"},
        "secrets": {
            "type": "list",
            "elements": "dict",
            "options": {
                "id": {"type": "str", "required": True},
                "type": {
                    "type": "str",
                    "choices": ["file", "env", "value"],
                    "required": True,
                },
                "src": {"type": "path"},
                "env": {"type": "str"},
                "value": {"type": "str", "no_log": True},
            },
            "required_if": [
                ("type", "file", ["src"]),
                ("type", "env", ["env"]),
                ("type", "value", ["value"]),
            ],
            "mutually_exclusive": [
                ("src", "env", "value"),
            ],
            "no_log": False,
        },
        "outputs": {
            "type": "list",
            "elements": "dict",
            "options": {
                "type": {
                    "type": "str",
                    "choices": ["local", "tar", "oci", "docker", "image"],
                    "required": True,
                },
                "dest": {"type": "path"},
                "context": {"type": "str"},
                "name": {"type": "list", "elements": "str"},
                "push": {"type": "bool", "default": False},
            },
            "required_if": [
                ("type", "local", ["dest"]),
                ("type", "tar", ["dest"]),
                ("type", "oci", ["dest"]),
            ],
            "mutually_exclusive": [
                ("dest", "name"),
                ("dest", "push"),
                ("context", "name"),
                ("context", "push"),
            ],
        },
    }

    client = AnsibleModuleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        needs_api_version=False,
    )

    try:
        results = ImageBuilder(client).build_image()
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail(
            f"An unexpected Docker error occurred: {e}",
            exception=traceback.format_exc(),
        )


if __name__ == "__main__":
    main()

@ -0,0 +1,303 @@
#!/usr/bin/python
#
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
module: docker_image_export

short_description: Export (archive) Docker images
|
||||
|
||||
version_added: 3.7.0
|
||||
|
||||
description:
|
||||
- Creates an archive (tarball) from one or more Docker images.
|
||||
- This can be copied to another machine and loaded with M(community.docker.docker_image_load).
|
||||
extends_documentation_fragment:
|
||||
- community.docker._docker.api_documentation
|
||||
- community.docker._attributes
|
||||
- community.docker._attributes.actiongroup_docker
|
||||
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: none
|
||||
idempotent:
|
||||
support: partial
|
||||
details:
|
||||
- Whether the module is idempotent depends on the storage API used for images,
|
||||
which determines how the image ID is computed. The idempotency check requires
|
||||
that the image ID equals the ID stored in the archive's C(manifest.json).
|
||||
This seems to have worked fine with the default storage backend up to Docker 28,
|
||||
but appears to have changed in Docker 29.
|
||||
|
||||
options:
|
||||
names:
|
||||
description:
|
||||
- 'One or more image names. Name format will be one of: C(name), C(repository/name), C(registry_server:port/name). The
|
||||
name can optionally include the tag by appending C(:tag_name).'
|
||||
- Note that image IDs (hashes) can also be used.
|
||||
type: list
|
||||
elements: str
|
||||
required: true
|
||||
aliases:
|
||||
- name
|
||||
tag:
|
||||
description:
|
||||
- Tag for the image name O(name) that is to be exported.
|
||||
- If O(name)'s format is C(name:tag), then the tag value from O(name) will take precedence.
|
||||
type: str
|
||||
default: latest
|
||||
path:
|
||||
description:
|
||||
- The C(.tar) file the image should be exported to.
|
||||
type: path
|
||||
force:
|
||||
description:
|
||||
- Export the image even if the C(.tar) file already exists and seems to contain the right image.
|
||||
type: bool
|
||||
default: false
|
||||
|
||||
requirements:
|
||||
- "Docker API >= 1.25"
|
||||
|
||||
author:
|
||||
- Felix Fontein (@felixfontein)
|
||||
|
||||
seealso:
|
||||
- module: community.docker.docker_image
|
||||
- module: community.docker.docker_image_info
|
||||
- module: community.docker.docker_image_load
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
---
|
||||
- name: Export an image
|
||||
community.docker.docker_image_export:
|
||||
name: pacur/centos-7
|
||||
path: /tmp/centos-7.tar
|
||||
|
||||
- name: Export multiple images
|
||||
community.docker.docker_image_export:
|
||||
names:
|
||||
- hello-world:latest
|
||||
- pacur/centos-7:latest
|
||||
path: /tmp/various.tar
|
||||
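|
||||
# A minimal sketch using the force option documented above: re-export even if
|
||||
# the archive already seems to contain the right image.
|
||||
- name: Export an image, overwriting an existing archive
|
||||
  community.docker.docker_image_export:
|
||||
    name: pacur/centos-7
|
||||
    path: /tmp/centos-7.tar
|
||||
    force: true
|
||||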
"""
|
||||
|
||||
RETURN = r"""
|
||||
images:
|
||||
description: Image inspection results for the affected images.
|
||||
returned: success
|
||||
type: list
|
||||
elements: dict
|
||||
sample: []
|
||||
"""
|
||||
|
||||
import traceback
|
||||
import typing as t
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._api.constants import (
|
||||
DEFAULT_DATA_CHUNK_SIZE,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._api.errors import (
|
||||
DockerException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
|
||||
parse_repository_tag,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._common_api import (
|
||||
AnsibleDockerClient,
|
||||
RequestException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._image_archive import (
|
||||
ImageArchiveInvalidException,
|
||||
api_image_id,
|
||||
load_archived_image_manifest,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._util import (
|
||||
DockerBaseClass,
|
||||
is_image_name_id,
|
||||
is_valid_tag,
|
||||
)
|
||||
|
||||
|
||||
class ImageExportManager(DockerBaseClass):
|
||||
def __init__(self, client: AnsibleDockerClient) -> None:
|
||||
super().__init__()
|
||||
|
||||
self.client = client
|
||||
parameters = self.client.module.params
|
||||
self.check_mode = self.client.check_mode
|
||||
|
||||
self.path = parameters["path"]
|
||||
self.force = parameters["force"]
|
||||
self.tag = parameters["tag"]
|
||||
|
||||
if not is_valid_tag(self.tag, allow_empty=True):
|
||||
self.fail(f'"{self.tag}" is not a valid docker tag')
|
||||
|
||||
# If name contains a tag, it takes precedence over tag parameter.
|
||||
self.names = []
|
||||
for name in parameters["names"]:
|
||||
if is_image_name_id(name):
|
||||
self.names.append({"id": name, "joined": name})
|
||||
else:
|
||||
repo, repo_tag = parse_repository_tag(name)
|
||||
if not repo_tag:
|
||||
repo_tag = self.tag
|
||||
self.names.append(
|
||||
{"name": repo, "tag": repo_tag, "joined": f"{repo}:{repo_tag}"}
|
||||
)
|
||||
|
||||
if not self.names:
|
||||
self.fail("At least one image name must be specified")
|
||||
|
||||
def fail(self, msg: str) -> t.NoReturn:
|
||||
self.client.fail(msg)
|
||||
|
||||
def get_export_reason(self) -> str | None:
|
||||
if self.force:
|
||||
return "Exporting since force=true"
|
||||
|
||||
try:
|
||||
archived_images = load_archived_image_manifest(self.path)
|
||||
if archived_images is None:
|
||||
return "Overwriting since no image is present in archive"
|
||||
except ImageArchiveInvalidException as exc:
|
||||
self.log(f"Unable to extract manifest summary from archive: {exc}")
|
||||
return "Overwriting an unreadable archive file"
|
||||
|
||||
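# Require a one-to-one match between archived images and expected names:
|
||||
# every archived image must carry the expected ID and repo tag, and no
|
||||
# expected name may be missing from the archive.
|
||||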
left_names = list(self.names)
|
||||
for archived_image in archived_images:
|
||||
found = False
|
||||
for i, name in enumerate(left_names):
|
||||
if (
|
||||
name["id"] == api_image_id(archived_image.image_id)
|
||||
and [name["joined"]] == archived_image.repo_tags
|
||||
):
|
||||
del left_names[i]
|
||||
found = True
|
||||
break
|
||||
if not found:
|
||||
return f"Overwriting archive since it contains unexpected image {archived_image.image_id} named {', '.join(archived_image.repo_tags)}"
|
||||
if left_names:
|
||||
return f"Overwriting archive since it is missing image(s) {', '.join([name['joined'] for name in left_names])}"
|
||||
|
||||
return None
|
||||
|
||||
def write_chunks(self, chunks: t.Generator[bytes]) -> None:
|
||||
try:
|
||||
with open(self.path, "wb") as fd:
|
||||
for chunk in chunks:
|
||||
fd.write(chunk)
|
||||
except Exception as exc: # pylint: disable=broad-exception-caught
|
||||
self.fail(f"Error writing image archive {self.path} - {exc}")
|
||||
|
||||
def export_images(self) -> None:
|
||||
image_names = [name["joined"] for name in self.names]
|
||||
image_names_str = ", ".join(image_names)
|
||||
if len(image_names) == 1:
|
||||
self.log(f"Getting archive of image {image_names[0]}")
|
||||
try:
|
||||
chunks = self.client._stream_raw_result(
|
||||
self.client._get(
|
||||
self.client._url("/images/{0}/get", image_names[0]), stream=True
|
||||
),
|
||||
chunk_size=DEFAULT_DATA_CHUNK_SIZE,
|
||||
decode=False,
|
||||
)
|
||||
except Exception as exc: # pylint: disable=broad-exception-caught
|
||||
self.fail(f"Error getting image {image_names[0]} - {exc}")
|
||||
else:
|
||||
self.log(f"Getting archive of images {image_names_str}")
|
||||
try:
|
||||
chunks = self.client._stream_raw_result(
|
||||
self.client._get(
|
||||
self.client._url("/images/get"),
|
||||
stream=True,
|
||||
params={"names": image_names},
|
||||
),
|
||||
chunk_size=DEFAULT_DATA_CHUNK_SIZE,
|
||||
decode=False,
|
||||
)
|
||||
except Exception as exc: # pylint: disable=broad-exception-caught
|
||||
self.fail(f"Error getting images {image_names_str} - {exc}")
|
||||
|
||||
self.write_chunks(chunks)
|
||||
|
||||
def run(self) -> dict[str, t.Any]:
|
||||
tag = self.tag
|
||||
if not tag:
|
||||
tag = "latest"
|
||||
|
||||
images = []
|
||||
for name in self.names:
|
||||
if "id" in name:
|
||||
image = self.client.find_image_by_id(
|
||||
name["id"], accept_missing_image=True
|
||||
)
|
||||
else:
|
||||
image = self.client.find_image(name=name["name"], tag=name["tag"])
|
||||
if not image:
|
||||
self.fail(f"Image {name['joined']} not found")
|
||||
images.append(image)
|
||||
|
||||
# Will have a 'sha256:' prefix
|
||||
name["id"] = image["Id"]
|
||||
|
||||
results = {
|
||||
"changed": False,
|
||||
"images": images,
|
||||
}
|
||||
|
||||
reason = self.get_export_reason()
|
||||
if reason is not None:
|
||||
results["msg"] = reason
|
||||
results["changed"] = True
|
||||
|
||||
if not self.check_mode:
|
||||
self.export_images()
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def main() -> None:
|
||||
argument_spec = {
|
||||
"path": {"type": "path"},
|
||||
"force": {"type": "bool", "default": False},
|
||||
"names": {
|
||||
"type": "list",
|
||||
"elements": "str",
|
||||
"required": True,
|
||||
"aliases": ["name"],
|
||||
},
|
||||
"tag": {"type": "str", "default": "latest"},
|
||||
}
|
||||
|
||||
client = AnsibleDockerClient(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
try:
|
||||
results = ImageExportManager(client).run()
|
||||
client.module.exit_json(**results)
|
||||
except DockerException as e:
|
||||
client.fail(
|
||||
f"An unexpected Docker error occurred: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
f"An unexpected requests error occurred when trying to talk to the Docker daemon: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@ -0,0 +1,247 @@
|
|||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
module: docker_image_info
|
||||
|
||||
short_description: Inspect docker images
|
||||
|
||||
description:
|
||||
- Provide one or more image names, and the module will inspect each, returning an array of inspection results.
|
||||
- If an image does not exist locally, it will not appear in the results. If you want to check whether an image exists locally,
|
||||
you can call the module with the image name, then check whether the result list is empty (image does not exist) or has
|
||||
one element (the image exists locally).
|
||||
- The module will not attempt to pull images from registries. Use M(community.docker.docker_image) with O(community.docker.docker_image#module:source=pull)
|
||||
to ensure an image is pulled.
|
||||
notes:
|
||||
- This module was called C(docker_image_facts) before Ansible 2.8. The usage did not change.
|
||||
extends_documentation_fragment:
|
||||
- community.docker._docker.api_documentation
|
||||
- community.docker._attributes
|
||||
- community.docker._attributes.actiongroup_docker
|
||||
- community.docker._attributes.info_module
|
||||
- community.docker._attributes.idempotent_not_modify_state
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- An image name or a list of image names. Name format will be C(name[:tag]) or C(repository/name[:tag]), where C(tag)
|
||||
is optional. If a tag is not provided, V(latest) will be used. Instead of image names, also image IDs can be used.
|
||||
- If no name is provided, a list of all images will be returned.
|
||||
type: list
|
||||
elements: str
|
||||
|
||||
requirements:
|
||||
- "Docker API >= 1.25"
|
||||
|
||||
author:
|
||||
- Chris Houseknecht (@chouseknecht)
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
---
|
||||
- name: Inspect a single image
|
||||
community.docker.docker_image_info:
|
||||
name: pacur/centos-7
|
||||
|
||||
- name: Inspect multiple images
|
||||
community.docker.docker_image_info:
|
||||
name:
|
||||
- pacur/centos-7
|
||||
- sinatra
|
||||
register: result
|
||||
|
||||
- name: Make sure that both images pacur/centos-7 and sinatra exist locally
|
||||
ansible.builtin.assert:
|
||||
that:
|
||||
- result.images | length == 2
|
||||
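|
||||
# Calling the module without a name returns all images present on the daemon,
|
||||
# as documented above; the register name is only illustrative.
|
||||
- name: List all locally present images
|
||||
  community.docker.docker_image_info:
|
||||
  register: all_images
|
||||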
"""
|
||||
|
||||
RETURN = r"""
|
||||
images:
|
||||
description:
|
||||
- Inspection results for the selected images.
|
||||
- The list only contains inspection results of images existing locally.
|
||||
returned: always
|
||||
type: list
|
||||
elements: dict
|
||||
sample: [
|
||||
{
|
||||
"Architecture": "amd64",
|
||||
"Author": "",
|
||||
"Comment": "",
|
||||
"Config": {
|
||||
"AttachStderr": false,
|
||||
"AttachStdin": false,
|
||||
"AttachStdout": false,
|
||||
"Cmd": ["/etc/docker/registry/config.yml"],
|
||||
"Domainname": "",
|
||||
"Entrypoint": ["/bin/registry"],
|
||||
"Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],
|
||||
"ExposedPorts": {"5000/tcp": {}},
|
||||
"Hostname": "e5c68db50333",
|
||||
"Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
|
||||
"Labels": {},
|
||||
"OnBuild": [],
|
||||
"OpenStdin": false,
|
||||
"StdinOnce": false,
|
||||
"Tty": false,
|
||||
"User": "",
|
||||
"Volumes": {"/var/lib/registry": {}},
|
||||
"WorkingDir": "",
|
||||
},
|
||||
"Container": "e83a452b8fb89d78a25a6739457050131ca5c863629a47639530d9ad2008d610",
|
||||
"ContainerConfig": {
|
||||
"AttachStderr": false,
|
||||
"AttachStdin": false,
|
||||
"AttachStdout": false,
|
||||
"Cmd": ["/bin/sh", "-c", '#(nop) CMD ["/etc/docker/registry/config.yml"]'],
|
||||
"Domainname": "",
|
||||
"Entrypoint": ["/bin/registry"],
|
||||
"Env": ["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],
|
||||
"ExposedPorts": {"5000/tcp": {}},
|
||||
"Hostname": "e5c68db50333",
|
||||
"Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
|
||||
"Labels": {},
|
||||
"OnBuild": [],
|
||||
"OpenStdin": false,
|
||||
"StdinOnce": false,
|
||||
"Tty": false,
|
||||
"User": "",
|
||||
"Volumes": {"/var/lib/registry": {}},
|
||||
"WorkingDir": "",
|
||||
},
|
||||
"Created": "2016-03-08T21:08:15.399680378Z",
|
||||
"DockerVersion": "1.9.1",
|
||||
"GraphDriver": {
|
||||
"Data": null,
|
||||
"Name": "aufs",
|
||||
},
|
||||
"Id": "53773d8552f07b730f3e19979e32499519807d67b344141d965463a950a66e08",
|
||||
"Name": "registry:2",
|
||||
"Os": "linux",
|
||||
"Parent": "f0b1f729f784b755e7bf9c8c2e65d8a0a35a533769c2588f02895f6781ac0805",
|
||||
"RepoDigests": [],
|
||||
"RepoTags": ["registry:2"],
|
||||
"Size": 0,
|
||||
"VirtualSize": 165808884,
|
||||
},
|
||||
]
|
||||
"""
|
||||
|
||||
import traceback
|
||||
import typing as t
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._api.errors import (
|
||||
DockerException,
|
||||
NotFound,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
|
||||
parse_repository_tag,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._common_api import (
|
||||
AnsibleDockerClient,
|
||||
RequestException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._util import (
|
||||
DockerBaseClass,
|
||||
is_image_name_id,
|
||||
)
|
||||
|
||||
|
||||
class ImageManager(DockerBaseClass):
|
||||
def __init__(self, client: AnsibleDockerClient, results: dict[str, t.Any]) -> None:
|
||||
super().__init__()
|
||||
|
||||
self.client = client
|
||||
self.results = results
|
||||
self.name = self.client.module.params.get("name")
|
||||
self.log(f"Gathering facts for images: {self.name}")
|
||||
|
||||
if self.name:
|
||||
self.results["images"] = self.get_facts()
|
||||
else:
|
||||
self.results["images"] = self.get_all_images()
|
||||
|
||||
def fail(self, msg: str) -> t.NoReturn:
|
||||
self.client.fail(msg)
|
||||
|
||||
def get_facts(self) -> list[dict[str, t.Any]]:
|
||||
"""
|
||||
Look up and inspect each image name found in the names parameter.
|
||||
|
||||
:returns: array of image dictionaries
|
||||
"""
|
||||
|
||||
results = []
|
||||
|
||||
names = self.name
|
||||
if not isinstance(names, list):
|
||||
names = [names]
|
||||
|
||||
for name in names:
|
||||
if is_image_name_id(name):
|
||||
self.log(f"Fetching image {name} (ID)")
|
||||
image = self.client.find_image_by_id(name, accept_missing_image=True)
|
||||
else:
|
||||
repository, tag = parse_repository_tag(name)
|
||||
if not tag:
|
||||
tag = "latest"
|
||||
self.log(f"Fetching image {repository}:{tag}")
|
||||
image = self.client.find_image(name=repository, tag=tag)
|
||||
if image:
|
||||
results.append(image)
|
||||
return results
|
||||
|
||||
def get_all_images(self) -> list[dict[str, t.Any]]:
|
||||
results = []
|
||||
params = {
|
||||
"only_ids": 0,
|
||||
"all": 0,
|
||||
}
|
||||
images = self.client.get_json("/images/json", params=params)
|
||||
for image in images:
|
||||
try:
|
||||
inspection = self.client.get_json("/images/{0}/json", image["Id"])
|
||||
except NotFound:
|
||||
inspection = None
|
||||
except Exception as exc: # pylint: disable=broad-exception-caught
|
||||
self.fail(f"Error inspecting image {image['Id']} - {exc}")
|
||||
results.append(inspection)
|
||||
return results
|
||||
|
||||
|
||||
def main() -> None:
|
||||
argument_spec = {
|
||||
"name": {"type": "list", "elements": "str"},
|
||||
}
|
||||
|
||||
client = AnsibleDockerClient(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
try:
|
||||
results = {"changed": False, "images": []}
|
||||
|
||||
ImageManager(client, results)
|
||||
client.module.exit_json(**results)
|
||||
except DockerException as e:
|
||||
client.fail(
|
||||
f"An unexpected Docker error occurred: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
f"An unexpected requests error occurred when trying to talk to the Docker daemon: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@ -0,0 +1,211 @@
|
|||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
module: docker_image_load
|
||||
|
||||
short_description: Load docker image(s) from archives
|
||||
|
||||
version_added: 1.3.0
|
||||
|
||||
description:
|
||||
- Load one or more Docker images from a C(.tar) archive, and return information on the loaded image(s).
|
||||
extends_documentation_fragment:
|
||||
- community.docker._docker.api_documentation
|
||||
- community.docker._attributes
|
||||
- community.docker._attributes.actiongroup_docker
|
||||
|
||||
attributes:
|
||||
check_mode:
|
||||
support: none
|
||||
diff_mode:
|
||||
support: none
|
||||
idempotent:
|
||||
support: none
|
||||
|
||||
options:
|
||||
path:
|
||||
description:
|
||||
- The path to the C(.tar) archive to load Docker image(s) from.
|
||||
type: path
|
||||
required: true
|
||||
|
||||
requirements:
|
||||
- "Docker API >= 1.25"
|
||||
|
||||
author:
|
||||
- Felix Fontein (@felixfontein)
|
||||
|
||||
seealso:
|
||||
- module: community.docker.docker_image_export
|
||||
- module: community.docker.docker_image_push
|
||||
- module: community.docker.docker_image_remove
|
||||
- module: community.docker.docker_image_tag
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
---
|
||||
- name: Load all image(s) from the given tar file
|
||||
community.docker.docker_image_load:
|
||||
path: /path/to/images.tar
|
||||
register: result
|
||||
|
||||
- name: Print the loaded image names
|
||||
ansible.builtin.debug:
|
||||
msg: "Loaded the following images: {{ result.image_names | join(', ') }}"
|
||||
"""
|
||||
|
||||
RETURN = r"""
|
||||
image_names:
|
||||
description: List of image names and IDs loaded from the archive.
|
||||
returned: success
|
||||
type: list
|
||||
elements: str
|
||||
sample:
|
||||
- 'hello-world:latest'
|
||||
- 'sha256:e004c2cc521c95383aebb1fb5893719aa7a8eae2e7a71f316a4410784edb00a9'
|
||||
images:
|
||||
description: Image inspection results for the loaded images.
|
||||
returned: success
|
||||
type: list
|
||||
elements: dict
|
||||
sample: []
|
||||
"""
|
||||
|
||||
import errno
|
||||
import traceback
|
||||
import typing as t
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._api.errors import (
|
||||
DockerException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._common_api import (
|
||||
AnsibleDockerClient,
|
||||
RequestException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._util import (
|
||||
DockerBaseClass,
|
||||
is_image_name_id,
|
||||
)
|
||||
|
||||
|
||||
class ImageManager(DockerBaseClass):
|
||||
def __init__(self, client: AnsibleDockerClient, results: dict[str, t.Any]) -> None:
|
||||
super().__init__()
|
||||
|
||||
self.client = client
|
||||
self.results = results
|
||||
parameters = self.client.module.params
|
||||
self.check_mode = self.client.check_mode
|
||||
|
||||
self.path = parameters["path"]
|
||||
|
||||
self.load_images()
|
||||
|
||||
@staticmethod
|
||||
def _extract_output_line(line: dict[str, t.Any], output: list[str]) -> None:
|
||||
"""
|
||||
Extract a text line from the stream output and, if found, add it to output.
|
||||
"""
|
||||
if "stream" in line or "status" in line:
|
||||
# Make sure we have a string (assuming that line['stream'] and
|
||||
# line['status'] are either not defined, falsy, or a string)
|
||||
text_line = line.get("stream") or line.get("status") or ""
|
||||
output.extend(text_line.splitlines())
|
||||
|
||||
def load_images(self) -> None:
|
||||
"""
|
||||
Load images from a .tar archive
|
||||
"""
|
||||
# Load image(s) from file
|
||||
load_output: list[str] = []
|
||||
try:
|
||||
self.log(f"Opening image {self.path}")
|
||||
with open(self.path, "rb") as image_tar:
|
||||
self.log(f"Loading images from {self.path}")
|
||||
res = self.client._post(
|
||||
self.client._url("/images/load"), data=image_tar, stream=True
|
||||
)
|
||||
for line in self.client._stream_helper(res, decode=True):
|
||||
self.log(line, pretty_print=True)
|
||||
self._extract_output_line(line, load_output)
|
||||
except EnvironmentError as exc:
|
||||
if exc.errno == errno.ENOENT:
|
||||
self.client.fail(f"Error opening archive {self.path} - {exc}")
|
||||
self.client.fail(
|
||||
f"Error loading archive {self.path} - {exc}",
|
||||
stdout="\n".join(load_output),
|
||||
)
|
||||
except Exception as exc: # pylint: disable=broad-exception-caught
|
||||
self.client.fail(
|
||||
f"Error loading archive {self.path} - {exc}",
|
||||
stdout="\n".join(load_output),
|
||||
)
|
||||
|
||||
# Collect loaded images
|
||||
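# The daemon reports each image as "Loaded image: <name:tag>" or
|
||||
# "Loaded image ID: <sha256:...>"; both forms are parsed below.
|
||||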
loaded_images = []
|
||||
for line in load_output:
|
||||
if line.startswith("Loaded image:"):
|
||||
loaded_images.append(line[len("Loaded image:") :].strip())
|
||||
if line.startswith("Loaded image ID:"):
|
||||
loaded_images.append(line[len("Loaded image ID:") :].strip())
|
||||
|
||||
if not loaded_images:
|
||||
self.client.fail(
|
||||
"Detected no loaded images. Archive potentially corrupt?",
|
||||
stdout="\n".join(load_output),
|
||||
)
|
||||
|
||||
images = []
|
||||
for image_name in loaded_images:
|
||||
if is_image_name_id(image_name):
|
||||
images.append(self.client.find_image_by_id(image_name))
|
||||
elif ":" in image_name:
|
||||
image_name, tag = image_name.rsplit(":", 1)
|
||||
images.append(self.client.find_image(image_name, tag))
|
||||
else:
|
||||
self.client.module.warn(
|
||||
f'Image name "{image_name}" is neither ID nor has a tag'
|
||||
)
|
||||
|
||||
self.results["image_names"] = loaded_images
|
||||
self.results["images"] = images
|
||||
self.results["changed"] = True
|
||||
self.results["stdout"] = "\n".join(load_output)
|
||||
|
||||
|
||||
def main() -> None:
|
||||
client = AnsibleDockerClient(
|
||||
argument_spec={
|
||||
"path": {"type": "path", "required": True},
|
||||
},
|
||||
supports_check_mode=False,
|
||||
)
|
||||
|
||||
try:
|
||||
results: dict[str, t.Any] = {
|
||||
"image_names": [],
|
||||
"images": [],
|
||||
}
|
||||
|
||||
ImageManager(client, results)
|
||||
client.module.exit_json(**results)
|
||||
except DockerException as e:
|
||||
client.fail(
|
||||
f"An unexpected Docker error occurred: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
f"An unexpected requests error occurred when trying to talk to the Docker daemon: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@ -0,0 +1,233 @@
|
|||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
module: docker_image_pull
|
||||
|
||||
short_description: Pull Docker images from registries
|
||||
|
||||
version_added: 3.6.0
|
||||
|
||||
description:
|
||||
- Pulls a Docker image from a registry.
|
||||
extends_documentation_fragment:
|
||||
- community.docker._docker.api_documentation
|
||||
- community.docker._attributes
|
||||
- community.docker._attributes.actiongroup_docker
|
||||
|
||||
attributes:
|
||||
check_mode:
|
||||
support: partial
|
||||
details:
|
||||
- When trying to pull an image with O(pull=always), the module always reports a change in check mode.
|
||||
- When check mode is combined with diff mode, the pulled image's ID is always shown as V(unknown) in the diff.
|
||||
diff_mode:
|
||||
support: full
|
||||
idempotent:
|
||||
support: full
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Image name. Name format must be one of V(name), V(repository/name), or V(registry_server:port/name).
|
||||
- The name can optionally include the tag by appending V(:tag_name), or it can contain a digest by appending V(@hash:digest).
|
||||
type: str
|
||||
required: true
|
||||
tag:
|
||||
description:
|
||||
- Used to select an image when pulling. Defaults to V(latest).
|
||||
- If O(name) parameter format is C(name:tag) or C(image@hash:digest), then O(tag) will be ignored.
|
||||
type: str
|
||||
default: latest
|
||||
platform:
|
||||
description:
|
||||
- Ask for this specific platform when pulling.
|
||||
type: str
|
||||
pull:
|
||||
description:
|
||||
- Determines when to pull an image.
|
||||
- If V(always), will always pull the image.
|
||||
- If V(not_present), will only pull the image if no image of the name exists on the current Docker daemon, or if O(platform)
|
||||
does not match.
|
||||
type: str
|
||||
choices:
|
||||
- always
|
||||
- not_present
|
||||
default: always
|
||||
|
||||
requirements:
|
||||
- "Docker API >= 1.25"
|
||||
|
||||
author:
|
||||
- Felix Fontein (@felixfontein)
|
||||
|
||||
seealso:
|
||||
- module: community.docker.docker_image_push
|
||||
- module: community.docker.docker_image_remove
|
||||
- module: community.docker.docker_image_tag
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
---
|
||||
- name: Pull an image
|
||||
community.docker.docker_image_pull:
|
||||
name: pacur/centos-7
|
||||
# Select the platform for pulling. If not specified, will pull whatever Docker prefers.
|
||||
platform: amd64
|
||||
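|
||||
# A minimal sketch of the documented pull=not_present mode: the image is only
|
||||
# pulled when no matching local image exists.
|
||||
- name: Pull an image only if it is not already present
|
||||
  community.docker.docker_image_pull:
|
||||
    name: pacur/centos-7
|
||||
    pull: not_present
|
||||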
"""
|
||||
|
||||
RETURN = r"""
|
||||
image:
|
||||
description: Image inspection results for the affected image.
|
||||
returned: success
|
||||
type: dict
|
||||
sample: {}
|
||||
"""
|
||||
|
||||
import traceback
|
||||
import typing as t
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._api.errors import (
|
||||
DockerException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
|
||||
parse_repository_tag,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._common_api import (
|
||||
AnsibleDockerClient,
|
||||
RequestException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._platform import (
|
||||
compare_platform_strings,
|
||||
compose_platform_string,
|
||||
normalize_platform_string,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._util import (
|
||||
DockerBaseClass,
|
||||
is_image_name_id,
|
||||
is_valid_tag,
|
||||
)
|
||||
|
||||
|
||||
def image_info(image: dict[str, t.Any] | None) -> dict[str, t.Any]:
|
||||
result = {}
|
||||
if image:
|
||||
result["id"] = image["Id"]
|
||||
else:
|
||||
result["exists"] = False
|
||||
return result
|
||||
|
||||
|
||||
class ImagePuller(DockerBaseClass):
|
||||
def __init__(self, client: AnsibleDockerClient) -> None:
|
||||
super().__init__()
|
||||
|
||||
self.client = client
|
||||
self.check_mode = self.client.check_mode
|
||||
|
||||
parameters = self.client.module.params
|
||||
self.name: str = parameters["name"]
|
||||
self.tag: str = parameters["tag"]
|
||||
self.platform: str | None = parameters["platform"]
|
||||
self.pull_mode: t.Literal["always", "not_present"] = parameters["pull"]
|
||||
|
||||
if is_image_name_id(self.name):
|
||||
self.client.fail("Cannot pull an image by ID")
|
||||
if not is_valid_tag(self.tag, allow_empty=True):
|
||||
self.client.fail(f'"{self.tag}" is not a valid docker tag!')
|
||||
|
||||
# If name contains a tag, it takes precedence over tag parameter.
|
||||
repo, repo_tag = parse_repository_tag(self.name)
|
||||
if repo_tag:
|
||||
self.name = repo
|
||||
self.tag = repo_tag
|
||||
|
||||
def pull(self) -> dict[str, t.Any]:
|
||||
image = self.client.find_image(name=self.name, tag=self.tag)
|
||||
actions: list[str] = []
|
||||
diff = {"before": image_info(image), "after": image_info(image)}
|
||||
results = {
|
||||
"changed": False,
|
||||
"actions": actions,
|
||||
"image": image or {},
|
||||
"diff": diff,
|
||||
}
|
||||
|
||||
if image and self.pull_mode == "not_present":
|
||||
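# With pull=not_present, re-pull only when the requested platform
|
||||
# differs from the platform of the locally present image.
|
||||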
if self.platform is None:
|
||||
return results
|
||||
host_info = self.client.info()
|
||||
wanted_platform = normalize_platform_string(
|
||||
self.platform,
|
||||
daemon_os=host_info.get("OSType"),
|
||||
daemon_arch=host_info.get("Architecture"),
|
||||
)
|
||||
image_platform = compose_platform_string(
|
||||
os=image.get("Os"),
|
||||
arch=image.get("Architecture"),
|
||||
variant=image.get("Variant"),
|
||||
daemon_os=host_info.get("OSType"),
|
||||
daemon_arch=host_info.get("Architecture"),
|
||||
)
|
||||
if compare_platform_strings(wanted_platform, image_platform):
|
||||
return results
|
||||
|
||||
actions.append(f"Pulled image {self.name}:{self.tag}")
|
||||
if self.check_mode:
|
||||
results["changed"] = True
|
||||
diff["after"] = image_info({"Id": "unknown"})
|
||||
else:
|
||||
image, not_changed = self.client.pull_image(
|
||||
self.name, tag=self.tag, image_platform=self.platform
|
||||
)
|
||||
results["image"] = image
|
||||
results["changed"] = not not_changed
|
||||
diff["after"] = image_info(image)
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def main() -> None:
|
||||
argument_spec = {
|
||||
"name": {"type": "str", "required": True},
|
||||
"tag": {"type": "str", "default": "latest"},
|
||||
"platform": {"type": "str"},
|
||||
"pull": {
|
||||
"type": "str",
|
||||
"choices": ["always", "not_present"],
|
||||
"default": "always",
|
||||
},
|
||||
}
|
||||
|
||||
option_minimal_versions = {
|
||||
"platform": {"docker_api_version": "1.32"},
|
||||
}
|
||||
|
||||
client = AnsibleDockerClient(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
option_minimal_versions=option_minimal_versions,
|
||||
)
|
||||
|
||||
try:
|
||||
results = ImagePuller(client).pull()
|
||||
client.module.exit_json(**results)
|
||||
except DockerException as e:
|
||||
client.fail(
|
||||
f"An unexpected Docker error occurred: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
f"An unexpected requests error occurred when trying to talk to the Docker daemon: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@ -0,0 +1,205 @@
|
|||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
module: docker_image_push
|
||||
|
||||
short_description: Push Docker images to registries
|
||||
|
||||
version_added: 3.6.0
|
||||
|
||||
description:
|
||||
- Pushes a Docker image to a registry.
|
||||
extends_documentation_fragment:
|
||||
- community.docker._docker.api_documentation
|
||||
- community.docker._attributes
|
||||
- community.docker._attributes.actiongroup_docker
|
||||
|
||||
attributes:
|
||||
check_mode:
|
||||
support: none
|
||||
diff_mode:
|
||||
support: none
|
||||
idempotent:
|
||||
support: full
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Image name. Name format must be one of V(name), V(repository/name), or V(registry_server:port/name).
|
||||
- The name can optionally include the tag by appending V(:tag_name), or it can contain a digest by appending V(@hash:digest).
|
||||
type: str
|
||||
required: true
|
||||
tag:
|
||||
description:
|
||||
- Select which image to push. Defaults to V(latest).
|
||||
- If O(name) parameter format is C(name:tag) or C(image@hash:digest), then O(tag) will be ignored.
|
||||
type: str
|
||||
default: latest
|
||||
|
||||
requirements:
|
||||
- "Docker API >= 1.25"
|
||||
|
||||
author:
|
||||
- Felix Fontein (@felixfontein)
|
||||
|
||||
seealso:
|
||||
- module: community.docker.docker_image_pull
|
||||
- module: community.docker.docker_image_remove
|
||||
- module: community.docker.docker_image_tag
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
---
|
||||
- name: Push an image
|
||||
community.docker.docker_image_push:
|
||||
name: registry.example.com:5000/repo/image
|
||||
tag: latest
|
||||
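|
||||
# As documented above, a tag embedded in the name takes precedence over the
|
||||
# tag option; registry and repository here are placeholders.
|
||||
- name: Push an image whose name already includes the tag
|
||||
  community.docker.docker_image_push:
|
||||
    name: registry.example.com:5000/repo/image:dev
|
||||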
"""
|
||||
|
||||
RETURN = r"""
|
||||
image:
|
||||
description: Image inspection results for the affected image.
|
||||
returned: success
|
||||
type: dict
|
||||
sample: {}
|
||||
"""
|
||||
|
||||
import base64
|
||||
import traceback
|
||||
import typing as t
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._api.auth import (
|
||||
get_config_header,
|
||||
resolve_repository_name,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._api.errors import (
|
||||
DockerException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
|
||||
parse_repository_tag,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._common_api import (
|
||||
AnsibleDockerClient,
|
||||
RequestException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._util import (
|
||||
DockerBaseClass,
|
||||
is_image_name_id,
|
||||
is_valid_tag,
|
||||
)
|
||||
|
||||
|
||||
class ImagePusher(DockerBaseClass):
|
||||
def __init__(self, client: AnsibleDockerClient) -> None:
|
||||
super().__init__()
|
||||
|
||||
self.client = client
|
||||
self.check_mode = self.client.check_mode
|
||||
|
||||
parameters = self.client.module.params
|
||||
self.name: str = parameters["name"]
|
||||
self.tag: str = parameters["tag"]
|
||||
|
||||
if is_image_name_id(self.name):
|
||||
self.client.fail("Cannot push an image by ID")
|
||||
if not is_valid_tag(self.tag, allow_empty=True):
|
||||
self.client.fail(f'"{self.tag}" is not a valid docker tag!')
|
||||
|
||||
# If name contains a tag, it takes precedence over tag parameter.
|
||||
repo, repo_tag = parse_repository_tag(self.name)
|
||||
if repo_tag:
|
||||
self.name = repo
|
||||
self.tag = repo_tag
|
||||
|
||||
if is_image_name_id(self.tag):
|
||||
self.client.fail("Cannot push an image by digest")
|
||||
if not is_valid_tag(self.tag, allow_empty=False):
|
||||
self.client.fail(f'"{self.tag}" is not a valid docker tag!')
|
||||
|
||||
def push(self) -> dict[str, t.Any]:
|
||||
image = self.client.find_image(name=self.name, tag=self.tag)
|
||||
if not image:
|
||||
self.client.fail(f"Cannot find image {self.name}:{self.tag}")
|
||||
|
||||
actions: list[str] = []
|
||||
results: dict[str, t.Any] = {
|
||||
"changed": False,
|
||||
"actions": actions,
|
||||
"image": image,
|
||||
}
|
||||
|
||||
push_registry, push_repo = resolve_repository_name(self.name)
|
||||
try:
|
||||
actions.append(f"Pushed image {self.name}:{self.tag}")
|
||||
|
||||
headers = {}
|
||||
header = get_config_header(self.client, push_registry)
|
||||
if not header:
|
||||
# For some reason, starting with Docker 28.3.3, not specifying X-Registry-Auth seems to be invalid.
|
||||
# See https://github.com/moby/moby/issues/50614.
|
||||
header = base64.urlsafe_b64encode(b"{}")
|
||||
headers["X-Registry-Auth"] = header
|
||||
response = self.client._post_json(
|
||||
self.client._url("/images/{0}/push", self.name),
|
||||
data=None,
|
||||
headers=headers,
|
||||
stream=True,
|
||||
params={"tag": self.tag},
|
||||
)
|
||||
self.client._raise_for_status(response)
|
||||
for line in self.client._stream_helper(response, decode=True):
|
||||
self.log(line, pretty_print=True)
|
||||
if line.get("errorDetail"):
|
||||
raise RuntimeError(line["errorDetail"]["message"])
|
||||
status = line.get("status")
|
||||
if status in ("Pushing", "Pushed"):
|
||||
results["changed"] = True
|
||||
except Exception as exc: # pylint: disable=broad-exception-caught
|
||||
if "unauthorized" in str(exc):
|
||||
if "authentication required" in str(exc):
|
||||
self.client.fail(
|
||||
f"Error pushing image {push_registry}/{push_repo}:{self.tag} - {exc}. Try logging into {push_registry} first."
|
||||
)
|
||||
else:
|
||||
self.client.fail(
|
||||
f"Error pushing image {push_registry}/{push_repo}:{self.tag} - {exc}. Does the repository exist?"
|
||||
)
|
||||
self.client.fail(f"Error pushing image {self.name}:{self.tag}: {exc}")
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def main() -> None:
|
||||
argument_spec = {
|
||||
"name": {"type": "str", "required": True},
|
||||
"tag": {"type": "str", "default": "latest"},
|
||||
}
|
||||
|
||||
client = AnsibleDockerClient(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=False,
|
||||
)
|
||||
|
||||
try:
|
||||
results = ImagePusher(client).push()
|
||||
client.module.exit_json(**results)
|
||||
except DockerException as e:
|
||||
client.fail(
|
||||
f"An unexpected Docker error occurred: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
f"An unexpected requests error occurred when trying to talk to the Docker daemon: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@ -0,0 +1,292 @@
|
|||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
module: docker_image_remove
|
||||
|
||||
short_description: Remove Docker images
|
||||
|
||||
version_added: 3.6.0
|
||||
|
||||
description:
|
||||
- Remove Docker images from the Docker daemon.
|
||||
extends_documentation_fragment:
|
||||
- community.docker._docker.api_documentation
|
||||
- community.docker._attributes
|
||||
- community.docker._attributes.actiongroup_docker
|
||||
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: full
|
||||
idempotent:
|
||||
support: full
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- 'Image name. Name format will be one of: C(name), C(repository/name), C(registry_server:port/name). The name can
|
||||
optionally include the tag by appending C(:tag_name).'
|
||||
- Note that image IDs (hashes) can also be used.
|
||||
type: str
|
||||
required: true
|
||||
tag:
|
||||
description:
|
||||
- Tag for the image name O(name) that is to be removed.
|
||||
- If O(name)'s format is C(name:tag), then the tag value from O(name) will take precedence.
|
||||
type: str
|
||||
default: latest
|
||||
force:
|
||||
description:
|
||||
- Un-tag and remove all images matching the specified name.
|
||||
type: bool
|
||||
default: false
|
||||
prune:
|
||||
description:
|
||||
- Delete untagged parent images.
|
||||
type: bool
|
||||
default: true
|
||||
|
||||
requirements:
|
||||
- "Docker API >= 1.25"
|
||||
|
||||
author:
|
||||
- Felix Fontein (@felixfontein)
|
||||
|
||||
seealso:
|
||||
- module: community.docker.docker_image_load
|
||||
- module: community.docker.docker_image_pull
|
||||
- module: community.docker.docker_image_tag
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
---
|
||||
- name: Remove an image
|
||||
community.docker.docker_image_remove:
|
||||
name: pacur/centos-7
|
||||
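|
||||
# force untags and removes all images matching the name, as documented above.
|
||||
- name: Remove an image even if it is tagged multiple times
|
||||
  community.docker.docker_image_remove:
|
||||
    name: pacur/centos-7
|
||||
    force: true
|
||||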
"""
|
||||
|
||||
RETURN = r"""
|
||||
image:
|
||||
description:
|
||||
- Image inspection results for the affected image before removal.
|
||||
- Empty if the image was not found.
|
||||
returned: success
|
||||
type: dict
|
||||
sample: {}
|
||||
deleted:
|
||||
description:
|
||||
- The digests of the images that were deleted.
|
||||
returned: success
|
||||
type: list
|
||||
elements: str
|
||||
sample: []
|
||||
untagged:
|
||||
description:
|
||||
- The digests of the images that were untagged.
|
||||
returned: success
|
||||
type: list
|
||||
elements: str
|
||||
sample: []
|
||||
"""
|
||||
|
||||
import traceback
|
||||
import typing as t
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._api.errors import (
|
||||
DockerException,
|
||||
NotFound,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
|
||||
parse_repository_tag,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._common_api import (
|
||||
AnsibleDockerClient,
|
||||
RequestException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._util import (
|
||||
DockerBaseClass,
|
||||
is_image_name_id,
|
||||
is_valid_tag,
|
||||
)
|
||||
|
||||
|
||||
class ImageRemover(DockerBaseClass):
|
||||
def __init__(self, client: AnsibleDockerClient) -> None:
|
||||
super().__init__()
|
||||
|
||||
self.client = client
|
||||
self.check_mode = self.client.check_mode
|
||||
self.diff = self.client.module._diff
|
||||
|
||||
parameters = self.client.module.params
|
||||
self.name = parameters["name"]
|
||||
self.tag = parameters["tag"]
|
||||
self.force = parameters["force"]
|
||||
self.prune = parameters["prune"]
|
||||
|
||||
if not is_valid_tag(self.tag, allow_empty=True):
|
||||
self.fail(f'"{self.tag}" is not a valid docker tag')
|
||||
|
||||
# If name contains a tag, it takes precedence over tag parameter.
|
||||
if not is_image_name_id(self.name):
|
||||
repo, repo_tag = parse_repository_tag(self.name)
|
||||
if repo_tag:
|
||||
self.name = repo
|
||||
self.tag = repo_tag
|
||||
|
||||
def fail(self, msg: str) -> t.NoReturn:
|
||||
self.client.fail(msg)
|
||||
|
||||
def get_diff_state(self, image: dict[str, t.Any] | None) -> dict[str, t.Any]:
|
||||
if not image:
|
||||
return {"exists": False}
|
||||
return {
|
||||
"exists": True,
|
||||
"id": image["Id"],
|
||||
"tags": sorted(image.get("RepoTags") or []),
|
||||
"digests": sorted(image.get("RepoDigests") or []),
|
||||
}
|
||||
|
||||
def absent(self) -> dict[str, t.Any]:
|
||||
actions: list[str] = []
|
||||
deleted: list[str] = []
|
||||
untagged: list[str] = []
|
||||
results: dict[str, t.Any] = {
|
||||
"changed": False,
|
||||
"actions": actions,
|
||||
"image": {},
|
||||
"deleted": deleted,
|
||||
"untagged": untagged,
|
||||
}
|
||||
|
||||
name = self.name
|
||||
if is_image_name_id(name):
|
||||
image = self.client.find_image_by_id(name, accept_missing_image=True)
|
||||
else:
|
||||
image = self.client.find_image(name, self.tag)
|
||||
if self.tag:
|
||||
name = f"{self.name}:{self.tag}"
|
||||
|
||||
diff: dict[str, t.Any] = {}
|
||||
if self.diff:
|
||||
results["diff"] = diff
|
||||
diff["before"] = self.get_diff_state(image)
|
||||
|
||||
if not image:
|
||||
if self.diff:
|
||||
diff["after"] = self.get_diff_state(image)
|
||||
return results
|
||||
|
||||
results["changed"] = True
|
||||
actions.append(f"Removed image {name}")
|
||||
results["image"] = image
|
||||
|
||||
if not self.check_mode:
|
||||
try:
|
||||
res = self.client.delete_json(
|
||||
"/images/{0}",
|
||||
name,
|
||||
params={"force": self.force, "noprune": not self.prune},
|
||||
)
|
||||
except NotFound:
|
||||
# If the image vanished while we were trying to remove it, do not fail
|
||||
res = []
|
||||
except Exception as exc: # pylint: disable=broad-exception-caught
|
||||
self.fail(f"Error removing image {name} - {exc}")
|
||||
|
||||
for entry in res:
|
||||
if entry.get("Untagged"):
|
||||
untagged.append(entry["Untagged"])
|
||||
if entry.get("Deleted"):
|
||||
deleted.append(entry["Deleted"])
|
||||
|
||||
untagged[:] = sorted(untagged)
|
||||
deleted[:] = sorted(deleted)
|
||||
|
||||
if self.diff:
|
||||
image_after = self.client.find_image_by_id(
|
||||
image["Id"], accept_missing_image=True
|
||||
)
|
||||
diff["after"] = self.get_diff_state(image_after)
|
||||
|
||||
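# The remaining branches run in check mode and simulate the daemon's
|
||||
# response without touching the API.
|
||||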
elif is_image_name_id(name):
|
||||
deleted.append(image["Id"])
|
||||
# TODO: the following is no longer correct with Docker 29+...
|
||||
untagged[:] = sorted(
|
||||
(image.get("RepoTags") or []) + (image.get("RepoDigests") or [])
|
||||
)
|
||||
if not self.force and results["untagged"]:
|
||||
self.fail(
|
||||
"Cannot delete image by ID that is still in use - use force=true"
|
||||
)
|
||||
if self.diff:
|
||||
diff["after"] = self.get_diff_state({})
|
||||
|
||||
elif is_image_name_id(self.tag):
|
||||
untagged.append(name)
|
||||
if (
|
||||
len(image.get("RepoTags") or []) < 1
|
||||
and len(image.get("RepoDigests") or []) < 2
|
||||
):
|
||||
deleted.append(image["Id"])
|
||||
if self.diff:
|
||||
diff["after"] = self.get_diff_state(image)
|
||||
try:
|
||||
diff["after"]["digests"].remove(name)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
else:
|
||||
untagged.append(name)
|
||||
if (
|
||||
len(image.get("RepoTags") or []) < 2
|
||||
and len(image.get("RepoDigests") or []) < 1
|
||||
):
|
||||
deleted.append(image["Id"])
|
||||
if self.diff:
|
||||
diff["after"] = self.get_diff_state(image)
|
||||
try:
|
||||
diff["after"]["tags"].remove(name)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def main() -> None:
|
||||
argument_spec = {
|
||||
"name": {"type": "str", "required": True},
|
||||
"tag": {"type": "str", "default": "latest"},
|
||||
"force": {"type": "bool", "default": False},
|
||||
"prune": {"type": "bool", "default": True},
|
||||
}
|
||||
|
||||
client = AnsibleDockerClient(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
try:
|
||||
results = ImageRemover(client).absent()
|
||||
client.module.exit_json(**results)
|
||||
except DockerException as e:
|
||||
client.fail(
|
||||
f"An unexpected Docker error occurred: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
f"An unexpected requests error occurred when trying to talk to the Docker daemon: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@ -0,0 +1,304 @@
|
|||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
module: docker_image_tag
|
||||
|
||||
short_description: Tag Docker images with new names and/or tags
|
||||
|
||||
version_added: 3.6.0
|
||||
|
||||
description:
|
||||
- This module allows tagging Docker images with new names and/or tags.
|
||||
extends_documentation_fragment:
|
||||
- community.docker._docker.api_documentation
|
||||
- community.docker._attributes
|
||||
- community.docker._attributes.actiongroup_docker
|
||||
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: full
|
||||
idempotent:
|
||||
support: full
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- 'Image name. Name format will be one of: C(name), C(repository/name), C(registry_server:port/name). The name can
|
||||
optionally include the tag by appending C(:tag_name).'
|
||||
- Note that image IDs (hashes) can also be used.
|
||||
type: str
|
||||
required: true
|
||||
tag:
|
||||
description:
|
||||
- Tag for the image name O(name) that is to be tagged.
|
||||
- If O(name)'s format is C(name:tag), then the tag value from O(name) will take precedence.
|
||||
type: str
|
||||
default: latest
|
||||
repository:
|
||||
description:
|
||||
- List of new image names to tag the image as.
|
||||
- Expects format C(repository:tag). If no tag is provided, will use the value of the O(tag) parameter if present, or
|
||||
V(latest).
|
||||
type: list
|
||||
elements: str
|
||||
required: true
|
||||
existing_images:
|
||||
description:
|
||||
- Defines the behavior if the image to be tagged already exists and is another image than the one identified by O(name)
|
||||
and O(tag).
|
||||
- If set to V(keep), the tagged image is kept.
|
||||
- If set to V(overwrite), the tagged image is overwritten by the specified one.
|
||||
type: str
|
||||
choices:
|
||||
- keep
|
||||
- overwrite
|
||||
default: overwrite
|
||||
|
||||
requirements:
|
||||
- "Docker API >= 1.25"
|
||||
|
||||
author:
|
||||
- Felix Fontein (@felixfontein)
|
||||
|
||||
seealso:
|
||||
- module: community.docker.docker_image_push
|
||||
- module: community.docker.docker_image_remove
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
---
|
||||
- name: Tag Python 3.12 image with two new names
|
||||
community.docker.docker_image_tag:
|
||||
name: python:3.12
|
||||
repository:
|
||||
- python-3:3.12
|
||||
- local-registry:5000/python-3/3.12:latest
|
||||
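|
||||
# existing_images=keep leaves an already existing target tag untouched instead
|
||||
# of overwriting it; the target name here is only illustrative.
|
||||
- name: Tag an image, keeping any existing target tags
|
||||
  community.docker.docker_image_tag:
|
||||
    name: python:3.12
|
||||
    repository:
|
||||
      - python-3:candidate
|
||||
    existing_images: keep
|
||||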
"""
|
||||
|
||||
RETURN = r"""
|
||||
image:
|
||||
description: Image inspection results for the affected image.
|
||||
returned: success
|
||||
type: dict
|
||||
sample: {}
|
||||
tagged_images:
|
||||
description:
|
||||
- A list of images that got tagged.
|
||||
returned: success
|
||||
type: list
|
||||
elements: str
|
||||
sample:
|
||||
- python-3:3.12
|
||||
"""
|
||||
|
||||
import traceback
|
||||
import typing as t
|
||||
|
||||
from ansible.module_utils.common.text.formatters import human_to_bytes
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._api.errors import (
|
||||
DockerException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
|
||||
parse_repository_tag,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._common_api import (
|
||||
AnsibleDockerClient,
|
||||
RequestException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._util import (
|
||||
DockerBaseClass,
|
||||
is_image_name_id,
|
||||
is_valid_tag,
|
||||
)
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
def convert_to_bytes(
|
||||
value: str | None,
|
||||
module: AnsibleModule,
|
||||
name: str,
|
||||
unlimited_value: int | None = None,
|
||||
) -> int | None:
    if value is None:
        return value
    try:
        if unlimited_value is not None and value in ("unlimited", str(unlimited_value)):
            return unlimited_value
        return human_to_bytes(value)
    except ValueError as exc:
        module.fail_json(msg=f"Failed to convert {name} to bytes: {exc}")


def image_info(name: str, tag: str, image: dict[str, t.Any] | None) -> dict[str, t.Any]:
    result: dict[str, t.Any] = {"name": name, "tag": tag}
    if image:
        result["id"] = image["Id"]
    else:
        result["exists"] = False
    return result


class ImageTagger(DockerBaseClass):
    def __init__(self, client: AnsibleDockerClient) -> None:
        super().__init__()

        self.client = client
        parameters = self.client.module.params
        self.check_mode = self.client.check_mode

        self.name = parameters["name"]
        self.tag = parameters["tag"]
        if not is_valid_tag(self.tag, allow_empty=True):
            self.fail(f'"{self.tag}" is not a valid docker tag')

        # If name contains a tag, it takes precedence over the tag parameter.
        if not is_image_name_id(self.name):
            repo, repo_tag = parse_repository_tag(self.name)
            if repo_tag:
                self.name = repo
                self.tag = repo_tag

        self.keep_existing_images = parameters["existing_images"] == "keep"

        # Make sure names in repository are valid images, and add tag if needed
        self.repositories = []
        for i, repository in enumerate(parameters["repository"]):
            if is_image_name_id(repository):
                self.fail(
                    f"repository[{i + 1}] must not be an image ID; got: {repository}"
                )
            repo, repo_tag = parse_repository_tag(repository)
            if not repo_tag:
                repo_tag = parameters["tag"]
            elif not is_valid_tag(repo_tag, allow_empty=False):
                self.fail(
                    f"repository[{i + 1}] must not have a digest; got: {repository}"
                )
            self.repositories.append((repo, repo_tag))

    def fail(self, msg: str) -> t.NoReturn:
        self.client.fail(msg)

    def tag_image(
        self, image: dict[str, t.Any], name: str, tag: str
    ) -> tuple[bool, str, dict[str, t.Any] | None]:
        tagged_image = self.client.find_image(name=name, tag=tag)
        if tagged_image:
            # Idempotency checks
            if tagged_image["Id"] == image["Id"]:
                return (
                    False,
                    f"target image already exists ({tagged_image['Id']}) and is as expected",
                    tagged_image,
                )
            if self.keep_existing_images:
                return (
                    False,
                    f"target image already exists ({tagged_image['Id']}) and is not as expected, but kept",
                    tagged_image,
                )
            msg = f"target image existed ({tagged_image['Id']}) and was not as expected"
        else:
            msg = "target image did not exist"

        if not self.check_mode:
            try:
                params = {
                    "tag": tag,
                    "repo": name,
                    "force": True,
                }
                res = self.client._post(
                    self.client._url("/images/{0}/tag", image["Id"]), params=params
                )
                self.client._raise_for_status(res)
                if res.status_code != 201:
                    raise RuntimeError("Tag operation failed.")
            except Exception as exc:  # pylint: disable=broad-exception-caught
                self.fail(f"Error: failed to tag image as {name}:{tag} - {exc}")

        return True, msg, tagged_image

    def tag_images(self) -> dict[str, t.Any]:
        if is_image_name_id(self.name):
            image = self.client.find_image_by_id(self.name, accept_missing_image=False)
        else:
            image = self.client.find_image(name=self.name, tag=self.tag)
            if not image:
                self.fail(f"Cannot find image {self.name}:{self.tag}")
        assert image is not None

        before: list[dict[str, t.Any]] = []
        after: list[dict[str, t.Any]] = []
        tagged_images: list[str] = []
        actions: list[str] = []
        results: dict[str, t.Any] = {
            "changed": False,
            "actions": actions,
            "image": image,
            "tagged_images": tagged_images,
            "diff": {"before": {"images": before}, "after": {"images": after}},
        }
        for repository, tag in self.repositories:
            tagged, msg, old_image = self.tag_image(image, repository, tag)
            before.append(image_info(repository, tag, old_image))
            after.append(image_info(repository, tag, image if tagged else old_image))
            if tagged:
                results["changed"] = True
                actions.append(
                    f"Tagged image {image['Id']} as {repository}:{tag}: {msg}"
                )
                tagged_images.append(f"{repository}:{tag}")
            else:
                actions.append(
                    f"Not tagged image {image['Id']} as {repository}:{tag}: {msg}"
                )

        return results


def main() -> None:
    argument_spec = {
        "name": {"type": "str", "required": True},
        "tag": {"type": "str", "default": "latest"},
        "repository": {"type": "list", "elements": "str", "required": True},
        "existing_images": {
            "type": "str",
            "choices": ["keep", "overwrite"],
            "default": "overwrite",
        },
    }

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    try:
        results = ImageTagger(client).tag_images()
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail(
            f"An unexpected Docker error occurred: {e}",
            exception=traceback.format_exc(),
        )
    except RequestException as e:
        client.fail(
            f"An unexpected requests error occurred when trying to talk to the Docker daemon: {e}",
            exception=traceback.format_exc(),
        )


if __name__ == "__main__":
    main()

@@ -0,0 +1,468 @@
#!/usr/bin/python
#
# Copyright (c) 2016 Olaf Kilian <olaf.kilian@symanex.com>
# Chris Houseknecht, <house@redhat.com>
# James Tanner, <jtanner@redhat.com>
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
module: docker_login
short_description: Log into a Docker registry
description:
  - Provides functionality similar to the C(docker login) command.
  - Authenticate with a Docker registry and add the credentials to your local Docker config file, or to the credential
    store associated with the registry. Adding the credentials to the config file or credential store allows future
    connections to the registry using tools such as Ansible's Docker modules, the Docker CLI, and the Docker SDK for Python
    without needing to provide credentials.
  - Running in check mode will perform the authentication without updating the config file.
extends_documentation_fragment:
  - community.docker._docker.api_documentation
  - community.docker._attributes
  - community.docker._attributes.actiongroup_docker

attributes:
  check_mode:
    support: full
  diff_mode:
    support: none
  idempotent:
    support: full

options:
  registry_url:
    description:
      - The registry URL.
    type: str
    default: "https://index.docker.io/v1/"
    aliases:
      - registry
      - url
  username:
    description:
      - The username for the registry account.
      - Required when O(state=present).
    type: str
  password:
    description:
      - The plaintext password for the registry account.
      - Required when O(state=present).
    type: str
  reauthorize:
    description:
      - Refresh existing authentication found in the configuration file.
    type: bool
    default: false
    aliases:
      - reauth
  config_path:
    description:
      - Custom path to the Docker CLI configuration file.
    type: path
    default: ~/.docker/config.json
    aliases:
      - dockercfg_path
  state:
    description:
      - This controls the current state of the user. V(present) will log a user in, V(absent) will log them out.
      - To log out you only need the registry server, which defaults to DockerHub.
      - Before 2.1 you could ONLY log in.
      - Docker does not support 'logout' with a custom config file.
    type: str
    default: 'present'
    choices: ['present', 'absent']

requirements:
  - "Docker API >= 1.25"
author:
  - Olaf Kilian (@olsaki) <olaf.kilian@symanex.com>
  - Chris Houseknecht (@chouseknecht)
"""

EXAMPLES = r"""
---
- name: Log into DockerHub
  community.docker.docker_login:
    username: docker
    password: rekcod

- name: Log into private registry and force re-authorization
  community.docker.docker_login:
    registry_url: your.private.registry.io
    username: yourself
    password: secrets3
    reauthorize: true

- name: Log into DockerHub using a custom config file
  community.docker.docker_login:
    username: docker
    password: rekcod
    config_path: /tmp/.mydockercfg

- name: Log out of DockerHub
  community.docker.docker_login:
    state: absent
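
# Illustrative addition (not part of the upstream examples): since check mode
# authenticates without touching the config file, it can be used to verify
# credentials non-destructively. The variables used here are hypothetical.
- name: Verify registry credentials without updating the config file
  community.docker.docker_login:
    registry_url: your.private.registry.io
    username: "{{ registry_user }}"
    password: "{{ registry_password }}"
  check_mode: true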
"""

RETURN = r"""
login_result:
  description: Result from the login.
  returned: when O(state=present)
  type: dict
  sample: {"serveraddress": "localhost:5000", "username": "testuser"}
"""

import base64
import json
import os
import traceback
import typing as t

from ansible.module_utils.common.text.converters import to_bytes, to_text

from ansible_collections.community.docker.plugins.module_utils._api import auth
from ansible_collections.community.docker.plugins.module_utils._api.auth import (
    decode_auth,
)
from ansible_collections.community.docker.plugins.module_utils._api.credentials.errors import (
    CredentialsNotFound,
)
from ansible_collections.community.docker.plugins.module_utils._api.credentials.store import (
    Store,
)
from ansible_collections.community.docker.plugins.module_utils._api.errors import (
    DockerException,
)
from ansible_collections.community.docker.plugins.module_utils._common_api import (
    AnsibleDockerClient,
    RequestException,
)
from ansible_collections.community.docker.plugins.module_utils._util import (
    DEFAULT_DOCKER_REGISTRY,
    DockerBaseClass,
)


class DockerFileStore:
    """
    A custom credential store class that implements only the functionality we need to
    update the Docker config file when no credential helper is provided.
    """

    program = "<legacy config>"

    def __init__(self, config_path: str) -> None:
        self._config_path = config_path

        # Make sure we have a minimal config if none is available.
        self._config: dict[str, t.Any] = {"auths": {}}

        try:
            # Attempt to read the existing config.
            with open(self._config_path, "rt", encoding="utf-8") as f:
                config = json.load(f)
        except (ValueError, IOError):
            # No config was found, or an invalid config was found, so we ignore it.
            config = {}

        # Update our internal config with whatever was loaded.
        self._config.update(config)

    @property
    def config_path(self) -> str:
        """
        Return the config path configured in this DockerFileStore instance.
        """

        return self._config_path

    def get(self, server: str) -> dict[str, t.Any]:
        """
        Retrieve credentials for `server` if there are any in the config file.
        Otherwise raise a `CredentialsNotFound` error.
        """

        server_creds = self._config["auths"].get(server)
        if not server_creds:
            raise CredentialsNotFound("No matching credentials")

        (username, password) = decode_auth(server_creds["auth"])

        return {"Username": username, "Secret": password}

    def _write(self) -> None:
        """
        Write config back out to disk.
        """
        # Make sure directory exists
        directory = os.path.dirname(self._config_path)
        if not os.path.exists(directory):
            os.makedirs(directory)
        # Write config; make sure it has permissions 0o600
        content = json.dumps(self._config, indent=4, sort_keys=True).encode("utf-8")
        f = os.open(self._config_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
        try:
            os.write(f, content)
        finally:
            os.close(f)

    def store(self, server: str, username: str, password: str) -> None:
        """
        Add credentials for `server` to the current configuration.
        """

        b64auth = base64.b64encode(to_bytes(username) + b":" + to_bytes(password))
        tauth = to_text(b64auth)

        # build up the auth structure
        if "auths" not in self._config:
            self._config["auths"] = {}

        self._config["auths"][server] = {"auth": tauth}

        self._write()

    def erase(self, server: str) -> None:
        """
        Remove credentials for the given server from the configuration.
        """

        if "auths" in self._config and server in self._config["auths"]:
            self._config["auths"].pop(server)
            self._write()


class LoginManager(DockerBaseClass):
    def __init__(self, client: AnsibleDockerClient, results: dict[str, t.Any]) -> None:
        super().__init__()

        self.client = client
        self.results = results
        parameters = self.client.module.params
        self.check_mode = self.client.check_mode

        self.registry_url: str = parameters.get("registry_url")
        self.username: str | None = parameters.get("username")
        self.password: str | None = parameters.get("password")
        self.reauthorize: bool = parameters.get("reauthorize")
        self.config_path: str = parameters.get("config_path")
        self.state: t.Literal["present", "absent"] = parameters.get("state")

    def run(self) -> None:
        """
        Do the actual work of this task here. This allows instantiation for partial
        testing.
        """

        if self.state == "present":
            self.login()
        else:
            self.logout()

    def fail(self, msg: str) -> t.NoReturn:
        self.client.fail(msg)

    def _login(self, reauth: bool) -> dict[str, t.Any]:
        if self.config_path and os.path.exists(self.config_path):
            self.client._auth_configs = auth.load_config(
                self.config_path, credstore_env=self.client.credstore_env
            )
        elif not self.client._auth_configs or self.client._auth_configs.is_empty:
            self.client._auth_configs = auth.load_config(
                credstore_env=self.client.credstore_env
            )

        authcfg = self.client._auth_configs.resolve_authconfig(self.registry_url)
        # If we found an existing auth config for this registry and username
        # combination, we can return it immediately unless reauth is requested.
        if authcfg and authcfg.get("username") == self.username and not reauth:
            return authcfg

        req_data = {
            "username": self.username,
            "password": self.password,
            "email": None,
            "serveraddress": self.registry_url,
        }

        response = self.client._post_json(self.client._url("/auth"), data=req_data)
        if response.status_code == 200:
            self.client._auth_configs.add_auth(
                self.registry_url or auth.INDEX_NAME, req_data
            )
        return self.client._result(response, get_json=True)

    def login(self) -> None:
        """
        Log into the registry with the provided username/password. On success, update the
        config file with the new authorization.

        :return: None
        """

        self.results["actions"].append(f"Logged into {self.registry_url}")
        self.log(f"Log into {self.registry_url} with username {self.username}")
        try:
            response = self._login(self.reauthorize)
        except Exception as exc:  # pylint: disable=broad-exception-caught
            self.fail(
                f"Logging into {self.registry_url} for user {self.username} failed - {exc}"
            )

        # If the user is already logged in, the response contains the password for the user
        if "password" in response:
            # The correct password is returned if the user is already logged in, even when
            # a wrong password was given. So if it returns a different password than the
            # one we passed, and the user did not request to reauthorize, do it anyway.
            if not self.reauthorize and response["password"] != self.password:
                try:
                    response = self._login(True)
                except Exception as exc:  # pylint: disable=broad-exception-caught
                    self.fail(
                        f"Logging into {self.registry_url} for user {self.username} failed - {exc}"
                    )
            response.pop("password", None)
        self.results["login_result"] = response

        self.update_credentials()

    def logout(self) -> None:
        """
        Log out of the registry. On success, update the config file.

        :return: None
        """

        # Get the configuration store.
        store = self.get_credential_store_instance(self.registry_url, self.config_path)

        try:
            store.get(self.registry_url)
        except CredentialsNotFound:
            # get raises an exception on not found.
            self.log(f"Credentials for {self.registry_url} not present, doing nothing.")
            self.results["changed"] = False
            return

        if not self.check_mode:
            store.erase(self.registry_url)
        self.results["changed"] = True

    def update_credentials(self) -> None:
        """
        If the authorization is not stored, attempt to store the authorization values via
        the appropriate credential helper or in the config file.

        :return: None
        """
        # This is only called from login()
        assert self.username is not None
        assert self.password is not None

        # Check to see if credentials already exist.
        store = self.get_credential_store_instance(self.registry_url, self.config_path)

        try:
            current = store.get(self.registry_url)
        except CredentialsNotFound:
            # get raises an exception on not found.
            current = {"Username": "", "Secret": ""}

        if (
            current["Username"] != self.username
            or current["Secret"] != self.password
            or self.reauthorize
        ):
            if not self.check_mode:
                store.store(self.registry_url, self.username, self.password)
            self.log(
                f"Writing credentials to configured helper {store.program} for {self.registry_url}"
            )
            self.results["actions"].append(
                f"Wrote credentials to configured helper {store.program} for {self.registry_url}"
            )
            self.results["changed"] = True

    def get_credential_store_instance(
        self, registry: str, dockercfg_path: str
    ) -> Store | DockerFileStore:
        """
        Return the credential store instance used by the given registry.

        :return: A Store if a credential helper is configured, otherwise a DockerFileStore
        :rtype: Union[Store, DockerFileStore]
        """

        credstore_env = self.client.credstore_env

        config = auth.load_config(config_path=dockercfg_path)

        store_name = auth.get_credential_store(config, registry)

        # Make sure that there is a credential helper before trying to instantiate a
        # Store object.
        if store_name:
            self.log(f"Found credential store {store_name}")
            return Store(store_name, environment=credstore_env)

        return DockerFileStore(dockercfg_path)


def main() -> None:
    argument_spec = {
        "registry_url": {
            "type": "str",
            "default": DEFAULT_DOCKER_REGISTRY,
            "aliases": ["registry", "url"],
        },
        "username": {"type": "str"},
        "password": {"type": "str", "no_log": True},
        "reauthorize": {"type": "bool", "default": False, "aliases": ["reauth"]},
        "state": {
            "type": "str",
            "default": "present",
            "choices": ["present", "absent"],
        },
        "config_path": {
            "type": "path",
            "default": "~/.docker/config.json",
            "aliases": ["dockercfg_path"],
        },
    }

    required_if = [
        ("state", "present", ["username", "password"]),
    ]

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=required_if,
    )

    try:
        results = {"changed": False, "actions": [], "login_result": {}}

        manager = LoginManager(client, results)
        manager.run()

        if "actions" in results:
            del results["actions"]
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail(
            f"An unexpected Docker error occurred: {e}",
            exception=traceback.format_exc(),
        )
    except RequestException as e:
        client.fail(
            f"An unexpected requests error occurred when trying to talk to the Docker daemon: {e}",
            exception=traceback.format_exc(),
        )


if __name__ == "__main__":
    main()

@@ -0,0 +1,886 @@
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
module: docker_network
short_description: Manage Docker networks
description:
  - Create/remove Docker networks and connect containers to them.
  - Performs largely the same function as the C(docker network) CLI subcommand.
extends_documentation_fragment:
  - community.docker._docker.api_documentation
  - community.docker._attributes
  - community.docker._attributes.actiongroup_docker

attributes:
  check_mode:
    support: full
  diff_mode:
    support: full
  idempotent:
    support: partial
    details:
      - If O(force=true) the module is not idempotent.

options:
  name:
    description:
      - Name of the network to operate on.
    type: str
    required: true
    aliases:
      - network_name

  config_from:
    description:
      - Specifies the config-only network whose configuration should be used.
    type: str
    version_added: 3.10.0

  config_only:
    description:
      - Sets that this is a config-only network.
    type: bool
    version_added: 3.10.0

  connected:
    description:
      - List of container names or container IDs to connect to a network.
      - Please note that the module only makes sure that these containers are connected to the network, but does not care
        about connection options. If you rely on specific IP addresses and so on, use the M(community.docker.docker_container)
        module to ensure your containers are correctly connected to this network.
    type: list
    elements: str
    default: []
    aliases:
      - containers

  driver:
    description:
      - Specify the type of network. Docker provides bridge and overlay drivers, but 3rd party drivers can also be used.
    type: str
    default: bridge

  driver_options:
    description:
      - Dictionary of network settings. Consult Docker docs for valid options and values.
    type: dict
    default: {}

  force:
    description:
      - With state V(present), the module will disconnect all containers from an existing network, delete the network, and
        re-create it.
      - This option is required if you have changed the IPAM or driver options and want an existing network to be updated
        to use the new options.
    type: bool
    default: false

  appends:
    description:
      - By default the connected list is canonical, meaning containers not on the list are removed from the network.
      - Use O(appends) to leave existing containers connected.
    type: bool
    default: false
    aliases:
      - incremental

  enable_ipv4:
    description:
      - Enable IPv4 networking.
      - This is enabled by default, but can be explicitly disabled.
      - Requires Docker API 1.47 or newer.
    type: bool
    version_added: 4.5.0

  enable_ipv6:
    description:
      - Enable IPv6 networking.
    type: bool

  ingress:
    description:
      - Enable Swarm routing-mesh.
    type: bool
    version_added: 4.2.0

  ipam_driver:
    description:
      - Specify an IPAM driver.
    type: str

  ipam_driver_options:
    description:
      - Dictionary of IPAM driver options.
    type: dict

  ipam_config:
    description:
      - List of IPAM config blocks. Consult L(Docker docs,https://docs.docker.com/compose/compose-file/compose-file-v2/#ipam)
        for valid options and values. Note that O(ipam_config[].iprange) is spelled differently here (we use the notation
        from the Docker SDK for Python).
    type: list
    elements: dict
    suboptions:
      subnet:
        description:
          - IP subnet in CIDR notation.
        type: str
      iprange:
        description:
          - IP address range in CIDR notation.
        type: str
      gateway:
        description:
          - IP gateway address.
        type: str
      aux_addresses:
        description:
          - Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
        type: dict

  state:
    description:
      - V(absent) deletes the network. If a network has connected containers, these will be detached from the network.
      - V(present) creates the network, if it does not already exist with the specified parameters, and connects the list
        of containers provided by the O(connected) parameter. Containers not on the list will be disconnected. An empty list
        will leave no containers connected to the network. Use the O(appends) option to leave existing containers connected.
        Use the O(force) option to force re-creation of the network.
    type: str
    default: present
    choices:
      - absent
      - present

  internal:
    description:
      - Restrict external access to the network.
    type: bool

  labels:
    description:
      - Dictionary of labels.
    type: dict
    default: {}

  scope:
    description:
      - Specify the network's scope.
    type: str
    choices:
      - local
      - global
      - swarm

  attachable:
    description:
      - If enabled, and the network is in the global scope, non-service containers on worker nodes will be able to connect
        to the network.
    type: bool

notes:
  - When network options are changed, the module disconnects all containers from the network, deletes the network, and re-creates
    the network. It does not try to reconnect containers, except the ones listed in O(connected), and even for these, it
    does not consider specific connection options like fixed IP addresses or MAC addresses. If you need more control over
    how the containers are connected to the network, use the M(community.docker.docker_container) module in a loop over your
    containers to make sure they are connected properly.
  - The module does not support Docker Swarm. This means that it will not try to disconnect or reconnect services. If services
    are connected to the network, deleting the network will fail. When network options are changed, the network has to be
    deleted and re-created, so this will fail as well.
author:
  - "Ben Keith (@keitwb)"
  - "Chris Houseknecht (@chouseknecht)"
  - "Dave Bendit (@DBendit)"

requirements:
  - "Docker API >= 1.25"
"""

EXAMPLES = r"""
---
- name: Create a network
  community.docker.docker_network:
    name: network_one

- name: Remove all but selected list of containers
  community.docker.docker_network:
    name: network_one
    connected:
      - container_a
      - container_b
      - container_c

- name: Remove a single container
  community.docker.docker_network:
    name: network_one
    connected: "{{ fulllist|difference(['container_a']) }}"

- name: Add a container to a network, leaving existing containers connected
  community.docker.docker_network:
    name: network_one
    connected:
      - container_a
    appends: true

- name: Create a network with driver options
  community.docker.docker_network:
    name: network_two
    driver_options:
      com.docker.network.bridge.name: net2

- name: Create a network with custom IPAM config
  community.docker.docker_network:
    name: network_three
    ipam_config:
      - subnet: 172.23.27.0/24
        gateway: 172.23.27.2
        iprange: 172.23.27.0/26
        aux_addresses:
          host1: 172.23.27.3
          host2: 172.23.27.4

- name: Create a network with labels
  community.docker.docker_network:
    name: network_four
    labels:
      key1: value1
      key2: value2

- name: Create a network with IPv6 IPAM config
  community.docker.docker_network:
    name: network_ipv6_one
    enable_ipv6: true
    ipam_config:
      - subnet: fdd1:ac8c:0557:7ce1::/64

- name: Create a network with IPv6 and custom IPv4 IPAM config
  community.docker.docker_network:
    name: network_ipv6_two
    enable_ipv6: true
    ipam_config:
      - subnet: 172.24.27.0/24
      - subnet: fdd1:ac8c:0557:7ce2::/64

- name: Delete a network, disconnecting all containers
  community.docker.docker_network:
    name: network_one
    state: absent
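
# Illustrative addition (not part of the upstream examples): changing IPAM or
# driver options on an existing network requires force=true, which disconnects
# all containers, deletes the network, and re-creates it with the new options.
- name: Re-create an existing network with a changed subnet
  community.docker.docker_network:
    name: network_three
    ipam_config:
      - subnet: 172.25.0.0/24
    force: true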
"""

RETURN = r"""
network:
  description:
    - Network inspection results for the affected network.
  returned: success
  type: dict
  sample: {}
"""

import re
import time
import traceback
import typing as t

from ansible.module_utils.common.text.converters import to_text

from ansible_collections.community.docker.plugins.module_utils._api.errors import (
    DockerException,
)
from ansible_collections.community.docker.plugins.module_utils._common_api import (
    AnsibleDockerClient,
    RequestException,
)
from ansible_collections.community.docker.plugins.module_utils._util import (
    DifferenceTracker,
    DockerBaseClass,
    clean_dict_booleans_for_docker_api,
    normalize_ip_address,
    normalize_ip_network,
    sanitize_labels,
)


class TaskParameters(DockerBaseClass):
    name: str

    def __init__(self, client: AnsibleDockerClient) -> None:
        super().__init__()
        self.client = client

        self.connected: list[str] = []
        self.config_from: str | None = None
        self.config_only: bool | None = None
        self.driver: str = "bridge"
        self.driver_options: dict[str, t.Any] = {}
        self.ipam_driver: str | None = None
        self.ipam_driver_options: dict[str, t.Any] | None = None
        self.ipam_config: list[dict[str, t.Any]] | None = None
        self.appends: bool = False
        self.force: bool = False
        self.internal: bool | None = None
        self.labels: dict[str, t.Any] = {}
        self.debug: bool = False
        self.enable_ipv4: bool | None = None
        self.enable_ipv6: bool | None = None
        self.scope: t.Literal["local", "global", "swarm"] | None = None
        self.attachable: bool | None = None
        self.ingress: bool | None = None
        self.state: t.Literal["present", "absent"] = "present"

        for key, value in client.module.params.items():
            setattr(self, key, value)

        # config_only sets driver to 'null' (and scope to 'local') so force that here. Otherwise we get
        # diffs of 'null' --> 'bridge' given that the driver option defaults to 'bridge'.
        if self.config_only:
            self.driver = "null"  # type: ignore[unreachable]


def container_names_in_network(network: dict[str, t.Any]) -> list[str]:
    return (
        [c["Name"] for c in network["Containers"].values()]
        if network["Containers"]
        else []
    )


CIDR_IPV4 = re.compile(r"^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$")
# IPv6 prefix lengths range from 0 to 128.
CIDR_IPV6 = re.compile(r"^[0-9a-fA-F:]+/(12[0-8]|1[01][0-9]|[1-9]?[0-9])$")


def validate_cidr(cidr: str) -> t.Literal["ipv4", "ipv6"]:
    """Validate CIDR. Return the IP version of a CIDR string on success.

    :param cidr: Valid CIDR
    :type cidr: str
    :return: ``ipv4`` or ``ipv6``
    :rtype: str
    :raises ValueError: If ``cidr`` is not a valid CIDR
    """
    # TODO: Use ipaddress for this instead of rolling your own...
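    # A sketch of the stdlib alternative the TODO refers to (assuming the
    # lenient host-bits behavior of the regexes should be kept, hence
    # strict=False):
    #
    #   import ipaddress
    #   version = ipaddress.ip_network(cidr, strict=False).version  # 4 or 6
    #   return "ipv4" if version == 4 else "ipv6"
    #
    # ip_network raises ValueError for invalid input, matching this
    # function's contract.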
    if CIDR_IPV4.match(cidr):
        return "ipv4"
    if CIDR_IPV6.match(cidr):
        return "ipv6"
    raise ValueError(f'"{cidr}" is not a valid CIDR')


def normalize_ipam_config_key(key: str) -> str:
    """Normalizes IPAM config keys returned by the Docker API to match Ansible keys.

    :param key: Docker API key
    :type key: str
    :return: Ansible module key
    :rtype: str
    """
    special_cases = {"AuxiliaryAddresses": "aux_addresses"}
    return special_cases.get(key, key.lower())


def dicts_are_essentially_equal(a: dict[str, t.Any], b: dict[str, t.Any]) -> bool:
    """Make sure that a is a subset of b, where None entries of a are ignored."""
    for k, v in a.items():
        if v is None:
            continue
        if b.get(k) != v:
            return False
    return True


def normalize_ipam_values(ipam_config: dict[str, t.Any]) -> dict[str, t.Any]:
    result = {}
    for key, value in ipam_config.items():
        if key in ("subnet", "iprange"):
            value = normalize_ip_network(value)
        elif key in ("gateway",):
            value = normalize_ip_address(value)
        elif key in ("aux_addresses",) and value is not None:
            value = {k: normalize_ip_address(v) for k, v in value.items()}
        result[key] = value
    return result


class DockerNetworkManager:
    def __init__(self, client: AnsibleDockerClient) -> None:
        self.client = client
        self.parameters = TaskParameters(client)
        self.check_mode = self.client.check_mode
        self.actions: list[str] = []
        self.results: dict[str, t.Any] = {"changed": False, "actions": self.actions}
        self.diff = self.client.module._diff
        self.diff_tracker = DifferenceTracker()
        self.diff_result: dict[str, t.Any] = {}

        self.existing_network = self.get_existing_network()

        if not self.parameters.connected and self.existing_network:
            self.parameters.connected = container_names_in_network(
                self.existing_network
            )

        if self.parameters.ipam_config:
            try:
                for ipam_config in self.parameters.ipam_config:
                    validate_cidr(ipam_config["subnet"])
            except ValueError as e:
                self.client.fail(to_text(e))

        if self.parameters.driver_options:
            self.parameters.driver_options = clean_dict_booleans_for_docker_api(
                self.parameters.driver_options
            )

        state = self.parameters.state
        if state == "present":
            self.present()
        elif state == "absent":
            self.absent()

        if self.diff or self.check_mode or self.parameters.debug:
            if self.diff:
                self.diff_result["before"], self.diff_result["after"] = (
                    self.diff_tracker.get_before_after()
                )
            self.results["diff"] = self.diff_result

    def get_existing_network(self) -> dict[str, t.Any] | None:
        return self.client.get_network(name=self.parameters.name)

    def has_different_config(
        self, net: dict[str, t.Any]
    ) -> tuple[bool, DifferenceTracker]:
        """
        Evaluates an existing network and returns a tuple containing a boolean
        indicating if the configuration is different and a tracker of the differences.

        :param net: the inspection output for an existing network
        :return: (bool, DifferenceTracker)
        """
        differences = DifferenceTracker()
        if (
            self.parameters.config_only is not None
            and self.parameters.config_only != net.get("ConfigOnly", False)
        ):
            differences.add(
                "config_only",
                parameter=self.parameters.config_only,
                active=net.get("ConfigOnly", False),
            )
        if (
            self.parameters.config_from is not None
            and self.parameters.config_from
            != net.get("ConfigFrom", {}).get("Network", "")
        ):
            differences.add(
                "config_from",
                parameter=self.parameters.config_from,
                active=net.get("ConfigFrom", {}).get("Network", ""),
            )
        if self.parameters.driver and self.parameters.driver != net["Driver"]:
            differences.add(
                "driver", parameter=self.parameters.driver, active=net["Driver"]
            )
        if self.parameters.driver_options:
            if not net.get("Options"):
                differences.add(
                    "driver_options",
                    parameter=self.parameters.driver_options,
                    active=net.get("Options"),
                )
            else:
                for key, value in self.parameters.driver_options.items():
                    if key not in net["Options"] or value != net["Options"][key]:
                        differences.add(
                            f"driver_options.{key}",
                            parameter=value,
                            active=net["Options"].get(key),
                        )

        if self.parameters.ipam_driver and (
            not net.get("IPAM") or net["IPAM"]["Driver"] != self.parameters.ipam_driver
        ):
            differences.add(
                "ipam_driver",
                parameter=self.parameters.ipam_driver,
                active=net.get("IPAM"),
            )

        if self.parameters.ipam_driver_options is not None:
            ipam_driver_options = net["IPAM"].get("Options") or {}
            if ipam_driver_options != self.parameters.ipam_driver_options:
                differences.add(
                    "ipam_driver_options",
                    parameter=self.parameters.ipam_driver_options,
                    active=ipam_driver_options,
                )

        if self.parameters.ipam_config is not None and self.parameters.ipam_config:
            if not net.get("IPAM") or not net["IPAM"]["Config"]:
                differences.add(
                    "ipam_config",
                    parameter=self.parameters.ipam_config,
                    active=net.get("IPAM", {}).get("Config"),
                )
            else:
                # Put network's IPAM config into the same format as module's IPAM config
                net_ipam_configs = []
                net_ipam_configs_normalized = []
                for net_ipam_config in net["IPAM"]["Config"]:
                    config = {}
                    for k, v in net_ipam_config.items():
                        config[normalize_ipam_config_key(k)] = v
                    net_ipam_configs.append(config)
                    net_ipam_configs_normalized.append(normalize_ipam_values(config))
                # Compare lists of dicts as sets of dicts
                for idx, ipam_config in enumerate(self.parameters.ipam_config):
                    ipam_config_normalized = normalize_ipam_values(ipam_config)
                    net_config = {}
                    net_config_normalized = {}
                    for net_ipam_config, net_ipam_config_normalized in zip(
                        net_ipam_configs, net_ipam_configs_normalized
                    ):
                        if dicts_are_essentially_equal(
                            ipam_config_normalized, net_ipam_config_normalized
                        ):
                            net_config = net_ipam_config
                            net_config_normalized = net_ipam_config_normalized
                            break
                    for key, value in ipam_config.items():
                        if value is None:
                            # due to recursive argument_spec, all keys are always present
                            # (but have default value None if not specified)
                            continue
                        if ipam_config_normalized[key] != net_config_normalized.get(
                            key
                        ):
                            differences.add(
                                f"ipam_config[{idx}].{key}",
                                parameter=value,
                                active=net_config.get(key),
                            )

        if (
            self.parameters.enable_ipv4 is not None
            and self.parameters.enable_ipv4 != net.get("EnableIPv4", False)
        ):
            differences.add(
                "enable_ipv4",
                parameter=self.parameters.enable_ipv4,
                active=net.get("EnableIPv4", False),
            )
        if (
            self.parameters.enable_ipv6 is not None
            and self.parameters.enable_ipv6 != net.get("EnableIPv6", False)
        ):
            differences.add(
                "enable_ipv6",
                parameter=self.parameters.enable_ipv6,
                active=net.get("EnableIPv6", False),
            )

        if (
            self.parameters.internal is not None
            and self.parameters.internal != net.get("Internal", False)
        ):
            differences.add(
                "internal",
                parameter=self.parameters.internal,
                active=net.get("Internal"),
            )

        if self.parameters.scope is not None and self.parameters.scope != net.get(
            "Scope"
        ):
            differences.add(
                "scope", parameter=self.parameters.scope, active=net.get("Scope")
            )

        if (
            self.parameters.attachable is not None
            and self.parameters.attachable != net.get("Attachable", False)
        ):
            differences.add(
                "attachable",
                parameter=self.parameters.attachable,
                active=net.get("Attachable"),
            )
        if self.parameters.ingress is not None and self.parameters.ingress != net.get(
            "Ingress", False
        ):
            differences.add(
                "ingress", parameter=self.parameters.ingress, active=net.get("Ingress")
            )
        if self.parameters.labels:
            if not net.get("Labels"):
                differences.add(
                    "labels", parameter=self.parameters.labels, active=net.get("Labels")
                )
            else:
                for key, value in self.parameters.labels.items():
                    if key not in net["Labels"] or value != net["Labels"][key]:
                        differences.add(
                            f"labels.{key}",
                            parameter=value,
                            active=net["Labels"].get(key),
                        )

        return not differences.empty, differences

    def create_network(self) -> None:
        if not self.existing_network:
            data: dict[str, t.Any] = {
                "Name": self.parameters.name,
                "Driver": self.parameters.driver,
                "Options": self.parameters.driver_options,
                "IPAM": None,
                "CheckDuplicate": None,
            }

            if self.parameters.config_only is not None:
                data["ConfigOnly"] = self.parameters.config_only
            if self.parameters.config_from:
                data["ConfigFrom"] = {"Network": self.parameters.config_from}
            if self.parameters.enable_ipv6 is not None:
                data["EnableIPv6"] = self.parameters.enable_ipv6
            if self.parameters.enable_ipv4 is not None:
                data["EnableIPv4"] = self.parameters.enable_ipv4
            if self.parameters.internal:
                data["Internal"] = True
            if self.parameters.scope is not None:
                data["Scope"] = self.parameters.scope
            if self.parameters.attachable is not None:
                data["Attachable"] = self.parameters.attachable
            if self.parameters.ingress is not None:
                data["Ingress"] = self.parameters.ingress
            if self.parameters.labels is not None:
                data["Labels"] = self.parameters.labels

            ipam_pools = []
            if self.parameters.ipam_config:
                for ipam_pool in self.parameters.ipam_config:
                    ipam_pools.append(
                        {
                            "Subnet": ipam_pool["subnet"],
                            "IPRange": ipam_pool["iprange"],
                            "Gateway": ipam_pool["gateway"],
                            "AuxiliaryAddresses": ipam_pool["aux_addresses"],
                        }
                    )

            if (
                self.parameters.ipam_driver
                or self.parameters.ipam_driver_options
                or ipam_pools
            ):
                # Only add IPAM if a driver was specified or if IPAM parameters were
                # specified. Leaving this parameter out can significantly speed up
                # creation; on my machine creation with this option needs ~15 seconds,
                # and without just a few seconds.
                data["IPAM"] = {
                    "Driver": self.parameters.ipam_driver,
                    "Config": ipam_pools or [],
                    "Options": self.parameters.ipam_driver_options,
                }

            if not self.check_mode:
                resp = self.client.post_json_to_json("/networks/create", data=data)
                self.client.report_warnings(resp, ["Warning"])
                self.existing_network = self.client.get_network(network_id=resp["Id"])
            self.actions.append(
                f"Created network {self.parameters.name} with driver {self.parameters.driver}"
            )
            self.results["changed"] = True

    def remove_network(self) -> None:
        if self.existing_network:
            self.disconnect_all_containers()
            if not self.check_mode:
                self.client.delete_call("/networks/{0}", self.parameters.name)
                if self.existing_network.get("Scope", "local") == "swarm":
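                    # Added note: removal of swarm-scoped networks can complete
                    # asynchronously, so poll until the network is actually gone
                    # before reporting the result.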
                    while self.get_existing_network():
                        time.sleep(0.1)
            self.actions.append(f"Removed network {self.parameters.name}")
            self.results["changed"] = True

    def is_container_connected(self, container_name: str) -> bool:
        if not self.existing_network:
            return False
        return container_name in container_names_in_network(self.existing_network)

    def is_container_exist(self, container_name: str) -> bool:
        try:
            container = self.client.get_container(container_name)
            return bool(container)

        except DockerException as e:
            self.client.fail(
                f"An unexpected Docker error occurred: {e}",
                exception=traceback.format_exc(),
            )
        except RequestException as e:
            self.client.fail(
                f"An unexpected requests error occurred when trying to talk to the Docker daemon: {e}",
                exception=traceback.format_exc(),
            )

    def connect_containers(self) -> None:
        for name in self.parameters.connected:
            if not self.is_container_connected(name) and self.is_container_exist(name):
                if not self.check_mode:
                    data = {
                        "Container": name,
                        "EndpointConfig": None,
                    }
                    self.client.post_json(
                        "/networks/{0}/connect", self.parameters.name, data=data
                    )
                self.actions.append(f"Connected container {name}")
                self.results["changed"] = True
                self.diff_tracker.add(f"connected.{name}", parameter=True, active=False)

    def disconnect_missing(self) -> None:
        if not self.existing_network:
            return
        containers = self.existing_network["Containers"]
        if not containers:
            return
        for c in containers.values():
            name = c["Name"]
            if name not in self.parameters.connected:
                self.disconnect_container(name)

    def disconnect_all_containers(self) -> None:
        network = self.client.get_network(name=self.parameters.name)
        if not network:
            return
        containers = network["Containers"]
        if not containers:
            return
        for cont in containers.values():
            self.disconnect_container(cont["Name"])

    def disconnect_container(self, container_name: str) -> None:
        if not self.check_mode:
            data = {"Container": container_name, "Force": True}
            self.client.post_json(
                "/networks/{0}/disconnect", self.parameters.name, data=data
            )
        self.actions.append(f"Disconnected container {container_name}")
        self.results["changed"] = True
        self.diff_tracker.add(
            f"connected.{container_name}", parameter=False, active=True
        )

    def present(self) -> None:
        different = False
        differences = DifferenceTracker()
        if self.existing_network:
            different, differences = self.has_different_config(self.existing_network)

        self.diff_tracker.add(
            "exists", parameter=True, active=self.existing_network is not None
        )
        if self.parameters.force or different:
            self.remove_network()
            self.existing_network = None

        self.create_network()
        self.connect_containers()
        if not self.parameters.appends:
            self.disconnect_missing()

        if self.diff or self.check_mode or self.parameters.debug:
            self.diff_result["differences"] = differences.get_legacy_docker_diffs()
            self.diff_tracker.merge(differences)

        if not self.check_mode and not self.parameters.debug:
            self.results.pop("actions")

        network_facts = self.get_existing_network()
        self.results["network"] = network_facts

    def absent(self) -> None:
        self.diff_tracker.add(
            "exists", parameter=False, active=self.existing_network is not None
        )
        self.remove_network()


def main() -> None:
    argument_spec = {
        "name": {"type": "str", "required": True, "aliases": ["network_name"]},
        "config_from": {"type": "str"},
        "config_only": {"type": "bool"},
        "connected": {
            "type": "list",
            "default": [],
            "elements": "str",
            "aliases": ["containers"],
        },
        "state": {
            "type": "str",
            "default": "present",
            "choices": ["present", "absent"],
        },
        "driver": {"type": "str", "default": "bridge"},
        "driver_options": {"type": "dict", "default": {}},
        "force": {"type": "bool", "default": False},
        "appends": {"type": "bool", "default": False, "aliases": ["incremental"]},
        "ipam_driver": {"type": "str"},
        "ipam_driver_options": {"type": "dict"},
        "ipam_config": {
            "type": "list",
            "elements": "dict",
            "options": {
                "subnet": {"type": "str"},
                "iprange": {"type": "str"},
                "gateway": {"type": "str"},
                "aux_addresses": {"type": "dict"},
            },
        },
        "enable_ipv4": {"type": "bool"},
        "enable_ipv6": {"type": "bool"},
        "internal": {"type": "bool"},
        "labels": {"type": "dict", "default": {}},
        "debug": {"type": "bool", "default": False},
        "scope": {"type": "str", "choices": ["local", "global", "swarm"]},
        "attachable": {"type": "bool"},
        "ingress": {"type": "bool"},
    }

    option_minimal_versions = {
        "config_from": {"docker_api_version": "1.30"},
        "config_only": {"docker_api_version": "1.30"},
        "scope": {"docker_api_version": "1.30"},
        "attachable": {"docker_api_version": "1.26"},
        "enable_ipv4": {"docker_api_version": "1.47"},
    }

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # "The docker server >= 1.10.0"
        option_minimal_versions=option_minimal_versions,
    )
    sanitize_labels(client.module.params["labels"], "labels", client)
    try:
        cm = DockerNetworkManager(client)
        client.module.exit_json(**cm.results)
    except DockerException as e:
        client.fail(
            f"An unexpected Docker error occurred: {e}",
            exception=traceback.format_exc(),
        )
    except RequestException as e:
        client.fail(
            f"An unexpected requests error occurred when trying to talk to the Docker daemon: {e}",
            exception=traceback.format_exc(),
        )


if __name__ == "__main__":
    main()

@@ -0,0 +1,140 @@
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
module: docker_network_info

short_description: Retrieves facts about a Docker network

description:
  - Retrieves facts about a Docker network.
  - Essentially returns the output of C(docker network inspect <name>), similar to what M(community.docker.docker_network)
    returns for a non-absent network.
extends_documentation_fragment:
  - community.docker._docker.api_documentation
  - community.docker._attributes
  - community.docker._attributes.actiongroup_docker
  - community.docker._attributes.info_module
  - community.docker._attributes.idempotent_not_modify_state

options:
  name:
    description:
      - The name of the network to inspect.
      - When identifying an existing network, the value may be a network name or a long or short network ID.
    type: str
    required: true

author:
  - "Dave Bendit (@DBendit)"

requirements:
  - "Docker API >= 1.25"
"""

EXAMPLES = r"""
---
- name: Get info on network
  community.docker.docker_network_info:
    name: mydata
  register: result

- name: Does network exist?
  ansible.builtin.debug:
    msg: "The network {{ 'exists' if result.exists else 'does not exist' }}"

- name: Print information about network
  ansible.builtin.debug:
    var: result.network
  when: result.exists
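
# Illustrative addition (not part of the upstream examples): since O(name)
# also accepts network IDs, a short ID works as well. The ID shown here is
# hypothetical.
- name: Get info on a network by short ID
  community.docker.docker_network_info:
    name: "0856968545f2"
  register: result_by_id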
|
||||
"""
|
||||
|
||||
RETURN = r"""
|
||||
exists:
|
||||
description:
|
||||
- Returns whether the network exists.
|
||||
type: bool
|
||||
returned: always
|
||||
sample: true
|
||||
network:
|
||||
description:
|
||||
- Facts representing the current state of the network. Matches the docker inspection output.
|
||||
- Will be V(none) if network does not exist.
|
||||
returned: always
|
||||
type: dict
|
||||
sample: {
|
||||
"Attachable": false,
|
||||
"ConfigFrom": {"Network": ""},
|
||||
"ConfigOnly": false,
|
||||
"Containers": {},
|
||||
"Created": "2018-12-07T01:47:51.250835114-06:00",
|
||||
"Driver": "bridge",
|
||||
"EnableIPv6": false,
|
||||
"IPAM": {
|
||||
"Config": [
|
||||
{
|
||||
"Gateway": "192.168.96.1",
|
||||
"Subnet": "192.168.96.0/20",
|
||||
},
|
||||
],
|
||||
"Driver": "default",
|
||||
"Options": null,
|
||||
},
|
||||
"Id": "0856968545f22026c41c2c7c3d448319d3b4a6a03a40b148b3ac4031696d1c0a",
|
||||
"Ingress": false,
|
||||
"Internal": false,
|
||||
"Labels": {},
|
||||
"Name": "ansible-test-f2700bba",
|
||||
"Options": {},
|
||||
"Scope": "local",
|
||||
}
|
||||
"""
|
||||
|
||||
import traceback
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._api.errors import (
|
||||
DockerException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._common_api import (
|
||||
AnsibleDockerClient,
|
||||
RequestException,
|
||||
)
|
||||
|
||||
|
||||
def main() -> None:
|
||||
argument_spec = {
|
||||
"name": {"type": "str", "required": True},
|
||||
}
|
||||
|
||||
client = AnsibleDockerClient(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
try:
|
||||
network = client.get_network(client.module.params["name"])
|
||||
|
||||
client.module.exit_json(
|
||||
changed=False,
|
||||
exists=bool(network),
|
||||
network=network,
|
||||
)
|
||||
except DockerException as e:
|
||||
client.fail(
|
||||
f"An unexpected Docker error occurred: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
f"An unexpected requests error occurred when trying to talk to the Docker daemon: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@ -0,0 +1,320 @@
|
|||
#!/usr/bin/python
#
# Copyright (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
module: docker_node
short_description: Manage Docker Swarm node
description:
  - Manages the Docker nodes through a Swarm Manager.
  - This module allows changing the node's role and availability, and modifying, adding, or removing node labels.
extends_documentation_fragment:
  - community.docker._docker
  - community.docker._docker.docker_py_2_documentation
  - community.docker._attributes
  - community.docker._attributes.actiongroup_docker

attributes:
  check_mode:
    support: full
  diff_mode:
    support: none
  idempotent:
    support: full

options:
  hostname:
    description:
      - The hostname or ID of the node as registered in Swarm.
      - If more than one node is registered using the same hostname, the ID must be used, otherwise the module will fail.
    type: str
    required: true
  labels:
    description:
      - User-defined key/value metadata that will be assigned as node attributes.
      - Label operations in this module apply to the docker swarm node specified by O(hostname). Use the M(community.docker.docker_swarm)
        module to add/modify/remove swarm cluster labels.
      - The actual state of labels assigned to the node when the module completes its work depends on the O(labels_state) and O(labels_to_remove)
        parameter values. See the descriptions below.
    type: dict
  labels_state:
    description:
      - Defines the operation on the labels assigned to the node, relative to the labels specified in the O(labels) option.
      - Set to V(merge) to combine the labels provided in O(labels) with those already assigned to the node. If no labels are
        assigned, it will add the listed labels. For labels that are already assigned to the node, it will update their values.
        Labels not specified in O(labels) will remain unchanged. If O(labels) is empty then no changes will be made.
      - Set to V(replace) to replace all assigned labels with the provided ones. If O(labels) is empty then all labels assigned
        to the node will be removed. A short sketch of these semantics follows after this documentation block.
    type: str
    default: 'merge'
    choices:
      - merge
      - replace
  labels_to_remove:
    description:
      - List of labels that will be removed from the node configuration. The list has to contain only label names, not their
        values.
      - If a label provided in the list is not assigned to the node, the entry is ignored.
      - If a label is both in O(labels_to_remove) and O(labels), then the value provided in O(labels) remains assigned to
        the node.
      - If O(labels_state=replace) and O(labels) is not provided or empty, then all labels assigned to the node are removed and
        O(labels_to_remove) is ignored.
    type: list
    elements: str
  availability:
    description: Node availability to assign. If not provided then node availability remains unchanged.
    choices:
      - active
      - pause
      - drain
    type: str
  role:
    description: Node role to assign. If not provided then node role remains unchanged.
    choices:
      - manager
      - worker
    type: str

requirements:
  - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0"
  - Docker API >= 1.25
author:
  - Piotr Wojciechowski (@WojciechowskiPiotr)
  - Thierry Bouvet (@tbouvet)
"""

EXAMPLES = r"""
---
- name: Set node role
  community.docker.docker_node:
    hostname: mynode
    role: manager

- name: Set node availability
  community.docker.docker_node:
    hostname: mynode
    availability: drain

- name: Replace node labels with new labels
  community.docker.docker_node:
    hostname: mynode
    labels:
      key: value
    labels_state: replace

- name: Merge node labels and new labels
  community.docker.docker_node:
    hostname: mynode
    labels:
      key: value

- name: Remove all labels assigned to node
  community.docker.docker_node:
    hostname: mynode
    labels_state: replace

- name: Remove selected labels from the node
  community.docker.docker_node:
    hostname: mynode
    labels_to_remove:
      - key1
      - key2
"""

RETURN = r"""
node:
  description: Information about the node after the 'update' operation.
  returned: success
  type: dict
"""

import traceback
import typing as t

try:
    from docker.errors import APIError, DockerException
except ImportError:
    # missing Docker SDK for Python handled in ansible.module_utils.docker.common
    pass

from ansible.module_utils.common.text.converters import to_text

from ansible_collections.community.docker.plugins.module_utils._common import (
    RequestException,
)
from ansible_collections.community.docker.plugins.module_utils._swarm import (
    AnsibleDockerSwarmClient,
)
from ansible_collections.community.docker.plugins.module_utils._util import (
    DockerBaseClass,
    sanitize_labels,
)


class TaskParameters(DockerBaseClass):
    hostname: str

    def __init__(self, client: AnsibleDockerSwarmClient) -> None:
        super().__init__()

        # Spec
        self.labels: dict[str, t.Any] | None = None
        self.labels_state: t.Literal["merge", "replace"] = "merge"
        self.labels_to_remove: list[str] | None = None

        # Node
        self.availability: t.Literal["active", "pause", "drain"] | None = None
        self.role: t.Literal["worker", "manager"] | None = None

        for key, value in client.module.params.items():
            setattr(self, key, value)

        sanitize_labels(self.labels, "labels", client)


class SwarmNodeManager(DockerBaseClass):
    def __init__(
        self, client: AnsibleDockerSwarmClient, results: dict[str, t.Any]
    ) -> None:
        super().__init__()

        self.client = client
        self.results = results
        self.check_mode = self.client.check_mode

        self.client.fail_task_if_not_swarm_manager()

        self.parameters = TaskParameters(client)

        self.node_update()

    def node_update(self) -> None:
        if not self.client.check_if_swarm_node(node_id=self.parameters.hostname):
            self.client.fail("This node is not part of a swarm.")

        if self.client.check_if_swarm_node_is_down():
            self.client.fail("Cannot update the node. The node is down.")

        try:
            node_info = self.client.inspect_node(node_id=self.parameters.hostname)
        except APIError as exc:
            self.client.fail(f"Failed to get node information: {exc}")

        changed = False
        node_spec: dict[str, t.Any] = {
            "Availability": self.parameters.availability,
            "Role": self.parameters.role,
            "Labels": self.parameters.labels,
        }

        if self.parameters.role is None:
            node_spec["Role"] = node_info["Spec"]["Role"]
        else:
            if node_info["Spec"]["Role"] != self.parameters.role:
                node_spec["Role"] = self.parameters.role
                changed = True

        if self.parameters.availability is None:
            node_spec["Availability"] = node_info["Spec"]["Availability"]
        else:
            if node_info["Spec"]["Availability"] != self.parameters.availability:
                node_spec["Availability"] = self.parameters.availability
                changed = True

        if self.parameters.labels_state == "replace":
            if self.parameters.labels is None:
                node_spec["Labels"] = {}
                if node_info["Spec"]["Labels"]:
                    changed = True
            else:
                if (node_info["Spec"]["Labels"] or {}) != self.parameters.labels:
                    node_spec["Labels"] = self.parameters.labels
                    changed = True
        elif self.parameters.labels_state == "merge":
            labels: dict[str, str] = dict(node_info["Spec"]["Labels"] or {})
            node_spec["Labels"] = labels
            if self.parameters.labels is not None:
                for key, value in self.parameters.labels.items():
                    if labels.get(key) != value:
                        labels[key] = value
                        changed = True

            if self.parameters.labels_to_remove is not None:
                for key in self.parameters.labels_to_remove:
                    if self.parameters.labels is not None:
                        if not self.parameters.labels.get(key):
                            if node_spec["Labels"].get(key):
                                node_spec["Labels"].pop(key)
                                changed = True
                        else:
                            self.client.module.warn(
                                f"Label '{to_text(key)}' listed both in 'labels' and 'labels_to_remove'. "
                                "Keeping the assigned label value."
                            )
                    else:
                        if node_spec["Labels"].get(key):
                            node_spec["Labels"].pop(key)
                            changed = True

        if changed is True:
            if not self.check_mode:
                try:
                    # The version index guards against concurrent updates (optimistic locking).
                    self.client.update_node(
                        node_id=node_info["ID"],
                        version=node_info["Version"]["Index"],
                        node_spec=node_spec,
                    )
                except APIError as exc:
                    self.client.fail(f"Failed to update node: {exc}")
            self.results["node"] = self.client.get_node_inspect(node_id=node_info["ID"])
            self.results["changed"] = changed
        else:
            self.results["node"] = node_info
            self.results["changed"] = changed


def main() -> None:
    argument_spec = {
        "hostname": {"type": "str", "required": True},
        "labels": {"type": "dict"},
        "labels_state": {
            "type": "str",
            "default": "merge",
            "choices": ["merge", "replace"],
        },
        "labels_to_remove": {"type": "list", "elements": "str"},
        "availability": {"type": "str", "choices": ["active", "pause", "drain"]},
        "role": {"type": "str", "choices": ["worker", "manager"]},
    }

    client = AnsibleDockerSwarmClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        min_docker_version="2.4.0",
    )

    try:
        results = {
            "changed": False,
        }

        SwarmNodeManager(client, results)
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail(
            f"An unexpected Docker error occurred: {e}",
            exception=traceback.format_exc(),
        )
    except RequestException as e:
        client.fail(
            f"An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {e}",
            exception=traceback.format_exc(),
        )


if __name__ == "__main__":
    main()
@@ -0,0 +1,165 @@
#!/usr/bin/python
#
# Copyright (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
module: docker_node_info

short_description: Retrieves facts about docker swarm node from Swarm Manager

description:
  - Retrieves facts about a docker node.
  - Essentially returns the output of C(docker node inspect <name>).
  - Must be executed on a host running as a Swarm Manager, otherwise the module will fail.
extends_documentation_fragment:
  - community.docker._docker
  - community.docker._docker.docker_py_2_documentation
  - community.docker._attributes
  - community.docker._attributes.actiongroup_docker
  - community.docker._attributes.info_module
  - community.docker._attributes.idempotent_not_modify_state

options:
  name:
    description:
      - The name of a node, or a list of node names, to inspect.
      - If empty, information on all nodes in the Swarm cluster is returned.
      - When identifying a node, use either the hostname of the node (as registered in Swarm) or the node ID.
      - If O(self=true) then this parameter is ignored.
    type: list
    elements: str
  self:
    description:
      - If V(true), queries the node (that is, the docker daemon) the module communicates with.
      - If V(true) then O(name) is ignored.
      - If V(false) then the query depends on O(name) presence and value.
    type: bool
    default: false

author:
  - Piotr Wojciechowski (@WojciechowskiPiotr)

requirements:
  - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0"
  - "Docker API >= 1.25"
"""

EXAMPLES = r"""
---
- name: Get info on all nodes
  community.docker.docker_node_info:
  register: result

- name: Get info on node
  community.docker.docker_node_info:
    name: mynode
  register: result

- name: Get info on list of nodes
  community.docker.docker_node_info:
    name:
      - mynode1
      - mynode2
  register: result

- name: Get info on host if it is Swarm Manager
  community.docker.docker_node_info:
    self: true
  register: result
"""

RETURN = r"""
nodes:
  description:
    - Facts representing the current state of the nodes. Matches the C(docker node inspect) output.
    - Can contain multiple entries if more than one node is provided in O(name), or if O(name) is not provided.
    - If O(name) contains a list of nodes, the output will provide information on all nodes registered at the swarm, including
      nodes that left the swarm but have not been removed from the cluster on swarm managers, and nodes that are unreachable.
  returned: always
  type: list
  elements: dict
"""

import traceback
import typing as t

from ansible_collections.community.docker.plugins.module_utils._common import (
    RequestException,
)
from ansible_collections.community.docker.plugins.module_utils._swarm import (
    AnsibleDockerSwarmClient,
)

try:
    from docker.errors import DockerException
except ImportError:
    # missing Docker SDK for Python handled in ansible.module_utils.docker.common
    pass


def get_node_facts(client: AnsibleDockerSwarmClient) -> list[dict[str, t.Any]]:
    results: list[dict[str, t.Any]] = []

    # O(self) takes precedence over O(name); no O(name) at all means "all nodes".
    if client.module.params["self"] is True:
        self_node_id = client.get_swarm_node_id()
        node_info = client.get_node_inspect(node_id=self_node_id)
        results.append(node_info)
        return results

    if client.module.params["name"] is None:
        node_info_list = client.get_all_nodes_inspect()
        return node_info_list

    nodes = client.module.params["name"]
    if not isinstance(nodes, list):
        nodes = [nodes]

    for next_node_name in nodes:
        next_node_info = client.get_node_inspect(
            node_id=next_node_name, skip_missing=True
        )
        if next_node_info:
            results.append(next_node_info)
    return results
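
# A minimal sketch (illustrative only) of the query precedence implemented by
# get_node_facts() above: O(self) wins over O(name), and a missing O(name)
# means "inspect every node in the cluster".
def _query_kind(params: dict) -> str:
    if params.get("self"):
        return "self"
    if params.get("name") is None:
        return "all"
    return "named"
# _query_kind({"self": True, "name": ["n1"]}) == "self"
# _query_kind({"self": False, "name": None}) == "all"
# _query_kind({"self": False, "name": ["n1", "n2"]}) == "named"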


def main() -> None:
    argument_spec = {
        "name": {"type": "list", "elements": "str"},
        "self": {"type": "bool", "default": False},
    }

    client = AnsibleDockerSwarmClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        min_docker_version="2.4.0",
    )

    client.fail_task_if_not_swarm_manager()

    try:
        nodes = get_node_facts(client)

        client.module.exit_json(
            changed=False,
            nodes=nodes,
        )
    except DockerException as e:
        client.fail(
            f"An unexpected docker error occurred: {e}",
            exception=traceback.format_exc(),
        )
    except RequestException as e:
        client.fail(
            f"An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {e}",
            exception=traceback.format_exc(),
        )


if __name__ == "__main__":
    main()
@@ -0,0 +1,455 @@
#!/usr/bin/python
#
# Copyright (c) 2021 Red Hat | Ansible Sakar Mehra <@sakarmehra100@gmail.com | @sakar97>
# Copyright (c) 2019, Vladimir Porshkevich (@porshkevich) <neosonic@mail.ru>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
module: docker_plugin
short_description: Manage Docker plugins
version_added: 1.3.0
description:
  - This module allows installing, deleting, enabling, and disabling Docker plugins.
  - Performs largely the same function as the C(docker plugin) CLI subcommand.
notes:
  - The C(--grant-all-permissions) CLI flag is true by default in this module.
extends_documentation_fragment:
  - community.docker._docker.api_documentation
  - community.docker._attributes
  - community.docker._attributes.actiongroup_docker

attributes:
  check_mode:
    support: full
  diff_mode:
    support: full
  idempotent:
    support: full

options:
  plugin_name:
    description:
      - Name of the plugin to operate on.
    required: true
    type: str

  state:
    description:
      - V(absent) removes the plugin.
      - V(present) installs the plugin, if it does not already exist.
      - V(enable) enables the plugin.
      - V(disable) disables the plugin.
    default: present
    choices:
      - absent
      - present
      - enable
      - disable
    type: str

  alias:
    description:
      - Local name for the plugin.
    type: str
    version_added: 1.8.0

  plugin_options:
    description:
      - Dictionary of plugin settings.
    type: dict
    default: {}

  force_remove:
    description:
      - Remove even if the plugin is enabled.
    default: false
    type: bool

  enable_timeout:
    description:
      - Timeout in seconds when enabling the plugin.
    type: int
    default: 0

author:
  - Sakar Mehra (@sakar97)
  - Vladimir Porshkevich (@porshkevich)

requirements:
  - "Docker API >= 1.25"
"""

EXAMPLES = r"""
---
- name: Install a plugin
  community.docker.docker_plugin:
    plugin_name: plugin_one
    state: present

- name: Remove a plugin
  community.docker.docker_plugin:
    plugin_name: plugin_one
    state: absent

- name: Enable the plugin
  community.docker.docker_plugin:
    plugin_name: plugin_one
    state: enable

- name: Disable the plugin
  community.docker.docker_plugin:
    plugin_name: plugin_one
    state: disable

- name: Install a plugin with options
  community.docker.docker_plugin:
    plugin_name: weaveworks/net-plugin:latest_release
    plugin_options:
      IPALLOC_RANGE: "10.32.0.0/12"
      WEAVE_PASSWORD: "PASSWORD"
"""

RETURN = r"""
plugin:
  description:
    - Plugin inspection results for the affected plugin.
  returned: success
  type: dict
  sample: {}
actions:
  description:
    - List of actions performed during task execution.
  returned: when O(state) is not V(absent)
  type: list
"""

import traceback
import typing as t

from ansible.module_utils.common.text.converters import to_text

from ansible_collections.community.docker.plugins.module_utils._api import auth
from ansible_collections.community.docker.plugins.module_utils._api.errors import (
    APIError,
    DockerException,
    NotFound,
)
from ansible_collections.community.docker.plugins.module_utils._common_api import (
    AnsibleDockerClient,
    RequestException,
)
from ansible_collections.community.docker.plugins.module_utils._util import (
    DifferenceTracker,
    DockerBaseClass,
)


class TaskParameters(DockerBaseClass):
    plugin_name: str

    def __init__(self, client: AnsibleDockerClient) -> None:
        super().__init__()
        self.client = client
        self.alias: str | None = None
        self.plugin_options: dict[str, t.Any] = {}
        self.debug: bool = False
        self.force_remove: bool = False
        self.enable_timeout: int = 0
        self.state: t.Literal["present", "absent", "enable", "disable"] = "present"

        for key, value in client.module.params.items():
            setattr(self, key, value)


def prepare_options(options: dict[str, t.Any] | None) -> list[str]:
    # Serialize a settings dict into the ["key=value", ...] form the API expects.
    return (
        [f"{k}={v if v is not None else ''}" for k, v in options.items()]
        if options
        else []
    )


def parse_options(options_list: list[str] | None) -> dict[str, str]:
    # Inverse of prepare_options(): split each "key=value" entry at the first "=".
    return dict(x.split("=", 1) for x in options_list) if options_list else {}


class DockerPluginManager:
    def __init__(self, client: AnsibleDockerClient) -> None:
        self.client = client

        self.parameters = TaskParameters(client)
        self.preferred_name = self.parameters.alias or self.parameters.plugin_name
        self.check_mode = self.client.check_mode
        self.diff = self.client.module._diff
        self.diff_tracker = DifferenceTracker()
        self.diff_result: dict[str, t.Any] = {}

        self.actions: list[str] = []
        self.changed = False

        self.existing_plugin = self.get_existing_plugin()

        state = self.parameters.state
        if state == "present":
            self.present()
        elif state == "absent":
            self.absent()
        elif state == "enable":
            self.enable()
        elif state == "disable":
            self.disable()

        if self.diff:
            self.diff_result["before"], self.diff_result["after"] = (
                self.diff_tracker.get_before_after()
            )

    def get_existing_plugin(self) -> dict[str, t.Any] | None:
        try:
            return self.client.get_json("/plugins/{0}/json", self.preferred_name)
        except NotFound:
            return None
        except APIError as e:
            self.client.fail(to_text(e))

    def has_different_config(self) -> DifferenceTracker:
        """
        Return the differences between the current parameters and the existing plugin.

        :return: list of options that differ
        """
        assert self.existing_plugin is not None
        differences = DifferenceTracker()
        if self.parameters.plugin_options:
            settings = self.existing_plugin.get("Settings")
            if not settings:
                differences.add(
                    "plugin_options",
                    parameter=self.parameters.plugin_options,
                    active=settings,
                )
            else:
                existing_options = parse_options(settings.get("Env"))

                for key, value in self.parameters.plugin_options.items():
                    if (
                        (not existing_options.get(key) and value)
                        or not value
                        or value != existing_options[key]
                    ):
                        differences.add(
                            f"plugin_options.{key}",
                            parameter=value,
                            active=existing_options.get(key),
                        )

        return differences
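
    # A minimal sketch (plain values, illustrative only) of the comparison
    # performed above: each desired option is checked against the plugin's
    # current Env settings and every mismatch is recorded, e.g.
    #   desired = {"DEBUG": "1"}; active = {"DEBUG": "0"}
    #   -> difference "plugin_options.DEBUG" (parameter="1", active="0")
    # The real condition above additionally treats empty or unset values specially.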

    def install_plugin(self) -> None:
        if not self.existing_plugin:
            if not self.check_mode:
                try:
                    # Get privileges
                    headers = {}
                    registry, dummy_repo_name = auth.resolve_repository_name(
                        self.parameters.plugin_name
                    )
                    header = auth.get_config_header(self.client, registry)
                    if header:
                        headers["X-Registry-Auth"] = header
                    privileges = self.client.get_json(
                        "/plugins/privileges",
                        params={"remote": self.parameters.plugin_name},
                        headers=headers,
                    )
                    # Pull plugin
                    params = {
                        "remote": self.parameters.plugin_name,
                    }
                    if self.parameters.alias:
                        params["name"] = self.parameters.alias
                    response = self.client._post_json(
                        self.client._url("/plugins/pull"),
                        params=params,
                        headers=headers,
                        data=privileges,
                        stream=True,
                    )
                    self.client._raise_for_status(response)
                    for dummy in self.client._stream_helper(response, decode=True):
                        pass
                    # Inspect and configure plugin
                    self.existing_plugin = self.client.get_json(
                        "/plugins/{0}/json", self.preferred_name
                    )
                    if self.parameters.plugin_options:
                        data = prepare_options(self.parameters.plugin_options)
                        self.client.post_json(
                            "/plugins/{0}/set", self.preferred_name, data=data
                        )
                except APIError as e:
                    self.client.fail(to_text(e))

            self.actions.append(f"Installed plugin {self.preferred_name}")
            self.changed = True

    def remove_plugin(self) -> None:
        force = self.parameters.force_remove
        if self.existing_plugin:
            if not self.check_mode:
                try:
                    self.client.delete_call(
                        "/plugins/{0}", self.preferred_name, params={"force": force}
                    )
                except APIError as e:
                    self.client.fail(to_text(e))

            self.actions.append(f"Removed plugin {self.preferred_name}")
            self.changed = True

    def update_plugin(self) -> None:
        if self.existing_plugin:
            differences = self.has_different_config()
            if not differences.empty:
                if not self.check_mode:
                    try:
                        data = prepare_options(self.parameters.plugin_options)
                        self.client.post_json(
                            "/plugins/{0}/set", self.preferred_name, data=data
                        )
                    except APIError as e:
                        self.client.fail(to_text(e))
                self.actions.append(f"Updated plugin {self.preferred_name} settings")
                self.changed = True
        else:
            self.client.fail("Cannot update the plugin: Plugin does not exist")

    def present(self) -> None:
        differences = DifferenceTracker()
        if self.existing_plugin:
            differences = self.has_different_config()

        self.diff_tracker.add(
            "exists", parameter=True, active=self.existing_plugin is not None
        )

        if self.existing_plugin:
            self.update_plugin()
        else:
            self.install_plugin()

        if self.diff or self.check_mode or self.parameters.debug:
            self.diff_tracker.merge(differences)

    def absent(self) -> None:
        self.remove_plugin()

    def enable(self) -> None:
        timeout = self.parameters.enable_timeout
        if self.existing_plugin:
            if not self.existing_plugin.get("Enabled"):
                if not self.check_mode:
                    try:
                        self.client.post_json(
                            "/plugins/{0}/enable",
                            self.preferred_name,
                            params={"timeout": timeout},
                        )
                    except APIError as e:
                        self.client.fail(to_text(e))
                self.actions.append(f"Enabled plugin {self.preferred_name}")
                self.changed = True
        else:
            self.install_plugin()
            if not self.check_mode:
                try:
                    self.client.post_json(
                        "/plugins/{0}/enable",
                        self.preferred_name,
                        params={"timeout": timeout},
                    )
                except APIError as e:
                    self.client.fail(to_text(e))
            self.actions.append(f"Enabled plugin {self.preferred_name}")
            self.changed = True

    def disable(self) -> None:
        if self.existing_plugin:
            if self.existing_plugin.get("Enabled"):
                if not self.check_mode:
                    try:
                        self.client.post_json(
                            "/plugins/{0}/disable", self.preferred_name
                        )
                    except APIError as e:
                        self.client.fail(to_text(e))
                self.actions.append(f"Disabled plugin {self.preferred_name}")
                self.changed = True
        else:
            self.client.fail("Cannot disable the plugin: Plugin does not exist.")

    @property
    def result(self) -> dict[str, t.Any]:
        plugin_data = {}
        if self.parameters.state != "absent":
            try:
                plugin_data = self.client.get_json(
                    "/plugins/{0}/json", self.preferred_name
                )
            except NotFound:
                # This can happen in check mode
                pass
        result: dict[str, t.Any] = {
            "actions": self.actions,
            "changed": self.changed,
            "diff": self.diff_result,
            "plugin": plugin_data,
        }
        if (
            self.parameters.state == "present"
            and not self.check_mode
            and not self.parameters.debug
        ):
            result["actions"] = None
        return {k: v for k, v in result.items() if v is not None}


def main() -> None:
    argument_spec = {
        "alias": {"type": "str"},
        "plugin_name": {"type": "str", "required": True},
        "state": {
            "type": "str",
            "default": "present",
            "choices": ["present", "absent", "enable", "disable"],
        },
        "plugin_options": {"type": "dict", "default": {}},
        "debug": {"type": "bool", "default": False},
        "force_remove": {"type": "bool", "default": False},
        "enable_timeout": {"type": "int", "default": 0},
    }
    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    try:
        cm = DockerPluginManager(client)
        client.module.exit_json(**cm.result)
    except DockerException as e:
        client.fail(
            f"An unexpected docker error occurred: {e}",
            exception=traceback.format_exc(),
        )
    except RequestException as e:
        client.fail(
            f"An unexpected requests error occurred when trying to talk to the Docker daemon: {e}",
            exception=traceback.format_exc(),
        )


if __name__ == "__main__":
    main()
@@ -0,0 +1,368 @@
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
module: docker_prune

short_description: Prune various docker objects

description:
  - Allows running C(docker container prune), C(docker image prune), C(docker network prune) and C(docker volume prune) through
    the Docker API.
extends_documentation_fragment:
  - community.docker._docker.api_documentation
  - community.docker._attributes
  - community.docker._attributes.actiongroup_docker

attributes:
  check_mode:
    support: none
  diff_mode:
    support: none
  idempotent:
    support: full

options:
  containers:
    description:
      - Whether to prune containers.
    type: bool
    default: false
  containers_filters:
    description:
      - A dictionary of filter values used for selecting containers to delete.
      - 'For example, C(until: 24h).'
      - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering) for
        more information on possible filters.
    type: dict
  images:
    description:
      - Whether to prune images.
    type: bool
    default: false
  images_filters:
    description:
      - A dictionary of filter values used for selecting images to delete.
      - 'For example, C(dangling: true).'
      - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering) for more
        information on possible filters.
    type: dict
  networks:
    description:
      - Whether to prune networks.
    type: bool
    default: false
  networks_filters:
    description:
      - A dictionary of filter values used for selecting networks to delete.
      - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering) for
        more information on possible filters.
    type: dict
  volumes:
    description:
      - Whether to prune volumes.
    type: bool
    default: false
  volumes_filters:
    description:
      - A dictionary of filter values used for selecting volumes to delete.
      - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering) for more
        information on possible filters.
    type: dict
  builder_cache:
    description:
      - Whether to prune the builder cache.
    type: bool
    default: false
  builder_cache_all:
    description:
      - Whether to remove all types of build cache.
    type: bool
    default: false
    version_added: 3.10.0
  builder_cache_filters:
    description:
      - A dictionary of filter values used for selecting build caches to delete.
      - 'For example, C(until: 10m).'
      - See L(the API documentation,https://docs.docker.com/engine/api/v1.44/#tag/Image/operation/BuildPrune) for more information
        on possible filters.
    type: dict
    version_added: 3.10.0
  builder_cache_keep_storage:
    description:
      - Amount of disk space to keep for cache in format C(<number>[<unit>]).
      - Number is a positive integer. Unit can be one of V(B) (byte), V(K) (kibibyte, 1024B), V(M) (mebibyte), V(G) (gibibyte),
        V(T) (tebibyte), or V(P) (pebibyte).
      - Omitting the unit defaults to bytes.
    type: str
    version_added: 3.10.0

author:
  - "Felix Fontein (@felixfontein)"

notes:
  - The module always returned C(changed=false) before community.docker 3.5.1.
requirements:
  - "Docker API >= 1.25"
"""

EXAMPLES = r"""
---
- name: Prune containers older than 24h
  community.docker.docker_prune:
    containers: true
    containers_filters:
      # only consider containers created more than 24 hours ago
      until: 24h

- name: Prune containers with labels
  community.docker.docker_prune:
    containers: true
    containers_filters:
      # Prune containers whose "foo" label has value "bar", and
      # whose "bam" label has value "baz". If you only want to
      # compare one label, you can provide it as a string instead
      # of a list with one element.
      label:
        - foo=bar
        - bam=baz
      # Prune containers whose label "bar" does *not* have value
      # "baz". If you want to avoid more than one label, you can
      # provide a list of multiple label-value pairs.
      # (A sketch of how such filters are encoded for the API
      # follows after this examples block.)
      "label!": bar=baz

- name: Prune everything
  community.docker.docker_prune:
    containers: true
    images: true
    networks: true
    volumes: true
    builder_cache: true

- name: Prune everything (including non-dangling images)
  community.docker.docker_prune:
    containers: true
    images: true
    images_filters:
      dangling: false
    networks: true
    volumes: true
    builder_cache: true
"""

RETURN = r"""
# containers
containers:
  description:
    - List of IDs of deleted containers.
  returned: O(containers=true)
  type: list
  elements: str
  sample: []
containers_space_reclaimed:
  description:
    - Amount of reclaimed disk space from container pruning in bytes.
  returned: O(containers=true)
  type: int
  sample: 0

# images
images:
  description:
    - List of IDs of deleted images.
  returned: O(images=true)
  type: list
  elements: str
  sample: []
images_space_reclaimed:
  description:
    - Amount of reclaimed disk space from image pruning in bytes.
  returned: O(images=true)
  type: int
  sample: 0

# networks
networks:
  description:
    - List of IDs of deleted networks.
  returned: O(networks=true)
  type: list
  elements: str
  sample: []

# volumes
volumes:
  description:
    - List of IDs of deleted volumes.
  returned: O(volumes=true)
  type: list
  elements: str
  sample: []
volumes_space_reclaimed:
  description:
    - Amount of reclaimed disk space from volume pruning in bytes.
  returned: O(volumes=true)
  type: int
  sample: 0

# builder_cache
builder_cache_space_reclaimed:
  description:
    - Amount of reclaimed disk space from builder cache pruning in bytes.
  returned: O(builder_cache=true)
  type: int
  sample: 0
builder_cache_caches_deleted:
  description:
    - The build caches that were deleted.
  returned: O(builder_cache=true) and API version is 1.39 or later
  type: list
  elements: str
  sample: []
  version_added: 3.10.0
"""

import traceback

from ansible.module_utils.common.text.formatters import human_to_bytes

from ansible_collections.community.docker.plugins.module_utils._api.errors import (
    DockerException,
)
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
    convert_filters,
)
from ansible_collections.community.docker.plugins.module_utils._common_api import (
    AnsibleDockerClient,
    RequestException,
)
from ansible_collections.community.docker.plugins.module_utils._util import (
    clean_dict_booleans_for_docker_api,
)


def main() -> None:
    argument_spec = {
        "containers": {"type": "bool", "default": False},
        "containers_filters": {"type": "dict"},
        "images": {"type": "bool", "default": False},
        "images_filters": {"type": "dict"},
        "networks": {"type": "bool", "default": False},
        "networks_filters": {"type": "dict"},
        "volumes": {"type": "bool", "default": False},
        "volumes_filters": {"type": "dict"},
        "builder_cache": {"type": "bool", "default": False},
        "builder_cache_all": {"type": "bool", "default": False},
        "builder_cache_filters": {"type": "dict"},
        "builder_cache_keep_storage": {"type": "str"},  # converted to bytes below
    }

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        option_minimal_versions={
            "builder_cache": {"docker_py_version": "1.31"},
            "builder_cache_all": {"docker_py_version": "1.39"},
            "builder_cache_filters": {"docker_py_version": "1.31"},
            "builder_cache_keep_storage": {"docker_py_version": "1.39"},
        },
        # supports_check_mode=True,
    )

    builder_cache_keep_storage = None
    if client.module.params.get("builder_cache_keep_storage") is not None:
        try:
            builder_cache_keep_storage = human_to_bytes(
                client.module.params.get("builder_cache_keep_storage")
            )
        except ValueError as exc:
            client.module.fail_json(
                msg=f"Error while parsing value of builder_cache_keep_storage: {exc}"
            )
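
    # Illustrative note: human_to_bytes() parses the C(<number>[<unit>]) format
    # documented above using binary units, for example human_to_bytes("10M")
    # == 10 * 1024 * 1024, and human_to_bytes("512") == 512 (plain bytes).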

    try:
        result = {}
        changed = False

        if client.module.params["containers"]:
            filters = clean_dict_booleans_for_docker_api(
                client.module.params.get("containers_filters"), allow_sequences=True
            )
            params = {"filters": convert_filters(filters)}
            res = client.post_to_json("/containers/prune", params=params)
            result["containers"] = res.get("ContainersDeleted") or []
            result["containers_space_reclaimed"] = res["SpaceReclaimed"]
            if result["containers"] or result["containers_space_reclaimed"]:
                changed = True

        if client.module.params["images"]:
            filters = clean_dict_booleans_for_docker_api(
                client.module.params.get("images_filters"), allow_sequences=True
            )
            params = {"filters": convert_filters(filters)}
            res = client.post_to_json("/images/prune", params=params)
            result["images"] = res.get("ImagesDeleted") or []
            result["images_space_reclaimed"] = res["SpaceReclaimed"]
            if result["images"] or result["images_space_reclaimed"]:
                changed = True

        if client.module.params["networks"]:
            filters = clean_dict_booleans_for_docker_api(
                client.module.params.get("networks_filters"), allow_sequences=True
            )
            params = {"filters": convert_filters(filters)}
            res = client.post_to_json("/networks/prune", params=params)
            result["networks"] = res.get("NetworksDeleted") or []
            if result["networks"]:
                changed = True

        if client.module.params["volumes"]:
            filters = clean_dict_booleans_for_docker_api(
                client.module.params.get("volumes_filters"), allow_sequences=True
            )
            params = {"filters": convert_filters(filters)}
            res = client.post_to_json("/volumes/prune", params=params)
            result["volumes"] = res.get("VolumesDeleted") or []
            result["volumes_space_reclaimed"] = res["SpaceReclaimed"]
            if result["volumes"] or result["volumes_space_reclaimed"]:
                changed = True

        if client.module.params["builder_cache"]:
            filters = clean_dict_booleans_for_docker_api(
                client.module.params.get("builder_cache_filters"), allow_sequences=True
            )
            params = {"filters": convert_filters(filters)}
            if client.module.params.get("builder_cache_all"):
                params["all"] = "true"
            if builder_cache_keep_storage is not None:
                params["keep-storage"] = builder_cache_keep_storage
            res = client.post_to_json("/build/prune", params=params)
            result["builder_cache_space_reclaimed"] = res["SpaceReclaimed"]
            if result["builder_cache_space_reclaimed"]:
                changed = True
            if "CachesDeleted" in res:
                # API version 1.39+: return value CachesDeleted (list of str)
                result["builder_cache_caches_deleted"] = res["CachesDeleted"]
                if result["builder_cache_caches_deleted"]:
                    changed = True

        result["changed"] = changed
        client.module.exit_json(**result)
    except DockerException as e:
        client.fail(
            f"An unexpected Docker error occurred: {e}",
            exception=traceback.format_exc(),
        )
    except RequestException as e:
        client.fail(
            f"An unexpected requests error occurred when trying to talk to the Docker daemon: {e}",
            exception=traceback.format_exc(),
        )


if __name__ == "__main__":
    main()
@@ -0,0 +1,417 @@
#!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
module: docker_secret

short_description: Manage docker secrets

description:
  - Create and remove Docker secrets in a Swarm environment. Similar to C(docker secret create) and C(docker secret rm).
  - Adds to the metadata of new secrets C(ansible_key), a hash of the secret data, which is then used in
    future runs to test if a secret has changed. If C(ansible_key) is not present, then a secret will not be updated unless
    the O(force) option is set. A short sketch of this check follows at the end of this file.
  - Updates to secrets are performed by removing the secret and creating it again.
extends_documentation_fragment:
  - community.docker._docker
  - community.docker._docker.docker_py_2_documentation
  - community.docker._attributes
  - community.docker._attributes.actiongroup_docker

attributes:
  check_mode:
    support: full
  diff_mode:
    support: none
  idempotent:
    support: partial
    details:
      - If O(force=true) the module is not idempotent.

options:
  data:
    description:
      - The value of the secret.
      - Mutually exclusive with O(data_src). One of O(data) and O(data_src) is required if O(state=present).
    type: str
  data_is_b64:
    description:
      - If set to V(true), the data is assumed to be Base64 encoded and will be decoded before being used.
      - To use binary O(data), it is better to keep it Base64 encoded and let it be decoded by this option.
    type: bool
    default: false
  data_src:
    description:
      - The file on the target from which to read the secret.
      - Mutually exclusive with O(data). One of O(data) and O(data_src) is required if O(state=present).
    type: path
    version_added: 1.10.0
  labels:
    description:
      - A map of key:value metadata, where both key and value are expected to be strings.
      - If new metadata is provided, or existing metadata is modified, the secret will be updated by removing it and creating
        it again.
    type: dict
  force:
    description:
      - Use with O(state=present) to always remove and recreate an existing secret.
      - If V(true), an existing secret will be replaced, even if it has not changed.
    type: bool
    default: false
  rolling_versions:
    description:
      - If set to V(true), secrets are created with an increasing version number appended to their name.
      - Adds a label containing the version number to the managed secrets with the name C(ansible_version).
    type: bool
    default: false
    version_added: 2.2.0
  versions_to_keep:
    description:
      - When using O(rolling_versions), the number of old versions of the secret to keep.
      - Extraneous old secrets are deleted after the new one is created.
      - Set to V(-1) to keep everything, or to V(0) or V(1) to keep only the current one.
    type: int
    default: 5
    version_added: 2.2.0
  name:
    description:
      - The name of the secret.
    type: str
    required: true
  state:
    description:
      - Set to V(present), if the secret should exist, and V(absent), if it should not.
    type: str
    default: present
    choices:
      - absent
      - present

requirements:
  - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
  - "Docker API >= 1.25"

author:
  - Chris Houseknecht (@chouseknecht)
"""

EXAMPLES = r"""
---
- name: Create secret foo (from a file on the control machine)
  community.docker.docker_secret:
    name: foo
    # If the file is JSON or binary, Ansible might modify it (because
    # it is first decoded and later re-encoded). Base64-encoding the
    # file directly after reading it prevents this from happening.
    data: "{{ lookup('file', '/path/to/secret/file') | b64encode }}"
    data_is_b64: true
    state: present

- name: Create secret foo (from a file on the target machine)
  community.docker.docker_secret:
    name: foo
    data_src: /path/to/secret/file
    state: present

- name: Change the secret data
  community.docker.docker_secret:
    name: foo
    data: Goodnight everyone!
    labels:
      bar: baz
      one: '1'
    state: present

- name: Add a new label
  community.docker.docker_secret:
    name: foo
    data: Goodnight everyone!
    labels:
      bar: baz
      one: '1'
      # Adding a new label will cause a remove/create of the secret
      two: '2'
    state: present

- name: No change
  community.docker.docker_secret:
    name: foo
    data: Goodnight everyone!
    labels:
      bar: baz
      one: '1'
      # Even though 'two' is missing, there is no change to the existing secret
    state: present

- name: Update an existing label
  community.docker.docker_secret:
    name: foo
    data: Goodnight everyone!
    labels:
      bar: monkey  # Changing a label will cause a remove/create of the secret
      one: '1'
    state: present

- name: Force the removal/creation of the secret
  community.docker.docker_secret:
    name: foo
    data: Goodnight everyone!
    force: true
    state: present

- name: Remove secret foo
  community.docker.docker_secret:
    name: foo
    state: absent
"""

RETURN = r"""
secret_id:
  description:
    - The ID assigned by Docker to the secret object.
  returned: success and O(state=present)
  type: str
  sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
secret_name:
  description:
    - The name of the created secret object.
  returned: success and O(state=present)
  type: str
  sample: 'awesome_secret'
  version_added: 2.2.0
"""

import base64
import hashlib
import traceback
import typing as t

try:
    from docker.errors import APIError, DockerException
except ImportError:
    # missing Docker SDK for Python handled in ansible.module_utils.docker.common
    pass

from ansible.module_utils.common.text.converters import to_bytes

from ansible_collections.community.docker.plugins.module_utils._common import (
    AnsibleDockerClient,
    RequestException,
)
from ansible_collections.community.docker.plugins.module_utils._util import (
    DockerBaseClass,
    compare_generic,
    sanitize_labels,
)


class SecretManager(DockerBaseClass):
    def __init__(self, client: AnsibleDockerClient, results: dict[str, t.Any]) -> None:
        super().__init__()

        self.client = client
        self.results = results
        self.check_mode = self.client.check_mode

        parameters = self.client.module.params
        self.name = parameters.get("name")
        self.state = parameters.get("state")
        self.data = parameters.get("data")
        if self.data is not None:
            if parameters.get("data_is_b64"):
                self.data = base64.b64decode(self.data)
            else:
                self.data = to_bytes(self.data)
        data_src = parameters.get("data_src")
        if data_src is not None:
            try:
                with open(data_src, "rb") as f:
                    self.data = f.read()
            except Exception as exc:  # pylint: disable=broad-exception-caught
                self.client.fail(f"Error while reading {data_src}: {exc}")
        self.labels = parameters.get("labels")
        self.force = parameters.get("force")
        self.rolling_versions = parameters.get("rolling_versions")
        self.versions_to_keep = parameters.get("versions_to_keep")

        if self.rolling_versions:
            self.version = 0
        self.data_key: str | None = None
        self.secrets: list[dict[str, t.Any]] = []

    def __call__(self) -> None:
        self.get_secret()
        if self.state == "present":
            # The SHA-224 digest of the data is stored in the 'ansible_key'
            # label and compared on later runs to detect changes.
            self.data_key = hashlib.sha224(self.data).hexdigest()
            self.present()
            self.remove_old_versions()
        elif self.state == "absent":
            self.absent()

    def get_version(self, secret: dict[str, t.Any]) -> int:
        try:
            return int(
                secret.get("Spec", {}).get("Labels", {}).get("ansible_version", 0)
            )
        except ValueError:
            return 0

    def remove_old_versions(self) -> None:
        if not self.rolling_versions or self.versions_to_keep < 0:
            return
        if not self.check_mode:
            while len(self.secrets) > max(self.versions_to_keep, 1):
                self.remove_secret(self.secrets.pop(0))

    def get_secret(self) -> None:
        """Find an existing secret."""
        try:
            secrets = self.client.secrets(filters={"name": self.name})
        except APIError as exc:
            self.client.fail(f"Error accessing secret {self.name}: {exc}")

        if self.rolling_versions:
            self.secrets = [
                secret
                for secret in secrets
                if secret["Spec"]["Name"].startswith(f"{self.name}_v")
            ]
            self.secrets.sort(key=self.get_version)
        else:
            self.secrets = [
                secret for secret in secrets if secret["Spec"]["Name"] == self.name
            ]

    def create_secret(self) -> str | None:
        """Create a new secret."""
        secret_id: str | dict[str, t.Any] | None = None
        # We cannot see the data after creation, so adding a label we can use for idempotency check
        labels = {"ansible_key": self.data_key}
        if self.rolling_versions:
            self.version += 1
            labels["ansible_version"] = str(self.version)
            self.name = f"{self.name}_v{self.version}"
        if self.labels:
            labels.update(self.labels)

        try:
            if not self.check_mode:
                secret_id = self.client.create_secret(
                    self.name, self.data, labels=labels
                )
                self.secrets += self.client.secrets(filters={"id": secret_id})
        except APIError as exc:
            self.client.fail(f"Error creating secret: {exc}")

        if isinstance(secret_id, dict):
            return secret_id["ID"]

        return secret_id

    def remove_secret(self, secret: dict[str, t.Any]) -> None:
        try:
            if not self.check_mode:
                self.client.remove_secret(secret["ID"])
        except APIError as exc:
            self.client.fail(f"Error removing secret {secret['Spec']['Name']}: {exc}")

    def present(self) -> None:
        """Handles state == 'present', creating or updating the secret."""
        if self.secrets:
            secret = self.secrets[-1]
            self.results["secret_id"] = secret["ID"]
            self.results["secret_name"] = secret["Spec"]["Name"]
            data_changed = False
            attrs = secret.get("Spec", {})
            if attrs.get("Labels", {}).get("ansible_key"):
                if attrs["Labels"]["ansible_key"] != self.data_key:
                    data_changed = True
            else:
                if not self.force:
                    self.client.module.warn(
                        "'ansible_key' label not found. Secret will not be changed unless the force parameter is set to 'true'"
                    )
            labels_changed = not compare_generic(
                self.labels, attrs.get("Labels"), "allow_more_present", "dict"
            )
            if self.rolling_versions:
                self.version = self.get_version(secret)
            if data_changed or labels_changed or self.force:
                # if something changed or force, delete and re-create the secret
                if not self.rolling_versions:
                    self.absent()
                secret_id = self.create_secret()
                self.results["changed"] = True
                self.results["secret_id"] = secret_id
                self.results["secret_name"] = self.name
        else:
            self.results["changed"] = True
            self.results["secret_id"] = self.create_secret()
            self.results["secret_name"] = self.name

    def absent(self) -> None:
        """Handles state == 'absent', removing the secret."""
        if self.secrets:
            for secret in self.secrets:
                self.remove_secret(secret)
            self.results["changed"] = True


def main() -> None:
    argument_spec = {
        "name": {"type": "str", "required": True},
        "state": {
            "type": "str",
            "default": "present",
            "choices": ["absent", "present"],
        },
        "data": {"type": "str", "no_log": True},
        "data_is_b64": {"type": "bool", "default": False},
        "data_src": {"type": "path"},
        "labels": {"type": "dict"},
        "force": {"type": "bool", "default": False},
        "rolling_versions": {"type": "bool", "default": False},
        "versions_to_keep": {"type": "int", "default": 5},
    }

    required_if = [
        ("state", "present", ["data", "data_src"], True),
    ]

    mutually_exclusive = [
        ("data", "data_src"),
    ]

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        required_if=required_if,
        mutually_exclusive=mutually_exclusive,
        min_docker_version="2.1.0",
    )
    sanitize_labels(client.module.params["labels"], "labels", client)

    try:
        results = {"changed": False, "secret_id": "", "secret_name": ""}

        SecretManager(client, results)()
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail(
            f"An unexpected Docker error occurred: {e}",
            exception=traceback.format_exc(),
        )
    except RequestException as e:
        client.fail(
            f"An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {e}",
            exception=traceback.format_exc(),
        )


if __name__ == "__main__":
    main()
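
# A minimal sketch (illustrative only, not executed by the module) of the
# 'ansible_key' idempotency check described in the DOCUMENTATION above: Docker
# never returns secret data, so a SHA-224 digest stored as a label stands in
# for it on later runs.
#
#   import hashlib
#
#   def secret_unchanged(existing_labels: dict, new_data: bytes) -> bool:
#       return existing_labels.get("ansible_key") == hashlib.sha224(new_data).hexdigest()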
@@ -0,0 +1,379 @@
#!/usr/bin/python
|
||||
|
||||
# Copyright (c) 2018 Dario Zanzico (git@dariozanzico.com)
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
module: docker_stack
|
||||
author: "Dario Zanzico (@dariko)"
|
||||
short_description: docker stack module
|
||||
description:
|
||||
- Manage docker stacks using the C(docker stack) command on the target node (see examples).
|
||||
extends_documentation_fragment:
|
||||
- community.docker._docker.cli_documentation
|
||||
- community.docker._attributes
|
||||
- community.docker._attributes.actiongroup_docker
|
||||
attributes:
|
||||
check_mode:
|
||||
support: none
|
||||
diff_mode:
|
||||
support: none
|
||||
action_group:
|
||||
version_added: 3.6.0
|
||||
idempotent:
|
||||
support: full
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Stack name.
|
||||
type: str
|
||||
required: true
|
||||
state:
|
||||
description:
|
||||
- Service state.
|
||||
type: str
|
||||
default: "present"
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
compose:
|
||||
description:
|
||||
- List of compose definitions. Any element may be a string referring to the path of a compose file on the target host,
|
||||
or the YAML contents of a compose file nested as a dictionary.
|
||||
type: list
|
||||
elements: raw
|
||||
default: []
|
||||
prune:
|
||||
description:
|
||||
- If V(true), adds the C(--prune) option to the C(docker stack deploy) command. This makes Docker remove the services
|
||||
not present in the current stack definition.
|
||||
type: bool
|
||||
default: false
|
||||
detach:
|
||||
description:
|
||||
- If V(false), the C(--detach=false) option is added to the C(docker stack deploy) command, allowing Docker to wait
|
||||
for tasks to converge before exiting.
|
||||
- If V(true) (default), Docker exits immediately instead of waiting for tasks to converge.
|
||||
type: bool
|
||||
default: true
|
||||
version_added: 4.1.0
|
||||
with_registry_auth:
|
||||
description:
|
||||
- If V(true), adds the C(--with-registry-auth) option to the C(docker stack deploy) command. This makes Docker send
|
||||
registry authentication details to Swarm agents.
|
||||
type: bool
|
||||
default: false
|
||||
resolve_image:
|
||||
description:
|
||||
- If set, adds the C(--resolve-image) option to the C(docker stack deploy) command. This makes Docker query the
|
||||
registry to resolve image digests and supported platforms. If not set, Docker uses V(always) by default.
|
||||
type: str
|
||||
choices: ["always", "changed", "never"]
|
||||
absent_retries:
|
||||
description:
|
||||
- If larger than V(0) and O(state=absent), the module will retry up to O(absent_retries) times to delete the stack until
|
||||
all the resources have been effectively deleted. If the last try still reports the stack as not completely removed
|
||||
the module will fail.
|
||||
type: int
|
||||
default: 0
|
||||
absent_retries_interval:
|
||||
description:
|
||||
- Interval in seconds between consecutive retry attempts (see O(absent_retries)).
|
||||
type: int
|
||||
default: 1
|
||||
docker_cli:
|
||||
version_added: 3.6.0
|
||||
docker_host:
|
||||
version_added: 3.6.0
|
||||
tls_hostname:
|
||||
version_added: 3.6.0
|
||||
api_version:
|
||||
version_added: 3.6.0
|
||||
ca_path:
|
||||
version_added: 3.6.0
|
||||
client_cert:
|
||||
version_added: 3.6.0
|
||||
client_key:
|
||||
version_added: 3.6.0
|
||||
tls:
|
||||
version_added: 3.6.0
|
||||
validate_certs:
|
||||
version_added: 3.6.0
|
||||
cli_context:
|
||||
version_added: 3.6.0
|
||||
|
||||
requirements:
|
||||
- Docker CLI tool C(docker)
|
||||
- jsondiff
|
||||
- pyyaml
|
||||
"""
|
||||
|
||||
RETURN = r"""
|
||||
stack_spec_diff:
|
||||
description: |-
|
||||
Dictionary containing the differences between the 'Spec' field
|
||||
of the stack services before and after applying the new stack
|
||||
definition.
|
||||
sample: >
|
||||
"stack_spec_diff":
|
||||
{'test_stack_test_service': {'TaskTemplate': {'ContainerSpec': {delete: ['Env']}}}}
|
||||
returned: on change
|
||||
type: dict
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
---
|
||||
- name: Deploy stack from a compose file
|
||||
community.docker.docker_stack:
|
||||
state: present
|
||||
name: mystack
|
||||
compose:
|
||||
- /opt/docker-compose.yml
|
||||
|
||||
- name: Deploy stack from base compose file and override the web service
|
||||
community.docker.docker_stack:
|
||||
state: present
|
||||
name: mystack
|
||||
compose:
|
||||
- /opt/docker-compose.yml
|
||||
- version: '3'
|
||||
services:
|
||||
web:
|
||||
image: nginx:latest
|
||||
environment:
|
||||
ENVVAR: envvar
|
||||
|
||||
- name: Remove stack
|
||||
community.docker.docker_stack:
|
||||
name: mystack
|
||||
state: absent
|
||||
"""
|
||||
|
||||
|
||||
import json
|
||||
import os
|
||||
import tempfile
|
||||
import traceback
|
||||
import typing as t
|
||||
from time import sleep
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_text
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._common_cli import (
|
||||
AnsibleModuleDockerClient,
|
||||
DockerException,
|
||||
)
|
||||
|
||||
try:
|
||||
from jsondiff import diff as json_diff
|
||||
|
||||
HAS_JSONDIFF = True
|
||||
except ImportError:
|
||||
HAS_JSONDIFF = False
|
||||
|
||||
try:
|
||||
from yaml import dump as yaml_dump
|
||||
|
||||
HAS_YAML = True
|
||||
except ImportError:
|
||||
HAS_YAML = False
|
||||
|
||||
|
||||
def docker_stack_services(
|
||||
client: AnsibleModuleDockerClient, stack_name: str
|
||||
) -> list[str]:
|
||||
dummy_rc, out, err = client.call_cli(
|
||||
"stack", "services", stack_name, "--format", "{{.Name}}"
|
||||
)
|
||||
if to_text(err) == f"Nothing found in stack: {stack_name}\n":
|
||||
return []
|
||||
return to_text(out).strip().split("\n")
|
||||
|
||||
|
||||
def docker_service_inspect(
|
||||
client: AnsibleModuleDockerClient, service_name: str
|
||||
) -> dict[str, t.Any] | None:
|
||||
rc, out, dummy_err = client.call_cli("service", "inspect", service_name)
|
||||
if rc != 0:
|
||||
return None
|
||||
ret = json.loads(out)[0]["Spec"]
|
||||
return ret
|
||||
|
||||
|
||||
def docker_stack_deploy(
|
||||
client: AnsibleModuleDockerClient, stack_name: str, compose_files: list[str]
|
||||
) -> tuple[int, str, str]:
|
||||
command = ["stack", "deploy"]
|
||||
if client.module.params["prune"]:
|
||||
command += ["--prune"]
|
||||
if not client.module.params["detach"]:
|
||||
command += ["--detach=false"]
|
||||
if client.module.params["with_registry_auth"]:
|
||||
command += ["--with-registry-auth"]
|
||||
if client.module.params["resolve_image"]:
|
||||
command += ["--resolve-image", client.module.params["resolve_image"]]
|
||||
for compose_file in compose_files:
|
||||
command += ["--compose-file", compose_file]
|
||||
command += [stack_name]
|
||||
rc, out, err = client.call_cli(*command)
|
||||
return rc, to_text(out), to_text(err)
|
||||
|
||||
|
||||
def docker_stack_inspect(
|
||||
client: AnsibleModuleDockerClient, stack_name: str
|
||||
) -> dict[str, dict[str, t.Any] | None]:
|
||||
ret: dict[str, dict[str, t.Any] | None] = {}
|
||||
for service_name in docker_stack_services(client, stack_name):
|
||||
ret[service_name] = docker_service_inspect(client, service_name)
|
||||
return ret
|
||||
|
||||
|
||||
def docker_stack_rm(
|
||||
client: AnsibleModuleDockerClient,
|
||||
stack_name: str,
|
||||
retries: int,
|
||||
interval: int | float,
|
||||
) -> tuple[int, str, str]:
|
||||
command = ["stack", "rm", stack_name]
|
||||
if not client.module.params["detach"]:
|
||||
command += ["--detach=false"]
|
||||
rc, out, err = client.call_cli(*command)
|
||||
|
||||
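# `docker stack rm` can return before all resources are gone. Keep
# re-running it until stderr reports "Nothing found in stack" or the
# retry budget is exhausted.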
while to_text(err) != f"Nothing found in stack: {stack_name}\n" and retries > 0:
|
||||
sleep(interval)
|
||||
retries = retries - 1
|
||||
rc, out, err = client.call_cli(*command)
|
||||
return rc, to_text(out), to_text(err)
|
||||
|
||||
|
||||
def main() -> None:
|
||||
client = AnsibleModuleDockerClient(
|
||||
argument_spec={
|
||||
"name": {"type": "str", "required": True},
|
||||
"compose": {"type": "list", "elements": "raw", "default": []},
|
||||
"prune": {"type": "bool", "default": False},
|
||||
"detach": {"type": "bool", "default": True},
|
||||
"with_registry_auth": {"type": "bool", "default": False},
|
||||
"resolve_image": {"type": "str", "choices": ["always", "changed", "never"]},
|
||||
"state": {
|
||||
"type": "str",
|
||||
"default": "present",
|
||||
"choices": ["present", "absent"],
|
||||
},
|
||||
"absent_retries": {"type": "int", "default": 0},
|
||||
"absent_retries_interval": {"type": "int", "default": 1},
|
||||
},
|
||||
supports_check_mode=False,
|
||||
)
|
||||
|
||||
if not HAS_JSONDIFF:
|
||||
client.fail("jsondiff is not installed, try 'pip install jsondiff'")
|
||||
|
||||
if not HAS_YAML:
|
||||
client.fail("yaml is not installed, try 'pip install pyyaml'")
|
||||
|
||||
try:
|
||||
state = client.module.params["state"]
|
||||
compose = client.module.params["compose"]
|
||||
name = client.module.params["name"]
|
||||
absent_retries = client.module.params["absent_retries"]
|
||||
absent_retries_interval = client.module.params["absent_retries_interval"]
|
||||
|
||||
if state == "present":
|
||||
if not compose:
|
||||
client.fail(
|
||||
"compose parameter must be a list containing at least one element"
|
||||
)
|
||||
|
||||
compose_files = []
|
||||
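# Inline dictionary definitions are serialized to temporary YAML files,
# so both file paths and inline definitions can be passed to
# `docker stack deploy` uniformly via --compose-file.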
for compose_def in compose:
|
||||
if isinstance(compose_def, dict):
|
||||
compose_file_fd, compose_file = tempfile.mkstemp()
|
||||
client.module.add_cleanup_file(compose_file)
|
||||
with os.fdopen(compose_file_fd, "w") as stack_file:
|
||||
compose_files.append(compose_file)
|
||||
stack_file.write(yaml_dump(compose_def))
|
||||
elif isinstance(compose_def, str):
|
||||
compose_files.append(compose_def)
|
||||
else:
|
||||
client.fail(
|
||||
f"compose element '{compose_def}' must be a string or a dictionary"
|
||||
)
|
||||
|
||||
before_stack_services = docker_stack_inspect(client, name)
|
||||
|
||||
rc, out, err = docker_stack_deploy(client, name, compose_files)
|
||||
|
||||
after_stack_services = docker_stack_inspect(client, name)
|
||||
|
||||
if rc != 0:
|
||||
client.fail(
|
||||
"docker stack up deploy command failed",
|
||||
rc=rc,
|
||||
stdout=out,
|
||||
stderr=err,
|
||||
)
|
||||
|
||||
before_after_differences = json_diff(
|
||||
before_stack_services, after_stack_services
|
||||
)
|
||||
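# UpdatedAt and Version change on every deploy, so drop them to avoid
# reporting an otherwise identical redeploy as changed.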
for k in before_after_differences:
|
||||
if isinstance(before_after_differences[k], dict):
|
||||
before_after_differences[k].pop("UpdatedAt", None)
|
||||
before_after_differences[k].pop("Version", None)
|
||||
if not list(before_after_differences[k].keys()):
|
||||
before_after_differences.pop(k)
|
||||
|
||||
if not before_after_differences:
|
||||
client.module.exit_json(
|
||||
changed=False,
|
||||
rc=rc,
|
||||
stdout=out,
|
||||
stderr=err,
|
||||
)
|
||||
else:
|
||||
client.module.exit_json(
|
||||
changed=True,
|
||||
rc=rc,
|
||||
stdout=out,
|
||||
stderr=err,
|
||||
stack_spec_diff=json_diff(
|
||||
before_stack_services,
|
||||
after_stack_services,
|
||||
dump=True,
|
||||
),
|
||||
)
|
||||
|
||||
else:
|
||||
if docker_stack_services(client, name):
|
||||
rc, out, err = docker_stack_rm(
|
||||
client, name, absent_retries, absent_retries_interval
|
||||
)
|
||||
if rc != 0:
|
||||
client.module.fail_json(
|
||||
msg="'docker stack down' command failed",
|
||||
rc=rc,
|
||||
stdout=out,
|
||||
stderr=err,
|
||||
)
|
||||
else:
|
||||
client.module.exit_json(
|
||||
changed=True,
|
||||
msg=out,
|
||||
rc=rc,
|
||||
stdout=out,
|
||||
stderr=err,
|
||||
)
|
||||
client.module.exit_json(changed=False)
|
||||
except DockerException as e:
|
||||
client.fail(
|
||||
f"An unexpected Docker error occurred: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@@ -0,0 +1,112 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
# Copyright (c) 2020 Jose Angel Munoz (@imjoseangel)
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
module: docker_stack_info
|
||||
author: "Jose Angel Munoz (@imjoseangel)"
|
||||
short_description: Return information on all docker stacks
|
||||
description:
|
||||
- Retrieve information on docker stacks using the C(docker stack) command on the target node (see examples).
|
||||
requirements:
|
||||
- Docker CLI tool C(docker)
|
||||
extends_documentation_fragment:
|
||||
- community.docker._docker.cli_documentation
|
||||
- community.docker._attributes
|
||||
- community.docker._attributes.actiongroup_docker
|
||||
- community.docker._attributes.info_module
|
||||
- community.docker._attributes.idempotent_not_modify_state
|
||||
attributes:
|
||||
action_group:
|
||||
version_added: 3.6.0
|
||||
options:
|
||||
docker_cli:
|
||||
version_added: 3.6.0
|
||||
docker_host:
|
||||
version_added: 3.6.0
|
||||
tls_hostname:
|
||||
version_added: 3.6.0
|
||||
api_version:
|
||||
version_added: 3.6.0
|
||||
ca_path:
|
||||
version_added: 3.6.0
|
||||
client_cert:
|
||||
version_added: 3.6.0
|
||||
client_key:
|
||||
version_added: 3.6.0
|
||||
tls:
|
||||
version_added: 3.6.0
|
||||
validate_certs:
|
||||
version_added: 3.6.0
|
||||
cli_context:
|
||||
version_added: 3.6.0
|
||||
seealso:
|
||||
- module: community.docker.docker_stack_task_info
|
||||
description: >-
|
||||
To retrieve detailed information about the services under a specific stack, use the M(community.docker.docker_stack_task_info)
|
||||
module.
|
||||
"""
|
||||
|
||||
RETURN = r"""
|
||||
results:
|
||||
description:
|
||||
- List of dictionaries containing the list of stacks on the target node.
|
||||
sample:
|
||||
- {"name": "grafana", "namespace": "default", "orchestrator": "Kubernetes", "services": "2"}
|
||||
returned: always
|
||||
type: list
|
||||
elements: dict
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
---
|
||||
- name: Shows stack info
|
||||
community.docker.docker_stack_info:
|
||||
register: result
|
||||
|
||||
- name: Show results
|
||||
ansible.builtin.debug:
|
||||
var: result.results
|
||||
"""
|
||||
|
||||
import json
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_text
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._common_cli import (
|
||||
AnsibleModuleDockerClient,
|
||||
DockerException,
|
||||
)
|
||||
|
||||
|
||||
def main() -> None:
|
||||
client = AnsibleModuleDockerClient(
|
||||
argument_spec={},
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
try:
|
||||
rc, ret, stderr = client.call_cli_json_stream(
|
||||
"stack", "ls", "--format={{json .}}", check_rc=True
|
||||
)
|
||||
client.module.exit_json(
|
||||
changed=False,
|
||||
rc=rc,
|
||||
stdout="\n".join([json.dumps(entry) for entry in ret]),
|
||||
stderr=to_text(stderr).strip(),
|
||||
results=ret,
|
||||
)
|
||||
except DockerException as e:
|
||||
client.fail(
|
||||
f"An unexpected Docker error occurred: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@@ -0,0 +1,121 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
# Copyright (c) 2020 Jose Angel Munoz (@imjoseangel)
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
module: docker_stack_task_info
|
||||
author: "Jose Angel Munoz (@imjoseangel)"
|
||||
short_description: Return information of the tasks on a docker stack
|
||||
description:
|
||||
- Retrieve information on the tasks of a docker stack using the C(docker stack) command on the target node (see examples).
|
||||
extends_documentation_fragment:
|
||||
- community.docker._docker.cli_documentation
|
||||
- community.docker._attributes
|
||||
- community.docker._attributes.actiongroup_docker
|
||||
- community.docker._attributes.info_module
|
||||
- community.docker._attributes.idempotent_not_modify_state
|
||||
attributes:
|
||||
action_group:
|
||||
version_added: 3.6.0
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Stack name.
|
||||
type: str
|
||||
required: true
|
||||
docker_cli:
|
||||
version_added: 3.6.0
|
||||
docker_host:
|
||||
version_added: 3.6.0
|
||||
tls_hostname:
|
||||
version_added: 3.6.0
|
||||
api_version:
|
||||
version_added: 3.6.0
|
||||
ca_path:
|
||||
version_added: 3.6.0
|
||||
client_cert:
|
||||
version_added: 3.6.0
|
||||
client_key:
|
||||
version_added: 3.6.0
|
||||
tls:
|
||||
version_added: 3.6.0
|
||||
validate_certs:
|
||||
version_added: 3.6.0
|
||||
cli_context:
|
||||
version_added: 3.6.0
|
||||
requirements:
|
||||
- Docker CLI tool C(docker)
|
||||
"""
|
||||
|
||||
RETURN = r"""
|
||||
results:
|
||||
description:
|
||||
- List of dictionaries containing the list of tasks associated with a stack name.
|
||||
sample:
|
||||
- CurrentState: Running
|
||||
DesiredState: Running
|
||||
Error: ""
|
||||
ID: 7wqv6m02ugkw
|
||||
Image: busybox
|
||||
Name: test_stack.1
|
||||
Node: swarm
|
||||
Ports: ""
|
||||
returned: always
|
||||
type: list
|
||||
elements: dict
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
---
|
||||
- name: Shows stack info
|
||||
community.docker.docker_stack_task_info:
|
||||
name: test_stack
|
||||
register: result
|
||||
|
||||
- name: Show results
|
||||
ansible.builtin.debug:
|
||||
var: result.results
|
||||
"""
|
||||
|
||||
import json
|
||||
import traceback
|
||||
|
||||
from ansible.module_utils.common.text.converters import to_text
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._common_cli import (
|
||||
AnsibleModuleDockerClient,
|
||||
DockerException,
|
||||
)
|
||||
|
||||
|
||||
def main() -> None:
|
||||
client = AnsibleModuleDockerClient(
|
||||
argument_spec={"name": {"type": "str", "required": True}},
|
||||
supports_check_mode=True,
|
||||
)
|
||||
|
||||
try:
|
||||
name = client.module.params["name"]
|
||||
rc, ret, stderr = client.call_cli_json_stream(
|
||||
"stack", "ps", name, "--format={{json .}}", check_rc=True
|
||||
)
|
||||
client.module.exit_json(
|
||||
changed=False,
|
||||
rc=rc,
|
||||
stdout="\n".join([json.dumps(entry) for entry in ret]),
|
||||
stderr=to_text(stderr).strip(),
|
||||
results=ret,
|
||||
)
|
||||
except DockerException as e:
|
||||
client.fail(
|
||||
f"An unexpected Docker error occurred: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@@ -0,0 +1,755 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
# Copyright 2016 Red Hat | Ansible
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
module: docker_swarm
|
||||
short_description: Manage Swarm cluster
|
||||
description:
|
||||
- Create a new Swarm cluster.
|
||||
- Add/Remove nodes or managers to an existing cluster.
|
||||
extends_documentation_fragment:
|
||||
- community.docker._docker
|
||||
- community.docker._docker.docker_py_2_documentation
|
||||
- community.docker._attributes
|
||||
- community.docker._attributes.actiongroup_docker
|
||||
|
||||
attributes:
|
||||
check_mode:
|
||||
support: full
|
||||
diff_mode:
|
||||
support: full
|
||||
idempotent:
|
||||
support: full
|
||||
|
||||
options:
|
||||
advertise_addr:
|
||||
description:
|
||||
- Externally reachable address advertised to other nodes.
|
||||
- This can either be an address/port combination in the form V(192.168.1.1:4567), or an interface followed by a port
|
||||
number, like V(eth0:4567).
|
||||
- If the port number is omitted, the port number from the listen address is used.
|
||||
- If O(advertise_addr) is not specified, it will be automatically detected when possible.
|
||||
- Only used when swarm is initialised or joined. Because of this it is not considered for idempotency checking.
|
||||
type: str
|
||||
default_addr_pool:
|
||||
description:
|
||||
- Default address pool in CIDR format.
|
||||
- Only used when swarm is initialised. Because of this it is not considered for idempotency checking.
|
||||
- Requires API version >= 1.39.
|
||||
type: list
|
||||
elements: str
|
||||
subnet_size:
|
||||
description:
|
||||
- Default address pool subnet mask length.
|
||||
- Only used when swarm is initialised. Because of this it is not considered for idempotency checking.
|
||||
- Requires API version >= 1.39.
|
||||
type: int
|
||||
listen_addr:
|
||||
description:
|
||||
- Listen address used for inter-manager communication.
|
||||
- This can either be an address/port combination in the form V(192.168.1.1:4567), or an interface followed by a port
|
||||
number, like V(eth0:4567).
|
||||
- If the port number is omitted, the default swarm listening port is used.
|
||||
- Only used when swarm is initialised or joined. Because of this it is not considered for idempotency checking.
|
||||
type: str
|
||||
default: 0.0.0.0:2377
|
||||
force:
|
||||
description:
|
||||
- Use with state V(present) to force creating a new Swarm, even if already part of one.
|
||||
- Use with state V(absent) to leave the swarm even if this node is a manager.
|
||||
type: bool
|
||||
default: false
|
||||
state:
|
||||
description:
|
||||
- Set to V(present), to create/update a new cluster.
|
||||
- Set to V(join), to join an existing cluster.
|
||||
- Set to V(absent), to leave an existing cluster.
|
||||
- Set to V(remove), to remove an absent node from the cluster. Note that removing requires Docker SDK for Python >=
|
||||
2.4.0.
|
||||
- M(community.docker.docker_node) can be used to demote a manager before removal.
|
||||
type: str
|
||||
default: present
|
||||
choices:
|
||||
- present
|
||||
- join
|
||||
- absent
|
||||
- remove
|
||||
node_id:
|
||||
description:
|
||||
- Swarm ID of the node to remove.
|
||||
- Used with O(state=remove).
|
||||
type: str
|
||||
join_token:
|
||||
description:
|
||||
- Swarm token used to join a swarm cluster.
|
||||
- Used with O(state=join).
|
||||
- If this value is specified, the corresponding value in the return values will be censored by Ansible. This is a side-effect
|
||||
of this value not being logged.
|
||||
type: str
|
||||
remote_addrs:
|
||||
description:
|
||||
- Remote address of one or more manager nodes of an existing Swarm to connect to.
|
||||
- Used with O(state=join).
|
||||
type: list
|
||||
elements: str
|
||||
task_history_retention_limit:
|
||||
description:
|
||||
- Maximum number of task history entries stored.
|
||||
- Docker default value is V(5).
|
||||
type: int
|
||||
snapshot_interval:
|
||||
description:
|
||||
- Number of log entries between snapshots.
|
||||
- Docker default value is V(10000).
|
||||
type: int
|
||||
keep_old_snapshots:
|
||||
description:
|
||||
- Number of snapshots to keep beyond the current snapshot.
|
||||
- Docker default value is V(0).
|
||||
type: int
|
||||
log_entries_for_slow_followers:
|
||||
description:
|
||||
- Number of log entries to keep around to sync up slow followers after a snapshot is created.
|
||||
type: int
|
||||
heartbeat_tick:
|
||||
description:
|
||||
- Number of ticks (in seconds) between each heartbeat.
|
||||
- Docker default value is V(1) second.
|
||||
type: int
|
||||
election_tick:
|
||||
description:
|
||||
- Number of ticks (in seconds) needed without a leader to trigger a new election.
|
||||
- Docker default value is V(10) seconds.
|
||||
type: int
|
||||
dispatcher_heartbeat_period:
|
||||
description:
|
||||
- The delay (in nanoseconds) for an agent to send a heartbeat to the dispatcher.
|
||||
- Docker default value is 5 seconds, which corresponds to a value of V(5000000000).
|
||||
type: int
|
||||
node_cert_expiry:
|
||||
description:
|
||||
- Automatic expiry for node certificates, given in nanoseconds.
|
||||
- Docker default value is 90 days, which corresponds to a value of V(7776000000000000).
|
||||
type: int
|
||||
name:
|
||||
description:
|
||||
- The name of the swarm.
|
||||
type: str
|
||||
labels:
|
||||
description:
|
||||
- User-defined key/value metadata.
|
||||
- Label operations in this module apply to the docker swarm cluster. Use the M(community.docker.docker_node) module to add/modify/remove
|
||||
swarm node labels.
|
||||
- Requires API version >= 1.32.
|
||||
type: dict
|
||||
signing_ca_cert:
|
||||
description:
|
||||
- The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format.
|
||||
- This must not be a path to a certificate, but the contents of the certificate.
|
||||
- Requires API version >= 1.30.
|
||||
type: str
|
||||
signing_ca_key:
|
||||
description:
|
||||
- The desired signing CA key for all swarm node TLS leaf certificates, in PEM format.
|
||||
- This must not be a path to a key, but the contents of the key.
|
||||
- Requires API version >= 1.30.
|
||||
type: str
|
||||
ca_force_rotate:
|
||||
description:
|
||||
- An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified.
|
||||
- Docker default value is V(0).
|
||||
- Requires API version >= 1.30.
|
||||
type: int
|
||||
autolock_managers:
|
||||
description:
|
||||
- If set, generate a key and use it to lock data stored on the managers.
|
||||
- Docker default value is V(false).
|
||||
- M(community.docker.docker_swarm_info) can be used to retrieve the unlock key.
|
||||
type: bool
|
||||
rotate_worker_token:
|
||||
description: Rotate the worker join token.
|
||||
type: bool
|
||||
default: false
|
||||
rotate_manager_token:
|
||||
description: Rotate the manager join token.
|
||||
type: bool
|
||||
default: false
|
||||
data_path_addr:
|
||||
description:
|
||||
- Address or interface to use for data path traffic.
|
||||
- This can either be an address in the form V(192.168.1.1), or an interface, like V(eth0).
|
||||
- Only used when swarm is initialised or joined. Because of this it is not considered for idempotency checking.
|
||||
- Requires API version >= 1.30.
|
||||
type: str
|
||||
version_added: 2.5.0
|
||||
data_path_port:
|
||||
description:
|
||||
- Port to use for data path traffic.
|
||||
- This needs to be a port number like V(9789).
|
||||
- Only used when swarm is initialised. Because of this it is not considered for idempotency checking.
|
||||
- Requires API version >= 1.40.
|
||||
type: int
|
||||
version_added: 3.1.0
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.0"
|
||||
- Docker API >= 1.25
|
||||
author:
|
||||
- Thierry Bouvet (@tbouvet)
|
||||
- Piotr Wojciechowski (@WojciechowskiPiotr)
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
---
|
||||
- name: Init a new swarm with default parameters
|
||||
community.docker.docker_swarm:
|
||||
state: present
|
||||
|
||||
- name: Update swarm configuration
|
||||
community.docker.docker_swarm:
|
||||
state: present
|
||||
election_tick: 5
|
||||
|
||||
- name: Add nodes
|
||||
community.docker.docker_swarm:
|
||||
state: join
|
||||
advertise_addr: 192.168.1.2
|
||||
join_token: SWMTKN-1--xxxxx
|
||||
remote_addrs: ['192.168.1.1:2377']
|
||||
|
||||
- name: Leave swarm for a node
|
||||
community.docker.docker_swarm:
|
||||
state: absent
|
||||
|
||||
- name: Remove a swarm manager
|
||||
community.docker.docker_swarm:
|
||||
state: absent
|
||||
force: true
|
||||
|
||||
- name: Remove node from swarm
|
||||
community.docker.docker_swarm:
|
||||
state: remove
|
||||
node_id: mynode
|
||||
|
||||
- name: Init a new swarm with different data path interface
|
||||
community.docker.docker_swarm:
|
||||
state: present
|
||||
advertise_addr: eth0
|
||||
data_path_addr: ens10
|
||||
|
||||
- name: Init a new swarm with a different data path port
|
||||
community.docker.docker_swarm:
|
||||
state: present
|
||||
data_path_port: 9789
|
||||
"""
|
||||
|
||||
RETURN = r"""
|
||||
swarm_facts:
|
||||
description: Information about swarm.
|
||||
returned: success
|
||||
type: dict
|
||||
contains:
|
||||
JoinTokens:
|
||||
description: Tokens to connect to the Swarm.
|
||||
returned: success
|
||||
type: dict
|
||||
contains:
|
||||
Worker:
|
||||
description:
|
||||
- Token to join the cluster as a new *worker* node.
|
||||
- B(Note:) if this value has been specified as O(join_token), the value here will not be the token, but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER).
|
||||
If you pass O(join_token), make sure your playbook/role does not depend on this return value!
|
||||
returned: success
|
||||
type: str
|
||||
example: SWMTKN-1--xxxxx
|
||||
Manager:
|
||||
description:
|
||||
- Token to join the cluster as a new *manager* node.
|
||||
- B(Note:) if this value has been specified as O(join_token), the value here will not be the token, but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER).
|
||||
If you pass O(join_token), make sure your playbook/role does not depend on this return value!
|
||||
returned: success
|
||||
type: str
|
||||
example: SWMTKN-1--xxxxx
|
||||
UnlockKey:
|
||||
description: The swarm unlock-key if O(autolock_managers=true).
|
||||
returned: on success if O(autolock_managers=true) and swarm is initialised, or if O(autolock_managers) has changed.
|
||||
type: str
|
||||
example: SWMKEY-1-xxx
|
||||
|
||||
actions:
|
||||
description: Provides the actions done on the swarm.
|
||||
returned: when action failed.
|
||||
type: list
|
||||
elements: str
|
||||
example: ['This cluster is already a swarm cluster']
|
||||
"""
|
||||
|
||||
import json
|
||||
import traceback
|
||||
import typing as t
|
||||
|
||||
try:
|
||||
from docker.errors import APIError, DockerException
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._common import (
|
||||
RequestException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._swarm import (
|
||||
AnsibleDockerSwarmClient,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._util import (
|
||||
DifferenceTracker,
|
||||
DockerBaseClass,
|
||||
sanitize_labels,
|
||||
)
|
||||
|
||||
|
||||
class TaskParameters(DockerBaseClass):
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
|
||||
self.advertise_addr: str | None = None
|
||||
self.listen_addr: str | None = None
|
||||
self.remote_addrs: list[str] | None = None
|
||||
self.join_token: str | None = None
|
||||
self.data_path_addr: str | None = None
|
||||
self.data_path_port: int | None = None
|
||||
self.spec = None
|
||||
|
||||
# Spec
|
||||
self.snapshot_interval: int | None = None
|
||||
self.task_history_retention_limit: int | None = None
|
||||
self.keep_old_snapshots: int | None = None
|
||||
self.log_entries_for_slow_followers: int | None = None
|
||||
self.heartbeat_tick: int | None = None
|
||||
self.election_tick: int | None = None
|
||||
self.dispatcher_heartbeat_period: int | None = None
|
||||
self.node_cert_expiry: int | None = None
|
||||
self.name: str | None = None
|
||||
self.labels: dict[str, t.Any] | None = None
|
||||
self.log_driver = None
|
||||
self.signing_ca_cert: str | None = None
|
||||
self.signing_ca_key: str | None = None
|
||||
self.ca_force_rotate: int | None = None
|
||||
self.autolock_managers: bool | None = None
|
||||
self.rotate_worker_token: bool | None = None
|
||||
self.rotate_manager_token: bool | None = None
|
||||
self.default_addr_pool: list[str] | None = None
|
||||
self.subnet_size: int | None = None
|
||||
|
||||
@staticmethod
|
||||
def from_ansible_params(client: AnsibleDockerSwarmClient) -> TaskParameters:
|
||||
result = TaskParameters()
|
||||
for key, value in client.module.params.items():
|
||||
if key in result.__dict__:
|
||||
setattr(result, key, value)
|
||||
|
||||
result.update_parameters(client)
|
||||
return result
|
||||
|
||||
def update_from_swarm_info(self, swarm_info: dict[str, t.Any]) -> None:
|
||||
spec = swarm_info["Spec"]
|
||||
|
||||
ca_config = spec.get("CAConfig") or {}
|
||||
if self.node_cert_expiry is None:
|
||||
self.node_cert_expiry = ca_config.get("NodeCertExpiry")
|
||||
if self.ca_force_rotate is None:
|
||||
self.ca_force_rotate = ca_config.get("ForceRotate")
|
||||
|
||||
dispatcher = spec.get("Dispatcher") or {}
|
||||
if self.dispatcher_heartbeat_period is None:
|
||||
self.dispatcher_heartbeat_period = dispatcher.get("HeartbeatPeriod")
|
||||
|
||||
raft = spec.get("Raft") or {}
|
||||
if self.snapshot_interval is None:
|
||||
self.snapshot_interval = raft.get("SnapshotInterval")
|
||||
if self.keep_old_snapshots is None:
|
||||
self.keep_old_snapshots = raft.get("KeepOldSnapshots")
|
||||
if self.heartbeat_tick is None:
|
||||
self.heartbeat_tick = raft.get("HeartbeatTick")
|
||||
if self.log_entries_for_slow_followers is None:
|
||||
self.log_entries_for_slow_followers = raft.get("LogEntriesForSlowFollowers")
|
||||
if self.election_tick is None:
|
||||
self.election_tick = raft.get("ElectionTick")
|
||||
|
||||
orchestration = spec.get("Orchestration") or {}
|
||||
if self.task_history_retention_limit is None:
|
||||
self.task_history_retention_limit = orchestration.get(
|
||||
"TaskHistoryRetentionLimit"
|
||||
)
|
||||
|
||||
encryption_config = spec.get("EncryptionConfig") or {}
|
||||
if self.autolock_managers is None:
|
||||
self.autolock_managers = encryption_config.get("AutoLockManagers")
|
||||
|
||||
if self.name is None:
|
||||
self.name = spec["Name"]
|
||||
|
||||
if self.labels is None:
|
||||
self.labels = spec.get("Labels") or {}
|
||||
|
||||
if "LogDriver" in spec["TaskDefaults"]:
|
||||
self.log_driver = spec["TaskDefaults"]["LogDriver"]
|
||||
|
||||
def update_parameters(self, client: AnsibleDockerSwarmClient) -> None:
|
||||
assign = {
|
||||
"snapshot_interval": "snapshot_interval",
|
||||
"task_history_retention_limit": "task_history_retention_limit",
|
||||
"keep_old_snapshots": "keep_old_snapshots",
|
||||
"log_entries_for_slow_followers": "log_entries_for_slow_followers",
|
||||
"heartbeat_tick": "heartbeat_tick",
|
||||
"election_tick": "election_tick",
|
||||
"dispatcher_heartbeat_period": "dispatcher_heartbeat_period",
|
||||
"node_cert_expiry": "node_cert_expiry",
|
||||
"name": "name",
|
||||
"labels": "labels",
|
||||
"signing_ca_cert": "signing_ca_cert",
|
||||
"signing_ca_key": "signing_ca_key",
|
||||
"ca_force_rotate": "ca_force_rotate",
|
||||
"autolock_managers": "autolock_managers",
|
||||
"log_driver": "log_driver",
|
||||
}
|
||||
params = {}
|
||||
for dest, source in assign.items():
|
||||
if not client.option_minimal_versions[source]["supported"]:
|
||||
continue
|
||||
value = getattr(self, source)
|
||||
if value is not None:
|
||||
params[dest] = value
|
||||
self.spec = client.create_swarm_spec(**params)
|
||||
|
||||
def compare_to_active(
|
||||
self,
|
||||
other: TaskParameters,
|
||||
client: AnsibleDockerSwarmClient,
|
||||
differences: DifferenceTracker,
|
||||
) -> DifferenceTracker:
|
||||
for k in self.__dict__:
|
||||
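# Skip options that only apply when the swarm is initialised or
# joined; the rotate flags are handled separately below.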
if k in (
|
||||
"advertise_addr",
|
||||
"listen_addr",
|
||||
"remote_addrs",
|
||||
"join_token",
|
||||
"rotate_worker_token",
|
||||
"rotate_manager_token",
|
||||
"spec",
|
||||
"default_addr_pool",
|
||||
"subnet_size",
|
||||
"data_path_addr",
|
||||
"data_path_port",
|
||||
):
|
||||
continue
|
||||
if not client.option_minimal_versions[k]["supported"]:
|
||||
continue
|
||||
value = getattr(self, k)
|
||||
if value is None:
|
||||
continue
|
||||
other_value = getattr(other, k)
|
||||
if value != other_value:
|
||||
differences.add(k, parameter=value, active=other_value)
|
||||
if self.rotate_worker_token:
|
||||
differences.add("rotate_worker_token", parameter=True, active=False)
|
||||
if self.rotate_manager_token:
|
||||
differences.add("rotate_manager_token", parameter=True, active=False)
|
||||
return differences
|
||||
|
||||
|
||||
class SwarmManager(DockerBaseClass):
|
||||
def __init__(
|
||||
self, client: AnsibleDockerSwarmClient, results: dict[str, t.Any]
|
||||
) -> None:
|
||||
super().__init__()
|
||||
|
||||
self.client = client
|
||||
self.results = results
|
||||
self.check_mode = self.client.check_mode
|
||||
self.swarm_info: dict[str, t.Any] = {}
|
||||
|
||||
self.state: t.Literal["present", "join", "absent", "remove"] = (
|
||||
client.module.params["state"]
|
||||
)
|
||||
self.force: bool = client.module.params["force"]
|
||||
self.node_id: str | None = client.module.params["node_id"]
|
||||
|
||||
self.differences = DifferenceTracker()
|
||||
self.parameters = TaskParameters.from_ansible_params(client)
|
||||
|
||||
self.created = False
|
||||
|
||||
def __call__(self) -> None:
|
||||
choice_map = {
|
||||
"present": self.init_swarm,
|
||||
"join": self.join,
|
||||
"absent": self.leave,
|
||||
"remove": self.remove,
|
||||
}
|
||||
|
||||
choice_map[self.state]()
|
||||
|
||||
if self.client.module._diff or self.parameters.debug:
|
||||
diff = {}
|
||||
diff["before"], diff["after"] = self.differences.get_before_after()
|
||||
self.results["diff"] = diff
|
||||
|
||||
def inspect_swarm(self) -> None:
|
||||
try:
|
||||
data = self.client.inspect_swarm()
|
||||
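# Round-trip through JSON to get a plain-dict deep copy of the swarm
# data, detached from any SDK-specific types.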
json_str = json.dumps(data, ensure_ascii=False)
|
||||
self.swarm_info = json.loads(json_str)
|
||||
|
||||
self.results["changed"] = False
|
||||
self.results["swarm_facts"] = self.swarm_info
|
||||
|
||||
unlock_key = self.get_unlock_key()
|
||||
self.swarm_info.update(unlock_key)
|
||||
except APIError:
|
||||
pass
|
||||
|
||||
def get_unlock_key(self) -> dict[str, t.Any]:
|
||||
default = {"UnlockKey": None}
|
||||
if not self.has_swarm_lock_changed():
|
||||
return default
|
||||
try:
|
||||
return self.client.get_unlock_key() or default
|
||||
except APIError:
|
||||
return default
|
||||
|
||||
def has_swarm_lock_changed(self) -> bool:
|
||||
return bool(self.parameters.autolock_managers) and (
|
||||
self.created or self.differences.has_difference_for("autolock_managers")
|
||||
)
|
||||
|
||||
def init_swarm(self) -> None:
|
||||
if not self.force and self.client.check_if_swarm_manager():
|
||||
self.__update_swarm()
|
||||
return
|
||||
|
||||
if not self.check_mode:
|
||||
init_arguments: dict[str, t.Any] = {
|
||||
"advertise_addr": self.parameters.advertise_addr,
|
||||
"listen_addr": self.parameters.listen_addr,
|
||||
"force_new_cluster": self.force,
|
||||
"swarm_spec": self.parameters.spec,
|
||||
}
|
||||
if self.parameters.default_addr_pool is not None:
|
||||
init_arguments["default_addr_pool"] = self.parameters.default_addr_pool
|
||||
if self.parameters.subnet_size is not None:
|
||||
init_arguments["subnet_size"] = self.parameters.subnet_size
|
||||
if self.parameters.data_path_addr is not None:
|
||||
init_arguments["data_path_addr"] = self.parameters.data_path_addr
|
||||
if self.parameters.data_path_port is not None:
|
||||
init_arguments["data_path_port"] = self.parameters.data_path_port
|
||||
try:
|
||||
self.client.init_swarm(**init_arguments)
|
||||
except APIError as exc:
|
||||
self.client.fail(f"Can not create a new Swarm Cluster: {exc}")
|
||||
|
||||
if not self.client.check_if_swarm_manager() and not self.check_mode:
|
||||
self.client.fail("Swarm not created or other error!")
|
||||
|
||||
self.created = True
|
||||
self.inspect_swarm()
|
||||
self.results["actions"].append(
|
||||
f"New Swarm cluster created: {self.swarm_info.get('ID')}"
|
||||
)
|
||||
self.differences.add("state", parameter="present", active="absent")
|
||||
self.results["changed"] = True
|
||||
self.results["swarm_facts"] = {
|
||||
"JoinTokens": self.swarm_info.get("JoinTokens"),
|
||||
"UnlockKey": self.swarm_info.get("UnlockKey"),
|
||||
}
|
||||
|
||||
def __update_swarm(self) -> None:
|
||||
try:
|
||||
self.inspect_swarm()
|
||||
version = self.swarm_info["Version"]["Index"]
|
||||
self.parameters.update_from_swarm_info(self.swarm_info)
|
||||
old_parameters = TaskParameters()
|
||||
old_parameters.update_from_swarm_info(self.swarm_info)
|
||||
self.parameters.compare_to_active(
|
||||
old_parameters, self.client, self.differences
|
||||
)
|
||||
if self.differences.empty:
|
||||
self.results["actions"].append("No modification")
|
||||
self.results["changed"] = False
|
||||
return
|
||||
update_parameters = TaskParameters.from_ansible_params(self.client)
|
||||
update_parameters.update_parameters(self.client)
|
||||
if not self.check_mode:
|
||||
self.client.update_swarm(
|
||||
version=version,
|
||||
swarm_spec=update_parameters.spec,
|
||||
rotate_worker_token=self.parameters.rotate_worker_token,
|
||||
rotate_manager_token=self.parameters.rotate_manager_token,
|
||||
)
|
||||
except APIError as exc:
|
||||
self.client.fail(f"Can not update a Swarm Cluster: {exc}")
|
||||
|
||||
self.inspect_swarm()
|
||||
self.results["actions"].append("Swarm cluster updated")
|
||||
self.results["changed"] = True
|
||||
|
||||
def join(self) -> None:
|
||||
if self.client.check_if_swarm_node():
|
||||
self.results["actions"].append("This node is already part of a swarm.")
|
||||
return
|
||||
if not self.check_mode:
|
||||
join_arguments = {
|
||||
"remote_addrs": self.parameters.remote_addrs,
|
||||
"join_token": self.parameters.join_token,
|
||||
"listen_addr": self.parameters.listen_addr,
|
||||
"advertise_addr": self.parameters.advertise_addr,
|
||||
}
|
||||
if self.parameters.data_path_addr is not None:
|
||||
join_arguments["data_path_addr"] = self.parameters.data_path_addr
|
||||
try:
|
||||
self.client.join_swarm(**join_arguments)
|
||||
except APIError as exc:
|
||||
self.client.fail(f"Can not join the Swarm Cluster: {exc}")
|
||||
self.results["actions"].append("New node is added to swarm cluster")
|
||||
self.differences.add("joined", parameter=True, active=False)
|
||||
self.results["changed"] = True
|
||||
|
||||
def leave(self) -> None:
|
||||
if not self.client.check_if_swarm_node():
|
||||
self.results["actions"].append("This node is not part of a swarm.")
|
||||
return
|
||||
if not self.check_mode:
|
||||
try:
|
||||
self.client.leave_swarm(force=self.force)
|
||||
except APIError as exc:
|
||||
self.client.fail(f"This node can not leave the Swarm Cluster: {exc}")
|
||||
self.results["actions"].append("Node has left the swarm cluster")
|
||||
self.differences.add("joined", parameter="absent", active="present")
|
||||
self.results["changed"] = True
|
||||
|
||||
def remove(self) -> None:
|
||||
if not self.client.check_if_swarm_manager():
|
||||
self.client.fail("This node is not a manager.")
|
||||
|
||||
try:
|
||||
status_down = self.client.check_if_swarm_node_is_down(
|
||||
node_id=self.node_id, repeat_check=5
|
||||
)
|
||||
except APIError:
|
||||
return
|
||||
|
||||
if not status_down:
|
||||
self.client.fail(
|
||||
"Can not remove the node. The status node is ready and not down."
|
||||
)
|
||||
|
||||
if not self.check_mode:
|
||||
try:
|
||||
self.client.remove_node(node_id=self.node_id, force=self.force)
|
||||
except APIError as exc:
|
||||
self.client.fail(
|
||||
f"Can not remove the node from the Swarm Cluster: {exc}"
|
||||
)
|
||||
self.results["actions"].append("Node is removed from swarm cluster.")
|
||||
self.differences.add("joined", parameter=False, active=True)
|
||||
self.results["changed"] = True
|
||||
|
||||
|
||||
def _detect_remove_operation(client: AnsibleDockerSwarmClient) -> bool:
|
||||
return client.module.params["state"] == "remove"
|
||||
|
||||
|
||||
def main() -> None:
|
||||
# TODO: missing option log_driver?
|
||||
argument_spec = {
|
||||
"advertise_addr": {"type": "str"},
|
||||
"data_path_addr": {"type": "str"},
|
||||
"data_path_port": {"type": "int"},
|
||||
"state": {
|
||||
"type": "str",
|
||||
"default": "present",
|
||||
"choices": ["present", "join", "absent", "remove"],
|
||||
},
|
||||
"force": {"type": "bool", "default": False},
|
||||
"listen_addr": {"type": "str", "default": "0.0.0.0:2377"},
|
||||
"remote_addrs": {"type": "list", "elements": "str"},
|
||||
"join_token": {"type": "str", "no_log": True},
|
||||
"snapshot_interval": {"type": "int"},
|
||||
"task_history_retention_limit": {"type": "int"},
|
||||
"keep_old_snapshots": {"type": "int"},
|
||||
"log_entries_for_slow_followers": {"type": "int"},
|
||||
"heartbeat_tick": {"type": "int"},
|
||||
"election_tick": {"type": "int"},
|
||||
"dispatcher_heartbeat_period": {"type": "int"},
|
||||
"node_cert_expiry": {"type": "int"},
|
||||
"name": {"type": "str"},
|
||||
"labels": {"type": "dict"},
|
||||
"signing_ca_cert": {"type": "str"},
|
||||
"signing_ca_key": {"type": "str", "no_log": True},
|
||||
"ca_force_rotate": {"type": "int"},
|
||||
"autolock_managers": {"type": "bool"},
|
||||
"node_id": {"type": "str"},
|
||||
"rotate_worker_token": {"type": "bool", "default": False},
|
||||
"rotate_manager_token": {"type": "bool", "default": False},
|
||||
"default_addr_pool": {"type": "list", "elements": "str"},
|
||||
"subnet_size": {"type": "int"},
|
||||
}
|
||||
|
||||
required_if = [
|
||||
("state", "join", ["remote_addrs", "join_token"]),
|
||||
("state", "remove", ["node_id"]),
|
||||
]
|
||||
|
||||
option_minimal_versions = {
|
||||
"labels": {"docker_py_version": "2.6.0", "docker_api_version": "1.32"},
|
||||
"signing_ca_cert": {"docker_py_version": "2.6.0", "docker_api_version": "1.30"},
|
||||
"signing_ca_key": {"docker_py_version": "2.6.0", "docker_api_version": "1.30"},
|
||||
"ca_force_rotate": {"docker_py_version": "2.6.0", "docker_api_version": "1.30"},
|
||||
"autolock_managers": {"docker_py_version": "2.6.0"},
|
||||
"log_driver": {"docker_py_version": "2.6.0"},
|
||||
"remove_operation": {
|
||||
"docker_py_version": "2.4.0",
|
||||
"detect_usage": _detect_remove_operation,
|
||||
"usage_msg": "remove swarm nodes",
|
||||
},
|
||||
"default_addr_pool": {
|
||||
"docker_py_version": "4.0.0",
|
||||
"docker_api_version": "1.39",
|
||||
},
|
||||
"subnet_size": {"docker_py_version": "4.0.0", "docker_api_version": "1.39"},
|
||||
"data_path_addr": {"docker_py_version": "4.0.0", "docker_api_version": "1.30"},
|
||||
"data_path_port": {"docker_py_version": "6.0.0", "docker_api_version": "1.40"},
|
||||
}
|
||||
|
||||
client = AnsibleDockerSwarmClient(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
required_if=required_if,
|
||||
min_docker_version="2.0.0",
|
||||
option_minimal_versions=option_minimal_versions,
|
||||
)
|
||||
sanitize_labels(client.module.params["labels"], "labels", client)
|
||||
|
||||
try:
|
||||
results = {"changed": False, "result": "", "actions": []}
|
||||
|
||||
SwarmManager(client, results)()
|
||||
client.module.exit_json(**results)
|
||||
except DockerException as e:
|
||||
client.fail(
|
||||
f"An unexpected Docker error occurred: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
f"An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
@@ -0,0 +1,410 @@
|
|||
#!/usr/bin/python
|
||||
#
|
||||
# Copyright (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
module: docker_swarm_info
|
||||
|
||||
short_description: Retrieves facts about Docker Swarm cluster
|
||||
|
||||
description:
|
||||
- Retrieves facts about a Docker Swarm.
|
||||
- Returns lists of swarm object names for nodes, services, and tasks.
|
||||
- The output differs depending on API version available on docker host.
|
||||
- Must be run on a Swarm Manager node; otherwise the module fails with an error message. It does return boolean flags on both
|
||||
error and success which indicate whether the docker daemon can be communicated with, whether it is in Swarm mode, and
|
||||
whether it is a Swarm Manager node.
|
||||
author:
|
||||
- Piotr Wojciechowski (@WojciechowskiPiotr)
|
||||
|
||||
extends_documentation_fragment:
|
||||
- community.docker._docker
|
||||
- community.docker._docker.docker_py_2_documentation
|
||||
- community.docker._attributes
|
||||
- community.docker._attributes.actiongroup_docker
|
||||
- community.docker._attributes.info_module
|
||||
- community.docker._attributes.idempotent_not_modify_state
|
||||
|
||||
options:
|
||||
nodes:
|
||||
description:
|
||||
- Whether to list swarm nodes.
|
||||
type: bool
|
||||
default: false
|
||||
nodes_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting nodes to list.
|
||||
- 'For example, C(name: mynode).'
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/node_ls/#filtering) for more information
|
||||
on possible filters.
|
||||
type: dict
|
||||
services:
|
||||
description:
|
||||
- Whether to list swarm services.
|
||||
type: bool
|
||||
default: false
|
||||
services_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting services to list.
|
||||
- 'For example, C(name: myservice).'
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ls/#filtering) for more
|
||||
information on possible filters.
|
||||
type: dict
|
||||
tasks:
|
||||
description:
|
||||
- Whether to list swarm tasks.
|
||||
type: bool
|
||||
default: false
|
||||
tasks_filters:
|
||||
description:
|
||||
- A dictionary of filter values used for selecting tasks to list.
|
||||
- 'For example, C(node: mynode-1).'
|
||||
- See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ps/#filtering) for more
|
||||
information on possible filters.
|
||||
type: dict
|
||||
unlock_key:
|
||||
description:
|
||||
- Whether to retrieve the swarm unlock key.
|
||||
type: bool
|
||||
default: false
|
||||
verbose_output:
|
||||
description:
|
||||
- When set to V(true) and O(nodes), O(services), or O(tasks) is set to V(true), then the module output will contain
|
||||
verbose information about objects, matching the full output of the corresponding API method.
|
||||
- For details see the documentation of your version of Docker API at U(https://docs.docker.com/engine/api/).
|
||||
- The verbose output in this module contains only a subset of the information returned by this info module for each type of
|
||||
object.
|
||||
type: bool
|
||||
default: false
|
||||
|
||||
requirements:
|
||||
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.0"
|
||||
- "Docker API >= 1.25"
|
||||
"""
|
||||
|
||||
EXAMPLES = r"""
|
||||
---
|
||||
- name: Get info on Docker Swarm
|
||||
community.docker.docker_swarm_info:
|
||||
ignore_errors: true
|
||||
register: result
|
||||
|
||||
- name: Inform about basic flags
|
||||
ansible.builtin.debug:
|
||||
msg: |
|
||||
Was able to talk to docker daemon: {{ result.can_talk_to_docker }}
|
||||
Docker in Swarm mode: {{ result.docker_swarm_active }}
|
||||
This is a Manager node: {{ result.docker_swarm_manager }}
|
||||
|
||||
- name: Get info on Docker Swarm and list of registered nodes
|
||||
community.docker.docker_swarm_info:
|
||||
nodes: true
|
||||
register: result
|
||||
|
||||
- name: Get info on Docker Swarm and extended list of registered nodes
|
||||
community.docker.docker_swarm_info:
|
||||
nodes: true
|
||||
verbose_output: true
|
||||
register: result
|
||||
|
||||
- name: Get info on Docker Swarm and filtered list of registered nodes
|
||||
community.docker.docker_swarm_info:
|
||||
nodes: true
|
||||
nodes_filters:
|
||||
name: mynode
|
||||
register: result
|
||||
|
||||
- name: Show swarm facts
|
||||
ansible.builtin.debug:
|
||||
var: result.swarm_facts
|
||||
|
||||
- name: Get the swarm unlock key
|
||||
community.docker.docker_swarm_info:
|
||||
unlock_key: true
|
||||
register: result
|
||||
|
||||
- name: Print swarm unlock key
|
||||
ansible.builtin.debug:
|
||||
var: result.swarm_unlock_key
|
||||
"""
|
||||
|
||||
RETURN = r"""
|
||||
can_talk_to_docker:
|
||||
description:
|
||||
- Will be V(true) if the module can talk to the docker daemon.
|
||||
returned: both on success and on error
|
||||
type: bool
|
||||
docker_swarm_active:
|
||||
description:
|
||||
- Will be V(true) if the module can talk to the docker daemon, and the docker daemon is in Swarm mode.
|
||||
returned: both on success and on error
|
||||
type: bool
|
||||
docker_swarm_manager:
|
||||
description:
|
||||
- Will be V(true) if the module can talk to the docker daemon, the docker daemon is in Swarm mode, and the current node
|
||||
is a manager node.
|
||||
- Only if this one is V(true), the module will not fail.
|
||||
returned: both on success and on error
|
||||
type: bool
|
||||
swarm_facts:
|
||||
description:
|
||||
- Facts representing the basic state of the docker Swarm cluster.
|
||||
- Contains tokens to connect to the Swarm.
|
||||
returned: always
|
||||
type: dict
|
||||
swarm_unlock_key:
|
||||
description:
|
||||
- Contains the key needed to unlock the swarm.
|
||||
returned: When O(unlock_key=true).
|
||||
type: str
|
||||
nodes:
|
||||
description:
|
||||
- List of dict objects containing the basic information about each node. Keys match the C(docker node ls) output unless
|
||||
O(verbose_output=true). See description for O(verbose_output).
|
||||
returned: When O(nodes=true)
|
||||
type: list
|
||||
elements: dict
|
||||
services:
|
||||
description:
|
||||
- List of dict objects containing the basic information about each service. Keys match the C(docker service ls) output
|
||||
unless O(verbose_output=true). See description for O(verbose_output).
|
||||
returned: When O(services=true)
|
||||
type: list
|
||||
elements: dict
|
||||
tasks:
|
||||
description:
|
||||
- List of dict objects containing the basic information about each task. Keys match the C(docker service ps) output
|
||||
unless O(verbose_output=true). See description for O(verbose_output).
|
||||
returned: When O(tasks=true)
|
||||
type: list
|
||||
elements: dict
|
||||
"""
|
||||
|
||||
import traceback
|
||||
import typing as t
|
||||
|
||||
try:
|
||||
from docker.errors import APIError, DockerException
|
||||
except ImportError:
|
||||
# missing Docker SDK for Python handled in ansible.module_utils.docker_common
|
||||
pass
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._common import (
|
||||
RequestException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._swarm import (
|
||||
AnsibleDockerSwarmClient,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._util import (
|
||||
DockerBaseClass,
|
||||
clean_dict_booleans_for_docker_api,
|
||||
)
|
||||
|
||||
|
||||
class DockerSwarmManager(DockerBaseClass):
|
||||
def __init__(
|
||||
self, client: AnsibleDockerSwarmClient, results: dict[str, t.Any]
|
||||
) -> None:
|
||||
super().__init__()
|
||||
|
||||
self.client = client
|
||||
self.results = results
|
||||
self.verbose_output = self.client.module.params["verbose_output"]
|
||||
|
||||
listed_objects: list[t.Literal["nodes", "tasks", "services"]] = [
|
||||
"tasks",
|
||||
"services",
|
||||
"nodes",
|
||||
]
|
||||
|
||||
self.client.fail_task_if_not_swarm_manager()
|
||||
|
||||
self.results["swarm_facts"] = self.get_docker_swarm_facts()
|
||||
|
||||
for docker_object in listed_objects:
|
||||
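# Each object type has a matching "<object>_filters" module option
# (for example, nodes -> nodes_filters).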
if self.client.module.params[docker_object]:
|
||||
returned_name = docker_object
|
||||
filter_name = docker_object + "_filters"
|
||||
filters = clean_dict_booleans_for_docker_api(
|
||||
client.module.params.get(filter_name)
|
||||
)
|
||||
self.results[returned_name] = self.get_docker_items_list(
|
||||
docker_object, filters
|
||||
)
|
||||
if self.client.module.params["unlock_key"]:
|
||||
self.results["swarm_unlock_key"] = self.get_docker_swarm_unlock_key()
|
||||
|
||||
def get_docker_swarm_facts(self) -> dict[str, t.Any]:
|
||||
try:
|
||||
return self.client.inspect_swarm()
|
||||
except APIError as exc:
|
||||
self.client.fail(f"Error inspecting docker swarm: {exc}")
|
||||
|
||||
def get_docker_items_list(
|
||||
self,
|
||||
docker_object: t.Literal["nodes", "tasks", "services"],
|
||||
filters: dict[str, str],
|
||||
) -> list[dict[str, t.Any]]:
|
||||
items_list: list[dict[str, t.Any]] = []
|
||||
|
||||
try:
|
||||
if docker_object == "nodes":
|
||||
items = self.client.nodes(filters=filters)
|
||||
elif docker_object == "tasks":
|
||||
items = self.client.tasks(filters=filters)
|
||||
elif docker_object == "services":
|
||||
items = self.client.services(filters=filters)
|
||||
else:
|
||||
raise ValueError(f"Invalid docker_object {docker_object}")
|
||||
except APIError as exc:
|
||||
self.client.fail(
|
||||
f"Error inspecting docker swarm for object '{docker_object}': {exc}"
|
||||
)
|
||||
|
||||
if self.verbose_output:
|
||||
return items
|
||||
|
||||
for item in items:
|
||||
item_record = {}
|
||||
|
||||
if docker_object == "nodes":
|
||||
item_record = self.get_essential_facts_nodes(item)
|
||||
elif docker_object == "tasks":
|
||||
item_record = self.get_essential_facts_tasks(item)
|
||||
elif docker_object == "services":
|
||||
item_record = self.get_essential_facts_services(item)
|
||||
if item_record.get("Mode") == "Global":
|
||||
item_record["Replicas"] = len(items)
|
||||
items_list.append(item_record)
|
||||
|
||||
return items_list
|
||||
|
||||
@staticmethod
|
||||
def get_essential_facts_nodes(item: dict[str, t.Any]) -> dict[str, t.Any]:
|
||||
object_essentials = {}
|
||||
|
||||
object_essentials["ID"] = item.get("ID")
|
||||
object_essentials["Hostname"] = item["Description"]["Hostname"]
|
||||
object_essentials["Status"] = item["Status"]["State"]
|
||||
object_essentials["Availability"] = item["Spec"]["Availability"]
|
||||
if "ManagerStatus" in item:
|
||||
object_essentials["ManagerStatus"] = item["ManagerStatus"]["Reachability"]
|
||||
if (
|
||||
"Leader" in item["ManagerStatus"]
|
||||
and item["ManagerStatus"]["Leader"] is True
|
||||
):
|
||||
object_essentials["ManagerStatus"] = "Leader"
|
||||
else:
|
||||
object_essentials["ManagerStatus"] = None
|
||||
object_essentials["EngineVersion"] = item["Description"]["Engine"][
|
||||
"EngineVersion"
|
||||
]
|
||||
|
||||
return object_essentials
|
||||
|
||||
def get_essential_facts_tasks(self, item: dict[str, t.Any]) -> dict[str, t.Any]:
|
||||
object_essentials = {}
|
||||
|
||||
object_essentials["ID"] = item["ID"]
|
||||
# Returning container ID to not trigger another connection to host
|
||||
# Container ID is sufficient to get extended info in other tasks
|
||||
object_essentials["ContainerID"] = item["Status"]["ContainerStatus"][
|
||||
"ContainerID"
|
||||
]
|
||||
object_essentials["Image"] = item["Spec"]["ContainerSpec"]["Image"]
|
||||
object_essentials["Node"] = self.client.get_node_name_by_id(item["NodeID"])
|
||||
object_essentials["DesiredState"] = item["DesiredState"]
|
||||
object_essentials["CurrentState"] = item["Status"]["State"]
|
||||
if "Err" in item["Status"]:
|
||||
object_essentials["Error"] = item["Status"]["Err"]
|
||||
else:
|
||||
object_essentials["Error"] = None
|
||||
|
||||
return object_essentials
|
||||
|
||||
@staticmethod
|
||||
def get_essential_facts_services(item: dict[str, t.Any]) -> dict[str, t.Any]:
|
||||
object_essentials = {}
|
||||
|
||||
object_essentials["ID"] = item["ID"]
|
||||
object_essentials["Name"] = item["Spec"]["Name"]
|
||||
if "Replicated" in item["Spec"]["Mode"]:
|
||||
object_essentials["Mode"] = "Replicated"
|
||||
object_essentials["Replicas"] = item["Spec"]["Mode"]["Replicated"][
|
||||
"Replicas"
|
||||
]
|
||||
elif "Global" in item["Spec"]["Mode"]:
|
||||
object_essentials["Mode"] = "Global"
|
||||
# Number of replicas have to be updated in calling method or may be left as None
|
||||
object_essentials["Replicas"] = None
|
||||
object_essentials["Image"] = item["Spec"]["TaskTemplate"]["ContainerSpec"][
|
||||
"Image"
|
||||
]
|
||||
if item["Spec"].get("EndpointSpec") and "Ports" in item["Spec"]["EndpointSpec"]:
|
||||
object_essentials["Ports"] = item["Spec"]["EndpointSpec"]["Ports"]
|
||||
else:
|
||||
object_essentials["Ports"] = []
|
||||
|
||||
return object_essentials
|
||||
|
||||
def get_docker_swarm_unlock_key(self) -> str | None:
|
||||
unlock_key = self.client.get_unlock_key() or {}
|
||||
return unlock_key.get("UnlockKey") or None
|
||||
|
||||
|
||||
def main() -> None:
|
||||
argument_spec = {
|
||||
"nodes": {"type": "bool", "default": False},
|
||||
"nodes_filters": {"type": "dict"},
|
||||
"tasks": {"type": "bool", "default": False},
|
||||
"tasks_filters": {"type": "dict"},
|
||||
"services": {"type": "bool", "default": False},
|
||||
"services_filters": {"type": "dict"},
|
||||
"unlock_key": {"type": "bool", "default": False},
|
||||
"verbose_output": {"type": "bool", "default": False},
|
||||
}
|
||||
option_minimal_versions = {
|
||||
"unlock_key": {"docker_py_version": "2.7.0"},
|
||||
}
|
||||
|
||||
client = AnsibleDockerSwarmClient(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
min_docker_version="2.0.0",
|
||||
option_minimal_versions=option_minimal_versions,
|
||||
fail_results={
|
||||
"can_talk_to_docker": False,
|
||||
"docker_swarm_active": False,
|
||||
"docker_swarm_manager": False,
|
||||
},
|
||||
)
|
||||
client.fail_results["can_talk_to_docker"] = True
|
||||
client.fail_results["docker_swarm_active"] = client.check_if_swarm_node()
|
||||
client.fail_results["docker_swarm_manager"] = client.check_if_swarm_manager()
|
||||
|
||||
try:
|
||||
results = {
|
||||
"changed": False,
|
||||
}
|
||||
|
||||
DockerSwarmManager(client, results)
|
||||
results.update(client.fail_results)
|
||||
client.module.exit_json(**results)
|
||||
except DockerException as e:
|
||||
client.fail(
|
||||
f"An unexpected Docker error occurred: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
except RequestException as e:
|
||||
client.fail(
|
||||
f"An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {e}",
|
||||
exception=traceback.format_exc(),
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
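For quick reference, a minimal usage sketch for the module above (not part of the vendored file; the node filter value and variable names are illustrative):

---
- name: Get swarm facts plus the condensed node list
  community.docker.docker_swarm_info:
    nodes: true
    nodes_filters:
      role: manager
  register: swarm_result

- name: Show the node records described in the RETURN block
  ansible.builtin.debug:
    var: swarm_result.nodes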
File diff suppressed because it is too large
@@ -0,0 +1,116 @@
#!/usr/bin/python
#
# Copyright (c) 2019 Hannes Ljungberg <hannes.ljungberg@gmail.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
module: docker_swarm_service_info

short_description: Retrieves information about docker services from a Swarm Manager

description:
  - Retrieves information about a docker service.
  - Essentially returns the output of C(docker service inspect <name>).
  - Must be executed on a host running as Swarm Manager, otherwise the module will fail.
extends_documentation_fragment:
  - community.docker._docker
  - community.docker._docker.docker_py_2_documentation
  - community.docker._attributes
  - community.docker._attributes.actiongroup_docker
  - community.docker._attributes.info_module
  - community.docker._attributes.idempotent_not_modify_state

options:
  name:
    description:
      - The name of the service to inspect.
    type: str
    required: true

author:
  - Hannes Ljungberg (@hannseman)

requirements:
  - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.0"
  - "Docker API >= 1.25"
"""

EXAMPLES = r"""
---
- name: Get info from a service
  community.docker.docker_swarm_service_info:
    name: myservice
  register: result
"""

RETURN = r"""
exists:
  description:
    - Returns whether the service exists.
  type: bool
  returned: always
  sample: true
service:
  description:
    - A dictionary representing the current state of the service. Matches the C(docker service inspect) output.
    - Will be V(none) if service does not exist.
  returned: always
  type: dict
"""

import traceback
import typing as t

try:
    from docker.errors import DockerException
except ImportError:
    # missing Docker SDK for Python handled in ansible.module_utils.docker.common
    pass

from ansible_collections.community.docker.plugins.module_utils._common import (
    RequestException,
)
from ansible_collections.community.docker.plugins.module_utils._swarm import (
    AnsibleDockerSwarmClient,
)


def get_service_info(client: AnsibleDockerSwarmClient) -> dict[str, t.Any] | None:
    service = client.module.params["name"]
    return client.get_service_inspect(service_id=service, skip_missing=True)


def main() -> None:
    argument_spec = {
        "name": {"type": "str", "required": True},
    }

    client = AnsibleDockerSwarmClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        min_docker_version="2.0.0",
    )

    client.fail_task_if_not_swarm_manager()

    try:
        service = get_service_info(client)

        client.module.exit_json(changed=False, service=service, exists=bool(service))
    except DockerException as e:
        client.fail(
            f"An unexpected Docker error occurred: {e}",
            exception=traceback.format_exc(),
        )
    except RequestException as e:
        client.fail(
            f"An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {e}",
            exception=traceback.format_exc(),
        )


if __name__ == "__main__":
    main()
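A usage sketch for the module above, following the same register-then-check pattern as the docker_volume_info examples later in this commit (the service name and task names are illustrative):

---
- name: Inspect a service
  community.docker.docker_swarm_service_info:
    name: myservice
  register: result

- name: Print the service spec only when it exists
  ansible.builtin.debug:
    var: result.service
  when: result.exists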
@@ -0,0 +1,353 @@
#!/usr/bin/python
#
# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
module: docker_volume
short_description: Manage Docker volumes
description:
  - Create/remove Docker volumes.
  - Performs largely the same function as the C(docker volume) CLI subcommand.
extends_documentation_fragment:
  - community.docker._docker.api_documentation
  - community.docker._attributes
  - community.docker._attributes.actiongroup_docker

attributes:
  check_mode:
    support: full
  diff_mode:
    support: full
  idempotent:
    support: partial
    details:
      - If O(recreate=always) the module is not idempotent.

options:
  volume_name:
    description:
      - Name of the volume to operate on.
    type: str
    required: true
    aliases:
      - name

  driver:
    description:
      - Specify the type of volume. Docker provides the V(local) driver, but 3rd party drivers can also be used.
    type: str
    default: local

  driver_options:
    description:
      - 'Dictionary of volume settings. Consult the Docker documentation for valid options and values:
        U(https://docs.docker.com/engine/reference/commandline/volume_create/#driver-specific-options).'
    type: dict
    default: {}

  labels:
    description:
      - Dictionary of label key/values to set for the volume.
    type: dict

  recreate:
    description:
      - Controls when a volume will be recreated when O(state=present). Please note that recreating an existing volume will
        cause B(any data in the existing volume to be lost!) The volume will be deleted and a new volume with the same name
        will be created.
      - The value V(always) forces the volume to be always recreated.
      - The value V(never) makes sure the volume will not be recreated.
      - The value V(options-changed) makes sure the volume will be recreated if the volume already exists and the driver,
        driver options, or labels differ.
    type: str
    default: never
    choices:
      - always
      - never
      - options-changed

  state:
    description:
      - V(absent) deletes the volume.
      - V(present) creates the volume, if it does not already exist.
    type: str
    default: present
    choices:
      - absent
      - present

author:
  - Alex Grönholm (@agronholm)

requirements:
  - "Docker API >= 1.25"
"""

EXAMPLES = r"""
---
- name: Create a volume
  community.docker.docker_volume:
    name: volume_one

- name: Remove a volume
  community.docker.docker_volume:
    name: volume_one
    state: absent

- name: Create a volume with options
  community.docker.docker_volume:
    name: volume_two
    driver_options:
      type: btrfs
      device: /dev/sda2
"""

RETURN = r"""
volume:
  description:
    - Volume inspection results for the affected volume.
  returned: success
  type: dict
  sample: {}
"""

import traceback
import typing as t

from ansible.module_utils.common.text.converters import to_text

from ansible_collections.community.docker.plugins.module_utils._api.errors import (
    APIError,
    DockerException,
)
from ansible_collections.community.docker.plugins.module_utils._common_api import (
    AnsibleDockerClient,
    RequestException,
)
from ansible_collections.community.docker.plugins.module_utils._util import (
    DifferenceTracker,
    DockerBaseClass,
    sanitize_labels,
)


class TaskParameters(DockerBaseClass):
    volume_name: str

    def __init__(self, client: AnsibleDockerClient) -> None:
        super().__init__()
        self.client = client

        self.driver: str = "local"
        self.driver_options: dict[str, t.Any] = {}
        self.labels: dict[str, t.Any] | None = None
        self.recreate: t.Literal["always", "never", "options-changed"] = "never"
        self.debug: bool = False
        self.state: t.Literal["present", "absent"] = "present"

        for key, value in client.module.params.items():
            setattr(self, key, value)


class DockerVolumeManager:
    def __init__(self, client: AnsibleDockerClient) -> None:
        self.client = client
        self.parameters = TaskParameters(client)
        self.check_mode = self.client.check_mode
        self.actions: list[str] = []
        self.results: dict[str, t.Any] = {"changed": False, "actions": self.actions}
        self.diff = self.client.module._diff
        self.diff_tracker = DifferenceTracker()
        self.diff_result: dict[str, t.Any] = {}

        self.existing_volume = self.get_existing_volume()

        state = self.parameters.state
        if state == "present":
            self.present()
        elif state == "absent":
            self.absent()

        if self.diff or self.check_mode or self.parameters.debug:
            if self.diff:
                self.diff_result["before"], self.diff_result["after"] = (
                    self.diff_tracker.get_before_after()
                )
            self.results["diff"] = self.diff_result

    def get_existing_volume(self) -> dict[str, t.Any] | None:
        try:
            volumes = self.client.get_json("/volumes")
        except APIError as e:
            self.client.fail(to_text(e))

        if volumes["Volumes"] is None:
            return None

        for volume in volumes["Volumes"]:
            if volume["Name"] == self.parameters.volume_name:
                return volume

        return None

    def has_different_config(self) -> DifferenceTracker:
        """
        Return the list of differences between the current parameters and the existing volume.

        :return: list of options that differ
        """
        assert self.existing_volume is not None
        differences = DifferenceTracker()
        if (
            self.parameters.driver
            and self.parameters.driver != self.existing_volume["Driver"]
        ):
            differences.add(
                "driver",
                parameter=self.parameters.driver,
                active=self.existing_volume["Driver"],
            )
        if self.parameters.driver_options:
            if not self.existing_volume.get("Options"):
                differences.add(
                    "driver_options",
                    parameter=self.parameters.driver_options,
                    active=self.existing_volume.get("Options"),
                )
            else:
                for key, value in self.parameters.driver_options.items():
                    if (
                        not self.existing_volume["Options"].get(key)
                        or value != self.existing_volume["Options"][key]
                    ):
                        differences.add(
                            f"driver_options.{key}",
                            parameter=value,
                            active=self.existing_volume["Options"].get(key),
                        )
        if self.parameters.labels:
            existing_labels = self.existing_volume.get("Labels") or {}
            for label in self.parameters.labels:
                if existing_labels.get(label) != self.parameters.labels.get(label):
                    differences.add(
                        f"labels.{label}",
                        parameter=self.parameters.labels.get(label),
                        active=existing_labels.get(label),
                    )

        return differences

    def create_volume(self) -> None:
        if not self.existing_volume:
            if not self.check_mode:
                try:
                    data = {
                        "Name": self.parameters.volume_name,
                        "Driver": self.parameters.driver,
                        "DriverOpts": self.parameters.driver_options,
                    }
                    if self.parameters.labels is not None:
                        data["Labels"] = self.parameters.labels
                    resp = self.client.post_json_to_json("/volumes/create", data=data)
                    self.existing_volume = self.client.get_json(
                        "/volumes/{0}", resp["Name"]
                    )
                except APIError as e:
                    self.client.fail(to_text(e))

            self.actions.append(
                f"Created volume {self.parameters.volume_name} with driver {self.parameters.driver}"
            )
            self.results["changed"] = True

    def remove_volume(self) -> None:
        if self.existing_volume:
            if not self.check_mode:
                try:
                    self.client.delete_call("/volumes/{0}", self.parameters.volume_name)
                except APIError as e:
                    self.client.fail(to_text(e))

            self.actions.append(f"Removed volume {self.parameters.volume_name}")
            self.results["changed"] = True

    def present(self) -> None:
        differences = DifferenceTracker()
        if self.existing_volume:
            differences = self.has_different_config()

        self.diff_tracker.add(
            "exists", parameter=True, active=self.existing_volume is not None
        )
        if (
            not differences.empty and self.parameters.recreate == "options-changed"
        ) or self.parameters.recreate == "always":
            self.remove_volume()
            self.existing_volume = None

        self.create_volume()

        if self.diff or self.check_mode or self.parameters.debug:
            self.diff_result["differences"] = differences.get_legacy_docker_diffs()
            self.diff_tracker.merge(differences)

        if not self.check_mode and not self.parameters.debug:
            self.results.pop("actions")

        volume_facts = self.get_existing_volume()
        self.results["volume"] = volume_facts

    def absent(self) -> None:
        self.diff_tracker.add(
            "exists", parameter=False, active=self.existing_volume is not None
        )
        self.remove_volume()


def main() -> None:
    argument_spec = {
        "volume_name": {"type": "str", "required": True, "aliases": ["name"]},
        "state": {
            "type": "str",
            "default": "present",
            "choices": ["present", "absent"],
        },
        "driver": {"type": "str", "default": "local"},
        "driver_options": {"type": "dict", "default": {}},
        "labels": {"type": "dict"},
        "recreate": {
            "type": "str",
            "default": "never",
            "choices": ["always", "never", "options-changed"],
        },
        "debug": {"type": "bool", "default": False},
    }

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # "The docker server >= 1.9.0"
    )
    sanitize_labels(client.module.params["labels"], "labels", client)

    try:
        cm = DockerVolumeManager(client)
        client.module.exit_json(**cm.results)
    except DockerException as e:
        client.fail(
            f"An unexpected Docker error occurred: {e}",
            exception=traceback.format_exc(),
        )
    except RequestException as e:
        client.fail(
            f"An unexpected requests error occurred when trying to talk to the Docker daemon: {e}",
            exception=traceback.format_exc(),
        )


if __name__ == "__main__":
    main()
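A usage sketch for O(recreate=options-changed) on the module above, reusing the volume from its EXAMPLES block (the label is illustrative): the volume is only removed and recreated when the driver, driver options, or labels differ from the existing volume.

---
- name: Recreate volume_two only when its options or labels changed
  community.docker.docker_volume:
    name: volume_two
    driver_options:
      type: btrfs
      device: /dev/sda2
    labels:
      backup: "true"
    recreate: options-changed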
@@ -0,0 +1,126 @@
#!/usr/bin/python
#
# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

from __future__ import annotations

DOCUMENTATION = r"""
module: docker_volume_info
short_description: Retrieve facts about Docker volumes
description:
  - Performs largely the same function as the C(docker volume inspect) CLI subcommand.
extends_documentation_fragment:
  - community.docker._docker.api_documentation
  - community.docker._attributes
  - community.docker._attributes.actiongroup_docker
  - community.docker._attributes.info_module
  - community.docker._attributes.idempotent_not_modify_state

options:
  name:
    description:
      - Name of the volume to inspect.
    type: str
    required: true
    aliases:
      - volume_name

author:
  - Felix Fontein (@felixfontein)

requirements:
  - "Docker API >= 1.25"
"""

EXAMPLES = r"""
---
- name: Get info on volume
  community.docker.docker_volume_info:
    name: mydata
  register: result

- name: Does volume exist?
  ansible.builtin.debug:
    msg: "The volume {{ 'exists' if result.exists else 'does not exist' }}"

- name: Print information about volume
  ansible.builtin.debug:
    var: result.volume
  when: result.exists
"""

RETURN = r"""
exists:
  description:
    - Returns whether the volume exists.
  type: bool
  returned: always
  sample: true
volume:
  description:
    - Volume inspection results for the affected volume.
    - Will be V(none) if volume does not exist.
  returned: success
  type: dict
  sample: '{ "CreatedAt": "2018-12-09T17:43:44+01:00", "Driver": "local", "Labels": null, "Mountpoint": "/var/lib/docker/volumes/ansible-test-bd3f6172/_data",
    "Name": "ansible-test-bd3f6172", "Options": {}, "Scope": "local" }'
"""

import traceback
import typing as t

from ansible_collections.community.docker.plugins.module_utils._api.errors import (
    DockerException,
    NotFound,
)
from ansible_collections.community.docker.plugins.module_utils._common_api import (
    AnsibleDockerClient,
    RequestException,
)


def get_existing_volume(
    client: AnsibleDockerClient, volume_name: str
) -> dict[str, t.Any] | None:
    try:
        return client.get_json("/volumes/{0}", volume_name)
    except NotFound:
        return None
    except Exception as exc:  # pylint: disable=broad-exception-caught
        client.fail(f"Error inspecting volume: {exc}")


def main() -> None:
    argument_spec = {
        "name": {"type": "str", "required": True, "aliases": ["volume_name"]},
    }

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    try:
        volume = get_existing_volume(client, client.module.params["name"])

        client.module.exit_json(
            changed=False,
            exists=bool(volume),
            volume=volume,
        )
    except DockerException as e:
        client.fail(
            f"An unexpected Docker error occurred: {e}",
            exception=traceback.format_exc(),
        )
    except RequestException as e:
        client.fail(
            f"An unexpected requests error occurred when trying to talk to the Docker daemon: {e}",
            exception=traceback.format_exc(),
        )


if __name__ == "__main__":
    main()
@@ -0,0 +1,59 @@
# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import typing as t

from ansible.errors import AnsibleConnectionFailure
from ansible.utils.display import Display

from ansible_collections.community.docker.plugins.module_utils._common import (
    AnsibleDockerClientBase,
)
from ansible_collections.community.docker.plugins.module_utils._util import (
    DOCKER_COMMON_ARGS,
)

if t.TYPE_CHECKING:
    from ansible.plugins import AnsiblePlugin


class AnsibleDockerClient(AnsibleDockerClientBase):
    def __init__(
        self,
        plugin: AnsiblePlugin,
        min_docker_version: str | None = None,
        min_docker_api_version: str | None = None,
    ) -> None:
        self.plugin = plugin
        self.display = Display()
        super().__init__(
            min_docker_version=min_docker_version,
            min_docker_api_version=min_docker_api_version,
        )

    def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
        if kwargs:
            msg += "\nContext:\n" + "\n".join(
                f"  {k} = {v!r}" for (k, v) in kwargs.items()
            )
        raise AnsibleConnectionFailure(msg)

    def deprecate(
        self,
        msg: str,
        version: str | None = None,
        date: str | None = None,
        collection_name: str | None = None,
    ) -> None:
        self.display.deprecated(
            msg, version=version, date=date, collection_name=collection_name
        )

    def _get_params(self) -> dict[str, t.Any]:
        return {option: self.plugin.get_option(option) for option in DOCKER_COMMON_ARGS}
@@ -0,0 +1,53 @@
# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import typing as t

from ansible.errors import AnsibleConnectionFailure
from ansible.utils.display import Display

from ansible_collections.community.docker.plugins.module_utils._common_api import (
    AnsibleDockerClientBase,
)
from ansible_collections.community.docker.plugins.module_utils._util import (
    DOCKER_COMMON_ARGS,
)

if t.TYPE_CHECKING:
    from ansible.plugins import AnsiblePlugin


class AnsibleDockerClient(AnsibleDockerClientBase):
    def __init__(
        self, plugin: AnsiblePlugin, min_docker_api_version: str | None = None
    ) -> None:
        self.plugin = plugin
        self.display = Display()
        super().__init__(min_docker_api_version=min_docker_api_version)

    def fail(self, msg: str, **kwargs: t.Any) -> t.NoReturn:
        if kwargs:
            msg += "\nContext:\n" + "\n".join(
                f"  {k} = {v!r}" for (k, v) in kwargs.items()
            )
        raise AnsibleConnectionFailure(msg)

    def deprecate(
        self,
        msg: str,
        version: str | None = None,
        date: str | None = None,
        collection_name: str | None = None,
    ) -> None:
        self.display.deprecated(
            msg, version=version, date=date, collection_name=collection_name
        )

    def _get_params(self) -> dict[str, t.Any]:
        return {option: self.plugin.get_option(option) for option in DOCKER_COMMON_ARGS}
@@ -0,0 +1,28 @@
# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import typing as t

from ansible_collections.community.docker.plugins.module_utils._socket_handler import (
    DockerSocketHandlerBase,
)

if t.TYPE_CHECKING:
    from ansible.utils.display import Display

    from ansible_collections.community.docker.plugins.module_utils._socket_helper import (
        SocketLike,
    )


class DockerSocketHandler(DockerSocketHandlerBase):
    def __init__(
        self, display: Display, sock: SocketLike, container: str | None = None
    ) -> None:
        super().__init__(sock, log=lambda msg: display.vvvv(msg, host=container))
@@ -0,0 +1,43 @@
# Copyright (c) 2023, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later

# Note that this module util is **PRIVATE** to the collection. It can have breaking changes at any time.
# Do not use this from other collections or standalone plugins/modules!

from __future__ import annotations

import re
import typing as t
from collections.abc import Mapping, Set

from ansible.module_utils.common.collections import is_sequence
from ansible.utils.unsafe_proxy import (
    AnsibleUnsafe,
)
from ansible.utils.unsafe_proxy import wrap_var as _make_unsafe

_RE_TEMPLATE_CHARS = re.compile("[{}]")
_RE_TEMPLATE_CHARS_BYTES = re.compile(b"[{}]")


def make_unsafe(value: t.Any) -> t.Any:
    if value is None or isinstance(value, AnsibleUnsafe):
        return value

    if isinstance(value, Mapping):
        return dict((make_unsafe(key), make_unsafe(val)) for key, val in value.items())
    if isinstance(value, Set):
        return set(make_unsafe(elt) for elt in value)
    if is_sequence(value):
        return type(value)(make_unsafe(elt) for elt in value)
    if isinstance(value, bytes):
        if _RE_TEMPLATE_CHARS_BYTES.search(value):
            value = _make_unsafe(value)
        return value
    if isinstance(value, str):
        if _RE_TEMPLATE_CHARS.search(value):
            value = _make_unsafe(value)
        return value

    return value