forked from CCCHH/ansible-infra
Vendor Galaxy Roles and Collections
This commit is contained in:
parent
c1e1897cda
commit
2aed20393f
3553 changed files with 387444 additions and 2 deletions
|
|
@ -0,0 +1,423 @@
|
|||
# Copyright (c) 2020, Felix Fontein <felix@fontein.de>
|
||||
# For the parts taken from the docker inventory script:
|
||||
# Copyright (c) 2016, Paul Durivage <paul.durivage@gmail.com>
|
||||
# Copyright (c) 2016, Chris Houseknecht <house@redhat.com>
|
||||
# Copyright (c) 2016, James Tanner <jtanner@redhat.com>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
name: docker_containers
|
||||
short_description: Ansible dynamic inventory plugin for Docker containers
|
||||
version_added: 1.1.0
|
||||
author:
|
||||
- Felix Fontein (@felixfontein)
|
||||
extends_documentation_fragment:
|
||||
- ansible.builtin.constructed
|
||||
- community.docker._docker.api_documentation
|
||||
- community.library_inventory_filtering_v1.inventory_filter
|
||||
description:
|
||||
- Reads inventories from the Docker API.
|
||||
- Uses a YAML configuration file that ends with V(docker.(yml|yaml\)).
|
||||
notes:
|
||||
- The configuration file must be a YAML file whose filename ends with V(docker.yml) or V(docker.yaml). Other filenames will
|
||||
not be accepted.
|
||||
options:
|
||||
plugin:
|
||||
description:
|
||||
- The name of this plugin, it should always be set to V(community.docker.docker_containers) for this plugin to recognize
|
||||
it as its own.
|
||||
type: str
|
||||
required: true
|
||||
choices: [community.docker.docker_containers]
|
||||
|
||||
connection_type:
|
||||
description:
|
||||
- Which connection type to use the containers.
|
||||
- One way to connect to containers is to use SSH (V(ssh)). For this, the options O(default_ip) and O(private_ssh_port)
|
||||
are used. This requires that a SSH daemon is running inside the containers.
|
||||
- Alternatively, V(docker-cli) selects the P(community.docker.docker#connection) connection plugin, and V(docker-api)
|
||||
(default) selects the P(community.docker.docker_api#connection) connection plugin.
|
||||
- When V(docker-api) is used, all Docker daemon configuration values are passed from the inventory plugin to the connection
|
||||
plugin. This can be controlled with O(configure_docker_daemon).
|
||||
- Note that the P(community.docker.docker_api#connection) does B(not work with TCP TLS sockets)!
|
||||
See U(https://github.com/ansible-collections/community.docker/issues/605) for more information.
|
||||
type: str
|
||||
default: docker-api
|
||||
choices:
|
||||
- ssh
|
||||
- docker-cli
|
||||
- docker-api
|
||||
|
||||
configure_docker_daemon:
|
||||
description:
|
||||
- Whether to pass all Docker daemon configuration from the inventory plugin to the connection plugin.
|
||||
- Only used when O(connection_type=docker-api).
|
||||
type: bool
|
||||
default: true
|
||||
version_added: 1.8.0
|
||||
|
||||
verbose_output:
|
||||
description:
|
||||
- Toggle to (not) include all available inspection metadata.
|
||||
- Note that all top-level keys will be transformed to the format C(docker_xxx). For example, C(HostConfig) is converted
|
||||
to C(docker_hostconfig).
|
||||
- If this is V(false), these values can only be used during O(compose), O(groups), and O(keyed_groups).
|
||||
- The C(docker) inventory script always added these variables, so for compatibility set this to V(true).
|
||||
type: bool
|
||||
default: false
|
||||
|
||||
default_ip:
|
||||
description:
|
||||
- The IP address to assign to ansible_host when the container's SSH port is mapped to interface '0.0.0.0'.
|
||||
- Only used if O(connection_type) is V(ssh).
|
||||
type: str
|
||||
default: 127.0.0.1
|
||||
|
||||
private_ssh_port:
|
||||
description:
|
||||
- The port containers use for SSH.
|
||||
- Only used if O(connection_type) is V(ssh).
|
||||
type: int
|
||||
default: 22
|
||||
|
||||
add_legacy_groups:
|
||||
description:
|
||||
- 'Add the same groups as the C(docker) inventory script does. These are the following:'
|
||||
- 'C(<container id>): contains the container of this ID.'
|
||||
- 'C(<container name>): contains the container that has this name.'
|
||||
- 'C(<container short id>): contains the containers that have this short ID (first 13 letters of ID).'
|
||||
- 'C(image_<image name>): contains the containers that have the image C(<image name>).'
|
||||
- 'C(stack_<stack name>): contains the containers that belong to the stack C(<stack name>).'
|
||||
- 'C(service_<service name>): contains the containers that belong to the service C(<service name>).'
|
||||
- 'C(<docker_host>): contains the containers which belong to the Docker daemon O(docker_host). Useful if you run this
|
||||
plugin against multiple Docker daemons.'
|
||||
- 'C(running): contains all containers that are running.'
|
||||
- 'C(stopped): contains all containers that are not running.'
|
||||
- If this is not set to V(true), you should use keyed groups to add the containers to groups. See the examples for how
|
||||
to do that.
|
||||
type: bool
|
||||
default: false
|
||||
|
||||
filters:
|
||||
version_added: 3.5.0
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
---
|
||||
# Minimal example using local Docker daemon
|
||||
plugin: community.docker.docker_containers
|
||||
docker_host: unix:///var/run/docker.sock
|
||||
|
||||
---
|
||||
# Minimal example using remote Docker daemon
|
||||
plugin: community.docker.docker_containers
|
||||
docker_host: tcp://my-docker-host:2375
|
||||
|
||||
---
|
||||
# Example using remote Docker daemon with unverified TLS
|
||||
plugin: community.docker.docker_containers
|
||||
docker_host: tcp://my-docker-host:2376
|
||||
tls: true
|
||||
|
||||
---
|
||||
# Example using remote Docker daemon with verified TLS and client certificate verification
|
||||
plugin: community.docker.docker_containers
|
||||
docker_host: tcp://my-docker-host:2376
|
||||
validate_certs: true
|
||||
ca_path: /somewhere/ca.pem
|
||||
client_key: /somewhere/key.pem
|
||||
client_cert: /somewhere/cert.pem
|
||||
|
||||
---
|
||||
# Example using constructed features to create groups
|
||||
plugin: community.docker.docker_containers
|
||||
docker_host: tcp://my-docker-host:2375
|
||||
strict: false
|
||||
keyed_groups:
|
||||
# Add containers with primary network foo to a network_foo group
|
||||
- prefix: network
|
||||
key: 'docker_hostconfig.NetworkMode'
|
||||
# Add Linux hosts to an os_linux group
|
||||
- prefix: os
|
||||
key: docker_platform
|
||||
|
||||
---
|
||||
# Example using SSH connection with an explicit fallback for when port 22 has not been
|
||||
# exported: use container name as ansible_ssh_host and 22 as ansible_ssh_port
|
||||
plugin: community.docker.docker_containers
|
||||
connection_type: ssh
|
||||
compose:
|
||||
ansible_ssh_host: ansible_ssh_host | default(docker_name[1:], true)
|
||||
ansible_ssh_port: ansible_ssh_port | default(22, true)
|
||||
|
||||
---
|
||||
# Only consider containers which have a label 'foo', or whose name starts with 'a'
|
||||
plugin: community.docker.docker_containers
|
||||
filters:
|
||||
# Accept all containers which have a label called 'foo'
|
||||
- include: >-
|
||||
"foo" in docker_config.Labels
|
||||
# Next accept all containers whose inventory_hostname starts with 'a'
|
||||
- include: >-
|
||||
inventory_hostname.startswith("a")
|
||||
# Exclude all containers that did not match any of the above filters
|
||||
- exclude: true
|
||||
"""
|
||||
|
||||
import re
|
||||
import typing as t
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||
from ansible_collections.community.library_inventory_filtering_v1.plugins.plugin_utils.inventory_filter import (
|
||||
filter_host,
|
||||
parse_filters,
|
||||
)
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._api.errors import (
|
||||
APIError,
|
||||
DockerException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._common_api import (
|
||||
RequestException,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._util import (
|
||||
DOCKER_COMMON_ARGS_VARS,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.plugin_utils._common_api import (
|
||||
AnsibleDockerClient,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.plugin_utils._unsafe import (
|
||||
make_unsafe,
|
||||
)
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from ansible.inventory.data import InventoryData
|
||||
from ansible.parsing.dataloader import DataLoader
|
||||
|
||||
|
||||
# Minimum Docker API version required by this plugin. None means no explicit
# minimum is requested when constructing the client (see _create_client).
MIN_DOCKER_API = None
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable):
    """Host inventory parser for ansible using Docker daemon as source."""

    NAME = "community.docker.docker_containers"

    def _slugify(self, value: str) -> str:
        """Turn an inspect key (e.g. C(HostConfig)) into a fact name like C(docker_hostconfig).

        Non-word characters (everything except letters, digits, ``_`` and ``-``)
        are replaced by underscores, the result is lower-cased, and leading
        underscores are stripped before the ``docker_`` prefix is added.
        """
        slug = re.sub(r"[^\w-]", "_", value).lower().lstrip("_")
        return f"docker_{slug}"

    def _populate(self, client: AnsibleDockerClient) -> None:
        """List all containers from the Docker daemon and add them to the inventory.

        :param client: connected Docker API client used for the REST calls.
        :raises AnsibleError: when listing or inspecting containers fails.
        """
        strict = self.get_option("strict")

        ssh_port = self.get_option("private_ssh_port")
        default_ip = self.get_option("default_ip")
        hostname = self.get_option("docker_host")
        verbose_output = self.get_option("verbose_output")
        connection_type = self.get_option("connection_type")
        add_legacy_groups = self.get_option("add_legacy_groups")

        # The base class declares self.inventory as optional; parse() always
        # sets it, so this is a sanity check (and narrows the type for below).
        if self.inventory is None:
            raise AssertionError("Inventory must be there")

        try:
            # Query all containers (running and stopped), without size
            # information and without truncating commands.
            params = {
                "limit": -1,
                "all": 1,
                "size": 0,
                "trunc_cmd": 0,
                "since": None,
                "before": None,
            }
            containers = client.get_json("/containers/json", params=params)
        except APIError as exc:
            raise AnsibleError(f"Error listing containers: {exc}") from exc

        if add_legacy_groups:
            # Create the legacy running/stopped groups up-front so they exist
            # even when no container falls into one of them.
            self.inventory.add_group("running")
            self.inventory.add_group("stopped")

        # Docker daemon connection settings forwarded to the docker-api
        # connection plugin (only when configure_docker_daemon is enabled).
        extra_facts = {}
        if self.get_option("configure_docker_daemon"):
            for option_name, var_name in DOCKER_COMMON_ARGS_VARS.items():
                value = self.get_option(option_name)
                if value is not None:
                    extra_facts[var_name] = value

        filters = parse_filters(self.get_option("filters"))
        for container in containers:
            container_id = container.get("Id")
            short_container_id = container_id[:13]

            # Prefer the first container name (without the leading '/');
            # fall back to the (short) ID for unnamed containers.
            try:
                name = container.get("Names", [])[0].lstrip("/")
                full_name = name
            except IndexError:
                name = short_container_id
                full_name = container_id

            # facts: variables always assigned to the host.
            # full_facts: superset additionally containing all inspect data,
            # used for filtering and the constructed features.
            facts = {
                "docker_name": make_unsafe(name),
                "docker_short_id": make_unsafe(short_container_id),
            }
            full_facts = {}

            try:
                inspect = client.get_json("/containers/{0}/json", container_id)
            except APIError as exc:
                raise AnsibleError(
                    f"Error inspecting container {name} - {exc}"
                ) from exc

            state = inspect.get("State") or {}
            config = inspect.get("Config") or {}
            labels = config.get("Labels") or {}

            running = state.get("Running")

            groups = []

            # Add container to groups
            image_name = config.get("Image")
            if image_name and add_legacy_groups:
                groups.append(f"image_{image_name}")

            # Stack/service names come from Docker Swarm labels; they are
            # exposed as facts and, optionally, as legacy groups.
            stack_name = labels.get("com.docker.stack.namespace")
            if stack_name:
                full_facts["docker_stack"] = stack_name
                if add_legacy_groups:
                    groups.append(f"stack_{stack_name}")

            service_name = labels.get("com.docker.swarm.service.name")
            if service_name:
                full_facts["docker_service"] = service_name
                if add_legacy_groups:
                    groups.append(f"service_{service_name}")

            ansible_connection = None
            if connection_type == "ssh":
                # Figure out ssh IP and Port
                try:
                    # Lookup the public facing port Nat'ed to ssh port.
                    network_settings = inspect.get("NetworkSettings") or {}
                    port_settings = network_settings.get("Ports") or {}
                    port = port_settings.get(f"{ssh_port}/tcp")[0]  # type: ignore[index]
                except (IndexError, AttributeError, TypeError):
                    # No published SSH port -> fall back to an empty mapping,
                    # which yields ip == "" and port 0 below.
                    port = {}

                try:
                    # A 0.0.0.0 binding means "all interfaces"; substitute the
                    # configured default_ip so the host is actually reachable.
                    ip = default_ip if port["HostIp"] == "0.0.0.0" else port["HostIp"]
                except KeyError:
                    ip = ""

                facts.update(
                    {
                        "ansible_ssh_host": ip,
                        "ansible_ssh_port": port.get("HostPort", 0),
                    }
                )
            elif connection_type == "docker-cli":
                facts.update(
                    {
                        "ansible_host": full_name,
                    }
                )
                ansible_connection = "community.docker.docker"
            elif connection_type == "docker-api":
                facts.update(
                    {
                        "ansible_host": full_name,
                    }
                )
                # Forward the daemon connection settings collected above.
                facts.update(extra_facts)
                ansible_connection = "community.docker.docker_api"

            # Merge everything into full_facts, including all inspect keys
            # converted to docker_xxx fact names.
            full_facts.update(facts)
            for key, value in inspect.items():
                fact_key = self._slugify(key)
                full_facts[fact_key] = value

            full_facts = make_unsafe(full_facts)

            # Only set ansible_connection if the user did not override it
            # (e.g. via compose or inspect data).
            if ansible_connection:
                for d in (facts, full_facts):
                    if "ansible_connection" not in d:
                        d["ansible_connection"] = ansible_connection

            # Skip hosts excluded by the configured filters.
            if not filter_host(self, name, full_facts, filters):
                continue

            # With verbose_output, assign the full inspect data to the host.
            if verbose_output:
                facts.update(full_facts)

            self.inventory.add_host(name)
            for group in groups:
                self.inventory.add_group(group)
                self.inventory.add_host(name, group=group)

            for key, value in facts.items():
                self.inventory.set_variable(name, key, value)

            # Use constructed if applicable
            # Composed variables
            self._set_composite_vars(
                self.get_option("compose"), full_facts, name, strict=strict
            )
            # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
            self._add_host_to_composed_groups(
                self.get_option("groups"), full_facts, name, strict=strict
            )
            # Create groups based on variable values and add the corresponding hosts to it
            self._add_host_to_keyed_groups(
                self.get_option("keyed_groups"), full_facts, name, strict=strict
            )

            # We need to do this last since we also add a group called `name`.
            # When we do this before a set_variable() call, the variables are assigned
            # to the group, and not to the host.
            if add_legacy_groups:
                self.inventory.add_group(container_id)
                self.inventory.add_host(name, group=container_id)
                self.inventory.add_group(name)
                self.inventory.add_host(name, group=name)
                self.inventory.add_group(short_container_id)
                self.inventory.add_host(name, group=short_container_id)
                self.inventory.add_group(hostname)
                self.inventory.add_host(name, group=hostname)

                if running is True:
                    self.inventory.add_host(name, group="running")
                else:
                    self.inventory.add_host(name, group="stopped")

    def verify_file(self, path: str) -> bool:
        """Return the possibility of a file being consumable by this plugin.

        Only filenames ending in ``docker.yaml`` or ``docker.yml`` are accepted.
        """
        return super().verify_file(path) and path.endswith(
            ("docker.yaml", "docker.yml")
        )

    def _create_client(self) -> AnsibleDockerClient:
        """Create the Docker API client used by :meth:`_populate`."""
        return AnsibleDockerClient(self, min_docker_api_version=MIN_DOCKER_API)

    def parse(
        self,
        inventory: InventoryData,
        loader: DataLoader,
        path: str,
        cache: bool = True,
    ) -> None:
        """Parse the inventory source file and populate the inventory.

        :param inventory: inventory object to populate.
        :param loader: Ansible DataLoader used to read the config file.
        :param path: path to the inventory configuration file.
        :param cache: accepted for API compatibility with the base class.
        :raises AnsibleError: on any Docker API or transport-level failure.
        """
        super().parse(inventory, loader, path, cache)
        self._read_config_data(path)
        client = self._create_client()
        try:
            self._populate(client)
        except DockerException as e:
            raise AnsibleError(f"An unexpected Docker error occurred: {e}") from e
        except RequestException as e:
            raise AnsibleError(
                f"An unexpected requests error occurred when trying to talk to the Docker daemon: {e}"
            ) from e
|
||||
|
|
@ -0,0 +1,359 @@
|
|||
# Copyright (c) 2019, Ximon Eighteen <ximon.eighteen@gmail.com>
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
name: docker_machine
|
||||
author: Ximon Eighteen (@ximon18)
|
||||
short_description: Docker Machine inventory source
|
||||
requirements:
|
||||
- L(Docker Machine,https://docs.docker.com/machine/)
|
||||
extends_documentation_fragment:
|
||||
- ansible.builtin.constructed
|
||||
- community.library_inventory_filtering_v1.inventory_filter
|
||||
description:
|
||||
- Get inventory hosts from Docker Machine.
|
||||
- Uses a YAML configuration file that ends with V(docker_machine.(yml|yaml\)).
|
||||
- The plugin sets standard host variables C(ansible_host), C(ansible_port), C(ansible_user) and C(ansible_ssh_private_key).
|
||||
- The plugin stores the Docker Machine 'env' output variables in C(dm_) prefixed host variables.
|
||||
notes:
|
||||
- The configuration file must be a YAML file whose filename ends with V(docker_machine.yml) or V(docker_machine.yaml). Other
|
||||
filenames will not be accepted.
|
||||
options:
|
||||
plugin:
|
||||
description: Token that ensures this is a source file for the C(docker_machine) plugin.
|
||||
required: true
|
||||
choices: ['docker_machine', 'community.docker.docker_machine']
|
||||
daemon_env:
|
||||
description:
|
||||
- Whether docker daemon connection environment variables should be fetched, and how to behave if they cannot be fetched.
|
||||
- With V(require) and V(require-silently), fetch them and skip any host for which they cannot be fetched. A warning
|
||||
will be issued for any skipped host if the choice is V(require).
|
||||
- With V(optional) and V(optional-silently), fetch them and not skip hosts for which they cannot be fetched. A warning
|
||||
will be issued for hosts where they cannot be fetched if the choice is V(optional).
|
||||
- With V(skip), do not attempt to fetch the docker daemon connection environment variables.
|
||||
- If fetched successfully, the variables will be prefixed with C(dm_) and stored as host variables.
|
||||
type: str
|
||||
choices:
|
||||
- require
|
||||
- require-silently
|
||||
- optional
|
||||
- optional-silently
|
||||
- skip
|
||||
default: require
|
||||
running_required:
|
||||
description:
|
||||
- When V(true), hosts which Docker Machine indicates are in a state other than C(running) will be skipped.
|
||||
type: bool
|
||||
default: true
|
||||
verbose_output:
|
||||
description:
|
||||
- When V(true), include all available nodes metadata (for example C(Image), C(Region), C(Size)) as a JSON object named
|
||||
C(docker_machine_node_attributes).
|
||||
type: bool
|
||||
default: true
|
||||
filters:
|
||||
version_added: 3.5.0
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
---
|
||||
# Minimal example
|
||||
plugin: community.docker.docker_machine
|
||||
|
||||
---
|
||||
# Example using constructed features to create a group per Docker Machine driver
|
||||
# (https://docs.docker.com/machine/drivers/), for example:
|
||||
# $ docker-machine create --driver digitalocean ... mymachine
|
||||
# $ ansible-inventory -i ./path/to/docker-machine.yml --host=mymachine
|
||||
# {
|
||||
# ...
|
||||
# "digitalocean": {
|
||||
# "hosts": [
|
||||
# "mymachine"
|
||||
# ]
|
||||
# ...
|
||||
# }
|
||||
plugin: community.docker.docker_machine
|
||||
strict: false
|
||||
keyed_groups:
|
||||
- separator: ''
|
||||
key: docker_machine_node_attributes.DriverName
|
||||
|
||||
---
|
||||
# Example grouping hosts by Digital Machine tag
|
||||
plugin: community.docker.docker_machine
|
||||
strict: false
|
||||
keyed_groups:
|
||||
- prefix: tag
|
||||
key: 'dm_tags'
|
||||
|
||||
---
|
||||
# Example using compose to override the default SSH behaviour of asking the user to accept the remote host key
|
||||
plugin: community.docker.docker_machine
|
||||
compose:
|
||||
ansible_ssh_common_args: '"-o StrictHostKeyChecking=accept-new"'
|
||||
"""
|
||||
|
||||
import json
|
||||
import re
|
||||
import subprocess
|
||||
import typing as t
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils.common.process import get_bin_path
|
||||
from ansible.module_utils.common.text.converters import to_text
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable, Constructable
|
||||
from ansible.utils.display import Display
|
||||
from ansible_collections.community.library_inventory_filtering_v1.plugins.plugin_utils.inventory_filter import (
|
||||
filter_host,
|
||||
parse_filters,
|
||||
)
|
||||
|
||||
from ansible_collections.community.docker.plugins.plugin_utils._unsafe import (
|
||||
make_unsafe,
|
||||
)
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from ansible.inventory.data import InventoryData
|
||||
from ansible.parsing.dataloader import DataLoader
|
||||
|
||||
# Type alias for the accepted values of the plugin's `daemon_env` option.
DaemonEnv = t.Literal[
    "require", "require-silently", "optional", "optional-silently", "skip"
]


# Module-level Display instance used for debug and warning output.
display = Display()
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
    """Host inventory parser for ansible using Docker machine as source."""

    NAME = "community.docker.docker_machine"

    # Cached absolute path of the docker-machine binary; resolved lazily on
    # first use in _run_command.
    docker_machine_path: str | None = None

    def _run_command(self, args: list[str]) -> str:
        """Run ``docker-machine`` with the given arguments and return its stripped stdout.

        :param args: subcommand and arguments, e.g. ``["ls", "-q"]``.
        :raises AnsibleError: when the docker-machine binary cannot be found.
        :raises subprocess.CalledProcessError: when the command exits non-zero
            (a warning is logged before re-raising so callers can decide how
            to degrade).
        """
        if not self.docker_machine_path:
            try:
                self.docker_machine_path = get_bin_path("docker-machine")
            except ValueError as e:
                raise AnsibleError(to_text(e)) from e

        command = [self.docker_machine_path]
        command.extend(args)
        display.debug(f"Executing command {command}")
        try:
            result = subprocess.check_output(command)
        except subprocess.CalledProcessError as e:
            display.warning(
                f"Exception {type(e).__name__} caught while executing command {command}, this was the original exception: {e}"
            )
            raise e

        return to_text(result).strip()

    def _get_docker_daemon_variables(self, machine_name: str) -> list[tuple[str, str]]:
        """
        Capture settings from Docker Machine that would be needed to connect to the remote Docker daemon installed on
        the Docker Machine remote host. Note: passing '--shell=sh' is a workaround for 'Error: Unknown shell'.
        """
        try:
            env_lines = self._run_command(
                ["env", "--shell=sh", machine_name]
            ).splitlines()
        except subprocess.CalledProcessError:
            # This can happen when the machine is created but provisioning is incomplete
            return []

        # example output of docker-machine env --shell=sh:
        # export DOCKER_TLS_VERIFY="1"
        # export DOCKER_HOST="tcp://134.209.204.160:2376"
        # export DOCKER_CERT_PATH="/root/.docker/machine/machines/routinator"
        # export DOCKER_MACHINE_NAME="routinator"
        # # Run this command to configure your shell:
        # # eval $(docker-machine env --shell=bash routinator)

        # capture any of the DOCKER_xxx variables that were output and create Ansible host vars
        # with the same name and value but with a dm_ name prefix.
        env_vars = []
        for line in env_lines:
            match = re.search('(DOCKER_[^=]+)="([^"]+)"', line)
            if match:
                env_var_name = match.group(1)
                env_var_value = match.group(2)
                env_vars.append((env_var_name, env_var_value))

        return env_vars

    def _get_machine_names(self) -> list[str]:
        """Return the names of all Docker Machine machines (empty list on command failure)."""
        # Filter out machines that are not in the Running state as we probably cannot do anything useful actions
        # with them.
        ls_command = ["ls", "-q"]
        if self.get_option("running_required"):
            ls_command.extend(["--filter", "state=Running"])

        try:
            ls_lines = self._run_command(ls_command)
        except subprocess.CalledProcessError:
            return []

        return ls_lines.splitlines()

    def _inspect_docker_machine_host(self, node: str) -> t.Any | None:
        """Return the parsed ``docker-machine inspect`` JSON for *node*, or None on failure."""
        try:
            inspect_lines = self._run_command(["inspect", node])
        except subprocess.CalledProcessError:
            return None

        return json.loads(inspect_lines)

    def _ip_addr_docker_machine_host(self, node: str) -> t.Any | None:
        """Return the IP address reported by ``docker-machine ip`` for *node*, or None on failure."""
        try:
            ip_addr = self._run_command(["ip", node])
        except subprocess.CalledProcessError:
            return None

        return ip_addr

    def _should_skip_host(
        self,
        machine_name: str,
        env_var_tuples: list[tuple[str, str]],
        daemon_env: DaemonEnv,
    ) -> bool:
        """Decide whether *machine_name* must be skipped because its daemon env vars could not be fetched.

        Returns True only when no env vars were fetched and ``daemon_env`` is
        V(require) or V(require-silently); a warning is emitted for the
        non-silent variants.
        """
        if not env_var_tuples:
            warning_prefix = f"Unable to fetch Docker daemon env vars from Docker Machine for host {machine_name}"
            if daemon_env in ("require", "require-silently"):
                if daemon_env == "require":
                    display.warning(f"{warning_prefix}: host will be skipped")
                return True
            if daemon_env == "optional":
                display.warning(
                    f"{warning_prefix}: host will lack dm_DOCKER_xxx variables"
                )
        # daemon_env is 'optional-silently'
        return False

    def _populate(self) -> None:
        """Enumerate Docker Machine hosts and add them (with variables and groups) to the inventory.

        :raises AnsibleError: any error while talking to docker-machine is
            wrapped into a single AnsibleError at the end.
        """
        if self.inventory is None:
            raise AssertionError("Inventory must be there")

        daemon_env: DaemonEnv = self.get_option("daemon_env")
        filters = parse_filters(self.get_option("filters"))
        try:
            for node in self._get_machine_names():
                node_attrs = self._inspect_docker_machine_host(node)
                if not node_attrs:
                    continue

                unsafe_node_attrs = make_unsafe(node_attrs)

                machine_name = unsafe_node_attrs["Driver"]["MachineName"]
                if not filter_host(self, machine_name, unsafe_node_attrs, filters):
                    continue

                # query `docker-machine env` to obtain remote Docker daemon connection settings in the form of commands
                # that could be used to set environment variables to influence a local Docker client:
                if daemon_env == "skip":
                    env_var_tuples = []
                else:
                    env_var_tuples = self._get_docker_daemon_variables(machine_name)
                    if self._should_skip_host(machine_name, env_var_tuples, daemon_env):
                        continue

                # add an entry in the inventory for this host
                self.inventory.add_host(machine_name)

                # check for valid ip address from inspect output, else explicitly use ip command to find host ip address
                # this works around an issue seen with Google Compute Platform where the IP address was not available
                # via the 'inspect' subcommand but was via the 'ip' subcomannd.
                if unsafe_node_attrs["Driver"]["IPAddress"]:
                    ip_addr = unsafe_node_attrs["Driver"]["IPAddress"]
                else:
                    ip_addr = self._ip_addr_docker_machine_host(node)

                # set standard Ansible remote host connection settings to details captured from `docker-machine`
                # see: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html
                self.inventory.set_variable(
                    machine_name, "ansible_host", make_unsafe(ip_addr)
                )
                self.inventory.set_variable(
                    machine_name, "ansible_port", unsafe_node_attrs["Driver"]["SSHPort"]
                )
                self.inventory.set_variable(
                    machine_name, "ansible_user", unsafe_node_attrs["Driver"]["SSHUser"]
                )
                self.inventory.set_variable(
                    machine_name,
                    "ansible_ssh_private_key_file",
                    unsafe_node_attrs["Driver"]["SSHKeyPath"],
                )

                # set variables based on Docker Machine tags
                tags = unsafe_node_attrs["Driver"].get("Tags") or ""
                self.inventory.set_variable(machine_name, "dm_tags", make_unsafe(tags))

                # set variables based on Docker Machine env variables
                for kv in env_var_tuples:
                    self.inventory.set_variable(
                        machine_name, f"dm_{kv[0]}", make_unsafe(kv[1])
                    )

                if self.get_option("verbose_output"):
                    self.inventory.set_variable(
                        machine_name,
                        "docker_machine_node_attributes",
                        unsafe_node_attrs,
                    )

                # Use constructed if applicable
                strict = self.get_option("strict")

                # Composed variables
                self._set_composite_vars(
                    self.get_option("compose"),
                    unsafe_node_attrs,
                    machine_name,
                    strict=strict,
                )

                # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
                self._add_host_to_composed_groups(
                    self.get_option("groups"),
                    unsafe_node_attrs,
                    machine_name,
                    strict=strict,
                )

                # Create groups based on variable values and add the corresponding hosts to it
                self._add_host_to_keyed_groups(
                    self.get_option("keyed_groups"),
                    unsafe_node_attrs,
                    machine_name,
                    strict=strict,
                )

        except Exception as e:
            raise AnsibleError(
                f"Unable to fetch hosts from Docker Machine, this was the original exception: {e}"
            ) from e

    def verify_file(self, path: str) -> bool:
        """Return the possibility of a file being consumable by this plugin.

        Only filenames ending in ``docker_machine.yaml`` or
        ``docker_machine.yml`` are accepted.
        """
        return super().verify_file(path) and path.endswith(
            ("docker_machine.yaml", "docker_machine.yml")
        )

    def parse(
        self,
        inventory: InventoryData,
        loader: DataLoader,
        path: str,
        cache: bool = True,
    ) -> None:
        """Parse the inventory source file and populate the inventory.

        :param inventory: inventory object to populate.
        :param loader: Ansible DataLoader used to read the config file.
        :param path: path to the inventory configuration file.
        :param cache: accepted for API compatibility with the base class.
        """
        super().parse(inventory, loader, path, cache)
        self._read_config_data(path)
        self._populate()
|
||||
|
|
@ -0,0 +1,338 @@
|
|||
# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
|
||||
# Copyright (c) 2018 Ansible Project
|
||||
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
# SPDX-License-Identifier: GPL-3.0-or-later
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
DOCUMENTATION = r"""
|
||||
name: docker_swarm
|
||||
author:
|
||||
- Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
|
||||
short_description: Ansible dynamic inventory plugin for Docker swarm nodes
|
||||
requirements:
|
||||
- L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
|
||||
extends_documentation_fragment:
|
||||
- ansible.builtin.constructed
|
||||
- community.library_inventory_filtering_v1.inventory_filter
|
||||
description:
|
||||
- Reads inventories from the Docker swarm API.
|
||||
- Uses a YAML configuration file that ends with V(docker_swarm.(yml|yaml\)).
|
||||
- 'The plugin returns following groups of swarm nodes: C(all) - all hosts; C(workers) - all worker nodes; C(managers) -
|
||||
all manager nodes; C(leader) - the swarm leader node; C(nonleaders) - all nodes except the swarm leader.'
|
||||
notes:
|
||||
- The configuration file must be a YAML file whose filename ends with V(docker_swarm.yml) or V(docker_swarm.yaml). Other
|
||||
filenames will not be accepted.
|
||||
options:
|
||||
plugin:
|
||||
description: The name of this plugin, it should always be set to V(community.docker.docker_swarm) for this plugin to recognize
|
||||
it as its own.
|
||||
type: str
|
||||
required: true
|
||||
choices: [docker_swarm, community.docker.docker_swarm]
|
||||
docker_host:
|
||||
description:
|
||||
- Socket of a Docker swarm manager node (C(tcp), C(unix)).
|
||||
- Use V(unix:///var/run/docker.sock) to connect through a local socket.
|
||||
type: str
|
||||
required: true
|
||||
aliases: [docker_url]
|
||||
verbose_output:
|
||||
description: Toggle to (not) include all available nodes metadata (for example C(Platform), C(Architecture), C(OS), C(EngineVersion)).
|
||||
type: bool
|
||||
default: true
|
||||
tls:
|
||||
description: Connect using TLS without verifying the authenticity of the Docker host server.
|
||||
type: bool
|
||||
default: false
|
||||
validate_certs:
|
||||
description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker host server.
|
||||
type: bool
|
||||
default: false
|
||||
aliases: [tls_verify]
|
||||
client_key:
|
||||
description: Path to the client's TLS key file.
|
||||
type: path
|
||||
aliases: [tls_client_key, key_path]
|
||||
ca_path:
|
||||
description:
|
||||
- Use a CA certificate when performing server verification by providing the path to a CA certificate file.
|
||||
- This option was called O(ca_cert) and got renamed to O(ca_path) in community.docker 3.6.0. The old name has been added
|
||||
as an alias and can still be used.
|
||||
type: path
|
||||
aliases: [ca_cert, tls_ca_cert, cacert_path]
|
||||
client_cert:
|
||||
description: Path to the client's TLS certificate file.
|
||||
type: path
|
||||
aliases: [tls_client_cert, cert_path]
|
||||
tls_hostname:
|
||||
description: When verifying the authenticity of the Docker host server, provide the expected name of the server.
|
||||
type: str
|
||||
api_version:
|
||||
description:
|
||||
- The version of the Docker API running on the Docker Host.
|
||||
- Defaults to the latest version of the API supported by Docker SDK for Python.
|
||||
type: str
|
||||
aliases: [docker_api_version]
|
||||
timeout:
|
||||
description:
|
||||
- The maximum amount of time in seconds to wait on a response from the API.
|
||||
- The maximum amount of time in seconds to wait on a response from the API.
- If the value is not specified in the task, the value of environment variable E(DOCKER_TIMEOUT) will be used instead.
|
||||
If the environment variable is not set, the default value will be used.
|
||||
type: int
|
||||
default: 60
|
||||
aliases: [time_out]
|
||||
use_ssh_client:
|
||||
description:
|
||||
- For SSH transports, use the C(ssh) CLI tool instead of paramiko.
|
||||
- Requires Docker SDK for Python 4.4.0 or newer.
|
||||
type: bool
|
||||
default: false
|
||||
version_added: 1.5.0
|
||||
include_host_uri:
|
||||
description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the swarm leader
|
||||
in format of V(tcp://172.16.0.1:2376). This value may be used without additional modification as value of option O(docker_host)
|
||||
in Docker Swarm modules when connecting through the API. The port always defaults to V(2376).
|
||||
type: bool
|
||||
default: false
|
||||
include_host_uri_port:
|
||||
description: Override the detected port number included in C(ansible_host_uri).
|
||||
type: int
|
||||
filters:
|
||||
version_added: 3.5.0
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
---
|
||||
# Minimal example using local docker
|
||||
plugin: community.docker.docker_swarm
|
||||
docker_host: unix:///var/run/docker.sock
|
||||
|
||||
---
|
||||
# Minimal example using remote docker
|
||||
plugin: community.docker.docker_swarm
|
||||
docker_host: tcp://my-docker-host:2375
|
||||
|
||||
---
|
||||
# Example using remote docker with unverified TLS
|
||||
plugin: community.docker.docker_swarm
|
||||
docker_host: tcp://my-docker-host:2376
|
||||
tls: true
|
||||
|
||||
---
|
||||
# Example using remote docker with verified TLS and client certificate verification
|
||||
plugin: community.docker.docker_swarm
|
||||
docker_host: tcp://my-docker-host:2376
|
||||
validate_certs: true
|
||||
ca_path: /somewhere/ca.pem
|
||||
client_key: /somewhere/key.pem
|
||||
client_cert: /somewhere/cert.pem
|
||||
|
||||
---
|
||||
# Example using constructed features to create groups and set ansible_host
|
||||
plugin: community.docker.docker_swarm
|
||||
docker_host: tcp://my-docker-host:2375
|
||||
strict: false
|
||||
keyed_groups:
|
||||
# add for example x86_64 hosts to an arch_x86_64 group
|
||||
- prefix: arch
|
||||
key: 'Description.Platform.Architecture'
|
||||
# add for example linux hosts to an os_linux group
|
||||
- prefix: os
|
||||
key: 'Description.Platform.OS'
|
||||
# create a group per node label
|
||||
# for example a node labeled w/ "production" ends up in group "label_production"
|
||||
# hint: labels containing special characters will be converted to safe names
|
||||
- key: 'Spec.Labels'
|
||||
prefix: label
|
||||
"""
|
||||
|
||||
import typing as t
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.parsing.utils.addresses import parse_address
|
||||
from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
|
||||
from ansible_collections.community.library_inventory_filtering_v1.plugins.plugin_utils.inventory_filter import (
|
||||
filter_host,
|
||||
parse_filters,
|
||||
)
|
||||
|
||||
from ansible_collections.community.docker.plugins.module_utils._common import (
|
||||
get_connect_params,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.module_utils._util import (
|
||||
update_tls_hostname,
|
||||
)
|
||||
from ansible_collections.community.docker.plugins.plugin_utils._unsafe import (
|
||||
make_unsafe,
|
||||
)
|
||||
|
||||
if t.TYPE_CHECKING:
|
||||
from ansible.inventory.data import InventoryData
|
||||
from ansible.parsing.dataloader import DataLoader
|
||||
|
||||
|
||||
# Probe for the optional Docker SDK for Python at import time.  parse()
# checks HAS_DOCKER and raises a user-friendly AnsibleError when the SDK is
# missing, instead of letting an ImportError surface here.
try:
    import docker

    HAS_DOCKER = True
except ImportError:
    HAS_DOCKER = False
|
||||
|
||||
|
||||
class InventoryModule(BaseInventoryPlugin, Constructable):
    """Host inventory parser for ansible using Docker swarm as source."""

    NAME = "community.docker.docker_swarm"

    def _fail(self, msg: str) -> t.NoReturn:
        # Adapter passed to get_connect_params() so connection errors are
        # reported through Ansible's standard exception type.
        raise AnsibleError(msg)

    def _populate(self) -> None:
        """Query the swarm manager API and fill the inventory.

        Each swarm node is added as a host (keyed by its node ID) to the
        ``all`` group, its role group (``manager``/``worker`` — the raw
        ``Spec.Role`` value), and either ``leader`` or ``nonleaders``.
        Sets ``ansible_host`` (and, depending on options,
        ``ansible_host_uri`` and ``docker_swarm_node_attributes``), then
        applies the ``constructed`` features (compose/groups/keyed_groups).
        """
        if self.inventory is None:
            raise AssertionError("Inventory must be there")

        # Translate plugin options into the parameter names expected by
        # update_tls_hostname() / get_connect_params().
        raw_params = {
            "docker_host": self.get_option("docker_host"),
            "tls": self.get_option("tls"),
            "tls_verify": self.get_option("validate_certs"),
            "key_path": self.get_option("client_key"),
            "cacert_path": self.get_option("ca_path"),
            "cert_path": self.get_option("client_cert"),
            "tls_hostname": self.get_option("tls_hostname"),
            "api_version": self.get_option("api_version"),
            "timeout": self.get_option("timeout"),
            "use_ssh_client": self.get_option("use_ssh_client"),
            "debug": None,
        }
        update_tls_hostname(raw_params)
        connect_params = get_connect_params(raw_params, fail_function=self._fail)
        client = docker.DockerClient(**connect_params)
        # Pre-create all groups so they exist even when empty.
        self.inventory.add_group("all")
        self.inventory.add_group("manager")
        self.inventory.add_group("worker")
        self.inventory.add_group("leader")
        self.inventory.add_group("nonleaders")

        filters = parse_filters(self.get_option("filters"))

        # host_uri_port is only defined — and only read — when
        # include_host_uri is enabled.  Default port follows the connection
        # security: 2376 for TLS, 2375 for plain TCP.
        if self.get_option("include_host_uri"):
            if self.get_option("include_host_uri_port"):
                host_uri_port = str(self.get_option("include_host_uri_port"))
            elif self.get_option("tls") or self.get_option("validate_certs"):
                host_uri_port = "2376"
            else:
                host_uri_port = "2375"

        try:
            nodes = client.nodes.list()
            for node in nodes:
                # Re-fetch each node for its full attribute set.
                node_attrs = client.nodes.get(node.id).attrs
                # Mark API-sourced data unsafe so Jinja2 never evaluates it.
                unsafe_node_attrs = make_unsafe(node_attrs)
                if not filter_host(
                    self, unsafe_node_attrs["ID"], unsafe_node_attrs, filters
                ):
                    continue
                self.inventory.add_host(unsafe_node_attrs["ID"])
                # Role group: the raw Spec.Role value ("manager"/"worker").
                self.inventory.add_host(
                    unsafe_node_attrs["ID"], group=unsafe_node_attrs["Spec"]["Role"]
                )
                self.inventory.set_variable(
                    unsafe_node_attrs["ID"],
                    "ansible_host",
                    unsafe_node_attrs["Status"]["Addr"],
                )
                if self.get_option("include_host_uri"):
                    self.inventory.set_variable(
                        unsafe_node_attrs["ID"],
                        "ansible_host_uri",
                        make_unsafe(
                            "tcp://"
                            + unsafe_node_attrs["Status"]["Addr"]
                            + ":"
                            + host_uri_port
                        ),
                    )
                if self.get_option("verbose_output"):
                    self.inventory.set_variable(
                        unsafe_node_attrs["ID"],
                        "docker_swarm_node_attributes",
                        unsafe_node_attrs,
                    )
                if "ManagerStatus" in unsafe_node_attrs:
                    if unsafe_node_attrs["ManagerStatus"].get("Leader"):
                        # Workaround for a Docker bug where the leader IP can
                        # be reported as 0.0.0.0 in ManagerStatus.Addr — fall
                        # back to Status.Addr.  See moby/moby#35437.
                        swarm_leader_ip = (
                            parse_address(node_attrs["ManagerStatus"]["Addr"])[0]
                            or unsafe_node_attrs["Status"]["Addr"]
                        )
                        # For the leader, overwrite ansible_host(_uri) set
                        # above with the (possibly corrected) manager address.
                        if self.get_option("include_host_uri"):
                            self.inventory.set_variable(
                                unsafe_node_attrs["ID"],
                                "ansible_host_uri",
                                make_unsafe(
                                    "tcp://" + swarm_leader_ip + ":" + host_uri_port
                                ),
                            )
                        self.inventory.set_variable(
                            unsafe_node_attrs["ID"],
                            "ansible_host",
                            make_unsafe(swarm_leader_ip),
                        )
                        self.inventory.add_host(unsafe_node_attrs["ID"], group="leader")
                    else:
                        self.inventory.add_host(
                            unsafe_node_attrs["ID"], group="nonleaders"
                        )
                else:
                    # Workers carry no ManagerStatus; they are never leaders.
                    self.inventory.add_host(unsafe_node_attrs["ID"], group="nonleaders")
                # Use constructed if applicable
                strict = self.get_option("strict")
                # Composed variables
                self._set_composite_vars(
                    self.get_option("compose"),
                    unsafe_node_attrs,
                    unsafe_node_attrs["ID"],
                    strict=strict,
                )
                # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
                self._add_host_to_composed_groups(
                    self.get_option("groups"),
                    unsafe_node_attrs,
                    unsafe_node_attrs["ID"],
                    strict=strict,
                )
                # Create groups based on variable values and add the corresponding hosts to it
                self._add_host_to_keyed_groups(
                    self.get_option("keyed_groups"),
                    unsafe_node_attrs,
                    unsafe_node_attrs["ID"],
                    strict=strict,
                )
        except Exception as e:
            raise AnsibleError(
                f"Unable to fetch hosts from Docker swarm API, this was the original exception: {e}"
            ) from e

    def verify_file(self, path: str) -> bool:
        """Return the possibility of a file being consumable by this plugin."""
        return super().verify_file(path) and path.endswith(
            ("docker_swarm.yaml", "docker_swarm.yml")
        )

    def parse(
        self,
        inventory: InventoryData,
        loader: DataLoader,
        path: str,
        cache: bool = True,
    ) -> None:
        """Read the plugin configuration at *path* and populate *inventory*.

        Raises AnsibleError when the Docker SDK for Python is not installed.
        """
        if not HAS_DOCKER:
            raise AnsibleError(
                "The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: "
                "https://github.com/docker/docker-py."
            )
        super().parse(inventory, loader, path, cache)
        self._read_config_data(path)
        self._populate()
||||
Loading…
Add table
Add a link
Reference in a new issue